% vim:ft=erlang:
%
[{internal_auth_backend,
"auth_backends.1 = internal",
[{rabbit,[{auth_backends,[rabbit_auth_backend_internal]}]}],
[]},
{ldap_auth_backend,
"auth_backends.1 = ldap",
[{rabbit,[{auth_backends,[rabbit_auth_backend_ldap]}]}],
[]},
{http_auth_backend,
"auth_backends.1 = http",
[{rabbit,[{auth_backends,[rabbit_auth_backend_http]}]}],
[]},
{oauth2_auth_backend,
"auth_backends.1 = oauth2",
[{rabbit,[{auth_backends,[rabbit_auth_backend_oauth2]}]}],
[]},
{multiple_auth_backends,
"auth_backends.1 = ldap
auth_backends.2 = internal",
[{rabbit,
[{auth_backends,
[rabbit_auth_backend_ldap,rabbit_auth_backend_internal]}]}],
[]},
{full_name_auth_backend,
"auth_backends.1 = ldap
# uses module name instead of a short alias, \"http\"
auth_backends.2 = rabbit_auth_backend_http",
[{rabbit,
[{auth_backends,[rabbit_auth_backend_ldap,rabbit_auth_backend_http]}]}],
[]},
{third_party_auth_backend,
"auth_backends.1.authn = internal
# uses module name because this backend is from a 3rd party
auth_backends.1.authz = rabbit_auth_backend_ip_range",
[{rabbit,
[{auth_backends,
[{rabbit_auth_backend_internal,rabbit_auth_backend_ip_range}]}]}],
[]},
{authn_authz_backend,
"auth_backends.1.authn = ldap
auth_backends.1.authz = internal",
[{rabbit,
[{auth_backends,
[{rabbit_auth_backend_ldap,rabbit_auth_backend_internal}]}]}],
[]},
{authn_authz_multiple_backends,
"auth_backends.1.authn = ldap
auth_backends.1.authz = internal
auth_backends.2 = internal",
[{rabbit,
[{auth_backends,
[{rabbit_auth_backend_ldap,rabbit_auth_backend_internal},
rabbit_auth_backend_internal]}]}],
[]},
{authn_backend_only,
"auth_backends.1.authn = ldap",
[{rabbit,
[{auth_backends,
[{rabbit_auth_backend_ldap,rabbit_auth_backend_ldap}]}]}],
[]},
{ssl_options,
"ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
ssl_options.verify = verify_peer
ssl_options.fail_if_no_peer_cert = true",
[{rabbit,
[{ssl_options,
[{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
{certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
{keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
{verify,verify_peer},
{fail_if_no_peer_cert,true}]}]}],
[]},
{tcp_listener,
"listeners.tcp.default = 5673",
[{rabbit,[{tcp_listeners,[5673]}]}],[]},
{ssl_listener,
"listeners.ssl = none",[{rabbit,[{ssl_listeners,[]}]}],[]},
{num_acceptors,
"num_acceptors.ssl = 1",[{rabbit,[{num_ssl_acceptors,1}]}],[]},
{distribution_listener,
"distribution.listener.interface = 192.168.0.1
distribution.listener.port_range.min = 25679
distribution.listener.port_range.max = 25679",
[{kernel, [
{inet_dist_listen_min, 25679},
{inet_dist_listen_max, 25679},
{inet_dist_use_interface, {192,168,0,1}}
]}],
[]},
{distribution_listener_ipv6,
"distribution.listener.interface = ::1
distribution.listener.port_range.min = 25679
distribution.listener.port_range.max = 25679",
[{kernel, [
{inet_dist_listen_min, 25679},
{inet_dist_listen_max, 25679},
{inet_dist_use_interface, {0,0,0,0,0,0,0,1}}
]}],
[]},
{socket_writer_gc_threshold,
"socket_writer.gc_threshold = 999666111", [{rabbit, [{writer_gc_threshold, 999666111}]}],[]},
{socket_writer_gc_threshold_off,
"socket_writer.gc_threshold = off", [{rabbit, [{writer_gc_threshold, undefined}]}],[]},
{disk_free_limit_absolute_priority,
"disk_free_limit.relative = 1.0
disk_free_limit.absolute = 50000",
[{rabbit, [{disk_free_limit, 50000}]}],[]},
{disk_free_limit_absolute_priority_regardless_of_order,
"disk_free_limit.absolute = 50000
disk_free_limit.relative = 1.0",
[{rabbit, [{disk_free_limit, 50000}]}],[]},
{disk_free_limit_only_relative,
"disk_free_limit.relative = 1.0",
[{rabbit, [{disk_free_limit, {mem_relative, 1.0}}]}],[]},
%%
%% Absolute free disk space limit
%%
{disk_free_limit_only_absolute_integer,
"disk_free_limit.absolute = 50000",
[{rabbit, [{disk_free_limit, 50000}]}],[]},
{disk_free_limit_only_absolute_units_gb,
"disk_free_limit.absolute = 2GB",
[{rabbit,[{disk_free_limit, "2GB"}]}],
[]},
{disk_free_limit_only_absolute_units_gib,
"disk_free_limit.absolute = 2GiB",
[{rabbit,[{disk_free_limit, "2GiB"}]}],
[]},
{disk_free_limit_only_absolute_units_g,
"disk_free_limit.absolute = 2G",
[{rabbit,[{disk_free_limit, "2G"}]}],
[]},
{disk_free_limit_only_absolute_units_tb,
"disk_free_limit.absolute = 2TB",
[{rabbit,[{disk_free_limit, "2TB"}]}],
[]},
{disk_free_limit_only_absolute_units_tib,
"disk_free_limit.absolute = 2TiB",
[{rabbit,[{disk_free_limit, "2TiB"}]}],
[]},
{disk_free_limit_only_absolute_units_t,
"disk_free_limit.absolute = 2T",
[{rabbit,[{disk_free_limit, "2T"}]}],
[]},
{disk_free_limit_only_absolute_units_pb,
"disk_free_limit.absolute = 2PB",
[{rabbit,[{disk_free_limit, "2PB"}]}],
[]},
{disk_free_limit_only_absolute_units_pib,
"disk_free_limit.absolute = 2PiB",
[{rabbit,[{disk_free_limit, "2PiB"}]}],
[]},
{disk_free_limit_only_absolute_units_p,
"disk_free_limit.absolute = 2P",
[{rabbit,[{disk_free_limit, "2P"}]}],
[]},
{default_users,
"
default_users.a.vhost_pattern = banana
default_users.a.tags = administrator,operator
default_users.a.password = SECRET
default_users.a.read = .*
",
[{rabbit, [{default_users, [
{<<"a">>, [{<<"vhost_pattern">>, "banana"},
{<<"tags">>, [administrator, operator]},
{<<"password">>, <<"SECRET">>},
{<<"read">>, ".*"}]}]}]}],
[]},
{default_policies_operator,
"
default_policies.operator.a.expires = 1h
default_policies.operator.a.queue_pattern = apple
default_policies.operator.a.vhost_pattern = banana
default_policies.operator.a.classic_queues.ha_mode = exactly
default_policies.operator.a.classic_queues.ha_params = 2
default_policies.operator.a.classic_queues.ha_sync_mode = automatic
default_policies.operator.a.classic_queues.queue_version = 2
",
[{rabbit, [{default_policies, [{operator, [
{<<"a">>, [{<<"expires">>, 3600000},
{<<"ha_mode">>, <<"exactly">>},
{<<"ha_params">>, 2},
{<<"ha_sync_mode">>, <<"automatic">>},
{<<"queue_pattern">>, <<"apple">>},
{<<"queue_version">>, 2},
{<<"vhost_pattern">>, "banana"}]}]}]}]}],
[]},
{default_vhost_limits,
"
default_limits.vhosts.a.pattern = banana
default_limits.vhosts.a.max_queues = 10
",
[{rabbit, [{default_limits, [{vhosts, [
{<<"a">>, [{<<"pattern">>, "banana"},
{<<"max_queues">>, 10}]}]}]}]}],
[]},
{default_user_settings,
"default_user = guest
default_pass = guest
anonymous_login_user = guest
anonymous_login_pass = guest
default_user_tags.administrator = true
default_permissions.configure = .*
default_permissions.read = .*
default_permissions.write = .*",
[{rabbit,
[{default_user,<<"guest">>},
{default_pass,<<"guest">>},
{anonymous_login_user,<<"guest">>},
{anonymous_login_pass,<<"guest">>},
{default_user_tags,[administrator]},
{default_permissions,[<<".*">>,<<".*">>,<<".*">>]}]}],
[]},
{anonymous_login_user,
"anonymous_login_user = none",
[{rabbit,
[{anonymous_login_user, none}]}],
[]},
{auth_mechanisms_ordered,
"auth_mechanisms.1 = PLAIN
auth_mechanisms.2 = AMQPLAIN
auth_mechanisms.3 = ANONYMOUS",
[],
[{rabbit,
%% We expect the mechanisms in the order as declared.
[{auth_mechanisms, ['PLAIN', 'AMQPLAIN', 'ANONYMOUS']}]
}],
[],
nosort
},
{cluster_formation,
"cluster_formation.peer_discovery_backend = rabbit_peer_discovery_classic_config
cluster_formation.classic_config.nodes.peer1 = rabbit@hostname1
cluster_formation.classic_config.nodes.peer2 = rabbit@hostname2
cluster_formation.node_type = disc",
[{rabbit,
[{cluster_formation,
[{peer_discovery_backend,rabbit_peer_discovery_classic_config},
{node_type,disc}]},
{cluster_nodes,{[rabbit@hostname2,rabbit@hostname1],disc}}]}],
[]},
{cluster_formation_module_classic_confog_alias,
"cluster_formation.peer_discovery_backend = classic_config
cluster_formation.classic_config.nodes.peer1 = rabbit@hostname1
cluster_formation.classic_config.nodes.peer2 = rabbit@hostname2",
[{rabbit,
[{cluster_formation,
[{peer_discovery_backend,rabbit_peer_discovery_classic_config}]},
{cluster_nodes,{[rabbit@hostname2,rabbit@hostname1],disc}}]}],
[]},
{cluster_formation_module_dns_alias,
"cluster_formation.peer_discovery_backend = dns
cluster_formation.dns.hostname = discovery.eng.example.local",
[{rabbit,
[
{cluster_formation,
[{peer_discovery_backend,rabbit_peer_discovery_dns},
{peer_discovery_dns, [
{hostname, <<"discovery.eng.example.local">>}
]}]}
]}],
[]},
{cluster_formation_disk,
"cluster_formation.peer_discovery_backend = rabbit_peer_discovery_classic_config
cluster_formation.classic_config.nodes.peer1 = rabbit@hostname1
cluster_formation.classic_config.nodes.peer2 = rabbit@hostname2
cluster_formation.node_type = disk",
[{rabbit,
[{cluster_formation,
[{peer_discovery_backend,rabbit_peer_discovery_classic_config},
{node_type,disc}]},
{cluster_nodes,{[rabbit@hostname2,rabbit@hostname1],disc}}]}],
[]},
{cluster_formation_ram_ignored,
"cluster_formation.node_type = ram",[],[]},
{cluster_formation_target_cluster_size_hint,
"cluster_formation.target_cluster_size_hint = 3",
[{rabbit, [
{cluster_formation, [
{target_cluster_size_hint, 3}
]}
]}],
[]},
%% registration is enabled by default for the backends that support it
{cluster_formation_explicitly_enable_registration,
"cluster_formation.registration.enabled = true",
[{rabbit,
[{cluster_formation, [
{perform_registration, true}
]}]
}],
[]},
{cluster_formation_opt_out_of_registration,
"cluster_formation.registration.enabled = false",
[{rabbit,
[{cluster_formation, [
{perform_registration, false}
]}]
}],
[]},
{tcp_listen_options,
"tcp_listen_options.backlog = 128
tcp_listen_options.nodelay = true
tcp_listen_options.exit_on_close = false",
[{rabbit,
[{tcp_listen_options,
[{backlog,128},{nodelay,true},{exit_on_close,false}]}]}],
[]},
%%
%% Absolute high runtime memory watermark
%%
{vm_memory_watermark_absolute_integer,
"vm_memory_high_watermark.absolute = 1073741824",
[{rabbit,[{vm_memory_high_watermark,{absolute,1073741824}}]}],
[]},
{vm_memory_watermark_absolute_units_mb,
"vm_memory_high_watermark.absolute = 1024MB",
[{rabbit,[{vm_memory_high_watermark,{absolute,"1024MB"}}]}],
[]},
{vm_memory_watermark_absolute_units_mib,
"vm_memory_high_watermark.absolute = 1024MiB",
[{rabbit,[{vm_memory_high_watermark,{absolute,"1024MiB"}}]}],
[]},
{vm_memory_watermark_absolute_units_m,
"vm_memory_high_watermark.absolute = 1024M",
[{rabbit,[{vm_memory_high_watermark,{absolute,"1024M"}}]}],
[]},
{vm_memory_watermark_absolute_units_gb,
"vm_memory_high_watermark.absolute = 4GB",
[{rabbit,[{vm_memory_high_watermark,{absolute,"4GB"}}]}],
[]},
{vm_memory_watermark_absolute_units_gib,
"vm_memory_high_watermark.absolute = 3GiB",
[{rabbit,[{vm_memory_high_watermark,{absolute,"3GiB"}}]}],
[]},
{vm_memory_watermark_absolute_units_g,
"vm_memory_high_watermark.absolute = 10G",
[{rabbit,[{vm_memory_high_watermark,{absolute,"10G"}}]}],
[]},
{vm_memory_watermark_absolute_units_tb,
"vm_memory_high_watermark.absolute = 1TB",
[{rabbit,[{vm_memory_high_watermark,{absolute,"1TB"}}]}],
[]},
{vm_memory_watermark_absolute_units_tib,
"vm_memory_high_watermark.absolute = 1TiB",
[{rabbit,[{vm_memory_high_watermark,{absolute,"1TiB"}}]}],
[]},
{vm_memory_watermark_absolute_units_t,
"vm_memory_high_watermark.absolute = 1T",
[{rabbit,[{vm_memory_high_watermark,{absolute,"1T"}}]}],
[]},
{vm_memory_watermark_absolute_priority,
"vm_memory_high_watermark.absolute = 1073741824
vm_memory_high_watermark.relative = 0.4",
[{rabbit,[{vm_memory_high_watermark,{absolute,1073741824}}]}],
[]},
%% DEPRECATED; just for backwards compatibility
{vm_memory_watermark_paging_ratio,
"vm_memory_high_watermark_paging_ratio = 0.75
vm_memory_high_watermark.relative = 0.4",
[{rabbit,
[{vm_memory_high_watermark_paging_ratio,0.75},
{vm_memory_high_watermark,0.4}]}],
[]},
%% DEPRECATED; just for backwards compatibility
{memory_monitor_interval, "memory_monitor_interval = 5000",
[{rabbit,
[{memory_monitor_interval, 5000}]}],
[]},
{vm_memory_calculation_strategy, "vm_memory_calculation_strategy = rss",
[{rabbit,
[{vm_memory_calculation_strategy, rss}]}],
[]},
{vm_memory_calculation_strategy, "vm_memory_calculation_strategy = erlang",
[{rabbit,
[{vm_memory_calculation_strategy, erlang}]}],
[]},
{vm_memory_calculation_strategy, "vm_memory_calculation_strategy = allocated",
[{rabbit,
[{vm_memory_calculation_strategy, allocated}]}],
[]},
{vm_memory_calculation_strategy, "vm_memory_calculation_strategy = legacy",
[{rabbit,
[{vm_memory_calculation_strategy, legacy}]}],
[]},
{total_memory_available_override_value,
"total_memory_available_override_value = 1000000000",
[{rabbit,[{total_memory_available_override_value, 1000000000}]}],
[]},
{total_memory_available_override_value_units,
"total_memory_available_override_value = 1024MB",
[{rabbit,[{total_memory_available_override_value, "1024MB"}]}],
[]},
{ranch_connection_max,
"ranch_connection_max = 999",
[{rabbit,[{ranch_connection_max, 999}]}],
[]},
{ranch_connection_max,
"ranch_connection_max = infinity",
[{rabbit,[{ranch_connection_max, infinity}]}],
[]},
{channel_max,
"channel_max = 16",
[{rabbit,[{channel_max, 16}]}],
[]},
{channel_max_per_node,
"channel_max_per_node = 16",
[{rabbit,[{channel_max_per_node, 16}]}],
[]},
{channel_max_per_node,
"channel_max_per_node = infinity",
[{rabbit,[{channel_max_per_node, infinity}]}],
[]},
{session_max_per_connection_1,
"session_max_per_connection = 1",
[{rabbit,[{session_max_per_connection, 1}]}],
[]},
{session_max_per_connection,
"session_max_per_connection = 65000",
[{rabbit,[{session_max_per_connection, 65_000}]}],
[]},
{link_max_per_session_1,
"link_max_per_session = 1",
[{rabbit,[{link_max_per_session, 1}]}],
[]},
{link_max_per_session,
"link_max_per_session = 4200000000",
[{rabbit,[{link_max_per_session, 4_200_000_000}]}],
[]},
{consumer_max_per_channel,
"consumer_max_per_channel = 16",
[{rabbit,[{consumer_max_per_channel, 16}]}],
[]},
{consumer_max_per_channel,
"consumer_max_per_channel = infinity",
[{rabbit,[{consumer_max_per_channel, infinity}]}],
[]},
{max_message_size,
"max_message_size = 131072",
[{rabbit, [{max_message_size, 131072}]}],
[]},
{listeners_tcp_ip,
"listeners.tcp.1 = 192.168.1.99:5672",
[{rabbit,[{tcp_listeners,[{"192.168.1.99",5672}]}]}],
[]},
{listeners_tcp_ip_multiple,
"listeners.tcp.1 = 127.0.0.1:5672
listeners.tcp.2 = ::1:5672",
[{rabbit,[{tcp_listeners,[{"127.0.0.1",5672},{"::1",5672}]}]}],
[]},
{listeners_tcp_ip_all,"listeners.tcp.1 = :::5672",
[{rabbit,[{tcp_listeners,[{"::",5672}]}]}],
[]},
{listeners_tcp_ipv6,
"listeners.tcp.1 = fe80::2acf:e9ff:fe17:f97b:5672",
[{rabbit,[{tcp_listeners,[{"fe80::2acf:e9ff:fe17:f97b",5672}]}]}],
[]},
{tcp_options_sndbuf,
"tcp_listen_options.backlog = 128
tcp_listen_options.nodelay = true
tcp_listen_options.sndbuf = 196608
tcp_listen_options.recbuf = 196608",
[{rabbit,
[{tcp_listen_options,
[{backlog,128},{nodelay,true},{sndbuf,196608},{recbuf,196608}]}]}],
[]},
{tcp_listen_options_nodelay_with_kernel,
"tcp_listen_options.backlog = 4096
tcp_listen_options.nodelay = true",
[{kernel,
[{inet_default_connect_options,[{nodelay,true}]},
{inet_default_listen_options,[{nodelay,true}]}]}],
[{kernel,
[{inet_default_connect_options,[{nodelay,true}]},
{inet_default_listen_options,[{nodelay,true}]}]},
{rabbit,[{tcp_listen_options,[{backlog,4096},{nodelay,true}]}]}],
[]},
{tcp_listen_options_nodelay,
"tcp_listen_options.backlog = 4096
tcp_listen_options.nodelay = true",
[{rabbit,[{tcp_listen_options,[{backlog,4096},{nodelay,true}]}]}],
[]},
{ssl_handshake_timeout,
"ssl_handshake_timeout = 10000",
[{rabbit,[{ssl_handshake_timeout,10000}]}],
[]},
{cluster_partition_handling_pause_if_all_down,
"cluster_partition_handling = pause_if_all_down
## Recover strategy. Can be either 'autoheal' or 'ignore'
cluster_partition_handling.pause_if_all_down.recover = ignore
## Node names to check
cluster_partition_handling.pause_if_all_down.nodes.1 = rabbit@myhost1
cluster_partition_handling.pause_if_all_down.nodes.2 = rabbit@myhost2",
[{rabbit,
[{cluster_partition_handling,
{pause_if_all_down,[rabbit@myhost2,rabbit@myhost1],ignore}}]}],
[]},
{cluster_partition_handling_autoheal,
"cluster_partition_handling = autoheal",
[{rabbit,[{cluster_partition_handling,autoheal}]}],
[]},
{password_hashing,
"password_hashing_module = rabbit_password_hashing_sha512",
[{rabbit,[{password_hashing_module,rabbit_password_hashing_sha512}]}],
[]},
{ssl_options_verify_peer,
"listeners.ssl.1 = 5671
ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
ssl_options.verify = verify_peer
ssl_options.fail_if_no_peer_cert = false",
[{rabbit,
[{ssl_listeners,[5671]},
{ssl_options,
[{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
{certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
{keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
{verify,verify_peer},
{fail_if_no_peer_cert,false}]}]}],
[]},
{ssl_options_password,
"listeners.ssl.1 = 5671
ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
ssl_options.password = t0p$3kRe7",
[{rabbit,
[{ssl_listeners,[5671]},
{ssl_options,
[{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
{certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
{keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
{password,<<"t0p$3kRe7">>}]}]}],
[]},
{ssl_options_tls_ver_old,
"listeners.ssl.1 = 5671
ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
ssl_options.versions.tls1_2 = tlsv1.2
ssl_options.versions.tls1_1 = tlsv1.1
ssl_options.versions.tls1 = tlsv1",
[{ssl,[{versions,['tlsv1.2','tlsv1.1',tlsv1]}]}],
[{ssl,[{versions,['tlsv1.2','tlsv1.1',tlsv1]}]},
{rabbit,
[{ssl_listeners,[5671]},
{ssl_options,
[{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
{certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
{keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
{versions,['tlsv1.2','tlsv1.1',tlsv1]}]}]}],
[]},
{ssl_options_tls_ver_new,
"listeners.ssl.1 = 5671
ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
ssl_options.versions.tls1_2 = tlsv1.2
ssl_options.versions.tls1_1 = tlsv1.1",
[{ssl,[{versions,['tlsv1.2','tlsv1.1']}]}],
[{ssl,[{versions,['tlsv1.2','tlsv1.1']}]},
{rabbit,
[{ssl_listeners,[5671]},
{ssl_options,
[{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
{certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
{keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
{versions,['tlsv1.2','tlsv1.1']}]}]}],
[]},
{ssl_options_ciphers,
"listeners.ssl.1 = 5671
ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
ssl_options.versions.1 = tlsv1.2
ssl_options.versions.2 = tlsv1.1
ssl_options.ciphers.1 = ECDHE-ECDSA-AES256-GCM-SHA384
ssl_options.ciphers.2 = ECDHE-RSA-AES256-GCM-SHA384
ssl_options.ciphers.3 = ECDHE-ECDSA-AES256-SHA384
ssl_options.ciphers.4 = ECDHE-RSA-AES256-SHA384
ssl_options.ciphers.5 = ECDH-ECDSA-AES256-GCM-SHA384
ssl_options.ciphers.6 = ECDH-RSA-AES256-GCM-SHA384
ssl_options.ciphers.7 = ECDH-ECDSA-AES256-SHA384
ssl_options.ciphers.8 = ECDH-RSA-AES256-SHA384
ssl_options.ciphers.9 = DHE-RSA-AES256-GCM-SHA384",
[{ssl,[{versions,['tlsv1.2','tlsv1.1']}]}],
[{ssl,[{versions,['tlsv1.2','tlsv1.1']}]},
{rabbit,
[{ssl_listeners,[5671]},
{ssl_options,
[{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
{ciphers, [
"ECDHE-ECDSA-AES256-GCM-SHA384",
"ECDHE-RSA-AES256-GCM-SHA384",
"ECDHE-ECDSA-AES256-SHA384",
"ECDHE-RSA-AES256-SHA384",
"ECDH-ECDSA-AES256-GCM-SHA384",
"ECDH-RSA-AES256-GCM-SHA384",
"ECDH-ECDSA-AES256-SHA384",
"ECDH-RSA-AES256-SHA384",
"DHE-RSA-AES256-GCM-SHA384"
]},
{certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
{keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
{versions,['tlsv1.2','tlsv1.1']}]}]}],
[]},
{ssl_options_allow_poodle,
"listeners.ssl.1 = 5671
ssl_allow_poodle_attack = true
ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
ssl_options.verify = verify_peer
ssl_options.fail_if_no_peer_cert = false",
[{rabbit,
[{ssl_listeners,[5671]},
{ssl_allow_poodle_attack,true},
{ssl_options,
[{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
{certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
{keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
{verify,verify_peer},
{fail_if_no_peer_cert,false}]}]}],
[]},
{ssl_options_depth,
"listeners.ssl.1 = 5671
ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
ssl_options.depth = 2
ssl_options.verify = verify_peer
ssl_options.fail_if_no_peer_cert = false",
[{rabbit,
[{ssl_listeners,[5671]},
{ssl_options,
[{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
{certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
{keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
{depth,2},
{verify,verify_peer},
{fail_if_no_peer_cert,false}]}]}],
[]},
{ssl_options_depth_0,
"listeners.ssl.1 = 5671
ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
ssl_options.depth = 0
ssl_options.verify = verify_peer
ssl_options.fail_if_no_peer_cert = false",
[{rabbit,
[{ssl_listeners,[5671]},
{ssl_options,
[{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
{certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
{keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
{depth,0},
{verify,verify_peer},
{fail_if_no_peer_cert,false}]}]}],
[]},
{ssl_options_depth_255,
"listeners.ssl.1 = 5671
ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
ssl_options.depth = 255
ssl_options.verify = verify_peer
ssl_options.fail_if_no_peer_cert = false",
[{rabbit,
[{ssl_listeners,[5671]},
{ssl_options,
[{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
{certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
{keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
{depth,255},
{verify,verify_peer},
{fail_if_no_peer_cert,false}]}]}],
[]},
{ssl_options_honor_cipher_order,
"listeners.ssl.1 = 5671
ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
ssl_options.depth = 2
ssl_options.verify = verify_peer
ssl_options.fail_if_no_peer_cert = false
ssl_options.honor_cipher_order = true",
[{rabbit,
[{ssl_listeners,[5671]},
{ssl_options,
[{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
{certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
{keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
{depth,2},
{verify,verify_peer},
{fail_if_no_peer_cert, false},
{honor_cipher_order, true}]}]}],
[]},
{ssl_options_honor_ecc_order,
"listeners.ssl.1 = 5671
ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
ssl_options.depth = 2
ssl_options.verify = verify_peer
ssl_options.fail_if_no_peer_cert = false
ssl_options.honor_ecc_order = true",
[{rabbit,
[{ssl_listeners,[5671]},
{ssl_options,
[{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
{certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
{keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
{depth,2},
{verify,verify_peer},
{fail_if_no_peer_cert, false},
{honor_ecc_order, true}]}]}],
[]},
{ssl_cert_login_from_cn,
"ssl_cert_login_from = common_name",
[{rabbit,[{ssl_cert_login_from, common_name}]}],
[]},
{ssl_cert_login_from_dn,
"ssl_cert_login_from = distinguished_name",
[{rabbit,[{ssl_cert_login_from, distinguished_name}]}],
[]},
{ssl_cert_login_from_san_dns,
"ssl_cert_login_from = subject_alternative_name
ssl_cert_login_san_type = dns
ssl_cert_login_san_index = 0",
[{rabbit,[
{ssl_cert_login_from, subject_alternative_name},
{ssl_cert_login_san_type, dns},
{ssl_cert_login_san_index, 0}
]}],
[]},
{ssl_options_bypass_pem_cache,
"ssl_options.bypass_pem_cache = true",
[{ssl, [
{bypass_pem_cache, true}
]}],
[]},
{tcp_listen_options_linger_on,
"tcp_listen_options.linger.on = true
tcp_listen_options.linger.timeout = 100",
[{rabbit,[{tcp_listen_options,[{linger,{true,100}}]}]}],
[]},
{tcp_listen_options_linger_off,
"tcp_listen_options.linger.on = false
tcp_listen_options.linger.timeout = 100",
[{rabbit,[{tcp_listen_options,[{linger,{false,100}}]}]}],
[]},
{tcp_listen_options_linger_on_notimeout,
"tcp_listen_options.linger.on = true",
[{rabbit,[{tcp_listen_options,[{linger,{true,0}}]}]}],
[]},
{tcp_listen_options_linger_timeout,
"tcp_listen_options.linger.timeout = 100",
[{rabbit,[{tcp_listen_options,[{linger,{false,100}}]}]}],
[]},
{cluster_formation_internal_lock_retries,
"cluster_formation.internal_lock_retries = 10",
[{rabbit,[{cluster_formation,[{internal_lock_retries,10}]}]}],
[]},
{cluster_formation_dns,
"cluster_formation.peer_discovery_backend = rabbit_peer_discovery_dns
cluster_formation.dns.hostname = 192.168.0.2.xip.io
cluster_formation.node_type = disc",
[{rabbit,
[{cluster_formation,
[{peer_discovery_dns,[{hostname,<<"192.168.0.2.xip.io">>}]},
{peer_discovery_backend,rabbit_peer_discovery_dns},
{node_type,disc}]}]}],
[]},
{cluster_formation_classic,
"cluster_formation.peer_discovery_backend = rabbit_peer_discovery_classic_config
cluster_formation.node_type = disc",
[{rabbit,
[{cluster_formation,
[{peer_discovery_backend,rabbit_peer_discovery_classic_config},
{node_type,disc}]}]}],
[]},
{cluster_formation_classic_ram,
"cluster_formation.peer_discovery_backend = rabbit_peer_discovery_classic_config
cluster_formation.node_type = ram",
[{rabbit,
[{cluster_formation,
[{peer_discovery_backend,rabbit_peer_discovery_classic_config},
{node_type,ram}]}]}],
[]},
{cluster_formation_retry_limit_integer,
"cluster_formation.discovery_retry_limit = 500",
[{rabbit,
[{cluster_formation,
[{discovery_retry_limit, 500}]}]}],
[]},
{cluster_formation_retry_limit_infinity,
"cluster_formation.discovery_retry_limit = unlimited",
[{rabbit,
[{cluster_formation,
[{discovery_retry_limit, unlimited}]}]}],
[]},
{background_gc_enabled,
"background_gc_enabled = true
background_gc_target_interval = 30000",
[{rabbit,
[{background_gc_enabled,true},{background_gc_target_interval,30000}]}],
[]},
{background_gc_disabled,
"background_gc_enabled = false
background_gc_target_interval = 30000",
[{rabbit,
[{background_gc_enabled,false},{background_gc_target_interval,30000}]}],
[]},
{credential_validator_length,
"credential_validator.validation_backend = rabbit_credential_validator_min_password_length
credential_validator.min_length = 10",
[{rabbit,
[{credential_validator,
[{validation_backend,
rabbit_credential_validator_min_password_length},
{min_length,10}]}]}],
[]},
{credential_validator_regexp,
"credential_validator.validation_backend = rabbit_credential_validator_password_regexp
credential_validator.regexp = ^abc\\d+",
[{rabbit,
[{credential_validator,
[{validation_backend,rabbit_credential_validator_password_regexp},
{regexp,"^abc\\d+"}]}]}],
[]},
{proxy_protocol_on,
"proxy_protocol = true",
[{rabbit,[{proxy_protocol,true}]}],[]},
{proxy_protocol_off,
"proxy_protocol = false",
[{rabbit,[{proxy_protocol,false}]}],[]},
{default_worker_pool_size,
"default_worker_pool_size = 512",
[{rabbit, [
{default_worker_pool_size, 512}
]}],
[]},
{delegate_count,
"delegate_count = 64",
[{rabbit, [
{delegate_count, 64}
]}],
[]},
{kernel_net_ticktime,
"net_ticktime = 20",
[{kernel, [
{net_ticktime, 20}
]}],
[]},
{rabbit_consumer_timeout,
"consumer_timeout = 20000",
[{rabbit, [
{consumer_timeout, 20000}
]}],
[]},
{rabbit_msg_store_shutdown_timeout,
"message_store_shutdown_timeout = 600000",
[{rabbit, [
{msg_store_shutdown_timeout, 600000}
]}],
[]},
{rabbit_mnesia_table_loading_retry_timeout,
"mnesia_table_loading_retry_timeout = 45000",
[{rabbit, [
{mnesia_table_loading_retry_timeout, 45000}
]}],
[]},
%%
%% Definitions
%%
%% classic configuration key, implies a local filesystem path
{definition_files, "load_definitions = test/definition_import_SUITE_data/case1.json",
[{rabbit,
[{load_definitions, "test/definition_import_SUITE_data/case1.json"}]}],
[]},
%% modern configuration key, local filesystem source
{definition_files, "definitions.import_backend = local_filesystem
definitions.local.path = test/definition_import_SUITE_data/case1.json",
[{rabbit, [
{definitions, [
{import_backend, rabbit_definitions_import_local_filesystem},
{local_path, "test/definition_import_SUITE_data/case1.json"}
]}
]}],
[]},
%% modern configuration key, HTTPS source
{definition_files, "definitions.import_backend = https
definitions.https.url = https://rabbitmq.eng.megacorp.local/env-1/case1.json
definitions.tls.versions.1 = tlsv1.2
definitions.tls.log_level = error
definitions.tls.secure_renegotiate = true
definitions.tls.ciphers.1 = ECDHE-ECDSA-AES256-GCM-SHA384
definitions.tls.ciphers.2 = ECDHE-RSA-AES256-GCM-SHA384
definitions.tls.ciphers.3 = ECDH-ECDSA-AES256-GCM-SHA384
definitions.tls.ciphers.4 = ECDH-RSA-AES256-GCM-SHA384
definitions.tls.ciphers.5 = DHE-RSA-AES256-GCM-SHA384
definitions.tls.ciphers.6 = DHE-DSS-AES256-GCM-SHA384
definitions.tls.ciphers.7 = ECDHE-ECDSA-AES128-GCM-SHA256
definitions.tls.ciphers.8 = ECDHE-RSA-AES128-GCM-SHA256
definitions.tls.ciphers.9 = ECDH-ECDSA-AES128-GCM-SHA256
definitions.tls.ciphers.10 = ECDH-RSA-AES128-GCM-SHA256
definitions.tls.ciphers.11 = DHE-RSA-AES128-GCM-SHA256
definitions.tls.ciphers.12 = DHE-DSS-AES128-GCM-SHA256",
[{rabbit, [
{definitions, [
{import_backend, rabbit_definitions_import_https},
{url, "https://rabbitmq.eng.megacorp.local/env-1/case1.json"},
{ssl_options, [
{log_level, error},
{secure_renegotiate, true},
{versions, ['tlsv1.2']},
{ciphers, [
"ECDHE-ECDSA-AES256-GCM-SHA384",
"ECDHE-RSA-AES256-GCM-SHA384",
"ECDH-ECDSA-AES256-GCM-SHA384",
"ECDH-RSA-AES256-GCM-SHA384",
"DHE-RSA-AES256-GCM-SHA384",
"DHE-DSS-AES256-GCM-SHA384",
"ECDHE-ECDSA-AES128-GCM-SHA256",
"ECDHE-RSA-AES128-GCM-SHA256",
"ECDH-ECDSA-AES128-GCM-SHA256",
"ECDH-RSA-AES128-GCM-SHA256",
"DHE-RSA-AES128-GCM-SHA256",
"DHE-DSS-AES128-GCM-SHA256"
]}
]}
]}]}],
[]},
%%
%% Raft
%%
{raft_data_dir,
"raft.data_dir = /data/rabbitmq/raft/log",
[{ra, [
{data_dir, "/data/rabbitmq/raft/log"}
]}],
[]},
{raft_segment_max_entries,
"raft.segment_max_entries = 32768",
[{ra, [
{segment_max_entries, 32768}
]}],
[]},
{raft_wal_max_size_bytes,
"raft.wal_max_size_bytes = 1048576",
[{ra, [
{wal_max_size_bytes, 1048576}
]}],
[]},
{raft_wal_max_batch_size,
"raft.wal_max_batch_size = 4096",
[{ra, [
{wal_max_batch_size, 4096}
]}],
[]},
{raft_snapshot_chunk_size,
"raft.snapshot_chunk_size = 1000000",
[{ra, [
{snapshot_chunk_size, 1000000}
]}],
[]},
{raft_adaptive_failure_detector_poll_interval,
"raft.adaptive_failure_detector.poll_interval = 10000",
[{aten, [
{poll_interval, 10000}
]}],
[]},
%%
%% Backing queue version
%%
{classic_queue_default_version_2,
"classic_queue.default_version = 2",
[{rabbit, [
{classic_queue_default_version, 2}
]}],
[]},
{classic_queue_default_version_invalid,
"classic_queue.default_version = 3",
[],
[]},
%%
%% Quorum queue
%%
{quorum_queue_compute_checksums,
"quorum_queue.compute_checksums = true",
[{rabbit, [
{quorum_compute_checksums, true}
]}],
[]},
%%
%% Runtime parameters
%%
{runtime_parameters_limits,
"
runtime_parameters.limits.federation = 2
runtime_parameters.limits.shovel = 1
",
[{rabbit, [{runtime_parameters, [{limits, [
{<<"shovel">>, 1},
{<<"federation">>, 2}
]}]}]}],
[]},
%%
%% Deprecated features
%%
{deprecated_features_cmq,
"deprecated_features.permit.classic_queue_mirroring = false",
[{rabbit, [
{permit_deprecated_features, #{classic_queue_mirroring => false}}
]}],
[]},
%%
%% Message interceptors
%%
{message_interceptors,
"message_interceptors.incoming.set_header_timestamp.overwrite = true",
[{rabbit, [
{incoming_message_interceptors, [{set_header_timestamp, true}]}
]}],
[]},
{message_interceptors,
"
message_interceptors.incoming.set_header_routing_node.overwrite = false
message_interceptors.incoming.set_header_timestamp.overwrite = false
",
[{rabbit, [
{incoming_message_interceptors, [{set_header_routing_node, false},
{set_header_timestamp, false}]}
]}],
[]},
%%
%% Stream replication port range
%%
{stream_replication_port_range,
"
stream.replication.port_range.min = 4000
stream.replication.port_range.max = 4600
",
[{osiris, [
{port_range, {4000, 4600}}
]}],
[]},
{stream_replication_port_range,
"
stream.replication.port_range.min = 4000
",
[{osiris, [
{port_range, {4000, 4500}}
]}],
[]},
{stream_replication_port_range,
"
stream.replication.port_range.max = 4600
",
[{osiris, [
{port_range, {4100, 4600}}
]}],
[]}
].