rabbitmq-server/deps/rabbit/priv/schema/rabbit.schema

% vim:ft=erlang:
% ==============================
% Rabbit app section
% ==============================

%%
%% Network Connectivity
%% ====================
%%
%% By default, RabbitMQ will listen on all interfaces, using
%% the standard (reserved) AMQP port.
%%
%% {tcp_listeners, [5672]},

%% To listen on a specific interface, provide a tuple of {IpAddress, Port}.
%% For example, to listen only on localhost for both IPv4 and IPv6:
%%
%% {tcp_listeners, [{"127.0.0.1", 5672},
%%                  {"[::1]", 5672}]},

{mapping, "listeners.tcp", "rabbit.tcp_listeners",[
{datatype, {enum, [none]}}
]}.
{mapping, "listeners.tcp.$name", "rabbit.tcp_listeners",[
{datatype, [integer, ip]}
]}.
{translation, "rabbit.tcp_listeners",
fun(Conf) ->
case cuttlefish:conf_get("listeners.tcp", Conf, undefined) of
none -> [];
_ ->
Settings = cuttlefish_variable:filter_by_prefix("listeners.tcp", Conf),
[ V || {_, V} <- Settings ]
end
end}.
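
%% In the newer rabbitmq.conf (sysctl-like) format, the mappings above
%% translate to, for example (the ".default"/".local"/".local_v6" labels
%% are arbitrary names matched by "$name"):
%%
%% listeners.tcp.default  = 5672
%% listeners.tcp.local    = 127.0.0.1:5672
%% listeners.tcp.local_v6 = ::1:5672
%%
%% or, to disable the non-TLS listeners entirely:
%%
%% listeners.tcp = none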
%% TLS listeners are configured in the same fashion as TCP listeners,
%% including the option to control the choice of interface.
%%
%% {ssl_listeners, [5671]},
{mapping, "listeners.ssl", "rabbit.ssl_listeners",[
{datatype, {enum, [none]}}
]}.
{mapping, "listeners.ssl.$name", "rabbit.ssl_listeners",[
{datatype, [integer, ip]}
]}.
{translation, "rabbit.ssl_listeners",
fun(Conf) ->
case cuttlefish:conf_get("listeners.ssl", Conf, undefined) of
none -> [];
_ ->
Settings = cuttlefish_variable:filter_by_prefix("listeners.ssl", Conf),
[ V || {_, V} <- Settings ]
end
end}.
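
%% For example, in new-style rabbitmq.conf syntax (this assumes server
%% certificates are configured via the ssl_options.* keys below):
%%
%% listeners.ssl.default = 5671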
%% Number of Erlang processes that will accept connections for the TCP
%% and TLS listeners.
%%
%% {num_tcp_acceptors, 10},
%% {num_ssl_acceptors, 1},
{mapping, "num_acceptors.ssl", "rabbit.num_ssl_acceptors", [
{datatype, integer}
]}.
{mapping, "num_acceptors.tcp", "rabbit.num_tcp_acceptors", [
{datatype, integer}
]}.
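
%% New-style equivalents of the classic settings above:
%%
%% num_acceptors.tcp = 10
%% num_acceptors.ssl = 10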
{mapping, "socket_writer.gc_threshold", "rabbit.writer_gc_threshold", [
{datatype, [{atom, off}, integer]}
]}.
{translation, "rabbit.writer_gc_threshold",
fun(Conf) ->
case cuttlefish:conf_get("socket_writer.gc_threshold", Conf, undefined) of
%% missing from the config
undefined -> cuttlefish:unset();
%% explicitly disabled
off -> undefined;
Int when is_integer(Int) andalso Int > 0 ->
Int;
_ ->
cuttlefish:invalid("should be a positive integer")
end
end
}.
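
%% For example, to trigger writer garbage collection roughly once per
%% gigabyte written, or to disable it explicitly (the threshold value is
%% an illustration, not a recommendation):
%%
%% socket_writer.gc_threshold = 1000000000
%%
%% socket_writer.gc_threshold = off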
%% Maximum time for 0-9-1 handshake (after socket connection
%% and TLS handshake), in milliseconds.
%%
%% {handshake_timeout, 10000},
{mapping, "handshake_timeout", "rabbit.handshake_timeout", [
{datatype, [{atom, infinity}, integer]}
]}.
%% Set to 'true' to perform reverse DNS lookups when accepting a
%% connection. Hostnames will then be shown instead of IP addresses
%% in rabbitmqctl and the management plugin.
%%
%% {reverse_dns_lookups, true},
{mapping, "reverse_dns_lookups", "rabbit.reverse_dns_lookups", [
{datatype, {enum, [true, false]}}
]}.
{mapping, "erlang.K", "vm_args.+K", [
{default, "true"},
{level, advanced}
]}.
%%
%% Definition import
%%
%% Original key for definition loading from a JSON file or directory of files. See
%% https://www.rabbitmq.com/docs/management#load-definitions
{mapping, "load_definitions", "rabbit.load_definitions",
[{datatype, string},
{validators, ["file_accessible"]}]}.
%% Newer syntax for definition loading from a JSON file or directory of files. See
%% https://www.rabbitmq.com/docs/management#load-definitions
{mapping, "definitions.local.path", "rabbit.definitions.local_path",
[{datatype, string},
{validators, ["file_accessible"]}]}.
%% Extensible mechanism for loading definitions from a remote source
{mapping, "definitions.import_backend", "rabbit.definitions.import_backend", [
{datatype, atom}
]}.
{translation, "rabbit.definitions.import_backend",
fun(Conf) ->
case cuttlefish:conf_get("definitions.import_backend", Conf, rabbit_definitions_import_local_filesystem) of
%% short aliases for known backends
local_filesystem -> rabbit_definitions_import_local_filesystem;
local -> rabbit_definitions_import_local_filesystem;
https -> rabbit_definitions_import_https;
http -> rabbit_definitions_import_https;
%% accept both rabbitmq_ and rabbit_ (typical core module prefix)
rabbitmq_definitions_import_local_filesystem -> rabbit_definitions_import_local_filesystem;
rabbitmq_definitions_import_http -> rabbit_definitions_import_https;
%% any other value is used as is
Module -> Module
end
end}.
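
%% For example, to import definitions from a local file, or from a remote
%% URL over HTTPS (the path and URL below are placeholders):
%%
%% definitions.import_backend = local_filesystem
%% definitions.local.path     = /path/to/definitions.json
%%
%% definitions.import_backend = https
%% definitions.https.url      = https://definitions.example.local/defs.json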
{mapping, "definitions.skip_if_unchanged", "rabbit.definitions.skip_if_unchanged", [
{datatype, {enum, [true, false]}}]}.
{mapping, "definitions.hashing.algorithm", "rabbit.definitions.hashing_algorithm", [
{datatype, {enum, [sha, sha224, sha256, sha384, sha512]}}]}.
%% Load definitions from a remote URL over HTTPS. See
%% https://www.rabbitmq.com/docs/management#load-definitions
{mapping, "definitions.https.url", "rabbit.definitions.url",
[{datatype, string}]}.
%% Client-side TLS settings used by e.g. the HTTPS definition loading mechanism.
%% These can be reused by other clients.
{mapping, "definitions.tls.verify", "rabbit.definitions.ssl_options.verify", [
{datatype, {enum, [verify_peer, verify_none]}}]}.
{mapping, "definitions.tls.fail_if_no_peer_cert", "rabbit.definitions.ssl_options.fail_if_no_peer_cert", [
{datatype, {enum, [true, false]}}]}.
{mapping, "definitions.tls.cacertfile", "rabbit.definitions.ssl_options.cacertfile",
[{datatype, string}, {validators, ["file_accessible"]}]}.
{mapping, "definitions.tls.certfile", "rabbit.definitions.ssl_options.certfile",
[{datatype, string}, {validators, ["file_accessible"]}]}.
{mapping, "definitions.tls.cacerts.$name", "rabbit.definitions.ssl_options.cacerts",
[{datatype, string}]}.
{translation, "rabbit.definitions.ssl_options.cacerts",
fun(Conf) ->
Settings = cuttlefish_variable:filter_by_prefix("definitions.tls.cacerts", Conf),
[ list_to_binary(V) || {_, V} <- Settings ]
end}.
{mapping, "definitions.tls.cert", "rabbit.definitions.ssl_options.cert",
[{datatype, string}]}.
{translation, "rabbit.definitions.ssl_options.cert",
fun(Conf) ->
list_to_binary(cuttlefish:conf_get("definitions.tls.cert", Conf))
end}.
{mapping, "definitions.tls.reuse_session", "rabbit.definitions.ssl_options.reuse_session",
[{datatype, {enum, [true, false]}}]}.
{mapping, "definitions.tls.crl_check", "rabbit.definitions.ssl_options.crl_check",
[{datatype, [{enum, [true, false, peer, best_effort]}]}]}.
{mapping, "definitions.tls.depth", "rabbit.definitions.ssl_options.depth",
[{datatype, integer}, {validators, ["byte"]}]}.
{mapping, "definitions.tls.dh", "rabbit.definitions.ssl_options.dh",
[{datatype, string}]}.
{translation, "rabbit.definitions.ssl_options.dh",
fun(Conf) ->
list_to_binary(cuttlefish:conf_get("definitions.tls.dh", Conf))
end}.
{translation, "rabbit.definitions.ssl_options.key",
fun(Conf) ->
case cuttlefish_variable:filter_by_prefix("definitions.tls.key", Conf) of
[{[_,_,Key], Val}|_] -> {list_to_atom(Key), list_to_binary(Val)};
_ -> cuttlefish:unset()
end
end}.
{mapping, "definitions.tls.keyfile", "rabbit.definitions.ssl_options.keyfile",
[{datatype, string}, {validators, ["file_accessible"]}]}.
{mapping, "definitions.tls.log_alert", "rabbit.definitions.ssl_options.log_alert",
[{datatype, {enum, [true, false]}}]}.
{mapping, "definitions.tls.password", "rabbit.definitions.ssl_options.password",
[{datatype, [tagged_binary, binary]}]}.
{translation, "rabbit.definitions.ssl_options.password",
fun(Conf) ->
rabbit_cuttlefish:optionally_tagged_string("definitions.tls.password", Conf)
end}.
{mapping, "definitions.tls.secure_renegotiate", "rabbit.definitions.ssl_options.secure_renegotiate",
[{datatype, {enum, [true, false]}}]}.
{mapping, "definitions.tls.reuse_sessions", "rabbit.definitions.ssl_options.reuse_sessions",
[{datatype, {enum, [true, false]}}]}.
{mapping, "definitions.tls.versions.$version", "rabbit.definitions.ssl_options.versions",
[{datatype, atom}]}.
{translation, "rabbit.definitions.ssl_options.versions",
fun(Conf) ->
Settings = cuttlefish_variable:filter_by_prefix("definitions.tls.versions", Conf),
[V || {_, V} <- Settings]
end}.
{mapping, "definitions.tls.ciphers.$cipher", "rabbit.definitions.ssl_options.ciphers",
[{datatype, string}]}.
{translation, "rabbit.definitions.ssl_options.ciphers",
fun(Conf) ->
Settings = cuttlefish_variable:filter_by_prefix("definitions.tls.ciphers", Conf),
lists:reverse([V || {_, V} <- Settings])
end}.
{mapping, "definitions.tls.log_level", "rabbit.definitions.ssl_options.log_level",
[{datatype, {enum, [emergency, alert, critical, error, warning, notice, info, debug]}}]}.
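
%% A sketch of the client-side TLS settings used for HTTPS definition
%% import (all paths are placeholders):
%%
%% definitions.tls.verify     = verify_peer
%% definitions.tls.cacertfile = /path/to/ca_certificate.pem
%% definitions.tls.certfile   = /path/to/client_certificate.pem
%% definitions.tls.keyfile    = /path/to/client_key.pem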
%%
%% Seed User, Authentication, Access Control
%%
%% The default "guest" user is only permitted to access the server
%% via a loopback interface (e.g. localhost).
%% {loopback_users, [<<"guest">>]},
%%
%% Uncomment the following line if you want to allow access to the
%% guest user from anywhere on the network.
%% {loopback_users, []},
{mapping, "loopback_users", "rabbit.loopback_users", [
{datatype, {enum, [none]}}
]}.
{mapping, "loopback_users.$user", "rabbit.loopback_users", [
{datatype, atom}
]}.
{translation, "rabbit.loopback_users",
fun(Conf) ->
None = cuttlefish:conf_get("loopback_users", Conf, undefined),
case None of
none -> [];
_ ->
Settings = cuttlefish_variable:filter_by_prefix("loopback_users", Conf),
[ list_to_binary(U) || {["loopback_users", U], V} <- Settings, V == true ]
end
end}.
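
%% For example, to restrict the default guest user to loopback interfaces
%% (the default behaviour), or to lift the restriction for all users:
%%
%% loopback_users.guest = true
%%
%% loopback_users = none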
%% TLS options.
%% See https://www.rabbitmq.com/docs/ssl for full documentation.
%%
%% {ssl_options, [{cacertfile, "/path/to/testca/cacert.pem"},
%% {certfile, "/path/to/server/cert.pem"},
%% {keyfile, "/path/to/server/key.pem"},
%% {verify, verify_peer},
%% {fail_if_no_peer_cert, false}]},
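
%% The new-style equivalents of the classic example above (paths are
%% placeholders):
%%
%% ssl_options.cacertfile           = /path/to/testca/cacert.pem
%% ssl_options.certfile             = /path/to/server/cert.pem
%% ssl_options.keyfile              = /path/to/server/key.pem
%% ssl_options.verify               = verify_peer
%% ssl_options.fail_if_no_peer_cert = false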
{mapping, "ssl_allow_poodle_attack", "rabbit.ssl_allow_poodle_attack",
[{datatype, {enum, [true, false]}}]}.
{mapping, "ssl_options", "rabbit.ssl_options", [
{datatype, {enum, [none]}}
]}.
{translation, "rabbit.ssl_options",
fun(Conf) ->
case cuttlefish:conf_get("ssl_options", Conf, undefined) of
none -> [];
_ -> cuttlefish:invalid("Invalid ssl_options")
end
end}.
{mapping, "ssl_options.verify", "rabbit.ssl_options.verify", [
{datatype, {enum, [verify_peer, verify_none]}}]}.
{mapping, "ssl_options.fail_if_no_peer_cert", "rabbit.ssl_options.fail_if_no_peer_cert", [
{datatype, {enum, [true, false]}}]}.
{mapping, "ssl_options.cacertfile", "rabbit.ssl_options.cacertfile",
[{datatype, string}, {validators, ["file_accessible"]}]}.
{mapping, "ssl_options.certfile", "rabbit.ssl_options.certfile",
[{datatype, string}, {validators, ["file_accessible"]}]}.
{mapping, "ssl_options.cacerts.$name", "rabbit.ssl_options.cacerts",
[{datatype, string}]}.
{translation, "rabbit.ssl_options.cacerts",
fun(Conf) ->
Settings = cuttlefish_variable:filter_by_prefix("ssl_options.cacerts", Conf),
[ list_to_binary(V) || {_, V} <- Settings ]
end}.
{mapping, "ssl_options.cert", "rabbit.ssl_options.cert",
[{datatype, string}]}.
{translation, "rabbit.ssl_options.cert",
fun(Conf) ->
list_to_binary(cuttlefish:conf_get("ssl_options.cert", Conf))
end}.
{mapping, "ssl_options.client_renegotiation", "rabbit.ssl_options.client_renegotiation",
[{datatype, {enum, [true, false]}}]}.
{mapping, "ssl_options.crl_check", "rabbit.ssl_options.crl_check",
[{datatype, [{enum, [true, false, peer, best_effort]}]}]}.
{mapping, "ssl_options.depth", "rabbit.ssl_options.depth",
[{datatype, integer}, {validators, ["byte"]}]}.
{mapping, "ssl_options.dh", "rabbit.ssl_options.dh",
[{datatype, string}]}.
{translation, "rabbit.ssl_options.dh",
fun(Conf) ->
list_to_binary(cuttlefish:conf_get("ssl_options.dh", Conf))
end}.
{mapping, "ssl_options.dhfile", "rabbit.ssl_options.dhfile",
[{datatype, string}, {validators, ["file_accessible"]}]}.
{mapping, "ssl_options.honor_cipher_order", "rabbit.ssl_options.honor_cipher_order",
[{datatype, {enum, [true, false]}}]}.
{mapping, "ssl_options.honor_ecc_order", "rabbit.ssl_options.honor_ecc_order",
[{datatype, {enum, [true, false]}}]}.
{mapping, "ssl_options.key.RSAPrivateKey", "rabbit.ssl_options.key",
[{datatype, string}]}.
{mapping, "ssl_options.key.DSAPrivateKey", "rabbit.ssl_options.key",
[{datatype, string}]}.
{mapping, "ssl_options.key.PrivateKeyInfo", "rabbit.ssl_options.key",
[{datatype, string}]}.
{translation, "rabbit.ssl_options.key",
fun(Conf) ->
case cuttlefish_variable:filter_by_prefix("ssl_options.key", Conf) of
[{[_,_,Key], Val}|_] -> {list_to_atom(Key), list_to_binary(Val)};
_ -> cuttlefish:unset()
end
end}.
{mapping, "ssl_options.keyfile", "rabbit.ssl_options.keyfile",
[{datatype, string}, {validators, ["file_accessible"]}]}.
{mapping, "ssl_options.log_level", "rabbit.ssl_options.log_level",
[{datatype, {enum, [emergency, alert, critical, error, warning, notice, info, debug]}}]}.
{mapping, "ssl_options.log_alert", "rabbit.ssl_options.log_alert",
[{datatype, {enum, [true, false]}}]}.
{mapping, "ssl_options.password", "rabbit.ssl_options.password",
[{datatype, [tagged_binary, binary]}]}.
{translation, "rabbit.ssl_options.password",
fun(Conf) ->
rabbit_cuttlefish:optionally_tagged_binary("ssl_options.password", Conf)
end}.
{mapping, "ssl_options.psk_identity", "rabbit.ssl_options.psk_identity",
2016-01-22 23:47:01 +08:00
[{datatype, string}]}.
{mapping, "ssl_options.reuse_sessions", "rabbit.ssl_options.reuse_sessions",
[{datatype, {enum, [true, false]}}]}.
{mapping, "ssl_options.secure_renegotiate", "rabbit.ssl_options.secure_renegotiate",
[{datatype, {enum, [true, false]}}]}.
{mapping, "ssl_options.versions.$version", "rabbit.ssl_options.versions",
[{datatype, atom}]}.
{translation, "rabbit.ssl_options.versions",
fun(Conf) ->
Settings = cuttlefish_variable:filter_by_prefix("ssl_options.versions", Conf),
[V || {_, V} <- Settings]
end}.
{mapping, "ssl_options.ciphers.$cipher", "rabbit.ssl_options.ciphers",
[{datatype, string}]}.
{translation, "rabbit.ssl_options.ciphers",
fun(Conf) ->
Settings = cuttlefish_variable:filter_by_prefix("ssl_options.ciphers", Conf),
lists:reverse([V || {_, V} <- Settings])
end}.
{mapping, "ssl_options.bypass_pem_cache", "ssl.bypass_pem_cache",
[{datatype, {enum, [true, false]}}]}.
{mapping, "metadata_store.khepri.default_timeout", "rabbit.khepri_default_timeout",
[{datatype, integer}]}.
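
%% For example (assuming the timeout is expressed in milliseconds, as
%% elsewhere in Khepri; the value is an illustration, not a recommendation):
%%
%% metadata_store.khepri.default_timeout = 30000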
%% ===========================================================================
%% Choose the available SASL mechanism(s) to expose.
%% The three default (built-in) mechanisms are 'PLAIN', 'AMQPLAIN' and 'ANONYMOUS'.
%% Additional mechanisms can be added via plugins.
%%
%% See https://www.rabbitmq.com/docs/access-control for more details.
%%
%% {auth_mechanisms, ['PLAIN', 'AMQPLAIN', 'ANONYMOUS']},
{mapping, "auth_mechanisms.$name", "rabbit.auth_mechanisms", [
{datatype, atom}]}.
{translation, "rabbit.auth_mechanisms",
fun(Conf) ->
Settings = cuttlefish_variable:filter_by_prefix("auth_mechanisms", Conf),
Sorted = lists:keysort(1, Settings),
[V || {_, V} <- Sorted]
end}.
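
%% For example, the default set of mechanisms in new-style syntax (the
%% "$name" suffixes are sorted to determine the order):
%%
%% auth_mechanisms.1 = PLAIN
%% auth_mechanisms.2 = AMQPLAIN
%% auth_mechanisms.3 = ANONYMOUS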
%% Select an authentication backend to use. RabbitMQ provides an
%% internal backend in the core.
%%
%% {auth_backends, [rabbit_auth_backend_internal]},
{translation, "rabbit.auth_backends",
fun(Conf) ->
Settings = cuttlefish_variable:filter_by_prefix("auth_backends", Conf),
BackendModule = fun
(internal) -> rabbit_auth_backend_internal;
(ldap) -> rabbit_auth_backend_ldap;
(http) -> rabbit_auth_backend_http;
(oauth) -> rabbit_auth_backend_oauth2;
(oauth2) -> rabbit_auth_backend_oauth2;
(cache) -> rabbit_auth_backend_cache;
(amqp) -> rabbit_auth_backend_amqp;
(dummy) -> rabbit_auth_backend_dummy;
(Other) when is_atom(Other) -> Other;
(_) -> cuttlefish:invalid("Unknown/unsupported auth backend")
end,
AuthBackends = [{Num, {default, BackendModule(V)}} || {["auth_backends", Num], V} <- Settings],
AuthNBackends = [{Num, {authn, BackendModule(V)}} || {["auth_backends", Num, "authn"], V} <- Settings],
AuthZBackends = [{Num, {authz, BackendModule(V)}} || {["auth_backends", Num, "authz"], V} <- Settings],
Backends = lists:foldl(
fun({NumStr, {Type, V}}, Acc) ->
Num = case catch list_to_integer(NumStr) of
N when is_integer(N) -> N;
Err ->
cuttlefish:invalid(
iolist_to_binary(io_lib:format(
"Auth backend position in the chain should be an integer ~p", [Err])))
end,
NewVal = case dict:find(Num, Acc) of
{ok, {AuthN, AuthZ}} ->
case {Type, AuthN, AuthZ} of
{authn, undefined, _} ->
{V, AuthZ};
{authz, _, undefined} ->
{AuthN, V};
_ ->
cuttlefish:invalid(
iolist_to_binary(
io_lib:format(
"Auth backend already defined for the ~pth ~p backend",
[Num, Type])))
end;
error ->
case Type of
authn -> {V, undefined};
authz -> {undefined, V};
default -> {V, V}
end
end,
dict:store(Num, NewVal, Acc)
end,
dict:new(),
AuthBackends ++ AuthNBackends ++ AuthZBackends),
lists:map(
fun
({Num, {undefined, AuthZ}}) ->
cuttlefish:warn(
io_lib:format(
"Auth backend undefined for the ~pth authz backend. Using ~p",
[Num, AuthZ])),
{AuthZ, AuthZ};
({Num, {AuthN, undefined}}) ->
cuttlefish:warn(
io_lib:format(
"Authz backend undefined for the ~pth authn backend. Using ~p",
[Num, AuthN])),
{AuthN, AuthN};
({_Num, {Auth, Auth}}) -> Auth;
({_Num, {AuthN, AuthZ}}) -> {AuthN, AuthZ}
end,
lists:keysort(1, dict:to_list(Backends)))
end}.
{mapping, "auth_backends.$num", "rabbit.auth_backends", [
{datatype, atom}
]}.
{mapping, "auth_backends.$num.authn", "rabbit.auth_backends",[
{datatype, atom}
]}.
{mapping, "auth_backends.$num.authz", "rabbit.auth_backends",[
{datatype, atom}
]}.
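
%% For example, to try LDAP first and fall back to the internal database,
%% or to split authentication and authorisation between two backends
%% (a sketch; the ldap backend requires the rabbitmq_auth_backend_ldap
%% plugin to be enabled):
%%
%% auth_backends.1 = ldap
%% auth_backends.2 = internal
%%
%% auth_backends.1.authn = ldap
%% auth_backends.1.authz = internal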
%% This pertains to both the rabbitmq_auth_mechanism_ssl plugin and
%% STOMP ssl_cert_login configurations. See the rabbitmq_stomp
%% configuration section later in this file and the README in
%% https://github.com/rabbitmq/rabbitmq-auth-mechanism-ssl for further
%% details.
%%
%% To use the peer certificate's Common Name (CN) field
%% instead of its Distinguished Name (DN) for username extraction.
%%
%% {ssl_cert_login_from, common_name},
%%
%% To use the first SAN value of type DNS:
%%
%% {ssl_cert_login_from, subject_alternative_name},
%% {ssl_cert_login_san_type, dns},
%% {ssl_cert_login_san_index, 0}
{mapping, "ssl_cert_login_from", "rabbit.ssl_cert_login_from", [
{datatype, {enum, [distinguished_name, common_name, subject_alternative_name, subject_alt_name]}}
]}.
{mapping, "ssl_cert_login_san_type", "rabbit.ssl_cert_login_san_type", [
{datatype, {enum, [dns, ip, email, uri, other_name]}}
]}.
{mapping, "ssl_cert_login_san_index", "rabbit.ssl_cert_login_san_index", [
{datatype, integer}, {validators, ["non_negative_integer"]}
]}.
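
%% New-style equivalents of the examples above:
%%
%% ssl_cert_login_from = common_name
%%
%% ssl_cert_login_from      = subject_alternative_name
%% ssl_cert_login_san_type  = dns
%% ssl_cert_login_san_index = 0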
%% TLS handshake timeout, in milliseconds.
%%
%% {ssl_handshake_timeout, 5000},
{mapping, "ssl_handshake_timeout", "rabbit.ssl_handshake_timeout", [
{datatype, integer}
]}.
%% Cluster name
{mapping, "cluster_name", "rabbit.cluster_name", [
{datatype, string}
]}.
%% Default worker process pool size. Used to limit the maximum concurrency
%% of certain operations, e.g. queue initialisation and recovery on node boot.
{mapping, "default_worker_pool_size", "rabbit.default_worker_pool_size", [
{datatype, integer}, {validators, ["non_negative_integer"]}
]}.
%% Password hashing implementation. Will only affect newly
%% created users. To recalculate the hash for an existing user,
%% it is necessary to update their password.
%%
%% When importing definitions exported from versions earlier
%% than 3.6.0, it is possible to go back to MD5 (only do this
%% as a temporary measure!) by setting this to rabbit_password_hashing_md5.
%%
%% To use SHA-512, set to rabbit_password_hashing_sha512.
%%
%% {password_hashing_module, rabbit_password_hashing_sha256},
{mapping, "password_hashing_module", "rabbit.password_hashing_module", [
{datatype, atom}
]}.
%% Credential validation.
%%
{mapping, "credential_validator.validation_backend", "rabbit.credential_validator.validation_backend", [
{datatype, atom}
]}.
{mapping, "credential_validator.min_length", "rabbit.credential_validator.min_length", [
{datatype, integer}, {validators, ["non_negative_integer"]}
]}.
{mapping, "credential_validator.regexp", "rabbit.credential_validator.regexp", [
{datatype, string}
]}.
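
%% For example, to require passwords of at least 10 characters using the
%% minimum length validator that ships with RabbitMQ (module name assumed
%% from the server distribution):
%%
%% credential_validator.validation_backend = rabbit_credential_validator_min_password_length
%% credential_validator.min_length = 10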
%%
%% Default User / VHost
%% ====================
%%
%% On first start RabbitMQ will create a vhost and a user. These
%% config items control what gets created. See
%% https://www.rabbitmq.com/docs/access-control for further
%% information about vhosts and access control.
%%
%% {default_vhost, <<"/">>},
%% {default_user, <<"guest">>},
%% {default_pass, <<"guest">>},
%% {default_permissions, [<<".*">>, <<".*">>, <<".*">>]},
{mapping, "default_vhost", "rabbit.default_vhost", [
{datatype, string}
]}.
{translation, "rabbit.default_vhost",
fun(Conf) ->
list_to_binary(cuttlefish:conf_get("default_vhost", Conf))
end}.
{mapping, "default_user", "rabbit.default_user", [
{datatype, string}
]}.
{translation, "rabbit.default_user",
fun(Conf) ->
list_to_binary(cuttlefish:conf_get("default_user", Conf))
end}.
{mapping, "default_pass", "rabbit.default_pass", [
{datatype, [tagged_binary, binary]}
]}.
{translation, "rabbit.default_pass",
fun(Conf) ->
rabbit_cuttlefish:optionally_tagged_binary("default_pass", Conf)
end}.
{mapping, "default_permissions.configure", "rabbit.default_permissions", [
{datatype, string}
]}.
{mapping, "default_permissions.read", "rabbit.default_permissions", [
{datatype, string}
]}.
{mapping, "default_permissions.write", "rabbit.default_permissions", [
{datatype, string}
]}.
{translation, "rabbit.default_permissions",
fun(Conf) ->
Settings = cuttlefish_variable:filter_by_prefix("default_permissions", Conf),
Configure = proplists:get_value(["default_permissions", "configure"], Settings),
Read = proplists:get_value(["default_permissions", "read"], Settings),
Write = proplists:get_value(["default_permissions", "write"], Settings),
[list_to_binary(Configure), list_to_binary(Read), list_to_binary(Write)]
end}.
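
%% New-style equivalents of the defaults listed above:
%%
%% default_vhost = /
%% default_user  = guest
%% default_pass  = guest
%% default_permissions.configure = .*
%% default_permissions.read      = .*
%% default_permissions.write     = .*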
%%
%% Extra Default Users
%% ====================
%%
{mapping, "default_users.$name.vhost_pattern", "rabbit.default_users", [
{validators, ["valid_regex"]},
{datatype, string}
]}.
{mapping, "default_users.$name.password", "rabbit.default_users", [
{datatype, [tagged_binary, binary]}
]}.
{mapping, "default_users.$name.configure", "rabbit.default_users", [
{validators, ["valid_regex"]},
{datatype, string}
]}.
{mapping, "default_users.$name.read", "rabbit.default_users", [
{validators, ["valid_regex"]},
{datatype, string}
]}.
{mapping, "default_users.$name.write", "rabbit.default_users", [
{validators, ["valid_regex"]},
{datatype, string}
]}.
{mapping, "default_users.$name.tags", "rabbit.default_users", [
{datatype, {list, atom}}
]}.
{translation, "rabbit.default_users", fun(Conf) ->
case rabbit_cuttlefish:aggregate_props(Conf, ["default_users"]) of
[] -> cuttlefish:unset();
Props -> Props
end
end}.
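
%% For example (the "reporting" user name, password and patterns below are
%% purely illustrative):
%%
%% default_users.reporting.vhost_pattern = ^analytics$
%% default_users.reporting.password      = changeme
%% default_users.reporting.configure     = ^$
%% default_users.reporting.read          = .*
%% default_users.reporting.write         = ^$
%% default_users.reporting.tags          = monitoring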
%% Connections that skip the SASL layer or use the SASL mechanism ANONYMOUS
%% will use this identity. Setting this to a username allows (anonymous)
%% clients to connect and act as that user. For production environments,
%% set this value to 'none'.
{mapping, "anonymous_login_user", "rabbit.anonymous_login_user",
[{datatype, [{enum, [none]}, binary]}]}.
{mapping, "anonymous_login_pass", "rabbit.anonymous_login_pass", [
{datatype, [tagged_binary, binary]}
]}.
{translation, "rabbit.anonymous_login_pass",
fun(Conf) ->
rabbit_cuttlefish:optionally_tagged_binary("anonymous_login_pass", Conf)
end}.
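
%% For example, to disable anonymous logins, as recommended for
%% production environments:
%%
%% anonymous_login_user = none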
%%
%% Default Policies
%% ====================
%%
{mapping, "default_policies.operator.$id.vhost_pattern", "rabbit.default_policies.operator", [
{validators, ["valid_regex"]},
{datatype, string}
]}.
{mapping, "default_policies.operator.$id.queue_pattern", "rabbit.default_policies.operator", [
{validators, ["valid_regex"]},
{datatype, string}
]}.
{mapping, "default_policies.operator.$id.expires", "rabbit.default_policies.operator", [
{datatype, {duration, ms}}
]}.
{mapping, "default_policies.operator.$id.message_ttl", "rabbit.default_policies.operator", [
{datatype, {duration, ms}}
]}.
{mapping, "default_policies.operator.$id.max_length", "rabbit.default_policies.operator", [
{validators, ["non_zero_positive_integer"]},
{datatype, integer}
]}.
{mapping, "default_policies.operator.$id.max_length_bytes", "rabbit.default_policies.operator", [
{validators, ["non_zero_positive_integer"]},
{datatype, bytesize}
]}.
{mapping, "default_policies.operator.$id.max_in_memory_bytes", "rabbit.default_policies.operator", [
{validators, ["non_zero_positive_integer"]},
{datatype, bytesize}
]}.
{mapping, "default_policies.operator.$id.max_in_memory_length", "rabbit.default_policies.operator",
[
{validators, ["non_zero_positive_integer"]},
{datatype, integer}
]}.
{mapping, "default_policies.operator.$id.delivery_limit", "rabbit.default_policies.operator", [
{validators, ["non_zero_positive_integer"]},
{datatype, integer}
]}.
{mapping, "default_policies.operator.$id.classic_queues.ha_mode", "rabbit.default_policies.operator", [
{datatype, string}
]}.
{mapping, "default_policies.operator.$id.classic_queues.ha_params", "rabbit.default_policies.operator", [
{datatype, [integer, {list, string}]}
]}.
{mapping, "default_policies.operator.$id.classic_queues.ha_sync_mode", "rabbit.default_policies.operator", [
{datatype, string}
]}.
{mapping, "default_policies.operator.$id.classic_queues.queue_version", "rabbit.default_policies.operator",
[
{validators, ["non_zero_positive_integer"]},
{datatype, integer}
]}.
{translation, "rabbit.default_policies.operator", fun(Conf) ->
Props = rabbit_cuttlefish:aggregate_props(
Conf,
["default_policies", "operator"],
fun({["default_policies","operator",ID,"classic_queues"|T], V}) ->
NewV = case T of
["ha_sync_mode"] ->
list_to_binary(V);
["ha_mode"] ->
list_to_binary(V);
_ -> V
end,
{["default_policies","operator",ID|T], NewV};
({["default_policies","operator",ID, "queue_pattern"], V}) ->
{["default_policies","operator",ID,"queue_pattern"], list_to_binary(V)};
(E) -> E
end),
case Props of
[] -> cuttlefish:unset();
Props -> Props
end
end}.
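
%% For example, an operator policy that applies a message TTL to all
%% queues in all virtual hosts (the "ttl" policy name is illustrative):
%%
%% default_policies.operator.ttl.vhost_pattern = .*
%% default_policies.operator.ttl.queue_pattern = .*
%% default_policies.operator.ttl.message_ttl   = 60s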
%%
%% Default VHost Limits
%% ====================
%%
{mapping, "default_limits.vhosts.$id.pattern", "rabbit.default_limits.vhosts", [
{validators, ["valid_regex"]},
{datatype, string}
]}.
{mapping, "default_limits.vhosts.$id.max_connections", "rabbit.default_limits.vhosts", [
{validators, [ "non_zero_positive_integer"]},
{datatype, integer}
]}.
{mapping, "default_limits.vhosts.$id.max_queues", "rabbit.default_limits.vhosts", [
{validators, [ "non_zero_positive_integer"]},
{datatype, integer}
]}.
{translation, "rabbit.default_limits.vhosts", fun(Conf) ->
case rabbit_cuttlefish:aggregate_props(Conf, ["default_limits", "vhosts"]) of
[] -> cuttlefish:unset();
Props -> Props
end
end}.
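
%% An illustrative rabbitmq.conf sketch of a default per-vhost limit group
%% ("1" is an arbitrary group id; the values are examples):
%%
%%   default_limits.vhosts.1.pattern = ^device
%%   default_limits.vhosts.1.max_connections = 10
%%   default_limits.vhosts.1.max_queues = 10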

%% Tags for default user
%%
%% For more details about tags, see the documentation for the
%% Management Plugin at https://www.rabbitmq.com/docs/management.
%%
%% {default_user_tags, [administrator]},

{mapping, "default_user_tags.$tag", "rabbit.default_user_tags",
[{datatype, {enum, [true, false]}}]}.
{translation, "rabbit.default_user_tags",
fun(Conf) ->
Settings = cuttlefish_variable:filter_by_prefix("default_user_tags", Conf),
[ list_to_atom(Key) || {[_,Key], Val} <- Settings, Val == true ]
end}.
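
%% An illustrative rabbitmq.conf example: each tag is enabled individually,
%% so the following grants the default user the administrator tag:
%%
%%   default_user_tags.administrator = true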
%%
%% Additional network and protocol related configuration
%% =====================================================
%%

%% Set the default connection heartbeat timeout (in seconds).
%%
%% {heartbeat, 600},

{mapping, "heartbeat", "rabbit.heartbeat", [{datatype, integer}]}.

%% Set the max permissible size of an AMQP 0-9-1 frame (in bytes).
%%
%% {frame_max, 131072},
{mapping, "frame_max", "rabbit.frame_max", [{datatype, bytesize}]}.
%% Set the max frame size the server will accept before connection
%% tuning starts.
%%
%% {initial_frame_max, 4096},
{mapping, "initial_frame_max", "rabbit.initial_frame_max", [{datatype, bytesize}]}.
%% Set the max permissible number of channels per connection.
%% 0 means "no limit".
%%
%% {channel_max, 0},
{mapping, "channel_max", "rabbit.channel_max", [{datatype, integer}]}.
{mapping, "channel_max_per_node", "rabbit.channel_max_per_node",
[{datatype, [{atom, infinity}, integer]}]}.
{translation, "rabbit.channel_max_per_node",
fun(Conf) ->
case cuttlefish:conf_get("channel_max_per_node", Conf, undefined) of
undefined -> cuttlefish:unset();
infinity -> infinity;
Val when is_integer(Val) andalso Val > 0 -> Val;
_ -> cuttlefish:invalid("should be positive integer or 'infinity'")
end
end
}.
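
%% An illustrative rabbitmq.conf example for the key above (the value is
%% an arbitrary example; 'infinity' removes the limit):
%%
%%   channel_max_per_node = 1000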
%% Set the max allowed number of consumers per channel.
%% `infinity` means "no limit".
%%
%% {consumer_max_per_channel, infinity},
{mapping, "consumer_max_per_channel", "rabbit.consumer_max_per_channel",
[{datatype, [{atom, infinity}, integer]}]}.
{translation, "rabbit.consumer_max_per_channel",
fun(Conf) ->
case cuttlefish:conf_get("consumer_max_per_channel", Conf, undefined) of
undefined -> cuttlefish:unset();
infinity -> infinity;
Val when is_integer(Val) andalso Val > 0 -> Val;
_ -> cuttlefish:invalid("should be positive integer or 'infinity'")
end
end
}.
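
%% An illustrative rabbitmq.conf example for the key above (the value is
%% an arbitrary example):
%%
%%   consumer_max_per_channel = 128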
%% Sets the maximum number of AMQP 1.0 sessions that can be simultaneously
%% active on an AMQP 1.0 connection.
%%
%% {session_max_per_connection, 1},
{mapping, "session_max_per_connection", "rabbit.session_max_per_connection",
[{datatype, integer}, {validators, ["positive_16_bit_unsigned_integer"]}]}.
%% Sets the maximum number of AMQP 1.0 links that can be simultaneously
%% active on an AMQP 1.0 session.
%%
%% {link_max_per_session, 10},
{mapping, "link_max_per_session", "rabbit.link_max_per_session",
[{datatype, integer}, {validators, ["positive_32_bit_unsigned_integer"]}]}.
%% Set the max permissible number of client connections per node.
%% `infinity` means "no limit".
%%
%% {connection_max, infinity},
{mapping, "connection_max", "rabbit.connection_max",
[{datatype, [{atom, infinity}, integer]}]}.
{translation, "rabbit.connection_max",
fun(Conf) ->
case cuttlefish:conf_get("connection_max", Conf, undefined) of
undefined -> cuttlefish:unset();
infinity -> infinity;
Val when is_integer(Val) -> Val;
_ -> cuttlefish:invalid("should be a non-negative integer")
end
end
}.
{mapping, "ranch_connection_max", "rabbit.ranch_connection_max",
[{datatype, [{atom, infinity}, integer]}]}.
{translation, "rabbit.ranch_connection_max",
fun(Conf) ->
case cuttlefish:conf_get("ranch_connection_max", Conf, undefined) of
undefined -> cuttlefish:unset();
infinity -> infinity;
Val when is_integer(Val) -> Val;
_ -> cuttlefish:invalid("should be a non-negative integer")
end
end
}.
{mapping, "vhost_max", "rabbit.vhost_max",
[{datatype, [{atom, infinity}, integer]}, {validators, ["non_negative_integer"]}]}.
{translation, "rabbit.vhost_max",
fun(Conf) ->
case cuttlefish:conf_get("vhost_max", Conf, undefined) of
undefined -> cuttlefish:unset();
infinity -> infinity;
Val when is_integer(Val) -> Val;
_ -> cuttlefish:invalid("should be a non-negative integer")
end
end
}.
{mapping, "max_message_size", "rabbit.max_message_size",
[{datatype, integer}, {validators, ["max_message_size"]}]}.
{mapping, "cluster_exchange_limit", "rabbit.cluster_exchange_limit",
[{datatype, [{atom, infinity}, integer]}, {validators, ["non_negative_integer"]}]}.
{translation, "rabbit.cluster_exchange_limit",
fun(Conf) ->
case cuttlefish:conf_get("cluster_exchange_limit", Conf, undefined) of
undefined -> cuttlefish:unset();
infinity -> infinity;
Val when is_integer(Val) -> Val;
_ -> cuttlefish:invalid("should be a non-negative integer")
end
end
}.
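
%% An illustrative rabbitmq.conf sketch combining the node-wide limits
%% defined above (all values are arbitrary examples):
%%
%%   connection_max = 10000
%%   vhost_max = 200
%%   max_message_size = 16777216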

%% Customising Socket Options.
%%
%% See (https://www.erlang.org/doc/man/inet.html#setopts-2) for
%% further documentation.
%%
%% {tcp_listen_options, [{backlog, 128},
%% {nodelay, true},
%% {exit_on_close, false}]},
%% TCP listener section ======================================================

{mapping, "tcp_listen_options", "rabbit.tcp_listen_options", [
    {datatype, {enum, [none]}}]}.

{translation, "rabbit.tcp_listen_options",
fun(Conf) ->
    case cuttlefish:conf_get("tcp_listen_options", Conf, undefined) of
        none -> [];
        _ -> cuttlefish:invalid("Invalid tcp_listen_options")
    end
end}.

{mapping, "tcp_listen_options.backlog", "rabbit.tcp_listen_options.backlog", [
    {datatype, integer}
]}.

{mapping, "tcp_listen_options.nodelay", "rabbit.tcp_listen_options.nodelay", [
    {datatype, {enum, [true, false]}}
]}.

{mapping, "tcp_listen_options.buffer", "rabbit.tcp_listen_options.buffer",
    [{datatype, integer}]}.

{mapping, "tcp_listen_options.delay_send", "rabbit.tcp_listen_options.delay_send",
    [{datatype, {enum, [true, false]}}]}.

{mapping, "tcp_listen_options.dontroute", "rabbit.tcp_listen_options.dontroute",
    [{datatype, {enum, [true, false]}}]}.

{mapping, "tcp_listen_options.exit_on_close", "rabbit.tcp_listen_options.exit_on_close",
    [{datatype, {enum, [true, false]}}]}.

{mapping, "tcp_listen_options.fd", "rabbit.tcp_listen_options.fd",
    [{datatype, integer}]}.

{mapping, "tcp_listen_options.high_msgq_watermark", "rabbit.tcp_listen_options.high_msgq_watermark",
    [{datatype, integer}]}.

{mapping, "tcp_listen_options.high_watermark", "rabbit.tcp_listen_options.high_watermark",
    [{datatype, integer}]}.

{mapping, "tcp_listen_options.keepalive", "rabbit.tcp_listen_options.keepalive",
    [{datatype, {enum, [true, false]}}]}.

{mapping, "tcp_listen_options.low_msgq_watermark", "rabbit.tcp_listen_options.low_msgq_watermark",
    [{datatype, integer}]}.

{mapping, "tcp_listen_options.low_watermark", "rabbit.tcp_listen_options.low_watermark",
    [{datatype, integer}]}.

{mapping, "tcp_listen_options.port", "rabbit.tcp_listen_options.port",
    [{datatype, integer}, {validators, ["port"]}]}.

{mapping, "tcp_listen_options.priority", "rabbit.tcp_listen_options.priority",
    [{datatype, integer}]}.

{mapping, "tcp_listen_options.recbuf", "rabbit.tcp_listen_options.recbuf",
    [{datatype, integer}]}.

{mapping, "tcp_listen_options.send_timeout", "rabbit.tcp_listen_options.send_timeout",
    [{datatype, integer}]}.

{mapping, "tcp_listen_options.send_timeout_close", "rabbit.tcp_listen_options.send_timeout_close",
    [{datatype, {enum, [true, false]}}]}.

{mapping, "tcp_listen_options.sndbuf", "rabbit.tcp_listen_options.sndbuf",
    [{datatype, integer}]}.

{mapping, "tcp_listen_options.tos", "rabbit.tcp_listen_options.tos",
    [{datatype, integer}]}.

{mapping, "tcp_listen_options.linger.on", "rabbit.tcp_listen_options.linger",
    [{datatype, {enum, [true, false]}}]}.

{mapping, "tcp_listen_options.linger.timeout", "rabbit.tcp_listen_options.linger",
    [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
{translation, "rabbit.tcp_listen_options.linger",
fun(Conf) ->
LingerOn = cuttlefish:conf_get("tcp_listen_options.linger.on", Conf, false),
LingerTimeout = cuttlefish:conf_get("tcp_listen_options.linger.timeout", Conf, 0),
{LingerOn, LingerTimeout}
end}.
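
%% An illustrative rabbitmq.conf sketch of the TCP listener options mapped
%% above (values are examples, not tuned recommendations):
%%
%%   tcp_listen_options.backlog = 128
%%   tcp_listen_options.nodelay = true
%%   tcp_listen_options.exit_on_close = false
%%   tcp_listen_options.linger.on = true
%%   tcp_listen_options.linger.timeout = 0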
%% ==========================================================================
%%
%% Resource Limits & Flow Control
%% ==============================
%%
%% See https://www.rabbitmq.com/docs/memory for full details.
%% Memory-based Flow Control threshold.
%%
%% {vm_memory_high_watermark, 0.6},
%% Alternatively, we can set a limit (in bytes) of RAM used by the node.
%%
%% {vm_memory_high_watermark, {absolute, 1073741824}},
%%
%% Or you can set absolute value using memory unit symbols (with RabbitMQ 3.6.0+).
%%
%% {vm_memory_high_watermark, {absolute, "1024M"}},
%%
%% Supported unit symbols:
%%
%% k, kiB: kibibytes (2^10 - 1,024 bytes)
%% M, MiB: mebibytes (2^20 - 1,048,576 bytes)
%% G, GiB: gibibytes (2^30 - 1,073,741,824 bytes)
%% kB: kilobytes (10^3 - 1,000 bytes)
%% MB: megabytes (10^6 - 1,000,000 bytes)
%% GB: gigabytes (10^9 - 1,000,000,000 bytes)

{mapping, "vm_memory_high_watermark.relative", "rabbit.vm_memory_high_watermark", [
{datatype, float}
]}.

{mapping, "vm_memory_high_watermark.absolute", "rabbit.vm_memory_high_watermark", [
{datatype, [integer, string]},
{validators, ["is_supported_information_unit"]}
]}.

{translation, "rabbit.vm_memory_high_watermark",
fun(Conf) ->
Settings = cuttlefish_variable:filter_by_prefix("vm_memory_high_watermark", Conf),
Absolute = proplists:get_value(["vm_memory_high_watermark", "absolute"], Settings),
Relative = proplists:get_value(["vm_memory_high_watermark", "relative"], Settings),
case {Absolute, Relative} of
{undefined, undefined} -> cuttlefish:invalid("No vm watermark defined");
{_, undefined} -> {absolute, Absolute};
{undefined, _} -> Relative;
_ ->
cuttlefish:warn("Both vm_memory_high_watermark.absolute and "
"vm_memory_high_watermark.relative are configured. "
"vm_memory_high_watermark.absolute has precedence"),
{absolute, Absolute}
end
end}.
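
%% Only one of the two forms above should normally be set. Illustrative
%% rabbitmq.conf examples (pick one):
%%
%%   vm_memory_high_watermark.relative = 0.6
%%   vm_memory_high_watermark.absolute = 1073741824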
%% DEPRECATED. Not used since RabbitMQ 4.0
%%
%% Fraction of the high watermark limit at which queues start to
%% page message out to disc in order to free up memory.
%%
%% Values greater than 0.9 can be dangerous and should be used carefully.
%%
%% {vm_memory_high_watermark_paging_ratio, 0.5},
{mapping, "vm_memory_high_watermark_paging_ratio",
"rabbit.vm_memory_high_watermark_paging_ratio",
[{datatype, float}, {validators, ["less_than_1"]}]}.
%% DEPRECATED. Not used since RabbitMQ 4.0
%%
%% Interval (in milliseconds) at which we perform the check of the memory
%% levels against the watermarks.
%%
%% {memory_monitor_interval, 2500},
{mapping, "memory_monitor_interval", "rabbit.memory_monitor_interval",
[{datatype, integer}]}.
%% Selects Erlang VM memory consumption calculation strategy.
%% Can be `allocated`, `rss` or `legacy` (aliased as `erlang`).
%%
%% {vm_memory_calculation_strategy, rss},
{mapping, "vm_memory_calculation_strategy", "rabbit.vm_memory_calculation_strategy",
[{datatype, {enum, [rss, erlang, allocated, legacy]}}]}.
%% The total memory available can be calculated from the OS resources
%% (default option) or provided as a configuration parameter
{mapping, "total_memory_available_override_value", "rabbit.total_memory_available_override_value", [
{datatype, [integer, string]}]}.
%% Set disk free limit (in bytes). Once free disk space reaches this
%% lower bound, a disk alarm will be set - see the documentation
%% listed above for more details.
%%
%% {disk_free_limit, 50000000},
%%
%% Or you can set it using memory units (same as in vm_memory_high_watermark)
%% {disk_free_limit, "50MB"},
%% {disk_free_limit, "50000kB"},
%% {disk_free_limit, "2GB"},
%% Alternatively, we can set a limit relative to total available RAM.
%%
%% Values lower than 1.0 can be dangerous and should be used carefully.
%% {disk_free_limit, {mem_relative, 2.0}},
{mapping, "disk_free_limit.relative", "rabbit.disk_free_limit", [
{datatype, float}]}.
{mapping, "disk_free_limit.absolute", "rabbit.disk_free_limit", [
{datatype, [integer, string]},
{validators, ["is_supported_information_unit"]}
]}.

{translation, "rabbit.disk_free_limit",
fun(Conf) ->
Settings = cuttlefish_variable:filter_by_prefix("disk_free_limit", Conf),
Absolute = proplists:get_value(["disk_free_limit", "absolute"], Settings),
Relative = proplists:get_value(["disk_free_limit", "relative"], Settings),
case {Absolute, Relative} of
{undefined, undefined} -> cuttlefish:invalid("No disk limit defined");
{_, undefined} -> Absolute;
{undefined, _} -> {mem_relative, Relative};
_ ->
cuttlefish:warn("Both disk_free_limit.absolute and "
"disk_free_limit.relative are configured. "
"disk_free_limit.absolute has precedence"),
Absolute
end
end}.
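
%% Illustrative rabbitmq.conf examples for the keys above (pick one form;
%% the values are examples):
%%
%%   disk_free_limit.absolute = 50MB
%%   disk_free_limit.relative = 1.5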
%%
%% Clustering
%% =====================
%%
%% How to respond to cluster partitions.
%% See https://www.rabbitmq.com/docs/partitions for further details.
%%
%% {cluster_partition_handling, ignore},

{mapping, "cluster_partition_handling", "rabbit.cluster_partition_handling",
[{datatype, {enum, [ignore, pause_minority, autoheal, pause_if_all_down]}}]}.

{mapping, "cluster_partition_handling.pause_if_all_down.recover",
"rabbit.cluster_partition_handling",
[{datatype, {enum, [ignore, autoheal]}}]}.

{mapping, "cluster_partition_handling.pause_if_all_down.nodes.$name",
"rabbit.cluster_partition_handling",
[{datatype, atom}]}.
{translation, "rabbit.cluster_partition_handling",
fun(Conf) ->
case cuttlefish:conf_get("cluster_partition_handling", Conf) of
pause_if_all_down ->
PauseIfAllDownNodes = cuttlefish_variable:filter_by_prefix(
"cluster_partition_handling.pause_if_all_down.nodes",
Conf),
case PauseIfAllDownNodes of
[] ->
cuttlefish:invalid("Nodes required for pause_if_all_down");
_ ->
Nodes = [ V || {K,V} <- PauseIfAllDownNodes ],
PauseIfAllDownRecover = cuttlefish:conf_get(
"cluster_partition_handling.pause_if_all_down.recover",
Conf),
case PauseIfAllDownRecover of
Recover when Recover == ignore; Recover == autoheal ->
{pause_if_all_down, Nodes, Recover};
Invalid ->
cuttlefish:invalid("Recover strategy required for pause_if_all_down")
end
end;
Other -> Other
end
end}.
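
%% An illustrative rabbitmq.conf sketch of pause_if_all_down (the node
%% names are made-up examples):
%%
%%   cluster_partition_handling = pause_if_all_down
%%   cluster_partition_handling.pause_if_all_down.recover = autoheal
%%   cluster_partition_handling.pause_if_all_down.nodes.1 = rabbit@host1
%%   cluster_partition_handling.pause_if_all_down.nodes.2 = rabbit@host2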
%% Number of delegate processes to use for intra-cluster
%% communication. On a machine which has a very large number of cores
%% and is also part of a cluster, you may wish to increase this value.
%%
{mapping, "delegate_count", "rabbit.delegate_count", [
{datatype, integer}, {validators, ["non_negative_integer"]}
]}.
%% Mirror sync batch size, in messages. Increasing this will speed
%% up syncing but total batch size in bytes must not exceed 2 GiB.
%% Available in RabbitMQ 3.6.0 or later.
%%
%% {mirroring_sync_batch_size, 4096},
{mapping, "mirroring_sync_batch_size", "rabbit.mirroring_sync_batch_size",
[{datatype, bytesize}, {validators, ["mirroring_sync_batch_size"]}]}.
%% Mirror sync max throughput (in bytes) per second.
%% Supported unit symbols:
%% k, kiB: kibibytes (2^10 - 1,024 bytes)
%% M, MiB: mebibytes (2^20 - 1,048,576 bytes)
%% G, GiB: gibibytes (2^30 - 1,073,741,824 bytes)
%% kB: kilobytes (10^3 - 1,000 bytes)
%% MB: megabytes (10^6 - 1,000,000 bytes)
%% GB: gigabytes (10^9 - 1,000,000,000 bytes)
%%
%% 0 means "no limit".
%%
%% {mirroring_sync_max_throughput, 0},
{mapping, "mirroring_sync_max_throughput", "rabbit.mirroring_sync_max_throughput", [
{datatype, [integer, string]}
]}.
%% Peer discovery backend used by cluster formation.
%%
{mapping, "cluster_formation.peer_discovery_backend", "rabbit.cluster_formation.peer_discovery_backend", [
{datatype, atom}
]}.
{translation, "rabbit.cluster_formation.peer_discovery_backend",
fun(Conf) ->
case cuttlefish:conf_get("cluster_formation.peer_discovery_backend", Conf, rabbit_peer_discovery_classic_config) of
classic_config -> rabbit_peer_discovery_classic_config;
classic -> rabbit_peer_discovery_classic_config;
config -> rabbit_peer_discovery_classic_config;
dns -> rabbit_peer_discovery_dns;
aws -> rabbit_peer_discovery_aws;
consul -> rabbit_peer_discovery_consul;
etcd -> rabbit_peer_discovery_etcd;
kubernetes -> rabbit_peer_discovery_k8s;
k8s -> rabbit_peer_discovery_k8s;
Module -> Module
end
end}.
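
%% Both the short aliases handled above and full module names are accepted.
%% Illustrative rabbitmq.conf examples (equivalent forms):
%%
%%   cluster_formation.peer_discovery_backend = classic_config
%%   cluster_formation.peer_discovery_backend = rabbit_peer_discovery_classic_config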
%% Own node type, used by cluster formation.
%%
{mapping, "cluster_formation.node_type", "rabbit.cluster_formation.node_type", [
{datatype, {enum, [disc, disk, ram]}}
]}.
{translation, "rabbit.cluster_formation.node_type",
fun(Conf) ->
%% if peer discovery backend isn't configured, don't generate
%% node type
case cuttlefish:conf_get("cluster_formation.peer_discovery_backend", Conf, undefined) of
undefined -> cuttlefish:unset();
_Backend ->
case cuttlefish:conf_get("cluster_formation.node_type", Conf) of
disc -> disc;
%% always cast to `disc`
disk -> disc;
ram -> ram;
_Other -> disc
end
end
end}.
%% Register node during cluster formation when backend supports registration.
%%
{mapping, "cluster_formation.registration.enabled", "rabbit.cluster_formation.perform_registration", [
{datatype, {enum, [true, false]}}
]}.
%% Cluster formation: lock acquisition retries as passed to https://erlang.org/doc/man/global.html#set_lock-3
%%
%% Currently used in classic, k8s, and aws peer discovery backends.
{mapping, "cluster_formation.internal_lock_retries", "rabbit.cluster_formation.internal_lock_retries",
[
{datatype, integer},
{validators, ["non_zero_positive_integer"]}
]}.
%% Cluster formation: discovery failure retries
{mapping, "cluster_formation.lock_retry_limit", "rabbit.cluster_formation.lock_retry_limit",
[
{datatype, integer},
{validators, ["non_zero_positive_integer"]}
]}.
{mapping, "cluster_formation.lock_retry_timeout", "rabbit.cluster_formation.lock_retry_timeout",
[
{datatype, integer},
{validators, ["non_zero_positive_integer"]}
]}.
{mapping, "cluster_formation.discovery_retry_limit", "rabbit.cluster_formation.discovery_retry_limit",
[{datatype, [{atom, unlimited}, integer]}]}.
{translation, "rabbit.cluster_formation.discovery_retry_limit",
fun(Conf) ->
case cuttlefish:conf_get("cluster_formation.discovery_retry_limit", Conf, undefined) of
undefined -> cuttlefish:unset();
unlimited -> unlimited;
Val when is_integer(Val) andalso Val > 0 -> Val;
_ -> cuttlefish:invalid("should be positive integer or 'unlimited'")
end
end
}.
{mapping, "cluster_formation.discovery_retry_interval", "rabbit.cluster_formation.discovery_retry_interval",
[
{datatype, integer},
{validators, ["non_zero_positive_integer"]}
]}.
%% Target cluster size hint may be used by certain core features or plugins to perform
%% actions that should only be performed when a certain number of nodes (or a quorum of a certain number)
%% has already joined (started).
%%
{mapping, "cluster_formation.target_cluster_size_hint", "rabbit.cluster_formation.target_cluster_size_hint", [
{datatype, integer},
{validators, ["non_zero_positive_integer"]}
]}.
%% Classic config-driven peer discovery backend.
%%
%% Make clustering happen *automatically* at startup - only applied
%% to nodes that have just been reset or started for the first time.
%% See https://www.rabbitmq.com/docs/clustering#auto-config for
%% further details.
%%
%% {cluster_nodes, {['rabbit@my.host.com'], disc}},

{mapping, "cluster_formation.classic_config.nodes.$node", "rabbit.cluster_nodes",
[{datatype, atom}]}.
{translation, "rabbit.cluster_nodes",
fun(Conf) ->
Nodes = [V || {_, V} <- cuttlefish_variable:filter_by_prefix("cluster_formation.classic_config.nodes", Conf)],
case Nodes of
[] -> cuttlefish:unset();
Other ->
case cuttlefish:conf_get("cluster_formation.node_type", Conf, disc) of
disc -> {Other, disc};
%% Always cast to `disc`
disk -> {Other, disc};
ram -> {Other, ram}
end
end
end}.
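
%% An illustrative rabbitmq.conf sketch of classic config peer discovery
%% (the node names are made-up examples):
%%
%%   cluster_formation.peer_discovery_backend = classic_config
%%   cluster_formation.classic_config.nodes.1 = rabbit@host1
%%   cluster_formation.classic_config.nodes.2 = rabbit@host2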
%% DNS (A records and reverse lookups)-based peer discovery.
%%
{mapping, "cluster_formation.dns.hostname", "rabbit.cluster_formation.peer_discovery_dns.hostname",
[{datatype, string}]}.
{translation, "rabbit.cluster_formation.peer_discovery_dns.hostname",
fun(Conf) ->
case cuttlefish:conf_get("cluster_formation.dns.hostname", Conf, undefined) of
undefined -> cuttlefish:unset();
Value -> list_to_binary(Value)
end
end}.
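
%% An illustrative rabbitmq.conf example for DNS-based peer discovery (the
%% hostname is a made-up example):
%%
%%   cluster_formation.peer_discovery_backend = dns
%%   cluster_formation.dns.hostname = discovery.example.local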
{mapping, "cluster_queue_limit", "rabbit.cluster_queue_limit",
[{datatype, [{atom, infinity}, integer]}]}.
{translation, "rabbit.cluster_queue_limit",
fun(Conf) ->
case cuttlefish:conf_get("cluster_queue_limit", Conf, undefined) of
undefined -> cuttlefish:unset();
infinity -> infinity;
Val when is_integer(Val) andalso Val > 0 -> Val;
_ -> cuttlefish:invalid("should be positive integer or 'infinity'")
end
end
}.
%% Interval (in milliseconds) at which we send keepalive messages
%% to other cluster members. Note that this is not the same thing
%% as net_ticktime; missed keepalive messages will not cause nodes
%% to be considered down.
%%
%% {cluster_keepalive_interval, 10000},

{mapping, "cluster_keepalive_interval", "rabbit.cluster_keepalive_interval",
[{datatype, integer}]}.
%% Queue master locator (classic queues)
%%
%% Supported for backwards compatibility only as of 4.0.
%% The values min-masters, random and client-local are still accepted,
%% but they are translated to the client-local or balanced behaviours.
%% Use queue_leader_locator instead.
{mapping, "queue_master_locator", "rabbit.queue_master_locator",
[{datatype, string}]}.
{translation, "rabbit.queue_master_locator",
fun(Conf) ->
list_to_binary(cuttlefish:conf_get("queue_master_locator", Conf))
end}.
%% Queue leader locator (quorum queues and streams)
%%
{mapping, "queue_leader_locator", "rabbit.queue_leader_locator",
[{datatype, string}]}.
{translation, "rabbit.queue_leader_locator",
fun(Conf) ->
list_to_binary(cuttlefish:conf_get("queue_leader_locator", Conf))
end}.
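
%% An illustrative rabbitmq.conf example for the key above:
%%
%%   queue_leader_locator = balanced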
%%
%% Statistics Collection
%% =====================
%%
%% Set (internal) statistics collection granularity.
%%
%% {collect_statistics, none},

{mapping, "collect_statistics", "rabbit.collect_statistics",
[{datatype, {enum, [none, coarse, fine]}}]}.
%% Statistics collection interval (in milliseconds). Increasing
%% this will reduce the load on management database.
%%
%% {collect_statistics_interval, 5000},
{mapping, "collect_statistics_interval", "rabbit.collect_statistics_interval",
[{datatype, integer}]}.
%%
%% Misc/Advanced Options
%% =====================
%%
%% NB: Change these only if you understand what you are doing!
%%
%% Explicitly enable/disable hipe compilation.
%%
%% {hipe_compile, true},
%%
%% DEPRECATED: this is a no-op and is kept only to allow old configs.
{mapping, "hipe_compile", "rabbit.hipe_compile",
[{datatype, {enum, [true, false]}}]}.
%% Timeout used when waiting for Mnesia tables in a cluster to
%% become available.
%%
%% {mnesia_table_loading_retry_timeout, 30000},
{mapping, "mnesia_table_loading_retry_timeout", "rabbit.mnesia_table_loading_retry_timeout",
[{datatype, integer}]}.
%% Number of retries when waiting for Mnesia tables during cluster startup. Note that
%% this setting is not applied to Mnesia upgrades or node deletions.
%%
%% {mnesia_table_loading_retry_limit, 10},
{mapping, "mnesia_table_loading_retry_limit", "rabbit.mnesia_table_loading_retry_limit",
[{datatype, integer}]}.
{mapping, "message_store_shutdown_timeout", "rabbit.msg_store_shutdown_timeout",
[
{datatype, integer},
{validators, ["non_zero_positive_integer"]}
]}.
%% Size in bytes below which to embed messages in the queue index. See
%% https://www.rabbitmq.com/docs/persistence-conf
%%
%% {queue_index_embed_msgs_below, 4096}
{mapping, "queue_index_embed_msgs_below", "rabbit.queue_index_embed_msgs_below",
[{datatype, bytesize}]}.
%% Whether or not to enable background GC.
%%
%% {background_gc_enabled, true}
{mapping, "background_gc_enabled", "rabbit.background_gc_enabled",
[{datatype, {enum, [true, false]}}]}.
%% Interval (in milliseconds) at which we run background GC.
%%
%% {background_gc_target_interval, 60000}
{mapping, "background_gc_target_interval", "rabbit.background_gc_target_interval",
[{datatype, integer}]}.
%% Whether or not to enable proxy protocol support.
%%
%% {proxy_protocol, false}
{mapping, "proxy_protocol", "rabbit.proxy_protocol",
[{datatype, {enum, [true, false]}}]}.
%% Whether to stop the rabbit application if a vhost has
%% to terminate for any reason.
{mapping, "vhost_restart_strategy", "rabbit.vhost_restart_strategy",
[{datatype, {enum, [stop_node, continue, transient, persistent]}}]}.
%% Approximate maximum time a consumer can spend processing a message before
%% the channel is terminated, in milliseconds.
%%
%% {consumer_timeout, 1800000},
{mapping, "consumer_timeout", "rabbit.consumer_timeout", [
{datatype, integer},
{validators, ["non_zero_positive_integer"]}
]}.
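
%% An illustrative rabbitmq.conf example for the key above (30 minutes,
%% expressed in milliseconds):
%%
%%   consumer_timeout = 1800000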
%% Product name & version overrides.
{mapping, "product.name", "rabbit.product_name", [
{datatype, string}
]}.
{mapping, "product.version", "rabbit.product_version", [
{datatype, string}
]}.
%% Message of the day file.
%% The content of that file is added to the banners, both logged and
%% printed.
{mapping, "motd_file", "rabbit.motd_file", [
{datatype, string}
]}.
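
%% An illustrative rabbitmq.conf sketch of the overrides above (the name,
%% version and path are made-up examples):
%%
%%   product.name = ExampleMQ
%%   product.version = 1.2.3
%%   motd_file = /etc/rabbitmq/motd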
%% Whether to verify if this is the first time a node starts.
%% When enabled, nodes will create a marker file on first startup
%% and refuse to start if the marker exists but tables are empty.
%%
{mapping, "prevent_startup_if_node_was_reset", "rabbit.prevent_startup_if_node_was_reset", [
{datatype, {enum, [true, false]}}
]}.
% ==========================
% Logging section
% ==========================
{mapping, "log.dir", "rabbit.log_root", [
{datatype, string},
{validators, ["dir_writable"]}]}.
{mapping, "log.console", "rabbit.log.console.enabled", [
{datatype, {enum, [true, false]}}
]}.
{mapping, "log.console.level", "rabbit.log.console.level", [
{datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
]}.
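%% For example, to enable console logging at debug level (a minimal
%% sketch; both keys map to the settings above, and the values are
%% illustrative, not defaults):
%%
%% log.console = true
%% log.console.level = debug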
{mapping, "log.console.stdio", "rabbit.log.console.stdio", [
{default, stdout},
{datatype, {enum, [stdout, stderr]}}
]}.
{mapping, "log.console.use_colors", "rabbit.log.console.formatter", [
{default, on},
{datatype, flag}
]}.
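%% For example, to redirect console output to stderr rather than the
%% default stdout (illustrative):
%%
%% log.console.stdio = stderr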
{mapping, "log.console.color_esc_seqs.debug", "rabbit.log.console.formatter", [
{default, "\033[38;5;246m"},
{datatype, string}
]}.
{mapping, "log.console.color_esc_seqs.info", "rabbit.log.console.formatter", [
{default, ""},
{datatype, string}
]}.
{mapping, "log.console.color_esc_seqs.notice", "rabbit.log.console.formatter", [
{default, "\033[38;5;87m"},
{datatype, string}
]}.
{mapping, "log.console.color_esc_seqs.warning", "rabbit.log.console.formatter", [
{default, "\033[38;5;214m"},
{datatype, string}
]}.
{mapping, "log.console.color_esc_seqs.error", "rabbit.log.console.formatter", [
{default, "\033[38;5;160m"},
{datatype, string}
]}.
{mapping, "log.console.color_esc_seqs.critical", "rabbit.log.console.formatter", [
{default, "\033[1;37m\033[48;5;20m"},
{datatype, string}
]}.
{mapping, "log.console.color_esc_seqs.alert", "rabbit.log.console.formatter", [
{default, "\033[1;37m\033[48;5;93m"},
{datatype, string}
]}.
{mapping, "log.console.color_esc_seqs.emergency", "rabbit.log.console.formatter", [
{default, "\033[1;37m\033[48;5;196m"},
{datatype, string}
]}.
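%% The escape sequences above are the defaults for each level. To
%% override one, set any string; ANSI escape sequences are the intended
%% use. For example, bold red errors (an illustrative override):
%%
%% log.console.use_colors = on
%% log.console.color_esc_seqs.error = \033[1;31m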
{mapping, "log.console.formatter", "rabbit.log.console.formatter", [
{default, plaintext},
{datatype, {enum, [plaintext, json]}}
]}.
{mapping, "log.console.formatter.time_format", "rabbit.log.console.formatter", [
{default, rfc3339_space},
{datatype, {enum, [rfc3339_space, rfc3339_T, epoch_usecs, epoch_secs, lager_default]}}
]}.
{mapping, "log.console.formatter.level_format", "rabbit.log.console.formatter", [
{default, lc},
{datatype, {enum, [lc, uc, lc3, uc3, lc4, uc4]}}
]}.
{mapping, "log.console.formatter.single_line", "rabbit.log.console.formatter", [
{default, off},
{datatype, flag}
]}.
{mapping, "log.console.formatter.plaintext.format", "rabbit.log.console.formatter", [
{default, "$time [$level] $pid $msg"},
{datatype, string}
]}.
{mapping, "log.console.formatter.json.field_map", "rabbit.log.console.formatter", [
{default, "time level msg gl:- logger_formatter:- report_cb:- error_logger:-"},
{datatype, string}
]}.
{mapping, "log.console.formatter.json.verbosity_map", "rabbit.log.console.formatter", [
{default, ""},
{datatype, string}
]}.
{translation, "rabbit.log.console.formatter",
fun(Conf) ->
rabbit_prelaunch_early_logging:translate_formatter_conf("log.console.formatter", Conf)
end}.
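%% A few illustrative console formatter settings; every key below is
%% defined by the mappings above, and the values are examples rather
%% than recommendations:
%%
%% log.console.formatter = plaintext
%% log.console.formatter.time_format = rfc3339_T
%% log.console.formatter.level_format = uc4
%% log.console.formatter.single_line = on
%% log.console.formatter.plaintext.format = $time [$level] $pid $msg
%%
%% Or, to emit JSON and rename or drop fields ("time" is renamed to
%% "ts"; "*:-" drops all fields not listed):
%%
%% log.console.formatter = json
%% log.console.formatter.json.field_map = time:ts level msg *:-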
{mapping, "log.exchange", "rabbit.log.exchange.enabled", [
{datatype, {enum, [true, false]}}
]}.
{mapping, "log.exchange.level", "rabbit.log.exchange.level", [
{datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
]}.
{mapping, "log.exchange.formatter", "rabbit.log.exchange.formatter", [
{default, plaintext},
{datatype, {enum, [plaintext, json]}}
]}.
{mapping, "log.exchange.formatter.time_format", "rabbit.log.console.formatter", [
{default, rfc3339_space},
{datatype, [{enum, [rfc3339_space, rfc3339_T, epoch_usecs, epoch_secs, lager_default]}, string]}
]}.
{mapping, "log.exchange.formatter.level_format", "rabbit.log.exchange.formatter", [
{default, lc},
{datatype, {enum, [lc, uc, lc3, uc3, lc4, uc4]}}
]}.
{mapping, "log.exchange.formatter.single_line", "rabbit.log.exchange.formatter", [
{default, off},
{datatype, flag}
]}.
{mapping, "log.exchange.formatter.plaintext.format", "rabbit.log.exchange.formatter", [
{default, "$time [$level] $pid $msg"},
{datatype, string}
]}.
{mapping, "log.exchange.formatter.json.field_map", "rabbit.log.exchange.formatter", [
{default, "time level msg gl:- logger_formatter:- report_cb:- error_logger:-"},
{datatype, string}
]}.
{mapping, "log.exchange.formatter.json.verbosity_map", "rabbit.log.exchange.formatter", [
{default, ""},
{datatype, string}
]}.
{translation, "rabbit.log.exchange.formatter",
fun(Conf) ->
rabbit_prelaunch_early_logging:translate_formatter_conf("log.exchange.formatter", Conf)
end}.
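%% For example, to publish info-level messages to the amq.rabbitmq.log
%% exchange formatted as JSON (illustrative values):
%%
%% log.exchange = true
%% log.exchange.level = info
%% log.exchange.formatter = json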
{mapping, "log.journald", "rabbit.log.journald.enabled", [
{datatype, {enum, [true, false]}}
]}.
{mapping, "log.journald.level", "rabbit.log.journald.level", [
{datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
]}.
{mapping, "log.journald.fields", "rabbit.log.journald.fields", [
{default, "SYSLOG_IDENTIFIER=\"rabbitmq-server\" syslog_timestamp syslog_pid priority ERL_PID=pid CODE_FILE=file CODE_LINE=line CODE_MFA=mfa"},
{datatype, string}
]}.
{translation, "rabbit.log.journald.fields",
fun(Conf) ->
rabbit_prelaunch_early_logging:translate_journald_fields_conf("log.journald.fields", Conf)
end}.
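%% For example, to enable the journald output and pick the structured
%% fields sent along with each message; the fields string below is a
%% shortened variant of the default and purely illustrative:
%%
%% log.journald = true
%% log.journald.level = info
%% log.journald.fields = SYSLOG_IDENTIFIER="rabbitmq-server" syslog_pid priority ERL_PID=pid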
{mapping, "log.syslog", "rabbit.log.syslog.enabled", [
{datatype, {enum, [true, false]}}
]}.
{mapping, "log.syslog.level", "rabbit.log.syslog.level", [
{datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
]}.
{mapping, "log.syslog.formatter", "rabbit.log.syslog.formatter", [
{default, plaintext},
{datatype, {enum, [plaintext, json]}}
]}.
{mapping, "log.syslog.formatter.time_format", "rabbit.log.console.formatter", [
{default, rfc3339_space},
{datatype, [{enum, [rfc3339_space, rfc3339_T, epoch_usecs, epoch_secs, lager_default]}, string]}
]}.
{mapping, "log.syslog.formatter.level_format", "rabbit.log.syslog.formatter", [
{default, lc},
{datatype, {enum, [lc, uc, lc3, uc3, lc4, uc4]}}
]}.
{mapping, "log.syslog.formatter.single_line", "rabbit.log.syslog.formatter", [
{default, off},
{datatype, flag}
]}.
{mapping, "log.syslog.formatter.plaintext.format", "rabbit.log.syslog.formatter", [
{default, "$msg"},
{datatype, string}
]}.
{mapping, "log.syslog.formatter.json.field_map", "rabbit.log.syslog.formatter", [
{default, "time level msg gl:- logger_formatter:- report_cb:- error_logger:-"},
{datatype, string}
]}.
{mapping, "log.syslog.formatter.json.verbosity_map", "rabbit.log.syslog.formatter", [
{default, ""},
{datatype, string}
]}.
{translation, "rabbit.log.syslog.formatter",
fun(Conf) ->
rabbit_prelaunch_early_logging:translate_formatter_conf("log.syslog.formatter", Conf)
end}.
{mapping, "log.syslog.identity", "syslog.app_name", [
{datatype, string}
]}.
{mapping, "log.syslog.facility", "syslog.facility", [
{datatype, {enum, [kern, kernel, user, mail, daemon, auth, syslog, lpr,
news, uucp, cron, authpriv, ftp, ntp, audit, alert,
clock, local0, local1, local2, local3, local4,
local5, local6, local7]}}
]}.
{mapping, "log.syslog.multiline_mode", "syslog.multiline_mode", [
{datatype, {enum, [true, false]}}
]}.
{mapping, "log.syslog.ip", "syslog.dest_host", [
{datatype, string},
{validators, ["is_ip"]}
]}.
{mapping, "log.syslog.host", "syslog.dest_host", [
{datatype, string}
]}.
{translation, "syslog.dest_host",
fun(Conf) ->
case cuttlefish:conf_get("log.syslog", Conf) of
true ->
case cuttlefish:conf_get("log.syslog.ip", Conf, undefined) of
undefined ->
% If log.syslog.ip is not set, then this must be set
cuttlefish:conf_get("log.syslog.host", Conf);
IpAddr ->
IpAddr
end;
_ ->
cuttlefish:invalid("log.syslog must be set to true to set log.syslog.host or log.syslog.ip")
end
end}.
{mapping, "log.syslog.port", "syslog.dest_port", [
{datatype, integer}
]}.
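%% For example, to forward logs to a remote syslog daemon. Per the
%% translation above, log.syslog must be set to true for
%% log.syslog.host (or log.syslog.ip) to be accepted. The host name and
%% port below are illustrative:
%%
%% log.syslog = true
%% log.syslog.host = syslog.eng.example.com
%% log.syslog.port = 514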
{mapping, "log.syslog.transport", "syslog.protocol", [
{datatype, {enum, [udp, tcp, tls, ssl]}}
]}.
{mapping, "log.syslog.protocol", "syslog.protocol", [
{datatype, {enum, [rfc3164, rfc5424]}}
]}.
{mapping, "log.syslog.ssl_options.verify", "syslog.protocol", [
{datatype, {enum, [verify_peer, verify_none]}}]}.
{mapping, "log.syslog.ssl_options.fail_if_no_peer_cert", "syslog.protocol", [
{datatype, {enum, [true, false]}}]}.
{mapping, "log.syslog.ssl_options.cacertfile", "syslog.protocol",
[{datatype, string}, {validators, ["file_accessible"]}]}.
{mapping, "log.syslog.ssl_options.certfile", "syslog.protocol",
[{datatype, string}, {validators, ["file_accessible"]}]}.
{mapping, "log.syslog.ssl_options.cacerts.$name", "syslog.protocol",
[{datatype, string}]}.
{mapping, "log.syslog.ssl_options.cert", "syslog.protocol",
[{datatype, string}]}.
{mapping, "log.syslog.ssl_options.client_renegotiation", "syslog.protocol",
[{datatype, {enum, [true, false]}}]}.
{mapping, "log.syslog.ssl_options.crl_check", "syslog.protocol",
[{datatype, [{enum, [true, false, peer, best_effort]}]}]}.
{mapping, "log.syslog.ssl_options.depth", "syslog.protocol",
[{datatype, integer}, {validators, ["byte"]}]}.
{mapping, "log.syslog.ssl_options.dh", "syslog.protocol",
[{datatype, string}]}.
{mapping, "log.syslog.ssl_options.dhfile", "syslog.protocol",
[{datatype, string}, {validators, ["file_accessible"]}]}.
{mapping, "log.syslog.ssl_options.honor_cipher_order", "syslog.protocol",
[{datatype, {enum, [true, false]}}]}.
{mapping, "log.syslog.ssl_options.honor_ecc_order", "syslog.protocol",
[{datatype, {enum, [true, false]}}]}.
{mapping, "log.syslog.ssl_options.key.RSAPrivateKey", "syslog.protocol",
[{datatype, string}]}.
{mapping, "log.syslog.ssl_options.key.DSAPrivateKey", "syslog.protocol",
[{datatype, string}]}.
{mapping, "log.syslog.ssl_options.key.PrivateKeyInfo", "syslog.protocol",
[{datatype, string}]}.
{mapping, "log.syslog.ssl_options.keyfile", "syslog.protocol",
[{datatype, string}, {validators, ["file_accessible"]}]}.
{mapping, "log.syslog.ssl_options.log_alert", "syslog.protocol",
[{datatype, {enum, [true, false]}}]}.
{mapping, "log.syslog.ssl_options.password", "syslog.protocol",
[{datatype, string}]}.
{mapping, "log.syslog.ssl_options.psk_identity", "syslog.protocol",
[{datatype, string}]}.
{mapping, "log.syslog.ssl_options.reuse_sessions", "syslog.protocol",
[{datatype, {enum, [true, false]}}]}.
{mapping, "log.syslog.ssl_options.secure_renegotiate", "syslog.protocol",
[{datatype, {enum, [true, false]}}]}.
{mapping, "log.syslog.ssl_options.versions.$version", "syslog.protocol",
[{datatype, atom}]}.
{translation, "syslog.protocol",
fun(Conf) ->
ParseSslOptions = fun() ->
RawSettings = [
{verify, cuttlefish:conf_get("log.syslog.ssl_options.verify", Conf, undefined)},
{fail_if_no_peer_cert, cuttlefish:conf_get("log.syslog.ssl_options.fail_if_no_peer_cert", Conf, undefined)},
{cacertfile, cuttlefish:conf_get("log.syslog.ssl_options.cacertfile", Conf, undefined)},
{certfile, cuttlefish:conf_get("log.syslog.ssl_options.certfile", Conf, undefined)},
{cert, cuttlefish:conf_get("log.syslog.ssl_options.cert", Conf, undefined)},
{client_renegotiation, cuttlefish:conf_get("log.syslog.ssl_options.client_renegotiation", Conf, undefined)},
{crl_check, cuttlefish:conf_get("log.syslog.ssl_options.crl_check", Conf, undefined)},
{depth, cuttlefish:conf_get("log.syslog.ssl_options.depth", Conf, undefined)},
{dh, cuttlefish:conf_get("log.syslog.ssl_options.dh", Conf, undefined)},
{dhfile, cuttlefish:conf_get("log.syslog.ssl_options.dhfile", Conf, undefined)},
{honor_cipher_order, cuttlefish:conf_get("log.syslog.ssl_options.honor_cipher_order", Conf, undefined)},
{honor_ecc_order, cuttlefish:conf_get("log.syslog.ssl_options.honor_ecc_order", Conf, undefined)},
{keyfile, cuttlefish:conf_get("log.syslog.ssl_options.keyfile", Conf, undefined)},
{log_alert, cuttlefish:conf_get("log.syslog.ssl_options.log_alert", Conf, undefined)},
{password, cuttlefish:conf_get("log.syslog.ssl_options.password", Conf, undefined)},
{psk_identity, cuttlefish:conf_get("log.syslog.ssl_options.psk_identity", Conf, undefined)},
{reuse_sessions, cuttlefish:conf_get("log.syslog.ssl_options.reuse_sessions", Conf, undefined)},
{secure_renegotiate, cuttlefish:conf_get("log.syslog.ssl_options.secure_renegotiate", Conf, undefined)}
],
DefinedSettings = [{K, V} || {K, V} <- RawSettings, V =/= undefined],
lists:map(
fun({K, Val}) when K == dh; K == cert -> {K, list_to_binary(Val)};
({K, Val}) -> {K, Val}
end,
DefinedSettings) ++
[ {K, V}
|| {K, V} <-
[{cacerts, [ list_to_binary(V) || {_, V} <- cuttlefish_variable:filter_by_prefix("log.syslog.ssl_options.cacerts", Conf)]},
{versions, [ V || {_, V} <- cuttlefish_variable:filter_by_prefix("log.syslog.ssl_options.versions", Conf) ]},
{key, case cuttlefish_variable:filter_by_prefix("log.syslog.ssl_options.key", Conf) of
[{[_,_,Key], Val}|_] -> {list_to_atom(Key), list_to_binary(Val)};
_ -> undefined
end}],
V =/= undefined,
V =/= []]
end,
Proto = cuttlefish:conf_get("log.syslog.protocol", Conf, undefined),
Transport = cuttlefish:conf_get("log.syslog.transport", Conf, udp),
case Transport of
TLS when TLS == tls; TLS == ssl ->
case Proto of
rfc3164 ->
cuttlefish:invalid("Syslog protocol rfc3164 is not compatible with TLS");
_ ->
{rfc5424, tls, ParseSslOptions()}
end;
_ when Transport == udp; Transport == tcp ->
case Proto of
undefined -> {rfc3164, Transport};
_ -> {Proto, Transport}
end;
_ -> cuttlefish:invalid("Invalid syslog transport ~p~n", [Transport])
end
end}.
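%% The translation above combines log.syslog.protocol and
%% log.syslog.transport: udp and tcp default to rfc3164, while the
%% tls/ssl transports always use rfc5424 together with the
%% log.syslog.ssl_options.* settings (rfc3164 over TLS is rejected).
%% An illustrative TLS sketch, with a placeholder certificate path:
%%
%% log.syslog.transport = tls
%% log.syslog.ssl_options.cacertfile = /path/to/ca_certificate.pem
%% log.syslog.ssl_options.verify = verify_peer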
{mapping, "log.file", "rabbit.log.file.file", [
{datatype, [{enum, [false]}, string]}
]}.
{mapping, "log.file.level", "rabbit.log.file.level", [
{datatype,
{enum, ['=debug', debug,
info, '!=info',
notice, '<=notice',
'<warning', warning,
error,
critical,
alert,
emergency,
none]}}
]}.
{mapping, "log.file.rotation.date", "rabbit.log.file.date", [
{datatype, string}
]}.
{mapping, "log.file.rotation.compress", "rabbit.log.file.compress", [
{default, false},
{datatype, {enum, [true, false]}}
]}.
{mapping, "log.file.rotation.size", "rabbit.log.file.size", [
{datatype, integer}
]}.
{mapping, "log.file.rotation.count", "rabbit.log.file.count", [
{datatype, integer}
]}.
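%% For example, to log to a file at info level, rotating daily at
%% midnight and keeping five compressed archives. The "$D0" date
%% pattern follows the Lager-style rotation syntax; all values are
%% illustrative:
%%
%% log.file = rabbit.log
%% log.file.level = info
%% log.file.rotation.date = $D0
%% log.file.rotation.count = 5
%% log.file.rotation.compress = true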
{mapping, "log.file.formatter", "rabbit.log.file.formatter", [
{default, plaintext},
{datatype, {enum, [plaintext, json]}}
]}.
{mapping, "log.file.formatter.time_format", "rabbit.log.file.formatter", [
{default, rfc3339_space},
{datatype, [{enum, [rfc3339_space, rfc3339_T, epoch_usecs, epoch_secs, lager_default]}, string]}
]}.
{mapping, "log.file.formatter.level_format", "rabbit.log.file.formatter", [
{default, lc},
{datatype, {enum, [lc, uc, lc3, uc3, lc4, uc4]}}
]}.
{mapping, "log.file.formatter.single_line", "rabbit.log.file.formatter", [
{default, off},
{datatype, flag}
]}.
{mapping, "log.file.formatter.plaintext.format", "rabbit.log.file.formatter", [
{default, "$time [$level] $pid $msg"},
{datatype, string}
]}.
{mapping, "log.file.formatter.json.field_map", "rabbit.log.file.formatter", [
{default, "time level msg gl:- logger_formatter:- report_cb:- error_logger:-"},
{datatype, string}
]}.
{mapping, "log.file.formatter.json.verbosity_map", "rabbit.log.file.formatter", [
{default, ""},
{datatype, string}
]}.
{translation, "rabbit.log.file.formatter",
fun(Conf) ->
rabbit_prelaunch_early_logging:translate_formatter_conf("log.file.formatter", Conf)
end}.
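%% For example, to write single-line JSON entries with plain Epoch
%% timestamps to the log file (illustrative):
%%
%% log.file.formatter = json
%% log.file.formatter.time_format = epoch_secs
%% log.file.formatter.single_line = on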
%% Connection log.
{mapping, "log.connection.level", "rabbit.log.categories.connection.level", [
{datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
]}.
{mapping, "log.connection.file", "rabbit.log.categories.connection.file", [
{datatype, string}
]}.
{mapping, "log.connection.rotation.date", "rabbit.log.categories.connection.rotate_on_date", [
{datatype, string}
]}.
{mapping, "log.connection.rotation.compress", "rabbit.log.categories.connection.compress_on_rotate", [
{default, false},
{datatype, {enum, [true, false]}}
]}.
{mapping, "log.connection.rotation.size", "rabbit.log.categories.connection.max_no_bytes", [
{datatype, integer}
]}.
{mapping, "log.connection.rotation.count", "rabbit.log.categories.connection.max_no_files", [
{datatype, integer}
]}.
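%% For example, to route connection lifecycle events to a dedicated
%% file at a more verbose level (the file name is illustrative). The
%% channel, mirroring, queue, federation, upgrade and ra categories
%% below follow the same pattern:
%%
%% log.connection.level = debug
%% log.connection.file = connection.log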
%% Channel log.
{mapping, "log.channel.level", "rabbit.log.categories.channel.level", [
{datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
]}.
{mapping, "log.channel.file", "rabbit.log.categories.channel.file", [
{datatype, string}
]}.
{mapping, "log.channel.rotation.date", "rabbit.log.categories.channel.rotate_on_date", [
{datatype, string}
]}.
{mapping, "log.channel.rotation.compress", "rabbit.log.categories.channel.compress_on_rotate", [
{default, false},
{datatype, {enum, [true, false]}}
]}.
{mapping, "log.channel.rotation.size", "rabbit.log.categories.channel.max_no_bytes", [
{datatype, integer}
]}.
{mapping, "log.channel.rotation.count", "rabbit.log.categories.channel.max_no_files", [
{datatype, integer}
]}.
%% Mirroring log.
{mapping, "log.mirroring.level", "rabbit.log.categories.mirroring.level", [
{datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
]}.
{mapping, "log.mirroring.file", "rabbit.log.categories.mirroring.file", [
{datatype, string}
]}.
{mapping, "log.mirroring.rotation.date", "rabbit.log.categories.mirroring.rotate_on_date", [
{datatype, string}
]}.
{mapping, "log.mirroring.rotation.compress", "rabbit.log.categories.mirroring.compress_on_rotate", [
{default, false},
{datatype, {enum, [true, false]}}
]}.
{mapping, "log.mirroring.rotation.size", "rabbit.log.categories.mirroring.max_no_bytes", [
{datatype, integer}
]}.
{mapping, "log.mirroring.rotation.count", "rabbit.log.categories.mirroring.max_no_files", [
{datatype, integer}
]}.
%% Queue log.
{mapping, "log.queue.level", "rabbit.log.categories.queue.level", [
{datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
]}.
{mapping, "log.queue.file", "rabbit.log.categories.queue.file", [
{datatype, string}
]}.
{mapping, "log.queue.rotation.date", "rabbit.log.categories.queue.rotate_on_date", [
{datatype, string}
]}.
{mapping, "log.queue.rotation.compress", "rabbit.log.categories.queue.compress_on_rotate", [
{default, false},
{datatype, {enum, [true, false]}}
]}.
{mapping, "log.queue.rotation.size", "rabbit.log.categories.queue.max_no_bytes", [
{datatype, integer}
]}.
{mapping, "log.queue.rotation.count", "rabbit.log.categories.queue.max_no_files", [
{datatype, integer}
]}.
%% Federation log.
{mapping, "log.federation.level", "rabbit.log.categories.federation.level", [
{datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
]}.
{mapping, "log.federation.file", "rabbit.log.categories.federation.file", [
{datatype, string}
]}.
{mapping, "log.federation.rotation.date", "rabbit.log.categories.federation.rotate_on_date", [
{datatype, string}
]}.
{mapping, "log.federation.rotation.compress", "rabbit.log.categories.federation.compress_on_rotate", [
{default, false},
{datatype, {enum, [true, false]}}
]}.
{mapping, "log.federation.rotation.size", "rabbit.log.categories.federation.max_no_bytes", [
{datatype, integer}
]}.
{mapping, "log.federation.rotation.count", "rabbit.log.categories.federation.max_no_files", [
{datatype, integer}
]}.
%% Upgrade log.
{mapping, "log.upgrade.level", "rabbit.log.categories.upgrade.level", [
{datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
]}.
{mapping, "log.upgrade.file", "rabbit.log.categories.upgrade.file", [
{datatype, string}
]}.
{mapping, "log.upgrade.rotation.date", "rabbit.log.categories.upgrade.rotate_on_date", [
{datatype, string}
]}.
{mapping, "log.upgrade.rotation.compress", "rabbit.log.categories.upgrade.compress_on_rotate", [
{default, false},
{datatype, {enum, [true, false]}}
]}.
{mapping, "log.upgrade.rotation.size", "rabbit.log.categories.upgrade.max_no_bytes", [
{datatype, integer}
]}.
{mapping, "log.upgrade.rotation.count", "rabbit.log.categories.upgrade.max_no_files", [
{datatype, integer}
]}.
%% Ra log.
{mapping, "log.ra.level", "rabbit.log.categories.ra.level", [
{datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
]}.
{mapping, "log.ra.file", "rabbit.log.categories.ra.file", [
{datatype, string}
]}.
{mapping, "log.ra.rotation.date", "rabbit.log.categories.ra.rotate_on_date", [
{datatype, string}
]}.
{mapping, "log.ra.rotation.compress", "rabbit.log.categories.ra.compress_on_rotate", [
{default, false},
{datatype, {enum, [true, false]}}
]}.
{mapping, "log.ra.rotation.size", "rabbit.log.categories.ra.max_no_bytes", [
{datatype, integer}
]}.
{mapping, "log.ra.rotation.count", "rabbit.log.categories.ra.max_no_files", [
{datatype, integer}
]}.
%% Default logging config.
{mapping, "log.default.level", "rabbit.log.categories.default.level", [
{datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
]}.
{mapping, "log.default.rotation.date", "rabbit.log.categories.default.rotate_on_date", [
{datatype, string}
]}.
{mapping, "log.default.rotation.compress", "rabbit.log.categories.default.compress_on_rotate", [
{default, false},
{datatype, {enum, [true, false]}}
]}.
{mapping, "log.default.rotation.size", "rabbit.log.categories.default.max_no_bytes", [
{datatype, integer}
]}.
{mapping, "log.default.rotation.count", "rabbit.log.categories.default.max_no_files", [
{datatype, integer}
]}.
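%% Illustrative settings for categories that do not override them: log
%% at info level, capping each file at about 10 MiB across three files:
%%
%% log.default.level = info
%% log.default.rotation.size = 10485760
%% log.default.rotation.count = 3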
%%
%% Feature flags and deprecated features
%% =====================================
%%
{mapping,
"deprecated_features.permit.$name", "rabbit.permit_deprecated_features",
[{datatype, {enum, [true, false]}}]
}.
%% This converts:
%% deprecated_features.permit.my_feature = true
%% to:
%% {rabbit, [{permit_deprecated_features, #{my_feature => true}}]}.
{translation, "rabbit.permit_deprecated_features",
fun(Conf) ->
Settings = cuttlefish_variable:filter_by_prefix(
"deprecated_features.permit", Conf),
maps:from_list(
[{list_to_atom(FeatureName), State}
|| {["deprecated_features", "permit", FeatureName], State}
<- Settings])
end}.
% ==========================
% Kernel section
% ==========================
{mapping, "net_ticktime", "kernel.net_ticktime",[
{datatype, [integer]},
{validators, ["non_zero_positive_integer"]}
]}.
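%% For example, to tighten inter-node failure detection by lowering the
%% tick time from the Erlang default of 60 seconds (the value below is
%% illustrative, not a recommendation):
%%
%% net_ticktime = 30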
{mapping, "distribution.listener.port_range.min", "kernel.inet_dist_listen_min", [
{datatype, [integer]},
{validators, ["non_zero_positive_integer"]}
]}.
{mapping, "distribution.listener.port_range.max", "kernel.inet_dist_listen_max", [
{datatype, [integer]},
{validators, ["non_zero_positive_integer"]}
]}.
{mapping, "distribution.listener.interface", "kernel.inet_dist_use_interface", [
{datatype, [string]},
{validators, ["is_ip"]}
]}.
{translation, "kernel.inet_dist_use_interface",
fun(Conf) ->
case cuttlefish:conf_get("distribution.listener.interface", Conf, undefined) of
undefined ->
cuttlefish:unset();
Value when is_list(Value) ->
case inet:parse_address(Value) of
{ok, Parsed} -> Parsed;
{error, _} -> cuttlefish:invalid("should be a valid IP address")
end;
_ ->
cuttlefish:invalid("should be a valid IP address")
end
end
}.
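%% For example, to bind inter-node (distribution) traffic to a single
%% interface and a fixed port; the address and port are illustrative:
%%
%% distribution.listener.interface = 192.168.0.10
%% distribution.listener.port_range.min = 25672
%% distribution.listener.port_range.max = 25672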
% ==========================
% sysmon_handler section
% ==========================
%% @doc The threshold at which to warn about the number of processes
%% that are overly busy. Processes with large heaps or that take a
%% long time to garbage collect will count toward this threshold.
{mapping, "sysmon_handler.thresholds.busy_processes", "sysmon_handler.process_limit", [
{datatype, integer},
hidden
]}.
{translation, "sysmon_handler.process_limit",
fun(Conf) ->
case cuttlefish:conf_get("sysmon_handler.thresholds.busy_processes", Conf, undefined) of
undefined ->
cuttlefish:unset();
Int when is_integer(Int) ->
Int;
_ ->
cuttlefish:invalid("should be a non-negative integer")
end
end
}.
%% @doc The threshold at which to warn about the number of ports that
%% are overly busy. Ports with full input buffers count toward this
%% threshold.
{mapping, "sysmon_handler.thresholds.busy_ports", "sysmon_handler.port_limit", [
{datatype, integer},
hidden
]}.
{translation, "sysmon_handler.port_limit",
fun(Conf) ->
case cuttlefish:conf_get("sysmon_handler.thresholds.busy_ports", Conf, undefined) of
undefined ->
cuttlefish:unset();
Int when is_integer(Int) ->
Int;
_ ->
cuttlefish:invalid("should be a non-negative integer")
end
end
}.
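%% For example, to warn once 50 processes or 10 ports are considered
%% busy (both settings are hidden; the values below are illustrative):
%%
%% sysmon_handler.thresholds.busy_processes = 50
%% sysmon_handler.thresholds.busy_ports = 10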
%% @doc A process will become busy when it exceeds this amount of time
%% doing garbage collection.
%% @see sysmon_handler.thresholds.busy_processes
{mapping, "sysmon_handler.triggers.process.garbage_collection", "sysmon_handler.gc_ms_limit", [
{datatype, [{atom, off},
{duration, ms}]},
hidden
]}.
{translation, "sysmon_handler.gc_ms_limit",
fun(Conf) ->
case cuttlefish:conf_get("sysmon_handler.triggers.process.garbage_collection", Conf, undefined) of
undefined ->
cuttlefish:unset();
off ->
0;
Int when is_integer(Int) ->
Int;
_ ->
cuttlefish:invalid("should be a non-negative integer")
end
end
}.
%% @doc A process will become busy when it exceeds this amount of time
%% during a single process scheduling & execution cycle.
{mapping, "sysmon_handler.triggers.process.long_scheduled_execution", "sysmon_handler.schedule_ms_limit", [
{datatype, [{atom, off},
{duration, ms}]},
hidden
]}.
{translation, "sysmon_handler.schedule_ms_limit",
fun(Conf) ->
case cuttlefish:conf_get("sysmon_handler.triggers.process.long_scheduled_execution", Conf, undefined) of
undefined ->
cuttlefish:unset();
off ->
0;
Int when is_integer(Int) ->
Int;
_ ->
cuttlefish:invalid("should be a non-negative integer")
end
end
}.
%% @doc A process will become busy when its heap exceeds this size.
%% @see sysmon_handler.thresholds.busy_processes
{mapping, "sysmon_handler.triggers.process.heap_size", "sysmon_handler.heap_word_limit", [
{datatype, [{atom, off},
bytesize]},
hidden
]}.
{translation, "sysmon_handler.heap_word_limit",
fun(Conf) ->
case cuttlefish:conf_get("sysmon_handler.triggers.process.heap_size", Conf, undefined) of
undefined ->
cuttlefish:unset();
off ->
0;
Bytes when is_integer(Bytes) ->
WordSize = erlang:system_info(wordsize),
Bytes div WordSize;
_ ->
cuttlefish:invalid("should be a non-negative integer")
end
end
}.
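%% For example, to flag a process after 100ms spent in a garbage
%% collection run or in a single uninterrupted scheduled run, or once
%% its heap reaches 100MB; 'off' disables a trigger and all values
%% below are illustrative:
%%
%% sysmon_handler.triggers.process.garbage_collection = 100ms
%% sysmon_handler.triggers.process.long_scheduled_execution = 100ms
%% sysmon_handler.triggers.process.heap_size = 100MB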
%% @doc Whether ports with full input buffers will be counted as
%% busy. Ports can represent open files or network sockets.
%% @see sysmon_handler.thresholds.busy_ports
{mapping, "sysmon_handler.triggers.port", "sysmon_handler.busy_port", [
{datatype, flag},
hidden
]}.
{translation, "sysmon_handler.busy_port",
fun(Conf) ->
case cuttlefish:conf_get("sysmon_handler.triggers.port", Conf, undefined) of
undefined ->
cuttlefish:unset();
Val -> Val
end
end
}.
%% @doc Whether distribution ports with full input buffers will be
%% counted as busy. Distribution ports connect Erlang nodes within a
%% single cluster.
%% @see sysmon_handler.thresholds.busy_ports
{mapping, "sysmon_handler.triggers.distribution_port", "sysmon_handler.busy_dist_port", [
{datatype, flag},
hidden
]}.
{translation, "sysmon_handler.busy_dist_port",
fun(Conf) ->
case cuttlefish:conf_get("sysmon_handler.triggers.distribution_port", Conf, undefined) of
undefined ->
cuttlefish:unset();
Val -> Val
end
end
}.
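%% For example, to report busy ports but stay quiet about busy
%% distribution ports (when unset, the sysmon_handler defaults apply):
%%
%% sysmon_handler.triggers.port = true
%% sysmon_handler.triggers.distribution_port = false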
%%
%% Ra
%%
{mapping, "raft.segment_max_entries", "ra.segment_max_entries", [
{datatype, integer},
{validators, ["non_zero_positive_integer", "positive_16_bit_unsigned_integer"]}
]}.
{translation, "ra.segment_max_entries",
fun(Conf) ->
case cuttlefish:conf_get("raft.segment_max_entries", Conf, undefined) of
undefined -> cuttlefish:unset();
Val -> Val
end
end
}.
{mapping, "raft.wal_max_size_bytes", "ra.wal_max_size_bytes", [
{datatype, integer},
{validators, ["non_zero_positive_integer"]}
]}.
{translation, "ra.wal_max_size_bytes",
fun(Conf) ->
case cuttlefish:conf_get("raft.wal_max_size_bytes", Conf, undefined) of
undefined -> cuttlefish:unset();
Val -> Val
end
end
}.
{mapping, "raft.wal_max_entries", "ra.wal_max_entries", [
{datatype, integer},
{validators, ["non_zero_positive_integer"]}
]}.
{translation, "ra.wal_max_entries",
fun(Conf) ->
case cuttlefish:conf_get("raft.wal_max_entries", Conf, undefined) of
undefined -> cuttlefish:unset();
Val -> Val
end
end
}.
{mapping, "raft.wal_hibernate_after", "ra.wal_hibernate_after", [
{datatype, integer},
{validators, ["non_zero_positive_integer"]}
]}.
{translation, "ra.wal_hibernate_after",
fun(Conf) ->
case cuttlefish:conf_get("raft.wal_hibernate_after", Conf, undefined) of
undefined -> cuttlefish:unset();
Val -> Val
end
end
}.
{mapping, "raft.wal_max_batch_size", "ra.wal_max_batch_size", [
{datatype, integer},
{validators, ["non_zero_positive_integer"]}
]}.
{translation, "ra.wal_max_batch_size",
fun(Conf) ->
case cuttlefish:conf_get("raft.wal_max_batch_size", Conf, undefined) of
undefined -> cuttlefish:unset();
Val -> Val
end
end
}.
{mapping, "raft.snapshot_chunk_size", "ra.snapshot_chunk_size", [
{datatype, integer},
{validators, ["non_zero_positive_integer"]}
]}.
{translation, "ra.snapshot_chunk_size",
fun(Conf) ->
case cuttlefish:conf_get("raft.snapshot_chunk_size", Conf, undefined) of
undefined -> cuttlefish:unset();
Val -> Val
end
end
}.
{mapping, "raft.data_dir", "ra.data_dir", [
{datatype, string}
]}.
{translation, "ra.data_dir",
fun(Conf) ->
case cuttlefish:conf_get("raft.data_dir", Conf, undefined) of
undefined -> cuttlefish:unset();
Val -> Val
end
end
}.
{mapping, "raft.adaptive_failure_detector.poll_interval", "aten.poll_interval", [
{datatype, integer},
{validators, ["non_zero_positive_integer"]}
]}.
{translation, "aten.poll_interval",
fun(Conf) ->
case cuttlefish:conf_get("raft.adaptive_failure_detector.poll_interval", Conf, undefined) of
undefined -> cuttlefish:unset();
Val -> Val
end
end
}.
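%% A sketch of Raft (Ra) tuning: each key above passes straight through
%% to the Ra application, and the values below are illustrative, not
%% defaults:
%%
%% raft.wal_max_size_bytes = 64000000
%% raft.segment_max_entries = 32768
%% raft.data_dir = /var/lib/rabbitmq/ra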
{mapping, "default_queue_type", "rabbit.default_queue_type", [
{datatype, atom}
]}.
{translation, "rabbit.default_queue_type",
fun(Conf) ->
case cuttlefish:conf_get("default_queue_type", Conf, rabbit_classic_queue) of
classic -> rabbit_classic_queue;
quorum -> rabbit_quorum_queue;
stream -> rabbit_stream_queue;
Module -> Module
end
end}.
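%% For example, to declare quorum queues by default when a client does
%% not ask for an explicit queue type:
%%
%% default_queue_type = quorum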
%% Enable or disable local random exchange
{mapping, "exchange_types.local_random.enabled", "rabbit.local_random_exchange_enabled", [
{datatype, {enum, [true, false]}}
]}.
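%% For example, to make the local random exchange type available:
%%
%% exchange_types.local_random.enabled = true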
%%
%% Backing queue version
%%
%% DEPRECATED. Not used since RabbitMQ 4.0
{mapping, "classic_queue.default_version", "rabbit.classic_queue_default_version", [
{datatype, integer},
{validators, ["non_zero_positive_integer"]}
]}.
{translation, "rabbit.classic_queue_default_version",
fun(Conf) ->
case cuttlefish:conf_get("classic_queue.default_version", Conf, 2) of
1 -> cuttlefish:invalid("Classic queues v1 are no longer supported");
2 -> 2;
_ -> cuttlefish:unset()
end
end
}.
{mapping, "quorum_queue.compute_checksums", "rabbit.quorum_compute_checksums", [
{datatype, {enum, [true, false]}}]}.
{mapping, "quorum_queue.property_equivalence.relaxed_checks_on_redeclaration", "rabbit.quorum_relaxed_checks_on_redeclaration", [
{datatype, {enum, [true, false]}}]}.
{mapping, "quorum_queue.initial_cluster_size", "rabbit.quorum_cluster_size", [
{datatype, integer},
{validators, ["non_zero_positive_integer"]}
]}.
{mapping, "quorum_queue.commands_soft_limit", "rabbit.quorum_commands_soft_limit", [
{datatype, integer},
{validators, ["non_zero_positive_integer"]}
]}.
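%% For example, to start new quorum queues with three members, verify
%% checksums, and relax property-equivalence checks on redeclaration
%% (values are illustrative):
%%
%% quorum_queue.initial_cluster_size = 3
%% quorum_queue.compute_checksums = true
%% quorum_queue.property_equivalence.relaxed_checks_on_redeclaration = true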
%%
%% Quorum Queue membership reconciliation
%%
{mapping, "quorum_queue.continuous_membership_reconciliation.enabled", "rabbit.quorum_membership_reconciliation_enabled", [
{datatype, {enum, [true, false]}}]}.
{mapping, "quorum_queue.continuous_membership_reconciliation.auto_remove", "rabbit.quorum_membership_reconciliation_auto_remove", [
{datatype, {enum, [true, false]}}]}.
{mapping, "quorum_queue.continuous_membership_reconciliation.interval", "rabbit.quorum_membership_reconciliation_interval", [
{datatype, integer}, {validators, ["non_negative_integer"]}
]}.
{mapping, "quorum_queue.continuous_membership_reconciliation.trigger_interval", "rabbit.quorum_membership_reconciliation_trigger_interval", [
{datatype, integer}, {validators, ["non_negative_integer"]}
]}.
{mapping, "quorum_queue.continuous_membership_reconciliation.target_group_size", "rabbit.quorum_membership_reconciliation_target_group_size", [
{datatype, integer}, {validators, ["non_negative_integer"]}
]}.
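%% For example, to let the node grow and shrink quorum queue membership
%% towards a target group size of three (values are illustrative):
%%
%% quorum_queue.continuous_membership_reconciliation.enabled = true
%% quorum_queue.continuous_membership_reconciliation.auto_remove = true
%% quorum_queue.continuous_membership_reconciliation.target_group_size = 3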
%%
%% Runtime parameters
%%
{mapping, "runtime_parameters.limits.$category", "rabbit.runtime_parameters.limits", [
{datatype, integer},
{validators, ["non_negative_integer"]}
]}.
{translation, "rabbit.runtime_parameters.limits",
fun(Conf) ->
case cuttlefish_variable:filter_by_prefix("runtime_parameters.limits", Conf) of
[] -> cuttlefish:unset();
Ss -> [ {list_to_binary(Category), Limit} || {[_, _, Category], Limit} <- Ss ]
end
end
}.
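%% For example, to cap the number of runtime parameters per category;
%% the category names below are component names and are illustrative:
%%
%% runtime_parameters.limits.shovel = 10
%% runtime_parameters.limits.federation-upstream = 10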
%%
%% Message interceptors
%%
{mapping, "message_interceptors.$stage.$name.$key", "rabbit.message_interceptors", [
{datatype, {enum, [true, false]}}]}.
{translation, "rabbit.message_interceptors",
fun(Conf) ->
case cuttlefish_variable:filter_by_prefix("message_interceptors", Conf) of
[] ->
cuttlefish:unset();
L ->
lists:foldr(
fun({["message_interceptors", "incoming", "set_header_routing_node", "overwrite"], Overwrite}, Acc)
when is_boolean(Overwrite) ->
Mod = rabbit_msg_interceptor_routing_node,
Cfg = #{overwrite => Overwrite},
[{Mod, Cfg} | Acc];
({["message_interceptors", "incoming", "set_header_timestamp", "overwrite"], Overwrite}, Acc)
when is_boolean(Overwrite) ->
Mod = rabbit_msg_interceptor_timestamp,
Cfg = #{incoming => true,
overwrite => Overwrite},
case lists:keytake(Mod, 1, Acc) of
false ->
[{Mod, Cfg} | Acc];
{value, {Mod, Cfg1}, Acc1} ->
Cfg2 = maps:merge(Cfg1, Cfg),
[{Mod, Cfg2} | Acc1]
end;
({["message_interceptors", "outgoing", "timestamp", "enabled"], Enabled}, Acc) ->
case Enabled of
true ->
Mod = rabbit_msg_interceptor_timestamp,
Cfg = #{outgoing => true},
case lists:keytake(Mod, 1, Acc) of
false ->
[{Mod, Cfg} | Acc];
{value, {Mod, Cfg1}, Acc1} ->
Cfg2 = maps:merge(Cfg1, Cfg),
[{Mod, Cfg2} | Acc1]
end;
false ->
Acc
end;
(Other, _Acc) ->
cuttlefish:invalid(io_lib:format("~p is invalid", [Other]))
end, [], L)
end
end
}.
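%% For example, to stamp incoming messages with a timestamp header even
%% when the client already set one, and to timestamp outgoing messages
%% as well (both keys appear in the translation above):
%%
%% message_interceptors.incoming.set_header_timestamp.overwrite = true
%% message_interceptors.outgoing.timestamp.enabled = true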
{mapping, "stream.replication.port_range.min", "osiris.port_range", [
{datatype, [integer]},
{validators, ["non_zero_positive_integer"]}
]}.
{mapping, "stream.replication.port_range.max", "osiris.port_range", [
{datatype, [integer]},
{validators, ["non_zero_positive_integer"]}
]}.
{translation, "osiris.port_range",
fun(Conf) ->
Min = cuttlefish:conf_get("stream.replication.port_range.min", Conf, undefined),
Max = cuttlefish:conf_get("stream.replication.port_range.max", Conf, undefined),
case {Min, Max} of
{undefined, undefined} ->
cuttlefish:unset();
{Mn, undefined} ->
{Mn, Mn + 500};
{undefined, Mx} ->
{Mx - 500, Mx};
_ ->
{Min, Max}
end
end}.
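%% For example, to constrain stream replication listeners to a fixed
%% port range; if only one bound is set, the translation above derives
%% the other by adding or subtracting 500:
%%
%% stream.replication.port_range.min = 4000
%% stream.replication.port_range.max = 4500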
{mapping, "cluster_tags.$tag", "rabbit.cluster_tags", [
{datatype, [binary]}
]}.
{translation, "rabbit.cluster_tags",
fun(Conf) ->
case cuttlefish:conf_get("cluster_tags", Conf, undefined) of
none -> [];
_ ->
Settings = cuttlefish_variable:filter_by_prefix("cluster_tags", Conf),
[ {list_to_binary(K), V} || {[_, K], V} <- Settings]
end
end}.
{mapping, "node_tags.$tag", "rabbit.node_tags", [
{datatype, [binary]}
]}.
{translation, "rabbit.node_tags",
fun(Conf) ->
case cuttlefish:conf_get("node_tags", Conf, undefined) of
none -> [];
_ ->
Settings = cuttlefish_variable:filter_by_prefix("node_tags", Conf),
[ {list_to_binary(K), V} || {[_, K], V} <- Settings]
end
end}.
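%% For example, to attach free-form metadata tags to the cluster and to
%% this node (keys and values are illustrative):
%%
%% cluster_tags.environment = production
%% node_tags.rack = b2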
% ===============================
% Validators
% ===============================
{validator, "mirroring_sync_batch_size", "Batch size should be greater than 0 and less than 1M",
fun(Size) when is_integer(Size) ->
Size > 0 andalso Size =< 1000000
end}.
{validator, "max_message_size", "Max message size should be between 0 and 512MB",
fun(Size) when is_integer(Size) ->
Size > 0 andalso Size =< 536870912
end}.
{validator, "less_than_1", "Float is not between 0 and 1",
2016-01-22 23:47:01 +08:00
fun(Float) when is_float(Float) ->
Float > 0 andalso Float < 1
end}.
{validator, "port", "Invalid port number",
fun(Port) when is_integer(Port) ->
Port > 0 andalso Port =< 65535
end}.
{validator, "byte", "Integer must be in the range [0, 255]",
fun(Int) when is_integer(Int) ->
Int >= 0 andalso Int =< 255
end}.
{validator, "dir_writable", "Directory must be writable",
fun(Dir) ->
TestFile = filename:join(Dir, "test_file"),
file:delete(TestFile),
Res = ok == file:write_file(TestFile, <<"test">>),
file:delete(TestFile),
Res
end}.
{validator, "file_accessible", "file does not exist or cannot be read by the node",
fun(File) ->
case file:read_file_info(File) of
{ok, FileInfo} -> (element(4, FileInfo) == read) or (element(4, FileInfo) == read_write);
_ -> false
end
end}.
{validator, "is_ip", "value should be a valid IP address",
fun(IpStr) ->
Res = inet:parse_address(IpStr),
element(1, Res) == ok
end}.
{validator, "non_negative_integer", "number should be greater or equal to zero",
fun(Int) when is_integer(Int) ->
Int >= 0
end}.
{validator, "non_zero_positive_integer", "number should be greater or equal to one",
fun(Int) when is_integer(Int) ->
Int >= 1
end}.
{validator, "positive_16_bit_unsigned_integer", "number should be between 1 and 65535",
fun(Int) when is_integer(Int) ->
(Int >= 1) and (Int =< 16#ff_ff)
end}.
{validator, "positive_32_bit_unsigned_integer", "number should be between 1 and 4294967295",
fun(Int) when is_integer(Int) ->
(Int >= 1) and (Int =< 16#ff_ff_ff_ff)
end}.
{validator, "valid_regex", "string must be a valid regular expression",
fun("") -> false;
(String) -> {Res, _ } = re:compile(String),
Res =:= ok
end}.
{validator, "is_supported_information_unit", "supported formats: 500MB, 500MiB, 10GB, 10GiB, 2TB, 2TiB, 10000000000",
fun(S0) ->
case is_integer(S0) of
true -> true;
false ->
%% this is a string
S = string:strip(S0, right),
%% A bare integer (handled above) carries no unit suffix;
%% a string value must end with one of the suffixes below
{ok, HasIUSuffix} = re:compile("([0-9]+)([a-zA-Z]){1,3}$", [dollar_endonly, caseless]),
%% Here are the prefixes we accept. This must match
%% what rabbit_resource_monitor_misc and 'rabbitmq-diagnostics status' can format.
{ok, SuffixExtractor} = re:compile("(k|ki|kb|kib|m|mi|mb|mib|g|gi|gb|gib|t|ti|tb|tib|p|pi|pb|pib)$", [dollar_endonly, caseless]),
case re:run(S, HasIUSuffix) of
nomatch -> false;
{match, _} ->
case re:split(S, SuffixExtractor) of
[] -> false;
[_CompleteMatch] -> false;
[_CompleteMatch, _Suffix | _] -> true
end
end
end
end}.