Add ability to customize product name, version & banner
To override the product name (defaulting to "RabbitMQ"):
* set the `$RABBITMQ_PRODUCT_NAME` environment variable, or
* set the `rabbit` application `product_name` variable.
To override the product version:
* set the `$RABBITMQ_PRODUCT_VERSION` environment variable, or
* set the `rabbit` application `product_version` variable.
To add content to the banner (both the copy that is logged and the one
printed to stdout), point to the file that contains it (à la `/etc/motd`)
using:
* the `$RABBITMQ_MOTD_FILE` environment variable, or
* the `rabbit` application `motd_file` variable.
The default motd file is `/etc/rabbitmq/motd` on Unix and
`%APPDATA%\RabbitMQ\motd.txt` on Windows.
Here is an example of the printed banner with name, version & motd
configured:

  ##  ##      WeatherMQ 1.2.3
  ##  ##
  ##########  Copyright (c) 2007-2020 Pivotal Software, Inc.
  ######  ##
  ##########  Licensed under the MPL 1.1. Website: https://rabbitmq.com

  This is an example of a RabbitMQ message of the day.
  The message is written in Paris, France.     \   /
  It is partly cloudy outside, with a        _ /"".-.
  temperature of 12°C. Wind is around        \_(   ).
  30-40 km/h, from south-west.               /(___(__)

  Doc guides: https://rabbitmq.com/documentation.html
  Support:    https://rabbitmq.com/contact.html
  Tutorials:  https://rabbitmq.com/getstarted.html
  Monitoring: https://rabbitmq.com/monitoring.html

  Logs: /tmp/rabbitmq-test-instances/rabbit/log/rabbit@cassini.log
        /tmp/rabbitmq-test-instances/rabbit/log/rabbit@cassini_upgrade.log

  Config file(s): /tmp/rabbitmq-test-instances/test.config

  Starting broker... completed with 0 plugins.
New APIs are available to query this product information and to use it
in plugins, e.g. the management API/UI:
* rabbit:product_info/0
* rabbit:product_name/0
* rabbit:product_version/0
* rabbit:motd_file/0
* rabbit:motd/0
[#170054940]

% vim:ft=erlang:

% ==============================
% Rabbit app section
% ==============================

%%
%% Network Connectivity
%% ====================
%%

%% By default, RabbitMQ will listen on all interfaces, using
%% the standard (reserved) AMQP port.
%%
%% {tcp_listeners, [5672]},

%% To listen on a specific interface, provide a tuple of {IpAddress, Port}.
%% For example, to listen only on localhost for both IPv4 and IPv6:
%%
%% {tcp_listeners, [{"127.0.0.1", 5672},
%%                  {"[::1]",     5672}]},

{mapping, "listeners.tcp", "rabbit.tcp_listeners",[
    {datatype, {enum, [none]}}
]}.

{mapping, "listeners.tcp.$name", "rabbit.tcp_listeners",[
    {datatype, [integer, ip]}
]}.

{translation, "rabbit.tcp_listeners",
fun(Conf) ->
    case cuttlefish:conf_get("listeners.tcp", Conf, undefined) of
        none -> [];
        _ ->
            Settings = cuttlefish_variable:filter_by_prefix("listeners.tcp", Conf),
            [ V || {_, V} <- Settings ]
    end
end}.
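
%% For illustration, the sugared rabbitmq.conf syntax accepted by the
%% mappings above might look like this (a sketch; the listener labels
%% "default" and "local" are arbitrary):
%%
%% listeners.tcp.default = 5672
%% listeners.tcp.local   = 127.0.0.1:5672
%%
%% or, to disable AMQP TCP listeners entirely:
%%
%% listeners.tcp = none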

%% TLS listeners are configured in the same fashion as TCP listeners,
%% including the option to control the choice of interface.
%%
%% {ssl_listeners, [5671]},

{mapping, "listeners.ssl", "rabbit.ssl_listeners",[
    {datatype, {enum, [none]}}
]}.

{mapping, "listeners.ssl.$name", "rabbit.ssl_listeners",[
    {datatype, [integer, ip]}
]}.

{translation, "rabbit.ssl_listeners",
fun(Conf) ->
    case cuttlefish:conf_get("listeners.ssl", Conf, undefined) of
        none -> [];
        _ ->
            Settings = cuttlefish_variable:filter_by_prefix("listeners.ssl", Conf),
            [ V || {_, V} <- Settings ]
    end
end}.

%% Number of Erlang processes that will accept connections for the TCP
%% and TLS listeners.
%%
%% {num_tcp_acceptors, 10},
%% {num_ssl_acceptors, 1},

{mapping, "num_acceptors.ssl", "rabbit.num_ssl_acceptors", [
    {datatype, integer}
]}.

{mapping, "num_acceptors.tcp", "rabbit.num_tcp_acceptors", [
    {datatype, integer}
]}.
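
%% Illustrative rabbitmq.conf equivalents (a sketch):
%%
%% num_acceptors.tcp = 10
%% num_acceptors.ssl = 10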

{mapping, "socket_writer.gc_threshold", "rabbit.writer_gc_threshold", [
    {datatype, [{atom, off}, integer]}
]}.

{translation, "rabbit.writer_gc_threshold",
fun(Conf) ->
    case cuttlefish:conf_get("socket_writer.gc_threshold", Conf, undefined) of
        %% missing from the config
        undefined -> cuttlefish:unset();
        %% explicitly disabled
        off -> undefined;
        Int when is_integer(Int) andalso Int > 0 ->
            Int;
        _ ->
            cuttlefish:invalid("should be a positive integer")
    end
end
}.
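
%% Illustrative rabbitmq.conf usage (a sketch): set the writer GC
%% threshold, or disable it:
%%
%% socket_writer.gc_threshold = 1000000000
%% socket_writer.gc_threshold = off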

%% Maximum time for 0-9-1 handshake (after socket connection
%% and TLS handshake), in milliseconds.
%%
%% {handshake_timeout, 10000},

{mapping, "handshake_timeout", "rabbit.handshake_timeout", [
    {datatype, [{atom, infinity}, integer]}
]}.

%% Set to 'true' to perform reverse DNS lookups when accepting a
%% connection. Hostnames will then be shown instead of IP addresses
%% in rabbitmqctl and the management plugin.
%%
%% {reverse_dns_lookups, true},

{mapping, "reverse_dns_lookups", "rabbit.reverse_dns_lookups", [
    {datatype, {enum, [true, false]}}
]}.

{mapping, "erlang.K", "vm_args.+K", [
    {default, "true"},
    {level, advanced}
]}.

%%
%% Definition import
%%

%% Original key for definition loading from a JSON file or directory of files. See
%% https://www.rabbitmq.com/management.html#load-definitions
{mapping, "load_definitions", "rabbit.load_definitions",
    [{datatype, string},
     {validators, ["file_accessible"]}]}.

%% Newer syntax for definition loading from a JSON file or directory of files. See
%% https://www.rabbitmq.com/management.html#load-definitions
{mapping, "definitions.local.path", "rabbit.definitions.local_path",
    [{datatype, string},
     {validators, ["file_accessible"]}]}.

%% Extensible mechanism for loading definitions from a remote source
{mapping, "definitions.import_backend", "rabbit.definitions.import_backend", [
    {datatype, atom}
]}.

{translation, "rabbit.definitions.import_backend",
fun(Conf) ->
    case cuttlefish:conf_get("definitions.import_backend", Conf, rabbit_definitions_import_local_filesystem) of
        %% short aliases for known backends
        local_filesystem -> rabbit_definitions_import_local_filesystem;
        local            -> rabbit_definitions_import_local_filesystem;
        https            -> rabbit_definitions_import_https;
        http             -> rabbit_definitions_import_https;
        %% accept both rabbitmq_ and rabbit_ (typical core module prefix)
        rabbitmq_definitions_import_local_filesystem -> rabbit_definitions_import_local_filesystem;
        rabbitmq_definitions_import_http             -> rabbit_definitions_import_https;
        %% any other value is used as is
        Module -> Module
    end
end}.
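
%% Illustrative rabbitmq.conf usage (a sketch; the path is a placeholder):
%%
%% definitions.import_backend = local_filesystem
%% definitions.local.path     = /path/to/definitions.json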

{mapping, "definitions.skip_if_unchanged", "rabbit.definitions.skip_if_unchanged", [
    {datatype, {enum, [true, false]}}]}.

{mapping, "definitions.hashing.algorithm", "rabbit.definitions.hashing_algorithm", [
    {datatype, {enum, [sha, sha224, sha256, sha384, sha512]}}]}.

%% Load definitions from a remote URL over HTTPS. See
%% https://www.rabbitmq.com/management.html#load-definitions
{mapping, "definitions.https.url", "rabbit.definitions.url",
    [{datatype, string}]}.

%% Client-side TLS settings used by e.g. the HTTPS definition loading mechanism.
%% These can be reused by other clients.

{mapping, "definitions.tls.verify", "rabbit.definitions.ssl_options.verify", [
    {datatype, {enum, [verify_peer, verify_none]}}]}.

{mapping, "definitions.tls.fail_if_no_peer_cert", "rabbit.definitions.ssl_options.fail_if_no_peer_cert", [
    {datatype, {enum, [true, false]}}]}.

{mapping, "definitions.tls.cacertfile", "rabbit.definitions.ssl_options.cacertfile",
    [{datatype, string}, {validators, ["file_accessible"]}]}.

{mapping, "definitions.tls.certfile", "rabbit.definitions.ssl_options.certfile",
    [{datatype, string}, {validators, ["file_accessible"]}]}.

{mapping, "definitions.tls.cacerts.$name", "rabbit.definitions.ssl_options.cacerts",
    [{datatype, string}]}.

{translation, "rabbit.definitions.ssl_options.cacerts",
fun(Conf) ->
    Settings = cuttlefish_variable:filter_by_prefix("definitions.tls.cacerts", Conf),
    [ list_to_binary(V) || {_, V} <- Settings ]
end}.

{mapping, "definitions.tls.cert", "rabbit.definitions.ssl_options.cert",
    [{datatype, string}]}.

{translation, "rabbit.definitions.ssl_options.cert",
fun(Conf) ->
    list_to_binary(cuttlefish:conf_get("definitions.tls.cert", Conf))
end}.

{mapping, "definitions.tls.reuse_session", "rabbit.definitions.ssl_options.reuse_session",
    [{datatype, {enum, [true, false]}}]}.

{mapping, "definitions.tls.crl_check", "rabbit.definitions.ssl_options.crl_check",
    [{datatype, [{enum, [true, false, peer, best_effort]}]}]}.

{mapping, "definitions.tls.depth", "rabbit.definitions.ssl_options.depth",
    [{datatype, integer}, {validators, ["byte"]}]}.

{mapping, "definitions.tls.dh", "rabbit.definitions.ssl_options.dh",
    [{datatype, string}]}.

{translation, "rabbit.definitions.ssl_options.dh",
fun(Conf) ->
    list_to_binary(cuttlefish:conf_get("definitions.tls.dh", Conf))
end}.

{translation, "rabbit.definitions.ssl_options.key",
fun(Conf) ->
    case cuttlefish_variable:filter_by_prefix("definitions.tls.key", Conf) of
        [{[_,_,Key], Val}|_] -> {list_to_atom(Key), list_to_binary(Val)};
        _ -> cuttlefish:unset()
    end
end}.

{mapping, "definitions.tls.keyfile", "rabbit.definitions.ssl_options.keyfile",
    [{datatype, string}, {validators, ["file_accessible"]}]}.

{mapping, "definitions.tls.log_alert", "rabbit.definitions.ssl_options.log_alert",
    [{datatype, {enum, [true, false]}}]}.

{mapping, "definitions.tls.password", "rabbit.definitions.ssl_options.password",
    [{datatype, string}]}.

{mapping, "definitions.tls.secure_renegotiate", "rabbit.definitions.ssl_options.secure_renegotiate",
    [{datatype, {enum, [true, false]}}]}.

{mapping, "definitions.tls.reuse_sessions", "rabbit.definitions.ssl_options.reuse_sessions",
    [{datatype, {enum, [true, false]}}]}.

{mapping, "definitions.tls.versions.$version", "rabbit.definitions.ssl_options.versions",
    [{datatype, atom}]}.

{translation, "rabbit.definitions.ssl_options.versions",
fun(Conf) ->
    Settings = cuttlefish_variable:filter_by_prefix("definitions.tls.versions", Conf),
    [V || {_, V} <- Settings]
end}.

{mapping, "definitions.tls.ciphers.$cipher", "rabbit.definitions.ssl_options.ciphers",
    [{datatype, string}]}.

{translation, "rabbit.definitions.ssl_options.ciphers",
fun(Conf) ->
    Settings = cuttlefish_variable:filter_by_prefix("definitions.tls.ciphers", Conf),
    lists:reverse([V || {_, V} <- Settings])
end}.

{mapping, "definitions.tls.log_level", "rabbit.definitions.ssl_options.log_level",
    [{datatype, {enum, [emergency, alert, critical, error, warning, notice, info, debug]}}]}.
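
%% Illustrative rabbitmq.conf usage for HTTPS definition import (a sketch;
%% the URL and path are placeholders):
%%
%% definitions.import_backend = https
%% definitions.https.url      = https://definitions.example.local/defs.json
%% definitions.tls.verify     = verify_peer
%% definitions.tls.cacertfile = /path/to/ca_certificate.pem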

%%
%% Seed User, Authentication, Access Control
%%

%% The default "guest" user is only permitted to access the server
%% via a loopback interface (e.g. localhost).
%% {loopback_users, [<<"guest">>]},
%%
%% Uncomment the following line if you want to allow access to the
%% guest user from anywhere on the network.
%% {loopback_users, []},

{mapping, "loopback_users", "rabbit.loopback_users", [
    {datatype, {enum, [none]}}
]}.

{mapping, "loopback_users.$user", "rabbit.loopback_users", [
    {datatype, atom}
]}.

{translation, "rabbit.loopback_users",
fun(Conf) ->
    None = cuttlefish:conf_get("loopback_users", Conf, undefined),
    case None of
        none -> [];
        _ ->
            Settings = cuttlefish_variable:filter_by_prefix("loopback_users", Conf),
            [ list_to_binary(U) || {["loopback_users", U], V} <- Settings, V == true ]
    end
end}.
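
%% Illustrative rabbitmq.conf usage (a sketch): keep "guest" restricted to
%% loopback interfaces, or lift the restriction for all users:
%%
%% loopback_users.guest = true
%% loopback_users       = none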

%% TLS options.
%% See https://www.rabbitmq.com/ssl.html for full documentation.
%%
%% {ssl_options, [{cacertfile, "/path/to/testca/cacert.pem"},
%%                {certfile,   "/path/to/server/cert.pem"},
%%                {keyfile,    "/path/to/server/key.pem"},
%%                {verify,     verify_peer},
%%                {fail_if_no_peer_cert, false}]},

{mapping, "ssl_allow_poodle_attack", "rabbit.ssl_allow_poodle_attack",
    [{datatype, {enum, [true, false]}}]}.

{mapping, "ssl_options", "rabbit.ssl_options", [
    {datatype, {enum, [none]}}
]}.

{translation, "rabbit.ssl_options",
fun(Conf) ->
    case cuttlefish:conf_get("ssl_options", Conf, undefined) of
        none -> [];
        _    -> cuttlefish:invalid("Invalid ssl_options")
    end
end}.

{mapping, "ssl_options.verify", "rabbit.ssl_options.verify", [
    {datatype, {enum, [verify_peer, verify_none]}}]}.

{mapping, "ssl_options.fail_if_no_peer_cert", "rabbit.ssl_options.fail_if_no_peer_cert", [
    {datatype, {enum, [true, false]}}]}.

{mapping, "ssl_options.cacertfile", "rabbit.ssl_options.cacertfile",
    [{datatype, string}, {validators, ["file_accessible"]}]}.

{mapping, "ssl_options.certfile", "rabbit.ssl_options.certfile",
    [{datatype, string}, {validators, ["file_accessible"]}]}.

{mapping, "ssl_options.cacerts.$name", "rabbit.ssl_options.cacerts",
    [{datatype, string}]}.

{translation, "rabbit.ssl_options.cacerts",
fun(Conf) ->
    Settings = cuttlefish_variable:filter_by_prefix("ssl_options.cacerts", Conf),
    [ list_to_binary(V) || {_, V} <- Settings ]
end}.

{mapping, "ssl_options.cert", "rabbit.ssl_options.cert",
    [{datatype, string}]}.

{translation, "rabbit.ssl_options.cert",
fun(Conf) ->
    list_to_binary(cuttlefish:conf_get("ssl_options.cert", Conf))
end}.

{mapping, "ssl_options.client_renegotiation", "rabbit.ssl_options.client_renegotiation",
    [{datatype, {enum, [true, false]}}]}.

{mapping, "ssl_options.crl_check", "rabbit.ssl_options.crl_check",
    [{datatype, [{enum, [true, false, peer, best_effort]}]}]}.

{mapping, "ssl_options.depth", "rabbit.ssl_options.depth",
    [{datatype, integer}, {validators, ["byte"]}]}.

{mapping, "ssl_options.dh", "rabbit.ssl_options.dh",
    [{datatype, string}]}.

{translation, "rabbit.ssl_options.dh",
fun(Conf) ->
    list_to_binary(cuttlefish:conf_get("ssl_options.dh", Conf))
end}.

{mapping, "ssl_options.dhfile", "rabbit.ssl_options.dhfile",
    [{datatype, string}, {validators, ["file_accessible"]}]}.

{mapping, "ssl_options.honor_cipher_order", "rabbit.ssl_options.honor_cipher_order",
    [{datatype, {enum, [true, false]}}]}.

{mapping, "ssl_options.honor_ecc_order", "rabbit.ssl_options.honor_ecc_order",
    [{datatype, {enum, [true, false]}}]}.

{mapping, "ssl_options.key.RSAPrivateKey", "rabbit.ssl_options.key",
    [{datatype, string}]}.

{mapping, "ssl_options.key.DSAPrivateKey", "rabbit.ssl_options.key",
    [{datatype, string}]}.

{mapping, "ssl_options.key.PrivateKeyInfo", "rabbit.ssl_options.key",
    [{datatype, string}]}.

{translation, "rabbit.ssl_options.key",
fun(Conf) ->
    case cuttlefish_variable:filter_by_prefix("ssl_options.key", Conf) of
        [{[_,_,Key], Val}|_] -> {list_to_atom(Key), list_to_binary(Val)};
        _ -> cuttlefish:unset()
    end
end}.

{mapping, "ssl_options.keyfile", "rabbit.ssl_options.keyfile",
    [{datatype, string}, {validators, ["file_accessible"]}]}.

{mapping, "ssl_options.log_level", "rabbit.ssl_options.log_level",
    [{datatype, {enum, [emergency, alert, critical, error, warning, notice, info, debug]}}]}.

{mapping, "ssl_options.log_alert", "rabbit.ssl_options.log_alert",
    [{datatype, {enum, [true, false]}}]}.

{mapping, "ssl_options.password", "rabbit.ssl_options.password",
    [{datatype, string}]}.

{mapping, "ssl_options.psk_identity", "rabbit.ssl_options.psk_identity",
    [{datatype, string}]}.

{mapping, "ssl_options.reuse_sessions", "rabbit.ssl_options.reuse_sessions",
    [{datatype, {enum, [true, false]}}]}.

{mapping, "ssl_options.secure_renegotiate", "rabbit.ssl_options.secure_renegotiate",
    [{datatype, {enum, [true, false]}}]}.

{mapping, "ssl_options.versions.$version", "rabbit.ssl_options.versions",
    [{datatype, atom}]}.

{translation, "rabbit.ssl_options.versions",
fun(Conf) ->
    Settings = cuttlefish_variable:filter_by_prefix("ssl_options.versions", Conf),
    [V || {_, V} <- Settings]
end}.

{mapping, "ssl_options.ciphers.$cipher", "rabbit.ssl_options.ciphers",
    [{datatype, string}]}.

{translation, "rabbit.ssl_options.ciphers",
fun(Conf) ->
    Settings = cuttlefish_variable:filter_by_prefix("ssl_options.ciphers", Conf),
    lists:reverse([V || {_, V} <- Settings])
end}.

{mapping, "ssl_options.bypass_pem_cache", "ssl.bypass_pem_cache",
    [{datatype, {enum, [true, false]}}]}.
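
%% Illustrative rabbitmq.conf usage for a TLS listener (a sketch; paths
%% are placeholders):
%%
%% listeners.ssl.default            = 5671
%% ssl_options.cacertfile           = /path/to/testca/cacert.pem
%% ssl_options.certfile             = /path/to/server/cert.pem
%% ssl_options.keyfile              = /path/to/server/key.pem
%% ssl_options.verify               = verify_peer
%% ssl_options.fail_if_no_peer_cert = false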

%% ===========================================================================

%% Choose the available SASL mechanism(s) to expose.
%% The two default (built in) mechanisms are 'PLAIN' and
%% 'AMQPLAIN'. Additional mechanisms can be added via
%% plugins.
%%
%% See https://www.rabbitmq.com/authentication.html for more details.
%%
%% {auth_mechanisms, ['PLAIN', 'AMQPLAIN']},

{mapping, "auth_mechanisms.$name", "rabbit.auth_mechanisms", [
    {datatype, atom}]}.

{translation, "rabbit.auth_mechanisms",
fun(Conf) ->
    Settings = cuttlefish_variable:filter_by_prefix("auth_mechanisms", Conf),
    [ V || {_, V} <- Settings ]
end}.
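
%% Illustrative rabbitmq.conf usage (a sketch):
%%
%% auth_mechanisms.1 = PLAIN
%% auth_mechanisms.2 = AMQPLAIN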

%% Select an authentication backend to use. RabbitMQ provides an
%% internal backend in the core.
%%
%% {auth_backends, [rabbit_auth_backend_internal]},

{translation, "rabbit.auth_backends",
fun(Conf) ->
    Settings = cuttlefish_variable:filter_by_prefix("auth_backends", Conf),
    BackendModule = fun
        (internal) -> rabbit_auth_backend_internal;
        (ldap)     -> rabbit_auth_backend_ldap;
        (http)     -> rabbit_auth_backend_http;
        (cache)    -> rabbit_auth_backend_cache;
        (amqp)     -> rabbit_auth_backend_amqp;
        (dummy)    -> rabbit_auth_backend_dummy;
        (Other) when is_atom(Other) -> Other;
        (_) -> cuttlefish:invalid("Unknown/unsupported auth backend")
    end,
    AuthBackends  = [{Num, {default, BackendModule(V)}} || {["auth_backends", Num], V} <- Settings],
    AuthNBackends = [{Num, {authn, BackendModule(V)}} || {["auth_backends", Num, "authn"], V} <- Settings],
    AuthZBackends = [{Num, {authz, BackendModule(V)}} || {["auth_backends", Num, "authz"], V} <- Settings],
    Backends = lists:foldl(
        fun({NumStr, {Type, V}}, Acc) ->
            Num = case catch list_to_integer(NumStr) of
                N when is_integer(N) -> N;
                Err ->
                    cuttlefish:invalid(
                        iolist_to_binary(io_lib:format(
                            "Auth backend position in the chain should be an integer ~p", [Err])))
            end,
            NewVal = case dict:find(Num, Acc) of
                {ok, {AuthN, AuthZ}} ->
                    case {Type, AuthN, AuthZ} of
                        {authn, undefined, _} ->
                            {V, AuthZ};
                        {authz, _, undefined} ->
                            {AuthN, V};
                        _ ->
                            cuttlefish:invalid(
                                iolist_to_binary(
                                    io_lib:format(
                                        "Auth backend already defined for the ~pth ~p backend",
                                        [Num, Type])))
                    end;
                error ->
                    case Type of
                        authn   -> {V, undefined};
                        authz   -> {undefined, V};
                        default -> {V, V}
                    end
            end,
            dict:store(Num, NewVal, Acc)
        end,
        dict:new(),
        AuthBackends ++ AuthNBackends ++ AuthZBackends),
    lists:map(
        fun
            ({Num, {undefined, AuthZ}}) ->
                cuttlefish:warn(
                    io_lib:format(
                        "Auth backend undefined for the ~pth authz backend. Using ~p",
                        [Num, AuthZ])),
                {AuthZ, AuthZ};
            ({Num, {AuthN, undefined}}) ->
                cuttlefish:warn(
                    io_lib:format(
                        "Authz backend undefined for the ~pth authn backend. Using ~p",
                        [Num, AuthN])),
                {AuthN, AuthN};
            ({_Num, {Auth, Auth}})   -> Auth;
            ({_Num, {AuthN, AuthZ}}) -> {AuthN, AuthZ}
        end,
        lists:keysort(1, dict:to_list(Backends)))
end}.

{mapping, "auth_backends.$num", "rabbit.auth_backends", [
    {datatype, atom}
]}.

{mapping, "auth_backends.$num.authn", "rabbit.auth_backends",[
    {datatype, atom}
]}.

{mapping, "auth_backends.$num.authz", "rabbit.auth_backends",[
    {datatype, atom}
]}.
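
%% Illustrative rabbitmq.conf usage (a sketch): try LDAP first for both
%% authn and authz, fall back to the internal database, or split a
%% position into separate authn/authz backends:
%%
%% auth_backends.1       = ldap
%% auth_backends.2       = internal
%% auth_backends.3.authn = ldap
%% auth_backends.3.authz = internal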

%% This pertains to both the rabbitmq_auth_mechanism_ssl plugin and
%% STOMP ssl_cert_login configurations. See the rabbitmq_stomp
%% configuration section later in this file and the README in
%% https://github.com/rabbitmq/rabbitmq-auth-mechanism-ssl for further
%% details.
%%
%% To use the peer certificate's Common Name (CN) field
%% instead of its Distinguished Name (DN) for username extraction:
%%
%% {ssl_cert_login_from, common_name},
%%
%% To use the first SAN value of type DNS:
%%
%% {ssl_cert_login_from, subject_alternative_name},
%% {ssl_cert_login_san_type, dns},
%% {ssl_cert_login_san_index, 0}

{mapping, "ssl_cert_login_from", "rabbit.ssl_cert_login_from", [
    {datatype, {enum, [distinguished_name, common_name, subject_alternative_name, subject_alt_name]}}
]}.

{mapping, "ssl_cert_login_san_type", "rabbit.ssl_cert_login_san_type", [
    {datatype, {enum, [dns, ip, email, uri, other_name]}}
]}.

{mapping, "ssl_cert_login_san_index", "rabbit.ssl_cert_login_san_index", [
    {datatype, integer}, {validators, ["non_negative_integer"]}
]}.

%% TLS handshake timeout, in milliseconds.
%%
%% {ssl_handshake_timeout, 5000},

{mapping, "ssl_handshake_timeout", "rabbit.ssl_handshake_timeout", [
    {datatype, integer}
]}.

%% Cluster name

{mapping, "cluster_name", "rabbit.cluster_name", [
    {datatype, string}
]}.

%% Default worker process pool size. Used to limit maximum concurrency rate
%% of certain operations, e.g. queue initialisation and recovery on node boot.

{mapping, "default_worker_pool_size", "rabbit.default_worker_pool_size", [
    {datatype, integer}, {validators, ["non_negative_integer"]}
]}.

%% Password hashing implementation. Will only affect newly
%% created users. To recalculate the hash for an existing user
%% it is necessary to update her password.
%%
%% When importing definitions exported from versions earlier
%% than 3.6.0, it is possible to go back to MD5 (only do this
%% as a temporary measure!) by setting this to rabbit_password_hashing_md5.
%%
%% To use SHA-512, set to rabbit_password_hashing_sha512.
%%
%% {password_hashing_module, rabbit_password_hashing_sha256},

{mapping, "password_hashing_module", "rabbit.password_hashing_module", [
    {datatype, atom}
]}.

%% Credential validation.
%%

{mapping, "credential_validator.validation_backend", "rabbit.credential_validator.validation_backend", [
    {datatype, atom}
]}.

{mapping, "credential_validator.min_length", "rabbit.credential_validator.min_length", [
    {datatype, integer}, {validators, ["non_negative_integer"]}
]}.

{mapping, "credential_validator.regexp", "rabbit.credential_validator.regexp", [
    {datatype, string}
]}.
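
%% Illustrative rabbitmq.conf usage (a sketch):
%%
%% credential_validator.validation_backend = rabbit_credential_validator_min_password_length
%% credential_validator.min_length         = 10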

%%
%% Default User / VHost
%% ====================
%%

%% On first start RabbitMQ will create a vhost and a user. These
%% config items control what gets created. See
%% https://www.rabbitmq.com/access-control.html for further
%% information about vhosts and access control.
%%
%% {default_vhost, <<"/">>},
%% {default_user, <<"guest">>},
%% {default_pass, <<"guest">>},
%% {default_permissions, [<<".*">>, <<".*">>, <<".*">>]},

{mapping, "default_vhost", "rabbit.default_vhost", [
    {datatype, string}
]}.

{translation, "rabbit.default_vhost",
fun(Conf) ->
    list_to_binary(cuttlefish:conf_get("default_vhost", Conf))
end}.

{mapping, "default_user", "rabbit.default_user", [
    {datatype, string}
]}.

{translation, "rabbit.default_user",
fun(Conf) ->
    list_to_binary(cuttlefish:conf_get("default_user", Conf))
end}.

{mapping, "default_pass", "rabbit.default_pass", [
    {datatype, string}
]}.

{translation, "rabbit.default_pass",
fun(Conf) ->
    list_to_binary(cuttlefish:conf_get("default_pass", Conf))
end}.

{mapping, "default_permissions.configure", "rabbit.default_permissions", [
    {datatype, string}
]}.

{mapping, "default_permissions.read", "rabbit.default_permissions", [
    {datatype, string}
]}.

{mapping, "default_permissions.write", "rabbit.default_permissions", [
    {datatype, string}
]}.

{translation, "rabbit.default_permissions",
fun(Conf) ->
    Settings = cuttlefish_variable:filter_by_prefix("default_permissions", Conf),
    Configure = proplists:get_value(["default_permissions", "configure"], Settings),
    Read = proplists:get_value(["default_permissions", "read"], Settings),
    Write = proplists:get_value(["default_permissions", "write"], Settings),
    [list_to_binary(Configure), list_to_binary(Read), list_to_binary(Write)]
end}.
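
%% Illustrative rabbitmq.conf usage (a sketch; never ship these
%% credentials to production):
%%
%% default_vhost = /
%% default_user  = guest
%% default_pass  = guest
%% default_permissions.configure = .*
%% default_permissions.read      = .*
%% default_permissions.write     = .*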

%%
%% Extra Default Users
%% ====================
%%

{mapping, "default_users.$name.vhost_pattern", "rabbit.default_users", [
    {validators, ["valid_regex"]},
    {datatype, string}
]}.

{mapping, "default_users.$name.password", "rabbit.default_users", [
    {datatype, string}
]}.

{mapping, "default_users.$name.configure", "rabbit.default_users", [
    {validators, ["valid_regex"]},
    {datatype, string}
]}.

{mapping, "default_users.$name.read", "rabbit.default_users", [
    {validators, ["valid_regex"]},
    {datatype, string}
]}.

{mapping, "default_users.$name.write", "rabbit.default_users", [
    {validators, ["valid_regex"]},
    {datatype, string}
]}.

{mapping, "default_users.$name.tags", "rabbit.default_users", [
    {datatype, {list, atom}}
]}.

{translation, "rabbit.default_users", fun(Conf) ->
    case rabbit_cuttlefish:aggregate_props(Conf, ["default_users"]) of
        [] -> cuttlefish:unset();
        Props -> Props
    end
end}.

%%
%% Default Policies
%% ====================
%%

{mapping, "default_policies.operator.$id.vhost_pattern", "rabbit.default_policies.operator", [
    {validators, ["valid_regex"]},
    {datatype, string}
]}.

{mapping, "default_policies.operator.$id.queue_pattern", "rabbit.default_policies.operator", [
    {validators, ["valid_regex"]},
    {datatype, string}
]}.

{mapping, "default_policies.operator.$id.expires", "rabbit.default_policies.operator", [
    {datatype, {duration, ms}}
]}.

{mapping, "default_policies.operator.$id.message_ttl", "rabbit.default_policies.operator", [
    {datatype, {duration, ms}}
]}.

{mapping, "default_policies.operator.$id.max_length", "rabbit.default_policies.operator", [
    {validators, ["non_zero_positive_integer"]},
    {datatype, integer}
]}.

{mapping, "default_policies.operator.$id.max_length_bytes", "rabbit.default_policies.operator", [
    {validators, ["non_zero_positive_integer"]},
    {datatype, bytesize}
]}.

{mapping, "default_policies.operator.$id.max_in_memory_bytes", "rabbit.default_policies.operator", [
    {validators, ["non_zero_positive_integer"]},
    {datatype, bytesize}
]}.

{mapping, "default_policies.operator.$id.max_in_memory_length", "rabbit.default_policies.operator", [
    {validators, ["non_zero_positive_integer"]},
    {datatype, integer}
]}.

{mapping, "default_policies.operator.$id.delivery_limit", "rabbit.default_policies.operator", [
    {validators, ["non_zero_positive_integer"]},
    {datatype, integer}
]}.

{mapping, "default_policies.operator.$id.classic_queues.ha_mode", "rabbit.default_policies.operator", [
    {datatype, string}
]}.

{mapping, "default_policies.operator.$id.classic_queues.ha_params", "rabbit.default_policies.operator", [
    {datatype, [integer, {list, string}]}
]}.

{translation, "rabbit.default_policies.operator", fun(Conf) ->
    Props = rabbit_cuttlefish:aggregate_props(
        Conf,
        ["default_policies", "operator"],
        fun({["default_policies","operator",ID,"classic_queues"|T], V}) ->
                {["default_policies","operator",ID|T],V};
           (E) -> E
        end),
    case Props of
        [] -> cuttlefish:unset();
        Props -> Props
    end
end}.
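
%% Illustrative rabbitmq.conf usage (a sketch; "policy1" is an arbitrary
%% identifier):
%%
%% default_policies.operator.policy1.vhost_pattern = ^default$
%% default_policies.operator.policy1.queue_pattern = .*
%% default_policies.operator.policy1.max_length    = 1000000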

%%
%% Default VHost Limits
%% ====================
%%

{mapping, "default_limits.vhosts.$id.pattern", "rabbit.default_limits.vhosts", [
    {validators, ["valid_regex"]},
    {datatype, string}
]}.

{mapping, "default_limits.vhosts.$id.max_connections", "rabbit.default_limits.vhosts", [
    {validators, ["non_zero_positive_integer"]},
    {datatype, integer}
]}.

{mapping, "default_limits.vhosts.$id.max_queues", "rabbit.default_limits.vhosts", [
    {validators, ["non_zero_positive_integer"]},
    {datatype, integer}
]}.

{translation, "rabbit.default_limits.vhosts", fun(Conf) ->
    case rabbit_cuttlefish:aggregate_props(Conf, ["default_limits", "vhosts"]) of
        [] -> cuttlefish:unset();
        Props -> Props
    end
end}.
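
%% Illustrative rabbitmq.conf usage (a sketch; "1" is an arbitrary
%% identifier):
%%
%% default_limits.vhosts.1.pattern         = ^device
%% default_limits.vhosts.1.max_connections = 10
%% default_limits.vhosts.1.max_queues      = 20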

%% Tags for default user
%%
%% For more details about tags, see the documentation for the
%% Management Plugin at https://www.rabbitmq.com/management.html.
%%
%% {default_user_tags, [administrator]},

{mapping, "default_user_tags.$tag", "rabbit.default_user_tags",
    [{datatype, {enum, [true, false]}}]}.

{translation, "rabbit.default_user_tags",
fun(Conf) ->
    Settings = cuttlefish_variable:filter_by_prefix("default_user_tags", Conf),
    [ list_to_atom(Key) || {[_,Key], Val} <- Settings, Val == true ]
end}.
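
%% Illustrative rabbitmq.conf usage (a sketch):
%%
%% default_user_tags.administrator = true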

%%
%% Additional network and protocol related configuration
%% =====================================================
%%

%% Set the default connection heartbeat timeout (in seconds).
%%
%% {heartbeat, 600},

{mapping, "heartbeat", "rabbit.heartbeat", [{datatype, integer}]}.

%% Set the max permissible size of an AMQP 0-9-1 frame (in bytes).
%%
%% {frame_max, 131072},

{mapping, "frame_max", "rabbit.frame_max", [{datatype, bytesize}]}.

%% Set the max frame size the server will accept before connection
%% tuning starts.
%%
%% {initial_frame_max, 4096},

{mapping, "initial_frame_max", "rabbit.initial_frame_max", [{datatype, bytesize}]}.

%% Set the max permissible number of channels per connection.
%% 0 means "no limit".
%%
%% {channel_max, 0},

{mapping, "channel_max", "rabbit.channel_max", [{datatype, integer}]}.

%% Set the max permissible number of client connections per node.
%% `infinity` means "no limit".
%%
%% {connection_max, infinity},

{mapping, "connection_max", "rabbit.connection_max",
    [{datatype, [{atom, infinity}, integer]}]}.

{translation, "rabbit.connection_max",
fun(Conf) ->
    case cuttlefish:conf_get("connection_max", Conf, undefined) of
        undefined -> cuttlefish:unset();
        infinity  -> infinity;
        Val when is_integer(Val) -> Val;
        _ -> cuttlefish:invalid("should be a non-negative integer")
    end
end
}.

{mapping, "ranch_connection_max", "rabbit.ranch_connection_max",
    [{datatype, [{atom, infinity}, integer]}]}.

{translation, "rabbit.ranch_connection_max",
fun(Conf) ->
    case cuttlefish:conf_get("ranch_connection_max", Conf, undefined) of
        undefined -> cuttlefish:unset();
        infinity  -> infinity;
        Val when is_integer(Val) -> Val;
        _ -> cuttlefish:invalid("should be a non-negative integer")
    end
end
}.

{mapping, "vhost_max", "rabbit.vhost_max",
    [{datatype, [{atom, infinity}, integer]}, {validators, ["non_negative_integer"]}]}.

{translation, "rabbit.vhost_max",
fun(Conf) ->
    case cuttlefish:conf_get("vhost_max", Conf, undefined) of
        undefined -> cuttlefish:unset();
        infinity  -> infinity;
        Val when is_integer(Val) -> Val;
        _ -> cuttlefish:invalid("should be a non-negative integer")
    end
end
}.

{mapping, "max_message_size", "rabbit.max_message_size",
    [{datatype, integer}, {validators, ["max_message_size"]}]}.
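
%% Illustrative rabbitmq.conf usage for the protocol limits above (a sketch):
%%
%% heartbeat        = 60
%% frame_max        = 131072
%% channel_max      = 128
%% connection_max   = 1024
%% max_message_size = 134217728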

%% Customising Socket Options.
%%
%% See (https://www.erlang.org/doc/man/inet.html#setopts-2) for
%% further documentation.
%%
%% {tcp_listen_options, [{backlog,       128},
%%                       {nodelay,       true},
%%                       {exit_on_close, false}]},

%% TCP listener section ======================================================

{mapping, "tcp_listen_options", "rabbit.tcp_listen_options", [
    {datatype, {enum, [none]}}]}.

{translation, "rabbit.tcp_listen_options",
fun(Conf) ->
    case cuttlefish:conf_get("tcp_listen_options", Conf, undefined) of
        none -> [];
        _    -> cuttlefish:invalid("Invalid tcp_listen_options")
    end
end}.

{mapping, "tcp_listen_options.backlog", "rabbit.tcp_listen_options.backlog", [
    {datatype, integer}
]}.

{mapping, "tcp_listen_options.nodelay", "rabbit.tcp_listen_options.nodelay", [
    {datatype, {enum, [true, false]}}
]}.

{mapping, "tcp_listen_options.buffer", "rabbit.tcp_listen_options.buffer",
    [{datatype, integer}]}.

{mapping, "tcp_listen_options.delay_send", "rabbit.tcp_listen_options.delay_send",
    [{datatype, {enum, [true, false]}}]}.

{mapping, "tcp_listen_options.dontroute", "rabbit.tcp_listen_options.dontroute",
    [{datatype, {enum, [true, false]}}]}.

{mapping, "tcp_listen_options.exit_on_close", "rabbit.tcp_listen_options.exit_on_close",
    [{datatype, {enum, [true, false]}}]}.

{mapping, "tcp_listen_options.fd", "rabbit.tcp_listen_options.fd",
    [{datatype, integer}]}.

{mapping, "tcp_listen_options.high_msgq_watermark", "rabbit.tcp_listen_options.high_msgq_watermark",
    [{datatype, integer}]}.

{mapping, "tcp_listen_options.high_watermark", "rabbit.tcp_listen_options.high_watermark",
    [{datatype, integer}]}.

{mapping, "tcp_listen_options.keepalive", "rabbit.tcp_listen_options.keepalive",
    [{datatype, {enum, [true, false]}}]}.

{mapping, "tcp_listen_options.low_msgq_watermark", "rabbit.tcp_listen_options.low_msgq_watermark",
    [{datatype, integer}]}.

{mapping, "tcp_listen_options.low_watermark", "rabbit.tcp_listen_options.low_watermark",
    [{datatype, integer}]}.

{mapping, "tcp_listen_options.port", "rabbit.tcp_listen_options.port",
    [{datatype, integer}, {validators, ["port"]}]}.

{mapping, "tcp_listen_options.priority", "rabbit.tcp_listen_options.priority",
    [{datatype, integer}]}.

{mapping, "tcp_listen_options.recbuf", "rabbit.tcp_listen_options.recbuf",
    [{datatype, integer}]}.

{mapping, "tcp_listen_options.send_timeout", "rabbit.tcp_listen_options.send_timeout",
    [{datatype, integer}]}.

{mapping, "tcp_listen_options.send_timeout_close", "rabbit.tcp_listen_options.send_timeout_close",
    [{datatype, {enum, [true, false]}}]}.

{mapping, "tcp_listen_options.sndbuf", "rabbit.tcp_listen_options.sndbuf",
    [{datatype, integer}]}.

{mapping, "tcp_listen_options.tos", "rabbit.tcp_listen_options.tos",
    [{datatype, integer}]}.

{mapping, "tcp_listen_options.linger.on", "rabbit.tcp_listen_options.linger",
    [{datatype, {enum, [true, false]}}]}.

{mapping, "tcp_listen_options.linger.timeout", "rabbit.tcp_listen_options.linger",
    [{datatype, integer}, {validators, ["non_negative_integer"]}]}.

{translation, "rabbit.tcp_listen_options.linger",
fun(Conf) ->
    LingerOn = cuttlefish:conf_get("tcp_listen_options.linger.on", Conf, false),
    LingerTimeout = cuttlefish:conf_get("tcp_listen_options.linger.timeout", Conf, 0),
    {LingerOn, LingerTimeout}
end}.
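
%% Illustrative rabbitmq.conf usage (a sketch):
%%
%% tcp_listen_options.backlog        = 128
%% tcp_listen_options.nodelay        = true
%% tcp_listen_options.linger.on      = true
%% tcp_listen_options.linger.timeout = 0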

%% ==========================================================================

%%
%% Resource Limits & Flow Control
%% ==============================
%%
%% See https://www.rabbitmq.com/memory.html for full details.

%% Memory-based Flow Control threshold.
%%
%% {vm_memory_high_watermark, 0.4},

%% Alternatively, we can set a limit (in bytes) of RAM used by the node.
%%
%% {vm_memory_high_watermark, {absolute, 1073741824}},
%%
%% Or you can set an absolute value using memory unit symbols (with RabbitMQ 3.6.0+).
%%
%% {vm_memory_high_watermark, {absolute, "1024M"}},
%%
%% Supported unit symbols:
%%
%% k, kiB: kibibytes (2^10 - 1,024 bytes)
%% M, MiB: mebibytes (2^20 - 1,048,576 bytes)
%% G, GiB: gibibytes (2^30 - 1,073,741,824 bytes)
%% kB: kilobytes (10^3 - 1,000 bytes)
%% MB: megabytes (10^6 - 1,000,000 bytes)
%% GB: gigabytes (10^9 - 1,000,000,000 bytes)

{mapping, "vm_memory_high_watermark.relative", "rabbit.vm_memory_high_watermark", [
    {datatype, float}]}.

{mapping, "vm_memory_high_watermark.absolute", "rabbit.vm_memory_high_watermark", [
    {datatype, [integer, string]}]}.

{translation, "rabbit.vm_memory_high_watermark",
fun(Conf) ->
    Settings = cuttlefish_variable:filter_by_prefix("vm_memory_high_watermark", Conf),
    Absolute = proplists:get_value(["vm_memory_high_watermark", "absolute"], Settings),
    Relative = proplists:get_value(["vm_memory_high_watermark", "relative"], Settings),
    case {Absolute, Relative} of
        {undefined, undefined} -> cuttlefish:invalid("No vm watermark defined");
        {_, undefined} -> {absolute, Absolute};
        {undefined, _} -> Relative;
        _ ->
            cuttlefish:warn("Both vm_memory_high_watermark.absolute and "
                            "vm_memory_high_watermark.relative are configured. "
                            "vm_memory_high_watermark.absolute has precedence"),
            {absolute, Absolute}
    end
end}.
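
%% Illustrative rabbitmq.conf usage (a sketch; configure one or the other):
%%
%% vm_memory_high_watermark.relative = 0.4
%% vm_memory_high_watermark.absolute = 2GB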

%% Fraction of the high watermark limit at which queues start to
%% page messages out to disc in order to free up memory.
%%
%% Values greater than 0.9 can be dangerous and should be used carefully.
%%
%% {vm_memory_high_watermark_paging_ratio, 0.5},

{mapping, "vm_memory_high_watermark_paging_ratio",
    "rabbit.vm_memory_high_watermark_paging_ratio",
    [{datatype, float}, {validators, ["less_than_1"]}]}.

%% Interval (in milliseconds) at which we perform the check of the memory
%% levels against the watermarks.
%%
%% {memory_monitor_interval, 2500},

{mapping, "memory_monitor_interval", "rabbit.memory_monitor_interval",
    [{datatype, integer}]}.

%% Selects Erlang VM memory consumption calculation strategy.
%% Can be `allocated`, `rss` or `legacy` (aliased as `erlang`).
%%
%% {vm_memory_calculation_strategy, rss},

{mapping, "vm_memory_calculation_strategy", "rabbit.vm_memory_calculation_strategy",
    [{datatype, {enum, [rss, erlang, allocated, legacy]}}]}.

%% The total memory available can be calculated from the OS resources
%% (default option) or provided as a configuration parameter.
{mapping, "total_memory_available_override_value", "rabbit.total_memory_available_override_value", [
    {datatype, [integer, string]}]}.

%% Set disk free limit (in bytes). Once free disk space reaches this
%% lower bound, a disk alarm will be set - see the documentation
%% listed above for more details.
%%
%% {disk_free_limit, 50000000},
%%
%% Or you can set it using memory units (same as in vm_memory_high_watermark)
%% with RabbitMQ 3.6.0+.
%% {disk_free_limit, "50MB"},
%% {disk_free_limit, "50000kB"},
%% {disk_free_limit, "2GB"},

%% Alternatively, we can set a limit relative to total available RAM.
%%
%% Values lower than 1.0 can be dangerous and should be used carefully.
%% {disk_free_limit, {mem_relative, 2.0}},

{mapping, "disk_free_limit.relative", "rabbit.disk_free_limit", [
    {datatype, float}]}.

{mapping, "disk_free_limit.absolute", "rabbit.disk_free_limit", [
    {datatype, [integer, string]}]}.

{translation, "rabbit.disk_free_limit",
fun(Conf) ->
    Settings = cuttlefish_variable:filter_by_prefix("disk_free_limit", Conf),
    Absolute = proplists:get_value(["disk_free_limit", "absolute"], Settings),
    Relative = proplists:get_value(["disk_free_limit", "relative"], Settings),
    case {Absolute, Relative} of
        {undefined, undefined} -> cuttlefish:invalid("No disk limit defined");
        {_, undefined} -> Absolute;
        {undefined, _} -> {mem_relative, Relative};
        _ ->
            cuttlefish:warn("Both disk_free_limit.absolute and "
                            "disk_free_limit.relative are configured. "
                            "disk_free_limit.absolute has precedence"),
            Absolute
    end
end}.
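
%% Illustrative rabbitmq.conf usage (a sketch; configure one or the other):
%%
%% disk_free_limit.absolute = 50MB
%% disk_free_limit.relative = 1.0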

%%
%% Clustering
%% =====================
%%

%% How to respond to cluster partitions.
%% See https://www.rabbitmq.com/partitions.html for further details.
%%
%% {cluster_partition_handling, ignore},

{mapping, "cluster_partition_handling", "rabbit.cluster_partition_handling",
    [{datatype, {enum, [ignore, pause_minority, autoheal, pause_if_all_down]}}]}.

{mapping, "cluster_partition_handling.pause_if_all_down.recover",
    "rabbit.cluster_partition_handling",
    [{datatype, {enum, [ignore, autoheal]}}]}.

{mapping, "cluster_partition_handling.pause_if_all_down.nodes.$name",
    "rabbit.cluster_partition_handling",
    [{datatype, atom}]}.
|
|
|
|
|
|
|
|
{translation, "rabbit.cluster_partition_handling",
|
|
|
|
fun(Conf) ->
|
2016-01-25 21:04:09 +08:00
|
|
|
case cuttlefish:conf_get("cluster_partition_handling", Conf) of
|
2016-01-22 23:47:01 +08:00
|
|
|
pause_if_all_down ->
|
|
|
|
PauseIfAllDownNodes = cuttlefish_variable:filter_by_prefix(
|
2016-03-17 21:48:27 +08:00
|
|
|
"cluster_partition_handling.pause_if_all_down.nodes",
|
2016-01-22 23:47:01 +08:00
|
|
|
Conf),
|
|
|
|
case PauseIfAllDownNodes of
|
2016-03-17 21:48:27 +08:00
|
|
|
[] ->
|
2016-01-22 23:47:01 +08:00
|
|
|
cuttlefish:invalid("Nodes required for pause_if_all_down");
|
|
|
|
_ ->
|
|
|
|
Nodes = [ V || {K,V} <- PauseIfAllDownNodes ],
|
2016-01-25 21:04:09 +08:00
|
|
|
PauseIfAllDownRecover = cuttlefish:conf_get(
|
2016-01-22 23:47:01 +08:00
|
|
|
"cluster_partition_handling.pause_if_all_down.recover",
|
|
|
|
Conf),
|
|
|
|
case PauseIfAllDownRecover of
|
|
|
|
Recover when Recover == ignore; Recover == autoheal ->
|
|
|
|
{pause_if_all_down, Nodes, Recover};
|
2016-03-17 21:48:27 +08:00
|
|
|
Invalid ->
|
2016-01-22 23:47:01 +08:00
|
|
|
cuttlefish:invalid("Recover strategy required for pause_if_all_down")
|
|
|
|
end
|
|
|
|
end;
|
|
|
|
Other -> Other
|
|
|
|
end
|
|
|
|
end}.
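
%% An illustrative rabbitmq.conf example (node names are placeholders):
%%
%%   cluster_partition_handling = pause_if_all_down
%%   cluster_partition_handling.pause_if_all_down.recover = autoheal
%%   cluster_partition_handling.pause_if_all_down.nodes.1 = rabbit@node1
%%   cluster_partition_handling.pause_if_all_down.nodes.2 = rabbit@node2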

%% Number of delegate processes to use for intra-cluster
%% communication. On a machine which has a very large number of cores
%% and is also part of a cluster, you may wish to increase this value.
%%

{mapping, "delegate_count", "rabbit.delegate_count", [
    {datatype, integer}, {validators, ["non_negative_integer"]}
]}.
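
%% e.g., in rabbitmq.conf (illustrative value, not a recommendation):
%%
%%   delegate_count = 32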

%% Mirror sync batch size, in messages. Increasing this will speed
%% up syncing but total batch size in bytes must not exceed 2 GiB.
%% Available in RabbitMQ 3.6.0 or later.
%%
%% {mirroring_sync_batch_size, 4096},

{mapping, "mirroring_sync_batch_size", "rabbit.mirroring_sync_batch_size",
[{datatype, bytesize}, {validators, ["mirroring_sync_batch_size"]}]}.
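
%% e.g., in rabbitmq.conf (mirrors the classic example above):
%%
%%   mirroring_sync_batch_size = 4096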

%% Mirror sync max throughput (in bytes) per second.
%% Supported unit symbols:
%% k, kiB: kibibytes (2^10 = 1,024 bytes)
%% M, MiB: mebibytes (2^20 = 1,048,576 bytes)
%% G, GiB: gibibytes (2^30 = 1,073,741,824 bytes)
%% kB: kilobytes (10^3 = 1,000 bytes)
%% MB: megabytes (10^6 = 1,000,000 bytes)
%% GB: gigabytes (10^9 = 1,000,000,000 bytes)
%%
%% 0 means "no limit".
%%
%% {mirroring_sync_max_throughput, 0},

{mapping, "mirroring_sync_max_throughput", "rabbit.mirroring_sync_max_throughput", [
    {datatype, [integer, string]}
]}.
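
%% e.g., in rabbitmq.conf (illustrative value, using one of the unit
%% symbols listed above):
%%
%%   mirroring_sync_max_throughput = 50MB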

%% Peer discovery backend used by cluster formation.
%%

{mapping, "cluster_formation.peer_discovery_backend", "rabbit.cluster_formation.peer_discovery_backend", [
    {datatype, atom}
]}.

{translation, "rabbit.cluster_formation.peer_discovery_backend",
fun(Conf) ->
    case cuttlefish:conf_get("cluster_formation.peer_discovery_backend", Conf, rabbit_peer_discovery_classic_config) of
        classic_config -> rabbit_peer_discovery_classic_config;
        classic -> rabbit_peer_discovery_classic_config;
        config -> rabbit_peer_discovery_classic_config;
        dns -> rabbit_peer_discovery_dns;
        aws -> rabbit_peer_discovery_aws;
        consul -> rabbit_peer_discovery_consul;
        etcd -> rabbit_peer_discovery_etcd;
        kubernetes -> rabbit_peer_discovery_k8s;
        k8s -> rabbit_peer_discovery_k8s;
        Module -> Module
    end
end}.
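
%% e.g., in rabbitmq.conf, using one of the aliases resolved above:
%%
%%   cluster_formation.peer_discovery_backend = classic_config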

%% Own node type, used by cluster formation.
%%

{mapping, "cluster_formation.node_type", "rabbit.cluster_formation.node_type", [
    {datatype, {enum, [disc, disk, ram]}}
]}.

{translation, "rabbit.cluster_formation.node_type",
fun(Conf) ->
    %% if peer discovery backend isn't configured, don't generate
    %% node type
    case cuttlefish:conf_get("cluster_formation.peer_discovery_backend", Conf, undefined) of
        undefined -> cuttlefish:unset();
        _Backend ->
            case cuttlefish:conf_get("cluster_formation.node_type", Conf) of
                disc -> disc;
                %% always cast to `disc`
                disk -> disc;
                ram -> ram;
                _Other -> disc
            end
    end
end}.
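
%% e.g., in rabbitmq.conf (only takes effect when a peer discovery
%% backend is configured, as per the translation above):
%%
%%   cluster_formation.node_type = disc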

%% Cluster formation: Randomized startup delay
%%
%% DEPRECATED: This is a no-op. Old configs are still allowed, but a warning will be printed.

{mapping, "cluster_formation.randomized_startup_delay_range.min", "rabbit.cluster_formation.randomized_startup_delay_range", []}.
{mapping, "cluster_formation.randomized_startup_delay_range.max", "rabbit.cluster_formation.randomized_startup_delay_range", []}.

{translation, "rabbit.cluster_formation.randomized_startup_delay_range",
fun(Conf) ->
    Min = cuttlefish:conf_get("cluster_formation.randomized_startup_delay_range.min", Conf, undefined),
    Max = cuttlefish:conf_get("cluster_formation.randomized_startup_delay_range.max", Conf, undefined),

    case {Min, Max} of
        {undefined, undefined} ->
            ok;
        _ ->
            cuttlefish:warn("cluster_formation.randomized_startup_delay_range.min and "
                            "cluster_formation.randomized_startup_delay_range.max are deprecated")
    end,
    cuttlefish:unset()
end}.

%% Cluster formation: lock acquisition retries as passed to https://erlang.org/doc/man/global.html#set_lock-3
%%
%% Currently used in classic, k8s, and aws peer discovery backends.

{mapping, "cluster_formation.internal_lock_retries", "rabbit.cluster_formation.internal_lock_retries",
    [
        {datatype, integer},
        {validators, ["non_zero_positive_integer"]}
]}.
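
%% e.g., in rabbitmq.conf (illustrative value):
%%
%%   cluster_formation.internal_lock_retries = 10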

%% Cluster formation: lock acquisition and discovery failure retries

{mapping, "cluster_formation.lock_retry_limit", "rabbit.cluster_formation.lock_retry_limit",
    [
        {datatype, integer},
        {validators, ["non_zero_positive_integer"]}
    ]}.
{mapping, "cluster_formation.lock_retry_timeout", "rabbit.cluster_formation.lock_retry_timeout",
    [
        {datatype, integer},
        {validators, ["non_zero_positive_integer"]}
    ]}.

{mapping, "cluster_formation.discovery_retry_limit", "rabbit.cluster_formation.discovery_retry_limit",
    [
        {datatype, integer},
        {validators, ["non_zero_positive_integer"]}
    ]}.
{mapping, "cluster_formation.discovery_retry_interval", "rabbit.cluster_formation.discovery_retry_interval",
    [
        {datatype, integer},
        {validators, ["non_zero_positive_integer"]}
]}.
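
%% e.g., in rabbitmq.conf (illustrative values):
%%
%%   cluster_formation.lock_retry_limit = 10
%%   cluster_formation.lock_retry_timeout = 30000
%%   cluster_formation.discovery_retry_limit = 10
%%   cluster_formation.discovery_retry_interval = 500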

%% Target cluster size hint may be used by certain core features or plugins to perform
%% actions that should only be performed when a certain number of nodes (or a quorum of a certain number)
%% has already joined (started).
%%

{mapping, "cluster_formation.target_cluster_size_hint", "rabbit.cluster_formation.target_cluster_size_hint", [
    {datatype, integer},
    {validators, ["non_zero_positive_integer"]}
]}.
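
%% e.g., in rabbitmq.conf, for a three-node cluster (illustrative):
%%
%%   cluster_formation.target_cluster_size_hint = 3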

%% Classic config-driven peer discovery backend.
%%
%% Make clustering happen *automatically* at startup - only applied
%% to nodes that have just been reset or started for the first time.
%% See https://www.rabbitmq.com/clustering.html#auto-config for
%% further details.
%%
%% {cluster_nodes, {['rabbit@my.host.com'], disc}},

{mapping, "cluster_formation.classic_config.nodes.$node", "rabbit.cluster_nodes",
    [{datatype, atom}]}.

{translation, "rabbit.cluster_nodes",
fun(Conf) ->
    Nodes = [V || {_, V} <- cuttlefish_variable:filter_by_prefix("cluster_formation.classic_config.nodes", Conf)],

    case Nodes of
        [] -> cuttlefish:unset();
        Other ->
            case cuttlefish:conf_get("cluster_formation.node_type", Conf, disc) of
                disc -> {Other, disc};
                %% Always cast to `disc`
                disk -> {Other, disc};
                ram -> {Other, ram}
            end
    end
end}.
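
%% e.g., in rabbitmq.conf (hostnames are placeholders):
%%
%%   cluster_formation.peer_discovery_backend = classic_config
%%   cluster_formation.classic_config.nodes.1 = rabbit@hostname1
%%   cluster_formation.classic_config.nodes.2 = rabbit@hostname2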

%% DNS (A records and reverse lookups)-based peer discovery.
%%

{mapping, "cluster_formation.dns.hostname", "rabbit.cluster_formation.peer_discovery_dns.hostname",
    [{datatype, string}]}.

{translation, "rabbit.cluster_formation.peer_discovery_dns.hostname",
fun(Conf) ->
    case cuttlefish:conf_get("cluster_formation.dns.hostname", Conf, undefined) of
        undefined -> cuttlefish:unset();
        Value -> list_to_binary(Value)
    end
end}.
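
%% e.g., in rabbitmq.conf (the seed hostname is a placeholder):
%%
%%   cluster_formation.peer_discovery_backend = dns
%%   cluster_formation.dns.hostname = discovery.example.local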

%% Interval (in milliseconds) at which we send keepalive messages
%% to other cluster members. Note that this is not the same thing
%% as net_ticktime; missed keepalive messages will not cause nodes
%% to be considered down.
%%
%% {cluster_keepalive_interval, 10000},

{mapping, "cluster_keepalive_interval", "rabbit.cluster_keepalive_interval",
[{datatype, integer}]}.
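
%% e.g., in rabbitmq.conf (mirrors the classic example above):
%%
%%   cluster_keepalive_interval = 10000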

%% Queue master locator (classic queues)
%%

{mapping, "queue_master_locator", "rabbit.queue_master_locator",
    [{datatype, string}]}.

{translation, "rabbit.queue_master_locator",
fun(Conf) ->
    list_to_binary(cuttlefish:conf_get("queue_master_locator", Conf))
end}.
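
%% e.g., in rabbitmq.conf (one of min-masters, client-local or random):
%%
%%   queue_master_locator = min-masters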

%% Queue leader locator (quorum queues and streams)
%%

{mapping, "queue_leader_locator", "rabbit.queue_leader_locator",
    [{datatype, string}]}.

{translation, "rabbit.queue_leader_locator",
fun(Conf) ->
    list_to_binary(cuttlefish:conf_get("queue_leader_locator", Conf))
end}.
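
%% e.g., in rabbitmq.conf ("balanced" or "client-local"):
%%
%%   queue_leader_locator = balanced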

%%
%% Statistics Collection
%% =====================
%%

%% Set (internal) statistics collection granularity.
%%
%% {collect_statistics, none},

{mapping, "collect_statistics", "rabbit.collect_statistics",
    [{datatype, {enum, [none, coarse, fine]}}]}.

%% Statistics collection interval (in milliseconds). Increasing
%% this will reduce the load on the management database.
%%
%% {collect_statistics_interval, 5000},

{mapping, "collect_statistics_interval", "rabbit.collect_statistics_interval",
[{datatype, integer}]}.
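
%% e.g., in rabbitmq.conf (illustrative values):
%%
%%   collect_statistics = fine
%%   collect_statistics_interval = 5000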

%%
%% Misc/Advanced Options
%% =====================
%%
%% NB: Change these only if you understand what you are doing!
%%

%% Explicitly enable/disable HiPE compilation.
%%
%% {hipe_compile, true},
%%
%% DEPRECATED: this is a no-op and is kept only to allow old configs.

{mapping, "hipe_compile", "rabbit.hipe_compile",
[{datatype, {enum, [true, false]}}]}.

%% Timeout used when waiting for Mnesia tables in a cluster to
%% become available.
%%
%% {mnesia_table_loading_retry_timeout, 30000},

{mapping, "mnesia_table_loading_retry_timeout", "rabbit.mnesia_table_loading_retry_timeout",
    [{datatype, integer}]}.

%% Number of retries when waiting for Mnesia tables during cluster startup. Note that
%% this setting is not applied to Mnesia upgrades or node deletions.
%%
%% {mnesia_table_loading_retry_limit, 10},

{mapping, "mnesia_table_loading_retry_limit", "rabbit.mnesia_table_loading_retry_limit",
[{datatype, integer}]}.
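
%% e.g., in rabbitmq.conf (mirrors the classic examples above):
%%
%%   mnesia_table_loading_retry_timeout = 30000
%%   mnesia_table_loading_retry_limit = 10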

{mapping, "message_store_shutdown_timeout", "rabbit.msg_store_shutdown_timeout",
    [
        {datatype, integer},
        {validators, ["non_zero_positive_integer"]}
]}.
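
%% e.g., in rabbitmq.conf (illustrative value only):
%%
%%   message_store_shutdown_timeout = 600000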

%% Size in bytes below which to embed messages in the queue index. See
%% https://www.rabbitmq.com/persistence-conf.html
%%
%% {queue_index_embed_msgs_below, 4096}

{mapping, "queue_index_embed_msgs_below", "rabbit.queue_index_embed_msgs_below",
[{datatype, bytesize}]}.
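
%% e.g., in rabbitmq.conf (mirrors the classic example above):
%%
%%   queue_index_embed_msgs_below = 4096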

%% Whether or not to enable background GC.
%%
%% {background_gc_enabled, true}

{mapping, "background_gc_enabled", "rabbit.background_gc_enabled",
    [{datatype, {enum, [true, false]}}]}.

%% Interval (in milliseconds) at which we run background GC.
%%
%% {background_gc_target_interval, 60000}

{mapping, "background_gc_target_interval", "rabbit.background_gc_target_interval",
[{datatype, integer}]}.
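
%% e.g., in rabbitmq.conf (mirrors the classic examples above):
%%
%%   background_gc_enabled = true
%%   background_gc_target_interval = 60000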

%% Whether or not to enable proxy protocol support.
%%
%% {proxy_protocol, false}

{mapping, "proxy_protocol", "rabbit.proxy_protocol",
[{datatype, {enum, [true, false]}}]}.
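
%% e.g., in rabbitmq.conf:
%%
%%   proxy_protocol = true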

%% Whether to stop the rabbit application if a vhost has
%% to terminate for any reason.

{mapping, "vhost_restart_strategy", "rabbit.vhost_restart_strategy",
[{datatype, {enum, [stop_node, continue, transient, persistent]}}]}.
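
%% e.g., in rabbitmq.conf (one of the enum values above):
%%
%%   vhost_restart_strategy = continue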

%% Approximate maximum time a consumer can spend processing a message before
%% the channel is terminated, in milliseconds.
%%
%% {consumer_timeout, 1800000},

{mapping, "consumer_timeout", "rabbit.consumer_timeout", [
    {datatype, integer},
    {validators, ["non_zero_positive_integer"]}
]}.
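
%% e.g., in rabbitmq.conf (30 minutes, mirroring the classic example above):
%%
%%   consumer_timeout = 1800000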

%% Product name & version overrides.

{mapping, "product.name", "rabbit.product_name", [
    {datatype, string}
]}.
{mapping, "product.version", "rabbit.product_version", [
    {datatype, string}
]}.

%% Message of the day file.
%% The content of that file is added to the banners, both logged and
%% printed.

{mapping, "motd_file", "rabbit.motd_file", [
    {datatype, string}
]}.
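
%% e.g., in rabbitmq.conf (all values are placeholders):
%%
%%   product.name = AcmeMQ
%%   product.version = 0.1.0
%%   motd_file = /path/to/motd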

% ==========================
% Logging section
% ==========================

{mapping, "log.dir", "rabbit.log_root", [
    {datatype, string},
    {validators, ["dir_writable"]}]}.

{mapping, "log.console", "rabbit.log.console.enabled", [
    {datatype, {enum, [true, false]}}
]}.

{mapping, "log.console.level", "rabbit.log.console.level", [
    {datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
]}.
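
%% e.g., in rabbitmq.conf (the directory is a placeholder):
%%
%%   log.dir = /var/log/rabbitmq
%%   log.console = true
%%   log.console.level = info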

{mapping, "log.console.stdio", "rabbit.log.console.stdio", [
    {default, stdout},
    {datatype, {enum, [stdout, stderr]}}
]}.

{mapping, "log.console.use_colors", "rabbit.log.console.formatter", [
    {default, on},
    {datatype, flag}
]}.

{mapping, "log.console.color_esc_seqs.debug", "rabbit.log.console.formatter", [
    {default, "\033[38;5;246m"},
    {datatype, string}
]}.

{mapping, "log.console.color_esc_seqs.info", "rabbit.log.console.formatter", [
    {default, ""},
    {datatype, string}
]}.

{mapping, "log.console.color_esc_seqs.notice", "rabbit.log.console.formatter", [
    {default, "\033[38;5;87m"},
    {datatype, string}
]}.

{mapping, "log.console.color_esc_seqs.warning", "rabbit.log.console.formatter", [
    {default, "\033[38;5;214m"},
    {datatype, string}
]}.

{mapping, "log.console.color_esc_seqs.error", "rabbit.log.console.formatter", [
    {default, "\033[38;5;160m"},
    {datatype, string}
]}.

{mapping, "log.console.color_esc_seqs.critical", "rabbit.log.console.formatter", [
    {default, "\033[1;37m\033[48;5;20m"},
    {datatype, string}
]}.

{mapping, "log.console.color_esc_seqs.alert", "rabbit.log.console.formatter", [
    {default, "\033[1;37m\033[48;5;93m"},
    {datatype, string}
]}.

{mapping, "log.console.color_esc_seqs.emergency", "rabbit.log.console.formatter", [
    {default, "\033[1;37m\033[48;5;196m"},
    {datatype, string}
]}.

{mapping, "log.console.formatter", "rabbit.log.console.formatter", [
    {default, plaintext},
    {datatype, {enum, [plaintext, json]}}
]}.

{mapping, "log.console.formatter.time_format", "rabbit.log.console.formatter", [
    {default, rfc3339_space},
    {datatype, {enum, [rfc3339_space, rfc3339_T, epoch_usecs, epoch_secs, lager_default]}}
]}.

{mapping, "log.console.formatter.level_format", "rabbit.log.console.formatter", [
    {default, lc},
    {datatype, {enum, [lc, uc, lc3, uc3, lc4, uc4]}}
]}.

{mapping, "log.console.formatter.single_line", "rabbit.log.console.formatter", [
    {default, off},
    {datatype, flag}
]}.

{mapping, "log.console.formatter.plaintext.format", "rabbit.log.console.formatter", [
    {default, "$time [$level] $pid $msg"},
    {datatype, string}
]}.

{mapping, "log.console.formatter.json.field_map", "rabbit.log.console.formatter", [
    {default, "time level msg gl:- logger_formatter:- report_cb:- error_logger:-"},
    {datatype, string}
]}.

{mapping, "log.console.formatter.json.verbosity_map", "rabbit.log.console.formatter", [
    {default, ""},
    {datatype, string}
]}.

{translation, "rabbit.log.console.formatter",
fun(Conf) ->
    rabbit_prelaunch_early_logging:translate_formatter_conf("log.console.formatter", Conf)
end}.
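
%% An illustrative rabbitmq.conf example switching the console output to
%% single-line JSON; in the field_map value, "time:ts" renames the time
%% field to "ts" and "*:-" drops all unlisted fields:
%%
%%   log.console.formatter = json
%%   log.console.formatter.single_line = on
%%   log.console.formatter.json.field_map = time:ts level msg *:-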

{mapping, "log.exchange", "rabbit.log.exchange.enabled", [
    {datatype, {enum, [true, false]}}
]}.

{mapping, "log.exchange.level", "rabbit.log.exchange.level", [
    {datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
]}.

{mapping, "log.exchange.formatter", "rabbit.log.exchange.formatter", [
    {default, plaintext},
    {datatype, {enum, [plaintext, json]}}
]}.

{mapping, "log.exchange.formatter.time_format", "rabbit.log.exchange.formatter", [
    {default, rfc3339_space},
    {datatype, [{enum, [rfc3339_space, rfc3339_T, epoch_usecs, epoch_secs, lager_default]}, string]}
]}.

{mapping, "log.exchange.formatter.level_format", "rabbit.log.exchange.formatter", [
    {default, lc},
    {datatype, {enum, [lc, uc, lc3, uc3, lc4, uc4]}}
]}.
|
2021-04-12 21:14:09 +08:00
|
|
|
{mapping, "log.exchange.formatter.single_line", "rabbit.log.exchange.formatter", [
|
    {default, off},
    {datatype, flag}
]}.

{mapping, "log.exchange.formatter.plaintext.format", "rabbit.log.exchange.formatter", [
    {default, "$time [$level] $pid $msg"},
    {datatype, string}
]}.

{mapping, "log.exchange.formatter.json.field_map", "rabbit.log.exchange.formatter", [
    {default, "time level msg gl:- logger_formatter:- report_cb:- error_logger:-"},
    {datatype, string}
]}.

{mapping, "log.exchange.formatter.json.verbosity_map", "rabbit.log.exchange.formatter", [
    {default, ""},
    {datatype, string}
]}.

{translation, "rabbit.log.exchange.formatter",
 fun(Conf) ->
     rabbit_prelaunch_early_logging:translate_formatter_conf("log.exchange.formatter", Conf)
 end}.

{mapping, "log.journald", "rabbit.log.journald.enabled", [
    {datatype, {enum, [true, false]}}
]}.

{mapping, "log.journald.level", "rabbit.log.journald.level", [
    {datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
]}.

{mapping, "log.journald.fields", "rabbit.log.journald.fields", [
    {default, "SYSLOG_IDENTIFIER=\"rabbitmq-server\" syslog_timestamp syslog_pid priority ERL_PID=pid CODE_FILE=file CODE_LINE=line CODE_MFA=mfa"},
    {datatype, string}
]}.

{translation, "rabbit.log.journald.fields",
 fun(Conf) ->
     rabbit_prelaunch_early_logging:translate_journald_fields_conf("log.journald.fields", Conf)
 end}.
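
%% Example (illustrative values; the journald output only applies on
%% systemd-based systems):
%%
%%   log.journald = true
%%   log.journald.level = info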

{mapping, "log.syslog", "rabbit.log.syslog.enabled", [
    {datatype, {enum, [true, false]}}
]}.

{mapping, "log.syslog.level", "rabbit.log.syslog.level", [
    {datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
]}.

{mapping, "log.syslog.formatter", "rabbit.log.syslog.formatter", [
    {default, plaintext},
    {datatype, {enum, [plaintext, json]}}
]}.

{mapping, "log.syslog.formatter.time_format", "rabbit.log.syslog.formatter", [
    {default, rfc3339_space},
    {datatype, [{enum, [rfc3339_space, rfc3339_T, epoch_usecs, epoch_secs, lager_default]}, string]}
]}.

{mapping, "log.syslog.formatter.level_format", "rabbit.log.syslog.formatter", [
    {default, lc},
    {datatype, {enum, [lc, uc, lc3, uc3, lc4, uc4]}}
]}.

{mapping, "log.syslog.formatter.single_line", "rabbit.log.syslog.formatter", [
    {default, off},
    {datatype, flag}
]}.

{mapping, "log.syslog.formatter.plaintext.format", "rabbit.log.syslog.formatter", [
    {default, "$msg"},
    {datatype, string}
]}.

{mapping, "log.syslog.formatter.json.field_map", "rabbit.log.syslog.formatter", [
    {default, "time level msg gl:- logger_formatter:- report_cb:- error_logger:-"},
    {datatype, string}
]}.

{mapping, "log.syslog.formatter.json.verbosity_map", "rabbit.log.syslog.formatter", [
    {default, ""},
    {datatype, string}
]}.

{translation, "rabbit.log.syslog.formatter",
 fun(Conf) ->
     rabbit_prelaunch_early_logging:translate_formatter_conf("log.syslog.formatter", Conf)
 end}.

{mapping, "log.syslog.identity", "syslog.app_name", [
    {datatype, string}
]}.

{mapping, "log.syslog.facility", "syslog.facility", [
    {datatype, {enum, [kern, kernel, user, mail, daemon, auth, syslog, lpr,
                       news, uucp, cron, authpriv, ftp, ntp, audit, alert,
                       clock, local0, local1, local2, local3, local4,
                       local5, local6, local7]}}
]}.

{mapping, "log.syslog.multiline_mode", "syslog.multiline_mode", [
    {datatype, {enum, [true, false]}}
]}.

{mapping, "log.syslog.ip", "syslog.dest_host", [
    {datatype, string},
    {validators, ["is_ip"]}
]}.

{mapping, "log.syslog.host", "syslog.dest_host", [
    {datatype, string}
]}.

{translation, "syslog.dest_host",
 fun(Conf) ->
     case cuttlefish:conf_get("log.syslog", Conf) of
         true ->
             case cuttlefish:conf_get("log.syslog.ip", Conf, undefined) of
                 undefined ->
                     %% If log.syslog.ip is not set, then this must be set
                     cuttlefish:conf_get("log.syslog.host", Conf);
                 IpAddr ->
                     IpAddr
             end;
         _ ->
             cuttlefish:invalid("log.syslog must be set to true to set log.syslog.host or log.syslog.ip")
     end
 end}.
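
%% Example (illustrative; the hostname is a placeholder): as the
%% translation above shows, log.syslog.ip wins when both are given,
%% and either requires log.syslog = true:
%%
%%   log.syslog = true
%%   log.syslog.host = syslog.example.com
%%   log.syslog.port = 514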

{mapping, "log.syslog.port", "syslog.dest_port", [
    {datatype, integer}
]}.

{mapping, "log.syslog.transport", "syslog.protocol", [
    {datatype, {enum, [udp, tcp, tls, ssl]}}
]}.

{mapping, "log.syslog.protocol", "syslog.protocol", [
    {datatype, {enum, [rfc3164, rfc5424]}}
]}.

{mapping, "log.syslog.ssl_options.verify", "syslog.protocol", [
    {datatype, {enum, [verify_peer, verify_none]}}]}.

{mapping, "log.syslog.ssl_options.fail_if_no_peer_cert", "syslog.protocol", [
    {datatype, {enum, [true, false]}}]}.

{mapping, "log.syslog.ssl_options.cacertfile", "syslog.protocol",
 [{datatype, string}, {validators, ["file_accessible"]}]}.

{mapping, "log.syslog.ssl_options.certfile", "syslog.protocol",
 [{datatype, string}, {validators, ["file_accessible"]}]}.

{mapping, "log.syslog.ssl_options.cacerts.$name", "syslog.protocol",
 [{datatype, string}]}.

{mapping, "log.syslog.ssl_options.cert", "syslog.protocol",
 [{datatype, string}]}.

{mapping, "log.syslog.ssl_options.client_renegotiation", "syslog.protocol",
 [{datatype, {enum, [true, false]}}]}.

{mapping, "log.syslog.ssl_options.crl_check", "syslog.protocol",
 [{datatype, [{enum, [true, false, peer, best_effort]}]}]}.

{mapping, "log.syslog.ssl_options.depth", "syslog.protocol",
 [{datatype, integer}, {validators, ["byte"]}]}.

{mapping, "log.syslog.ssl_options.dh", "syslog.protocol",
 [{datatype, string}]}.

{mapping, "log.syslog.ssl_options.dhfile", "syslog.protocol",
 [{datatype, string}, {validators, ["file_accessible"]}]}.

{mapping, "log.syslog.ssl_options.honor_cipher_order", "syslog.protocol",
 [{datatype, {enum, [true, false]}}]}.

{mapping, "log.syslog.ssl_options.honor_ecc_order", "syslog.protocol",
 [{datatype, {enum, [true, false]}}]}.

{mapping, "log.syslog.ssl_options.key.RSAPrivateKey", "syslog.protocol",
 [{datatype, string}]}.

{mapping, "log.syslog.ssl_options.key.DSAPrivateKey", "syslog.protocol",
 [{datatype, string}]}.

{mapping, "log.syslog.ssl_options.key.PrivateKeyInfo", "syslog.protocol",
 [{datatype, string}]}.

{mapping, "log.syslog.ssl_options.keyfile", "syslog.protocol",
 [{datatype, string}, {validators, ["file_accessible"]}]}.

{mapping, "log.syslog.ssl_options.log_alert", "syslog.protocol",
 [{datatype, {enum, [true, false]}}]}.

{mapping, "log.syslog.ssl_options.password", "syslog.protocol",
 [{datatype, string}]}.

{mapping, "log.syslog.ssl_options.psk_identity", "syslog.protocol",
 [{datatype, string}]}.

{mapping, "log.syslog.ssl_options.reuse_sessions", "syslog.protocol",
 [{datatype, {enum, [true, false]}}]}.

{mapping, "log.syslog.ssl_options.secure_renegotiate", "syslog.protocol",
 [{datatype, {enum, [true, false]}}]}.

{mapping, "log.syslog.ssl_options.versions.$version", "syslog.protocol",
 [{datatype, atom}]}.

{translation, "syslog.protocol",
 fun(Conf) ->
     ParseSslOptions = fun() ->
         RawSettings = [
             {verify, cuttlefish:conf_get("log.syslog.ssl_options.verify", Conf, undefined)},
             {fail_if_no_peer_cert, cuttlefish:conf_get("log.syslog.ssl_options.fail_if_no_peer_cert", Conf, undefined)},
             {cacertfile, cuttlefish:conf_get("log.syslog.ssl_options.cacertfile", Conf, undefined)},
             {certfile, cuttlefish:conf_get("log.syslog.ssl_options.certfile", Conf, undefined)},
             {cert, cuttlefish:conf_get("log.syslog.ssl_options.cert", Conf, undefined)},
             {client_renegotiation, cuttlefish:conf_get("log.syslog.ssl_options.client_renegotiation", Conf, undefined)},
             {crl_check, cuttlefish:conf_get("log.syslog.ssl_options.crl_check", Conf, undefined)},
             {depth, cuttlefish:conf_get("log.syslog.ssl_options.depth", Conf, undefined)},
             {dh, cuttlefish:conf_get("log.syslog.ssl_options.dh", Conf, undefined)},
             {dhfile, cuttlefish:conf_get("log.syslog.ssl_options.dhfile", Conf, undefined)},
             {honor_cipher_order, cuttlefish:conf_get("log.syslog.ssl_options.honor_cipher_order", Conf, undefined)},
             {honor_ecc_order, cuttlefish:conf_get("log.syslog.ssl_options.honor_ecc_order", Conf, undefined)},
             {keyfile, cuttlefish:conf_get("log.syslog.ssl_options.keyfile", Conf, undefined)},
             {log_alert, cuttlefish:conf_get("log.syslog.ssl_options.log_alert", Conf, undefined)},
             {password, cuttlefish:conf_get("log.syslog.ssl_options.password", Conf, undefined)},
             {psk_identity, cuttlefish:conf_get("log.syslog.ssl_options.psk_identity", Conf, undefined)},
             {reuse_sessions, cuttlefish:conf_get("log.syslog.ssl_options.reuse_sessions", Conf, undefined)},
             {secure_renegotiate, cuttlefish:conf_get("log.syslog.ssl_options.secure_renegotiate", Conf, undefined)}
         ],
         DefinedSettings = [{K, V} || {K, V} <- RawSettings, V =/= undefined],

         lists:map(
             fun({K, Val}) when K == dh; K == cert -> {K, list_to_binary(Val)};
                ({K, Val}) -> {K, Val}
             end,
             DefinedSettings) ++
         [ {K, V}
           || {K, V} <-
              [{cacerts, [ list_to_binary(V) || {_, V} <- cuttlefish_variable:filter_by_prefix("log.syslog.ssl_options.cacerts", Conf)]},
               {versions, [ V || {_, V} <- cuttlefish_variable:filter_by_prefix("log.syslog.ssl_options.versions", Conf) ]},
               %% The matched variable has five segments, e.g.
               %% log.syslog.ssl_options.key.RSAPrivateKey, so the key
               %% name is the fifth element.
               {key, case cuttlefish_variable:filter_by_prefix("log.syslog.ssl_options.key", Conf) of
                         [{[_, _, _, _, Key], Val}|_] -> {list_to_atom(Key), list_to_binary(Val)};
                         _ -> undefined
                     end}],
              V =/= undefined,
              V =/= []]
     end,

     Proto = cuttlefish:conf_get("log.syslog.protocol", Conf, undefined),
     Transport = cuttlefish:conf_get("log.syslog.transport", Conf, udp),
     case Transport of
         TLS when TLS == tls; TLS == ssl ->
             case Proto of
                 rfc3164 ->
                     cuttlefish:invalid("Syslog protocol rfc3164 is not compatible with TLS");
                 _ ->
                     {rfc5424, tls, ParseSslOptions()}
             end;
         _ when Transport == udp; Transport == tcp ->
             case Proto of
                 undefined -> {rfc3164, Transport};
                 _ -> {Proto, Transport}
             end;
         _ -> cuttlefish:invalid("Invalid syslog transport ~p~n", [Transport])
     end
 end}.
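
%% Example (illustrative; paths are placeholders): as the translation
%% above enforces, a TLS/SSL transport always uses RFC 5424 framing
%% and rfc3164 is rejected:
%%
%%   log.syslog.transport = tls
%%   log.syslog.ssl_options.cacertfile = /path/to/ca_certificate.pem
%%   log.syslog.ssl_options.verify = verify_peer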

{mapping, "log.file", "rabbit.log.file.file", [
    {datatype, [{enum, [false]}, string]}
]}.

{mapping, "log.file.level", "rabbit.log.file.level", [
    {datatype,
     {enum, ['=debug', debug,
             info, '!=info',
             notice, '<=notice',
             '<warning', warning,
             error,
             critical,
             alert,
             emergency,
             none]}}
]}.

{mapping, "log.file.rotation.date", "rabbit.log.file.date", [
    {datatype, string}
]}.

{mapping, "log.file.rotation.compress", "rabbit.log.file.compress", [
    {default, false},
    {datatype, {enum, [true, false]}}
]}.

{mapping, "log.file.rotation.size", "rabbit.log.file.size", [
    {datatype, integer}
]}.

{mapping, "log.file.rotation.count", "rabbit.log.file.count", [
    {datatype, integer}
]}.
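
%% Example (illustrative values): rotate the main log file at
%% 10485760 bytes (10 MiB), keeping 5 compressed archives:
%%
%%   log.file.rotation.size = 10485760
%%   log.file.rotation.count = 5
%%   log.file.rotation.compress = true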

{mapping, "log.file.formatter", "rabbit.log.file.formatter", [
    {default, plaintext},
    {datatype, {enum, [plaintext, json]}}
]}.

{mapping, "log.file.formatter.time_format", "rabbit.log.file.formatter", [
    {default, rfc3339_space},
    {datatype, [{enum, [rfc3339_space, rfc3339_T, epoch_usecs, epoch_secs, lager_default]}, string]}
]}.

{mapping, "log.file.formatter.level_format", "rabbit.log.file.formatter", [
    {default, lc},
    {datatype, {enum, [lc, uc, lc3, uc3, lc4, uc4]}}
]}.

{mapping, "log.file.formatter.single_line", "rabbit.log.file.formatter", [
    {default, off},
    {datatype, flag}
]}.

{mapping, "log.file.formatter.plaintext.format", "rabbit.log.file.formatter", [
    {default, "$time [$level] $pid $msg"},
    {datatype, string}
]}.

{mapping, "log.file.formatter.json.field_map", "rabbit.log.file.formatter", [
    {default, "time level msg gl:- logger_formatter:- report_cb:- error_logger:-"},
    {datatype, string}
]}.

{mapping, "log.file.formatter.json.verbosity_map", "rabbit.log.file.formatter", [
    {default, ""},
    {datatype, string}
]}.

{translation, "rabbit.log.file.formatter",
 fun(Conf) ->
     rabbit_prelaunch_early_logging:translate_formatter_conf("log.file.formatter", Conf)
 end}.

%% Connection log.

{mapping, "log.connection.level", "rabbit.log.categories.connection.level", [
    {datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
]}.

{mapping, "log.connection.file", "rabbit.log.categories.connection.file", [
    {datatype, string}
]}.

{mapping, "log.connection.rotation.date", "rabbit.log.categories.connection.rotate_on_date", [
    {datatype, string}
]}.

{mapping, "log.connection.rotation.compress", "rabbit.log.categories.connection.compress_on_rotate", [
    {default, false},
    {datatype, {enum, [true, false]}}
]}.

{mapping, "log.connection.rotation.size", "rabbit.log.categories.connection.max_no_bytes", [
    {datatype, integer}
]}.

{mapping, "log.connection.rotation.count", "rabbit.log.categories.connection.max_no_files", [
    {datatype, integer}
]}.
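
%% Example (illustrative; the path is a placeholder): route connection
%% lifecycle events to their own file at debug level:
%%
%%   log.connection.level = debug
%%   log.connection.file = /var/log/rabbitmq/connection.log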

%% Channel log.

{mapping, "log.channel.level", "rabbit.log.categories.channel.level", [
    {datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
]}.

{mapping, "log.channel.file", "rabbit.log.categories.channel.file", [
    {datatype, string}
]}.

{mapping, "log.channel.rotation.date", "rabbit.log.categories.channel.rotate_on_date", [
    {datatype, string}
]}.

{mapping, "log.channel.rotation.compress", "rabbit.log.categories.channel.compress_on_rotate", [
    {default, false},
    {datatype, {enum, [true, false]}}
]}.

{mapping, "log.channel.rotation.size", "rabbit.log.categories.channel.max_no_bytes", [
    {datatype, integer}
]}.

{mapping, "log.channel.rotation.count", "rabbit.log.categories.channel.max_no_files", [
    {datatype, integer}
]}.

%% Mirroring log.

{mapping, "log.mirroring.level", "rabbit.log.categories.mirroring.level", [
    {datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
]}.

{mapping, "log.mirroring.file", "rabbit.log.categories.mirroring.file", [
    {datatype, string}
]}.

{mapping, "log.mirroring.rotation.date", "rabbit.log.categories.mirroring.rotate_on_date", [
    {datatype, string}
]}.

{mapping, "log.mirroring.rotation.compress", "rabbit.log.categories.mirroring.compress_on_rotate", [
    {default, false},
    {datatype, {enum, [true, false]}}
]}.

{mapping, "log.mirroring.rotation.size", "rabbit.log.categories.mirroring.max_no_bytes", [
    {datatype, integer}
]}.

{mapping, "log.mirroring.rotation.count", "rabbit.log.categories.mirroring.max_no_files", [
    {datatype, integer}
]}.

%% Queue log.

{mapping, "log.queue.level", "rabbit.log.categories.queue.level", [
    {datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
]}.

{mapping, "log.queue.file", "rabbit.log.categories.queue.file", [
    {datatype, string}
]}.

{mapping, "log.queue.rotation.date", "rabbit.log.categories.queue.rotate_on_date", [
    {datatype, string}
]}.

{mapping, "log.queue.rotation.compress", "rabbit.log.categories.queue.compress_on_rotate", [
    {default, false},
    {datatype, {enum, [true, false]}}
]}.

{mapping, "log.queue.rotation.size", "rabbit.log.categories.queue.max_no_bytes", [
    {datatype, integer}
]}.

{mapping, "log.queue.rotation.count", "rabbit.log.categories.queue.max_no_files", [
    {datatype, integer}
]}.

%% Federation log.

{mapping, "log.federation.level", "rabbit.log.categories.federation.level", [
    {datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
]}.

{mapping, "log.federation.file", "rabbit.log.categories.federation.file", [
    {datatype, string}
]}.

{mapping, "log.federation.rotation.date", "rabbit.log.categories.federation.rotate_on_date", [
    {datatype, string}
]}.

{mapping, "log.federation.rotation.compress", "rabbit.log.categories.federation.compress_on_rotate", [
    {default, false},
    {datatype, {enum, [true, false]}}
]}.

{mapping, "log.federation.rotation.size", "rabbit.log.categories.federation.max_no_bytes", [
    {datatype, integer}
]}.

{mapping, "log.federation.rotation.count", "rabbit.log.categories.federation.max_no_files", [
    {datatype, integer}
]}.

%% Upgrade log.

{mapping, "log.upgrade.level", "rabbit.log.categories.upgrade.level", [
    {datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
]}.

{mapping, "log.upgrade.file", "rabbit.log.categories.upgrade.file", [
    {datatype, string}
]}.

{mapping, "log.upgrade.rotation.date", "rabbit.log.categories.upgrade.rotate_on_date", [
    {datatype, string}
]}.

{mapping, "log.upgrade.rotation.compress", "rabbit.log.categories.upgrade.compress_on_rotate", [
    {default, false},
    {datatype, {enum, [true, false]}}
]}.

{mapping, "log.upgrade.rotation.size", "rabbit.log.categories.upgrade.max_no_bytes", [
    {datatype, integer}
]}.

{mapping, "log.upgrade.rotation.count", "rabbit.log.categories.upgrade.max_no_files", [
    {datatype, integer}
]}.

%% Ra log.

{mapping, "log.ra.level", "rabbit.log.categories.ra.level", [
    {datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
]}.

{mapping, "log.ra.file", "rabbit.log.categories.ra.file", [
    {datatype, string}
]}.

{mapping, "log.ra.rotation.date", "rabbit.log.categories.ra.rotate_on_date", [
    {datatype, string}
]}.

{mapping, "log.ra.rotation.compress", "rabbit.log.categories.ra.compress_on_rotate", [
    {default, false},
    {datatype, {enum, [true, false]}}
]}.

{mapping, "log.ra.rotation.size", "rabbit.log.categories.ra.max_no_bytes", [
    {datatype, integer}
]}.

{mapping, "log.ra.rotation.count", "rabbit.log.categories.ra.max_no_files", [
    {datatype, integer}
]}.

%% Default logging config.

{mapping, "log.default.level", "rabbit.log.categories.default.level", [
    {datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
]}.

{mapping, "log.default.rotation.date", "rabbit.log.categories.default.rotate_on_date", [
    {datatype, string}
]}.

{mapping, "log.default.rotation.compress", "rabbit.log.categories.default.compress_on_rotate", [
    {default, false},
    {datatype, {enum, [true, false]}}
]}.

{mapping, "log.default.rotation.size", "rabbit.log.categories.default.max_no_bytes", [
    {datatype, integer}
]}.

{mapping, "log.default.rotation.count", "rabbit.log.categories.default.max_no_files", [
    {datatype, integer}
]}.

%%
%% Feature flags and deprecated features
%% =====================================
%%

%% NOTE: `true` is intentionally omitted - add it back when mirrored
%% queue deprecation is converted to use deprecated features system.
{mapping,
 "deprecated_features.permit.$name", "rabbit.permitted_deprecated_features",
 [{datatype, {enum, [false]}}]
}.

%% This converts:
%%   deprecated_features.permit.my_feature = false
%% to:
%%   {rabbit, [{permitted_deprecated_features, #{my_feature => false}}]}.
{translation, "rabbit.permitted_deprecated_features",
 fun(Conf) ->
     Settings = cuttlefish_variable:filter_by_prefix(
                  "deprecated_features.permit", Conf),
     maps:from_list(
       [{list_to_atom(FeatureName), State}
        || {["deprecated_features", "permit", FeatureName], State}
           <- Settings])
 end}.

% ==========================
% Kernel section
% ==========================

{mapping, "net_ticktime", "kernel.net_ticktime", [
    {datatype, [integer]},
    {validators, ["non_zero_positive_integer"]}
]}.
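
%% Example (illustrative): 60 seconds is the Erlang VM default; lower
%% values detect unreachable peers faster at the cost of more
%% inter-node traffic:
%%
%%   net_ticktime = 60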

{mapping, "distribution.listener.port_range.min", "kernel.inet_dist_listen_min", [
    {datatype, [integer]},
    {validators, ["non_zero_positive_integer"]}
]}.

{mapping, "distribution.listener.port_range.max", "kernel.inet_dist_listen_max", [
    {datatype, [integer]},
    {validators, ["non_zero_positive_integer"]}
]}.

{mapping, "distribution.listener.interface", "kernel.inet_dist_use_interface", [
    {datatype, [string]},
    {validators, ["is_ip"]}
]}.

{translation, "kernel.inet_dist_use_interface",
 fun(Conf) ->
     case cuttlefish:conf_get("distribution.listener.interface", Conf, undefined) of
         undefined ->
             cuttlefish:unset();
         Value when is_list(Value) ->
             case inet:parse_address(Value) of
                 {ok, Parsed} -> Parsed;
                 {error, _} -> cuttlefish:invalid("should be a valid IP address")
             end;
         _ ->
             cuttlefish:invalid("should be a valid IP address")
     end
 end}.
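
%% Example (illustrative values): pin inter-node distribution to a
%% single port on one interface; the translation above parses the
%% string into an inet:ip_address() tuple:
%%
%%   distribution.listener.port_range.min = 25672
%%   distribution.listener.port_range.max = 25672
%%   distribution.listener.interface = 127.0.0.1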

% ==========================
% sysmon_handler section
% ==========================

%% @doc The threshold at which to warn about the number of processes
%% that are overly busy. Processes with large heaps or that take a
%% long time to garbage collect will count toward this threshold.
{mapping, "sysmon_handler.thresholds.busy_processes", "sysmon_handler.process_limit", [
    {datatype, integer},
    hidden
]}.

{translation, "sysmon_handler.process_limit",
 fun(Conf) ->
     case cuttlefish:conf_get("sysmon_handler.thresholds.busy_processes", Conf, undefined) of
         undefined ->
             cuttlefish:unset();
         Int when is_integer(Int) ->
             Int;
         _ ->
             cuttlefish:invalid("should be a non-negative integer")
     end
 end}.

%% @doc The threshold at which to warn about the number of ports that
%% are overly busy. Ports with full input buffers count toward this
%% threshold.
{mapping, "sysmon_handler.thresholds.busy_ports", "sysmon_handler.port_limit", [
    {datatype, integer},
    hidden
]}.

{translation, "sysmon_handler.port_limit",
 fun(Conf) ->
     case cuttlefish:conf_get("sysmon_handler.thresholds.busy_ports", Conf, undefined) of
         undefined ->
             cuttlefish:unset();
         Int when is_integer(Int) ->
             Int;
         _ ->
             cuttlefish:invalid("should be a non-negative integer")
     end
 end}.

%% @doc A process will become busy when it exceeds this amount of time
%% doing garbage collection.
%% @see sysmon_handler.thresholds.busy_processes
{mapping, "sysmon_handler.triggers.process.garbage_collection", "sysmon_handler.gc_ms_limit", [
    {datatype, [{atom, off},
                {duration, ms}]},
    hidden
]}.

{translation, "sysmon_handler.gc_ms_limit",
 fun(Conf) ->
     case cuttlefish:conf_get("sysmon_handler.triggers.process.garbage_collection", Conf, undefined) of
         undefined ->
             cuttlefish:unset();
         off ->
             0;
         Int when is_integer(Int) ->
             Int;
         _ ->
             cuttlefish:invalid("should be a non-negative integer")
     end
 end}.
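
%% Example (illustrative value): report processes spending more than
%% 100 milliseconds in a single garbage collection; "off" disables the
%% trigger (it maps to 0 in the translation above):
%%
%%   sysmon_handler.triggers.process.garbage_collection = 100ms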

%% @doc A process will become busy when it exceeds this amount of time
%% during a single process scheduling & execution cycle.
{mapping, "sysmon_handler.triggers.process.long_scheduled_execution", "sysmon_handler.schedule_ms_limit", [
    {datatype, [{atom, off},
                {duration, ms}]},
    hidden
]}.

{translation, "sysmon_handler.schedule_ms_limit",
 fun(Conf) ->
     case cuttlefish:conf_get("sysmon_handler.triggers.process.long_scheduled_execution", Conf, undefined) of
         undefined ->
             cuttlefish:unset();
         off ->
             0;
         Int when is_integer(Int) ->
             Int;
         _ ->
             cuttlefish:invalid("should be a non-negative integer")
     end
 end}.

%% @doc A process will become busy when its heap exceeds this size.
%% @see sysmon_handler.thresholds.busy_processes
{mapping, "sysmon_handler.triggers.process.heap_size", "sysmon_handler.heap_word_limit", [
    {datatype, [{atom, off},
                bytesize]},
    hidden
]}.

{translation, "sysmon_handler.heap_word_limit",
 fun(Conf) ->
     case cuttlefish:conf_get("sysmon_handler.triggers.process.heap_size", Conf, undefined) of
         undefined ->
             cuttlefish:unset();
         off ->
             0;
         Bytes when is_integer(Bytes) ->
             WordSize = erlang:system_info(wordsize),
             Bytes div WordSize;
         _ ->
             cuttlefish:invalid("should be a non-negative integer")
     end
 end}.
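
%% Example (illustrative value): the configured byte size is converted
%% to machine words by dividing by erlang:system_info(wordsize), i.e.
%% by 8 on a 64-bit VM:
%%
%%   sysmon_handler.triggers.process.heap_size = 160MB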

%% @doc Whether ports with full input buffers will be counted as
%% busy. Ports can represent open files or network sockets.
%% @see sysmon_handler.thresholds.busy_ports
{mapping, "sysmon_handler.triggers.port", "sysmon_handler.busy_port", [
    {datatype, flag},
    hidden
]}.

{translation, "sysmon_handler.busy_port",
 fun(Conf) ->
     case cuttlefish:conf_get("sysmon_handler.triggers.port", Conf, undefined) of
         undefined ->
             cuttlefish:unset();
         Val -> Val
     end
 end}.

%% @doc Whether distribution ports with full input buffers will be
%% counted as busy. Distribution ports connect Erlang nodes within a
%% single cluster.
%% @see sysmon_handler.thresholds.busy_ports
{mapping, "sysmon_handler.triggers.distribution_port", "sysmon_handler.busy_dist_port", [
    {datatype, flag},
    hidden
]}.

{translation, "sysmon_handler.busy_dist_port",
 fun(Conf) ->
     case cuttlefish:conf_get("sysmon_handler.triggers.distribution_port", Conf, undefined) of
         undefined ->
             cuttlefish:unset();
         Val -> Val
     end
 end}.

%%
%% Ra
%%

{mapping, "raft.segment_max_entries", "ra.segment_max_entries", [
    {datatype, integer},
    {validators, ["non_zero_positive_integer"]}
]}.

{translation, "ra.segment_max_entries",
 fun(Conf) ->
     case cuttlefish:conf_get("raft.segment_max_entries", Conf, undefined) of
         undefined -> cuttlefish:unset();
         Val -> Val
     end
 end}.

{mapping, "raft.wal_max_size_bytes", "ra.wal_max_size_bytes", [
    {datatype, integer},
    {validators, ["non_zero_positive_integer"]}
]}.

{translation, "ra.wal_max_size_bytes",
 fun(Conf) ->
     case cuttlefish:conf_get("raft.wal_max_size_bytes", Conf, undefined) of
         undefined -> cuttlefish:unset();
         Val -> Val
     end
 end}.
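
%% Example (illustrative value): cap the Raft write-ahead log at
%% 64 MiB (67108864 bytes):
%%
%%   raft.wal_max_size_bytes = 67108864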

{mapping, "raft.wal_max_entries", "ra.wal_max_entries", [
    {datatype, integer},
    {validators, ["non_zero_positive_integer"]}
]}.

{translation, "ra.wal_max_entries",
 fun(Conf) ->
     case cuttlefish:conf_get("raft.wal_max_entries", Conf, undefined) of
         undefined -> cuttlefish:unset();
         Val -> Val
     end
 end}.

{mapping, "raft.wal_hibernate_after", "ra.wal_hibernate_after", [
    {datatype, integer},
    {validators, ["non_zero_positive_integer"]}
]}.

{translation, "ra.wal_hibernate_after",
 fun(Conf) ->
     case cuttlefish:conf_get("raft.wal_hibernate_after", Conf, undefined) of
         undefined -> cuttlefish:unset();
         Val -> Val
     end
 end}.

{mapping, "raft.wal_max_batch_size", "ra.wal_max_batch_size", [
    {datatype, integer},
    {validators, ["non_zero_positive_integer"]}
]}.

{translation, "ra.wal_max_batch_size",
 fun(Conf) ->
     case cuttlefish:conf_get("raft.wal_max_batch_size", Conf, undefined) of
         undefined -> cuttlefish:unset();
         Val -> Val
     end
 end}.

{mapping, "raft.snapshot_chunk_size", "ra.snapshot_chunk_size", [
    {datatype, integer},
    {validators, ["non_zero_positive_integer"]}
]}.

{translation, "ra.snapshot_chunk_size",
 fun(Conf) ->
     case cuttlefish:conf_get("raft.snapshot_chunk_size", Conf, undefined) of
         undefined -> cuttlefish:unset();
         Val -> Val
     end
 end}.

{mapping, "raft.data_dir", "ra.data_dir", [
    {datatype, string}
]}.

{translation, "ra.data_dir",
 fun(Conf) ->
     case cuttlefish:conf_get("raft.data_dir", Conf, undefined) of
         undefined -> cuttlefish:unset();
         Val -> Val
     end
 end}.

{mapping, "raft.adaptive_failure_detector.poll_interval", "aten.poll_interval", [
    {datatype, integer},
    {validators, ["non_zero_positive_integer"]}
]}.

{translation, "aten.poll_interval",
 fun(Conf) ->
     case cuttlefish:conf_get("raft.adaptive_failure_detector.poll_interval", Conf, undefined) of
         undefined -> cuttlefish:unset();
         Val -> Val
     end
 end}.

%%
%% Backing queue version
%%

{mapping, "classic_queue.default_version", "rabbit.classic_queue_default_version", [
    {datatype, integer},
    {validators, ["non_zero_positive_integer"]}
]}.

{translation, "rabbit.classic_queue_default_version",
 fun(Conf) ->
     case cuttlefish:conf_get("classic_queue.default_version", Conf, 1) of
         1 -> 1;
         2 -> 2;
         _ -> cuttlefish:unset()
     end
 end}.
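
%% Example (illustrative): opt new classic queues into the version 2
%% on-disk format; any value other than 1 or 2 is ignored by the
%% translation above:
%%
%%   classic_queue.default_version = 2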

{mapping, "quorum_queue.compute_checksums", "rabbit.quorum_compute_checksums", [
    {datatype, {enum, [true, false]}}]}.

%%
%% Runtime parameters
%%

{mapping, "runtime_parameters.limits.$category", "rabbit.runtime_parameters.limits", [
    {datatype, integer},
    {validators, ["non_negative_integer"]}
]}.

{translation, "rabbit.runtime_parameters.limits",
 fun(Conf) ->
     case cuttlefish_variable:filter_by_prefix("runtime_parameters.limits", Conf) of
         [] -> cuttlefish:unset();
         Ss -> [ {list_to_binary(Category), Limit} || {[_, _, Category], Limit} <- Ss ]
     end
 end}.
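
%% Example (illustrative; "shovel" stands in for any parameter
%% category): runtime_parameters.limits.shovel = 10 becomes
%% {rabbit, [{runtime_parameters, [{limits, [{<<"shovel">>, 10}]}]}]}.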

% ===============================
% Validators
% ===============================

{validator, "mirroring_sync_batch_size", "Batch size should be greater than 0 and less than 1M",
 fun(Size) when is_integer(Size) ->
     Size > 0 andalso Size =< 1000000
 end}.

{validator, "max_message_size", "Max message size should be between 0 and 512MB",
 fun(Size) when is_integer(Size) ->
     Size > 0 andalso Size =< 536870912
 end}.

{validator, "less_than_1", "Float is not between 0 and 1",
 fun(Float) when is_float(Float) ->
     Float > 0 andalso Float < 1
 end}.

{validator, "port", "Invalid port number",
 fun(Port) when is_integer(Port) ->
     Port > 0 andalso Port < 65535
 end}.

{validator, "byte", "Integer must be in the range [0, 255]",
 fun(Int) when is_integer(Int) ->
     Int >= 0 andalso Int =< 255
 end}.

{validator, "dir_writable", "Directory must be writable",
 fun(Dir) ->
     TestFile = filename:join(Dir, "test_file"),
     file:delete(TestFile),
     Res = ok == file:write_file(TestFile, <<"test">>),
     file:delete(TestFile),
     Res
 end}.

{validator, "file_accessible", "file does not exist or cannot be read by the node",
 fun(File) ->
     case file:read_file_info(File) of
         {ok, FileInfo} -> (element(4, FileInfo) == read) or (element(4, FileInfo) == read_write);
         _ -> false
     end
 end}.

{validator, "is_ip", "value should be a valid IP address",
 fun(IpStr) ->
     Res = inet:parse_address(IpStr),
     element(1, Res) == ok
 end}.

{validator, "non_negative_integer", "number should be greater than or equal to zero",
 fun(Int) when is_integer(Int) ->
     Int >= 0
 end}.

{validator, "non_zero_positive_integer", "number should be greater than or equal to one",
 fun(Int) when is_integer(Int) ->
     Int >= 1
 end}.

{validator, "valid_regex", "string must be a valid regular expression",
 fun("") -> false;
    (String) -> {Res, _} = re:compile(String),
                Res =:= ok
 end}.