% vim:ft=erlang:
% ==============================
% Rabbit app section
% ==============================
%%
%% Network Connectivity
%% ====================
%%
%% By default, RabbitMQ will listen on all interfaces, using
%% the standard (reserved) AMQP port.
%%
%% {tcp_listeners, [5672]},
%% To listen on a specific interface, provide a tuple of {IpAddress, Port}.
%% For example, to listen only on localhost for both IPv4 and IPv6:
%%
%% {tcp_listeners, [{"127.0.0.1", 5672},
%% {"[::1]", 5672}]},
{mapping, "listeners.tcp", "rabbit.tcp_listeners",[
{datatype, {enum, [none]}}
]}.
{mapping, "listeners.tcp.$name", "rabbit.tcp_listeners",[
{datatype, [integer, ip]}
]}.
{translation, "rabbit.tcp_listeners",
fun(Conf) ->
case cuttlefish:conf_get("listeners.tcp", Conf, undefined) of
none -> [];
_ ->
Settings = cuttlefish_variable:filter_by_prefix("listeners.tcp", Conf),
[ V || {_, V} <- Settings ]
end
end}.
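%% In the sugared rabbitmq.conf syntax defined by the mappings above, the
%% same listeners could be expressed as follows (listener names after
%% "listeners.tcp." are arbitrary; values are illustrative):
%%
%% listeners.tcp.default  = 5672
%% listeners.tcp.local    = 127.0.0.1:5672
%% listeners.tcp.local_v6 = ::1:5672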
%% TLS listeners are configured in the same fashion as TCP listeners,
%% including the option to control the choice of interface.
%%
%% {ssl_listeners, [5671]},
{mapping, "listeners.ssl", "rabbit.ssl_listeners",[
{datatype, {enum, [none]}}
]}.
{mapping, "listeners.ssl.$name", "rabbit.ssl_listeners",[
{datatype, [integer, ip]}
]}.
{translation, "rabbit.ssl_listeners",
fun(Conf) ->
case cuttlefish:conf_get("listeners.ssl", Conf, undefined) of
none -> [];
_ ->
Settings = cuttlefish_variable:filter_by_prefix("listeners.ssl", Conf),
[ V || {_, V} <- Settings ]
end
end}.
%% Number of Erlang processes that will accept connections for the TCP
%% and TLS listeners.
%%
%% {num_tcp_acceptors, 10},
%% {num_ssl_acceptors, 1},
{mapping, "num_acceptors.ssl", "rabbit.num_ssl_acceptors", [
{datatype, integer}
]}.
{mapping, "num_acceptors.tcp", "rabbit.num_tcp_acceptors", [
{datatype, integer}
]}.
{mapping, "socket_writer.gc_threshold", "rabbit.writer_gc_threshold", [
{datatype, [{atom, off}, integer]}
]}.
{translation, "rabbit.writer_gc_threshold",
fun(Conf) ->
case cuttlefish:conf_get("socket_writer.gc_threshold", Conf, undefined) of
%% missing from the config
undefined -> cuttlefish:unset();
%% explicitly disabled
off -> undefined;
Int when is_integer(Int) andalso Int > 0 ->
Int;
_ ->
cuttlefish:invalid("should be a non-negative integer")
end
end
}.
%% Maximum time for the AMQP 0-9-1 handshake (after socket connection
%% and TLS handshake), in milliseconds.
%%
%% {handshake_timeout, 10000},
{mapping, "handshake_timeout", "rabbit.handshake_timeout", [
{datatype, [{atom, infinity}, integer]}
]}.
%% Set to 'true' to perform reverse DNS lookups when accepting a
%% connection. Hostnames will then be shown instead of IP addresses
%% in rabbitmqctl and the management plugin.
%%
%% {reverse_dns_lookups, true},
{mapping, "reverse_dns_lookups", "rabbit.reverse_dns_lookups", [
{datatype, {enum, [true, false]}}
]}.
{mapping, "erlang.K", "vm_args.+K", [
{default, "true"},
{level, advanced}
]}.
%%
%% Definition import
%%
%% Original key for definition loading from a JSON file or directory of files. See
%% https://www.rabbitmq.com/docs/management#load-definitions
{mapping, "load_definitions", "rabbit.load_definitions",
[{datatype, string},
{validators, ["file_accessible"]}]}.
%% Newer syntax for definition loading from a JSON file or directory of files. See
%% https://www.rabbitmq.com/docs/management#load-definitions
{mapping, "definitions.local.path", "rabbit.definitions.local_path",
[{datatype, string},
{validators, ["file_accessible"]}]}.
%% Extensible mechanism for loading definitions from a remote source
{mapping, "definitions.import_backend", "rabbit.definitions.import_backend", [
{datatype, atom}
]}.
{translation, "rabbit.definitions.import_backend",
fun(Conf) ->
case cuttlefish:conf_get("definitions.import_backend", Conf, rabbit_definitions_import_local_filesystem) of
%% short aliases for known backends
local_filesystem -> rabbit_definitions_import_local_filesystem;
local -> rabbit_definitions_import_local_filesystem;
https -> rabbit_definitions_import_https;
http -> rabbit_definitions_import_https;
%% accept both rabbitmq_ and rabbit_ (typical core module prefix)
rabbitmq_definitions_import_local_filesystem -> rabbit_definitions_import_local_filesystem;
rabbitmq_definitions_import_http -> rabbit_definitions_import_https;
%% any other value is used as is
Module -> Module
end
end}.
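%% For example, to import definitions from a local file on node boot
%% (rabbitmq.conf syntax; the path is illustrative):
%%
%% definitions.import_backend = local_filesystem
%% definitions.local.path     = /etc/rabbitmq/definitions.json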
{mapping, "definitions.skip_if_unchanged", "rabbit.definitions.skip_if_unchanged", [
{datatype, {enum, [true, false]}}]}.
{mapping, "definitions.hashing.algorithm", "rabbit.definitions.hashing_algorithm", [
{datatype, {enum, [sha, sha224, sha256, sha384, sha512]}}]}.
%% Load definitions from a remote URL over HTTPS. See
%% https://www.rabbitmq.com/docs/management#load-definitions
{mapping, "definitions.https.url", "rabbit.definitions.url",
[{datatype, string}]}.
%% Client-side TLS settings used by, for example, the HTTPS definition loading mechanism.
%% These settings can be reused by other clients.
{mapping, "definitions.tls.verify", "rabbit.definitions.ssl_options.verify", [
{datatype, {enum, [verify_peer, verify_none]}}]}.
{mapping, "definitions.tls.fail_if_no_peer_cert", "rabbit.definitions.ssl_options.fail_if_no_peer_cert", [
{datatype, {enum, [true, false]}}]}.
{mapping, "definitions.tls.cacertfile", "rabbit.definitions.ssl_options.cacertfile",
[{datatype, string}, {validators, ["file_accessible"]}]}.
{mapping, "definitions.tls.certfile", "rabbit.definitions.ssl_options.certfile",
[{datatype, string}, {validators, ["file_accessible"]}]}.
{mapping, "definitions.tls.cacerts.$name", "rabbit.definitions.ssl_options.cacerts",
[{datatype, string}]}.
{translation, "rabbit.definitions.ssl_options.cacerts",
fun(Conf) ->
Settings = cuttlefish_variable:filter_by_prefix("definitions.tls.cacerts", Conf),
[ list_to_binary(V) || {_, V} <- Settings ]
end}.
{mapping, "definitions.tls.cert", "rabbit.definitions.ssl_options.cert",
[{datatype, string}]}.
{translation, "rabbit.definitions.ssl_options.cert",
fun(Conf) ->
list_to_binary(cuttlefish:conf_get("definitions.tls.cert", Conf))
end}.
{mapping, "definitions.tls.reuse_session", "rabbit.definitions.ssl_options.reuse_session",
[{datatype, {enum, [true, false]}}]}.
{mapping, "definitions.tls.crl_check", "rabbit.definitions.ssl_options.crl_check",
[{datatype, [{enum, [true, false, peer, best_effort]}]}]}.
{mapping, "definitions.tls.depth", "rabbit.definitions.ssl_options.depth",
[{datatype, integer}, {validators, ["byte"]}]}.
{mapping, "definitions.tls.dh", "rabbit.definitions.ssl_options.dh",
[{datatype, string}]}.
{translation, "rabbit.definitions.ssl_options.dh",
fun(Conf) ->
list_to_binary(cuttlefish:conf_get("definitions.tls.dh", Conf))
end}.
{translation, "rabbit.definitions.ssl_options.key",
fun(Conf) ->
case cuttlefish_variable:filter_by_prefix("definitions.tls.key", Conf) of
[{[_,_,Key], Val}|_] -> {list_to_atom(Key), list_to_binary(Val)};
_ -> cuttlefish:unset()
end
end}.
{mapping, "definitions.tls.keyfile", "rabbit.definitions.ssl_options.keyfile",
[{datatype, string}, {validators, ["file_accessible"]}]}.
{mapping, "definitions.tls.log_alert", "rabbit.definitions.ssl_options.log_alert",
[{datatype, {enum, [true, false]}}]}.
{mapping, "definitions.tls.password", "rabbit.definitions.ssl_options.password",
[{datatype, [tagged_binary, binary]}]}.
{translation, "rabbit.definitions.ssl_options.password",
fun(Conf) ->
rabbit_cuttlefish:optionally_tagged_string("definitions.tls.password", Conf)
end}.
{mapping, "definitions.tls.secure_renegotiate", "rabbit.definitions.ssl_options.secure_renegotiate",
[{datatype, {enum, [true, false]}}]}.
{mapping, "definitions.tls.reuse_sessions", "rabbit.definitions.ssl_options.reuse_sessions",
[{datatype, {enum, [true, false]}}]}.
{mapping, "definitions.tls.versions.$version", "rabbit.definitions.ssl_options.versions",
[{datatype, atom}]}.
{translation, "rabbit.definitions.ssl_options.versions",
fun(Conf) ->
Settings = cuttlefish_variable:filter_by_prefix("definitions.tls.versions", Conf),
[V || {_, V} <- Settings]
end}.
{mapping, "definitions.tls.ciphers.$cipher", "rabbit.definitions.ssl_options.ciphers",
[{datatype, string}]}.
{translation, "rabbit.definitions.ssl_options.ciphers",
fun(Conf) ->
Settings = cuttlefish_variable:filter_by_prefix("definitions.tls.ciphers", Conf),
lists:reverse([V || {_, V} <- Settings])
end}.
{mapping, "definitions.tls.log_level", "rabbit.definitions.ssl_options.log_level",
[{datatype, {enum, [emergency, alert, critical, error, warning, notice, info, debug]}}]}.
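%% For example, to fetch definitions over HTTPS with peer verification
%% (rabbitmq.conf syntax; the URL and path are illustrative):
%%
%% definitions.import_backend = https
%% definitions.https.url      = https://definitions.example.local/defs.json
%% definitions.tls.verify     = verify_peer
%% definitions.tls.cacertfile = /path/to/ca_bundle.pem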
%%
%% Seed User, Authentication, Access Control
%%
%% The default "guest" user is only permitted to access the server
%% via a loopback interface (e.g. localhost).
%% {loopback_users, [<<"guest">>]},
%%
%% Uncomment the following line if you want to allow access to the
%% guest user from anywhere on the network.
%% {loopback_users, []},
{mapping, "loopback_users", "rabbit.loopback_users", [
{datatype, {enum, [none]}}
]}.
{mapping, "loopback_users.$user", "rabbit.loopback_users", [
{datatype, atom}
]}.
{translation, "rabbit.loopback_users",
fun(Conf) ->
None = cuttlefish:conf_get("loopback_users", Conf, undefined),
case None of
none -> [];
_ ->
Settings = cuttlefish_variable:filter_by_prefix("loopback_users", Conf),
[ list_to_binary(U) || {["loopback_users", U], V} <- Settings, V == true ]
end
end}.
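%% For example, to lift the loopback restriction entirely (rabbitmq.conf syntax):
%%
%% loopback_users = none
%%
%% or to keep restricting the default user to loopback connections only:
%%
%% loopback_users.guest = true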
%% TLS options.
%% See https://www.rabbitmq.com/docs/ssl for full documentation.
%%
%% {ssl_options, [{cacertfile, "/path/to/testca/cacert.pem"},
%% {certfile, "/path/to/server/cert.pem"},
%% {keyfile, "/path/to/server/key.pem"},
%% {verify, verify_peer},
%% {fail_if_no_peer_cert, false}]},
{mapping, "ssl_allow_poodle_attack", "rabbit.ssl_allow_poodle_attack",
[{datatype, {enum, [true, false]}}]}.
{mapping, "ssl_options", "rabbit.ssl_options", [
{datatype, {enum, [none]}}
]}.
{translation, "rabbit.ssl_options",
fun(Conf) ->
case cuttlefish:conf_get("ssl_options", Conf, undefined) of
none -> [];
_ -> cuttlefish:invalid("Invalid ssl_options")
end
end}.
{mapping, "ssl_options.verify", "rabbit.ssl_options.verify", [
{datatype, {enum, [verify_peer, verify_none]}}]}.
{mapping, "ssl_options.fail_if_no_peer_cert", "rabbit.ssl_options.fail_if_no_peer_cert", [
{datatype, {enum, [true, false]}}]}.
{mapping, "ssl_options.cacertfile", "rabbit.ssl_options.cacertfile",
[{datatype, string}, {validators, ["file_accessible"]}]}.
{mapping, "ssl_options.certfile", "rabbit.ssl_options.certfile",
[{datatype, string}, {validators, ["file_accessible"]}]}.
{mapping, "ssl_options.cacerts.$name", "rabbit.ssl_options.cacerts",
[{datatype, string}]}.
{translation, "rabbit.ssl_options.cacerts",
fun(Conf) ->
Settings = cuttlefish_variable:filter_by_prefix("ssl_options.cacerts", Conf),
[ list_to_binary(V) || {_, V} <- Settings ]
end}.
{mapping, "ssl_options.cert", "rabbit.ssl_options.cert",
[{datatype, string}]}.
{translation, "rabbit.ssl_options.cert",
fun(Conf) ->
list_to_binary(cuttlefish:conf_get("ssl_options.cert", Conf))
end}.
{mapping, "ssl_options.client_renegotiation", "rabbit.ssl_options.client_renegotiation",
[{datatype, {enum, [true, false]}}]}.
{mapping, "ssl_options.crl_check", "rabbit.ssl_options.crl_check",
[{datatype, [{enum, [true, false, peer, best_effort]}]}]}.
{mapping, "ssl_options.depth", "rabbit.ssl_options.depth",
[{datatype, integer}, {validators, ["byte"]}]}.
{mapping, "ssl_options.dh", "rabbit.ssl_options.dh",
[{datatype, string}]}.
{translation, "rabbit.ssl_options.dh",
fun(Conf) ->
list_to_binary(cuttlefish:conf_get("ssl_options.dh", Conf))
end}.
{mapping, "ssl_options.dhfile", "rabbit.ssl_options.dhfile",
[{datatype, string}, {validators, ["file_accessible"]}]}.
{mapping, "ssl_options.honor_cipher_order", "rabbit.ssl_options.honor_cipher_order",
[{datatype, {enum, [true, false]}}]}.
{mapping, "ssl_options.honor_ecc_order", "rabbit.ssl_options.honor_ecc_order",
[{datatype, {enum, [true, false]}}]}.
{mapping, "ssl_options.key.RSAPrivateKey", "rabbit.ssl_options.key",
[{datatype, string}]}.
{mapping, "ssl_options.key.DSAPrivateKey", "rabbit.ssl_options.key",
[{datatype, string}]}.
{mapping, "ssl_options.key.PrivateKeyInfo", "rabbit.ssl_options.key",
[{datatype, string}]}.
{translation, "rabbit.ssl_options.key",
fun(Conf) ->
case cuttlefish_variable:filter_by_prefix("ssl_options.key", Conf) of
[{[_,_,Key], Val}|_] -> {list_to_atom(Key), list_to_binary(Val)};
_ -> cuttlefish:unset()
end
end}.
{mapping, "ssl_options.keyfile", "rabbit.ssl_options.keyfile",
[{datatype, string}, {validators, ["file_accessible"]}]}.
{mapping, "ssl_options.log_level", "rabbit.ssl_options.log_level",
[{datatype, {enum, [emergency, alert, critical, error, warning, notice, info, debug]}}]}.
{mapping, "ssl_options.log_alert", "rabbit.ssl_options.log_alert",
[{datatype, {enum, [true, false]}}]}.
{mapping, "ssl_options.password", "rabbit.ssl_options.password",
[{datatype, [tagged_binary, binary]}]}.
{translation, "rabbit.ssl_options.password",
fun(Conf) ->
rabbit_cuttlefish:optionally_tagged_binary("ssl_options.password", Conf)
end}.
{mapping, "ssl_options.psk_identity", "rabbit.ssl_options.psk_identity",
[{datatype, string}]}.
{mapping, "ssl_options.reuse_sessions", "rabbit.ssl_options.reuse_sessions",
[{datatype, {enum, [true, false]}}]}.
{mapping, "ssl_options.secure_renegotiate", "rabbit.ssl_options.secure_renegotiate",
[{datatype, {enum, [true, false]}}]}.
{mapping, "ssl_options.versions.$version", "rabbit.ssl_options.versions",
[{datatype, atom}]}.
{translation, "rabbit.ssl_options.versions",
fun(Conf) ->
Settings = cuttlefish_variable:filter_by_prefix("ssl_options.versions", Conf),
[V || {_, V} <- Settings]
end}.
{mapping, "ssl_options.ciphers.$cipher", "rabbit.ssl_options.ciphers",
[{datatype, string}]}.
{translation, "rabbit.ssl_options.ciphers",
fun(Conf) ->
Settings = cuttlefish_variable:filter_by_prefix("ssl_options.ciphers", Conf),
lists:reverse([V || {_, V} <- Settings])
end}.
{mapping, "ssl_options.bypass_pem_cache", "ssl.bypass_pem_cache",
[{datatype, {enum, [true, false]}}]}.
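%% For example, the classic ssl_options example above expressed in
%% rabbitmq.conf syntax (paths are illustrative):
%%
%% listeners.ssl.default  = 5671
%% ssl_options.cacertfile = /path/to/testca/cacert.pem
%% ssl_options.certfile   = /path/to/server/cert.pem
%% ssl_options.keyfile    = /path/to/server/key.pem
%% ssl_options.verify     = verify_peer
%% ssl_options.fail_if_no_peer_cert = false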
{mapping, "metadata_store.khepri.default_timeout", "rabbit.khepri_default_timeout",
[{datatype, integer}]}.
%% ===========================================================================
%% Choose the available SASL mechanism(s) to expose.
%% The three default (built-in) mechanisms are 'PLAIN', 'AMQPLAIN' and 'ANONYMOUS'.
%% Additional mechanisms can be added via plugins.
%%
%% See https://www.rabbitmq.com/docs/access-control for more details.
%%
%% {auth_mechanisms, ['PLAIN', 'AMQPLAIN', 'ANONYMOUS']},
{mapping, "auth_mechanisms.$name", "rabbit.auth_mechanisms", [
{datatype, atom}]}.
{translation, "rabbit.auth_mechanisms",
fun(Conf) ->
Settings = cuttlefish_variable:filter_by_prefix("auth_mechanisms", Conf),
Sorted = lists:keysort(1, Settings),
[V || {_, V} <- Sorted]
end}.
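%% For example, to expose only the PLAIN and ANONYMOUS mechanisms
%% (rabbitmq.conf syntax; entries are sorted by their key):
%%
%% auth_mechanisms.1 = PLAIN
%% auth_mechanisms.2 = ANONYMOUS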
%% Select an authentication backend to use. RabbitMQ provides an
%% internal backend in the core.
%%
%% {auth_backends, [rabbit_auth_backend_internal]},
{translation, "rabbit.auth_backends",
fun(Conf) ->
Settings = cuttlefish_variable:filter_by_prefix("auth_backends", Conf),
BackendModule = fun
(internal) -> rabbit_auth_backend_internal;
(ldap) -> rabbit_auth_backend_ldap;
(http) -> rabbit_auth_backend_http;
(oauth) -> rabbit_auth_backend_oauth2;
(oauth2) -> rabbit_auth_backend_oauth2;
(cache) -> rabbit_auth_backend_cache;
(amqp) -> rabbit_auth_backend_amqp;
(dummy) -> rabbit_auth_backend_dummy;
(Other) when is_atom(Other) -> Other;
(_) -> cuttlefish:invalid("Unknown/unsupported auth backend")
end,
AuthBackends = [{Num, {default, BackendModule(V)}} || {["auth_backends", Num], V} <- Settings],
AuthNBackends = [{Num, {authn, BackendModule(V)}} || {["auth_backends", Num, "authn"], V} <- Settings],
AuthZBackends = [{Num, {authz, BackendModule(V)}} || {["auth_backends", Num, "authz"], V} <- Settings],
Backends = lists:foldl(
fun({NumStr, {Type, V}}, Acc) ->
Num = case catch list_to_integer(NumStr) of
N when is_integer(N) -> N;
Err ->
cuttlefish:invalid(
iolist_to_binary(io_lib:format(
"Auth backend position in the chain should be an integer ~p", [Err])))
end,
NewVal = case dict:find(Num, Acc) of
{ok, {AuthN, AuthZ}} ->
case {Type, AuthN, AuthZ} of
{authn, undefined, _} ->
{V, AuthZ};
{authz, _, undefined} ->
{AuthN, V};
_ ->
cuttlefish:invalid(
iolist_to_binary(
io_lib:format(
"Auth backend already defined for the ~pth ~p backend",
[Num, Type])))
end;
error ->
case Type of
authn -> {V, undefined};
authz -> {undefined, V};
default -> {V, V}
end
end,
dict:store(Num, NewVal, Acc)
end,
dict:new(),
AuthBackends ++ AuthNBackends ++ AuthZBackends),
lists:map(
fun
({Num, {undefined, AuthZ}}) ->
cuttlefish:warn(
io_lib:format(
"Auth backend undefined for the ~pth authz backend. Using ~p",
[Num, AuthZ])),
{AuthZ, AuthZ};
({Num, {AuthN, undefined}}) ->
cuttlefish:warn(
io_lib:format(
"Authz backend undefined for the ~pth authn backend. Using ~p",
[Num, AuthN])),
{AuthN, AuthN};
({_Num, {Auth, Auth}}) -> Auth;
({_Num, {AuthN, AuthZ}}) -> {AuthN, AuthZ}
end,
lists:keysort(1, dict:to_list(Backends)))
end}.
{mapping, "auth_backends.$num", "rabbit.auth_backends", [
{datatype, atom}
]}.
{mapping, "auth_backends.$num.authn", "rabbit.auth_backends",[
{datatype, atom}
]}.
{mapping, "auth_backends.$num.authz", "rabbit.auth_backends",[
{datatype, atom}
]}.
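%% For example, to try LDAP first and fall back to the internal database,
%% or to split authentication and authorisation between two backends
%% (rabbitmq.conf syntax, using the aliases recognised by the translation above):
%%
%% auth_backends.1 = ldap
%% auth_backends.2 = internal
%%
%% auth_backends.1.authn = ldap
%% auth_backends.1.authz = internal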
%% This pertains to both the rabbitmq_auth_mechanism_ssl plugin and
%% STOMP ssl_cert_login configurations. See the rabbitmq_stomp
%% configuration section later in this file and the README in
%% https://github.com/rabbitmq/rabbitmq-auth-mechanism-ssl for further
%% details.
%%
%% To use the peer certificate's Common Name (CN) field
%% instead of its Distinguished Name (DN) for username extraction.
%%
%% {ssl_cert_login_from, common_name},
%%
%% To use the first SAN value of type DNS:
%%
%% {ssl_cert_login_from, subject_alternative_name},
%% {ssl_cert_login_san_type, dns},
%% {ssl_cert_login_san_index, 0}
{mapping, "ssl_cert_login_from", "rabbit.ssl_cert_login_from", [
{datatype, {enum, [distinguished_name, common_name, subject_alternative_name, subject_alt_name]}}
]}.
{mapping, "ssl_cert_login_san_type", "rabbit.ssl_cert_login_san_type", [
{datatype, {enum, [dns, ip, email, uri, other_name]}}
]}.
{mapping, "ssl_cert_login_san_index", "rabbit.ssl_cert_login_san_index", [
{datatype, integer}, {validators, ["non_negative_integer"]}
]}.
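%% For example, the SAN-based example above expressed in rabbitmq.conf syntax:
%%
%% ssl_cert_login_from      = subject_alternative_name
%% ssl_cert_login_san_type  = dns
%% ssl_cert_login_san_index = 0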
%% TLS handshake timeout, in milliseconds.
%%
%% {ssl_handshake_timeout, 5000},
{mapping, "ssl_handshake_timeout", "rabbit.ssl_handshake_timeout", [
{datatype, integer}
]}.
%% Cluster name
{mapping, "cluster_name", "rabbit.cluster_name", [
{datatype, string}
]}.
%% Default worker process pool size. Used to limit the maximum concurrency
%% of certain operations, e.g. queue initialisation and recovery on node boot.
{mapping, "default_worker_pool_size", "rabbit.default_worker_pool_size", [
{datatype, integer}, {validators, ["non_negative_integer"]}
]}.
%% Password hashing implementation. Will only affect newly
%% created users. To recalculate the hash for an existing user
%% it is necessary to update their password.
%%
%% When importing definitions exported from versions earlier
%% than 3.6.0, it is possible to go back to MD5 (only do this
%% as a temporary measure!) by setting this to rabbit_password_hashing_md5.
%%
%% To use SHA-512, set to rabbit_password_hashing_sha512.
%%
%% {password_hashing_module, rabbit_password_hashing_sha256},
{mapping, "password_hashing_module", "rabbit.password_hashing_module", [
{datatype, atom}
]}.
%% Credential validation.
%%
{mapping, "credential_validator.validation_backend", "rabbit.credential_validator.validation_backend", [
{datatype, atom}
]}.
{mapping, "credential_validator.min_length", "rabbit.credential_validator.min_length", [
{datatype, integer}, {validators, ["non_negative_integer"]}
]}.
{mapping, "credential_validator.regexp", "rabbit.credential_validator.regexp", [
{datatype, string}
]}.
%%
%% Default User / VHost
%% ====================
%%
%% On first start RabbitMQ will create a vhost and a user. These
%% config items control what gets created. See
%% https://www.rabbitmq.com/docs/access-control for further
%% information about vhosts and access control.
%%
%% {default_vhost, <<"/">>},
%% {default_user, <<"guest">>},
%% {default_pass, <<"guest">>},
%% {default_permissions, [<<".*">>, <<".*">>, <<".*">>]},
{mapping, "default_vhost", "rabbit.default_vhost", [
{datatype, string}
]}.
{translation, "rabbit.default_vhost",
fun(Conf) ->
list_to_binary(cuttlefish:conf_get("default_vhost", Conf))
end}.
{mapping, "default_user", "rabbit.default_user", [
{datatype, string}
]}.
{translation, "rabbit.default_user",
fun(Conf) ->
list_to_binary(cuttlefish:conf_get("default_user", Conf))
end}.
{mapping, "default_pass", "rabbit.default_pass", [
{datatype, [tagged_binary, binary]}
]}.
{translation, "rabbit.default_pass",
fun(Conf) ->
rabbit_cuttlefish:optionally_tagged_binary("default_pass", Conf)
end}.
{mapping, "default_permissions.configure", "rabbit.default_permissions", [
{datatype, string}
]}.
{mapping, "default_permissions.read", "rabbit.default_permissions", [
{datatype, string}
]}.
{mapping, "default_permissions.write", "rabbit.default_permissions", [
{datatype, string}
]}.
{translation, "rabbit.default_permissions",
fun(Conf) ->
Settings = cuttlefish_variable:filter_by_prefix("default_permissions", Conf),
Configure = proplists:get_value(["default_permissions", "configure"], Settings),
Read = proplists:get_value(["default_permissions", "read"], Settings),
Write = proplists:get_value(["default_permissions", "write"], Settings),
[list_to_binary(Configure), list_to_binary(Read), list_to_binary(Write)]
end}.
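%% For example, the classic defaults shown above expressed in rabbitmq.conf syntax:
%%
%% default_vhost = /
%% default_user  = guest
%% default_pass  = guest
%% default_permissions.configure = .*
%% default_permissions.read      = .*
%% default_permissions.write     = .*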
%%
%% Extra Default Users
%% ====================
%%
{mapping, "default_users.$name.vhost_pattern", "rabbit.default_users", [
{validators, ["valid_regex"]},
{datatype, string}
]}.
{mapping, "default_users.$name.password", "rabbit.default_users", [
{datatype, [tagged_binary, binary]}
]}.
{mapping, "default_users.$name.configure", "rabbit.default_users", [
{validators, ["valid_regex"]},
{datatype, string}
]}.
{mapping, "default_users.$name.read", "rabbit.default_users", [
{validators, ["valid_regex"]},
{datatype, string}
]}.
{mapping, "default_users.$name.write", "rabbit.default_users", [
{validators, ["valid_regex"]},
{datatype, string}
]}.
{mapping, "default_users.$name.tags", "rabbit.default_users", [
{datatype, {list, atom}}
]}.
{translation, "rabbit.default_users", fun(Conf) ->
case rabbit_cuttlefish:aggregate_props(Conf, ["default_users"]) of
[] -> cuttlefish:unset();
Props -> Props
end
end}.
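%% For example, to seed an additional user on first boot (the user name
%% "reporting" and all values are illustrative; rabbitmq.conf syntax):
%%
%% default_users.reporting.vhost_pattern = ^reports$
%% default_users.reporting.password      = changeme
%% default_users.reporting.configure     = ^$
%% default_users.reporting.read          = .*
%% default_users.reporting.write         = ^$
%% default_users.reporting.tags          = monitoring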
%% Connections that skip the SASL layer or use the SASL mechanism ANONYMOUS will use this identity.
%% Setting this to a username will allow (anonymous) clients to connect and act as that
%% user. For production environments, set this value to 'none'.
{mapping, "anonymous_login_user", "rabbit.anonymous_login_user",
[{datatype, [{enum, [none]}, binary]}]}.
{mapping, "anonymous_login_pass", "rabbit.anonymous_login_pass", [
{datatype, [tagged_binary, binary]}
]}.
{translation, "rabbit.anonymous_login_pass",
fun(Conf) ->
rabbit_cuttlefish:optionally_tagged_binary("anonymous_login_pass", Conf)
end}.
%%
%% Default Policies
%% ====================
%%
{mapping, "default_policies.operator.$id.vhost_pattern", "rabbit.default_policies.operator", [
{validators, ["valid_regex"]},
{datatype, string}
]}.
{mapping, "default_policies.operator.$id.queue_pattern", "rabbit.default_policies.operator", [
{validators, ["valid_regex"]},
{datatype, string}
]}.
{mapping, "default_policies.operator.$id.expires", "rabbit.default_policies.operator", [
{datatype, {duration, ms}}
]}.
{mapping, "default_policies.operator.$id.message_ttl", "rabbit.default_policies.operator", [
{datatype, {duration, ms}}
]}.
{mapping, "default_policies.operator.$id.max_length", "rabbit.default_policies.operator", [
{validators, ["non_zero_positive_integer"]},
{datatype, integer}
]}.
{mapping, "default_policies.operator.$id.max_length_bytes", "rabbit.default_policies.operator", [
{validators, ["non_zero_positive_integer"]},
{datatype, bytesize}
]}.
{mapping, "default_policies.operator.$id.max_in_memory_bytes", "rabbit.default_policies.operator", [
{validators, ["non_zero_positive_integer"]},
{datatype, bytesize}
]}.
{mapping, "default_policies.operator.$id.max_in_memory_length", "rabbit.default_policies.operator",
[
{validators, ["non_zero_positive_integer"]},
{datatype, integer}
]}.
{mapping, "default_policies.operator.$id.delivery_limit", "rabbit.default_policies.operator", [
{validators, ["non_zero_positive_integer"]},
{datatype, integer}
]}.
{mapping, "default_policies.operator.$id.classic_queues.ha_mode", "rabbit.default_policies.operator", [
{datatype, string}
]}.
{mapping, "default_policies.operator.$id.classic_queues.ha_params", "rabbit.default_policies.operator", [
{datatype, [integer, {list, string}]}
]}.
{mapping, "default_policies.operator.$id.classic_queues.ha_sync_mode", "rabbit.default_policies.operator", [
{datatype, string}
]}.
{mapping, "default_policies.operator.$id.classic_queues.queue_version", "rabbit.default_policies.operator",
[
{validators, ["non_zero_positive_integer"]},
{datatype, integer}
]}.
{translation, "rabbit.default_policies.operator", fun(Conf) ->
Props = rabbit_cuttlefish:aggregate_props(
Conf,
["default_policies", "operator"],
fun({["default_policies","operator",ID,"classic_queues"|T], V}) ->
NewV = case T of
["ha_sync_mode"] ->
list_to_binary(V);
["ha_mode"] ->
list_to_binary(V);
_ -> V
end,
{["default_policies","operator",ID|T], NewV};
({["default_policies","operator",ID, "queue_pattern"], V}) ->
{["default_policies","operator",ID,"queue_pattern"], list_to_binary(V)};
(E) -> E
end),
case Props of
[] -> cuttlefish:unset();
Props -> Props
end
end}.
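%% For example, to define an operator policy applied on first boot (the policy
%% name "maintenance" and all values are illustrative; rabbitmq.conf syntax):
%%
%% default_policies.operator.maintenance.vhost_pattern = .*
%% default_policies.operator.maintenance.queue_pattern = .*
%% default_policies.operator.maintenance.message_ttl   = 1h
%% default_policies.operator.maintenance.max_length    = 1000000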
%%
%% Default VHost Limits
%% ====================
%%
{mapping, "default_limits.vhosts.$id.pattern", "rabbit.default_limits.vhosts", [
{validators, ["valid_regex"]},
{datatype, string}
]}.
{mapping, "default_limits.vhosts.$id.max_connections", "rabbit.default_limits.vhosts", [
{validators, [ "non_zero_positive_integer"]},
{datatype, integer}
]}.
{mapping, "default_limits.vhosts.$id.max_queues", "rabbit.default_limits.vhosts", [
{validators, [ "non_zero_positive_integer"]},
{datatype, integer}
]}.
{translation, "rabbit.default_limits.vhosts", fun(Conf) ->
case rabbit_cuttlefish:aggregate_props(Conf, ["default_limits", "vhosts"]) of
[] -> cuttlefish:unset();
Props -> Props
end
end}.
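%% For example, to cap connections and queues for matching vhosts (the entry
%% id "1" and all values are illustrative; rabbitmq.conf syntax):
%%
%% default_limits.vhosts.1.pattern         = ^project-
%% default_limits.vhosts.1.max_connections = 64
%% default_limits.vhosts.1.max_queues      = 256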
%% Tags for default user
%%
%% For more details about tags, see the documentation for the
%% Management Plugin at https://www.rabbitmq.com/docs/management.
%%
%% {default_user_tags, [administrator]},
{mapping, "default_user_tags.$tag", "rabbit.default_user_tags",
[{datatype, {enum, [true, false]}}]}.
{translation, "rabbit.default_user_tags",
fun(Conf) ->
Settings = cuttlefish_variable:filter_by_prefix("default_user_tags", Conf),
[ list_to_atom(Key) || {[_,Key], Val} <- Settings, Val == true ]
end}.
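%% For example, to give the default user the administrator tag (rabbitmq.conf syntax):
%%
%% default_user_tags.administrator = true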
%%
%% Additional network and protocol related configuration
%% =====================================================
%%
%% Set the default connection heartbeat timeout (in seconds).
%%
%% {heartbeat, 600},
{mapping, "heartbeat", "rabbit.heartbeat", [{datatype, integer}]}.
%% Set the max permissible size of an AMQP 0-9-1 frame (in bytes).
%%
%% {frame_max, 131072},
{mapping, "frame_max", "rabbit.frame_max", [{datatype, bytesize}]}.
%% Set the max frame size the server will accept before connection
%% tuning starts
%%
%% {initial_frame_max, 4096},
{mapping, "initial_frame_max", "rabbit.initial_frame_max", [{datatype, bytesize}]}.
%% Set the max permissible number of channels per connection.
%% 0 means "no limit".
%%
%% {channel_max, 0},
{mapping, "channel_max", "rabbit.channel_max", [{datatype, integer}]}.
{mapping, "channel_max_per_node", "rabbit.channel_max_per_node",
[{datatype, [{atom, infinity}, integer]}]}.
{translation, "rabbit.channel_max_per_node",
fun(Conf) ->
case cuttlefish:conf_get("channel_max_per_node", Conf, undefined) of
undefined -> cuttlefish:unset();
infinity -> infinity;
Val when is_integer(Val) andalso Val > 0 -> Val;
_ -> cuttlefish:invalid("should be a positive integer or 'infinity'")
end
end
}.
%% Set the max allowed number of consumers per channel.
%% `infinity` means "no limit".
%%
%% {consumer_max_per_channel, infinity},
{mapping, "consumer_max_per_channel", "rabbit.consumer_max_per_channel",
[{datatype, [{atom, infinity}, integer]}]}.
{translation, "rabbit.consumer_max_per_channel",
fun(Conf) ->
case cuttlefish:conf_get("consumer_max_per_channel", Conf, undefined) of
undefined -> cuttlefish:unset();
infinity -> infinity;
Val when is_integer(Val) andalso Val > 0 -> Val;
_ -> cuttlefish:invalid("should be a positive integer or 'infinity'")
end
end
}.
%% Sets the maximum number of AMQP 1.0 sessions that can be simultaneously
%% active on an AMQP 1.0 connection.
%%
%% {session_max_per_connection, 1},
{mapping, "session_max_per_connection", "rabbit.session_max_per_connection",
[{datatype, integer}, {validators, ["positive_16_bit_unsigned_integer"]}]}.
%% Sets the maximum number of AMQP 1.0 links that can be simultaneously
%% active on an AMQP 1.0 session.
%%
%% {link_max_per_session, 10},
{mapping, "link_max_per_session", "rabbit.link_max_per_session",
[{datatype, integer}, {validators, ["positive_32_bit_unsigned_integer"]}]}.
%% Set the max permissible number of client connections per node.
%% `infinity` means "no limit".
%%
%% {connection_max, infinity},
{mapping, "connection_max", "rabbit.connection_max",
[{datatype, [{atom, infinity}, integer]}]}.
{translation, "rabbit.connection_max",
fun(Conf) ->
case cuttlefish:conf_get("connection_max", Conf, undefined) of
undefined -> cuttlefish:unset();
infinity -> infinity;
Val when is_integer(Val) andalso Val >= 0 -> Val;
_ -> cuttlefish:invalid("should be a non-negative integer")
end
end
}.
{mapping, "ranch_connection_max", "rabbit.ranch_connection_max",
[{datatype, [{atom, infinity}, integer]}]}.
{translation, "rabbit.ranch_connection_max",
fun(Conf) ->
case cuttlefish:conf_get("ranch_connection_max", Conf, undefined) of
undefined -> cuttlefish:unset();
infinity -> infinity;
Val when is_integer(Val) andalso Val >= 0 -> Val;
_ -> cuttlefish:invalid("should be a non-negative integer")
end
end
}.
{mapping, "vhost_max", "rabbit.vhost_max",
[{datatype, [{atom, infinity}, integer]}, {validators, ["non_negative_integer"]}]}.
{translation, "rabbit.vhost_max",
fun(Conf) ->
case cuttlefish:conf_get("vhost_max", Conf, undefined) of
undefined -> cuttlefish:unset();
infinity -> infinity;
Val when is_integer(Val) -> Val;
_ -> cuttlefish:invalid("should be a non-negative integer")
end
end
}.
{mapping, "max_message_size", "rabbit.max_message_size",
[{datatype, integer}, {validators, ["max_message_size"]}]}.
%% Customising Socket Options.
%%
%% See (https://www.erlang.org/doc/man/inet.html#setopts-2) for
%% further documentation.
%%
%% {tcp_listen_options, [{backlog, 128},
%% {nodelay, true},
%% {exit_on_close, false}]},
%% TCP listener section ======================================================
{mapping, "tcp_listen_options", "rabbit.tcp_listen_options", [
{datatype, {enum, [none]}}]}.
{translation, "rabbit.tcp_listen_options",
fun(Conf) ->
case cuttlefish:conf_get("tcp_listen_options", Conf, undefined) of
none -> [];
_ -> cuttlefish:invalid("Invalid tcp_listen_options")
end
end}.
{mapping, "tcp_listen_options.backlog", "rabbit.tcp_listen_options.backlog", [
{datatype, integer}
]}.
{mapping, "tcp_listen_options.nodelay", "rabbit.tcp_listen_options.nodelay", [
{datatype, {enum, [true, false]}}
]}.
{mapping, "tcp_listen_options.buffer", "rabbit.tcp_listen_options.buffer",
[{datatype, integer}]}.
{mapping, "tcp_listen_options.delay_send", "rabbit.tcp_listen_options.delay_send",
[{datatype, {enum, [true, false]}}]}.
{mapping, "tcp_listen_options.dontroute", "rabbit.tcp_listen_options.dontroute",
[{datatype, {enum, [true, false]}}]}.
{mapping, "tcp_listen_options.exit_on_close", "rabbit.tcp_listen_options.exit_on_close",
[{datatype, {enum, [true, false]}}]}.
{mapping, "tcp_listen_options.fd", "rabbit.tcp_listen_options.fd",
[{datatype, integer}]}.
{mapping, "tcp_listen_options.high_msgq_watermark", "rabbit.tcp_listen_options.high_msgq_watermark",
[{datatype, integer}]}.
{mapping, "tcp_listen_options.high_watermark", "rabbit.tcp_listen_options.high_watermark",
[{datatype, integer}]}.
{mapping, "tcp_listen_options.keepalive", "rabbit.tcp_listen_options.keepalive",
[{datatype, {enum, [true, false]}}]}.
{mapping, "tcp_listen_options.low_msgq_watermark", "rabbit.tcp_listen_options.low_msgq_watermark",
[{datatype, integer}]}.
{mapping, "tcp_listen_options.low_watermark", "rabbit.tcp_listen_options.low_watermark",
[{datatype, integer}]}.
{mapping, "tcp_listen_options.port", "rabbit.tcp_listen_options.port",
[{datatype, integer}, {validators, ["port"]}]}.
{mapping, "tcp_listen_options.priority", "rabbit.tcp_listen_options.priority",
[{datatype, integer}]}.
{mapping, "tcp_listen_options.recbuf", "rabbit.tcp_listen_options.recbuf",
[{datatype, integer}]}.
{mapping, "tcp_listen_options.send_timeout", "rabbit.tcp_listen_options.send_timeout",
[{datatype, integer}]}.
{mapping, "tcp_listen_options.send_timeout_close", "rabbit.tcp_listen_options.send_timeout_close",
[{datatype, {enum, [true, false]}}]}.
{mapping, "tcp_listen_options.sndbuf", "rabbit.tcp_listen_options.sndbuf",
[{datatype, integer}]}.
{mapping, "tcp_listen_options.tos", "rabbit.tcp_listen_options.tos",
[{datatype, integer}]}.
{mapping, "tcp_listen_options.linger.on", "rabbit.tcp_listen_options.linger",
[{datatype, {enum, [true, false]}}]}.
{mapping, "tcp_listen_options.linger.timeout", "rabbit.tcp_listen_options.linger",
[{datatype, integer}, {validators, ["non_negative_integer"]}]}.
{translation, "rabbit.tcp_listen_options.linger",
fun(Conf) ->
LingerOn = cuttlefish:conf_get("tcp_listen_options.linger.on", Conf, false),
LingerTimeout = cuttlefish:conf_get("tcp_listen_options.linger.timeout", Conf, 0),
{LingerOn, LingerTimeout}
end}.
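%% For example, the classic tcp_listen_options example above expressed in
%% rabbitmq.conf syntax:
%%
%% tcp_listen_options.backlog       = 128
%% tcp_listen_options.nodelay       = true
%% tcp_listen_options.exit_on_close = false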
%% ==========================================================================
%%
%% Resource Limits & Flow Control
%% ==============================
%%
%% See https://www.rabbitmq.com/docs/memory for full details.
%% Memory-based Flow Control threshold.
%%
%% {vm_memory_high_watermark, 0.6},
%% Alternatively, we can set a limit (in bytes) of RAM used by the node.
%%
%% {vm_memory_high_watermark, {absolute, 1073741824}},
%%
%% Or you can set an absolute value using memory unit symbols (with RabbitMQ 3.6.0+).
%%
%% {vm_memory_high_watermark, {absolute, "1024M"}},
%%
%% Supported unit symbols:
%%
%% k, kiB: kibibytes (2^10 = 1,024 bytes)
%% M, MiB: mebibytes (2^20 = 1,048,576 bytes)
%% G, GiB: gibibytes (2^30 = 1,073,741,824 bytes)
%% kB: kilobytes (10^3 = 1,000 bytes)
%% MB: megabytes (10^6 = 1,000,000 bytes)
%% GB: gigabytes (10^9 = 1,000,000,000 bytes)
{mapping, "vm_memory_high_watermark.relative", "rabbit.vm_memory_high_watermark", [
{datatype, float}
]}.
{mapping, "vm_memory_high_watermark.absolute", "rabbit.vm_memory_high_watermark", [
{datatype, [integer, string]},
{validators, ["is_supported_information_unit"]}
]}.
{translation, "rabbit.vm_memory_high_watermark",
fun(Conf) ->
Settings = cuttlefish_variable:filter_by_prefix("vm_memory_high_watermark", Conf),
Absolute = proplists:get_value(["vm_memory_high_watermark", "absolute"], Settings),
Relative = proplists:get_value(["vm_memory_high_watermark", "relative"], Settings),
case {Absolute, Relative} of
{undefined, undefined} -> cuttlefish:invalid("No vm watermark defined");
{_, undefined} -> {absolute, Absolute};
{undefined, _} -> Relative;
_ ->
cuttlefish:warn("Both vm_memory_high_watermark.absolute and "
"vm_memory_high_watermark.relative are configured. "
"vm_memory_high_watermark.absolute has precedence"),
{absolute, Absolute}
end
end}.
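%% For example, in rabbitmq.conf syntax (set only one of the two; as noted
%% above, the absolute limit takes precedence if both are configured):
%%
%% vm_memory_high_watermark.relative = 0.6
%% vm_memory_high_watermark.absolute = 4GiB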
%% DEPRECATED. Not used since RabbitMQ 4.0
%%
%% Fraction of the high watermark limit at which queues start to
%% page message out to disc in order to free up memory.
%%
%% Values greater than 0.9 can be dangerous and should be used carefully.
%%
%% {vm_memory_high_watermark_paging_ratio, 0.5},
{mapping, "vm_memory_high_watermark_paging_ratio",
"rabbit.vm_memory_high_watermark_paging_ratio",
[{datatype, float}, {validators, ["less_than_1"]}]}.
%% DEPRECATED. Not used since RabbitMQ 4.0
%%
%% Interval (in milliseconds) at which we perform the check of the memory
%% levels against the watermarks.
%%
%% {memory_monitor_interval, 2500},
{mapping, "memory_monitor_interval", "rabbit.memory_monitor_interval",
[{datatype, integer}]}.
%% Selects Erlang VM memory consumption calculation strategy.
%% Can be `allocated`, `rss` or `legacy` (aliased as `erlang`).
%%
%% {vm_memory_calculation_strategy, rss},
{mapping, "vm_memory_calculation_strategy", "rabbit.vm_memory_calculation_strategy",
[{datatype, {enum, [rss, erlang, allocated, legacy]}}]}.
%% The total memory available can be calculated from OS resources (the default)
%% or provided explicitly via this configuration parameter.
{mapping, "total_memory_available_override_value", "rabbit.total_memory_available_override_value", [
{datatype, [integer, string]}]}.
%% Set disk free limit (in bytes). Once free disk space reaches this
%% lower bound, a disk alarm will be set - see the documentation
%% listed above for more details.
%%
%% {disk_free_limit, 50000000},
%%
%% Or you can set it using memory units (same as in vm_memory_high_watermark)
%% {disk_free_limit, "50MB"},
%% {disk_free_limit, "50000kB"},
%% {disk_free_limit, "2GB"},
%% Alternatively, we can set a limit relative to total available RAM.
%%
%% Values lower than 1.0 can be dangerous and should be used carefully.
%% {disk_free_limit, {mem_relative, 2.0}},
{mapping, "disk_free_limit.relative", "rabbit.disk_free_limit", [
{datatype, float}]}.
{mapping, "disk_free_limit.absolute", "rabbit.disk_free_limit", [
{datatype, [integer, string]},
{validators, ["is_supported_information_unit"]}
]}.
{translation, "rabbit.disk_free_limit",
fun(Conf) ->
Settings = cuttlefish_variable:filter_by_prefix("disk_free_limit", Conf),
Absolute = proplists:get_value(["disk_free_limit", "absolute"], Settings),
Relative = proplists:get_value(["disk_free_limit", "relative"], Settings),
case {Absolute, Relative} of
{undefined, undefined} -> cuttlefish:invalid("No disk limit defined");
{_, undefined} -> Absolute;
{undefined, _} -> {mem_relative, Relative};
_ ->
cuttlefish:warn("Both disk_free_limit.absolute and "
"disk_free_limit.relative are configured. "
"disk_free_limit.absolute has precedence"),
Absolute
end
end}.
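%% For example, in rabbitmq.conf syntax (set only one of the two; the
%% absolute limit takes precedence if both are configured):
%%
%% disk_free_limit.absolute = 2GB
%% disk_free_limit.relative = 1.5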
%%
%% Clustering
%% =====================
%%
%% How to respond to cluster partitions.
%% See https://www.rabbitmq.com/docs/partitions for further details.
%%
%% {cluster_partition_handling, ignore},
{mapping, "cluster_partition_handling", "rabbit.cluster_partition_handling",
[{datatype, {enum, [ignore, pause_minority, autoheal, pause_if_all_down]}}]}.
{mapping, "cluster_partition_handling.pause_if_all_down.recover",
"rabbit.cluster_partition_handling",
[{datatype, {enum, [ignore, autoheal]}}]}.
{mapping, "cluster_partition_handling.pause_if_all_down.nodes.$name",
"rabbit.cluster_partition_handling",
[{datatype, atom}]}.
{translation, "rabbit.cluster_partition_handling",
fun(Conf) ->
case cuttlefish:conf_get("cluster_partition_handling", Conf) of
pause_if_all_down ->
PauseIfAllDownNodes = cuttlefish_variable:filter_by_prefix(
"cluster_partition_handling.pause_if_all_down.nodes",
Conf),
case PauseIfAllDownNodes of
[] ->
cuttlefish:invalid("Nodes required for pause_if_all_down");
_ ->
Nodes = [ V || {K,V} <- PauseIfAllDownNodes ],
PauseIfAllDownRecover = cuttlefish:conf_get(
"cluster_partition_handling.pause_if_all_down.recover",
Conf),
case PauseIfAllDownRecover of
Recover when Recover == ignore; Recover == autoheal ->
{pause_if_all_down, Nodes, Recover};
Invalid ->
cuttlefish:invalid("Recover strategy required for pause_if_all_down")
end
end;
Other -> Other
end
end}.
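%% For example, in rabbitmq.conf syntax (node names are illustrative):
%%
%% cluster_partition_handling = pause_if_all_down
%% cluster_partition_handling.pause_if_all_down.recover = autoheal
%% cluster_partition_handling.pause_if_all_down.nodes.1 = rabbit@host1
%% cluster_partition_handling.pause_if_all_down.nodes.2 = rabbit@host2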
%% Number of delegate processes to use for intra-cluster
%% communication. On a machine which has a very large number of cores
%% and is also part of a cluster, you may wish to increase this value.
%%
{mapping, "delegate_count", "rabbit.delegate_count", [
{datatype, integer}, {validators, ["non_negative_integer"]}
]}.
%% Mirror sync batch size, in messages. Increasing this will speed
%% up syncing, but the total batch size in bytes must not exceed 2 GiB.
%% Available in RabbitMQ 3.6.0 or later.
%%
%% {mirroring_sync_batch_size, 4096},
{mapping, "mirroring_sync_batch_size", "rabbit.mirroring_sync_batch_size",
[{datatype, bytesize}, {validators, ["mirroring_sync_batch_size"]}]}.
%% Mirror sync max throughput (in bytes) per second.
%% Supported unit symbols:
%% k, kiB: kibibytes (2^10 = 1,024 bytes)
%% M, MiB: mebibytes (2^20 = 1,048,576 bytes)
%% G, GiB: gibibytes (2^30 = 1,073,741,824 bytes)
%% kB: kilobytes (10^3 = 1,000 bytes)
%% MB: megabytes (10^6 = 1,000,000 bytes)
%% GB: gigabytes (10^9 = 1,000,000,000 bytes)
%%
%% 0 means "no limit".
%%
%% {mirroring_sync_max_throughput, 0},
{mapping, "mirroring_sync_max_throughput", "rabbit.mirroring_sync_max_throughput", [
{datatype, [integer, string]}
]}.
%% Peer discovery backend used by cluster formation.
%%
{mapping, "cluster_formation.peer_discovery_backend", "rabbit.cluster_formation.peer_discovery_backend", [
{datatype, atom}
]}.
{translation, "rabbit.cluster_formation.peer_discovery_backend",
fun(Conf) ->
case cuttlefish:conf_get("cluster_formation.peer_discovery_backend", Conf, rabbit_peer_discovery_classic_config) of
classic_config -> rabbit_peer_discovery_classic_config;
classic -> rabbit_peer_discovery_classic_config;
config -> rabbit_peer_discovery_classic_config;
dns -> rabbit_peer_discovery_dns;
aws -> rabbit_peer_discovery_aws;
consul -> rabbit_peer_discovery_consul;
etcd -> rabbit_peer_discovery_etcd;
kubernetes -> rabbit_peer_discovery_k8s;
k8s -> rabbit_peer_discovery_k8s;
Module -> Module
end
end}.
%% Own node type, used by cluster formation.
%%
{mapping, "cluster_formation.node_type", "rabbit.cluster_formation.node_type", [
{datatype, {enum, [disc, disk, ram]}}
]}.
{translation, "rabbit.cluster_formation.node_type",
fun(Conf) ->
%% if peer discovery backend isn't configured, don't generate
%% node type
case cuttlefish:conf_get("cluster_formation.peer_discovery_backend", Conf, undefined) of
undefined -> cuttlefish:unset();
_Backend ->
case cuttlefish:conf_get("cluster_formation.node_type", Conf) of
disc -> disc;
%% always cast to `disc`
disk -> disc;
ram -> ram;
_Other -> disc
end
end
end}.
%% Register node during cluster formation when backend supports registration.
%%
{mapping, "cluster_formation.registration.enabled", "rabbit.cluster_formation.perform_registration", [
{datatype, {enum, [true, false]}}
]}.
%% Cluster formation: lock acquisition retries as passed to https://erlang.org/doc/man/global.html#set_lock-3
%%
%% Currently used in classic, k8s, and aws peer discovery backends.
{mapping, "cluster_formation.internal_lock_retries", "rabbit.cluster_formation.internal_lock_retries",
[
{datatype, integer},
{validators, ["non_zero_positive_integer"]}
]}.
%% Cluster formation: lock acquisition retry limit and timeout
{mapping, "cluster_formation.lock_retry_limit", "rabbit.cluster_formation.lock_retry_limit",
[
{datatype, integer},
{validators, ["non_zero_positive_integer"]}
]}.
{mapping, "cluster_formation.lock_retry_timeout", "rabbit.cluster_formation.lock_retry_timeout",
[
{datatype, integer},
{validators, ["non_zero_positive_integer"]}
]}.
{mapping, "cluster_formation.discovery_retry_limit", "rabbit.cluster_formation.discovery_retry_limit",
[{datatype, [{atom, unlimited}, integer]}]}.
{translation, "rabbit.cluster_formation.discovery_retry_limit",
fun(Conf) ->
case cuttlefish:conf_get("cluster_formation.discovery_retry_limit", Conf, undefined) of
undefined -> cuttlefish:unset();
unlimited -> unlimited;
Val when is_integer(Val) andalso Val > 0 -> Val;
_ -> cuttlefish:invalid("should be a positive integer or 'unlimited'")
end
end
}.
{mapping, "cluster_formation.discovery_retry_interval", "rabbit.cluster_formation.discovery_retry_interval",
[
{datatype, integer},
{validators, ["non_zero_positive_integer"]}
]}.
%% The target cluster size hint may be used by certain core features or plugins to perform
%% actions that should only happen once a certain number of nodes (or a quorum of that
%% number) has already joined (started).
%%
{mapping, "cluster_formation.target_cluster_size_hint", "rabbit.cluster_formation.target_cluster_size_hint", [
{datatype, integer},
{validators, ["non_zero_positive_integer"]}
]}.
%% Classic config-driven peer discovery backend.
%%
%% Make clustering happen *automatically* at startup - only applied
%% to nodes that have just been reset or started for the first time.
%% See https://www.rabbitmq.com/docs/clustering#auto-config for
%% further details.
%%
%% {cluster_nodes, {['rabbit@my.host.com'], disc}},
{mapping, "cluster_formation.classic_config.nodes.$node", "rabbit.cluster_nodes",
[{datatype, atom}]}.
{translation, "rabbit.cluster_nodes",
fun(Conf) ->
Nodes = [V || {_, V} <- cuttlefish_variable:filter_by_prefix("cluster_formation.classic_config.nodes", Conf)],
case Nodes of
[] -> cuttlefish:unset();
Other ->
case cuttlefish:conf_get("cluster_formation.node_type", Conf, disc) of
disc -> {Other, disc};
%% Always cast to `disc`
disk -> {Other, disc};
ram -> {Other, ram}
end
end
end}.
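%% For example, the classic cluster_nodes example above expressed in
%% rabbitmq.conf syntax (host names are illustrative):
%%
%% cluster_formation.peer_discovery_backend = classic_config
%% cluster_formation.classic_config.nodes.1 = rabbit@hostname1
%% cluster_formation.classic_config.nodes.2 = rabbit@hostname2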
%% DNS (A records and reverse lookups)-based peer discovery.
%%
{mapping, "cluster_formation.dns.hostname", "rabbit.cluster_formation.peer_discovery_dns.hostname",
[{datatype, string}]}.
{translation, "rabbit.cluster_formation.peer_discovery_dns.hostname",
fun(Conf) ->
case cuttlefish:conf_get("cluster_formation.dns.hostname", Conf, undefined) of
undefined -> cuttlefish:unset();
Value -> list_to_binary(Value)
end
end}.
{mapping, "cluster_queue_limit", "rabbit.cluster_queue_limit",
[{datatype, [{atom, infinity}, integer]}]}.
{translation, "rabbit.cluster_queue_limit",
fun(Conf) ->
case cuttlefish:conf_get("cluster_queue_limit", Conf, undefined) of
undefined -> cuttlefish:unset();
infinity -> infinity;
Val when is_integer(Val) andalso Val > 0 -> Val;
_ -> cuttlefish:invalid("should be a positive integer or 'infinity'")
end
end
}.
%% Interval (in milliseconds) at which we send keepalive messages
%% to other cluster members. Note that this is not the same thing
%% as net_ticktime; missed keepalive messages will not cause nodes
%% to be considered down.
%%
%% {cluster_keepalive_interval, 10000},
{mapping, "cluster_keepalive_interval", "rabbit.cluster_keepalive_interval",
[{datatype, integer}]}.
%% Queue master locator (classic queues)
%%
%% Kept for backwards compatibility only as of 4.0.
%% The values min-masters, random and client-local are still accepted,
%% but the effective behaviour is either local or balanced.
%% Use queue_leader_locator instead.
{mapping, "queue_master_locator", "rabbit.queue_master_locator",
[{datatype, string}]}.
{translation, "rabbit.queue_master_locator",
fun(Conf) ->
list_to_binary(cuttlefish:conf_get("queue_master_locator", Conf))
end}.
%% Queue leader locator (quorum queues and streams)
%%
{mapping, "queue_leader_locator", "rabbit.queue_leader_locator",
[{datatype, string}]}.
{translation, "rabbit.queue_leader_locator",
fun(Conf) ->
list_to_binary(cuttlefish:conf_get("queue_leader_locator", Conf))
end}.
%%
%% Statistics Collection
%% =====================
%%
%% Set (internal) statistics collection granularity.
%%
%% {collect_statistics, none},
{mapping, "collect_statistics", "rabbit.collect_statistics",
[{datatype, {enum, [none, coarse, fine]}}]}.
%% Statistics collection interval (in milliseconds). Increasing
%% this will reduce the load on the management database.
%%
%% {collect_statistics_interval, 5000},
{mapping, "collect_statistics_interval", "rabbit.collect_statistics_interval",
[{datatype, integer}]}.
%%
%% Misc/Advanced Options
%% =====================
%%
%% NB: Change these only if you understand what you are doing!
%%
%% Explicitly enable/disable hipe compilation.
%%
%% {hipe_compile, true},
%%
%% DEPRECATED: this is a no-op and is kept only to allow old configs.
{mapping, "hipe_compile", "rabbit.hipe_compile",
[{datatype, {enum, [true, false]}}]}.
%% Timeout used when waiting for Mnesia tables in a cluster to
%% become available.
%%
%% {mnesia_table_loading_retry_timeout, 30000},
{mapping, "mnesia_table_loading_retry_timeout", "rabbit.mnesia_table_loading_retry_timeout",
[{datatype, integer}]}.
%% Retries when waiting for Mnesia tables during cluster startup. Note that
%% this setting is not applied to Mnesia upgrades or node deletions.
%%
%% {mnesia_table_loading_retry_limit, 10},
{mapping, "mnesia_table_loading_retry_limit", "rabbit.mnesia_table_loading_retry_limit",
[{datatype, integer}]}.
{mapping, "message_store_shutdown_timeout", "rabbit.msg_store_shutdown_timeout",
[
{datatype, integer},
{validators, ["non_zero_positive_integer"]}
]}.
%% Size in bytes below which to embed messages in the queue index. See
%% https://www.rabbitmq.com/docs/persistence-conf
%%
%% {queue_index_embed_msgs_below, 4096}
{mapping, "queue_index_embed_msgs_below", "rabbit.queue_index_embed_msgs_below",
[{datatype, bytesize}]}.
%% Whether or not to enable background GC.
%%
%% {background_gc_enabled, true}
{mapping, "background_gc_enabled", "rabbit.background_gc_enabled",
[{datatype, {enum, [true, false]}}]}.
%% Interval (in milliseconds) at which we run background GC.
%%
%% {background_gc_target_interval, 60000}
{mapping, "background_gc_target_interval", "rabbit.background_gc_target_interval",
[{datatype, integer}]}.
%% Whether or not to enable proxy protocol support.
%%
%% {proxy_protocol, false}
{mapping, "proxy_protocol", "rabbit.proxy_protocol",
[{datatype, {enum, [true, false]}}]}.
%% Whether to stop the rabbit application if a vhost has
%% to terminate for any reason.
{mapping, "vhost_restart_strategy", "rabbit.vhost_restart_strategy",
[{datatype, {enum, [stop_node, continue, transient, persistent]}}]}.
%% Approximate maximum time a consumer can spend processing a message before
%% the channel is terminated, in milliseconds.
%%
%% {consumer_timeout, 1800000},
{mapping, "consumer_timeout", "rabbit.consumer_timeout", [
{datatype, integer},
{validators, ["non_zero_positive_integer"]}
]}.
%% Product name & version overrides.
{mapping, "product.name", "rabbit.product_name", [
{datatype, string}
]}.
{mapping, "product.version", "rabbit.product_version", [
{datatype, string}
]}.
%% Message of the day file.
%% The content of that file is added to the banners, both logged and
%% printed.
{mapping, "motd_file", "rabbit.motd_file", [
{datatype, string}
]}.
% ==========================
% Logging section
% ==========================
{mapping, "log.dir", "rabbit.log_root", [
{datatype, string},
{validators, ["dir_writable"]}]}.
{mapping, "log.console", "rabbit.log.console.enabled", [
{datatype, {enum, [true, false]}}
]}.
{mapping, "log.console.level", "rabbit.log.console.level", [
{datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
]}.
{mapping, "log.console.stdio", "rabbit.log.console.stdio", [
{default, stdout},
{datatype, {enum, [stdout, stderr]}}
]}.
{mapping, "log.console.use_colors", "rabbit.log.console.formatter", [
{default, on},
{datatype, flag}
]}.
{mapping, "log.console.color_esc_seqs.debug", "rabbit.log.console.formatter", [
{default, "\033[38;5;246m"},
{datatype, string}
]}.
{mapping, "log.console.color_esc_seqs.info", "rabbit.log.console.formatter", [
{default, ""},
{datatype, string}
]}.
{mapping, "log.console.color_esc_seqs.notice", "rabbit.log.console.formatter", [
{default, "\033[38;5;87m"},
{datatype, string}
]}.
{mapping, "log.console.color_esc_seqs.warning", "rabbit.log.console.formatter", [
{default, "\033[38;5;214m"},
{datatype, string}
]}.
{mapping, "log.console.color_esc_seqs.error", "rabbit.log.console.formatter", [
{default, "\033[38;5;160m"},
{datatype, string}
]}.
{mapping, "log.console.color_esc_seqs.critical", "rabbit.log.console.formatter", [
{default, "\033[1;37m\033[48;5;20m"},
{datatype, string}
]}.
{mapping, "log.console.color_esc_seqs.alert", "rabbit.log.console.formatter", [
{default, "\033[1;37m\033[48;5;93m"},
{datatype, string}
]}.
{mapping, "log.console.color_esc_seqs.emergency", "rabbit.log.console.formatter", [
{default, "\033[1;37m\033[48;5;196m"},
{datatype, string}
]}.
{mapping, "log.console.formatter", "rabbit.log.console.formatter", [
{default, plaintext},
{datatype, {enum, [plaintext, json]}}
]}.
{mapping, "log.console.formatter.time_format", "rabbit.log.console.formatter", [
{default, rfc3339_space},
{datatype, {enum, [rfc3339_space, rfc3339_T, epoch_usecs, epoch_secs, lager_default]}}
]}.
{mapping, "log.console.formatter.level_format", "rabbit.log.console.formatter", [
{default, lc},
{datatype, {enum, [lc, uc, lc3, uc3, lc4, uc4]}}
]}.
{mapping, "log.console.formatter.single_line", "rabbit.log.console.formatter", [
{default, off},
{datatype, flag}
]}.
{mapping, "log.console.formatter.plaintext.format", "rabbit.log.console.formatter", [
{default, "$time [$level] $pid $msg"},
{datatype, string}
]}.
{mapping, "log.console.formatter.json.field_map", "rabbit.log.console.formatter", [
{default, "time level msg gl:- logger_formatter:- report_cb:- error_logger:-"},
{datatype, string}
]}.
{mapping, "log.console.formatter.json.verbosity_map", "rabbit.log.console.formatter", [
{default, ""},
{datatype, string}
]}.
{translation, "rabbit.log.console.formatter",
fun(Conf) ->
rabbit_prelaunch_early_logging:translate_formatter_conf("log.console.formatter", Conf)
end}.
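%% For example, to log to standard output in JSON at debug level
%% (rabbitmq.conf syntax; illustrative values):
%%
%% log.console           = true
%% log.console.level     = debug
%% log.console.formatter = json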
{mapping, "log.exchange", "rabbit.log.exchange.enabled", [
{datatype, {enum, [true, false]}}
]}.
{mapping, "log.exchange.level", "rabbit.log.exchange.level", [
{datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
]}.
{mapping, "log.exchange.formatter", "rabbit.log.exchange.formatter", [
{default, plaintext},
{datatype, {enum, [plaintext, json]}}
]}.
{mapping, "log.exchange.formatter.time_format", "rabbit.log.console.formatter", [
{default, rfc3339_space},
{datatype, [{enum, [rfc3339_space, rfc3339_T, epoch_usecs, epoch_secs, lager_default]}, string]}
]}.
{mapping, "log.exchange.formatter.level_format", "rabbit.log.exchange.formatter", [
{default, lc},
{datatype, {enum, [lc, uc, lc3, uc3, lc4, uc4]}}
]}.
{mapping, "log.exchange.formatter.single_line", "rabbit.log.exchange.formatter", [
{default, off},
{datatype, flag}
]}.
{mapping, "log.exchange.formatter.plaintext.format", "rabbit.log.exchange.formatter", [
{default, "$time [$level] $pid $msg"},
{datatype, string}
]}.
{mapping, "log.exchange.formatter.json.field_map", "rabbit.log.exchange.formatter", [
{default, "time level msg gl:- logger_formatter:- report_cb:- error_logger:-"},
{datatype, string}
]}.
{mapping, "log.exchange.formatter.json.verbosity_map", "rabbit.log.exchange.formatter", [
{default, ""},
{datatype, string}
]}.
{translation, "rabbit.log.exchange.formatter",
fun(Conf) ->
rabbit_prelaunch_early_logging:translate_formatter_conf("log.exchange.formatter", Conf)
end}.
{mapping, "log.journald", "rabbit.log.journald.enabled", [
{datatype, {enum, [true, false]}}
]}.
{mapping, "log.journald.level", "rabbit.log.journald.level", [
{datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
]}.
{mapping, "log.journald.fields", "rabbit.log.journald.fields", [
{default, "SYSLOG_IDENTIFIER=\"rabbitmq-server\" syslog_timestamp syslog_pid priority ERL_PID=pid CODE_FILE=file CODE_LINE=line CODE_MFA=mfa"},
{datatype, string}
]}.
{translation, "rabbit.log.journald.fields",
fun(Conf) ->
rabbit_prelaunch_early_logging:translate_journald_fields_conf("log.journald.fields", Conf)
end}.
{mapping, "log.syslog", "rabbit.log.syslog.enabled", [
{datatype, {enum, [true, false]}}
]}.
{mapping, "log.syslog.level", "rabbit.log.syslog.level", [
{datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
]}.
{mapping, "log.syslog.formatter", "rabbit.log.syslog.formatter", [
{default, plaintext},
{datatype, {enum, [plaintext, json]}}
]}.
{mapping, "log.syslog.formatter.time_format", "rabbit.log.console.formatter", [
{default, rfc3339_space},
{datatype, [{enum, [rfc3339_space, rfc3339_T, epoch_usecs, epoch_secs, lager_default]}, string]}
]}.
{mapping, "log.syslog.formatter.level_format", "rabbit.log.syslog.formatter", [
{default, lc},
{datatype, {enum, [lc, uc, lc3, uc3, lc4, uc4]}}
]}.
{mapping, "log.syslog.formatter.single_line", "rabbit.log.syslog.formatter", [
{default, off},
{datatype, flag}
]}.
{mapping, "log.syslog.formatter.plaintext.format", "rabbit.log.syslog.formatter", [
{default, "$msg"},
{datatype, string}
]}.
{mapping, "log.syslog.formatter.json.field_map", "rabbit.log.syslog.formatter", [
{default, "time level msg gl:- logger_formatter:- report_cb:- error_logger:-"},
{datatype, string}
]}.
{mapping, "log.syslog.formatter.json.verbosity_map", "rabbit.log.syslog.formatter", [
{default, ""},
{datatype, string}
]}.
{translation, "rabbit.log.syslog.formatter",
fun(Conf) ->
rabbit_prelaunch_early_logging:translate_formatter_conf("log.syslog.formatter", Conf)
end}.
{mapping, "log.syslog.identity", "syslog.app_name", [
{datatype, string}
]}.
{mapping, "log.syslog.facility", "syslog.facility", [
{datatype, {enum, [kern, kernel, user, mail, daemon, auth, syslog, lpr,
news, uucp, cron, authpriv, ftp, ntp, audit, alert,
clock, local0, local1, local2, local3, local4,
local5, local6, local7]}}
]}.
{mapping, "log.syslog.multiline_mode", "syslog.multiline_mode", [
{datatype, {enum, [true, false]}}
]}.
{mapping, "log.syslog.ip", "syslog.dest_host", [
{datatype, string},
{validators, ["is_ip"]}
]}.
{mapping, "log.syslog.host", "syslog.dest_host", [
{datatype, string}
]}.
{translation, "syslog.dest_host",
fun(Conf) ->
case cuttlefish:conf_get("log.syslog", Conf) of
true ->
case cuttlefish:conf_get("log.syslog.ip", Conf, undefined) of
undefined ->
% If log.syslog.ip is not set, then this must be set
cuttlefish:conf_get("log.syslog.host", Conf);
IpAddr ->
IpAddr
end;
_ ->
cuttlefish:invalid("log.syslog must be set to true to set log.syslog.host or log.syslog.ip")
end
end}.
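%% For example (the host name below is a placeholder), forwarding logs to a
%% remote syslog endpoint requires log.syslog to be enabled first:
%% log.syslog = true
%% log.syslog.host = syslog.eng.example.local
%% log.syslog.port = 514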
{mapping, "log.syslog.port", "syslog.dest_port", [
{datatype, integer}
]}.
{mapping, "log.syslog.transport", "syslog.protocol", [
{datatype, {enum, [udp, tcp, tls, ssl]}}
]}.
{mapping, "log.syslog.protocol", "syslog.protocol", [
{datatype, {enum, [rfc3164, rfc5424]}}
]}.
{mapping, "log.syslog.ssl_options.verify", "syslog.protocol", [
{datatype, {enum, [verify_peer, verify_none]}}]}.
{mapping, "log.syslog.ssl_options.fail_if_no_peer_cert", "syslog.protocol", [
{datatype, {enum, [true, false]}}]}.
{mapping, "log.syslog.ssl_options.cacertfile", "syslog.protocol",
[{datatype, string}, {validators, ["file_accessible"]}]}.
{mapping, "log.syslog.ssl_options.certfile", "syslog.protocol",
[{datatype, string}, {validators, ["file_accessible"]}]}.
{mapping, "log.syslog.ssl_options.cacerts.$name", "syslog.protocol",
[{datatype, string}]}.
{mapping, "log.syslog.ssl_options.cert", "syslog.protocol",
[{datatype, string}]}.
{mapping, "log.syslog.ssl_options.client_renegotiation", "syslog.protocol",
[{datatype, {enum, [true, false]}}]}.
{mapping, "log.syslog.ssl_options.crl_check", "syslog.protocol",
[{datatype, [{enum, [true, false, peer, best_effort]}]}]}.
{mapping, "log.syslog.ssl_options.depth", "syslog.protocol",
[{datatype, integer}, {validators, ["byte"]}]}.
{mapping, "log.syslog.ssl_options.dh", "syslog.protocol",
[{datatype, string}]}.
{mapping, "log.syslog.ssl_options.dhfile", "syslog.protocol",
[{datatype, string}, {validators, ["file_accessible"]}]}.
{mapping, "log.syslog.ssl_options.honor_cipher_order", "syslog.protocol",
[{datatype, {enum, [true, false]}}]}.
{mapping, "log.syslog.ssl_options.honor_ecc_order", "syslog.protocol",
[{datatype, {enum, [true, false]}}]}.
{mapping, "log.syslog.ssl_options.key.RSAPrivateKey", "syslog.protocol",
[{datatype, string}]}.
{mapping, "log.syslog.ssl_options.key.DSAPrivateKey", "syslog.protocol",
[{datatype, string}]}.
{mapping, "log.syslog.ssl_options.key.PrivateKeyInfo", "syslog.protocol",
[{datatype, string}]}.
{mapping, "log.syslog.ssl_options.keyfile", "syslog.protocol",
[{datatype, string}, {validators, ["file_accessible"]}]}.
{mapping, "log.syslog.ssl_options.log_alert", "syslog.protocol",
[{datatype, {enum, [true, false]}}]}.
{mapping, "log.syslog.ssl_options.password", "syslog.protocol",
[{datatype, string}]}.
{mapping, "log.syslog.ssl_options.psk_identity", "syslog.protocol",
[{datatype, string}]}.
{mapping, "log.syslog.ssl_options.reuse_sessions", "syslog.protocol",
[{datatype, {enum, [true, false]}}]}.
{mapping, "log.syslog.ssl_options.secure_renegotiate", "syslog.protocol",
[{datatype, {enum, [true, false]}}]}.
{mapping, "log.syslog.ssl_options.versions.$version", "syslog.protocol",
[{datatype, atom}]}.
{translation, "syslog.protocol",
fun(Conf) ->
ParseSslOptions = fun() ->
RawSettings = [
{verify, cuttlefish:conf_get("log.syslog.ssl_options.verify", Conf, undefined)},
{fail_if_no_peer_cert, cuttlefish:conf_get("log.syslog.ssl_options.fail_if_no_peer_cert", Conf, undefined)},
{cacertfile, cuttlefish:conf_get("log.syslog.ssl_options.cacertfile", Conf, undefined)},
{certfile, cuttlefish:conf_get("log.syslog.ssl_options.certfile", Conf, undefined)},
{cert, cuttlefish:conf_get("log.syslog.ssl_options.cert", Conf, undefined)},
{client_renegotiation, cuttlefish:conf_get("log.syslog.ssl_options.client_renegotiation", Conf, undefined)},
{crl_check, cuttlefish:conf_get("log.syslog.ssl_options.crl_check", Conf, undefined)},
{depth, cuttlefish:conf_get("log.syslog.ssl_options.depth", Conf, undefined)},
{dh, cuttlefish:conf_get("log.syslog.ssl_options.dh", Conf, undefined)},
{dhfile, cuttlefish:conf_get("log.syslog.ssl_options.dhfile", Conf, undefined)},
{honor_cipher_order, cuttlefish:conf_get("log.syslog.ssl_options.honor_cipher_order", Conf, undefined)},
{honor_ecc_order, cuttlefish:conf_get("log.syslog.ssl_options.honor_ecc_order", Conf, undefined)},
{keyfile, cuttlefish:conf_get("log.syslog.ssl_options.keyfile", Conf, undefined)},
{log_alert, cuttlefish:conf_get("log.syslog.ssl_options.log_alert", Conf, undefined)},
{password, cuttlefish:conf_get("log.syslog.ssl_options.password", Conf, undefined)},
{psk_identity, cuttlefish:conf_get("log.syslog.ssl_options.psk_identity", Conf, undefined)},
{reuse_sessions, cuttlefish:conf_get("log.syslog.ssl_options.reuse_sessions", Conf, undefined)},
{secure_renegotiate, cuttlefish:conf_get("log.syslog.ssl_options.secure_renegotiate", Conf, undefined)}
],
DefinedSettings = [{K, V} || {K, V} <- RawSettings, V =/= undefined],
lists:map(
fun({K, Val}) when K == dh; K == cert -> {K, list_to_binary(Val)};
({K, Val}) -> {K, Val}
end,
DefinedSettings) ++
[ {K, V}
|| {K, V} <-
[{cacerts, [ list_to_binary(V) || {_, V} <- cuttlefish_variable:filter_by_prefix("log.syslog.ssl_options.cacerts", Conf)]},
{versions, [ V || {_, V} <- cuttlefish_variable:filter_by_prefix("log.syslog.ssl_options.versions", Conf) ]},
{key, case cuttlefish_variable:filter_by_prefix("log.syslog.ssl_options.key", Conf) of
[{[_,_,_,_,Key], Val}|_] -> {list_to_atom(Key), list_to_binary(Val)};
_ -> undefined
end}],
V =/= undefined,
V =/= []]
end,
Proto = cuttlefish:conf_get("log.syslog.protocol", Conf, undefined),
Transport = cuttlefish:conf_get("log.syslog.transport", Conf, udp),
case Transport of
TLS when TLS == tls; TLS == ssl ->
case Proto of
rfc3164 ->
cuttlefish:invalid("Syslog protocol rfc3164 is not compatible with TLS");
_ ->
{rfc5424, tls, ParseSslOptions()}
end;
_ when Transport == udp; Transport == tcp ->
case Proto of
undefined -> {rfc3164, Transport};
_ -> {Proto, Transport}
end;
_ -> cuttlefish:invalid("Invalid syslog transport ~p~n", [Transport])
end
end}.
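%% For example (file paths are placeholders), choosing the TLS transport
%% implies the RFC 5424 protocol; the translation above rejects rfc3164 over TLS:
%% log.syslog = true
%% log.syslog.transport = tls
%% log.syslog.ssl_options.cacertfile = /path/to/ca_certificate.pem
%% log.syslog.ssl_options.certfile = /path/to/client_certificate.pem
%% log.syslog.ssl_options.keyfile = /path/to/client_key.pem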
{mapping, "log.file", "rabbit.log.file.file", [
{datatype, [{enum, [false]}, string]}
]}.
{mapping, "log.file.level", "rabbit.log.file.level", [
{datatype,
{enum, ['=debug', debug,
info, '!=info',
notice, '<=notice',
'<warning', warning,
error,
critical,
alert,
emergency,
none]}}
]}.
{mapping, "log.file.rotation.date", "rabbit.log.file.date", [
{datatype, string}
]}.
{mapping, "log.file.rotation.compress", "rabbit.log.file.compress", [
{default, false},
{datatype, {enum, [true, false]}}
]}.
{mapping, "log.file.rotation.size", "rabbit.log.file.size", [
{datatype, integer}
]}.
{mapping, "log.file.rotation.count", "rabbit.log.file.count", [
{datatype, integer}
]}.
{mapping, "log.file.formatter", "rabbit.log.file.formatter", [
{default, plaintext},
{datatype, {enum, [plaintext, json]}}
]}.
{mapping, "log.file.formatter.time_format", "rabbit.log.file.formatter", [
{default, rfc3339_space},
{datatype, [{enum, [rfc3339_space, rfc3339_T, epoch_usecs, epoch_secs, lager_default]}, string]}
]}.
{mapping, "log.file.formatter.level_format", "rabbit.log.file.formatter", [
{default, lc},
{datatype, {enum, [lc, uc, lc3, uc3, lc4, uc4]}}
]}.
{mapping, "log.file.formatter.single_line", "rabbit.log.file.formatter", [
{default, off},
{datatype, flag}
]}.
{mapping, "log.file.formatter.plaintext.format", "rabbit.log.file.formatter", [
{default, "$time [$level] $pid $msg"},
{datatype, string}
]}.
{mapping, "log.file.formatter.json.field_map", "rabbit.log.file.formatter", [
{default, "time level msg gl:- logger_formatter:- report_cb:- error_logger:-"},
{datatype, string}
]}.
{mapping, "log.file.formatter.json.verbosity_map", "rabbit.log.file.formatter", [
{default, ""},
{datatype, string}
]}.
{translation, "rabbit.log.file.formatter",
fun(Conf) ->
rabbit_prelaunch_early_logging:translate_formatter_conf("log.file.formatter", Conf)
end}.
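%% For example (illustrative values), a size-based rotation policy combined
%% with the JSON formatter for the main log file:
%% log.file.level = info
%% log.file.rotation.size = 10485760
%% log.file.rotation.count = 5
%% log.file.formatter = json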
%% Connection log.
{mapping, "log.connection.level", "rabbit.log.categories.connection.level", [
{datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
]}.
{mapping, "log.connection.file", "rabbit.log.categories.connection.file", [
{datatype, string}
]}.
{mapping, "log.connection.rotation.date", "rabbit.log.categories.connection.rotate_on_date", [
{datatype, string}
]}.
{mapping, "log.connection.rotation.compress", "rabbit.log.categories.connection.compress_on_rotate", [
{default, false},
{datatype, {enum, [true, false]}}
]}.
{mapping, "log.connection.rotation.size", "rabbit.log.categories.connection.max_no_bytes", [
{datatype, integer}
]}.
{mapping, "log.connection.rotation.count", "rabbit.log.categories.connection.max_no_files", [
{datatype, integer}
]}.
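%% For example (the file name is a placeholder), connection events can be
%% sent to a dedicated file at a more verbose level than the default:
%% log.connection.level = debug
%% log.connection.file = connections.log
%% The channel, mirroring, queue, federation, upgrade and ra categories
%% below follow the same pattern.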
%% Channel log.
{mapping, "log.channel.level", "rabbit.log.categories.channel.level", [
{datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
]}.
{mapping, "log.channel.file", "rabbit.log.categories.channel.file", [
{datatype, string}
]}.
{mapping, "log.channel.rotation.date", "rabbit.log.categories.channel.rotate_on_date", [
{datatype, string}
]}.
{mapping, "log.channel.rotation.compress", "rabbit.log.categories.channel.compress_on_rotate", [
{default, false},
{datatype, {enum, [true, false]}}
]}.
{mapping, "log.channel.rotation.size", "rabbit.log.categories.channel.max_no_bytes", [
{datatype, integer}
]}.
{mapping, "log.channel.rotation.count", "rabbit.log.categories.channel.max_no_files", [
{datatype, integer}
]}.
%% Mirroring log.
{mapping, "log.mirroring.level", "rabbit.log.categories.mirroring.level", [
{datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
]}.
{mapping, "log.mirroring.file", "rabbit.log.categories.mirroring.file", [
{datatype, string}
]}.
{mapping, "log.mirroring.rotation.date", "rabbit.log.categories.mirroring.rotate_on_date", [
{datatype, string}
]}.
{mapping, "log.mirroring.rotation.compress", "rabbit.log.categories.mirroring.compress_on_rotate", [
{default, false},
{datatype, {enum, [true, false]}}
]}.
{mapping, "log.mirroring.rotation.size", "rabbit.log.categories.mirroring.max_no_bytes", [
{datatype, integer}
]}.
{mapping, "log.mirroring.rotation.count", "rabbit.log.categories.mirroring.max_no_files", [
{datatype, integer}
]}.
%% Queue log.
{mapping, "log.queue.level", "rabbit.log.categories.queue.level", [
{datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
]}.
{mapping, "log.queue.file", "rabbit.log.categories.queue.file", [
{datatype, string}
]}.
{mapping, "log.queue.rotation.date", "rabbit.log.categories.queue.rotate_on_date", [
{datatype, string}
]}.
{mapping, "log.queue.rotation.compress", "rabbit.log.categories.queue.compress_on_rotate", [
{default, false},
{datatype, {enum, [true, false]}}
]}.
{mapping, "log.queue.rotation.size", "rabbit.log.categories.queue.max_no_bytes", [
{datatype, integer}
]}.
{mapping, "log.queue.rotation.count", "rabbit.log.categories.queue.max_no_files", [
{datatype, integer}
]}.
%% Federation log.
{mapping, "log.federation.level", "rabbit.log.categories.federation.level", [
{datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
]}.
{mapping, "log.federation.file", "rabbit.log.categories.federation.file", [
{datatype, string}
]}.
{mapping, "log.federation.rotation.date", "rabbit.log.categories.federation.rotate_on_date", [
{datatype, string}
]}.
{mapping, "log.federation.rotation.compress", "rabbit.log.categories.federation.compress_on_rotate", [
{default, false},
{datatype, {enum, [true, false]}}
]}.
{mapping, "log.federation.rotation.size", "rabbit.log.categories.federation.max_no_bytes", [
{datatype, integer}
]}.
{mapping, "log.federation.rotation.count", "rabbit.log.categories.federation.max_no_files", [
{datatype, integer}
]}.
%% Upgrade log.
{mapping, "log.upgrade.level", "rabbit.log.categories.upgrade.level", [
{datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
]}.
{mapping, "log.upgrade.file", "rabbit.log.categories.upgrade.file", [
{datatype, string}
]}.
{mapping, "log.upgrade.rotation.date", "rabbit.log.categories.upgrade.rotate_on_date", [
{datatype, string}
]}.
{mapping, "log.upgrade.rotation.compress", "rabbit.log.categories.upgrade.compress_on_rotate", [
{default, false},
{datatype, {enum, [true, false]}}
]}.
{mapping, "log.upgrade.rotation.size", "rabbit.log.categories.upgrade.max_no_bytes", [
{datatype, integer}
]}.
{mapping, "log.upgrade.rotation.count", "rabbit.log.categories.upgrade.max_no_files", [
{datatype, integer}
]}.
%% Ra log.
{mapping, "log.ra.level", "rabbit.log.categories.ra.level", [
{datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
]}.
{mapping, "log.ra.file", "rabbit.log.categories.ra.file", [
{datatype, string}
]}.
{mapping, "log.ra.rotation.date", "rabbit.log.categories.ra.rotate_on_date", [
{datatype, string}
]}.
{mapping, "log.ra.rotation.compress", "rabbit.log.categories.ra.compress_on_rotate", [
{default, false},
{datatype, {enum, [true, false]}}
]}.
{mapping, "log.ra.rotation.size", "rabbit.log.categories.ra.max_no_bytes", [
{datatype, integer}
]}.
{mapping, "log.ra.rotation.count", "rabbit.log.categories.ra.max_no_files", [
{datatype, integer}
]}.
%% Default logging config.
{mapping, "log.default.level", "rabbit.log.categories.default.level", [
{datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
]}.
{mapping, "log.default.rotation.date", "rabbit.log.categories.default.rotate_on_date", [
{datatype, string}
]}.
{mapping, "log.default.rotation.compress", "rabbit.log.categories.default.compress_on_rotate", [
{default, false},
{datatype, {enum, [true, false]}}
]}.
{mapping, "log.default.rotation.size", "rabbit.log.categories.default.max_no_bytes", [
{datatype, integer}
]}.
{mapping, "log.default.rotation.count", "rabbit.log.categories.default.max_no_files", [
{datatype, integer}
]}.
%%
%% Feature flags and deprecated features
%% =====================================
%%
{mapping,
"deprecated_features.permit.$name", "rabbit.permit_deprecated_features",
[{datatype, {enum, [true, false]}}]
}.
%% This converts:
%% deprecated_features.permit.my_feature = true
%% to:
%% {rabbit, [{permit_deprecated_features, #{my_feature => true}}]}.
{translation, "rabbit.permit_deprecated_features",
fun(Conf) ->
Settings = cuttlefish_variable:filter_by_prefix(
"deprecated_features.permit", Conf),
maps:from_list(
[{list_to_atom(FeatureName), State}
|| {["deprecated_features", "permit", FeatureName], State}
<- Settings])
end}.
% ==========================
% Kernel section
% ==========================
{mapping, "net_ticktime", "kernel.net_ticktime",[
{datatype, [integer]},
{validators, ["non_zero_positive_integer"]}
]}.
{mapping, "distribution.listener.port_range.min", "kernel.inet_dist_listen_min", [
{datatype, [integer]},
{validators, ["non_zero_positive_integer"]}
]}.
{mapping, "distribution.listener.port_range.max", "kernel.inet_dist_listen_max", [
{datatype, [integer]},
{validators, ["non_zero_positive_integer"]}
]}.
{mapping, "distribution.listener.interface", "kernel.inet_dist_use_interface", [
{datatype, [string]},
{validators, ["is_ip"]}
]}.
{translation, "kernel.inet_dist_use_interface",
fun(Conf) ->
case cuttlefish:conf_get("distribution.listener.interface", Conf, undefined) of
undefined ->
cuttlefish:unset();
Value when is_list(Value) ->
case inet:parse_address(Value) of
{ok, Parsed} -> Parsed;
{error, _} -> cuttlefish:invalid("should be a valid IP address")
end;
_ ->
cuttlefish:invalid("should be a valid IP address")
end
end
}.
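%% For example (the address is a placeholder), pinning Erlang distribution
%% to a single port and interface:
%% distribution.listener.port_range.min = 25672
%% distribution.listener.port_range.max = 25672
%% distribution.listener.interface = 192.168.1.10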
% ==========================
% sysmon_handler section
% ==========================
%% @doc The threshold at which to warn about the number of processes
%% that are overly busy. Processes with large heaps or that take a
%% long time to garbage collect will count toward this threshold.
{mapping, "sysmon_handler.thresholds.busy_processes", "sysmon_handler.process_limit", [
{datatype, integer},
hidden
]}.
{translation, "sysmon_handler.process_limit",
fun(Conf) ->
case cuttlefish:conf_get("sysmon_handler.thresholds.busy_processes", Conf, undefined) of
undefined ->
cuttlefish:unset();
Int when is_integer(Int) ->
Int;
_ ->
cuttlefish:invalid("should be a non-negative integer")
end
end
}.
%% @doc The threshold at which to warn about the number of ports that
%% are overly busy. Ports with full input buffers count toward this
%% threshold.
{mapping, "sysmon_handler.thresholds.busy_ports", "sysmon_handler.port_limit", [
{datatype, integer},
hidden
]}.
{translation, "sysmon_handler.port_limit",
fun(Conf) ->
case cuttlefish:conf_get("sysmon_handler.thresholds.busy_ports", Conf, undefined) of
undefined ->
cuttlefish:unset();
Int when is_integer(Int) ->
Int;
_ ->
cuttlefish:invalid("should be a non-negative integer")
end
end
}.
%% @doc A process will become busy when it exceeds this amount of time
%% doing garbage collection.
%% @see sysmon_handler.thresholds.busy_processes
{mapping, "sysmon_handler.triggers.process.garbage_collection", "sysmon_handler.gc_ms_limit", [
{datatype, [{atom, off},
{duration, ms}]},
hidden
]}.
{translation, "sysmon_handler.gc_ms_limit",
fun(Conf) ->
case cuttlefish:conf_get("sysmon_handler.triggers.process.garbage_collection", Conf, undefined) of
undefined ->
cuttlefish:unset();
off ->
0;
Int when is_integer(Int) ->
Int;
_ ->
cuttlefish:invalid("should be a non-negative integer")
end
end
}.
%% @doc A process will become busy when it exceeds this amount of time
%% during a single process scheduling & execution cycle.
{mapping, "sysmon_handler.triggers.process.long_scheduled_execution", "sysmon_handler.schedule_ms_limit", [
{datatype, [{atom, off},
{duration, ms}]},
hidden
]}.
{translation, "sysmon_handler.schedule_ms_limit",
fun(Conf) ->
case cuttlefish:conf_get("sysmon_handler.triggers.process.long_scheduled_execution", Conf, undefined) of
undefined ->
cuttlefish:unset();
off ->
0;
Int when is_integer(Int) ->
Int;
_ ->
cuttlefish:invalid("should be a non-negative integer")
end
end
}.
%% @doc A process will become busy when its heap exceeds this size.
%% @see sysmon_handler.thresholds.busy_processes
{mapping, "sysmon_handler.triggers.process.heap_size", "sysmon_handler.heap_word_limit", [
{datatype, [{atom, off},
bytesize]},
hidden
]}.
{translation, "sysmon_handler.heap_word_limit",
fun(Conf) ->
case cuttlefish:conf_get("sysmon_handler.triggers.process.heap_size", Conf, undefined) of
undefined ->
cuttlefish:unset();
off ->
0;
Bytes when is_integer(Bytes) ->
WordSize = erlang:system_info(wordsize),
Bytes div WordSize;
_ ->
cuttlefish:invalid("should be a non-negative integer")
end
end
}.
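%% For example (an illustrative value), the byte size above is divided by
%% erlang:system_info(wordsize), so on a 64-bit node
%% sysmon_handler.triggers.process.heap_size = 80000000
%% is stored as 80000000 div 8 = 10000000 words in sysmon_handler.heap_word_limit.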
%% @doc Whether ports with full input buffers will be counted as
%% busy. Ports can represent open files or network sockets.
%% @see sysmon_handler.thresholds.busy_ports
{mapping, "sysmon_handler.triggers.port", "sysmon_handler.busy_port", [
{datatype, flag},
hidden
]}.
{translation, "sysmon_handler.busy_port",
fun(Conf) ->
case cuttlefish:conf_get("sysmon_handler.triggers.port", Conf, undefined) of
undefined ->
cuttlefish:unset();
Val -> Val
end
end
}.
%% @doc Whether distribution ports with full input buffers will be
%% counted as busy. Distribution ports connect Erlang nodes within a
%% single cluster.
%% @see sysmon_handler.thresholds.busy_ports
{mapping, "sysmon_handler.triggers.distribution_port", "sysmon_handler.busy_dist_port", [
{datatype, flag},
hidden
]}.
{translation, "sysmon_handler.busy_dist_port",
fun(Conf) ->
case cuttlefish:conf_get("sysmon_handler.triggers.distribution_port", Conf, undefined) of
undefined ->
cuttlefish:unset();
Val -> Val
end
end
}.
%%
%% Ra
%%
{mapping, "raft.segment_max_entries", "ra.segment_max_entries", [
{datatype, integer},
{validators, ["non_zero_positive_integer", "positive_16_bit_unsigned_integer"]}
]}.
{translation, "ra.segment_max_entries",
fun(Conf) ->
case cuttlefish:conf_get("raft.segment_max_entries", Conf, undefined) of
undefined -> cuttlefish:unset();
Val -> Val
end
end
}.
{mapping, "raft.wal_max_size_bytes", "ra.wal_max_size_bytes", [
{datatype, integer},
{validators, ["non_zero_positive_integer"]}
]}.
{translation, "ra.wal_max_size_bytes",
fun(Conf) ->
case cuttlefish:conf_get("raft.wal_max_size_bytes", Conf, undefined) of
undefined -> cuttlefish:unset();
Val -> Val
end
end
}.
{mapping, "raft.wal_max_entries", "ra.wal_max_entries", [
{datatype, integer},
{validators, ["non_zero_positive_integer"]}
]}.
{translation, "ra.wal_max_entries",
fun(Conf) ->
case cuttlefish:conf_get("raft.wal_max_entries", Conf, undefined) of
undefined -> cuttlefish:unset();
Val -> Val
end
end
}.
{mapping, "raft.wal_hibernate_after", "ra.wal_hibernate_after", [
{datatype, integer},
{validators, ["non_zero_positive_integer"]}
]}.
{translation, "ra.wal_hibernate_after",
fun(Conf) ->
case cuttlefish:conf_get("raft.wal_hibernate_after", Conf, undefined) of
undefined -> cuttlefish:unset();
Val -> Val
end
end
}.
{mapping, "raft.wal_max_batch_size", "ra.wal_max_batch_size", [
{datatype, integer},
{validators, ["non_zero_positive_integer"]}
]}.
{translation, "ra.wal_max_batch_size",
fun(Conf) ->
case cuttlefish:conf_get("raft.wal_max_batch_size", Conf, undefined) of
undefined -> cuttlefish:unset();
Val -> Val
end
end
}.
{mapping, "raft.snapshot_chunk_size", "ra.snapshot_chunk_size", [
{datatype, integer},
{validators, ["non_zero_positive_integer"]}
]}.
{translation, "ra.snapshot_chunk_size",
fun(Conf) ->
case cuttlefish:conf_get("raft.snapshot_chunk_size", Conf, undefined) of
undefined -> cuttlefish:unset();
Val -> Val
end
end
}.
{mapping, "raft.data_dir", "ra.data_dir", [
{datatype, string}
]}.
{translation, "ra.data_dir",
fun(Conf) ->
case cuttlefish:conf_get("raft.data_dir", Conf, undefined) of
undefined -> cuttlefish:unset();
Val -> Val
end
end
}.
{mapping, "raft.adaptive_failure_detector.poll_interval", "aten.poll_interval", [
{datatype, integer},
{validators, ["non_zero_positive_integer"]}
]}.
{translation, "aten.poll_interval",
fun(Conf) ->
case cuttlefish:conf_get("raft.adaptive_failure_detector.poll_interval", Conf, undefined) of
undefined -> cuttlefish:unset();
Val -> Val
end
end
}.
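%% For example (illustrative values), tuning the Raft WAL and the adaptive
%% failure detector; these keys map straight through to the 'ra' and 'aten'
%% applications:
%% raft.wal_max_size_bytes = 64000000
%% raft.segment_max_entries = 32768
%% raft.adaptive_failure_detector.poll_interval = 5000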
{mapping, "default_queue_type", "rabbit.default_queue_type", [
{datatype, atom}
]}.
{translation, "rabbit.default_queue_type",
fun(Conf) ->
case cuttlefish:conf_get("default_queue_type", Conf, rabbit_classic_queue) of
classic -> rabbit_classic_queue;
quorum -> rabbit_quorum_queue;
stream -> rabbit_stream_queue;
Module -> Module
end
end}.
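%% For example, the translation above converts:
%% default_queue_type = quorum
%% to:
%% {rabbit, [{default_queue_type, rabbit_quorum_queue}]}.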
%%
%% Backing queue version
%%
%% DEPRECATED. Not used since RabbitMQ 4.0
{mapping, "classic_queue.default_version", "rabbit.classic_queue_default_version", [
{datatype, integer},
{validators, ["non_zero_positive_integer"]}
]}.
{translation, "rabbit.classic_queue_default_version",
fun(Conf) ->
case cuttlefish:conf_get("classic_queue.default_version", Conf, 2) of
1 -> cuttlefish:invalid("Classic queues v1 are no longer supported");
2 -> 2;
_ -> cuttlefish:unset()
end
end
}.
{mapping, "quorum_queue.compute_checksums", "rabbit.quorum_compute_checksums", [
{datatype, {enum, [true, false]}}]}.
{mapping, "quorum_queue.property_equivalence.relaxed_checks_on_redeclaration", "rabbit.quorum_relaxed_checks_on_redeclaration", [
{datatype, {enum, [true, false]}}]}.
{mapping, "quorum_queue.initial_cluster_size", "rabbit.quorum_cluster_size", [
{datatype, integer},
{validators, ["non_zero_positive_integer"]}
]}.
{mapping, "quorum_queue.commands_soft_limit", "rabbit.quorum_commands_soft_limit", [
{datatype, integer},
{validators, ["non_zero_positive_integer"]}
]}.
%%
%% Quorum Queue membership reconciliation
%%
{mapping, "quorum_queue.continuous_membership_reconciliation.enabled", "rabbit.quorum_membership_reconciliation_enabled", [
{datatype, {enum, [true, false]}}]}.
{mapping, "quorum_queue.continuous_membership_reconciliation.auto_remove", "rabbit.quorum_membership_reconciliation_auto_remove", [
{datatype, {enum, [true, false]}}]}.
{mapping, "quorum_queue.continuous_membership_reconciliation.interval", "rabbit.quorum_membership_reconciliation_interval", [
{datatype, integer}, {validators, ["non_negative_integer"]}
]}.
{mapping, "quorum_queue.continuous_membership_reconciliation.trigger_interval", "rabbit.quorum_membership_reconciliation_trigger_interval", [
{datatype, integer}, {validators, ["non_negative_integer"]}
]}.
{mapping, "quorum_queue.continuous_membership_reconciliation.target_group_size", "rabbit.quorum_membership_reconciliation_target_group_size", [
{datatype, integer}, {validators, ["non_negative_integer"]}
]}.
%%
%% Runtime parameters
%%
{mapping, "runtime_parameters.limits.$category", "rabbit.runtime_parameters.limits", [
{datatype, integer},
{validators, ["non_negative_integer"]}
]}.
{translation, "rabbit.runtime_parameters.limits",
fun(Conf) ->
case cuttlefish_variable:filter_by_prefix("runtime_parameters.limits", Conf) of
[] -> cuttlefish:unset();
Ss -> [ {list_to_binary(Category), Limit} || {[_, _, Category], Limit} <- Ss ]
end
end
}.
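%% For example (the category name is a placeholder), the translation above
%% turns each per-category limit into a {binary, integer} pair stored under
%% rabbit.runtime_parameters.limits:
%% runtime_parameters.limits.federation = 10
%% becomes [{<<"federation">>, 10}].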
%%
%% Message interceptors
%%
{mapping, "message_interceptors.$stage.$name.$key", "rabbit.message_interceptors", [
{datatype, {enum, [true, false]}}]}.
{translation, "rabbit.message_interceptors",
fun(Conf) ->
case cuttlefish_variable:filter_by_prefix("message_interceptors", Conf) of
[] ->
cuttlefish:unset();
L ->
lists:foldr(
fun({["message_interceptors", "incoming", "set_header_routing_node", "overwrite"], Overwrite}, Acc)
when is_boolean(Overwrite) ->
Mod = rabbit_msg_interceptor_routing_node,
Cfg = #{overwrite => Overwrite},
[{Mod, Cfg} | Acc];
({["message_interceptors", "incoming", "set_header_timestamp", "overwrite"], Overwrite}, Acc)
when is_boolean(Overwrite) ->
Mod = rabbit_msg_interceptor_timestamp,
Cfg = #{incoming => true,
overwrite => Overwrite},
case lists:keytake(Mod, 1, Acc) of
false ->
[{Mod, Cfg} | Acc];
{value, {Mod, Cfg1}, Acc1} ->
Cfg2 = maps:merge(Cfg1, Cfg),
[{Mod, Cfg2} | Acc1]
end;
({["message_interceptors", "outgoing", "timestamp", "enabled"], Enabled}, Acc) ->
case Enabled of
true ->
Mod = rabbit_msg_interceptor_timestamp,
Cfg = #{outgoing => true},
case lists:keytake(Mod, 1, Acc) of
false ->
[{Mod, Cfg} | Acc];
{value, {Mod, Cfg1}, Acc1} ->
Cfg2 = maps:merge(Cfg1, Cfg),
[{Mod, Cfg2} | Acc1]
end;
false ->
Acc
end;
(Other, _Acc) ->
cuttlefish:invalid(io_lib:format("~p is invalid", [Other]))
end, [], L)
end
end
}.
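%% For example, the translation above converts:
%% message_interceptors.incoming.set_header_timestamp.overwrite = true
%% to:
%% {rabbit, [{message_interceptors,
%%            [{rabbit_msg_interceptor_timestamp,
%%              #{incoming => true, overwrite => true}}]}]}.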
{mapping, "stream.replication.port_range.min", "osiris.port_range", [
{datatype, [integer]},
{validators, ["non_zero_positive_integer"]}
]}.
{mapping, "stream.replication.port_range.max", "osiris.port_range", [
{datatype, [integer]},
{validators, ["non_zero_positive_integer"]}
]}.
{translation, "osiris.port_range",
fun(Conf) ->
Min = cuttlefish:conf_get("stream.replication.port_range.min", Conf, undefined),
Max = cuttlefish:conf_get("stream.replication.port_range.max", Conf, undefined),
case {Min, Max} of
{undefined, undefined} ->
cuttlefish:unset();
{Mn, undefined} ->
{Mn, Mn + 500};
{undefined, Mx} ->
{Mx - 500, Mx};
_ ->
{Min, Max}
end
end}.
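%% For example (an illustrative value), when only the lower bound is set the
%% translation above derives a 500-port range:
%% stream.replication.port_range.min = 4000
%% results in {4000, 4500} for osiris.port_range.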
{mapping, "cluster_tags.$tag", "rabbit.cluster_tags", [
{datatype, [binary]}
]}.
{translation, "rabbit.cluster_tags",
fun(Conf) ->
case cuttlefish:conf_get("cluster_tags", Conf, undefined) of
none -> [];
_ ->
Settings = cuttlefish_variable:filter_by_prefix("cluster_tags", Conf),
[ {list_to_binary(K), V} || {[_, K], V} <- Settings]
end
end}.
{mapping, "node_tags.$tag", "rabbit.node_tags", [
{datatype, [binary]}
]}.
{translation, "rabbit.node_tags",
fun(Conf) ->
case cuttlefish:conf_get("node_tags", Conf, undefined) of
none -> [];
_ ->
Settings = cuttlefish_variable:filter_by_prefix("node_tags", Conf),
[ {list_to_binary(K), V} || {[_, K], V} <- Settings]
end
end}.
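%% For example (tag names and values are placeholders), each tag becomes a
%% binary key/value pair:
%% cluster_tags.region = us-east-1
%% node_tags.purpose = qa
%% become
%% {rabbit, [{cluster_tags, [{<<"region">>, <<"us-east-1">>}]},
%%           {node_tags, [{<<"purpose">>, <<"qa">>}]}]}.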
% ===============================
% Validators
% ===============================
{validator, "mirroring_sync_batch_size", "Batch size should be greater than 0 and less than 1M",
fun(Size) when is_integer(Size) ->
Size > 0 andalso Size =< 1000000
end}.
{validator, "max_message_size", "Max message size should be between 0 and 512MB",
fun(Size) when is_integer(Size) ->
Size > 0 andalso Size =< 536870912
end}.
{validator, "less_than_1", "Float is not between 0 and 1",
fun(Float) when is_float(Float) ->
Float > 0 andalso Float < 1
end}.
{validator, "port", "Invalid port number",
fun(Port) when is_integer(Port) ->
Port > 0 andalso Port < 65535
end}.
{validator, "byte", "Integer must be in the range [0, 255]",
fun(Int) when is_integer(Int) ->
Int >= 0 andalso Int =< 255
end}.
{validator, "dir_writable", "Directory must be writable",
fun(Dir) ->
TestFile = filename:join(Dir, "test_file"),
file:delete(TestFile),
Res = ok == file:write_file(TestFile, <<"test">>),
file:delete(TestFile),
Res
end}.
{validator, "file_accessible", "file does not exist or cannot be read by the node",
fun(File) ->
case file:read_file_info(File) of
{ok, FileInfo} -> (element(4, FileInfo) == read) or (element(4, FileInfo) == read_write);
_ -> false
end
end}.
{validator, "is_ip", "value should be a valid IP address",
fun(IpStr) ->
Res = inet:parse_address(IpStr),
element(1, Res) == ok
end}.
{validator, "non_negative_integer", "number should be greater or equal to zero",
fun(Int) when is_integer(Int) ->
Int >= 0
end}.
{validator, "non_zero_positive_integer", "number should be greater or equal to one",
fun(Int) when is_integer(Int) ->
Int >= 1
end}.
{validator, "positive_16_bit_unsigned_integer", "number should be between 1 and 65535",
fun(Int) when is_integer(Int) ->
(Int >= 1) and (Int =< 16#ff_ff)
end}.
{validator, "positive_32_bit_unsigned_integer", "number should be between 1 and 4294967295",
fun(Int) when is_integer(Int) ->
(Int >= 1) and (Int =< 16#ff_ff_ff_ff)
end}.
{validator, "valid_regex", "string must be a valid regular expression",
fun("") -> false;
(String) -> {Res, _ } = re:compile(String),
Res =:= ok
end}.
{validator, "is_supported_information_unit", "supported formats: 500MB, 500MiB, 10GB, 10GiB, 2TB, 2TiB, 10000000000",
fun(S0) ->
case is_integer(S0) of
true -> true;
false ->
%% this is a string
S = string:strip(S0, right),
%% The suffix is optional
{ok, HasIUSuffix} = re:compile("([0-9]+)([a-zA-Z]){1,3}$", [dollar_endonly, caseless]),
%% Here are the prefixes we accept. This must match
%% what rabbit_resource_monitor_misc and 'rabbitmq-diagnostics status' can format.
{ok, SuffixExtractor} = re:compile("(k|ki|kb|kib|m|mi|mb|mib|g|gi|gb|gib|t|ti|tb|tib|p|pi|pb|pib)$", [dollar_endonly, caseless]),
case re:run(S, HasIUSuffix) of
nomatch -> false;
{match, _} ->
case re:split(S, SuffixExtractor) of
[] -> false;
[_CompleteMatch] -> false;
[_CompleteMatch, _Suffix | _] -> true
end
end
end
end}.