[skip ci] Remove rabbit_log and switch to LOG_ macros

Michal Kuratczyk 2025-07-11 12:37:10 +02:00
parent ff93f2c9c1
commit 11a22c407d
152 changed files with 1165 additions and 1070 deletions
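In practical terms, each affected module gains an -include_lib("kernel/include/logger.hrl"). line and every rabbit_log:LEVEL(...) call becomes the corresponding OTP ?LOG_LEVEL(...) macro call with the same format string and arguments. A minimal sketch of the pattern, using a hypothetical module name and log message purely for illustration:

%% Hypothetical example module; not part of the commit itself.
-module(example_logging).

%% Required for the ?LOG_* macros.
-include_lib("kernel/include/logger.hrl").

-export([old_style/1, new_style/1]).

%% Before this commit: log through the rabbit_log wrapper module.
old_style(QName) ->
    rabbit_log:debug("Queue ~ts forced to re-emit events", [QName]).

%% After this commit: call the OTP logger macro directly. Besides removing
%% the extra wrapper call, the macro attaches the caller's module, function
%% and line as logger metadata at the call site.
new_style(QName) ->
    ?LOG_DEBUG("Queue ~ts forced to re-emit events", [QName]).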

View File

@@ -17,12 +17,13 @@
 ]).
 -include("oauth2_client.hrl").
+-include_lib("kernel/include/logger.hrl").
 -spec get_access_token(oauth_provider(), access_token_request()) ->
 {ok, successful_access_token_response()} |
 {error, unsuccessful_access_token_response() | any()}.
 get_access_token(OAuthProvider, Request) ->
-rabbit_log:debug("get_access_token using OAuthProvider:~p and client_id:~p",
+?LOG_DEBUG("get_access_token using OAuthProvider:~p and client_id:~p",
 [OAuthProvider, Request#access_token_request.client_id]),
 URL = OAuthProvider#oauth_provider.token_endpoint,
 Header = [],
@@ -96,7 +97,7 @@ drop_trailing_path_separator(Path) when is_list(Path) ->
 -spec get_openid_configuration(DiscoveryEndpoint :: uri_string:uri_string(),
 ssl:tls_option() | []) -> {ok, openid_configuration()} | {error, term()}.
 get_openid_configuration(DiscoverEndpoint, TLSOptions) ->
-rabbit_log:debug("get_openid_configuration from ~p (~p)", [DiscoverEndpoint,
+?LOG_DEBUG("get_openid_configuration from ~p (~p)", [DiscoverEndpoint,
 format_ssl_options(TLSOptions)]),
 Options = [],
 Response = httpc:request(get, {DiscoverEndpoint, []}, TLSOptions, Options),
@@ -219,7 +220,7 @@ do_update_oauth_provider_endpoints_configuration(OAuthProvider) when
 undefined -> do_nothing;
 JwksUri -> set_env(jwks_uri, JwksUri)
 end,
-rabbit_log:debug("Updated oauth_provider details: ~p ",
+?LOG_DEBUG("Updated oauth_provider details: ~p ",
 [format_oauth_provider(OAuthProvider)]),
 OAuthProvider;
@@ -230,7 +231,7 @@ do_update_oauth_provider_endpoints_configuration(OAuthProvider) ->
 ModifiedOAuthProviders = maps:put(OAuthProviderId,
 merge_oauth_provider(OAuthProvider, Proplist), OAuthProviders),
 set_env(oauth_providers, ModifiedOAuthProviders),
-rabbit_log:debug("Replaced oauth_providers "),
+?LOG_DEBUG("Replaced oauth_providers "),
 OAuthProvider.
 use_global_locks_on_all_nodes() ->
@@ -271,7 +272,7 @@ get_oauth_provider(ListOfRequiredAttributes) ->
 case get_env(default_oauth_provider) of
 undefined -> get_root_oauth_provider(ListOfRequiredAttributes);
 DefaultOauthProviderId ->
-rabbit_log:debug("Using default_oauth_provider ~p",
+?LOG_DEBUG("Using default_oauth_provider ~p",
 [DefaultOauthProviderId]),
 get_oauth_provider(DefaultOauthProviderId, ListOfRequiredAttributes)
 end.
@@ -282,7 +283,7 @@ download_oauth_provider(OAuthProvider) ->
 case OAuthProvider#oauth_provider.discovery_endpoint of
 undefined -> {error, {missing_oauth_provider_attributes, [issuer]}};
 URL ->
-rabbit_log:debug("Downloading oauth_provider using ~p ", [URL]),
+?LOG_DEBUG("Downloading oauth_provider using ~p ", [URL]),
 case get_openid_configuration(URL, get_ssl_options_if_any(OAuthProvider)) of
 {ok, OpenIdConfiguration} ->
 {ok, update_oauth_provider_endpoints_configuration(
@@ -294,7 +295,7 @@ download_oauth_provider(OAuthProvider) ->
 ensure_oauth_provider_has_attributes(OAuthProvider, ListOfRequiredAttributes) ->
 case find_missing_attributes(OAuthProvider, ListOfRequiredAttributes) of
 [] ->
-rabbit_log:debug("Resolved oauth_provider ~p",
+?LOG_DEBUG("Resolved oauth_provider ~p",
 [format_oauth_provider(OAuthProvider)]),
 {ok, OAuthProvider};
 _ = Attrs ->
@@ -303,13 +304,13 @@ ensure_oauth_provider_has_attributes(OAuthProvider, ListOfRequiredAttributes) ->
 get_root_oauth_provider(ListOfRequiredAttributes) ->
 OAuthProvider = lookup_root_oauth_provider(),
-rabbit_log:debug("Using root oauth_provider ~p",
+?LOG_DEBUG("Using root oauth_provider ~p",
 [format_oauth_provider(OAuthProvider)]),
 case find_missing_attributes(OAuthProvider, ListOfRequiredAttributes) of
 [] ->
 {ok, OAuthProvider};
 _ = MissingAttributes ->
-rabbit_log:debug("Looking up missing attributes ~p ...",
+?LOG_DEBUG("Looking up missing attributes ~p ...",
 [MissingAttributes]),
 case download_oauth_provider(OAuthProvider) of
 {ok, OAuthProvider2} ->
@@ -333,22 +334,22 @@ get_oauth_provider(OAuth2ProviderId, ListOfRequiredAttributes)
 get_oauth_provider(OAuthProviderId, ListOfRequiredAttributes)
 when is_binary(OAuthProviderId) ->
-rabbit_log:debug("get_oauth_provider ~p with at least these attributes: ~p",
+?LOG_DEBUG("get_oauth_provider ~p with at least these attributes: ~p",
 [OAuthProviderId, ListOfRequiredAttributes]),
 case lookup_oauth_provider_config(OAuthProviderId) of
 {error, _} = Error0 ->
-rabbit_log:debug("Failed to find oauth_provider ~p configuration due to ~p",
+?LOG_DEBUG("Failed to find oauth_provider ~p configuration due to ~p",
 [OAuthProviderId, Error0]),
 Error0;
 Config ->
-rabbit_log:debug("Found oauth_provider configuration ~p", [Config]),
+?LOG_DEBUG("Found oauth_provider configuration ~p", [Config]),
 OAuthProvider = map_to_oauth_provider(Config),
-rabbit_log:debug("Resolved oauth_provider ~p", [format_oauth_provider(OAuthProvider)]),
+?LOG_DEBUG("Resolved oauth_provider ~p", [format_oauth_provider(OAuthProvider)]),
 case find_missing_attributes(OAuthProvider, ListOfRequiredAttributes) of
 [] ->
 {ok, OAuthProvider};
 _ = MissingAttributes ->
-rabbit_log:debug("OauthProvider has following missing attributes ~p", [MissingAttributes]),
+?LOG_DEBUG("OauthProvider has following missing attributes ~p", [MissingAttributes]),
 case download_oauth_provider(OAuthProvider) of
 {ok, OAuthProvider2} ->
 ensure_oauth_provider_has_attributes(OAuthProvider2,

View File

@@ -3,7 +3,7 @@
 -ifdef(TRACE_AMQP).
 -warning("AMQP tracing is enabled").
 -define(TRACE(Format, Args),
-rabbit_log:debug(
+?LOG_DEBUG(
 "~s:~s/~b ~b~n" ++ Format ++ "~n",
 [?MODULE, ?FUNCTION_NAME, ?FUNCTION_ARITY, ?LINE] ++ Args)).
 -else.

View File

@@ -9,6 +9,8 @@
 -module(code_server_cache).
+-include_lib("kernel/include/logger.hrl").
 -behaviour(gen_server).
 %% API
@@ -70,7 +72,7 @@ handle_maybe_call_mfa(true, {Module, Function, Args, Default}, State) ->
 error:undef ->
 handle_maybe_call_mfa_error(Module, Default, State);
 Err:Reason ->
-rabbit_log:error("Calling ~tp:~tp failed: ~tp:~tp",
+?LOG_ERROR("Calling ~tp:~tp failed: ~tp:~tp",
 [Module, Function, Err, Reason]),
 handle_maybe_call_mfa_error(Module, Default, State)
 end.

View File

@@ -7,6 +7,8 @@
 -module(file_handle_cache).
+-include_lib("kernel/include/logger.hrl").
 %% A File Handle Cache
 %%
 %% This extends a subset of the functionality of the Erlang file
@@ -1451,19 +1453,19 @@ update_counts(open, Pid, Delta,
 State = #fhc_state { open_count = OpenCount,
 clients = Clients }) ->
 safe_ets_update_counter(Clients, Pid, {#cstate.opened, Delta},
-fun() -> rabbit_log:warning("FHC: failed to update counter 'opened', client pid: ~p", [Pid]) end),
+fun() -> ?LOG_WARNING("FHC: failed to update counter 'opened', client pid: ~p", [Pid]) end),
 State #fhc_state { open_count = OpenCount + Delta};
 update_counts({obtain, file}, Pid, Delta,
 State = #fhc_state {obtain_count_file = ObtainCountF,
 clients = Clients }) ->
 safe_ets_update_counter(Clients, Pid, {#cstate.obtained_file, Delta},
-fun() -> rabbit_log:warning("FHC: failed to update counter 'obtained_file', client pid: ~p", [Pid]) end),
+fun() -> ?LOG_WARNING("FHC: failed to update counter 'obtained_file', client pid: ~p", [Pid]) end),
 State #fhc_state { obtain_count_file = ObtainCountF + Delta};
 update_counts({obtain, socket}, Pid, Delta,
 State = #fhc_state {obtain_count_socket = ObtainCountS,
 clients = Clients }) ->
 safe_ets_update_counter(Clients, Pid, {#cstate.obtained_socket, Delta},
-fun() -> rabbit_log:warning("FHC: failed to update counter 'obtained_socket', client pid: ~p", [Pid]) end),
+fun() -> ?LOG_WARNING("FHC: failed to update counter 'obtained_socket', client pid: ~p", [Pid]) end),
 State #fhc_state { obtain_count_socket = ObtainCountS + Delta};
 update_counts({reserve, file}, Pid, NewReservation,
 State = #fhc_state {reserve_count_file = ReserveCountF,
@@ -1471,7 +1473,7 @@ update_counts({reserve, file}, Pid, NewReservation,
 [#cstate{reserved_file = R}] = ets:lookup(Clients, Pid),
 Delta = NewReservation - R,
 safe_ets_update_counter(Clients, Pid, {#cstate.reserved_file, Delta},
-fun() -> rabbit_log:warning("FHC: failed to update counter 'reserved_file', client pid: ~p", [Pid]) end),
+fun() -> ?LOG_WARNING("FHC: failed to update counter 'reserved_file', client pid: ~p", [Pid]) end),
 State #fhc_state { reserve_count_file = ReserveCountF + Delta};
 update_counts({reserve, socket}, Pid, NewReservation,
 State = #fhc_state {reserve_count_socket = ReserveCountS,
@@ -1479,7 +1481,7 @@ update_counts({reserve, socket}, Pid, NewReservation,
 [#cstate{reserved_file = R}] = ets:lookup(Clients, Pid),
 Delta = NewReservation - R,
 safe_ets_update_counter(Clients, Pid, {#cstate.reserved_socket, Delta},
-fun() -> rabbit_log:warning("FHC: failed to update counter 'reserved_socket', client pid: ~p", [Pid]) end),
+fun() -> ?LOG_WARNING("FHC: failed to update counter 'reserved_socket', client pid: ~p", [Pid]) end),
 State #fhc_state { reserve_count_socket = ReserveCountS + Delta}.
 maybe_reduce(State) ->
maybe_reduce(State) -> maybe_reduce(State) ->

View File

@@ -3,6 +3,7 @@
 -include_lib("rabbit_common/include/rabbit.hrl").
 -include_lib("rabbit_common/include/rabbit_framing.hrl").
 -include("mc.hrl").
+-include_lib("kernel/include/logger.hrl").
 -export([
 %init/3,
@@ -267,7 +268,7 @@ update_x_death_header(Info, Headers) ->
 Headers, <<"x-death">>, array,
 [{table, rabbit_misc:sort_field_table(Info1)} | Others]);
 {<<"x-death">>, InvalidType, Header} ->
-rabbit_log:warning("Message has invalid x-death header (type: ~tp)."
+?LOG_WARNING("Message has invalid x-death header (type: ~tp)."
 " Resetting header ~tp",
 [InvalidType, Header]),
 %% if x-death is something other than an array (list)

View File

@@ -7,6 +7,8 @@
 -module(mirrored_supervisor).
+-include_lib("kernel/include/logger.hrl").
 %% Mirrored Supervisor
 %% ===================
 %%
@@ -252,13 +254,13 @@ handle_call({init, Overall}, _From,
 LockId = mirrored_supervisor_locks:lock(Group),
 maybe_log_lock_acquisition_failure(LockId, Group),
 ok = pg:join(Group, Overall),
-rabbit_log:debug("Mirrored supervisor: initializing, overall supervisor ~tp joined group ~tp", [Overall, Group]),
+?LOG_DEBUG("Mirrored supervisor: initializing, overall supervisor ~tp joined group ~tp", [Overall, Group]),
 Rest = pg:get_members(Group) -- [Overall],
 Nodes = [node(M) || M <- Rest],
-rabbit_log:debug("Mirrored supervisor: known group ~tp members: ~tp on nodes ~tp", [Group, Rest, Nodes]),
+?LOG_DEBUG("Mirrored supervisor: known group ~tp members: ~tp on nodes ~tp", [Group, Rest, Nodes]),
 case Rest of
 [] ->
-rabbit_log:debug("Mirrored supervisor: no known peer members in group ~tp, will delete all child records for it", [Group]),
+?LOG_DEBUG("Mirrored supervisor: no known peer members in group ~tp, will delete all child records for it", [Group]),
 delete_all(Group);
 _ -> ok
 end,
@@ -282,18 +284,18 @@ handle_call({start_child, ChildSpec}, _From,
 group = Group}) ->
 LockId = mirrored_supervisor_locks:lock(Group),
 maybe_log_lock_acquisition_failure(LockId, Group),
-rabbit_log:debug("Mirrored supervisor: asked to consider starting a child, group: ~tp", [Group]),
+?LOG_DEBUG("Mirrored supervisor: asked to consider starting a child, group: ~tp", [Group]),
 Result = case maybe_start(Group, Overall, Delegate, ChildSpec) of
 already_in_store ->
-rabbit_log:debug("Mirrored supervisor: maybe_start for group ~tp,"
+?LOG_DEBUG("Mirrored supervisor: maybe_start for group ~tp,"
 " overall ~p returned 'record already present'", [Group, Overall]),
 {error, already_present};
 {already_in_store, Pid} ->
-rabbit_log:debug("Mirrored supervisor: maybe_start for group ~tp,"
+?LOG_DEBUG("Mirrored supervisor: maybe_start for group ~tp,"
 " overall ~p returned 'already running: ~tp'", [Group, Overall, Pid]),
 {error, {already_started, Pid}};
 Else ->
-rabbit_log:debug("Mirrored supervisor: maybe_start for group ~tp,"
+?LOG_DEBUG("Mirrored supervisor: maybe_start for group ~tp,"
 " overall ~tp returned ~tp", [Group, Overall, Else]),
 Else
 end,
@@ -377,19 +379,19 @@ tell_all_peers_to_die(Group, Reason) ->
 [cast(P, {die, Reason}) || P <- pg:get_members(Group) -- [self()]].
 maybe_start(Group, Overall, Delegate, ChildSpec) ->
-rabbit_log:debug("Mirrored supervisor: asked to consider starting, group: ~tp",
+?LOG_DEBUG("Mirrored supervisor: asked to consider starting, group: ~tp",
 [Group]),
 try check_start(Group, Overall, Delegate, ChildSpec) of
 start ->
-rabbit_log:debug("Mirrored supervisor: check_start for group ~tp,"
+?LOG_DEBUG("Mirrored supervisor: check_start for group ~tp,"
 " overall ~tp returned 'do start'", [Group, Overall]),
 start(Delegate, ChildSpec);
 undefined ->
-rabbit_log:debug("Mirrored supervisor: check_start for group ~tp,"
+?LOG_DEBUG("Mirrored supervisor: check_start for group ~tp,"
 " overall ~tp returned 'undefined'", [Group, Overall]),
 already_in_store;
 Pid ->
-rabbit_log:debug("Mirrored supervisor: check_start for group ~tp,"
+?LOG_DEBUG("Mirrored supervisor: check_start for group ~tp,"
 " overall ~tp returned 'already running (~tp)'",
 [Group, Overall, Pid]),
 {already_in_store, Pid}
@@ -400,7 +402,7 @@ maybe_start(Group, Overall, Delegate, ChildSpec) ->
 check_start(Group, Overall, Delegate, ChildSpec) ->
 Id = id(ChildSpec),
-rabbit_log:debug("Mirrored supervisor: check_start for group ~tp, id: ~tp, "
+?LOG_DEBUG("Mirrored supervisor: check_start for group ~tp, id: ~tp, "
 "overall: ~tp", [Group, Id, Overall]),
 case rabbit_db_msup:create_or_update(Group, Overall, Delegate, ChildSpec, Id) of
 Delegate0 when is_pid(Delegate0) ->
@@ -486,6 +488,6 @@ restore_child_order(ChildSpecs, ChildOrder) ->
 end, ChildSpecs).
 maybe_log_lock_acquisition_failure(undefined = _LockId, Group) ->
-rabbit_log:warning("Mirrored supervisor: could not acquire lock for group ~ts", [Group]);
+?LOG_WARNING("Mirrored supervisor: could not acquire lock for group ~ts", [Group]);
 maybe_log_lock_acquisition_failure(_, _) ->
 ok.

View File

@@ -1696,7 +1696,7 @@ maybe_warn_low_fd_limit() ->
 L when L > 1024 ->
 ok;
 L ->
-rabbit_log:warning("Available file handles: ~tp. "
+?LOG_WARNING("Available file handles: ~tp. "
 "Please consider increasing system limits", [L])
 end.
@@ -1724,7 +1724,7 @@ persist_static_configuration() ->
 MoreCreditAfter =< InitialCredit ->
 {InitialCredit, MoreCreditAfter};
 Other ->
-rabbit_log:error("Refusing to boot due to an invalid value of 'rabbit.credit_flow_default_credit'"),
+?LOG_ERROR("Refusing to boot due to an invalid value of 'rabbit.credit_flow_default_credit'"),
 throw({error, {invalid_credit_flow_default_credit_value, Other}})
 end,
 ok = persistent_term:put(credit_flow_default_credit, CreditFlowDefaultCredit),

View File

@@ -196,10 +196,10 @@ check_user_login(Username, AuthProps, Modules) ->
 %% it gives us
 case try_authenticate(Mod, Username, AuthProps) of
 {ok, ModNUser = #auth_user{username = Username2, impl = Impl}} ->
-rabbit_log:debug("User '~ts' authenticated successfully by backend ~ts", [Username2, Mod]),
+?LOG_DEBUG("User '~ts' authenticated successfully by backend ~ts", [Username2, Mod]),
 user(ModNUser, {ok, [{Mod, Impl}], []});
 Else ->
-rabbit_log:debug("User '~ts' failed authentication by backend ~ts", [Username, Mod]),
+?LOG_DEBUG("User '~ts' failed authentication by backend ~ts", [Username, Mod]),
 Else
 end;
 (_, {ok, User}) ->
@@ -209,7 +209,7 @@ check_user_login(Username, AuthProps, Modules) ->
 {refused, Username, "No modules checked '~ts'", [Username]}, Modules)
 catch
 Type:Error:Stacktrace ->
-rabbit_log:debug("User '~ts' authentication failed with ~ts:~tp:~n~tp", [Username, Type, Error, Stacktrace]),
+?LOG_DEBUG("User '~ts' authentication failed with ~ts:~tp:~n~tp", [Username, Type, Error, Stacktrace]),
 {refused, Username, "User '~ts' authentication failed with internal error. "
 "Enable debug logs to see the real error.", [Username]}
@@ -222,7 +222,7 @@ try_authenticate_and_try_authorize(ModN, ModZs0, Username, AuthProps) ->
 end,
 case try_authenticate(ModN, Username, AuthProps) of
 {ok, ModNUser = #auth_user{username = Username2}} ->
-rabbit_log:debug("User '~ts' authenticated successfully by backend ~ts", [Username2, ModN]),
+?LOG_DEBUG("User '~ts' authenticated successfully by backend ~ts", [Username2, ModN]),
 user(ModNUser, try_authorize(ModZs, Username2, AuthProps));
 Else ->
 Else
@@ -364,7 +364,7 @@ check_access(Fun, Module, ErrStr, ErrArgs, ErrName) ->
 {error, E} ->
 FullErrStr = ErrStr ++ ", backend ~ts returned an error: ~tp",
 FullErrArgs = ErrArgs ++ [Module, E],
-rabbit_log:error(FullErrStr, FullErrArgs),
+?LOG_ERROR(FullErrStr, FullErrArgs),
 rabbit_misc:protocol_error(ErrName, FullErrStr, FullErrArgs)
 end.

View File

@@ -18,6 +18,8 @@
 -module(rabbit_alarm).
+-include_lib("kernel/include/logger.hrl").
 -behaviour(gen_event).
 -export([start_link/0, start/0, stop/0, register/2, set_alarm/1,
@@ -239,7 +241,7 @@ handle_event({node_down, Node}, #alarms{alarmed_nodes = AN} = State) ->
 error -> []
 end,
 {ok, lists:foldr(fun(Source, AccState) ->
-rabbit_log:warning("~ts resource limit alarm cleared for dead node ~tp",
+?LOG_WARNING("~ts resource limit alarm cleared for dead node ~tp",
 [Source, Node]),
 maybe_alert(fun dict_unappend/3, Node, Source, false, AccState)
 end, State, AlarmsForDeadNode)};
@@ -291,7 +293,7 @@ maybe_alert(UpdateFun, Node, Source, WasAlertAdded,
 StillHasAlerts = lists:any(fun ({_Node, NodeAlerts}) -> lists:member(Source, NodeAlerts) end, dict:to_list(AN1)),
 case StillHasAlerts of
 true -> ok;
-false -> rabbit_log:warning("~ts resource limit alarm cleared across the cluster", [Source])
+false -> ?LOG_WARNING("~ts resource limit alarm cleared across the cluster", [Source])
 end,
 Alert = {WasAlertAdded, StillHasAlerts, Node},
 case node() of
@@ -327,7 +329,7 @@ internal_register(Pid, {M, F, A} = AlertMFA,
 State#alarms{alertees = NewAlertees}.
 handle_set_resource_alarm(Source, Node, State) ->
-rabbit_log:warning(
+?LOG_WARNING(
 "~ts resource limit alarm set on node ~tp.~n~n"
 "**********************************************************~n"
 "*** Publishers will be blocked until this alarm clears ***~n"
@@ -336,26 +338,26 @@ handle_set_resource_alarm(Source, Node, State) ->
 {ok, maybe_alert(fun dict_append/3, Node, Source, true, State)}.
 handle_set_alarm({file_descriptor_limit, []}, State) ->
-rabbit_log:warning(
+?LOG_WARNING(
 "file descriptor limit alarm set.~n~n"
 "********************************************************************~n"
 "*** New connections will not be accepted until this alarm clears ***~n"
 "********************************************************************~n"),
 {ok, State};
 handle_set_alarm(Alarm, State) ->
-rabbit_log:warning("alarm '~tp' set", [Alarm]),
+?LOG_WARNING("alarm '~tp' set", [Alarm]),
 {ok, State}.
 handle_clear_resource_alarm(Source, Node, State) ->
-rabbit_log:warning("~ts resource limit alarm cleared on node ~tp",
+?LOG_WARNING("~ts resource limit alarm cleared on node ~tp",
 [Source, Node]),
 {ok, maybe_alert(fun dict_unappend/3, Node, Source, false, State)}.
 handle_clear_alarm(file_descriptor_limit, State) ->
-rabbit_log:warning("file descriptor limit alarm cleared~n"),
+?LOG_WARNING("file descriptor limit alarm cleared~n"),
 {ok, State};
 handle_clear_alarm(Alarm, State) ->
-rabbit_log:warning("alarm '~tp' cleared", [Alarm]),
+?LOG_WARNING("alarm '~tp' cleared", [Alarm]),
 {ok, State}.
 is_node_alarmed(Source, Node, #alarms{alarmed_nodes = AN}) ->

View File

@@ -2,6 +2,7 @@
 -include("rabbit_amqp.hrl").
 -include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("kernel/include/logger.hrl").
 -export([handle_request/5]).
@@ -49,7 +50,7 @@ handle_request(Request, Vhost, User, ConnectionPid, PermCaches0) ->
 ConnectionPid,
 PermCaches0)
 catch throw:{?MODULE, StatusCode0, Explanation} ->
-rabbit_log:warning("request ~ts ~ts failed: ~ts",
+?LOG_WARNING("request ~ts ~ts failed: ~ts",
 [HttpMethod, HttpRequestTarget, Explanation]),
 {StatusCode0, {utf8, Explanation}, PermCaches0}
 end,

View File

@@ -82,6 +82,7 @@
 -include_lib("rabbit_common/include/rabbit.hrl").
 -include_lib("stdlib/include/qlc.hrl").
 -include("amqqueue.hrl").
+-include_lib("kernel/include/logger.hrl").
 -define(INTEGER_ARG_TYPES, [byte, short, signedint, long,
 unsignedbyte, unsignedshort, unsignedint]).
@@ -425,7 +426,7 @@ rebalance(Type, VhostSpec, QueueSpec) ->
 maybe_rebalance(get_rebalance_lock(self()), Type, VhostSpec, QueueSpec).
 maybe_rebalance({true, Id}, Type, VhostSpec, QueueSpec) ->
-rabbit_log:info("Starting queue rebalance operation: '~ts' for vhosts matching '~ts' and queues matching '~ts'",
+?LOG_INFO("Starting queue rebalance operation: '~ts' for vhosts matching '~ts' and queues matching '~ts'",
 [Type, VhostSpec, QueueSpec]),
 Running = rabbit_maintenance:filter_out_drained_nodes_consistent_read(rabbit_nodes:list_running()),
 NumRunning = length(Running),
@@ -443,10 +444,10 @@ maybe_rebalance({true, Id}, Type, VhostSpec, QueueSpec) ->
 MaxQueuesDesired = (NumToRebalance div NumRunning) + Rem,
 Result = iterative_rebalance(ByNode, MaxQueuesDesired),
 global:del_lock(Id),
-rabbit_log:info("Finished queue rebalance operation"),
+?LOG_INFO("Finished queue rebalance operation"),
 Result;
 maybe_rebalance(false, _Type, _VhostSpec, _QueueSpec) ->
-rabbit_log:warning("Queue rebalance operation is in progress, please wait."),
+?LOG_WARNING("Queue rebalance operation is in progress, please wait."),
 {error, rebalance_in_progress}.
 %% Stream queues don't yet support rebalance
@@ -476,7 +477,7 @@ is_match(Subj, RegEx) ->
 iterative_rebalance(ByNode, MaxQueuesDesired) ->
 case maybe_migrate(ByNode, MaxQueuesDesired) of
 {ok, Summary} ->
-rabbit_log:info("All queue leaders are balanced"),
+?LOG_INFO("All queue leaders are balanced"),
 {ok, Summary};
 {migrated, Other} ->
 iterative_rebalance(Other, MaxQueuesDesired);
@@ -507,23 +508,23 @@ maybe_migrate(ByNode, MaxQueuesDesired, [N | Nodes]) ->
 {not_migrated, update_not_migrated_queue(N, Queue, Queues, ByNode)};
 _ ->
 [{Length, Destination} | _] = sort_by_number_of_queues(Candidates, ByNode),
-rabbit_log:info("Migrating queue ~tp from node ~tp with ~tp queues to node ~tp with ~tp queues",
+?LOG_INFO("Migrating queue ~tp from node ~tp with ~tp queues to node ~tp with ~tp queues",
 [Name, N, length(All), Destination, Length]),
 case Module:transfer_leadership(Q, Destination) of
 {migrated, NewNode} ->
-rabbit_log:info("Queue ~tp migrated to ~tp", [Name, NewNode]),
+?LOG_INFO("Queue ~tp migrated to ~tp", [Name, NewNode]),
 {migrated, update_migrated_queue(NewNode, N, Queue, Queues, ByNode)};
 {not_migrated, Reason} ->
-rabbit_log:warning("Error migrating queue ~tp: ~tp", [Name, Reason]),
+?LOG_WARNING("Error migrating queue ~tp: ~tp", [Name, Reason]),
 {not_migrated, update_not_migrated_queue(N, Queue, Queues, ByNode)}
 end
 end;
 [{_, _, true} | _] = All when length(All) > MaxQueuesDesired ->
-rabbit_log:warning("Node ~tp contains ~tp queues, but all have already migrated. "
+?LOG_WARNING("Node ~tp contains ~tp queues, but all have already migrated. "
 "Do nothing", [N, length(All)]),
 maybe_migrate(ByNode, MaxQueuesDesired, Nodes);
 All ->
-rabbit_log:debug("Node ~tp only contains ~tp queues, do nothing",
+?LOG_DEBUG("Node ~tp only contains ~tp queues, do nothing",
 [N, length(All)]),
 maybe_migrate(ByNode, MaxQueuesDesired, Nodes)
 end.
@@ -611,7 +612,7 @@ retry_wait(Q, F, E, RetriesLeft) ->
 %% The old check would have crashed here,
 %% instead, log it and run the exit fun. absent & alive is weird,
 %% but better than crashing with badmatch,true
-rabbit_log:debug("Unexpected alive queue process ~tp", [QPid]),
+?LOG_DEBUG("Unexpected alive queue process ~tp", [QPid]),
 E({absent, Q, alive});
 false ->
 ok % Expected result
@@ -1881,7 +1882,7 @@ internal_delete(Queue, ActingUser, Reason) ->
 %% TODO this is used by `rabbit_mnesia:remove_node_if_mnesia_running`
 %% Does it make any sense once mnesia is not used/removed?
 forget_all_durable(Node) ->
-rabbit_log:info("Will remove all classic queues from node ~ts. The node is likely being removed from the cluster.", [Node]),
+?LOG_INFO("Will remove all classic queues from node ~ts. The node is likely being removed from the cluster.", [Node]),
 UpdateFun = fun(Q) ->
 forget_node_for_queue(Q)
 end,
@@ -1949,7 +1950,7 @@ on_node_down(Node) ->
 %% `rabbit_khepri:init/0': we also try this deletion when the node
 %% restarts - a time that the cluster is very likely to have a
 %% majority - to ensure these records are deleted.
-rabbit_log:warning("transient queues for node '~ts' could not be "
+?LOG_WARNING("transient queues for node '~ts' could not be "
 "deleted because of a timeout. These queues "
 "will be removed when node '~ts' restarts or "
 "is removed from the cluster.", [Node, Node]),
@@ -1970,7 +1971,7 @@ delete_transient_queues_on_node(Node) ->
 {QueueNames, Deletions} when is_list(QueueNames) ->
 case length(QueueNames) of
 0 -> ok;
-N -> rabbit_log:info("~b transient queues from node '~ts' "
+N -> ?LOG_INFO("~b transient queues from node '~ts' "
 "deleted in ~fs",
 [N, Node, Time / 1_000_000])
 end,

View File

@@ -8,6 +8,7 @@
 -module(rabbit_amqqueue_process).
 -include_lib("rabbit_common/include/rabbit.hrl").
 -include("amqqueue.hrl").
+-include_lib("kernel/include/logger.hrl").
 -behaviour(gen_server2).
@@ -150,7 +151,7 @@ init({Q, Marker}) ->
 %% restart
 QueueName = amqqueue:get_name(Q),
 {ok, Q1} = rabbit_amqqueue:lookup(QueueName),
-rabbit_log:error("Restarting crashed ~ts.", [rabbit_misc:rs(QueueName)]),
+?LOG_ERROR("Restarting crashed ~ts.", [rabbit_misc:rs(QueueName)]),
 gen_server2:cast(self(), init),
 init(Q1)
 end;
@@ -1609,7 +1610,7 @@ handle_cast({force_event_refresh, Ref},
 rabbit_event:notify(queue_created, queue_created_infos(State), Ref),
 QName = qname(State),
 AllConsumers = rabbit_queue_consumers:all(Consumers),
-rabbit_log:debug("Queue ~ts forced to re-emit events, consumers: ~tp", [rabbit_misc:rs(QName), AllConsumers]),
+?LOG_DEBUG("Queue ~ts forced to re-emit events, consumers: ~tp", [rabbit_misc:rs(QName), AllConsumers]),
 [emit_consumer_created(
 Ch, CTag, ActiveOrExclusive, AckRequired, QName, Prefetch,
 Args, Ref, ActingUser) ||

View File

@@ -16,6 +16,7 @@
 -export([init/1]).
 -include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("kernel/include/logger.hrl").
 -define(SERVER, ?MODULE).
@@ -74,7 +75,7 @@ start_for_vhost(VHost) ->
 %% we can get here if a vhost is added and removed concurrently
 %% e.g. some integration tests do it
 {error, {no_such_vhost, VHost}} ->
-rabbit_log:error("Failed to start a queue process supervisor for vhost ~ts: vhost no longer exists!",
+?LOG_ERROR("Failed to start a queue process supervisor for vhost ~ts: vhost no longer exists!",
 [VHost]),
 {error, {no_such_vhost, VHost}}
 end.
@@ -87,7 +88,7 @@ stop_for_vhost(VHost) ->
 ok = supervisor:delete_child(VHostSup, rabbit_amqqueue_sup_sup);
 %% see start/1
 {error, {no_such_vhost, VHost}} ->
-rabbit_log:error("Failed to stop a queue process supervisor for vhost ~ts: vhost no longer exists!",
+?LOG_ERROR("Failed to stop a queue process supervisor for vhost ~ts: vhost no longer exists!",
 [VHost]),
 ok
 end.

View File

@ -7,6 +7,7 @@
-module(rabbit_auth_backend_internal). -module(rabbit_auth_backend_internal).
-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-behaviour(rabbit_authn_backend). -behaviour(rabbit_authn_backend).
-behaviour(rabbit_authz_backend). -behaviour(rabbit_authz_backend).
@ -204,7 +205,7 @@ validate_and_alternate_credentials(Username, Password, ActingUser, Fun) ->
ok -> ok ->
Fun(Username, Password, ActingUser); Fun(Username, Password, ActingUser);
{error, Err} -> {error, Err} ->
rabbit_log:error("Credential validation for user '~ts' failed!", [Username]), ?LOG_ERROR("Credential validation for user '~ts' failed!", [Username]),
{error, Err} {error, Err}
end. end.
@ -238,7 +239,7 @@ add_user_sans_validation(Limits, Tags) ->
end. end.
add_user_sans_validation(Username, Password, ActingUser, Limits, Tags) -> add_user_sans_validation(Username, Password, ActingUser, Limits, Tags) ->
rabbit_log:debug("Asked to create a new user '~ts', password length in bytes: ~tp", [Username, bit_size(Password)]), ?LOG_DEBUG("Asked to create a new user '~ts', password length in bytes: ~tp", [Username, bit_size(Password)]),
%% hash_password will pick the hashing function configured for us %% hash_password will pick the hashing function configured for us
%% but we also need to store a hint as part of the record, so we %% but we also need to store a hint as part of the record, so we
%% retrieve it here one more time %% retrieve it here one more time
@ -254,7 +255,7 @@ add_user_sans_validation(Username, Password, ActingUser, Limits, Tags) ->
add_user_sans_validation_in(Username, User, ConvertedTags, Limits, ActingUser). add_user_sans_validation_in(Username, User, ConvertedTags, Limits, ActingUser).
add_user_sans_validation(Username, PasswordHash, HashingMod, Tags, Limits, ActingUser) -> add_user_sans_validation(Username, PasswordHash, HashingMod, Tags, Limits, ActingUser) ->
rabbit_log:debug("Asked to create a new user '~ts' with password hash", [Username]), ?LOG_DEBUG("Asked to create a new user '~ts' with password hash", [Username]),
ConvertedTags = [rabbit_data_coercion:to_atom(I) || I <- Tags], ConvertedTags = [rabbit_data_coercion:to_atom(I) || I <- Tags],
User0 = internal_user:create_user(Username, PasswordHash, HashingMod), User0 = internal_user:create_user(Username, PasswordHash, HashingMod),
User1 = internal_user:set_tags( User1 = internal_user:set_tags(
@ -269,7 +270,7 @@ add_user_sans_validation(Username, PasswordHash, HashingMod, Tags, Limits, Actin
add_user_sans_validation_in(Username, User, ConvertedTags, Limits, ActingUser) -> add_user_sans_validation_in(Username, User, ConvertedTags, Limits, ActingUser) ->
try try
R = rabbit_db_user:create(User), R = rabbit_db_user:create(User),
rabbit_log:info("Created user '~ts'", [Username]), ?LOG_INFO("Created user '~ts'", [Username]),
rabbit_event:notify(user_created, [{name, Username}, rabbit_event:notify(user_created, [{name, Username},
{user_who_performed_action, ActingUser}]), {user_who_performed_action, ActingUser}]),
case ConvertedTags of case ConvertedTags of
@ -283,21 +284,21 @@ add_user_sans_validation_in(Username, User, ConvertedTags, Limits, ActingUser) -
R R
catch catch
throw:{error, {user_already_exists, _}} = Error -> throw:{error, {user_already_exists, _}} = Error ->
rabbit_log:warning("Failed to add user '~ts': the user already exists", [Username]), ?LOG_WARNING("Failed to add user '~ts': the user already exists", [Username]),
throw(Error); throw(Error);
Class:Error:Stacktrace -> Class:Error:Stacktrace ->
rabbit_log:warning("Failed to add user '~ts': ~tp", [Username, Error]), ?LOG_WARNING("Failed to add user '~ts': ~tp", [Username, Error]),
erlang:raise(Class, Error, Stacktrace) erlang:raise(Class, Error, Stacktrace)
end . end .
-spec delete_user(rabbit_types:username(), rabbit_types:username()) -> 'ok'. -spec delete_user(rabbit_types:username(), rabbit_types:username()) -> 'ok'.
delete_user(Username, ActingUser) -> delete_user(Username, ActingUser) ->
rabbit_log:debug("Asked to delete user '~ts'", [Username]), ?LOG_DEBUG("Asked to delete user '~ts'", [Username]),
try try
case rabbit_db_user:delete(Username) of case rabbit_db_user:delete(Username) of
true -> true ->
rabbit_log:info("Deleted user '~ts'", [Username]), ?LOG_INFO("Deleted user '~ts'", [Username]),
rabbit_event:notify(user_deleted, rabbit_event:notify(user_deleted,
[{name, Username}, [{name, Username},
{user_who_performed_action, ActingUser}]), {user_who_performed_action, ActingUser}]),
@ -305,12 +306,12 @@ delete_user(Username, ActingUser) ->
false -> false ->
ok; ok;
Error0 -> Error0 ->
rabbit_log:info("Failed to delete user '~ts': ~tp", [Username, Error0]), ?LOG_INFO("Failed to delete user '~ts': ~tp", [Username, Error0]),
throw(Error0) throw(Error0)
end end
catch catch
Class:Error:Stacktrace -> Class:Error:Stacktrace ->
rabbit_log:warning("Failed to delete user '~ts': ~tp", [Username, Error]), ?LOG_WARNING("Failed to delete user '~ts': ~tp", [Username, Error]),
erlang:raise(Class, Error, Stacktrace) erlang:raise(Class, Error, Stacktrace)
end . end .
@ -342,23 +343,23 @@ change_password(Username, Password, ActingUser) ->
change_password_sans_validation(Username, Password, ActingUser) -> change_password_sans_validation(Username, Password, ActingUser) ->
try try
rabbit_log:debug("Asked to change password of user '~ts', new password length in bytes: ~tp", [Username, bit_size(Password)]), ?LOG_DEBUG("Asked to change password of user '~ts', new password length in bytes: ~tp", [Username, bit_size(Password)]),
HashingAlgorithm = rabbit_password:hashing_mod(), HashingAlgorithm = rabbit_password:hashing_mod(),
R = change_password_hash(Username, R = change_password_hash(Username,
hash_password(rabbit_password:hashing_mod(), hash_password(rabbit_password:hashing_mod(),
Password), Password),
HashingAlgorithm), HashingAlgorithm),
rabbit_log:info("Successfully changed password for user '~ts'", [Username]), ?LOG_INFO("Successfully changed password for user '~ts'", [Username]),
rabbit_event:notify(user_password_changed, rabbit_event:notify(user_password_changed,
[{name, Username}, [{name, Username},
{user_who_performed_action, ActingUser}]), {user_who_performed_action, ActingUser}]),
R R
catch catch
throw:{error, {no_such_user, _}} = Error -> throw:{error, {no_such_user, _}} = Error ->
rabbit_log:warning("Failed to change password for user '~ts': the user does not exist", [Username]), ?LOG_WARNING("Failed to change password for user '~ts': the user does not exist", [Username]),
throw(Error); throw(Error);
Class:Error:Stacktrace -> Class:Error:Stacktrace ->
rabbit_log:warning("Failed to change password for user '~ts': ~tp", [Username, Error]), ?LOG_WARNING("Failed to change password for user '~ts': ~tp", [Username, Error]),
erlang:raise(Class, Error, Stacktrace) erlang:raise(Class, Error, Stacktrace)
end. end.
@ -369,10 +370,10 @@ update_user(Username, Password, Tags, Limits, ActingUser) ->
update_user_sans_validation(Tags, Limits) -> update_user_sans_validation(Tags, Limits) ->
fun(Username, Password, ActingUser) -> fun(Username, Password, ActingUser) ->
try try
rabbit_log:debug("Asked to change password of user '~ts', new password length in bytes: ~tp", [Username, bit_size(Password)]), ?LOG_DEBUG("Asked to change password of user '~ts', new password length in bytes: ~tp", [Username, bit_size(Password)]),
HashingAlgorithm = rabbit_password:hashing_mod(), HashingAlgorithm = rabbit_password:hashing_mod(),
rabbit_log:debug("Asked to set user tags for user '~ts' to ~tp", [Username, Tags]), ?LOG_DEBUG("Asked to set user tags for user '~ts' to ~tp", [Username, Tags]),
ConvertedTags = [rabbit_data_coercion:to_atom(I) || I <- Tags], ConvertedTags = [rabbit_data_coercion:to_atom(I) || I <- Tags],
R = update_user_with_hash(Username, R = update_user_with_hash(Username,
@ -381,7 +382,7 @@ update_user_sans_validation(Tags, Limits) ->
HashingAlgorithm, HashingAlgorithm,
ConvertedTags, ConvertedTags,
Limits), Limits),
rabbit_log:info("Successfully changed password for user '~ts'", [Username]), ?LOG_INFO("Successfully changed password for user '~ts'", [Username]),
rabbit_event:notify(user_password_changed, rabbit_event:notify(user_password_changed,
[{name, Username}, [{name, Username},
{user_who_performed_action, ActingUser}]), {user_who_performed_action, ActingUser}]),
@ -390,10 +391,10 @@ update_user_sans_validation(Tags, Limits) ->
R R
catch catch
throw:{error, {no_such_user, _}} = Error -> throw:{error, {no_such_user, _}} = Error ->
rabbit_log:warning("Failed to change password for user '~ts': the user does not exist", [Username]), ?LOG_WARNING("Failed to change password for user '~ts': the user does not exist", [Username]),
throw(Error); throw(Error);
Class:Error:Stacktrace -> Class:Error:Stacktrace ->
rabbit_log:warning("Failed to change password for user '~ts': ~tp", [Username, Error]), ?LOG_WARNING("Failed to change password for user '~ts': ~tp", [Username, Error]),
erlang:raise(Class, Error, Stacktrace) erlang:raise(Class, Error, Stacktrace)
end end
end. end.
@ -401,7 +402,7 @@ update_user_sans_validation(Tags, Limits) ->
-spec clear_password(rabbit_types:username(), rabbit_types:username()) -> 'ok'. -spec clear_password(rabbit_types:username(), rabbit_types:username()) -> 'ok'.
clear_password(Username, ActingUser) -> clear_password(Username, ActingUser) ->
rabbit_log:info("Clearing password for user '~ts'", [Username]), ?LOG_INFO("Clearing password for user '~ts'", [Username]),
R = change_password_hash(Username, <<"">>), R = change_password_hash(Username, <<"">>),
rabbit_event:notify(user_password_cleared, rabbit_event:notify(user_password_cleared,
[{name, Username}, [{name, Username},
@ -443,7 +444,7 @@ update_user_with_hash(Username, PasswordHash, HashingAlgorithm, ConvertedTags, L
set_tags(Username, Tags, ActingUser) -> set_tags(Username, Tags, ActingUser) ->
ConvertedTags = [rabbit_data_coercion:to_atom(I) || I <- Tags], ConvertedTags = [rabbit_data_coercion:to_atom(I) || I <- Tags],
rabbit_log:debug("Asked to set user tags for user '~ts' to ~tp", [Username, ConvertedTags]), ?LOG_DEBUG("Asked to set user tags for user '~ts' to ~tp", [Username, ConvertedTags]),
try try
R = rabbit_db_user:update(Username, fun(User) -> R = rabbit_db_user:update(Username, fun(User) ->
internal_user:set_tags(User, ConvertedTags) internal_user:set_tags(User, ConvertedTags)
@ -452,15 +453,15 @@ set_tags(Username, Tags, ActingUser) ->
R R
catch catch
throw:{error, {no_such_user, _}} = Error -> throw:{error, {no_such_user, _}} = Error ->
rabbit_log:warning("Failed to set tags for user '~ts': the user does not exist", [Username]), ?LOG_WARNING("Failed to set tags for user '~ts': the user does not exist", [Username]),
throw(Error); throw(Error);
Class:Error:Stacktrace -> Class:Error:Stacktrace ->
rabbit_log:warning("Failed to set tags for user '~ts': ~tp", [Username, Error]), ?LOG_WARNING("Failed to set tags for user '~ts': ~tp", [Username, Error]),
erlang:raise(Class, Error, Stacktrace) erlang:raise(Class, Error, Stacktrace)
end . end .
notify_user_tags_set(Username, ConvertedTags, ActingUser) -> notify_user_tags_set(Username, ConvertedTags, ActingUser) ->
rabbit_log:info("Successfully set user tags for user '~ts' to ~tp", [Username, ConvertedTags]), ?LOG_INFO("Successfully set user tags for user '~ts' to ~tp", [Username, ConvertedTags]),
rabbit_event:notify(user_tags_set, [{name, Username}, {tags, ConvertedTags}, rabbit_event:notify(user_tags_set, [{name, Username}, {tags, ConvertedTags},
{user_who_performed_action, ActingUser}]). {user_who_performed_action, ActingUser}]).
@ -470,7 +471,7 @@ notify_user_tags_set(Username, ConvertedTags, ActingUser) ->
'ok'. 'ok'.
set_permissions(Username, VirtualHost, ConfigurePerm, WritePerm, ReadPerm, ActingUser) -> set_permissions(Username, VirtualHost, ConfigurePerm, WritePerm, ReadPerm, ActingUser) ->
rabbit_log:debug("Asked to set permissions for user " ?LOG_DEBUG("Asked to set permissions for user "
"'~ts' in virtual host '~ts' to '~ts', '~ts', '~ts'", "'~ts' in virtual host '~ts' to '~ts', '~ts', '~ts'",
[Username, VirtualHost, ConfigurePerm, WritePerm, ReadPerm]), [Username, VirtualHost, ConfigurePerm, WritePerm, ReadPerm]),
_ = lists:map( _ = lists:map(
@ -479,7 +480,7 @@ set_permissions(Username, VirtualHost, ConfigurePerm, WritePerm, ReadPerm, Actin
case re:compile(Regexp) of case re:compile(Regexp) of
{ok, _} -> ok; {ok, _} -> ok;
{error, Reason} -> {error, Reason} ->
rabbit_log:warning("Failed to set permissions for user '~ts' in virtual host '~ts': " ?LOG_WARNING("Failed to set permissions for user '~ts' in virtual host '~ts': "
"regular expression '~ts' is invalid", "regular expression '~ts' is invalid",
[Username, VirtualHost, RegexpBin]), [Username, VirtualHost, RegexpBin]),
throw({error, {invalid_regexp, Regexp, Reason}}) throw({error, {invalid_regexp, Regexp, Reason}})
@ -495,7 +496,7 @@ set_permissions(Username, VirtualHost, ConfigurePerm, WritePerm, ReadPerm, Actin
write = WritePerm, write = WritePerm,
read = ReadPerm}}, read = ReadPerm}},
R = rabbit_db_user:set_user_permissions(UserPermission), R = rabbit_db_user:set_user_permissions(UserPermission),
rabbit_log:info("Successfully set permissions for user " ?LOG_INFO("Successfully set permissions for user "
"'~ts' in virtual host '~ts' to '~ts', '~ts', '~ts'", "'~ts' in virtual host '~ts' to '~ts', '~ts', '~ts'",
[Username, VirtualHost, ConfigurePerm, WritePerm, ReadPerm]), [Username, VirtualHost, ConfigurePerm, WritePerm, ReadPerm]),
rabbit_event:notify(permission_created, [{user, Username}, rabbit_event:notify(permission_created, [{user, Username},
@ -507,15 +508,15 @@ set_permissions(Username, VirtualHost, ConfigurePerm, WritePerm, ReadPerm, Actin
R R
catch catch
throw:{error, {no_such_vhost, _}} = Error -> throw:{error, {no_such_vhost, _}} = Error ->
rabbit_log:warning("Failed to set permissions for user '~ts': virtual host '~ts' does not exist", ?LOG_WARNING("Failed to set permissions for user '~ts': virtual host '~ts' does not exist",
[Username, VirtualHost]), [Username, VirtualHost]),
throw(Error); throw(Error);
throw:{error, {no_such_user, _}} = Error -> throw:{error, {no_such_user, _}} = Error ->
rabbit_log:warning("Failed to set permissions for user '~ts': the user does not exist", ?LOG_WARNING("Failed to set permissions for user '~ts': the user does not exist",
[Username]), [Username]),
throw(Error); throw(Error);
Class:Error:Stacktrace -> Class:Error:Stacktrace ->
rabbit_log:warning("Failed to set permissions for user '~ts' in virtual host '~ts': ~tp", ?LOG_WARNING("Failed to set permissions for user '~ts' in virtual host '~ts': ~tp",
[Username, VirtualHost, Error]), [Username, VirtualHost, Error]),
erlang:raise(Class, Error, Stacktrace) erlang:raise(Class, Error, Stacktrace)
end. end.
@ -524,11 +525,11 @@ set_permissions(Username, VirtualHost, ConfigurePerm, WritePerm, ReadPerm, Actin
(rabbit_types:username(), rabbit_types:vhost(), rabbit_types:username()) -> 'ok'. (rabbit_types:username(), rabbit_types:vhost(), rabbit_types:username()) -> 'ok'.
clear_permissions(Username, VirtualHost, ActingUser) -> clear_permissions(Username, VirtualHost, ActingUser) ->
rabbit_log:debug("Asked to clear permissions for user '~ts' in virtual host '~ts'", ?LOG_DEBUG("Asked to clear permissions for user '~ts' in virtual host '~ts'",
[Username, VirtualHost]), [Username, VirtualHost]),
try try
R = rabbit_db_user:clear_user_permissions(Username, VirtualHost), R = rabbit_db_user:clear_user_permissions(Username, VirtualHost),
rabbit_log:info("Successfully cleared permissions for user '~ts' in virtual host '~ts'", ?LOG_INFO("Successfully cleared permissions for user '~ts' in virtual host '~ts'",
[Username, VirtualHost]), [Username, VirtualHost]),
rabbit_event:notify(permission_deleted, [{user, Username}, rabbit_event:notify(permission_deleted, [{user, Username},
{vhost, VirtualHost}, {vhost, VirtualHost},
@ -536,7 +537,7 @@ clear_permissions(Username, VirtualHost, ActingUser) ->
R R
catch catch
Class:Error:Stacktrace -> Class:Error:Stacktrace ->
rabbit_log:warning("Failed to clear permissions for user '~ts' in virtual host '~ts': ~tp", ?LOG_WARNING("Failed to clear permissions for user '~ts' in virtual host '~ts': ~tp",
[Username, VirtualHost, Error]), [Username, VirtualHost, Error]),
erlang:raise(Class, Error, Stacktrace) erlang:raise(Class, Error, Stacktrace)
end. end.
@ -577,7 +578,7 @@ set_permissions_globally(Username, ConfigurePerm, WritePerm, ReadPerm, ActingUse
ok. ok.
set_topic_permissions(Username, VirtualHost, Exchange, WritePerm, ReadPerm, ActingUser) -> set_topic_permissions(Username, VirtualHost, Exchange, WritePerm, ReadPerm, ActingUser) ->
rabbit_log:debug("Asked to set topic permissions on exchange '~ts' for " ?LOG_DEBUG("Asked to set topic permissions on exchange '~ts' for "
"user '~ts' in virtual host '~ts' to '~ts', '~ts'", "user '~ts' in virtual host '~ts' to '~ts', '~ts'",
[Exchange, Username, VirtualHost, WritePerm, ReadPerm]), [Exchange, Username, VirtualHost, WritePerm, ReadPerm]),
WritePermRegex = rabbit_data_coercion:to_binary(WritePerm), WritePermRegex = rabbit_data_coercion:to_binary(WritePerm),
@ -587,7 +588,7 @@ set_topic_permissions(Username, VirtualHost, Exchange, WritePerm, ReadPerm, Acti
case re:compile(RegexpBin) of case re:compile(RegexpBin) of
{ok, _} -> ok; {ok, _} -> ok;
{error, Reason} -> {error, Reason} ->
rabbit_log:warning("Failed to set topic permissions on exchange '~ts' for user " ?LOG_WARNING("Failed to set topic permissions on exchange '~ts' for user "
"'~ts' in virtual host '~ts': regular expression '~ts' is invalid", "'~ts' in virtual host '~ts': regular expression '~ts' is invalid",
[Exchange, Username, VirtualHost, RegexpBin]), [Exchange, Username, VirtualHost, RegexpBin]),
throw({error, {invalid_regexp, RegexpBin, Reason}}) throw({error, {invalid_regexp, RegexpBin, Reason}})
@ -607,7 +608,7 @@ set_topic_permissions(Username, VirtualHost, Exchange, WritePerm, ReadPerm, Acti
} }
}, },
R = rabbit_db_user:set_topic_permissions(TopicPermission), R = rabbit_db_user:set_topic_permissions(TopicPermission),
rabbit_log:info("Successfully set topic permissions on exchange '~ts' for " ?LOG_INFO("Successfully set topic permissions on exchange '~ts' for "
"user '~ts' in virtual host '~ts' to '~ts', '~ts'", "user '~ts' in virtual host '~ts' to '~ts', '~ts'",
[Exchange, Username, VirtualHost, WritePerm, ReadPerm]), [Exchange, Username, VirtualHost, WritePerm, ReadPerm]),
rabbit_event:notify(topic_permission_created, [ rabbit_event:notify(topic_permission_created, [
@ -620,25 +621,25 @@ set_topic_permissions(Username, VirtualHost, Exchange, WritePerm, ReadPerm, Acti
R R
catch catch
throw:{error, {no_such_vhost, _}} = Error -> throw:{error, {no_such_vhost, _}} = Error ->
rabbit_log:warning("Failed to set topic permissions on exchange '~ts' for user '~ts': virtual host '~ts' does not exist.", ?LOG_WARNING("Failed to set topic permissions on exchange '~ts' for user '~ts': virtual host '~ts' does not exist.",
[Exchange, Username, VirtualHost]), [Exchange, Username, VirtualHost]),
throw(Error); throw(Error);
throw:{error, {no_such_user, _}} = Error -> throw:{error, {no_such_user, _}} = Error ->
rabbit_log:warning("Failed to set topic permissions on exchange '~ts' for user '~ts': the user does not exist.", ?LOG_WARNING("Failed to set topic permissions on exchange '~ts' for user '~ts': the user does not exist.",
[Exchange, Username]), [Exchange, Username]),
throw(Error); throw(Error);
Class:Error:Stacktrace -> Class:Error:Stacktrace ->
rabbit_log:warning("Failed to set topic permissions on exchange '~ts' for user '~ts' in virtual host '~ts': ~tp.", ?LOG_WARNING("Failed to set topic permissions on exchange '~ts' for user '~ts' in virtual host '~ts': ~tp.",
[Exchange, Username, VirtualHost, Error]), [Exchange, Username, VirtualHost, Error]),
erlang:raise(Class, Error, Stacktrace) erlang:raise(Class, Error, Stacktrace)
end. end.
clear_topic_permissions(Username, VirtualHost, ActingUser) -> clear_topic_permissions(Username, VirtualHost, ActingUser) ->
rabbit_log:debug("Asked to clear topic permissions for user '~ts' in virtual host '~ts'", ?LOG_DEBUG("Asked to clear topic permissions for user '~ts' in virtual host '~ts'",
[Username, VirtualHost]), [Username, VirtualHost]),
try try
R = rabbit_db_user:clear_topic_permissions(Username, VirtualHost, '_'), R = rabbit_db_user:clear_topic_permissions(Username, VirtualHost, '_'),
rabbit_log:info("Successfully cleared topic permissions for user '~ts' in virtual host '~ts'", ?LOG_INFO("Successfully cleared topic permissions for user '~ts' in virtual host '~ts'",
[Username, VirtualHost]), [Username, VirtualHost]),
rabbit_event:notify(topic_permission_deleted, [{user, Username}, rabbit_event:notify(topic_permission_deleted, [{user, Username},
{vhost, VirtualHost}, {vhost, VirtualHost},
@ -646,18 +647,18 @@ clear_topic_permissions(Username, VirtualHost, ActingUser) ->
R R
catch catch
Class:Error:Stacktrace -> Class:Error:Stacktrace ->
rabbit_log:warning("Failed to clear topic permissions for user '~ts' in virtual host '~ts': ~tp", ?LOG_WARNING("Failed to clear topic permissions for user '~ts' in virtual host '~ts': ~tp",
[Username, VirtualHost, Error]), [Username, VirtualHost, Error]),
erlang:raise(Class, Error, Stacktrace) erlang:raise(Class, Error, Stacktrace)
end. end.
clear_topic_permissions(Username, VirtualHost, Exchange, ActingUser) -> clear_topic_permissions(Username, VirtualHost, Exchange, ActingUser) ->
rabbit_log:debug("Asked to clear topic permissions on exchange '~ts' for user '~ts' in virtual host '~ts'", ?LOG_DEBUG("Asked to clear topic permissions on exchange '~ts' for user '~ts' in virtual host '~ts'",
[Exchange, Username, VirtualHost]), [Exchange, Username, VirtualHost]),
try try
R = rabbit_db_user:clear_topic_permissions( R = rabbit_db_user:clear_topic_permissions(
Username, VirtualHost, Exchange), Username, VirtualHost, Exchange),
rabbit_log:info("Successfully cleared topic permissions on exchange '~ts' for user '~ts' in virtual host '~ts'", ?LOG_INFO("Successfully cleared topic permissions on exchange '~ts' for user '~ts' in virtual host '~ts'",
[Exchange, Username, VirtualHost]), [Exchange, Username, VirtualHost]),
rabbit_event:notify(topic_permission_deleted, [{user, Username}, rabbit_event:notify(topic_permission_deleted, [{user, Username},
{vhost, VirtualHost}, {vhost, VirtualHost},
@ -665,7 +666,7 @@ clear_topic_permissions(Username, VirtualHost, Exchange, ActingUser) ->
R R
catch catch
Class:Error:Stacktrace -> Class:Error:Stacktrace ->
rabbit_log:warning("Failed to clear topic permissions on exchange '~ts' for user '~ts' in virtual host '~ts': ~tp", ?LOG_WARNING("Failed to clear topic permissions on exchange '~ts' for user '~ts' in virtual host '~ts': ~tp",
[Exchange, Username, VirtualHost, Error]), [Exchange, Username, VirtualHost, Error]),
erlang:raise(Class, Error, Stacktrace) erlang:raise(Class, Error, Stacktrace)
end. end.
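The hunks above, like the remaining files in this commit, apply one mechanical change: each module gains an -include_lib("kernel/include/logger.hrl"). line and every rabbit_log:LEVEL call becomes the corresponding ?LOG_LEVEL macro with the same format string and arguments. A minimal sketch of the resulting shape — the module name and message below are invented for illustration, not taken from this diff:

    -module(log_macro_sketch).
    -include_lib("kernel/include/logger.hrl").
    -export([clear_thing/2]).

    %% Before: rabbit_log:warning("Failed to clear thing '~ts' in '~ts'", [Name, VHost]).
    %% After: the stock OTP logger macro, which also attaches module/function/line
    %% metadata to the log event.
    clear_thing(Name, VHost) ->
        ?LOG_WARNING("Failed to clear thing '~ts' in '~ts'", [Name, VHost]).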

View File

@ -7,6 +7,9 @@
-module(rabbit_autoheal). -module(rabbit_autoheal).
-include_lib("kernel/include/logger.hrl").
-export([init/0, enabled/0, maybe_start/1, rabbit_down/2, node_down/2, -export([init/0, enabled/0, maybe_start/1, rabbit_down/2, node_down/2,
handle_msg/3, process_down/2]). handle_msg/3, process_down/2]).
@ -117,7 +120,7 @@ init() ->
ok = application:unset_env(rabbit, ?AUTOHEAL_STATE_AFTER_RESTART), ok = application:unset_env(rabbit, ?AUTOHEAL_STATE_AFTER_RESTART),
case State of case State of
{leader_waiting, Winner, _} -> {leader_waiting, Winner, _} ->
rabbit_log:info( ?LOG_INFO(
"Autoheal: in progress, requesting report from ~tp", [Winner]), "Autoheal: in progress, requesting report from ~tp", [Winner]),
_ = send(Winner, report_autoheal_status), _ = send(Winner, report_autoheal_status),
ok; ok;
@ -130,7 +133,7 @@ maybe_start(not_healing) ->
case enabled() of case enabled() of
true -> Leader = leader(), true -> Leader = leader(),
_ = send(Leader, {request_start, node()}), _ = send(Leader, {request_start, node()}),
rabbit_log:info("Autoheal request sent to ~tp", [Leader]), ?LOG_INFO("Autoheal request sent to ~tp", [Leader]),
not_healing; not_healing;
false -> not_healing false -> not_healing
end; end;
@ -151,7 +154,7 @@ leader() ->
%% This is the winner receiving its last notification that a node has %% This is the winner receiving its last notification that a node has
%% stopped - all nodes can now start again %% stopped - all nodes can now start again
rabbit_down(Node, {winner_waiting, [Node], Notify}) -> rabbit_down(Node, {winner_waiting, [Node], Notify}) ->
rabbit_log:info("Autoheal: final node has stopped, starting...",[]), ?LOG_INFO("Autoheal: final node has stopped, starting...",[]),
winner_finish(Notify); winner_finish(Notify);
rabbit_down(Node, {winner_waiting, WaitFor, Notify}) -> rabbit_down(Node, {winner_waiting, WaitFor, Notify}) ->
@ -174,24 +177,24 @@ node_down(Node, {winner_waiting, _, Notify}) ->
node_down(Node, {leader_waiting, Node, _Notify}) -> node_down(Node, {leader_waiting, Node, _Notify}) ->
%% The winner went down, we don't know what to do so we simply abort. %% The winner went down, we don't know what to do so we simply abort.
rabbit_log:info("Autoheal: aborting - winner ~tp went down", [Node]), ?LOG_INFO("Autoheal: aborting - winner ~tp went down", [Node]),
not_healing; not_healing;
node_down(Node, {leader_waiting, _, _} = St) -> node_down(Node, {leader_waiting, _, _} = St) ->
%% If it is a partial partition, the winner might continue with the %% If it is a partial partition, the winner might continue with the
%% healing process. If it is a full partition, the winner will also %% healing process. If it is a full partition, the winner will also
%% see it and abort. Let's wait for it. %% see it and abort. Let's wait for it.
rabbit_log:info("Autoheal: ~tp went down, waiting for winner decision ", [Node]), ?LOG_INFO("Autoheal: ~tp went down, waiting for winner decision ", [Node]),
St; St;
node_down(Node, _State) -> node_down(Node, _State) ->
rabbit_log:info("Autoheal: aborting - ~tp went down", [Node]), ?LOG_INFO("Autoheal: aborting - ~tp went down", [Node]),
not_healing. not_healing.
%% If the process that has to restart the node crashes for an unexpected reason, %% If the process that has to restart the node crashes for an unexpected reason,
%% we go back to a not healing state so the node is able to recover. %% we go back to a not healing state so the node is able to recover.
process_down({'EXIT', Pid, Reason}, {restarting, Pid}) when Reason =/= normal -> process_down({'EXIT', Pid, Reason}, {restarting, Pid}) when Reason =/= normal ->
rabbit_log:info("Autoheal: aborting - the process responsible for restarting the " ?LOG_INFO("Autoheal: aborting - the process responsible for restarting the "
"node terminated with reason: ~tp", [Reason]), "node terminated with reason: ~tp", [Reason]),
not_healing; not_healing;
@ -204,14 +207,14 @@ handle_msg({request_start, _Node}, not_healing, []) ->
not_healing; not_healing;
handle_msg({request_start, Node}, handle_msg({request_start, Node},
not_healing, Partitions) -> not_healing, Partitions) ->
rabbit_log:info("Autoheal request received from ~tp", [Node]), ?LOG_INFO("Autoheal request received from ~tp", [Node]),
case check_other_nodes(Partitions) of case check_other_nodes(Partitions) of
{error, E} -> {error, E} ->
rabbit_log:info("Autoheal request denied: ~ts", [fmt_error(E)]), ?LOG_INFO("Autoheal request denied: ~ts", [fmt_error(E)]),
not_healing; not_healing;
{ok, AllPartitions} -> {ok, AllPartitions} ->
{Winner, Losers} = make_decision(AllPartitions), {Winner, Losers} = make_decision(AllPartitions),
rabbit_log:info("Autoheal decision~n" ?LOG_INFO("Autoheal decision~n"
" * Partitions: ~tp~n" " * Partitions: ~tp~n"
" * Winner: ~tp~n" " * Winner: ~tp~n"
" * Losers: ~tp", " * Losers: ~tp",
@ -226,13 +229,13 @@ handle_msg({request_start, Node},
handle_msg({request_start, Node}, handle_msg({request_start, Node},
State, _Partitions) -> State, _Partitions) ->
rabbit_log:info("Autoheal request received from ~tp when healing; " ?LOG_INFO("Autoheal request received from ~tp when healing; "
"ignoring", [Node]), "ignoring", [Node]),
State; State;
handle_msg({become_winner, Losers}, handle_msg({become_winner, Losers},
not_healing, _Partitions) -> not_healing, _Partitions) ->
rabbit_log:info("Autoheal: I am the winner, waiting for ~tp to stop", ?LOG_INFO("Autoheal: I am the winner, waiting for ~tp to stop",
[Losers]), [Losers]),
stop_partition(Losers); stop_partition(Losers);
@ -240,7 +243,7 @@ handle_msg({become_winner, Losers},
{winner_waiting, _, Losers}, _Partitions) -> {winner_waiting, _, Losers}, _Partitions) ->
%% The leader has aborted the healing, might have seen us down but %% The leader has aborted the healing, might have seen us down but
%% we didn't see the same. Let's try again as it is the same partition. %% we didn't see the same. Let's try again as it is the same partition.
rabbit_log:info("Autoheal: I am the winner and received a duplicated " ?LOG_INFO("Autoheal: I am the winner and received a duplicated "
"request, waiting again for ~tp to stop", [Losers]), "request, waiting again for ~tp to stop", [Losers]),
stop_partition(Losers); stop_partition(Losers);
@ -248,7 +251,7 @@ handle_msg({become_winner, _},
{winner_waiting, _, Losers}, _Partitions) -> {winner_waiting, _, Losers}, _Partitions) ->
%% Something has happened to the leader, it might have seen us down but we %% Something has happened to the leader, it might have seen us down but we
%% are still alive. Partitions have changed, cannot continue. %% are still alive. Partitions have changed, cannot continue.
rabbit_log:info("Autoheal: I am the winner and received another healing " ?LOG_INFO("Autoheal: I am the winner and received another healing "
"request, partitions have changed to ~tp. Aborting ", [Losers]), "request, partitions have changed to ~tp. Aborting ", [Losers]),
winner_finish(Losers), winner_finish(Losers),
not_healing; not_healing;
@ -272,7 +275,7 @@ handle_msg({winner_is, Winner}, State = {winner_waiting, _OutstandingStops, _Not
handle_msg(Request, {restarting, Pid} = St, _Partitions) -> handle_msg(Request, {restarting, Pid} = St, _Partitions) ->
%% ignore, we can contribute no further %% ignore, we can contribute no further
rabbit_log:info("Autoheal: Received the request ~tp while waiting for ~tp " ?LOG_INFO("Autoheal: Received the request ~tp while waiting for ~tp "
"to restart the node. Ignoring it ", [Request, Pid]), "to restart the node. Ignoring it ", [Request, Pid]),
St; St;
@ -295,21 +298,21 @@ handle_msg({autoheal_finished, Winner},
%% The winner is finished with the autoheal process and notified us %% The winner is finished with the autoheal process and notified us
%% (the leader). We can transition to the "not_healing" state and %% (the leader). We can transition to the "not_healing" state and
%% accept new requests. %% accept new requests.
rabbit_log:info("Autoheal finished according to winner ~tp", [Winner]), ?LOG_INFO("Autoheal finished according to winner ~tp", [Winner]),
not_healing; not_healing;
handle_msg({autoheal_finished, Winner}, not_healing, _Partitions) handle_msg({autoheal_finished, Winner}, not_healing, _Partitions)
when Winner =:= node() -> when Winner =:= node() ->
%% We are the leader and the winner. The state already transitioned %% We are the leader and the winner. The state already transitioned
%% to "not_healing" at the end of the autoheal process. %% to "not_healing" at the end of the autoheal process.
rabbit_log:info("Autoheal finished according to winner ~tp", [node()]), ?LOG_INFO("Autoheal finished according to winner ~tp", [node()]),
not_healing; not_healing;
handle_msg({autoheal_finished, Winner}, not_healing, _Partitions) -> handle_msg({autoheal_finished, Winner}, not_healing, _Partitions) ->
%% We might have seen the winner down during a partial partition and %% We might have seen the winner down during a partial partition and
%% transitioned to not_healing. However, the winner was still able %% transitioned to not_healing. However, the winner was still able
%% to finish. Let it pass. %% to finish. Let it pass.
rabbit_log:info("Autoheal finished according to winner ~tp." ?LOG_INFO("Autoheal finished according to winner ~tp."
" Unexpected, I might have previously seen the winner down", [Winner]), " Unexpected, I might have previously seen the winner down", [Winner]),
not_healing. not_healing.
@ -318,7 +321,7 @@ handle_msg({autoheal_finished, Winner}, not_healing, _Partitions) ->
send(Node, Msg) -> {?SERVER, Node} ! {autoheal_msg, Msg}. send(Node, Msg) -> {?SERVER, Node} ! {autoheal_msg, Msg}.
abort(Down, Notify) -> abort(Down, Notify) ->
rabbit_log:info("Autoheal: aborting - ~tp down", [Down]), ?LOG_INFO("Autoheal: aborting - ~tp down", [Down]),
%% Make sure any nodes waiting for us start - it won't necessarily %% Make sure any nodes waiting for us start - it won't necessarily
%% heal the partition but at least they won't get stuck. %% heal the partition but at least they won't get stuck.
%% If we are executing this, we are not stopping. Thus, don't wait %% If we are executing this, we are not stopping. Thus, don't wait
@ -362,7 +365,7 @@ wait_for_supervisors(Monitors) ->
after after
60000 -> 60000 ->
AliveLosers = [Node || {_, Node} <- pmon:monitored(Monitors)], AliveLosers = [Node || {_, Node} <- pmon:monitored(Monitors)],
rabbit_log:info("Autoheal: mnesia in nodes ~tp is still up, sending " ?LOG_INFO("Autoheal: mnesia in nodes ~tp is still up, sending "
"winner notification again to these ", [AliveLosers]), "winner notification again to these ", [AliveLosers]),
_ = [send(L, {winner_is, node()}) || L <- AliveLosers], _ = [send(L, {winner_is, node()}) || L <- AliveLosers],
wait_for_mnesia_shutdown(AliveLosers) wait_for_mnesia_shutdown(AliveLosers)
@ -370,7 +373,7 @@ wait_for_supervisors(Monitors) ->
end. end.
restart_loser(State, Winner) -> restart_loser(State, Winner) ->
rabbit_log:warning("Autoheal: we were selected to restart; winner is ~tp", [Winner]), ?LOG_WARNING("Autoheal: we were selected to restart; winner is ~tp", [Winner]),
NextStateTimeout = application:get_env(rabbit, autoheal_state_transition_timeout, 60000), NextStateTimeout = application:get_env(rabbit, autoheal_state_transition_timeout, 60000),
rabbit_node_monitor:run_outside_applications( rabbit_node_monitor:run_outside_applications(
fun () -> fun () ->
@ -382,7 +385,7 @@ restart_loser(State, Winner) ->
autoheal_safe_to_start -> autoheal_safe_to_start ->
State State
after NextStateTimeout -> after NextStateTimeout ->
rabbit_log:warning( ?LOG_WARNING(
"Autoheal: timed out waiting for a safe-to-start message from the winner (~tp); will retry", "Autoheal: timed out waiting for a safe-to-start message from the winner (~tp); will retry",
[Winner]), [Winner]),
not_healing not_healing
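For context on the restart_loser/2 hunk above: the losing node waits for the winner's go-ahead and gives up after a configurable timeout. A condensed sketch of just that wait — the wrapper function name is made up; the message, config key and default are the ones visible in the hunk:

    %% Sketch only: block until the winner says it is safe to start, or fall back
    %% to not_healing after autoheal_state_transition_timeout (default 60s).
    wait_for_winner_go_ahead(Winner) ->
        Timeout = application:get_env(rabbit, autoheal_state_transition_timeout, 60000),
        receive
            autoheal_safe_to_start ->
                ok
        after Timeout ->
            ?LOG_WARNING("Autoheal: timed out waiting for a safe-to-start message "
                         "from the winner (~tp); will retry", [Winner]),
            not_healing
        end.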

View File

@ -8,6 +8,7 @@
-module(rabbit_binding). -module(rabbit_binding).
-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl").
-include("amqqueue.hrl"). -include("amqqueue.hrl").
-include_lib("kernel/include/logger.hrl").
-export([recover/0, recover/2, exists/1, add/2, add/3, remove/2, remove/3]). -export([recover/0, recover/2, exists/1, add/2, add/3, remove/2, remove/3]).
-export([list/1, list_for_source/1, list_for_destination/1, -export([list/1, list_for_source/1, list_for_destination/1,
@ -117,7 +118,7 @@ recover_semi_durable_route(Gatherer, Binding, Src, Dst, ToRecover, Fun) ->
gatherer:finish(Gatherer) gatherer:finish(Gatherer)
end); end);
{error, not_found}=Error -> {error, not_found}=Error ->
rabbit_log:warning( ?LOG_WARNING(
"expected exchange ~tp to exist during recovery, " "expected exchange ~tp to exist during recovery, "
"error: ~tp", [Src, Error]), "error: ~tp", [Src, Error]),
ok ok

View File

@ -18,7 +18,7 @@ run_boot_steps() ->
run_boot_steps(Apps) -> run_boot_steps(Apps) ->
[begin [begin
rabbit_log:info("Running boot step ~ts defined by app ~ts", [Step, App]), ?LOG_INFO("Running boot step ~ts defined by app ~ts", [Step, App]),
ok = run_step(Attrs, mfa) ok = run_step(Attrs, mfa)
end || {App, Step, Attrs} <- find_steps(Apps)], end || {App, Step, Attrs} <- find_steps(Apps)],
ok. ok.
@ -46,11 +46,11 @@ find_steps(Apps) ->
run_step(Attributes, AttributeName) -> run_step(Attributes, AttributeName) ->
[begin [begin
rabbit_log:debug("Applying MFA: M = ~ts, F = ~ts, A = ~tp", ?LOG_DEBUG("Applying MFA: M = ~ts, F = ~ts, A = ~tp",
[M, F, A]), [M, F, A]),
case apply(M,F,A) of case apply(M,F,A) of
ok -> ok ->
rabbit_log:debug("Finished MFA: M = ~ts, F = ~ts, A = ~tp", ?LOG_DEBUG("Finished MFA: M = ~ts, F = ~ts, A = ~tp",
[M, F, A]); [M, F, A]);
{error, Reason} -> exit({error, Reason}) {error, Reason} -> exit({error, Reason})
end end

View File

@ -362,7 +362,7 @@ info(Pid) ->
end end
catch catch
exit:{timeout, _} -> exit:{timeout, _} ->
rabbit_log:error("Timed out getting channel ~tp info", [Pid]), ?LOG_ERROR("Timed out getting channel ~tp info", [Pid]),
throw(timeout) throw(timeout)
end. end.
@ -377,7 +377,7 @@ info(Pid, Items) ->
end end
catch catch
exit:{timeout, _} -> exit:{timeout, _} ->
rabbit_log:error("Timed out getting channel ~tp info", [Pid]), ?LOG_ERROR("Timed out getting channel ~tp info", [Pid]),
throw(timeout) throw(timeout)
end. end.
@ -413,7 +413,7 @@ refresh_config_local() ->
try try
gen_server2:call(C, refresh_config, infinity) gen_server2:call(C, refresh_config, infinity)
catch _:Reason -> catch _:Reason ->
rabbit_log:error("Failed to refresh channel config " ?LOG_ERROR("Failed to refresh channel config "
"for channel ~tp. Reason ~tp", "for channel ~tp. Reason ~tp",
[C, Reason]) [C, Reason])
end end
@ -427,7 +427,7 @@ refresh_interceptors() ->
try try
gen_server2:call(C, refresh_interceptors, ?REFRESH_TIMEOUT) gen_server2:call(C, refresh_interceptors, ?REFRESH_TIMEOUT)
catch _:Reason -> catch _:Reason ->
rabbit_log:error("Failed to refresh channel interceptors " ?LOG_ERROR("Failed to refresh channel interceptors "
"for channel ~tp. Reason ~tp", "for channel ~tp. Reason ~tp",
[C, Reason]) [C, Reason])
end end
@ -641,7 +641,7 @@ handle_cast(terminate, State = #ch{cfg = #conf{writer_pid = WriterPid}}) ->
ok = rabbit_writer:flush(WriterPid) ok = rabbit_writer:flush(WriterPid)
catch catch
_Class:Reason -> _Class:Reason ->
rabbit_log:debug("Failed to flush pending writes on a terminating connection, reason: ~tp", [Reason]) ?LOG_DEBUG("Failed to flush pending writes on a terminating connection, reason: ~tp", [Reason])
end, end,
{stop, normal, State}; {stop, normal, State};
@ -802,7 +802,7 @@ terminate(_Reason,
case rabbit_confirms:size(State#ch.unconfirmed) of case rabbit_confirms:size(State#ch.unconfirmed) of
0 -> ok; 0 -> ok;
NumConfirms -> NumConfirms ->
rabbit_log:warning("Channel is stopping with ~b pending publisher confirms", ?LOG_WARNING("Channel is stopping with ~b pending publisher confirms",
[NumConfirms]) [NumConfirms])
end. end.

View File

@ -34,6 +34,7 @@
-export([count_local_tracked_items_of_user/1]). -export([count_local_tracked_items_of_user/1]).
-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-import(rabbit_misc, [pget/2]). -import(rabbit_misc, [pget/2]).
@ -214,13 +215,13 @@ ensure_tracked_tables_for_this_node() ->
%% Create tables %% Create tables
ensure_tracked_channels_table_for_this_node() -> ensure_tracked_channels_table_for_this_node() ->
rabbit_log:info("Setting up a table for channel tracking on this node: ~tp", ?LOG_INFO("Setting up a table for channel tracking on this node: ~tp",
[?TRACKED_CHANNEL_TABLE]), [?TRACKED_CHANNEL_TABLE]),
ets:new(?TRACKED_CHANNEL_TABLE, [named_table, public, {write_concurrency, true}, ets:new(?TRACKED_CHANNEL_TABLE, [named_table, public, {write_concurrency, true},
{keypos, #tracked_channel.pid}]). {keypos, #tracked_channel.pid}]).
ensure_per_user_tracked_channels_table_for_this_node() -> ensure_per_user_tracked_channels_table_for_this_node() ->
rabbit_log:info("Setting up a table for channel tracking on this node: ~tp", ?LOG_INFO("Setting up a table for channel tracking on this node: ~tp",
[?TRACKED_CHANNEL_TABLE_PER_USER]), [?TRACKED_CHANNEL_TABLE_PER_USER]),
ets:new(?TRACKED_CHANNEL_TABLE_PER_USER, [named_table, public, {write_concurrency, true}]). ets:new(?TRACKED_CHANNEL_TABLE_PER_USER, [named_table, public, {write_concurrency, true}]).

View File

@ -4,6 +4,7 @@
-include("amqqueue.hrl"). -include("amqqueue.hrl").
-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
%% TODO possible to use sets / maps instead of lists? %% TODO possible to use sets / maps instead of lists?
%% Check performance with QoS 1 and 1 million target queues. %% Check performance with QoS 1 and 1 million target queues.
@ -172,13 +173,13 @@ delete(Q0, IfUnused, IfEmpty, ActingUser) when ?amqqueue_is_classic(Q0) ->
#resource{name = Name, virtual_host = Vhost} = QName, #resource{name = Name, virtual_host = Vhost} = QName,
case IfEmpty of case IfEmpty of
true -> true ->
rabbit_log:error("Queue ~ts in vhost ~ts is down. " ?LOG_ERROR("Queue ~ts in vhost ~ts is down. "
"The queue may be non-empty. " "The queue may be non-empty. "
"Refusing to force-delete.", "Refusing to force-delete.",
[Name, Vhost]), [Name, Vhost]),
{error, not_empty}; {error, not_empty};
false -> false ->
rabbit_log:warning("Queue ~ts in vhost ~ts is down. " ?LOG_WARNING("Queue ~ts in vhost ~ts is down. "
"Forcing queue deletion.", "Forcing queue deletion.",
[Name, Vhost]), [Name, Vhost]),
case delete_crashed_internal(Q, ActingUser) of case delete_crashed_internal(Q, ActingUser) of
@ -214,7 +215,7 @@ recover(VHost, Queues) ->
FailedQs = find_missing_queues(Queues,RecoveredQs), FailedQs = find_missing_queues(Queues,RecoveredQs),
{RecoveredQs, FailedQs}; {RecoveredQs, FailedQs};
{error, Reason} -> {error, Reason} ->
rabbit_log:error("Failed to start queue supervisor for vhost '~ts': ~ts", [VHost, Reason]), ?LOG_ERROR("Failed to start queue supervisor for vhost '~ts': ~ts", [VHost, Reason]),
throw({error, Reason}) throw({error, Reason})
end. end.
@ -635,7 +636,7 @@ recover_durable_queues(QueuesAndRecoveryTerms) ->
gen_server2:mcall( gen_server2:mcall(
[{rabbit_amqqueue_sup_sup:start_queue_process(node(), Q), [{rabbit_amqqueue_sup_sup:start_queue_process(node(), Q),
{init, {self(), Terms}}} || {Q, Terms} <- QueuesAndRecoveryTerms]), {init, {self(), Terms}}} || {Q, Terms} <- QueuesAndRecoveryTerms]),
[rabbit_log:error("Queue ~tp failed to initialise: ~tp", [?LOG_ERROR("Queue ~tp failed to initialise: ~tp",
[Pid, Error]) || {Pid, Error} <- Failures], [Pid, Error]) || {Pid, Error} <- Failures],
[Q || {_, {new, Q}} <- Results]. [Q || {_, {new, Q}} <- Results].

View File

@ -42,6 +42,7 @@
-define(ENTRY_SIZE, 32). %% bytes -define(ENTRY_SIZE, 32). %% bytes
-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
%% Set to true to get an awful lot of debug logs. %% Set to true to get an awful lot of debug logs.
-if(false). -if(false).
-define(DEBUG(X,Y), logger:debug("~0p: " ++ X, [?FUNCTION_NAME|Y])). -define(DEBUG(X,Y), logger:debug("~0p: " ++ X, [?FUNCTION_NAME|Y])).
@ -255,7 +256,7 @@ recover(#resource{ virtual_host = VHost, name = QueueName } = Name, Terms,
State = recover_segments(State0, Terms, IsMsgStoreClean, State = recover_segments(State0, Terms, IsMsgStoreClean,
ContainsCheckFun, OnSyncFun, OnSyncMsgFun, ContainsCheckFun, OnSyncFun, OnSyncMsgFun,
CountersRef, Context), CountersRef, Context),
rabbit_log:warning("Queue ~ts in vhost ~ts dropped ~b/~b/~b persistent messages " ?LOG_WARNING("Queue ~ts in vhost ~ts dropped ~b/~b/~b persistent messages "
"and ~b transient messages after unclean shutdown", "and ~b transient messages after unclean shutdown",
[QueueName, VHost, [QueueName, VHost,
counters:get(CountersRef, ?RECOVER_DROPPED_PERSISTENT_PER_VHOST), counters:get(CountersRef, ?RECOVER_DROPPED_PERSISTENT_PER_VHOST),
@ -329,7 +330,7 @@ recover_segments(State0, ContainsCheckFun, StoreState0, CountersRef, [Segment|Ta
%% File was either empty or the header was invalid. %% File was either empty or the header was invalid.
%% We cannot recover this file. %% We cannot recover this file.
_ -> _ ->
rabbit_log:warning("Deleting invalid v2 segment file ~ts (file has invalid header)", ?LOG_WARNING("Deleting invalid v2 segment file ~ts (file has invalid header)",
[SegmentFile]), [SegmentFile]),
ok = file:close(Fd), ok = file:close(Fd),
_ = prim_file:delete(SegmentFile), _ = prim_file:delete(SegmentFile),
@ -436,7 +437,7 @@ recover_segment(State, ContainsCheckFun, StoreState0, CountersRef, Fd,
recover_index_v1_clean(State0 = #qi{ queue_name = Name }, Terms, IsMsgStoreClean, recover_index_v1_clean(State0 = #qi{ queue_name = Name }, Terms, IsMsgStoreClean,
ContainsCheckFun, OnSyncFun, OnSyncMsgFun) -> ContainsCheckFun, OnSyncFun, OnSyncMsgFun) ->
#resource{virtual_host = VHost, name = QName} = Name, #resource{virtual_host = VHost, name = QName} = Name,
rabbit_log:info("Converting queue ~ts in vhost ~ts from v1 to v2 after clean shutdown", [QName, VHost]), ?LOG_INFO("Converting queue ~ts in vhost ~ts from v1 to v2 after clean shutdown", [QName, VHost]),
{_, _, V1State} = rabbit_queue_index:recover(Name, Terms, IsMsgStoreClean, {_, _, V1State} = rabbit_queue_index:recover(Name, Terms, IsMsgStoreClean,
ContainsCheckFun, OnSyncFun, OnSyncMsgFun, ContainsCheckFun, OnSyncFun, OnSyncMsgFun,
convert), convert),
@ -445,7 +446,7 @@ recover_index_v1_clean(State0 = #qi{ queue_name = Name }, Terms, IsMsgStoreClean
%% share code with dirty recovery. %% share code with dirty recovery.
CountersRef = counters:new(?RECOVER_COUNTER_SIZE, []), CountersRef = counters:new(?RECOVER_COUNTER_SIZE, []),
State = recover_index_v1_common(State0, V1State, CountersRef), State = recover_index_v1_common(State0, V1State, CountersRef),
rabbit_log:info("Queue ~ts in vhost ~ts converted ~b total messages from v1 to v2", ?LOG_INFO("Queue ~ts in vhost ~ts converted ~b total messages from v1 to v2",
[QName, VHost, counters:get(CountersRef, ?RECOVER_COUNT)]), [QName, VHost, counters:get(CountersRef, ?RECOVER_COUNT)]),
State. State.
@ -453,7 +454,7 @@ recover_index_v1_dirty(State0 = #qi{ queue_name = Name }, Terms, IsMsgStoreClean
ContainsCheckFun, OnSyncFun, OnSyncMsgFun, ContainsCheckFun, OnSyncFun, OnSyncMsgFun,
CountersRef) -> CountersRef) ->
#resource{virtual_host = VHost, name = QName} = Name, #resource{virtual_host = VHost, name = QName} = Name,
rabbit_log:info("Converting queue ~ts in vhost ~ts from v1 to v2 after unclean shutdown", [QName, VHost]), ?LOG_INFO("Converting queue ~ts in vhost ~ts from v1 to v2 after unclean shutdown", [QName, VHost]),
%% We ignore the count and bytes returned here because we cannot trust %% We ignore the count and bytes returned here because we cannot trust
%% rabbit_queue_index: it has a bug that may lead to more bytes being %% rabbit_queue_index: it has a bug that may lead to more bytes being
%% returned than it really has. %% returned than it really has.
@ -464,7 +465,7 @@ recover_index_v1_dirty(State0 = #qi{ queue_name = Name }, Terms, IsMsgStoreClean
ContainsCheckFun, OnSyncFun, OnSyncMsgFun, ContainsCheckFun, OnSyncFun, OnSyncMsgFun,
convert), convert),
State = recover_index_v1_common(State0, V1State, CountersRef), State = recover_index_v1_common(State0, V1State, CountersRef),
rabbit_log:info("Queue ~ts in vhost ~ts converted ~b total messages from v1 to v2", ?LOG_INFO("Queue ~ts in vhost ~ts converted ~b total messages from v1 to v2",
[QName, VHost, counters:get(CountersRef, ?RECOVER_COUNT)]), [QName, VHost, counters:get(CountersRef, ?RECOVER_COUNT)]),
State. State.

View File

@ -56,6 +56,7 @@
-define(ENTRY_HEADER_SIZE, 8). %% bytes -define(ENTRY_HEADER_SIZE, 8). %% bytes
-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
%% Set to true to get an awful lot of debug logs. %% Set to true to get an awful lot of debug logs.
-if(false). -if(false).
@ -317,7 +318,7 @@ read_from_disk(SeqId, {?MODULE, Offset, Size}, State0) ->
CRC32Expected = <<CRC32:16>>, CRC32Expected = <<CRC32:16>>,
ok ok
catch C:E:S -> catch C:E:S ->
rabbit_log:error("Per-queue store CRC32 check failed in ~ts seq id ~b offset ~b size ~b", ?LOG_ERROR("Per-queue store CRC32 check failed in ~ts seq id ~b offset ~b size ~b",
[segment_file(Segment, State), SeqId, Offset, Size]), [segment_file(Segment, State), SeqId, Offset, Size]),
erlang:raise(C, E, S) erlang:raise(C, E, S)
end end
@ -415,7 +416,7 @@ parse_many_from_disk([<<Size:32/unsigned, _:7, UseCRC32:1, CRC32Expected:16/bits
CRC32Expected = <<CRC32:16>>, CRC32Expected = <<CRC32:16>>,
ok ok
catch C:E:S -> catch C:E:S ->
rabbit_log:error("Per-queue store CRC32 check failed in ~ts", ?LOG_ERROR("Per-queue store CRC32 check failed in ~ts",
[segment_file(Segment, State)]), [segment_file(Segment, State)]),
erlang:raise(C, E, S) erlang:raise(C, E, S)
end end

View File

@ -41,6 +41,7 @@
count_local_tracked_items_of_user/1]). count_local_tracked_items_of_user/1]).
-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-import(rabbit_misc, [pget/2]). -import(rabbit_misc, [pget/2]).
@ -189,17 +190,17 @@ ensure_tracked_tables_for_this_node() ->
ensure_tracked_connections_table_for_this_node() -> ensure_tracked_connections_table_for_this_node() ->
_ = ets:new(?TRACKED_CONNECTION_TABLE, [named_table, public, {write_concurrency, true}, _ = ets:new(?TRACKED_CONNECTION_TABLE, [named_table, public, {write_concurrency, true},
{keypos, #tracked_connection.id}]), {keypos, #tracked_connection.id}]),
rabbit_log:info("Setting up a table for connection tracking on this node: ~tp", ?LOG_INFO("Setting up a table for connection tracking on this node: ~tp",
[?TRACKED_CONNECTION_TABLE]). [?TRACKED_CONNECTION_TABLE]).
ensure_per_vhost_tracked_connections_table_for_this_node() -> ensure_per_vhost_tracked_connections_table_for_this_node() ->
rabbit_log:info("Setting up a table for per-vhost connection counting on this node: ~tp", ?LOG_INFO("Setting up a table for per-vhost connection counting on this node: ~tp",
[?TRACKED_CONNECTION_TABLE_PER_VHOST]), [?TRACKED_CONNECTION_TABLE_PER_VHOST]),
ets:new(?TRACKED_CONNECTION_TABLE_PER_VHOST, [named_table, public, {write_concurrency, true}]). ets:new(?TRACKED_CONNECTION_TABLE_PER_VHOST, [named_table, public, {write_concurrency, true}]).
ensure_per_user_tracked_connections_table_for_this_node() -> ensure_per_user_tracked_connections_table_for_this_node() ->
_ = ets:new(?TRACKED_CONNECTION_TABLE_PER_USER, [named_table, public, {write_concurrency, true}]), _ = ets:new(?TRACKED_CONNECTION_TABLE_PER_USER, [named_table, public, {write_concurrency, true}]),
rabbit_log:info("Setting up a table for per-user connection counting on this node: ~tp", ?LOG_INFO("Setting up a table for per-user connection counting on this node: ~tp",
[?TRACKED_CONNECTION_TABLE_PER_USER]). [?TRACKED_CONNECTION_TABLE_PER_USER]).
-spec tracked_connection_table_name_for(node()) -> atom(). -spec tracked_connection_table_name_for(node()) -> atom().
@ -420,7 +421,7 @@ close_connection(#tracked_connection{pid = Pid, type = network}, Message) ->
ok; ok;
_:Err -> _:Err ->
%% ignore, don't terminate %% ignore, don't terminate
rabbit_log:warning("Could not close connection ~tp: ~tp", [Pid, Err]), ?LOG_WARNING("Could not close connection ~tp: ~tp", [Pid, Err]),
ok ok
end; end;
close_connection(#tracked_connection{pid = Pid, type = direct}, Message) -> close_connection(#tracked_connection{pid = Pid, type = direct}, Message) ->

View File

@ -223,7 +223,7 @@ join(RemoteNode, NodeType)
%% as RemoteNode thinks this node is already in the cluster. %% as RemoteNode thinks this node is already in the cluster.
%% Attempt to leave the RemoteNode cluster, the discovery cluster, %% Attempt to leave the RemoteNode cluster, the discovery cluster,
%% and simply retry the operation. %% and simply retry the operation.
rabbit_log:info("Mnesia: node ~tp thinks it's clustered " ?LOG_INFO("Mnesia: node ~tp thinks it's clustered "
"with node ~tp, but ~tp disagrees. ~tp will ask " "with node ~tp, but ~tp disagrees. ~tp will ask "
"to leave the cluster and try again.", "to leave the cluster and try again.",
[RemoteNode, node(), node(), node()]), [RemoteNode, node(), node(), node()]),

View File

@ -11,6 +11,7 @@
-include("mirrored_supervisor.hrl"). -include("mirrored_supervisor.hrl").
-include("include/rabbit_khepri.hrl"). -include("include/rabbit_khepri.hrl").
-include_lib("kernel/include/logger.hrl").
-export([ -export([
create_tables/0, create_tables/0,
@ -96,7 +97,7 @@ create_or_update_in_mnesia(Group, Overall, Delegate, ChildSpec, Id) ->
rabbit_mnesia:execute_mnesia_transaction( rabbit_mnesia:execute_mnesia_transaction(
fun() -> fun() ->
ReadResult = mnesia:wread({?TABLE, {Group, Id}}), ReadResult = mnesia:wread({?TABLE, {Group, Id}}),
rabbit_log:debug("Mirrored supervisor: check_start table ~ts read for key ~tp returned ~tp", ?LOG_DEBUG("Mirrored supervisor: check_start table ~ts read for key ~tp returned ~tp",
[?TABLE, {Group, Id}, ReadResult]), [?TABLE, {Group, Id}, ReadResult]),
case ReadResult of case ReadResult of
[] -> _ = write_in_mnesia(Group, Overall, ChildSpec, Id), [] -> _ = write_in_mnesia(Group, Overall, ChildSpec, Id),
@ -105,12 +106,12 @@ create_or_update_in_mnesia(Group, Overall, Delegate, ChildSpec, Id) ->
mirroring_pid = Pid} = S, mirroring_pid = Pid} = S,
case Overall of case Overall of
Pid -> Pid ->
rabbit_log:debug("Mirrored supervisor: overall matched mirrored pid ~tp", [Pid]), ?LOG_DEBUG("Mirrored supervisor: overall matched mirrored pid ~tp", [Pid]),
Delegate; Delegate;
_ -> _ ->
rabbit_log:debug("Mirrored supervisor: overall ~tp did not match mirrored pid ~tp", [Overall, Pid]), ?LOG_DEBUG("Mirrored supervisor: overall ~tp did not match mirrored pid ~tp", [Overall, Pid]),
Sup = mirrored_supervisor:supervisor(Pid), Sup = mirrored_supervisor:supervisor(Pid),
rabbit_log:debug("Mirrored supervisor: supervisor(~tp) returned ~tp", [Pid, Sup]), ?LOG_DEBUG("Mirrored supervisor: supervisor(~tp) returned ~tp", [Pid, Sup]),
case Sup of case Sup of
dead -> dead ->
_ = write_in_mnesia(Group, Overall, ChildSpec, Id), _ = write_in_mnesia(Group, Overall, ChildSpec, Id),

View File

@ -14,6 +14,7 @@
-include("amqqueue.hrl"). -include("amqqueue.hrl").
-include("include/rabbit_khepri.hrl"). -include("include/rabbit_khepri.hrl").
-include_lib("kernel/include/logger.hrl").
-export([ -export([
get/1, get/1,
@ -341,7 +342,7 @@ count(VHostName) ->
try try
list_for_count(VHostName) list_for_count(VHostName)
catch _:Err -> catch _:Err ->
rabbit_log:error("Failed to fetch number of queues in vhost ~p:~n~p", ?LOG_ERROR("Failed to fetch number of queues in vhost ~p:~n~p",
[VHostName, Err]), [VHostName, Err]),
0 0
end. end.

View File

@ -13,6 +13,7 @@
-include("include/rabbit_khepri.hrl"). -include("include/rabbit_khepri.hrl").
-include("vhost.hrl"). -include("vhost.hrl").
-include_lib("kernel/include/logger.hrl").
-export([create_or_get/3, -export([create_or_get/3,
merge_metadata/2, merge_metadata/2,
@ -102,7 +103,7 @@ create_or_get_in_mnesia_tx(VHostName, VHost) ->
create_or_get_in_khepri(VHostName, VHost) -> create_or_get_in_khepri(VHostName, VHost) ->
Path = khepri_vhost_path(VHostName), Path = khepri_vhost_path(VHostName),
rabbit_log:debug("Inserting a virtual host record ~tp", [VHost]), ?LOG_DEBUG("Inserting a virtual host record ~tp", [VHost]),
case rabbit_khepri:create(Path, VHost) of case rabbit_khepri:create(Path, VHost) of
ok -> ok ->
{new, VHost}; {new, VHost};
@ -137,7 +138,7 @@ merge_metadata(VHostName, Metadata)
when is_binary(VHostName) andalso is_map(Metadata) -> when is_binary(VHostName) andalso is_map(Metadata) ->
case do_merge_metadata(VHostName, Metadata) of case do_merge_metadata(VHostName, Metadata) of
{ok, VHost} when ?is_vhost(VHost) -> {ok, VHost} when ?is_vhost(VHost) ->
rabbit_log:debug("Updated a virtual host record ~tp", [VHost]), ?LOG_DEBUG("Updated a virtual host record ~tp", [VHost]),
{ok, VHost}; {ok, VHost};
{error, _} = Error -> {error, _} = Error ->
Error Error
@ -169,7 +170,7 @@ merge_metadata_in_khepri(VHostName, Metadata) ->
case Ret1 of case Ret1 of
{ok, #{data := VHost0, payload_version := DVersion}} -> {ok, #{data := VHost0, payload_version := DVersion}} ->
VHost = vhost:merge_metadata(VHost0, Metadata), VHost = vhost:merge_metadata(VHost0, Metadata),
rabbit_log:debug("Updating a virtual host record ~p", [VHost]), ?LOG_DEBUG("Updating a virtual host record ~p", [VHost]),
Path1 = khepri_path:combine_with_conditions( Path1 = khepri_path:combine_with_conditions(
Path, [#if_payload_version{version = DVersion}]), Path, [#if_payload_version{version = DVersion}]),
Ret2 = rabbit_khepri:put(Path1, VHost), Ret2 = rabbit_khepri:put(Path1, VHost),
@ -240,7 +241,7 @@ enable_protection_from_deletion(VHostName) ->
MetadataPatch = #{ MetadataPatch = #{
protected_from_deletion => true protected_from_deletion => true
}, },
rabbit_log:info("Enabling deletion protection for virtual host '~ts'", [VHostName]), ?LOG_INFO("Enabling deletion protection for virtual host '~ts'", [VHostName]),
merge_metadata(VHostName, MetadataPatch). merge_metadata(VHostName, MetadataPatch).
-spec disable_protection_from_deletion(VHostName) -> Ret when -spec disable_protection_from_deletion(VHostName) -> Ret when
@ -253,7 +254,7 @@ disable_protection_from_deletion(VHostName) ->
MetadataPatch = #{ MetadataPatch = #{
protected_from_deletion => false protected_from_deletion => false
}, },
rabbit_log:info("Disabling deletion protection for virtual host '~ts'", [VHostName]), ?LOG_INFO("Disabling deletion protection for virtual host '~ts'", [VHostName]),
merge_metadata(VHostName, MetadataPatch). merge_metadata(VHostName, MetadataPatch).
%% ------------------------------------------------------------------- %% -------------------------------------------------------------------

View File

@ -7,6 +7,9 @@
-module(rabbit_db_vhost_defaults). -module(rabbit_db_vhost_defaults).
-include_lib("kernel/include/logger.hrl").
-export([apply/2]). -export([apply/2]).
-export([list_limits/1, list_operator_policies/1, list_users/1]). -export([list_limits/1, list_operator_policies/1, list_users/1]).
@ -36,20 +39,20 @@ apply(VHost, ActingUser) ->
ok; ok;
L -> L ->
ok = rabbit_vhost_limit:set(VHost, L, ActingUser), ok = rabbit_vhost_limit:set(VHost, L, ActingUser),
rabbit_log:info("Applied default limits to vhost '~tp': ~tp", [VHost, L]) ?LOG_INFO("Applied default limits to vhost '~tp': ~tp", [VHost, L])
end, end,
lists:foreach( lists:foreach(
fun(P) -> fun(P) ->
ok = rabbit_policy:set_op(VHost, P#seeding_policy.name, P#seeding_policy.queue_pattern, P#seeding_policy.definition, ok = rabbit_policy:set_op(VHost, P#seeding_policy.name, P#seeding_policy.queue_pattern, P#seeding_policy.definition,
undefined, undefined, ActingUser), undefined, undefined, ActingUser),
rabbit_log:info("Applied default operator policy to vhost '~tp': ~tp", [VHost, P]) ?LOG_INFO("Applied default operator policy to vhost '~tp': ~tp", [VHost, P])
end, end,
list_operator_policies(VHost) list_operator_policies(VHost)
), ),
lists:foreach( lists:foreach(
fun(U) -> fun(U) ->
ok = add_user(VHost, U, ActingUser), ok = add_user(VHost, U, ActingUser),
rabbit_log:info("Added default user to vhost '~tp': ~tp", [VHost, maps:remove(password, U)]) ?LOG_INFO("Added default user to vhost '~tp': ~tp", [VHost, maps:remove(password, U)])
end, end,
list_users(VHost) list_users(VHost)
), ),

View File

@ -12,6 +12,7 @@
detect_cycles/3]). detect_cycles/3]).
-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
%%---------------------------------------------------------------------------- %%----------------------------------------------------------------------------
@ -74,7 +75,7 @@ log_cycle_once(Cycle) ->
true -> true ->
ok; ok;
undefined -> undefined ->
rabbit_log:warning( ?LOG_WARNING(
"Message dropped because the following list of queues (ordered by " "Message dropped because the following list of queues (ordered by "
"death recency) contains a dead letter cycle without reason 'rejected'. " "death recency) contains a dead letter cycle without reason 'rejected'. "
"This list will not be logged again: ~tp", "This list will not be logged again: ~tp",

View File

@ -30,6 +30,7 @@
%% * rabbit_definitions_hashing %% * rabbit_definitions_hashing
-module(rabbit_definitions). -module(rabbit_definitions).
-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-export([boot/0]). -export([boot/0]).
%% automatic import on boot %% automatic import on boot
@ -177,7 +178,7 @@ validate_definitions(Body) when is_binary(Body) ->
-spec import_raw(Body :: binary() | iolist()) -> ok | {error, term()}. -spec import_raw(Body :: binary() | iolist()) -> ok | {error, term()}.
import_raw(Body) -> import_raw(Body) ->
rabbit_log:info("Asked to import definitions. Acting user: ~ts", [?INTERNAL_USER]), ?LOG_INFO("Asked to import definitions. Acting user: ~ts", [?INTERNAL_USER]),
case decode([], Body) of case decode([], Body) of
{error, E} -> {error, E}; {error, E} -> {error, E};
{ok, _, Map} -> apply_defs(Map, ?INTERNAL_USER) {ok, _, Map} -> apply_defs(Map, ?INTERNAL_USER)
@ -185,7 +186,7 @@ import_raw(Body) ->
-spec import_raw(Body :: binary() | iolist(), VHost :: vhost:name()) -> ok | {error, term()}. -spec import_raw(Body :: binary() | iolist(), VHost :: vhost:name()) -> ok | {error, term()}.
import_raw(Body, VHost) -> import_raw(Body, VHost) ->
rabbit_log:info("Asked to import definitions. Acting user: ~ts", [?INTERNAL_USER]), ?LOG_INFO("Asked to import definitions. Acting user: ~ts", [?INTERNAL_USER]),
case decode([], Body) of case decode([], Body) of
{error, E} -> {error, E}; {error, E} -> {error, E};
{ok, _, Map} -> apply_defs(Map, ?INTERNAL_USER, fun() -> ok end, VHost) {ok, _, Map} -> apply_defs(Map, ?INTERNAL_USER, fun() -> ok end, VHost)
@ -195,7 +196,7 @@ import_raw(Body, VHost) ->
import_parsed(Body0) when is_list(Body0) -> import_parsed(Body0) when is_list(Body0) ->
import_parsed(maps:from_list(Body0)); import_parsed(maps:from_list(Body0));
import_parsed(Body0) when is_map(Body0) -> import_parsed(Body0) when is_map(Body0) ->
rabbit_log:info("Asked to import definitions. Acting user: ~ts", [?INTERNAL_USER]), ?LOG_INFO("Asked to import definitions. Acting user: ~ts", [?INTERNAL_USER]),
Body = atomise_map_keys(Body0), Body = atomise_map_keys(Body0),
apply_defs(Body, ?INTERNAL_USER). apply_defs(Body, ?INTERNAL_USER).
@ -203,7 +204,7 @@ import_parsed(Body0) when is_map(Body0) ->
import_parsed(Body0, VHost) when is_list(Body0) -> import_parsed(Body0, VHost) when is_list(Body0) ->
import_parsed(maps:from_list(Body0), VHost); import_parsed(maps:from_list(Body0), VHost);
import_parsed(Body0, VHost) -> import_parsed(Body0, VHost) ->
rabbit_log:info("Asked to import definitions. Acting user: ~ts", [?INTERNAL_USER]), ?LOG_INFO("Asked to import definitions. Acting user: ~ts", [?INTERNAL_USER]),
Body = atomise_map_keys(Body0), Body = atomise_map_keys(Body0),
apply_defs(Body, ?INTERNAL_USER, fun() -> ok end, VHost). apply_defs(Body, ?INTERNAL_USER, fun() -> ok end, VHost).
@ -212,7 +213,7 @@ import_parsed(Body0, VHost) ->
import_parsed_with_hashing(Body0) when is_list(Body0) -> import_parsed_with_hashing(Body0) when is_list(Body0) ->
import_parsed(maps:from_list(Body0)); import_parsed(maps:from_list(Body0));
import_parsed_with_hashing(Body0) when is_map(Body0) -> import_parsed_with_hashing(Body0) when is_map(Body0) ->
rabbit_log:info("Asked to import definitions. Acting user: ~ts", [?INTERNAL_USER]), ?LOG_INFO("Asked to import definitions. Acting user: ~ts", [?INTERNAL_USER]),
case should_skip_if_unchanged() of case should_skip_if_unchanged() of
false -> false ->
import_parsed(Body0); import_parsed(Body0);
@ -222,10 +223,10 @@ import_parsed_with_hashing(Body0) when is_map(Body0) ->
Algo = rabbit_definitions_hashing:hashing_algorithm(), Algo = rabbit_definitions_hashing:hashing_algorithm(),
case rabbit_definitions_hashing:hash(Algo, Body) of case rabbit_definitions_hashing:hash(Algo, Body) of
PreviousHash -> PreviousHash ->
rabbit_log:info("Submitted definition content hash matches the stored one: ~ts", [binary:part(rabbit_misc:hexify(PreviousHash), 0, 12)]), ?LOG_INFO("Submitted definition content hash matches the stored one: ~ts", [binary:part(rabbit_misc:hexify(PreviousHash), 0, 12)]),
ok; ok;
Other -> Other ->
rabbit_log:debug("Submitted definition content hash: ~ts, stored one: ~ts", [ ?LOG_DEBUG("Submitted definition content hash: ~ts, stored one: ~ts", [
binary:part(rabbit_misc:hexify(Other), 0, 10), binary:part(rabbit_misc:hexify(Other), 0, 10),
binary:part(rabbit_misc:hexify(PreviousHash), 0, 10) binary:part(rabbit_misc:hexify(PreviousHash), 0, 10)
]), ]),
@ -239,7 +240,7 @@ import_parsed_with_hashing(Body0) when is_map(Body0) ->
import_parsed_with_hashing(Body0, VHost) when is_list(Body0) -> import_parsed_with_hashing(Body0, VHost) when is_list(Body0) ->
import_parsed(maps:from_list(Body0), VHost); import_parsed(maps:from_list(Body0), VHost);
import_parsed_with_hashing(Body0, VHost) -> import_parsed_with_hashing(Body0, VHost) ->
rabbit_log:info("Asked to import definitions for virtual host '~ts'. Acting user: ~ts", [?INTERNAL_USER, VHost]), ?LOG_INFO("Asked to import definitions for virtual host '~ts'. Acting user: ~ts", [?INTERNAL_USER, VHost]),
case should_skip_if_unchanged() of case should_skip_if_unchanged() of
false -> false ->
@ -250,10 +251,10 @@ import_parsed_with_hashing(Body0, VHost) ->
Algo = rabbit_definitions_hashing:hashing_algorithm(), Algo = rabbit_definitions_hashing:hashing_algorithm(),
case rabbit_definitions_hashing:hash(Algo, Body) of case rabbit_definitions_hashing:hash(Algo, Body) of
PreviousHash -> PreviousHash ->
rabbit_log:info("Submitted definition content hash matches the stored one: ~ts", [binary:part(rabbit_misc:hexify(PreviousHash), 0, 12)]), ?LOG_INFO("Submitted definition content hash matches the stored one: ~ts", [binary:part(rabbit_misc:hexify(PreviousHash), 0, 12)]),
ok; ok;
Other -> Other ->
rabbit_log:debug("Submitted definition content hash: ~ts, stored one: ~ts", [ ?LOG_DEBUG("Submitted definition content hash: ~ts, stored one: ~ts", [
binary:part(rabbit_misc:hexify(Other), 0, 10), binary:part(rabbit_misc:hexify(Other), 0, 10),
binary:part(rabbit_misc:hexify(PreviousHash), 0, 10) binary:part(rabbit_misc:hexify(PreviousHash), 0, 10)
]), ]),
@ -340,14 +341,14 @@ maybe_load_definitions_from_local_filesystem(App, Key) ->
undefined -> ok; undefined -> ok;
{ok, none} -> ok; {ok, none} -> ok;
{ok, Path} -> {ok, Path} ->
rabbit_log:debug("~ts.~ts is set to '~ts', will discover definition file(s) to import", [App, Key, Path]), ?LOG_DEBUG("~ts.~ts is set to '~ts', will discover definition file(s) to import", [App, Key, Path]),
IsDir = filelib:is_dir(Path), IsDir = filelib:is_dir(Path),
Mod = rabbit_definitions_import_local_filesystem, Mod = rabbit_definitions_import_local_filesystem,
rabbit_log:debug("Will use module ~ts to import definitions", [Mod]), ?LOG_DEBUG("Will use module ~ts to import definitions", [Mod]),
case should_skip_if_unchanged() of case should_skip_if_unchanged() of
false -> false ->
rabbit_log:debug("Will re-import definitions even if they have not changed"), ?LOG_DEBUG("Will re-import definitions even if they have not changed"),
Mod:load(IsDir, Path); Mod:load(IsDir, Path);
true -> true ->
maybe_load_definitions_from_local_filesystem_if_unchanged(Mod, IsDir, Path) maybe_load_definitions_from_local_filesystem_if_unchanged(Mod, IsDir, Path)
@ -356,16 +357,16 @@ maybe_load_definitions_from_local_filesystem(App, Key) ->
maybe_load_definitions_from_local_filesystem_if_unchanged(Mod, IsDir, Path) -> maybe_load_definitions_from_local_filesystem_if_unchanged(Mod, IsDir, Path) ->
Algo = rabbit_definitions_hashing:hashing_algorithm(), Algo = rabbit_definitions_hashing:hashing_algorithm(),
rabbit_log:debug("Will import definitions only if definition file/directory has changed, hashing algo: ~ts", [Algo]), ?LOG_DEBUG("Will import definitions only if definition file/directory has changed, hashing algo: ~ts", [Algo]),
CurrentHash = rabbit_definitions_hashing:stored_global_hash(), CurrentHash = rabbit_definitions_hashing:stored_global_hash(),
rabbit_log:debug("Previously stored hash value of imported definitions: ~ts...", [binary:part(rabbit_misc:hexify(CurrentHash), 0, 12)]), ?LOG_DEBUG("Previously stored hash value of imported definitions: ~ts...", [binary:part(rabbit_misc:hexify(CurrentHash), 0, 12)]),
case Mod:load_with_hashing(IsDir, Path, CurrentHash, Algo) of case Mod:load_with_hashing(IsDir, Path, CurrentHash, Algo) of
{error, Err} -> {error, Err} ->
{error, Err}; {error, Err};
CurrentHash -> CurrentHash ->
rabbit_log:info("Hash value of imported definitions matches current contents"); ?LOG_INFO("Hash value of imported definitions matches current contents");
UpdatedHash -> UpdatedHash ->
rabbit_log:debug("Hash value of imported definitions has changed to ~ts", [binary:part(rabbit_misc:hexify(UpdatedHash), 0, 12)]), ?LOG_DEBUG("Hash value of imported definitions has changed to ~ts", [binary:part(rabbit_misc:hexify(UpdatedHash), 0, 12)]),
rabbit_definitions_hashing:store_global_hash(UpdatedHash) rabbit_definitions_hashing:store_global_hash(UpdatedHash)
end. end.
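The two hash-guarded import paths above (submitted definitions and the local filesystem source) share one decision: compute the content hash, compare it with the stored one, and only do work when they differ. A condensed sketch using the helpers that appear in this diff — the function name maybe_import_changed/1 is illustrative only, and the real code also performs the import itself when the hash differs:

    %% Sketch only: skip work when the definitions' content hash is unchanged,
    %% otherwise remember the new hash.
    maybe_import_changed(Defs) ->
        Algo = rabbit_definitions_hashing:hashing_algorithm(),
        StoredHash = rabbit_definitions_hashing:stored_global_hash(),
        case rabbit_definitions_hashing:hash(Algo, Defs) of
            StoredHash ->
                ?LOG_INFO("Hash value of imported definitions matches current contents");
            NewHash ->
                ?LOG_DEBUG("Hash value of imported definitions has changed to ~ts",
                           [rabbit_misc:hexify(NewHash)]),
                rabbit_definitions_hashing:store_global_hash(NewHash)
        end.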
@ -387,20 +388,20 @@ maybe_load_definitions_from_pluggable_source(App, Key) ->
maybe_load_definitions_from_pluggable_source_if_unchanged(Mod, Proplist) -> maybe_load_definitions_from_pluggable_source_if_unchanged(Mod, Proplist) ->
case should_skip_if_unchanged() of case should_skip_if_unchanged() of
false -> false ->
rabbit_log:debug("Will use module ~ts to import definitions", [Mod]), ?LOG_DEBUG("Will use module ~ts to import definitions", [Mod]),
Mod:load(Proplist); Mod:load(Proplist);
true -> true ->
rabbit_log:debug("Will use module ~ts to import definitions (if definition file/directory/source has changed)", [Mod]), ?LOG_DEBUG("Will use module ~ts to import definitions (if definition file/directory/source has changed)", [Mod]),
CurrentHash = rabbit_definitions_hashing:stored_global_hash(), CurrentHash = rabbit_definitions_hashing:stored_global_hash(),
rabbit_log:debug("Previously stored hash value of imported definitions: ~ts...", [binary:part(rabbit_misc:hexify(CurrentHash), 0, 12)]), ?LOG_DEBUG("Previously stored hash value of imported definitions: ~ts...", [binary:part(rabbit_misc:hexify(CurrentHash), 0, 12)]),
Algo = rabbit_definitions_hashing:hashing_algorithm(), Algo = rabbit_definitions_hashing:hashing_algorithm(),
case Mod:load_with_hashing(Proplist, CurrentHash, Algo) of case Mod:load_with_hashing(Proplist, CurrentHash, Algo) of
{error, Err} -> {error, Err} ->
{error, Err}; {error, Err};
CurrentHash -> CurrentHash ->
rabbit_log:info("Hash value of imported definitions matches current contents"); ?LOG_INFO("Hash value of imported definitions matches current contents");
UpdatedHash -> UpdatedHash ->
rabbit_log:debug("Hash value of imported definitions has changed to ~ts...", [binary:part(rabbit_misc:hexify(CurrentHash), 0, 12)]), ?LOG_DEBUG("Hash value of imported definitions has changed to ~ts...", [binary:part(rabbit_misc:hexify(CurrentHash), 0, 12)]),
rabbit_definitions_hashing:store_global_hash(UpdatedHash) rabbit_definitions_hashing:store_global_hash(UpdatedHash)
end end
end. end.
@ -467,7 +468,7 @@ should_skip_if_unchanged() ->
OptedIn andalso ReachedTargetClusterSize. OptedIn andalso ReachedTargetClusterSize.
log_an_error_about_orphaned_objects() -> log_an_error_about_orphaned_objects() ->
rabbit_log:error("Definitions import: some queues, exchanges or bindings in the definition file " ?LOG_ERROR("Definitions import: some queues, exchanges or bindings in the definition file "
"are missing the virtual host field. Such files are produced when definitions of " "are missing the virtual host field. Such files are produced when definitions of "
"a single virtual host are exported. They cannot be used to import definitions at boot time"). "a single virtual host are exported. They cannot be used to import definitions at boot time").
@ -524,7 +525,7 @@ apply_defs(Map, ActingUser, SuccessFun) when is_function(SuccessFun) ->
end, end,
fun() -> fun() ->
rabbit_log:info("There are fewer than target cluster size (~b) nodes online," ?LOG_INFO("There are fewer than target cluster size (~b) nodes online,"
" skipping queue and binding import from definitions", " skipping queue and binding import from definitions",
[rabbit_nodes:target_cluster_size_hint()]) [rabbit_nodes:target_cluster_size_hint()])
end end
@ -544,7 +545,7 @@ apply_defs(Map, ActingUser, SuccessFun) when is_function(SuccessFun) ->
VHost :: vhost:name()) -> 'ok' | {error, term()}. VHost :: vhost:name()) -> 'ok' | {error, term()}.
apply_defs(Map, ActingUser, SuccessFun, VHost) when is_function(SuccessFun); is_binary(VHost) -> apply_defs(Map, ActingUser, SuccessFun, VHost) when is_function(SuccessFun); is_binary(VHost) ->
rabbit_log:info("Asked to import definitions for a virtual host. Virtual host: ~tp, acting user: ~tp", ?LOG_INFO("Asked to import definitions for a virtual host. Virtual host: ~tp, acting user: ~tp",
[VHost, ActingUser]), [VHost, ActingUser]),
try try
validate_limits(Map, VHost), validate_limits(Map, VHost),
@ -562,7 +563,7 @@ apply_defs(Map, ActingUser, SuccessFun, VHost) when is_function(SuccessFun); is_
end, end,
fun() -> fun() ->
rabbit_log:info("There are fewer than target cluster size (~b) nodes online," ?LOG_INFO("There are fewer than target cluster size (~b) nodes online,"
" skipping queue and binding import from definitions", " skipping queue and binding import from definitions",
[rabbit_nodes:target_cluster_size_hint()]) [rabbit_nodes:target_cluster_size_hint()])
end end
@ -589,7 +590,7 @@ sequential_for_all0(Category, ActingUser, Definitions, Fun) ->
List -> List ->
case length(List) of case length(List) of
0 -> ok; 0 -> ok;
N -> rabbit_log:info("Importing sequentially ~tp ~ts...", [N, human_readable_category_name(Category)]) N -> ?LOG_INFO("Importing sequentially ~tp ~ts...", [N, human_readable_category_name(Category)])
end, end,
[begin [begin
%% keys are expected to be atoms %% keys are expected to be atoms
@ -626,7 +627,7 @@ concurrent_for_all0(Category, ActingUser, Definitions, Fun) ->
List -> List ->
case length(List) of case length(List) of
0 -> ok; 0 -> ok;
N -> rabbit_log:info("Importing concurrently ~tp ~ts...", [N, human_readable_category_name(Category)]) N -> ?LOG_INFO("Importing concurrently ~tp ~ts...", [N, human_readable_category_name(Category)])
end, end,
WorkPoolFun = fun(M) -> WorkPoolFun = fun(M) ->
Fun(atomize_keys(M), ActingUser) Fun(atomize_keys(M), ActingUser)
@ -664,7 +665,7 @@ do_concurrent_for_all(List, WorkPoolFun) ->
WorkPoolFun(M) WorkPoolFun(M)
catch {error, E} -> gatherer:in(Gatherer, {error, E}); catch {error, E} -> gatherer:in(Gatherer, {error, E});
_:E:Stacktrace -> _:E:Stacktrace ->
rabbit_log:debug("Definition import: a work pool operation has thrown an exception ~st, stacktrace: ~p", ?LOG_DEBUG("Definition import: a work pool operation has thrown an exception ~st, stacktrace: ~p",
[E, Stacktrace]), [E, Stacktrace]),
gatherer:in(Gatherer, {error, E}) gatherer:in(Gatherer, {error, E})
end, end,
@ -706,7 +707,7 @@ format({no_such_vhost, VHost}) ->
format({vhost_limit_exceeded, ErrMsg}) -> format({vhost_limit_exceeded, ErrMsg}) ->
rabbit_data_coercion:to_binary(ErrMsg); rabbit_data_coercion:to_binary(ErrMsg);
format({shutdown, _} = Error) -> format({shutdown, _} = Error) ->
rabbit_log:debug("Metadata store is unavailable: ~p", [Error]), ?LOG_DEBUG("Metadata store is unavailable: ~p", [Error]),
rabbit_data_coercion:to_binary( rabbit_data_coercion:to_binary(
rabbit_misc:format("Metadata store is unavailable. Please try again.", [])); rabbit_misc:format("Metadata store is unavailable. Please try again.", []));
format(E) -> format(E) ->
@ -825,11 +826,11 @@ add_queue(VHost, Queue, ActingUser) ->
add_queue_int(_Queue, R = #resource{kind = queue, add_queue_int(_Queue, R = #resource{kind = queue,
name = <<"amq.", _/binary>>}, ActingUser) -> name = <<"amq.", _/binary>>}, ActingUser) ->
Name = R#resource.name, Name = R#resource.name,
rabbit_log:warning("Skipping import of a queue whose name begins with 'amq.', " ?LOG_WARNING("Skipping import of a queue whose name begins with 'amq.', "
"name: ~ts, acting user: ~ts", [Name, ActingUser]); "name: ~ts, acting user: ~ts", [Name, ActingUser]);
add_queue_int(_Queue, R = #resource{kind = queue, virtual_host = undefined}, ActingUser) -> add_queue_int(_Queue, R = #resource{kind = queue, virtual_host = undefined}, ActingUser) ->
Name = R#resource.name, Name = R#resource.name,
rabbit_log:warning("Skipping import of a queue with an unset virtual host field, " ?LOG_WARNING("Skipping import of a queue with an unset virtual host field, "
"name: ~ts, acting user: ~ts", [Name, ActingUser]); "name: ~ts, acting user: ~ts", [Name, ActingUser]);
add_queue_int(Queue, Name = #resource{virtual_host = VHostName}, ActingUser) -> add_queue_int(Queue, Name = #resource{virtual_host = VHostName}, ActingUser) ->
case rabbit_amqqueue:exists(Name) of case rabbit_amqqueue:exists(Name) of
@ -862,11 +863,11 @@ add_exchange(VHost, Exchange, ActingUser) ->
add_exchange_int(Exchange, rv(VHost, exchange, Exchange), ActingUser). add_exchange_int(Exchange, rv(VHost, exchange, Exchange), ActingUser).
add_exchange_int(_Exchange, #resource{kind = exchange, name = <<"">>}, ActingUser) -> add_exchange_int(_Exchange, #resource{kind = exchange, name = <<"">>}, ActingUser) ->
rabbit_log:warning("Not importing the default exchange, acting user: ~ts", [ActingUser]); ?LOG_WARNING("Not importing the default exchange, acting user: ~ts", [ActingUser]);
add_exchange_int(_Exchange, R = #resource{kind = exchange, add_exchange_int(_Exchange, R = #resource{kind = exchange,
name = <<"amq.", _/binary>>}, ActingUser) -> name = <<"amq.", _/binary>>}, ActingUser) ->
Name = R#resource.name, Name = R#resource.name,
rabbit_log:warning("Skipping import of an exchange whose name begins with 'amq.', " ?LOG_WARNING("Skipping import of an exchange whose name begins with 'amq.', "
"name: ~ts, acting user: ~ts", [Name, ActingUser]); "name: ~ts, acting user: ~ts", [Name, ActingUser]);
add_exchange_int(Exchange, Name, ActingUser) -> add_exchange_int(Exchange, Name, ActingUser) ->
case rabbit_exchange:exists(Name) of case rabbit_exchange:exists(Name) of
@ -934,7 +935,7 @@ validate_limits(All) ->
undefined -> ok; undefined -> ok;
Queues0 -> Queues0 ->
{ok, VHostMap} = filter_out_existing_queues(Queues0), {ok, VHostMap} = filter_out_existing_queues(Queues0),
_ = rabbit_log:debug("Definition import. Virtual host map for validation: ~p", [VHostMap]), _ = ?LOG_DEBUG("Definition import. Virtual host map for validation: ~p", [VHostMap]),
maps:fold(fun validate_vhost_limit/3, ok, VHostMap) maps:fold(fun validate_vhost_limit/3, ok, VHostMap)
end. end.
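The hunks above all apply the same mechanical change: the module gains an -include_lib("kernel/include/logger.hrl"). line and each rabbit_log:Level(Fmt, Args) call becomes the matching ?LOG_LEVEL(Fmt, Args) macro with the format string and arguments left untouched. A minimal sketch of that pattern, with an illustrative module name and messages that are not part of this commit:

    -module(log_macro_migration_sketch).
    -include_lib("kernel/include/logger.hrl").
    -export([import_summary/1]).

    import_summary(Defs) when is_list(Defs) ->
        %% before: rabbit_log:debug("Importing ~b definitions", [length(Defs)]),
        ?LOG_DEBUG("Importing ~b definitions", [length(Defs)]),
        %% before: rabbit_log:info("Definition import finished"),
        ?LOG_INFO("Definition import finished"),
        ok.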


@ -20,6 +20,7 @@
-behaviour(rabbit_runtime_parameter). -behaviour(rabbit_runtime_parameter).
-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-import(rabbit_misc, [pget/2, pget/3]). -import(rabbit_misc, [pget/2, pget/3]).
@ -109,7 +110,7 @@ stored_vhost_specific_hash(VHostName) ->
-spec store_global_hash(Value :: term()) -> ok. -spec store_global_hash(Value :: term()) -> ok.
store_global_hash(Value) -> store_global_hash(Value) ->
rabbit_log:debug("Storing global imported definitions content hash, hex value: ~ts", [rabbit_misc:hexify(Value)]), ?LOG_DEBUG("Storing global imported definitions content hash, hex value: ~ts", [rabbit_misc:hexify(Value)]),
store_global_hash(Value, ?INTERNAL_USER). store_global_hash(Value, ?INTERNAL_USER).
-spec store_global_hash(Value0 :: term(), Username :: rabbit_types:username()) -> ok. -spec store_global_hash(Value0 :: term(), Username :: rabbit_types:username()) -> ok.


@ -14,6 +14,9 @@
%% * rabbit_definitions_import_local_filesystem %% * rabbit_definitions_import_local_filesystem
%% * rabbit_definitions_hashing %% * rabbit_definitions_hashing
-module(rabbit_definitions_import_https). -module(rabbit_definitions_import_https).
-include_lib("kernel/include/logger.hrl").
-export([ -export([
is_enabled/0, is_enabled/0,
load/1, load/1,
@ -47,8 +50,8 @@ is_enabled() ->
-spec load(Proplist :: list() | map()) -> ok | {error, term()}. -spec load(Proplist :: list() | map()) -> ok | {error, term()}.
load(Proplist) -> load(Proplist) ->
URL = pget(url, Proplist), URL = pget(url, Proplist),
rabbit_log:info("Applying definitions from a remote URL"), ?LOG_INFO("Applying definitions from a remote URL"),
rabbit_log:debug("HTTPS URL: ~ts", [URL]), ?LOG_DEBUG("HTTPS URL: ~ts", [URL]),
TLSOptions0 = tls_options_or_default(Proplist), TLSOptions0 = tls_options_or_default(Proplist),
TLSOptions = rabbit_ssl:wrap_password_opt(TLSOptions0), TLSOptions = rabbit_ssl:wrap_password_opt(TLSOptions0),
HTTPOptions = http_options(TLSOptions), HTTPOptions = http_options(TLSOptions),
@ -57,8 +60,8 @@ load(Proplist) ->
-spec load_with_hashing(Proplist :: list() | map(), PreviousHash :: binary() | 'undefined', Algo :: crypto:sha1() | crypto:sha2()) -> binary() | 'undefined'. -spec load_with_hashing(Proplist :: list() | map(), PreviousHash :: binary() | 'undefined', Algo :: crypto:sha1() | crypto:sha2()) -> binary() | 'undefined'.
load_with_hashing(Proplist, PreviousHash, Algo) -> load_with_hashing(Proplist, PreviousHash, Algo) ->
URL = pget(url, Proplist), URL = pget(url, Proplist),
rabbit_log:info("Applying definitions from a remote URL"), ?LOG_INFO("Applying definitions from a remote URL"),
rabbit_log:debug("Loading definitions with content hashing enabled, HTTPS URL: ~ts, previous hash value: ~ts", ?LOG_DEBUG("Loading definitions with content hashing enabled, HTTPS URL: ~ts, previous hash value: ~ts",
[URL, rabbit_misc:hexify(PreviousHash)]), [URL, rabbit_misc:hexify(PreviousHash)]),
TLSOptions = tls_options_or_default(Proplist), TLSOptions = tls_options_or_default(Proplist),
@ -67,20 +70,20 @@ load_with_hashing(Proplist, PreviousHash, Algo) ->
case httpc_get(URL, HTTPOptions) of case httpc_get(URL, HTTPOptions) of
%% 2XX %% 2XX
{ok, {{_, Code, _}, _Headers, Body}} when Code div 100 == 2 -> {ok, {{_, Code, _}, _Headers, Body}} when Code div 100 == 2 ->
rabbit_log:debug("Requested definitions from remote URL '~ts', response code: ~b", [URL, Code]), ?LOG_DEBUG("Requested definitions from remote URL '~ts', response code: ~b", [URL, Code]),
rabbit_log:debug("Requested definitions from remote URL '~ts', body: ~tp", [URL, Body]), ?LOG_DEBUG("Requested definitions from remote URL '~ts', body: ~tp", [URL, Body]),
case rabbit_definitions_hashing:hash(Algo, Body) of case rabbit_definitions_hashing:hash(Algo, Body) of
PreviousHash -> PreviousHash; PreviousHash -> PreviousHash;
Other -> Other ->
rabbit_log:debug("New hash: ~ts", [rabbit_misc:hexify(Other)]), ?LOG_DEBUG("New hash: ~ts", [rabbit_misc:hexify(Other)]),
_ = import_raw(Body), _ = import_raw(Body),
Other Other
end; end;
{ok, {{_, Code, _}, _Headers, _Body}} when Code >= 400 -> {ok, {{_, Code, _}, _Headers, _Body}} when Code >= 400 ->
rabbit_log:debug("Requested definitions from remote URL '~ts', response code: ~b", [URL, Code]), ?LOG_DEBUG("Requested definitions from remote URL '~ts', response code: ~b", [URL, Code]),
{error, {could_not_read_defs, {URL, rabbit_misc:format("URL request failed with response code ~b", [Code])}}}; {error, {could_not_read_defs, {URL, rabbit_misc:format("URL request failed with response code ~b", [Code])}}};
{error, Reason} -> {error, Reason} ->
rabbit_log:error("Requested definitions from remote URL '~ts', error: ~tp", [URL, Reason]), ?LOG_ERROR("Requested definitions from remote URL '~ts', error: ~tp", [URL, Reason]),
{error, {could_not_read_defs, {URL, Reason}}} {error, {could_not_read_defs, {URL, Reason}}}
end. end.
@ -93,14 +96,14 @@ load_from_url(URL, HTTPOptions0) ->
case httpc_get(URL, HTTPOptions0) of case httpc_get(URL, HTTPOptions0) of
%% 2XX %% 2XX
{ok, {{_, Code, _}, _Headers, Body}} when Code div 100 == 2 -> {ok, {{_, Code, _}, _Headers, Body}} when Code div 100 == 2 ->
rabbit_log:debug("Requested definitions from remote URL '~ts', response code: ~b", [URL, Code]), ?LOG_DEBUG("Requested definitions from remote URL '~ts', response code: ~b", [URL, Code]),
rabbit_log:debug("Requested definitions from remote URL '~ts', body: ~tp", [URL, Body]), ?LOG_DEBUG("Requested definitions from remote URL '~ts', body: ~tp", [URL, Body]),
import_raw(Body); import_raw(Body);
{ok, {{_, Code, _}, _Headers, _Body}} when Code >= 400 -> {ok, {{_, Code, _}, _Headers, _Body}} when Code >= 400 ->
rabbit_log:debug("Requested definitions from remote URL '~ts', response code: ~b", [URL, Code]), ?LOG_DEBUG("Requested definitions from remote URL '~ts', response code: ~b", [URL, Code]),
{error, {could_not_read_defs, {URL, rabbit_misc:format("URL request failed with response code ~b", [Code])}}}; {error, {could_not_read_defs, {URL, rabbit_misc:format("URL request failed with response code ~b", [Code])}}};
{error, Reason} -> {error, Reason} ->
rabbit_log:error("Requested definitions from remote URL '~ts', error: ~tp", [URL, Reason]), ?LOG_ERROR("Requested definitions from remote URL '~ts', error: ~tp", [URL, Reason]),
{error, {could_not_read_defs, {URL, Reason}}} {error, {could_not_read_defs, {URL, Reason}}}
end. end.
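For the remote URL source above, load_with_hashing/3 hashes the fetched body and only re-imports when the hash differs from the previously stored value. A self-contained sketch of that skip logic, with crypto:hash/2 standing in for rabbit_definitions_hashing:hash/2 and binary:encode_hex/1 for rabbit_misc:hexify/1 (function names and messages below are illustrative only):

    -module(hash_skip_sketch).
    -include_lib("kernel/include/logger.hrl").
    -export([maybe_import/4]).

    %% Returns the hash value to store: the previous hash when the content is
    %% unchanged, otherwise the new hash after running the import function.
    maybe_import(Body, PreviousHash, Algo, ImportFun)
      when is_binary(Body), is_function(ImportFun, 1) ->
        case crypto:hash(Algo, Body) of
            PreviousHash ->
                ?LOG_INFO("Definitions are unchanged, skipping import"),
                PreviousHash;
            NewHash ->
                ?LOG_DEBUG("New definitions hash: ~ts", [binary:encode_hex(NewHash)]),
                _ = ImportFun(Body),
                NewHash
        end.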


@ -15,6 +15,9 @@
%% * rabbit_definitions_import_http %% * rabbit_definitions_import_http
%% * rabbit_definitions_hashing %% * rabbit_definitions_hashing
-module(rabbit_definitions_import_local_filesystem). -module(rabbit_definitions_import_local_filesystem).
-include_lib("kernel/include/logger.hrl").
-export([ -export([
is_enabled/0, is_enabled/0,
%% definition source options %% definition source options
@ -48,7 +51,7 @@ load(Proplist) when is_list(Proplist) ->
case pget(local_path, Proplist, undefined) of case pget(local_path, Proplist, undefined) of
undefined -> {error, "local definition file path is not configured: local_path is not set"}; undefined -> {error, "local definition file path is not configured: local_path is not set"};
Path -> Path ->
rabbit_log:debug("Asked to import definitions from a local file or directory at '~ts'", [Path]), ?LOG_DEBUG("Asked to import definitions from a local file or directory at '~ts'", [Path]),
IsDir = filelib:is_dir(Path), IsDir = filelib:is_dir(Path),
case IsDir of case IsDir of
true -> true ->
@ -75,7 +78,7 @@ load_with_hashing(Proplist, PreviousHash, Algo) ->
-spec load_with_hashing(IsDir :: boolean(), Path :: file:name_all(), PreviousHash :: binary() | 'undefined', Algo :: crypto:sha1() | crypto:sha2()) -> binary() | 'undefined'. -spec load_with_hashing(IsDir :: boolean(), Path :: file:name_all(), PreviousHash :: binary() | 'undefined', Algo :: crypto:sha1() | crypto:sha2()) -> binary() | 'undefined'.
load_with_hashing(IsDir, Path, PreviousHash, Algo) when is_boolean(IsDir) -> load_with_hashing(IsDir, Path, PreviousHash, Algo) when is_boolean(IsDir) ->
rabbit_log:debug("Loading definitions with content hashing enabled, path: ~ts, is directory?: ~tp, previous hash value: ~ts", ?LOG_DEBUG("Loading definitions with content hashing enabled, path: ~ts, is directory?: ~tp, previous hash value: ~ts",
[Path, IsDir, rabbit_misc:hexify(PreviousHash)]), [Path, IsDir, rabbit_misc:hexify(PreviousHash)]),
case compiled_definitions_from_local_path(IsDir, Path) of case compiled_definitions_from_local_path(IsDir, Path) of
%% the directory is empty or no files could be read %% the directory is empty or no files could be read
@ -87,12 +90,12 @@ load_with_hashing(IsDir, Path, PreviousHash, Algo) when is_boolean(IsDir) ->
case rabbit_definitions_hashing:hash(Algo, Defs) of case rabbit_definitions_hashing:hash(Algo, Defs) of
PreviousHash -> PreviousHash; PreviousHash -> PreviousHash;
Other -> Other ->
rabbit_log:debug("New hash: ~ts", [rabbit_misc:hexify(Other)]), ?LOG_DEBUG("New hash: ~ts", [rabbit_misc:hexify(Other)]),
_ = load_from_local_path(IsDir, Path), _ = load_from_local_path(IsDir, Path),
Other Other
end; end;
false -> false ->
rabbit_log:error("Definitions file at path ~p failed validation. The file must be a valid JSON document " ?LOG_ERROR("Definitions file at path ~p failed validation. The file must be a valid JSON document "
"and all virtual host-scoped resources must have a virtual host field to be set. " "and all virtual host-scoped resources must have a virtual host field to be set. "
"Definition files exported for a single virtual host CANNOT be imported at boot time", [Path]), "Definition files exported for a single virtual host CANNOT be imported at boot time", [Path]),
{error, not_json} {error, not_json}
@ -107,10 +110,10 @@ location() ->
-spec load_from_local_path(IsDir :: boolean(), Path :: file:name_all()) -> ok | {error, term()}. -spec load_from_local_path(IsDir :: boolean(), Path :: file:name_all()) -> ok | {error, term()}.
load_from_local_path(true, Dir) -> load_from_local_path(true, Dir) ->
rabbit_log:info("Applying definitions from directory ~ts", [Dir]), ?LOG_INFO("Applying definitions from directory ~ts", [Dir]),
load_from_files(file:list_dir(Dir), Dir); load_from_files(file:list_dir(Dir), Dir);
load_from_local_path(false, File) -> load_from_local_path(false, File) ->
rabbit_log:info("Applying definitions from regular file at ~ts", [File]), ?LOG_INFO("Applying definitions from regular file at ~ts", [File]),
load_from_single_file(File). load_from_single_file(File).
%% %%
@ -169,7 +172,7 @@ compiled_definitions_from_local_path(true = _IsDir, Dir) ->
end, ReadResults), end, ReadResults),
[Body || {ok, Body} <- Successes]; [Body || {ok, Body} <- Successes];
{error, E} -> {error, E} ->
rabbit_log:error("Could not list files in '~ts', error: ~tp", [Dir, E]), ?LOG_ERROR("Could not list files in '~ts', error: ~tp", [Dir, E]),
{error, {could_not_read_defs, {Dir, E}}} {error, {could_not_read_defs, {Dir, E}}}
end; end;
compiled_definitions_from_local_path(false = _IsDir, Path) -> compiled_definitions_from_local_path(false = _IsDir, Path) ->
@ -184,7 +187,7 @@ read_file_contents(Path) ->
{ok, Body} -> {ok, Body} ->
Body; Body;
{error, E} -> {error, E} ->
rabbit_log:error("Could not read definitions from file at '~ts', error: ~tp", [Path, E]), ?LOG_ERROR("Could not read definitions from file at '~ts', error: ~tp", [Path, E]),
{error, {could_not_read_defs, {Path, E}}} {error, {could_not_read_defs, {Path, E}}}
end. end.
@ -193,7 +196,7 @@ load_from_files({ok, Filenames0}, Dir) ->
Filenames2 = [filename:join(Dir, F) || F <- Filenames1], Filenames2 = [filename:join(Dir, F) || F <- Filenames1],
load_from_multiple_files(Filenames2); load_from_multiple_files(Filenames2);
load_from_files({error, E}, Dir) -> load_from_files({error, E}, Dir) ->
rabbit_log:error("Could not read definitions from directory ~ts, Error: ~tp", [Dir, E]), ?LOG_ERROR("Could not read definitions from directory ~ts, Error: ~tp", [Dir, E]),
{error, {could_not_read_defs, E}}. {error, {could_not_read_defs, E}}.
load_from_multiple_files([]) -> load_from_multiple_files([]) ->
@ -205,7 +208,7 @@ load_from_multiple_files([File|Rest]) ->
end. end.
load_from_single_file(Path) -> load_from_single_file(Path) ->
rabbit_log:debug("Will try to load definitions from a local file or directory at '~ts'", [Path]), ?LOG_DEBUG("Will try to load definitions from a local file or directory at '~ts'", [Path]),
case file:read_file_info(Path, [raw]) of case file:read_file_info(Path, [raw]) of
{ok, FileInfo} -> {ok, FileInfo} ->
@ -215,10 +218,10 @@ load_from_single_file(Path) ->
true -> true ->
case rabbit_misc:raw_read_file(Path) of case rabbit_misc:raw_read_file(Path) of
{ok, Body} -> {ok, Body} ->
rabbit_log:info("Applying definitions from file at '~ts'", [Path]), ?LOG_INFO("Applying definitions from file at '~ts'", [Path]),
import_raw(Body); import_raw(Body);
{error, E} -> {error, E} ->
rabbit_log:error("Could not read definitions from file at '~ts', error: ~tp", [Path, E]), ?LOG_ERROR("Could not read definitions from file at '~ts', error: ~tp", [Path, E]),
{error, {could_not_read_defs, {Path, E}}} {error, {could_not_read_defs, {Path, E}}}
end; end;
false -> false ->


@ -7,6 +7,9 @@
-module(rabbit_disk_monitor). -module(rabbit_disk_monitor).
-include_lib("kernel/include/logger.hrl").
%% Disk monitoring server. Monitors free disk space %% Disk monitoring server. Monitors free disk space
%% periodically and sets alarms when it is below a certain %% periodically and sets alarms when it is below a certain
%% watermark (configurable either as an absolute value or %% watermark (configurable either as an absolute value or
@ -145,7 +148,7 @@ init([Limit]) ->
{ok, State4}. {ok, State4}.
handle_call({set_disk_free_limit, _}, _From, #state{enabled = false} = State) -> handle_call({set_disk_free_limit, _}, _From, #state{enabled = false} = State) ->
rabbit_log:info("Cannot set disk free limit: " ?LOG_INFO("Cannot set disk free limit: "
"disabled disk free space monitoring", []), "disabled disk free space monitoring", []),
{reply, ok, State}; {reply, ok, State};
@ -163,22 +166,22 @@ handle_call({set_max_check_interval, MaxInterval}, _From, State) ->
handle_call({set_enabled, _Enabled = true}, _From, State = #state{enabled = true}) -> handle_call({set_enabled, _Enabled = true}, _From, State = #state{enabled = true}) ->
_ = start_timer(set_disk_limits(State, State#state.limit)), _ = start_timer(set_disk_limits(State, State#state.limit)),
rabbit_log:info("Free disk space monitor was already enabled"), ?LOG_INFO("Free disk space monitor was already enabled"),
{reply, ok, State#state{enabled = true}}; {reply, ok, State#state{enabled = true}};
handle_call({set_enabled, _Enabled = true}, _From, State = #state{enabled = false}) -> handle_call({set_enabled, _Enabled = true}, _From, State = #state{enabled = false}) ->
_ = start_timer(set_disk_limits(State, State#state.limit)), _ = start_timer(set_disk_limits(State, State#state.limit)),
rabbit_log:info("Free disk space monitor was manually enabled"), ?LOG_INFO("Free disk space monitor was manually enabled"),
{reply, ok, State#state{enabled = true}}; {reply, ok, State#state{enabled = true}};
handle_call({set_enabled, _Enabled = false}, _From, State = #state{enabled = true}) -> handle_call({set_enabled, _Enabled = false}, _From, State = #state{enabled = true}) ->
_ = erlang:cancel_timer(State#state.timer), _ = erlang:cancel_timer(State#state.timer),
rabbit_log:info("Free disk space monitor was manually disabled"), ?LOG_INFO("Free disk space monitor was manually disabled"),
{reply, ok, State#state{enabled = false}}; {reply, ok, State#state{enabled = false}};
handle_call({set_enabled, _Enabled = false}, _From, State = #state{enabled = false}) -> handle_call({set_enabled, _Enabled = false}, _From, State = #state{enabled = false}) ->
_ = erlang:cancel_timer(State#state.timer), _ = erlang:cancel_timer(State#state.timer),
rabbit_log:info("Free disk space monitor was already disabled"), ?LOG_INFO("Free disk space monitor was already disabled"),
{reply, ok, State#state{enabled = false}}; {reply, ok, State#state{enabled = false}};
handle_call(_Request, _From, State) -> handle_call(_Request, _From, State) ->
@ -194,7 +197,7 @@ handle_info(update, State) ->
{noreply, start_timer(internal_update(State))}; {noreply, start_timer(internal_update(State))};
handle_info(Info, State) -> handle_info(Info, State) ->
rabbit_log:debug("~tp unhandled msg: ~tp", [?MODULE, Info]), ?LOG_DEBUG("~tp unhandled msg: ~tp", [?MODULE, Info]),
{noreply, State}. {noreply, State}.
terminate(_Reason, _State) -> terminate(_Reason, _State) ->
@ -271,7 +274,7 @@ set_max_check_interval(MaxInterval, State) ->
set_disk_limits(State, Limit0) -> set_disk_limits(State, Limit0) ->
Limit = interpret_limit(Limit0), Limit = interpret_limit(Limit0),
State1 = State#state { limit = Limit }, State1 = State#state { limit = Limit },
rabbit_log:info("Disk free limit set to ~bMB", ?LOG_INFO("Disk free limit set to ~bMB",
[trunc(Limit / 1000000)]), [trunc(Limit / 1000000)]),
ets:insert(?ETS_NAME, {disk_free_limit, Limit}), ets:insert(?ETS_NAME, {disk_free_limit, Limit}),
internal_update(State1). internal_update(State1).
@ -309,7 +312,7 @@ get_disk_free(Dir, {win32, _}, not_used) ->
% "c:/Users/username/AppData/Roaming/RabbitMQ/db/rabbit2@username-z01-mnesia" % "c:/Users/username/AppData/Roaming/RabbitMQ/db/rabbit2@username-z01-mnesia"
case win32_get_drive_letter(Dir) of case win32_get_drive_letter(Dir) of
error -> error ->
rabbit_log:warning("Expected the mnesia directory absolute " ?LOG_WARNING("Expected the mnesia directory absolute "
"path to start with a drive letter like " "path to start with a drive letter like "
"'C:'. The path is: '~tp'", [Dir]), "'C:'. The path is: '~tp'", [Dir]),
{ok, Free} = win32_get_disk_free_dir(Dir), {ok, Free} = win32_get_disk_free_dir(Dir),
@ -340,7 +343,7 @@ get_disk_free(Dir, {win32, _}, not_used) ->
%% could not compute the result %% could not compute the result
'NaN'; 'NaN';
_:Reason:_ -> _:Reason:_ ->
rabbit_log:warning("Free disk space monitoring failed to retrieve the amount of available space: ~p", [Reason]), ?LOG_WARNING("Free disk space monitoring failed to retrieve the amount of available space: ~p", [Reason]),
%% could not compute the result %% could not compute the result
'NaN' 'NaN'
end end
@ -405,13 +408,13 @@ interpret_limit(Absolute) ->
case rabbit_resource_monitor_misc:parse_information_unit(Absolute) of case rabbit_resource_monitor_misc:parse_information_unit(Absolute) of
{ok, ParsedAbsolute} -> ParsedAbsolute; {ok, ParsedAbsolute} -> ParsedAbsolute;
{error, parse_error} -> {error, parse_error} ->
rabbit_log:error("Unable to parse disk_free_limit value ~tp", ?LOG_ERROR("Unable to parse disk_free_limit value ~tp",
[Absolute]), [Absolute]),
?DEFAULT_DISK_FREE_LIMIT ?DEFAULT_DISK_FREE_LIMIT
end. end.
emit_update_info(StateStr, CurrentFree, Limit) -> emit_update_info(StateStr, CurrentFree, Limit) ->
rabbit_log:info( ?LOG_INFO(
"Free disk space is ~ts. Free bytes: ~b. Limit: ~b", "Free disk space is ~ts. Free bytes: ~b. Limit: ~b",
[StateStr, CurrentFree, Limit]). [StateStr, CurrentFree, Limit]).
@ -432,7 +435,7 @@ interval(#state{limit = Limit,
trunc(erlang:max(MinInterval, erlang:min(MaxInterval, IdealInterval))). trunc(erlang:max(MinInterval, erlang:min(MaxInterval, IdealInterval))).
enable(#state{retries = 0} = State) -> enable(#state{retries = 0} = State) ->
rabbit_log:error("Free disk space monitor failed to start!"), ?LOG_ERROR("Free disk space monitor failed to start!"),
State; State;
enable(#state{dir = Dir, os = OS, port = Port} = State) -> enable(#state{dir = Dir, os = OS, port = Port} = State) ->
enable_handle_disk_free(catch get_disk_free(Dir, OS, Port), State). enable_handle_disk_free(catch get_disk_free(Dir, OS, Port), State).
@ -440,7 +443,7 @@ enable(#state{dir = Dir, os = OS, port = Port} = State) ->
enable_handle_disk_free(DiskFree, State) when is_integer(DiskFree) -> enable_handle_disk_free(DiskFree, State) when is_integer(DiskFree) ->
enable_handle_total_memory(catch vm_memory_monitor:get_total_memory(), DiskFree, State); enable_handle_total_memory(catch vm_memory_monitor:get_total_memory(), DiskFree, State);
enable_handle_disk_free(Error, #state{interval = Interval, retries = Retries} = State) -> enable_handle_disk_free(Error, #state{interval = Interval, retries = Retries} = State) ->
rabbit_log:warning("Free disk space monitor encountered an error " ?LOG_WARNING("Free disk space monitor encountered an error "
"(e.g. failed to parse output from OS tools). " "(e.g. failed to parse output from OS tools). "
"Retries left: ~b Error:~n~tp", "Retries left: ~b Error:~n~tp",
[Retries, Error]), [Retries, Error]),
@ -448,11 +451,11 @@ enable_handle_disk_free(Error, #state{interval = Interval, retries = Retries} =
State#state{enabled = false}. State#state{enabled = false}.
enable_handle_total_memory(TotalMemory, DiskFree, #state{limit = Limit} = State) when is_integer(TotalMemory) -> enable_handle_total_memory(TotalMemory, DiskFree, #state{limit = Limit} = State) when is_integer(TotalMemory) ->
rabbit_log:info("Enabling free disk space monitoring " ?LOG_INFO("Enabling free disk space monitoring "
"(disk free space: ~b, total memory: ~b)", [DiskFree, TotalMemory]), "(disk free space: ~b, total memory: ~b)", [DiskFree, TotalMemory]),
start_timer(set_disk_limits(State, Limit)); start_timer(set_disk_limits(State, Limit));
enable_handle_total_memory(Error, _DiskFree, #state{interval = Interval, retries = Retries} = State) -> enable_handle_total_memory(Error, _DiskFree, #state{interval = Interval, retries = Retries} = State) ->
rabbit_log:warning("Free disk space monitor encountered an error " ?LOG_WARNING("Free disk space monitor encountered an error "
"retrieving total memory. " "retrieving total memory. "
"Retries left: ~b Error:~n~tp", "Retries left: ~b Error:~n~tp",
[Retries, Error]), [Retries, Error]),
@ -472,6 +475,6 @@ run_os_cmd(Cmd) ->
CmdResult CmdResult
after 5000 -> after 5000 ->
exit(CmdPid, kill), exit(CmdPid, kill),
rabbit_log:error("Command timed out: '~ts'", [Cmd]), ?LOG_ERROR("Command timed out: '~ts'", [Cmd]),
{error, timeout} {error, timeout}
end. end.
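One practical effect of the switch for rabbit_disk_monitor and the other modules in this commit is that the ?LOG_* macros attach location metadata (mfa, file, line) for the call site to every event, something a wrapper function call cannot record for its caller automatically. A small sketch of such a call site, with a hypothetical function and message:

    -module(disk_alarm_log_sketch).
    -include_lib("kernel/include/logger.hrl").
    -export([warn_if_low/2]).

    %% The macro expands to a logger call that carries mfa/file/line metadata
    %% for this exact call site, so a handler can report where the event came from.
    warn_if_low(FreeBytes, LimitBytes) when FreeBytes < LimitBytes ->
        ?LOG_WARNING("Free disk space ~b is below the limit ~b",
                     [FreeBytes, LimitBytes]);
    warn_if_low(_FreeBytes, _LimitBytes) ->
        ok.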


@ -7,6 +7,9 @@
-module(rabbit_epmd_monitor). -module(rabbit_epmd_monitor).
-include_lib("kernel/include/logger.hrl").
-behaviour(gen_server). -behaviour(gen_server).
-export([start_link/0]). -export([start_link/0]).
@ -84,19 +87,19 @@ check_epmd(State = #state{mod = Mod,
{ok, State#state{port = Port1}}. {ok, State#state{port = Port1}}.
handle_port_please(init, noport, Me, Port) -> handle_port_please(init, noport, Me, Port) ->
rabbit_log:info("epmd does not know us, re-registering as ~ts", [Me]), ?LOG_INFO("epmd does not know us, re-registering as ~ts", [Me]),
{ok, Port}; {ok, Port};
handle_port_please(check, noport, Me, Port) -> handle_port_please(check, noport, Me, Port) ->
rabbit_log:warning("epmd does not know us, re-registering ~ts at port ~b", [Me, Port]), ?LOG_WARNING("epmd does not know us, re-registering ~ts at port ~b", [Me, Port]),
{ok, Port}; {ok, Port};
handle_port_please(_, closed, _Me, Port) -> handle_port_please(_, closed, _Me, Port) ->
rabbit_log:error("epmd monitor failed to retrieve our port from epmd: closed"), ?LOG_ERROR("epmd monitor failed to retrieve our port from epmd: closed"),
{ok, Port}; {ok, Port};
handle_port_please(init, {port, NewPort, _Version}, _Me, _Port) -> handle_port_please(init, {port, NewPort, _Version}, _Me, _Port) ->
rabbit_log:info("epmd monitor knows us, inter-node communication (distribution) port: ~tp", [NewPort]), ?LOG_INFO("epmd monitor knows us, inter-node communication (distribution) port: ~tp", [NewPort]),
{ok, NewPort}; {ok, NewPort};
handle_port_please(check, {port, NewPort, _Version}, _Me, _Port) -> handle_port_please(check, {port, NewPort, _Version}, _Me, _Port) ->
{ok, NewPort}; {ok, NewPort};
handle_port_please(_, {error, Error}, _Me, Port) -> handle_port_please(_, {error, Error}, _Me, Port) ->
rabbit_log:error("epmd monitor failed to retrieve our port from epmd: ~tp", [Error]), ?LOG_ERROR("epmd monitor failed to retrieve our port from epmd: ~tp", [Error]),
{ok, Port}. {ok, Port}.


@ -7,6 +7,7 @@
-module(rabbit_exchange). -module(rabbit_exchange).
-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-export([recover/1, policy_changed/2, callback/4, declare/7, -export([recover/1, policy_changed/2, callback/4, declare/7,
assert_equivalence/6, assert_args_equivalence/2, check_type/1, exists/1, assert_equivalence/6, assert_args_equivalence/2, check_type/1, exists/1,
@ -139,7 +140,7 @@ declare(XName, Type, Durable, AutoDelete, Internal, Args, Username) ->
Err Err
end; end;
_ -> _ ->
rabbit_log:warning("ignoring exchange.declare for exchange ~tp, ?LOG_WARNING("ignoring exchange.declare for exchange ~tp,
exchange.delete in progress~n.", [XName]), exchange.delete in progress~n.", [XName]),
{ok, X} {ok, X}
end. end.
@ -554,7 +555,7 @@ peek_serial(XName) ->
rabbit_db_exchange:peek_serial(XName). rabbit_db_exchange:peek_serial(XName).
invalid_module(T) -> invalid_module(T) ->
rabbit_log:warning("Could not find exchange type ~ts.", [T]), ?LOG_WARNING("Could not find exchange type ~ts.", [T]),
put({xtype_to_module, T}, rabbit_exchange_type_invalid), put({xtype_to_module, T}, rabbit_exchange_type_invalid),
rabbit_exchange_type_invalid. rabbit_exchange_type_invalid.


@ -14,6 +14,7 @@
-dialyzer(no_improper_lists). -dialyzer(no_improper_lists).
-include("rabbit_fifo.hrl"). -include("rabbit_fifo.hrl").
-include_lib("kernel/include/logger.hrl").
-define(STATE, ?MODULE). -define(STATE, ?MODULE).
@ -676,7 +677,7 @@ apply(Meta, {dlx, _} = Cmd,
checkout(Meta, State0, State1, Effects0); checkout(Meta, State0, State1, Effects0);
apply(_Meta, Cmd, State) -> apply(_Meta, Cmd, State) ->
%% handle unhandled commands gracefully %% handle unhandled commands gracefully
rabbit_log:debug("rabbit_fifo: unhandled command ~W", [Cmd, 10]), ?LOG_DEBUG("rabbit_fifo: unhandled command ~W", [Cmd, 10]),
{State, ok, []}. {State, ok, []}.
convert_v3_to_v4(#{} = _Meta, StateV3) -> convert_v3_to_v4(#{} = _Meta, StateV3) ->
@ -1157,7 +1158,7 @@ handle_aux(_RaState, _, force_checkpoint,
bytes_in = BytesIn} = Aux, RaAux) -> bytes_in = BytesIn} = Aux, RaAux) ->
Ts = erlang:system_time(millisecond), Ts = erlang:system_time(millisecond),
#?STATE{cfg = #cfg{resource = QR}} = ra_aux:machine_state(RaAux), #?STATE{cfg = #cfg{resource = QR}} = ra_aux:machine_state(RaAux),
rabbit_log:debug("~ts: rabbit_fifo: forcing checkpoint at ~b", ?LOG_DEBUG("~ts: rabbit_fifo: forcing checkpoint at ~b",
[rabbit_misc:rs(QR), ra_aux:last_applied(RaAux)]), [rabbit_misc:rs(QR), ra_aux:last_applied(RaAux)]),
{Check, Effects} = do_checkpoints(Ts, Check0, RaAux, BytesIn, true), {Check, Effects} = do_checkpoints(Ts, Check0, RaAux, BytesIn, true),
{no_reply, Aux#?AUX{last_checkpoint = Check}, RaAux, Effects}; {no_reply, Aux#?AUX{last_checkpoint = Check}, RaAux, Effects};
@ -1178,7 +1179,7 @@ eval_gc(RaAux, MacState,
Mem > ?GC_MEM_LIMIT_B -> Mem > ?GC_MEM_LIMIT_B ->
garbage_collect(), garbage_collect(),
{memory, MemAfter} = erlang:process_info(self(), memory), {memory, MemAfter} = erlang:process_info(self(), memory),
rabbit_log:debug("~ts: full GC sweep complete. " ?LOG_DEBUG("~ts: full GC sweep complete. "
"Process memory changed from ~.2fMB to ~.2fMB.", "Process memory changed from ~.2fMB to ~.2fMB.",
[rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]), [rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]),
AuxState#?AUX{gc = Gc#aux_gc{last_raft_idx = Idx}}; AuxState#?AUX{gc = Gc#aux_gc{last_raft_idx = Idx}};
@ -1195,7 +1196,7 @@ force_eval_gc(RaAux,
true -> true ->
garbage_collect(), garbage_collect(),
{memory, MemAfter} = erlang:process_info(self(), memory), {memory, MemAfter} = erlang:process_info(self(), memory),
rabbit_log:debug("~ts: full GC sweep complete. " ?LOG_DEBUG("~ts: full GC sweep complete. "
"Process memory changed from ~.2fMB to ~.2fMB.", "Process memory changed from ~.2fMB to ~.2fMB.",
[rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]), [rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]),
AuxState#?AUX{gc = Gc#aux_gc{last_raft_idx = Idx}}; AuxState#?AUX{gc = Gc#aux_gc{last_raft_idx = Idx}};


@ -11,6 +11,9 @@
%% Handles command tracking and other non-functional concerns. %% Handles command tracking and other non-functional concerns.
-module(rabbit_fifo_client). -module(rabbit_fifo_client).
-include_lib("kernel/include/logger.hrl").
-export([ -export([
init/1, init/1,
init/2, init/2,
@ -143,13 +146,13 @@ enqueue(QName, Correlation, Msg,
%% to send it %% to send it
{reject_publish, State0}; {reject_publish, State0};
{error, {shutdown, delete}} -> {error, {shutdown, delete}} ->
rabbit_log:debug("~ts: QQ ~ts tried to register enqueuer during delete shutdown", ?LOG_DEBUG("~ts: QQ ~ts tried to register enqueuer during delete shutdown",
[?MODULE, rabbit_misc:rs(QName)]), [?MODULE, rabbit_misc:rs(QName)]),
{reject_publish, State0}; {reject_publish, State0};
{timeout, _} -> {timeout, _} ->
{reject_publish, State0}; {reject_publish, State0};
Err -> Err ->
rabbit_log:debug("~ts: QQ ~ts error when registering enqueuer ~p", ?LOG_DEBUG("~ts: QQ ~ts error when registering enqueuer ~p",
[?MODULE, rabbit_misc:rs(QName), Err]), [?MODULE, rabbit_misc:rs(QName), Err]),
exit(Err) exit(Err)
end; end;
@ -628,7 +631,7 @@ handle_ra_event(QName, Leader, {applied, Seqs},
{ok, _, ActualLeader} {ok, _, ActualLeader}
when ActualLeader =/= OldLeader -> when ActualLeader =/= OldLeader ->
%% there is a new leader %% there is a new leader
rabbit_log:debug("~ts: Detected QQ leader change (applied) " ?LOG_DEBUG("~ts: Detected QQ leader change (applied) "
"from ~w to ~w, " "from ~w to ~w, "
"resending ~b pending commands", "resending ~b pending commands",
[?MODULE, OldLeader, ActualLeader, [?MODULE, OldLeader, ActualLeader,
@ -698,7 +701,7 @@ handle_ra_event(QName, Leader, {machine, leader_change},
pending = Pending} = State0) -> pending = Pending} = State0) ->
%% we need to update leader %% we need to update leader
%% and resend any pending commands %% and resend any pending commands
rabbit_log:debug("~ts: ~s Detected QQ leader change from ~w to ~w, " ?LOG_DEBUG("~ts: ~s Detected QQ leader change from ~w to ~w, "
"resending ~b pending commands", "resending ~b pending commands",
[rabbit_misc:rs(QName), ?MODULE, OldLeader, [rabbit_misc:rs(QName), ?MODULE, OldLeader,
Leader, maps:size(Pending)]), Leader, maps:size(Pending)]),
@ -710,7 +713,7 @@ handle_ra_event(_QName, _From, {rejected, {not_leader, Leader, _Seq}},
handle_ra_event(QName, _From, {rejected, {not_leader, Leader, _Seq}}, handle_ra_event(QName, _From, {rejected, {not_leader, Leader, _Seq}},
#state{leader = OldLeader, #state{leader = OldLeader,
pending = Pending} = State0) -> pending = Pending} = State0) ->
rabbit_log:debug("~ts: ~s Detected QQ leader change (rejection) from ~w to ~w, " ?LOG_DEBUG("~ts: ~s Detected QQ leader change (rejection) from ~w to ~w, "
"resending ~b pending commands", "resending ~b pending commands",
[rabbit_misc:rs(QName), ?MODULE, OldLeader, [rabbit_misc:rs(QName), ?MODULE, OldLeader,
Leader, maps:size(Pending)]), Leader, maps:size(Pending)]),
@ -739,7 +742,7 @@ handle_ra_event(QName, Leader, close_cached_segments,
{_TRef, Last, Cache} -> {_TRef, Last, Cache} ->
case now_ms() > Last + ?CACHE_SEG_TIMEOUT of case now_ms() > Last + ?CACHE_SEG_TIMEOUT of
true -> true ->
rabbit_log:debug("~ts: closing_cached_segments", ?LOG_DEBUG("~ts: closing_cached_segments",
[rabbit_misc:rs(QName)]), [rabbit_misc:rs(QName)]),
%% its been long enough, evict all %% its been long enough, evict all
_ = ra_flru:evict_all(Cache), _ = ra_flru:evict_all(Cache),
@ -982,7 +985,7 @@ add_delivery_count(DelCntIncr, Tag, #state{consumers = CDels0} = State) ->
get_missing_deliveries(State, From, To, ConsumerTag) -> get_missing_deliveries(State, From, To, ConsumerTag) ->
%% find local server %% find local server
ConsumerKey = consumer_key(ConsumerTag, State), ConsumerKey = consumer_key(ConsumerTag, State),
rabbit_log:debug("get_missing_deliveries for consumer '~s' from ~b to ~b", ?LOG_DEBUG("get_missing_deliveries for consumer '~s' from ~b to ~b",
[ConsumerTag, From, To]), [ConsumerTag, From, To]),
Cmd = {get_checked_out, ConsumerKey, lists:seq(From, To)}, Cmd = {get_checked_out, ConsumerKey, lists:seq(From, To)},
ServerId = find_local_or_leader(State), ServerId = find_local_or_leader(State),


@ -8,6 +8,7 @@
-include("rabbit_fifo_dlx.hrl"). -include("rabbit_fifo_dlx.hrl").
-include("rabbit_fifo.hrl"). -include("rabbit_fifo.hrl").
-include_lib("kernel/include/logger.hrl").
-compile({no_auto_import, [apply/3]}). -compile({no_auto_import, [apply/3]}).
-export([ -export([
@ -123,7 +124,7 @@ apply(_, {dlx, #checkout{consumer = ConsumerPid,
OldConsumerPid -> OldConsumerPid ->
ok; ok;
_ -> _ ->
rabbit_log:debug("Terminating ~p since ~p becomes active rabbit_fifo_dlx_worker", ?LOG_DEBUG("Terminating ~p since ~p becomes active rabbit_fifo_dlx_worker",
[OldConsumerPid, ConsumerPid]), [OldConsumerPid, ConsumerPid]),
ensure_worker_terminated(State0) ensure_worker_terminated(State0)
end, end,
@ -144,7 +145,7 @@ apply(_, {dlx, #checkout{consumer = ConsumerPid,
msg_bytes_checkout = BytesCheckout - BytesMoved}, msg_bytes_checkout = BytesCheckout - BytesMoved},
{State, []}; {State, []};
apply(_, Cmd, DLH, State) -> apply(_, Cmd, DLH, State) ->
rabbit_log:debug("Ignoring command ~tp for dead_letter_handler ~tp", [Cmd, DLH]), ?LOG_DEBUG("Ignoring command ~tp for dead_letter_handler ~tp", [Cmd, DLH]),
{State, []}. {State, []}.
-spec discard([msg()], rabbit_dead_letter:reason(), dead_letter_handler(), state()) -> -spec discard([msg()], rabbit_dead_letter:reason(), dead_letter_handler(), state()) ->
@ -257,7 +258,7 @@ ensure_worker_started(QRef, #?MODULE{consumer = undefined}) ->
ensure_worker_started(QRef, #?MODULE{consumer = #dlx_consumer{pid = Pid}}) -> ensure_worker_started(QRef, #?MODULE{consumer = #dlx_consumer{pid = Pid}}) ->
case is_local_and_alive(Pid) of case is_local_and_alive(Pid) of
true -> true ->
rabbit_log:debug("rabbit_fifo_dlx_worker ~tp already started for ~ts", ?LOG_DEBUG("rabbit_fifo_dlx_worker ~tp already started for ~ts",
[Pid, rabbit_misc:rs(QRef)]); [Pid, rabbit_misc:rs(QRef)]);
false -> false ->
start_worker(QRef) start_worker(QRef)
@ -269,7 +270,7 @@ ensure_worker_started(QRef, #?MODULE{consumer = #dlx_consumer{pid = Pid}}) ->
%% Ra server process crash in which case another Ra node will become leader. %% Ra server process crash in which case another Ra node will become leader.
start_worker(QRef) -> start_worker(QRef) ->
{ok, Pid} = supervisor:start_child(rabbit_fifo_dlx_sup, [QRef]), {ok, Pid} = supervisor:start_child(rabbit_fifo_dlx_sup, [QRef]),
rabbit_log:debug("started rabbit_fifo_dlx_worker ~tp for ~ts", ?LOG_DEBUG("started rabbit_fifo_dlx_worker ~tp for ~ts",
[Pid, rabbit_misc:rs(QRef)]). [Pid, rabbit_misc:rs(QRef)]).
ensure_worker_terminated(#?MODULE{consumer = undefined}) -> ensure_worker_terminated(#?MODULE{consumer = undefined}) ->
@ -280,7 +281,7 @@ ensure_worker_terminated(#?MODULE{consumer = #dlx_consumer{pid = Pid}}) ->
%% Note that we can't return a mod_call effect here %% Note that we can't return a mod_call effect here
%% because mod_call is executed on the leader only. %% because mod_call is executed on the leader only.
ok = supervisor:terminate_child(rabbit_fifo_dlx_sup, Pid), ok = supervisor:terminate_child(rabbit_fifo_dlx_sup, Pid),
rabbit_log:debug("terminated rabbit_fifo_dlx_worker ~tp", [Pid]); ?LOG_DEBUG("terminated rabbit_fifo_dlx_worker ~tp", [Pid]);
false -> false ->
ok ok
end. end.
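Most of the calls converted in this dead-lettering module are ?LOG_DEBUG, which stay silent unless the effective log level allows debug events. Purely as an illustration of the OTP logger API (RabbitMQ itself manages levels through its own log configuration), a sketch that temporarily raises the primary level around a function call:

    -module(debug_level_sketch).
    -include_lib("kernel/include/logger.hrl").
    -export([with_debug/1]).

    %% Runs Fun with the primary log level raised to debug so that ?LOG_DEBUG
    %% events become visible, then restores the previous level afterwards.
    with_debug(Fun) when is_function(Fun, 0) ->
        #{level := Prev} = logger:get_primary_config(),
        ok = logger:set_primary_config(level, debug),
        ?LOG_DEBUG("primary log level temporarily raised to debug"),
        try
            Fun()
        after
            ok = logger:set_primary_config(level, Prev)
        end.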


@ -6,6 +6,9 @@
-module(rabbit_fifo_dlx_client). -module(rabbit_fifo_dlx_client).
-include_lib("kernel/include/logger.hrl").
-export([checkout/3, settle/2, handle_ra_event/3, -export([checkout/3, settle/2, handle_ra_event/3,
overview/1]). overview/1]).
@ -47,11 +50,11 @@ process_command(Cmd, #state{leader = Leader} = State, Tries) ->
{ok, ok, Leader} -> {ok, ok, Leader} ->
{ok, State#state{leader = Leader}}; {ok, State#state{leader = Leader}};
{ok, ok, NonLocalLeader} -> {ok, ok, NonLocalLeader} ->
rabbit_log:warning("Failed to process command ~tp on quorum queue leader ~tp because actual leader is ~tp.", ?LOG_WARNING("Failed to process command ~tp on quorum queue leader ~tp because actual leader is ~tp.",
[Cmd, Leader, NonLocalLeader]), [Cmd, Leader, NonLocalLeader]),
{error, non_local_leader}; {error, non_local_leader};
Err -> Err ->
rabbit_log:warning("Failed to process command ~tp on quorum queue leader ~tp: ~tp~n" ?LOG_WARNING("Failed to process command ~tp on quorum queue leader ~tp: ~tp~n"
"Trying ~b more time(s)...", "Trying ~b more time(s)...",
[Cmd, Leader, Err, Tries]), [Cmd, Leader, Err, Tries]),
process_command(Cmd, State, Tries - 1) process_command(Cmd, State, Tries - 1)
@ -63,7 +66,7 @@ handle_ra_event(Leader, {dlx_delivery, _} = Del,
#state{leader = _Leader} = State) when node(Leader) == node() -> #state{leader = _Leader} = State) when node(Leader) == node() ->
handle_delivery(Del, State); handle_delivery(Del, State);
handle_ra_event(From, Evt, State) -> handle_ra_event(From, Evt, State) ->
rabbit_log:debug("Ignoring ra event ~tp from ~tp", [Evt, From]), ?LOG_DEBUG("Ignoring ra event ~tp from ~tp", [Evt, From]),
{ok, State, []}. {ok, State, []}.
handle_delivery({dlx_delivery, [{FstId, _} | _] = IdMsgs}, handle_delivery({dlx_delivery, [{FstId, _} | _] = IdMsgs},


@ -25,6 +25,7 @@
-include("mc.hrl"). -include("mc.hrl").
-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
% -include_lib("rabbit_common/include/rabbit_framing.hrl"). % -include_lib("rabbit_common/include/rabbit_framing.hrl").
-behaviour(gen_server). -behaviour(gen_server).
@ -135,7 +136,7 @@ terminate(_Reason, State) ->
cancel_timer(State). cancel_timer(State).
handle_call(Request, From, State) -> handle_call(Request, From, State) ->
rabbit_log:info("~ts received unhandled call from ~tp: ~tp", [?MODULE, From, Request]), ?LOG_INFO("~ts received unhandled call from ~tp: ~tp", [?MODULE, From, Request]),
{noreply, State}. {noreply, State}.
handle_cast({dlx_event, _LeaderPid, lookup_topology}, handle_cast({dlx_event, _LeaderPid, lookup_topology},
@ -169,7 +170,7 @@ handle_cast(settle_timeout, State0) ->
State = State0#state{timer = undefined}, State = State0#state{timer = undefined},
redeliver_and_ack(State); redeliver_and_ack(State);
handle_cast(Request, State) -> handle_cast(Request, State) ->
rabbit_log:info("~ts received unhandled cast ~tp", [?MODULE, Request]), ?LOG_INFO("~ts received unhandled cast ~tp", [?MODULE, Request]),
{noreply, State}. {noreply, State}.
redeliver_and_ack(State0) -> redeliver_and_ack(State0) ->
@ -183,7 +184,7 @@ handle_info({'DOWN', Ref, process, _, _},
queue_ref = QRef}) -> queue_ref = QRef}) ->
%% Source quorum queue is down. Therefore, terminate ourself. %% Source quorum queue is down. Therefore, terminate ourself.
%% The new leader will re-create another dlx_worker. %% The new leader will re-create another dlx_worker.
rabbit_log:debug("~ts terminating itself because leader of ~ts is down...", ?LOG_DEBUG("~ts terminating itself because leader of ~ts is down...",
[?MODULE, rabbit_misc:rs(QRef)]), [?MODULE, rabbit_misc:rs(QRef)]),
supervisor:terminate_child(rabbit_fifo_dlx_sup, self()); supervisor:terminate_child(rabbit_fifo_dlx_sup, self());
handle_info({{'DOWN', QName}, _MRef, process, QPid, Reason}, handle_info({{'DOWN', QName}, _MRef, process, QPid, Reason},
@ -197,7 +198,7 @@ handle_info({{'DOWN', QName}, _MRef, process, QPid, Reason},
remove_queue(QRef, State0#state{queue_type_state = QTypeState}) remove_queue(QRef, State0#state{queue_type_state = QTypeState})
end; end;
handle_info(Info, State) -> handle_info(Info, State) ->
rabbit_log:info("~ts received unhandled info ~tp", [?MODULE, Info]), ?LOG_INFO("~ts received unhandled info ~tp", [?MODULE, Info]),
{noreply, State}. {noreply, State}.
code_change(_OldVsn, State, _Extra) -> code_change(_OldVsn, State, _Extra) ->
@ -219,7 +220,7 @@ remove_queue(QRef, #state{pendings = Pendings0,
queue_type_state = QTypeState}}. queue_type_state = QTypeState}}.
wait_for_queue_deleted(QRef, 0) -> wait_for_queue_deleted(QRef, 0) ->
rabbit_log:debug("Received deletion event for ~ts but queue still exists in ETS table.", ?LOG_DEBUG("Received deletion event for ~ts but queue still exists in ETS table.",
[rabbit_misc:rs(QRef)]); [rabbit_misc:rs(QRef)]);
wait_for_queue_deleted(QRef, N) -> wait_for_queue_deleted(QRef, N) ->
case rabbit_amqqueue:exists(QRef) of case rabbit_amqqueue:exists(QRef) of
@ -289,7 +290,7 @@ rejected(SeqNo, Qs, Pendings)
end, end,
Pendings); Pendings);
false -> false ->
rabbit_log:debug("Ignoring rejection for unknown sequence number ~b " ?LOG_DEBUG("Ignoring rejection for unknown sequence number ~b "
"from target dead letter queues ~tp", "from target dead letter queues ~tp",
[SeqNo, Qs]), [SeqNo, Qs]),
Pendings Pendings
@ -386,7 +387,7 @@ deliver_to_queues(Msg, Options, Qs, #state{queue_type_state = QTypeState0,
%% we won't rely on rabbit_fifo_client to re-deliver on behalf of us %% we won't rely on rabbit_fifo_client to re-deliver on behalf of us
%% (and therefore preventing messages to get stuck in our 'unsettled' state). %% (and therefore preventing messages to get stuck in our 'unsettled' state).
QNames = queue_names(Qs), QNames = queue_names(Qs),
rabbit_log:debug("Failed to deliver message with seq_no ~b to " ?LOG_DEBUG("Failed to deliver message with seq_no ~b to "
"queues ~tp: ~tp", "queues ~tp: ~tp",
[SeqNo, QNames, Reason]), [SeqNo, QNames, Reason]),
{State0#state{pendings = rejected(SeqNo, QNames, Pendings)}, []} {State0#state{pendings = rejected(SeqNo, QNames, Pendings)}, []}
@ -419,7 +420,7 @@ handle_settled0(QRef, MsgSeq, #state{pendings = Pendings,
settled = [QRef | Settled]}, settled = [QRef | Settled]},
State#state{pendings = maps:update(MsgSeq, Pend, Pendings)}; State#state{pendings = maps:update(MsgSeq, Pend, Pendings)};
error -> error ->
rabbit_log:debug("Ignoring publisher confirm for unknown sequence number ~b " ?LOG_DEBUG("Ignoring publisher confirm for unknown sequence number ~b "
"from target dead letter ~ts", "from target dead letter ~ts",
[MsgSeq, rabbit_misc:rs(QRef)]), [MsgSeq, rabbit_misc:rs(QRef)]),
State State
@ -634,7 +635,7 @@ log_missing_dlx_once(#state{exchange_ref = SameDlx,
log_missing_dlx_once(#state{exchange_ref = DlxResource, log_missing_dlx_once(#state{exchange_ref = DlxResource,
queue_ref = QueueResource, queue_ref = QueueResource,
logged = Logged} = State) -> logged = Logged} = State) ->
rabbit_log:warning("Cannot forward any dead-letter messages from source quorum ~ts because " ?LOG_WARNING("Cannot forward any dead-letter messages from source quorum ~ts because "
"its configured dead-letter-exchange ~ts does not exist. " "its configured dead-letter-exchange ~ts does not exist. "
"Either create the configured dead-letter-exchange or re-configure " "Either create the configured dead-letter-exchange or re-configure "
"the dead-letter-exchange policy for the source quorum queue to prevent " "the dead-letter-exchange policy for the source quorum queue to prevent "
@ -651,7 +652,7 @@ log_no_route_once(#state{queue_ref = QueueResource,
exchange_ref = DlxResource, exchange_ref = DlxResource,
routing_key = RoutingKey, routing_key = RoutingKey,
logged = Logged} = State) -> logged = Logged} = State) ->
rabbit_log:warning("Cannot forward any dead-letter messages from source quorum ~ts " ?LOG_WARNING("Cannot forward any dead-letter messages from source quorum ~ts "
"with configured dead-letter-exchange ~ts and configured " "with configured dead-letter-exchange ~ts and configured "
"dead-letter-routing-key '~ts'. This can happen either if the dead-letter " "dead-letter-routing-key '~ts'. This can happen either if the dead-letter "
"routing topology is misconfigured (for example no queue bound to " "routing topology is misconfigured (for example no queue bound to "
@ -672,7 +673,7 @@ log_cycle_once(Queues, _, #state{logged = Logged} = State)
log_cycle_once(Queues, RoutingKeys, #state{exchange_ref = DlxResource, log_cycle_once(Queues, RoutingKeys, #state{exchange_ref = DlxResource,
queue_ref = QueueResource, queue_ref = QueueResource,
logged = Logged} = State) -> logged = Logged} = State) ->
rabbit_log:warning("Dead-letter queues cycle detected for source quorum ~ts " ?LOG_WARNING("Dead-letter queues cycle detected for source quorum ~ts "
"with dead-letter exchange ~ts and routing keys ~tp: ~tp " "with dead-letter exchange ~ts and routing keys ~tp: ~tp "
"This message will not be logged again.", "This message will not be logged again.",
[rabbit_misc:rs(QueueResource), rabbit_misc:rs(DlxResource), [rabbit_misc:rs(QueueResource), rabbit_misc:rs(DlxResource),


@ -15,6 +15,7 @@
-include("rabbit_fifo_v0.hrl"). -include("rabbit_fifo_v0.hrl").
-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-export([ -export([
init/1, init/1,
@ -673,7 +674,7 @@ eval_gc(Log, #?STATE{cfg = #cfg{resource = QR}} = MacState,
Mem > ?GC_MEM_LIMIT_B -> Mem > ?GC_MEM_LIMIT_B ->
garbage_collect(), garbage_collect(),
{memory, MemAfter} = erlang:process_info(self(), memory), {memory, MemAfter} = erlang:process_info(self(), memory),
rabbit_log:debug("~ts: full GC sweep complete. " ?LOG_DEBUG("~ts: full GC sweep complete. "
"Process memory changed from ~.2fMB to ~.2fMB.", "Process memory changed from ~.2fMB to ~.2fMB.",
[rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]), [rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]),
AuxState#aux{gc = Gc#aux_gc{last_raft_idx = Idx}}; AuxState#aux{gc = Gc#aux_gc{last_raft_idx = Idx}};


@ -15,6 +15,7 @@
-include("rabbit_fifo_v1.hrl"). -include("rabbit_fifo_v1.hrl").
-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-export([ -export([
init/1, init/1,
@ -533,7 +534,7 @@ apply(_Meta, {machine_version, 0, 1}, V0State) ->
{State, ok, []}; {State, ok, []};
apply(_Meta, Cmd, State) -> apply(_Meta, Cmd, State) ->
%% handle unhandled commands gracefully %% handle unhandled commands gracefully
rabbit_log:debug("rabbit_fifo: unhandled command ~W", [Cmd, 10]), ?LOG_DEBUG("rabbit_fifo: unhandled command ~W", [Cmd, 10]),
{State, ok, []}. {State, ok, []}.
convert_v0_to_v1(V0State0) -> convert_v0_to_v1(V0State0) ->
@ -855,7 +856,7 @@ eval_gc(Log, #?STATE{cfg = #cfg{resource = QR}} = MacState,
Mem > ?GC_MEM_LIMIT_B -> Mem > ?GC_MEM_LIMIT_B ->
garbage_collect(), garbage_collect(),
{memory, MemAfter} = erlang:process_info(self(), memory), {memory, MemAfter} = erlang:process_info(self(), memory),
rabbit_log:debug("~ts: full GC sweep complete. " ?LOG_DEBUG("~ts: full GC sweep complete. "
"Process memory changed from ~.2fMB to ~.2fMB.", "Process memory changed from ~.2fMB to ~.2fMB.",
[rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]), [rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]),
AuxState#aux{gc = Gc#aux_gc{last_raft_idx = Idx}}; AuxState#aux{gc = Gc#aux_gc{last_raft_idx = Idx}};
@ -871,7 +872,7 @@ force_eval_gc(Log, #?STATE{cfg = #cfg{resource = QR}},
true -> true ->
garbage_collect(), garbage_collect(),
{memory, MemAfter} = erlang:process_info(self(), memory), {memory, MemAfter} = erlang:process_info(self(), memory),
rabbit_log:debug("~ts: full GC sweep complete. " ?LOG_DEBUG("~ts: full GC sweep complete. "
"Process memory changed from ~.2fMB to ~.2fMB.", "Process memory changed from ~.2fMB to ~.2fMB.",
[rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]), [rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]),
AuxState#aux{gc = Gc#aux_gc{last_raft_idx = Idx}}; AuxState#aux{gc = Gc#aux_gc{last_raft_idx = Idx}};


@ -15,6 +15,7 @@
-include("rabbit_fifo_v3.hrl"). -include("rabbit_fifo_v3.hrl").
-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-define(STATE, rabbit_fifo). -define(STATE, rabbit_fifo).
@ -619,7 +620,7 @@ apply(#{index := IncomingRaftIdx} = Meta, {dlx, _} = Cmd,
update_smallest_raft_index(IncomingRaftIdx, State, Effects); update_smallest_raft_index(IncomingRaftIdx, State, Effects);
apply(_Meta, Cmd, State) -> apply(_Meta, Cmd, State) ->
%% handle unhandled commands gracefully %% handle unhandled commands gracefully
rabbit_log:debug("rabbit_fifo: unhandled command ~W", [Cmd, 10]), ?LOG_DEBUG("rabbit_fifo: unhandled command ~W", [Cmd, 10]),
{State, ok, []}. {State, ok, []}.
convert_msg({RaftIdx, {Header, empty}}) when is_integer(RaftIdx) -> convert_msg({RaftIdx, {Header, empty}}) when is_integer(RaftIdx) ->
@ -1172,7 +1173,7 @@ eval_gc(Log, #?STATE{cfg = #cfg{resource = QR}} = MacState,
Mem > ?GC_MEM_LIMIT_B -> Mem > ?GC_MEM_LIMIT_B ->
garbage_collect(), garbage_collect(),
{memory, MemAfter} = erlang:process_info(self(), memory), {memory, MemAfter} = erlang:process_info(self(), memory),
rabbit_log:debug("~ts: full GC sweep complete. " ?LOG_DEBUG("~ts: full GC sweep complete. "
"Process memory changed from ~.2fMB to ~.2fMB.", "Process memory changed from ~.2fMB to ~.2fMB.",
[rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]), [rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]),
AuxState#?AUX{gc = Gc#aux_gc{last_raft_idx = Idx}}; AuxState#?AUX{gc = Gc#aux_gc{last_raft_idx = Idx}};
@ -1188,7 +1189,7 @@ force_eval_gc(Log, #?STATE{cfg = #cfg{resource = QR}},
true -> true ->
garbage_collect(), garbage_collect(),
{memory, MemAfter} = erlang:process_info(self(), memory), {memory, MemAfter} = erlang:process_info(self(), memory),
rabbit_log:debug("~ts: full GC sweep complete. " ?LOG_DEBUG("~ts: full GC sweep complete. "
"Process memory changed from ~.2fMB to ~.2fMB.", "Process memory changed from ~.2fMB to ~.2fMB.",
[rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]), [rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]),
AuxState#?AUX{gc = Gc#aux_gc{last_raft_idx = Idx}}; AuxState#?AUX{gc = Gc#aux_gc{last_raft_idx = Idx}};


@ -6,6 +6,9 @@
%% %%
-module(rabbit_health_check). -module(rabbit_health_check).
-include_lib("kernel/include/logger.hrl").
%% External API %% External API
-export([node/1, node/2]). -export([node/1, node/2]).
@ -28,7 +31,7 @@ node(Node, Timeout) ->
-spec local() -> ok | {error_string, string()}. -spec local() -> ok | {error_string, string()}.
local() -> local() ->
rabbit_log:warning("rabbitmqctl node_health_check and its HTTP API counterpart are DEPRECATED. " ?LOG_WARNING("rabbitmqctl node_health_check and its HTTP API counterpart are DEPRECATED. "
"See https://www.rabbitmq.com/docs/monitoring#health-checks for replacement options."), "See https://www.rabbitmq.com/docs/monitoring#health-checks for replacement options."),
run_checks([list_channels, list_queues, alarms, rabbit_node_monitor]). run_checks([list_channels, list_queues, alarms, rabbit_node_monitor]).


@ -8,6 +8,7 @@
-module(rabbit_maintenance). -module(rabbit_maintenance).
-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
%% FIXME: Ra consistent queries are currently fragile in the sense that the %% FIXME: Ra consistent queries are currently fragile in the sense that the
%% query function may run on a remote node and the function reference or MFA %% query function may run on a remote node and the function reference or MFA
@ -63,13 +64,13 @@ is_enabled() ->
-spec drain() -> ok. -spec drain() -> ok.
drain() -> drain() ->
rabbit_log:warning("This node is being put into maintenance (drain) mode"), ?LOG_WARNING("This node is being put into maintenance (drain) mode"),
mark_as_being_drained(), mark_as_being_drained(),
rabbit_log:info("Marked this node as undergoing maintenance"), ?LOG_INFO("Marked this node as undergoing maintenance"),
_ = suspend_all_client_listeners(), _ = suspend_all_client_listeners(),
rabbit_log:warning("Suspended all listeners and will no longer accept client connections"), ?LOG_WARNING("Suspended all listeners and will no longer accept client connections"),
{ok, NConnections} = close_all_client_connections(), {ok, NConnections} = close_all_client_connections(),
rabbit_log:warning("Closed ~b local client connections", [NConnections]), ?LOG_WARNING("Closed ~b local client connections", [NConnections]),
%% allow plugins to react e.g. by closing their protocol connections %% allow plugins to react e.g. by closing their protocol connections
rabbit_event:notify(maintenance_connections_closed, #{ rabbit_event:notify(maintenance_connections_closed, #{
reason => <<"node is being put into maintenance">> reason => <<"node is being put into maintenance">>
@ -92,19 +93,19 @@ drain() ->
rabbit_event:notify(maintenance_draining, #{ rabbit_event:notify(maintenance_draining, #{
reason => <<"node is being put into maintenance">> reason => <<"node is being put into maintenance">>
}), }),
rabbit_log:info("Node is ready to be shut down for maintenance or upgrade"), ?LOG_INFO("Node is ready to be shut down for maintenance or upgrade"),
ok. ok.
-spec revive() -> ok. -spec revive() -> ok.
revive() -> revive() ->
rabbit_log:info("This node is being revived from maintenance (drain) mode"), ?LOG_INFO("This node is being revived from maintenance (drain) mode"),
revive_local_quorum_queue_replicas(), revive_local_quorum_queue_replicas(),
rabbit_log:info("Resumed all listeners and will accept client connections again"), ?LOG_INFO("Resumed all listeners and will accept client connections again"),
_ = resume_all_client_listeners(), _ = resume_all_client_listeners(),
rabbit_log:info("Resumed all listeners and will accept client connections again"), ?LOG_INFO("Resumed all listeners and will accept client connections again"),
unmark_as_being_drained(), unmark_as_being_drained(),
rabbit_log:info("Marked this node as back from maintenance and ready to serve clients"), ?LOG_INFO("Marked this node as back from maintenance and ready to serve clients"),
%% allow plugins to react %% allow plugins to react
rabbit_event:notify(maintenance_revived, #{}), rabbit_event:notify(maintenance_revived, #{}),
@ -113,12 +114,12 @@ revive() ->
-spec mark_as_being_drained() -> boolean(). -spec mark_as_being_drained() -> boolean().
mark_as_being_drained() -> mark_as_being_drained() ->
rabbit_log:debug("Marking the node as undergoing maintenance"), ?LOG_DEBUG("Marking the node as undergoing maintenance"),
rabbit_db_maintenance:set(?DRAINING_STATUS). rabbit_db_maintenance:set(?DRAINING_STATUS).
-spec unmark_as_being_drained() -> boolean(). -spec unmark_as_being_drained() -> boolean().
unmark_as_being_drained() -> unmark_as_being_drained() ->
rabbit_log:debug("Unmarking the node as undergoing maintenance"), ?LOG_DEBUG("Unmarking the node as undergoing maintenance"),
rabbit_db_maintenance:set(?DEFAULT_STATUS). rabbit_db_maintenance:set(?DEFAULT_STATUS).
-spec is_being_drained_local_read(node()) -> boolean(). -spec is_being_drained_local_read(node()) -> boolean().
@ -164,7 +165,7 @@ filter_out_drained_nodes_consistent_read(Nodes) ->
%% but previously established connections won't be interrupted. %% but previously established connections won't be interrupted.
suspend_all_client_listeners() -> suspend_all_client_listeners() ->
Listeners = rabbit_networking:node_client_listeners(node()), Listeners = rabbit_networking:node_client_listeners(node()),
rabbit_log:info("Asked to suspend ~b client connection listeners. " ?LOG_INFO("Asked to suspend ~b client connection listeners. "
"No new client connections will be accepted until these listeners are resumed!", [length(Listeners)]), "No new client connections will be accepted until these listeners are resumed!", [length(Listeners)]),
Results = lists:foldl(local_listener_fold_fun(fun ranch:suspend_listener/1), [], Listeners), Results = lists:foldl(local_listener_fold_fun(fun ranch:suspend_listener/1), [], Listeners),
lists:foldl(fun ok_or_first_error/2, ok, Results). lists:foldl(fun ok_or_first_error/2, ok, Results).
@ -175,7 +176,7 @@ suspend_all_client_listeners() ->
%% A resumed listener will accept new client connections. %% A resumed listener will accept new client connections.
resume_all_client_listeners() -> resume_all_client_listeners() ->
Listeners = rabbit_networking:node_client_listeners(node()), Listeners = rabbit_networking:node_client_listeners(node()),
rabbit_log:info("Asked to resume ~b client connection listeners. " ?LOG_INFO("Asked to resume ~b client connection listeners. "
"New client connections will be accepted from now on", [length(Listeners)]), "New client connections will be accepted from now on", [length(Listeners)]),
Results = lists:foldl(local_listener_fold_fun(fun ranch:resume_listener/1), [], Listeners), Results = lists:foldl(local_listener_fold_fun(fun ranch:resume_listener/1), [], Listeners),
lists:foldl(fun ok_or_first_error/2, ok, Results). lists:foldl(fun ok_or_first_error/2, ok, Results).
@ -213,15 +214,15 @@ transfer_leadership_of_quorum_queues(_TransferCandidates) ->
rabbit_log:info("Leadership transfer for quorum queues hosted on this node has been initiated"). rabbit_log:info("Leadership transfer for quorum queues hosted on this node has been initiated").
transfer_leadership_of_metadata_store(TransferCandidates) -> transfer_leadership_of_metadata_store(TransferCandidates) ->
rabbit_log:info("Will transfer leadership of metadata store with current leader on this node", ?LOG_INFO("Will transfer leadership of metadata store with current leader on this node",
[]), []),
case rabbit_khepri:transfer_leadership(TransferCandidates) of case rabbit_khepri:transfer_leadership(TransferCandidates) of
{ok, Node} when Node == node(); Node == undefined -> {ok, Node} when Node == node(); Node == undefined ->
rabbit_log:info("Skipping leadership transfer of metadata store: current leader is not on this node"); ?LOG_INFO("Skipping leadership transfer of metadata store: current leader is not on this node");
{ok, Node} -> {ok, Node} ->
rabbit_log:info("Leadership transfer for metadata store on this node has been done. The new leader is ~p", [Node]); ?LOG_INFO("Leadership transfer for metadata store on this node has been done. The new leader is ~p", [Node]);
Error -> Error ->
rabbit_log:warning("Skipping leadership transfer of metadata store: ~p", [Error]) ?LOG_WARNING("Skipping leadership transfer of metadata store: ~p", [Error])
end. end.
-spec transfer_leadership_of_stream_coordinator([node()]) -> ok. -spec transfer_leadership_of_stream_coordinator([node()]) -> ok.


@ -8,6 +8,7 @@
-module(rabbit_mnesia). -module(rabbit_mnesia).
-include_lib("rabbit_common/include/logging.hrl"). -include_lib("rabbit_common/include/logging.hrl").
-include_lib("kernel/include/logger.hrl").
-export([%% Main interface -export([%% Main interface
init/0, init/0,
@ -123,7 +124,7 @@ init() ->
NodeType = node_type(), NodeType = node_type(),
case is_node_type_permitted(NodeType) of case is_node_type_permitted(NodeType) of
false -> false ->
rabbit_log:info( ?LOG_INFO(
"RAM nodes are deprecated and not permitted. This " "RAM nodes are deprecated and not permitted. This "
"node will be converted to a disc node."), "node will be converted to a disc node."),
init_db_and_upgrade(cluster_nodes(all), disc, init_db_and_upgrade(cluster_nodes(all), disc,
@ -175,7 +176,7 @@ can_join_cluster(DiscoveryNode) ->
%% do we think so ourselves? %% do we think so ourselves?
case are_we_clustered_with(DiscoveryNode) of case are_we_clustered_with(DiscoveryNode) of
true -> true ->
rabbit_log:info("Asked to join a cluster but already a member of it: ~tp", [ClusterNodes]), ?LOG_INFO("Asked to join a cluster but already a member of it: ~tp", [ClusterNodes]),
{ok, already_member}; {ok, already_member};
false -> false ->
Msg = format_inconsistent_cluster_message(DiscoveryNode, node()), Msg = format_inconsistent_cluster_message(DiscoveryNode, node()),
@ -195,7 +196,7 @@ join_cluster(ClusterNodes, NodeType) when is_list(ClusterNodes) ->
false -> disc; false -> disc;
true -> NodeType true -> NodeType
end, end,
rabbit_log:info("Clustering with ~tp as ~tp node", ?LOG_INFO("Clustering with ~tp as ~tp node",
[ClusterNodes, NodeType1]), [ClusterNodes, NodeType1]),
ok = init_db_with_mnesia(ClusterNodes, NodeType1, ok = init_db_with_mnesia(ClusterNodes, NodeType1,
true, true, _Retry = true), true, true, _Retry = true),
@ -230,7 +231,7 @@ reset() ->
force_reset() -> force_reset() ->
ensure_mnesia_not_running(), ensure_mnesia_not_running(),
rabbit_log:info("Resetting Rabbit forcefully", []), ?LOG_INFO("Resetting Rabbit forcefully", []),
wipe(). wipe().
reset_gracefully() -> reset_gracefully() ->
@ -300,7 +301,7 @@ forget_cluster_node(Node, RemoveWhenOffline) ->
{true, false} -> remove_node_offline_node(Node); {true, false} -> remove_node_offline_node(Node);
{true, true} -> e(online_node_offline_flag); {true, true} -> e(online_node_offline_flag);
{false, false} -> e(offline_node_no_offline_flag); {false, false} -> e(offline_node_no_offline_flag);
{false, true} -> rabbit_log:info( {false, true} -> ?LOG_INFO(
"Removing node ~tp from cluster", [Node]), "Removing node ~tp from cluster", [Node]),
case remove_node_if_mnesia_running(Node) of case remove_node_if_mnesia_running(Node) of
ok -> ok; ok -> ok;
@ -550,7 +551,7 @@ init_db(ClusterNodes, NodeType, CheckOtherNodes) ->
ensure_node_type_is_permitted(NodeType), ensure_node_type_is_permitted(NodeType),
NodeIsVirgin = is_virgin_node(), NodeIsVirgin = is_virgin_node(),
rabbit_log:debug("Does data directory looks like that of a blank (uninitialised) node? ~tp", [NodeIsVirgin]), ?LOG_DEBUG("Does data directory looks like that of a blank (uninitialised) node? ~tp", [NodeIsVirgin]),
Nodes = change_extra_db_nodes(ClusterNodes, CheckOtherNodes), Nodes = change_extra_db_nodes(ClusterNodes, CheckOtherNodes),
%% Note that we use `system_info' here and not the cluster status %% Note that we use `system_info' here and not the cluster status
%% since when we start rabbit for the first time the cluster %% since when we start rabbit for the first time the cluster
@ -744,7 +745,7 @@ remote_node_info(Node) ->
on_node_up(Node) -> on_node_up(Node) ->
case running_disc_nodes() of case running_disc_nodes() of
[Node] -> rabbit_log:info("cluster contains disc nodes again~n"); [Node] -> ?LOG_INFO("cluster contains disc nodes again~n");
_ -> ok _ -> ok
end. end.
@ -752,7 +753,7 @@ on_node_up(Node) ->
on_node_down(_Node) -> on_node_down(_Node) ->
case running_disc_nodes() of case running_disc_nodes() of
[] -> rabbit_log:info("only running disc node went down~n"); [] -> ?LOG_INFO("only running disc node went down~n");
_ -> ok _ -> ok
end. end.
@ -891,17 +892,17 @@ create_schema() ->
false = rabbit_khepri:is_enabled(), false = rabbit_khepri:is_enabled(),
stop_mnesia(), stop_mnesia(),
rabbit_log:debug("Will bootstrap a schema database..."), ?LOG_DEBUG("Will bootstrap a schema database..."),
rabbit_misc:ensure_ok(mnesia:create_schema([node()]), cannot_create_schema), rabbit_misc:ensure_ok(mnesia:create_schema([node()]), cannot_create_schema),
rabbit_log:debug("Bootstraped a schema database successfully"), ?LOG_DEBUG("Bootstraped a schema database successfully"),
start_mnesia(), start_mnesia(),
rabbit_log:debug("Will create schema database tables"), ?LOG_DEBUG("Will create schema database tables"),
ok = rabbit_table:create(), ok = rabbit_table:create(),
rabbit_log:debug("Created schema database tables successfully"), ?LOG_DEBUG("Created schema database tables successfully"),
rabbit_log:debug("Will check schema database integrity..."), ?LOG_DEBUG("Will check schema database integrity..."),
ensure_schema_integrity(), ensure_schema_integrity(),
rabbit_log:debug("Schema database schema integrity check passed"), ?LOG_DEBUG("Schema database schema integrity check passed"),
ok. ok.
remove_node_if_mnesia_running(Node) -> remove_node_if_mnesia_running(Node) ->
@ -945,7 +946,7 @@ leave_cluster(Node) ->
end. end.
wait_for(Condition) -> wait_for(Condition) ->
rabbit_log:info("Waiting for ~tp...", [Condition]), ?LOG_INFO("Waiting for ~tp...", [Condition]),
timer:sleep(1000). timer:sleep(1000).
start_mnesia(CheckConsistency) -> start_mnesia(CheckConsistency) ->
@ -1067,10 +1068,10 @@ mnesia_and_msg_store_files() ->
rabbit_feature_flags:enabled_feature_flags_list_file(), rabbit_feature_flags:enabled_feature_flags_list_file(),
rabbit_khepri:dir()], rabbit_khepri:dir()],
IgnoredFiles = [filename:basename(File) || File <- IgnoredFiles0], IgnoredFiles = [filename:basename(File) || File <- IgnoredFiles0],
rabbit_log:debug("Files and directories found in node's data directory: ~ts, of them to be ignored: ~ts", ?LOG_DEBUG("Files and directories found in node's data directory: ~ts, of them to be ignored: ~ts",
[string:join(lists:usort(List0), ", "), string:join(lists:usort(IgnoredFiles), ", ")]), [string:join(lists:usort(List0), ", "), string:join(lists:usort(IgnoredFiles), ", ")]),
List = List0 -- IgnoredFiles, List = List0 -- IgnoredFiles,
rabbit_log:debug("Files and directories found in node's data directory sans ignored ones: ~ts", [string:join(lists:usort(List), ", ")]), ?LOG_DEBUG("Files and directories found in node's data directory sans ignored ones: ~ts", [string:join(lists:usort(List), ", ")]),
List List
end. end.


@ -25,6 +25,7 @@
%%---------------------------------------------------------------------------- %%----------------------------------------------------------------------------
-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-type(msg() :: any()). -type(msg() :: any()).
@ -792,11 +793,11 @@ init([VHost, Type, BaseDir, ClientRefs, StartupFunState]) ->
true -> "clean"; true -> "clean";
false -> "unclean" false -> "unclean"
end, end,
rabbit_log:debug("Rebuilding message location index after ~ts shutdown...", ?LOG_DEBUG("Rebuilding message location index after ~ts shutdown...",
[Cleanliness]), [Cleanliness]),
{CurOffset, State1 = #msstate { current_file = CurFile }} = {CurOffset, State1 = #msstate { current_file = CurFile }} =
build_index(CleanShutdown, StartupFunState, State), build_index(CleanShutdown, StartupFunState, State),
rabbit_log:debug("Finished rebuilding index", []), ?LOG_DEBUG("Finished rebuilding index", []),
%% Open the most recent file. %% Open the most recent file.
{ok, CurHdl} = writer_recover(Dir, CurFile, CurOffset), {ok, CurHdl} = writer_recover(Dir, CurFile, CurOffset),
{ok, State1 #msstate { current_file_handle = CurHdl, {ok, State1 #msstate { current_file_handle = CurHdl,
@ -971,7 +972,7 @@ terminate(Reason, State = #msstate { index_ets = IndexEts,
{shutdown, _} -> {"", []}; {shutdown, _} -> {"", []};
_ -> {" with reason ~0p", [Reason]} _ -> {" with reason ~0p", [Reason]}
end, end,
rabbit_log:info("Stopping message store for directory '~ts'" ++ ExtraLog, [Dir|ExtraLogArgs]), ?LOG_INFO("Stopping message store for directory '~ts'" ++ ExtraLog, [Dir|ExtraLogArgs]),
%% stop the gc first, otherwise it could be working and we pull %% stop the gc first, otherwise it could be working and we pull
%% out the ets tables from under it. %% out the ets tables from under it.
ok = rabbit_msg_store_gc:stop(GCPid), ok = rabbit_msg_store_gc:stop(GCPid),
@ -984,7 +985,7 @@ terminate(Reason, State = #msstate { index_ets = IndexEts,
case store_file_summary(FileSummaryEts, Dir) of case store_file_summary(FileSummaryEts, Dir) of
ok -> ok; ok -> ok;
{error, FSErr} -> {error, FSErr} ->
rabbit_log:error("Unable to store file summary" ?LOG_ERROR("Unable to store file summary"
" for vhost message store for directory ~tp~n" " for vhost message store for directory ~tp~n"
"Error: ~tp", "Error: ~tp",
[Dir, FSErr]) [Dir, FSErr])
@ -994,10 +995,10 @@ terminate(Reason, State = #msstate { index_ets = IndexEts,
index_terminate(IndexEts, Dir), index_terminate(IndexEts, Dir),
case store_recovery_terms([{client_refs, maps:keys(Clients)}], Dir) of case store_recovery_terms([{client_refs, maps:keys(Clients)}], Dir) of
ok -> ok ->
rabbit_log:info("Message store for directory '~ts' is stopped", [Dir]), ?LOG_INFO("Message store for directory '~ts' is stopped", [Dir]),
ok; ok;
{error, RTErr} -> {error, RTErr} ->
rabbit_log:error("Unable to save message store recovery terms" ?LOG_ERROR("Unable to save message store recovery terms"
" for directory ~tp~nError: ~tp", " for directory ~tp~nError: ~tp",
[Dir, RTErr]) [Dir, RTErr])
end, end,
@ -1702,7 +1703,7 @@ index_terminate(IndexEts, Dir) ->
[{extended_info, [object_count]}]) of [{extended_info, [object_count]}]) of
ok -> ok; ok -> ok;
{error, Err} -> {error, Err} ->
rabbit_log:error("Unable to save message store index" ?LOG_ERROR("Unable to save message store index"
" for directory ~tp.~nError: ~tp", " for directory ~tp.~nError: ~tp",
[Dir, Err]) [Dir, Err])
end, end,
@ -1715,11 +1716,11 @@ index_terminate(IndexEts, Dir) ->
recover_index_and_client_refs(_Recover, undefined, Dir, _Name) -> recover_index_and_client_refs(_Recover, undefined, Dir, _Name) ->
{false, index_new(Dir), []}; {false, index_new(Dir), []};
recover_index_and_client_refs(false, _ClientRefs, Dir, Name) -> recover_index_and_client_refs(false, _ClientRefs, Dir, Name) ->
rabbit_log:warning("Message store ~tp: rebuilding indices from scratch", [Name]), ?LOG_WARNING("Message store ~tp: rebuilding indices from scratch", [Name]),
{false, index_new(Dir), []}; {false, index_new(Dir), []};
recover_index_and_client_refs(true, ClientRefs, Dir, Name) -> recover_index_and_client_refs(true, ClientRefs, Dir, Name) ->
Fresh = fun (ErrorMsg, ErrorArgs) -> Fresh = fun (ErrorMsg, ErrorArgs) ->
rabbit_log:warning("Message store ~tp : " ++ ErrorMsg ++ "~n" ?LOG_WARNING("Message store ~tp : " ++ ErrorMsg ++ "~n"
"rebuilding indices from scratch", "rebuilding indices from scratch",
[Name | ErrorArgs]), [Name | ErrorArgs]),
{false, index_new(Dir), []} {false, index_new(Dir), []}
@ -1812,9 +1813,9 @@ build_index(true, _StartupFunState,
{FileSize, State#msstate{ current_file = File }}; {FileSize, State#msstate{ current_file = File }};
build_index(false, {MsgRefDeltaGen, MsgRefDeltaGenInit}, build_index(false, {MsgRefDeltaGen, MsgRefDeltaGenInit},
State = #msstate { dir = Dir }) -> State = #msstate { dir = Dir }) ->
rabbit_log:debug("Rebuilding message refcount...", []), ?LOG_DEBUG("Rebuilding message refcount...", []),
ok = count_msg_refs(MsgRefDeltaGen, MsgRefDeltaGenInit, State), ok = count_msg_refs(MsgRefDeltaGen, MsgRefDeltaGenInit, State),
rabbit_log:debug("Done rebuilding message refcount", []), ?LOG_DEBUG("Done rebuilding message refcount", []),
{ok, Pid} = gatherer:start_link(), {ok, Pid} = gatherer:start_link(),
case [filename_to_num(FileName) || case [filename_to_num(FileName) ||
FileName <- list_sorted_filenames(Dir, ?FILE_EXTENSION)] of FileName <- list_sorted_filenames(Dir, ?FILE_EXTENSION)] of
@ -1828,7 +1829,7 @@ build_index(false, {MsgRefDeltaGen, MsgRefDeltaGenInit},
build_index_worker(Gatherer, #msstate { index_ets = IndexEts, dir = Dir }, build_index_worker(Gatherer, #msstate { index_ets = IndexEts, dir = Dir },
File, Files) -> File, Files) ->
Path = form_filename(Dir, filenum_to_name(File)), Path = form_filename(Dir, filenum_to_name(File)),
rabbit_log:debug("Rebuilding message location index from ~ts (~B file(s) remaining)", ?LOG_DEBUG("Rebuilding message location index from ~ts (~B file(s) remaining)",
[Path, length(Files)]), [Path, length(Files)]),
%% The scan function already dealt with duplicate messages %% The scan function already dealt with duplicate messages
%% within the file, and only returns valid messages (we do %% within the file, and only returns valid messages (we do
@ -2000,7 +2001,7 @@ delete_file_if_empty(File, State = #msstate {
compact_file(File, State = #gc_state { file_summary_ets = FileSummaryEts }) -> compact_file(File, State = #gc_state { file_summary_ets = FileSummaryEts }) ->
case ets:lookup(FileSummaryEts, File) of case ets:lookup(FileSummaryEts, File) of
[] -> [] ->
rabbit_log:debug("File ~tp has already been deleted; no need to compact", ?LOG_DEBUG("File ~tp has already been deleted; no need to compact",
[File]), [File]),
ok; ok;
[#file_summary{file_size = FileSize}] -> [#file_summary{file_size = FileSize}] ->
@ -2045,7 +2046,7 @@ compact_file(File, FileSize,
%% after truncation. This is a debug message so it doesn't hurt to %% after truncation. This is a debug message so it doesn't hurt to
%% put out more details around what's happening. %% put out more details around what's happening.
Reclaimed = FileSize - TruncateSize, Reclaimed = FileSize - TruncateSize,
rabbit_log:debug("Compacted segment file number ~tp; ~tp bytes can now be reclaimed", ?LOG_DEBUG("Compacted segment file number ~tp; ~tp bytes can now be reclaimed",
[File, Reclaimed]), [File, Reclaimed]),
%% Tell the message store to update its state. %% Tell the message store to update its state.
gen_server2:cast(Server, {compacted_file, File}), gen_server2:cast(Server, {compacted_file, File}),
@ -2146,7 +2147,7 @@ truncate_file(File, Size, ThresholdTimestamp, #gc_state{ file_summary_ets = File
case ets:select(FileHandlesEts, [{{{'_', File}, '$1'}, case ets:select(FileHandlesEts, [{{{'_', File}, '$1'},
[{'=<', '$1', ThresholdTimestamp}], ['$$']}], 1) of [{'=<', '$1', ThresholdTimestamp}], ['$$']}], 1) of
{[_|_], _Cont} -> {[_|_], _Cont} ->
rabbit_log:debug("Asked to truncate file ~p but it has active readers. Deferring.", ?LOG_DEBUG("Asked to truncate file ~p but it has active readers. Deferring.",
[File]), [File]),
defer; defer;
_ -> _ ->
@ -2157,7 +2158,7 @@ truncate_file(File, Size, ThresholdTimestamp, #gc_state{ file_summary_ets = File
ok = file:close(Fd), ok = file:close(Fd),
true = ets:update_element(FileSummaryEts, File, true = ets:update_element(FileSummaryEts, File,
{#file_summary.file_size, Size}), {#file_summary.file_size, Size}),
rabbit_log:debug("Truncated file number ~tp; new size ~tp bytes", [File, Size]), ?LOG_DEBUG("Truncated file number ~tp; new size ~tp bytes", [File, Size]),
ok ok
end end
end. end.
@ -2169,7 +2170,7 @@ delete_file(File, #gc_state { file_summary_ets = FileSummaryEts,
dir = Dir }) -> dir = Dir }) ->
case ets:match_object(FileHandlesEts, {{'_', File}, '_'}, 1) of case ets:match_object(FileHandlesEts, {{'_', File}, '_'}, 1) of
{[_|_], _Cont} -> {[_|_], _Cont} ->
rabbit_log:debug("Asked to delete file ~p but it has active readers. Deferring.", ?LOG_DEBUG("Asked to delete file ~p but it has active readers. Deferring.",
[File]), [File]),
defer; defer;
_ -> _ ->
@ -2177,7 +2178,7 @@ delete_file(File, #gc_state { file_summary_ets = FileSummaryEts,
file_size = FileSize }] = ets:lookup(FileSummaryEts, File), file_size = FileSize }] = ets:lookup(FileSummaryEts, File),
ok = file:delete(form_filename(Dir, filenum_to_name(File))), ok = file:delete(form_filename(Dir, filenum_to_name(File))),
true = ets:delete(FileSummaryEts, File), true = ets:delete(FileSummaryEts, File),
rabbit_log:debug("Deleted empty file number ~tp; reclaimed ~tp bytes", [File, FileSize]), ?LOG_DEBUG("Deleted empty file number ~tp; reclaimed ~tp bytes", [File, FileSize]),
ok ok
end. end.


@ -55,6 +55,7 @@
-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("rabbit_common/include/rabbit_misc.hrl"). -include_lib("rabbit_common/include/rabbit_misc.hrl").
-include_lib("kernel/include/logger.hrl").
%% IANA-suggested ephemeral port range is 49152 to 65535 %% IANA-suggested ephemeral port range is 49152 to 65535
-define(FIRST_TEST_BIND_PORT, 49152). -define(FIRST_TEST_BIND_PORT, 49152).
@ -90,7 +91,7 @@
boot() -> boot() ->
ok = record_distribution_listener(), ok = record_distribution_listener(),
_ = application:start(ranch), _ = application:start(ranch),
rabbit_log:debug("Started Ranch"), ?LOG_DEBUG("Started Ranch"),
%% Failures will throw exceptions %% Failures will throw exceptions
_ = boot_listeners(fun boot_tcp/2, application:get_env(rabbit, num_tcp_acceptors, 10), _ = boot_listeners(fun boot_tcp/2, application:get_env(rabbit, num_tcp_acceptors, 10),
application:get_env(rabbit, num_conns_sups, 1), "TCP"), application:get_env(rabbit, num_conns_sups, 1), "TCP"),
@ -103,7 +104,7 @@ boot_listeners(Fun, NumAcceptors, ConcurrentConnsSupsCount, Type) ->
ok -> ok ->
ok; ok;
{error, {could_not_start_listener, Address, Port, Details}} = Error -> {error, {could_not_start_listener, Address, Port, Details}} = Error ->
rabbit_log:error("Failed to start ~ts listener [~ts]:~tp, error: ~tp", ?LOG_ERROR("Failed to start ~ts listener [~ts]:~tp, error: ~tp",
[Type, Address, Port, Details]), [Type, Address, Port, Details]),
throw(Error) throw(Error)
end. end.
@ -156,7 +157,7 @@ tcp_listener_addresses({Host, Port, Family0})
[{IPAddress, Port, Family} || [{IPAddress, Port, Family} ||
{IPAddress, Family} <- getaddr(Host, Family0)]; {IPAddress, Family} <- getaddr(Host, Family0)];
tcp_listener_addresses({_Host, Port, _Family0}) -> tcp_listener_addresses({_Host, Port, _Family0}) ->
rabbit_log:error("invalid port ~tp - not 0..65535", [Port]), ?LOG_ERROR("invalid port ~tp - not 0..65535", [Port]),
throw({error, {invalid_port, Port}}). throw({error, {invalid_port, Port}}).
tcp_listener_addresses_auto(Port) -> tcp_listener_addresses_auto(Port) ->
@ -264,7 +265,7 @@ stop_ranch_listener_of_protocol(Protocol) ->
case ranch_ref_of_protocol(Protocol) of case ranch_ref_of_protocol(Protocol) of
undefined -> ok; undefined -> ok;
Ref -> Ref ->
rabbit_log:debug("Stopping Ranch listener for protocol ~ts", [Protocol]), ?LOG_DEBUG("Stopping Ranch listener for protocol ~ts", [Protocol]),
ranch:stop_listener(Ref) ranch:stop_listener(Ref)
end. end.
@ -404,7 +405,7 @@ epmd_port_please(Name, Host) ->
epmd_port_please(Name, Host, 0) -> epmd_port_please(Name, Host, 0) ->
maybe_get_epmd_port(Name, Host); maybe_get_epmd_port(Name, Host);
epmd_port_please(Name, Host, RetriesLeft) -> epmd_port_please(Name, Host, RetriesLeft) ->
rabbit_log:debug("Getting epmd port node '~ts', ~b retries left", ?LOG_DEBUG("Getting epmd port node '~ts', ~b retries left",
[Name, RetriesLeft]), [Name, RetriesLeft]),
case catch maybe_get_epmd_port(Name, Host) of case catch maybe_get_epmd_port(Name, Host) of
ok -> ok; ok -> ok;
@ -520,11 +521,11 @@ emit_connection_info_local(Items, Ref, AggregatorPid) ->
-spec close_connection(pid(), string()) -> 'ok'. -spec close_connection(pid(), string()) -> 'ok'.
close_connection(Pid, Explanation) -> close_connection(Pid, Explanation) ->
rabbit_log:info("Closing connection ~tp because ~tp", ?LOG_INFO("Closing connection ~tp because ~tp",
[Pid, Explanation]), [Pid, Explanation]),
try rabbit_reader:shutdown(Pid, Explanation) try rabbit_reader:shutdown(Pid, Explanation)
catch exit:{Reason, _Location} -> catch exit:{Reason, _Location} ->
rabbit_log:warning("Could not close connection ~tp (reason: ~tp): ~p", ?LOG_WARNING("Could not close connection ~tp (reason: ~tp): ~p",
[Pid, Explanation, Reason]) [Pid, Explanation, Reason])
end. end.
@ -561,7 +562,7 @@ failed_to_recv_proxy_header(Ref, Error) ->
closed -> "error when receiving proxy header: TCP socket was ~tp prematurely"; closed -> "error when receiving proxy header: TCP socket was ~tp prematurely";
_Other -> "error when receiving proxy header: ~tp" _Other -> "error when receiving proxy header: ~tp"
end, end,
rabbit_log:debug(Msg, [Error]), ?LOG_DEBUG(Msg, [Error]),
% The following call will clean up resources then exit % The following call will clean up resources then exit
_ = try ranch:handshake(Ref) catch _ = try ranch:handshake(Ref) catch
_:_ -> ok _:_ -> ok
@ -602,7 +603,7 @@ ranch_handshake(Ref) ->
exit:{shutdown, {Reason, {PeerIp, PeerPort}}} = Error:Stacktrace -> exit:{shutdown, {Reason, {PeerIp, PeerPort}}} = Error:Stacktrace ->
PeerAddress = io_lib:format("~ts:~tp", [rabbit_misc:ntoab(PeerIp), PeerPort]), PeerAddress = io_lib:format("~ts:~tp", [rabbit_misc:ntoab(PeerIp), PeerPort]),
Protocol = ranch_ref_to_protocol(Ref), Protocol = ranch_ref_to_protocol(Ref),
rabbit_log:error("~p error during handshake for protocol ~p and peer ~ts", ?LOG_ERROR("~p error during handshake for protocol ~p and peer ~ts",
[Reason, Protocol, PeerAddress]), [Reason, Protocol, PeerAddress]),
erlang:raise(exit, Error, Stacktrace) erlang:raise(exit, Error, Stacktrace)
end. end.
@ -664,7 +665,7 @@ gethostaddr(Host, Family) ->
-spec host_lookup_error(_, _) -> no_return(). -spec host_lookup_error(_, _) -> no_return().
host_lookup_error(Host, Reason) -> host_lookup_error(Host, Reason) ->
rabbit_log:error("invalid host ~tp - ~tp", [Host, Reason]), ?LOG_ERROR("invalid host ~tp - ~tp", [Host, Reason]),
throw({error, {invalid_host, Host, Reason}}). throw({error, {invalid_host, Host, Reason}}).
resolve_family({_,_,_,_}, auto) -> inet; resolve_family({_,_,_,_}, auto) -> inet;


@ -7,6 +7,9 @@
-module(rabbit_node_monitor). -module(rabbit_node_monitor).
-include_lib("kernel/include/logger.hrl").
-behaviour(gen_server). -behaviour(gen_server).
-export([start_link/0]). -export([start_link/0]).
@ -492,14 +495,14 @@ handle_cast({check_partial_partition, Node, Rep, NodeGUID, MyGUID, RepGUID},
case rpc:call(Node, erlang, system_info, [creation]) of case rpc:call(Node, erlang, system_info, [creation]) of
{badrpc, _} -> ok; {badrpc, _} -> ok;
NodeGUID -> NodeGUID ->
rabbit_log:warning("Received a 'DOWN' message" ?LOG_WARNING("Received a 'DOWN' message"
" from ~tp but still can" " from ~tp but still can"
" communicate with it ", " communicate with it ",
[Node]), [Node]),
cast(Rep, {partial_partition, cast(Rep, {partial_partition,
Node, node(), RepGUID}); Node, node(), RepGUID});
_ -> _ ->
rabbit_log:warning("Node ~tp was restarted", [Node]), ?LOG_WARNING("Node ~tp was restarted", [Node]),
ok ok
end end
end), end),
@ -530,7 +533,7 @@ handle_cast({partial_partition, NotReallyDown, Proxy, MyGUID},
ArgsBase = [NotReallyDown, Proxy, NotReallyDown], ArgsBase = [NotReallyDown, Proxy, NotReallyDown],
case application:get_env(rabbit, cluster_partition_handling) of case application:get_env(rabbit, cluster_partition_handling) of
{ok, pause_minority} -> {ok, pause_minority} ->
rabbit_log:error( ?LOG_ERROR(
FmtBase ++ " * pause_minority mode enabled~n" FmtBase ++ " * pause_minority mode enabled~n"
"We will therefore pause until the *entire* cluster recovers", "We will therefore pause until the *entire* cluster recovers",
ArgsBase), ArgsBase),
@ -538,17 +541,17 @@ handle_cast({partial_partition, NotReallyDown, Proxy, MyGUID},
{noreply, State}; {noreply, State};
{ok, {pause_if_all_down, PreferredNodes, _}} -> {ok, {pause_if_all_down, PreferredNodes, _}} ->
case in_preferred_partition(PreferredNodes) of case in_preferred_partition(PreferredNodes) of
true -> rabbit_log:error( true -> ?LOG_ERROR(
FmtBase ++ "We will therefore intentionally " FmtBase ++ "We will therefore intentionally "
"disconnect from ~ts", ArgsBase ++ [Proxy]), "disconnect from ~ts", ArgsBase ++ [Proxy]),
upgrade_to_full_partition(Proxy); upgrade_to_full_partition(Proxy);
false -> rabbit_log:info( false -> ?LOG_INFO(
FmtBase ++ "We are about to pause, no need " FmtBase ++ "We are about to pause, no need "
"for further actions", ArgsBase) "for further actions", ArgsBase)
end, end,
{noreply, State}; {noreply, State};
{ok, _} -> {ok, _} ->
rabbit_log:error( ?LOG_ERROR(
FmtBase ++ "We will therefore intentionally disconnect from ~ts", FmtBase ++ "We will therefore intentionally disconnect from ~ts",
ArgsBase ++ [Proxy]), ArgsBase ++ [Proxy]),
upgrade_to_full_partition(Proxy), upgrade_to_full_partition(Proxy),
@ -562,7 +565,7 @@ handle_cast({partial_partition, _GUID, _Reporter, _Proxy}, State) ->
%% messages reliably when another node disconnects from us. Therefore %% messages reliably when another node disconnects from us. Therefore
%% we are told just before the disconnection so we can reciprocate. %% we are told just before the disconnection so we can reciprocate.
handle_cast({partial_partition_disconnect, Other}, State) -> handle_cast({partial_partition_disconnect, Other}, State) ->
rabbit_log:error("Partial partition disconnect from ~ts", [Other]), ?LOG_ERROR("Partial partition disconnect from ~ts", [Other]),
disconnect(Other), disconnect(Other),
{noreply, State}; {noreply, State};
@ -571,7 +574,7 @@ handle_cast({partial_partition_disconnect, Other}, State) ->
%% mnesia propagation. %% mnesia propagation.
handle_cast({node_up, Node, NodeType}, handle_cast({node_up, Node, NodeType},
State = #state{monitors = Monitors}) -> State = #state{monitors = Monitors}) ->
rabbit_log:info("rabbit on node ~tp up", [Node]), ?LOG_INFO("rabbit on node ~tp up", [Node]),
case rabbit_khepri:is_enabled() of case rabbit_khepri:is_enabled() of
true -> true ->
ok; ok;
@ -606,7 +609,7 @@ handle_cast({joined_cluster, Node, NodeType}, State) ->
end, end,
RunningNodes}) RunningNodes})
end, end,
rabbit_log:debug("Node '~tp' has joined the cluster", [Node]), ?LOG_DEBUG("Node '~tp' has joined the cluster", [Node]),
rabbit_event:notify(node_added, [{node, Node}]), rabbit_event:notify(node_added, [{node, Node}]),
{noreply, State}; {noreply, State};
@ -634,7 +637,7 @@ handle_cast(_Msg, State) ->
handle_info({'DOWN', _MRef, process, {rabbit, Node}, _Reason}, handle_info({'DOWN', _MRef, process, {rabbit, Node}, _Reason},
State = #state{monitors = Monitors, subscribers = Subscribers}) -> State = #state{monitors = Monitors, subscribers = Subscribers}) ->
rabbit_log:info("rabbit on node ~tp down", [Node]), ?LOG_INFO("rabbit on node ~tp down", [Node]),
case rabbit_khepri:is_enabled() of case rabbit_khepri:is_enabled() of
true -> true ->
ok; ok;
@ -653,7 +656,7 @@ handle_info({'DOWN', _MRef, process, Pid, _Reason},
{noreply, State#state{subscribers = pmon:erase(Pid, Subscribers)}}; {noreply, State#state{subscribers = pmon:erase(Pid, Subscribers)}};
handle_info({nodedown, Node, Info}, State) -> handle_info({nodedown, Node, Info}, State) ->
rabbit_log:info("node ~tp down: ~tp", ?LOG_INFO("node ~tp down: ~tp",
[Node, proplists:get_value(nodedown_reason, Info)]), [Node, proplists:get_value(nodedown_reason, Info)]),
case rabbit_khepri:is_enabled() of case rabbit_khepri:is_enabled() of
true -> {noreply, State}; true -> {noreply, State};
@ -661,7 +664,7 @@ handle_info({nodedown, Node, Info}, State) ->
end; end;
handle_info({nodeup, Node, _Info}, State) -> handle_info({nodeup, Node, _Info}, State) ->
rabbit_log:info("node ~tp up", [Node]), ?LOG_INFO("node ~tp up", [Node]),
{noreply, State}; {noreply, State};
handle_info({mnesia_system_event, handle_info({mnesia_system_event,
@ -781,13 +784,13 @@ handle_dead_node(Node, State = #state{autoheal = Autoheal}) ->
{ok, autoheal} -> {ok, autoheal} ->
State#state{autoheal = rabbit_autoheal:node_down(Node, Autoheal)}; State#state{autoheal = rabbit_autoheal:node_down(Node, Autoheal)};
{ok, Term} -> {ok, Term} ->
rabbit_log:warning("cluster_partition_handling ~tp unrecognised, " ?LOG_WARNING("cluster_partition_handling ~tp unrecognised, "
"assuming 'ignore'", [Term]), "assuming 'ignore'", [Term]),
State State
end. end.
await_cluster_recovery(Condition) -> await_cluster_recovery(Condition) ->
rabbit_log:warning("Cluster minority/secondary status detected - " ?LOG_WARNING("Cluster minority/secondary status detected - "
"awaiting recovery", []), "awaiting recovery", []),
run_outside_applications(fun () -> run_outside_applications(fun () ->
rabbit:stop(), rabbit:stop(),
@ -838,7 +841,7 @@ do_run_outside_app_fun(Fun) ->
try try
Fun() Fun()
catch _:E:Stacktrace -> catch _:E:Stacktrace ->
rabbit_log:error( ?LOG_ERROR(
"rabbit_outside_app_process:~n~tp~n~tp", "rabbit_outside_app_process:~n~tp~n~tp",
[E, Stacktrace]) [E, Stacktrace])
end. end.
@ -1050,14 +1053,14 @@ possibly_partitioned_nodes() ->
alive_rabbit_nodes() -- rabbit_mnesia:cluster_nodes(running). alive_rabbit_nodes() -- rabbit_mnesia:cluster_nodes(running).
startup_log() -> startup_log() ->
rabbit_log:info("Starting rabbit_node_monitor (partition handling strategy unapplicable with Khepri)", []). ?LOG_INFO("Starting rabbit_node_monitor (partition handling strategy unapplicable with Khepri)", []).
startup_log(Nodes) -> startup_log(Nodes) ->
{ok, M} = application:get_env(rabbit, cluster_partition_handling), {ok, M} = application:get_env(rabbit, cluster_partition_handling),
startup_log(Nodes, M). startup_log(Nodes, M).
startup_log([], PartitionHandling) -> startup_log([], PartitionHandling) ->
rabbit_log:info("Starting rabbit_node_monitor (in ~tp mode)", [PartitionHandling]); ?LOG_INFO("Starting rabbit_node_monitor (in ~tp mode)", [PartitionHandling]);
startup_log(Nodes, PartitionHandling) -> startup_log(Nodes, PartitionHandling) ->
rabbit_log:info("Starting rabbit_node_monitor (in ~tp mode), might be partitioned from ~tp", ?LOG_INFO("Starting rabbit_node_monitor (in ~tp mode), might be partitioned from ~tp",
[PartitionHandling, Nodes]). [PartitionHandling, Nodes]).
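The calls above use the two-argument form of ?LOG_INFO; kernel's logger.hrl also provides one- and three-argument variants, the last of which takes a metadata map. A brief sketch with a hypothetical module name and an illustrative domain value:

%% Illustrative only: the ?LOG_INFO arities available from logger.hrl.
-module(log_arity_example).
-include_lib("kernel/include/logger.hrl").
-export([demo/1]).

demo(Nodes) ->
    ?LOG_INFO("Starting node monitor"),                  %% message only
    ?LOG_INFO("Might be partitioned from ~tp", [Nodes]), %% format + args
    ?LOG_INFO("Partition check scheduled", [],
              #{domain => [example]}).                   %% format + args + metadata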


@ -127,7 +127,7 @@ seed_internal_cluster_id() ->
case rabbit_runtime_parameters:lookup_global(?INTERNAL_CLUSTER_ID_PARAM_NAME) of case rabbit_runtime_parameters:lookup_global(?INTERNAL_CLUSTER_ID_PARAM_NAME) of
not_found -> not_found ->
Id = rabbit_guid:binary(rabbit_guid:gen(), "rabbitmq-cluster-id"), Id = rabbit_guid:binary(rabbit_guid:gen(), "rabbitmq-cluster-id"),
rabbit_log:info("Initialising internal cluster ID to '~ts'", [Id]), ?LOG_INFO("Initialising internal cluster ID to '~ts'", [Id]),
rabbit_runtime_parameters:set_global(?INTERNAL_CLUSTER_ID_PARAM_NAME, Id, ?INTERNAL_USER), rabbit_runtime_parameters:set_global(?INTERNAL_CLUSTER_ID_PARAM_NAME, Id, ?INTERNAL_USER),
Id; Id;
Param -> Param ->
@ -139,7 +139,7 @@ seed_user_provided_cluster_name() ->
case application:get_env(rabbit, cluster_name) of case application:get_env(rabbit, cluster_name) of
undefined -> ok; undefined -> ok;
{ok, Name} -> {ok, Name} ->
rabbit_log:info("Setting cluster name to '~ts' as configured", [Name]), ?LOG_INFO("Setting cluster name to '~ts' as configured", [Name]),
set_cluster_name(rabbit_data_coercion:to_binary(Name)) set_cluster_name(rabbit_data_coercion:to_binary(Name))
end. end.


@ -6,6 +6,9 @@
%% %%
-module(rabbit_peer_discovery_classic_config). -module(rabbit_peer_discovery_classic_config).
-include_lib("kernel/include/logger.hrl").
-behaviour(rabbit_peer_discovery_backend). -behaviour(rabbit_peer_discovery_backend).
-export([list_nodes/0, supports_registration/0, register/0, unregister/0, -export([list_nodes/0, supports_registration/0, register/0, unregister/0,
@ -42,7 +45,7 @@ check_duplicates(Nodes) ->
true -> true ->
ok; ok;
false -> false ->
rabbit_log:warning("Classic peer discovery backend: list of " ?LOG_WARNING("Classic peer discovery backend: list of "
"nodes contains duplicates ~0tp", "nodes contains duplicates ~0tp",
[Nodes]) [Nodes])
end. end.
@ -52,7 +55,7 @@ check_local_node(Nodes) ->
true -> true ->
ok; ok;
false -> false ->
rabbit_log:warning("Classic peer discovery backend: list of " ?LOG_WARNING("Classic peer discovery backend: list of "
"nodes does not contain the local node ~0tp", "nodes does not contain the local node ~0tp",
[Nodes]) [Nodes])
end. end.
@ -65,7 +68,7 @@ lock(Nodes) ->
Node = node(), Node = node(),
case lists:member(Node, Nodes) of case lists:member(Node, Nodes) of
false when Nodes =/= [] -> false when Nodes =/= [] ->
rabbit_log:warning("Local node ~ts is not part of configured nodes ~tp. " ?LOG_WARNING("Local node ~ts is not part of configured nodes ~tp. "
"This might lead to incorrect cluster formation.", [Node, Nodes]); "This might lead to incorrect cluster formation.", [Node, Nodes]);
_ -> ok _ -> ok
end, end,


@ -6,6 +6,9 @@
%% %%
-module(rabbit_peer_discovery_dns). -module(rabbit_peer_discovery_dns).
-include_lib("kernel/include/logger.hrl").
-behaviour(rabbit_peer_discovery_backend). -behaviour(rabbit_peer_discovery_backend).
-export([list_nodes/0, supports_registration/0, register/0, unregister/0, -export([list_nodes/0, supports_registration/0, register/0, unregister/0,
@ -27,7 +30,7 @@ list_nodes() ->
{ok, ClusterFormation} -> {ok, ClusterFormation} ->
case proplists:get_value(peer_discovery_dns, ClusterFormation) of case proplists:get_value(peer_discovery_dns, ClusterFormation) of
undefined -> undefined ->
rabbit_log:warning("Peer discovery backend is set to ~ts " ?LOG_WARNING("Peer discovery backend is set to ~ts "
"but final config does not contain rabbit.cluster_formation.peer_discovery_dns. " "but final config does not contain rabbit.cluster_formation.peer_discovery_dns. "
"Cannot discover any nodes because seed hostname is not configured!", "Cannot discover any nodes because seed hostname is not configured!",
[?MODULE]), [?MODULE]),
@ -90,7 +93,7 @@ decode_record(ipv6) ->
lookup(SeedHostname, LongNamesUsed, IPv) -> lookup(SeedHostname, LongNamesUsed, IPv) ->
IPs = inet_res:lookup(SeedHostname, in, decode_record(IPv)), IPs = inet_res:lookup(SeedHostname, in, decode_record(IPv)),
rabbit_log:info("Addresses discovered via ~ts records of ~ts: ~ts", ?LOG_INFO("Addresses discovered via ~ts records of ~ts: ~ts",
[string:to_upper(atom_to_list(decode_record(IPv))), [string:to_upper(atom_to_list(decode_record(IPv))),
SeedHostname, SeedHostname,
string:join([inet_parse:ntoa(IP) || IP <- IPs], ", ")]), string:join([inet_parse:ntoa(IP) || IP <- IPs], ", ")]),
@ -106,6 +109,6 @@ extract_host({ok, {hostent, FQDN, _, _, _, _}}, true, _Address) ->
extract_host({ok, {hostent, FQDN, _, _, _, _}}, false, _Address) -> extract_host({ok, {hostent, FQDN, _, _, _, _}}, false, _Address) ->
lists:nth(1, string:tokens(FQDN, ".")); lists:nth(1, string:tokens(FQDN, "."));
extract_host({error, Error}, _, Address) -> extract_host({error, Error}, _, Address) ->
rabbit_log:error("Reverse DNS lookup for address ~ts failed: ~tp", ?LOG_ERROR("Reverse DNS lookup for address ~ts failed: ~tp",
[inet_parse:ntoa(Address), Error]), [inet_parse:ntoa(Address), Error]),
error. error.


@ -7,6 +7,7 @@
-module(rabbit_plugins). -module(rabbit_plugins).
-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-export([setup/0, active/0, read_enabled/1, list/0, list/1, list/2, dependencies/3, running_plugins/0]). -export([setup/0, active/0, read_enabled/1, list/0, list/1, list/2, dependencies/3, running_plugins/0]).
-export([ensure/1]). -export([ensure/1]).
-export([validate_plugins/1, format_invalid_plugins/1]). -export([validate_plugins/1, format_invalid_plugins/1]).
@ -60,13 +61,13 @@ ensure1(FileJustChanged0) ->
{[], []} -> {[], []} ->
ok; ok;
{[], _} -> {[], _} ->
rabbit_log:info("Plugins changed; disabled ~tp", ?LOG_INFO("Plugins changed; disabled ~tp",
[Stop]); [Stop]);
{_, []} -> {_, []} ->
rabbit_log:info("Plugins changed; enabled ~tp", ?LOG_INFO("Plugins changed; enabled ~tp",
[Start]); [Start]);
{_, _} -> {_, _} ->
rabbit_log:info("Plugins changed; enabled ~tp, disabled ~tp", ?LOG_INFO("Plugins changed; enabled ~tp, disabled ~tp",
[Start, Stop]) [Start, Stop])
end, end,
{ok, Start, Stop}; {ok, Start, Stop};
@ -357,7 +358,7 @@ maybe_warn_about_invalid_plugins([]) ->
ok; ok;
maybe_warn_about_invalid_plugins(InvalidPlugins) -> maybe_warn_about_invalid_plugins(InvalidPlugins) ->
%% TODO: error message formatting %% TODO: error message formatting
rabbit_log:warning(format_invalid_plugins(InvalidPlugins)). ?LOG_WARNING(format_invalid_plugins(InvalidPlugins)).
format_invalid_plugins(InvalidPlugins) -> format_invalid_plugins(InvalidPlugins) ->
@ -413,7 +414,7 @@ validate_plugins(Plugins, BrokerVersion) ->
true -> true ->
case BrokerVersion of case BrokerVersion of
"0.0.0" -> "0.0.0" ->
rabbit_log:warning( ?LOG_WARNING(
"Running development version of the broker." "Running development version of the broker."
" Requirement ~tp for plugin ~tp is ignored.", " Requirement ~tp for plugin ~tp is ignored.",
[BrokerVersionReqs, Name]); [BrokerVersionReqs, Name]);
@ -444,7 +445,7 @@ check_plugins_versions(PluginName, AllPlugins, RequiredVersions) ->
true -> true ->
case Version of case Version of
"" -> "" ->
rabbit_log:warning( ?LOG_WARNING(
"~tp plugin version is not defined." "~tp plugin version is not defined."
" Requirement ~tp for plugin ~tp is ignored", " Requirement ~tp for plugin ~tp is ignored",
[Name, Versions, PluginName]); [Name, Versions, PluginName]);
@ -512,7 +513,7 @@ prepare_dir_plugin(PluginAppDescPath) ->
{module, _} -> {module, _} ->
ok; ok;
{error, badfile} -> {error, badfile} ->
rabbit_log:error("Failed to enable plugin \"~ts\": " ?LOG_ERROR("Failed to enable plugin \"~ts\": "
"it may have been built with an " "it may have been built with an "
"incompatible (more recent?) " "incompatible (more recent?) "
"version of Erlang", [Plugin]), "version of Erlang", [Plugin]),
@ -545,11 +546,11 @@ prepare_plugin(#plugin{type = ez, name = Name, location = Location}, ExpandDir)
[PluginAppDescPath|_] -> [PluginAppDescPath|_] ->
prepare_dir_plugin(PluginAppDescPath); prepare_dir_plugin(PluginAppDescPath);
_ -> _ ->
rabbit_log:error("Plugin archive '~ts' doesn't contain an .app file", [Location]), ?LOG_ERROR("Plugin archive '~ts' doesn't contain an .app file", [Location]),
throw({app_file_missing, Name, Location}) throw({app_file_missing, Name, Location})
end; end;
{error, Reason} -> {error, Reason} ->
rabbit_log:error("Could not unzip plugin archive '~ts': ~tp", [Location, Reason]), ?LOG_ERROR("Could not unzip plugin archive '~ts': ~tp", [Location, Reason]),
throw({failed_to_unzip_plugin, Name, Location, Reason}) throw({failed_to_unzip_plugin, Name, Location, Reason})
end; end;
prepare_plugin(#plugin{type = dir, location = Location, name = Name}, prepare_plugin(#plugin{type = dir, location = Location, name = Name},
@ -558,7 +559,7 @@ prepare_plugin(#plugin{type = dir, location = Location, name = Name},
[PluginAppDescPath|_] -> [PluginAppDescPath|_] ->
prepare_dir_plugin(PluginAppDescPath); prepare_dir_plugin(PluginAppDescPath);
_ -> _ ->
rabbit_log:error("Plugin directory '~ts' doesn't contain an .app file", [Location]), ?LOG_ERROR("Plugin directory '~ts' doesn't contain an .app file", [Location]),
throw({app_file_missing, Name, Location}) throw({app_file_missing, Name, Location})
end. end.


@ -29,6 +29,7 @@
-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl").
-include("amqqueue.hrl"). -include("amqqueue.hrl").
-include_lib("kernel/include/logger.hrl").
-import(rabbit_misc, [pget/2, pget/3]). -import(rabbit_misc, [pget/2, pget/3]).
@ -285,7 +286,7 @@ parse_set0(Type, VHost, Name, Pattern, Defn, Priority, ApplyTo, ActingUser) ->
{<<"priority">>, Priority}, {<<"priority">>, Priority},
{<<"apply-to">>, ApplyTo}], {<<"apply-to">>, ApplyTo}],
ActingUser), ActingUser),
rabbit_log:info("Successfully set policy '~ts' matching ~ts names in virtual host '~ts' using pattern '~ts'", ?LOG_INFO("Successfully set policy '~ts' matching ~ts names in virtual host '~ts' using pattern '~ts'",
[Name, ApplyTo, VHost, Pattern]), [Name, ApplyTo, VHost, Pattern]),
R; R;
{error, Reason} -> {error, Reason} ->


@ -9,6 +9,7 @@
-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl").
-include("amqqueue.hrl"). -include("amqqueue.hrl").
-include_lib("kernel/include/logger.hrl").
-behaviour(rabbit_backing_queue). -behaviour(rabbit_backing_queue).
@ -66,7 +67,7 @@ enable() ->
{ok, RealBQ} = application:get_env(rabbit, backing_queue_module), {ok, RealBQ} = application:get_env(rabbit, backing_queue_module),
case RealBQ of case RealBQ of
?MODULE -> ok; ?MODULE -> ok;
_ -> rabbit_log:info("Priority queues enabled, real BQ is ~ts", _ -> ?LOG_INFO("Priority queues enabled, real BQ is ~ts",
[RealBQ]), [RealBQ]),
application:set_env( application:set_env(
rabbitmq_priority_queue, backing_queue_module, RealBQ), rabbitmq_priority_queue, backing_queue_module, RealBQ),


@ -223,6 +223,7 @@
}). }).
-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
%%---------------------------------------------------------------------------- %%----------------------------------------------------------------------------
@ -556,7 +557,7 @@ start(VHost, DurableQueueNames) ->
ToDelete = [filename:join([rabbit_vhost:msg_store_dir_path(VHost), "queues", Dir]) ToDelete = [filename:join([rabbit_vhost:msg_store_dir_path(VHost), "queues", Dir])
|| Dir <- lists:subtract(all_queue_directory_names(VHost), || Dir <- lists:subtract(all_queue_directory_names(VHost),
sets:to_list(DurableDirectories))], sets:to_list(DurableDirectories))],
rabbit_log:debug("Deleting unknown files/folders: ~p", [ToDelete]), ?LOG_DEBUG("Deleting unknown files/folders: ~p", [ToDelete]),
_ = rabbit_file:recursive_delete(ToDelete), _ = rabbit_file:recursive_delete(ToDelete),
rabbit_recovery_terms:clear(VHost), rabbit_recovery_terms:clear(VHost),
@ -1182,7 +1183,7 @@ load_segment(KeepAcked, #segment { path = Path }) ->
%% was missing above). We also log some information. %% was missing above). We also log some information.
case SegBin of case SegBin of
<<0:Size/unit:8>> -> <<0:Size/unit:8>> ->
rabbit_log:warning("Deleting invalid v1 segment file ~ts (file only contains NUL bytes)", ?LOG_WARNING("Deleting invalid v1 segment file ~ts (file only contains NUL bytes)",
[Path]), [Path]),
_ = rabbit_file:delete(Path), _ = rabbit_file:delete(Path),
Empty; Empty;


@ -14,6 +14,7 @@
-include("vhost.hrl"). -include("vhost.hrl").
-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("amqp10_common/include/amqp10_types.hrl"). -include_lib("amqp10_common/include/amqp10_types.hrl").
-include_lib("kernel/include/logger.hrl").
-export([ -export([
init/0, init/0,
@ -561,7 +562,7 @@ recover(VHost, Qs) ->
end, ByType0, Qs), end, ByType0, Qs),
maps:fold(fun (Mod, Queues, {R0, F0}) -> maps:fold(fun (Mod, Queues, {R0, F0}) ->
{Taken, {R, F}} = timer:tc(Mod, recover, [VHost, Queues]), {Taken, {R, F}} = timer:tc(Mod, recover, [VHost, Queues]),
rabbit_log:info("Recovering ~b queues of type ~ts took ~bms", ?LOG_INFO("Recovering ~b queues of type ~ts took ~bms",
[length(Queues), Mod, Taken div 1000]), [length(Queues), Mod, Taken div 1000]),
{R0 ++ R, F0 ++ F} {R0 ++ R, F0 ++ F}
end, {[], []}, ByType). end, {[], []}, ByType).


@ -99,6 +99,7 @@
-include_lib("stdlib/include/qlc.hrl"). -include_lib("stdlib/include/qlc.hrl").
-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl").
-include("amqqueue.hrl"). -include("amqqueue.hrl").
-include_lib("kernel/include/logger.hrl").
-type msg_id() :: non_neg_integer(). -type msg_id() :: non_neg_integer().
-type qmsg() :: {rabbit_types:r('queue'), pid(), msg_id(), boolean(), -type qmsg() :: {rabbit_types:r('queue'), pid(), msg_id(), boolean(),
@ -113,7 +114,7 @@
-define(DEFAULT_DELIVERY_LIMIT, 20). -define(DEFAULT_DELIVERY_LIMIT, 20).
-define(INFO(Str, Args), -define(INFO(Str, Args),
rabbit_log:info("[~s:~s/~b] " Str, ?LOG_INFO("[~s:~s/~b] " Str,
[?MODULE, ?FUNCTION_NAME, ?FUNCTION_ARITY | Args])). [?MODULE, ?FUNCTION_NAME, ?FUNCTION_ARITY | Args])).
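Redefining ?INFO to delegate to ?LOG_INFO keeps the existing call sites in this module untouched while still prefixing each message with the module, function and arity. A minimal sketch of how such a wrapper is used, with a hypothetical module name:

%% Illustrative only: a local ?INFO wrapper like the definition above.
%% Adjacent string literals "[~s:~s/~b] " and Str concatenate at compile time.
-module(info_wrapper_example).
-include_lib("kernel/include/logger.hrl").
-export([apply_policy/1]).

-define(INFO(Str, Args),
        ?LOG_INFO("[~s:~s/~b] " Str,
                  [?MODULE, ?FUNCTION_NAME, ?FUNCTION_ARITY | Args])).

apply_policy(QName) ->
    %% logs e.g. "[info_wrapper_example:apply_policy/1] applying policy to q1"
    ?INFO("applying policy to ~ts", [QName]).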
@ -272,7 +273,7 @@ start_cluster(Q) ->
?RPC_TIMEOUT)], ?RPC_TIMEOUT)],
MinVersion = lists:min([rabbit_fifo:version() | Versions]), MinVersion = lists:min([rabbit_fifo:version() | Versions]),
rabbit_log:debug("Will start up to ~w replicas for quorum queue ~ts with " ?LOG_DEBUG("Will start up to ~w replicas for quorum queue ~ts with "
"leader on node '~ts', initial machine version ~b", "leader on node '~ts', initial machine version ~b",
[QuorumSize, rabbit_misc:rs(QName), LeaderNode, MinVersion]), [QuorumSize, rabbit_misc:rs(QName), LeaderNode, MinVersion]),
case rabbit_amqqueue:internal_declare(NewQ1, false) of case rabbit_amqqueue:internal_declare(NewQ1, false) of
@ -342,7 +343,7 @@ gather_policy_config(Q, IsQueueDeclaration) ->
undefined -> undefined ->
case IsQueueDeclaration of case IsQueueDeclaration of
true -> true ->
rabbit_log:info( ?LOG_INFO(
"~ts: delivery_limit not set, defaulting to ~b", "~ts: delivery_limit not set, defaulting to ~b",
[rabbit_misc:rs(QName), ?DEFAULT_DELIVERY_LIMIT]); [rabbit_misc:rs(QName), ?DEFAULT_DELIVERY_LIMIT]);
false -> false ->
@ -644,7 +645,7 @@ handle_tick(QName,
ok -> ok ->
ok; ok;
repaired -> repaired ->
rabbit_log:debug("Repaired quorum queue ~ts amqqueue record", ?LOG_DEBUG("Repaired quorum queue ~ts amqqueue record",
[rabbit_misc:rs(QName)]) [rabbit_misc:rs(QName)])
end, end,
ExpectedNodes = rabbit_nodes:list_members(), ExpectedNodes = rabbit_nodes:list_members(),
@ -654,7 +655,7 @@ handle_tick(QName,
Stale when length(ExpectedNodes) > 0 -> Stale when length(ExpectedNodes) > 0 ->
%% rabbit_nodes:list_members/0 returns [] when there %% rabbit_nodes:list_members/0 returns [] when there
%% is an error so we need to handle that case %% is an error so we need to handle that case
rabbit_log:debug("~ts: stale nodes detected in quorum " ?LOG_DEBUG("~ts: stale nodes detected in quorum "
"queue state. Purging ~w", "queue state. Purging ~w",
[rabbit_misc:rs(QName), Stale]), [rabbit_misc:rs(QName), Stale]),
%% pipeline purge command %% pipeline purge command
@ -668,13 +669,13 @@ handle_tick(QName,
ok ok
catch catch
_:Err -> _:Err ->
rabbit_log:debug("~ts: handle tick failed with ~p", ?LOG_DEBUG("~ts: handle tick failed with ~p",
[rabbit_misc:rs(QName), Err]), [rabbit_misc:rs(QName), Err]),
ok ok
end end
end); end);
handle_tick(QName, Config, _Nodes) -> handle_tick(QName, Config, _Nodes) ->
rabbit_log:debug("~ts: handle tick received unexpected config format ~tp", ?LOG_DEBUG("~ts: handle tick received unexpected config format ~tp",
[rabbit_misc:rs(QName), Config]). [rabbit_misc:rs(QName), Config]).
repair_leader_record(Q, Name) -> repair_leader_record(Q, Name) ->
@ -685,7 +686,7 @@ repair_leader_record(Q, Name) ->
ok; ok;
_ -> _ ->
QName = amqqueue:get_name(Q), QName = amqqueue:get_name(Q),
rabbit_log:debug("~ts: updating leader record to current node ~ts", ?LOG_DEBUG("~ts: updating leader record to current node ~ts",
[rabbit_misc:rs(QName), Node]), [rabbit_misc:rs(QName), Node]),
ok = become_leader0(QName, Name), ok = become_leader0(QName, Name),
ok ok
@ -760,7 +761,7 @@ maybe_apply_policies(Q, #{config := CurrentConfig}) ->
ShouldUpdate = NewPolicyConfig =/= CurrentPolicyConfig, ShouldUpdate = NewPolicyConfig =/= CurrentPolicyConfig,
case ShouldUpdate of case ShouldUpdate of
true -> true ->
rabbit_log:debug("Re-applying policies to ~ts", [rabbit_misc:rs(amqqueue:get_name(Q))]), ?LOG_DEBUG("Re-applying policies to ~ts", [rabbit_misc:rs(amqqueue:get_name(Q))]),
policy_changed(Q), policy_changed(Q),
ok; ok;
false -> ok false -> ok
@ -782,7 +783,7 @@ recover(_Vhost, Queues) ->
{error, Err1} {error, Err1}
when Err1 == not_started orelse when Err1 == not_started orelse
Err1 == name_not_registered -> Err1 == name_not_registered ->
rabbit_log:warning("Quorum queue recovery: configured member of ~ts was not found on this node. Starting member as a new one. " ?LOG_WARNING("Quorum queue recovery: configured member of ~ts was not found on this node. Starting member as a new one. "
"Context: ~s", "Context: ~s",
[rabbit_misc:rs(QName), Err1]), [rabbit_misc:rs(QName), Err1]),
% queue was never started on this node % queue was never started on this node
@ -790,7 +791,7 @@ recover(_Vhost, Queues) ->
case start_server(make_ra_conf(Q0, ServerId)) of case start_server(make_ra_conf(Q0, ServerId)) of
ok -> ok; ok -> ok;
Err2 -> Err2 ->
rabbit_log:warning("recover: quorum queue ~w could not" ?LOG_WARNING("recover: quorum queue ~w could not"
" be started ~w", [Name, Err2]), " be started ~w", [Name, Err2]),
fail fail
end; end;
@ -801,7 +802,7 @@ recover(_Vhost, Queues) ->
ok; ok;
Err -> Err ->
%% catch all clause to avoid causing the vhost not to start %% catch all clause to avoid causing the vhost not to start
rabbit_log:warning("recover: quorum queue ~w could not be " ?LOG_WARNING("recover: quorum queue ~w could not be "
"restarted ~w", [Name, Err]), "restarted ~w", [Name, Err]),
fail fail
end, end,
@ -892,7 +893,7 @@ delete(Q, _IfUnused, _IfEmpty, ActingUser) when ?amqqueue_is_quorum(Q) ->
ok; ok;
false -> false ->
%% attempt forced deletion of all servers %% attempt forced deletion of all servers
rabbit_log:warning( ?LOG_WARNING(
"Could not delete quorum '~ts', not enough nodes " "Could not delete quorum '~ts', not enough nodes "
" online to reach a quorum: ~255p." " online to reach a quorum: ~255p."
" Attempting force delete.", " Attempting force delete.",
@ -913,7 +914,7 @@ force_delete_queue(Servers) ->
case catch(ra:force_delete_server(?RA_SYSTEM, S)) of case catch(ra:force_delete_server(?RA_SYSTEM, S)) of
ok -> ok; ok -> ok;
Err -> Err ->
rabbit_log:warning( ?LOG_WARNING(
"Force delete of ~w failed with: ~w" "Force delete of ~w failed with: ~w"
"This may require manual data clean up", "This may require manual data clean up",
[S, Err]), [S, Err]),
@ -1212,7 +1213,7 @@ policy_changed(Q) ->
ok; ok;
Err -> Err ->
FormattedQueueName = rabbit_misc:rs(amqqueue:get_name(Q)), FormattedQueueName = rabbit_misc:rs(amqqueue:get_name(Q)),
rabbit_log:warning("~s: policy may not have been successfully applied. Error: ~p", ?LOG_WARNING("~s: policy may not have been successfully applied. Error: ~p",
[FormattedQueueName, Err]), [FormattedQueueName, Err]),
ok ok
end. end.
@ -1330,7 +1331,7 @@ add_member(VHost, Name, Node, Membership, Timeout)
is_binary(Name) andalso is_binary(Name) andalso
is_atom(Node) -> is_atom(Node) ->
QName = #resource{virtual_host = VHost, name = Name, kind = queue}, QName = #resource{virtual_host = VHost, name = Name, kind = queue},
rabbit_log:debug("Asked to add a replica for queue ~ts on node ~ts", ?LOG_DEBUG("Asked to add a replica for queue ~ts on node ~ts",
[rabbit_misc:rs(QName), Node]), [rabbit_misc:rs(QName), Node]),
case rabbit_amqqueue:lookup(QName) of case rabbit_amqqueue:lookup(QName) of
{ok, Q} when ?amqqueue_is_classic(Q) -> {ok, Q} when ?amqqueue_is_classic(Q) ->
@ -1344,7 +1345,7 @@ add_member(VHost, Name, Node, Membership, Timeout)
case lists:member(Node, QNodes) of case lists:member(Node, QNodes) of
true -> true ->
%% idempotent by design %% idempotent by design
rabbit_log:debug("Quorum ~ts already has a replica on node ~ts", ?LOG_DEBUG("Quorum ~ts already has a replica on node ~ts",
[rabbit_misc:rs(QName), Node]), [rabbit_misc:rs(QName), Node]),
ok; ok;
false -> false ->
@ -1412,7 +1413,7 @@ do_add_member(Q, Node, Membership, Timeout)
{erlang, is_list, []}, {erlang, is_list, []},
#{condition => {applied, {RaIndex, RaTerm}}}), #{condition => {applied, {RaIndex, RaTerm}}}),
_ = rabbit_amqqueue:update(QName, Fun), _ = rabbit_amqqueue:update(QName, Fun),
rabbit_log:info("Added a replica of quorum ~ts on node ~ts", [rabbit_misc:rs(QName), Node]), ?LOG_INFO("Added a replica of quorum ~ts on node ~ts", [rabbit_misc:rs(QName), Node]),
ok; ok;
{timeout, _} -> {timeout, _} ->
_ = ra:force_delete_server(?RA_SYSTEM, ServerId), _ = ra:force_delete_server(?RA_SYSTEM, ServerId),
@ -1423,7 +1424,7 @@ do_add_member(Q, Node, Membership, Timeout)
E E
end; end;
E -> E ->
rabbit_log:warning("Could not add a replica of quorum ~ts on node ~ts: ~p", ?LOG_WARNING("Could not add a replica of quorum ~ts on node ~ts: ~p",
[rabbit_misc:rs(QName), Node, E]), [rabbit_misc:rs(QName), Node, E]),
E E
end. end.
@ -1474,7 +1475,7 @@ delete_member(Q, Node) when ?amqqueue_is_quorum(Q) ->
_ = rabbit_amqqueue:update(QName, Fun), _ = rabbit_amqqueue:update(QName, Fun),
case ra:force_delete_server(?RA_SYSTEM, ServerId) of case ra:force_delete_server(?RA_SYSTEM, ServerId) of
ok -> ok ->
rabbit_log:info("Deleted a replica of quorum ~ts on node ~ts", [rabbit_misc:rs(QName), Node]), ?LOG_INFO("Deleted a replica of quorum ~ts on node ~ts", [rabbit_misc:rs(QName), Node]),
ok; ok;
{error, {badrpc, nodedown}} -> {error, {badrpc, nodedown}} ->
ok; ok;
@ -1497,10 +1498,10 @@ delete_member(Q, Node) when ?amqqueue_is_quorum(Q) ->
[{rabbit_amqqueue:name(), [{rabbit_amqqueue:name(),
{ok, pos_integer()} | {error, pos_integer(), term()}}]. {ok, pos_integer()} | {error, pos_integer(), term()}}].
shrink_all(Node) -> shrink_all(Node) ->
rabbit_log:info("Asked to remove all quorum queue replicas from node ~ts", [Node]), ?LOG_INFO("Asked to remove all quorum queue replicas from node ~ts", [Node]),
[begin [begin
QName = amqqueue:get_name(Q), QName = amqqueue:get_name(Q),
rabbit_log:info("~ts: removing member (replica) on node ~w", ?LOG_INFO("~ts: removing member (replica) on node ~w",
[rabbit_misc:rs(QName), Node]), [rabbit_misc:rs(QName), Node]),
Size = length(get_nodes(Q)), Size = length(get_nodes(Q)),
case delete_member(Q, Node) of case delete_member(Q, Node) of
@ -1510,7 +1511,7 @@ shrink_all(Node) ->
%% this could be timing related and due to a new leader just being %% this could be timing related and due to a new leader just being
%% elected but it's noop command not been committed yet. %% elected but it's noop command not been committed yet.
%% lets sleep and retry once %% lets sleep and retry once
rabbit_log:info("~ts: failed to remove member (replica) on node ~w " ?LOG_INFO("~ts: failed to remove member (replica) on node ~w "
"as cluster change is not permitted. " "as cluster change is not permitted. "
"retrying once in 500ms", "retrying once in 500ms",
[rabbit_misc:rs(QName), Node]), [rabbit_misc:rs(QName), Node]),
@ -1519,12 +1520,12 @@ shrink_all(Node) ->
ok -> ok ->
{QName, {ok, Size-1}}; {QName, {ok, Size-1}};
{error, Err} -> {error, Err} ->
rabbit_log:warning("~ts: failed to remove member (replica) on node ~w, error: ~w", ?LOG_WARNING("~ts: failed to remove member (replica) on node ~w, error: ~w",
[rabbit_misc:rs(QName), Node, Err]), [rabbit_misc:rs(QName), Node, Err]),
{QName, {error, Size, Err}} {QName, {error, Size, Err}}
end; end;
{error, Err} -> {error, Err} ->
rabbit_log:warning("~ts: failed to remove member (replica) on node ~w, error: ~w", ?LOG_WARNING("~ts: failed to remove member (replica) on node ~w, error: ~w",
[rabbit_misc:rs(QName), Node, Err]), [rabbit_misc:rs(QName), Node, Err]),
{QName, {error, Size, Err}} {QName, {error, Size, Err}}
end end
@ -1544,13 +1545,13 @@ grow(Node, VhostSpec, QueueSpec, Strategy, Membership) ->
[begin [begin
Size = length(get_nodes(Q)), Size = length(get_nodes(Q)),
QName = amqqueue:get_name(Q), QName = amqqueue:get_name(Q),
rabbit_log:info("~ts: adding a new member (replica) on node ~w", ?LOG_INFO("~ts: adding a new member (replica) on node ~w",
[rabbit_misc:rs(QName), Node]), [rabbit_misc:rs(QName), Node]),
case add_member(Q, Node, Membership) of case add_member(Q, Node, Membership) of
ok -> ok ->
{QName, {ok, Size + 1}}; {QName, {ok, Size + 1}};
{error, Err} -> {error, Err} ->
rabbit_log:warning( ?LOG_WARNING(
"~ts: failed to add member (replica) on node ~w, error: ~w", "~ts: failed to add member (replica) on node ~w, error: ~w",
[rabbit_misc:rs(QName), Node, Err]), [rabbit_misc:rs(QName), Node, Err]),
{QName, {error, Size, Err}} {QName, {error, Size, Err}}
@ -1637,19 +1638,19 @@ dead_letter_handler(Q, Overflow) ->
dlh(undefined, undefined, undefined, _, _) -> dlh(undefined, undefined, undefined, _, _) ->
undefined; undefined;
dlh(undefined, RoutingKey, undefined, _, QName) -> dlh(undefined, RoutingKey, undefined, _, QName) ->
rabbit_log:warning("Disabling dead-lettering for ~ts despite configured dead-letter-routing-key '~ts' " ?LOG_WARNING("Disabling dead-lettering for ~ts despite configured dead-letter-routing-key '~ts' "
"because dead-letter-exchange is not configured.", "because dead-letter-exchange is not configured.",
[rabbit_misc:rs(QName), RoutingKey]), [rabbit_misc:rs(QName), RoutingKey]),
undefined; undefined;
dlh(undefined, _, Strategy, _, QName) -> dlh(undefined, _, Strategy, _, QName) ->
rabbit_log:warning("Disabling dead-lettering for ~ts despite configured dead-letter-strategy '~ts' " ?LOG_WARNING("Disabling dead-lettering for ~ts despite configured dead-letter-strategy '~ts' "
"because dead-letter-exchange is not configured.", "because dead-letter-exchange is not configured.",
[rabbit_misc:rs(QName), Strategy]), [rabbit_misc:rs(QName), Strategy]),
undefined; undefined;
dlh(_, _, <<"at-least-once">>, reject_publish, _) -> dlh(_, _, <<"at-least-once">>, reject_publish, _) ->
at_least_once; at_least_once;
dlh(Exchange, RoutingKey, <<"at-least-once">>, drop_head, QName) -> dlh(Exchange, RoutingKey, <<"at-least-once">>, drop_head, QName) ->
rabbit_log:warning("Falling back to dead-letter-strategy at-most-once for ~ts " ?LOG_WARNING("Falling back to dead-letter-strategy at-most-once for ~ts "
"because configured dead-letter-strategy at-least-once is incompatible with " "because configured dead-letter-strategy at-least-once is incompatible with "
"effective overflow strategy drop-head. To enable dead-letter-strategy " "effective overflow strategy drop-head. To enable dead-letter-strategy "
"at-least-once, set overflow strategy to reject-publish.", "at-least-once, set overflow strategy to reject-publish.",
@ -2020,7 +2021,7 @@ overflow(undefined, Def, _QName) -> Def;
overflow(<<"reject-publish">>, _Def, _QName) -> reject_publish; overflow(<<"reject-publish">>, _Def, _QName) -> reject_publish;
overflow(<<"drop-head">>, _Def, _QName) -> drop_head; overflow(<<"drop-head">>, _Def, _QName) -> drop_head;
overflow(<<"reject-publish-dlx">> = V, Def, QName) -> overflow(<<"reject-publish-dlx">> = V, Def, QName) ->
rabbit_log:warning("Invalid overflow strategy ~tp for quorum queue: ~ts", ?LOG_WARNING("Invalid overflow strategy ~tp for quorum queue: ~ts",
[V, rabbit_misc:rs(QName)]), [V, rabbit_misc:rs(QName)]),
Def. Def.
@ -2059,7 +2060,7 @@ force_shrink_member_to_current_member(VHost, Name) ->
Node = node(), Node = node(),
QName = rabbit_misc:r(VHost, queue, Name), QName = rabbit_misc:r(VHost, queue, Name),
QNameFmt = rabbit_misc:rs(QName), QNameFmt = rabbit_misc:rs(QName),
rabbit_log:warning("Shrinking ~ts to a single node: ~ts", [QNameFmt, Node]), ?LOG_WARNING("Shrinking ~ts to a single node: ~ts", [QNameFmt, Node]),
case rabbit_amqqueue:lookup(QName) of case rabbit_amqqueue:lookup(QName) of
{ok, Q} when ?is_amqqueue(Q) -> {ok, Q} when ?is_amqqueue(Q) ->
{RaName, _} = amqqueue:get_pid(Q), {RaName, _} = amqqueue:get_pid(Q),
@ -2072,19 +2073,19 @@ force_shrink_member_to_current_member(VHost, Name) ->
end, end,
_ = rabbit_amqqueue:update(QName, Fun), _ = rabbit_amqqueue:update(QName, Fun),
_ = [ra:force_delete_server(?RA_SYSTEM, {RaName, N}) || N <- OtherNodes], _ = [ra:force_delete_server(?RA_SYSTEM, {RaName, N}) || N <- OtherNodes],
rabbit_log:warning("Shrinking ~ts finished", [QNameFmt]); ?LOG_WARNING("Shrinking ~ts finished", [QNameFmt]);
_ -> _ ->
rabbit_log:warning("Shrinking failed, ~ts not found", [QNameFmt]), ?LOG_WARNING("Shrinking failed, ~ts not found", [QNameFmt]),
{error, not_found} {error, not_found}
end. end.
force_vhost_queues_shrink_member_to_current_member(VHost) when is_binary(VHost) -> force_vhost_queues_shrink_member_to_current_member(VHost) when is_binary(VHost) ->
rabbit_log:warning("Shrinking all quorum queues in vhost '~ts' to a single node: ~ts", [VHost, node()]), ?LOG_WARNING("Shrinking all quorum queues in vhost '~ts' to a single node: ~ts", [VHost, node()]),
ListQQs = fun() -> rabbit_amqqueue:list(VHost) end, ListQQs = fun() -> rabbit_amqqueue:list(VHost) end,
force_all_queues_shrink_member_to_current_member(ListQQs). force_all_queues_shrink_member_to_current_member(ListQQs).
force_all_queues_shrink_member_to_current_member() -> force_all_queues_shrink_member_to_current_member() ->
rabbit_log:warning("Shrinking all quorum queues to a single node: ~ts", [node()]), ?LOG_WARNING("Shrinking all quorum queues to a single node: ~ts", [node()]),
ListQQs = fun() -> rabbit_amqqueue:list() end, ListQQs = fun() -> rabbit_amqqueue:list() end,
force_all_queues_shrink_member_to_current_member(ListQQs). force_all_queues_shrink_member_to_current_member(ListQQs).
@ -2094,7 +2095,7 @@ force_all_queues_shrink_member_to_current_member(ListQQFun) when is_function(Lis
QName = amqqueue:get_name(Q), QName = amqqueue:get_name(Q),
{RaName, _} = amqqueue:get_pid(Q), {RaName, _} = amqqueue:get_pid(Q),
OtherNodes = lists:delete(Node, get_nodes(Q)), OtherNodes = lists:delete(Node, get_nodes(Q)),
rabbit_log:warning("Shrinking queue ~ts to a single node: ~ts", [rabbit_misc:rs(QName), Node]), ?LOG_WARNING("Shrinking queue ~ts to a single node: ~ts", [rabbit_misc:rs(QName), Node]),
ok = ra_server_proc:force_shrink_members_to_current_member({RaName, Node}), ok = ra_server_proc:force_shrink_members_to_current_member({RaName, Node}),
Fun = fun (QQ) -> Fun = fun (QQ) ->
TS0 = amqqueue:get_type_state(QQ), TS0 = amqqueue:get_type_state(QQ),
@ -2104,7 +2105,7 @@ force_all_queues_shrink_member_to_current_member(ListQQFun) when is_function(Lis
_ = rabbit_amqqueue:update(QName, Fun), _ = rabbit_amqqueue:update(QName, Fun),
_ = [ra:force_delete_server(?RA_SYSTEM, {RaName, N}) || N <- OtherNodes] _ = [ra:force_delete_server(?RA_SYSTEM, {RaName, N}) || N <- OtherNodes]
end || Q <- ListQQFun(), amqqueue:get_type(Q) == ?MODULE], end || Q <- ListQQFun(), amqqueue:get_type(Q) == ?MODULE],
rabbit_log:warning("Shrinking finished"), ?LOG_WARNING("Shrinking finished"),
ok. ok.
force_checkpoint_on_queue(QName) -> force_checkpoint_on_queue(QName) ->
@ -2114,7 +2115,7 @@ force_checkpoint_on_queue(QName) ->
{error, classic_queue_not_supported}; {error, classic_queue_not_supported};
{ok, Q} when ?amqqueue_is_quorum(Q) -> {ok, Q} when ?amqqueue_is_quorum(Q) ->
{RaName, _} = amqqueue:get_pid(Q), {RaName, _} = amqqueue:get_pid(Q),
rabbit_log:debug("Sending command to force ~ts to take a checkpoint", [QNameFmt]), ?LOG_DEBUG("Sending command to force ~ts to take a checkpoint", [QNameFmt]),
Nodes = amqqueue:get_nodes(Q), Nodes = amqqueue:get_nodes(Q),
_ = [ra:cast_aux_command({RaName, Node}, force_checkpoint) _ = [ra:cast_aux_command({RaName, Node}, force_checkpoint)
|| Node <- Nodes], || Node <- Nodes],
@ -2132,7 +2133,7 @@ force_checkpoint(VhostSpec, QueueSpec) ->
ok -> ok ->
{QName, {ok}}; {QName, {ok}};
{error, Err} -> {error, Err} ->
rabbit_log:warning("~ts: failed to force checkpoint, error: ~w", ?LOG_WARNING("~ts: failed to force checkpoint, error: ~w",
[rabbit_misc:rs(QName), Err]), [rabbit_misc:rs(QName), Err]),
{QName, {error, Err}} {QName, {error, Err}}
end end
@ -2264,7 +2265,7 @@ wait_for_leader_health_checks(Ref, N, UnhealthyAcc) ->
check_process_limit_safety(QCount, ProcessLimitThreshold) -> check_process_limit_safety(QCount, ProcessLimitThreshold) ->
case (erlang:system_info(process_count) + QCount) >= ProcessLimitThreshold of case (erlang:system_info(process_count) + QCount) >= ProcessLimitThreshold of
true -> true ->
rabbit_log:warning("Leader health check not permitted, process limit threshold will be exceeded."), ?LOG_WARNING("Leader health check not permitted, process limit threshold will be exceeded."),
throw({error, leader_health_check_process_limit_exceeded}); throw({error, leader_health_check_process_limit_exceeded});
false -> false ->
ok ok
@ -2273,4 +2274,4 @@ check_process_limit_safety(QCount, ProcessLimitThreshold) ->
maybe_log_leader_health_check_result([]) -> ok; maybe_log_leader_health_check_result([]) -> ok;
maybe_log_leader_health_check_result(Result) -> maybe_log_leader_health_check_result(Result) ->
Qs = lists:map(fun(R) -> catch maps:get(<<"readable_name">>, R) end, Result), Qs = lists:map(fun(R) -> catch maps:get(<<"readable_name">>, R) end, Result),
rabbit_log:warning("Leader health check result (unhealthy leaders detected): ~tp", [Qs]). ?LOG_WARNING("Leader health check result (unhealthy leaders detected): ~tp", [Qs]).
@ -43,6 +43,7 @@
-include_lib("rabbit_common/include/rabbit_framing.hrl"). -include_lib("rabbit_common/include/rabbit_framing.hrl").
-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl").
-include("rabbit_amqp_metrics.hrl"). -include("rabbit_amqp_metrics.hrl").
-include_lib("kernel/include/logger.hrl").
-export([start_link/2, info/2, force_event_refresh/2, -export([start_link/2, info/2, force_event_refresh/2,
shutdown/2]). shutdown/2]).
@ -1363,7 +1364,7 @@ handle_method0(#'connection.update_secret'{new_secret = NewSecret, reason = Reas
%% Any secret update errors coming from the authz backend will be handled in the other branch. %% Any secret update errors coming from the authz backend will be handled in the other branch.
%% Therefore we optimistically do no error handling here. MK. %% Therefore we optimistically do no error handling here. MK.
lists:foreach(fun(Ch) -> lists:foreach(fun(Ch) ->
rabbit_log:debug("Updating user/auth backend state for channel ~tp", [Ch]), ?LOG_DEBUG("Updating user/auth backend state for channel ~tp", [Ch]),
_ = rabbit_channel:update_user_state(Ch, User1) _ = rabbit_channel:update_user_state(Ch, User1)
end, all_channels()), end, all_channels()),
ok = send_on_channel0(Sock, #'connection.update_secret_ok'{}, Protocol), ok = send_on_channel0(Sock, #'connection.update_secret_ok'{}, Protocol),
@ -1505,7 +1506,7 @@ auth_phase(Response,
auth_state = AuthState, auth_state = AuthState,
host = RemoteAddress}, host = RemoteAddress},
sock = Sock}) -> sock = Sock}) ->
rabbit_log:debug("Client address during authN phase: ~tp", [RemoteAddress]), ?LOG_DEBUG("Client address during authN phase: ~tp", [RemoteAddress]),
case AuthMechanism:handle_response(Response, AuthState) of case AuthMechanism:handle_response(Response, AuthState) of
{refused, Username, Msg, Args} -> {refused, Username, Msg, Args} ->
rabbit_core_metrics:auth_attempt_failed(RemoteAddress, Username, amqp091), rabbit_core_metrics:auth_attempt_failed(RemoteAddress, Username, amqp091),
@ -19,6 +19,7 @@
terminate/2, code_change/3]). terminate/2, code_change/3]).
-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
%%---------------------------------------------------------------------------- %%----------------------------------------------------------------------------
@ -36,7 +37,7 @@ start(VHost) ->
%% we can get here if a vhost is added and removed concurrently %% we can get here if a vhost is added and removed concurrently
%% e.g. some integration tests do it %% e.g. some integration tests do it
{error, {no_such_vhost, VHost}} -> {error, {no_such_vhost, VHost}} ->
rabbit_log:error("Failed to start a recovery terms manager for vhost ~ts: vhost no longer exists!", ?LOG_ERROR("Failed to start a recovery terms manager for vhost ~ts: vhost no longer exists!",
[VHost]), [VHost]),
{error, {no_such_vhost, VHost}} {error, {no_such_vhost, VHost}}
end. end.
@ -52,7 +53,7 @@ stop(VHost) ->
end; end;
%% see start/1 %% see start/1
{error, {no_such_vhost, VHost}} -> {error, {no_such_vhost, VHost}} ->
rabbit_log:error("Failed to stop a recovery terms manager for vhost ~ts: vhost no longer exists!", ?LOG_ERROR("Failed to stop a recovery terms manager for vhost ~ts: vhost no longer exists!",
[VHost]), [VHost]),
ok ok
@ -81,7 +82,7 @@ clear(VHost) ->
ok ok
%% see start/1 %% see start/1
catch _:badarg -> catch _:badarg ->
rabbit_log:error("Failed to clear recovery terms for vhost ~ts: table no longer exists!", ?LOG_ERROR("Failed to clear recovery terms for vhost ~ts: table no longer exists!",
[VHost]), [VHost]),
ok ok
end, end,
@ -138,7 +139,7 @@ open_table(VHost, RamFile, RetriesLeft) ->
_ = file:delete(File), _ = file:delete(File),
%% Wait before retrying %% Wait before retrying
DelayInMs = 1000, DelayInMs = 1000,
rabbit_log:warning("Failed to open a recovery terms DETS file at ~tp. Will delete it and retry in ~tp ms (~tp retries left)", ?LOG_WARNING("Failed to open a recovery terms DETS file at ~tp. Will delete it and retry in ~tp ms (~tp retries left)",
[File, DelayInMs, RetriesLeft]), [File, DelayInMs, RetriesLeft]),
timer:sleep(DelayInMs), timer:sleep(DelayInMs),
open_table(VHost, RamFile, RetriesLeft - 1) open_table(VHost, RamFile, RetriesLeft - 1)
@ -152,7 +153,7 @@ flush(VHost) ->
dets:sync(VHost) dets:sync(VHost)
%% see clear/1 %% see clear/1
catch _:badarg -> catch _:badarg ->
rabbit_log:error("Failed to sync recovery terms table for vhost ~ts: the table no longer exists!", ?LOG_ERROR("Failed to sync recovery terms table for vhost ~ts: the table no longer exists!",
[VHost]), [VHost]),
ok ok
end. end.
@ -165,7 +166,7 @@ close_table(VHost) ->
ok = dets:close(VHost) ok = dets:close(VHost)
%% see clear/1 %% see clear/1
catch _:badarg -> catch _:badarg ->
rabbit_log:error("Failed to close recovery terms table for vhost ~ts: the table no longer exists!", ?LOG_ERROR("Failed to close recovery terms table for vhost ~ts: the table no longer exists!",
[VHost]), [VHost]),
ok ok
end. end.
@ -41,6 +41,7 @@
%% * rabbit_event %% * rabbit_event
-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-export([parse_set/5, set/5, set_any/5, clear/4, clear_any/4, list/0, list/1, -export([parse_set/5, set/5, set_any/5, clear/4, clear_any/4, list/0, list/1,
list_component/1, list/2, list_formatted/1, list_formatted/3, list_component/1, list/2, list_formatted/1, list_formatted/3,
@ -104,7 +105,7 @@ parse_set_global(Name, String, ActingUser) ->
set_global(Name, Term, ActingUser) -> set_global(Name, Term, ActingUser) ->
NameAsAtom = rabbit_data_coercion:to_atom(Name), NameAsAtom = rabbit_data_coercion:to_atom(Name),
rabbit_log:debug("Setting global parameter '~ts' to ~tp", [NameAsAtom, Term]), ?LOG_DEBUG("Setting global parameter '~ts' to ~tp", [NameAsAtom, Term]),
_ = rabbit_db_rtparams:set(NameAsAtom, Term), _ = rabbit_db_rtparams:set(NameAsAtom, Term),
event_notify(parameter_set, none, global, [{name, NameAsAtom}, event_notify(parameter_set, none, global, [{name, NameAsAtom},
{value, Term}, {value, Term},
@ -125,7 +126,7 @@ set_any(VHost, Component, Name, Term, User) ->
end. end.
set_any0(VHost, Component, Name, Term, User) -> set_any0(VHost, Component, Name, Term, User) ->
rabbit_log:debug("Asked to set or update runtime parameter '~ts' in vhost '~ts' " ?LOG_DEBUG("Asked to set or update runtime parameter '~ts' in vhost '~ts' "
"for component '~ts', value: ~tp", "for component '~ts', value: ~tp",
[Name, VHost, Component, Term]), [Name, VHost, Component, Term]),
case lookup_component(Component) of case lookup_component(Component) of
@ -168,7 +169,7 @@ is_within_limit(Component) ->
false -> false ->
ErrorMsg = "Limit reached: component ~ts is limited to ~tp", ErrorMsg = "Limit reached: component ~ts is limited to ~tp",
ErrorArgs = [Component, Limit], ErrorArgs = [Component, Limit],
rabbit_log:error(ErrorMsg, ErrorArgs), ?LOG_ERROR(ErrorMsg, ErrorArgs),
{errors, [{"component ~ts is limited to ~tp", [Component, Limit]}]} {errors, [{"component ~ts is limited to ~tp", [Component, Limit]}]}
end. end.
@ -8,6 +8,7 @@
-module(rabbit_ssl). -module(rabbit_ssl).
-include_lib("public_key/include/public_key.hrl"). -include_lib("public_key/include/public_key.hrl").
-include_lib("kernel/include/logger.hrl").
-export([peer_cert_issuer/1, peer_cert_subject/1, peer_cert_validity/1]). -export([peer_cert_issuer/1, peer_cert_subject/1, peer_cert_validity/1]).
-export([peer_cert_subject_items/2, peer_cert_auth_name/1, peer_cert_auth_name/2]). -export([peer_cert_subject_items/2, peer_cert_auth_name/1, peer_cert_auth_name/2]).
@ -161,7 +162,7 @@ peer_cert_auth_name({subject_alternative_name, Type, Index0}, Cert) ->
%% lists:nth/2 is 1-based %% lists:nth/2 is 1-based
Index = Index0 + 1, Index = Index0 + 1,
OfType = peer_cert_subject_alternative_names(Cert, otp_san_type(Type)), OfType = peer_cert_subject_alternative_names(Cert, otp_san_type(Type)),
rabbit_log:debug("Peer certificate SANs of type ~ts: ~tp, index to use with lists:nth/2: ~b", [Type, OfType, Index]), ?LOG_DEBUG("Peer certificate SANs of type ~ts: ~tp, index to use with lists:nth/2: ~b", [Type, OfType, Index]),
case length(OfType) of case length(OfType) of
0 -> not_found; 0 -> not_found;
N when N < Index -> not_found; N when N < Index -> not_found;
@ -198,7 +199,7 @@ auth_config_sane() ->
{ok, Opts} = application:get_env(rabbit, ssl_options), {ok, Opts} = application:get_env(rabbit, ssl_options),
case proplists:get_value(verify, Opts) of case proplists:get_value(verify, Opts) of
verify_peer -> true; verify_peer -> true;
V -> rabbit_log:warning("TLS peer verification (authentication) is " V -> ?LOG_WARNING("TLS peer verification (authentication) is "
"disabled, ssl_options.verify value used: ~tp. " "disabled, ssl_options.verify value used: ~tp. "
"See https://www.rabbitmq.com/docs/ssl#peer-verification to learn more.", [V]), "See https://www.rabbitmq.com/docs/ssl#peer-verification to learn more.", [V]),
false false
@ -83,6 +83,7 @@
-include("rabbit_stream_coordinator.hrl"). -include("rabbit_stream_coordinator.hrl").
-include("amqqueue.hrl"). -include("amqqueue.hrl").
-include_lib("kernel/include/logger.hrl").
-define(REPLICA_FRESHNESS_LIMIT_MS, 10 * 1000). %% 10s -define(REPLICA_FRESHNESS_LIMIT_MS, 10 * 1000). %% 10s
-define(V2_OR_MORE(Vsn), Vsn >= 2). -define(V2_OR_MORE(Vsn), Vsn >= 2).
@ -174,7 +175,7 @@ restart_stream(QRes, Options)
restart_stream(Q, Options) restart_stream(Q, Options)
when ?is_amqqueue(Q) andalso when ?is_amqqueue(Q) andalso
?amqqueue_is_stream(Q) -> ?amqqueue_is_stream(Q) ->
rabbit_log:info("restarting stream ~s in vhost ~s with options ~p", ?LOG_INFO("restarting stream ~s in vhost ~s with options ~p",
[maps:get(name, amqqueue:get_type_state(Q)), amqqueue:get_vhost(Q), Options]), [maps:get(name, amqqueue:get_type_state(Q)), amqqueue:get_vhost(Q), Options]),
#{name := StreamId} = amqqueue:get_type_state(Q), #{name := StreamId} = amqqueue:get_type_state(Q),
case process_command({restart_stream, StreamId, Options}) of case process_command({restart_stream, StreamId, Options}) of
@ -217,7 +218,7 @@ add_replica(Q, Node) when ?is_amqqueue(Q) ->
{error, {disallowed, out_of_sync_replica}}; {error, {disallowed, out_of_sync_replica}};
false -> false ->
Name = rabbit_misc:rs(amqqueue:get_name(Q)), Name = rabbit_misc:rs(amqqueue:get_name(Q)),
rabbit_log:info("~ts : adding replica ~ts to ~ts Replication State: ~w", ?LOG_INFO("~ts : adding replica ~ts to ~ts Replication State: ~w",
[?MODULE, Node, Name, ReplState0]), [?MODULE, Node, Name, ReplState0]),
StreamId = maps:get(name, amqqueue:get_type_state(Q)), StreamId = maps:get(name, amqqueue:get_type_state(Q)),
case process_command({add_replica, StreamId, #{node => Node}}) of case process_command({add_replica, StreamId, #{node => Node}}) of
@ -444,7 +445,7 @@ process_command([Server | Servers], Cmd) ->
_ -> _ ->
element(1, Cmd) element(1, Cmd)
end, end,
rabbit_log:warning("Coordinator timeout on server ~w when processing command ~W", ?LOG_WARNING("Coordinator timeout on server ~w when processing command ~W",
[element(2, Server), CmdLabel, 10]), [element(2, Server), CmdLabel, 10]),
process_command(Servers, Cmd); process_command(Servers, Cmd);
{error, noproc} -> {error, noproc} ->
@ -516,17 +517,17 @@ start_coordinator_cluster() ->
Versions = [V || {ok, V} <- erpc:multicall(Nodes, Versions = [V || {ok, V} <- erpc:multicall(Nodes,
?MODULE, version, [])], ?MODULE, version, [])],
MinVersion = lists:min([version() | Versions]), MinVersion = lists:min([version() | Versions]),
rabbit_log:debug("Starting stream coordinator on nodes: ~w, " ?LOG_DEBUG("Starting stream coordinator on nodes: ~w, "
"initial machine version ~b", "initial machine version ~b",
[Nodes, MinVersion]), [Nodes, MinVersion]),
case ra:start_cluster(?RA_SYSTEM, case ra:start_cluster(?RA_SYSTEM,
[make_ra_conf(Node, Nodes, MinVersion) [make_ra_conf(Node, Nodes, MinVersion)
|| Node <- Nodes]) of || Node <- Nodes]) of
{ok, Started, _} -> {ok, Started, _} ->
rabbit_log:debug("Started stream coordinator on ~w", [Started]), ?LOG_DEBUG("Started stream coordinator on ~w", [Started]),
Started; Started;
{error, cluster_not_formed} -> {error, cluster_not_formed} ->
rabbit_log:warning("Stream coordinator could not be started on nodes ~w", ?LOG_WARNING("Stream coordinator could not be started on nodes ~w",
[Nodes]), [Nodes]),
[] []
end. end.
@ -740,7 +741,7 @@ apply(Meta, {nodeup, Node} = Cmd,
streams = Streams, streams = Streams,
single_active_consumer = Sac1}, ok, Effects2); single_active_consumer = Sac1}, ok, Effects2);
apply(Meta, {machine_version, From, To}, State0) -> apply(Meta, {machine_version, From, To}, State0) ->
rabbit_log:info("Stream coordinator machine version changes from ~tp to ~tp, " ?LOG_INFO("Stream coordinator machine version changes from ~tp to ~tp, "
++ "applying incremental upgrade.", [From, To]), ++ "applying incremental upgrade.", [From, To]),
%% RA applies machine upgrades from any version to any version, e.g. 0 -> 2. %% RA applies machine upgrades from any version to any version, e.g. 0 -> 2.
%% We fill in the gaps here, applying all 1-to-1 machine upgrades. %% We fill in the gaps here, applying all 1-to-1 machine upgrades.
@ -756,7 +757,7 @@ apply(Meta, {timeout, {sac, node_disconnected, #{connection_pid := Pid}}},
return(Meta, State0#?MODULE{single_active_consumer = SacState1}, ok, return(Meta, State0#?MODULE{single_active_consumer = SacState1}, ok,
Effects); Effects);
apply(Meta, UnkCmd, State) -> apply(Meta, UnkCmd, State) ->
rabbit_log:debug("~ts: unknown command ~W", ?LOG_DEBUG("~ts: unknown command ~W",
[?MODULE, UnkCmd, 10]), [?MODULE, UnkCmd, 10]),
return(Meta, State, {error, unknown_command}, []). return(Meta, State, {error, unknown_command}, []).
@ -842,7 +843,7 @@ maybe_resize_coordinator_cluster(LeaderPid, SacNodes, MachineVersion) ->
[New | _] -> [New | _] ->
%% any remaining members will be added %% any remaining members will be added
%% next tick %% next tick
rabbit_log:info("~ts: New rabbit node(s) detected, " ?LOG_INFO("~ts: New rabbit node(s) detected, "
"adding : ~w", "adding : ~w",
[?MODULE, New]), [?MODULE, New]),
add_member(Members, New) add_member(Members, New)
@ -854,7 +855,7 @@ maybe_resize_coordinator_cluster(LeaderPid, SacNodes, MachineVersion) ->
%% this ought to be rather rare as the stream %% this ought to be rather rare as the stream
%% coordinator member is now removed as part %% coordinator member is now removed as part
%% of the forget_cluster_node command %% of the forget_cluster_node command
rabbit_log:info("~ts: Rabbit node(s) removed " ?LOG_INFO("~ts: Rabbit node(s) removed "
"from the cluster, " "from the cluster, "
"deleting: ~w", [?MODULE, Old]), "deleting: ~w", [?MODULE, Old]),
_ = remove_member(Leader, Members, Old), _ = remove_member(Leader, Members, Old),
@ -874,7 +875,7 @@ maybe_handle_stale_nodes(SacNodes, BrokerNodes,
[] -> [] ->
ok; ok;
Stale when length(BrokerNodes) > 0 -> Stale when length(BrokerNodes) > 0 ->
rabbit_log:debug("Stale nodes detected in stream SAC " ?LOG_DEBUG("Stale nodes detected in stream SAC "
"coordinator: ~w. Purging state.", "coordinator: ~w. Purging state.",
[Stale]), [Stale]),
ra:pipeline_command(LeaderPid, sac_make_purge_nodes(Stale)), ra:pipeline_command(LeaderPid, sac_make_purge_nodes(Stale)),
@ -903,14 +904,14 @@ add_member(Members, Node) ->
{ok, _, _} -> {ok, _, _} ->
ok; ok;
{error, Err} -> {error, Err} ->
rabbit_log:warning("~ts: Failed to add member, reason ~w" ?LOG_WARNING("~ts: Failed to add member, reason ~w"
"deleting started server on ~w", "deleting started server on ~w",
[?MODULE, Err, Node]), [?MODULE, Err, Node]),
case ra:force_delete_server(?RA_SYSTEM, ServerId) of case ra:force_delete_server(?RA_SYSTEM, ServerId) of
ok -> ok ->
ok; ok;
Err -> Err ->
rabbit_log:warning("~ts: Failed to delete server " ?LOG_WARNING("~ts: Failed to delete server "
"on ~w, reason ~w", "on ~w, reason ~w",
[?MODULE, Node, Err]), [?MODULE, Node, Err]),
ok ok
@ -926,7 +927,7 @@ add_member(Members, Node) ->
%% there is a server running but is not a member of the %% there is a server running but is not a member of the
%% stream coordinator cluster %% stream coordinator cluster
%% In this case it needs to be deleted %% In this case it needs to be deleted
rabbit_log:warning("~ts: server already running on ~w but not ?LOG_WARNING("~ts: server already running on ~w but not
part of cluster, " part of cluster, "
"deleting started server", "deleting started server",
[?MODULE, Node]), [?MODULE, Node]),
@ -934,14 +935,14 @@ add_member(Members, Node) ->
ok -> ok ->
ok; ok;
Err -> Err ->
rabbit_log:warning("~ts: Failed to delete server " ?LOG_WARNING("~ts: Failed to delete server "
"on ~w, reason ~w", "on ~w, reason ~w",
[?MODULE, Node, Err]), [?MODULE, Node, Err]),
ok ok
end end
end; end;
Error -> Error ->
rabbit_log:warning("Stream coordinator server failed to start on node ~ts : ~W", ?LOG_WARNING("Stream coordinator server failed to start on node ~ts : ~W",
[Node, Error, 10]), [Node, Error, 10]),
ok ok
end. end.
@ -983,7 +984,7 @@ handle_aux(leader, _, {down, Pid, _},
handle_aux(leader, _, {start_writer, StreamId, handle_aux(leader, _, {start_writer, StreamId,
#{epoch := Epoch, node := Node} = Args, Conf}, #{epoch := Epoch, node := Node} = Args, Conf},
Aux, RaAux) -> Aux, RaAux) ->
rabbit_log:debug("~ts: running action: 'start_writer'" ?LOG_DEBUG("~ts: running action: 'start_writer'"
" for ~ts on node ~w in epoch ~b", " for ~ts on node ~w in epoch ~b",
[?MODULE, StreamId, Node, Epoch]), [?MODULE, StreamId, Node, Epoch]),
ActionFun = phase_start_writer(StreamId, Args, Conf), ActionFun = phase_start_writer(StreamId, Args, Conf),
@ -991,7 +992,7 @@ handle_aux(leader, _, {start_writer, StreamId,
handle_aux(leader, _, {start_replica, StreamId, handle_aux(leader, _, {start_replica, StreamId,
#{epoch := Epoch, node := Node} = Args, Conf}, #{epoch := Epoch, node := Node} = Args, Conf},
Aux, RaAux) -> Aux, RaAux) ->
rabbit_log:debug("~ts: running action: 'start_replica'" ?LOG_DEBUG("~ts: running action: 'start_replica'"
" for ~ts on node ~w in epoch ~b", " for ~ts on node ~w in epoch ~b",
[?MODULE, StreamId, Node, Epoch]), [?MODULE, StreamId, Node, Epoch]),
ActionFun = phase_start_replica(StreamId, Args, Conf), ActionFun = phase_start_replica(StreamId, Args, Conf),
@ -999,26 +1000,26 @@ handle_aux(leader, _, {start_replica, StreamId,
handle_aux(leader, _, {stop, StreamId, #{node := Node, handle_aux(leader, _, {stop, StreamId, #{node := Node,
epoch := Epoch} = Args, Conf}, epoch := Epoch} = Args, Conf},
Aux, RaAux) -> Aux, RaAux) ->
rabbit_log:debug("~ts: running action: 'stop'" ?LOG_DEBUG("~ts: running action: 'stop'"
" for ~ts on node ~w in epoch ~b", " for ~ts on node ~w in epoch ~b",
[?MODULE, StreamId, Node, Epoch]), [?MODULE, StreamId, Node, Epoch]),
ActionFun = phase_stop_member(StreamId, Args, Conf), ActionFun = phase_stop_member(StreamId, Args, Conf),
run_action(stopping, StreamId, Args, ActionFun, Aux, RaAux); run_action(stopping, StreamId, Args, ActionFun, Aux, RaAux);
handle_aux(leader, _, {update_mnesia, StreamId, Args, Conf}, handle_aux(leader, _, {update_mnesia, StreamId, Args, Conf},
#aux{actions = _Monitors} = Aux, RaAux) -> #aux{actions = _Monitors} = Aux, RaAux) ->
rabbit_log:debug("~ts: running action: 'update_mnesia'" ?LOG_DEBUG("~ts: running action: 'update_mnesia'"
" for ~ts", [?MODULE, StreamId]), " for ~ts", [?MODULE, StreamId]),
ActionFun = phase_update_mnesia(StreamId, Args, Conf), ActionFun = phase_update_mnesia(StreamId, Args, Conf),
run_action(updating_mnesia, StreamId, Args, ActionFun, Aux, RaAux); run_action(updating_mnesia, StreamId, Args, ActionFun, Aux, RaAux);
handle_aux(leader, _, {update_retention, StreamId, Args, _Conf}, handle_aux(leader, _, {update_retention, StreamId, Args, _Conf},
#aux{actions = _Monitors} = Aux, RaAux) -> #aux{actions = _Monitors} = Aux, RaAux) ->
rabbit_log:debug("~ts: running action: 'update_retention'" ?LOG_DEBUG("~ts: running action: 'update_retention'"
" for ~ts", [?MODULE, StreamId]), " for ~ts", [?MODULE, StreamId]),
ActionFun = phase_update_retention(StreamId, Args), ActionFun = phase_update_retention(StreamId, Args),
run_action(update_retention, StreamId, Args, ActionFun, Aux, RaAux); run_action(update_retention, StreamId, Args, ActionFun, Aux, RaAux);
handle_aux(leader, _, {delete_member, StreamId, #{node := Node} = Args, Conf}, handle_aux(leader, _, {delete_member, StreamId, #{node := Node} = Args, Conf},
#aux{actions = _Monitors} = Aux, RaAux) -> #aux{actions = _Monitors} = Aux, RaAux) ->
rabbit_log:debug("~ts: running action: 'delete_member'" ?LOG_DEBUG("~ts: running action: 'delete_member'"
" for ~ts ~ts", [?MODULE, StreamId, Node]), " for ~ts ~ts", [?MODULE, StreamId, Node]),
ActionFun = phase_delete_member(StreamId, Args, Conf), ActionFun = phase_delete_member(StreamId, Args, Conf),
run_action(delete_member, StreamId, Args, ActionFun, Aux, RaAux); run_action(delete_member, StreamId, Args, ActionFun, Aux, RaAux);
@ -1030,7 +1031,7 @@ handle_aux(leader, _, fail_active_actions,
Exclude = maps:from_list([{S, ok} Exclude = maps:from_list([{S, ok}
|| {P, {S, _, _}} <- maps_to_list(Actions), || {P, {S, _, _}} <- maps_to_list(Actions),
is_process_alive(P)]), is_process_alive(P)]),
rabbit_log:debug("~ts: failing actions: ~w", [?MODULE, Exclude]), ?LOG_DEBUG("~ts: failing actions: ~w", [?MODULE, Exclude]),
#?MODULE{streams = Streams} = ra_aux:machine_state(RaAux), #?MODULE{streams = Streams} = ra_aux:machine_state(RaAux),
fail_active_actions(Streams, Exclude), fail_active_actions(Streams, Exclude),
{no_reply, Aux, RaAux, []}; {no_reply, Aux, RaAux, []};
@ -1043,7 +1044,7 @@ handle_aux(leader, _, {down, Pid, Reason},
%% An action has failed - report back to the state machine %% An action has failed - report back to the state machine
case maps:get(Pid, Monitors0, undefined) of case maps:get(Pid, Monitors0, undefined) of
{StreamId, Action, #{node := Node, epoch := Epoch} = Args} -> {StreamId, Action, #{node := Node, epoch := Epoch} = Args} ->
rabbit_log:warning("~ts: error while executing action ~w for stream queue ~ts, " ?LOG_WARNING("~ts: error while executing action ~w for stream queue ~ts, "
" node ~ts, epoch ~b Err: ~w", " node ~ts, epoch ~b Err: ~w",
[?MODULE, Action, StreamId, Node, Epoch, Reason]), [?MODULE, Action, StreamId, Node, Epoch, Reason]),
Monitors = maps:remove(Pid, Monitors0), Monitors = maps:remove(Pid, Monitors0),
@ -1110,7 +1111,7 @@ phase_start_replica(StreamId, #{epoch := Epoch,
fun() -> fun() ->
try osiris_replica:start(Node, Conf0) of try osiris_replica:start(Node, Conf0) of
{ok, Pid} -> {ok, Pid} ->
rabbit_log:info("~ts: ~ts: replica started on ~ts in ~b pid ~w", ?LOG_INFO("~ts: ~ts: replica started on ~ts in ~b pid ~w",
[?MODULE, StreamId, Node, Epoch, Pid]), [?MODULE, StreamId, Node, Epoch, Pid]),
send_self_command({member_started, StreamId, send_self_command({member_started, StreamId,
Args#{pid => Pid}}); Args#{pid => Pid}});
@ -1126,12 +1127,12 @@ phase_start_replica(StreamId, #{epoch := Epoch,
send_self_command({member_started, StreamId, send_self_command({member_started, StreamId,
Args#{pid => Pid}}); Args#{pid => Pid}});
{error, Reason} -> {error, Reason} ->
rabbit_log:warning("~ts: Error while starting replica for ~ts on node ~ts in ~b : ~W", ?LOG_WARNING("~ts: Error while starting replica for ~ts on node ~ts in ~b : ~W",
[?MODULE, maps:get(name, Conf0), Node, Epoch, Reason, 10]), [?MODULE, maps:get(name, Conf0), Node, Epoch, Reason, 10]),
maybe_sleep(Reason), maybe_sleep(Reason),
send_action_failed(StreamId, starting, Args) send_action_failed(StreamId, starting, Args)
catch _:Error -> catch _:Error ->
rabbit_log:warning("~ts: Error while starting replica for ~ts on node ~ts in ~b : ~W", ?LOG_WARNING("~ts: Error while starting replica for ~ts on node ~ts in ~b : ~W",
[?MODULE, maps:get(name, Conf0), Node, Epoch, Error, 10]), [?MODULE, maps:get(name, Conf0), Node, Epoch, Error, 10]),
maybe_sleep(Error), maybe_sleep(Error),
send_action_failed(StreamId, starting, Args) send_action_failed(StreamId, starting, Args)
@ -1152,13 +1153,13 @@ phase_delete_member(StreamId, #{node := Node} = Arg, Conf) ->
true -> true ->
try osiris:delete_member(Node, Conf) of try osiris:delete_member(Node, Conf) of
ok -> ok ->
rabbit_log:info("~ts: Member deleted for ~ts : on node ~ts", ?LOG_INFO("~ts: Member deleted for ~ts : on node ~ts",
[?MODULE, StreamId, Node]), [?MODULE, StreamId, Node]),
send_self_command({member_deleted, StreamId, Arg}); send_self_command({member_deleted, StreamId, Arg});
_ -> _ ->
send_action_failed(StreamId, deleting, Arg) send_action_failed(StreamId, deleting, Arg)
catch _:E -> catch _:E ->
rabbit_log:warning("~ts: Error while deleting member for ~ts : on node ~ts ~W", ?LOG_WARNING("~ts: Error while deleting member for ~ts : on node ~ts ~W",
[?MODULE, StreamId, Node, E, 10]), [?MODULE, StreamId, Node, E, 10]),
maybe_sleep(E), maybe_sleep(E),
send_action_failed(StreamId, deleting, Arg) send_action_failed(StreamId, deleting, Arg)
@ -1166,7 +1167,7 @@ phase_delete_member(StreamId, #{node := Node} = Arg, Conf) ->
false -> false ->
%% node is no longer a cluster member, we return success to avoid %% node is no longer a cluster member, we return success to avoid
%% trying to delete the member indefinitely %% trying to delete the member indefinitely
rabbit_log:info("~ts: Member deleted/forgotten for ~ts : node ~ts is no longer a cluster member", ?LOG_INFO("~ts: Member deleted/forgotten for ~ts : node ~ts is no longer a cluster member",
[?MODULE, StreamId, Node]), [?MODULE, StreamId, Node]),
send_self_command({member_deleted, StreamId, Arg}) send_self_command({member_deleted, StreamId, Arg})
end end
@ -1180,22 +1181,22 @@ phase_stop_member(StreamId, #{node := Node, epoch := Epoch} = Arg0, Conf) ->
try get_replica_tail(Node, Conf) of try get_replica_tail(Node, Conf) of
{ok, Tail} -> {ok, Tail} ->
Arg = Arg0#{tail => Tail}, Arg = Arg0#{tail => Tail},
rabbit_log:debug("~ts: ~ts: member stopped on ~ts in ~b Tail ~w", ?LOG_DEBUG("~ts: ~ts: member stopped on ~ts in ~b Tail ~w",
[?MODULE, StreamId, Node, Epoch, Tail]), [?MODULE, StreamId, Node, Epoch, Tail]),
send_self_command({member_stopped, StreamId, Arg}); send_self_command({member_stopped, StreamId, Arg});
Err -> Err ->
rabbit_log:warning("~ts: failed to get tail of member ~ts on ~ts in ~b Error: ~w", ?LOG_WARNING("~ts: failed to get tail of member ~ts on ~ts in ~b Error: ~w",
[?MODULE, StreamId, Node, Epoch, Err]), [?MODULE, StreamId, Node, Epoch, Err]),
maybe_sleep(Err), maybe_sleep(Err),
send_action_failed(StreamId, stopping, Arg0) send_action_failed(StreamId, stopping, Arg0)
catch _:Err -> catch _:Err ->
rabbit_log:warning("~ts: failed to get tail of member ~ts on ~ts in ~b Error: ~w", ?LOG_WARNING("~ts: failed to get tail of member ~ts on ~ts in ~b Error: ~w",
[?MODULE, StreamId, Node, Epoch, Err]), [?MODULE, StreamId, Node, Epoch, Err]),
maybe_sleep(Err), maybe_sleep(Err),
send_action_failed(StreamId, stopping, Arg0) send_action_failed(StreamId, stopping, Arg0)
end end
catch _:Err -> catch _:Err ->
rabbit_log:warning("~ts: failed to stop member ~ts ~w Error: ~w", ?LOG_WARNING("~ts: failed to stop member ~ts ~w Error: ~w",
[?MODULE, StreamId, Node, Err]), [?MODULE, StreamId, Node, Err]),
maybe_sleep(Err), maybe_sleep(Err),
send_action_failed(StreamId, stopping, Arg0) send_action_failed(StreamId, stopping, Arg0)
@ -1207,17 +1208,17 @@ phase_start_writer(StreamId, #{epoch := Epoch, node := Node} = Args0, Conf) ->
try osiris:start_writer(Conf) of try osiris:start_writer(Conf) of
{ok, Pid} -> {ok, Pid} ->
Args = Args0#{epoch => Epoch, pid => Pid}, Args = Args0#{epoch => Epoch, pid => Pid},
rabbit_log:info("~ts: started writer ~ts on ~w in ~b", ?LOG_INFO("~ts: started writer ~ts on ~w in ~b",
[?MODULE, StreamId, Node, Epoch]), [?MODULE, StreamId, Node, Epoch]),
send_self_command({member_started, StreamId, Args}); send_self_command({member_started, StreamId, Args});
Err -> Err ->
%% no sleep for writer failures as we want to trigger a new %% no sleep for writer failures as we want to trigger a new
%% election asap %% election asap
rabbit_log:warning("~ts: failed to start writer ~ts on ~ts in ~b Error: ~w", ?LOG_WARNING("~ts: failed to start writer ~ts on ~ts in ~b Error: ~w",
[?MODULE, StreamId, Node, Epoch, Err]), [?MODULE, StreamId, Node, Epoch, Err]),
send_action_failed(StreamId, starting, Args0) send_action_failed(StreamId, starting, Args0)
catch _:Err -> catch _:Err ->
rabbit_log:warning("~ts: failed to start writer ~ts on ~ts in ~b Error: ~w", ?LOG_WARNING("~ts: failed to start writer ~ts on ~ts in ~b Error: ~w",
[?MODULE, StreamId, Node, Epoch, Err]), [?MODULE, StreamId, Node, Epoch, Err]),
send_action_failed(StreamId, starting, Args0) send_action_failed(StreamId, starting, Args0)
end end
@ -1230,12 +1231,12 @@ phase_update_retention(StreamId, #{pid := Pid,
ok -> ok ->
send_self_command({retention_updated, StreamId, Args}); send_self_command({retention_updated, StreamId, Args});
{error, Reason} = Err -> {error, Reason} = Err ->
rabbit_log:warning("~ts: failed to update retention for ~ts ~w Reason: ~w", ?LOG_WARNING("~ts: failed to update retention for ~ts ~w Reason: ~w",
[?MODULE, StreamId, node(Pid), Reason]), [?MODULE, StreamId, node(Pid), Reason]),
maybe_sleep(Err), maybe_sleep(Err),
send_action_failed(StreamId, update_retention, Args) send_action_failed(StreamId, update_retention, Args)
catch _:Err -> catch _:Err ->
rabbit_log:warning("~ts: failed to update retention for ~ts ~w Error: ~w", ?LOG_WARNING("~ts: failed to update retention for ~ts ~w Error: ~w",
[?MODULE, StreamId, node(Pid), Err]), [?MODULE, StreamId, node(Pid), Err]),
maybe_sleep(Err), maybe_sleep(Err),
send_action_failed(StreamId, update_retention, Args) send_action_failed(StreamId, update_retention, Args)
@ -1281,7 +1282,7 @@ is_quorum(NumReplicas, NumAlive) ->
phase_update_mnesia(StreamId, Args, #{reference := QName, phase_update_mnesia(StreamId, Args, #{reference := QName,
leader_pid := LeaderPid} = Conf) -> leader_pid := LeaderPid} = Conf) ->
fun() -> fun() ->
rabbit_log:debug("~ts: running mnesia update for ~ts: ~W", ?LOG_DEBUG("~ts: running mnesia update for ~ts: ~W",
[?MODULE, StreamId, Conf, 10]), [?MODULE, StreamId, Conf, 10]),
Fun = fun (Q) -> Fun = fun (Q) ->
case amqqueue:get_type_state(Q) of case amqqueue:get_type_state(Q) of
@ -1293,7 +1294,7 @@ phase_update_mnesia(StreamId, Args, #{reference := QName,
Ts -> Ts ->
S = maps:get(name, Ts, undefined), S = maps:get(name, Ts, undefined),
%% TODO log as side-effect %% TODO log as side-effect
rabbit_log:debug("~ts: refusing mnesia update for stale stream id ~s, current ~s", ?LOG_DEBUG("~ts: refusing mnesia update for stale stream id ~s, current ~s",
[?MODULE, StreamId, S]), [?MODULE, StreamId, S]),
%% if the stream id isn't a match this is a stale %% if the stream id isn't a match this is a stale
%% update from a previous stream incarnation for the %% update from a previous stream incarnation for the
@ -1303,7 +1304,7 @@ phase_update_mnesia(StreamId, Args, #{reference := QName,
end, end,
try rabbit_amqqueue:update(QName, Fun) of try rabbit_amqqueue:update(QName, Fun) of
not_found -> not_found ->
rabbit_log:debug("~ts: resource for stream id ~ts not found, " ?LOG_DEBUG("~ts: resource for stream id ~ts not found, "
"recovering from rabbit_durable_queue", "recovering from rabbit_durable_queue",
[?MODULE, StreamId]), [?MODULE, StreamId]),
%% This can happen during recovery %% This can happen during recovery
@ -1316,7 +1317,7 @@ phase_update_mnesia(StreamId, Args, #{reference := QName,
{ok, Q} -> {ok, Q} ->
case amqqueue:get_type_state(Q) of case amqqueue:get_type_state(Q) of
#{name := S} when S == StreamId -> #{name := S} when S == StreamId ->
rabbit_log:debug("~ts: initializing queue record for stream id ~ts", ?LOG_DEBUG("~ts: initializing queue record for stream id ~ts",
[?MODULE, StreamId]), [?MODULE, StreamId]),
ok = rabbit_amqqueue:ensure_rabbit_queue_record_is_initialized(Fun(Q)), ok = rabbit_amqqueue:ensure_rabbit_queue_record_is_initialized(Fun(Q)),
ok; ok;
@ -1328,7 +1329,7 @@ phase_update_mnesia(StreamId, Args, #{reference := QName,
_ -> _ ->
send_self_command({mnesia_updated, StreamId, Args}) send_self_command({mnesia_updated, StreamId, Args})
catch _:E -> catch _:E ->
rabbit_log:debug("~ts: failed to update mnesia for ~ts: ~W", ?LOG_DEBUG("~ts: failed to update mnesia for ~ts: ~W",
[?MODULE, StreamId, E, 10]), [?MODULE, StreamId, E, 10]),
send_action_failed(StreamId, updating_mnesia, Args) send_action_failed(StreamId, updating_mnesia, Args)
end end
@ -1364,7 +1365,7 @@ filter_command(_Meta, {delete_replica, _, #{node := Node}}, #stream{id = StreamI
end, Members0), end, Members0),
case maps:size(Members) =< 1 of case maps:size(Members) =< 1 of
true -> true ->
rabbit_log:warning( ?LOG_WARNING(
"~ts failed to delete replica on node ~ts for stream ~ts: refusing to delete the only replica", "~ts failed to delete replica on node ~ts for stream ~ts: refusing to delete the only replica",
[?MODULE, Node, StreamId]), [?MODULE, Node, StreamId]),
{error, last_stream_member}; {error, last_stream_member};
@ -1379,7 +1380,7 @@ update_stream(Meta, Cmd, Stream) ->
update_stream0(Meta, Cmd, Stream) update_stream0(Meta, Cmd, Stream)
catch catch
_:E:Stacktrace -> _:E:Stacktrace ->
rabbit_log:warning( ?LOG_WARNING(
"~ts failed to update stream:~n~W~n~W", "~ts failed to update stream:~n~W~n~W",
[?MODULE, E, 10, Stacktrace, 10]), [?MODULE, E, 10, Stacktrace, 10]),
Stream Stream
@ -1495,7 +1496,7 @@ update_stream0(#{system_time := _Ts},
Member -> Member ->
%% do we just ignore any members started events from unexpected %% do we just ignore any members started events from unexpected
%% epochs? %% epochs?
rabbit_log:warning("~ts: member started unexpected ~w ~w", ?LOG_WARNING("~ts: member started unexpected ~w ~w",
[?MODULE, Args, Member]), [?MODULE, Args, Member]),
Stream0 Stream0
end; end;
@ -2056,7 +2057,7 @@ fail_active_actions(Streams, Exclude) ->
end, Members), end, Members),
case Mnesia of case Mnesia of
{updating, E} -> {updating, E} ->
rabbit_log:debug("~ts: failing stale action to trigger retry. " ?LOG_DEBUG("~ts: failing stale action to trigger retry. "
"Stream ID: ~ts, node: ~w, action: ~w", "Stream ID: ~ts, node: ~w, action: ~w",
[?MODULE, Id, node(), updating_mnesia]), [?MODULE, Id, node(), updating_mnesia]),
send_self_command({action_failed, Id, send_self_command({action_failed, Id,
@ -2076,7 +2077,7 @@ fail_action(_StreamId, _, #member{current = undefined}) ->
ok; ok;
fail_action(StreamId, Node, #member{role = {_, E}, fail_action(StreamId, Node, #member{role = {_, E},
current = {Action, Idx}}) -> current = {Action, Idx}}) ->
rabbit_log:debug("~ts: failing stale action to trigger retry. " ?LOG_DEBUG("~ts: failing stale action to trigger retry. "
"Stream ID: ~ts, node: ~w, action: ~w", "Stream ID: ~ts, node: ~w, action: ~w",
[?MODULE, StreamId, node(), Action]), [?MODULE, StreamId, node(), Action]),
%% if we have an action send failure message %% if we have an action send failure message
@ -2241,7 +2242,7 @@ update_target(Member, Target) ->
machine_version(1, 2, State = #?MODULE{streams = Streams0, machine_version(1, 2, State = #?MODULE{streams = Streams0,
monitors = Monitors0}) -> monitors = Monitors0}) ->
rabbit_log:info("Stream coordinator machine version changes from 1 to 2, updating state."), ?LOG_INFO("Stream coordinator machine version changes from 1 to 2, updating state."),
%% conversion from old state to new state %% conversion from old state to new state
%% additional operation: the stream listeners are never collected in the previous version %% additional operation: the stream listeners are never collected in the previous version
%% so we'll emit monitors for all listener PIDs %% so we'll emit monitors for all listener PIDs
@ -2273,13 +2274,13 @@ machine_version(1, 2, State = #?MODULE{streams = Streams0,
monitors = Monitors2, monitors = Monitors2,
listeners = undefined}, Effects}; listeners = undefined}, Effects};
machine_version(2, 3, State) -> machine_version(2, 3, State) ->
rabbit_log:info("Stream coordinator machine version changes from 2 to 3, " ?LOG_INFO("Stream coordinator machine version changes from 2 to 3, "
"updating state."), "updating state."),
SacState = rabbit_stream_sac_coordinator_v4:init_state(), SacState = rabbit_stream_sac_coordinator_v4:init_state(),
{State#?MODULE{single_active_consumer = SacState}, {State#?MODULE{single_active_consumer = SacState},
[]}; []};
machine_version(3, 4, #?MODULE{streams = Streams0} = State) -> machine_version(3, 4, #?MODULE{streams = Streams0} = State) ->
rabbit_log:info("Stream coordinator machine version changes from 3 to 4, updating state."), ?LOG_INFO("Stream coordinator machine version changes from 3 to 4, updating state."),
%% the "preferred" field takes the place of the "node" field in this version %% the "preferred" field takes the place of the "node" field in this version
%% initializing the "preferred" field to false %% initializing the "preferred" field to false
Streams = maps:map( Streams = maps:map(
@ -2291,12 +2292,12 @@ machine_version(3, 4, #?MODULE{streams = Streams0} = State) ->
end, Streams0), end, Streams0),
{State#?MODULE{streams = Streams}, []}; {State#?MODULE{streams = Streams}, []};
machine_version(4 = From, 5, #?MODULE{single_active_consumer = Sac0} = State) -> machine_version(4 = From, 5, #?MODULE{single_active_consumer = Sac0} = State) ->
rabbit_log:info("Stream coordinator machine version changes from 4 to 5, updating state."), ?LOG_INFO("Stream coordinator machine version changes from 4 to 5, updating state."),
SacExport = rabbit_stream_sac_coordinator_v4:state_to_map(Sac0), SacExport = rabbit_stream_sac_coordinator_v4:state_to_map(Sac0),
Sac1 = rabbit_stream_sac_coordinator:import_state(From, SacExport), Sac1 = rabbit_stream_sac_coordinator:import_state(From, SacExport),
{State#?MODULE{single_active_consumer = Sac1}, []}; {State#?MODULE{single_active_consumer = Sac1}, []};
machine_version(From, To, State) -> machine_version(From, To, State) ->
rabbit_log:info("Stream coordinator machine version changes from ~tp to ~tp, no state changes required.", ?LOG_INFO("Stream coordinator machine version changes from ~tp to ~tp, no state changes required.",
[From, To]), [From, To]),
{State, []}. {State, []}.
@ -61,6 +61,7 @@
-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl").
-include("amqqueue.hrl"). -include("amqqueue.hrl").
-include_lib("kernel/include/logger.hrl").
-define(INFO_KEYS, [name, durable, auto_delete, arguments, leader, members, online, state, -define(INFO_KEYS, [name, durable, auto_delete, arguments, leader, members, online, state,
messages, messages_ready, messages_unacknowledged, committed_offset, messages, messages_ready, messages_unacknowledged, committed_offset,
@ -318,7 +319,7 @@ consume(Q, Spec, #stream_client{} = QState0)
args := Args, args := Args,
ok_msg := OkMsg, ok_msg := OkMsg,
acting_user := ActingUser} = Spec, acting_user := ActingUser} = Spec,
rabbit_log:debug("~s:~s Local pid resolved ~0p", ?LOG_DEBUG("~s:~s Local pid resolved ~0p",
[?MODULE, ?FUNCTION_NAME, LocalPid]), [?MODULE, ?FUNCTION_NAME, LocalPid]),
case parse_offset_arg( case parse_offset_arg(
rabbit_misc:table_lookup(Args, <<"x-stream-offset">>)) of rabbit_misc:table_lookup(Args, <<"x-stream-offset">>)) of
@ -629,17 +630,17 @@ handle_event(_QName, {stream_local_member_change, Pid},
handle_event(_QName, {stream_local_member_change, Pid}, handle_event(_QName, {stream_local_member_change, Pid},
#stream_client{name = QName, #stream_client{name = QName,
readers = Readers0} = State) -> readers = Readers0} = State) ->
rabbit_log:debug("Local member change event for ~tp", [QName]), ?LOG_DEBUG("Local member change event for ~tp", [QName]),
Readers1 = maps:fold(fun(T, #stream{log = Log0, reader_options = Options} = S0, Acc) -> Readers1 = maps:fold(fun(T, #stream{log = Log0, reader_options = Options} = S0, Acc) ->
Offset = osiris_log:next_offset(Log0), Offset = osiris_log:next_offset(Log0),
osiris_log:close(Log0), osiris_log:close(Log0),
CounterSpec = {{?MODULE, QName, self()}, []}, CounterSpec = {{?MODULE, QName, self()}, []},
rabbit_log:debug("Re-creating Osiris reader for consumer ~tp at offset ~tp " ?LOG_DEBUG("Re-creating Osiris reader for consumer ~tp at offset ~tp "
" with options ~tp", " with options ~tp",
[T, Offset, Options]), [T, Offset, Options]),
{ok, Log1} = osiris:init_reader(Pid, Offset, CounterSpec, Options), {ok, Log1} = osiris:init_reader(Pid, Offset, CounterSpec, Options),
NextOffset = osiris_log:next_offset(Log1) - 1, NextOffset = osiris_log:next_offset(Log1) - 1,
rabbit_log:debug("Registering offset listener at offset ~tp", [NextOffset]), ?LOG_DEBUG("Registering offset listener at offset ~tp", [NextOffset]),
osiris:register_offset_listener(Pid, NextOffset), osiris:register_offset_listener(Pid, NextOffset),
S1 = S0#stream{listening_offset = NextOffset, S1 = S0#stream{listening_offset = NextOffset,
log = Log1}, log = Log1},
@ -982,7 +983,7 @@ init(Q) when ?is_amqqueue(Q) ->
{ok, stream_not_found, _} -> {ok, stream_not_found, _} ->
{error, stream_not_found}; {error, stream_not_found};
{error, coordinator_unavailable} = E -> {error, coordinator_unavailable} = E ->
rabbit_log:warning("Failed to start stream client ~tp: coordinator unavailable", ?LOG_WARNING("Failed to start stream client ~tp: coordinator unavailable",
[rabbit_misc:rs(QName)]), [rabbit_misc:rs(QName)]),
E E
end. end.
@ -1001,7 +1002,7 @@ update(Q, State)
update_leader_pid(Pid, #stream_client{leader = Pid} = State) -> update_leader_pid(Pid, #stream_client{leader = Pid} = State) ->
State; State;
update_leader_pid(Pid, #stream_client{} = State) -> update_leader_pid(Pid, #stream_client{} = State) ->
rabbit_log:debug("stream client: new leader detected ~w", [Pid]), ?LOG_DEBUG("stream client: new leader detected ~w", [Pid]),
resend_all(State#stream_client{leader = Pid}). resend_all(State#stream_client{leader = Pid}).
state_info(_) -> state_info(_) ->
@ -1058,11 +1059,11 @@ delete_replica(VHost, Name, Node) ->
end. end.
delete_all_replicas(Node) -> delete_all_replicas(Node) ->
rabbit_log:info("Asked to remove all stream replicas from node ~ts", [Node]), ?LOG_INFO("Asked to remove all stream replicas from node ~ts", [Node]),
Streams = rabbit_amqqueue:list_stream_queues_on(Node), Streams = rabbit_amqqueue:list_stream_queues_on(Node),
lists:map(fun(Q) -> lists:map(fun(Q) ->
QName = amqqueue:get_name(Q), QName = amqqueue:get_name(Q),
rabbit_log:info("~ts: removing replica on node ~w", ?LOG_INFO("~ts: removing replica on node ~w",
[rabbit_misc:rs(QName), Node]), [rabbit_misc:rs(QName), Node]),
#{name := StreamId} = amqqueue:get_type_state(Q), #{name := StreamId} = amqqueue:get_type_state(Q),
{ok, Reply, _} = rabbit_stream_coordinator:delete_replica(StreamId, Node), {ok, Reply, _} = rabbit_stream_coordinator:delete_replica(StreamId, Node),
@ -1070,7 +1071,7 @@ delete_all_replicas(Node) ->
ok -> ok ->
{QName, ok}; {QName, ok};
Err -> Err ->
rabbit_log:warning("~ts: failed to remove replica on node ~w, error: ~w", ?LOG_WARNING("~ts: failed to remove replica on node ~w, error: ~w",
[rabbit_misc:rs(QName), Node, Err]), [rabbit_misc:rs(QName), Node, Err]),
{QName, {error, Err}} {QName, {error, Err}}
end end
@ -1264,7 +1265,7 @@ chunk_iterator(#stream{credit = Credit,
end, end,
{end_of_stream, Str}; {end_of_stream, Str};
{error, Err} -> {error, Err} ->
rabbit_log:info("stream client: failed to create chunk iterator ~p", [Err]), ?LOG_INFO("stream client: failed to create chunk iterator ~p", [Err]),
exit(Err) exit(Err)
end. end.
@ -1338,7 +1339,7 @@ resend_all(#stream_client{leader = LeaderPid,
case Msgs of case Msgs of
[] -> ok; [] -> ok;
[{Seq, _} | _] -> [{Seq, _} | _] ->
rabbit_log:debug("stream client: resending from seq ~w num ~b", ?LOG_DEBUG("stream client: resending from seq ~w num ~b",
[Seq, maps:size(Corrs)]) [Seq, maps:size(Corrs)])
end, end,
[begin [begin

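Beyond the name swap, the ?LOG_* macros from kernel/include/logger.hrl short-circuit through logger:allow/2 and attach location metadata (mfa, file, line) to each event. A minimal sketch, assuming a throwaway module exercised from a shell, using the same ?MODULE/?FUNCTION_NAME message style as the consume/3 hunk above:

%% Standalone sketch: lower the primary level so debug events are not
%% dropped, then emit a debug event; the macro adds mfa/file/line metadata.
-module(example_location_metadata).

-include_lib("kernel/include/logger.hrl").

-export([demo/1]).

demo(LocalPid) ->
    ok = logger:set_primary_config(level, debug),
    ?LOG_DEBUG("~s:~s Local pid resolved ~0p",
               [?MODULE, ?FUNCTION_NAME, LocalPid]).

For instance, example_location_metadata:demo(self()) logs the caller's own pid at debug level.
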
View File

@ -17,6 +17,7 @@
-module(rabbit_stream_sac_coordinator). -module(rabbit_stream_sac_coordinator).
-include("rabbit_stream_sac_coordinator.hrl"). -include("rabbit_stream_sac_coordinator.hrl").
-include_lib("kernel/include/logger.hrl").
-opaque command() :: #command_register_consumer{} | -opaque command() :: #command_register_consumer{} |
#command_unregister_consumer{} | #command_unregister_consumer{} |
@ -148,7 +149,7 @@ process_command(Cmd) ->
{ok, Res, _} -> {ok, Res, _} ->
Res; Res;
{error, _} = Err -> {error, _} = Err ->
rabbit_log:warning("SAC coordinator command ~tp returned error ~tp", ?LOG_WARNING("SAC coordinator command ~tp returned error ~tp",
[Cmd, Err]), [Cmd, Err]),
Err Err
end. end.
@ -286,7 +287,7 @@ apply(#command_activate_consumer{vhost = VH, stream = S, consumer_name = Name},
{G, Eff} = {G, Eff} =
case lookup_group(VH, S, Name, StreamGroups0) of case lookup_group(VH, S, Name, StreamGroups0) of
undefined -> undefined ->
rabbit_log:warning("Trying to activate consumer in group ~tp, but " ?LOG_WARNING("Trying to activate consumer in group ~tp, but "
"the group does not longer exist", "the group does not longer exist",
[{VH, S, Name}]), [{VH, S, Name}]),
{undefined, []}; {undefined, []};
@ -348,7 +349,7 @@ apply(#command_purge_nodes{nodes = Nodes}, State0) ->
apply(#command_update_conf{conf = NewConf}, State) -> apply(#command_update_conf{conf = NewConf}, State) ->
{State#?MODULE{conf = NewConf}, ok, []}; {State#?MODULE{conf = NewConf}, ok, []};
apply(UnkCmd, State) -> apply(UnkCmd, State) ->
rabbit_log:debug("~ts: unknown SAC command ~W", [?MODULE, UnkCmd, 10]), ?LOG_DEBUG("~ts: unknown SAC command ~W", [?MODULE, UnkCmd, 10]),
{State, {error, unknown_command}, []}. {State, {error, unknown_command}, []}.
purge_node(Node, #?MODULE{groups = Groups0} = State0) -> purge_node(Node, #?MODULE{groups = Groups0} = State0) ->

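The unknown-command clause above uses the ~W control sequence, which consumes two arguments: the term to print and a maximum depth. A sketch of that call in isolation (module and function names assumed):

%% Sketch: ~W caps how deeply an arbitrary term is printed; structure nested
%% more than 10 levels deep is elided as '...', so an unexpected command
%% cannot flood the log.
-module(example_depth_limited_log).

-include_lib("kernel/include/logger.hrl").

-export([log_unknown/1]).

log_unknown(UnknownCmd) ->
    ?LOG_DEBUG("~ts: unknown SAC command ~W", [?MODULE, UnknownCmd, 10]).
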
View File

@ -17,6 +17,7 @@
-module(rabbit_stream_sac_coordinator_v4). -module(rabbit_stream_sac_coordinator_v4).
-include("rabbit_stream_sac_coordinator_v4.hrl"). -include("rabbit_stream_sac_coordinator_v4.hrl").
-include_lib("kernel/include/logger.hrl").
-opaque command() :: -opaque command() ::
#command_register_consumer{} | #command_unregister_consumer{} | #command_register_consumer{} | #command_unregister_consumer{} |
@ -124,7 +125,7 @@ process_command(Cmd) ->
{ok, Res, _} -> {ok, Res, _} ->
Res; Res;
{error, _} = Err -> {error, _} = Err ->
rabbit_log:warning("SAC coordinator command ~tp returned error ~tp", ?LOG_WARNING("SAC coordinator command ~tp returned error ~tp",
[Cmd, Err]), [Cmd, Err]),
Err Err
end. end.
@ -251,7 +252,7 @@ apply(#command_activate_consumer{vhost = VirtualHost,
{G, Eff} = {G, Eff} =
case lookup_group(VirtualHost, Stream, ConsumerName, StreamGroups0) of case lookup_group(VirtualHost, Stream, ConsumerName, StreamGroups0) of
undefined -> undefined ->
rabbit_log:warning("Trying to activate consumer in group ~tp, but " ?LOG_WARNING("Trying to activate consumer in group ~tp, but "
"the group does not longer exist", "the group does not longer exist",
[{VirtualHost, Stream, ConsumerName}]), [{VirtualHost, Stream, ConsumerName}]),
{undefined, []}; {undefined, []};

View File

@ -23,6 +23,9 @@
-module(rabbit_sysmon_handler). -module(rabbit_sysmon_handler).
-include_lib("kernel/include/logger.hrl").
-behaviour(gen_event). -behaviour(gen_event).
%% API %% API
@ -89,16 +92,16 @@ handle_event({monitor, PidOrPort, Type, Info}, State=#state{timer_ref=TimerRef})
%% Reset the inactivity timeout %% Reset the inactivity timeout
NewTimerRef = reset_timer(TimerRef), NewTimerRef = reset_timer(TimerRef),
{Fmt, Args} = format_pretty_proc_or_port_info(PidOrPort), {Fmt, Args} = format_pretty_proc_or_port_info(PidOrPort),
rabbit_log:warning("~tp ~w ~w " ++ Fmt ++ " ~w", [?MODULE, Type, PidOrPort] ++ Args ++ [Info]), ?LOG_WARNING("~tp ~w ~w " ++ Fmt ++ " ~w", [?MODULE, Type, PidOrPort] ++ Args ++ [Info]),
{ok, State#state{timer_ref=NewTimerRef}}; {ok, State#state{timer_ref=NewTimerRef}};
handle_event({suppressed, Type, Info}, State=#state{timer_ref=TimerRef}) -> handle_event({suppressed, Type, Info}, State=#state{timer_ref=TimerRef}) ->
%% Reset the inactivity timeout %% Reset the inactivity timeout
NewTimerRef = reset_timer(TimerRef), NewTimerRef = reset_timer(TimerRef),
rabbit_log:debug("~tp encountered a suppressed event of type ~w: ~w", [?MODULE, Type, Info]), ?LOG_DEBUG("~tp encountered a suppressed event of type ~w: ~w", [?MODULE, Type, Info]),
{ok, State#state{timer_ref=NewTimerRef}}; {ok, State#state{timer_ref=NewTimerRef}};
handle_event(Event, State=#state{timer_ref=TimerRef}) -> handle_event(Event, State=#state{timer_ref=TimerRef}) ->
NewTimerRef = reset_timer(TimerRef), NewTimerRef = reset_timer(TimerRef),
rabbit_log:warning("~tp unhandled event: ~tp", [?MODULE, Event]), ?LOG_WARNING("~tp unhandled event: ~tp", [?MODULE, Event]),
{ok, State#state{timer_ref=NewTimerRef}}. {ok, State#state{timer_ref=NewTimerRef}}.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
@ -136,7 +139,7 @@ handle_info(inactivity_timeout, State) ->
%% so hibernate to free up resources. %% so hibernate to free up resources.
{ok, State, hibernate}; {ok, State, hibernate};
handle_info(Info, State) -> handle_info(Info, State) ->
rabbit_log:info("handle_info got ~tp", [Info]), ?LOG_INFO("handle_info got ~tp", [Info]),
{ok, State}. {ok, State}.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------

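The sysmon handler builds its format string at runtime by concatenating a per-process fragment, and the macro accepts that exactly as rabbit_log:warning/2 did. A reduced sketch with the fragment and its arguments passed in as a pair (the function shape here is assumed, not the handler's real API):

%% Sketch: the format string handed to ?LOG_WARNING may be assembled at
%% runtime; the argument list is concatenated to match the final string.
-module(example_sysmon_report).

-include_lib("kernel/include/logger.hrl").

-export([report/4]).

report(Type, PidOrPort, Info, {Fmt, Args}) ->
    ?LOG_WARNING("~tp ~w ~w " ++ Fmt ++ " ~w",
                 [?MODULE, Type, PidOrPort] ++ Args ++ [Info]).
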
View File

@ -20,6 +20,7 @@
-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-ifdef(TEST). -ifdef(TEST).
-export([pre_khepri_definitions/0]). -export([pre_khepri_definitions/0]).
@ -46,7 +47,7 @@ create() ->
create(TableName, TableDefinition) -> create(TableName, TableDefinition) ->
TableDefinition1 = proplists:delete(match, TableDefinition), TableDefinition1 = proplists:delete(match, TableDefinition),
rabbit_log:debug("Will create a schema database table '~ts'", [TableName]), ?LOG_DEBUG("Will create a schema database table '~ts'", [TableName]),
case mnesia:create_table(TableName, TableDefinition1) of case mnesia:create_table(TableName, TableDefinition1) of
{atomic, ok} -> ok; {atomic, ok} -> ok;
{aborted,{already_exists, TableName}} -> ok; {aborted,{already_exists, TableName}} -> ok;
@ -78,7 +79,7 @@ ensure_secondary_index(Table, Field) ->
-spec ensure_table_copy(mnesia_table(), node(), mnesia_storage_type()) -> -spec ensure_table_copy(mnesia_table(), node(), mnesia_storage_type()) ->
ok | {error, any()}. ok | {error, any()}.
ensure_table_copy(TableName, Node, StorageType) -> ensure_table_copy(TableName, Node, StorageType) ->
rabbit_log:debug("Will add a local schema database copy for table '~ts'", [TableName]), ?LOG_DEBUG("Will add a local schema database copy for table '~ts'", [TableName]),
case mnesia:add_table_copy(TableName, Node, StorageType) of case mnesia:add_table_copy(TableName, Node, StorageType) of
{atomic, ok} -> ok; {atomic, ok} -> ok;
{aborted,{already_exists, TableName}} -> ok; {aborted,{already_exists, TableName}} -> ok;
@ -140,7 +141,7 @@ wait1(TableNames, Timeout, Retries, Silent) ->
true -> true ->
ok; ok;
false -> false ->
rabbit_log:info("Waiting for Mnesia tables for ~tp ms, ~tp retries left", ?LOG_INFO("Waiting for Mnesia tables for ~tp ms, ~tp retries left",
[Timeout, Retries - 1]) [Timeout, Retries - 1])
end, end,
Result = case mnesia:wait_for_tables(TableNames, Timeout) of Result = case mnesia:wait_for_tables(TableNames, Timeout) of
@ -159,7 +160,7 @@ wait1(TableNames, Timeout, Retries, Silent) ->
true -> true ->
ok; ok;
false -> false ->
rabbit_log:info("Successfully synced tables from a peer"), ?LOG_INFO("Successfully synced tables from a peer"),
ok ok
end; end;
{1, {error, _} = Error} -> {1, {error, _} = Error} ->
@ -169,7 +170,7 @@ wait1(TableNames, Timeout, Retries, Silent) ->
true -> true ->
ok; ok;
false -> false ->
rabbit_log:warning("Error while waiting for Mnesia tables: ~tp", [Error]) ?LOG_WARNING("Error while waiting for Mnesia tables: ~tp", [Error])
end, end,
wait1(TableNames, Timeout, Retries - 1, Silent) wait1(TableNames, Timeout, Retries - 1, Silent)
end. end.

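The Mnesia wait loop above only reports progress when it was not started in silent mode; the macro call simply sits behind an ordinary boolean match. A reduced sketch of that conditional logging (function and argument names assumed):

%% Sketch: when Silent is true nothing is logged; otherwise every retry
%% reports the remaining timeout and retry budget.
-module(example_silent_wait).

-include_lib("kernel/include/logger.hrl").

-export([maybe_log_wait/3]).

maybe_log_wait(true = _Silent, _Timeout, _Retries) ->
    ok;
maybe_log_wait(false = _Silent, Timeout, Retries) ->
    ?LOG_INFO("Waiting for Mnesia tables for ~tp ms, ~tp retries left",
              [Timeout, Retries - 1]).
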
View File

@ -12,6 +12,7 @@
-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("rabbit_common/include/rabbit_framing.hrl"). -include_lib("rabbit_common/include/rabbit_framing.hrl").
-include_lib("kernel/include/logger.hrl").
-define(TRACE_VHOSTS, trace_vhosts). -define(TRACE_VHOSTS, trace_vhosts).
-define(XNAME, <<"amq.rabbitmq.trace">>). -define(XNAME, <<"amq.rabbitmq.trace">>).
@ -103,10 +104,10 @@ start(VHost)
when is_binary(VHost) -> when is_binary(VHost) ->
case enabled(VHost) of case enabled(VHost) of
true -> true ->
rabbit_log:info("Tracing is already enabled for vhost '~ts'", [VHost]), ?LOG_INFO("Tracing is already enabled for vhost '~ts'", [VHost]),
ok; ok;
false -> false ->
rabbit_log:info("Enabling tracing for vhost '~ts'", [VHost]), ?LOG_INFO("Enabling tracing for vhost '~ts'", [VHost]),
update_config(fun(VHosts) -> lists:usort([VHost | VHosts]) end) update_config(fun(VHosts) -> lists:usort([VHost | VHosts]) end)
end. end.
@ -115,10 +116,10 @@ stop(VHost)
when is_binary(VHost) -> when is_binary(VHost) ->
case enabled(VHost) of case enabled(VHost) of
true -> true ->
rabbit_log:info("Disabling tracing for vhost '~ts'", [VHost]), ?LOG_INFO("Disabling tracing for vhost '~ts'", [VHost]),
update_config(fun(VHosts) -> VHosts -- [VHost] end); update_config(fun(VHosts) -> VHosts -- [VHost] end);
false -> false ->
rabbit_log:info("Tracing is already disabled for vhost '~ts'", [VHost]), ?LOG_INFO("Tracing is already disabled for vhost '~ts'", [VHost]),
ok ok
end. end.
@ -128,13 +129,13 @@ update_config(Fun) ->
application:set_env(rabbit, ?TRACE_VHOSTS, VHosts), application:set_env(rabbit, ?TRACE_VHOSTS, VHosts),
Sessions = rabbit_amqp_session:list_local(), Sessions = rabbit_amqp_session:list_local(),
NonAmqpPids = rabbit_networking:local_non_amqp_connections(), NonAmqpPids = rabbit_networking:local_non_amqp_connections(),
rabbit_log:debug("Refreshing state of channels, ~b sessions and ~b non " ?LOG_DEBUG("Refreshing state of channels, ~b sessions and ~b non "
"AMQP 0.9.1 connections after virtual host tracing changes...", "AMQP 0.9.1 connections after virtual host tracing changes...",
[length(Sessions), length(NonAmqpPids)]), [length(Sessions), length(NonAmqpPids)]),
Pids = Sessions ++ NonAmqpPids, Pids = Sessions ++ NonAmqpPids,
lists:foreach(fun(Pid) -> gen_server:cast(Pid, refresh_config) end, Pids), lists:foreach(fun(Pid) -> gen_server:cast(Pid, refresh_config) end, Pids),
{Time, ok} = timer:tc(fun rabbit_channel:refresh_config_local/0), {Time, ok} = timer:tc(fun rabbit_channel:refresh_config_local/0),
rabbit_log:debug("Refreshed channel states in ~fs", [Time / 1_000_000]), ?LOG_DEBUG("Refreshed channel states in ~fs", [Time / 1_000_000]),
ok. ok.
vhosts_with_tracing_enabled() -> vhosts_with_tracing_enabled() ->

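update_config/1 above times the channel refresh with timer:tc and logs the duration; timer:tc reports microseconds, hence the division before the ~f placeholder. A sketch with the measured work passed in as a zero-arity fun (an assumption for illustration):

%% Sketch: timer:tc/1 returns {Microseconds, Result}; dividing by 1_000_000
%% yields the float that the ~f control sequence expects.
-module(example_timed_refresh).

-include_lib("kernel/include/logger.hrl").

-export([timed/1]).

timed(Fun) when is_function(Fun, 0) ->
    {TimeUs, Result} = timer:tc(Fun),
    ?LOG_DEBUG("Refreshed channel states in ~fs", [TimeUs / 1_000_000]),
    Result.
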
View File

@ -7,6 +7,9 @@
-module(rabbit_tracking). -module(rabbit_tracking).
-include_lib("kernel/include/logger.hrl").
%% Common behaviour and processing functions for tracking components %% Common behaviour and processing functions for tracking components
%% %%
%% See in use: %% See in use:
@ -45,12 +48,12 @@ count_on_all_nodes(Mod, Fun, Args, ContextMsg) ->
sum_rpc_multicall_result([{ok, Int}|ResL], [_N|Nodes], ContextMsg, Acc) when is_integer(Int) -> sum_rpc_multicall_result([{ok, Int}|ResL], [_N|Nodes], ContextMsg, Acc) when is_integer(Int) ->
sum_rpc_multicall_result(ResL, Nodes, ContextMsg, Acc + Int); sum_rpc_multicall_result(ResL, Nodes, ContextMsg, Acc + Int);
sum_rpc_multicall_result([{ok, BadValue}|ResL], [BadNode|Nodes], ContextMsg, Acc) -> sum_rpc_multicall_result([{ok, BadValue}|ResL], [BadNode|Nodes], ContextMsg, Acc) ->
rabbit_log:error( ?LOG_ERROR(
"Failed to fetch number of ~ts on node ~tp:~n not an integer ~tp", "Failed to fetch number of ~ts on node ~tp:~n not an integer ~tp",
[ContextMsg, BadNode, BadValue]), [ContextMsg, BadNode, BadValue]),
sum_rpc_multicall_result(ResL, Nodes, ContextMsg, Acc); sum_rpc_multicall_result(ResL, Nodes, ContextMsg, Acc);
sum_rpc_multicall_result([{Class, Reason}|ResL], [BadNode|Nodes], ContextMsg, Acc) -> sum_rpc_multicall_result([{Class, Reason}|ResL], [BadNode|Nodes], ContextMsg, Acc) ->
rabbit_log:error( ?LOG_ERROR(
"Failed to fetch number of ~ts on node ~tp:~n~tp:~tp", "Failed to fetch number of ~ts on node ~tp:~n~tp:~tp",
[ContextMsg, BadNode, Class, Reason]), [ContextMsg, BadNode, Class, Reason]),
sum_rpc_multicall_result(ResL, Nodes, ContextMsg, Acc); sum_rpc_multicall_result(ResL, Nodes, ContextMsg, Acc);

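sum_rpc_multicall_result/4 above walks the per-node results and the node list in lockstep, summing integers and logging anything else. A trimmed sketch of that fold (the context-message argument is omitted and all names are assumed):

%% Sketch: a non-integer or error result is logged with the node it came
%% from and then skipped, so one bad node cannot break the cluster-wide count.
-module(example_sum_multicall).

-include_lib("kernel/include/logger.hrl").

-export([sum/2]).

sum(Results, Nodes) ->
    sum(Results, Nodes, 0).

sum([], [], Acc) ->
    Acc;
sum([{ok, Int} | Rest], [_Node | Nodes], Acc) when is_integer(Int) ->
    sum(Rest, Nodes, Acc + Int);
sum([Other | Rest], [BadNode | Nodes], Acc) ->
    ?LOG_ERROR("Failed to fetch a count on node ~tp: ~tp", [BadNode, Other]),
    sum(Rest, Nodes, Acc).
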
View File

@ -7,6 +7,9 @@
-module(rabbit_upgrade_preparation). -module(rabbit_upgrade_preparation).
-include_lib("kernel/include/logger.hrl").
-export([await_online_quorum_plus_one/1, -export([await_online_quorum_plus_one/1,
list_with_minimum_quorum_for_cli/0]). list_with_minimum_quorum_for_cli/0]).
@ -66,12 +69,12 @@ do_await_safe_online_quorum(IterationsLeft) ->
0 -> 0 ->
case length(EndangeredQueues) of case length(EndangeredQueues) of
0 -> ok; 0 -> ok;
N -> rabbit_log:info("Waiting for ~p queues and streams to have quorum+1 replicas online. " N -> ?LOG_INFO("Waiting for ~p queues and streams to have quorum+1 replicas online. "
"You can list them with `rabbitmq-diagnostics check_if_node_is_quorum_critical`", [N]) "You can list them with `rabbitmq-diagnostics check_if_node_is_quorum_critical`", [N])
end, end,
case endangered_critical_components() of case endangered_critical_components() of
[] -> ok; [] -> ok;
_ -> rabbit_log:info("Waiting for the following critical components to have quorum+1 replicas online: ~p.", _ -> ?LOG_INFO("Waiting for the following critical components to have quorum+1 replicas online: ~p.",
[endangered_critical_components()]) [endangered_critical_components()])
end; end;
_ -> _ ->

View File

@ -268,6 +268,7 @@
-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl").
-include("amqqueue.hrl"). -include("amqqueue.hrl").
-include_lib("kernel/include/logger.hrl").
%%---------------------------------------------------------------------------- %%----------------------------------------------------------------------------
@ -382,7 +383,7 @@ stop(VHost) ->
ok = rabbit_classic_queue_index_v2:stop(VHost). ok = rabbit_classic_queue_index_v2:stop(VHost).
start_msg_store(VHost, Refs, StartFunState) when is_list(Refs); Refs == undefined -> start_msg_store(VHost, Refs, StartFunState) when is_list(Refs); Refs == undefined ->
rabbit_log:info("Starting message stores for vhost '~ts'", [VHost]), ?LOG_INFO("Starting message stores for vhost '~ts'", [VHost]),
do_start_msg_store(VHost, ?TRANSIENT_MSG_STORE, undefined, ?EMPTY_START_FUN_STATE), do_start_msg_store(VHost, ?TRANSIENT_MSG_STORE, undefined, ?EMPTY_START_FUN_STATE),
do_start_msg_store(VHost, ?PERSISTENT_MSG_STORE, Refs, StartFunState), do_start_msg_store(VHost, ?PERSISTENT_MSG_STORE, Refs, StartFunState),
ok. ok.
@ -390,13 +391,13 @@ start_msg_store(VHost, Refs, StartFunState) when is_list(Refs); Refs == undefine
do_start_msg_store(VHost, Type, Refs, StartFunState) -> do_start_msg_store(VHost, Type, Refs, StartFunState) ->
case rabbit_vhost_msg_store:start(VHost, Type, Refs, StartFunState) of case rabbit_vhost_msg_store:start(VHost, Type, Refs, StartFunState) of
{ok, _} -> {ok, _} ->
rabbit_log:info("Started message store of type ~ts for vhost '~ts'", [abbreviated_type(Type), VHost]); ?LOG_INFO("Started message store of type ~ts for vhost '~ts'", [abbreviated_type(Type), VHost]);
{error, {no_such_vhost, VHost}} = Err -> {error, {no_such_vhost, VHost}} = Err ->
rabbit_log:error("Failed to start message store of type ~ts for vhost '~ts': the vhost no longer exists!", ?LOG_ERROR("Failed to start message store of type ~ts for vhost '~ts': the vhost no longer exists!",
[Type, VHost]), [Type, VHost]),
exit(Err); exit(Err);
{error, Error} -> {error, Error} ->
rabbit_log:error("Failed to start message store of type ~ts for vhost '~ts': ~tp", ?LOG_ERROR("Failed to start message store of type ~ts for vhost '~ts': ~tp",
[Type, VHost, Error]), [Type, VHost, Error]),
exit({error, Error}) exit({error, Error})
end. end.
@ -891,7 +892,7 @@ convert_from_v1_to_v2_loop(QueueName, V1Index0, V2Index0, V2Store0,
%% Log some progress to keep the user aware of what's going on, as moving %% Log some progress to keep the user aware of what's going on, as moving
%% embedded messages can take quite some time. %% embedded messages can take quite some time.
#resource{virtual_host = VHost, name = Name} = QueueName, #resource{virtual_host = VHost, name = Name} = QueueName,
rabbit_log:info("Queue ~ts in vhost ~ts converted ~b messages from v1 to v2", ?LOG_INFO("Queue ~ts in vhost ~ts converted ~b messages from v1 to v2",
[Name, VHost, length(Messages)]), [Name, VHost, length(Messages)]),
convert_from_v1_to_v2_loop(QueueName, V1Index, V2Index, V2Store, Counters, UpSeqId, HiSeqId, SkipFun). convert_from_v1_to_v2_loop(QueueName, V1Index, V2Index, V2Store, Counters, UpSeqId, HiSeqId, SkipFun).

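do_start_msg_store/4 above picks the log level by outcome and converts errors into exits. A reduced sketch of that decision, with the actual store start abstracted behind a fun (an assumption; the real code calls rabbit_vhost_msg_store:start/4):

%% Sketch: success is logged at info, failure at error, and the failure is
%% then re-raised as an exit so the vhost start is aborted.
-module(example_store_start).

-include_lib("kernel/include/logger.hrl").

-export([start_store/2]).

start_store(VHost, StartFun) when is_function(StartFun, 0) ->
    case StartFun() of
        {ok, Pid} ->
            ?LOG_INFO("Started message store for vhost '~ts'", [VHost]),
            {ok, Pid};
        {error, Reason} ->
            ?LOG_ERROR("Failed to start message store for vhost '~ts': ~tp",
                       [VHost, Reason]),
            exit({error, Reason})
    end.
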
View File

@ -9,6 +9,7 @@
-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl").
-include("vhost.hrl"). -include("vhost.hrl").
-include_lib("kernel/include/logger.hrl").
-export([recover/0, recover/1, read_config/1]). -export([recover/0, recover/1, read_config/1]).
-export([add/2, add/3, add/4, delete/2, delete_ignoring_protection/2, exists/1, assert/1, -export([add/2, add/3, add/4, delete/2, delete_ignoring_protection/2, exists/1, assert/1,
@ -40,7 +41,7 @@ recover() ->
{Time, _} = timer:tc(fun() -> {Time, _} = timer:tc(fun() ->
rabbit_binding:recover() rabbit_binding:recover()
end), end),
rabbit_log:debug("rabbit_binding:recover/0 completed in ~fs", [Time/1000000]), ?LOG_DEBUG("rabbit_binding:recover/0 completed in ~fs", [Time/1000000]),
%% rabbit_vhost_sup_sup will start the actual recovery. %% rabbit_vhost_sup_sup will start the actual recovery.
%% So recovery will be run every time a vhost supervisor is restarted. %% So recovery will be run every time a vhost supervisor is restarted.
@ -51,7 +52,7 @@ recover() ->
recover(VHost) -> recover(VHost) ->
VHostDir = msg_store_dir_path(VHost), VHostDir = msg_store_dir_path(VHost),
rabbit_log:info("Making sure data directory '~ts' for vhost '~ts' exists", ?LOG_INFO("Making sure data directory '~ts' for vhost '~ts' exists",
[VHostDir, VHost]), [VHostDir, VHost]),
VHostStubFile = filename:join(VHostDir, ".vhost"), VHostStubFile = filename:join(VHostDir, ".vhost"),
ok = rabbit_file:ensure_dir(VHostStubFile), ok = rabbit_file:ensure_dir(VHostStubFile),
@ -65,25 +66,25 @@ recover(VHost) ->
%% we need to add the default type to the metadata %% we need to add the default type to the metadata
case rabbit_db_vhost:get(VHost) of case rabbit_db_vhost:get(VHost) of
undefined -> undefined ->
rabbit_log:warning("Cannot check metadata for vhost '~ts' during recovery, record not found.", ?LOG_WARNING("Cannot check metadata for vhost '~ts' during recovery, record not found.",
[VHost]); [VHost]);
VHostRecord -> VHostRecord ->
Metadata = vhost:get_metadata(VHostRecord), Metadata = vhost:get_metadata(VHostRecord),
case maps:is_key(default_queue_type, Metadata) of case maps:is_key(default_queue_type, Metadata) of
true -> true ->
rabbit_log:debug("Default queue type for vhost '~ts' is ~p.", ?LOG_DEBUG("Default queue type for vhost '~ts' is ~p.",
[VHost, maps:get(default_queue_type, Metadata)]), [VHost, maps:get(default_queue_type, Metadata)]),
ok; ok;
false -> false ->
DefaultType = rabbit_queue_type:default_alias(), DefaultType = rabbit_queue_type:default_alias(),
rabbit_log:info("Setting missing default queue type to '~p' for vhost '~ts'.", ?LOG_INFO("Setting missing default queue type to '~p' for vhost '~ts'.",
[DefaultType, VHost]), [DefaultType, VHost]),
case rabbit_db_vhost:merge_metadata(VHost, #{default_queue_type => DefaultType}) of case rabbit_db_vhost:merge_metadata(VHost, #{default_queue_type => DefaultType}) of
{ok, _UpdatedVHostRecord} -> {ok, _UpdatedVHostRecord} ->
ok; ok;
{error, Reason} -> {error, Reason} ->
% Log the error but continue recovery % Log the error but continue recovery
rabbit_log:warning("Failed to set the default queue type for vhost '~ts': ~p", ?LOG_WARNING("Failed to set the default queue type for vhost '~ts': ~p",
[VHost, Reason]) [VHost, Reason])
end end
end end
@ -95,7 +96,7 @@ recover(VHost) ->
{Time, ok} = timer:tc(fun() -> {Time, ok} = timer:tc(fun() ->
rabbit_binding:recover(rabbit_exchange:recover(VHost), QNames) rabbit_binding:recover(rabbit_exchange:recover(VHost), QNames)
end), end),
rabbit_log:debug("rabbit_binding:recover/2 for vhost ~ts completed in ~fs", [VHost, Time/1000000]), ?LOG_DEBUG("rabbit_binding:recover/2 for vhost ~ts completed in ~fs", [VHost, Time/1000000]),
ok = rabbit_amqqueue:start(Recovered), ok = rabbit_amqqueue:start(Recovered),
ok. ok.
@ -124,7 +125,7 @@ ensure_config_file(VHost) ->
_ -> _ ->
?LEGACY_INDEX_SEGMENT_ENTRY_COUNT ?LEGACY_INDEX_SEGMENT_ENTRY_COUNT
end, end,
rabbit_log:info("Setting segment_entry_count for vhost '~ts' with ~b queues to '~b'", ?LOG_INFO("Setting segment_entry_count for vhost '~ts' with ~b queues to '~b'",
[VHost, length(QueueDirs), SegmentEntryCount]), [VHost, length(QueueDirs), SegmentEntryCount]),
file:write_file(Path, io_lib:format( file:write_file(Path, io_lib:format(
"%% This file is auto-generated! Edit at your own risk!~n" "%% This file is auto-generated! Edit at your own risk!~n"
@ -206,7 +207,7 @@ do_add(Name, Metadata0, ActingUser) ->
case Metadata of case Metadata of
#{default_queue_type := DQT} -> #{default_queue_type := DQT} ->
%% check that the queue type is known %% check that the queue type is known
rabbit_log:debug("Default queue type of virtual host '~ts' is ~tp", ?LOG_DEBUG("Default queue type of virtual host '~ts' is ~tp",
[Name, DQT]), [Name, DQT]),
try rabbit_queue_type:discover(DQT) of try rabbit_queue_type:discover(DQT) of
QueueType when is_atom(QueueType) -> QueueType when is_atom(QueueType) ->
@ -225,9 +226,9 @@ do_add(Name, Metadata0, ActingUser) ->
case Description of case Description of
undefined -> undefined ->
rabbit_log:info("Adding vhost '~ts' without a description", [Name]); ?LOG_INFO("Adding vhost '~ts' without a description", [Name]);
Description -> Description ->
rabbit_log:info("Adding vhost '~ts' (description: '~ts', tags: ~tp)", ?LOG_INFO("Adding vhost '~ts' (description: '~ts', tags: ~tp)",
[Name, Description, Tags]) [Name, Description, Tags])
end, end,
DefaultLimits = rabbit_db_vhost_defaults:list_limits(Name), DefaultLimits = rabbit_db_vhost_defaults:list_limits(Name),
@ -235,7 +236,7 @@ do_add(Name, Metadata0, ActingUser) ->
{NewOrNot, VHost} = rabbit_db_vhost:create_or_get(Name, DefaultLimits, Metadata), {NewOrNot, VHost} = rabbit_db_vhost:create_or_get(Name, DefaultLimits, Metadata),
case NewOrNot of case NewOrNot of
new -> new ->
rabbit_log:debug("Inserted a virtual host record ~tp", [VHost]); ?LOG_DEBUG("Inserted a virtual host record ~tp", [VHost]);
existing -> existing ->
ok ok
end, end,
@ -280,7 +281,7 @@ declare_default_exchanges(VHostName, ActingUser) ->
rabbit_misc:for_each_while_ok( rabbit_misc:for_each_while_ok(
fun({ExchangeName, Type, Internal}) -> fun({ExchangeName, Type, Internal}) ->
Resource = rabbit_misc:r(VHostName, exchange, ExchangeName), Resource = rabbit_misc:r(VHostName, exchange, ExchangeName),
rabbit_log:debug("Will declare an exchange ~tp", [Resource]), ?LOG_DEBUG("Will declare an exchange ~tp", [Resource]),
case rabbit_exchange:declare( case rabbit_exchange:declare(
Resource, Type, true, false, Internal, [], Resource, Type, true, false, Internal, [],
ActingUser) of ActingUser) of
@ -342,7 +343,7 @@ delete(Name, ActingUser) ->
case vhost:is_protected_from_deletion(VHost) of case vhost:is_protected_from_deletion(VHost) of
true -> true ->
Msg = "Refusing to delete virtual host '~ts' because it is protected from deletion", Msg = "Refusing to delete virtual host '~ts' because it is protected from deletion",
rabbit_log:debug(Msg, [Name]), ?LOG_DEBUG(Msg, [Name]),
{error, protected_from_deletion}; {error, protected_from_deletion};
false -> false ->
delete_ignoring_protection(Name, ActingUser) delete_ignoring_protection(Name, ActingUser)
@ -356,25 +357,25 @@ delete_ignoring_protection(Name, ActingUser) ->
%% process, which in turn results in further database actions and %% process, which in turn results in further database actions and
%% eventually the termination of that process. Exchange deletion causes %% eventually the termination of that process. Exchange deletion causes
%% notifications which must be sent outside the TX %% notifications which must be sent outside the TX
rabbit_log:info("Deleting vhost '~ts'", [Name]), ?LOG_INFO("Deleting vhost '~ts'", [Name]),
%% TODO: This code does a lot of "list resources, walk through the list to %% TODO: This code does a lot of "list resources, walk through the list to
%% delete each resource". This feature should be provided by each called %% delete each resource". This feature should be provided by each called
%% modules, like `rabbit_amqqueue:delete_all_for_vhost(VHost)'. These new %% modules, like `rabbit_amqqueue:delete_all_for_vhost(VHost)'. These new
%% calls would be responsible for the atomicity, not this code. %% calls would be responsible for the atomicity, not this code.
%% Clear the permissions first to prohibit new incoming connections when deleting a vhost %% Clear the permissions first to prohibit new incoming connections when deleting a vhost
rabbit_log:info("Clearing permissions in vhost '~ts' because it's being deleted", [Name]), ?LOG_INFO("Clearing permissions in vhost '~ts' because it's being deleted", [Name]),
ok = rabbit_auth_backend_internal:clear_all_permissions_for_vhost(Name, ActingUser), ok = rabbit_auth_backend_internal:clear_all_permissions_for_vhost(Name, ActingUser),
rabbit_log:info("Deleting queues in vhost '~ts' because it's being deleted", [Name]), ?LOG_INFO("Deleting queues in vhost '~ts' because it's being deleted", [Name]),
QDelFun = fun (Q) -> rabbit_amqqueue:delete(Q, false, false, ActingUser) end, QDelFun = fun (Q) -> rabbit_amqqueue:delete(Q, false, false, ActingUser) end,
[begin [begin
QName = amqqueue:get_name(Q), QName = amqqueue:get_name(Q),
assert_benign(rabbit_amqqueue:with(QName, QDelFun), ActingUser) assert_benign(rabbit_amqqueue:with(QName, QDelFun), ActingUser)
end || Q <- rabbit_amqqueue:list(Name)], end || Q <- rabbit_amqqueue:list(Name)],
rabbit_log:info("Deleting exchanges in vhost '~ts' because it's being deleted", [Name]), ?LOG_INFO("Deleting exchanges in vhost '~ts' because it's being deleted", [Name]),
ok = rabbit_exchange:delete_all(Name, ActingUser), ok = rabbit_exchange:delete_all(Name, ActingUser),
rabbit_log:info("Clearing policies and runtime parameters in vhost '~ts' because it's being deleted", [Name]), ?LOG_INFO("Clearing policies and runtime parameters in vhost '~ts' because it's being deleted", [Name]),
_ = rabbit_runtime_parameters:clear_vhost(Name, ActingUser), _ = rabbit_runtime_parameters:clear_vhost(Name, ActingUser),
rabbit_log:debug("Removing vhost '~ts' from the metadata storage because it's being deleted", [Name]), ?LOG_DEBUG("Removing vhost '~ts' from the metadata storage because it's being deleted", [Name]),
Ret = case rabbit_db_vhost:delete(Name) of Ret = case rabbit_db_vhost:delete(Name) of
true -> true ->
ok = rabbit_event:notify( ok = rabbit_event:notify(
@ -407,7 +408,7 @@ put_vhost(Name, Description, Tags0, DefaultQueueType, Trace, Username) ->
Other -> Other Other -> Other
end, end,
ParsedTags = parse_tags(Tags), ParsedTags = parse_tags(Tags),
rabbit_log:debug("Parsed virtual host tags ~tp to ~tp", [Tags, ParsedTags]), ?LOG_DEBUG("Parsed virtual host tags ~tp to ~tp", [Tags, ParsedTags]),
Result = case exists(Name) of Result = case exists(Name) of
true -> true ->
update(Name, Description, ParsedTags, DefaultQueueType, Username); update(Name, Description, ParsedTags, DefaultQueueType, Username);
@ -451,7 +452,7 @@ is_over_vhost_limit(Name, Limit) when is_integer(Limit) ->
ErrorMsg = rabbit_misc:format("cannot create vhost '~ts': " ErrorMsg = rabbit_misc:format("cannot create vhost '~ts': "
"vhost limit of ~tp is reached", "vhost limit of ~tp is reached",
[Name, Limit]), [Name, Limit]),
rabbit_log:error(ErrorMsg), ?LOG_ERROR(ErrorMsg),
exit({vhost_limit_exceeded, ErrorMsg}) exit({vhost_limit_exceeded, ErrorMsg})
end. end.
@ -510,7 +511,7 @@ vhost_cluster_state(VHost) ->
Nodes). Nodes).
vhost_down(VHost) -> vhost_down(VHost) ->
rabbit_log:info("Virtual host '~ts' is stopping", [VHost]), ?LOG_INFO("Virtual host '~ts' is stopping", [VHost]),
ok = rabbit_event:notify(vhost_down, ok = rabbit_event:notify(vhost_down,
[{name, VHost}, [{name, VHost},
{node, node()}, {node, node()},
@ -518,16 +519,16 @@ vhost_down(VHost) ->
delete_storage(VHost) -> delete_storage(VHost) ->
VhostDir = msg_store_dir_path(VHost), VhostDir = msg_store_dir_path(VHost),
rabbit_log:info("Deleting message store directory for vhost '~ts' at '~ts'", [VHost, VhostDir]), ?LOG_INFO("Deleting message store directory for vhost '~ts' at '~ts'", [VHost, VhostDir]),
%% Message store should be closed when vhost supervisor is closed. %% Message store should be closed when vhost supervisor is closed.
case rabbit_file:recursive_delete([VhostDir]) of case rabbit_file:recursive_delete([VhostDir]) of
ok -> ok; ok -> ok;
{error, {_, enoent}} -> {error, {_, enoent}} ->
%% a concurrent delete did the job for us %% a concurrent delete did the job for us
rabbit_log:warning("Tried to delete storage directories for vhost '~ts', it failed with an ENOENT", [VHost]), ?LOG_WARNING("Tried to delete storage directories for vhost '~ts', it failed with an ENOENT", [VHost]),
ok; ok;
Other -> Other ->
rabbit_log:warning("Tried to delete storage directories for vhost '~ts': ~tp", [VHost, Other]), ?LOG_WARNING("Tried to delete storage directories for vhost '~ts': ~tp", [VHost, Other]),
Other Other
end. end.
@ -642,7 +643,7 @@ update_tags(VHostName, Tags, ActingUser) ->
end, end,
VHost = rabbit_db_vhost:set_tags(VHostName, Tags), VHost = rabbit_db_vhost:set_tags(VHostName, Tags),
ConvertedTags = vhost:get_tags(VHost), ConvertedTags = vhost:get_tags(VHost),
rabbit_log:info("Successfully set tags for virtual host '~ts' to ~tp", [VHostName, ConvertedTags]), ?LOG_INFO("Successfully set tags for virtual host '~ts' to ~tp", [VHostName, ConvertedTags]),
rabbit_event:notify_if(are_different(CurrentTags, ConvertedTags), rabbit_event:notify_if(are_different(CurrentTags, ConvertedTags),
vhost_tags_set, [{name, VHostName}, vhost_tags_set, [{name, VHostName},
{tags, ConvertedTags}, {tags, ConvertedTags},
@ -650,13 +651,13 @@ update_tags(VHostName, Tags, ActingUser) ->
VHost VHost
catch catch
throw:{error, {no_such_vhost, _}} = Error -> throw:{error, {no_such_vhost, _}} = Error ->
rabbit_log:warning("Failed to set tags for virtual host '~ts': the virtual host does not exist", [VHostName]), ?LOG_WARNING("Failed to set tags for virtual host '~ts': the virtual host does not exist", [VHostName]),
throw(Error); throw(Error);
throw:Error -> throw:Error ->
rabbit_log:warning("Failed to set tags for virtual host '~ts': ~tp", [VHostName, Error]), ?LOG_WARNING("Failed to set tags for virtual host '~ts': ~tp", [VHostName, Error]),
throw(Error); throw(Error);
exit:Error -> exit:Error ->
rabbit_log:warning("Failed to set tags for virtual host '~ts': ~tp", [VHostName, Error]), ?LOG_WARNING("Failed to set tags for virtual host '~ts': ~tp", [VHostName, Error]),
exit(Error) exit(Error)
end. end.
@ -718,7 +719,7 @@ i(metadata, VHost) ->
M#{default_queue_type => DQT} M#{default_queue_type => DQT}
end; end;
i(Item, VHost) -> i(Item, VHost) ->
rabbit_log:error("Don't know how to compute a virtual host info item '~ts' for virtual host '~tp'", [Item, VHost]), ?LOG_ERROR("Don't know how to compute a virtual host info item '~ts' for virtual host '~tp'", [Item, VHost]),
throw({bad_argument, Item}). throw({bad_argument, Item}).
-spec info(vhost:vhost() | vhost:name()) -> rabbit_types:infos(). -spec info(vhost:vhost() | vhost:name()) -> rabbit_types:infos().

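update_tags/3 above wraps the database update in try/catch, logs the failure with the vhost name for context, and then re-raises the original error so callers still see it. A reduced sketch of that catch-log-rethrow pattern (the update itself is abstracted behind a fun here):

%% Sketch: the warning adds context, the rethrow preserves the original
%% throw or exit for the caller.
-module(example_log_and_rethrow).

-include_lib("kernel/include/logger.hrl").

-export([with_logged_errors/2]).

with_logged_errors(VHostName, Update) when is_function(Update, 0) ->
    try
        Update()
    catch
        throw:Error ->
            ?LOG_WARNING("Failed to set tags for virtual host '~ts': ~tp",
                         [VHostName, Error]),
            throw(Error);
        exit:Error ->
            ?LOG_WARNING("Failed to set tags for virtual host '~ts': ~tp",
                         [VHostName, Error]),
            exit(Error)
    end.
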
View File

@ -8,6 +8,7 @@
-module(rabbit_vhost_msg_store). -module(rabbit_vhost_msg_store).
-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-export([start/4, stop/2, client_init/4, successfully_recovered_state/2]). -export([start/4, stop/2, client_init/4, successfully_recovered_state/2]).
-export([vhost_store_pid/2]). -export([vhost_store_pid/2]).
@ -25,7 +26,7 @@ start(VHost, Type, ClientRefs, StartupFunState) when is_list(ClientRefs);
%% we can get here if a vhost is added and removed concurrently %% we can get here if a vhost is added and removed concurrently
%% e.g. some integration tests do it %% e.g. some integration tests do it
{error, {no_such_vhost, VHost}} = E -> {error, {no_such_vhost, VHost}} = E ->
rabbit_log:error("Failed to start a message store for vhost ~ts: vhost no longer exists!", ?LOG_ERROR("Failed to start a message store for vhost ~ts: vhost no longer exists!",
[VHost]), [VHost]),
E E
end. end.
@ -37,7 +38,7 @@ stop(VHost, Type) ->
ok = supervisor:delete_child(VHostSup, Type); ok = supervisor:delete_child(VHostSup, Type);
%% see start/4 %% see start/4
{error, {no_such_vhost, VHost}} -> {error, {no_such_vhost, VHost}} ->
rabbit_log:error("Failed to stop a message store for vhost ~ts: vhost no longer exists!", ?LOG_ERROR("Failed to stop a message store for vhost ~ts: vhost no longer exists!",
[VHost]), [VHost]),
ok ok

View File

@ -21,6 +21,9 @@
-module(rabbit_vhost_process). -module(rabbit_vhost_process).
-include_lib("kernel/include/logger.hrl").
-define(VHOST_CHECK_INTERVAL, 5000). -define(VHOST_CHECK_INTERVAL, 5000).
-behaviour(gen_server2). -behaviour(gen_server2).
@ -35,7 +38,7 @@ start_link(VHost) ->
init([VHost]) -> init([VHost]) ->
process_flag(trap_exit, true), process_flag(trap_exit, true),
rabbit_log:debug("Recovering data for virtual host ~ts", [VHost]), ?LOG_DEBUG("Recovering data for virtual host ~ts", [VHost]),
try try
%% Recover the vhost data and save it to vhost registry. %% Recover the vhost data and save it to vhost registry.
ok = rabbit_vhost:recover(VHost), ok = rabbit_vhost:recover(VHost),
@ -45,7 +48,7 @@ init([VHost]) ->
{ok, VHost} {ok, VHost}
catch _:Reason:Stacktrace -> catch _:Reason:Stacktrace ->
rabbit_amqqueue:mark_local_durable_queues_stopped(VHost), rabbit_amqqueue:mark_local_durable_queues_stopped(VHost),
rabbit_log:error("Unable to recover vhost ~tp data. Reason ~tp~n" ?LOG_ERROR("Unable to recover vhost ~tp data. Reason ~tp~n"
" Stacktrace ~tp", " Stacktrace ~tp",
[VHost, Reason, Stacktrace]), [VHost, Reason, Stacktrace]),
{stop, Reason} {stop, Reason}
@ -61,7 +64,7 @@ handle_info(check_vhost, VHost) ->
case rabbit_vhost:exists(VHost) of case rabbit_vhost:exists(VHost) of
true -> {noreply, VHost}; true -> {noreply, VHost};
false -> false ->
rabbit_log:warning("Virtual host '~ts' is gone. " ?LOG_WARNING("Virtual host '~ts' is gone. "
"Stopping its top level supervisor.", "Stopping its top level supervisor.",
[VHost]), [VHost]),
%% Stop vhost's top supervisor in a one-off process to avoid a deadlock: %% Stop vhost's top supervisor in a one-off process to avoid a deadlock:

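init/1 above recovers the vhost inside try/catch and, on failure, logs the reason together with the stacktrace bound in the catch clause. A reduced sketch with the recovery step abstracted behind a fun (assumed):

%% Sketch: _:Reason:Stacktrace binds the stacktrace directly in the catch
%% clause (OTP 21+), and both are handed to the macro unchanged.
-module(example_recover_log).

-include_lib("kernel/include/logger.hrl").

-export([recover/2]).

recover(VHost, RecoverFun) when is_function(RecoverFun, 0) ->
    ?LOG_DEBUG("Recovering data for virtual host ~ts", [VHost]),
    try
        ok = RecoverFun(),
        {ok, VHost}
    catch
        _:Reason:Stacktrace ->
            ?LOG_ERROR("Unable to recover vhost ~tp data. Reason ~tp~n"
                       " Stacktrace ~tp",
                       [VHost, Reason, Stacktrace]),
            {stop, Reason}
    end.
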
View File

@ -8,6 +8,7 @@
-module(rabbit_vhost_sup_sup). -module(rabbit_vhost_sup_sup).
-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-behaviour(supervisor). -behaviour(supervisor).
@ -79,18 +80,18 @@ delete_on_all_nodes(VHost) ->
stop_and_delete_vhost(VHost) -> stop_and_delete_vhost(VHost) ->
StopResult = case lookup_vhost_sup_record(VHost) of StopResult = case lookup_vhost_sup_record(VHost) of
not_found -> not_found ->
rabbit_log:warning("Supervisor for vhost '~ts' not found during deletion procedure", ?LOG_WARNING("Supervisor for vhost '~ts' not found during deletion procedure",
[VHost]), [VHost]),
ok; ok;
#vhost_sup{wrapper_pid = WrapperPid, #vhost_sup{wrapper_pid = WrapperPid,
vhost_sup_pid = VHostSupPid} -> vhost_sup_pid = VHostSupPid} ->
case is_process_alive(WrapperPid) of case is_process_alive(WrapperPid) of
false -> false ->
rabbit_log:info("Supervisor ~tp for vhost '~ts' already stopped", ?LOG_INFO("Supervisor ~tp for vhost '~ts' already stopped",
[VHostSupPid, VHost]), [VHostSupPid, VHost]),
ok; ok;
true -> true ->
rabbit_log:info("Stopping vhost supervisor ~tp" ?LOG_INFO("Stopping vhost supervisor ~tp"
" for vhost '~ts'", " for vhost '~ts'",
[VHostSupPid, VHost]), [VHostSupPid, VHost]),
case supervisor:terminate_child(?MODULE, WrapperPid) of case supervisor:terminate_child(?MODULE, WrapperPid) of
@ -112,7 +113,7 @@ stop_and_delete_vhost(VHost, Node) ->
case rabbit_misc:rpc_call(Node, rabbit_vhost_sup_sup, stop_and_delete_vhost, [VHost]) of case rabbit_misc:rpc_call(Node, rabbit_vhost_sup_sup, stop_and_delete_vhost, [VHost]) of
ok -> ok; ok -> ok;
{badrpc, RpcErr} -> {badrpc, RpcErr} ->
rabbit_log:error("Failed to stop and delete a vhost ~tp" ?LOG_ERROR("Failed to stop and delete a vhost ~tp"
" on node ~tp." " on node ~tp."
" Reason: ~tp", " Reason: ~tp",
[VHost, Node, RpcErr]), [VHost, Node, RpcErr]),
@ -124,7 +125,7 @@ init_vhost(VHost) ->
case start_vhost(VHost) of case start_vhost(VHost) of
{ok, _} -> ok; {ok, _} -> ok;
{error, {already_started, _}} -> {error, {already_started, _}} ->
rabbit_log:warning( ?LOG_WARNING(
"Attempting to start an already started vhost '~ts'.", "Attempting to start an already started vhost '~ts'.",
[VHost]), [VHost]),
ok; ok;
@ -133,13 +134,13 @@ init_vhost(VHost) ->
{error, Reason} -> {error, Reason} ->
case vhost_restart_strategy() of case vhost_restart_strategy() of
permanent -> permanent ->
rabbit_log:error( ?LOG_ERROR(
"Unable to initialize vhost data store for vhost '~ts'." "Unable to initialize vhost data store for vhost '~ts'."
" Reason: ~tp", " Reason: ~tp",
[VHost, Reason]), [VHost, Reason]),
throw({error, Reason}); throw({error, Reason});
transient -> transient ->
rabbit_log:warning( ?LOG_WARNING(
"Unable to initialize vhost data store for vhost '~ts'." "Unable to initialize vhost data store for vhost '~ts'."
" The vhost will be stopped for this node. " " The vhost will be stopped for this node. "
" Reason: ~tp", " Reason: ~tp",

View File

@ -9,6 +9,9 @@
%% several others virtual hosts-related modules. %% several others virtual hosts-related modules.
-module(rabbit_vhosts). -module(rabbit_vhosts).
-include_lib("kernel/include/logger.hrl").
-define(PERSISTENT_TERM_COUNTER_KEY, rabbit_vhosts_reconciliation_run_counter). -define(PERSISTENT_TERM_COUNTER_KEY, rabbit_vhosts_reconciliation_run_counter).
%% API %% API
@ -63,11 +66,11 @@ reconcile() ->
%% See start_processes_for_all/1. %% See start_processes_for_all/1.
-spec reconcile_once() -> 'ok'. -spec reconcile_once() -> 'ok'.
reconcile_once() -> reconcile_once() ->
rabbit_log:debug("Will reconcile virtual host processes on all cluster members..."), ?LOG_DEBUG("Will reconcile virtual host processes on all cluster members..."),
_ = start_processes_for_all(), _ = start_processes_for_all(),
_ = increment_run_counter(), _ = increment_run_counter(),
N = get_run_counter(), N = get_run_counter(),
rabbit_log:debug("Done with virtual host processes reconciliation (run ~tp)", [N]), ?LOG_DEBUG("Done with virtual host processes reconciliation (run ~tp)", [N]),
ok. ok.
-spec on_node_up(Node :: node()) -> 'ok'. -spec on_node_up(Node :: node()) -> 'ok'.
@ -77,7 +80,7 @@ on_node_up(_Node) ->
true -> true ->
DelayInSeconds = 10, DelayInSeconds = 10,
Delay = DelayInSeconds * 1000, Delay = DelayInSeconds * 1000,
rabbit_log:debug("Will reschedule virtual host process reconciliation after ~b seconds", [DelayInSeconds]), ?LOG_DEBUG("Will reschedule virtual host process reconciliation after ~b seconds", [DelayInSeconds]),
_ = timer:apply_after(Delay, ?MODULE, reconcile_once, []), _ = timer:apply_after(Delay, ?MODULE, reconcile_once, []),
ok ok
end. end.
@ -111,13 +114,13 @@ reconciliation_interval() ->
start_processes_for_all(Nodes) -> start_processes_for_all(Nodes) ->
Names = list_names(), Names = list_names(),
N = length(Names), N = length(Names),
rabbit_log:debug("Will make sure that processes of ~p virtual hosts are running on all reachable cluster nodes", [N]), ?LOG_DEBUG("Will make sure that processes of ~p virtual hosts are running on all reachable cluster nodes", [N]),
[begin [begin
try try
start_on_all_nodes(VH, Nodes) start_on_all_nodes(VH, Nodes)
catch catch
_:Err:_Stacktrace -> _:Err:_Stacktrace ->
rabbit_log:error("Could not reconcile virtual host ~ts: ~tp", [VH, Err]) ?LOG_ERROR("Could not reconcile virtual host ~ts: ~tp", [VH, Err])
end end
end || VH <- Names], end || VH <- Names],
ok. ok.
@ -153,14 +156,14 @@ maybe_start_timer(FunName) ->
case N >= 10 of case N >= 10 of
true -> true ->
%% Stop after ten runs %% Stop after ten runs
rabbit_log:debug("Will stop virtual host process reconciliation after ~tp runs", [N]), ?LOG_DEBUG("Will stop virtual host process reconciliation after ~tp runs", [N]),
ok; ok;
false -> false ->
case is_reconciliation_enabled() of case is_reconciliation_enabled() of
false -> ok; false -> ok;
true -> true ->
Delay = DelayInSeconds * 1000, Delay = DelayInSeconds * 1000,
rabbit_log:debug("Will reschedule virtual host process reconciliation after ~b seconds", [DelayInSeconds]), ?LOG_DEBUG("Will reschedule virtual host process reconciliation after ~b seconds", [DelayInSeconds]),
timer:apply_after(Delay, ?MODULE, FunName, []) timer:apply_after(Delay, ?MODULE, FunName, [])
end end
end. end.

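The reconciliation code above logs the delay in seconds but hands milliseconds to timer:apply_after/4. A self-contained sketch of that rescheduling step (module name assumed; the real code schedules ?MODULE:reconcile_once/0 in the same way):

%% Sketch: the human-facing log line uses seconds, the timer API wants
%% milliseconds, so the conversion happens once up front.
-module(example_reschedule).

-include_lib("kernel/include/logger.hrl").

-export([reschedule/1, reconcile_once/0]).

reschedule(DelayInSeconds) ->
    Delay = DelayInSeconds * 1000,
    ?LOG_DEBUG("Will reschedule virtual host process reconciliation after ~b seconds",
               [DelayInSeconds]),
    {ok, _TRef} = timer:apply_after(Delay, ?MODULE, reconcile_once, []),
    ok.

reconcile_once() ->
    ?LOG_DEBUG("Done with virtual host processes reconciliation"),
    ok.
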
View File

@ -55,6 +55,7 @@
proc_file = undefined}). proc_file = undefined}).
-include("include/rabbit_memory.hrl"). -include("include/rabbit_memory.hrl").
-include_lib("kernel/include/logger.hrl").
%%---------------------------------------------------------------------------- %%----------------------------------------------------------------------------
@ -89,7 +90,7 @@ get_total_memory() ->
{ok, ParsedTotal} -> {ok, ParsedTotal} ->
ParsedTotal; ParsedTotal;
{error, parse_error} -> {error, parse_error} ->
rabbit_log:warning( ?LOG_WARNING(
"The override value for the total memmory available is " "The override value for the total memmory available is "
"not a valid value: ~tp, getting total from the system.", "not a valid value: ~tp, getting total from the system.",
[Value]), [Value]),
@ -163,7 +164,7 @@ get_memory_calculation_strategy() ->
legacy -> erlang; %% backwards compatibility legacy -> erlang; %% backwards compatibility
rss -> rss; rss -> rss;
UnsupportedValue -> UnsupportedValue ->
rabbit_log:warning( ?LOG_WARNING(
"Unsupported value '~tp' for vm_memory_calculation_strategy. " "Unsupported value '~tp' for vm_memory_calculation_strategy. "
"Supported values: (allocated|erlang|legacy|rss). " "Supported values: (allocated|erlang|legacy|rss). "
"Defaulting to 'rss'", "Defaulting to 'rss'",
@ -252,7 +253,7 @@ get_cached_process_memory_and_limit() ->
try try
gen_server:call(?MODULE, get_cached_process_memory_and_limit, infinity) gen_server:call(?MODULE, get_cached_process_memory_and_limit, infinity)
catch exit:{noproc, Error} -> catch exit:{noproc, Error} ->
rabbit_log:warning("Memory monitor process not yet started: ~tp", [Error]), ?LOG_WARNING("Memory monitor process not yet started: ~tp", [Error]),
ProcessMemory = get_process_memory_uncached(), ProcessMemory = get_process_memory_uncached(),
{ProcessMemory, infinity} {ProcessMemory, infinity}
end. end.
@ -306,7 +307,7 @@ get_total_memory_from_os() ->
try try
get_total_memory(os:type()) get_total_memory(os:type())
catch _:Error:Stacktrace -> catch _:Error:Stacktrace ->
rabbit_log:warning( ?LOG_WARNING(
"Failed to get total system memory: ~n~tp~n~tp", "Failed to get total system memory: ~n~tp~n~tp",
[Error, Stacktrace]), [Error, Stacktrace]),
unknown unknown
@ -317,7 +318,7 @@ set_mem_limits(State, {relative, MemLimit}) ->
set_mem_limits(State, MemLimit) -> set_mem_limits(State, MemLimit) ->
case erlang:system_info(wordsize) of case erlang:system_info(wordsize) of
4 -> 4 ->
rabbit_log:warning( ?LOG_WARNING(
"You are using a 32-bit version of Erlang: you may run into " "You are using a 32-bit version of Erlang: you may run into "
"memory address~n" "memory address~n"
"space exhaustion or statistic counters overflow.~n"); "space exhaustion or statistic counters overflow.~n");
@ -330,7 +331,7 @@ set_mem_limits(State, MemLimit) ->
case State of case State of
#state { total_memory = undefined, #state { total_memory = undefined,
memory_limit = undefined } -> memory_limit = undefined } ->
rabbit_log:warning( ?LOG_WARNING(
"Unknown total memory size for your OS ~tp. " "Unknown total memory size for your OS ~tp. "
"Assuming memory size is ~tp MiB (~tp bytes).", "Assuming memory size is ~tp MiB (~tp bytes).",
[os:type(), [os:type(),
@ -345,7 +346,7 @@ set_mem_limits(State, MemLimit) ->
UsableMemory = UsableMemory =
case get_vm_limit() of case get_vm_limit() of
Limit when Limit < TotalMemory -> Limit when Limit < TotalMemory ->
rabbit_log:warning( ?LOG_WARNING(
"Only ~tp MiB (~tp bytes) of ~tp MiB (~tp bytes) memory usable due to " "Only ~tp MiB (~tp bytes) of ~tp MiB (~tp bytes) memory usable due to "
"limited address space.~n" "limited address space.~n"
"Crashes due to memory exhaustion are possible - see~n" "Crashes due to memory exhaustion are possible - see~n"
@ -357,7 +358,7 @@ set_mem_limits(State, MemLimit) ->
TotalMemory TotalMemory
end, end,
MemLim = interpret_limit(parse_mem_limit(MemLimit), UsableMemory), MemLim = interpret_limit(parse_mem_limit(MemLimit), UsableMemory),
rabbit_log:info( ?LOG_INFO(
"Memory high watermark set to ~tp MiB (~tp bytes)" "Memory high watermark set to ~tp MiB (~tp bytes)"
" of ~tp MiB (~tp bytes) total", " of ~tp MiB (~tp bytes) total",
[trunc(MemLim/?ONE_MiB), MemLim, [trunc(MemLim/?ONE_MiB), MemLim,
@ -381,7 +382,7 @@ parse_mem_limit({absolute, Limit}) ->
case rabbit_resource_monitor_misc:parse_information_unit(Limit) of case rabbit_resource_monitor_misc:parse_information_unit(Limit) of
{ok, ParsedLimit} -> {absolute, ParsedLimit}; {ok, ParsedLimit} -> {absolute, ParsedLimit};
{error, parse_error} -> {error, parse_error} ->
rabbit_log:error("Unable to parse vm_memory_high_watermark value ~tp", [Limit]), ?LOG_ERROR("Unable to parse vm_memory_high_watermark value ~tp", [Limit]),
?DEFAULT_VM_MEMORY_HIGH_WATERMARK ?DEFAULT_VM_MEMORY_HIGH_WATERMARK
end; end;
parse_mem_limit({relative, MemLimit}) -> parse_mem_limit({relative, MemLimit}) ->
@ -391,13 +392,13 @@ parse_mem_limit(MemLimit) when is_integer(MemLimit) ->
parse_mem_limit(MemLimit) when is_float(MemLimit), MemLimit =< ?MAX_VM_MEMORY_HIGH_WATERMARK -> parse_mem_limit(MemLimit) when is_float(MemLimit), MemLimit =< ?MAX_VM_MEMORY_HIGH_WATERMARK ->
MemLimit; MemLimit;
parse_mem_limit(MemLimit) when is_float(MemLimit), MemLimit > ?MAX_VM_MEMORY_HIGH_WATERMARK -> parse_mem_limit(MemLimit) when is_float(MemLimit), MemLimit > ?MAX_VM_MEMORY_HIGH_WATERMARK ->
rabbit_log:warning( ?LOG_WARNING(
"Memory high watermark of ~tp is above the allowed maximum, falling back to ~tp", "Memory high watermark of ~tp is above the allowed maximum, falling back to ~tp",
[MemLimit, ?MAX_VM_MEMORY_HIGH_WATERMARK] [MemLimit, ?MAX_VM_MEMORY_HIGH_WATERMARK]
), ),
?MAX_VM_MEMORY_HIGH_WATERMARK; ?MAX_VM_MEMORY_HIGH_WATERMARK;
parse_mem_limit(MemLimit) -> parse_mem_limit(MemLimit) ->
rabbit_log:warning( ?LOG_WARNING(
"Memory high watermark of ~tp is invalid, defaulting to ~tp", "Memory high watermark of ~tp is invalid, defaulting to ~tp",
[MemLimit, ?DEFAULT_VM_MEMORY_HIGH_WATERMARK] [MemLimit, ?DEFAULT_VM_MEMORY_HIGH_WATERMARK]
), ),
@ -419,7 +420,7 @@ internal_update(State0 = #state{memory_limit = MemLimit,
State1#state{alarmed = NewAlarmed}. State1#state{alarmed = NewAlarmed}.
emit_update_info(AlarmState, MemUsed, MemLimit) -> emit_update_info(AlarmState, MemUsed, MemLimit) ->
rabbit_log:info( ?LOG_INFO(
"vm_memory_high_watermark ~tp. Memory used:~tp allowed:~tp", "vm_memory_high_watermark ~tp. Memory used:~tp allowed:~tp",
[AlarmState, MemUsed, MemLimit]). [AlarmState, MemUsed, MemLimit]).
@ -458,7 +459,7 @@ cmd(Command, ThrowIfMissing) ->
end. end.
default_linux_pagesize(CmdOutput) -> default_linux_pagesize(CmdOutput) ->
rabbit_log:warning( ?LOG_WARNING(
"Failed to get memory page size, using 4096. Reason: ~ts", "Failed to get memory page size, using 4096. Reason: ~ts",
[CmdOutput]), [CmdOutput]),
4096. 4096.
@ -583,7 +584,7 @@ sysctl(Def) ->
list_to_integer(R) list_to_integer(R)
catch catch
error:badarg -> error:badarg ->
rabbit_log:debug("Failed to get total system memory: ~tp", [R]), ?LOG_DEBUG("Failed to get total system memory: ~tp", [R]),
unknown unknown
end. end.

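get_cached_process_memory_and_limit/0 above falls back to an uncached computation when the monitor process is not running yet, logging the noproc exit at warning level. A reduced sketch with the server name and the fallback passed in (both assumptions here):

%% Sketch: the call either returns the cached pair or, if the server is not
%% up, logs the exit reason and computes a fresh value with no limit.
-module(example_cached_call).

-include_lib("kernel/include/logger.hrl").

-export([cached_or_direct/2]).

cached_or_direct(Server, Fallback) when is_function(Fallback, 0) ->
    try
        gen_server:call(Server, get_cached_process_memory_and_limit, infinity)
    catch
        exit:{noproc, Error} ->
            ?LOG_WARNING("Memory monitor process not yet started: ~tp", [Error]),
            {Fallback(), infinity}
    end.
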
View File

@ -12,6 +12,7 @@
-include_lib("kernel/include/file.hrl"). -include_lib("kernel/include/file.hrl").
-include_lib("amqp_client/include/amqp_client.hrl"). -include_lib("amqp_client/include/amqp_client.hrl").
-include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl"). -include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl").
-include_lib("kernel/include/logger.hrl").
-compile(export_all). -compile(export_all).
@ -227,7 +228,7 @@ non_empty_files(Files) ->
end || EmptyFile <- empty_files(Files)]. end || EmptyFile <- empty_files(Files)].
test_logs_working(LogFiles) -> test_logs_working(LogFiles) ->
ok = rabbit_log:error("Log a test message"), ok = ?LOG_ERROR("Log a test message"),
%% give the error loggers some time to catch up %% give the error loggers some time to catch up
?awaitMatch(true, ?awaitMatch(true,
lists:all(fun(LogFile) -> [true] =:= non_empty_files([LogFile]) end, LogFiles), lists:all(fun(LogFile) -> [true] =:= non_empty_files([LogFile]) end, LogFiles),

View File

@ -534,7 +534,7 @@ shortstr_size(S) ->
for (c,v,cls) in spec.constants: genLookupException(c,v,cls) for (c,v,cls) in spec.constants: genLookupException(c,v,cls)
print("lookup_amqp_exception(Code) ->") print("lookup_amqp_exception(Code) ->")
print(" rabbit_log:warning(\"Unknown AMQP error code '~p'~n\", [Code]),") print(" ?LOG_WARNING(\"Unknown AMQP error code '~p'~n\", [Code]),")
print(" {true, ?INTERNAL_ERROR, <<\"INTERNAL_ERROR\">>}.") print(" {true, ?INTERNAL_ERROR, <<\"INTERNAL_ERROR\">>}.")
for(c,v,cls) in spec.constants: genAmqpException(c,v,cls) for(c,v,cls) in spec.constants: genAmqpException(c,v,cls)

View File

@ -7,6 +7,9 @@
-module(app_utils). -module(app_utils).
-include_lib("kernel/include/logger.hrl").
-export([load_applications/1, -export([load_applications/1,
start_applications/1, start_applications/2, start_applications/3, start_applications/1, start_applications/2, start_applications/3,
stop_applications/1, stop_applications/2, app_dependency_order/2, stop_applications/1, stop_applications/2, app_dependency_order/2,
@ -61,7 +64,7 @@ start_applications(Apps, ErrorHandler, RestartTypes) ->
stop_applications(Apps, ErrorHandler) -> stop_applications(Apps, ErrorHandler) ->
manage_applications(fun lists:foldr/3, manage_applications(fun lists:foldr/3,
fun(App) -> fun(App) ->
rabbit_log:info("Stopping application '~ts'", [App]), ?LOG_INFO("Stopping application '~ts'", [App]),
application:stop(App) application:stop(App)
end, end,
fun(App) -> ensure_all_started(App, #{}) end, fun(App) -> ensure_all_started(App, #{}) end,

View File

@ -7,6 +7,9 @@
-module(rabbit_amqp_connection). -module(rabbit_amqp_connection).
-include_lib("kernel/include/logger.hrl").
-export([amqp_params/2]). -export([amqp_params/2]).
-spec amqp_params(pid(), timeout()) -> [{atom(), term()}]. -spec amqp_params(pid(), timeout()) -> [{atom(), term()}].
@ -14,11 +17,11 @@ amqp_params(ConnPid, Timeout) ->
P = try P = try
gen_server:call(ConnPid, {info, [amqp_params]}, Timeout) gen_server:call(ConnPid, {info, [amqp_params]}, Timeout)
catch exit:{noproc, Error} -> catch exit:{noproc, Error} ->
rabbit_log:debug("file ~tp, line ~tp - connection process ~tp not alive: ~tp", ?LOG_DEBUG("file ~tp, line ~tp - connection process ~tp not alive: ~tp",
[?FILE, ?LINE, ConnPid, Error]), [?FILE, ?LINE, ConnPid, Error]),
[]; [];
_:Error -> _:Error ->
rabbit_log:debug("file ~tp, line ~tp - failed to get amqp_params from connection process ~tp: ~tp", ?LOG_DEBUG("file ~tp, line ~tp - failed to get amqp_params from connection process ~tp: ~tp",
[?FILE, ?LINE, ConnPid, Error]), [?FILE, ?LINE, ConnPid, Error]),
[] []
end, end,

View File

@ -8,6 +8,7 @@
-module(rabbit_binary_generator). -module(rabbit_binary_generator).
-include("rabbit_framing.hrl"). -include("rabbit_framing.hrl").
-include("rabbit.hrl"). -include("rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-export([build_simple_method_frame/3, -export([build_simple_method_frame/3,
build_simple_content_frames/4, build_simple_content_frames/4,
@ -223,7 +224,7 @@ lookup_amqp_exception(#amqp_error{name = Name,
ExplBin = amqp_exception_explanation(Text, Expl), ExplBin = amqp_exception_explanation(Text, Expl),
{ShouldClose, Code, ExplBin, Method}; {ShouldClose, Code, ExplBin, Method};
lookup_amqp_exception(Other, Protocol) -> lookup_amqp_exception(Other, Protocol) ->
rabbit_log:warning("Non-AMQP exit reason '~tp'", [Other]), ?LOG_WARNING("Non-AMQP exit reason '~tp'", [Other]),
{ShouldClose, Code, Text} = Protocol:lookup_amqp_exception(internal_error), {ShouldClose, Code, Text} = Protocol:lookup_amqp_exception(internal_error),
{ShouldClose, Code, Text, none}. {ShouldClose, Code, Text, none}.

View File

@ -1724,7 +1724,7 @@ collect_conf_env_file_output(Context, Port, Marker, Output) ->
collect_conf_env_file_output( collect_conf_env_file_output(
Context, Port, Marker, [Output, UnicodeChunk]); Context, Port, Marker, [Output, UnicodeChunk]);
{Port, {data, Chunk}} -> {Port, {data, Chunk}} ->
rabbit_log:warning("~tp unexpected non-binary chunk in " ?LOG_WARNING("~tp unexpected non-binary chunk in "
"conf env file output: ~tp~n", [?MODULE, Chunk]) "conf env file output: ~tp~n", [?MODULE, Chunk])
end. end.
@ -2157,5 +2157,5 @@ unicode_characters_to_list(Input) ->
end. end.
log_characters_to_list_error(Input, Partial, Rest) -> log_characters_to_list_error(Input, Partial, Rest) ->
rabbit_log:error("error converting '~tp' to unicode string " ?LOG_ERROR("error converting '~tp' to unicode string "
"(partial '~tp', rest '~tp')", [Input, Partial, Rest]). "(partial '~tp', rest '~tp')", [Input, Partial, Rest]).

View File

@ -8,6 +8,7 @@
%% %%
-module(rabbit_framing_amqp_0_8). -module(rabbit_framing_amqp_0_8).
-include("rabbit_framing.hrl"). -include("rabbit_framing.hrl").
-include_lib("kernel/include/logger.hrl").
-export([version/0]). -export([version/0]).
-export([lookup_method_name/1]). -export([lookup_method_name/1]).
@ -1626,7 +1627,7 @@ lookup_amqp_exception(not_allowed) -> {true, ?NOT_ALLOWED, <<"NOT_ALLOWED">>};
lookup_amqp_exception(not_implemented) -> {true, ?NOT_IMPLEMENTED, <<"NOT_IMPLEMENTED">>}; lookup_amqp_exception(not_implemented) -> {true, ?NOT_IMPLEMENTED, <<"NOT_IMPLEMENTED">>};
lookup_amqp_exception(internal_error) -> {true, ?INTERNAL_ERROR, <<"INTERNAL_ERROR">>}; lookup_amqp_exception(internal_error) -> {true, ?INTERNAL_ERROR, <<"INTERNAL_ERROR">>};
lookup_amqp_exception(Code) -> lookup_amqp_exception(Code) ->
rabbit_log:warning("Unknown AMQP error code '~p'~n", [Code]), ?LOG_WARNING("Unknown AMQP error code '~p'~n", [Code]),
{true, ?INTERNAL_ERROR, <<"INTERNAL_ERROR">>}. {true, ?INTERNAL_ERROR, <<"INTERNAL_ERROR">>}.
amqp_exception(?FRAME_METHOD) -> frame_method; amqp_exception(?FRAME_METHOD) -> frame_method;
amqp_exception(?FRAME_HEADER) -> frame_header; amqp_exception(?FRAME_HEADER) -> frame_header;

View File

@ -8,6 +8,7 @@
%% %%
-module(rabbit_framing_amqp_0_9_1). -module(rabbit_framing_amqp_0_9_1).
-include("rabbit_framing.hrl"). -include("rabbit_framing.hrl").
-include_lib("kernel/include/logger.hrl").
-export([version/0]). -export([version/0]).
-export([lookup_method_name/1]). -export([lookup_method_name/1]).
@ -1240,7 +1241,7 @@ lookup_amqp_exception(not_allowed) -> {true, ?NOT_ALLOWED, <<"NOT_ALLOWED">>};
lookup_amqp_exception(not_implemented) -> {true, ?NOT_IMPLEMENTED, <<"NOT_IMPLEMENTED">>}; lookup_amqp_exception(not_implemented) -> {true, ?NOT_IMPLEMENTED, <<"NOT_IMPLEMENTED">>};
lookup_amqp_exception(internal_error) -> {true, ?INTERNAL_ERROR, <<"INTERNAL_ERROR">>}; lookup_amqp_exception(internal_error) -> {true, ?INTERNAL_ERROR, <<"INTERNAL_ERROR">>};
lookup_amqp_exception(Code) -> lookup_amqp_exception(Code) ->
rabbit_log:warning("Unknown AMQP error code '~p'~n", [Code]), ?LOG_WARNING("Unknown AMQP error code '~p'~n", [Code]),
{true, ?INTERNAL_ERROR, <<"INTERNAL_ERROR">>}. {true, ?INTERNAL_ERROR, <<"INTERNAL_ERROR">>}.
amqp_exception(?FRAME_METHOD) -> frame_method; amqp_exception(?FRAME_METHOD) -> frame_method;
amqp_exception(?FRAME_HEADER) -> frame_header; amqp_exception(?FRAME_HEADER) -> frame_header;

View File

@ -1,118 +0,0 @@
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2007-2025 Broadcom. All Rights Reserved. The term Broadcom refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
%%
-module(rabbit_log).
-export([log/3, log/4]).
-export([debug/1, debug/2, debug/3,
info/1, info/2, info/3,
notice/1, notice/2, notice/3,
warning/1, warning/2, warning/3,
error/1, error/2, error/3,
critical/1, critical/2, critical/3,
alert/1, alert/2, alert/3,
emergency/1, emergency/2, emergency/3,
none/1, none/2, none/3]).
-include("logging.hrl").
-compile({no_auto_import, [error/2, error/3]}).
%%----------------------------------------------------------------------------
-type category() :: atom().
-spec debug(string()) -> 'ok'.
-spec debug(string(), [any()]) -> 'ok'.
-spec debug(pid() | [tuple()], string(), [any()]) -> 'ok'.
-spec info(string()) -> 'ok'.
-spec info(string(), [any()]) -> 'ok'.
-spec info(pid() | [tuple()], string(), [any()]) -> 'ok'.
-spec notice(string()) -> 'ok'.
-spec notice(string(), [any()]) -> 'ok'.
-spec notice(pid() | [tuple()], string(), [any()]) -> 'ok'.
-spec warning(string()) -> 'ok'.
-spec warning(string(), [any()]) -> 'ok'.
-spec warning(pid() | [tuple()], string(), [any()]) -> 'ok'.
-spec error(string()) -> 'ok'.
-spec error(string(), [any()]) -> 'ok'.
-spec error(pid() | [tuple()], string(), [any()]) -> 'ok'.
-spec critical(string()) -> 'ok'.
-spec critical(string(), [any()]) -> 'ok'.
-spec critical(pid() | [tuple()], string(), [any()]) -> 'ok'.
-spec alert(string()) -> 'ok'.
-spec alert(string(), [any()]) -> 'ok'.
-spec alert(pid() | [tuple()], string(), [any()]) -> 'ok'.
-spec emergency(string()) -> 'ok'.
-spec emergency(string(), [any()]) -> 'ok'.
-spec emergency(pid() | [tuple()], string(), [any()]) -> 'ok'.
-spec none(string()) -> 'ok'.
-spec none(string(), [any()]) -> 'ok'.
-spec none(pid() | [tuple()], string(), [any()]) -> 'ok'.
%%----------------------------------------------------------------------------
-spec log(category(), logger:level(), string()) -> 'ok'.
log(Category, Level, Fmt) -> log(Category, Level, Fmt, []).
-spec log(category(), logger:level(), string(), [any()]) -> 'ok'.
log(default, Level, Fmt, Args) when is_list(Args) ->
logger:log(Level, Fmt, Args, #{domain => ?RMQLOG_DOMAIN_GLOBAL});
log(Category, Level, Fmt, Args) when is_list(Args) ->
logger:log(Level, Fmt, Args, #{domain => ?DEFINE_RMQLOG_DOMAIN(Category)}).
debug(Format) -> debug(Format, []).
debug(Format, Args) -> debug(self(), Format, Args).
debug(Pid, Format, Args) ->
logger:debug(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_GLOBAL}).
info(Format) -> info(Format, []).
info(Format, Args) -> info(self(), Format, Args).
info(Pid, Format, Args) ->
logger:info(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_GLOBAL}).
notice(Format) -> notice(Format, []).
notice(Format, Args) -> notice(self(), Format, Args).
notice(Pid, Format, Args) ->
logger:notice(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_GLOBAL}).
warning(Format) -> warning(Format, []).
warning(Format, Args) -> warning(self(), Format, Args).
warning(Pid, Format, Args) ->
logger:warning(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_GLOBAL}).
error(Format) -> error(Format, []).
error(Format, Args) -> error(self(), Format, Args).
error(Pid, Format, Args) ->
logger:error(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_GLOBAL}).
critical(Format) -> critical(Format, []).
critical(Format, Args) -> critical(self(), Format, Args).
critical(Pid, Format, Args) ->
logger:critical(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_GLOBAL}).
alert(Format) -> alert(Format, []).
alert(Format, Args) -> alert(self(), Format, Args).
alert(Pid, Format, Args) ->
logger:alert(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_GLOBAL}).
emergency(Format) -> emergency(Format, []).
emergency(Format, Args) -> emergency(self(), Format, Args).
emergency(Pid, Format, Args) ->
logger:emergency(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_GLOBAL}).
none(_Format) -> ok.
none(_Format, _Args) -> ok.
none(_Pid, _Format, _Args) -> ok.
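
For reference, a minimal sketch of the call-site pattern that replaces the wrapper module removed above (the module name and message below are hypothetical, not part of this diff): the ?LOG_* macros from kernel's logger.hrl log through the standard logger and attach the caller's module/file/line metadata, which a plain function wrapper such as rabbit_log cannot capture.

%% Hypothetical illustration of the migration pattern applied throughout this commit.
-module(log_macro_example).
-include_lib("kernel/include/logger.hrl").
-export([stop_app/1]).

stop_app(App) ->
    %% previously: rabbit_log:info("Stopping application '~ts'", [App])
    ?LOG_INFO("Stopping application '~ts'", [App]),
    application:stop(App).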

View File

@ -13,6 +13,7 @@
-include("rabbit_misc.hrl"). -include("rabbit_misc.hrl").
-include_lib("kernel/include/file.hrl"). -include_lib("kernel/include/file.hrl").
-include_lib("kernel/include/logger.hrl").
-ifdef(TEST). -ifdef(TEST).
-export([decompose_pid/1, compose_pid/4]). -export([decompose_pid/1, compose_pid/4]).
@ -1284,7 +1285,7 @@ safe_ets_update_counter(Tab, Key, UpdateOp) ->
try try
ets:update_counter(Tab, Key, UpdateOp) ets:update_counter(Tab, Key, UpdateOp)
catch error:badarg:E -> catch error:badarg:E ->
rabbit_log:debug("error updating ets counter ~p in table ~p: ~p", [Key, Tab, E]), ?LOG_DEBUG("error updating ets counter ~p in table ~p: ~p", [Key, Tab, E]),
ok ok
end. end.
@ -1354,7 +1355,7 @@ safe_ets_update_counter(Tab, Key, UpdateOp, OnSuccess, OnFailure) ->
try try
OnSuccess(ets:update_counter(Tab, Key, UpdateOp)) OnSuccess(ets:update_counter(Tab, Key, UpdateOp))
catch error:badarg:E -> catch error:badarg:E ->
rabbit_log:debug("error updating ets counter ~p in table ~p: ~p", [Key, Tab, E]), ?LOG_DEBUG("error updating ets counter ~p in table ~p: ~p", [Key, Tab, E]),
OnFailure() OnFailure()
end. end.
@ -1373,7 +1374,7 @@ safe_ets_update_element(Tab, Key, ElementSpec) ->
try try
ets:update_element(Tab, Key, ElementSpec) ets:update_element(Tab, Key, ElementSpec)
catch error:badarg:E -> catch error:badarg:E ->
rabbit_log:debug("error updating ets element ~p in table ~p: ~p", [Key, Tab, E]), ?LOG_DEBUG("error updating ets element ~p in table ~p: ~p", [Key, Tab, E]),
false false
end. end.
@ -1410,7 +1411,7 @@ safe_ets_update_element(Tab, Key, ElementSpec, OnSuccess, OnFailure) ->
try try
OnSuccess(ets:update_element(Tab, Key, ElementSpec)) OnSuccess(ets:update_element(Tab, Key, ElementSpec))
catch error:badarg:E -> catch error:badarg:E ->
rabbit_log:debug("error updating ets element ~p in table ~p: ~p", [Key, Tab, E]), ?LOG_DEBUG("error updating ets element ~p in table ~p: ~p", [Key, Tab, E]),
OnFailure(), OnFailure(),
false false
end. end.

View File

@ -14,6 +14,7 @@
-define(ERROR_LOGGER_HANDLER, rabbit_error_logger_handler). -define(ERROR_LOGGER_HANDLER, rabbit_error_logger_handler).
-include_lib("kernel/include/inet.hrl"). -include_lib("kernel/include/inet.hrl").
-include_lib("kernel/include/logger.hrl").
%% %%
%% API %% API
@ -51,7 +52,7 @@ names(Hostname) ->
names(Hostname, 0) -> names(Hostname, 0) ->
epmd_names(Hostname); epmd_names(Hostname);
names(Hostname, RetriesLeft) -> names(Hostname, RetriesLeft) ->
rabbit_log:debug("Getting epmd names for hostname '~ts', ~b retries left", ?LOG_DEBUG("Getting epmd names for hostname '~ts', ~b retries left",
[Hostname, RetriesLeft]), [Hostname, RetriesLeft]),
case catch epmd_names(Hostname) of case catch epmd_names(Hostname) of
{ok, R } -> {ok, R}; {ok, R } -> {ok, R};
@ -131,7 +132,7 @@ port_shutdown_loop(Port) ->
{Port, closed} -> ok; {Port, closed} -> ok;
{Port, {data, _}} -> port_shutdown_loop(Port); {Port, {data, _}} -> port_shutdown_loop(Port);
{'EXIT', Port, Reason} -> {'EXIT', Port, Reason} ->
rabbit_log:error("Failed to start a one-off Erlang VM to keep epmd alive: ~tp", [Reason]) ?LOG_ERROR("Failed to start a one-off Erlang VM to keep epmd alive: ~tp", [Reason])
after 15000 -> after 15000 ->
%% ensure the port is closed %% ensure the port is closed
Port ! {self(), close}, Port ! {self(), close},

View File

@ -7,6 +7,9 @@
-module(rabbit_ssl_options). -module(rabbit_ssl_options).
-include_lib("kernel/include/logger.hrl").
-export([ -export([
fix/1, fix/1,
fix_client/1, fix_client/1,
@ -86,7 +89,7 @@ make_verify_fun(Module, Function, InitialUserState) ->
Module:module_info() Module:module_info()
catch catch
_:Exception -> _:Exception ->
rabbit_log:error("TLS verify_fun: module ~ts missing: ~tp", ?LOG_ERROR("TLS verify_fun: module ~ts missing: ~tp",
[Module, Exception]), [Module, Exception]),
throw({error, {invalid_verify_fun, missing_module}}) throw({error, {invalid_verify_fun, missing_module}})
end, end,
@ -109,7 +112,7 @@ make_verify_fun(Module, Function, InitialUserState) ->
Module:Function(Args) Module:Function(Args)
end; end;
_ -> _ ->
rabbit_log:error("TLS verify_fun: no ~ts:~ts/3 exported", ?LOG_ERROR("TLS verify_fun: no ~ts:~ts/3 exported",
[Module, Function]), [Module, Function]),
throw({error, {invalid_verify_fun, function_not_exported}}) throw({error, {invalid_verify_fun, function_not_exported}})
end. end.

View File

@ -27,6 +27,7 @@
%% When a socket write fails, writer will exit. %% When a socket write fails, writer will exit.
-include("rabbit.hrl"). -include("rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-export([start/6, start_link/6, start/7, start_link/7, start/8, start_link/8]). -export([start/6, start_link/6, start/7, start_link/7, start/8, start_link/8]).
-export([init/1, -export([init/1,
@ -264,10 +265,10 @@ handle_message(emit_stats, State = #wstate{reader = ReaderPid}) ->
handle_message(ok, State) -> handle_message(ok, State) ->
State; State;
handle_message({_Ref, ok} = Msg, State) -> handle_message({_Ref, ok} = Msg, State) ->
rabbit_log:warning("AMQP 0-9-1 channel writer has received a message it does not support: ~p", [Msg]), ?LOG_WARNING("AMQP 0-9-1 channel writer has received a message it does not support: ~p", [Msg]),
State; State;
handle_message({ok, _Ref} = Msg, State) -> handle_message({ok, _Ref} = Msg, State) ->
rabbit_log:warning("AMQP 0-9-1 channel writer has received a message it does not support: ~p", [Msg]), ?LOG_WARNING("AMQP 0-9-1 channel writer has received a message it does not support: ~p", [Msg]),
State; State;
handle_message(Message, _State) -> handle_message(Message, _State) ->
exit({writer, message_not_understood, Message}). exit({writer, message_not_understood, Message}).

View File

@ -7,6 +7,9 @@
-module(worker_pool_sup). -module(worker_pool_sup).
-include_lib("kernel/include/logger.hrl").
-behaviour(supervisor). -behaviour(supervisor).
-export([start_link/0, start_link/1, start_link/2]). -export([start_link/0, start_link/1, start_link/2]).
@ -29,11 +32,11 @@ start_link() ->
start_link(Size). start_link(Size).
start_link(PoolSize) -> start_link(PoolSize) ->
rabbit_log:info("Will use ~tp processes for default worker pool", [PoolSize]), ?LOG_INFO("Will use ~tp processes for default worker pool", [PoolSize]),
start_link(PoolSize, worker_pool:default_pool()). start_link(PoolSize, worker_pool:default_pool()).
start_link(PoolSize, PoolName) -> start_link(PoolSize, PoolName) ->
rabbit_log:info("Starting worker pool '~tp' with ~tp processes in it", [PoolName, PoolSize]), ?LOG_INFO("Starting worker pool '~tp' with ~tp processes in it", [PoolName, PoolSize]),
SupName = list_to_atom(atom_to_list(PoolName) ++ "_sup"), SupName = list_to_atom(atom_to_list(PoolName) ++ "_sup"),
supervisor:start_link({local, SupName}, ?MODULE, [PoolSize, PoolName]). supervisor:start_link({local, SupName}, ?MODULE, [PoolSize, PoolName]).

View File

@ -7,6 +7,7 @@
-module(rabbit_auth_backend_cache). -module(rabbit_auth_backend_cache).
-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-behaviour(rabbit_authn_backend). -behaviour(rabbit_authn_backend).
-behaviour(rabbit_authz_backend). -behaviour(rabbit_authz_backend).
@ -68,13 +69,13 @@ expiry_timestamp(_) -> never.
clear_cache_cluster_wide() -> clear_cache_cluster_wide() ->
Nodes = rabbit_nodes:list_running(), Nodes = rabbit_nodes:list_running(),
rabbit_log:warning("Clearing auth_backend_cache in all nodes : ~p", [Nodes]), ?LOG_WARNING("Clearing auth_backend_cache in all nodes : ~p", [Nodes]),
rabbit_misc:append_rpc_all_nodes(Nodes, ?MODULE, clear_cache, []). rabbit_misc:append_rpc_all_nodes(Nodes, ?MODULE, clear_cache, []).
clear_cache() -> clear_cache() ->
{ok, AuthCache} = application:get_env(rabbitmq_auth_backend_cache, {ok, AuthCache} = application:get_env(rabbitmq_auth_backend_cache,
cache_module), cache_module),
rabbit_log:warning("Clearing auth_backend_cache"), ?LOG_WARNING("Clearing auth_backend_cache"),
AuthCache:clear(). AuthCache:clear().
with_cache(BackendType, {F, A}, Fun) -> with_cache(BackendType, {F, A}, Fun) ->

View File

@ -8,6 +8,7 @@
-module(rabbit_auth_backend_http). -module(rabbit_auth_backend_http).
-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-behaviour(rabbit_authn_backend). -behaviour(rabbit_authn_backend).
-behaviour(rabbit_authz_backend). -behaviour(rabbit_authz_backend).
@ -180,10 +181,10 @@ do_http_req(Path0, Query) ->
Request = case rabbit_data_coercion:to_atom(Method) of Request = case rabbit_data_coercion:to_atom(Method) of
get -> get ->
Path = Path0 ++ "?" ++ Query, Path = Path0 ++ "?" ++ Query,
rabbit_log:debug("auth_backend_http: GET ~ts", [Path]), ?LOG_DEBUG("auth_backend_http: GET ~ts", [Path]),
{Path, [{"Host", HostHdr}]}; {Path, [{"Host", HostHdr}]};
post -> post ->
rabbit_log:debug("auth_backend_http: POST ~ts", [Path0]), ?LOG_DEBUG("auth_backend_http: POST ~ts", [Path0]),
{Path0, [{"Host", HostHdr}], "application/x-www-form-urlencoded", Query} {Path0, [{"Host", HostHdr}], "application/x-www-form-urlencoded", Query}
end, end,
RequestTimeout = RequestTimeout =
@ -196,12 +197,12 @@ do_http_req(Path0, Query) ->
{ok, Val2} -> Val2; {ok, Val2} -> Val2;
_ -> RequestTimeout _ -> RequestTimeout
end, end,
rabbit_log:debug("auth_backend_http: request timeout: ~tp, connection timeout: ~tp", [RequestTimeout, ConnectionTimeout]), ?LOG_DEBUG("auth_backend_http: request timeout: ~tp, connection timeout: ~tp", [RequestTimeout, ConnectionTimeout]),
HttpOpts = [{timeout, RequestTimeout}, HttpOpts = [{timeout, RequestTimeout},
{connect_timeout, ConnectionTimeout}] ++ ssl_options(), {connect_timeout, ConnectionTimeout}] ++ ssl_options(),
case httpc:request(Method, Request, HttpOpts, []) of case httpc:request(Method, Request, HttpOpts, []) of
{ok, {{_HTTP, Code, _}, _Headers, Body}} -> {ok, {{_HTTP, Code, _}, _Headers, Body}} ->
rabbit_log:debug("auth_backend_http: response code is ~tp, body: ~tp", [Code, Body]), ?LOG_DEBUG("auth_backend_http: response code is ~tp, body: ~tp", [Code, Body]),
case lists:member(Code, ?SUCCESSFUL_RESPONSE_CODES) of case lists:member(Code, ?SUCCESSFUL_RESPONSE_CODES) of
true -> parse_resp(Body); true -> parse_resp(Body);
false -> {error, {Code, Body}} false -> {error, {Code, Body}}
@ -216,7 +217,7 @@ ssl_options() ->
Opts1 = [{ssl, rabbit_ssl_options:fix_client(Opts0)}], Opts1 = [{ssl, rabbit_ssl_options:fix_client(Opts0)}],
case application:get_env(rabbitmq_auth_backend_http, ssl_hostname_verification) of case application:get_env(rabbitmq_auth_backend_http, ssl_hostname_verification) of
{ok, wildcard} -> {ok, wildcard} ->
rabbit_log:debug("Enabling wildcard-aware hostname verification for HTTP client connections"), ?LOG_DEBUG("Enabling wildcard-aware hostname verification for HTTP client connections"),
%% Needed for HTTPS connections that connect to servers that use wildcard certificates. %% Needed for HTTPS connections that connect to servers that use wildcard certificates.
%% See https://erlang.org/doc/man/public_key.html#pkix_verify_hostname_match_fun-1. %% See https://erlang.org/doc/man/public_key.html#pkix_verify_hostname_match_fun-1.
[{customize_hostname_check, [{match_fun, public_key:pkix_verify_hostname_match_fun(https)}]} | Opts1]; [{customize_hostname_check, [{match_fun, public_key:pkix_verify_hostname_match_fun(https)}]} | Opts1];

View File

@ -9,6 +9,7 @@
-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl").
-include("oauth2.hrl"). -include("oauth2.hrl").
-include_lib("kernel/include/logger.hrl").
-behaviour(rabbit_authn_backend). -behaviour(rabbit_authn_backend).
-behaviour(rabbit_authz_backend). -behaviour(rabbit_authz_backend).
@ -63,7 +64,7 @@ description() ->
user_login_authentication(Username, AuthProps) -> user_login_authentication(Username, AuthProps) ->
case authenticate(Username, AuthProps) of case authenticate(Username, AuthProps) of
{refused, Msg, Args} = AuthResult -> {refused, Msg, Args} = AuthResult ->
rabbit_log:debug(Msg, Args), ?LOG_DEBUG(Msg, Args),
AuthResult; AuthResult;
_ = AuthResult -> _ = AuthResult ->
AuthResult AuthResult
@ -179,7 +180,7 @@ with_decoded_token(DecodedToken, Fun) ->
case validate_token_expiry(DecodedToken) of case validate_token_expiry(DecodedToken) of
ok -> Fun(DecodedToken); ok -> Fun(DecodedToken);
{error, Msg} = Err -> {error, Msg} = Err ->
rabbit_log:error(Msg), ?LOG_ERROR(Msg),
Err Err
end. end.
@ -418,7 +419,7 @@ username_from(PreferredUsernameClaims, DecodedToken) ->
[ _One ] -> _One; [ _One ] -> _One;
[ _One | _ ] -> _One [ _One | _ ] -> _One
end, end,
rabbit_log:debug("Computing username from client's JWT token: ~ts -> ~ts ", ?LOG_DEBUG("Computing username from client's JWT token: ~ts -> ~ts ",
[lists:flatten(io_lib:format("~p",[ResolvedUsernameClaims])), Username]), [lists:flatten(io_lib:format("~p",[ResolvedUsernameClaims])), Username]),
Username. Username.

View File

@ -8,6 +8,7 @@
-module(rabbit_oauth2_provider). -module(rabbit_oauth2_provider).
-include("oauth2.hrl"). -include("oauth2.hrl").
-include_lib("kernel/include/logger.hrl").
-export([ -export([
get_internal_oauth_provider/0, get_internal_oauth_provider/1, get_internal_oauth_provider/0, get_internal_oauth_provider/1,
@ -101,7 +102,7 @@ do_replace_signing_keys(SigningKeys, root) ->
proplists:get_value(signing_keys, KeyConfig1, #{}), proplists:get_value(signing_keys, KeyConfig1, #{}),
SigningKeys)} | KeyConfig1], SigningKeys)} | KeyConfig1],
set_env(key_config, KeyConfig2), set_env(key_config, KeyConfig2),
rabbit_log:debug("Replacing signing keys for key_config with ~p keys", ?LOG_DEBUG("Replacing signing keys for key_config with ~p keys",
[maps:size(SigningKeys)]), [maps:size(SigningKeys)]),
SigningKeys; SigningKeys;
@ -115,7 +116,7 @@ do_replace_signing_keys(SigningKeys, OauthProviderId) ->
OauthProviders = maps:put(OauthProviderId, OauthProvider, OauthProviders0), OauthProviders = maps:put(OauthProviderId, OauthProvider, OauthProviders0),
set_env(oauth_providers, OauthProviders), set_env(oauth_providers, OauthProviders),
rabbit_log:debug("Replacing signing keys for ~p -> ~p with ~p keys", ?LOG_DEBUG("Replacing signing keys for ~p -> ~p with ~p keys",
[OauthProviderId, OauthProvider, maps:size(SigningKeys)]), [OauthProviderId, OauthProvider, maps:size(SigningKeys)]),
SigningKeys. SigningKeys.

View File

@ -16,6 +16,7 @@
-include("oauth2.hrl"). -include("oauth2.hrl").
-include_lib("jose/include/jose_jwk.hrl"). -include_lib("jose/include/jose_jwk.hrl").
-include_lib("kernel/include/logger.hrl").
-import(rabbit_data_coercion, [ -import(rabbit_data_coercion, [
to_map/1]). to_map/1]).
@ -44,7 +45,7 @@ add_signing_key(KeyId, Type, Value) ->
-spec update_jwks_signing_keys(oauth_provider()) -> ok | {error, term()}. -spec update_jwks_signing_keys(oauth_provider()) -> ok | {error, term()}.
update_jwks_signing_keys(#oauth_provider{id = Id, jwks_uri = JwksUrl, update_jwks_signing_keys(#oauth_provider{id = Id, jwks_uri = JwksUrl,
ssl_options = SslOptions}) -> ssl_options = SslOptions}) ->
rabbit_log:debug("Downloading signing keys from ~tp (TLS options: ~p)", ?LOG_DEBUG("Downloading signing keys from ~tp (TLS options: ~p)",
[JwksUrl, format_ssl_options(SslOptions)]), [JwksUrl, format_ssl_options(SslOptions)]),
case uaa_jwks:get(JwksUrl, SslOptions) of case uaa_jwks:get(JwksUrl, SslOptions) of
{ok, {_, _, JwksBody}} -> {ok, {_, _, JwksBody}} ->
@ -52,13 +53,13 @@ update_jwks_signing_keys(#oauth_provider{id = Id, jwks_uri = JwksUrl,
jose:decode(erlang:iolist_to_binary(JwksBody)), []), jose:decode(erlang:iolist_to_binary(JwksBody)), []),
Keys = maps:from_list(lists:map(fun(Key) -> Keys = maps:from_list(lists:map(fun(Key) ->
{maps:get(<<"kid">>, Key, undefined), {json, Key}} end, KeyList)), {maps:get(<<"kid">>, Key, undefined), {json, Key}} end, KeyList)),
rabbit_log:debug("Downloaded ~p signing keys", [maps:size(Keys)]), ?LOG_DEBUG("Downloaded ~p signing keys", [maps:size(Keys)]),
case replace_signing_keys(Keys, Id) of case replace_signing_keys(Keys, Id) of
{error, _} = Err -> Err; {error, _} = Err -> Err;
_ -> ok _ -> ok
end; end;
{error, _} = Err -> {error, _} = Err ->
rabbit_log:error("Failed to download signing keys: ~tp", [Err]), ?LOG_ERROR("Failed to download signing keys: ~tp", [Err]),
Err Err
end. end.
@ -66,7 +67,7 @@ update_jwks_signing_keys(#oauth_provider{id = Id, jwks_uri = JwksUrl,
-> {boolean(), map()} | {error, term()}. -> {boolean(), map()} | {error, term()}.
decode_and_verify(Token, ResourceServer, InternalOAuthProvider) -> decode_and_verify(Token, ResourceServer, InternalOAuthProvider) ->
OAuthProviderId = InternalOAuthProvider#internal_oauth_provider.id, OAuthProviderId = InternalOAuthProvider#internal_oauth_provider.id,
rabbit_log:debug("Decoding token for resource_server: ~p using oauth_provider_id: ~p", ?LOG_DEBUG("Decoding token for resource_server: ~p using oauth_provider_id: ~p",
[ResourceServer#resource_server.id, [ResourceServer#resource_server.id,
format_oauth_provider_id(OAuthProviderId)]), format_oauth_provider_id(OAuthProviderId)]),
Result = case uaa_jwt_jwt:get_key_id(Token) of Result = case uaa_jwt_jwt:get_key_id(Token) of
@ -81,7 +82,7 @@ decode_and_verify(Token, ResourceServer, InternalOAuthProvider) ->
case get_jwk(KeyId, InternalOAuthProvider) of case get_jwk(KeyId, InternalOAuthProvider) of
{ok, JWK} -> {ok, JWK} ->
Algorithms = InternalOAuthProvider#internal_oauth_provider.algorithms, Algorithms = InternalOAuthProvider#internal_oauth_provider.algorithms,
rabbit_log:debug("Verifying signature using signing_key_id : '~tp' and algorithms: ~p", ?LOG_DEBUG("Verifying signature using signing_key_id : '~tp' and algorithms: ~p",
[KeyId, Algorithms]), [KeyId, Algorithms]),
uaa_jwt_jwt:decode_and_verify(Algorithms, JWK, Token); uaa_jwt_jwt:decode_and_verify(Algorithms, JWK, Token);
{error, _} = Err3 -> {error, _} = Err3 ->
@ -118,7 +119,7 @@ get_jwk(KeyId, InternalOAuthProvider, AllowUpdateJwks) ->
undefined -> undefined ->
case AllowUpdateJwks of case AllowUpdateJwks of
true -> true ->
rabbit_log:debug("Signing key '~tp' not found. Downloading it... ", [KeyId]), ?LOG_DEBUG("Signing key '~tp' not found. Downloading it... ", [KeyId]),
case get_oauth_provider(OAuthProviderId, [jwks_uri]) of case get_oauth_provider(OAuthProviderId, [jwks_uri]) of
{ok, OAuthProvider} -> {ok, OAuthProvider} ->
case update_jwks_signing_keys(OAuthProvider) of case update_jwks_signing_keys(OAuthProvider) of
@ -130,15 +131,15 @@ get_jwk(KeyId, InternalOAuthProvider, AllowUpdateJwks) ->
Err Err
end; end;
{error, _} = Error -> {error, _} = Error ->
rabbit_log:debug("Unable to download signing keys due to ~p", [Error]), ?LOG_DEBUG("Unable to download signing keys due to ~p", [Error]),
Error Error
end; end;
false -> false ->
rabbit_log:debug("Signing key '~tp' not found. Downloading is not allowed", [KeyId]), ?LOG_DEBUG("Signing key '~tp' not found. Downloading is not allowed", [KeyId]),
{error, key_not_found} {error, key_not_found}
end; end;
{Type, Value} -> {Type, Value} ->
rabbit_log:debug("Signing key ~p found", [KeyId]), ?LOG_DEBUG("Signing key ~p found", [KeyId]),
case Type of case Type of
json -> uaa_jwt_jwk:make_jwk(Value); json -> uaa_jwt_jwk:make_jwk(Value);
pem -> uaa_jwt_jwk:from_pem(Value); pem -> uaa_jwt_jwk:from_pem(Value);

View File

@ -7,6 +7,9 @@
-module(wildcard). -module(wildcard).
-include_lib("kernel/include/logger.hrl").
-export([match/2]). -export([match/2]).
-spec match(Subject :: binary(), Pattern :: binary()) -> boolean(). -spec match(Subject :: binary(), Pattern :: binary()) -> boolean().
@ -52,7 +55,7 @@ parse_pattern(Pattern) ->
Parts = binary:split(Pattern, <<"*">>, [global]), Parts = binary:split(Pattern, <<"*">>, [global]),
try lists:map(fun(Part) -> cow_qs:urldecode(Part) end, Parts) try lists:map(fun(Part) -> cow_qs:urldecode(Part) end, Parts)
catch Type:Error -> catch Type:Error ->
rabbit_log:warning("Invalid pattern ~tp : ~tp", ?LOG_WARNING("Invalid pattern ~tp : ~tp",
[Pattern, {Type, Error}]), [Pattern, {Type, Error}]),
invalid invalid
end. end.

View File

@ -13,6 +13,7 @@
-export([description/0, should_offer/1, init/1, handle_response/2]). -export([description/0, should_offer/1, init/1, handle_response/2]).
-include_lib("public_key/include/public_key.hrl"). -include_lib("public_key/include/public_key.hrl").
-include_lib("kernel/include/logger.hrl").
-rabbit_boot_step({?MODULE, -rabbit_boot_step({?MODULE,
[{description, "external TLS peer verification-based authentication mechanism"}, [{description, "external TLS peer verification-based authentication mechanism"},
@ -52,7 +53,7 @@ init(Sock) ->
not_found -> {refused, none, "no name found", []}; not_found -> {refused, none, "no name found", []};
Name -> Name ->
Val = rabbit_data_coercion:to_binary(Name), Val = rabbit_data_coercion:to_binary(Name),
rabbit_log:debug("auth mechanism TLS extracted username '~ts' from peer certificate", [Val]), ?LOG_DEBUG("auth mechanism TLS extracted username '~ts' from peer certificate", [Val]),
Val Val
end; end;
{error, no_peercert} -> {error, no_peercert} ->

Some files were not shown because too many files have changed in this diff.