[skip ci] Remove rabbit_log and switch to LOG_ macros
parent 310e8123ec
commit 175ba70e8c
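The change is mechanical: every rabbit_log:debug/info/warning/error call becomes the corresponding ?LOG_DEBUG / ?LOG_INFO / ?LOG_WARNING / ?LOG_ERROR macro from OTP's logger, and each affected module adds -include_lib("kernel/include/logger.hrl"). Format strings and argument lists are carried over unchanged. A minimal sketch of the pattern, using a hypothetical module that is not part of this commit:

-module(log_macro_example).

-include_lib("kernel/include/logger.hrl").

-export([create_user/1]).

%% Before the migration this line would have read:
%%   rabbit_log:info("Created user '~ts'", [Username]),
%% The ?LOG_* macros go through the same OTP logger but also attach
%% module/function/line metadata captured at the call site.
create_user(Username) ->
    ?LOG_INFO("Created user '~ts'", [Username]),
    ok.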
@@ -17,12 +17,13 @@
 ]).
 
 -include("oauth2_client.hrl").
+-include_lib("kernel/include/logger.hrl").
 
 -spec get_access_token(oauth_provider(), access_token_request()) ->
 {ok, successful_access_token_response()} |
 {error, unsuccessful_access_token_response() | any()}.
 get_access_token(OAuthProvider, Request) ->
-rabbit_log:debug("get_access_token using OAuthProvider:~p and client_id:~p",
+?LOG_DEBUG("get_access_token using OAuthProvider:~p and client_id:~p",
 [OAuthProvider, Request#access_token_request.client_id]),
 URL = OAuthProvider#oauth_provider.token_endpoint,
 Header = [],
@@ -96,7 +97,7 @@ drop_trailing_path_separator(Path) when is_list(Path) ->
 -spec get_openid_configuration(DiscoveryEndpoint :: uri_string:uri_string(),
 ssl:tls_option() | []) -> {ok, openid_configuration()} | {error, term()}.
 get_openid_configuration(DiscoverEndpoint, TLSOptions) ->
-rabbit_log:debug("get_openid_configuration from ~p (~p)", [DiscoverEndpoint,
+?LOG_DEBUG("get_openid_configuration from ~p (~p)", [DiscoverEndpoint,
 format_ssl_options(TLSOptions)]),
 Options = [],
 Response = httpc:request(get, {DiscoverEndpoint, []}, TLSOptions, Options),
@@ -219,7 +220,7 @@ do_update_oauth_provider_endpoints_configuration(OAuthProvider) when
 undefined -> do_nothing;
 JwksUri -> set_env(jwks_uri, JwksUri)
 end,
-rabbit_log:debug("Updated oauth_provider details: ~p ",
+?LOG_DEBUG("Updated oauth_provider details: ~p ",
 [format_oauth_provider(OAuthProvider)]),
 OAuthProvider;
 
@@ -230,7 +231,7 @@ do_update_oauth_provider_endpoints_configuration(OAuthProvider) ->
 ModifiedOAuthProviders = maps:put(OAuthProviderId,
 merge_oauth_provider(OAuthProvider, Proplist), OAuthProviders),
 set_env(oauth_providers, ModifiedOAuthProviders),
-rabbit_log:debug("Replaced oauth_providers "),
+?LOG_DEBUG("Replaced oauth_providers "),
 OAuthProvider.
 
 use_global_locks_on_all_nodes() ->
@@ -271,7 +272,7 @@ get_oauth_provider(ListOfRequiredAttributes) ->
 case get_env(default_oauth_provider) of
 undefined -> get_root_oauth_provider(ListOfRequiredAttributes);
 DefaultOauthProviderId ->
-rabbit_log:debug("Using default_oauth_provider ~p",
+?LOG_DEBUG("Using default_oauth_provider ~p",
 [DefaultOauthProviderId]),
 get_oauth_provider(DefaultOauthProviderId, ListOfRequiredAttributes)
 end.
@@ -282,7 +283,7 @@ download_oauth_provider(OAuthProvider) ->
 case OAuthProvider#oauth_provider.discovery_endpoint of
 undefined -> {error, {missing_oauth_provider_attributes, [issuer]}};
 URL ->
-rabbit_log:debug("Downloading oauth_provider using ~p ", [URL]),
+?LOG_DEBUG("Downloading oauth_provider using ~p ", [URL]),
 case get_openid_configuration(URL, get_ssl_options_if_any(OAuthProvider)) of
 {ok, OpenIdConfiguration} ->
 {ok, update_oauth_provider_endpoints_configuration(
@@ -294,7 +295,7 @@ download_oauth_provider(OAuthProvider) ->
 ensure_oauth_provider_has_attributes(OAuthProvider, ListOfRequiredAttributes) ->
 case find_missing_attributes(OAuthProvider, ListOfRequiredAttributes) of
 [] ->
-rabbit_log:debug("Resolved oauth_provider ~p",
+?LOG_DEBUG("Resolved oauth_provider ~p",
 [format_oauth_provider(OAuthProvider)]),
 {ok, OAuthProvider};
 _ = Attrs ->
@@ -303,13 +304,13 @@ ensure_oauth_provider_has_attributes(OAuthProvider, ListOfRequiredAttributes) ->
 
 get_root_oauth_provider(ListOfRequiredAttributes) ->
 OAuthProvider = lookup_root_oauth_provider(),
-rabbit_log:debug("Using root oauth_provider ~p",
+?LOG_DEBUG("Using root oauth_provider ~p",
 [format_oauth_provider(OAuthProvider)]),
 case find_missing_attributes(OAuthProvider, ListOfRequiredAttributes) of
 [] ->
 {ok, OAuthProvider};
 _ = MissingAttributes ->
-rabbit_log:debug("Looking up missing attributes ~p ...",
+?LOG_DEBUG("Looking up missing attributes ~p ...",
 [MissingAttributes]),
 case download_oauth_provider(OAuthProvider) of
 {ok, OAuthProvider2} ->
@@ -333,22 +334,22 @@ get_oauth_provider(OAuth2ProviderId, ListOfRequiredAttributes)
 
 get_oauth_provider(OAuthProviderId, ListOfRequiredAttributes)
 when is_binary(OAuthProviderId) ->
-rabbit_log:debug("get_oauth_provider ~p with at least these attributes: ~p",
+?LOG_DEBUG("get_oauth_provider ~p with at least these attributes: ~p",
 [OAuthProviderId, ListOfRequiredAttributes]),
 case lookup_oauth_provider_config(OAuthProviderId) of
 {error, _} = Error0 ->
-rabbit_log:debug("Failed to find oauth_provider ~p configuration due to ~p",
+?LOG_DEBUG("Failed to find oauth_provider ~p configuration due to ~p",
 [OAuthProviderId, Error0]),
 Error0;
 Config ->
-rabbit_log:debug("Found oauth_provider configuration ~p", [Config]),
+?LOG_DEBUG("Found oauth_provider configuration ~p", [Config]),
 OAuthProvider = map_to_oauth_provider(Config),
-rabbit_log:debug("Resolved oauth_provider ~p", [format_oauth_provider(OAuthProvider)]),
+?LOG_DEBUG("Resolved oauth_provider ~p", [format_oauth_provider(OAuthProvider)]),
 case find_missing_attributes(OAuthProvider, ListOfRequiredAttributes) of
 [] ->
 {ok, OAuthProvider};
 _ = MissingAttributes ->
-rabbit_log:debug("OauthProvider has following missing attributes ~p", [MissingAttributes]),
+?LOG_DEBUG("OauthProvider has following missing attributes ~p", [MissingAttributes]),
 case download_oauth_provider(OAuthProvider) of
 {ok, OAuthProvider2} ->
 ensure_oauth_provider_has_attributes(OAuthProvider2,

@@ -3,7 +3,7 @@
 -ifdef(TRACE_AMQP).
 -warning("AMQP tracing is enabled").
 -define(TRACE(Format, Args),
-rabbit_log:debug(
+?LOG_DEBUG(
 "~s:~s/~b ~b~n" ++ Format ++ "~n",
 [?MODULE, ?FUNCTION_NAME, ?FUNCTION_ARITY, ?LINE] ++ Args)).
 -else.

@@ -9,6 +9,8 @@
 
 -module(code_server_cache).
 
+-include_lib("kernel/include/logger.hrl").
+
 -behaviour(gen_server).
 
 %% API
@@ -70,7 +72,7 @@ handle_maybe_call_mfa(true, {Module, Function, Args, Default}, State) ->
 error:undef ->
 handle_maybe_call_mfa_error(Module, Default, State);
 Err:Reason ->
-rabbit_log:error("Calling ~tp:~tp failed: ~tp:~tp",
+?LOG_ERROR("Calling ~tp:~tp failed: ~tp:~tp",
 [Module, Function, Err, Reason]),
 handle_maybe_call_mfa_error(Module, Default, State)
 end.

@@ -7,6 +7,8 @@
 
 -module(file_handle_cache).
 
+-include_lib("kernel/include/logger.hrl").
+
 %% A File Handle Cache
 %%
 %% This extends a subset of the functionality of the Erlang file
@@ -1451,19 +1453,19 @@ update_counts(open, Pid, Delta,
 State = #fhc_state { open_count = OpenCount,
 clients = Clients }) ->
 safe_ets_update_counter(Clients, Pid, {#cstate.opened, Delta},
-fun() -> rabbit_log:warning("FHC: failed to update counter 'opened', client pid: ~p", [Pid]) end),
+fun() -> ?LOG_WARNING("FHC: failed to update counter 'opened', client pid: ~p", [Pid]) end),
 State #fhc_state { open_count = OpenCount + Delta};
 update_counts({obtain, file}, Pid, Delta,
 State = #fhc_state {obtain_count_file = ObtainCountF,
 clients = Clients }) ->
 safe_ets_update_counter(Clients, Pid, {#cstate.obtained_file, Delta},
-fun() -> rabbit_log:warning("FHC: failed to update counter 'obtained_file', client pid: ~p", [Pid]) end),
+fun() -> ?LOG_WARNING("FHC: failed to update counter 'obtained_file', client pid: ~p", [Pid]) end),
 State #fhc_state { obtain_count_file = ObtainCountF + Delta};
 update_counts({obtain, socket}, Pid, Delta,
 State = #fhc_state {obtain_count_socket = ObtainCountS,
 clients = Clients }) ->
 safe_ets_update_counter(Clients, Pid, {#cstate.obtained_socket, Delta},
-fun() -> rabbit_log:warning("FHC: failed to update counter 'obtained_socket', client pid: ~p", [Pid]) end),
+fun() -> ?LOG_WARNING("FHC: failed to update counter 'obtained_socket', client pid: ~p", [Pid]) end),
 State #fhc_state { obtain_count_socket = ObtainCountS + Delta};
 update_counts({reserve, file}, Pid, NewReservation,
 State = #fhc_state {reserve_count_file = ReserveCountF,
@@ -1471,7 +1473,7 @@ update_counts({reserve, file}, Pid, NewReservation,
 [#cstate{reserved_file = R}] = ets:lookup(Clients, Pid),
 Delta = NewReservation - R,
 safe_ets_update_counter(Clients, Pid, {#cstate.reserved_file, Delta},
-fun() -> rabbit_log:warning("FHC: failed to update counter 'reserved_file', client pid: ~p", [Pid]) end),
+fun() -> ?LOG_WARNING("FHC: failed to update counter 'reserved_file', client pid: ~p", [Pid]) end),
 State #fhc_state { reserve_count_file = ReserveCountF + Delta};
 update_counts({reserve, socket}, Pid, NewReservation,
 State = #fhc_state {reserve_count_socket = ReserveCountS,
@@ -1479,7 +1481,7 @@ update_counts({reserve, socket}, Pid, NewReservation,
 [#cstate{reserved_file = R}] = ets:lookup(Clients, Pid),
 Delta = NewReservation - R,
 safe_ets_update_counter(Clients, Pid, {#cstate.reserved_socket, Delta},
-fun() -> rabbit_log:warning("FHC: failed to update counter 'reserved_socket', client pid: ~p", [Pid]) end),
+fun() -> ?LOG_WARNING("FHC: failed to update counter 'reserved_socket', client pid: ~p", [Pid]) end),
 State #fhc_state { reserve_count_socket = ReserveCountS + Delta}.
 
 maybe_reduce(State) ->

@@ -3,6 +3,7 @@
 -include_lib("rabbit_common/include/rabbit.hrl").
 -include_lib("rabbit_common/include/rabbit_framing.hrl").
 -include("mc.hrl").
+-include_lib("kernel/include/logger.hrl").
 
 -export([
 %init/3,
@@ -267,7 +268,7 @@ update_x_death_header(Info, Headers) ->
 Headers, <<"x-death">>, array,
 [{table, rabbit_misc:sort_field_table(Info1)} | Others]);
 {<<"x-death">>, InvalidType, Header} ->
-rabbit_log:warning("Message has invalid x-death header (type: ~tp)."
+?LOG_WARNING("Message has invalid x-death header (type: ~tp)."
 " Resetting header ~tp",
 [InvalidType, Header]),
 %% if x-death is something other than an array (list)

@@ -7,6 +7,8 @@
 
 -module(mirrored_supervisor).
 
+-include_lib("kernel/include/logger.hrl").
+
 %% Mirrored Supervisor
 %% ===================
 %%
@@ -252,13 +254,13 @@ handle_call({init, Overall}, _From,
 LockId = mirrored_supervisor_locks:lock(Group),
 maybe_log_lock_acquisition_failure(LockId, Group),
 ok = pg:join(Group, Overall),
-rabbit_log:debug("Mirrored supervisor: initializing, overall supervisor ~tp joined group ~tp", [Overall, Group]),
+?LOG_DEBUG("Mirrored supervisor: initializing, overall supervisor ~tp joined group ~tp", [Overall, Group]),
 Rest = pg:get_members(Group) -- [Overall],
 Nodes = [node(M) || M <- Rest],
-rabbit_log:debug("Mirrored supervisor: known group ~tp members: ~tp on nodes ~tp", [Group, Rest, Nodes]),
+?LOG_DEBUG("Mirrored supervisor: known group ~tp members: ~tp on nodes ~tp", [Group, Rest, Nodes]),
 case Rest of
 [] ->
-rabbit_log:debug("Mirrored supervisor: no known peer members in group ~tp, will delete all child records for it", [Group]),
+?LOG_DEBUG("Mirrored supervisor: no known peer members in group ~tp, will delete all child records for it", [Group]),
 delete_all(Group);
 _ -> ok
 end,
@@ -282,18 +284,18 @@ handle_call({start_child, ChildSpec}, _From,
 group = Group}) ->
 LockId = mirrored_supervisor_locks:lock(Group),
 maybe_log_lock_acquisition_failure(LockId, Group),
-rabbit_log:debug("Mirrored supervisor: asked to consider starting a child, group: ~tp", [Group]),
+?LOG_DEBUG("Mirrored supervisor: asked to consider starting a child, group: ~tp", [Group]),
 Result = case maybe_start(Group, Overall, Delegate, ChildSpec) of
 already_in_store ->
-rabbit_log:debug("Mirrored supervisor: maybe_start for group ~tp,"
+?LOG_DEBUG("Mirrored supervisor: maybe_start for group ~tp,"
 " overall ~p returned 'record already present'", [Group, Overall]),
 {error, already_present};
 {already_in_store, Pid} ->
-rabbit_log:debug("Mirrored supervisor: maybe_start for group ~tp,"
+?LOG_DEBUG("Mirrored supervisor: maybe_start for group ~tp,"
 " overall ~p returned 'already running: ~tp'", [Group, Overall, Pid]),
 {error, {already_started, Pid}};
 Else ->
-rabbit_log:debug("Mirrored supervisor: maybe_start for group ~tp,"
+?LOG_DEBUG("Mirrored supervisor: maybe_start for group ~tp,"
 " overall ~tp returned ~tp", [Group, Overall, Else]),
 Else
 end,
@@ -377,19 +379,19 @@ tell_all_peers_to_die(Group, Reason) ->
 [cast(P, {die, Reason}) || P <- pg:get_members(Group) -- [self()]].
 
 maybe_start(Group, Overall, Delegate, ChildSpec) ->
-rabbit_log:debug("Mirrored supervisor: asked to consider starting, group: ~tp",
+?LOG_DEBUG("Mirrored supervisor: asked to consider starting, group: ~tp",
 [Group]),
 try check_start(Group, Overall, Delegate, ChildSpec) of
 start ->
-rabbit_log:debug("Mirrored supervisor: check_start for group ~tp,"
+?LOG_DEBUG("Mirrored supervisor: check_start for group ~tp,"
 " overall ~tp returned 'do start'", [Group, Overall]),
 start(Delegate, ChildSpec);
 undefined ->
-rabbit_log:debug("Mirrored supervisor: check_start for group ~tp,"
+?LOG_DEBUG("Mirrored supervisor: check_start for group ~tp,"
 " overall ~tp returned 'undefined'", [Group, Overall]),
 already_in_store;
 Pid ->
-rabbit_log:debug("Mirrored supervisor: check_start for group ~tp,"
+?LOG_DEBUG("Mirrored supervisor: check_start for group ~tp,"
 " overall ~tp returned 'already running (~tp)'",
 [Group, Overall, Pid]),
 {already_in_store, Pid}
@@ -400,7 +402,7 @@ maybe_start(Group, Overall, Delegate, ChildSpec) ->
 
 check_start(Group, Overall, Delegate, ChildSpec) ->
 Id = id(ChildSpec),
-rabbit_log:debug("Mirrored supervisor: check_start for group ~tp, id: ~tp, "
+?LOG_DEBUG("Mirrored supervisor: check_start for group ~tp, id: ~tp, "
 "overall: ~tp", [Group, Id, Overall]),
 case rabbit_db_msup:create_or_update(Group, Overall, Delegate, ChildSpec, Id) of
 Delegate0 when is_pid(Delegate0) ->
@@ -486,6 +488,6 @@ restore_child_order(ChildSpecs, ChildOrder) ->
 end, ChildSpecs).
 
 maybe_log_lock_acquisition_failure(undefined = _LockId, Group) ->
-rabbit_log:warning("Mirrored supervisor: could not acquire lock for group ~ts", [Group]);
+?LOG_WARNING("Mirrored supervisor: could not acquire lock for group ~ts", [Group]);
 maybe_log_lock_acquisition_failure(_, _) ->
 ok.

@@ -1688,7 +1688,7 @@ maybe_warn_low_fd_limit() ->
 L when L > 1024 ->
 ok;
 L ->
-rabbit_log:warning("Available file handles: ~tp. "
+?LOG_WARNING("Available file handles: ~tp. "
 "Please consider increasing system limits", [L])
 end.
 
@@ -1718,7 +1718,7 @@ persist_static_configuration() ->
 MoreCreditAfter =< InitialCredit ->
 {InitialCredit, MoreCreditAfter};
 Other ->
-rabbit_log:error("Refusing to boot due to an invalid value of 'rabbit.credit_flow_default_credit'"),
+?LOG_ERROR("Refusing to boot due to an invalid value of 'rabbit.credit_flow_default_credit'"),
 throw({error, {invalid_credit_flow_default_credit_value, Other}})
 end,
 ok = persistent_term:put(credit_flow_default_credit, CreditFlowDefaultCredit),

@@ -8,6 +8,7 @@
 -module(rabbit_access_control).
 
 -include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("kernel/include/logger.hrl").
 
 -export([check_user_pass_login/2, check_user_login/2, check_user_login/3, check_user_loopback/2,
 check_vhost_access/4, check_resource_access/4, check_topic_access/4,
@@ -59,10 +60,10 @@ check_user_login(Username, AuthProps, Modules) ->
 %% it gives us
 case try_authenticate(Mod, Username, AuthProps) of
 {ok, ModNUser = #auth_user{username = Username2, impl = Impl}} ->
-rabbit_log:debug("User '~ts' authenticated successfully by backend ~ts", [Username2, Mod]),
+?LOG_DEBUG("User '~ts' authenticated successfully by backend ~ts", [Username2, Mod]),
 user(ModNUser, {ok, [{Mod, Impl}], []});
 Else ->
-rabbit_log:debug("User '~ts' failed authentication by backend ~ts", [Username, Mod]),
+?LOG_DEBUG("User '~ts' failed authentication by backend ~ts", [Username, Mod]),
 Else
 end;
 (_, {ok, User}) ->
@@ -72,7 +73,7 @@ check_user_login(Username, AuthProps, Modules) ->
 {refused, Username, "No modules checked '~ts'", [Username]}, Modules)
 catch
 Type:Error:Stacktrace ->
-rabbit_log:debug("User '~ts' authentication failed with ~ts:~tp:~n~tp", [Username, Type, Error, Stacktrace]),
+?LOG_DEBUG("User '~ts' authentication failed with ~ts:~tp:~n~tp", [Username, Type, Error, Stacktrace]),
 {refused, Username, "User '~ts' authentication failed with internal error. "
 "Enable debug logs to see the real error.", [Username]}
 
@@ -85,7 +86,7 @@ try_authenticate_and_try_authorize(ModN, ModZs0, Username, AuthProps) ->
 end,
 case try_authenticate(ModN, Username, AuthProps) of
 {ok, ModNUser = #auth_user{username = Username2}} ->
-rabbit_log:debug("User '~ts' authenticated successfully by backend ~ts", [Username2, ModN]),
+?LOG_DEBUG("User '~ts' authenticated successfully by backend ~ts", [Username2, ModN]),
 user(ModNUser, try_authorize(ModZs, Username2, AuthProps));
 Else ->
 Else
@@ -227,7 +228,7 @@ check_access(Fun, Module, ErrStr, ErrArgs, ErrName) ->
 {error, E} ->
 FullErrStr = ErrStr ++ ", backend ~ts returned an error: ~tp",
 FullErrArgs = ErrArgs ++ [Module, E],
-rabbit_log:error(FullErrStr, FullErrArgs),
+?LOG_ERROR(FullErrStr, FullErrArgs),
 rabbit_misc:protocol_error(ErrName, FullErrStr, FullErrArgs)
 end.

@@ -18,6 +18,8 @@
 
 -module(rabbit_alarm).
 
+-include_lib("kernel/include/logger.hrl").
+
 -behaviour(gen_event).
 
 -export([start_link/0, start/0, stop/0, register/2, set_alarm/1,
@@ -239,7 +241,7 @@ handle_event({node_down, Node}, #alarms{alarmed_nodes = AN} = State) ->
 error -> []
 end,
 {ok, lists:foldr(fun(Source, AccState) ->
-rabbit_log:warning("~ts resource limit alarm cleared for dead node ~tp",
+?LOG_WARNING("~ts resource limit alarm cleared for dead node ~tp",
 [Source, Node]),
 maybe_alert(fun dict_unappend/3, Node, Source, false, AccState)
 end, State, AlarmsForDeadNode)};
@@ -291,7 +293,7 @@ maybe_alert(UpdateFun, Node, Source, WasAlertAdded,
 StillHasAlerts = lists:any(fun ({_Node, NodeAlerts}) -> lists:member(Source, NodeAlerts) end, dict:to_list(AN1)),
 case StillHasAlerts of
 true -> ok;
-false -> rabbit_log:warning("~ts resource limit alarm cleared across the cluster", [Source])
+false -> ?LOG_WARNING("~ts resource limit alarm cleared across the cluster", [Source])
 end,
 Alert = {WasAlertAdded, StillHasAlerts, Node},
 case node() of
@@ -327,7 +329,7 @@ internal_register(Pid, {M, F, A} = AlertMFA,
 State#alarms{alertees = NewAlertees}.
 
 handle_set_resource_alarm(Source, Node, State) ->
-rabbit_log:warning(
+?LOG_WARNING(
 "~ts resource limit alarm set on node ~tp.~n~n"
 "**********************************************************~n"
 "*** Publishers will be blocked until this alarm clears ***~n"
@@ -336,26 +338,26 @@ handle_set_resource_alarm(Source, Node, State) ->
 {ok, maybe_alert(fun dict_append/3, Node, Source, true, State)}.
 
 handle_set_alarm({file_descriptor_limit, []}, State) ->
-rabbit_log:warning(
+?LOG_WARNING(
 "file descriptor limit alarm set.~n~n"
 "********************************************************************~n"
 "*** New connections will not be accepted until this alarm clears ***~n"
 "********************************************************************~n"),
 {ok, State};
 handle_set_alarm(Alarm, State) ->
-rabbit_log:warning("alarm '~tp' set", [Alarm]),
+?LOG_WARNING("alarm '~tp' set", [Alarm]),
 {ok, State}.
 
 handle_clear_resource_alarm(Source, Node, State) ->
-rabbit_log:warning("~ts resource limit alarm cleared on node ~tp",
+?LOG_WARNING("~ts resource limit alarm cleared on node ~tp",
 [Source, Node]),
 {ok, maybe_alert(fun dict_unappend/3, Node, Source, false, State)}.
 
 handle_clear_alarm(file_descriptor_limit, State) ->
-rabbit_log:warning("file descriptor limit alarm cleared~n"),
+?LOG_WARNING("file descriptor limit alarm cleared~n"),
 {ok, State};
 handle_clear_alarm(Alarm, State) ->
-rabbit_log:warning("alarm '~tp' cleared", [Alarm]),
+?LOG_WARNING("alarm '~tp' cleared", [Alarm]),
 {ok, State}.
 
 is_node_alarmed(Source, Node, #alarms{alarmed_nodes = AN}) ->

@@ -8,6 +8,7 @@
 -feature(maybe_expr, enable).
 
 -include_lib("amqp10_common/include/amqp10_filter.hrl").
+-include_lib("kernel/include/logger.hrl").
 
 -type parsed_expression() :: {ApplicationProperties :: boolean(),
 rabbit_amqp_sql_ast:ast()}.
@@ -293,36 +294,33 @@ sql_to_list(SQL) ->
 String when is_list(String) ->
 {ok, String};
 Error ->
-rabbit_log:warning("SQL expression ~p is not UTF-8 encoded: ~p",
-[SQL, Error]),
+?LOG_WARNING("JMS message selector ~p is not UTF-8 encoded: ~p",
+[JmsSelector, Error]),
 error
 end.
 
-check_length(String) ->
-Len = length(String),
-case Len =< ?MAX_EXPRESSION_LENGTH of
-true ->
-ok;
-false ->
-rabbit_log:warning("SQL expression length ~b exceeds maximum length ~b",
-[Len, ?MAX_EXPRESSION_LENGTH]),
-error
-end.
+check_length(String)
+when length(String) > ?MAX_EXPRESSION_LENGTH ->
+?LOG_WARNING("JMS message selector length ~b exceeds maximum length ~b",
+[length(String), ?MAX_EXPRESSION_LENGTH]),
+error;
+check_length(_) ->
+ok.
 
 tokenize(String, SQL) ->
 case rabbit_amqp_sql_lexer:string(String) of
 {ok, Tokens, _EndLocation} ->
 {ok, Tokens};
 {error, {_Line, _Mod, ErrDescriptor}, _Location} ->
-rabbit_log:warning("failed to scan SQL expression '~ts': ~tp",
-[SQL, ErrDescriptor]),
+?LOG_WARNING("failed to scan JMS message selector '~ts': ~tp",
+[JmsSelector, ErrDescriptor]),
 error
 end.
 
 check_token_count(Tokens, SQL)
 when length(Tokens) > ?MAX_TOKENS ->
-rabbit_log:warning("SQL expression '~ts' with ~b tokens exceeds token limit ~b",
-[SQL, length(Tokens), ?MAX_TOKENS]),
+?LOG_WARNING("JMS message selector '~ts' with ~b tokens exceeds token limit ~b",
+[JmsSelector, length(Tokens), ?MAX_TOKENS]),
 error;
 check_token_count(_, _) ->
 ok.
@@ -330,8 +328,8 @@ check_token_count(_, _) ->
 parse(Tokens, SQL) ->
 case rabbit_amqp_sql_parser:parse(Tokens) of
 {error, Reason} ->
-rabbit_log:warning("failed to parse SQL expression '~ts': ~p",
-[SQL, Reason]),
+?LOG_WARNING("failed to parse JMS message selector '~ts': ~p",
+[JmsSelector, Reason]),
 error;
 Ok ->
 Ok
@@ -345,10 +343,15 @@ transform_ast(Ast0, SQL) ->
 end, Ast0) of
 Ast ->
 {ok, Ast}
-catch {invalid_pattern, Reason} ->
-rabbit_log:warning(
-"failed to parse LIKE pattern for SQL expression ~tp: ~tp",
-[SQL, Reason]),
+catch {unsupported_field, Name} ->
+?LOG_WARNING(
+"identifier ~ts in JMS message selector ~tp is unsupported",
+[Name, JmsSelector]),
+error;
+{invalid_pattern, Reason} ->
+?LOG_WARNING(
+"failed to parse LIKE pattern for JMS message selector ~tp: ~tp",
+[JmsSelector, Reason]),
 error
 end.
 
@@ -2,6 +2,7 @@
 
 -include("rabbit_amqp.hrl").
 -include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("kernel/include/logger.hrl").
 
 -export([handle_request/5]).
 
@@ -49,7 +50,7 @@ handle_request(Request, Vhost, User, ConnectionPid, PermCaches0) ->
 ConnectionPid,
 PermCaches0)
 catch throw:{?MODULE, StatusCode0, Explanation} ->
-rabbit_log:warning("request ~ts ~ts failed: ~ts",
+?LOG_WARNING("request ~ts ~ts failed: ~ts",
 [HttpMethod, HttpRequestTarget, Explanation]),
 {StatusCode0, {utf8, Explanation}, PermCaches0}
 end,

@@ -82,6 +82,7 @@
 -include_lib("rabbit_common/include/rabbit.hrl").
 -include_lib("stdlib/include/qlc.hrl").
 -include("amqqueue.hrl").
+-include_lib("kernel/include/logger.hrl").
 
 -define(INTEGER_ARG_TYPES, [byte, short, signedint, long,
 unsignedbyte, unsignedshort, unsignedint]).
@@ -423,7 +424,7 @@ rebalance(Type, VhostSpec, QueueSpec) ->
 %% TODO: classic queues do not support rebalancing, it looks like they are simply
 %% filtered out with is_replicable(Q). Maybe error instead?
 maybe_rebalance({true, Id}, Type, VhostSpec, QueueSpec) ->
-rabbit_log:info("Starting queue rebalance operation: '~ts' for vhosts matching '~ts' and queues matching '~ts'",
+?LOG_INFO("Starting queue rebalance operation: '~ts' for vhosts matching '~ts' and queues matching '~ts'",
 [Type, VhostSpec, QueueSpec]),
 Running = rabbit_maintenance:filter_out_drained_nodes_consistent_read(rabbit_nodes:list_running()),
 NumRunning = length(Running),
@@ -445,10 +446,10 @@ maybe_rebalance({true, Id}, Type, VhostSpec, QueueSpec) ->
 MaxQueuesDesired = (NumToRebalance div NumRunning) + Rem,
 Result = iterative_rebalance(ByNode, MaxQueuesDesired),
 global:del_lock(Id),
-rabbit_log:info("Finished queue rebalance operation"),
+?LOG_INFO("Finished queue rebalance operation"),
 Result;
 maybe_rebalance(false, _Type, _VhostSpec, _QueueSpec) ->
-rabbit_log:warning("Queue rebalance operation is in progress, please wait."),
+?LOG_WARNING("Queue rebalance operation is in progress, please wait."),
 {error, rebalance_in_progress}.
 
 %% Stream queues don't yet support rebalance
@@ -466,7 +467,7 @@ filter_per_type_for_rebalance(TypeModule, Q) ->
 rebalance_module(Q) ->
 case rabbit_queue_type:rebalance_module(Q) of
 undefined ->
-rabbit_log:error("Undefined rebalance module for queue type: ~s", [amqqueue:get_type(Q)]),
+?LOG_ERROR("Undefined rebalance module for queue type: ~s", [amqqueue:get_type(Q)]),
 {error, not_supported};
 RBModule ->
 RBModule
@@ -484,7 +485,7 @@ is_match(Subj, RegEx) ->
 iterative_rebalance(ByNode, MaxQueuesDesired) ->
 case maybe_migrate(ByNode, MaxQueuesDesired) of
 {ok, Summary} ->
-rabbit_log:info("All queue leaders are balanced"),
+?LOG_INFO("All queue leaders are balanced"),
 {ok, Summary};
 {migrated, Other} ->
 iterative_rebalance(Other, MaxQueuesDesired);
@@ -521,23 +522,23 @@ maybe_migrate(ByNode, MaxQueuesDesired, [N | Nodes]) ->
 {not_migrated, update_not_migrated_queue(N, Queue, Queues, ByNode)};
 _ ->
 [{Length, Destination} | _] = sort_by_number_of_queues(Candidates, ByNode),
-rabbit_log:info("Migrating queue ~tp from node ~tp with ~tp queues to node ~tp with ~tp queues",
+?LOG_INFO("Migrating queue ~tp from node ~tp with ~tp queues to node ~tp with ~tp queues",
 [Name, N, length(All), Destination, Length]),
 case Module:transfer_leadership(Q, Destination) of
 {migrated, NewNode} ->
-rabbit_log:info("Queue ~tp migrated to ~tp", [Name, NewNode]),
+?LOG_INFO("Queue ~tp migrated to ~tp", [Name, NewNode]),
 {migrated, update_migrated_queue(NewNode, N, Queue, Queues, ByNode)};
 {not_migrated, Reason} ->
-rabbit_log:warning("Error migrating queue ~tp: ~tp", [Name, Reason]),
+?LOG_WARNING("Error migrating queue ~tp: ~tp", [Name, Reason]),
 {not_migrated, update_not_migrated_queue(N, Queue, Queues, ByNode)}
 end
 end;
 [{_, _, true} | _] = All when length(All) > MaxQueuesDesired ->
-rabbit_log:warning("Node ~tp contains ~tp queues, but all have already migrated. "
+?LOG_WARNING("Node ~tp contains ~tp queues, but all have already migrated. "
 "Do nothing", [N, length(All)]),
 maybe_migrate(ByNode, MaxQueuesDesired, Nodes);
 All ->
-rabbit_log:debug("Node ~tp only contains ~tp queues, do nothing",
+?LOG_DEBUG("Node ~tp only contains ~tp queues, do nothing",
 [N, length(All)]),
 maybe_migrate(ByNode, MaxQueuesDesired, Nodes)
 end.
@@ -625,7 +626,7 @@ retry_wait(Q, F, E, RetriesLeft) ->
 %% The old check would have crashed here,
 %% instead, log it and run the exit fun. absent & alive is weird,
 %% but better than crashing with badmatch,true
-rabbit_log:debug("Unexpected alive queue process ~tp", [QPid]),
+?LOG_DEBUG("Unexpected alive queue process ~tp", [QPid]),
 E({absent, Q, alive});
 false ->
 ok % Expected result
@@ -1894,7 +1895,7 @@ internal_delete(Queue, ActingUser, Reason) ->
 %% TODO this is used by `rabbit_mnesia:remove_node_if_mnesia_running`
 %% Does it make any sense once mnesia is not used/removed?
 forget_all_durable(Node) ->
-rabbit_log:info("Will remove all classic queues from node ~ts. The node is likely being removed from the cluster.", [Node]),
+?LOG_INFO("Will remove all classic queues from node ~ts. The node is likely being removed from the cluster.", [Node]),
 UpdateFun = fun(Q) ->
 forget_node_for_queue(Q)
 end,
@@ -1959,7 +1960,7 @@ on_node_down(Node) ->
 %% `rabbit_khepri:init/0': we also try this deletion when the node
 %% restarts - a time that the cluster is very likely to have a
 %% majority - to ensure these records are deleted.
-rabbit_log:warning("transient queues for node '~ts' could not be "
+?LOG_WARNING("transient queues for node '~ts' could not be "
 "deleted because of a timeout. These queues "
 "will be removed when node '~ts' restarts or "
 "is removed from the cluster.", [Node, Node]),
@@ -1980,7 +1981,7 @@ delete_transient_queues_on_node(Node) ->
 {QueueNames, Deletions} when is_list(QueueNames) ->
 case length(QueueNames) of
 0 -> ok;
-N -> rabbit_log:info("~b transient queues from node '~ts' "
+N -> ?LOG_INFO("~b transient queues from node '~ts' "
 "deleted in ~fs",
 [N, Node, Time / 1_000_000])
 end,

@@ -8,6 +8,7 @@
 -module(rabbit_amqqueue_process).
 -include_lib("rabbit_common/include/rabbit.hrl").
 -include("amqqueue.hrl").
+-include_lib("kernel/include/logger.hrl").
 
 -behaviour(gen_server2).
 
@@ -150,7 +151,7 @@ init({Q, Marker}) ->
 %% restart
 QueueName = amqqueue:get_name(Q),
 {ok, Q1} = rabbit_amqqueue:lookup(QueueName),
-rabbit_log:error("Restarting crashed ~ts.", [rabbit_misc:rs(QueueName)]),
+?LOG_ERROR("Restarting crashed ~ts.", [rabbit_misc:rs(QueueName)]),
 gen_server2:cast(self(), init),
 init(Q1)
 end;
@@ -1604,7 +1605,7 @@ handle_cast({force_event_refresh, Ref},
 rabbit_event:notify(queue_created, queue_created_infos(State), Ref),
 QName = qname(State),
 AllConsumers = rabbit_queue_consumers:all(Consumers),
-rabbit_log:debug("Queue ~ts forced to re-emit events, consumers: ~tp", [rabbit_misc:rs(QName), AllConsumers]),
+?LOG_DEBUG("Queue ~ts forced to re-emit events, consumers: ~tp", [rabbit_misc:rs(QName), AllConsumers]),
 [emit_consumer_created(
 Ch, CTag, ActiveOrExclusive, AckRequired, QName, Prefetch,
 Args, Ref, ActingUser) ||

@@ -16,6 +16,7 @@
 -export([init/1]).
 
 -include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("kernel/include/logger.hrl").
 
 -define(SERVER, ?MODULE).
 
@@ -74,7 +75,7 @@ start_for_vhost(VHost) ->
 %% we can get here if a vhost is added and removed concurrently
 %% e.g. some integration tests do it
 {error, {no_such_vhost, VHost}} ->
-rabbit_log:error("Failed to start a queue process supervisor for vhost ~ts: vhost no longer exists!",
+?LOG_ERROR("Failed to start a queue process supervisor for vhost ~ts: vhost no longer exists!",
 [VHost]),
 {error, {no_such_vhost, VHost}}
 end.
@@ -87,7 +88,7 @@ stop_for_vhost(VHost) ->
 ok = supervisor:delete_child(VHostSup, rabbit_amqqueue_sup_sup);
 %% see start/1
 {error, {no_such_vhost, VHost}} ->
-rabbit_log:error("Failed to stop a queue process supervisor for vhost ~ts: vhost no longer exists!",
+?LOG_ERROR("Failed to stop a queue process supervisor for vhost ~ts: vhost no longer exists!",
 [VHost]),
 ok
 end.

@@ -7,6 +7,7 @@
 
 -module(rabbit_auth_backend_internal).
 -include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("kernel/include/logger.hrl").
 
 -behaviour(rabbit_authn_backend).
 -behaviour(rabbit_authz_backend).
@@ -204,7 +205,7 @@ validate_and_alternate_credentials(Username, Password, ActingUser, Fun) ->
 ok ->
 Fun(Username, Password, ActingUser);
 {error, Err} ->
-rabbit_log:error("Credential validation for user '~ts' failed!", [Username]),
+?LOG_ERROR("Credential validation for user '~ts' failed!", [Username]),
 {error, Err}
 end.
 
@@ -238,7 +239,7 @@ add_user_sans_validation(Limits, Tags) ->
 end.
 
 add_user_sans_validation(Username, Password, ActingUser, Limits, Tags) ->
-rabbit_log:debug("Asked to create a new user '~ts', password length in bytes: ~tp", [Username, bit_size(Password)]),
+?LOG_DEBUG("Asked to create a new user '~ts', password length in bytes: ~tp", [Username, bit_size(Password)]),
 %% hash_password will pick the hashing function configured for us
 %% but we also need to store a hint as part of the record, so we
 %% retrieve it here one more time
@@ -254,7 +255,7 @@ add_user_sans_validation(Username, Password, ActingUser, Limits, Tags) ->
 add_user_sans_validation_in(Username, User, ConvertedTags, Limits, ActingUser).
 
 add_user_sans_validation(Username, PasswordHash, HashingMod, Tags, Limits, ActingUser) ->
-rabbit_log:debug("Asked to create a new user '~ts' with password hash", [Username]),
+?LOG_DEBUG("Asked to create a new user '~ts' with password hash", [Username]),
 ConvertedTags = [rabbit_data_coercion:to_atom(I) || I <- Tags],
 User0 = internal_user:create_user(Username, PasswordHash, HashingMod),
 User1 = internal_user:set_tags(
@@ -269,7 +270,7 @@ add_user_sans_validation(Username, PasswordHash, HashingMod, Tags, Limits, Actin
 add_user_sans_validation_in(Username, User, ConvertedTags, Limits, ActingUser) ->
 try
 R = rabbit_db_user:create(User),
-rabbit_log:info("Created user '~ts'", [Username]),
+?LOG_INFO("Created user '~ts'", [Username]),
 rabbit_event:notify(user_created, [{name, Username},
 {user_who_performed_action, ActingUser}]),
 case ConvertedTags of
@@ -283,21 +284,21 @@ add_user_sans_validation_in(Username, User, ConvertedTags, Limits, ActingUser) -
 R
 catch
 throw:{error, {user_already_exists, _}} = Error ->
-rabbit_log:warning("Failed to add user '~ts': the user already exists", [Username]),
+?LOG_WARNING("Failed to add user '~ts': the user already exists", [Username]),
 throw(Error);
 Class:Error:Stacktrace ->
-rabbit_log:warning("Failed to add user '~ts': ~tp", [Username, Error]),
+?LOG_WARNING("Failed to add user '~ts': ~tp", [Username, Error]),
 erlang:raise(Class, Error, Stacktrace)
 end .
 
 -spec delete_user(rabbit_types:username(), rabbit_types:username()) -> 'ok'.
 
 delete_user(Username, ActingUser) ->
-rabbit_log:debug("Asked to delete user '~ts'", [Username]),
+?LOG_DEBUG("Asked to delete user '~ts'", [Username]),
 try
 case rabbit_db_user:delete(Username) of
 true ->
-rabbit_log:info("Deleted user '~ts'", [Username]),
+?LOG_INFO("Deleted user '~ts'", [Username]),
 rabbit_event:notify(user_deleted,
 [{name, Username},
 {user_who_performed_action, ActingUser}]),
@@ -305,12 +306,12 @@ delete_user(Username, ActingUser) ->
 false ->
 ok;
 Error0 ->
-rabbit_log:info("Failed to delete user '~ts': ~tp", [Username, Error0]),
+?LOG_INFO("Failed to delete user '~ts': ~tp", [Username, Error0]),
 throw(Error0)
 end
 catch
 Class:Error:Stacktrace ->
-rabbit_log:warning("Failed to delete user '~ts': ~tp", [Username, Error]),
+?LOG_WARNING("Failed to delete user '~ts': ~tp", [Username, Error]),
 erlang:raise(Class, Error, Stacktrace)
 end .
 
@@ -342,23 +343,23 @@ change_password(Username, Password, ActingUser) ->
 
 change_password_sans_validation(Username, Password, ActingUser) ->
 try
-rabbit_log:debug("Asked to change password of user '~ts', new password length in bytes: ~tp", [Username, bit_size(Password)]),
+?LOG_DEBUG("Asked to change password of user '~ts', new password length in bytes: ~tp", [Username, bit_size(Password)]),
 HashingAlgorithm = rabbit_password:hashing_mod(),
 R = change_password_hash(Username,
 hash_password(rabbit_password:hashing_mod(),
 Password),
 HashingAlgorithm),
-rabbit_log:info("Successfully changed password for user '~ts'", [Username]),
+?LOG_INFO("Successfully changed password for user '~ts'", [Username]),
 rabbit_event:notify(user_password_changed,
 [{name, Username},
 {user_who_performed_action, ActingUser}]),
 R
 catch
 throw:{error, {no_such_user, _}} = Error ->
-rabbit_log:warning("Failed to change password for user '~ts': the user does not exist", [Username]),
+?LOG_WARNING("Failed to change password for user '~ts': the user does not exist", [Username]),
 throw(Error);
 Class:Error:Stacktrace ->
-rabbit_log:warning("Failed to change password for user '~ts': ~tp", [Username, Error]),
+?LOG_WARNING("Failed to change password for user '~ts': ~tp", [Username, Error]),
 erlang:raise(Class, Error, Stacktrace)
 end.
 
@@ -369,10 +370,10 @@ update_user(Username, Password, Tags, Limits, ActingUser) ->
 update_user_sans_validation(Tags, Limits) ->
 fun(Username, Password, ActingUser) ->
 try
-rabbit_log:debug("Asked to change password of user '~ts', new password length in bytes: ~tp", [Username, bit_size(Password)]),
+?LOG_DEBUG("Asked to change password of user '~ts', new password length in bytes: ~tp", [Username, bit_size(Password)]),
 HashingAlgorithm = rabbit_password:hashing_mod(),
 
-rabbit_log:debug("Asked to set user tags for user '~ts' to ~tp", [Username, Tags]),
+?LOG_DEBUG("Asked to set user tags for user '~ts' to ~tp", [Username, Tags]),
 
 ConvertedTags = [rabbit_data_coercion:to_atom(I) || I <- Tags],
 R = update_user_with_hash(Username,
@@ -381,7 +382,7 @@ update_user_sans_validation(Tags, Limits) ->
 HashingAlgorithm,
 ConvertedTags,
 Limits),
-rabbit_log:info("Successfully changed password for user '~ts'", [Username]),
+?LOG_INFO("Successfully changed password for user '~ts'", [Username]),
 rabbit_event:notify(user_password_changed,
 [{name, Username},
 {user_who_performed_action, ActingUser}]),
@@ -390,10 +391,10 @@ update_user_sans_validation(Tags, Limits) ->
 R
 catch
 throw:{error, {no_such_user, _}} = Error ->
-rabbit_log:warning("Failed to change password for user '~ts': the user does not exist", [Username]),
+?LOG_WARNING("Failed to change password for user '~ts': the user does not exist", [Username]),
 throw(Error);
 Class:Error:Stacktrace ->
-rabbit_log:warning("Failed to change password for user '~ts': ~tp", [Username, Error]),
+?LOG_WARNING("Failed to change password for user '~ts': ~tp", [Username, Error]),
 erlang:raise(Class, Error, Stacktrace)
 end
 end.
@@ -401,7 +402,7 @@ update_user_sans_validation(Tags, Limits) ->
 -spec clear_password(rabbit_types:username(), rabbit_types:username()) -> 'ok'.
 
 clear_password(Username, ActingUser) ->
-rabbit_log:info("Clearing password for user '~ts'", [Username]),
+?LOG_INFO("Clearing password for user '~ts'", [Username]),
 R = change_password_hash(Username, <<"">>),
 rabbit_event:notify(user_password_cleared,
 [{name, Username},
@@ -443,7 +444,7 @@ update_user_with_hash(Username, PasswordHash, HashingAlgorithm, ConvertedTags, L
 
 set_tags(Username, Tags, ActingUser) ->
 ConvertedTags = [rabbit_data_coercion:to_atom(I) || I <- Tags],
-rabbit_log:debug("Asked to set user tags for user '~ts' to ~tp", [Username, ConvertedTags]),
+?LOG_DEBUG("Asked to set user tags for user '~ts' to ~tp", [Username, ConvertedTags]),
 try
 R = rabbit_db_user:update(Username, fun(User) ->
 internal_user:set_tags(User, ConvertedTags)
@@ -452,15 +453,15 @@ set_tags(Username, Tags, ActingUser) ->
 R
 catch
 throw:{error, {no_such_user, _}} = Error ->
-rabbit_log:warning("Failed to set tags for user '~ts': the user does not exist", [Username]),
+?LOG_WARNING("Failed to set tags for user '~ts': the user does not exist", [Username]),
 throw(Error);
 Class:Error:Stacktrace ->
-rabbit_log:warning("Failed to set tags for user '~ts': ~tp", [Username, Error]),
+?LOG_WARNING("Failed to set tags for user '~ts': ~tp", [Username, Error]),
 erlang:raise(Class, Error, Stacktrace)
 end .
 
 notify_user_tags_set(Username, ConvertedTags, ActingUser) ->
-rabbit_log:info("Successfully set user tags for user '~ts' to ~tp", [Username, ConvertedTags]),
+?LOG_INFO("Successfully set user tags for user '~ts' to ~tp", [Username, ConvertedTags]),
 rabbit_event:notify(user_tags_set, [{name, Username}, {tags, ConvertedTags},
 {user_who_performed_action, ActingUser}]).
 
@@ -470,7 +471,7 @@ notify_user_tags_set(Username, ConvertedTags, ActingUser) ->
 'ok'.
 
 set_permissions(Username, VirtualHost, ConfigurePerm, WritePerm, ReadPerm, ActingUser) ->
-rabbit_log:debug("Asked to set permissions for user "
+?LOG_DEBUG("Asked to set permissions for user "
 "'~ts' in virtual host '~ts' to '~ts', '~ts', '~ts'",
 [Username, VirtualHost, ConfigurePerm, WritePerm, ReadPerm]),
 _ = lists:map(
@@ -479,7 +480,7 @@ set_permissions(Username, VirtualHost, ConfigurePerm, WritePerm, ReadPerm, Actin
 case re:compile(Regexp) of
 {ok, _} -> ok;
 {error, Reason} ->
-rabbit_log:warning("Failed to set permissions for user '~ts' in virtual host '~ts': "
+?LOG_WARNING("Failed to set permissions for user '~ts' in virtual host '~ts': "
 "regular expression '~ts' is invalid",
 [Username, VirtualHost, RegexpBin]),
 throw({error, {invalid_regexp, Regexp, Reason}})
@@ -495,7 +496,7 @@ set_permissions(Username, VirtualHost, ConfigurePerm, WritePerm, ReadPerm, Actin
 write = WritePerm,
 read = ReadPerm}},
 R = rabbit_db_user:set_user_permissions(UserPermission),
-rabbit_log:info("Successfully set permissions for user "
+?LOG_INFO("Successfully set permissions for user "
 "'~ts' in virtual host '~ts' to '~ts', '~ts', '~ts'",
 [Username, VirtualHost, ConfigurePerm, WritePerm, ReadPerm]),
 rabbit_event:notify(permission_created, [{user, Username},
@@ -507,15 +508,15 @@ set_permissions(Username, VirtualHost, ConfigurePerm, WritePerm, ReadPerm, Actin
 R
 catch
 throw:{error, {no_such_vhost, _}} = Error ->
-rabbit_log:warning("Failed to set permissions for user '~ts': virtual host '~ts' does not exist",
+?LOG_WARNING("Failed to set permissions for user '~ts': virtual host '~ts' does not exist",
 [Username, VirtualHost]),
 throw(Error);
 throw:{error, {no_such_user, _}} = Error ->
-rabbit_log:warning("Failed to set permissions for user '~ts': the user does not exist",
+?LOG_WARNING("Failed to set permissions for user '~ts': the user does not exist",
 [Username]),
 throw(Error);
 Class:Error:Stacktrace ->
-rabbit_log:warning("Failed to set permissions for user '~ts' in virtual host '~ts': ~tp",
+?LOG_WARNING("Failed to set permissions for user '~ts' in virtual host '~ts': ~tp",
 [Username, VirtualHost, Error]),
 erlang:raise(Class, Error, Stacktrace)
 end.
@@ -524,11 +525,11 @@ set_permissions(Username, VirtualHost, ConfigurePerm, WritePerm, ReadPerm, Actin
 (rabbit_types:username(), rabbit_types:vhost(), rabbit_types:username()) -> 'ok'.
 
 clear_permissions(Username, VirtualHost, ActingUser) ->
-rabbit_log:debug("Asked to clear permissions for user '~ts' in virtual host '~ts'",
+?LOG_DEBUG("Asked to clear permissions for user '~ts' in virtual host '~ts'",
 [Username, VirtualHost]),
 try
 R = rabbit_db_user:clear_user_permissions(Username, VirtualHost),
-rabbit_log:info("Successfully cleared permissions for user '~ts' in virtual host '~ts'",
+?LOG_INFO("Successfully cleared permissions for user '~ts' in virtual host '~ts'",
 [Username, VirtualHost]),
 rabbit_event:notify(permission_deleted, [{user, Username},
 {vhost, VirtualHost},
@@ -536,7 +537,7 @@ clear_permissions(Username, VirtualHost, ActingUser) ->
 R
 catch
 Class:Error:Stacktrace ->
-rabbit_log:warning("Failed to clear permissions for user '~ts' in virtual host '~ts': ~tp",
+?LOG_WARNING("Failed to clear permissions for user '~ts' in virtual host '~ts': ~tp",
 [Username, VirtualHost, Error]),
 erlang:raise(Class, Error, Stacktrace)
 end.
@@ -577,7 +578,7 @@ set_permissions_globally(Username, ConfigurePerm, WritePerm, ReadPerm, ActingUse
 ok.
 
 set_topic_permissions(Username, VirtualHost, Exchange, WritePerm, ReadPerm, ActingUser) ->
-rabbit_log:debug("Asked to set topic permissions on exchange '~ts' for "
+?LOG_DEBUG("Asked to set topic permissions on exchange '~ts' for "
 "user '~ts' in virtual host '~ts' to '~ts', '~ts'",
 [Exchange, Username, VirtualHost, WritePerm, ReadPerm]),
 WritePermRegex = rabbit_data_coercion:to_binary(WritePerm),
@@ -587,7 +588,7 @@ set_topic_permissions(Username, VirtualHost, Exchange, WritePerm, ReadPerm, Acti
 case re:compile(RegexpBin) of
 {ok, _} -> ok;
 {error, Reason} ->
-rabbit_log:warning("Failed to set topic permissions on exchange '~ts' for user "
+?LOG_WARNING("Failed to set topic permissions on exchange '~ts' for user "
 "'~ts' in virtual host '~ts': regular expression '~ts' is invalid",
 [Exchange, Username, VirtualHost, RegexpBin]),
 throw({error, {invalid_regexp, RegexpBin, Reason}})
@@ -607,7 +608,7 @@ set_topic_permissions(Username, VirtualHost, Exchange, WritePerm, ReadPerm, Acti
 }
 },
 R = rabbit_db_user:set_topic_permissions(TopicPermission),
-rabbit_log:info("Successfully set topic permissions on exchange '~ts' for "
+?LOG_INFO("Successfully set topic permissions on exchange '~ts' for "
 "user '~ts' in virtual host '~ts' to '~ts', '~ts'",
 [Exchange, Username, VirtualHost, WritePerm, ReadPerm]),
 rabbit_event:notify(topic_permission_created, [
@@ -620,25 +621,25 @@ set_topic_permissions(Username, VirtualHost, Exchange, WritePerm, ReadPerm, Acti
 R
 catch
 throw:{error, {no_such_vhost, _}} = Error ->
-rabbit_log:warning("Failed to set topic permissions on exchange '~ts' for user '~ts': virtual host '~ts' does not exist.",
+?LOG_WARNING("Failed to set topic permissions on exchange '~ts' for user '~ts': virtual host '~ts' does not exist.",
 [Exchange, Username, VirtualHost]),
 throw(Error);
 throw:{error, {no_such_user, _}} = Error ->
-rabbit_log:warning("Failed to set topic permissions on exchange '~ts' for user '~ts': the user does not exist.",
+?LOG_WARNING("Failed to set topic permissions on exchange '~ts' for user '~ts': the user does not exist.",
 [Exchange, Username]),
 throw(Error);
 Class:Error:Stacktrace ->
-rabbit_log:warning("Failed to set topic permissions on exchange '~ts' for user '~ts' in virtual host '~ts': ~tp.",
+?LOG_WARNING("Failed to set topic permissions on exchange '~ts' for user '~ts' in virtual host '~ts': ~tp.",
 [Exchange, Username, VirtualHost, Error]),
 erlang:raise(Class, Error, Stacktrace)
 end .
 
 clear_topic_permissions(Username, VirtualHost, ActingUser) ->
-rabbit_log:debug("Asked to clear topic permissions for user '~ts' in virtual host '~ts'",
+?LOG_DEBUG("Asked to clear topic permissions for user '~ts' in virtual host '~ts'",
 [Username, VirtualHost]),
 try
 R = rabbit_db_user:clear_topic_permissions(Username, VirtualHost, '_'),
-rabbit_log:info("Successfully cleared topic permissions for user '~ts' in virtual host '~ts'",
+?LOG_INFO("Successfully cleared topic permissions for user '~ts' in virtual host '~ts'",
 [Username, VirtualHost]),
 rabbit_event:notify(topic_permission_deleted, [{user, Username},
 {vhost, VirtualHost},
@@ -646,18 +647,18 @@ clear_topic_permissions(Username, VirtualHost, ActingUser) ->
 R
 catch
 Class:Error:Stacktrace ->
-rabbit_log:warning("Failed to clear topic permissions for user '~ts' in virtual host '~ts': ~tp",
+?LOG_WARNING("Failed to clear topic permissions for user '~ts' in virtual host '~ts': ~tp",
 [Username, VirtualHost, Error]),
 erlang:raise(Class, Error, Stacktrace)
 end.
 
 clear_topic_permissions(Username, VirtualHost, Exchange, ActingUser) ->
-rabbit_log:debug("Asked to clear topic permissions on exchange '~ts' for user '~ts' in virtual host '~ts'",
+?LOG_DEBUG("Asked to clear topic permissions on exchange '~ts' for user '~ts' in virtual host '~ts'",
 [Exchange, Username, VirtualHost]),
 try
 R = rabbit_db_user:clear_topic_permissions(
 Username, VirtualHost, Exchange),
-rabbit_log:info("Successfully cleared topic permissions on exchange '~ts' for user '~ts' in virtual host '~ts'",
+?LOG_INFO("Successfully cleared topic permissions on exchange '~ts' for user '~ts' in virtual host '~ts'",
 [Exchange, Username, VirtualHost]),
 rabbit_event:notify(topic_permission_deleted, [{user, Username},
 {vhost, VirtualHost},
@@ -665,7 +666,7 @@ clear_topic_permissions(Username, VirtualHost, Exchange, ActingUser) ->
 R
 catch
 Class:Error:Stacktrace ->
-rabbit_log:warning("Failed to clear topic permissions on exchange '~ts' for user '~ts' in virtual host '~ts': ~tp",
+?LOG_WARNING("Failed to clear topic permissions on exchange '~ts' for user '~ts' in virtual host '~ts': ~tp",
 [Exchange, Username, VirtualHost, Error]),
 erlang:raise(Class, Error, Stacktrace)
 end.

@ -7,6 +7,9 @@
-module(rabbit_autoheal).

-include_lib("kernel/include/logger.hrl").

-export([init/0, enabled/0, maybe_start/1, rabbit_down/2, node_down/2,
handle_msg/3, process_down/2]).

@ -117,7 +120,7 @@ init() ->
ok = application:unset_env(rabbit, ?AUTOHEAL_STATE_AFTER_RESTART),
case State of
{leader_waiting, Winner, _} ->
rabbit_log:info(
?LOG_INFO(
"Autoheal: in progress, requesting report from ~tp", [Winner]),
_ = send(Winner, report_autoheal_status),
ok;
@ -130,7 +133,7 @@ maybe_start(not_healing) ->
case enabled() of
true -> Leader = leader(),
_ = send(Leader, {request_start, node()}),
rabbit_log:info("Autoheal request sent to ~tp", [Leader]),
?LOG_INFO("Autoheal request sent to ~tp", [Leader]),
not_healing;
false -> not_healing
end;
@ -151,7 +154,7 @@ leader() ->
%% This is the winner receiving its last notification that a node has
%% stopped - all nodes can now start again
rabbit_down(Node, {winner_waiting, [Node], Notify}) ->
rabbit_log:info("Autoheal: final node has stopped, starting...",[]),
?LOG_INFO("Autoheal: final node has stopped, starting...",[]),
winner_finish(Notify);

rabbit_down(Node, {winner_waiting, WaitFor, Notify}) ->
@ -174,24 +177,24 @@ node_down(Node, {winner_waiting, _, Notify}) ->

node_down(Node, {leader_waiting, Node, _Notify}) ->
%% The winner went down, we don't know what to do so we simply abort.
rabbit_log:info("Autoheal: aborting - winner ~tp went down", [Node]),
?LOG_INFO("Autoheal: aborting - winner ~tp went down", [Node]),
not_healing;

node_down(Node, {leader_waiting, _, _} = St) ->
%% If it is a partial partition, the winner might continue with the
%% healing process. If it is a full partition, the winner will also
%% see it and abort. Let's wait for it.
rabbit_log:info("Autoheal: ~tp went down, waiting for winner decision ", [Node]),
?LOG_INFO("Autoheal: ~tp went down, waiting for winner decision ", [Node]),
St;

node_down(Node, _State) ->
rabbit_log:info("Autoheal: aborting - ~tp went down", [Node]),
?LOG_INFO("Autoheal: aborting - ~tp went down", [Node]),
not_healing.

%% If the process that has to restart the node crashes for an unexpected reason,
%% we go back to a not healing state so the node is able to recover.
process_down({'EXIT', Pid, Reason}, {restarting, Pid}) when Reason =/= normal ->
rabbit_log:info("Autoheal: aborting - the process responsible for restarting the "
?LOG_INFO("Autoheal: aborting - the process responsible for restarting the "
"node terminated with reason: ~tp", [Reason]),
not_healing;

@ -204,14 +207,14 @@ handle_msg({request_start, _Node}, not_healing, []) ->
not_healing;
handle_msg({request_start, Node},
not_healing, Partitions) ->
rabbit_log:info("Autoheal request received from ~tp", [Node]),
?LOG_INFO("Autoheal request received from ~tp", [Node]),
case check_other_nodes(Partitions) of
{error, E} ->
rabbit_log:info("Autoheal request denied: ~ts", [fmt_error(E)]),
?LOG_INFO("Autoheal request denied: ~ts", [fmt_error(E)]),
not_healing;
{ok, AllPartitions} ->
{Winner, Losers} = make_decision(AllPartitions),
rabbit_log:info("Autoheal decision~n"
?LOG_INFO("Autoheal decision~n"
" * Partitions: ~tp~n"
" * Winner: ~tp~n"
" * Losers: ~tp",
@ -226,13 +229,13 @@ handle_msg({request_start, Node},

handle_msg({request_start, Node},
State, _Partitions) ->
rabbit_log:info("Autoheal request received from ~tp when healing; "
?LOG_INFO("Autoheal request received from ~tp when healing; "
"ignoring", [Node]),
State;

handle_msg({become_winner, Losers},
not_healing, _Partitions) ->
rabbit_log:info("Autoheal: I am the winner, waiting for ~tp to stop",
?LOG_INFO("Autoheal: I am the winner, waiting for ~tp to stop",
[Losers]),
stop_partition(Losers);

@ -240,7 +243,7 @@ handle_msg({become_winner, Losers},
{winner_waiting, _, Losers}, _Partitions) ->
%% The leader has aborted the healing, might have seen us down but
%% we didn't see the same. Let's try again as it is the same partition.
rabbit_log:info("Autoheal: I am the winner and received a duplicated "
?LOG_INFO("Autoheal: I am the winner and received a duplicated "
"request, waiting again for ~tp to stop", [Losers]),
stop_partition(Losers);

@ -248,7 +251,7 @@ handle_msg({become_winner, _},
{winner_waiting, _, Losers}, _Partitions) ->
%% Something has happened to the leader, it might have seen us down but we
%% are still alive. Partitions have changed, cannot continue.
rabbit_log:info("Autoheal: I am the winner and received another healing "
?LOG_INFO("Autoheal: I am the winner and received another healing "
"request, partitions have changed to ~tp. Aborting ", [Losers]),
winner_finish(Losers),
not_healing;
@ -272,7 +275,7 @@ handle_msg({winner_is, Winner}, State = {winner_waiting, _OutstandingStops, _Not

handle_msg(Request, {restarting, Pid} = St, _Partitions) ->
%% ignore, we can contribute no further
rabbit_log:info("Autoheal: Received the request ~tp while waiting for ~tp "
?LOG_INFO("Autoheal: Received the request ~tp while waiting for ~tp "
"to restart the node. Ignoring it ", [Request, Pid]),
St;

@ -295,21 +298,21 @@ handle_msg({autoheal_finished, Winner},
%% The winner is finished with the autoheal process and notified us
%% (the leader). We can transition to the "not_healing" state and
%% accept new requests.
rabbit_log:info("Autoheal finished according to winner ~tp", [Winner]),
?LOG_INFO("Autoheal finished according to winner ~tp", [Winner]),
not_healing;

handle_msg({autoheal_finished, Winner}, not_healing, _Partitions)
when Winner =:= node() ->
%% We are the leader and the winner. The state already transitioned
%% to "not_healing" at the end of the autoheal process.
rabbit_log:info("Autoheal finished according to winner ~tp", [node()]),
?LOG_INFO("Autoheal finished according to winner ~tp", [node()]),
not_healing;

handle_msg({autoheal_finished, Winner}, not_healing, _Partitions) ->
%% We might have seen the winner down during a partial partition and
%% transitioned to not_healing. However, the winner was still able
%% to finish. Let it pass.
rabbit_log:info("Autoheal finished according to winner ~tp."
?LOG_INFO("Autoheal finished according to winner ~tp."
" Unexpected, I might have previously seen the winner down", [Winner]),
not_healing.

@ -318,7 +321,7 @@ handle_msg({autoheal_finished, Winner}, not_healing, _Partitions) ->
send(Node, Msg) -> {?SERVER, Node} ! {autoheal_msg, Msg}.

abort(Down, Notify) ->
rabbit_log:info("Autoheal: aborting - ~tp down", [Down]),
?LOG_INFO("Autoheal: aborting - ~tp down", [Down]),
%% Make sure any nodes waiting for us start - it won't necessarily
%% heal the partition but at least they won't get stuck.
%% If we are executing this, we are not stopping. Thus, don't wait
@ -362,7 +365,7 @@ wait_for_supervisors(Monitors) ->
after
60000 ->
AliveLosers = [Node || {_, Node} <- pmon:monitored(Monitors)],
rabbit_log:info("Autoheal: mnesia in nodes ~tp is still up, sending "
?LOG_INFO("Autoheal: mnesia in nodes ~tp is still up, sending "
"winner notification again to these ", [AliveLosers]),
_ = [send(L, {winner_is, node()}) || L <- AliveLosers],
wait_for_mnesia_shutdown(AliveLosers)
@ -370,7 +373,7 @@ wait_for_supervisors(Monitors) ->
end.

restart_loser(State, Winner) ->
rabbit_log:warning("Autoheal: we were selected to restart; winner is ~tp", [Winner]),
?LOG_WARNING("Autoheal: we were selected to restart; winner is ~tp", [Winner]),
NextStateTimeout = application:get_env(rabbit, autoheal_state_transition_timeout, 60000),
rabbit_node_monitor:run_outside_applications(
fun () ->
@ -382,7 +385,7 @@ restart_loser(State, Winner) ->
autoheal_safe_to_start ->
State
after NextStateTimeout ->
rabbit_log:warning(
?LOG_WARNING(
"Autoheal: timed out waiting for a safe-to-start message from the winner (~tp); will retry",
[Winner]),
not_healing
@ -8,6 +8,7 @@
-module(rabbit_binding).
-include_lib("rabbit_common/include/rabbit.hrl").
-include("amqqueue.hrl").
-include_lib("kernel/include/logger.hrl").

-export([recover/0, recover/2, exists/1, add/2, add/3, remove/2, remove/3]).
-export([list/1, list_for_source/1, list_for_destination/1,
@ -117,7 +118,7 @@ recover_semi_durable_route(Gatherer, Binding, Src, Dst, ToRecover, Fun) ->
gatherer:finish(Gatherer)
end);
{error, not_found}=Error ->
rabbit_log:warning(
?LOG_WARNING(
"expected exchange ~tp to exist during recovery, "
"error: ~tp", [Src, Error]),
ok

@ -20,7 +20,7 @@ run_boot_steps() ->

run_boot_steps(Apps) ->
[begin
rabbit_log:info("Running boot step ~ts defined by app ~ts", [Step, App]),
?LOG_INFO("Running boot step ~ts defined by app ~ts", [Step, App]),
ok = run_step(Attrs, mfa)
end || {App, Step, Attrs} <- find_steps(Apps)],
ok.
@ -48,11 +48,11 @@ find_steps(Apps) ->

run_step(Attributes, AttributeName) ->
[begin
rabbit_log:debug("Applying MFA: M = ~ts, F = ~ts, A = ~tp",
?LOG_DEBUG("Applying MFA: M = ~ts, F = ~ts, A = ~tp",
[M, F, A]),
case apply(M,F,A) of
ok ->
rabbit_log:debug("Finished MFA: M = ~ts, F = ~ts, A = ~tp",
?LOG_DEBUG("Finished MFA: M = ~ts, F = ~ts, A = ~tp",
[M, F, A]);
{error, Reason} -> exit({error, Reason})
end
@ -360,7 +360,7 @@ info(Pid) ->
end
catch
exit:{timeout, _} ->
rabbit_log:error("Timed out getting channel ~tp info", [Pid]),
?LOG_ERROR("Timed out getting channel ~tp info", [Pid]),
throw(timeout)
end.

@ -375,7 +375,7 @@ info(Pid, Items) ->
end
catch
exit:{timeout, _} ->
rabbit_log:error("Timed out getting channel ~tp info", [Pid]),
?LOG_ERROR("Timed out getting channel ~tp info", [Pid]),
throw(timeout)
end.

@ -411,7 +411,7 @@ refresh_config_local() ->
try
gen_server2:call(C, refresh_config, infinity)
catch _:Reason ->
rabbit_log:error("Failed to refresh channel config "
?LOG_ERROR("Failed to refresh channel config "
"for channel ~tp. Reason ~tp",
[C, Reason])
end
@ -425,7 +425,7 @@ refresh_interceptors() ->
try
gen_server2:call(C, refresh_interceptors, ?REFRESH_TIMEOUT)
catch _:Reason ->
rabbit_log:error("Failed to refresh channel interceptors "
?LOG_ERROR("Failed to refresh channel interceptors "
"for channel ~tp. Reason ~tp",
[C, Reason])
end
@ -643,7 +643,7 @@ handle_cast(terminate, State = #ch{cfg = #conf{writer_pid = WriterPid}}) ->
ok = rabbit_writer:flush(WriterPid)
catch
_Class:Reason ->
rabbit_log:debug("Failed to flush pending writes on a terminating connection, reason: ~tp", [Reason])
?LOG_DEBUG("Failed to flush pending writes on a terminating connection, reason: ~tp", [Reason])
end,
{stop, normal, State};

@ -805,7 +805,7 @@ terminate(_Reason,
case rabbit_confirms:size(State#ch.unconfirmed) of
0 -> ok;
NumConfirms ->
rabbit_log:warning("Channel is stopping with ~b pending publisher confirms",
?LOG_WARNING("Channel is stopping with ~b pending publisher confirms",
[NumConfirms])
end.

@ -34,6 +34,7 @@
-export([count_local_tracked_items_of_user/1]).

-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").

-import(rabbit_misc, [pget/2]).

@ -214,13 +215,13 @@ ensure_tracked_tables_for_this_node() ->

%% Create tables
ensure_tracked_channels_table_for_this_node() ->
rabbit_log:info("Setting up a table for channel tracking on this node: ~tp",
?LOG_INFO("Setting up a table for channel tracking on this node: ~tp",
[?TRACKED_CHANNEL_TABLE]),
ets:new(?TRACKED_CHANNEL_TABLE, [named_table, public, {write_concurrency, true},
{keypos, #tracked_channel.pid}]).

ensure_per_user_tracked_channels_table_for_this_node() ->
rabbit_log:info("Setting up a table for channel tracking on this node: ~tp",
?LOG_INFO("Setting up a table for channel tracking on this node: ~tp",
[?TRACKED_CHANNEL_TABLE_PER_USER]),
ets:new(?TRACKED_CHANNEL_TABLE_PER_USER, [named_table, public, {write_concurrency, true}]).
@ -4,6 +4,7 @@

-include("amqqueue.hrl").
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").

%% TODO possible to use sets / maps instead of lists?
%% Check performance with QoS 1 and 1 million target queues.
@ -177,13 +178,13 @@ delete(Q0, IfUnused, IfEmpty, ActingUser) when ?amqqueue_is_classic(Q0) ->
#resource{name = Name, virtual_host = Vhost} = QName,
case IfEmpty of
true ->
rabbit_log:error("Queue ~ts in vhost ~ts is down. "
?LOG_ERROR("Queue ~ts in vhost ~ts is down. "
"The queue may be non-empty. "
"Refusing to force-delete.",
[Name, Vhost]),
{error, not_empty};
false ->
rabbit_log:warning("Queue ~ts in vhost ~ts is down. "
?LOG_WARNING("Queue ~ts in vhost ~ts is down. "
"Forcing queue deletion.",
[Name, Vhost]),
case delete_crashed_internal(Q, ActingUser) of
@ -219,7 +220,7 @@ recover(VHost, Queues) ->
FailedQs = find_missing_queues(Queues,RecoveredQs),
{RecoveredQs, FailedQs};
{error, Reason} ->
rabbit_log:error("Failed to start queue supervisor for vhost '~ts': ~ts", [VHost, Reason]),
?LOG_ERROR("Failed to start queue supervisor for vhost '~ts': ~ts", [VHost, Reason]),
throw({error, Reason})
end.

@ -588,7 +589,7 @@ recover_durable_queues(QueuesAndRecoveryTerms) ->
gen_server2:mcall(
[{rabbit_amqqueue_sup_sup:start_queue_process(node(), Q),
{init, {self(), Terms}}} || {Q, Terms} <- QueuesAndRecoveryTerms]),
[rabbit_log:error("Queue ~tp failed to initialise: ~tp",
[?LOG_ERROR("Queue ~tp failed to initialise: ~tp",
[Pid, Error]) || {Pid, Error} <- Failures],
[Q || {_, {new, Q}} <- Results].

@ -42,6 +42,7 @@
-define(ENTRY_SIZE, 32). %% bytes

-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
%% Set to true to get an awful lot of debug logs.
-if(false).
-define(DEBUG(X,Y), logger:debug("~0p: " ++ X, [?FUNCTION_NAME|Y])).
@ -255,7 +256,7 @@ recover(#resource{ virtual_host = VHost, name = QueueName } = Name, Terms,
State = recover_segments(State0, Terms, IsMsgStoreClean,
ContainsCheckFun, OnSyncFun, OnSyncMsgFun,
CountersRef, Context),
rabbit_log:warning("Queue ~ts in vhost ~ts dropped ~b/~b/~b persistent messages "
?LOG_WARNING("Queue ~ts in vhost ~ts dropped ~b/~b/~b persistent messages "
"and ~b transient messages after unclean shutdown",
[QueueName, VHost,
counters:get(CountersRef, ?RECOVER_DROPPED_PERSISTENT_PER_VHOST),
@ -329,7 +330,7 @@ recover_segments(State0, ContainsCheckFun, StoreState0, CountersRef, [Segment|Ta
%% File was either empty or the header was invalid.
%% We cannot recover this file.
_ ->
rabbit_log:warning("Deleting invalid v2 segment file ~ts (file has invalid header)",
?LOG_WARNING("Deleting invalid v2 segment file ~ts (file has invalid header)",
[SegmentFile]),
ok = file:close(Fd),
_ = prim_file:delete(SegmentFile),
@ -436,7 +437,7 @@ recover_segment(State, ContainsCheckFun, StoreState0, CountersRef, Fd,
recover_index_v1_clean(State0 = #qi{ queue_name = Name }, Terms, IsMsgStoreClean,
ContainsCheckFun, OnSyncFun, OnSyncMsgFun) ->
#resource{virtual_host = VHost, name = QName} = Name,
rabbit_log:info("Converting queue ~ts in vhost ~ts from v1 to v2 after clean shutdown", [QName, VHost]),
?LOG_INFO("Converting queue ~ts in vhost ~ts from v1 to v2 after clean shutdown", [QName, VHost]),
{_, _, V1State} = rabbit_queue_index:recover(Name, Terms, IsMsgStoreClean,
ContainsCheckFun, OnSyncFun, OnSyncMsgFun,
convert),
@ -445,7 +446,7 @@ recover_index_v1_clean(State0 = #qi{ queue_name = Name }, Terms, IsMsgStoreClean
%% share code with dirty recovery.
CountersRef = counters:new(?RECOVER_COUNTER_SIZE, []),
State = recover_index_v1_common(State0, V1State, CountersRef),
rabbit_log:info("Queue ~ts in vhost ~ts converted ~b total messages from v1 to v2",
?LOG_INFO("Queue ~ts in vhost ~ts converted ~b total messages from v1 to v2",
[QName, VHost, counters:get(CountersRef, ?RECOVER_COUNT)]),
State.

@ -453,7 +454,7 @@ recover_index_v1_dirty(State0 = #qi{ queue_name = Name }, Terms, IsMsgStoreClean
ContainsCheckFun, OnSyncFun, OnSyncMsgFun,
CountersRef) ->
#resource{virtual_host = VHost, name = QName} = Name,
rabbit_log:info("Converting queue ~ts in vhost ~ts from v1 to v2 after unclean shutdown", [QName, VHost]),
?LOG_INFO("Converting queue ~ts in vhost ~ts from v1 to v2 after unclean shutdown", [QName, VHost]),
%% We ignore the count and bytes returned here because we cannot trust
%% rabbit_queue_index: it has a bug that may lead to more bytes being
%% returned than it really has.
@ -464,7 +465,7 @@ recover_index_v1_dirty(State0 = #qi{ queue_name = Name }, Terms, IsMsgStoreClean
ContainsCheckFun, OnSyncFun, OnSyncMsgFun,
convert),
State = recover_index_v1_common(State0, V1State, CountersRef),
rabbit_log:info("Queue ~ts in vhost ~ts converted ~b total messages from v1 to v2",
?LOG_INFO("Queue ~ts in vhost ~ts converted ~b total messages from v1 to v2",
[QName, VHost, counters:get(CountersRef, ?RECOVER_COUNT)]),
State.
@ -56,6 +56,7 @@
-define(ENTRY_HEADER_SIZE, 8). %% bytes

-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").

%% Set to true to get an awful lot of debug logs.
-if(false).
@ -317,7 +318,7 @@ read_from_disk(SeqId, {?MODULE, Offset, Size}, State0) ->
CRC32Expected = <<CRC32:16>>,
ok
catch C:E:S ->
rabbit_log:error("Per-queue store CRC32 check failed in ~ts seq id ~b offset ~b size ~b",
?LOG_ERROR("Per-queue store CRC32 check failed in ~ts seq id ~b offset ~b size ~b",
[segment_file(Segment, State), SeqId, Offset, Size]),
erlang:raise(C, E, S)
end
@ -415,7 +416,7 @@ parse_many_from_disk([<<Size:32/unsigned, _:7, UseCRC32:1, CRC32Expected:16/bits
CRC32Expected = <<CRC32:16>>,
ok
catch C:E:S ->
rabbit_log:error("Per-queue store CRC32 check failed in ~ts",
?LOG_ERROR("Per-queue store CRC32 check failed in ~ts",
[segment_file(Segment, State)]),
erlang:raise(C, E, S)
end

@ -41,6 +41,7 @@
count_local_tracked_items_of_user/1]).

-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").

-import(rabbit_misc, [pget/2]).

@ -189,17 +190,17 @@ ensure_tracked_tables_for_this_node() ->
ensure_tracked_connections_table_for_this_node() ->
_ = ets:new(?TRACKED_CONNECTION_TABLE, [named_table, public, {write_concurrency, true},
{keypos, #tracked_connection.id}]),
rabbit_log:info("Setting up a table for connection tracking on this node: ~tp",
?LOG_INFO("Setting up a table for connection tracking on this node: ~tp",
[?TRACKED_CONNECTION_TABLE]).

ensure_per_vhost_tracked_connections_table_for_this_node() ->
rabbit_log:info("Setting up a table for per-vhost connection counting on this node: ~tp",
?LOG_INFO("Setting up a table for per-vhost connection counting on this node: ~tp",
[?TRACKED_CONNECTION_TABLE_PER_VHOST]),
ets:new(?TRACKED_CONNECTION_TABLE_PER_VHOST, [named_table, public, {write_concurrency, true}]).

ensure_per_user_tracked_connections_table_for_this_node() ->
_ = ets:new(?TRACKED_CONNECTION_TABLE_PER_USER, [named_table, public, {write_concurrency, true}]),
rabbit_log:info("Setting up a table for per-user connection counting on this node: ~tp",
?LOG_INFO("Setting up a table for per-user connection counting on this node: ~tp",
[?TRACKED_CONNECTION_TABLE_PER_USER]).

-spec tracked_connection_table_name_for(node()) -> atom().
@ -420,7 +421,7 @@ close_connection(#tracked_connection{pid = Pid, type = network}, Message) ->
ok;
_:Err ->
%% ignore, don't terminate
rabbit_log:warning("Could not close connection ~tp: ~tp", [Pid, Err]),
?LOG_WARNING("Could not close connection ~tp: ~tp", [Pid, Err]),
ok
end;
close_connection(#tracked_connection{pid = Pid, type = direct}, Message) ->

@ -223,7 +223,7 @@ join(RemoteNode, NodeType)
%% as RemoteNode thinks this node is already in the cluster.
%% Attempt to leave the RemoteNode cluster, the discovery cluster,
%% and simply retry the operation.
rabbit_log:info("Mnesia: node ~tp thinks it's clustered "
?LOG_INFO("Mnesia: node ~tp thinks it's clustered "
"with node ~tp, but ~tp disagrees. ~tp will ask "
"to leave the cluster and try again.",
[RemoteNode, node(), node(), node()]),
@ -11,6 +11,7 @@
-include("mirrored_supervisor.hrl").

-include("include/rabbit_khepri.hrl").
-include_lib("kernel/include/logger.hrl").

-export([
create_tables/0,
@ -96,7 +97,7 @@ create_or_update_in_mnesia(Group, Overall, Delegate, ChildSpec, Id) ->
rabbit_mnesia:execute_mnesia_transaction(
fun() ->
ReadResult = mnesia:wread({?TABLE, {Group, Id}}),
rabbit_log:debug("Mirrored supervisor: check_start table ~ts read for key ~tp returned ~tp",
?LOG_DEBUG("Mirrored supervisor: check_start table ~ts read for key ~tp returned ~tp",
[?TABLE, {Group, Id}, ReadResult]),
case ReadResult of
[] -> _ = write_in_mnesia(Group, Overall, ChildSpec, Id),
@ -105,12 +106,12 @@ create_or_update_in_mnesia(Group, Overall, Delegate, ChildSpec, Id) ->
mirroring_pid = Pid} = S,
case Overall of
Pid ->
rabbit_log:debug("Mirrored supervisor: overall matched mirrored pid ~tp", [Pid]),
?LOG_DEBUG("Mirrored supervisor: overall matched mirrored pid ~tp", [Pid]),
Delegate;
_ ->
rabbit_log:debug("Mirrored supervisor: overall ~tp did not match mirrored pid ~tp", [Overall, Pid]),
?LOG_DEBUG("Mirrored supervisor: overall ~tp did not match mirrored pid ~tp", [Overall, Pid]),
Sup = mirrored_supervisor:supervisor(Pid),
rabbit_log:debug("Mirrored supervisor: supervisor(~tp) returned ~tp", [Pid, Sup]),
?LOG_DEBUG("Mirrored supervisor: supervisor(~tp) returned ~tp", [Pid, Sup]),
case Sup of
dead ->
_ = write_in_mnesia(Group, Overall, ChildSpec, Id),

@ -14,6 +14,7 @@
-include("amqqueue.hrl").

-include("include/rabbit_khepri.hrl").
-include_lib("kernel/include/logger.hrl").

-export([
get/1,
@ -341,7 +342,7 @@ count(VHostName) ->
try
list_for_count(VHostName)
catch _:Err ->
rabbit_log:error("Failed to fetch number of queues in vhost ~p:~n~p",
?LOG_ERROR("Failed to fetch number of queues in vhost ~p:~n~p",
[VHostName, Err]),
0
end.

@ -13,6 +13,7 @@

-include("include/rabbit_khepri.hrl").
-include("vhost.hrl").
-include_lib("kernel/include/logger.hrl").

-export([create_or_get/3,
merge_metadata/2,
@ -102,7 +103,7 @@ create_or_get_in_mnesia_tx(VHostName, VHost) ->

create_or_get_in_khepri(VHostName, VHost) ->
Path = khepri_vhost_path(VHostName),
rabbit_log:debug("Inserting a virtual host record ~tp", [VHost]),
?LOG_DEBUG("Inserting a virtual host record ~tp", [VHost]),
case rabbit_khepri:create(Path, VHost) of
ok ->
{new, VHost};
@ -137,7 +138,7 @@ merge_metadata(VHostName, Metadata)
when is_binary(VHostName) andalso is_map(Metadata) ->
case do_merge_metadata(VHostName, Metadata) of
{ok, VHost} when ?is_vhost(VHost) ->
rabbit_log:debug("Updated a virtual host record ~tp", [VHost]),
?LOG_DEBUG("Updated a virtual host record ~tp", [VHost]),
{ok, VHost};
{error, _} = Error ->
Error
@ -169,7 +170,7 @@ merge_metadata_in_khepri(VHostName, Metadata) ->
case Ret1 of
{ok, #{Path := #{data := VHost0, payload_version := DVersion}}} ->
VHost = vhost:merge_metadata(VHost0, Metadata),
rabbit_log:debug("Updating a virtual host record ~p", [VHost]),
?LOG_DEBUG("Updating a virtual host record ~p", [VHost]),
Path1 = khepri_path:combine_with_conditions(
Path, [#if_payload_version{version = DVersion}]),
Ret2 = rabbit_khepri:put(Path1, VHost),
@ -240,7 +241,7 @@ enable_protection_from_deletion(VHostName) ->
MetadataPatch = #{
protected_from_deletion => true
},
rabbit_log:info("Enabling deletion protection for virtual host '~ts'", [VHostName]),
?LOG_INFO("Enabling deletion protection for virtual host '~ts'", [VHostName]),
merge_metadata(VHostName, MetadataPatch).

-spec disable_protection_from_deletion(VHostName) -> Ret when
@ -253,7 +254,7 @@ disable_protection_from_deletion(VHostName) ->
MetadataPatch = #{
protected_from_deletion => false
},
rabbit_log:info("Disabling deletion protection for virtual host '~ts'", [VHostName]),
?LOG_INFO("Disabling deletion protection for virtual host '~ts'", [VHostName]),
merge_metadata(VHostName, MetadataPatch).

%% -------------------------------------------------------------------
@ -7,6 +7,9 @@

-module(rabbit_db_vhost_defaults).

-include_lib("kernel/include/logger.hrl").

-export([apply/2]).
-export([list_limits/1, list_operator_policies/1, list_users/1]).

@ -36,20 +39,20 @@ apply(VHost, ActingUser) ->
ok;
L ->
ok = rabbit_vhost_limit:set(VHost, L, ActingUser),
rabbit_log:info("Applied default limits to vhost '~tp': ~tp", [VHost, L])
?LOG_INFO("Applied default limits to vhost '~tp': ~tp", [VHost, L])
end,
lists:foreach(
fun(P) ->
ok = rabbit_policy:set_op(VHost, P#seeding_policy.name, P#seeding_policy.queue_pattern, P#seeding_policy.definition,
undefined, undefined, ActingUser),
rabbit_log:info("Applied default operator policy to vhost '~tp': ~tp", [VHost, P])
?LOG_INFO("Applied default operator policy to vhost '~tp': ~tp", [VHost, P])
end,
list_operator_policies(VHost)
),
lists:foreach(
fun(U) ->
ok = add_user(VHost, U, ActingUser),
rabbit_log:info("Added default user to vhost '~tp': ~tp", [VHost, maps:remove(password, U)])
?LOG_INFO("Added default user to vhost '~tp': ~tp", [VHost, maps:remove(password, U)])
end,
list_users(VHost)
),

@ -12,6 +12,7 @@
detect_cycles/3]).

-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").

%%----------------------------------------------------------------------------

@ -74,7 +75,7 @@ log_cycle_once(Cycle) ->
true ->
ok;
undefined ->
rabbit_log:warning(
?LOG_WARNING(
"Message dropped because the following list of queues (ordered by "
"death recency) contains a dead letter cycle without reason 'rejected'. "
"This list will not be logged again: ~tp",
@ -30,6 +30,7 @@
%% * rabbit_definitions_hashing
-module(rabbit_definitions).
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").

-export([boot/0]).
%% automatic import on boot
@ -177,7 +178,7 @@ validate_definitions(Body) when is_binary(Body) ->

-spec import_raw(Body :: binary() | iolist()) -> ok | {error, term()}.
import_raw(Body) ->
rabbit_log:info("Asked to import definitions. Acting user: ~ts", [?INTERNAL_USER]),
?LOG_INFO("Asked to import definitions. Acting user: ~ts", [?INTERNAL_USER]),
case decode([], Body) of
{error, E} -> {error, E};
{ok, _, Map} -> apply_defs(Map, ?INTERNAL_USER)
@ -185,7 +186,7 @@ import_raw(Body) ->

-spec import_raw(Body :: binary() | iolist(), VHost :: vhost:name()) -> ok | {error, term()}.
import_raw(Body, VHost) ->
rabbit_log:info("Asked to import definitions. Acting user: ~ts", [?INTERNAL_USER]),
?LOG_INFO("Asked to import definitions. Acting user: ~ts", [?INTERNAL_USER]),
case decode([], Body) of
{error, E} -> {error, E};
{ok, _, Map} -> apply_defs(Map, ?INTERNAL_USER, fun() -> ok end, VHost)
@ -195,7 +196,7 @@ import_raw(Body, VHost) ->
import_parsed(Body0) when is_list(Body0) ->
import_parsed(maps:from_list(Body0));
import_parsed(Body0) when is_map(Body0) ->
rabbit_log:info("Asked to import definitions. Acting user: ~ts", [?INTERNAL_USER]),
?LOG_INFO("Asked to import definitions. Acting user: ~ts", [?INTERNAL_USER]),
Body = atomise_map_keys(Body0),
apply_defs(Body, ?INTERNAL_USER).

@ -203,7 +204,7 @@ import_parsed(Body0) when is_map(Body0) ->
import_parsed(Body0, VHost) when is_list(Body0) ->
import_parsed(maps:from_list(Body0), VHost);
import_parsed(Body0, VHost) ->
rabbit_log:info("Asked to import definitions. Acting user: ~ts", [?INTERNAL_USER]),
?LOG_INFO("Asked to import definitions. Acting user: ~ts", [?INTERNAL_USER]),
Body = atomise_map_keys(Body0),
apply_defs(Body, ?INTERNAL_USER, fun() -> ok end, VHost).

@ -212,7 +213,7 @@ import_parsed(Body0, VHost) ->
import_parsed_with_hashing(Body0) when is_list(Body0) ->
import_parsed(maps:from_list(Body0));
import_parsed_with_hashing(Body0) when is_map(Body0) ->
rabbit_log:info("Asked to import definitions. Acting user: ~ts", [?INTERNAL_USER]),
?LOG_INFO("Asked to import definitions. Acting user: ~ts", [?INTERNAL_USER]),
case should_skip_if_unchanged() of
false ->
import_parsed(Body0);
@ -222,10 +223,10 @@ import_parsed_with_hashing(Body0) when is_map(Body0) ->
Algo = rabbit_definitions_hashing:hashing_algorithm(),
case rabbit_definitions_hashing:hash(Algo, Body) of
PreviousHash ->
rabbit_log:info("Submitted definition content hash matches the stored one: ~ts", [binary:part(rabbit_misc:hexify(PreviousHash), 0, 12)]),
?LOG_INFO("Submitted definition content hash matches the stored one: ~ts", [binary:part(rabbit_misc:hexify(PreviousHash), 0, 12)]),
ok;
Other ->
rabbit_log:debug("Submitted definition content hash: ~ts, stored one: ~ts", [
?LOG_DEBUG("Submitted definition content hash: ~ts, stored one: ~ts", [
binary:part(rabbit_misc:hexify(PreviousHash), 0, 10),
binary:part(rabbit_misc:hexify(Other), 0, 10)
]),
@ -239,7 +240,7 @@ import_parsed_with_hashing(Body0) when is_map(Body0) ->
import_parsed_with_hashing(Body0, VHost) when is_list(Body0) ->
import_parsed(maps:from_list(Body0), VHost);
import_parsed_with_hashing(Body0, VHost) ->
rabbit_log:info("Asked to import definitions for virtual host '~ts'. Acting user: ~ts", [?INTERNAL_USER, VHost]),
?LOG_INFO("Asked to import definitions for virtual host '~ts'. Acting user: ~ts", [?INTERNAL_USER, VHost]),

case should_skip_if_unchanged() of
false ->
@ -250,10 +251,10 @@ import_parsed_with_hashing(Body0, VHost) ->
Algo = rabbit_definitions_hashing:hashing_algorithm(),
case rabbit_definitions_hashing:hash(Algo, Body) of
PreviousHash ->
rabbit_log:info("Submitted definition content hash matches the stored one: ~ts", [binary:part(rabbit_misc:hexify(PreviousHash), 0, 12)]),
?LOG_INFO("Submitted definition content hash matches the stored one: ~ts", [binary:part(rabbit_misc:hexify(PreviousHash), 0, 12)]),
ok;
Other ->
rabbit_log:debug("Submitted definition content hash: ~ts, stored one: ~ts", [
?LOG_DEBUG("Submitted definition content hash: ~ts, stored one: ~ts", [
binary:part(rabbit_misc:hexify(PreviousHash), 0, 10),
binary:part(rabbit_misc:hexify(Other), 0, 10)
]),
@ -340,14 +341,14 @@ maybe_load_definitions_from_local_filesystem(App, Key) ->
undefined -> ok;
{ok, none} -> ok;
{ok, Path} ->
rabbit_log:debug("~ts.~ts is set to '~ts', will discover definition file(s) to import", [App, Key, Path]),
?LOG_DEBUG("~ts.~ts is set to '~ts', will discover definition file(s) to import", [App, Key, Path]),
IsDir = filelib:is_dir(Path),
Mod = rabbit_definitions_import_local_filesystem,
rabbit_log:debug("Will use module ~ts to import definitions", [Mod]),
?LOG_DEBUG("Will use module ~ts to import definitions", [Mod]),

case should_skip_if_unchanged() of
false ->
rabbit_log:debug("Will re-import definitions even if they have not changed"),
?LOG_DEBUG("Will re-import definitions even if they have not changed"),
Mod:load(IsDir, Path);
true ->
maybe_load_definitions_from_local_filesystem_if_unchanged(Mod, IsDir, Path)
@ -356,16 +357,16 @@ maybe_load_definitions_from_local_filesystem(App, Key) ->

maybe_load_definitions_from_local_filesystem_if_unchanged(Mod, IsDir, Path) ->
Algo = rabbit_definitions_hashing:hashing_algorithm(),
rabbit_log:debug("Will import definitions only if definition file/directory has changed, hashing algo: ~ts", [Algo]),
?LOG_DEBUG("Will import definitions only if definition file/directory has changed, hashing algo: ~ts", [Algo]),
CurrentHash = rabbit_definitions_hashing:stored_global_hash(),
rabbit_log:debug("Previously stored hash value of imported definitions: ~ts...", [binary:part(rabbit_misc:hexify(CurrentHash), 0, 12)]),
?LOG_DEBUG("Previously stored hash value of imported definitions: ~ts...", [binary:part(rabbit_misc:hexify(CurrentHash), 0, 12)]),
case Mod:load_with_hashing(IsDir, Path, CurrentHash, Algo) of
{error, Err} ->
{error, Err};
CurrentHash ->
rabbit_log:info("Hash value of imported definitions matches current contents");
?LOG_INFO("Hash value of imported definitions matches current contents");
UpdatedHash ->
rabbit_log:debug("Hash value of imported definitions has changed to ~ts", [binary:part(rabbit_misc:hexify(UpdatedHash), 0, 12)]),
?LOG_DEBUG("Hash value of imported definitions has changed to ~ts", [binary:part(rabbit_misc:hexify(UpdatedHash), 0, 12)]),
rabbit_definitions_hashing:store_global_hash(UpdatedHash)
end.

@ -387,20 +388,20 @@ maybe_load_definitions_from_pluggable_source(App, Key) ->
maybe_load_definitions_from_pluggable_source_if_unchanged(Mod, Proplist) ->
case should_skip_if_unchanged() of
false ->
rabbit_log:debug("Will use module ~ts to import definitions", [Mod]),
?LOG_DEBUG("Will use module ~ts to import definitions", [Mod]),
Mod:load(Proplist);
true ->
rabbit_log:debug("Will use module ~ts to import definitions (if definition file/directory/source has changed)", [Mod]),
?LOG_DEBUG("Will use module ~ts to import definitions (if definition file/directory/source has changed)", [Mod]),
CurrentHash = rabbit_definitions_hashing:stored_global_hash(),
rabbit_log:debug("Previously stored hash value of imported definitions: ~ts...", [binary:part(rabbit_misc:hexify(CurrentHash), 0, 12)]),
?LOG_DEBUG("Previously stored hash value of imported definitions: ~ts...", [binary:part(rabbit_misc:hexify(CurrentHash), 0, 12)]),
Algo = rabbit_definitions_hashing:hashing_algorithm(),
case Mod:load_with_hashing(Proplist, CurrentHash, Algo) of
{error, Err} ->
{error, Err};
CurrentHash ->
rabbit_log:info("Hash value of imported definitions matches current contents");
?LOG_INFO("Hash value of imported definitions matches current contents");
UpdatedHash ->
rabbit_log:debug("Hash value of imported definitions has changed to ~ts...", [binary:part(rabbit_misc:hexify(CurrentHash), 0, 12)]),
?LOG_DEBUG("Hash value of imported definitions has changed to ~ts...", [binary:part(rabbit_misc:hexify(CurrentHash), 0, 12)]),
rabbit_definitions_hashing:store_global_hash(UpdatedHash)
end
end.
@ -467,7 +468,7 @@ should_skip_if_unchanged() ->
OptedIn andalso ReachedTargetClusterSize.

log_an_error_about_orphaned_objects() ->
rabbit_log:error("Definitions import: some queues, exchanges or bindings in the definition file "
?LOG_ERROR("Definitions import: some queues, exchanges or bindings in the definition file "
"are missing the virtual host field. Such files are produced when definitions of "
"a single virtual host are exported. They cannot be used to import definitions at boot time").

@ -524,7 +525,7 @@ apply_defs(Map, ActingUser, SuccessFun) when is_function(SuccessFun) ->
end,

fun() ->
rabbit_log:info("There are fewer than target cluster size (~b) nodes online,"
?LOG_INFO("There are fewer than target cluster size (~b) nodes online,"
" skipping queue and binding import from definitions",
[rabbit_nodes:target_cluster_size_hint()])
end
@ -544,7 +545,7 @@ apply_defs(Map, ActingUser, SuccessFun) when is_function(SuccessFun) ->
VHost :: vhost:name()) -> 'ok' | {error, term()}.

apply_defs(Map, ActingUser, SuccessFun, VHost) when is_function(SuccessFun); is_binary(VHost) ->
rabbit_log:info("Asked to import definitions for a virtual host. Virtual host: ~tp, acting user: ~tp",
?LOG_INFO("Asked to import definitions for a virtual host. Virtual host: ~tp, acting user: ~tp",
[VHost, ActingUser]),
try
validate_limits(Map, VHost),
@ -562,7 +563,7 @@ apply_defs(Map, ActingUser, SuccessFun, VHost) when is_function(SuccessFun); is_
end,

fun() ->
rabbit_log:info("There are fewer than target cluster size (~b) nodes online,"
?LOG_INFO("There are fewer than target cluster size (~b) nodes online,"
" skipping queue and binding import from definitions",
[rabbit_nodes:target_cluster_size_hint()])
end
@ -589,7 +590,7 @@ sequential_for_all0(Category, ActingUser, Definitions, Fun) ->
List ->
case length(List) of
0 -> ok;
N -> rabbit_log:info("Importing sequentially ~tp ~ts...", [N, human_readable_category_name(Category)])
N -> ?LOG_INFO("Importing sequentially ~tp ~ts...", [N, human_readable_category_name(Category)])
end,
[begin
%% keys are expected to be atoms
@ -626,7 +627,7 @@ concurrent_for_all0(Category, ActingUser, Definitions, Fun) ->
List ->
case length(List) of
0 -> ok;
N -> rabbit_log:info("Importing concurrently ~tp ~ts...", [N, human_readable_category_name(Category)])
N -> ?LOG_INFO("Importing concurrently ~tp ~ts...", [N, human_readable_category_name(Category)])
end,
WorkPoolFun = fun(M) ->
Fun(atomize_keys(M), ActingUser)
@ -664,7 +665,7 @@ do_concurrent_for_all(List, WorkPoolFun) ->
WorkPoolFun(M)
catch {error, E} -> gatherer:in(Gatherer, {error, E});
_:E:Stacktrace ->
rabbit_log:debug("Definition import: a work pool operation has thrown an exception ~st, stacktrace: ~p",
?LOG_DEBUG("Definition import: a work pool operation has thrown an exception ~st, stacktrace: ~p",
[E, Stacktrace]),
gatherer:in(Gatherer, {error, E})
end,
@ -706,7 +707,7 @@ format({no_such_vhost, VHost}) ->
format({vhost_limit_exceeded, ErrMsg}) ->
rabbit_data_coercion:to_binary(ErrMsg);
format({shutdown, _} = Error) ->
rabbit_log:debug("Metadata store is unavailable: ~p", [Error]),
?LOG_DEBUG("Metadata store is unavailable: ~p", [Error]),
rabbit_data_coercion:to_binary(
rabbit_misc:format("Metadata store is unavailable. Please try again.", []));
format(E) ->
@ -825,11 +826,11 @@ add_queue(VHost, Queue, ActingUser) ->
add_queue_int(_Queue, R = #resource{kind = queue,
name = <<"amq.", _/binary>>}, ActingUser) ->
Name = R#resource.name,
rabbit_log:warning("Skipping import of a queue whose name begins with 'amq.', "
?LOG_WARNING("Skipping import of a queue whose name begins with 'amq.', "
"name: ~ts, acting user: ~ts", [Name, ActingUser]);
add_queue_int(_Queue, R = #resource{kind = queue, virtual_host = undefined}, ActingUser) ->
Name = R#resource.name,
rabbit_log:warning("Skipping import of a queue with an unset virtual host field, "
?LOG_WARNING("Skipping import of a queue with an unset virtual host field, "
"name: ~ts, acting user: ~ts", [Name, ActingUser]);
add_queue_int(Queue, Name = #resource{virtual_host = VHostName}, ActingUser) ->
case rabbit_amqqueue:exists(Name) of
@ -862,11 +863,11 @@ add_exchange(VHost, Exchange, ActingUser) ->
add_exchange_int(Exchange, rv(VHost, exchange, Exchange), ActingUser).

add_exchange_int(_Exchange, #resource{kind = exchange, name = <<"">>}, ActingUser) ->
rabbit_log:warning("Not importing the default exchange, acting user: ~ts", [ActingUser]);
?LOG_WARNING("Not importing the default exchange, acting user: ~ts", [ActingUser]);
add_exchange_int(_Exchange, R = #resource{kind = exchange,
name = <<"amq.", _/binary>>}, ActingUser) ->
Name = R#resource.name,
rabbit_log:warning("Skipping import of an exchange whose name begins with 'amq.', "
?LOG_WARNING("Skipping import of an exchange whose name begins with 'amq.', "
"name: ~ts, acting user: ~ts", [Name, ActingUser]);
add_exchange_int(Exchange, Name, ActingUser) ->
case rabbit_exchange:exists(Name) of
@ -934,7 +935,7 @@ validate_limits(All) ->
undefined -> ok;
Queues0 ->
{ok, VHostMap} = filter_out_existing_queues(Queues0),
_ = rabbit_log:debug("Definition import. Virtual host map for validation: ~p", [VHostMap]),
_ = ?LOG_DEBUG("Definition import. Virtual host map for validation: ~p", [VHostMap]),
maps:fold(fun validate_vhost_limit/3, ok, VHostMap)
end.
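The load_with_hashing paths above all follow one pattern: hash the definition payload, compare it against the previously stored hash, and only import (and store the new hash) when the content actually changed. A rough sketch of that decision, not RabbitMQ's API; maybe_import/2, import_definitions/1 and store_hash/1 are hypothetical names and sha256 stands in for the configured algorithm:

%% Skip re-importing definitions whose content hash has not changed.
maybe_import(Body, PreviousHash) ->
    case crypto:hash(sha256, Body) of
        PreviousHash ->
            %% unchanged content: skip the import, keep the stored hash
            PreviousHash;
        NewHash ->
            ok = import_definitions(Body),
            ok = store_hash(NewHash),
            NewHash
    end.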
@ -20,6 +20,7 @@
-behaviour(rabbit_runtime_parameter).

-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").

-import(rabbit_misc, [pget/2, pget/3]).

@ -109,7 +110,7 @@ stored_vhost_specific_hash(VHostName) ->

-spec store_global_hash(Value :: term()) -> ok.
store_global_hash(Value) ->
rabbit_log:debug("Storing global imported definitions content hash, hex value: ~ts", [rabbit_misc:hexify(Value)]),
?LOG_DEBUG("Storing global imported definitions content hash, hex value: ~ts", [rabbit_misc:hexify(Value)]),
store_global_hash(Value, ?INTERNAL_USER).

-spec store_global_hash(Value0 :: term(), Username :: rabbit_types:username()) -> ok.

@ -14,6 +14,9 @@
%% * rabbit_definitions_import_local_filesystem
%% * rabbit_definitions_hashing
-module(rabbit_definitions_import_https).

-include_lib("kernel/include/logger.hrl").

-export([
is_enabled/0,
load/1,
@ -47,8 +50,8 @@ is_enabled() ->
-spec load(Proplist :: list() | map()) -> ok | {error, term()}.
load(Proplist) ->
URL = pget(url, Proplist),
rabbit_log:info("Applying definitions from a remote URL"),
rabbit_log:debug("HTTPS URL: ~ts", [URL]),
?LOG_INFO("Applying definitions from a remote URL"),
?LOG_DEBUG("HTTPS URL: ~ts", [URL]),
TLSOptions0 = tls_options_or_default(Proplist),
TLSOptions = rabbit_ssl:wrap_password_opt(TLSOptions0),
HTTPOptions = http_options(TLSOptions),
@ -57,8 +60,8 @@ load(Proplist) ->
-spec load_with_hashing(Proplist :: list() | map(), PreviousHash :: binary() | 'undefined', Algo :: crypto:sha1() | crypto:sha2()) -> binary() | 'undefined'.
load_with_hashing(Proplist, PreviousHash, Algo) ->
URL = pget(url, Proplist),
rabbit_log:info("Applying definitions from a remote URL"),
rabbit_log:debug("Loading definitions with content hashing enabled, HTTPS URL: ~ts, previous hash value: ~ts",
?LOG_INFO("Applying definitions from a remote URL"),
?LOG_DEBUG("Loading definitions with content hashing enabled, HTTPS URL: ~ts, previous hash value: ~ts",
[URL, rabbit_misc:hexify(PreviousHash)]),

TLSOptions = tls_options_or_default(Proplist),
@ -67,20 +70,20 @@ load_with_hashing(Proplist, PreviousHash, Algo) ->
case httpc_get(URL, HTTPOptions) of
%% 2XX
{ok, {{_, Code, _}, _Headers, Body}} when Code div 100 == 2 ->
rabbit_log:debug("Requested definitions from remote URL '~ts', response code: ~b", [URL, Code]),
rabbit_log:debug("Requested definitions from remote URL '~ts', body: ~tp", [URL, Body]),
?LOG_DEBUG("Requested definitions from remote URL '~ts', response code: ~b", [URL, Code]),
?LOG_DEBUG("Requested definitions from remote URL '~ts', body: ~tp", [URL, Body]),
case rabbit_definitions_hashing:hash(Algo, Body) of
PreviousHash -> PreviousHash;
Other ->
rabbit_log:debug("New hash: ~ts", [rabbit_misc:hexify(Other)]),
?LOG_DEBUG("New hash: ~ts", [rabbit_misc:hexify(Other)]),
_ = import_raw(Body),
Other
end;
{ok, {{_, Code, _}, _Headers, _Body}} when Code >= 400 ->
rabbit_log:debug("Requested definitions from remote URL '~ts', response code: ~b", [URL, Code]),
?LOG_DEBUG("Requested definitions from remote URL '~ts', response code: ~b", [URL, Code]),
{error, {could_not_read_defs, {URL, rabbit_misc:format("URL request failed with response code ~b", [Code])}}};
{error, Reason} ->
rabbit_log:error("Requested definitions from remote URL '~ts', error: ~tp", [URL, Reason]),
?LOG_ERROR("Requested definitions from remote URL '~ts', error: ~tp", [URL, Reason]),
{error, {could_not_read_defs, {URL, Reason}}}
end.

@ -93,14 +96,14 @@ load_from_url(URL, HTTPOptions0) ->
case httpc_get(URL, HTTPOptions0) of
%% 2XX
{ok, {{_, Code, _}, _Headers, Body}} when Code div 100 == 2 ->
rabbit_log:debug("Requested definitions from remote URL '~ts', response code: ~b", [URL, Code]),
rabbit_log:debug("Requested definitions from remote URL '~ts', body: ~tp", [URL, Body]),
?LOG_DEBUG("Requested definitions from remote URL '~ts', response code: ~b", [URL, Code]),
?LOG_DEBUG("Requested definitions from remote URL '~ts', body: ~tp", [URL, Body]),
import_raw(Body);
{ok, {{_, Code, _}, _Headers, _Body}} when Code >= 400 ->
rabbit_log:debug("Requested definitions from remote URL '~ts', response code: ~b", [URL, Code]),
?LOG_DEBUG("Requested definitions from remote URL '~ts', response code: ~b", [URL, Code]),
{error, {could_not_read_defs, {URL, rabbit_misc:format("URL request failed with response code ~b", [Code])}}};
{error, Reason} ->
rabbit_log:error("Requested definitions from remote URL '~ts', error: ~tp", [URL, Reason]),
?LOG_ERROR("Requested definitions from remote URL '~ts', error: ~tp", [URL, Reason]),
{error, {could_not_read_defs, {URL, Reason}}}
end.
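The HTTPS importer above treats any 2xx response as success (Code div 100 == 2) and anything at or above 400 as an error. A stripped-down sketch of that status check using httpc directly; the function name is hypothetical, the URL is a placeholder, and the inets/ssl applications are assumed to be started:

%% Fetch a definitions document and classify the HTTP response.
fetch_definitions(URL) ->
    case httpc:request(get, {URL, []}, [], []) of
        {ok, {{_, Code, _}, _Headers, Body}} when Code div 100 == 2 ->
            {ok, Body};
        {ok, {{_, Code, _}, _Headers, _Body}} when Code >= 400 ->
            {error, {http_status, Code}};
        {error, Reason} ->
            {error, Reason}
    end.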
@ -15,6 +15,9 @@
%% * rabbit_definitions_import_http
%% * rabbit_definitions_hashing
-module(rabbit_definitions_import_local_filesystem).

-include_lib("kernel/include/logger.hrl").

-export([
is_enabled/0,
%% definition source options
@ -48,7 +51,7 @@ load(Proplist) when is_list(Proplist) ->
case pget(local_path, Proplist, undefined) of
undefined -> {error, "local definition file path is not configured: local_path is not set"};
Path ->
rabbit_log:debug("Asked to import definitions from a local file or directory at '~ts'", [Path]),
?LOG_DEBUG("Asked to import definitions from a local file or directory at '~ts'", [Path]),
IsDir = filelib:is_dir(Path),
case IsDir of
true ->
@ -75,7 +78,7 @@ load_with_hashing(Proplist, PreviousHash, Algo) ->

-spec load_with_hashing(IsDir :: boolean(), Path :: file:name_all(), PreviousHash :: binary() | 'undefined', Algo :: crypto:sha1() | crypto:sha2()) -> binary() | 'undefined'.
load_with_hashing(IsDir, Path, PreviousHash, Algo) when is_boolean(IsDir) ->
rabbit_log:debug("Loading definitions with content hashing enabled, path: ~ts, is directory?: ~tp, previous hash value: ~ts",
?LOG_DEBUG("Loading definitions with content hashing enabled, path: ~ts, is directory?: ~tp, previous hash value: ~ts",
[Path, IsDir, rabbit_misc:hexify(PreviousHash)]),
case compiled_definitions_from_local_path(IsDir, Path) of
%% the directory is empty or no files could be read
@ -87,12 +90,12 @@ load_with_hashing(IsDir, Path, PreviousHash, Algo) when is_boolean(IsDir) ->
case rabbit_definitions_hashing:hash(Algo, Defs) of
PreviousHash -> PreviousHash;
Other ->
rabbit_log:debug("New hash: ~ts", [rabbit_misc:hexify(Other)]),
?LOG_DEBUG("New hash: ~ts", [rabbit_misc:hexify(Other)]),
_ = load_from_local_path(IsDir, Path),
Other
end;
false ->
rabbit_log:error("Definitions file at path ~p failed validation. The file must be a valid JSON document "
?LOG_ERROR("Definitions file at path ~p failed validation. The file must be a valid JSON document "
"and all virtual host-scoped resources must have a virtual host field to be set. "
"Definition files exported for a single virtual host CANNOT be imported at boot time", [Path]),
{error, not_json}
@ -107,10 +110,10 @@ location() ->

-spec load_from_local_path(IsDir :: boolean(), Path :: file:name_all()) -> ok | {error, term()}.
load_from_local_path(true, Dir) ->
rabbit_log:info("Applying definitions from directory ~ts", [Dir]),
?LOG_INFO("Applying definitions from directory ~ts", [Dir]),
load_from_files(file:list_dir(Dir), Dir);
load_from_local_path(false, File) ->
rabbit_log:info("Applying definitions from regular file at ~ts", [File]),
?LOG_INFO("Applying definitions from regular file at ~ts", [File]),
load_from_single_file(File).

%%
@ -169,7 +172,7 @@ compiled_definitions_from_local_path(true = _IsDir, Dir) ->
end, ReadResults),
[Body || {ok, Body} <- Successes];
{error, E} ->
rabbit_log:error("Could not list files in '~ts', error: ~tp", [Dir, E]),
?LOG_ERROR("Could not list files in '~ts', error: ~tp", [Dir, E]),
{error, {could_not_read_defs, {Dir, E}}}
end;
compiled_definitions_from_local_path(false = _IsDir, Path) ->
@ -184,7 +187,7 @@ read_file_contents(Path) ->
{ok, Body} ->
Body;
{error, E} ->
rabbit_log:error("Could not read definitions from file at '~ts', error: ~tp", [Path, E]),
?LOG_ERROR("Could not read definitions from file at '~ts', error: ~tp", [Path, E]),
{error, {could_not_read_defs, {Path, E}}}
end.

@ -193,7 +196,7 @@ load_from_files({ok, Filenames0}, Dir) ->
Filenames2 = [filename:join(Dir, F) || F <- Filenames1],
load_from_multiple_files(Filenames2);
load_from_files({error, E}, Dir) ->
rabbit_log:error("Could not read definitions from directory ~ts, Error: ~tp", [Dir, E]),
?LOG_ERROR("Could not read definitions from directory ~ts, Error: ~tp", [Dir, E]),
{error, {could_not_read_defs, E}}.

load_from_multiple_files([]) ->
@ -205,7 +208,7 @@ load_from_multiple_files([File|Rest]) ->
end.

load_from_single_file(Path) ->
rabbit_log:debug("Will try to load definitions from a local file or directory at '~ts'", [Path]),
?LOG_DEBUG("Will try to load definitions from a local file or directory at '~ts'", [Path]),

case file:read_file_info(Path, [raw]) of
{ok, FileInfo} ->
@ -215,10 +218,10 @@ load_from_single_file(Path) ->
true ->
case rabbit_misc:raw_read_file(Path) of
{ok, Body} ->
rabbit_log:info("Applying definitions from file at '~ts'", [Path]),
?LOG_INFO("Applying definitions from file at '~ts'", [Path]),
import_raw(Body);
{error, E} ->
rabbit_log:error("Could not read definitions from file at '~ts', error: ~tp", [Path, E]),
?LOG_ERROR("Could not read definitions from file at '~ts', error: ~tp", [Path, E]),
{error, {could_not_read_defs, {Path, E}}}
end;
false ->
@ -7,6 +7,9 @@
|
|||
|
||||
-module(rabbit_disk_monitor).
|
||||
|
||||
-include_lib("kernel/include/logger.hrl").
|
||||
|
||||
|
||||
%% Disk monitoring server. Monitors free disk space
|
||||
%% periodically and sets alarms when it is below a certain
|
||||
%% watermark (configurable either as an absolute value or
|
||||
|
@ -145,7 +148,7 @@ init([Limit]) ->
|
|||
{ok, State4}.
|
||||
|
||||
handle_call({set_disk_free_limit, _}, _From, #state{enabled = false} = State) ->
|
||||
rabbit_log:info("Cannot set disk free limit: "
|
||||
?LOG_INFO("Cannot set disk free limit: "
|
||||
"disabled disk free space monitoring", []),
|
||||
{reply, ok, State};
|
||||
|
||||
|
@ -163,22 +166,22 @@ handle_call({set_max_check_interval, MaxInterval}, _From, State) ->
|
|||
|
||||
handle_call({set_enabled, _Enabled = true}, _From, State = #state{enabled = true}) ->
|
||||
_ = start_timer(set_disk_limits(State, State#state.limit)),
|
||||
rabbit_log:info("Free disk space monitor was already enabled"),
|
||||
?LOG_INFO("Free disk space monitor was already enabled"),
|
||||
{reply, ok, State#state{enabled = true}};
|
||||
|
||||
handle_call({set_enabled, _Enabled = true}, _From, State = #state{enabled = false}) ->
_ = start_timer(set_disk_limits(State, State#state.limit)),
rabbit_log:info("Free disk space monitor was manually enabled"),
?LOG_INFO("Free disk space monitor was manually enabled"),
{reply, ok, State#state{enabled = true}};

handle_call({set_enabled, _Enabled = false}, _From, State = #state{enabled = true}) ->
_ = erlang:cancel_timer(State#state.timer),
rabbit_log:info("Free disk space monitor was manually disabled"),
?LOG_INFO("Free disk space monitor was manually disabled"),
{reply, ok, State#state{enabled = false}};

handle_call({set_enabled, _Enabled = false}, _From, State = #state{enabled = false}) ->
_ = erlang:cancel_timer(State#state.timer),
rabbit_log:info("Free disk space monitor was already disabled"),
?LOG_INFO("Free disk space monitor was already disabled"),
{reply, ok, State#state{enabled = false}};

handle_call(_Request, _From, State) ->

@@ -194,7 +197,7 @@ handle_info(update, State) ->
{noreply, start_timer(internal_update(State))};

handle_info(Info, State) ->
rabbit_log:debug("~tp unhandled msg: ~tp", [?MODULE, Info]),
?LOG_DEBUG("~tp unhandled msg: ~tp", [?MODULE, Info]),
{noreply, State}.

terminate(_Reason, _State) ->

@@ -271,7 +274,7 @@ set_max_check_interval(MaxInterval, State) ->
set_disk_limits(State, Limit0) ->
Limit = interpret_limit(Limit0),
State1 = State#state { limit = Limit },
rabbit_log:info("Disk free limit set to ~bMB",
?LOG_INFO("Disk free limit set to ~bMB",
[trunc(Limit / 1000000)]),
ets:insert(?ETS_NAME, {disk_free_limit, Limit}),
internal_update(State1).

@@ -309,7 +312,7 @@ get_disk_free(Dir, {win32, _}, not_used) ->
% "c:/Users/username/AppData/Roaming/RabbitMQ/db/rabbit2@username-z01-mnesia"
case win32_get_drive_letter(Dir) of
error ->
rabbit_log:warning("Expected the mnesia directory absolute "
?LOG_WARNING("Expected the mnesia directory absolute "
"path to start with a drive letter like "
"'C:'. The path is: '~tp'", [Dir]),
{ok, Free} = win32_get_disk_free_dir(Dir),

@@ -340,7 +343,7 @@ get_disk_free(Dir, {win32, _}, not_used) ->
%% could not compute the result
'NaN';
_:Reason:_ ->
rabbit_log:warning("Free disk space monitoring failed to retrieve the amount of available space: ~p", [Reason]),
?LOG_WARNING("Free disk space monitoring failed to retrieve the amount of available space: ~p", [Reason]),
%% could not compute the result
'NaN'
end

@@ -405,13 +408,13 @@ interpret_limit(Absolute) ->
case rabbit_resource_monitor_misc:parse_information_unit(Absolute) of
{ok, ParsedAbsolute} -> ParsedAbsolute;
{error, parse_error} ->
rabbit_log:error("Unable to parse disk_free_limit value ~tp",
?LOG_ERROR("Unable to parse disk_free_limit value ~tp",
[Absolute]),
?DEFAULT_DISK_FREE_LIMIT
end.

emit_update_info(StateStr, CurrentFree, Limit) ->
rabbit_log:info(
?LOG_INFO(
"Free disk space is ~ts. Free bytes: ~b. Limit: ~b",
[StateStr, CurrentFree, Limit]).

@@ -432,7 +435,7 @@ interval(#state{limit = Limit,
trunc(erlang:max(MinInterval, erlang:min(MaxInterval, IdealInterval))).

enable(#state{retries = 0} = State) ->
rabbit_log:error("Free disk space monitor failed to start!"),
?LOG_ERROR("Free disk space monitor failed to start!"),
State;
enable(#state{dir = Dir, os = OS, port = Port} = State) ->
enable_handle_disk_free(catch get_disk_free(Dir, OS, Port), State).

@@ -440,7 +443,7 @@ enable(#state{dir = Dir, os = OS, port = Port} = State) ->
enable_handle_disk_free(DiskFree, State) when is_integer(DiskFree) ->
enable_handle_total_memory(catch vm_memory_monitor:get_total_memory(), DiskFree, State);
enable_handle_disk_free(Error, #state{interval = Interval, retries = Retries} = State) ->
rabbit_log:warning("Free disk space monitor encountered an error "
?LOG_WARNING("Free disk space monitor encountered an error "
"(e.g. failed to parse output from OS tools). "
"Retries left: ~b Error:~n~tp",
[Retries, Error]),

@@ -448,11 +451,11 @@ enable_handle_disk_free(Error, #state{interval = Interval, retries = Retries} =
State#state{enabled = false}.

enable_handle_total_memory(TotalMemory, DiskFree, #state{limit = Limit} = State) when is_integer(TotalMemory) ->
rabbit_log:info("Enabling free disk space monitoring "
?LOG_INFO("Enabling free disk space monitoring "
"(disk free space: ~b, total memory: ~b)", [DiskFree, TotalMemory]),
start_timer(set_disk_limits(State, Limit));
enable_handle_total_memory(Error, _DiskFree, #state{interval = Interval, retries = Retries} = State) ->
rabbit_log:warning("Free disk space monitor encountered an error "
?LOG_WARNING("Free disk space monitor encountered an error "
"retrieving total memory. "
"Retries left: ~b Error:~n~tp",
[Retries, Error]),

@@ -472,6 +475,6 @@ run_os_cmd(Cmd) ->
CmdResult
after 5000 ->
exit(CmdPid, kill),
rabbit_log:error("Command timed out: '~ts'", [Cmd]),
?LOG_ERROR("Command timed out: '~ts'", [Cmd]),
{error, timeout}
end.
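Every file touched by this commit follows the same two-step pattern: add the kernel logger header to the module and replace each rabbit_log:Level(Fmt, Args) call with the matching ?LOG_LEVEL(Fmt, Args) macro, keeping the format string and argument list unchanged. A minimal sketch of the resulting style, with a made-up module and function name that are not part of the commit:

%% Illustrative sketch only, not part of this commit: the module and
%% function names below are hypothetical.
-module(log_macro_sketch).

%% The ?LOG_* macros ship with OTP's kernel application; unlike the
%% rabbit_log wrappers they also attach location metadata (module,
%% function, line) to each log event.
-include_lib("kernel/include/logger.hrl").

-export([report_limit/1]).

report_limit(Limit) ->
    %% Same format-string/argument-list convention as rabbit_log:info/2.
    ?LOG_INFO("Disk free limit set to ~bMB", [trunc(Limit / 1000000)]),
    ok.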
@@ -7,6 +7,9 @@
-module(rabbit_epmd_monitor).

-include_lib("kernel/include/logger.hrl").

-behaviour(gen_server).

-export([start_link/0]).

@@ -84,19 +87,19 @@ check_epmd(State = #state{mod = Mod,
{ok, State#state{port = Port1}}.

handle_port_please(init, noport, Me, Port) ->
rabbit_log:info("epmd does not know us, re-registering as ~ts", [Me]),
?LOG_INFO("epmd does not know us, re-registering as ~ts", [Me]),
{ok, Port};
handle_port_please(check, noport, Me, Port) ->
rabbit_log:warning("epmd does not know us, re-registering ~ts at port ~b", [Me, Port]),
?LOG_WARNING("epmd does not know us, re-registering ~ts at port ~b", [Me, Port]),
{ok, Port};
handle_port_please(_, closed, _Me, Port) ->
rabbit_log:error("epmd monitor failed to retrieve our port from epmd: closed"),
?LOG_ERROR("epmd monitor failed to retrieve our port from epmd: closed"),
{ok, Port};
handle_port_please(init, {port, NewPort, _Version}, _Me, _Port) ->
rabbit_log:info("epmd monitor knows us, inter-node communication (distribution) port: ~tp", [NewPort]),
?LOG_INFO("epmd monitor knows us, inter-node communication (distribution) port: ~tp", [NewPort]),
{ok, NewPort};
handle_port_please(check, {port, NewPort, _Version}, _Me, _Port) ->
{ok, NewPort};
handle_port_please(_, {error, Error}, _Me, Port) ->
rabbit_log:error("epmd monitor failed to retrieve our port from epmd: ~tp", [Error]),
?LOG_ERROR("epmd monitor failed to retrieve our port from epmd: ~tp", [Error]),
{ok, Port}.
@@ -7,6 +7,7 @@
-module(rabbit_exchange).
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").

-export([recover/1, policy_changed/2, callback/4, declare/7,
assert_equivalence/6, assert_args_equivalence/2, check_type/1, exists/1,

@@ -135,7 +136,7 @@ declare(XName, Type, Durable, AutoDelete, Internal, Args, Username) ->
Err
end;
_ ->
rabbit_log:warning("ignoring exchange.declare for exchange ~tp,
?LOG_WARNING("ignoring exchange.declare for exchange ~tp,
exchange.delete in progress~n.", [XName]),
{ok, X}
end.

@@ -531,7 +532,7 @@ peek_serial(XName) ->
rabbit_db_exchange:peek_serial(XName).

invalid_module(T) ->
rabbit_log:warning("Could not find exchange type ~ts.", [T]),
?LOG_WARNING("Could not find exchange type ~ts.", [T]),
put({xtype_to_module, T}, rabbit_exchange_type_invalid),
rabbit_exchange_type_invalid.
@ -14,6 +14,7 @@
|
|||
-dialyzer(no_improper_lists).
|
||||
|
||||
-include("rabbit_fifo.hrl").
|
||||
-include_lib("kernel/include/logger.hrl").
|
||||
|
||||
-define(STATE, ?MODULE).
|
||||
|
||||
|
@ -676,7 +677,7 @@ apply(Meta, {dlx, _} = Cmd,
|
|||
checkout(Meta, State0, State1, Effects0);
|
||||
apply(_Meta, Cmd, State) ->
|
||||
%% handle unhandled commands gracefully
|
||||
rabbit_log:debug("rabbit_fifo: unhandled command ~W", [Cmd, 10]),
|
||||
?LOG_DEBUG("rabbit_fifo: unhandled command ~W", [Cmd, 10]),
|
||||
{State, ok, []}.
|
||||
|
||||
convert_v3_to_v4(#{} = _Meta, StateV3) ->
|
||||
|
@ -1157,7 +1158,7 @@ handle_aux(_RaState, _, force_checkpoint,
|
|||
bytes_in = BytesIn} = Aux, RaAux) ->
|
||||
Ts = erlang:system_time(millisecond),
|
||||
#?STATE{cfg = #cfg{resource = QR}} = ra_aux:machine_state(RaAux),
|
||||
rabbit_log:debug("~ts: rabbit_fifo: forcing checkpoint at ~b",
|
||||
?LOG_DEBUG("~ts: rabbit_fifo: forcing checkpoint at ~b",
|
||||
[rabbit_misc:rs(QR), ra_aux:last_applied(RaAux)]),
|
||||
{Check, Effects} = do_checkpoints(Ts, Check0, RaAux, BytesIn, true),
|
||||
{no_reply, Aux#?AUX{last_checkpoint = Check}, RaAux, Effects};
|
||||
|
@ -1178,7 +1179,7 @@ eval_gc(RaAux, MacState,
|
|||
Mem > ?GC_MEM_LIMIT_B ->
|
||||
garbage_collect(),
|
||||
{memory, MemAfter} = erlang:process_info(self(), memory),
|
||||
rabbit_log:debug("~ts: full GC sweep complete. "
|
||||
?LOG_DEBUG("~ts: full GC sweep complete. "
|
||||
"Process memory changed from ~.2fMB to ~.2fMB.",
|
||||
[rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]),
|
||||
AuxState#?AUX{gc = Gc#aux_gc{last_raft_idx = Idx}};
|
||||
|
@ -1195,7 +1196,7 @@ force_eval_gc(RaAux,
|
|||
true ->
|
||||
garbage_collect(),
|
||||
{memory, MemAfter} = erlang:process_info(self(), memory),
|
||||
rabbit_log:debug("~ts: full GC sweep complete. "
|
||||
?LOG_DEBUG("~ts: full GC sweep complete. "
|
||||
"Process memory changed from ~.2fMB to ~.2fMB.",
|
||||
[rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]),
|
||||
AuxState#?AUX{gc = Gc#aux_gc{last_raft_idx = Idx}};
|
||||
|
|
|
@ -11,6 +11,9 @@
|
|||
%% Handles command tracking and other non-functional concerns.
|
||||
-module(rabbit_fifo_client).
|
||||
|
||||
-include_lib("kernel/include/logger.hrl").
|
||||
|
||||
|
||||
-export([
|
||||
init/1,
|
||||
init/2,
|
||||
|
@ -143,13 +146,13 @@ enqueue(QName, Correlation, Msg,
|
|||
%% to send it
|
||||
{reject_publish, State0};
|
||||
{error, {shutdown, delete}} ->
|
||||
rabbit_log:debug("~ts: QQ ~ts tried to register enqueuer during delete shutdown",
|
||||
?LOG_DEBUG("~ts: QQ ~ts tried to register enqueuer during delete shutdown",
|
||||
[?MODULE, rabbit_misc:rs(QName)]),
|
||||
{reject_publish, State0};
|
||||
{timeout, _} ->
|
||||
{reject_publish, State0};
|
||||
Err ->
|
||||
rabbit_log:debug("~ts: QQ ~ts error when registering enqueuer ~p",
|
||||
?LOG_DEBUG("~ts: QQ ~ts error when registering enqueuer ~p",
|
||||
[?MODULE, rabbit_misc:rs(QName), Err]),
|
||||
exit(Err)
|
||||
end;
|
||||
|
@ -628,7 +631,7 @@ handle_ra_event(QName, Leader, {applied, Seqs},
|
|||
{ok, _, ActualLeader}
|
||||
when ActualLeader =/= OldLeader ->
|
||||
%% there is a new leader
|
||||
rabbit_log:debug("~ts: Detected QQ leader change (applied) "
|
||||
?LOG_DEBUG("~ts: Detected QQ leader change (applied) "
|
||||
"from ~w to ~w, "
|
||||
"resending ~b pending commands",
|
||||
[?MODULE, OldLeader, ActualLeader,
|
||||
|
@ -698,7 +701,7 @@ handle_ra_event(QName, Leader, {machine, leader_change},
|
|||
pending = Pending} = State0) ->
|
||||
%% we need to update leader
|
||||
%% and resend any pending commands
|
||||
rabbit_log:debug("~ts: ~s Detected QQ leader change from ~w to ~w, "
|
||||
?LOG_DEBUG("~ts: ~s Detected QQ leader change from ~w to ~w, "
|
||||
"resending ~b pending commands",
|
||||
[rabbit_misc:rs(QName), ?MODULE, OldLeader,
|
||||
Leader, maps:size(Pending)]),
|
||||
|
@ -710,7 +713,7 @@ handle_ra_event(_QName, _From, {rejected, {not_leader, Leader, _Seq}},
|
|||
handle_ra_event(QName, _From, {rejected, {not_leader, Leader, _Seq}},
|
||||
#state{leader = OldLeader,
|
||||
pending = Pending} = State0) ->
|
||||
rabbit_log:debug("~ts: ~s Detected QQ leader change (rejection) from ~w to ~w, "
|
||||
?LOG_DEBUG("~ts: ~s Detected QQ leader change (rejection) from ~w to ~w, "
|
||||
"resending ~b pending commands",
|
||||
[rabbit_misc:rs(QName), ?MODULE, OldLeader,
|
||||
Leader, maps:size(Pending)]),
|
||||
|
@ -739,7 +742,7 @@ handle_ra_event(QName, Leader, close_cached_segments,
|
|||
{_TRef, Last, Cache} ->
|
||||
case now_ms() > Last + ?CACHE_SEG_TIMEOUT of
|
||||
true ->
|
||||
rabbit_log:debug("~ts: closing_cached_segments",
|
||||
?LOG_DEBUG("~ts: closing_cached_segments",
|
||||
[rabbit_misc:rs(QName)]),
|
||||
%% its been long enough, evict all
|
||||
_ = ra_flru:evict_all(Cache),
|
||||
|
@ -982,7 +985,7 @@ add_delivery_count(DelCntIncr, Tag, #state{consumers = CDels0} = State) ->
|
|||
get_missing_deliveries(State, From, To, ConsumerTag) ->
|
||||
%% find local server
|
||||
ConsumerKey = consumer_key(ConsumerTag, State),
|
||||
rabbit_log:debug("get_missing_deliveries for consumer '~s' from ~b to ~b",
|
||||
?LOG_DEBUG("get_missing_deliveries for consumer '~s' from ~b to ~b",
|
||||
[ConsumerTag, From, To]),
|
||||
Cmd = {get_checked_out, ConsumerKey, lists:seq(From, To)},
|
||||
ServerId = find_local_or_leader(State),
|
||||
|
|
|
@ -8,6 +8,7 @@
|
|||
|
||||
-include("rabbit_fifo_dlx.hrl").
|
||||
-include("rabbit_fifo.hrl").
|
||||
-include_lib("kernel/include/logger.hrl").
|
||||
-compile({no_auto_import, [apply/3]}).
|
||||
|
||||
-export([
|
||||
|
@ -123,7 +124,7 @@ apply(_, {dlx, #checkout{consumer = ConsumerPid,
|
|||
OldConsumerPid ->
|
||||
ok;
|
||||
_ ->
|
||||
rabbit_log:debug("Terminating ~p since ~p becomes active rabbit_fifo_dlx_worker",
|
||||
?LOG_DEBUG("Terminating ~p since ~p becomes active rabbit_fifo_dlx_worker",
|
||||
[OldConsumerPid, ConsumerPid]),
|
||||
ensure_worker_terminated(State0)
|
||||
end,
|
||||
|
@ -144,7 +145,7 @@ apply(_, {dlx, #checkout{consumer = ConsumerPid,
|
|||
msg_bytes_checkout = BytesCheckout - BytesMoved},
|
||||
{State, []};
|
||||
apply(_, Cmd, DLH, State) ->
|
||||
rabbit_log:debug("Ignoring command ~tp for dead_letter_handler ~tp", [Cmd, DLH]),
|
||||
?LOG_DEBUG("Ignoring command ~tp for dead_letter_handler ~tp", [Cmd, DLH]),
|
||||
{State, []}.
|
||||
|
||||
-spec discard([msg()], rabbit_dead_letter:reason(), dead_letter_handler(), state()) ->
|
||||
|
@ -257,7 +258,7 @@ ensure_worker_started(QRef, #?MODULE{consumer = undefined}) ->
|
|||
ensure_worker_started(QRef, #?MODULE{consumer = #dlx_consumer{pid = Pid}}) ->
|
||||
case is_local_and_alive(Pid) of
|
||||
true ->
|
||||
rabbit_log:debug("rabbit_fifo_dlx_worker ~tp already started for ~ts",
|
||||
?LOG_DEBUG("rabbit_fifo_dlx_worker ~tp already started for ~ts",
|
||||
[Pid, rabbit_misc:rs(QRef)]);
|
||||
false ->
|
||||
start_worker(QRef)
|
||||
|
@ -269,7 +270,7 @@ ensure_worker_started(QRef, #?MODULE{consumer = #dlx_consumer{pid = Pid}}) ->
|
|||
%% Ra server process crash in which case another Ra node will become leader.
|
||||
start_worker(QRef) ->
|
||||
{ok, Pid} = supervisor:start_child(rabbit_fifo_dlx_sup, [QRef]),
|
||||
rabbit_log:debug("started rabbit_fifo_dlx_worker ~tp for ~ts",
|
||||
?LOG_DEBUG("started rabbit_fifo_dlx_worker ~tp for ~ts",
|
||||
[Pid, rabbit_misc:rs(QRef)]).
|
||||
|
||||
ensure_worker_terminated(#?MODULE{consumer = undefined}) ->
|
||||
|
@ -280,7 +281,7 @@ ensure_worker_terminated(#?MODULE{consumer = #dlx_consumer{pid = Pid}}) ->
|
|||
%% Note that we can't return a mod_call effect here
|
||||
%% because mod_call is executed on the leader only.
|
||||
ok = supervisor:terminate_child(rabbit_fifo_dlx_sup, Pid),
|
||||
rabbit_log:debug("terminated rabbit_fifo_dlx_worker ~tp", [Pid]);
|
||||
?LOG_DEBUG("terminated rabbit_fifo_dlx_worker ~tp", [Pid]);
|
||||
false ->
|
||||
ok
|
||||
end.
|
||||
|
|
|
@ -6,6 +6,9 @@
|
|||
|
||||
-module(rabbit_fifo_dlx_client).
|
||||
|
||||
-include_lib("kernel/include/logger.hrl").
|
||||
|
||||
|
||||
-export([checkout/3, settle/2, handle_ra_event/3,
|
||||
overview/1]).
|
||||
|
||||
|
@ -47,11 +50,11 @@ process_command(Cmd, #state{leader = Leader} = State, Tries) ->
|
|||
{ok, ok, Leader} ->
|
||||
{ok, State#state{leader = Leader}};
|
||||
{ok, ok, NonLocalLeader} ->
|
||||
rabbit_log:warning("Failed to process command ~tp on quorum queue leader ~tp because actual leader is ~tp.",
|
||||
?LOG_WARNING("Failed to process command ~tp on quorum queue leader ~tp because actual leader is ~tp.",
|
||||
[Cmd, Leader, NonLocalLeader]),
|
||||
{error, non_local_leader};
|
||||
Err ->
|
||||
rabbit_log:warning("Failed to process command ~tp on quorum queue leader ~tp: ~tp~n"
|
||||
?LOG_WARNING("Failed to process command ~tp on quorum queue leader ~tp: ~tp~n"
|
||||
"Trying ~b more time(s)...",
|
||||
[Cmd, Leader, Err, Tries]),
|
||||
process_command(Cmd, State, Tries - 1)
|
||||
|
@ -63,7 +66,7 @@ handle_ra_event(Leader, {dlx_delivery, _} = Del,
|
|||
#state{leader = _Leader} = State) when node(Leader) == node() ->
|
||||
handle_delivery(Del, State);
|
||||
handle_ra_event(From, Evt, State) ->
|
||||
rabbit_log:debug("Ignoring ra event ~tp from ~tp", [Evt, From]),
|
||||
?LOG_DEBUG("Ignoring ra event ~tp from ~tp", [Evt, From]),
|
||||
{ok, State, []}.
|
||||
|
||||
handle_delivery({dlx_delivery, [{FstId, _} | _] = IdMsgs},
|
||||
|
|
|
@ -25,6 +25,7 @@
|
|||
|
||||
-include("mc.hrl").
|
||||
-include_lib("rabbit_common/include/rabbit.hrl").
|
||||
-include_lib("kernel/include/logger.hrl").
|
||||
% -include_lib("rabbit_common/include/rabbit_framing.hrl").
|
||||
|
||||
-behaviour(gen_server).
|
||||
|
@ -135,7 +136,7 @@ terminate(_Reason, State) ->
|
|||
cancel_timer(State).
|
||||
|
||||
handle_call(Request, From, State) ->
|
||||
rabbit_log:info("~ts received unhandled call from ~tp: ~tp", [?MODULE, From, Request]),
|
||||
?LOG_INFO("~ts received unhandled call from ~tp: ~tp", [?MODULE, From, Request]),
|
||||
{noreply, State}.
|
||||
|
||||
handle_cast({dlx_event, _LeaderPid, lookup_topology},
|
||||
|
@ -169,7 +170,7 @@ handle_cast(settle_timeout, State0) ->
|
|||
State = State0#state{timer = undefined},
|
||||
redeliver_and_ack(State);
|
||||
handle_cast(Request, State) ->
|
||||
rabbit_log:info("~ts received unhandled cast ~tp", [?MODULE, Request]),
|
||||
?LOG_INFO("~ts received unhandled cast ~tp", [?MODULE, Request]),
|
||||
{noreply, State}.
|
||||
|
||||
redeliver_and_ack(State0) ->
|
||||
|
@ -183,7 +184,7 @@ handle_info({'DOWN', Ref, process, _, _},
|
|||
queue_ref = QRef}) ->
|
||||
%% Source quorum queue is down. Therefore, terminate ourself.
|
||||
%% The new leader will re-create another dlx_worker.
|
||||
rabbit_log:debug("~ts terminating itself because leader of ~ts is down...",
|
||||
?LOG_DEBUG("~ts terminating itself because leader of ~ts is down...",
|
||||
[?MODULE, rabbit_misc:rs(QRef)]),
|
||||
supervisor:terminate_child(rabbit_fifo_dlx_sup, self());
|
||||
handle_info({{'DOWN', QName}, _MRef, process, QPid, Reason},
|
||||
|
@ -197,7 +198,7 @@ handle_info({{'DOWN', QName}, _MRef, process, QPid, Reason},
|
|||
remove_queue(QRef, State0#state{queue_type_state = QTypeState})
|
||||
end;
|
||||
handle_info(Info, State) ->
|
||||
rabbit_log:info("~ts received unhandled info ~tp", [?MODULE, Info]),
|
||||
?LOG_INFO("~ts received unhandled info ~tp", [?MODULE, Info]),
|
||||
{noreply, State}.
|
||||
|
||||
code_change(_OldVsn, State, _Extra) ->
|
||||
|
@ -219,7 +220,7 @@ remove_queue(QRef, #state{pendings = Pendings0,
|
|||
queue_type_state = QTypeState}}.
|
||||
|
||||
wait_for_queue_deleted(QRef, 0) ->
|
||||
rabbit_log:debug("Received deletion event for ~ts but queue still exists in ETS table.",
|
||||
?LOG_DEBUG("Received deletion event for ~ts but queue still exists in ETS table.",
|
||||
[rabbit_misc:rs(QRef)]);
|
||||
wait_for_queue_deleted(QRef, N) ->
|
||||
case rabbit_amqqueue:exists(QRef) of
|
||||
|
@ -289,7 +290,7 @@ rejected(SeqNo, Qs, Pendings)
|
|||
end,
|
||||
Pendings);
|
||||
false ->
|
||||
rabbit_log:debug("Ignoring rejection for unknown sequence number ~b "
|
||||
?LOG_DEBUG("Ignoring rejection for unknown sequence number ~b "
|
||||
"from target dead letter queues ~tp",
|
||||
[SeqNo, Qs]),
|
||||
Pendings
|
||||
|
@ -386,7 +387,7 @@ deliver_to_queues(Msg, Options, Qs, #state{queue_type_state = QTypeState0,
|
|||
%% we won't rely on rabbit_fifo_client to re-deliver on behalf of us
|
||||
%% (and therefore preventing messages to get stuck in our 'unsettled' state).
|
||||
QNames = queue_names(Qs),
|
||||
rabbit_log:debug("Failed to deliver message with seq_no ~b to "
|
||||
?LOG_DEBUG("Failed to deliver message with seq_no ~b to "
|
||||
"queues ~tp: ~tp",
|
||||
[SeqNo, QNames, Reason]),
|
||||
{State0#state{pendings = rejected(SeqNo, QNames, Pendings)}, []}
|
||||
|
@ -419,7 +420,7 @@ handle_settled0(QRef, MsgSeq, #state{pendings = Pendings,
|
|||
settled = [QRef | Settled]},
|
||||
State#state{pendings = maps:update(MsgSeq, Pend, Pendings)};
|
||||
error ->
|
||||
rabbit_log:debug("Ignoring publisher confirm for unknown sequence number ~b "
|
||||
?LOG_DEBUG("Ignoring publisher confirm for unknown sequence number ~b "
|
||||
"from target dead letter ~ts",
|
||||
[MsgSeq, rabbit_misc:rs(QRef)]),
|
||||
State
|
||||
|
@ -625,7 +626,7 @@ log_missing_dlx_once(#state{exchange_ref = SameDlx,
|
|||
log_missing_dlx_once(#state{exchange_ref = DlxResource,
|
||||
queue_ref = QueueResource,
|
||||
logged = Logged} = State) ->
|
||||
rabbit_log:warning("Cannot forward any dead-letter messages from source quorum ~ts because "
|
||||
?LOG_WARNING("Cannot forward any dead-letter messages from source quorum ~ts because "
|
||||
"its configured dead-letter-exchange ~ts does not exist. "
|
||||
"Either create the configured dead-letter-exchange or re-configure "
|
||||
"the dead-letter-exchange policy for the source quorum queue to prevent "
|
||||
|
@ -642,7 +643,7 @@ log_no_route_once(#state{queue_ref = QueueResource,
|
|||
exchange_ref = DlxResource,
|
||||
routing_key = RoutingKey,
|
||||
logged = Logged} = State) ->
|
||||
rabbit_log:warning("Cannot forward any dead-letter messages from source quorum ~ts "
|
||||
?LOG_WARNING("Cannot forward any dead-letter messages from source quorum ~ts "
|
||||
"with configured dead-letter-exchange ~ts and configured "
|
||||
"dead-letter-routing-key '~ts'. This can happen either if the dead-letter "
|
||||
"routing topology is misconfigured (for example no queue bound to "
|
||||
|
@ -663,7 +664,7 @@ log_cycle_once(Queues, _, #state{logged = Logged} = State)
|
|||
log_cycle_once(Queues, RoutingKeys, #state{exchange_ref = DlxResource,
|
||||
queue_ref = QueueResource,
|
||||
logged = Logged} = State) ->
|
||||
rabbit_log:warning("Dead-letter queues cycle detected for source quorum ~ts "
|
||||
?LOG_WARNING("Dead-letter queues cycle detected for source quorum ~ts "
|
||||
"with dead-letter exchange ~ts and routing keys ~tp: ~tp "
|
||||
"This message will not be logged again.",
|
||||
[rabbit_misc:rs(QueueResource), rabbit_misc:rs(DlxResource),
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
|
||||
-include("rabbit_fifo_v0.hrl").
|
||||
-include_lib("rabbit_common/include/rabbit.hrl").
|
||||
-include_lib("kernel/include/logger.hrl").
|
||||
|
||||
-export([
|
||||
init/1,
|
||||
|
@ -673,7 +674,7 @@ eval_gc(Log, #?STATE{cfg = #cfg{resource = QR}} = MacState,
|
|||
Mem > ?GC_MEM_LIMIT_B ->
|
||||
garbage_collect(),
|
||||
{memory, MemAfter} = erlang:process_info(self(), memory),
|
||||
rabbit_log:debug("~ts: full GC sweep complete. "
|
||||
?LOG_DEBUG("~ts: full GC sweep complete. "
|
||||
"Process memory changed from ~.2fMB to ~.2fMB.",
|
||||
[rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]),
|
||||
AuxState#aux{gc = Gc#aux_gc{last_raft_idx = Idx}};
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
|
||||
-include("rabbit_fifo_v1.hrl").
|
||||
-include_lib("rabbit_common/include/rabbit.hrl").
|
||||
-include_lib("kernel/include/logger.hrl").
|
||||
|
||||
-export([
|
||||
init/1,
|
||||
|
@ -533,7 +534,7 @@ apply(_Meta, {machine_version, 0, 1}, V0State) ->
|
|||
{State, ok, []};
|
||||
apply(_Meta, Cmd, State) ->
|
||||
%% handle unhandled commands gracefully
|
||||
rabbit_log:debug("rabbit_fifo: unhandled command ~W", [Cmd, 10]),
|
||||
?LOG_DEBUG("rabbit_fifo: unhandled command ~W", [Cmd, 10]),
|
||||
{State, ok, []}.
|
||||
|
||||
convert_v0_to_v1(V0State0) ->
|
||||
|
@ -855,7 +856,7 @@ eval_gc(Log, #?STATE{cfg = #cfg{resource = QR}} = MacState,
|
|||
Mem > ?GC_MEM_LIMIT_B ->
|
||||
garbage_collect(),
|
||||
{memory, MemAfter} = erlang:process_info(self(), memory),
|
||||
rabbit_log:debug("~ts: full GC sweep complete. "
|
||||
?LOG_DEBUG("~ts: full GC sweep complete. "
|
||||
"Process memory changed from ~.2fMB to ~.2fMB.",
|
||||
[rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]),
|
||||
AuxState#aux{gc = Gc#aux_gc{last_raft_idx = Idx}};
|
||||
|
@ -871,7 +872,7 @@ force_eval_gc(Log, #?STATE{cfg = #cfg{resource = QR}},
|
|||
true ->
|
||||
garbage_collect(),
|
||||
{memory, MemAfter} = erlang:process_info(self(), memory),
|
||||
rabbit_log:debug("~ts: full GC sweep complete. "
|
||||
?LOG_DEBUG("~ts: full GC sweep complete. "
|
||||
"Process memory changed from ~.2fMB to ~.2fMB.",
|
||||
[rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]),
|
||||
AuxState#aux{gc = Gc#aux_gc{last_raft_idx = Idx}};
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
|
||||
-include("rabbit_fifo_v3.hrl").
|
||||
-include_lib("rabbit_common/include/rabbit.hrl").
|
||||
-include_lib("kernel/include/logger.hrl").
|
||||
|
||||
-define(STATE, rabbit_fifo).
|
||||
|
||||
|
@ -619,7 +620,7 @@ apply(#{index := IncomingRaftIdx} = Meta, {dlx, _} = Cmd,
|
|||
update_smallest_raft_index(IncomingRaftIdx, State, Effects);
|
||||
apply(_Meta, Cmd, State) ->
|
||||
%% handle unhandled commands gracefully
|
||||
rabbit_log:debug("rabbit_fifo: unhandled command ~W", [Cmd, 10]),
|
||||
?LOG_DEBUG("rabbit_fifo: unhandled command ~W", [Cmd, 10]),
|
||||
{State, ok, []}.
|
||||
|
||||
convert_msg({RaftIdx, {Header, empty}}) when is_integer(RaftIdx) ->
|
||||
|
@ -1172,7 +1173,7 @@ eval_gc(Log, #?STATE{cfg = #cfg{resource = QR}} = MacState,
|
|||
Mem > ?GC_MEM_LIMIT_B ->
|
||||
garbage_collect(),
|
||||
{memory, MemAfter} = erlang:process_info(self(), memory),
|
||||
rabbit_log:debug("~ts: full GC sweep complete. "
|
||||
?LOG_DEBUG("~ts: full GC sweep complete. "
|
||||
"Process memory changed from ~.2fMB to ~.2fMB.",
|
||||
[rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]),
|
||||
AuxState#?AUX{gc = Gc#aux_gc{last_raft_idx = Idx}};
|
||||
|
@ -1188,7 +1189,7 @@ force_eval_gc(Log, #?STATE{cfg = #cfg{resource = QR}},
|
|||
true ->
|
||||
garbage_collect(),
|
||||
{memory, MemAfter} = erlang:process_info(self(), memory),
|
||||
rabbit_log:debug("~ts: full GC sweep complete. "
|
||||
?LOG_DEBUG("~ts: full GC sweep complete. "
|
||||
"Process memory changed from ~.2fMB to ~.2fMB.",
|
||||
[rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]),
|
||||
AuxState#?AUX{gc = Gc#aux_gc{last_raft_idx = Idx}};
|
||||
|
|
|
@@ -6,6 +6,9 @@
%%
-module(rabbit_health_check).

-include_lib("kernel/include/logger.hrl").

%% External API
-export([node/1, node/2]).

@@ -28,7 +31,7 @@ node(Node, Timeout) ->
-spec local() -> ok | {error_string, string()}.

local() ->
rabbit_log:warning("rabbitmqctl node_health_check and its HTTP API counterpart are DEPRECATED. "
?LOG_WARNING("rabbitmqctl node_health_check and its HTTP API counterpart are DEPRECATED. "
"See https://www.rabbitmq.com/docs/monitoring#health-checks for replacement options."),
run_checks([list_channels, list_queues, alarms, rabbit_node_monitor]).
@ -8,6 +8,7 @@
|
|||
-module(rabbit_maintenance).
|
||||
|
||||
-include_lib("rabbit_common/include/rabbit.hrl").
|
||||
-include_lib("kernel/include/logger.hrl").
|
||||
|
||||
%% FIXME: Ra consistent queries are currently fragile in the sense that the
|
||||
%% query function may run on a remote node and the function reference or MFA
|
||||
|
@ -62,13 +63,13 @@ is_enabled() ->
|
|||
|
||||
-spec drain() -> ok.
|
||||
drain() ->
|
||||
rabbit_log:warning("This node is being put into maintenance (drain) mode"),
|
||||
?LOG_WARNING("This node is being put into maintenance (drain) mode"),
|
||||
mark_as_being_drained(),
|
||||
rabbit_log:info("Marked this node as undergoing maintenance"),
|
||||
?LOG_INFO("Marked this node as undergoing maintenance"),
|
||||
_ = suspend_all_client_listeners(),
|
||||
rabbit_log:warning("Suspended all listeners and will no longer accept client connections"),
|
||||
?LOG_WARNING("Suspended all listeners and will no longer accept client connections"),
|
||||
{ok, NConnections} = close_all_client_connections(),
|
||||
rabbit_log:warning("Closed ~b local client connections", [NConnections]),
|
||||
?LOG_WARNING("Closed ~b local client connections", [NConnections]),
|
||||
%% allow plugins to react e.g. by closing their protocol connections
|
||||
rabbit_event:notify(maintenance_connections_closed, #{
|
||||
reason => <<"node is being put into maintenance">>
|
||||
|
@ -85,19 +86,19 @@ drain() ->
|
|||
rabbit_event:notify(maintenance_draining, #{
|
||||
reason => <<"node is being put into maintenance">>
|
||||
}),
|
||||
rabbit_log:info("Node is ready to be shut down for maintenance or upgrade"),
|
||||
?LOG_INFO("Node is ready to be shut down for maintenance or upgrade"),
|
||||
|
||||
ok.
|
||||
|
||||
-spec revive() -> ok.
|
||||
revive() ->
|
||||
rabbit_log:info("This node is being revived from maintenance (drain) mode"),
|
||||
?LOG_INFO("This node is being revived from maintenance (drain) mode"),
|
||||
rabbit_queue_type:revive(),
|
||||
rabbit_log:info("Resumed all listeners and will accept client connections again"),
|
||||
?LOG_INFO("Resumed all listeners and will accept client connections again"),
|
||||
_ = resume_all_client_listeners(),
|
||||
rabbit_log:info("Resumed all listeners and will accept client connections again"),
|
||||
?LOG_INFO("Resumed all listeners and will accept client connections again"),
|
||||
unmark_as_being_drained(),
|
||||
rabbit_log:info("Marked this node as back from maintenance and ready to serve clients"),
|
||||
?LOG_INFO("Marked this node as back from maintenance and ready to serve clients"),
|
||||
|
||||
%% allow plugins to react
|
||||
rabbit_event:notify(maintenance_revived, #{}),
|
||||
|
@ -106,12 +107,12 @@ revive() ->
|
|||
|
||||
-spec mark_as_being_drained() -> boolean().
|
||||
mark_as_being_drained() ->
|
||||
rabbit_log:debug("Marking the node as undergoing maintenance"),
|
||||
?LOG_DEBUG("Marking the node as undergoing maintenance"),
|
||||
rabbit_db_maintenance:set(?DRAINING_STATUS).
|
||||
|
||||
-spec unmark_as_being_drained() -> boolean().
|
||||
unmark_as_being_drained() ->
|
||||
rabbit_log:debug("Unmarking the node as undergoing maintenance"),
|
||||
?LOG_DEBUG("Unmarking the node as undergoing maintenance"),
|
||||
rabbit_db_maintenance:set(?DEFAULT_STATUS).
|
||||
|
||||
-spec is_being_drained_local_read(node()) -> boolean().
|
||||
|
@ -157,7 +158,7 @@ filter_out_drained_nodes_consistent_read(Nodes) ->
|
|||
%% but previously established connections won't be interrupted.
|
||||
suspend_all_client_listeners() ->
|
||||
Listeners = rabbit_networking:node_client_listeners(node()),
|
||||
rabbit_log:info("Asked to suspend ~b client connection listeners. "
|
||||
?LOG_INFO("Asked to suspend ~b client connection listeners. "
|
||||
"No new client connections will be accepted until these listeners are resumed!", [length(Listeners)]),
|
||||
Results = lists:foldl(local_listener_fold_fun(fun ranch:suspend_listener/1), [], Listeners),
|
||||
lists:foldl(fun ok_or_first_error/2, ok, Results).
|
||||
|
@ -168,7 +169,7 @@ suspend_all_client_listeners() ->
|
|||
%% A resumed listener will accept new client connections.
|
||||
resume_all_client_listeners() ->
|
||||
Listeners = rabbit_networking:node_client_listeners(node()),
|
||||
rabbit_log:info("Asked to resume ~b client connection listeners. "
|
||||
?LOG_INFO("Asked to resume ~b client connection listeners. "
|
||||
"New client connections will be accepted from now on", [length(Listeners)]),
|
||||
Results = lists:foldl(local_listener_fold_fun(fun ranch:resume_listener/1), [], Listeners),
|
||||
lists:foldl(fun ok_or_first_error/2, ok, Results).
|
||||
|
@ -180,15 +181,15 @@ close_all_client_connections() ->
|
|||
{ok, length(Pids)}.
|
||||
|
||||
transfer_leadership_of_metadata_store(TransferCandidates) ->
|
||||
rabbit_log:info("Will transfer leadership of metadata store with current leader on this node",
|
||||
?LOG_INFO("Will transfer leadership of metadata store with current leader on this node",
|
||||
[]),
|
||||
case rabbit_khepri:transfer_leadership(TransferCandidates) of
|
||||
{ok, Node} when Node == node(); Node == undefined ->
|
||||
rabbit_log:info("Skipping leadership transfer of metadata store: current leader is not on this node");
|
||||
?LOG_INFO("Skipping leadership transfer of metadata store: current leader is not on this node");
|
||||
{ok, Node} ->
|
||||
rabbit_log:info("Leadership transfer for metadata store on this node has been done. The new leader is ~p", [Node]);
|
||||
?LOG_INFO("Leadership transfer for metadata store on this node has been done. The new leader is ~p", [Node]);
|
||||
Error ->
|
||||
rabbit_log:warning("Skipping leadership transfer of metadata store: ~p", [Error])
|
||||
?LOG_WARNING("Skipping leadership transfer of metadata store: ~p", [Error])
|
||||
end.
|
||||
|
||||
-spec primary_replica_transfer_candidate_nodes() -> [node()].
|
||||
|
|
|
@ -8,6 +8,7 @@
|
|||
-module(rabbit_mnesia).
|
||||
|
||||
-include_lib("rabbit_common/include/logging.hrl").
|
||||
-include_lib("kernel/include/logger.hrl").
|
||||
|
||||
-export([%% Main interface
|
||||
init/0,
|
||||
|
@ -123,7 +124,7 @@ init() ->
|
|||
NodeType = node_type(),
|
||||
case is_node_type_permitted(NodeType) of
|
||||
false ->
|
||||
rabbit_log:info(
|
||||
?LOG_INFO(
|
||||
"RAM nodes are deprecated and not permitted. This "
|
||||
"node will be converted to a disc node."),
|
||||
init_db_and_upgrade(cluster_nodes(all), disc,
|
||||
|
@ -175,7 +176,7 @@ can_join_cluster(DiscoveryNode) ->
|
|||
%% do we think so ourselves?
|
||||
case are_we_clustered_with(DiscoveryNode) of
|
||||
true ->
|
||||
rabbit_log:info("Asked to join a cluster but already a member of it: ~tp", [ClusterNodes]),
|
||||
?LOG_INFO("Asked to join a cluster but already a member of it: ~tp", [ClusterNodes]),
|
||||
{ok, already_member};
|
||||
false ->
|
||||
Msg = format_inconsistent_cluster_message(DiscoveryNode, node()),
|
||||
|
@ -195,7 +196,7 @@ join_cluster(ClusterNodes, NodeType) when is_list(ClusterNodes) ->
|
|||
false -> disc;
|
||||
true -> NodeType
|
||||
end,
|
||||
rabbit_log:info("Clustering with ~tp as ~tp node",
|
||||
?LOG_INFO("Clustering with ~tp as ~tp node",
|
||||
[ClusterNodes, NodeType1]),
|
||||
ok = init_db_with_mnesia(ClusterNodes, NodeType1,
|
||||
true, true, _Retry = true),
|
||||
|
@ -230,7 +231,7 @@ reset() ->
|
|||
|
||||
force_reset() ->
|
||||
ensure_mnesia_not_running(),
|
||||
rabbit_log:info("Resetting Rabbit forcefully", []),
|
||||
?LOG_INFO("Resetting Rabbit forcefully", []),
|
||||
wipe().
|
||||
|
||||
reset_gracefully() ->
|
||||
|
@ -300,7 +301,7 @@ forget_cluster_node(Node, RemoveWhenOffline) ->
|
|||
{true, false} -> remove_node_offline_node(Node);
|
||||
{true, true} -> e(online_node_offline_flag);
|
||||
{false, false} -> e(offline_node_no_offline_flag);
|
||||
{false, true} -> rabbit_log:info(
|
||||
{false, true} -> ?LOG_INFO(
|
||||
"Removing node ~tp from cluster", [Node]),
|
||||
case remove_node_if_mnesia_running(Node) of
|
||||
ok -> ok;
|
||||
|
@ -550,7 +551,7 @@ init_db(ClusterNodes, NodeType, CheckOtherNodes) ->
|
|||
ensure_node_type_is_permitted(NodeType),
|
||||
|
||||
NodeIsVirgin = is_virgin_node(),
|
||||
rabbit_log:debug("Does data directory looks like that of a blank (uninitialised) node? ~tp", [NodeIsVirgin]),
|
||||
?LOG_DEBUG("Does data directory looks like that of a blank (uninitialised) node? ~tp", [NodeIsVirgin]),
|
||||
Nodes = change_extra_db_nodes(ClusterNodes, CheckOtherNodes),
|
||||
%% Note that we use `system_info' here and not the cluster status
|
||||
%% since when we start rabbit for the first time the cluster
|
||||
|
@ -744,7 +745,7 @@ remote_node_info(Node) ->
|
|||
|
||||
on_node_up(Node) ->
|
||||
case running_disc_nodes() of
|
||||
[Node] -> rabbit_log:info("cluster contains disc nodes again~n");
|
||||
[Node] -> ?LOG_INFO("cluster contains disc nodes again~n");
|
||||
_ -> ok
|
||||
end.
|
||||
|
||||
|
@ -752,7 +753,7 @@ on_node_up(Node) ->
|
|||
|
||||
on_node_down(_Node) ->
|
||||
case running_disc_nodes() of
|
||||
[] -> rabbit_log:info("only running disc node went down~n");
|
||||
[] -> ?LOG_INFO("only running disc node went down~n");
|
||||
_ -> ok
|
||||
end.
|
||||
|
||||
|
@ -891,17 +892,17 @@ create_schema() ->
|
|||
false = rabbit_khepri:is_enabled(),
|
||||
|
||||
stop_mnesia(),
|
||||
rabbit_log:debug("Will bootstrap a schema database..."),
|
||||
?LOG_DEBUG("Will bootstrap a schema database..."),
|
||||
rabbit_misc:ensure_ok(mnesia:create_schema([node()]), cannot_create_schema),
|
||||
rabbit_log:debug("Bootstraped a schema database successfully"),
|
||||
?LOG_DEBUG("Bootstraped a schema database successfully"),
|
||||
start_mnesia(),
|
||||
|
||||
rabbit_log:debug("Will create schema database tables"),
|
||||
?LOG_DEBUG("Will create schema database tables"),
|
||||
ok = rabbit_table:create(),
|
||||
rabbit_log:debug("Created schema database tables successfully"),
|
||||
rabbit_log:debug("Will check schema database integrity..."),
|
||||
?LOG_DEBUG("Created schema database tables successfully"),
|
||||
?LOG_DEBUG("Will check schema database integrity..."),
|
||||
ensure_schema_integrity(),
|
||||
rabbit_log:debug("Schema database schema integrity check passed"),
|
||||
?LOG_DEBUG("Schema database schema integrity check passed"),
|
||||
ok.
|
||||
|
||||
remove_node_if_mnesia_running(Node) ->
|
||||
|
@ -945,7 +946,7 @@ leave_cluster(Node) ->
|
|||
end.
|
||||
|
||||
wait_for(Condition) ->
|
||||
rabbit_log:info("Waiting for ~tp...", [Condition]),
|
||||
?LOG_INFO("Waiting for ~tp...", [Condition]),
|
||||
timer:sleep(1000).
|
||||
|
||||
start_mnesia(CheckConsistency) ->
|
||||
|
@ -1067,10 +1068,10 @@ mnesia_and_msg_store_files() ->
|
|||
rabbit_feature_flags:enabled_feature_flags_list_file(),
|
||||
rabbit_khepri:dir()],
|
||||
IgnoredFiles = [filename:basename(File) || File <- IgnoredFiles0],
|
||||
rabbit_log:debug("Files and directories found in node's data directory: ~ts, of them to be ignored: ~ts",
|
||||
?LOG_DEBUG("Files and directories found in node's data directory: ~ts, of them to be ignored: ~ts",
|
||||
[string:join(lists:usort(List0), ", "), string:join(lists:usort(IgnoredFiles), ", ")]),
|
||||
List = List0 -- IgnoredFiles,
|
||||
rabbit_log:debug("Files and directories found in node's data directory sans ignored ones: ~ts", [string:join(lists:usort(List), ", ")]),
|
||||
?LOG_DEBUG("Files and directories found in node's data directory sans ignored ones: ~ts", [string:join(lists:usort(List), ", ")]),
|
||||
List
|
||||
end.
|
||||
|
||||
|
|
|
@ -25,6 +25,7 @@
|
|||
%%----------------------------------------------------------------------------
|
||||
|
||||
-include_lib("rabbit_common/include/rabbit.hrl").
|
||||
-include_lib("kernel/include/logger.hrl").
|
||||
|
||||
-type(msg() :: any()).
|
||||
|
||||
|
@ -792,11 +793,11 @@ init([VHost, Type, BaseDir, ClientRefs, StartupFunState]) ->
|
|||
true -> "clean";
|
||||
false -> "unclean"
|
||||
end,
|
||||
rabbit_log:debug("Rebuilding message location index after ~ts shutdown...",
|
||||
?LOG_DEBUG("Rebuilding message location index after ~ts shutdown...",
|
||||
[Cleanliness]),
|
||||
{CurOffset, State1 = #msstate { current_file = CurFile }} =
|
||||
build_index(CleanShutdown, StartupFunState, State),
|
||||
rabbit_log:debug("Finished rebuilding index", []),
|
||||
?LOG_DEBUG("Finished rebuilding index", []),
|
||||
%% Open the most recent file.
|
||||
{ok, CurHdl} = writer_recover(Dir, CurFile, CurOffset),
|
||||
{ok, State1 #msstate { current_file_handle = CurHdl,
|
||||
|
@ -971,7 +972,7 @@ terminate(Reason, State = #msstate { index_ets = IndexEts,
|
|||
{shutdown, _} -> {"", []};
|
||||
_ -> {" with reason ~0p", [Reason]}
|
||||
end,
|
||||
rabbit_log:info("Stopping message store for directory '~ts'" ++ ExtraLog, [Dir|ExtraLogArgs]),
|
||||
?LOG_INFO("Stopping message store for directory '~ts'" ++ ExtraLog, [Dir|ExtraLogArgs]),
|
||||
%% stop the gc first, otherwise it could be working and we pull
|
||||
%% out the ets tables from under it.
|
||||
ok = rabbit_msg_store_gc:stop(GCPid),
|
||||
|
@ -984,7 +985,7 @@ terminate(Reason, State = #msstate { index_ets = IndexEts,
|
|||
case store_file_summary(FileSummaryEts, Dir) of
|
||||
ok -> ok;
|
||||
{error, FSErr} ->
|
||||
rabbit_log:error("Unable to store file summary"
|
||||
?LOG_ERROR("Unable to store file summary"
|
||||
" for vhost message store for directory ~tp~n"
|
||||
"Error: ~tp",
|
||||
[Dir, FSErr])
|
||||
|
@ -994,10 +995,10 @@ terminate(Reason, State = #msstate { index_ets = IndexEts,
|
|||
index_terminate(IndexEts, Dir),
|
||||
case store_recovery_terms([{client_refs, maps:keys(Clients)}], Dir) of
|
||||
ok ->
|
||||
rabbit_log:info("Message store for directory '~ts' is stopped", [Dir]),
|
||||
?LOG_INFO("Message store for directory '~ts' is stopped", [Dir]),
|
||||
ok;
|
||||
{error, RTErr} ->
|
||||
rabbit_log:error("Unable to save message store recovery terms"
|
||||
?LOG_ERROR("Unable to save message store recovery terms"
|
||||
" for directory ~tp~nError: ~tp",
|
||||
[Dir, RTErr])
|
||||
end,
|
||||
|
@ -1703,7 +1704,7 @@ index_terminate(IndexEts, Dir) ->
|
|||
[{extended_info, [object_count]}]) of
|
||||
ok -> ok;
|
||||
{error, Err} ->
|
||||
rabbit_log:error("Unable to save message store index"
|
||||
?LOG_ERROR("Unable to save message store index"
|
||||
" for directory ~tp.~nError: ~tp",
|
||||
[Dir, Err])
|
||||
end,
|
||||
|
@ -1716,11 +1717,11 @@ index_terminate(IndexEts, Dir) ->
|
|||
recover_index_and_client_refs(_Recover, undefined, Dir, _Name) ->
|
||||
{false, index_new(Dir), []};
|
||||
recover_index_and_client_refs(false, _ClientRefs, Dir, Name) ->
|
||||
rabbit_log:warning("Message store ~tp: rebuilding indices from scratch", [Name]),
|
||||
?LOG_WARNING("Message store ~tp: rebuilding indices from scratch", [Name]),
|
||||
{false, index_new(Dir), []};
|
||||
recover_index_and_client_refs(true, ClientRefs, Dir, Name) ->
|
||||
Fresh = fun (ErrorMsg, ErrorArgs) ->
|
||||
rabbit_log:warning("Message store ~tp : " ++ ErrorMsg ++ "~n"
|
||||
?LOG_WARNING("Message store ~tp : " ++ ErrorMsg ++ "~n"
|
||||
"rebuilding indices from scratch",
|
||||
[Name | ErrorArgs]),
|
||||
{false, index_new(Dir), []}
|
||||
|
@ -1813,9 +1814,9 @@ build_index(true, _StartupFunState,
|
|||
{FileSize, State#msstate{ current_file = File }};
|
||||
build_index(false, {MsgRefDeltaGen, MsgRefDeltaGenInit},
|
||||
State = #msstate { dir = Dir }) ->
|
||||
rabbit_log:debug("Rebuilding message refcount...", []),
|
||||
?LOG_DEBUG("Rebuilding message refcount...", []),
|
||||
ok = count_msg_refs(MsgRefDeltaGen, MsgRefDeltaGenInit, State),
|
||||
rabbit_log:debug("Done rebuilding message refcount", []),
|
||||
?LOG_DEBUG("Done rebuilding message refcount", []),
|
||||
{ok, Pid} = gatherer:start_link(),
|
||||
case [filename_to_num(FileName) ||
|
||||
FileName <- list_sorted_filenames(Dir, ?FILE_EXTENSION)] of
|
||||
|
@ -1829,7 +1830,7 @@ build_index(false, {MsgRefDeltaGen, MsgRefDeltaGenInit},
|
|||
build_index_worker(Gatherer, #msstate { index_ets = IndexEts, dir = Dir },
|
||||
File, Files) ->
|
||||
Path = form_filename(Dir, filenum_to_name(File)),
|
||||
rabbit_log:debug("Rebuilding message location index from ~ts (~B file(s) remaining)",
|
||||
?LOG_DEBUG("Rebuilding message location index from ~ts (~B file(s) remaining)",
|
||||
[Path, length(Files)]),
|
||||
%% The scan function already dealt with duplicate messages
|
||||
%% within the file, and only returns valid messages (we do
|
||||
|
@ -2001,7 +2002,7 @@ delete_file_if_empty(File, State = #msstate {
|
|||
compact_file(File, State = #gc_state { file_summary_ets = FileSummaryEts }) ->
|
||||
case ets:lookup(FileSummaryEts, File) of
|
||||
[] ->
|
||||
rabbit_log:debug("File ~tp has already been deleted; no need to compact",
|
||||
?LOG_DEBUG("File ~tp has already been deleted; no need to compact",
|
||||
[File]),
|
||||
ok;
|
||||
[#file_summary{file_size = FileSize}] ->
|
||||
|
@ -2046,7 +2047,7 @@ compact_file(File, FileSize,
|
|||
%% after truncation. This is a debug message so it doesn't hurt to
|
||||
%% put out more details around what's happening.
|
||||
Reclaimed = FileSize - TruncateSize,
|
||||
rabbit_log:debug("Compacted segment file number ~tp; ~tp bytes can now be reclaimed",
|
||||
?LOG_DEBUG("Compacted segment file number ~tp; ~tp bytes can now be reclaimed",
|
||||
[File, Reclaimed]),
|
||||
%% Tell the message store to update its state.
|
||||
gen_server2:cast(Server, {compacted_file, File}),
|
||||
|
@ -2147,7 +2148,7 @@ truncate_file(File, Size, ThresholdTimestamp, #gc_state{ file_summary_ets = File
|
|||
case ets:select(FileHandlesEts, [{{{'_', File}, '$1'},
|
||||
[{'=<', '$1', ThresholdTimestamp}], ['$$']}], 1) of
|
||||
{[_|_], _Cont} ->
|
||||
rabbit_log:debug("Asked to truncate file ~p but it has active readers. Deferring.",
|
||||
?LOG_DEBUG("Asked to truncate file ~p but it has active readers. Deferring.",
|
||||
[File]),
|
||||
defer;
|
||||
_ ->
|
||||
|
@ -2158,7 +2159,7 @@ truncate_file(File, Size, ThresholdTimestamp, #gc_state{ file_summary_ets = File
|
|||
ok = file:close(Fd),
|
||||
true = ets:update_element(FileSummaryEts, File,
|
||||
{#file_summary.file_size, Size}),
|
||||
rabbit_log:debug("Truncated file number ~tp; new size ~tp bytes", [File, Size]),
|
||||
?LOG_DEBUG("Truncated file number ~tp; new size ~tp bytes", [File, Size]),
|
||||
ok
|
||||
end
|
||||
end.
|
||||
|
@ -2170,7 +2171,7 @@ delete_file(File, #gc_state { file_summary_ets = FileSummaryEts,
|
|||
dir = Dir }) ->
|
||||
case ets:match_object(FileHandlesEts, {{'_', File}, '_'}, 1) of
|
||||
{[_|_], _Cont} ->
|
||||
rabbit_log:debug("Asked to delete file ~p but it has active readers. Deferring.",
|
||||
?LOG_DEBUG("Asked to delete file ~p but it has active readers. Deferring.",
|
||||
[File]),
|
||||
defer;
|
||||
_ ->
|
||||
|
@ -2178,7 +2179,7 @@ delete_file(File, #gc_state { file_summary_ets = FileSummaryEts,
|
|||
file_size = FileSize }] = ets:lookup(FileSummaryEts, File),
|
||||
ok = file:delete(form_filename(Dir, filenum_to_name(File))),
|
||||
true = ets:delete(FileSummaryEts, File),
|
||||
rabbit_log:debug("Deleted empty file number ~tp; reclaimed ~tp bytes", [File, FileSize]),
|
||||
?LOG_DEBUG("Deleted empty file number ~tp; reclaimed ~tp bytes", [File, FileSize]),
|
||||
ok
|
||||
end.
|
||||
|
||||
|
|
|
@ -55,6 +55,7 @@
|
|||
|
||||
-include_lib("rabbit_common/include/rabbit.hrl").
|
||||
-include_lib("rabbit_common/include/rabbit_misc.hrl").
|
||||
-include_lib("kernel/include/logger.hrl").
|
||||
|
||||
%% IANA-suggested ephemeral port range is 49152 to 65535
|
||||
-define(FIRST_TEST_BIND_PORT, 49152).
|
||||
|
@ -90,7 +91,7 @@
|
|||
boot() ->
|
||||
ok = record_distribution_listener(),
|
||||
_ = application:start(ranch),
|
||||
rabbit_log:debug("Started Ranch"),
|
||||
?LOG_DEBUG("Started Ranch"),
|
||||
%% Failures will throw exceptions
|
||||
_ = boot_listeners(fun boot_tcp/2, application:get_env(rabbit, num_tcp_acceptors, 10),
|
||||
application:get_env(rabbit, num_conns_sups, 1), "TCP"),
|
||||
|
@ -103,7 +104,7 @@ boot_listeners(Fun, NumAcceptors, ConcurrentConnsSupsCount, Type) ->
|
|||
ok ->
|
||||
ok;
|
||||
{error, {could_not_start_listener, Address, Port, Details}} = Error ->
|
||||
rabbit_log:error("Failed to start ~ts listener [~ts]:~tp, error: ~tp",
|
||||
?LOG_ERROR("Failed to start ~ts listener [~ts]:~tp, error: ~tp",
|
||||
[Type, Address, Port, Details]),
|
||||
throw(Error)
|
||||
end.
|
||||
|
@ -156,7 +157,7 @@ tcp_listener_addresses({Host, Port, Family0})
|
|||
[{IPAddress, Port, Family} ||
|
||||
{IPAddress, Family} <- getaddr(Host, Family0)];
|
||||
tcp_listener_addresses({_Host, Port, _Family0}) ->
|
||||
rabbit_log:error("invalid port ~tp - not 0..65535", [Port]),
|
||||
?LOG_ERROR("invalid port ~tp - not 0..65535", [Port]),
|
||||
throw({error, {invalid_port, Port}}).
|
||||
|
||||
tcp_listener_addresses_auto(Port) ->
|
||||
|
@ -264,7 +265,7 @@ stop_ranch_listener_of_protocol(Protocol) ->
|
|||
case ranch_ref_of_protocol(Protocol) of
|
||||
undefined -> ok;
|
||||
Ref ->
|
||||
rabbit_log:debug("Stopping Ranch listener for protocol ~ts", [Protocol]),
|
||||
?LOG_DEBUG("Stopping Ranch listener for protocol ~ts", [Protocol]),
|
||||
ranch:stop_listener(Ref)
|
||||
end.
|
||||
|
||||
|
@ -404,7 +405,7 @@ epmd_port_please(Name, Host) ->
|
|||
epmd_port_please(Name, Host, 0) ->
|
||||
maybe_get_epmd_port(Name, Host);
|
||||
epmd_port_please(Name, Host, RetriesLeft) ->
|
||||
rabbit_log:debug("Getting epmd port node '~ts', ~b retries left",
|
||||
?LOG_DEBUG("Getting epmd port node '~ts', ~b retries left",
|
||||
[Name, RetriesLeft]),
|
||||
case catch maybe_get_epmd_port(Name, Host) of
|
||||
ok -> ok;
|
||||
|
@ -520,11 +521,11 @@ emit_connection_info_local(Items, Ref, AggregatorPid) ->
|
|||
|
||||
-spec close_connection(pid(), string()) -> 'ok'.
|
||||
close_connection(Pid, Explanation) ->
|
||||
rabbit_log:info("Closing connection ~tp because ~tp",
|
||||
?LOG_INFO("Closing connection ~tp because ~tp",
|
||||
[Pid, Explanation]),
|
||||
try rabbit_reader:shutdown(Pid, Explanation)
|
||||
catch exit:{Reason, _Location} ->
|
||||
rabbit_log:warning("Could not close connection ~tp (reason: ~tp): ~p",
|
||||
?LOG_WARNING("Could not close connection ~tp (reason: ~tp): ~p",
|
||||
[Pid, Explanation, Reason])
|
||||
end.
|
||||
|
||||
|
@ -561,7 +562,7 @@ failed_to_recv_proxy_header(Ref, Error) ->
|
|||
closed -> "error when receiving proxy header: TCP socket was ~tp prematurely";
|
||||
_Other -> "error when receiving proxy header: ~tp"
|
||||
end,
|
||||
rabbit_log:debug(Msg, [Error]),
|
||||
?LOG_DEBUG(Msg, [Error]),
|
||||
% The following call will clean up resources then exit
|
||||
_ = try ranch:handshake(Ref) catch
|
||||
_:_ -> ok
|
||||
|
@ -602,7 +603,7 @@ ranch_handshake(Ref) ->
|
|||
exit:{shutdown, {Reason, {PeerIp, PeerPort}}} = Error:Stacktrace ->
|
||||
PeerAddress = io_lib:format("~ts:~tp", [rabbit_misc:ntoab(PeerIp), PeerPort]),
|
||||
Protocol = ranch_ref_to_protocol(Ref),
|
||||
rabbit_log:error("~p error during handshake for protocol ~p and peer ~ts",
|
||||
?LOG_ERROR("~p error during handshake for protocol ~p and peer ~ts",
|
||||
[Reason, Protocol, PeerAddress]),
|
||||
erlang:raise(exit, Error, Stacktrace)
|
||||
end.
|
||||
|
@ -664,7 +665,7 @@ gethostaddr(Host, Family) ->
|
|||
|
||||
-spec host_lookup_error(_, _) -> no_return().
|
||||
host_lookup_error(Host, Reason) ->
|
||||
rabbit_log:error("invalid host ~tp - ~tp", [Host, Reason]),
|
||||
?LOG_ERROR("invalid host ~tp - ~tp", [Host, Reason]),
|
||||
throw({error, {invalid_host, Host, Reason}}).
|
||||
|
||||
resolve_family({_,_,_,_}, auto) -> inet;
|
||||
|
|
|
@ -7,6 +7,9 @@
|
|||
|
||||
-module(rabbit_node_monitor).
|
||||
|
||||
-include_lib("kernel/include/logger.hrl").
|
||||
|
||||
|
||||
-behaviour(gen_server).
|
||||
|
||||
-export([start_link/0]).
|
||||
|
@ -492,14 +495,14 @@ handle_cast({check_partial_partition, Node, Rep, NodeGUID, MyGUID, RepGUID},
|
|||
case rpc:call(Node, erlang, system_info, [creation]) of
|
||||
{badrpc, _} -> ok;
|
||||
NodeGUID ->
|
||||
rabbit_log:warning("Received a 'DOWN' message"
|
||||
?LOG_WARNING("Received a 'DOWN' message"
|
||||
" from ~tp but still can"
|
||||
" communicate with it ",
|
||||
[Node]),
|
||||
cast(Rep, {partial_partition,
|
||||
Node, node(), RepGUID});
|
||||
_ ->
|
||||
rabbit_log:warning("Node ~tp was restarted", [Node]),
|
||||
?LOG_WARNING("Node ~tp was restarted", [Node]),
|
||||
ok
|
||||
end
|
||||
end),
|
||||
|
@ -530,7 +533,7 @@ handle_cast({partial_partition, NotReallyDown, Proxy, MyGUID},
|
|||
ArgsBase = [NotReallyDown, Proxy, NotReallyDown],
|
||||
case application:get_env(rabbit, cluster_partition_handling) of
|
||||
{ok, pause_minority} ->
|
||||
rabbit_log:error(
|
||||
?LOG_ERROR(
|
||||
FmtBase ++ " * pause_minority mode enabled~n"
|
||||
"We will therefore pause until the *entire* cluster recovers",
|
||||
ArgsBase),
|
||||
|
@ -538,17 +541,17 @@ handle_cast({partial_partition, NotReallyDown, Proxy, MyGUID},
|
|||
{noreply, State};
|
||||
{ok, {pause_if_all_down, PreferredNodes, _}} ->
|
||||
case in_preferred_partition(PreferredNodes) of
|
||||
true -> rabbit_log:error(
|
||||
true -> ?LOG_ERROR(
|
||||
FmtBase ++ "We will therefore intentionally "
|
||||
"disconnect from ~ts", ArgsBase ++ [Proxy]),
|
||||
upgrade_to_full_partition(Proxy);
|
||||
false -> rabbit_log:info(
|
||||
false -> ?LOG_INFO(
|
||||
FmtBase ++ "We are about to pause, no need "
|
||||
"for further actions", ArgsBase)
|
||||
end,
|
||||
{noreply, State};
|
||||
{ok, _} ->
|
||||
rabbit_log:error(
|
||||
?LOG_ERROR(
|
||||
FmtBase ++ "We will therefore intentionally disconnect from ~ts",
|
||||
ArgsBase ++ [Proxy]),
|
||||
upgrade_to_full_partition(Proxy),
|
||||
|
@ -562,7 +565,7 @@ handle_cast({partial_partition, _GUID, _Reporter, _Proxy}, State) ->
|
|||
%% messages reliably when another node disconnects from us. Therefore
|
||||
%% we are told just before the disconnection so we can reciprocate.
|
||||
handle_cast({partial_partition_disconnect, Other}, State) ->
|
||||
rabbit_log:error("Partial partition disconnect from ~ts", [Other]),
|
||||
?LOG_ERROR("Partial partition disconnect from ~ts", [Other]),
|
||||
disconnect(Other),
|
||||
{noreply, State};
|
||||
|
||||
|
@ -571,7 +574,7 @@ handle_cast({partial_partition_disconnect, Other}, State) ->
|
|||
%% mnesia propagation.
|
||||
handle_cast({node_up, Node, NodeType},
|
||||
State = #state{monitors = Monitors}) ->
|
||||
rabbit_log:info("rabbit on node ~tp up", [Node]),
|
||||
?LOG_INFO("rabbit on node ~tp up", [Node]),
|
||||
case rabbit_khepri:is_enabled() of
|
||||
true ->
|
||||
ok;
|
||||
|
@ -606,7 +609,7 @@ handle_cast({joined_cluster, Node, NodeType}, State) ->
|
|||
end,
|
||||
RunningNodes})
|
||||
end,
|
||||
rabbit_log:debug("Node '~tp' has joined the cluster", [Node]),
|
||||
?LOG_DEBUG("Node '~tp' has joined the cluster", [Node]),
|
||||
rabbit_event:notify(node_added, [{node, Node}]),
|
||||
{noreply, State};
|
||||
|
||||
|
@ -634,7 +637,7 @@ handle_cast(_Msg, State) ->
|
|||
|
||||
handle_info({'DOWN', _MRef, process, {rabbit, Node}, _Reason},
|
||||
State = #state{monitors = Monitors, subscribers = Subscribers}) ->
|
||||
rabbit_log:info("rabbit on node ~tp down", [Node]),
|
||||
?LOG_INFO("rabbit on node ~tp down", [Node]),
|
||||
case rabbit_khepri:is_enabled() of
|
||||
true ->
|
||||
ok;
|
||||
|
@ -653,7 +656,7 @@ handle_info({'DOWN', _MRef, process, Pid, _Reason},
|
|||
{noreply, State#state{subscribers = pmon:erase(Pid, Subscribers)}};
|
||||
|
||||
handle_info({nodedown, Node, Info}, State) ->
|
||||
rabbit_log:info("node ~tp down: ~tp",
|
||||
?LOG_INFO("node ~tp down: ~tp",
|
||||
[Node, proplists:get_value(nodedown_reason, Info)]),
|
||||
case rabbit_khepri:is_enabled() of
|
||||
true -> {noreply, State};
|
||||
|
@ -661,7 +664,7 @@ handle_info({nodedown, Node, Info}, State) ->
|
|||
end;
|
||||
|
||||
handle_info({nodeup, Node, _Info}, State) ->
|
||||
rabbit_log:info("node ~tp up", [Node]),
|
||||
?LOG_INFO("node ~tp up", [Node]),
|
||||
{noreply, State};
|
||||
|
||||
handle_info({mnesia_system_event,
|
||||
|
@ -781,13 +784,13 @@ handle_dead_node(Node, State = #state{autoheal = Autoheal}) ->
|
|||
{ok, autoheal} ->
|
||||
State#state{autoheal = rabbit_autoheal:node_down(Node, Autoheal)};
|
||||
{ok, Term} ->
|
||||
rabbit_log:warning("cluster_partition_handling ~tp unrecognised, "
|
||||
?LOG_WARNING("cluster_partition_handling ~tp unrecognised, "
|
||||
"assuming 'ignore'", [Term]),
|
||||
State
|
||||
end.
|
||||
|
||||
await_cluster_recovery(Condition) ->
|
||||
rabbit_log:warning("Cluster minority/secondary status detected - "
|
||||
?LOG_WARNING("Cluster minority/secondary status detected - "
|
||||
"awaiting recovery", []),
|
||||
run_outside_applications(fun () ->
|
||||
rabbit:stop(),
|
||||
|
@ -838,7 +841,7 @@ do_run_outside_app_fun(Fun) ->
|
|||
try
|
||||
Fun()
|
||||
catch _:E:Stacktrace ->
|
||||
rabbit_log:error(
|
||||
?LOG_ERROR(
|
||||
"rabbit_outside_app_process:~n~tp~n~tp",
|
||||
[E, Stacktrace])
|
||||
end.
|
||||
|
@ -1048,14 +1051,14 @@ possibly_partitioned_nodes() ->
|
|||
alive_rabbit_nodes() -- rabbit_mnesia:cluster_nodes(running).
|
||||
|
||||
startup_log() ->
|
||||
rabbit_log:info("Starting rabbit_node_monitor (partition handling strategy unapplicable with Khepri)", []).
|
||||
?LOG_INFO("Starting rabbit_node_monitor (partition handling strategy unapplicable with Khepri)", []).
|
||||
|
||||
startup_log(Nodes) ->
|
||||
{ok, M} = application:get_env(rabbit, cluster_partition_handling),
|
||||
startup_log(Nodes, M).
|
||||
|
||||
startup_log([], PartitionHandling) ->
|
||||
rabbit_log:info("Starting rabbit_node_monitor (in ~tp mode)", [PartitionHandling]);
|
||||
?LOG_INFO("Starting rabbit_node_monitor (in ~tp mode)", [PartitionHandling]);
|
||||
startup_log(Nodes, PartitionHandling) ->
|
||||
rabbit_log:info("Starting rabbit_node_monitor (in ~tp mode), might be partitioned from ~tp",
|
||||
?LOG_INFO("Starting rabbit_node_monitor (in ~tp mode), might be partitioned from ~tp",
|
||||
[PartitionHandling, Nodes]).
|
||||
|
|
|
@ -126,7 +126,7 @@ seed_internal_cluster_id() ->
|
|||
case rabbit_runtime_parameters:lookup_global(?INTERNAL_CLUSTER_ID_PARAM_NAME) of
|
||||
not_found ->
|
||||
Id = rabbit_guid:binary(rabbit_guid:gen(), "rabbitmq-cluster-id"),
|
||||
rabbit_log:info("Initialising internal cluster ID to '~ts'", [Id]),
|
||||
?LOG_INFO("Initialising internal cluster ID to '~ts'", [Id]),
|
||||
rabbit_runtime_parameters:set_global(?INTERNAL_CLUSTER_ID_PARAM_NAME, Id, ?INTERNAL_USER),
|
||||
Id;
|
||||
Param ->
|
||||
|
@ -138,7 +138,7 @@ seed_user_provided_cluster_name() ->
|
|||
case application:get_env(rabbit, cluster_name) of
|
||||
undefined -> ok;
|
||||
{ok, Name} ->
|
||||
rabbit_log:info("Setting cluster name to '~ts' as configured", [Name]),
|
||||
?LOG_INFO("Setting cluster name to '~ts' as configured", [Name]),
|
||||
set_cluster_name(rabbit_data_coercion:to_binary(Name))
|
||||
end.
|
||||
|
||||
|
|
|
@@ -6,6 +6,9 @@
%%

-module(rabbit_peer_discovery_classic_config).

-include_lib("kernel/include/logger.hrl").

-behaviour(rabbit_peer_discovery_backend).

-export([list_nodes/0, supports_registration/0, register/0, unregister/0,

@@ -42,7 +45,7 @@ check_duplicates(Nodes) ->
true ->
ok;
false ->
rabbit_log:warning("Classic peer discovery backend: list of "
?LOG_WARNING("Classic peer discovery backend: list of "
"nodes contains duplicates ~0tp",
[Nodes])
end.

@@ -52,7 +55,7 @@ check_local_node(Nodes) ->
true ->
ok;
false ->
rabbit_log:warning("Classic peer discovery backend: list of "
?LOG_WARNING("Classic peer discovery backend: list of "
"nodes does not contain the local node ~0tp",
[Nodes])
end.

@@ -65,7 +68,7 @@ lock(Nodes) ->
Node = node(),
case lists:member(Node, Nodes) of
false when Nodes =/= [] ->
rabbit_log:warning("Local node ~ts is not part of configured nodes ~tp. "
?LOG_WARNING("Local node ~ts is not part of configured nodes ~tp. "
"This might lead to incorrect cluster formation.", [Node, Nodes]);
_ -> ok
end,

@@ -6,6 +6,9 @@
%%

-module(rabbit_peer_discovery_dns).

-include_lib("kernel/include/logger.hrl").

-behaviour(rabbit_peer_discovery_backend).

-export([list_nodes/0, supports_registration/0, register/0, unregister/0,

@@ -27,7 +30,7 @@ list_nodes() ->
{ok, ClusterFormation} ->
case proplists:get_value(peer_discovery_dns, ClusterFormation) of
undefined ->
rabbit_log:warning("Peer discovery backend is set to ~ts "
?LOG_WARNING("Peer discovery backend is set to ~ts "
"but final config does not contain rabbit.cluster_formation.peer_discovery_dns. "
"Cannot discover any nodes because seed hostname is not configured!",
[?MODULE]),

@@ -90,7 +93,7 @@ decode_record(ipv6) ->

lookup(SeedHostname, LongNamesUsed, IPv) ->
IPs = inet_res:lookup(SeedHostname, in, decode_record(IPv)),
rabbit_log:info("Addresses discovered via ~ts records of ~ts: ~ts",
?LOG_INFO("Addresses discovered via ~ts records of ~ts: ~ts",
[string:to_upper(atom_to_list(decode_record(IPv))),
SeedHostname,
string:join([inet_parse:ntoa(IP) || IP <- IPs], ", ")]),

@@ -106,6 +109,6 @@ extract_host({ok, {hostent, FQDN, _, _, _, _}}, true, _Address) ->
extract_host({ok, {hostent, FQDN, _, _, _, _}}, false, _Address) ->
lists:nth(1, string:tokens(FQDN, "."));
extract_host({error, Error}, _, Address) ->
rabbit_log:error("Reverse DNS lookup for address ~ts failed: ~tp",
?LOG_ERROR("Reverse DNS lookup for address ~ts failed: ~tp",
[inet_parse:ntoa(Address), Error]),
error.

@@ -7,6 +7,7 @@

-module(rabbit_plugins).
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-export([setup/0, active/0, read_enabled/1, list/1, list/2, dependencies/3, running_plugins/0]).
-export([ensure/1]).
-export([validate_plugins/1, format_invalid_plugins/1]).

@@ -54,13 +55,13 @@ ensure1(FileJustChanged0) ->
{[], []} ->
ok;
{[], _} ->
rabbit_log:info("Plugins changed; disabled ~tp",
?LOG_INFO("Plugins changed; disabled ~tp",
[Stop]);
{_, []} ->
rabbit_log:info("Plugins changed; enabled ~tp",
?LOG_INFO("Plugins changed; enabled ~tp",
[Start]);
{_, _} ->
rabbit_log:info("Plugins changed; enabled ~tp, disabled ~tp",
?LOG_INFO("Plugins changed; enabled ~tp, disabled ~tp",
[Start, Stop])
end,
{ok, Start, Stop};

@@ -271,7 +272,7 @@ maybe_warn_about_invalid_plugins([]) ->
ok;
maybe_warn_about_invalid_plugins(InvalidPlugins) ->
%% TODO: error message formatting
rabbit_log:warning(format_invalid_plugins(InvalidPlugins)).
?LOG_WARNING(format_invalid_plugins(InvalidPlugins)).

format_invalid_plugins(InvalidPlugins) ->

@@ -327,7 +328,7 @@ validate_plugins(Plugins, BrokerVersion) ->
true ->
case BrokerVersion of
"0.0.0" ->
rabbit_log:warning(
?LOG_WARNING(
"Running development version of the broker."
" Requirement ~tp for plugin ~tp is ignored.",
[BrokerVersionReqs, Name]);

@@ -358,7 +359,7 @@ check_plugins_versions(PluginName, AllPlugins, RequiredVersions) ->
true ->
case Version of
"" ->
rabbit_log:warning(
?LOG_WARNING(
"~tp plugin version is not defined."
" Requirement ~tp for plugin ~tp is ignored",
[Name, Versions, PluginName]);

@@ -426,7 +427,7 @@ prepare_dir_plugin(PluginAppDescPath) ->
{module, _} ->
ok;
{error, badfile} ->
rabbit_log:error("Failed to enable plugin \"~ts\": "
?LOG_ERROR("Failed to enable plugin \"~ts\": "
"it may have been built with an "
"incompatible (more recent?) "
"version of Erlang", [Plugin]),

@@ -459,11 +460,11 @@ prepare_plugin(#plugin{type = ez, name = Name, location = Location}, ExpandDir)
[PluginAppDescPath|_] ->
prepare_dir_plugin(PluginAppDescPath);
_ ->
rabbit_log:error("Plugin archive '~ts' doesn't contain an .app file", [Location]),
?LOG_ERROR("Plugin archive '~ts' doesn't contain an .app file", [Location]),
throw({app_file_missing, Name, Location})
end;
{error, Reason} ->
rabbit_log:error("Could not unzip plugin archive '~ts': ~tp", [Location, Reason]),
?LOG_ERROR("Could not unzip plugin archive '~ts': ~tp", [Location, Reason]),
throw({failed_to_unzip_plugin, Name, Location, Reason})
end;
prepare_plugin(#plugin{type = dir, location = Location, name = Name},

@@ -472,7 +473,7 @@ prepare_plugin(#plugin{type = dir, location = Location, name = Name},
[PluginAppDescPath|_] ->
prepare_dir_plugin(PluginAppDescPath);
_ ->
rabbit_log:error("Plugin directory '~ts' doesn't contain an .app file", [Location]),
?LOG_ERROR("Plugin directory '~ts' doesn't contain an .app file", [Location]),
throw({app_file_missing, Name, Location})
end.

@@ -668,12 +669,12 @@ remove_plugins(Plugins) ->
lists:member(Name, PluginDeps),
if
IsOTPApp ->
rabbit_log:debug(
?LOG_DEBUG(
"Plugins discovery: "
"ignoring ~ts, Erlang/OTP application",
[Name]);
not IsAPlugin ->
rabbit_log:debug(
?LOG_DEBUG(
"Plugins discovery: "
"ignoring ~ts, not a RabbitMQ plugin",
[Name]);

@@ -29,6 +29,7 @@

-include_lib("rabbit_common/include/rabbit.hrl").
-include("amqqueue.hrl").
-include_lib("kernel/include/logger.hrl").

-import(rabbit_misc, [pget/2, pget/3]).

@@ -285,7 +286,7 @@ parse_set0(Type, VHost, Name, Pattern, Defn, Priority, ApplyTo, ActingUser) ->
{<<"priority">>, Priority},
{<<"apply-to">>, ApplyTo}],
ActingUser),
rabbit_log:info("Successfully set policy '~ts' matching ~ts names in virtual host '~ts' using pattern '~ts'",
?LOG_INFO("Successfully set policy '~ts' matching ~ts names in virtual host '~ts' using pattern '~ts'",
[Name, ApplyTo, VHost, Pattern]),
R;
{error, Reason} ->

@@ -9,6 +9,7 @@

-include_lib("rabbit_common/include/rabbit.hrl").
-include("amqqueue.hrl").
-include_lib("kernel/include/logger.hrl").

-behaviour(rabbit_backing_queue).

@@ -66,7 +67,7 @@ enable() ->
{ok, RealBQ} = application:get_env(rabbit, backing_queue_module),
case RealBQ of
?MODULE -> ok;
_ -> rabbit_log:info("Priority queues enabled, real BQ is ~ts",
_ -> ?LOG_INFO("Priority queues enabled, real BQ is ~ts",
[RealBQ]),
application:set_env(
rabbitmq_priority_queue, backing_queue_module, RealBQ),

@@ -223,6 +223,7 @@
}).

-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").

%%----------------------------------------------------------------------------

@@ -556,7 +557,7 @@ start(VHost, DurableQueueNames) ->
ToDelete = [filename:join([rabbit_vhost:msg_store_dir_path(VHost), "queues", Dir])
|| Dir <- lists:subtract(all_queue_directory_names(VHost),
sets:to_list(DurableDirectories))],
rabbit_log:debug("Deleting unknown files/folders: ~p", [ToDelete]),
?LOG_DEBUG("Deleting unknown files/folders: ~p", [ToDelete]),
_ = rabbit_file:recursive_delete(ToDelete),

rabbit_recovery_terms:clear(VHost),

@@ -1182,7 +1183,7 @@ load_segment(KeepAcked, #segment { path = Path }) ->
%% was missing above). We also log some information.
case SegBin of
<<0:Size/unit:8>> ->
rabbit_log:warning("Deleting invalid v1 segment file ~ts (file only contains NUL bytes)",
?LOG_WARNING("Deleting invalid v1 segment file ~ts (file only contains NUL bytes)",
[Path]),
_ = rabbit_file:delete(Path),
Empty;

@@ -14,6 +14,7 @@
-include("vhost.hrl").
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("amqp10_common/include/amqp10_types.hrl").
-include_lib("kernel/include/logger.hrl").

-export([
init/0,

@@ -554,7 +555,7 @@ recover(VHost, Qs) ->
end, ByType0, Qs),
maps:fold(fun (Mod, Queues, {R0, F0}) ->
{Taken, {R, F}} = timer:tc(Mod, recover, [VHost, Queues]),
rabbit_log:info("Recovering ~b queues of type ~ts took ~bms",
?LOG_INFO("Recovering ~b queues of type ~ts took ~bms",
[length(Queues), Mod, Taken div 1000]),
{R0 ++ R, F0 ++ F}
end, {[], []}, ByType).

@@ -106,6 +106,7 @@
-include_lib("stdlib/include/qlc.hrl").
-include_lib("rabbit_common/include/rabbit.hrl").
-include("amqqueue.hrl").
-include_lib("kernel/include/logger.hrl").

-rabbit_boot_step(
{rabbit_quorum_queue_type,

@@ -129,7 +130,7 @@
-define(DEFAULT_DELIVERY_LIMIT, 20).

-define(INFO(Str, Args),
rabbit_log:info("[~s:~s/~b] " Str,
?LOG_INFO("[~s:~s/~b] " Str,
[?MODULE, ?FUNCTION_NAME, ?FUNCTION_ARITY | Args])).

@@ -284,7 +285,7 @@ start_cluster(Q) ->
?RPC_TIMEOUT)],
MinVersion = lists:min([rabbit_fifo:version() | Versions]),

rabbit_log:debug("Will start up to ~w replicas for quorum queue ~ts with "
?LOG_DEBUG("Will start up to ~w replicas for quorum queue ~ts with "
"leader on node '~ts', initial machine version ~b",
[QuorumSize, rabbit_misc:rs(QName), LeaderNode, MinVersion]),
case rabbit_amqqueue:internal_declare(NewQ1, false) of

@@ -354,7 +355,7 @@ gather_policy_config(Q, IsQueueDeclaration) ->
undefined ->
case IsQueueDeclaration of
true ->
rabbit_log:info(
?LOG_INFO(
"~ts: delivery_limit not set, defaulting to ~b",
[rabbit_misc:rs(QName), ?DEFAULT_DELIVERY_LIMIT]);
false ->

@@ -660,7 +661,7 @@ handle_tick(QName,
ok ->
ok;
repaired ->
rabbit_log:debug("Repaired quorum queue ~ts amqqueue record",
?LOG_DEBUG("Repaired quorum queue ~ts amqqueue record",
[rabbit_misc:rs(QName)])
end,
ExpectedNodes = rabbit_nodes:list_members(),

@@ -670,7 +671,7 @@ handle_tick(QName,
Stale when length(ExpectedNodes) > 0 ->
%% rabbit_nodes:list_members/0 returns [] when there
%% is an error so we need to handle that case
rabbit_log:debug("~ts: stale nodes detected in quorum "
?LOG_DEBUG("~ts: stale nodes detected in quorum "
"queue state. Purging ~w",
[rabbit_misc:rs(QName), Stale]),
%% pipeline purge command

@@ -684,13 +685,13 @@ handle_tick(QName,
ok
catch
_:Err ->
rabbit_log:debug("~ts: handle tick failed with ~p",
?LOG_DEBUG("~ts: handle tick failed with ~p",
[rabbit_misc:rs(QName), Err]),
ok
end
end);
handle_tick(QName, Config, _Nodes) ->
rabbit_log:debug("~ts: handle tick received unexpected config format ~tp",
?LOG_DEBUG("~ts: handle tick received unexpected config format ~tp",
[rabbit_misc:rs(QName), Config]).

repair_leader_record(Q, Name) ->

@@ -701,7 +702,7 @@ repair_leader_record(Q, Name) ->
ok;
_ ->
QName = amqqueue:get_name(Q),
rabbit_log:debug("~ts: updating leader record to current node ~ts",
?LOG_DEBUG("~ts: updating leader record to current node ~ts",
[rabbit_misc:rs(QName), Node]),
ok = become_leader0(QName, Name),
ok

@@ -776,7 +777,7 @@ maybe_apply_policies(Q, #{config := CurrentConfig}) ->
ShouldUpdate = NewPolicyConfig =/= CurrentPolicyConfig,
case ShouldUpdate of
true ->
rabbit_log:debug("Re-applying policies to ~ts", [rabbit_misc:rs(amqqueue:get_name(Q))]),
?LOG_DEBUG("Re-applying policies to ~ts", [rabbit_misc:rs(amqqueue:get_name(Q))]),
policy_changed(Q),
ok;
false -> ok

@@ -798,7 +799,7 @@ recover(_Vhost, Queues) ->
{error, Err1}
when Err1 == not_started orelse
Err1 == name_not_registered ->
rabbit_log:warning("Quorum queue recovery: configured member of ~ts was not found on this node. Starting member as a new one. "
?LOG_WARNING("Quorum queue recovery: configured member of ~ts was not found on this node. Starting member as a new one. "
"Context: ~s",
[rabbit_misc:rs(QName), Err1]),
% queue was never started on this node

@@ -806,7 +807,7 @@ recover(_Vhost, Queues) ->
case start_server(make_ra_conf(Q0, ServerId)) of
ok -> ok;
Err2 ->
rabbit_log:warning("recover: quorum queue ~w could not"
?LOG_WARNING("recover: quorum queue ~w could not"
" be started ~w", [Name, Err2]),
fail
end;

@@ -817,7 +818,7 @@ recover(_Vhost, Queues) ->
ok;
Err ->
%% catch all clause to avoid causing the vhost not to start
rabbit_log:warning("recover: quorum queue ~w could not be "
?LOG_WARNING("recover: quorum queue ~w could not be "
"restarted ~w", [Name, Err]),
fail
end,

@@ -908,7 +909,7 @@ delete(Q, _IfUnused, _IfEmpty, ActingUser) when ?amqqueue_is_quorum(Q) ->
ok;
false ->
%% attempt forced deletion of all servers
rabbit_log:warning(
?LOG_WARNING(
"Could not delete quorum '~ts', not enough nodes "
" online to reach a quorum: ~255p."
" Attempting force delete.",

@@ -929,7 +930,7 @@ force_delete_queue(Servers) ->
case catch(ra:force_delete_server(?RA_SYSTEM, S)) of
ok -> ok;
Err ->
rabbit_log:warning(
?LOG_WARNING(
"Force delete of ~w failed with: ~w"
"This may require manual data clean up",
[S, Err]),

@@ -1222,7 +1223,7 @@ policy_changed(Q) ->
ok;
Err ->
FormattedQueueName = rabbit_misc:rs(amqqueue:get_name(Q)),
rabbit_log:warning("~s: policy may not have been successfully applied. Error: ~p",
?LOG_WARNING("~s: policy may not have been successfully applied. Error: ~p",
[FormattedQueueName, Err]),
ok
end.

@@ -1340,7 +1341,7 @@ add_member(VHost, Name, Node, Membership, Timeout)
is_binary(Name) andalso
is_atom(Node) ->
QName = #resource{virtual_host = VHost, name = Name, kind = queue},
rabbit_log:debug("Asked to add a replica for queue ~ts on node ~ts",
?LOG_DEBUG("Asked to add a replica for queue ~ts on node ~ts",
[rabbit_misc:rs(QName), Node]),
case rabbit_amqqueue:lookup(QName) of
{ok, Q} when ?amqqueue_is_classic(Q) ->

@@ -1354,7 +1355,7 @@ add_member(VHost, Name, Node, Membership, Timeout)
case lists:member(Node, QNodes) of
true ->
%% idempotent by design
rabbit_log:debug("Quorum ~ts already has a replica on node ~ts",
?LOG_DEBUG("Quorum ~ts already has a replica on node ~ts",
[rabbit_misc:rs(QName), Node]),
ok;
false ->

@@ -1422,7 +1423,7 @@ do_add_member(Q, Node, Membership, Timeout)
{erlang, is_list, []},
#{condition => {applied, {RaIndex, RaTerm}}}),
_ = rabbit_amqqueue:update(QName, Fun),
rabbit_log:info("Added a replica of quorum ~ts on node ~ts", [rabbit_misc:rs(QName), Node]),
?LOG_INFO("Added a replica of quorum ~ts on node ~ts", [rabbit_misc:rs(QName), Node]),
ok;
{timeout, _} ->
_ = ra:force_delete_server(?RA_SYSTEM, ServerId),

@@ -1433,7 +1434,7 @@ do_add_member(Q, Node, Membership, Timeout)
E
end;
E ->
rabbit_log:warning("Could not add a replica of quorum ~ts on node ~ts: ~p",
?LOG_WARNING("Could not add a replica of quorum ~ts on node ~ts: ~p",
[rabbit_misc:rs(QName), Node, E]),
E
end.

@@ -1484,7 +1485,7 @@ delete_member(Q, Node) when ?amqqueue_is_quorum(Q) ->
_ = rabbit_amqqueue:update(QName, Fun),
case ra:force_delete_server(?RA_SYSTEM, ServerId) of
ok ->
rabbit_log:info("Deleted a replica of quorum ~ts on node ~ts", [rabbit_misc:rs(QName), Node]),
?LOG_INFO("Deleted a replica of quorum ~ts on node ~ts", [rabbit_misc:rs(QName), Node]),
ok;
{error, {badrpc, nodedown}} ->
ok;

@@ -1507,10 +1508,10 @@ delete_member(Q, Node) when ?amqqueue_is_quorum(Q) ->
[{rabbit_amqqueue:name(),
{ok, pos_integer()} | {error, pos_integer(), term()}}].
shrink_all(Node) ->
rabbit_log:info("Asked to remove all quorum queue replicas from node ~ts", [Node]),
?LOG_INFO("Asked to remove all quorum queue replicas from node ~ts", [Node]),
[begin
QName = amqqueue:get_name(Q),
rabbit_log:info("~ts: removing member (replica) on node ~w",
?LOG_INFO("~ts: removing member (replica) on node ~w",
[rabbit_misc:rs(QName), Node]),
Size = length(get_nodes(Q)),
case delete_member(Q, Node) of

@@ -1520,7 +1521,7 @@ shrink_all(Node) ->
%% this could be timing related and due to a new leader just being
%% elected but it's noop command not been committed yet.
%% lets sleep and retry once
rabbit_log:info("~ts: failed to remove member (replica) on node ~w "
?LOG_INFO("~ts: failed to remove member (replica) on node ~w "
"as cluster change is not permitted. "
"retrying once in 500ms",
[rabbit_misc:rs(QName), Node]),

@@ -1529,12 +1530,12 @@ shrink_all(Node) ->
ok ->
{QName, {ok, Size-1}};
{error, Err} ->
rabbit_log:warning("~ts: failed to remove member (replica) on node ~w, error: ~w",
?LOG_WARNING("~ts: failed to remove member (replica) on node ~w, error: ~w",
[rabbit_misc:rs(QName), Node, Err]),
{QName, {error, Size, Err}}
end;
{error, Err} ->
rabbit_log:warning("~ts: failed to remove member (replica) on node ~w, error: ~w",
?LOG_WARNING("~ts: failed to remove member (replica) on node ~w, error: ~w",
[rabbit_misc:rs(QName), Node, Err]),
{QName, {error, Size, Err}}
end

@@ -1554,13 +1555,13 @@ grow(Node, VhostSpec, QueueSpec, Strategy, Membership) ->
[begin
Size = length(get_nodes(Q)),
QName = amqqueue:get_name(Q),
rabbit_log:info("~ts: adding a new member (replica) on node ~w",
?LOG_INFO("~ts: adding a new member (replica) on node ~w",
[rabbit_misc:rs(QName), Node]),
case add_member(Q, Node, Membership) of
ok ->
{QName, {ok, Size + 1}};
{error, Err} ->
rabbit_log:warning(
?LOG_WARNING(
"~ts: failed to add member (replica) on node ~w, error: ~w",
[rabbit_misc:rs(QName), Node, Err]),
{QName, {error, Size, Err}}

@@ -1647,19 +1648,19 @@ dead_letter_handler(Q, Overflow) ->
dlh(undefined, undefined, undefined, _, _) ->
undefined;
dlh(undefined, RoutingKey, undefined, _, QName) ->
rabbit_log:warning("Disabling dead-lettering for ~ts despite configured dead-letter-routing-key '~ts' "
?LOG_WARNING("Disabling dead-lettering for ~ts despite configured dead-letter-routing-key '~ts' "
"because dead-letter-exchange is not configured.",
[rabbit_misc:rs(QName), RoutingKey]),
undefined;
dlh(undefined, _, Strategy, _, QName) ->
rabbit_log:warning("Disabling dead-lettering for ~ts despite configured dead-letter-strategy '~ts' "
?LOG_WARNING("Disabling dead-lettering for ~ts despite configured dead-letter-strategy '~ts' "
"because dead-letter-exchange is not configured.",
[rabbit_misc:rs(QName), Strategy]),
undefined;
dlh(_, _, <<"at-least-once">>, reject_publish, _) ->
at_least_once;
dlh(Exchange, RoutingKey, <<"at-least-once">>, drop_head, QName) ->
rabbit_log:warning("Falling back to dead-letter-strategy at-most-once for ~ts "
?LOG_WARNING("Falling back to dead-letter-strategy at-most-once for ~ts "
"because configured dead-letter-strategy at-least-once is incompatible with "
"effective overflow strategy drop-head. To enable dead-letter-strategy "
"at-least-once, set overflow strategy to reject-publish.",

@@ -2030,7 +2031,7 @@ overflow(undefined, Def, _QName) -> Def;
overflow(<<"reject-publish">>, _Def, _QName) -> reject_publish;
overflow(<<"drop-head">>, _Def, _QName) -> drop_head;
overflow(<<"reject-publish-dlx">> = V, Def, QName) ->
rabbit_log:warning("Invalid overflow strategy ~tp for quorum queue: ~ts",
?LOG_WARNING("Invalid overflow strategy ~tp for quorum queue: ~ts",
[V, rabbit_misc:rs(QName)]),
Def.

@@ -2069,7 +2070,7 @@ force_shrink_member_to_current_member(VHost, Name) ->
Node = node(),
QName = rabbit_misc:r(VHost, queue, Name),
QNameFmt = rabbit_misc:rs(QName),
rabbit_log:warning("Shrinking ~ts to a single node: ~ts", [QNameFmt, Node]),
?LOG_WARNING("Shrinking ~ts to a single node: ~ts", [QNameFmt, Node]),
case rabbit_amqqueue:lookup(QName) of
{ok, Q} when ?is_amqqueue(Q) ->
{RaName, _} = amqqueue:get_pid(Q),

@@ -2082,19 +2083,19 @@ force_shrink_member_to_current_member(VHost, Name) ->
end,
_ = rabbit_amqqueue:update(QName, Fun),
_ = [ra:force_delete_server(?RA_SYSTEM, {RaName, N}) || N <- OtherNodes],
rabbit_log:warning("Shrinking ~ts finished", [QNameFmt]);
?LOG_WARNING("Shrinking ~ts finished", [QNameFmt]);
_ ->
rabbit_log:warning("Shrinking failed, ~ts not found", [QNameFmt]),
?LOG_WARNING("Shrinking failed, ~ts not found", [QNameFmt]),
{error, not_found}
end.

force_vhost_queues_shrink_member_to_current_member(VHost) when is_binary(VHost) ->
rabbit_log:warning("Shrinking all quorum queues in vhost '~ts' to a single node: ~ts", [VHost, node()]),
?LOG_WARNING("Shrinking all quorum queues in vhost '~ts' to a single node: ~ts", [VHost, node()]),
ListQQs = fun() -> rabbit_amqqueue:list(VHost) end,
force_all_queues_shrink_member_to_current_member(ListQQs).

force_all_queues_shrink_member_to_current_member() ->
rabbit_log:warning("Shrinking all quorum queues to a single node: ~ts", [node()]),
?LOG_WARNING("Shrinking all quorum queues to a single node: ~ts", [node()]),
ListQQs = fun() -> rabbit_amqqueue:list() end,
force_all_queues_shrink_member_to_current_member(ListQQs).

@@ -2104,7 +2105,7 @@ force_all_queues_shrink_member_to_current_member(ListQQFun) when is_function(Lis
QName = amqqueue:get_name(Q),
{RaName, _} = amqqueue:get_pid(Q),
OtherNodes = lists:delete(Node, get_nodes(Q)),
rabbit_log:warning("Shrinking queue ~ts to a single node: ~ts", [rabbit_misc:rs(QName), Node]),
?LOG_WARNING("Shrinking queue ~ts to a single node: ~ts", [rabbit_misc:rs(QName), Node]),
ok = ra_server_proc:force_shrink_members_to_current_member({RaName, Node}),
Fun = fun (QQ) ->
TS0 = amqqueue:get_type_state(QQ),

@@ -2114,7 +2115,7 @@ force_all_queues_shrink_member_to_current_member(ListQQFun) when is_function(Lis
_ = rabbit_amqqueue:update(QName, Fun),
_ = [ra:force_delete_server(?RA_SYSTEM, {RaName, N}) || N <- OtherNodes]
end || Q <- ListQQFun(), amqqueue:get_type(Q) == ?MODULE],
rabbit_log:warning("Shrinking finished"),
?LOG_WARNING("Shrinking finished"),
ok.

force_checkpoint_on_queue(QName) ->

@@ -2124,7 +2125,7 @@ force_checkpoint_on_queue(QName) ->
{error, classic_queue_not_supported};
{ok, Q} when ?amqqueue_is_quorum(Q) ->
{RaName, _} = amqqueue:get_pid(Q),
rabbit_log:debug("Sending command to force ~ts to take a checkpoint", [QNameFmt]),
?LOG_DEBUG("Sending command to force ~ts to take a checkpoint", [QNameFmt]),
Nodes = amqqueue:get_nodes(Q),
_ = [ra:cast_aux_command({RaName, Node}, force_checkpoint)
|| Node <- Nodes],

@@ -2142,7 +2143,7 @@ force_checkpoint(VhostSpec, QueueSpec) ->
ok ->
{QName, {ok}};
{error, Err} ->
rabbit_log:warning("~ts: failed to force checkpoint, error: ~w",
?LOG_WARNING("~ts: failed to force checkpoint, error: ~w",
[rabbit_misc:rs(QName), Err]),
{QName, {error, Err}}
end

@@ -2274,7 +2275,7 @@ wait_for_leader_health_checks(Ref, N, UnhealthyAcc) ->
check_process_limit_safety(QCount, ProcessLimitThreshold) ->
case (erlang:system_info(process_count) + QCount) >= ProcessLimitThreshold of
true ->
rabbit_log:warning("Leader health check not permitted, process limit threshold will be exceeded."),
?LOG_WARNING("Leader health check not permitted, process limit threshold will be exceeded."),
throw({error, leader_health_check_process_limit_exceeded});
false ->
ok

@@ -2283,7 +2284,7 @@ check_process_limit_safety(QCount, ProcessLimitThreshold) ->
maybe_log_leader_health_check_result([]) -> ok;
maybe_log_leader_health_check_result(Result) ->
Qs = lists:map(fun(R) -> catch maps:get(<<"readable_name">>, R) end, Result),
rabbit_log:warning("Leader health check result (unhealthy leaders detected): ~tp", [Qs]).
?LOG_WARNING("Leader health check result (unhealthy leaders detected): ~tp", [Qs]).

policy_apply_to_name() ->
<<"quorum_queues">>.

@@ -2295,52 +2296,52 @@ drain(TransferCandidates) ->
ok.

transfer_leadership([]) ->
rabbit_log:warning("Skipping leadership transfer of quorum queues: no candidate "
?LOG_WARNING("Skipping leadership transfer of quorum queues: no candidate "
"(online, not under maintenance) nodes to transfer to!");
transfer_leadership(_TransferCandidates) ->
%% we only transfer leadership for QQs that have local leaders
Queues = rabbit_amqqueue:list_local_leaders(),
rabbit_log:info("Will transfer leadership of ~b quorum queues with current leader on this node",
?LOG_INFO("Will transfer leadership of ~b quorum queues with current leader on this node",
[length(Queues)]),
[begin
Name = amqqueue:get_name(Q),
rabbit_log:debug("Will trigger a leader election for local quorum queue ~ts",
?LOG_DEBUG("Will trigger a leader election for local quorum queue ~ts",
[rabbit_misc:rs(Name)]),
%% we trigger an election and exclude this node from the list of candidates
%% by simply shutting its local QQ replica (Ra server)
RaLeader = amqqueue:get_pid(Q),
rabbit_log:debug("Will stop Ra server ~tp", [RaLeader]),
?LOG_DEBUG("Will stop Ra server ~tp", [RaLeader]),
case rabbit_quorum_queue:stop_server(RaLeader) of
ok ->
rabbit_log:debug("Successfully stopped Ra server ~tp", [RaLeader]);
?LOG_DEBUG("Successfully stopped Ra server ~tp", [RaLeader]);
{error, nodedown} ->
rabbit_log:error("Failed to stop Ra server ~tp: target node was reported as down")
?LOG_ERROR("Failed to stop Ra server ~tp: target node was reported as down")
end
end || Q <- Queues],
rabbit_log:info("Leadership transfer for quorum queues hosted on this node has been initiated").
?LOG_INFO("Leadership transfer for quorum queues hosted on this node has been initiated").

%% TODO: I just copied it over, it looks like was always called inside maintenance so...
-spec stop_local_quorum_queue_followers() -> ok.
stop_local_quorum_queue_followers() ->
Queues = rabbit_amqqueue:list_local_followers(),
rabbit_log:info("Will stop local follower replicas of ~b quorum queues on this node",
?LOG_INFO("Will stop local follower replicas of ~b quorum queues on this node",
[length(Queues)]),
[begin
Name = amqqueue:get_name(Q),
rabbit_log:debug("Will stop a local follower replica of quorum queue ~ts",
?LOG_DEBUG("Will stop a local follower replica of quorum queue ~ts",
[rabbit_misc:rs(Name)]),
%% shut down Ra nodes so that they are not considered for leader election
{RegisteredName, _LeaderNode} = amqqueue:get_pid(Q),
RaNode = {RegisteredName, node()},
rabbit_log:debug("Will stop Ra server ~tp", [RaNode]),
?LOG_DEBUG("Will stop Ra server ~tp", [RaNode]),
case rabbit_quorum_queue:stop_server(RaNode) of
ok ->
rabbit_log:debug("Successfully stopped Ra server ~tp", [RaNode]);
?LOG_DEBUG("Successfully stopped Ra server ~tp", [RaNode]);
{error, nodedown} ->
rabbit_log:error("Failed to stop Ra server ~tp: target node was reported as down")
?LOG_ERROR("Failed to stop Ra server ~tp: target node was reported as down")
end
end || Q <- Queues],
rabbit_log:info("Stopped all local replicas of quorum queues hosted on this node").
?LOG_INFO("Stopped all local replicas of quorum queues hosted on this node").

revive() ->
revive_local_queue_members().

@@ -2350,17 +2351,17 @@ revive_local_queue_members() ->
%% NB: this function ignores the first argument so we can just pass the
%% empty binary as the vhost name.
{Recovered, Failed} = rabbit_quorum_queue:recover(<<>>, Queues),
rabbit_log:debug("Successfully revived ~b quorum queue replicas",
?LOG_DEBUG("Successfully revived ~b quorum queue replicas",
[length(Recovered)]),
case length(Failed) of
0 ->
ok;
NumFailed ->
rabbit_log:error("Failed to revive ~b quorum queue replicas",
?LOG_ERROR("Failed to revive ~b quorum queue replicas",
[NumFailed])
end,

rabbit_log:info("Restart of local quorum queue replicas is complete"),
?LOG_INFO("Restart of local quorum queue replicas is complete"),
ok.

queue_vm_stats_sups() ->

@@ -43,6 +43,7 @@
-include_lib("rabbit_common/include/rabbit_framing.hrl").
-include_lib("rabbit_common/include/rabbit.hrl").
-include("rabbit_amqp_metrics.hrl").
-include_lib("kernel/include/logger.hrl").

-export([start_link/2, info/2, force_event_refresh/2,
shutdown/2]).

@@ -1363,7 +1364,7 @@ handle_method0(#'connection.update_secret'{new_secret = NewSecret, reason = Reas
%% Any secret update errors coming from the authz backend will be handled in the other branch.
%% Therefore we optimistically do no error handling here. MK.
lists:foreach(fun(Ch) ->
rabbit_log:debug("Updating user/auth backend state for channel ~tp", [Ch]),
?LOG_DEBUG("Updating user/auth backend state for channel ~tp", [Ch]),
_ = rabbit_channel:update_user_state(Ch, User1)
end, all_channels()),
ok = send_on_channel0(Sock, #'connection.update_secret_ok'{}, Protocol),

@@ -1505,7 +1506,7 @@ auth_phase(Response,
auth_state = AuthState,
host = RemoteAddress},
sock = Sock}) ->
rabbit_log:debug("Client address during authN phase: ~tp", [RemoteAddress]),
?LOG_DEBUG("Client address during authN phase: ~tp", [RemoteAddress]),
case AuthMechanism:handle_response(Response, AuthState) of
{refused, Username, Msg, Args} ->
rabbit_core_metrics:auth_attempt_failed(RemoteAddress, Username, amqp091),

@@ -19,6 +19,7 @@
terminate/2, code_change/3]).

-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").

%%----------------------------------------------------------------------------

@@ -36,7 +37,7 @@ start(VHost) ->
%% we can get here if a vhost is added and removed concurrently
%% e.g. some integration tests do it
{error, {no_such_vhost, VHost}} ->
rabbit_log:error("Failed to start a recovery terms manager for vhost ~ts: vhost no longer exists!",
?LOG_ERROR("Failed to start a recovery terms manager for vhost ~ts: vhost no longer exists!",
[VHost]),
{error, {no_such_vhost, VHost}}
end.

@@ -52,7 +53,7 @@ stop(VHost) ->
end;
%% see start/1
{error, {no_such_vhost, VHost}} ->
rabbit_log:error("Failed to stop a recovery terms manager for vhost ~ts: vhost no longer exists!",
?LOG_ERROR("Failed to stop a recovery terms manager for vhost ~ts: vhost no longer exists!",
[VHost]),

ok

@@ -81,7 +82,7 @@ clear(VHost) ->
ok
%% see start/1
catch _:badarg ->
rabbit_log:error("Failed to clear recovery terms for vhost ~ts: table no longer exists!",
?LOG_ERROR("Failed to clear recovery terms for vhost ~ts: table no longer exists!",
[VHost]),
ok
end,

@@ -138,7 +139,7 @@ open_table(VHost, RamFile, RetriesLeft) ->
_ = file:delete(File),
%% Wait before retrying
DelayInMs = 1000,
rabbit_log:warning("Failed to open a recovery terms DETS file at ~tp. Will delete it and retry in ~tp ms (~tp retries left)",
?LOG_WARNING("Failed to open a recovery terms DETS file at ~tp. Will delete it and retry in ~tp ms (~tp retries left)",
[File, DelayInMs, RetriesLeft]),
timer:sleep(DelayInMs),
open_table(VHost, RamFile, RetriesLeft - 1)

@@ -152,7 +153,7 @@ flush(VHost) ->
dets:sync(VHost)
%% see clear/1
catch _:badarg ->
rabbit_log:error("Failed to sync recovery terms table for vhost ~ts: the table no longer exists!",
?LOG_ERROR("Failed to sync recovery terms table for vhost ~ts: the table no longer exists!",
[VHost]),
ok
end.

@@ -165,7 +166,7 @@ close_table(VHost) ->
ok = dets:close(VHost)
%% see clear/1
catch _:badarg ->
rabbit_log:error("Failed to close recovery terms table for vhost ~ts: the table no longer exists!",
?LOG_ERROR("Failed to close recovery terms table for vhost ~ts: the table no longer exists!",
[VHost]),
ok
end.

@@ -41,6 +41,7 @@
%% * rabbit_event

-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").

-export([parse_set/5, set/5, set_any/5, clear/4, clear_any/4, list/0, list/1,
list_component/1, list/2, list_formatted/1, list_formatted/3,

@@ -104,7 +105,7 @@ parse_set_global(Name, String, ActingUser) ->

set_global(Name, Term, ActingUser) ->
NameAsAtom = rabbit_data_coercion:to_atom(Name),
rabbit_log:debug("Setting global parameter '~ts' to ~tp", [NameAsAtom, Term]),
?LOG_DEBUG("Setting global parameter '~ts' to ~tp", [NameAsAtom, Term]),
_ = rabbit_db_rtparams:set(NameAsAtom, Term),
event_notify(parameter_set, none, global, [{name, NameAsAtom},
{value, Term},

@@ -125,7 +126,7 @@ set_any(VHost, Component, Name, Term, User) ->
end.

set_any0(VHost, Component, Name, Term, User) ->
rabbit_log:debug("Asked to set or update runtime parameter '~ts' in vhost '~ts' "
?LOG_DEBUG("Asked to set or update runtime parameter '~ts' in vhost '~ts' "
"for component '~ts', value: ~tp",
[Name, VHost, Component, Term]),
case lookup_component(Component) of

@@ -168,7 +169,7 @@ is_within_limit(Component) ->
false ->
ErrorMsg = "Limit reached: component ~ts is limited to ~tp",
ErrorArgs = [Component, Limit],
rabbit_log:error(ErrorMsg, ErrorArgs),
?LOG_ERROR(ErrorMsg, ErrorArgs),
{errors, [{"component ~ts is limited to ~tp", [Component, Limit]}]}
end.

@@ -8,6 +8,7 @@
-module(rabbit_ssl).

-include_lib("public_key/include/public_key.hrl").
-include_lib("kernel/include/logger.hrl").

-export([peer_cert_issuer/1, peer_cert_subject/1, peer_cert_validity/1]).
-export([peer_cert_subject_items/2, peer_cert_auth_name/1, peer_cert_auth_name/2]).

@@ -161,7 +162,7 @@ peer_cert_auth_name({subject_alternative_name, Type, Index0}, Cert) ->
%% lists:nth/2 is 1-based
Index = Index0 + 1,
OfType = peer_cert_subject_alternative_names(Cert, otp_san_type(Type)),
rabbit_log:debug("Peer certificate SANs of type ~ts: ~tp, index to use with lists:nth/2: ~b", [Type, OfType, Index]),
?LOG_DEBUG("Peer certificate SANs of type ~ts: ~tp, index to use with lists:nth/2: ~b", [Type, OfType, Index]),
case length(OfType) of
0 -> not_found;
N when N < Index -> not_found;

@@ -198,7 +199,7 @@ auth_config_sane() ->
{ok, Opts} = application:get_env(rabbit, ssl_options),
case proplists:get_value(verify, Opts) of
verify_peer -> true;
V -> rabbit_log:warning("TLS peer verification (authentication) is "
V -> ?LOG_WARNING("TLS peer verification (authentication) is "
"disabled, ssl_options.verify value used: ~tp. "
"See https://www.rabbitmq.com/docs/ssl#peer-verification to learn more.", [V]),
false

@ -83,6 +83,7 @@
|
|||
|
||||
-include("rabbit_stream_coordinator.hrl").
|
||||
-include("amqqueue.hrl").
|
||||
-include_lib("kernel/include/logger.hrl").
|
||||
|
||||
-define(REPLICA_FRESHNESS_LIMIT_MS, 10 * 1000). %% 10s
|
||||
-define(V2_OR_MORE(Vsn), Vsn >= 2).
|
||||
|
@ -174,7 +175,7 @@ restart_stream(QRes, Options)
|
|||
restart_stream(Q, Options)
|
||||
when ?is_amqqueue(Q) andalso
|
||||
?amqqueue_is_stream(Q) ->
|
||||
rabbit_log:info("restarting stream ~s in vhost ~s with options ~p",
|
||||
?LOG_INFO("restarting stream ~s in vhost ~s with options ~p",
|
||||
[maps:get(name, amqqueue:get_type_state(Q)), amqqueue:get_vhost(Q), Options]),
|
||||
#{name := StreamId} = amqqueue:get_type_state(Q),
|
||||
case process_command({restart_stream, StreamId, Options}) of
|
||||
|
@ -217,7 +218,7 @@ add_replica(Q, Node) when ?is_amqqueue(Q) ->
|
|||
{error, {disallowed, out_of_sync_replica}};
|
||||
false ->
|
||||
Name = rabbit_misc:rs(amqqueue:get_name(Q)),
|
||||
rabbit_log:info("~ts : adding replica ~ts to ~ts Replication State: ~w",
|
||||
?LOG_INFO("~ts : adding replica ~ts to ~ts Replication State: ~w",
|
||||
[?MODULE, Node, Name, ReplState0]),
|
||||
StreamId = maps:get(name, amqqueue:get_type_state(Q)),
|
||||
case process_command({add_replica, StreamId, #{node => Node}}) of
|
||||
|
@ -444,7 +445,7 @@ process_command([Server | Servers], Cmd) ->
|
|||
_ ->
|
||||
element(1, Cmd)
|
||||
end,
|
||||
rabbit_log:warning("Coordinator timeout on server ~w when processing command ~W",
|
||||
?LOG_WARNING("Coordinator timeout on server ~w when processing command ~W",
|
||||
[element(2, Server), CmdLabel, 10]),
|
||||
process_command(Servers, Cmd);
|
||||
{error, noproc} ->
|
||||
|
@ -516,17 +517,17 @@ start_coordinator_cluster() ->
|
|||
Versions = [V || {ok, V} <- erpc:multicall(Nodes,
|
||||
?MODULE, version, [])],
|
||||
MinVersion = lists:min([version() | Versions]),
|
||||
rabbit_log:debug("Starting stream coordinator on nodes: ~w, "
|
||||
?LOG_DEBUG("Starting stream coordinator on nodes: ~w, "
|
||||
"initial machine version ~b",
|
||||
[Nodes, MinVersion]),
|
||||
case ra:start_cluster(?RA_SYSTEM,
|
||||
[make_ra_conf(Node, Nodes, MinVersion)
|
||||
|| Node <- Nodes]) of
|
||||
{ok, Started, _} ->
|
||||
rabbit_log:debug("Started stream coordinator on ~w", [Started]),
|
||||
?LOG_DEBUG("Started stream coordinator on ~w", [Started]),
|
||||
Started;
|
||||
{error, cluster_not_formed} ->
|
||||
rabbit_log:warning("Stream coordinator could not be started on nodes ~w",
|
||||
?LOG_WARNING("Stream coordinator could not be started on nodes ~w",
|
||||
[Nodes]),
|
||||
[]
|
||||
end.
|
||||
|
@ -740,7 +741,7 @@ apply(Meta, {nodeup, Node} = Cmd,
|
|||
streams = Streams,
|
||||
single_active_consumer = Sac1}, ok, Effects2);
|
||||
apply(Meta, {machine_version, From, To}, State0) ->
|
||||
rabbit_log:info("Stream coordinator machine version changes from ~tp to ~tp, "
|
||||
?LOG_INFO("Stream coordinator machine version changes from ~tp to ~tp, "
|
||||
++ "applying incremental upgrade.", [From, To]),
|
||||
%% RA applies machine upgrades from any version to any version, e.g. 0 -> 2.
|
||||
%% We fill in the gaps here, applying all 1-to-1 machine upgrades.
|
||||
|
@ -756,7 +757,7 @@ apply(Meta, {timeout, {sac, node_disconnected, #{connection_pid := Pid}}},
|
|||
return(Meta, State0#?MODULE{single_active_consumer = SacState1}, ok,
|
||||
Effects);
|
||||
apply(Meta, UnkCmd, State) ->
|
||||
rabbit_log:debug("~ts: unknown command ~W",
|
||||
?LOG_DEBUG("~ts: unknown command ~W",
|
||||
[?MODULE, UnkCmd, 10]),
|
||||
return(Meta, State, {error, unknown_command}, []).
|
||||
|
||||
|
@ -842,7 +843,7 @@ maybe_resize_coordinator_cluster(LeaderPid, SacNodes, MachineVersion) ->
|
|||
[New | _] ->
|
||||
%% any remaining members will be added
|
||||
%% next tick
|
||||
rabbit_log:info("~ts: New rabbit node(s) detected, "
|
||||
?LOG_INFO("~ts: New rabbit node(s) detected, "
|
||||
"adding : ~w",
|
||||
[?MODULE, New]),
|
||||
add_member(Members, New)
|
||||
|
@ -854,7 +855,7 @@ maybe_resize_coordinator_cluster(LeaderPid, SacNodes, MachineVersion) ->
|
|||
%% this ought to be rather rare as the stream
|
||||
%% coordinator member is now removed as part
|
||||
%% of the forget_cluster_node command
|
||||
rabbit_log:info("~ts: Rabbit node(s) removed "
|
||||
?LOG_INFO("~ts: Rabbit node(s) removed "
|
||||
"from the cluster, "
|
||||
"deleting: ~w", [?MODULE, Old]),
|
||||
_ = remove_member(Leader, Members, Old),
|
||||
|
@ -874,7 +875,7 @@ maybe_handle_stale_nodes(SacNodes, BrokerNodes,
|
|||
[] ->
|
||||
ok;
|
||||
Stale when length(BrokerNodes) > 0 ->
|
||||
rabbit_log:debug("Stale nodes detected in stream SAC "
|
||||
?LOG_DEBUG("Stale nodes detected in stream SAC "
|
||||
"coordinator: ~w. Purging state.",
|
||||
[Stale]),
|
||||
ra:pipeline_command(LeaderPid, sac_make_purge_nodes(Stale)),
|
||||
|
@ -903,14 +904,14 @@ add_member(Members, Node) ->
|
|||
{ok, _, _} ->
|
||||
ok;
|
||||
{error, Err} ->
|
||||
rabbit_log:warning("~ts: Failed to add member, reason ~w"
|
||||
?LOG_WARNING("~ts: Failed to add member, reason ~w"
|
||||
"deleting started server on ~w",
|
||||
[?MODULE, Err, Node]),
|
||||
case ra:force_delete_server(?RA_SYSTEM, ServerId) of
|
||||
ok ->
|
||||
ok;
|
||||
Err ->
|
||||
rabbit_log:warning("~ts: Failed to delete server "
|
||||
?LOG_WARNING("~ts: Failed to delete server "
|
||||
"on ~w, reason ~w",
|
||||
[?MODULE, Node, Err]),
|
||||
ok
|
||||
|
@ -926,7 +927,7 @@ add_member(Members, Node) ->
|
|||
%% there is a server running but is not a member of the
|
||||
%% stream coordinator cluster
|
||||
%% In this case it needs to be deleted
|
||||
rabbit_log:warning("~ts: server already running on ~w but not
|
||||
?LOG_WARNING("~ts: server already running on ~w but not
|
||||
part of cluster, "
|
||||
"deleting started server",
|
||||
[?MODULE, Node]),
|
||||
|
@ -934,14 +935,14 @@ add_member(Members, Node) ->
|
|||
ok ->
|
||||
ok;
|
||||
Err ->
|
||||
rabbit_log:warning("~ts: Failed to delete server "
|
||||
?LOG_WARNING("~ts: Failed to delete server "
|
||||
"on ~w, reason ~w",
|
||||
[?MODULE, Node, Err]),
|
||||
ok
|
||||
end
|
||||
end;
|
||||
Error ->
|
||||
rabbit_log:warning("Stream coordinator server failed to start on node ~ts : ~W",
|
||||
?LOG_WARNING("Stream coordinator server failed to start on node ~ts : ~W",
|
||||
[Node, Error, 10]),
|
||||
ok
|
||||
end.
|
||||
|
@ -983,7 +984,7 @@ handle_aux(leader, _, {down, Pid, _},
|
|||
handle_aux(leader, _, {start_writer, StreamId,
|
||||
#{epoch := Epoch, node := Node} = Args, Conf},
|
||||
Aux, RaAux) ->
|
||||
rabbit_log:debug("~ts: running action: 'start_writer'"
|
||||
?LOG_DEBUG("~ts: running action: 'start_writer'"
|
||||
" for ~ts on node ~w in epoch ~b",
|
||||
[?MODULE, StreamId, Node, Epoch]),
|
||||
ActionFun = phase_start_writer(StreamId, Args, Conf),
|
||||
|
@ -991,7 +992,7 @@ handle_aux(leader, _, {start_writer, StreamId,
|
|||
handle_aux(leader, _, {start_replica, StreamId,
|
||||
#{epoch := Epoch, node := Node} = Args, Conf},
|
||||
Aux, RaAux) ->
|
||||
rabbit_log:debug("~ts: running action: 'start_replica'"
|
||||
?LOG_DEBUG("~ts: running action: 'start_replica'"
|
||||
" for ~ts on node ~w in epoch ~b",
|
||||
[?MODULE, StreamId, Node, Epoch]),
|
||||
ActionFun = phase_start_replica(StreamId, Args, Conf),
|
||||
|
@ -999,26 +1000,26 @@ handle_aux(leader, _, {start_replica, StreamId,
|
|||
handle_aux(leader, _, {stop, StreamId, #{node := Node,
|
||||
epoch := Epoch} = Args, Conf},
|
||||
Aux, RaAux) ->
|
||||
rabbit_log:debug("~ts: running action: 'stop'"
|
||||
?LOG_DEBUG("~ts: running action: 'stop'"
|
||||
" for ~ts on node ~w in epoch ~b",
|
||||
[?MODULE, StreamId, Node, Epoch]),
|
||||
ActionFun = phase_stop_member(StreamId, Args, Conf),
|
||||
run_action(stopping, StreamId, Args, ActionFun, Aux, RaAux);
|
||||
handle_aux(leader, _, {update_mnesia, StreamId, Args, Conf},
|
||||
#aux{actions = _Monitors} = Aux, RaAux) ->
|
||||
rabbit_log:debug("~ts: running action: 'update_mnesia'"
|
||||
?LOG_DEBUG("~ts: running action: 'update_mnesia'"
|
||||
" for ~ts", [?MODULE, StreamId]),
|
||||
ActionFun = phase_update_mnesia(StreamId, Args, Conf),
|
||||
run_action(updating_mnesia, StreamId, Args, ActionFun, Aux, RaAux);
|
||||
handle_aux(leader, _, {update_retention, StreamId, Args, _Conf},
|
||||
#aux{actions = _Monitors} = Aux, RaAux) ->
|
||||
rabbit_log:debug("~ts: running action: 'update_retention'"
|
||||
?LOG_DEBUG("~ts: running action: 'update_retention'"
|
||||
" for ~ts", [?MODULE, StreamId]),
|
||||
ActionFun = phase_update_retention(StreamId, Args),
|
||||
run_action(update_retention, StreamId, Args, ActionFun, Aux, RaAux);
|
||||
handle_aux(leader, _, {delete_member, StreamId, #{node := Node} = Args, Conf},
|
||||
#aux{actions = _Monitors} = Aux, RaAux) ->
|
||||
rabbit_log:debug("~ts: running action: 'delete_member'"
|
||||
?LOG_DEBUG("~ts: running action: 'delete_member'"
|
||||
" for ~ts ~ts", [?MODULE, StreamId, Node]),
|
||||
ActionFun = phase_delete_member(StreamId, Args, Conf),
|
||||
run_action(delete_member, StreamId, Args, ActionFun, Aux, RaAux);
|
||||
|
@ -1030,7 +1031,7 @@ handle_aux(leader, _, fail_active_actions,
|
|||
Exclude = maps:from_list([{S, ok}
|
||||
|| {P, {S, _, _}} <- maps_to_list(Actions),
|
||||
is_process_alive(P)]),
|
||||
rabbit_log:debug("~ts: failing actions: ~w", [?MODULE, Exclude]),
|
||||
?LOG_DEBUG("~ts: failing actions: ~w", [?MODULE, Exclude]),
|
||||
#?MODULE{streams = Streams} = ra_aux:machine_state(RaAux),
|
||||
fail_active_actions(Streams, Exclude),
|
||||
{no_reply, Aux, RaAux, []};
|
||||
|
@ -1043,7 +1044,7 @@ handle_aux(leader, _, {down, Pid, Reason},
|
|||
%% An action has failed - report back to the state machine
|
||||
case maps:get(Pid, Monitors0, undefined) of
|
||||
{StreamId, Action, #{node := Node, epoch := Epoch} = Args} ->
|
||||
rabbit_log:warning("~ts: error while executing action ~w for stream queue ~ts, "
|
||||
?LOG_WARNING("~ts: error while executing action ~w for stream queue ~ts, "
|
||||
" node ~ts, epoch ~b Err: ~w",
|
||||
[?MODULE, Action, StreamId, Node, Epoch, Reason]),
|
||||
Monitors = maps:remove(Pid, Monitors0),
|
||||
|
@ -1110,7 +1111,7 @@ phase_start_replica(StreamId, #{epoch := Epoch,
|
|||
fun() ->
|
||||
try osiris_replica:start(Node, Conf0) of
|
||||
{ok, Pid} ->
|
||||
rabbit_log:info("~ts: ~ts: replica started on ~ts in ~b pid ~w",
|
||||
?LOG_INFO("~ts: ~ts: replica started on ~ts in ~b pid ~w",
|
||||
[?MODULE, StreamId, Node, Epoch, Pid]),
|
||||
send_self_command({member_started, StreamId,
|
||||
Args#{pid => Pid}});
|
||||
|
@ -1126,12 +1127,12 @@ phase_start_replica(StreamId, #{epoch := Epoch,
|
|||
send_self_command({member_started, StreamId,
|
||||
Args#{pid => Pid}});
|
||||
{error, Reason} ->
|
||||
rabbit_log:warning("~ts: Error while starting replica for ~ts on node ~ts in ~b : ~W",
|
||||
?LOG_WARNING("~ts: Error while starting replica for ~ts on node ~ts in ~b : ~W",
|
||||
[?MODULE, maps:get(name, Conf0), Node, Epoch, Reason, 10]),
|
||||
maybe_sleep(Reason),
|
||||
send_action_failed(StreamId, starting, Args)
|
||||
catch _:Error ->
|
||||
rabbit_log:warning("~ts: Error while starting replica for ~ts on node ~ts in ~b : ~W",
|
||||
?LOG_WARNING("~ts: Error while starting replica for ~ts on node ~ts in ~b : ~W",
|
||||
[?MODULE, maps:get(name, Conf0), Node, Epoch, Error, 10]),
|
||||
maybe_sleep(Error),
|
||||
send_action_failed(StreamId, starting, Args)
|
||||
|
@ -1152,13 +1153,13 @@ phase_delete_member(StreamId, #{node := Node} = Arg, Conf) ->
|
|||
true ->
|
||||
try osiris:delete_member(Node, Conf) of
|
||||
ok ->
|
||||
rabbit_log:info("~ts: Member deleted for ~ts : on node ~ts",
|
||||
?LOG_INFO("~ts: Member deleted for ~ts : on node ~ts",
|
||||
[?MODULE, StreamId, Node]),
|
||||
send_self_command({member_deleted, StreamId, Arg});
|
||||
_ ->
|
||||
send_action_failed(StreamId, deleting, Arg)
|
||||
catch _:E ->
|
||||
rabbit_log:warning("~ts: Error while deleting member for ~ts : on node ~ts ~W",
|
||||
?LOG_WARNING("~ts: Error while deleting member for ~ts : on node ~ts ~W",
|
||||
[?MODULE, StreamId, Node, E, 10]),
|
||||
maybe_sleep(E),
|
||||
send_action_failed(StreamId, deleting, Arg)
|
||||
|
@ -1166,7 +1167,7 @@ phase_delete_member(StreamId, #{node := Node} = Arg, Conf) ->
|
|||
false ->
|
||||
%% node is no longer a cluster member, we return success to avoid
|
||||
%% trying to delete the member indefinitely
|
||||
rabbit_log:info("~ts: Member deleted/forgotten for ~ts : node ~ts is no longer a cluster member",
|
||||
?LOG_INFO("~ts: Member deleted/forgotten for ~ts : node ~ts is no longer a cluster member",
|
||||
[?MODULE, StreamId, Node]),
|
||||
send_self_command({member_deleted, StreamId, Arg})
|
||||
end
|
||||
|
@ -1180,22 +1181,22 @@ phase_stop_member(StreamId, #{node := Node, epoch := Epoch} = Arg0, Conf) ->
|
|||
try get_replica_tail(Node, Conf) of
|
||||
{ok, Tail} ->
|
||||
Arg = Arg0#{tail => Tail},
|
||||
rabbit_log:debug("~ts: ~ts: member stopped on ~ts in ~b Tail ~w",
|
||||
?LOG_DEBUG("~ts: ~ts: member stopped on ~ts in ~b Tail ~w",
|
||||
[?MODULE, StreamId, Node, Epoch, Tail]),
|
||||
send_self_command({member_stopped, StreamId, Arg});
|
||||
Err ->
|
||||
rabbit_log:warning("~ts: failed to get tail of member ~ts on ~ts in ~b Error: ~w",
|
||||
?LOG_WARNING("~ts: failed to get tail of member ~ts on ~ts in ~b Error: ~w",
|
||||
[?MODULE, StreamId, Node, Epoch, Err]),
|
||||
maybe_sleep(Err),
|
||||
send_action_failed(StreamId, stopping, Arg0)
|
||||
catch _:Err ->
|
||||
rabbit_log:warning("~ts: failed to get tail of member ~ts on ~ts in ~b Error: ~w",
|
||||
?LOG_WARNING("~ts: failed to get tail of member ~ts on ~ts in ~b Error: ~w",
|
||||
[?MODULE, StreamId, Node, Epoch, Err]),
|
||||
maybe_sleep(Err),
|
||||
send_action_failed(StreamId, stopping, Arg0)
|
||||
end
|
||||
catch _:Err ->
|
||||
rabbit_log:warning("~ts: failed to stop member ~ts ~w Error: ~w",
|
||||
?LOG_WARNING("~ts: failed to stop member ~ts ~w Error: ~w",
|
||||
[?MODULE, StreamId, Node, Err]),
|
||||
maybe_sleep(Err),
|
||||
send_action_failed(StreamId, stopping, Arg0)
|
||||
|
@ -1207,17 +1208,17 @@ phase_start_writer(StreamId, #{epoch := Epoch, node := Node} = Args0, Conf) ->
|
|||
try osiris:start_writer(Conf) of
|
||||
{ok, Pid} ->
|
||||
Args = Args0#{epoch => Epoch, pid => Pid},
|
||||
rabbit_log:info("~ts: started writer ~ts on ~w in ~b",
|
||||
?LOG_INFO("~ts: started writer ~ts on ~w in ~b",
|
||||
[?MODULE, StreamId, Node, Epoch]),
|
||||
send_self_command({member_started, StreamId, Args});
|
||||
Err ->
|
||||
%% no sleep for writer failures as we want to trigger a new
|
||||
%% election asap
|
||||
rabbit_log:warning("~ts: failed to start writer ~ts on ~ts in ~b Error: ~w",
|
||||
?LOG_WARNING("~ts: failed to start writer ~ts on ~ts in ~b Error: ~w",
|
||||
[?MODULE, StreamId, Node, Epoch, Err]),
|
||||
send_action_failed(StreamId, starting, Args0)
|
||||
catch _:Err ->
|
||||
rabbit_log:warning("~ts: failed to start writer ~ts on ~ts in ~b Error: ~w",
|
||||
?LOG_WARNING("~ts: failed to start writer ~ts on ~ts in ~b Error: ~w",
|
||||
[?MODULE, StreamId, Node, Epoch, Err]),
|
||||
send_action_failed(StreamId, starting, Args0)
|
||||
end
|
||||
|
@ -1230,12 +1231,12 @@ phase_update_retention(StreamId, #{pid := Pid,
|
|||
ok ->
|
||||
send_self_command({retention_updated, StreamId, Args});
|
||||
{error, Reason} = Err ->
|
||||
rabbit_log:warning("~ts: failed to update retention for ~ts ~w Reason: ~w",
|
||||
?LOG_WARNING("~ts: failed to update retention for ~ts ~w Reason: ~w",
|
||||
[?MODULE, StreamId, node(Pid), Reason]),
|
||||
maybe_sleep(Err),
|
||||
send_action_failed(StreamId, update_retention, Args)
|
||||
catch _:Err ->
|
||||
rabbit_log:warning("~ts: failed to update retention for ~ts ~w Error: ~w",
|
||||
?LOG_WARNING("~ts: failed to update retention for ~ts ~w Error: ~w",
|
||||
[?MODULE, StreamId, node(Pid), Err]),
|
||||
maybe_sleep(Err),
|
||||
send_action_failed(StreamId, update_retention, Args)
|
||||
|
@ -1281,7 +1282,7 @@ is_quorum(NumReplicas, NumAlive) ->
|
|||
phase_update_mnesia(StreamId, Args, #{reference := QName,
|
||||
leader_pid := LeaderPid} = Conf) ->
|
||||
fun() ->
|
||||
rabbit_log:debug("~ts: running mnesia update for ~ts: ~W",
|
||||
?LOG_DEBUG("~ts: running mnesia update for ~ts: ~W",
|
||||
[?MODULE, StreamId, Conf, 10]),
|
||||
Fun = fun (Q) ->
|
||||
case amqqueue:get_type_state(Q) of
|
||||
|
@ -1293,7 +1294,7 @@ phase_update_mnesia(StreamId, Args, #{reference := QName,
|
|||
Ts ->
|
||||
S = maps:get(name, Ts, undefined),
|
||||
%% TODO log as side-effect
|
||||
rabbit_log:debug("~ts: refusing mnesia update for stale stream id ~s, current ~s",
|
||||
?LOG_DEBUG("~ts: refusing mnesia update for stale stream id ~s, current ~s",
|
||||
[?MODULE, StreamId, S]),
|
||||
%% if the stream id isn't a match this is a stale
|
||||
%% update from a previous stream incarnation for the
|
||||
|
@ -1303,7 +1304,7 @@ phase_update_mnesia(StreamId, Args, #{reference := QName,
|
|||
end,
|
||||
try rabbit_amqqueue:update(QName, Fun) of
|
||||
not_found ->
|
||||
rabbit_log:debug("~ts: resource for stream id ~ts not found, "
|
||||
?LOG_DEBUG("~ts: resource for stream id ~ts not found, "
|
||||
"recovering from rabbit_durable_queue",
|
||||
[?MODULE, StreamId]),
|
||||
%% This can happen during recovery
|
||||
|
@ -1316,7 +1317,7 @@ phase_update_mnesia(StreamId, Args, #{reference := QName,
|
|||
{ok, Q} ->
|
||||
case amqqueue:get_type_state(Q) of
|
||||
#{name := S} when S == StreamId ->
|
||||
rabbit_log:debug("~ts: initializing queue record for stream id ~ts",
|
||||
?LOG_DEBUG("~ts: initializing queue record for stream id ~ts",
|
||||
[?MODULE, StreamId]),
|
||||
ok = rabbit_amqqueue:ensure_rabbit_queue_record_is_initialized(Fun(Q)),
|
||||
ok;
|
||||
|
@ -1328,7 +1329,7 @@ phase_update_mnesia(StreamId, Args, #{reference := QName,
|
|||
_ ->
|
||||
send_self_command({mnesia_updated, StreamId, Args})
|
||||
catch _:E ->
|
||||
rabbit_log:debug("~ts: failed to update mnesia for ~ts: ~W",
|
||||
?LOG_DEBUG("~ts: failed to update mnesia for ~ts: ~W",
|
||||
[?MODULE, StreamId, E, 10]),
|
||||
send_action_failed(StreamId, updating_mnesia, Args)
|
||||
end
|
||||
|
@ -1364,7 +1365,7 @@ filter_command(_Meta, {delete_replica, _, #{node := Node}}, #stream{id = StreamI
|
|||
end, Members0),
|
||||
case maps:size(Members) =< 1 of
|
||||
true ->
|
||||
rabbit_log:warning(
|
||||
?LOG_WARNING(
|
||||
"~ts failed to delete replica on node ~ts for stream ~ts: refusing to delete the only replica",
|
||||
[?MODULE, Node, StreamId]),
|
||||
{error, last_stream_member};
|
||||
|
@ -1379,7 +1380,7 @@ update_stream(Meta, Cmd, Stream) ->
|
|||
update_stream0(Meta, Cmd, Stream)
|
||||
catch
|
||||
_:E:Stacktrace ->
|
||||
rabbit_log:warning(
|
||||
?LOG_WARNING(
|
||||
"~ts failed to update stream:~n~W~n~W",
|
||||
[?MODULE, E, 10, Stacktrace, 10]),
|
||||
Stream
|
||||
|
@ -1495,7 +1496,7 @@ update_stream0(#{system_time := _Ts},
|
|||
Member ->
|
||||
%% do we just ignore any members started events from unexpected
|
||||
%% epochs?
|
||||
rabbit_log:warning("~ts: member started unexpected ~w ~w",
|
||||
?LOG_WARNING("~ts: member started unexpected ~w ~w",
|
||||
[?MODULE, Args, Member]),
|
||||
Stream0
|
||||
end;
|
||||
|
@ -2056,7 +2057,7 @@ fail_active_actions(Streams, Exclude) ->
|
|||
end, Members),
|
||||
case Mnesia of
|
||||
{updating, E} ->
|
||||
rabbit_log:debug("~ts: failing stale action to trigger retry. "
|
||||
?LOG_DEBUG("~ts: failing stale action to trigger retry. "
|
||||
"Stream ID: ~ts, node: ~w, action: ~w",
|
||||
[?MODULE, Id, node(), updating_mnesia]),
|
||||
send_self_command({action_failed, Id,
|
||||
|
@ -2076,7 +2077,7 @@ fail_action(_StreamId, _, #member{current = undefined}) ->
|
|||
ok;
|
||||
fail_action(StreamId, Node, #member{role = {_, E},
|
||||
current = {Action, Idx}}) ->
|
||||
rabbit_log:debug("~ts: failing stale action to trigger retry. "
|
||||
?LOG_DEBUG("~ts: failing stale action to trigger retry. "
|
||||
"Stream ID: ~ts, node: ~w, action: ~w",
|
||||
[?MODULE, StreamId, node(), Action]),
|
||||
%% if we have an action send failure message
|
||||
|
@ -2241,7 +2242,7 @@ update_target(Member, Target) ->
|
|||
|
||||
machine_version(1, 2, State = #?MODULE{streams = Streams0,
|
||||
monitors = Monitors0}) ->
|
||||
rabbit_log:info("Stream coordinator machine version changes from 1 to 2, updating state."),
|
||||
?LOG_INFO("Stream coordinator machine version changes from 1 to 2, updating state."),
|
||||
%% conversion from old state to new state
|
||||
%% additional operation: the stream listeners are never collected in the previous version
|
||||
%% so we'll emit monitors for all listener PIDs
|
||||
|
@ -2273,13 +2274,13 @@ machine_version(1, 2, State = #?MODULE{streams = Streams0,
|
|||
monitors = Monitors2,
|
||||
listeners = undefined}, Effects};
|
||||
machine_version(2, 3, State) ->
|
||||
rabbit_log:info("Stream coordinator machine version changes from 2 to 3, "
|
||||
?LOG_INFO("Stream coordinator machine version changes from 2 to 3, "
|
||||
"updating state."),
|
||||
SacState = rabbit_stream_sac_coordinator_v4:init_state(),
|
||||
{State#?MODULE{single_active_consumer = SacState},
|
||||
[]};
|
||||
machine_version(3, 4, #?MODULE{streams = Streams0} = State) ->
|
||||
rabbit_log:info("Stream coordinator machine version changes from 3 to 4, updating state."),
|
||||
?LOG_INFO("Stream coordinator machine version changes from 3 to 4, updating state."),
|
||||
%% the "preferred" field takes the place of the "node" field in this version
|
||||
%% initializing the "preferred" field to false
|
||||
Streams = maps:map(
|
||||
|
@ -2291,12 +2292,12 @@ machine_version(3, 4, #?MODULE{streams = Streams0} = State) ->
|
|||
end, Streams0),
|
||||
{State#?MODULE{streams = Streams}, []};
|
||||
machine_version(4 = From, 5, #?MODULE{single_active_consumer = Sac0} = State) ->
|
||||
rabbit_log:info("Stream coordinator machine version changes from 4 to 5, updating state."),
|
||||
?LOG_INFO("Stream coordinator machine version changes from 4 to 5, updating state."),
|
||||
SacExport = rabbit_stream_sac_coordinator_v4:state_to_map(Sac0),
|
||||
Sac1 = rabbit_stream_sac_coordinator:import_state(From, SacExport),
|
||||
{State#?MODULE{single_active_consumer = Sac1}, []};
|
||||
machine_version(From, To, State) ->
|
||||
rabbit_log:info("Stream coordinator machine version changes from ~tp to ~tp, no state changes required.",
|
||||
?LOG_INFO("Stream coordinator machine version changes from ~tp to ~tp, no state changes required.",
|
||||
[From, To]),
|
||||
{State, []}.
@ -70,6 +70,7 @@
|
|||
|
||||
-include_lib("rabbit_common/include/rabbit.hrl").
|
||||
-include("amqqueue.hrl").
|
||||
-include_lib("kernel/include/logger.hrl").
|
||||
|
||||
-define(INFO_KEYS, [name, durable, auto_delete, arguments, leader, members, online, state,
|
||||
messages, messages_ready, messages_unacknowledged, committed_offset,
|
||||
|
@ -332,7 +333,7 @@ consume(Q, Spec, #stream_client{} = QState0)
|
|||
args := Args,
|
||||
ok_msg := OkMsg,
|
||||
acting_user := ActingUser} = Spec,
|
||||
rabbit_log:debug("~s:~s Local pid resolved ~0p",
|
||||
?LOG_DEBUG("~s:~s Local pid resolved ~0p",
|
||||
[?MODULE, ?FUNCTION_NAME, LocalPid]),
|
||||
case parse_offset_arg(
|
||||
rabbit_misc:table_lookup(Args, <<"x-stream-offset">>)) of
|
||||
|
@ -643,17 +644,17 @@ handle_event(_QName, {stream_local_member_change, Pid},
|
|||
handle_event(_QName, {stream_local_member_change, Pid},
|
||||
#stream_client{name = QName,
|
||||
readers = Readers0} = State) ->
|
||||
rabbit_log:debug("Local member change event for ~tp", [QName]),
|
||||
?LOG_DEBUG("Local member change event for ~tp", [QName]),
|
||||
Readers1 = maps:fold(fun(T, #stream{log = Log0, reader_options = Options} = S0, Acc) ->
|
||||
Offset = osiris_log:next_offset(Log0),
|
||||
osiris_log:close(Log0),
|
||||
CounterSpec = {{?MODULE, QName, self()}, []},
|
||||
rabbit_log:debug("Re-creating Osiris reader for consumer ~tp at offset ~tp "
|
||||
?LOG_DEBUG("Re-creating Osiris reader for consumer ~tp at offset ~tp "
|
||||
" with options ~tp",
|
||||
[T, Offset, Options]),
|
||||
{ok, Log1} = osiris:init_reader(Pid, Offset, CounterSpec, Options),
|
||||
NextOffset = osiris_log:next_offset(Log1) - 1,
|
||||
rabbit_log:debug("Registering offset listener at offset ~tp", [NextOffset]),
|
||||
?LOG_DEBUG("Registering offset listener at offset ~tp", [NextOffset]),
|
||||
osiris:register_offset_listener(Pid, NextOffset),
|
||||
S1 = S0#stream{listening_offset = NextOffset,
|
||||
log = Log1},
|
||||
|
@ -1000,7 +1001,7 @@ init(Q) when ?is_amqqueue(Q) ->
|
|||
{ok, stream_not_found, _} ->
|
||||
{error, stream_not_found};
|
||||
{error, coordinator_unavailable} = E ->
|
||||
rabbit_log:warning("Failed to start stream client ~tp: coordinator unavailable",
|
||||
?LOG_WARNING("Failed to start stream client ~tp: coordinator unavailable",
|
||||
[rabbit_misc:rs(QName)]),
|
||||
E
|
||||
end.
|
||||
|
@ -1019,7 +1020,7 @@ update(Q, State)
|
|||
update_leader_pid(Pid, #stream_client{leader = Pid} = State) ->
|
||||
State;
|
||||
update_leader_pid(Pid, #stream_client{} = State) ->
|
||||
rabbit_log:debug("stream client: new leader detected ~w", [Pid]),
|
||||
?LOG_DEBUG("stream client: new leader detected ~w", [Pid]),
|
||||
resend_all(State#stream_client{leader = Pid}).
|
||||
|
||||
state_info(_) ->
|
||||
|
@ -1080,11 +1081,11 @@ delete_replica(VHost, Name, Node) ->
|
|||
end.
|
||||
|
||||
delete_all_replicas(Node) ->
|
||||
rabbit_log:info("Asked to remove all stream replicas from node ~ts", [Node]),
|
||||
?LOG_INFO("Asked to remove all stream replicas from node ~ts", [Node]),
|
||||
Streams = rabbit_amqqueue:list_stream_queues_on(Node),
|
||||
lists:map(fun(Q) ->
|
||||
QName = amqqueue:get_name(Q),
|
||||
rabbit_log:info("~ts: removing replica on node ~w",
|
||||
?LOG_INFO("~ts: removing replica on node ~w",
|
||||
[rabbit_misc:rs(QName), Node]),
|
||||
#{name := StreamId} = amqqueue:get_type_state(Q),
|
||||
{ok, Reply, _} = rabbit_stream_coordinator:delete_replica(StreamId, Node),
|
||||
|
@ -1092,7 +1093,7 @@ delete_all_replicas(Node) ->
|
|||
ok ->
|
||||
{QName, ok};
|
||||
Err ->
|
||||
rabbit_log:warning("~ts: failed to remove replica on node ~w, error: ~w",
|
||||
?LOG_WARNING("~ts: failed to remove replica on node ~w, error: ~w",
|
||||
[rabbit_misc:rs(QName), Node, Err]),
|
||||
{QName, {error, Err}}
|
||||
end
|
||||
|
@ -1286,7 +1287,7 @@ chunk_iterator(#stream{credit = Credit,
|
|||
end,
|
||||
{end_of_stream, Str};
|
||||
{error, Err} ->
|
||||
rabbit_log:info("stream client: failed to create chunk iterator ~p", [Err]),
|
||||
?LOG_INFO("stream client: failed to create chunk iterator ~p", [Err]),
|
||||
exit(Err)
|
||||
end.
|
||||
|
||||
|
@ -1365,7 +1366,7 @@ resend_all(#stream_client{leader = LeaderPid,
|
|||
case Msgs of
|
||||
[] -> ok;
|
||||
[{Seq, _} | _] ->
|
||||
rabbit_log:debug("stream client: resending from seq ~w num ~b",
|
||||
?LOG_DEBUG("stream client: resending from seq ~w num ~b",
|
||||
[Seq, maps:size(Corrs)])
|
||||
end,
|
||||
[begin
|
||||
|
@ -1444,7 +1445,7 @@ revive() ->
|
|||
|
||||
-spec transfer_leadership_of_stream_coordinator([node()]) -> ok.
|
||||
transfer_leadership_of_stream_coordinator([]) ->
|
||||
rabbit_log:warning("Skipping leadership transfer of stream coordinator: no candidate "
|
||||
?LOG_WARNING("Skipping leadership transfer of stream coordinator: no candidate "
|
||||
"(online, not under maintenance) nodes to transfer to!");
|
||||
transfer_leadership_of_stream_coordinator(TransferCandidates) ->
|
||||
% try to transfer to the node with the lowest uptime; the assumption is that
|
||||
|
@ -1456,9 +1457,9 @@ transfer_leadership_of_stream_coordinator(TransferCandidates) ->
|
|||
BestCandidate = element(1, hd(lists:keysort(2, Candidates))),
|
||||
case rabbit_stream_coordinator:transfer_leadership([BestCandidate]) of
|
||||
{ok, Node} ->
|
||||
rabbit_log:info("Leadership transfer for stream coordinator completed. The new leader is ~p", [Node]);
|
||||
?LOG_INFO("Leadership transfer for stream coordinator completed. The new leader is ~p", [Node]);
|
||||
Error ->
|
||||
rabbit_log:warning("Skipping leadership transfer of stream coordinator: ~p", [Error])
|
||||
?LOG_WARNING("Skipping leadership transfer of stream coordinator: ~p", [Error])
|
||||
end.
|
||||
|
||||
queue_vm_stats_sups() ->
@ -17,6 +17,7 @@
|
|||
-module(rabbit_stream_sac_coordinator).
|
||||
|
||||
-include("rabbit_stream_sac_coordinator.hrl").
|
||||
-include_lib("kernel/include/logger.hrl").
|
||||
|
||||
-opaque command() :: #command_register_consumer{} |
|
||||
#command_unregister_consumer{} |
|
||||
|
@ -148,7 +149,7 @@ process_command(Cmd) ->
|
|||
{ok, Res, _} ->
|
||||
Res;
|
||||
{error, _} = Err ->
|
||||
rabbit_log:warning("SAC coordinator command ~tp returned error ~tp",
|
||||
?LOG_WARNING("SAC coordinator command ~tp returned error ~tp",
|
||||
[Cmd, Err]),
|
||||
Err
|
||||
end.
|
||||
|
@ -286,7 +287,7 @@ apply(#command_activate_consumer{vhost = VH, stream = S, consumer_name = Name},
|
|||
{G, Eff} =
|
||||
case lookup_group(VH, S, Name, StreamGroups0) of
|
||||
undefined ->
|
||||
rabbit_log:warning("Trying to activate consumer in group ~tp, but "
|
||||
?LOG_WARNING("Trying to activate consumer in group ~tp, but "
|
||||
"the group does not longer exist",
|
||||
[{VH, S, Name}]),
|
||||
{undefined, []};
|
||||
|
@ -348,7 +349,7 @@ apply(#command_purge_nodes{nodes = Nodes}, State0) ->
|
|||
apply(#command_update_conf{conf = NewConf}, State) ->
|
||||
{State#?MODULE{conf = NewConf}, ok, []};
|
||||
apply(UnkCmd, State) ->
|
||||
rabbit_log:debug("~ts: unknown SAC command ~W", [?MODULE, UnkCmd, 10]),
|
||||
?LOG_DEBUG("~ts: unknown SAC command ~W", [?MODULE, UnkCmd, 10]),
|
||||
{State, {error, unknown_command}, []}.
|
||||
|
||||
purge_node(Node, #?MODULE{groups = Groups0} = State0) ->
@ -17,6 +17,7 @@
|
|||
-module(rabbit_stream_sac_coordinator_v4).
|
||||
|
||||
-include("rabbit_stream_sac_coordinator_v4.hrl").
|
||||
-include_lib("kernel/include/logger.hrl").
|
||||
|
||||
-opaque command() ::
|
||||
#command_register_consumer{} | #command_unregister_consumer{} |
|
||||
|
@ -124,7 +125,7 @@ process_command(Cmd) ->
|
|||
{ok, Res, _} ->
|
||||
Res;
|
||||
{error, _} = Err ->
|
||||
rabbit_log:warning("SAC coordinator command ~tp returned error ~tp",
|
||||
?LOG_WARNING("SAC coordinator command ~tp returned error ~tp",
|
||||
[Cmd, Err]),
|
||||
Err
|
||||
end.
|
||||
|
@ -251,7 +252,7 @@ apply(#command_activate_consumer{vhost = VirtualHost,
|
|||
{G, Eff} =
|
||||
case lookup_group(VirtualHost, Stream, ConsumerName, StreamGroups0) of
|
||||
undefined ->
|
||||
rabbit_log:warning("Trying to activate consumer in group ~tp, but "
|
||||
?LOG_WARNING("Trying to activate consumer in group ~tp, but "
|
||||
"the group does not longer exist",
|
||||
[{VirtualHost, Stream, ConsumerName}]),
|
||||
{undefined, []};
@ -23,6 +23,9 @@
|
|||
|
||||
-module(rabbit_sysmon_handler).
|
||||
|
||||
-include_lib("kernel/include/logger.hrl").
|
||||
|
||||
|
||||
-behaviour(gen_event).
|
||||
|
||||
%% API
|
||||
|
@ -89,16 +92,16 @@ handle_event({monitor, PidOrPort, Type, Info}, State=#state{timer_ref=TimerRef})
|
|||
%% Reset the inactivity timeout
|
||||
NewTimerRef = reset_timer(TimerRef),
|
||||
{Fmt, Args} = format_pretty_proc_or_port_info(PidOrPort),
|
||||
rabbit_log:warning("~tp ~w ~w " ++ Fmt ++ " ~w", [?MODULE, Type, PidOrPort] ++ Args ++ [Info]),
|
||||
?LOG_WARNING("~tp ~w ~w " ++ Fmt ++ " ~w", [?MODULE, Type, PidOrPort] ++ Args ++ [Info]),
|
||||
{ok, State#state{timer_ref=NewTimerRef}};
|
||||
handle_event({suppressed, Type, Info}, State=#state{timer_ref=TimerRef}) ->
|
||||
%% Reset the inactivity timeout
|
||||
NewTimerRef = reset_timer(TimerRef),
|
||||
rabbit_log:debug("~tp encountered a suppressed event of type ~w: ~w", [?MODULE, Type, Info]),
|
||||
?LOG_DEBUG("~tp encountered a suppressed event of type ~w: ~w", [?MODULE, Type, Info]),
|
||||
{ok, State#state{timer_ref=NewTimerRef}};
|
||||
handle_event(Event, State=#state{timer_ref=TimerRef}) ->
|
||||
NewTimerRef = reset_timer(TimerRef),
|
||||
rabbit_log:warning("~tp unhandled event: ~tp", [?MODULE, Event]),
|
||||
?LOG_WARNING("~tp unhandled event: ~tp", [?MODULE, Event]),
|
||||
{ok, State#state{timer_ref=NewTimerRef}}.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
|
@ -136,7 +139,7 @@ handle_info(inactivity_timeout, State) ->
|
|||
%% so hibernate to free up resources.
|
||||
{ok, State, hibernate};
|
||||
handle_info(Info, State) ->
|
||||
rabbit_log:info("handle_info got ~tp", [Info]),
|
||||
?LOG_INFO("handle_info got ~tp", [Info]),
|
||||
{ok, State}.
|
||||
|
||||
%%--------------------------------------------------------------------
@ -20,6 +20,7 @@
|
|||
|
||||
|
||||
-include_lib("rabbit_common/include/rabbit.hrl").
|
||||
-include_lib("kernel/include/logger.hrl").
|
||||
|
||||
-ifdef(TEST).
|
||||
-export([pre_khepri_definitions/0]).
|
||||
|
@ -46,7 +47,7 @@ create() ->
|
|||
|
||||
create(TableName, TableDefinition) ->
|
||||
TableDefinition1 = proplists:delete(match, TableDefinition),
|
||||
rabbit_log:debug("Will create a schema database table '~ts'", [TableName]),
|
||||
?LOG_DEBUG("Will create a schema database table '~ts'", [TableName]),
|
||||
case mnesia:create_table(TableName, TableDefinition1) of
|
||||
{atomic, ok} -> ok;
|
||||
{aborted,{already_exists, TableName}} -> ok;
|
||||
|
@ -78,7 +79,7 @@ ensure_secondary_index(Table, Field) ->
|
|||
-spec ensure_table_copy(mnesia_table(), node(), mnesia_storage_type()) ->
|
||||
ok | {error, any()}.
|
||||
ensure_table_copy(TableName, Node, StorageType) ->
|
||||
rabbit_log:debug("Will add a local schema database copy for table '~ts'", [TableName]),
|
||||
?LOG_DEBUG("Will add a local schema database copy for table '~ts'", [TableName]),
|
||||
case mnesia:add_table_copy(TableName, Node, StorageType) of
|
||||
{atomic, ok} -> ok;
|
||||
{aborted,{already_exists, TableName}} -> ok;
|
||||
|
@ -140,7 +141,7 @@ wait1(TableNames, Timeout, Retries, Silent) ->
|
|||
true ->
|
||||
ok;
|
||||
false ->
|
||||
rabbit_log:info("Waiting for Mnesia tables for ~tp ms, ~tp retries left",
|
||||
?LOG_INFO("Waiting for Mnesia tables for ~tp ms, ~tp retries left",
|
||||
[Timeout, Retries - 1])
|
||||
end,
|
||||
Result = case mnesia:wait_for_tables(TableNames, Timeout) of
|
||||
|
@ -159,7 +160,7 @@ wait1(TableNames, Timeout, Retries, Silent) ->
|
|||
true ->
|
||||
ok;
|
||||
false ->
|
||||
rabbit_log:info("Successfully synced tables from a peer"),
|
||||
?LOG_INFO("Successfully synced tables from a peer"),
|
||||
ok
|
||||
end;
|
||||
{1, {error, _} = Error} ->
|
||||
|
@ -169,7 +170,7 @@ wait1(TableNames, Timeout, Retries, Silent) ->
|
|||
true ->
|
||||
ok;
|
||||
false ->
|
||||
rabbit_log:warning("Error while waiting for Mnesia tables: ~tp", [Error])
|
||||
?LOG_WARNING("Error while waiting for Mnesia tables: ~tp", [Error])
|
||||
end,
|
||||
wait1(TableNames, Timeout, Retries - 1, Silent)
|
||||
end.
@ -12,6 +12,7 @@
|
|||
|
||||
-include_lib("rabbit_common/include/rabbit.hrl").
|
||||
-include_lib("rabbit_common/include/rabbit_framing.hrl").
|
||||
-include_lib("kernel/include/logger.hrl").
|
||||
|
||||
-define(TRACE_VHOSTS, trace_vhosts).
|
||||
-define(XNAME, <<"amq.rabbitmq.trace">>).
|
||||
|
@ -103,10 +104,10 @@ start(VHost)
|
|||
when is_binary(VHost) ->
|
||||
case enabled(VHost) of
|
||||
true ->
|
||||
rabbit_log:info("Tracing is already enabled for vhost '~ts'", [VHost]),
|
||||
?LOG_INFO("Tracing is already enabled for vhost '~ts'", [VHost]),
|
||||
ok;
|
||||
false ->
|
||||
rabbit_log:info("Enabling tracing for vhost '~ts'", [VHost]),
|
||||
?LOG_INFO("Enabling tracing for vhost '~ts'", [VHost]),
|
||||
update_config(fun(VHosts) -> lists:usort([VHost | VHosts]) end)
|
||||
end.
|
||||
|
||||
|
@ -115,10 +116,10 @@ stop(VHost)
|
|||
when is_binary(VHost) ->
|
||||
case enabled(VHost) of
|
||||
true ->
|
||||
rabbit_log:info("Disabling tracing for vhost '~ts'", [VHost]),
|
||||
?LOG_INFO("Disabling tracing for vhost '~ts'", [VHost]),
|
||||
update_config(fun(VHosts) -> VHosts -- [VHost] end);
|
||||
false ->
|
||||
rabbit_log:info("Tracing is already disabled for vhost '~ts'", [VHost]),
|
||||
?LOG_INFO("Tracing is already disabled for vhost '~ts'", [VHost]),
|
||||
ok
|
||||
end.
|
||||
|
||||
|
@ -128,13 +129,13 @@ update_config(Fun) ->
|
|||
application:set_env(rabbit, ?TRACE_VHOSTS, VHosts),
|
||||
Sessions = rabbit_amqp_session:list_local(),
|
||||
NonAmqpPids = rabbit_networking:local_non_amqp_connections(),
|
||||
rabbit_log:debug("Refreshing state of channels, ~b sessions and ~b non "
|
||||
?LOG_DEBUG("Refreshing state of channels, ~b sessions and ~b non "
|
||||
"AMQP 0.9.1 connections after virtual host tracing changes...",
|
||||
[length(Sessions), length(NonAmqpPids)]),
|
||||
Pids = Sessions ++ NonAmqpPids,
|
||||
lists:foreach(fun(Pid) -> gen_server:cast(Pid, refresh_config) end, Pids),
|
||||
{Time, ok} = timer:tc(fun rabbit_channel:refresh_config_local/0),
|
||||
rabbit_log:debug("Refreshed channel states in ~fs", [Time / 1_000_000]),
|
||||
?LOG_DEBUG("Refreshed channel states in ~fs", [Time / 1_000_000]),
|
||||
ok.
|
||||
|
||||
vhosts_with_tracing_enabled() ->
@ -7,6 +7,9 @@
|
|||
|
||||
-module(rabbit_tracking).
|
||||
|
||||
-include_lib("kernel/include/logger.hrl").
|
||||
|
||||
|
||||
%% Common behaviour and processing functions for tracking components
|
||||
%%
|
||||
%% See in use:
|
||||
|
@ -45,12 +48,12 @@ count_on_all_nodes(Mod, Fun, Args, ContextMsg) ->
|
|||
sum_rpc_multicall_result([{ok, Int}|ResL], [_N|Nodes], ContextMsg, Acc) when is_integer(Int) ->
|
||||
sum_rpc_multicall_result(ResL, Nodes, ContextMsg, Acc + Int);
|
||||
sum_rpc_multicall_result([{ok, BadValue}|ResL], [BadNode|Nodes], ContextMsg, Acc) ->
|
||||
rabbit_log:error(
|
||||
?LOG_ERROR(
|
||||
"Failed to fetch number of ~ts on node ~tp:~n not an integer ~tp",
|
||||
[ContextMsg, BadNode, BadValue]),
|
||||
sum_rpc_multicall_result(ResL, Nodes, ContextMsg, Acc);
|
||||
sum_rpc_multicall_result([{Class, Reason}|ResL], [BadNode|Nodes], ContextMsg, Acc) ->
|
||||
rabbit_log:error(
|
||||
?LOG_ERROR(
|
||||
"Failed to fetch number of ~ts on node ~tp:~n~tp:~tp",
|
||||
[ContextMsg, BadNode, Class, Reason]),
|
||||
sum_rpc_multicall_result(ResL, Nodes, ContextMsg, Acc);
@ -7,6 +7,9 @@
|
|||
|
||||
-module(rabbit_upgrade_preparation).
|
||||
|
||||
-include_lib("kernel/include/logger.hrl").
|
||||
|
||||
|
||||
-export([await_online_quorum_plus_one/1,
|
||||
list_with_minimum_quorum_for_cli/0]).
|
||||
|
||||
|
@ -64,12 +67,12 @@ do_await_safe_online_quorum(IterationsLeft) ->
|
|||
0 ->
|
||||
case length(EndangeredQueues) of
|
||||
0 -> ok;
|
||||
N -> rabbit_log:info("Waiting for ~p queues and streams to have quorum+1 replicas online. "
|
||||
N -> ?LOG_INFO("Waiting for ~p queues and streams to have quorum+1 replicas online. "
|
||||
"You can list them with `rabbitmq-diagnostics check_if_node_is_quorum_critical`", [N])
|
||||
end,
|
||||
case endangered_critical_components() of
|
||||
[] -> ok;
|
||||
_ -> rabbit_log:info("Waiting for the following critical components to have quorum+1 replicas online: ~p.",
|
||||
_ -> ?LOG_INFO("Waiting for the following critical components to have quorum+1 replicas online: ~p.",
|
||||
[endangered_critical_components()])
|
||||
end;
|
||||
_ ->
@ -268,6 +268,7 @@
|
|||
|
||||
-include_lib("rabbit_common/include/rabbit.hrl").
|
||||
-include("amqqueue.hrl").
|
||||
-include_lib("kernel/include/logger.hrl").
|
||||
|
||||
%%----------------------------------------------------------------------------
|
||||
|
||||
|
@ -382,7 +383,7 @@ stop(VHost) ->
|
|||
ok = rabbit_classic_queue_index_v2:stop(VHost).
|
||||
|
||||
start_msg_store(VHost, Refs, StartFunState) when is_list(Refs); Refs == undefined ->
|
||||
rabbit_log:info("Starting message stores for vhost '~ts'", [VHost]),
|
||||
?LOG_INFO("Starting message stores for vhost '~ts'", [VHost]),
|
||||
do_start_msg_store(VHost, ?TRANSIENT_MSG_STORE, undefined, ?EMPTY_START_FUN_STATE),
|
||||
do_start_msg_store(VHost, ?PERSISTENT_MSG_STORE, Refs, StartFunState),
|
||||
ok.
|
||||
|
@ -390,13 +391,13 @@ start_msg_store(VHost, Refs, StartFunState) when is_list(Refs); Refs == undefine
|
|||
do_start_msg_store(VHost, Type, Refs, StartFunState) ->
|
||||
case rabbit_vhost_msg_store:start(VHost, Type, Refs, StartFunState) of
|
||||
{ok, _} ->
|
||||
rabbit_log:info("Started message store of type ~ts for vhost '~ts'", [abbreviated_type(Type), VHost]);
|
||||
?LOG_INFO("Started message store of type ~ts for vhost '~ts'", [abbreviated_type(Type), VHost]);
|
||||
{error, {no_such_vhost, VHost}} = Err ->
|
||||
rabbit_log:error("Failed to start message store of type ~ts for vhost '~ts': the vhost no longer exists!",
|
||||
?LOG_ERROR("Failed to start message store of type ~ts for vhost '~ts': the vhost no longer exists!",
|
||||
[Type, VHost]),
|
||||
exit(Err);
|
||||
{error, Error} ->
|
||||
rabbit_log:error("Failed to start message store of type ~ts for vhost '~ts': ~tp",
|
||||
?LOG_ERROR("Failed to start message store of type ~ts for vhost '~ts': ~tp",
|
||||
[Type, VHost, Error]),
|
||||
exit({error, Error})
|
||||
end.
|
||||
|
@ -891,7 +892,7 @@ convert_from_v1_to_v2_loop(QueueName, V1Index0, V2Index0, V2Store0,
|
|||
%% Log some progress to keep the user aware of what's going on, as moving
|
||||
%% embedded messages can take quite some time.
|
||||
#resource{virtual_host = VHost, name = Name} = QueueName,
|
||||
rabbit_log:info("Queue ~ts in vhost ~ts converted ~b messages from v1 to v2",
|
||||
?LOG_INFO("Queue ~ts in vhost ~ts converted ~b messages from v1 to v2",
|
||||
[Name, VHost, length(Messages)]),
|
||||
convert_from_v1_to_v2_loop(QueueName, V1Index, V2Index, V2Store, Counters, UpSeqId, HiSeqId, SkipFun).
|
||||
@ -9,6 +9,7 @@
|
|||
|
||||
-include_lib("rabbit_common/include/rabbit.hrl").
|
||||
-include("vhost.hrl").
|
||||
-include_lib("kernel/include/logger.hrl").
|
||||
|
||||
-export([recover/0, recover/1, read_config/1]).
|
||||
-export([add/2, add/3, add/4, delete/2, delete_ignoring_protection/2, exists/1, assert/1,
|
||||
|
@ -40,7 +41,7 @@ recover() ->
|
|||
{Time, _} = timer:tc(fun() ->
|
||||
rabbit_binding:recover()
|
||||
end),
|
||||
rabbit_log:debug("rabbit_binding:recover/0 completed in ~fs", [Time/1000000]),
|
||||
?LOG_DEBUG("rabbit_binding:recover/0 completed in ~fs", [Time/1000000]),
|
||||
|
||||
%% rabbit_vhost_sup_sup will start the actual recovery.
|
||||
%% So recovery will be run every time a vhost supervisor is restarted.
|
||||
|
@ -51,7 +52,7 @@ recover() ->
|
|||
|
||||
recover(VHost) ->
|
||||
VHostDir = msg_store_dir_path(VHost),
|
||||
rabbit_log:info("Making sure data directory '~ts' for vhost '~ts' exists",
|
||||
?LOG_INFO("Making sure data directory '~ts' for vhost '~ts' exists",
|
||||
[VHostDir, VHost]),
|
||||
VHostStubFile = filename:join(VHostDir, ".vhost"),
|
||||
ok = rabbit_file:ensure_dir(VHostStubFile),
|
||||
|
@ -65,25 +66,25 @@ recover(VHost) ->
|
|||
%% we need to add the default type to the metadata
|
||||
case rabbit_db_vhost:get(VHost) of
|
||||
undefined ->
|
||||
rabbit_log:warning("Cannot check metadata for vhost '~ts' during recovery, record not found.",
|
||||
?LOG_WARNING("Cannot check metadata for vhost '~ts' during recovery, record not found.",
|
||||
[VHost]);
|
||||
VHostRecord ->
|
||||
Metadata = vhost:get_metadata(VHostRecord),
|
||||
case maps:is_key(default_queue_type, Metadata) of
|
||||
true ->
|
||||
rabbit_log:debug("Default queue type for vhost '~ts' is ~p.",
|
||||
?LOG_DEBUG("Default queue type for vhost '~ts' is ~p.",
|
||||
[VHost, maps:get(default_queue_type, Metadata)]),
|
||||
ok;
|
||||
false ->
|
||||
DefaultType = rabbit_queue_type:default_alias(),
|
||||
rabbit_log:info("Setting missing default queue type to '~p' for vhost '~ts'.",
|
||||
?LOG_INFO("Setting missing default queue type to '~p' for vhost '~ts'.",
|
||||
[DefaultType, VHost]),
|
||||
case rabbit_db_vhost:merge_metadata(VHost, #{default_queue_type => DefaultType}) of
|
||||
{ok, _UpdatedVHostRecord} ->
|
||||
ok;
|
||||
{error, Reason} ->
|
||||
% Log the error but continue recovery
|
||||
rabbit_log:warning("Failed to set the default queue type for vhost '~ts': ~p",
|
||||
?LOG_WARNING("Failed to set the default queue type for vhost '~ts': ~p",
|
||||
[VHost, Reason])
|
||||
end
|
||||
end
|
||||
|
@ -95,7 +96,7 @@ recover(VHost) ->
|
|||
{Time, ok} = timer:tc(fun() ->
|
||||
rabbit_binding:recover(rabbit_exchange:recover(VHost), QNames)
|
||||
end),
|
||||
rabbit_log:debug("rabbit_binding:recover/2 for vhost ~ts completed in ~fs", [VHost, Time/1000000]),
|
||||
?LOG_DEBUG("rabbit_binding:recover/2 for vhost ~ts completed in ~fs", [VHost, Time/1000000]),
|
||||
|
||||
ok = rabbit_amqqueue:start(Recovered),
|
||||
ok.
|
||||
|
@ -124,7 +125,7 @@ ensure_config_file(VHost) ->
|
|||
_ ->
|
||||
?LEGACY_INDEX_SEGMENT_ENTRY_COUNT
|
||||
end,
|
||||
rabbit_log:info("Setting segment_entry_count for vhost '~ts' with ~b queues to '~b'",
|
||||
?LOG_INFO("Setting segment_entry_count for vhost '~ts' with ~b queues to '~b'",
|
||||
[VHost, length(QueueDirs), SegmentEntryCount]),
|
||||
file:write_file(Path, io_lib:format(
|
||||
"%% This file is auto-generated! Edit at your own risk!~n"
|
||||
|
@ -206,7 +207,7 @@ do_add(Name, Metadata0, ActingUser) ->
|
|||
case Metadata of
|
||||
#{default_queue_type := DQT} ->
|
||||
%% check that the queue type is known
|
||||
rabbit_log:debug("Default queue type of virtual host '~ts' is ~tp",
|
||||
?LOG_DEBUG("Default queue type of virtual host '~ts' is ~tp",
|
||||
[Name, DQT]),
|
||||
try rabbit_queue_type:discover(DQT) of
|
||||
QueueType when is_atom(QueueType) ->
|
||||
|
@ -225,9 +226,9 @@ do_add(Name, Metadata0, ActingUser) ->
|
|||
|
||||
case Description of
|
||||
undefined ->
|
||||
rabbit_log:info("Adding vhost '~ts' without a description", [Name]);
|
||||
?LOG_INFO("Adding vhost '~ts' without a description", [Name]);
|
||||
Description ->
|
||||
rabbit_log:info("Adding vhost '~ts' (description: '~ts', tags: ~tp)",
|
||||
?LOG_INFO("Adding vhost '~ts' (description: '~ts', tags: ~tp)",
|
||||
[Name, Description, Tags])
|
||||
end,
|
||||
DefaultLimits = rabbit_db_vhost_defaults:list_limits(Name),
|
||||
|
@ -235,7 +236,7 @@ do_add(Name, Metadata0, ActingUser) ->
|
|||
{NewOrNot, VHost} = rabbit_db_vhost:create_or_get(Name, DefaultLimits, Metadata),
|
||||
case NewOrNot of
|
||||
new ->
|
||||
rabbit_log:debug("Inserted a virtual host record ~tp", [VHost]);
|
||||
?LOG_DEBUG("Inserted a virtual host record ~tp", [VHost]);
|
||||
existing ->
|
||||
ok
|
||||
end,
|
||||
|
@ -280,7 +281,7 @@ declare_default_exchanges(VHostName, ActingUser) ->
|
|||
rabbit_misc:for_each_while_ok(
|
||||
fun({ExchangeName, Type, Internal}) ->
|
||||
Resource = rabbit_misc:r(VHostName, exchange, ExchangeName),
|
||||
rabbit_log:debug("Will declare an exchange ~tp", [Resource]),
|
||||
?LOG_DEBUG("Will declare an exchange ~tp", [Resource]),
|
||||
case rabbit_exchange:declare(
|
||||
Resource, Type, true, false, Internal, [],
|
||||
ActingUser) of
|
||||
|
@ -342,7 +343,7 @@ delete(Name, ActingUser) ->
|
|||
case vhost:is_protected_from_deletion(VHost) of
|
||||
true ->
|
||||
Msg = "Refusing to delete virtual host '~ts' because it is protected from deletion",
|
||||
rabbit_log:debug(Msg, [Name]),
|
||||
?LOG_DEBUG(Msg, [Name]),
|
||||
{error, protected_from_deletion};
|
||||
false ->
|
||||
delete_ignoring_protection(Name, ActingUser)
|
||||
|
@ -356,25 +357,25 @@ delete_ignoring_protection(Name, ActingUser) ->
|
|||
%% process, which in turn results in further database actions and
|
||||
%% eventually the termination of that process. Exchange deletion causes
|
||||
%% notifications which must be sent outside the TX
|
||||
rabbit_log:info("Deleting vhost '~ts'", [Name]),
|
||||
?LOG_INFO("Deleting vhost '~ts'", [Name]),
|
||||
%% TODO: This code does a lot of "list resources, walk through the list to
|
||||
%% delete each resource". This feature should be provided by each called
|
||||
%% modules, like `rabbit_amqqueue:delete_all_for_vhost(VHost)'. These new
|
||||
%% calls would be responsible for the atomicity, not this code.
|
||||
%% Clear the permissions first to prohibit new incoming connections when deleting a vhost
|
||||
rabbit_log:info("Clearing permissions in vhost '~ts' because it's being deleted", [Name]),
|
||||
?LOG_INFO("Clearing permissions in vhost '~ts' because it's being deleted", [Name]),
|
||||
ok = rabbit_auth_backend_internal:clear_all_permissions_for_vhost(Name, ActingUser),
|
||||
rabbit_log:info("Deleting queues in vhost '~ts' because it's being deleted", [Name]),
|
||||
?LOG_INFO("Deleting queues in vhost '~ts' because it's being deleted", [Name]),
|
||||
QDelFun = fun (Q) -> rabbit_amqqueue:delete(Q, false, false, ActingUser) end,
|
||||
[begin
|
||||
QName = amqqueue:get_name(Q),
|
||||
assert_benign(rabbit_amqqueue:with(QName, QDelFun), ActingUser)
|
||||
end || Q <- rabbit_amqqueue:list(Name)],
|
||||
rabbit_log:info("Deleting exchanges in vhost '~ts' because it's being deleted", [Name]),
|
||||
?LOG_INFO("Deleting exchanges in vhost '~ts' because it's being deleted", [Name]),
|
||||
ok = rabbit_exchange:delete_all(Name, ActingUser),
|
||||
rabbit_log:info("Clearing policies and runtime parameters in vhost '~ts' because it's being deleted", [Name]),
|
||||
?LOG_INFO("Clearing policies and runtime parameters in vhost '~ts' because it's being deleted", [Name]),
|
||||
_ = rabbit_runtime_parameters:clear_vhost(Name, ActingUser),
|
||||
rabbit_log:debug("Removing vhost '~ts' from the metadata storage because it's being deleted", [Name]),
|
||||
?LOG_DEBUG("Removing vhost '~ts' from the metadata storage because it's being deleted", [Name]),
|
||||
Ret = case rabbit_db_vhost:delete(Name) of
|
||||
true ->
|
||||
ok = rabbit_event:notify(
|
||||
|
@ -407,7 +408,7 @@ put_vhost(Name, Description, Tags0, DefaultQueueType, Trace, Username) ->
|
|||
Other -> Other
|
||||
end,
|
||||
ParsedTags = parse_tags(Tags),
|
||||
rabbit_log:debug("Parsed virtual host tags ~tp to ~tp", [Tags, ParsedTags]),
|
||||
?LOG_DEBUG("Parsed virtual host tags ~tp to ~tp", [Tags, ParsedTags]),
|
||||
Result = case exists(Name) of
|
||||
true ->
|
||||
update(Name, Description, ParsedTags, DefaultQueueType, Username);
|
||||
|
@ -451,7 +452,7 @@ is_over_vhost_limit(Name, Limit) when is_integer(Limit) ->
|
|||
ErrorMsg = rabbit_misc:format("cannot create vhost '~ts': "
|
||||
"vhost limit of ~tp is reached",
|
||||
[Name, Limit]),
|
||||
rabbit_log:error(ErrorMsg),
|
||||
?LOG_ERROR(ErrorMsg),
|
||||
exit({vhost_limit_exceeded, ErrorMsg})
|
||||
end.
|
||||
|
||||
|
@ -510,7 +511,7 @@ vhost_cluster_state(VHost) ->
|
|||
Nodes).
|
||||
|
||||
vhost_down(VHost) ->
|
||||
rabbit_log:info("Virtual host '~ts' is stopping", [VHost]),
|
||||
?LOG_INFO("Virtual host '~ts' is stopping", [VHost]),
|
||||
ok = rabbit_event:notify(vhost_down,
|
||||
[{name, VHost},
|
||||
{node, node()},
|
||||
|
@ -518,16 +519,16 @@ vhost_down(VHost) ->
|
|||
|
||||
delete_storage(VHost) ->
|
||||
VhostDir = msg_store_dir_path(VHost),
|
||||
rabbit_log:info("Deleting message store directory for vhost '~ts' at '~ts'", [VHost, VhostDir]),
|
||||
?LOG_INFO("Deleting message store directory for vhost '~ts' at '~ts'", [VHost, VhostDir]),
|
||||
%% Message store should be closed when vhost supervisor is closed.
|
||||
case rabbit_file:recursive_delete([VhostDir]) of
|
||||
ok -> ok;
|
||||
{error, {_, enoent}} ->
|
||||
%% a concurrent delete did the job for us
|
||||
rabbit_log:warning("Tried to delete storage directories for vhost '~ts', it failed with an ENOENT", [VHost]),
|
||||
?LOG_WARNING("Tried to delete storage directories for vhost '~ts', it failed with an ENOENT", [VHost]),
|
||||
ok;
|
||||
Other ->
|
||||
rabbit_log:warning("Tried to delete storage directories for vhost '~ts': ~tp", [VHost, Other]),
|
||||
?LOG_WARNING("Tried to delete storage directories for vhost '~ts': ~tp", [VHost, Other]),
|
||||
Other
|
||||
end.
|
||||
|
||||
|
@ -642,7 +643,7 @@ update_tags(VHostName, Tags, ActingUser) ->
|
|||
end,
|
||||
VHost = rabbit_db_vhost:set_tags(VHostName, Tags),
|
||||
ConvertedTags = vhost:get_tags(VHost),
|
||||
rabbit_log:info("Successfully set tags for virtual host '~ts' to ~tp", [VHostName, ConvertedTags]),
|
||||
?LOG_INFO("Successfully set tags for virtual host '~ts' to ~tp", [VHostName, ConvertedTags]),
|
||||
rabbit_event:notify_if(are_different(CurrentTags, ConvertedTags),
|
||||
vhost_tags_set, [{name, VHostName},
|
||||
{tags, ConvertedTags},
|
||||
|
@ -650,13 +651,13 @@ update_tags(VHostName, Tags, ActingUser) ->
|
|||
VHost
|
||||
catch
|
||||
throw:{error, {no_such_vhost, _}} = Error ->
|
||||
rabbit_log:warning("Failed to set tags for virtual host '~ts': the virtual host does not exist", [VHostName]),
|
||||
?LOG_WARNING("Failed to set tags for virtual host '~ts': the virtual host does not exist", [VHostName]),
|
||||
throw(Error);
|
||||
throw:Error ->
|
||||
rabbit_log:warning("Failed to set tags for virtual host '~ts': ~tp", [VHostName, Error]),
|
||||
?LOG_WARNING("Failed to set tags for virtual host '~ts': ~tp", [VHostName, Error]),
|
||||
throw(Error);
|
||||
exit:Error ->
|
||||
rabbit_log:warning("Failed to set tags for virtual host '~ts': ~tp", [VHostName, Error]),
|
||||
?LOG_WARNING("Failed to set tags for virtual host '~ts': ~tp", [VHostName, Error]),
|
||||
exit(Error)
|
||||
end.
|
||||
|
||||
|
@ -718,7 +719,7 @@ i(metadata, VHost) ->
|
|||
M#{default_queue_type => DQT}
|
||||
end;
|
||||
i(Item, VHost) ->
|
||||
rabbit_log:error("Don't know how to compute a virtual host info item '~ts' for virtual host '~tp'", [Item, VHost]),
|
||||
?LOG_ERROR("Don't know how to compute a virtual host info item '~ts' for virtual host '~tp'", [Item, VHost]),
|
||||
throw({bad_argument, Item}).
|
||||
|
||||
-spec info(vhost:vhost() | vhost:name()) -> rabbit_types:infos().
@ -8,6 +8,7 @@
|
|||
-module(rabbit_vhost_msg_store).
|
||||
|
||||
-include_lib("rabbit_common/include/rabbit.hrl").
|
||||
-include_lib("kernel/include/logger.hrl").
|
||||
|
||||
-export([start/4, stop/2, client_init/4, successfully_recovered_state/2]).
|
||||
-export([vhost_store_pid/2]).
|
||||
|
@ -25,7 +26,7 @@ start(VHost, Type, ClientRefs, StartupFunState) when is_list(ClientRefs);
|
|||
%% we can get here if a vhost is added and removed concurrently
|
||||
%% e.g. some integration tests do it
|
||||
{error, {no_such_vhost, VHost}} = E ->
|
||||
rabbit_log:error("Failed to start a message store for vhost ~ts: vhost no longer exists!",
|
||||
?LOG_ERROR("Failed to start a message store for vhost ~ts: vhost no longer exists!",
|
||||
[VHost]),
|
||||
E
|
||||
end.
|
||||
|
@ -37,7 +38,7 @@ stop(VHost, Type) ->
|
|||
ok = supervisor:delete_child(VHostSup, Type);
|
||||
%% see start/4
|
||||
{error, {no_such_vhost, VHost}} ->
|
||||
rabbit_log:error("Failed to stop a message store for vhost ~ts: vhost no longer exists!",
|
||||
?LOG_ERROR("Failed to stop a message store for vhost ~ts: vhost no longer exists!",
|
||||
[VHost]),
|
||||
|
||||
ok
@ -21,6 +21,9 @@
|
|||
|
||||
-module(rabbit_vhost_process).
|
||||
|
||||
-include_lib("kernel/include/logger.hrl").
|
||||
|
||||
|
||||
-define(VHOST_CHECK_INTERVAL, 5000).
|
||||
|
||||
-behaviour(gen_server2).
|
||||
|
@ -35,7 +38,7 @@ start_link(VHost) ->
|
|||
|
||||
init([VHost]) ->
|
||||
process_flag(trap_exit, true),
|
||||
rabbit_log:debug("Recovering data for virtual host ~ts", [VHost]),
|
||||
?LOG_DEBUG("Recovering data for virtual host ~ts", [VHost]),
|
||||
try
|
||||
%% Recover the vhost data and save it to vhost registry.
|
||||
ok = rabbit_vhost:recover(VHost),
|
||||
|
@ -45,7 +48,7 @@ init([VHost]) ->
|
|||
{ok, VHost}
|
||||
catch _:Reason:Stacktrace ->
|
||||
rabbit_amqqueue:mark_local_durable_queues_stopped(VHost),
|
||||
rabbit_log:error("Unable to recover vhost ~tp data. Reason ~tp~n"
|
||||
?LOG_ERROR("Unable to recover vhost ~tp data. Reason ~tp~n"
|
||||
" Stacktrace ~tp",
|
||||
[VHost, Reason, Stacktrace]),
|
||||
{stop, Reason}
|
||||
|
@ -61,7 +64,7 @@ handle_info(check_vhost, VHost) ->
|
|||
case rabbit_vhost:exists(VHost) of
|
||||
true -> {noreply, VHost};
|
||||
false ->
|
||||
rabbit_log:warning("Virtual host '~ts' is gone. "
|
||||
?LOG_WARNING("Virtual host '~ts' is gone. "
|
||||
"Stopping its top level supervisor.",
|
||||
[VHost]),
|
||||
%% Stop vhost's top supervisor in a one-off process to avoid a deadlock:
@ -8,6 +8,7 @@
|
|||
-module(rabbit_vhost_sup_sup).
|
||||
|
||||
-include_lib("rabbit_common/include/rabbit.hrl").
|
||||
-include_lib("kernel/include/logger.hrl").
|
||||
|
||||
-behaviour(supervisor).
|
||||
|
||||
|
@ -79,18 +80,18 @@ delete_on_all_nodes(VHost) ->
|
|||
stop_and_delete_vhost(VHost) ->
|
||||
StopResult = case lookup_vhost_sup_record(VHost) of
|
||||
not_found ->
|
||||
rabbit_log:warning("Supervisor for vhost '~ts' not found during deletion procedure",
|
||||
?LOG_WARNING("Supervisor for vhost '~ts' not found during deletion procedure",
|
||||
[VHost]),
|
||||
ok;
|
||||
#vhost_sup{wrapper_pid = WrapperPid,
|
||||
vhost_sup_pid = VHostSupPid} ->
|
||||
case is_process_alive(WrapperPid) of
|
||||
false ->
|
||||
rabbit_log:info("Supervisor ~tp for vhost '~ts' already stopped",
|
||||
?LOG_INFO("Supervisor ~tp for vhost '~ts' already stopped",
|
||||
[VHostSupPid, VHost]),
|
||||
ok;
|
||||
true ->
|
||||
rabbit_log:info("Stopping vhost supervisor ~tp"
|
||||
?LOG_INFO("Stopping vhost supervisor ~tp"
|
||||
" for vhost '~ts'",
|
||||
[VHostSupPid, VHost]),
|
||||
case supervisor:terminate_child(?MODULE, WrapperPid) of
|
||||
|
@ -112,7 +113,7 @@ stop_and_delete_vhost(VHost, Node) ->
|
|||
case rabbit_misc:rpc_call(Node, rabbit_vhost_sup_sup, stop_and_delete_vhost, [VHost]) of
|
||||
ok -> ok;
|
||||
{badrpc, RpcErr} ->
|
||||
rabbit_log:error("Failed to stop and delete a vhost ~tp"
|
||||
?LOG_ERROR("Failed to stop and delete a vhost ~tp"
|
||||
" on node ~tp."
|
||||
" Reason: ~tp",
|
||||
[VHost, Node, RpcErr]),
|
||||
|
@ -124,7 +125,7 @@ init_vhost(VHost) ->
|
|||
case start_vhost(VHost) of
|
||||
{ok, _} -> ok;
|
||||
{error, {already_started, _}} ->
|
||||
rabbit_log:warning(
|
||||
?LOG_WARNING(
|
||||
"Attempting to start an already started vhost '~ts'.",
|
||||
[VHost]),
|
||||
ok;
|
||||
|
@ -133,13 +134,13 @@ init_vhost(VHost) ->
|
|||
{error, Reason} ->
|
||||
case vhost_restart_strategy() of
|
||||
permanent ->
|
||||
rabbit_log:error(
|
||||
?LOG_ERROR(
|
||||
"Unable to initialize vhost data store for vhost '~ts'."
|
||||
" Reason: ~tp",
|
||||
[VHost, Reason]),
|
||||
throw({error, Reason});
|
||||
transient ->
|
||||
rabbit_log:warning(
|
||||
?LOG_WARNING(
|
||||
"Unable to initialize vhost data store for vhost '~ts'."
|
||||
" The vhost will be stopped for this node. "
|
||||
" Reason: ~tp",
@ -9,6 +9,9 @@
|
|||
%% several others virtual hosts-related modules.
|
||||
-module(rabbit_vhosts).
|
||||
|
||||
-include_lib("kernel/include/logger.hrl").
|
||||
|
||||
|
||||
-define(PERSISTENT_TERM_COUNTER_KEY, rabbit_vhosts_reconciliation_run_counter).
|
||||
|
||||
%% API
|
||||
|
@ -63,11 +66,11 @@ reconcile() ->
|
|||
%% See start_processes_for_all/1.
|
||||
-spec reconcile_once() -> 'ok'.
|
||||
reconcile_once() ->
|
||||
rabbit_log:debug("Will reconcile virtual host processes on all cluster members..."),
|
||||
?LOG_DEBUG("Will reconcile virtual host processes on all cluster members..."),
|
||||
_ = start_processes_for_all(),
|
||||
_ = increment_run_counter(),
|
||||
N = get_run_counter(),
|
||||
rabbit_log:debug("Done with virtual host processes reconciliation (run ~tp)", [N]),
|
||||
?LOG_DEBUG("Done with virtual host processes reconciliation (run ~tp)", [N]),
|
||||
ok.
|
||||
|
||||
-spec on_node_up(Node :: node()) -> 'ok'.
|
||||
|
@ -77,7 +80,7 @@ on_node_up(_Node) ->
|
|||
true ->
|
||||
DelayInSeconds = 10,
|
||||
Delay = DelayInSeconds * 1000,
|
||||
rabbit_log:debug("Will reschedule virtual host process reconciliation after ~b seconds", [DelayInSeconds]),
|
||||
?LOG_DEBUG("Will reschedule virtual host process reconciliation after ~b seconds", [DelayInSeconds]),
|
||||
_ = timer:apply_after(Delay, ?MODULE, reconcile_once, []),
|
||||
ok
|
||||
end.
|
||||
|
@ -111,13 +114,13 @@ reconciliation_interval() ->
|
|||
start_processes_for_all(Nodes) ->
|
||||
Names = list_names(),
|
||||
N = length(Names),
|
||||
rabbit_log:debug("Will make sure that processes of ~p virtual hosts are running on all reachable cluster nodes", [N]),
|
||||
?LOG_DEBUG("Will make sure that processes of ~p virtual hosts are running on all reachable cluster nodes", [N]),
|
||||
[begin
|
||||
try
|
||||
start_on_all_nodes(VH, Nodes)
|
||||
catch
|
||||
_:Err:_Stacktrace ->
|
||||
rabbit_log:error("Could not reconcile virtual host ~ts: ~tp", [VH, Err])
|
||||
?LOG_ERROR("Could not reconcile virtual host ~ts: ~tp", [VH, Err])
|
||||
end
|
||||
end || VH <- Names],
|
||||
ok.
|
||||
|
@ -153,14 +156,14 @@ maybe_start_timer(FunName) ->
|
|||
case N >= 10 of
|
||||
true ->
|
||||
%% Stop after ten runs
|
||||
rabbit_log:debug("Will stop virtual host process reconciliation after ~tp runs", [N]),
|
||||
?LOG_DEBUG("Will stop virtual host process reconciliation after ~tp runs", [N]),
|
||||
ok;
|
||||
false ->
|
||||
case is_reconciliation_enabled() of
|
||||
false -> ok;
|
||||
true ->
|
||||
Delay = DelayInSeconds * 1000,
|
||||
rabbit_log:debug("Will reschedule virtual host process reconciliation after ~b seconds", [DelayInSeconds]),
|
||||
?LOG_DEBUG("Will reschedule virtual host process reconciliation after ~b seconds", [DelayInSeconds]),
|
||||
timer:apply_after(Delay, ?MODULE, FunName, [])
|
||||
end
|
||||
end.
@ -55,6 +55,7 @@
|
|||
proc_file = undefined}).
|
||||
|
||||
-include("include/rabbit_memory.hrl").
|
||||
-include_lib("kernel/include/logger.hrl").
|
||||
|
||||
%%----------------------------------------------------------------------------
|
||||
|
||||
|
@ -89,7 +90,7 @@ get_total_memory() ->
|
|||
{ok, ParsedTotal} ->
|
||||
ParsedTotal;
|
||||
{error, parse_error} ->
|
||||
rabbit_log:warning(
|
||||
?LOG_WARNING(
|
||||
"The override value for the total memmory available is "
|
||||
"not a valid value: ~tp, getting total from the system.",
|
||||
[Value]),
|
||||
|
@ -163,7 +164,7 @@ get_memory_calculation_strategy() ->
|
|||
legacy -> erlang; %% backwards compatibility
|
||||
rss -> rss;
|
||||
UnsupportedValue ->
|
||||
rabbit_log:warning(
|
||||
?LOG_WARNING(
|
||||
"Unsupported value '~tp' for vm_memory_calculation_strategy. "
|
||||
"Supported values: (allocated|erlang|legacy|rss). "
|
||||
"Defaulting to 'rss'",
|
||||
|
@ -252,7 +253,7 @@ get_cached_process_memory_and_limit() ->
|
|||
try
|
||||
gen_server:call(?MODULE, get_cached_process_memory_and_limit, infinity)
|
||||
catch exit:{noproc, Error} ->
|
||||
rabbit_log:warning("Memory monitor process not yet started: ~tp", [Error]),
|
||||
?LOG_WARNING("Memory monitor process not yet started: ~tp", [Error]),
|
||||
ProcessMemory = get_process_memory_uncached(),
|
||||
{ProcessMemory, infinity}
|
||||
end.
|
||||
|
@ -306,7 +307,7 @@ get_total_memory_from_os() ->
|
|||
try
|
||||
get_total_memory(os:type())
|
||||
catch _:Error:Stacktrace ->
|
||||
rabbit_log:warning(
|
||||
?LOG_WARNING(
|
||||
"Failed to get total system memory: ~n~tp~n~tp",
|
||||
[Error, Stacktrace]),
|
||||
unknown
|
||||
|
@ -317,7 +318,7 @@ set_mem_limits(State, {relative, MemLimit}) ->
|
|||
set_mem_limits(State, MemLimit) ->
|
||||
case erlang:system_info(wordsize) of
|
||||
4 ->
|
||||
rabbit_log:warning(
|
||||
?LOG_WARNING(
|
||||
"You are using a 32-bit version of Erlang: you may run into "
|
||||
"memory address~n"
|
||||
"space exhaustion or statistic counters overflow.~n");
|
||||
|
@ -330,7 +331,7 @@ set_mem_limits(State, MemLimit) ->
|
|||
case State of
|
||||
#state { total_memory = undefined,
|
||||
memory_limit = undefined } ->
|
||||
rabbit_log:warning(
|
||||
?LOG_WARNING(
|
||||
"Unknown total memory size for your OS ~tp. "
|
||||
"Assuming memory size is ~tp MiB (~tp bytes).",
|
||||
[os:type(),
|
||||
|
@ -345,7 +346,7 @@ set_mem_limits(State, MemLimit) ->
|
|||
UsableMemory =
|
||||
case get_vm_limit() of
|
||||
Limit when Limit < TotalMemory ->
|
||||
rabbit_log:warning(
|
||||
?LOG_WARNING(
|
||||
"Only ~tp MiB (~tp bytes) of ~tp MiB (~tp bytes) memory usable due to "
|
||||
"limited address space.~n"
|
||||
"Crashes due to memory exhaustion are possible - see~n"
|
||||
|
@ -357,7 +358,7 @@ set_mem_limits(State, MemLimit) ->
|
|||
TotalMemory
|
||||
end,
|
||||
MemLim = interpret_limit(parse_mem_limit(MemLimit), UsableMemory),
|
||||
rabbit_log:info(
|
||||
?LOG_INFO(
|
||||
"Memory high watermark set to ~tp MiB (~tp bytes)"
|
||||
" of ~tp MiB (~tp bytes) total",
|
||||
[trunc(MemLim/?ONE_MiB), MemLim,
|
||||
|
@ -381,7 +382,7 @@ parse_mem_limit({absolute, Limit}) ->
|
|||
case rabbit_resource_monitor_misc:parse_information_unit(Limit) of
|
||||
{ok, ParsedLimit} -> {absolute, ParsedLimit};
|
||||
{error, parse_error} ->
|
||||
rabbit_log:error("Unable to parse vm_memory_high_watermark value ~tp", [Limit]),
|
||||
?LOG_ERROR("Unable to parse vm_memory_high_watermark value ~tp", [Limit]),
|
||||
?DEFAULT_VM_MEMORY_HIGH_WATERMARK
|
||||
end;
|
||||
parse_mem_limit({relative, MemLimit}) ->
|
||||
|
@ -391,13 +392,13 @@ parse_mem_limit(MemLimit) when is_integer(MemLimit) ->
|
|||
parse_mem_limit(MemLimit) when is_float(MemLimit), MemLimit =< ?MAX_VM_MEMORY_HIGH_WATERMARK ->
|
||||
MemLimit;
|
||||
parse_mem_limit(MemLimit) when is_float(MemLimit), MemLimit > ?MAX_VM_MEMORY_HIGH_WATERMARK ->
|
||||
rabbit_log:warning(
|
||||
?LOG_WARNING(
|
||||
"Memory high watermark of ~tp is above the allowed maximum, falling back to ~tp",
|
||||
[MemLimit, ?MAX_VM_MEMORY_HIGH_WATERMARK]
|
||||
),
|
||||
?MAX_VM_MEMORY_HIGH_WATERMARK;
|
||||
parse_mem_limit(MemLimit) ->
|
||||
rabbit_log:warning(
|
||||
?LOG_WARNING(
|
||||
"Memory high watermark of ~tp is invalid, defaulting to ~tp",
|
||||
[MemLimit, ?DEFAULT_VM_MEMORY_HIGH_WATERMARK]
|
||||
),
|
||||
|
@ -419,7 +420,7 @@ internal_update(State0 = #state{memory_limit = MemLimit,
|
|||
State1#state{alarmed = NewAlarmed}.
|
||||
|
||||
emit_update_info(AlarmState, MemUsed, MemLimit) ->
|
||||
rabbit_log:info(
|
||||
?LOG_INFO(
|
||||
"vm_memory_high_watermark ~tp. Memory used:~tp allowed:~tp",
|
||||
[AlarmState, MemUsed, MemLimit]).
|
||||
|
||||
|
@ -458,7 +459,7 @@ cmd(Command, ThrowIfMissing) ->
|
|||
end.
|
||||
|
||||
default_linux_pagesize(CmdOutput) ->
|
||||
rabbit_log:warning(
|
||||
?LOG_WARNING(
|
||||
"Failed to get memory page size, using 4096. Reason: ~ts",
|
||||
[CmdOutput]),
|
||||
4096.
|
||||
|
@ -583,7 +584,7 @@ sysctl(Def) ->
|
|||
list_to_integer(R)
|
||||
catch
|
||||
error:badarg ->
|
||||
rabbit_log:debug("Failed to get total system memory: ~tp", [R]),
|
||||
?LOG_DEBUG("Failed to get total system memory: ~tp", [R]),
|
||||
unknown
|
||||
end.
|
||||
@ -12,6 +12,7 @@
|
|||
-include_lib("kernel/include/file.hrl").
|
||||
-include_lib("amqp_client/include/amqp_client.hrl").
|
||||
-include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl").
|
||||
-include_lib("kernel/include/logger.hrl").
|
||||
|
||||
-compile(export_all).
|
||||
|
||||
|
@ -227,7 +228,7 @@ non_empty_files(Files) ->
|
|||
end || EmptyFile <- empty_files(Files)].
|
||||
|
||||
test_logs_working(LogFiles) ->
|
||||
ok = rabbit_log:error("Log a test message"),
|
||||
ok = ?LOG_ERROR("Log a test message"),
|
||||
%% give the error loggers some time to catch up
|
||||
?awaitMatch(true,
|
||||
lists:all(fun(LogFile) -> [true] =:= non_empty_files([LogFile]) end, LogFiles),
@ -534,7 +534,7 @@ shortstr_size(S) ->
|
|||
|
||||
for (c,v,cls) in spec.constants: genLookupException(c,v,cls)
|
||||
print("lookup_amqp_exception(Code) ->")
|
||||
print(" rabbit_log:warning(\"Unknown AMQP error code '~p'~n\", [Code]),")
|
||||
print(" ?LOG_WARNING(\"Unknown AMQP error code '~p'~n\", [Code]),")
|
||||
print(" {true, ?INTERNAL_ERROR, <<\"INTERNAL_ERROR\">>}.")
|
||||
|
||||
for(c,v,cls) in spec.constants: genAmqpException(c,v,cls)
@ -7,6 +7,9 @@
|
|||
|
||||
-module(app_utils).
|
||||
|
||||
-include_lib("kernel/include/logger.hrl").
|
||||
|
||||
|
||||
-export([load_applications/1,
|
||||
start_applications/1, start_applications/2, start_applications/3,
|
||||
stop_applications/1, stop_applications/2, app_dependency_order/2,
|
||||
|
@ -61,7 +64,7 @@ start_applications(Apps, ErrorHandler, RestartTypes) ->
|
|||
stop_applications(Apps, ErrorHandler) ->
|
||||
manage_applications(fun lists:foldr/3,
|
||||
fun(App) ->
|
||||
rabbit_log:info("Stopping application '~ts'", [App]),
|
||||
?LOG_INFO("Stopping application '~ts'", [App]),
|
||||
application:stop(App)
|
||||
end,
|
||||
fun(App) -> ensure_all_started(App, #{}) end,
@ -7,6 +7,9 @@
|
|||
|
||||
-module(rabbit_amqp_connection).
|
||||
|
||||
-include_lib("kernel/include/logger.hrl").
|
||||
|
||||
|
||||
-export([amqp_params/2]).
|
||||
|
||||
-spec amqp_params(pid(), timeout()) -> [{atom(), term()}].
|
||||
|
@ -14,11 +17,11 @@ amqp_params(ConnPid, Timeout) ->
|
|||
P = try
|
||||
gen_server:call(ConnPid, {info, [amqp_params]}, Timeout)
|
||||
catch exit:{noproc, Error} ->
|
||||
rabbit_log:debug("file ~tp, line ~tp - connection process ~tp not alive: ~tp",
|
||||
?LOG_DEBUG("file ~tp, line ~tp - connection process ~tp not alive: ~tp",
|
||||
[?FILE, ?LINE, ConnPid, Error]),
|
||||
[];
|
||||
_:Error ->
|
||||
rabbit_log:debug("file ~tp, line ~tp - failed to get amqp_params from connection process ~tp: ~tp",
|
||||
?LOG_DEBUG("file ~tp, line ~tp - failed to get amqp_params from connection process ~tp: ~tp",
|
||||
[?FILE, ?LINE, ConnPid, Error]),
|
||||
[]
|
||||
end,
@ -8,6 +8,7 @@
|
|||
-module(rabbit_binary_generator).
|
||||
-include("rabbit_framing.hrl").
|
||||
-include("rabbit.hrl").
|
||||
-include_lib("kernel/include/logger.hrl").
|
||||
|
||||
-export([build_simple_method_frame/3,
|
||||
build_simple_content_frames/4,
|
||||
|
@ -223,7 +224,7 @@ lookup_amqp_exception(#amqp_error{name = Name,
|
|||
ExplBin = amqp_exception_explanation(Text, Expl),
|
||||
{ShouldClose, Code, ExplBin, Method};
|
||||
lookup_amqp_exception(Other, Protocol) ->
|
||||
rabbit_log:warning("Non-AMQP exit reason '~tp'", [Other]),
|
||||
?LOG_WARNING("Non-AMQP exit reason '~tp'", [Other]),
|
||||
{ShouldClose, Code, Text} = Protocol:lookup_amqp_exception(internal_error),
|
||||
{ShouldClose, Code, Text, none}.
|
||||
@ -1724,7 +1724,7 @@ collect_conf_env_file_output(Context, Port, Marker, Output) ->
|
|||
collect_conf_env_file_output(
|
||||
Context, Port, Marker, [Output, UnicodeChunk]);
|
||||
{Port, {data, Chunk}} ->
|
||||
rabbit_log:warning("~tp unexpected non-binary chunk in "
|
||||
?LOG_WARNING("~tp unexpected non-binary chunk in "
|
||||
"conf env file output: ~tp~n", [?MODULE, Chunk])
|
||||
end.
|
||||
|
||||
|
@ -2157,5 +2157,5 @@ unicode_characters_to_list(Input) ->
|
|||
end.
|
||||
|
||||
log_characters_to_list_error(Input, Partial, Rest) ->
|
||||
rabbit_log:error("error converting '~tp' to unicode string "
|
||||
?LOG_ERROR("error converting '~tp' to unicode string "
|
||||
"(partial '~tp', rest '~tp')", [Input, Partial, Rest]).
@ -8,6 +8,7 @@
|
|||
%%
|
||||
-module(rabbit_framing_amqp_0_8).
|
||||
-include("rabbit_framing.hrl").
|
||||
-include_lib("kernel/include/logger.hrl").
|
||||
|
||||
-export([version/0]).
|
||||
-export([lookup_method_name/1]).
|
||||
|
@ -1626,7 +1627,7 @@ lookup_amqp_exception(not_allowed) -> {true, ?NOT_ALLOWED, <<"NOT_ALLOWED">>};
|
|||
lookup_amqp_exception(not_implemented) -> {true, ?NOT_IMPLEMENTED, <<"NOT_IMPLEMENTED">>};
|
||||
lookup_amqp_exception(internal_error) -> {true, ?INTERNAL_ERROR, <<"INTERNAL_ERROR">>};
|
||||
lookup_amqp_exception(Code) ->
|
||||
rabbit_log:warning("Unknown AMQP error code '~p'~n", [Code]),
|
||||
?LOG_WARNING("Unknown AMQP error code '~p'~n", [Code]),
|
||||
{true, ?INTERNAL_ERROR, <<"INTERNAL_ERROR">>}.
|
||||
amqp_exception(?FRAME_METHOD) -> frame_method;
|
||||
amqp_exception(?FRAME_HEADER) -> frame_header;
@ -8,6 +8,7 @@
|
|||
%%
|
||||
-module(rabbit_framing_amqp_0_9_1).
|
||||
-include("rabbit_framing.hrl").
|
||||
-include_lib("kernel/include/logger.hrl").
|
||||
|
||||
-export([version/0]).
|
||||
-export([lookup_method_name/1]).
|
||||
|
@ -1240,7 +1241,7 @@ lookup_amqp_exception(not_allowed) -> {true, ?NOT_ALLOWED, <<"NOT_ALLOWED">>};
|
|||
lookup_amqp_exception(not_implemented) -> {true, ?NOT_IMPLEMENTED, <<"NOT_IMPLEMENTED">>};
|
||||
lookup_amqp_exception(internal_error) -> {true, ?INTERNAL_ERROR, <<"INTERNAL_ERROR">>};
|
||||
lookup_amqp_exception(Code) ->
|
||||
rabbit_log:warning("Unknown AMQP error code '~p'~n", [Code]),
|
||||
?LOG_WARNING("Unknown AMQP error code '~p'~n", [Code]),
|
||||
{true, ?INTERNAL_ERROR, <<"INTERNAL_ERROR">>}.
|
||||
amqp_exception(?FRAME_METHOD) -> frame_method;
|
||||
amqp_exception(?FRAME_HEADER) -> frame_header;
@@ -1,118 +0,0 @@
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2007-2025 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
%%

-module(rabbit_log).

-export([log/3, log/4]).
-export([debug/1, debug/2, debug/3,
         info/1, info/2, info/3,
         notice/1, notice/2, notice/3,
         warning/1, warning/2, warning/3,
         error/1, error/2, error/3,
         critical/1, critical/2, critical/3,
         alert/1, alert/2, alert/3,
         emergency/1, emergency/2, emergency/3,
         none/1, none/2, none/3]).

-include("logging.hrl").

-compile({no_auto_import, [error/2, error/3]}).

%%----------------------------------------------------------------------------

-type category() :: atom().

-spec debug(string()) -> 'ok'.
-spec debug(string(), [any()]) -> 'ok'.
-spec debug(pid() | [tuple()], string(), [any()]) -> 'ok'.
-spec info(string()) -> 'ok'.
-spec info(string(), [any()]) -> 'ok'.
-spec info(pid() | [tuple()], string(), [any()]) -> 'ok'.
-spec notice(string()) -> 'ok'.
-spec notice(string(), [any()]) -> 'ok'.
-spec notice(pid() | [tuple()], string(), [any()]) -> 'ok'.
-spec warning(string()) -> 'ok'.
-spec warning(string(), [any()]) -> 'ok'.
-spec warning(pid() | [tuple()], string(), [any()]) -> 'ok'.
-spec error(string()) -> 'ok'.
-spec error(string(), [any()]) -> 'ok'.
-spec error(pid() | [tuple()], string(), [any()]) -> 'ok'.
-spec critical(string()) -> 'ok'.
-spec critical(string(), [any()]) -> 'ok'.
-spec critical(pid() | [tuple()], string(), [any()]) -> 'ok'.
-spec alert(string()) -> 'ok'.
-spec alert(string(), [any()]) -> 'ok'.
-spec alert(pid() | [tuple()], string(), [any()]) -> 'ok'.
-spec emergency(string()) -> 'ok'.
-spec emergency(string(), [any()]) -> 'ok'.
-spec emergency(pid() | [tuple()], string(), [any()]) -> 'ok'.
-spec none(string()) -> 'ok'.
-spec none(string(), [any()]) -> 'ok'.
-spec none(pid() | [tuple()], string(), [any()]) -> 'ok'.

%%----------------------------------------------------------------------------

-spec log(category(), logger:level(), string()) -> 'ok'.
log(Category, Level, Fmt) -> log(Category, Level, Fmt, []).

-spec log(category(), logger:level(), string(), [any()]) -> 'ok'.
log(default, Level, Fmt, Args) when is_list(Args) ->
    logger:log(Level, Fmt, Args, #{domain => ?RMQLOG_DOMAIN_GLOBAL});
log(Category, Level, Fmt, Args) when is_list(Args) ->
    logger:log(Level, Fmt, Args, #{domain => ?DEFINE_RMQLOG_DOMAIN(Category)}).

debug(Format) -> debug(Format, []).
debug(Format, Args) -> debug(self(), Format, Args).
debug(Pid, Format, Args) ->
    logger:debug(Format, Args, #{pid => Pid,
                                 domain => ?RMQLOG_DOMAIN_GLOBAL}).

info(Format) -> info(Format, []).
info(Format, Args) -> info(self(), Format, Args).
info(Pid, Format, Args) ->
    logger:info(Format, Args, #{pid => Pid,
                                domain => ?RMQLOG_DOMAIN_GLOBAL}).

notice(Format) -> notice(Format, []).
notice(Format, Args) -> notice(self(), Format, Args).
notice(Pid, Format, Args) ->
    logger:notice(Format, Args, #{pid => Pid,
                                  domain => ?RMQLOG_DOMAIN_GLOBAL}).

warning(Format) -> warning(Format, []).
warning(Format, Args) -> warning(self(), Format, Args).
warning(Pid, Format, Args) ->
    logger:warning(Format, Args, #{pid => Pid,
                                   domain => ?RMQLOG_DOMAIN_GLOBAL}).

error(Format) -> error(Format, []).
error(Format, Args) -> error(self(), Format, Args).
error(Pid, Format, Args) ->
    logger:error(Format, Args, #{pid => Pid,
                                 domain => ?RMQLOG_DOMAIN_GLOBAL}).

critical(Format) -> critical(Format, []).
critical(Format, Args) -> critical(self(), Format, Args).
critical(Pid, Format, Args) ->
    logger:critical(Format, Args, #{pid => Pid,
                                    domain => ?RMQLOG_DOMAIN_GLOBAL}).

alert(Format) -> alert(Format, []).
alert(Format, Args) -> alert(self(), Format, Args).
alert(Pid, Format, Args) ->
    logger:alert(Format, Args, #{pid => Pid,
                                 domain => ?RMQLOG_DOMAIN_GLOBAL}).

emergency(Format) -> emergency(Format, []).
emergency(Format, Args) -> emergency(self(), Format, Args).
emergency(Pid, Format, Args) ->
    logger:emergency(Format, Args, #{pid => Pid,
                                     domain => ?RMQLOG_DOMAIN_GLOBAL}).

none(_Format) -> ok.
none(_Format, _Args) -> ok.
none(_Pid, _Format, _Args) -> ok.

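Editorial note: the change applied throughout this diff is mechanical. The rabbit_log facade deleted above goes away, each module includes kernel/include/logger.hrl directly, and every rabbit_log:Level(...) call becomes the corresponding ?LOG_* macro from OTP's logger. A minimal sketch of the resulting shape, using a hypothetical module name that is not part of this commit:

%% Hypothetical example module, for illustration only.
%% Before this commit the call below would have been
%%     rabbit_log:debug("processing ~p", [X])
-module(example_logging).

-include_lib("kernel/include/logger.hrl").

-export([run/1]).

run(X) ->
    %% ?LOG_DEBUG comes from logger.hrl and also records
    %% module/function/line metadata with the event.
    ?LOG_DEBUG("processing ~p", [X]),
    ok.
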
@@ -13,6 +13,7 @@
-include("rabbit_misc.hrl").

-include_lib("kernel/include/file.hrl").
-include_lib("kernel/include/logger.hrl").

-ifdef(TEST).
-export([decompose_pid/1, compose_pid/4]).

@@ -1284,7 +1285,7 @@ safe_ets_update_counter(Tab, Key, UpdateOp) ->
    try
        ets:update_counter(Tab, Key, UpdateOp)
    catch error:badarg:E ->
        rabbit_log:debug("error updating ets counter ~p in table ~p: ~p", [Key, Tab, E]),
        ?LOG_DEBUG("error updating ets counter ~p in table ~p: ~p", [Key, Tab, E]),
        ok
    end.

@@ -1354,7 +1355,7 @@ safe_ets_update_counter(Tab, Key, UpdateOp, OnSuccess, OnFailure) ->
    try
        OnSuccess(ets:update_counter(Tab, Key, UpdateOp))
    catch error:badarg:E ->
        rabbit_log:debug("error updating ets counter ~p in table ~p: ~p", [Key, Tab, E]),
        ?LOG_DEBUG("error updating ets counter ~p in table ~p: ~p", [Key, Tab, E]),
        OnFailure()
    end.

@@ -1373,7 +1374,7 @@ safe_ets_update_element(Tab, Key, ElementSpec) ->
    try
        ets:update_element(Tab, Key, ElementSpec)
    catch error:badarg:E ->
        rabbit_log:debug("error updating ets element ~p in table ~p: ~p", [Key, Tab, E]),
        ?LOG_DEBUG("error updating ets element ~p in table ~p: ~p", [Key, Tab, E]),
        false
    end.

@@ -1410,7 +1411,7 @@ safe_ets_update_element(Tab, Key, ElementSpec, OnSuccess, OnFailure) ->
    try
        OnSuccess(ets:update_element(Tab, Key, ElementSpec))
    catch error:badarg:E ->
        rabbit_log:debug("error updating ets element ~p in table ~p: ~p", [Key, Tab, E]),
        ?LOG_DEBUG("error updating ets element ~p in table ~p: ~p", [Key, Tab, E]),
        OnFailure(),
        false
    end.

@@ -14,6 +14,7 @@
-define(ERROR_LOGGER_HANDLER, rabbit_error_logger_handler).

-include_lib("kernel/include/inet.hrl").
-include_lib("kernel/include/logger.hrl").

%%
%% API

@@ -51,7 +52,7 @@ names(Hostname) ->
names(Hostname, 0) ->
    epmd_names(Hostname);
names(Hostname, RetriesLeft) ->
    rabbit_log:debug("Getting epmd names for hostname '~ts', ~b retries left",
    ?LOG_DEBUG("Getting epmd names for hostname '~ts', ~b retries left",
        [Hostname, RetriesLeft]),
    case catch epmd_names(Hostname) of
        {ok, R } -> {ok, R};

@@ -131,7 +132,7 @@ port_shutdown_loop(Port) ->
        {Port, closed} -> ok;
        {Port, {data, _}} -> port_shutdown_loop(Port);
        {'EXIT', Port, Reason} ->
            rabbit_log:error("Failed to start a one-off Erlang VM to keep epmd alive: ~tp", [Reason])
            ?LOG_ERROR("Failed to start a one-off Erlang VM to keep epmd alive: ~tp", [Reason])
    after 15000 ->
        %% ensure the port is closed
        Port ! {self(), close},

@@ -7,6 +7,9 @@

-module(rabbit_ssl_options).

-include_lib("kernel/include/logger.hrl").


-export([
    fix/1,
    fix_client/1,

@@ -86,7 +89,7 @@ make_verify_fun(Module, Function, InitialUserState) ->
        Module:module_info()
    catch
        _:Exception ->
            rabbit_log:error("TLS verify_fun: module ~ts missing: ~tp",
            ?LOG_ERROR("TLS verify_fun: module ~ts missing: ~tp",
                [Module, Exception]),
            throw({error, {invalid_verify_fun, missing_module}})
    end,

@@ -109,7 +112,7 @@ make_verify_fun(Module, Function, InitialUserState) ->
            Module:Function(Args)
        end;
    _ ->
        rabbit_log:error("TLS verify_fun: no ~ts:~ts/3 exported",
        ?LOG_ERROR("TLS verify_fun: no ~ts:~ts/3 exported",
            [Module, Function]),
        throw({error, {invalid_verify_fun, function_not_exported}})
    end.

@@ -27,6 +27,7 @@
%% When a socket write fails, writer will exit.

-include("rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-export([start/6, start_link/6, start/7, start_link/7, start/8, start_link/8]).

-export([init/1,

@@ -264,10 +265,10 @@ handle_message(emit_stats, State = #wstate{reader = ReaderPid}) ->
handle_message(ok, State) ->
    State;
handle_message({_Ref, ok} = Msg, State) ->
    rabbit_log:warning("AMQP 0-9-1 channel writer has received a message it does not support: ~p", [Msg]),
    ?LOG_WARNING("AMQP 0-9-1 channel writer has received a message it does not support: ~p", [Msg]),
    State;
handle_message({ok, _Ref} = Msg, State) ->
    rabbit_log:warning("AMQP 0-9-1 channel writer has received a message it does not support: ~p", [Msg]),
    ?LOG_WARNING("AMQP 0-9-1 channel writer has received a message it does not support: ~p", [Msg]),
    State;
handle_message(Message, _State) ->
    exit({writer, message_not_understood, Message}).

@@ -7,6 +7,9 @@

-module(worker_pool_sup).

-include_lib("kernel/include/logger.hrl").


-behaviour(supervisor).

-export([start_link/0, start_link/1, start_link/2]).

@@ -29,11 +32,11 @@ start_link() ->
    start_link(Size).

start_link(PoolSize) ->
    rabbit_log:info("Will use ~tp processes for default worker pool", [PoolSize]),
    ?LOG_INFO("Will use ~tp processes for default worker pool", [PoolSize]),
    start_link(PoolSize, worker_pool:default_pool()).

start_link(PoolSize, PoolName) ->
    rabbit_log:info("Starting worker pool '~tp' with ~tp processes in it", [PoolName, PoolSize]),
    ?LOG_INFO("Starting worker pool '~tp' with ~tp processes in it", [PoolName, PoolSize]),
    SupName = list_to_atom(atom_to_list(PoolName) ++ "_sup"),
    supervisor:start_link({local, SupName}, ?MODULE, [PoolSize, PoolName]).

@@ -7,6 +7,7 @@

-module(rabbit_auth_backend_cache).
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").

-behaviour(rabbit_authn_backend).
-behaviour(rabbit_authz_backend).

@@ -68,13 +69,13 @@ expiry_timestamp(_) -> never.

clear_cache_cluster_wide() ->
    Nodes = rabbit_nodes:list_running(),
    rabbit_log:warning("Clearing auth_backend_cache in all nodes : ~p", [Nodes]),
    ?LOG_WARNING("Clearing auth_backend_cache in all nodes : ~p", [Nodes]),
    rabbit_misc:append_rpc_all_nodes(Nodes, ?MODULE, clear_cache, []).

clear_cache() ->
    {ok, AuthCache} = application:get_env(rabbitmq_auth_backend_cache,
                                          cache_module),
    rabbit_log:warning("Clearing auth_backend_cache"),
    ?LOG_WARNING("Clearing auth_backend_cache"),
    AuthCache:clear().

with_cache(BackendType, {F, A}, Fun) ->

@@ -8,6 +8,7 @@
-module(rabbit_auth_backend_http).

-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").

-behaviour(rabbit_authn_backend).
-behaviour(rabbit_authz_backend).

@@ -180,10 +181,10 @@ do_http_req(Path0, Query) ->
    Request = case rabbit_data_coercion:to_atom(Method) of
        get ->
            Path = Path0 ++ "?" ++ Query,
            rabbit_log:debug("auth_backend_http: GET ~ts", [Path]),
            ?LOG_DEBUG("auth_backend_http: GET ~ts", [Path]),
            {Path, [{"Host", HostHdr}]};
        post ->
            rabbit_log:debug("auth_backend_http: POST ~ts", [Path0]),
            ?LOG_DEBUG("auth_backend_http: POST ~ts", [Path0]),
            {Path0, [{"Host", HostHdr}], "application/x-www-form-urlencoded", Query}
    end,
    RequestTimeout =

@@ -196,12 +197,12 @@ do_http_req(Path0, Query) ->
            {ok, Val2} -> Val2;
            _ -> RequestTimeout
        end,
    rabbit_log:debug("auth_backend_http: request timeout: ~tp, connection timeout: ~tp", [RequestTimeout, ConnectionTimeout]),
    ?LOG_DEBUG("auth_backend_http: request timeout: ~tp, connection timeout: ~tp", [RequestTimeout, ConnectionTimeout]),
    HttpOpts = [{timeout, RequestTimeout},
                {connect_timeout, ConnectionTimeout}] ++ ssl_options(),
    case httpc:request(Method, Request, HttpOpts, []) of
        {ok, {{_HTTP, Code, _}, _Headers, Body}} ->
            rabbit_log:debug("auth_backend_http: response code is ~tp, body: ~tp", [Code, Body]),
            ?LOG_DEBUG("auth_backend_http: response code is ~tp, body: ~tp", [Code, Body]),
            case lists:member(Code, ?SUCCESSFUL_RESPONSE_CODES) of
                true -> parse_resp(Body);
                false -> {error, {Code, Body}}

@@ -216,7 +217,7 @@ ssl_options() ->
    Opts1 = [{ssl, rabbit_ssl_options:fix_client(Opts0)}],
    case application:get_env(rabbitmq_auth_backend_http, ssl_hostname_verification) of
        {ok, wildcard} ->
            rabbit_log:debug("Enabling wildcard-aware hostname verification for HTTP client connections"),
            ?LOG_DEBUG("Enabling wildcard-aware hostname verification for HTTP client connections"),
            %% Needed for HTTPS connections that connect to servers that use wildcard certificates.
            %% See https://erlang.org/doc/man/public_key.html#pkix_verify_hostname_match_fun-1.
            [{customize_hostname_check, [{match_fun, public_key:pkix_verify_hostname_match_fun(https)}]} | Opts1];

@@ -9,6 +9,7 @@

-include_lib("rabbit_common/include/rabbit.hrl").
-include("oauth2.hrl").
-include_lib("kernel/include/logger.hrl").

-behaviour(rabbit_authn_backend).
-behaviour(rabbit_authz_backend).

@@ -63,7 +64,7 @@ description() ->
user_login_authentication(Username, AuthProps) ->
    case authenticate(Username, AuthProps) of
        {refused, Msg, Args} = AuthResult ->
            rabbit_log:debug(Msg, Args),
            ?LOG_DEBUG(Msg, Args),
            AuthResult;
        _ = AuthResult ->
            AuthResult

@@ -179,7 +180,7 @@ with_decoded_token(DecodedToken, Fun) ->
    case validate_token_expiry(DecodedToken) of
        ok -> Fun(DecodedToken);
        {error, Msg} = Err ->
            rabbit_log:error(Msg),
            ?LOG_ERROR(Msg),
            Err
    end.

@@ -418,7 +419,7 @@ username_from(PreferredUsernameClaims, DecodedToken) ->
        [ _One ] -> _One;
        [ _One | _ ] -> _One
    end,
    rabbit_log:debug("Computing username from client's JWT token: ~ts -> ~ts ",
    ?LOG_DEBUG("Computing username from client's JWT token: ~ts -> ~ts ",
        [lists:flatten(io_lib:format("~p",[ResolvedUsernameClaims])), Username]),
    Username.

@@ -8,6 +8,7 @@
-module(rabbit_oauth2_provider).

-include("oauth2.hrl").
-include_lib("kernel/include/logger.hrl").

-export([
    get_internal_oauth_provider/0, get_internal_oauth_provider/1,

@@ -101,7 +102,7 @@ do_replace_signing_keys(SigningKeys, root) ->
        proplists:get_value(signing_keys, KeyConfig1, #{}),
        SigningKeys)} | KeyConfig1],
    set_env(key_config, KeyConfig2),
    rabbit_log:debug("Replacing signing keys for key_config with ~p keys",
    ?LOG_DEBUG("Replacing signing keys for key_config with ~p keys",
        [maps:size(SigningKeys)]),
    SigningKeys;

@@ -115,7 +116,7 @@ do_replace_signing_keys(SigningKeys, OauthProviderId) ->

    OauthProviders = maps:put(OauthProviderId, OauthProvider, OauthProviders0),
    set_env(oauth_providers, OauthProviders),
    rabbit_log:debug("Replacing signing keys for ~p -> ~p with ~p keys",
    ?LOG_DEBUG("Replacing signing keys for ~p -> ~p with ~p keys",
        [OauthProviderId, OauthProvider, maps:size(SigningKeys)]),
    SigningKeys.

@@ -16,6 +16,7 @@

-include("oauth2.hrl").
-include_lib("jose/include/jose_jwk.hrl").
-include_lib("kernel/include/logger.hrl").

-import(rabbit_data_coercion, [
    to_map/1]).

@@ -44,7 +45,7 @@ add_signing_key(KeyId, Type, Value) ->
-spec update_jwks_signing_keys(oauth_provider()) -> ok | {error, term()}.
update_jwks_signing_keys(#oauth_provider{id = Id, jwks_uri = JwksUrl,
                                         ssl_options = SslOptions}) ->
    rabbit_log:debug("Downloading signing keys from ~tp (TLS options: ~p)",
    ?LOG_DEBUG("Downloading signing keys from ~tp (TLS options: ~p)",
        [JwksUrl, format_ssl_options(SslOptions)]),
    case uaa_jwks:get(JwksUrl, SslOptions) of
        {ok, {_, _, JwksBody}} ->

@@ -52,13 +53,13 @@ update_jwks_signing_keys(#oauth_provider{id = Id, jwks_uri = JwksUrl,
                jose:decode(erlang:iolist_to_binary(JwksBody)), []),
            Keys = maps:from_list(lists:map(fun(Key) ->
                {maps:get(<<"kid">>, Key, undefined), {json, Key}} end, KeyList)),
            rabbit_log:debug("Downloaded ~p signing keys", [maps:size(Keys)]),
            ?LOG_DEBUG("Downloaded ~p signing keys", [maps:size(Keys)]),
            case replace_signing_keys(Keys, Id) of
                {error, _} = Err -> Err;
                _ -> ok
            end;
        {error, _} = Err ->
            rabbit_log:error("Failed to download signing keys: ~tp", [Err]),
            ?LOG_ERROR("Failed to download signing keys: ~tp", [Err]),
            Err
    end.

@@ -66,7 +67,7 @@ update_jwks_signing_keys(#oauth_provider{id = Id, jwks_uri = JwksUrl,
    -> {boolean(), map()} | {error, term()}.
decode_and_verify(Token, ResourceServer, InternalOAuthProvider) ->
    OAuthProviderId = InternalOAuthProvider#internal_oauth_provider.id,
    rabbit_log:debug("Decoding token for resource_server: ~p using oauth_provider_id: ~p",
    ?LOG_DEBUG("Decoding token for resource_server: ~p using oauth_provider_id: ~p",
        [ResourceServer#resource_server.id,
         format_oauth_provider_id(OAuthProviderId)]),
    Result = case uaa_jwt_jwt:get_key_id(Token) of

@@ -81,7 +82,7 @@ decode_and_verify(Token, ResourceServer, InternalOAuthProvider) ->
    case get_jwk(KeyId, InternalOAuthProvider) of
        {ok, JWK} ->
            Algorithms = InternalOAuthProvider#internal_oauth_provider.algorithms,
            rabbit_log:debug("Verifying signature using signing_key_id : '~tp' and algorithms: ~p",
            ?LOG_DEBUG("Verifying signature using signing_key_id : '~tp' and algorithms: ~p",
                [KeyId, Algorithms]),
            uaa_jwt_jwt:decode_and_verify(Algorithms, JWK, Token);
        {error, _} = Err3 ->

@@ -118,7 +119,7 @@ get_jwk(KeyId, InternalOAuthProvider, AllowUpdateJwks) ->
        undefined ->
            case AllowUpdateJwks of
                true ->
                    rabbit_log:debug("Signing key '~tp' not found. Downloading it... ", [KeyId]),
                    ?LOG_DEBUG("Signing key '~tp' not found. Downloading it... ", [KeyId]),
                    case get_oauth_provider(OAuthProviderId, [jwks_uri]) of
                        {ok, OAuthProvider} ->
                            case update_jwks_signing_keys(OAuthProvider) of

@@ -130,15 +131,15 @@ get_jwk(KeyId, InternalOAuthProvider, AllowUpdateJwks) ->
                                Err
                            end;
                        {error, _} = Error ->
                            rabbit_log:debug("Unable to download signing keys due to ~p", [Error]),
                            ?LOG_DEBUG("Unable to download signing keys due to ~p", [Error]),
                            Error
                    end;
                false ->
                    rabbit_log:debug("Signing key '~tp' not found. Downloading is not allowed", [KeyId]),
                    ?LOG_DEBUG("Signing key '~tp' not found. Downloading is not allowed", [KeyId]),
                    {error, key_not_found}
            end;
        {Type, Value} ->
            rabbit_log:debug("Signing key ~p found", [KeyId]),
            ?LOG_DEBUG("Signing key ~p found", [KeyId]),
            case Type of
                json -> uaa_jwt_jwk:make_jwk(Value);
                pem -> uaa_jwt_jwk:from_pem(Value);

@@ -7,6 +7,9 @@

-module(wildcard).

-include_lib("kernel/include/logger.hrl").


-export([match/2]).

-spec match(Subject :: binary(), Pattern :: binary()) -> boolean().

@@ -52,7 +55,7 @@ parse_pattern(Pattern) ->
    Parts = binary:split(Pattern, <<"*">>, [global]),
    try lists:map(fun(Part) -> cow_qs:urldecode(Part) end, Parts)
    catch Type:Error ->
        rabbit_log:warning("Invalid pattern ~tp : ~tp",
        ?LOG_WARNING("Invalid pattern ~tp : ~tp",
            [Pattern, {Type, Error}]),
        invalid
    end.
Some files were not shown because too many files have changed in this diff.