Merge pull request #14232 from rabbitmq/consistent-logging

Consistent logging
Michal Kuratczyk 2025-07-18 10:58:40 +02:00 committed by GitHub
commit febb58003d
217 changed files with 1805 additions and 2417 deletions
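
The change applied across these 217 files follows one pattern: each module gains -include_lib("kernel/include/logger.hrl"). and its assorted rabbit_log:*, logger:* and error_logger:* calls are rewritten to the OTP ?LOG_* macros. A minimal before/after sketch of that pattern, using a hypothetical module (the module name and message are illustrative, not taken from the diff):

%% Hypothetical module showing the migration pattern used throughout this commit.
%% The ?LOG_* macros also attach module/function/line metadata to each log event,
%% which the plain logger:/rabbit_log: calls do not.
-module(logging_style_demo).
-include_lib("kernel/include/logger.hrl").
-export([old_style/1, new_style/1]).

old_style(Reason) ->
    %% pre-commit style
    logger:warning("connection closed: ~tp", [Reason]).

new_style(Reason) ->
    %% post-commit style
    ?LOG_WARNING("connection closed: ~tp", [Reason]).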

View File

@ -12,6 +12,7 @@
-include("amqp10_client_internal.hrl").
-include_lib("amqp10_common/include/amqp10_framing.hrl").
-include_lib("amqp10_common/include/amqp10_types.hrl").
-include_lib("kernel/include/logger.hrl").
%% public API
-export([open/1,
@ -247,8 +248,8 @@ hdr_sent(_EvtType, {protocol_header_received, 0, 1, 0, 0}, State) ->
end;
hdr_sent(_EvtType, {protocol_header_received, Protocol, Maj, Min,
Rev}, State) ->
logger:warning("Unsupported protocol version: ~b ~b.~b.~b",
[Protocol, Maj, Min, Rev]),
?LOG_WARNING("Unsupported protocol version: ~b ~b.~b.~b",
[Protocol, Maj, Min, Rev]),
{stop, normal, State};
hdr_sent({call, From}, begin_session,
#state{pending_session_reqs = PendingSessionReqs} = State) ->
@ -342,8 +343,8 @@ opened(info, {'DOWN', MRef, process, _, _Info},
ok = notify_closed(Config, shutdown),
{stop, normal};
opened(_EvtType, Frame, State) ->
logger:warning("Unexpected connection frame ~tp when in state ~tp ",
[Frame, State]),
?LOG_WARNING("Unexpected connection frame ~tp when in state ~tp ",
[Frame, State]),
keep_state_and_data.
close_sent(_EvtType, heartbeat, _Data) ->

View File

@ -10,6 +10,7 @@
-include("amqp10_client_internal.hrl").
-include_lib("amqp10_common/include/amqp10_framing.hrl").
-include_lib("kernel/include/logger.hrl").
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
@ -141,33 +142,33 @@ handle_event(info, {gun_ws, WsPid, StreamRef, WsFrame}, StateName,
{binary, Bin} ->
handle_socket_input(Bin, StateName, State);
close ->
logger:info("peer closed AMQP over WebSocket connection in state '~s'",
[StateName]),
?LOG_INFO("peer closed AMQP over WebSocket connection in state '~s'",
[StateName]),
{stop, normal, socket_closed(State)};
{close, ReasonStatusCode, ReasonUtf8} ->
logger:info("peer closed AMQP over WebSocket connection in state '~s', reason: ~b ~ts",
[StateName, ReasonStatusCode, ReasonUtf8]),
?LOG_INFO("peer closed AMQP over WebSocket connection in state '~s', reason: ~b ~ts",
[StateName, ReasonStatusCode, ReasonUtf8]),
{stop, {shutdown, {ReasonStatusCode, ReasonUtf8}}, socket_closed(State)}
end;
handle_event(info, {TcpError, _Sock, Reason}, StateName, State)
when TcpError == tcp_error orelse TcpError == ssl_error ->
logger:warning("AMQP 1.0 connection socket errored, connection state: '~ts', reason: '~tp'",
[StateName, Reason]),
?LOG_WARNING("AMQP 1.0 connection socket errored, connection state: '~ts', reason: '~tp'",
[StateName, Reason]),
{stop, {error, Reason}, socket_closed(State)};
handle_event(info, {TcpClosed, _}, StateName, State)
when TcpClosed == tcp_closed orelse TcpClosed == ssl_closed ->
logger:info("AMQP 1.0 connection socket was closed, connection state: '~ts'",
?LOG_INFO("AMQP 1.0 connection socket was closed, connection state: '~ts'",
[StateName]),
{stop, normal, socket_closed(State)};
handle_event(info, {gun_down, WsPid, _Proto, Reason, _Streams}, StateName,
#state{socket = {ws, WsPid, _StreamRef}} = State) ->
logger:warning("AMQP over WebSocket process ~p lost connection in state: '~s': ~p",
[WsPid, StateName, Reason]),
?LOG_WARNING("AMQP over WebSocket process ~p lost connection in state: '~s': ~p",
[WsPid, StateName, Reason]),
{stop, Reason, socket_closed(State)};
handle_event(info, {'DOWN', _Mref, process, WsPid, Reason}, StateName,
#state{socket = {ws, WsPid, _StreamRef}} = State) ->
logger:warning("AMQP over WebSocket process ~p terminated in state: '~s': ~p",
[WsPid, StateName, Reason]),
?LOG_WARNING("AMQP over WebSocket process ~p terminated in state: '~s': ~p",
[WsPid, StateName, Reason]),
{stop, Reason, socket_closed(State)};
handle_event(info, heartbeat, _StateName, #state{connection = Connection}) ->

View File

@ -12,7 +12,7 @@
% -define(debug, true).
-ifdef(debug).
-define(DBG(F, A), error_logger:info_msg(F, A)).
-define(DBG(F, A), ?LOG_INFO(F, A)).
-else.
-define(DBG(F, A), ok).
-endif.
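
For this conditional ?DBG macro, only the debug-enabled expansion changes: error_logger:info_msg/2 is replaced by ?LOG_INFO/2. A hypothetical sketch of how the compile-time switch behaves (module name and compile flag usage are assumptions for illustration):

%% Compiled normally, ?DBG/2 expands to the no-op 'ok'; compiled with
%% erlc -Ddebug (which defines the 'debug' macro), it expands to ?LOG_INFO/2.
-module(dbg_macro_demo).
-include_lib("kernel/include/logger.hrl").
-export([handle_frame/1]).

-ifdef(debug).
-define(DBG(F, A), ?LOG_INFO(F, A)).
-else.
-define(DBG(F, A), ok).
-endif.

handle_frame(Frame) ->
    ?DBG("received frame: ~tp", [Frame]),
    ok.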

View File

@ -12,6 +12,7 @@
-include("amqp10_client_internal.hrl").
-include_lib("amqp10_common/include/amqp10_framing.hrl").
-include_lib("amqp10_common/include/amqp10_types.hrl").
-include_lib("kernel/include/logger.hrl").
%% Public API.
-export(['begin'/1,
@ -434,7 +435,7 @@ mapped(cast, {Transfer0 = #'v1_0.transfer'{handle = {uint, InHandle}},
notify_credit_exhausted(Link3),
{keep_state, State};
{transfer_limit_exceeded, Link3, State} ->
logger:warning("transfer_limit_exceeded for link ~tp", [Link3]),
?LOG_WARNING("transfer_limit_exceeded for link ~tp", [Link3]),
Link = detach_with_error_cond(Link3,
State,
?V_1_0_LINK_ERROR_TRANSFER_LIMIT_EXCEEDED,
@ -446,7 +447,7 @@ mapped(cast, {Transfer0 = #'v1_0.transfer'{handle = {uint, InHandle}},
io_lib:format(
"~s checksum error: expected ~b, actual ~b",
[FooterOpt, Expected, Actual])),
logger:warning("deteaching link ~tp due to ~s", [Link2, Description]),
?LOG_WARNING("deteaching link ~tp due to ~s", [Link2, Description]),
Link = detach_with_error_cond(Link2,
State0,
?V_1_0_AMQP_ERROR_DECODE_ERROR,
@ -485,8 +486,8 @@ mapped(cast, #'v1_0.disposition'{role = true,
{keep_state, State#state{outgoing_unsettled = Unsettled}};
mapped(cast, Frame, State) ->
logger:warning("Unhandled session frame ~tp in state ~tp",
[Frame, State]),
?LOG_WARNING("Unhandled session frame ~tp in state ~tp",
[Frame, State]),
{keep_state, State};
mapped({call, From},
{transfer, _Transfer, _Sections},
@ -566,8 +567,8 @@ mapped({call, From}, Msg, State) ->
{keep_state, State1, {reply, From, Reply}};
mapped(_EvtType, Msg, _State) ->
logger:warning("amqp10_session: unhandled msg in mapped state ~W",
[Msg, 10]),
?LOG_WARNING("amqp10_session: unhandled msg in mapped state ~W",
[Msg, 10]),
keep_state_and_data.
end_sent(_EvtType, #'v1_0.end'{} = End, State) ->
@ -1375,6 +1376,7 @@ format_status(Status = #{data := Data0}) ->
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-include_lib("kernel/include/logger.hrl").
handle_session_flow_test() ->
% see spec section: 2.5.6 for logic

View File

@ -14,11 +14,6 @@
-define(MAX_CHANNEL_NUMBER, 65535).
-define(LOG_DEBUG(Format), error_logger:info_msg(Format)).
-define(LOG_INFO(Format, Args), error_logger:info_msg(Format, Args)).
-define(LOG_WARN(Format, Args), error_logger:warning_msg(Format, Args)).
-define(LOG_ERR(Format, Args), error_logger:error_msg(Format, Args)).
-define(CLIENT_CAPABILITIES,
[{<<"publisher_confirms">>, bool, true},
{<<"exchange_exchange_bindings">>, bool, true},

View File

@ -54,6 +54,7 @@
-module(amqp_channel).
-include("amqp_client_internal.hrl").
-include_lib("kernel/include/logger.hrl").
-behaviour(gen_server).
@ -514,7 +515,7 @@ handle_info({bump_credit, Msg}, State) ->
{noreply, State};
%% @private
handle_info(timed_out_flushing_channel, State) ->
?LOG_WARN("Channel (~tp) closing: timed out flushing while "
?LOG_WARNING("Channel (~tp) closing: timed out flushing while "
"connection closing", [self()]),
{stop, timed_out_flushing_channel, State};
%% @private
@ -523,7 +524,7 @@ handle_info({'DOWN', _, process, ReturnHandler, shutdown},
{noreply, State#state{return_handler = none}};
handle_info({'DOWN', _, process, ReturnHandler, Reason},
State = #state{return_handler = {ReturnHandler, _Ref}}) ->
?LOG_WARN("Channel (~tp): Unregistering return handler ~tp because it died. "
?LOG_WARNING("Channel (~tp): Unregistering return handler ~tp because it died. "
"Reason: ~tp", [self(), ReturnHandler, Reason]),
{noreply, State#state{return_handler = none}};
%% @private
@ -532,7 +533,7 @@ handle_info({'DOWN', _, process, ConfirmHandler, shutdown},
{noreply, State#state{confirm_handler = none}};
handle_info({'DOWN', _, process, ConfirmHandler, Reason},
State = #state{confirm_handler = {ConfirmHandler, _Ref}}) ->
?LOG_WARN("Channel (~tp): Unregistering confirm handler ~tp because it died. "
?LOG_WARNING("Channel (~tp): Unregistering confirm handler ~tp because it died. "
"Reason: ~tp", [self(), ConfirmHandler, Reason]),
{noreply, State#state{confirm_handler = none}};
%% @private
@ -541,7 +542,7 @@ handle_info({'DOWN', _, process, FlowHandler, shutdown},
{noreply, State#state{flow_handler = none}};
handle_info({'DOWN', _, process, FlowHandler, Reason},
State = #state{flow_handler = {FlowHandler, _Ref}}) ->
?LOG_WARN("Channel (~tp): Unregistering flow handler ~tp because it died. "
?LOG_WARNING("Channel (~tp): Unregistering flow handler ~tp because it died. "
"Reason: ~tp", [self(), FlowHandler, Reason]),
{noreply, State#state{flow_handler = none}};
handle_info({'DOWN', _, process, QPid, _Reason}, State) ->
@ -591,13 +592,13 @@ handle_method_to_server(Method, AmqpMsg, From, Sender, Flow,
{noreply, rpc_top_half(Method, build_content(AmqpMsg),
From, Sender, Flow, State1)};
{ok, none, BlockReply} ->
?LOG_WARN("Channel (~tp): discarding method ~tp in cast.~n"
?LOG_WARNING("Channel (~tp): discarding method ~tp in cast.~n"
"Reason: ~tp", [self(), Method, BlockReply]),
{noreply, State};
{ok, _, BlockReply} ->
{reply, BlockReply, State};
{{_, InvalidMethodMessage}, none, _} ->
?LOG_WARN("Channel (~tp): ignoring cast of ~tp method. " ++
?LOG_WARNING("Channel (~tp): ignoring cast of ~tp method. " ++
InvalidMethodMessage ++ "", [self(), Method]),
{noreply, State};
{{InvalidMethodReply, _}, _, _} ->
@ -779,9 +780,9 @@ handle_method_from_server1(
#'basic.return'{} = BasicReturn, AmqpMsg,
State = #state{return_handler = ReturnHandler}) ->
_ = case ReturnHandler of
none -> ?LOG_WARN("Channel (~tp): received {~tp, ~tp} but there is "
"no return handler registered",
[self(), BasicReturn, AmqpMsg]);
none -> ?LOG_WARNING("Channel (~tp): received {~tp, ~tp} but there is "
"no return handler registered",
[self(), BasicReturn, AmqpMsg]);
{Pid, _Ref} -> Pid ! {BasicReturn, AmqpMsg}
end,
{noreply, State};
@ -794,7 +795,7 @@ handle_method_from_server1(#'basic.ack'{} = BasicAck, none,
{noreply, update_confirm_set(BasicAck, State)};
handle_method_from_server1(#'basic.nack'{} = BasicNack, none,
#state{confirm_handler = none} = State) ->
?LOG_WARN("Channel (~tp): received ~tp but there is no "
?LOG_WARNING("Channel (~tp): received ~tp but there is no "
"confirm handler registered", [self(), BasicNack]),
{noreply, update_confirm_set(BasicNack, State)};
handle_method_from_server1(#'basic.nack'{} = BasicNack, none,
@ -834,7 +835,7 @@ handle_connection_closing(CloseType, Reason,
handle_channel_exit(Reason = #amqp_error{name = ErrorName, explanation = Expl},
State = #state{connection = Connection, number = Number}) ->
%% Sent by rabbit_channel for hard errors in the direct case
?LOG_ERR("connection ~tp, channel ~tp - error:~n~tp",
?LOG_ERROR("connection ~tp, channel ~tp - error:~n~tp",
[Connection, Number, Reason]),
{true, Code, _} = ?PROTOCOL:lookup_amqp_exception(ErrorName),
ReportedReason = {server_initiated_close, Code, Expl},
@ -930,7 +931,7 @@ server_misbehaved(#amqp_error{} = AmqpError, State = #state{number = Number}) ->
{0, _} ->
handle_shutdown({server_misbehaved, AmqpError}, State);
{_, Close} ->
?LOG_WARN("Channel (~tp) flushing and closing due to soft "
?LOG_WARNING("Channel (~tp) flushing and closing due to soft "
"error caused by the server ~tp", [self(), AmqpError]),
Self = self(),
spawn(fun () -> call(Self, Close) end),

View File

@ -9,6 +9,7 @@
-module(amqp_channels_manager).
-include("amqp_client_internal.hrl").
-include_lib("kernel/include/logger.hrl").
-behaviour(gen_server).

View File

@ -59,6 +59,7 @@
-module(amqp_connection).
-include("amqp_client_internal.hrl").
-include_lib("kernel/include/logger.hrl").
-export([open_channel/1, open_channel/2, open_channel/3, register_blocked_handler/2]).
-export([start/1, start/2, close/1, close/2, close/3, close/4]).
@ -427,7 +428,7 @@ maybe_update_call_timeout(BaseTimeout, CallTimeout)
ok;
maybe_update_call_timeout(BaseTimeout, CallTimeout) ->
EffectiveSafeCallTimeout = amqp_util:safe_call_timeout(BaseTimeout),
?LOG_WARN("AMQP 0-9-1 client call timeout was ~tp ms, is updated to a safe effective "
?LOG_WARNING("AMQP 0-9-1 client call timeout was ~tp ms, is updated to a safe effective "
"value of ~tp ms", [CallTimeout, EffectiveSafeCallTimeout]),
amqp_util:update_call_timeout(EffectiveSafeCallTimeout),
ok.

View File

@ -9,6 +9,7 @@
-module(amqp_direct_connection).
-include("amqp_client_internal.hrl").
-include_lib("kernel/include/logger.hrl").
-behaviour(amqp_gen_connection).

View File

@ -9,6 +9,7 @@
-module(amqp_gen_connection).
-include("amqp_client_internal.hrl").
-include_lib("kernel/include/logger.hrl").
-behaviour(gen_server).
@ -191,8 +192,8 @@ handle_cast(channels_terminated, State) ->
handle_cast({hard_error_in_channel, _Pid, Reason}, State) ->
server_initiated_close(Reason, State);
handle_cast({channel_internal_error, Pid, Reason}, State) ->
?LOG_WARN("Connection (~tp) closing: internal error in channel (~tp): ~tp",
[self(), Pid, Reason]),
?LOG_WARNING("Connection (~tp) closing: internal error in channel (~tp): ~tp",
[self(), Pid, Reason]),
internal_error(Pid, Reason, State);
handle_cast({server_misbehaved, AmqpError}, State) ->
server_misbehaved_close(AmqpError, State);
@ -205,12 +206,12 @@ handle_cast({register_blocked_handler, HandlerPid}, State) ->
%% @private
handle_info({'DOWN', _, process, BlockHandler, Reason},
State = #state{block_handler = {BlockHandler, _Ref}}) ->
?LOG_WARN("Connection (~tp): Unregistering connection.{blocked,unblocked} handler ~tp because it died. "
?LOG_WARNING("Connection (~tp): Unregistering connection.{blocked,unblocked} handler ~tp because it died. "
"Reason: ~tp", [self(), BlockHandler, Reason]),
{noreply, State#state{block_handler = none}};
handle_info({'EXIT', BlockHandler, Reason},
State = #state{block_handler = {BlockHandler, Ref}}) ->
?LOG_WARN("Connection (~tp): Unregistering connection.{blocked,unblocked} handler ~tp because it died. "
?LOG_WARNING("Connection (~tp): Unregistering connection.{blocked,unblocked} handler ~tp because it died. "
"Reason: ~tp", [self(), BlockHandler, Reason]),
erlang:demonitor(Ref, [flush]),
{noreply, State#state{block_handler = none}};
@ -316,14 +317,14 @@ internal_error(Pid, Reason, State) ->
State).
server_initiated_close(Close, State) ->
?LOG_WARN("Connection (~tp) closing: received hard error ~tp "
?LOG_WARNING("Connection (~tp) closing: received hard error ~tp "
"from server", [self(), Close]),
set_closing_state(abrupt, #closing{reason = server_initiated_close,
close = Close}, State).
server_misbehaved_close(AmqpError, State) ->
?LOG_WARN("Connection (~tp) closing: server misbehaved: ~tp",
[self(), AmqpError]),
?LOG_WARNING("Connection (~tp) closing: server misbehaved: ~tp",
[self(), AmqpError]),
{0, Close} = rabbit_binary_generator:map_exception(0, AmqpError, ?PROTOCOL),
set_closing_state(abrupt, #closing{reason = server_misbehaved,
close = Close}, State).

View File

@ -3,6 +3,7 @@
-include("amqp_client_internal.hrl").
-include_lib("public_key/include/public_key.hrl").
-include_lib("kernel/include/logger.hrl").
-export([maybe_enhance_ssl_options/1,
verify_fun/3]).
@ -51,7 +52,7 @@ maybe_add_verify1(Options) ->
% NB: user has explicitly set 'verify'
Options;
_ ->
?LOG_WARN("Connection (~tp): certificate chain verification is not enabled for this TLS connection. "
?LOG_WARNING("Connection (~tp): certificate chain verification is not enabled for this TLS connection. "
"Please see https://rabbitmq.com/ssl.html for more information.", [self()]),
Options
end.

View File

@ -17,13 +17,14 @@
]).
-include("oauth2_client.hrl").
-include_lib("kernel/include/logger.hrl").
-spec get_access_token(oauth_provider(), access_token_request()) ->
{ok, successful_access_token_response()} |
{error, unsuccessful_access_token_response() | any()}.
get_access_token(OAuthProvider, Request) ->
rabbit_log:debug("get_access_token using OAuthProvider:~p and client_id:~p",
[OAuthProvider, Request#access_token_request.client_id]),
?LOG_DEBUG("get_access_token using OAuthProvider:~p and client_id:~p",
[OAuthProvider, Request#access_token_request.client_id]),
URL = OAuthProvider#oauth_provider.token_endpoint,
Header = [],
Type = ?CONTENT_URLENCODED,
@ -96,7 +97,7 @@ drop_trailing_path_separator(Path) when is_list(Path) ->
-spec get_openid_configuration(DiscoveryEndpoint :: uri_string:uri_string(),
ssl:tls_option() | []) -> {ok, openid_configuration()} | {error, term()}.
get_openid_configuration(DiscoverEndpoint, TLSOptions) ->
rabbit_log:debug("get_openid_configuration from ~p (~p)", [DiscoverEndpoint,
?LOG_DEBUG("get_openid_configuration from ~p (~p)", [DiscoverEndpoint,
format_ssl_options(TLSOptions)]),
Options = [],
Response = httpc:request(get, {DiscoverEndpoint, []}, TLSOptions, Options),
@ -219,8 +220,8 @@ do_update_oauth_provider_endpoints_configuration(OAuthProvider) when
undefined -> do_nothing;
JwksUri -> set_env(jwks_uri, JwksUri)
end,
rabbit_log:debug("Updated oauth_provider details: ~p ",
[format_oauth_provider(OAuthProvider)]),
?LOG_DEBUG("Updated oauth_provider details: ~p ",
[format_oauth_provider(OAuthProvider)]),
OAuthProvider;
do_update_oauth_provider_endpoints_configuration(OAuthProvider) ->
@ -230,7 +231,7 @@ do_update_oauth_provider_endpoints_configuration(OAuthProvider) ->
ModifiedOAuthProviders = maps:put(OAuthProviderId,
merge_oauth_provider(OAuthProvider, Proplist), OAuthProviders),
set_env(oauth_providers, ModifiedOAuthProviders),
rabbit_log:debug("Replaced oauth_providers "),
?LOG_DEBUG("Replaced oauth_providers "),
OAuthProvider.
use_global_locks_on_all_nodes() ->
@ -271,8 +272,8 @@ get_oauth_provider(ListOfRequiredAttributes) ->
case get_env(default_oauth_provider) of
undefined -> get_root_oauth_provider(ListOfRequiredAttributes);
DefaultOauthProviderId ->
rabbit_log:debug("Using default_oauth_provider ~p",
[DefaultOauthProviderId]),
?LOG_DEBUG("Using default_oauth_provider ~p",
[DefaultOauthProviderId]),
get_oauth_provider(DefaultOauthProviderId, ListOfRequiredAttributes)
end.
@ -282,7 +283,7 @@ download_oauth_provider(OAuthProvider) ->
case OAuthProvider#oauth_provider.discovery_endpoint of
undefined -> {error, {missing_oauth_provider_attributes, [issuer]}};
URL ->
rabbit_log:debug("Downloading oauth_provider using ~p ", [URL]),
?LOG_DEBUG("Downloading oauth_provider using ~p ", [URL]),
case get_openid_configuration(URL, get_ssl_options_if_any(OAuthProvider)) of
{ok, OpenIdConfiguration} ->
{ok, update_oauth_provider_endpoints_configuration(
@ -294,8 +295,8 @@ download_oauth_provider(OAuthProvider) ->
ensure_oauth_provider_has_attributes(OAuthProvider, ListOfRequiredAttributes) ->
case find_missing_attributes(OAuthProvider, ListOfRequiredAttributes) of
[] ->
rabbit_log:debug("Resolved oauth_provider ~p",
[format_oauth_provider(OAuthProvider)]),
?LOG_DEBUG("Resolved oauth_provider ~p",
[format_oauth_provider(OAuthProvider)]),
{ok, OAuthProvider};
_ = Attrs ->
{error, {missing_oauth_provider_attributes, Attrs}}
@ -303,14 +304,14 @@ ensure_oauth_provider_has_attributes(OAuthProvider, ListOfRequiredAttributes) ->
get_root_oauth_provider(ListOfRequiredAttributes) ->
OAuthProvider = lookup_root_oauth_provider(),
rabbit_log:debug("Using root oauth_provider ~p",
[format_oauth_provider(OAuthProvider)]),
?LOG_DEBUG("Using root oauth_provider ~p",
[format_oauth_provider(OAuthProvider)]),
case find_missing_attributes(OAuthProvider, ListOfRequiredAttributes) of
[] ->
{ok, OAuthProvider};
_ = MissingAttributes ->
rabbit_log:debug("Looking up missing attributes ~p ...",
[MissingAttributes]),
?LOG_DEBUG("Looking up missing attributes ~p ...",
[MissingAttributes]),
case download_oauth_provider(OAuthProvider) of
{ok, OAuthProvider2} ->
ensure_oauth_provider_has_attributes(OAuthProvider2,
@ -333,22 +334,22 @@ get_oauth_provider(OAuth2ProviderId, ListOfRequiredAttributes)
get_oauth_provider(OAuthProviderId, ListOfRequiredAttributes)
when is_binary(OAuthProviderId) ->
rabbit_log:debug("get_oauth_provider ~p with at least these attributes: ~p",
[OAuthProviderId, ListOfRequiredAttributes]),
?LOG_DEBUG("get_oauth_provider ~p with at least these attributes: ~p",
[OAuthProviderId, ListOfRequiredAttributes]),
case lookup_oauth_provider_config(OAuthProviderId) of
{error, _} = Error0 ->
rabbit_log:debug("Failed to find oauth_provider ~p configuration due to ~p",
[OAuthProviderId, Error0]),
?LOG_DEBUG("Failed to find oauth_provider ~p configuration due to ~p",
[OAuthProviderId, Error0]),
Error0;
Config ->
rabbit_log:debug("Found oauth_provider configuration ~p", [Config]),
?LOG_DEBUG("Found oauth_provider configuration ~p", [Config]),
OAuthProvider = map_to_oauth_provider(Config),
rabbit_log:debug("Resolved oauth_provider ~p", [format_oauth_provider(OAuthProvider)]),
?LOG_DEBUG("Resolved oauth_provider ~p", [format_oauth_provider(OAuthProvider)]),
case find_missing_attributes(OAuthProvider, ListOfRequiredAttributes) of
[] ->
{ok, OAuthProvider};
_ = MissingAttributes ->
rabbit_log:debug("OauthProvider has following missing attributes ~p", [MissingAttributes]),
?LOG_DEBUG("OauthProvider has following missing attributes ~p", [MissingAttributes]),
case download_oauth_provider(OAuthProvider) of
{ok, OAuthProvider2} ->
ensure_oauth_provider_has_attributes(OAuthProvider2,

View File

@ -3,7 +3,6 @@ PROJECT_DESCRIPTION = RabbitMQ
PROJECT_MOD = rabbit
PROJECT_REGISTERED = rabbit_amqqueue_sup \
rabbit_direct_client_sup \
rabbit_log \
rabbit_node_monitor \
rabbit_router

View File

@ -3,7 +3,7 @@
-ifdef(TRACE_AMQP).
-warning("AMQP tracing is enabled").
-define(TRACE(Format, Args),
rabbit_log:debug(
?LOG_DEBUG(
"~s:~s/~b ~b~n" ++ Format ++ "~n",
[?MODULE, ?FUNCTION_NAME, ?FUNCTION_ARITY, ?LINE] ++ Args)).
-else.

View File

@ -9,6 +9,8 @@
-module(code_server_cache).
-include_lib("kernel/include/logger.hrl").
-behaviour(gen_server).
%% API
@ -70,8 +72,8 @@ handle_maybe_call_mfa(true, {Module, Function, Args, Default}, State) ->
error:undef ->
handle_maybe_call_mfa_error(Module, Default, State);
Err:Reason ->
rabbit_log:error("Calling ~tp:~tp failed: ~tp:~tp",
[Module, Function, Err, Reason]),
?LOG_ERROR("Calling ~tp:~tp failed: ~tp:~tp",
[Module, Function, Err, Reason]),
handle_maybe_call_mfa_error(Module, Default, State)
end.

View File

@ -7,6 +7,8 @@
-module(file_handle_cache).
-include_lib("kernel/include/logger.hrl").
%% A File Handle Cache
%%
%% This extends a subset of the functionality of the Erlang file
@ -1110,7 +1112,7 @@ init([AlarmSet, AlarmClear]) ->
end
end,
ObtainLimit = obtain_limit(Limit),
logger:info("Limiting to approx ~tp file handles (~tp sockets)",
?LOG_INFO("Limiting to approx ~tp file handles (~tp sockets)",
[Limit, ObtainLimit]),
Clients = ets:new(?CLIENT_ETS_TABLE, [set, private, {keypos, #cstate.pid}]),
Elders = ets:new(?ELDERS_ETS_TABLE, [set, private]),
@ -1451,19 +1453,19 @@ update_counts(open, Pid, Delta,
State = #fhc_state { open_count = OpenCount,
clients = Clients }) ->
safe_ets_update_counter(Clients, Pid, {#cstate.opened, Delta},
fun() -> rabbit_log:warning("FHC: failed to update counter 'opened', client pid: ~p", [Pid]) end),
fun() -> ?LOG_WARNING("FHC: failed to update counter 'opened', client pid: ~p", [Pid]) end),
State #fhc_state { open_count = OpenCount + Delta};
update_counts({obtain, file}, Pid, Delta,
State = #fhc_state {obtain_count_file = ObtainCountF,
clients = Clients }) ->
safe_ets_update_counter(Clients, Pid, {#cstate.obtained_file, Delta},
fun() -> rabbit_log:warning("FHC: failed to update counter 'obtained_file', client pid: ~p", [Pid]) end),
fun() -> ?LOG_WARNING("FHC: failed to update counter 'obtained_file', client pid: ~p", [Pid]) end),
State #fhc_state { obtain_count_file = ObtainCountF + Delta};
update_counts({obtain, socket}, Pid, Delta,
State = #fhc_state {obtain_count_socket = ObtainCountS,
clients = Clients }) ->
safe_ets_update_counter(Clients, Pid, {#cstate.obtained_socket, Delta},
fun() -> rabbit_log:warning("FHC: failed to update counter 'obtained_socket', client pid: ~p", [Pid]) end),
fun() -> ?LOG_WARNING("FHC: failed to update counter 'obtained_socket', client pid: ~p", [Pid]) end),
State #fhc_state { obtain_count_socket = ObtainCountS + Delta};
update_counts({reserve, file}, Pid, NewReservation,
State = #fhc_state {reserve_count_file = ReserveCountF,
@ -1471,7 +1473,7 @@ update_counts({reserve, file}, Pid, NewReservation,
[#cstate{reserved_file = R}] = ets:lookup(Clients, Pid),
Delta = NewReservation - R,
safe_ets_update_counter(Clients, Pid, {#cstate.reserved_file, Delta},
fun() -> rabbit_log:warning("FHC: failed to update counter 'reserved_file', client pid: ~p", [Pid]) end),
fun() -> ?LOG_WARNING("FHC: failed to update counter 'reserved_file', client pid: ~p", [Pid]) end),
State #fhc_state { reserve_count_file = ReserveCountF + Delta};
update_counts({reserve, socket}, Pid, NewReservation,
State = #fhc_state {reserve_count_socket = ReserveCountS,
@ -1479,7 +1481,7 @@ update_counts({reserve, socket}, Pid, NewReservation,
[#cstate{reserved_file = R}] = ets:lookup(Clients, Pid),
Delta = NewReservation - R,
safe_ets_update_counter(Clients, Pid, {#cstate.reserved_socket, Delta},
fun() -> rabbit_log:warning("FHC: failed to update counter 'reserved_socket', client pid: ~p", [Pid]) end),
fun() -> ?LOG_WARNING("FHC: failed to update counter 'reserved_socket', client pid: ~p", [Pid]) end),
State #fhc_state { reserve_count_socket = ReserveCountS + Delta}.
maybe_reduce(State) ->

View File

@ -3,6 +3,7 @@
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("rabbit_common/include/rabbit_framing.hrl").
-include("mc.hrl").
-include_lib("kernel/include/logger.hrl").
-export([
%init/3,
@ -267,9 +268,9 @@ update_x_death_header(Info, Headers) ->
Headers, <<"x-death">>, array,
[{table, rabbit_misc:sort_field_table(Info1)} | Others]);
{<<"x-death">>, InvalidType, Header} ->
rabbit_log:warning("Message has invalid x-death header (type: ~tp)."
" Resetting header ~tp",
[InvalidType, Header]),
?LOG_WARNING("Message has invalid x-death header (type: ~tp)."
" Resetting header ~tp",
[InvalidType, Header]),
%% if x-death is something other than an array (list)
%% then we reset it: this happens when some clients consume
%% a message and re-publish is, converting header values

View File

@ -7,6 +7,8 @@
-module(mirrored_supervisor).
-include_lib("kernel/include/logger.hrl").
%% Mirrored Supervisor
%% ===================
%%
@ -252,13 +254,13 @@ handle_call({init, Overall}, _From,
LockId = mirrored_supervisor_locks:lock(Group),
maybe_log_lock_acquisition_failure(LockId, Group),
ok = pg:join(Group, Overall),
rabbit_log:debug("Mirrored supervisor: initializing, overall supervisor ~tp joined group ~tp", [Overall, Group]),
?LOG_DEBUG("Mirrored supervisor: initializing, overall supervisor ~tp joined group ~tp", [Overall, Group]),
Rest = pg:get_members(Group) -- [Overall],
Nodes = [node(M) || M <- Rest],
rabbit_log:debug("Mirrored supervisor: known group ~tp members: ~tp on nodes ~tp", [Group, Rest, Nodes]),
?LOG_DEBUG("Mirrored supervisor: known group ~tp members: ~tp on nodes ~tp", [Group, Rest, Nodes]),
case Rest of
[] ->
rabbit_log:debug("Mirrored supervisor: no known peer members in group ~tp, will delete all child records for it", [Group]),
?LOG_DEBUG("Mirrored supervisor: no known peer members in group ~tp, will delete all child records for it", [Group]),
delete_all(Group);
_ -> ok
end,
@ -282,18 +284,18 @@ handle_call({start_child, ChildSpec}, _From,
group = Group}) ->
LockId = mirrored_supervisor_locks:lock(Group),
maybe_log_lock_acquisition_failure(LockId, Group),
rabbit_log:debug("Mirrored supervisor: asked to consider starting a child, group: ~tp", [Group]),
?LOG_DEBUG("Mirrored supervisor: asked to consider starting a child, group: ~tp", [Group]),
Result = case maybe_start(Group, Overall, Delegate, ChildSpec) of
already_in_store ->
rabbit_log:debug("Mirrored supervisor: maybe_start for group ~tp,"
?LOG_DEBUG("Mirrored supervisor: maybe_start for group ~tp,"
" overall ~p returned 'record already present'", [Group, Overall]),
{error, already_present};
{already_in_store, Pid} ->
rabbit_log:debug("Mirrored supervisor: maybe_start for group ~tp,"
?LOG_DEBUG("Mirrored supervisor: maybe_start for group ~tp,"
" overall ~p returned 'already running: ~tp'", [Group, Overall, Pid]),
{error, {already_started, Pid}};
Else ->
rabbit_log:debug("Mirrored supervisor: maybe_start for group ~tp,"
?LOG_DEBUG("Mirrored supervisor: maybe_start for group ~tp,"
" overall ~tp returned ~tp", [Group, Overall, Else]),
Else
end,
@ -377,19 +379,19 @@ tell_all_peers_to_die(Group, Reason) ->
[cast(P, {die, Reason}) || P <- pg:get_members(Group) -- [self()]].
maybe_start(Group, Overall, Delegate, ChildSpec) ->
rabbit_log:debug("Mirrored supervisor: asked to consider starting, group: ~tp",
?LOG_DEBUG("Mirrored supervisor: asked to consider starting, group: ~tp",
[Group]),
try check_start(Group, Overall, Delegate, ChildSpec) of
start ->
rabbit_log:debug("Mirrored supervisor: check_start for group ~tp,"
?LOG_DEBUG("Mirrored supervisor: check_start for group ~tp,"
" overall ~tp returned 'do start'", [Group, Overall]),
start(Delegate, ChildSpec);
undefined ->
rabbit_log:debug("Mirrored supervisor: check_start for group ~tp,"
?LOG_DEBUG("Mirrored supervisor: check_start for group ~tp,"
" overall ~tp returned 'undefined'", [Group, Overall]),
already_in_store;
Pid ->
rabbit_log:debug("Mirrored supervisor: check_start for group ~tp,"
?LOG_DEBUG("Mirrored supervisor: check_start for group ~tp,"
" overall ~tp returned 'already running (~tp)'",
[Group, Overall, Pid]),
{already_in_store, Pid}
@ -400,7 +402,7 @@ maybe_start(Group, Overall, Delegate, ChildSpec) ->
check_start(Group, Overall, Delegate, ChildSpec) ->
Id = id(ChildSpec),
rabbit_log:debug("Mirrored supervisor: check_start for group ~tp, id: ~tp, "
?LOG_DEBUG("Mirrored supervisor: check_start for group ~tp, id: ~tp, "
"overall: ~tp", [Group, Id, Overall]),
case rabbit_db_msup:create_or_update(Group, Overall, Delegate, ChildSpec, Id) of
Delegate0 when is_pid(Delegate0) ->
@ -486,6 +488,6 @@ restore_child_order(ChildSpecs, ChildOrder) ->
end, ChildSpecs).
maybe_log_lock_acquisition_failure(undefined = _LockId, Group) ->
rabbit_log:warning("Mirrored supervisor: could not acquire lock for group ~ts", [Group]);
?LOG_WARNING("Mirrored supervisor: could not acquire lock for group ~ts", [Group]);
maybe_log_lock_acquisition_failure(_, _) ->
ok.

View File

@ -34,6 +34,8 @@
%%
-module(pg_local).
-include_lib("kernel/include/logger.hrl").
-export([join/2, leave/2, get_members/1, in_group/2]).
%% intended for testing only; not part of official API
-export([sync/0, clear/0]).
@ -120,9 +122,9 @@ handle_call(clear, _From, S) ->
{reply, ok, S};
handle_call(Request, From, S) ->
error_logger:warning_msg("The pg_local server received an unexpected message:\n"
"handle_call(~tp, ~tp, _)\n",
[Request, From]),
?LOG_WARNING("The pg_local server received an unexpected message:\n"
"handle_call(~tp, ~tp, _)\n",
[Request, From]),
{noreply, S}.
handle_cast({join, Name, Pid}, S) ->

View File

@ -1155,7 +1155,7 @@ pg_local_scope(Prefix) ->
update_cluster_tags() ->
Tags = application:get_env(rabbit, cluster_tags, []),
?LOG_DEBUG("Seeding cluster tags from application environment key...",
#{domain => ?RMQLOG_DOMAIN_GLOBAL}),
#{domain => ?RMQLOG_DOMAIN_GLOBAL}),
rabbit_runtime_parameters:set_global(cluster_tags, Tags, <<"internal_user">>).
@ -1688,7 +1688,7 @@ maybe_warn_low_fd_limit() ->
L when L > 1024 ->
ok;
L ->
rabbit_log:warning("Available file handles: ~tp. "
?LOG_WARNING("Available file handles: ~tp. "
"Please consider increasing system limits", [L])
end.
@ -1718,7 +1718,7 @@ persist_static_configuration() ->
MoreCreditAfter =< InitialCredit ->
{InitialCredit, MoreCreditAfter};
Other ->
rabbit_log:error("Refusing to boot due to an invalid value of 'rabbit.credit_flow_default_credit'"),
?LOG_ERROR("Refusing to boot due to an invalid value of 'rabbit.credit_flow_default_credit'"),
throw({error, {invalid_credit_flow_default_credit_value, Other}})
end,
ok = persistent_term:put(credit_flow_default_credit, CreditFlowDefaultCredit),

View File

@ -8,6 +8,7 @@
-module(rabbit_access_control).
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-export([check_user_pass_login/2, check_user_login/2, check_user_login/3, check_user_loopback/2,
check_vhost_access/4, check_resource_access/4, check_topic_access/4,
@ -59,10 +60,10 @@ check_user_login(Username, AuthProps, Modules) ->
%% it gives us
case try_authenticate(Mod, Username, AuthProps) of
{ok, ModNUser = #auth_user{username = Username2, impl = Impl}} ->
rabbit_log:debug("User '~ts' authenticated successfully by backend ~ts", [Username2, Mod]),
?LOG_DEBUG("User '~ts' authenticated successfully by backend ~ts", [Username2, Mod]),
user(ModNUser, {ok, [{Mod, Impl}], []});
Else ->
rabbit_log:debug("User '~ts' failed authentication by backend ~ts", [Username, Mod]),
?LOG_DEBUG("User '~ts' failed authentication by backend ~ts", [Username, Mod]),
Else
end;
(_, {ok, User}) ->
@ -72,7 +73,7 @@ check_user_login(Username, AuthProps, Modules) ->
{refused, Username, "No modules checked '~ts'", [Username]}, Modules)
catch
Type:Error:Stacktrace ->
rabbit_log:debug("User '~ts' authentication failed with ~ts:~tp:~n~tp", [Username, Type, Error, Stacktrace]),
?LOG_DEBUG("User '~ts' authentication failed with ~ts:~tp:~n~tp", [Username, Type, Error, Stacktrace]),
{refused, Username, "User '~ts' authentication failed with internal error. "
"Enable debug logs to see the real error.", [Username]}
@ -85,7 +86,7 @@ try_authenticate_and_try_authorize(ModN, ModZs0, Username, AuthProps) ->
end,
case try_authenticate(ModN, Username, AuthProps) of
{ok, ModNUser = #auth_user{username = Username2}} ->
rabbit_log:debug("User '~ts' authenticated successfully by backend ~ts", [Username2, ModN]),
?LOG_DEBUG("User '~ts' authenticated successfully by backend ~ts", [Username2, ModN]),
user(ModNUser, try_authorize(ModZs, Username2, AuthProps));
Else ->
Else
@ -227,7 +228,7 @@ check_access(Fun, Module, ErrStr, ErrArgs, ErrName) ->
{error, E} ->
FullErrStr = ErrStr ++ ", backend ~ts returned an error: ~tp",
FullErrArgs = ErrArgs ++ [Module, E],
rabbit_log:error(FullErrStr, FullErrArgs),
?LOG_ERROR(FullErrStr, FullErrArgs),
rabbit_misc:protocol_error(ErrName, FullErrStr, FullErrArgs)
end.

View File

@ -18,6 +18,8 @@
-module(rabbit_alarm).
-include_lib("kernel/include/logger.hrl").
-behaviour(gen_event).
-export([start_link/0, start/0, stop/0, register/2, set_alarm/1,
@ -239,8 +241,8 @@ handle_event({node_down, Node}, #alarms{alarmed_nodes = AN} = State) ->
error -> []
end,
{ok, lists:foldr(fun(Source, AccState) ->
rabbit_log:warning("~ts resource limit alarm cleared for dead node ~tp",
[Source, Node]),
?LOG_WARNING("~ts resource limit alarm cleared for dead node ~tp",
[Source, Node]),
maybe_alert(fun dict_unappend/3, Node, Source, false, AccState)
end, State, AlarmsForDeadNode)};
@ -291,7 +293,7 @@ maybe_alert(UpdateFun, Node, Source, WasAlertAdded,
StillHasAlerts = lists:any(fun ({_Node, NodeAlerts}) -> lists:member(Source, NodeAlerts) end, dict:to_list(AN1)),
case StillHasAlerts of
true -> ok;
false -> rabbit_log:warning("~ts resource limit alarm cleared across the cluster", [Source])
false -> ?LOG_WARNING("~ts resource limit alarm cleared across the cluster", [Source])
end,
Alert = {WasAlertAdded, StillHasAlerts, Node},
case node() of
@ -327,7 +329,7 @@ internal_register(Pid, {M, F, A} = AlertMFA,
State#alarms{alertees = NewAlertees}.
handle_set_resource_alarm(Source, Node, State) ->
rabbit_log:warning(
?LOG_WARNING(
"~ts resource limit alarm set on node ~tp.~n~n"
"**********************************************************~n"
"*** Publishers will be blocked until this alarm clears ***~n"
@ -336,26 +338,26 @@ handle_set_resource_alarm(Source, Node, State) ->
{ok, maybe_alert(fun dict_append/3, Node, Source, true, State)}.
handle_set_alarm({file_descriptor_limit, []}, State) ->
rabbit_log:warning(
?LOG_WARNING(
"file descriptor limit alarm set.~n~n"
"********************************************************************~n"
"*** New connections will not be accepted until this alarm clears ***~n"
"********************************************************************~n"),
{ok, State};
handle_set_alarm(Alarm, State) ->
rabbit_log:warning("alarm '~tp' set", [Alarm]),
?LOG_WARNING("alarm '~tp' set", [Alarm]),
{ok, State}.
handle_clear_resource_alarm(Source, Node, State) ->
rabbit_log:warning("~ts resource limit alarm cleared on node ~tp",
[Source, Node]),
?LOG_WARNING("~ts resource limit alarm cleared on node ~tp",
[Source, Node]),
{ok, maybe_alert(fun dict_unappend/3, Node, Source, false, State)}.
handle_clear_alarm(file_descriptor_limit, State) ->
rabbit_log:warning("file descriptor limit alarm cleared~n"),
?LOG_WARNING("file descriptor limit alarm cleared~n"),
{ok, State};
handle_clear_alarm(Alarm, State) ->
rabbit_log:warning("alarm '~tp' cleared", [Alarm]),
?LOG_WARNING("alarm '~tp' cleared", [Alarm]),
{ok, State}.
is_node_alarmed(Source, Node, #alarms{alarmed_nodes = AN}) ->

View File

@ -8,6 +8,7 @@
-feature(maybe_expr, enable).
-include_lib("amqp10_common/include/amqp10_filter.hrl").
-include_lib("kernel/include/logger.hrl").
-type parsed_expression() :: {ApplicationProperties :: boolean(),
rabbit_amqp_sql_ast:ast()}.
@ -293,8 +294,8 @@ sql_to_list(SQL) ->
String when is_list(String) ->
{ok, String};
Error ->
rabbit_log:warning("SQL expression ~p is not UTF-8 encoded: ~p",
[SQL, Error]),
?LOG_WARNING("SQL expression ~p is not UTF-8 encoded: ~p",
[SQL, Error]),
error
end.
@ -304,8 +305,8 @@ check_length(String) ->
true ->
ok;
false ->
rabbit_log:warning("SQL expression length ~b exceeds maximum length ~b",
[Len, ?MAX_EXPRESSION_LENGTH]),
?LOG_WARNING("SQL expression length ~b exceeds maximum length ~b",
[Len, ?MAX_EXPRESSION_LENGTH]),
error
end.
@ -314,15 +315,15 @@ tokenize(String, SQL) ->
{ok, Tokens, _EndLocation} ->
{ok, Tokens};
{error, {_Line, _Mod, ErrDescriptor}, _Location} ->
rabbit_log:warning("failed to scan SQL expression '~ts': ~tp",
[SQL, ErrDescriptor]),
?LOG_WARNING("failed to scan SQL expression '~ts': ~tp",
[SQL, ErrDescriptor]),
error
end.
check_token_count(Tokens, SQL)
when length(Tokens) > ?MAX_TOKENS ->
rabbit_log:warning("SQL expression '~ts' with ~b tokens exceeds token limit ~b",
[SQL, length(Tokens), ?MAX_TOKENS]),
?LOG_WARNING("SQL expression '~ts' with ~b tokens exceeds token limit ~b",
[SQL, length(Tokens), ?MAX_TOKENS]),
error;
check_token_count(_, _) ->
ok.
@ -330,8 +331,8 @@ check_token_count(_, _) ->
parse(Tokens, SQL) ->
case rabbit_amqp_sql_parser:parse(Tokens) of
{error, Reason} ->
rabbit_log:warning("failed to parse SQL expression '~ts': ~p",
[SQL, Reason]),
?LOG_WARNING("failed to parse SQL expression '~ts': ~p",
[SQL, Reason]),
error;
Ok ->
Ok
@ -346,9 +347,9 @@ transform_ast(Ast0, SQL) ->
Ast ->
{ok, Ast}
catch {invalid_pattern, Reason} ->
rabbit_log:warning(
"failed to parse LIKE pattern for SQL expression ~tp: ~tp",
[SQL, Reason]),
?LOG_WARNING(
"failed to parse LIKE pattern for SQL expression ~tp: ~tp",
[SQL, Reason]),
error
end.

View File

@ -2,6 +2,7 @@
-include("rabbit_amqp.hrl").
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-export([handle_request/5]).
@ -49,8 +50,8 @@ handle_request(Request, Vhost, User, ConnectionPid, PermCaches0) ->
ConnectionPid,
PermCaches0)
catch throw:{?MODULE, StatusCode0, Explanation} ->
rabbit_log:warning("request ~ts ~ts failed: ~ts",
[HttpMethod, HttpRequestTarget, Explanation]),
?LOG_WARNING("request ~ts ~ts failed: ~ts",
[HttpMethod, HttpRequestTarget, Explanation]),
{StatusCode0, {utf8, Explanation}, PermCaches0}
end,

View File

@ -82,6 +82,7 @@
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("stdlib/include/qlc.hrl").
-include("amqqueue.hrl").
-include_lib("kernel/include/logger.hrl").
-define(INTEGER_ARG_TYPES, [byte, short, signedint, long,
unsignedbyte, unsignedshort, unsignedint]).
@ -423,8 +424,8 @@ rebalance(Type, VhostSpec, QueueSpec) ->
%% TODO: classic queues do not support rebalancing, it looks like they are simply
%% filtered out with is_replicable(Q). Maybe error instead?
maybe_rebalance({true, Id}, Type, VhostSpec, QueueSpec) ->
rabbit_log:info("Starting queue rebalance operation: '~ts' for vhosts matching '~ts' and queues matching '~ts'",
[Type, VhostSpec, QueueSpec]),
?LOG_INFO("Starting queue rebalance operation: '~ts' for vhosts matching '~ts' and queues matching '~ts'",
[Type, VhostSpec, QueueSpec]),
Running = rabbit_maintenance:filter_out_drained_nodes_consistent_read(rabbit_nodes:list_running()),
NumRunning = length(Running),
TypeModule = case Type of
@ -445,10 +446,10 @@ maybe_rebalance({true, Id}, Type, VhostSpec, QueueSpec) ->
MaxQueuesDesired = (NumToRebalance div NumRunning) + Rem,
Result = iterative_rebalance(ByNode, MaxQueuesDesired),
global:del_lock(Id),
rabbit_log:info("Finished queue rebalance operation"),
?LOG_INFO("Finished queue rebalance operation"),
Result;
maybe_rebalance(false, _Type, _VhostSpec, _QueueSpec) ->
rabbit_log:warning("Queue rebalance operation is in progress, please wait."),
?LOG_WARNING("Queue rebalance operation is in progress, please wait."),
{error, rebalance_in_progress}.
%% Stream queues don't yet support rebalance
@ -466,7 +467,7 @@ filter_per_type_for_rebalance(TypeModule, Q) ->
rebalance_module(Q) ->
case rabbit_queue_type:rebalance_module(Q) of
undefined ->
rabbit_log:error("Undefined rebalance module for queue type: ~s", [amqqueue:get_type(Q)]),
?LOG_ERROR("Undefined rebalance module for queue type: ~s", [amqqueue:get_type(Q)]),
{error, not_supported};
RBModule ->
RBModule
@ -484,7 +485,7 @@ is_match(Subj, RegEx) ->
iterative_rebalance(ByNode, MaxQueuesDesired) ->
case maybe_migrate(ByNode, MaxQueuesDesired) of
{ok, Summary} ->
rabbit_log:info("All queue leaders are balanced"),
?LOG_INFO("All queue leaders are balanced"),
{ok, Summary};
{migrated, Other} ->
iterative_rebalance(Other, MaxQueuesDesired);
@ -521,23 +522,23 @@ maybe_migrate(ByNode, MaxQueuesDesired, [N | Nodes]) ->
{not_migrated, update_not_migrated_queue(N, Queue, Queues, ByNode)};
_ ->
[{Length, Destination} | _] = sort_by_number_of_queues(Candidates, ByNode),
rabbit_log:info("Migrating queue ~tp from node ~tp with ~tp queues to node ~tp with ~tp queues",
[Name, N, length(All), Destination, Length]),
?LOG_INFO("Migrating queue ~tp from node ~tp with ~tp queues to node ~tp with ~tp queues",
[Name, N, length(All), Destination, Length]),
case Module:transfer_leadership(Q, Destination) of
{migrated, NewNode} ->
rabbit_log:info("Queue ~tp migrated to ~tp", [Name, NewNode]),
?LOG_INFO("Queue ~tp migrated to ~tp", [Name, NewNode]),
{migrated, update_migrated_queue(NewNode, N, Queue, Queues, ByNode)};
{not_migrated, Reason} ->
rabbit_log:warning("Error migrating queue ~tp: ~tp", [Name, Reason]),
?LOG_WARNING("Error migrating queue ~tp: ~tp", [Name, Reason]),
{not_migrated, update_not_migrated_queue(N, Queue, Queues, ByNode)}
end
end;
[{_, _, true} | _] = All when length(All) > MaxQueuesDesired ->
rabbit_log:warning("Node ~tp contains ~tp queues, but all have already migrated. "
?LOG_WARNING("Node ~tp contains ~tp queues, but all have already migrated. "
"Do nothing", [N, length(All)]),
maybe_migrate(ByNode, MaxQueuesDesired, Nodes);
All ->
rabbit_log:debug("Node ~tp only contains ~tp queues, do nothing",
?LOG_DEBUG("Node ~tp only contains ~tp queues, do nothing",
[N, length(All)]),
maybe_migrate(ByNode, MaxQueuesDesired, Nodes)
end.
@ -625,7 +626,7 @@ retry_wait(Q, F, E, RetriesLeft) ->
%% The old check would have crashed here,
%% instead, log it and run the exit fun. absent & alive is weird,
%% but better than crashing with badmatch,true
rabbit_log:debug("Unexpected alive queue process ~tp", [QPid]),
?LOG_DEBUG("Unexpected alive queue process ~tp", [QPid]),
E({absent, Q, alive});
false ->
ok % Expected result
@ -1894,7 +1895,7 @@ internal_delete(Queue, ActingUser, Reason) ->
%% TODO this is used by `rabbit_mnesia:remove_node_if_mnesia_running`
%% Does it make any sense once mnesia is not used/removed?
forget_all_durable(Node) ->
rabbit_log:info("Will remove all classic queues from node ~ts. The node is likely being removed from the cluster.", [Node]),
?LOG_INFO("Will remove all classic queues from node ~ts. The node is likely being removed from the cluster.", [Node]),
UpdateFun = fun(Q) ->
forget_node_for_queue(Q)
end,
@ -1959,7 +1960,7 @@ on_node_down(Node) ->
%% `rabbit_khepri:init/0': we also try this deletion when the node
%% restarts - a time that the cluster is very likely to have a
%% majority - to ensure these records are deleted.
rabbit_log:warning("transient queues for node '~ts' could not be "
?LOG_WARNING("transient queues for node '~ts' could not be "
"deleted because of a timeout. These queues "
"will be removed when node '~ts' restarts or "
"is removed from the cluster.", [Node, Node]),
@ -1980,9 +1981,9 @@ delete_transient_queues_on_node(Node) ->
{QueueNames, Deletions} when is_list(QueueNames) ->
case length(QueueNames) of
0 -> ok;
N -> rabbit_log:info("~b transient queues from node '~ts' "
"deleted in ~fs",
[N, Node, Time / 1_000_000])
N -> ?LOG_INFO("~b transient queues from node '~ts' "
"deleted in ~fs",
[N, Node, Time / 1_000_000])
end,
notify_queue_binding_deletions(Deletions),
rabbit_core_metrics:queues_deleted(QueueNames),

View File

@ -8,6 +8,8 @@
-module(rabbit_amqqueue_process).
-include_lib("rabbit_common/include/rabbit.hrl").
-include("amqqueue.hrl").
-include_lib("kernel/include/logger.hrl").
-include_lib("rabbit_common/include/logging.hrl").
-behaviour(gen_server2).
@ -142,6 +144,7 @@ start_link(Q, Marker) ->
gen_server2:start_link(?MODULE, {Q, Marker}, []).
init({Q, Marker}) ->
logger:set_process_metadata(#{domain => ?RMQLOG_DOMAIN_QUEUE}),
case is_process_alive(Marker) of
true ->
%% start
@ -150,7 +153,7 @@ init({Q, Marker}) ->
%% restart
QueueName = amqqueue:get_name(Q),
{ok, Q1} = rabbit_amqqueue:lookup(QueueName),
rabbit_log:error("Restarting crashed ~ts.", [rabbit_misc:rs(QueueName)]),
?LOG_ERROR("Restarting crashed ~ts.", [rabbit_misc:rs(QueueName)]),
gen_server2:cast(self(), init),
init(Q1)
end;
@ -1604,7 +1607,8 @@ handle_cast({force_event_refresh, Ref},
rabbit_event:notify(queue_created, queue_created_infos(State), Ref),
QName = qname(State),
AllConsumers = rabbit_queue_consumers:all(Consumers),
rabbit_log:debug("Queue ~ts forced to re-emit events, consumers: ~tp", [rabbit_misc:rs(QName), AllConsumers]),
?LOG_DEBUG("Queue ~ts forced to re-emit events, consumers: ~tp",
[rabbit_misc:rs(QName), AllConsumers]),
[emit_consumer_created(
Ch, CTag, ActiveOrExclusive, AckRequired, QName, Prefetch,
Args, Ref, ActingUser) ||
@ -1649,7 +1653,8 @@ handle_info({maybe_expire, Vsn}, State = #q{q = Q, expires = Expiry, args_policy
case is_unused(State) of
true ->
QResource = rabbit_misc:rs(amqqueue:get_name(Q)),
rabbit_log_queue:debug("Deleting 'classic ~ts' on expiry after ~tp milliseconds", [QResource, Expiry]),
?LOG_DEBUG("Deleting 'classic ~ts' on expiry after ~tp milliseconds",
[QResource, Expiry]),
stop(State);
false -> noreply(State#q{expiry_timer_ref = undefined})
end;
@ -1751,16 +1756,16 @@ log_delete_exclusive({ConPid, _ConRef}, State) ->
log_delete_exclusive(ConPid, #q{ q = Q }) ->
Resource = amqqueue:get_name(Q),
#resource{ name = QName, virtual_host = VHost } = Resource,
rabbit_log_queue:debug("Deleting exclusive queue '~ts' in vhost '~ts' " ++
"because its declaring connection ~tp was closed",
[QName, VHost, ConPid]).
?LOG_DEBUG("Deleting exclusive queue '~ts' in vhost '~ts' " ++
"because its declaring connection ~tp was closed",
[QName, VHost, ConPid]).
log_auto_delete(Reason, #q{ q = Q }) ->
Resource = amqqueue:get_name(Q),
#resource{ name = QName, virtual_host = VHost } = Resource,
rabbit_log_queue:debug("Deleting auto-delete queue '~ts' in vhost '~ts' " ++
Reason,
[QName, VHost]).
?LOG_DEBUG("Deleting auto-delete queue '~ts' in vhost '~ts' " ++
Reason,
[QName, VHost]).
confirm_to_sender(Pid, QName, MsgSeqNos) ->
rabbit_classic_queue:confirm_to_sender(Pid, QName, MsgSeqNos).

View File

@ -16,6 +16,7 @@
-export([init/1]).
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-define(SERVER, ?MODULE).
@ -74,8 +75,8 @@ start_for_vhost(VHost) ->
%% we can get here if a vhost is added and removed concurrently
%% e.g. some integration tests do it
{error, {no_such_vhost, VHost}} ->
rabbit_log:error("Failed to start a queue process supervisor for vhost ~ts: vhost no longer exists!",
[VHost]),
?LOG_ERROR("Failed to start a queue process supervisor for vhost ~ts: vhost no longer exists!",
[VHost]),
{error, {no_such_vhost, VHost}}
end.
@ -87,7 +88,7 @@ stop_for_vhost(VHost) ->
ok = supervisor:delete_child(VHostSup, rabbit_amqqueue_sup_sup);
%% see start/1
{error, {no_such_vhost, VHost}} ->
rabbit_log:error("Failed to stop a queue process supervisor for vhost ~ts: vhost no longer exists!",
[VHost]),
?LOG_ERROR("Failed to stop a queue process supervisor for vhost ~ts: vhost no longer exists!",
[VHost]),
ok
end.

View File

@ -7,6 +7,7 @@
-module(rabbit_auth_backend_internal).
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-behaviour(rabbit_authn_backend).
-behaviour(rabbit_authz_backend).
@ -204,7 +205,7 @@ validate_and_alternate_credentials(Username, Password, ActingUser, Fun) ->
ok ->
Fun(Username, Password, ActingUser);
{error, Err} ->
rabbit_log:error("Credential validation for user '~ts' failed!", [Username]),
?LOG_ERROR("Credential validation for user '~ts' failed!", [Username]),
{error, Err}
end.
@ -238,7 +239,7 @@ add_user_sans_validation(Limits, Tags) ->
end.
add_user_sans_validation(Username, Password, ActingUser, Limits, Tags) ->
rabbit_log:debug("Asked to create a new user '~ts', password length in bytes: ~tp", [Username, bit_size(Password)]),
?LOG_DEBUG("Asked to create a new user '~ts', password length in bytes: ~tp", [Username, bit_size(Password)]),
%% hash_password will pick the hashing function configured for us
%% but we also need to store a hint as part of the record, so we
%% retrieve it here one more time
@ -254,7 +255,7 @@ add_user_sans_validation(Username, Password, ActingUser, Limits, Tags) ->
add_user_sans_validation_in(Username, User, ConvertedTags, Limits, ActingUser).
add_user_sans_validation(Username, PasswordHash, HashingMod, Tags, Limits, ActingUser) ->
rabbit_log:debug("Asked to create a new user '~ts' with password hash", [Username]),
?LOG_DEBUG("Asked to create a new user '~ts' with password hash", [Username]),
ConvertedTags = [rabbit_data_coercion:to_atom(I) || I <- Tags],
User0 = internal_user:create_user(Username, PasswordHash, HashingMod),
User1 = internal_user:set_tags(
@ -269,7 +270,7 @@ add_user_sans_validation(Username, PasswordHash, HashingMod, Tags, Limits, Actin
add_user_sans_validation_in(Username, User, ConvertedTags, Limits, ActingUser) ->
try
R = rabbit_db_user:create(User),
rabbit_log:info("Created user '~ts'", [Username]),
?LOG_INFO("Created user '~ts'", [Username]),
rabbit_event:notify(user_created, [{name, Username},
{user_who_performed_action, ActingUser}]),
case ConvertedTags of
@ -283,21 +284,21 @@ add_user_sans_validation_in(Username, User, ConvertedTags, Limits, ActingUser) -
R
catch
throw:{error, {user_already_exists, _}} = Error ->
rabbit_log:warning("Failed to add user '~ts': the user already exists", [Username]),
?LOG_WARNING("Failed to add user '~ts': the user already exists", [Username]),
throw(Error);
Class:Error:Stacktrace ->
rabbit_log:warning("Failed to add user '~ts': ~tp", [Username, Error]),
?LOG_WARNING("Failed to add user '~ts': ~tp", [Username, Error]),
erlang:raise(Class, Error, Stacktrace)
end .
-spec delete_user(rabbit_types:username(), rabbit_types:username()) -> 'ok'.
delete_user(Username, ActingUser) ->
rabbit_log:debug("Asked to delete user '~ts'", [Username]),
?LOG_DEBUG("Asked to delete user '~ts'", [Username]),
try
case rabbit_db_user:delete(Username) of
true ->
rabbit_log:info("Deleted user '~ts'", [Username]),
?LOG_INFO("Deleted user '~ts'", [Username]),
rabbit_event:notify(user_deleted,
[{name, Username},
{user_who_performed_action, ActingUser}]),
@ -305,12 +306,12 @@ delete_user(Username, ActingUser) ->
false ->
ok;
Error0 ->
rabbit_log:info("Failed to delete user '~ts': ~tp", [Username, Error0]),
?LOG_INFO("Failed to delete user '~ts': ~tp", [Username, Error0]),
throw(Error0)
end
catch
Class:Error:Stacktrace ->
rabbit_log:warning("Failed to delete user '~ts': ~tp", [Username, Error]),
?LOG_WARNING("Failed to delete user '~ts': ~tp", [Username, Error]),
erlang:raise(Class, Error, Stacktrace)
end .
@ -342,23 +343,23 @@ change_password(Username, Password, ActingUser) ->
change_password_sans_validation(Username, Password, ActingUser) ->
try
rabbit_log:debug("Asked to change password of user '~ts', new password length in bytes: ~tp", [Username, bit_size(Password)]),
?LOG_DEBUG("Asked to change password of user '~ts', new password length in bytes: ~tp", [Username, bit_size(Password)]),
HashingAlgorithm = rabbit_password:hashing_mod(),
R = change_password_hash(Username,
hash_password(rabbit_password:hashing_mod(),
Password),
HashingAlgorithm),
rabbit_log:info("Successfully changed password for user '~ts'", [Username]),
?LOG_INFO("Successfully changed password for user '~ts'", [Username]),
rabbit_event:notify(user_password_changed,
[{name, Username},
{user_who_performed_action, ActingUser}]),
R
catch
throw:{error, {no_such_user, _}} = Error ->
rabbit_log:warning("Failed to change password for user '~ts': the user does not exist", [Username]),
?LOG_WARNING("Failed to change password for user '~ts': the user does not exist", [Username]),
throw(Error);
Class:Error:Stacktrace ->
rabbit_log:warning("Failed to change password for user '~ts': ~tp", [Username, Error]),
?LOG_WARNING("Failed to change password for user '~ts': ~tp", [Username, Error]),
erlang:raise(Class, Error, Stacktrace)
end.
@ -369,10 +370,10 @@ update_user(Username, Password, Tags, Limits, ActingUser) ->
update_user_sans_validation(Tags, Limits) ->
fun(Username, Password, ActingUser) ->
try
rabbit_log:debug("Asked to change password of user '~ts', new password length in bytes: ~tp", [Username, bit_size(Password)]),
?LOG_DEBUG("Asked to change password of user '~ts', new password length in bytes: ~tp", [Username, bit_size(Password)]),
HashingAlgorithm = rabbit_password:hashing_mod(),
rabbit_log:debug("Asked to set user tags for user '~ts' to ~tp", [Username, Tags]),
?LOG_DEBUG("Asked to set user tags for user '~ts' to ~tp", [Username, Tags]),
ConvertedTags = [rabbit_data_coercion:to_atom(I) || I <- Tags],
R = update_user_with_hash(Username,
@ -381,7 +382,7 @@ update_user_sans_validation(Tags, Limits) ->
HashingAlgorithm,
ConvertedTags,
Limits),
rabbit_log:info("Successfully changed password for user '~ts'", [Username]),
?LOG_INFO("Successfully changed password for user '~ts'", [Username]),
rabbit_event:notify(user_password_changed,
[{name, Username},
{user_who_performed_action, ActingUser}]),
@ -390,10 +391,10 @@ update_user_sans_validation(Tags, Limits) ->
R
catch
throw:{error, {no_such_user, _}} = Error ->
rabbit_log:warning("Failed to change password for user '~ts': the user does not exist", [Username]),
?LOG_WARNING("Failed to change password for user '~ts': the user does not exist", [Username]),
throw(Error);
Class:Error:Stacktrace ->
rabbit_log:warning("Failed to change password for user '~ts': ~tp", [Username, Error]),
?LOG_WARNING("Failed to change password for user '~ts': ~tp", [Username, Error]),
erlang:raise(Class, Error, Stacktrace)
end
end.
@ -401,7 +402,7 @@ update_user_sans_validation(Tags, Limits) ->
-spec clear_password(rabbit_types:username(), rabbit_types:username()) -> 'ok'.
clear_password(Username, ActingUser) ->
rabbit_log:info("Clearing password for user '~ts'", [Username]),
?LOG_INFO("Clearing password for user '~ts'", [Username]),
R = change_password_hash(Username, <<"">>),
rabbit_event:notify(user_password_cleared,
[{name, Username},
@ -443,7 +444,7 @@ update_user_with_hash(Username, PasswordHash, HashingAlgorithm, ConvertedTags, L
set_tags(Username, Tags, ActingUser) ->
ConvertedTags = [rabbit_data_coercion:to_atom(I) || I <- Tags],
rabbit_log:debug("Asked to set user tags for user '~ts' to ~tp", [Username, ConvertedTags]),
?LOG_DEBUG("Asked to set user tags for user '~ts' to ~tp", [Username, ConvertedTags]),
try
R = rabbit_db_user:update(Username, fun(User) ->
internal_user:set_tags(User, ConvertedTags)
@ -452,15 +453,15 @@ set_tags(Username, Tags, ActingUser) ->
R
catch
throw:{error, {no_such_user, _}} = Error ->
rabbit_log:warning("Failed to set tags for user '~ts': the user does not exist", [Username]),
?LOG_WARNING("Failed to set tags for user '~ts': the user does not exist", [Username]),
throw(Error);
Class:Error:Stacktrace ->
rabbit_log:warning("Failed to set tags for user '~ts': ~tp", [Username, Error]),
?LOG_WARNING("Failed to set tags for user '~ts': ~tp", [Username, Error]),
erlang:raise(Class, Error, Stacktrace)
end .
notify_user_tags_set(Username, ConvertedTags, ActingUser) ->
rabbit_log:info("Successfully set user tags for user '~ts' to ~tp", [Username, ConvertedTags]),
?LOG_INFO("Successfully set user tags for user '~ts' to ~tp", [Username, ConvertedTags]),
rabbit_event:notify(user_tags_set, [{name, Username}, {tags, ConvertedTags},
{user_who_performed_action, ActingUser}]).
@ -470,7 +471,7 @@ notify_user_tags_set(Username, ConvertedTags, ActingUser) ->
'ok'.
set_permissions(Username, VirtualHost, ConfigurePerm, WritePerm, ReadPerm, ActingUser) ->
rabbit_log:debug("Asked to set permissions for user "
?LOG_DEBUG("Asked to set permissions for user "
"'~ts' in virtual host '~ts' to '~ts', '~ts', '~ts'",
[Username, VirtualHost, ConfigurePerm, WritePerm, ReadPerm]),
_ = lists:map(
@ -479,7 +480,7 @@ set_permissions(Username, VirtualHost, ConfigurePerm, WritePerm, ReadPerm, Actin
case re:compile(Regexp) of
{ok, _} -> ok;
{error, Reason} ->
rabbit_log:warning("Failed to set permissions for user '~ts' in virtual host '~ts': "
?LOG_WARNING("Failed to set permissions for user '~ts' in virtual host '~ts': "
"regular expression '~ts' is invalid",
[Username, VirtualHost, RegexpBin]),
throw({error, {invalid_regexp, Regexp, Reason}})
@ -495,7 +496,7 @@ set_permissions(Username, VirtualHost, ConfigurePerm, WritePerm, ReadPerm, Actin
write = WritePerm,
read = ReadPerm}},
R = rabbit_db_user:set_user_permissions(UserPermission),
rabbit_log:info("Successfully set permissions for user "
?LOG_INFO("Successfully set permissions for user "
"'~ts' in virtual host '~ts' to '~ts', '~ts', '~ts'",
[Username, VirtualHost, ConfigurePerm, WritePerm, ReadPerm]),
rabbit_event:notify(permission_created, [{user, Username},
@ -507,15 +508,15 @@ set_permissions(Username, VirtualHost, ConfigurePerm, WritePerm, ReadPerm, Actin
R
catch
throw:{error, {no_such_vhost, _}} = Error ->
rabbit_log:warning("Failed to set permissions for user '~ts': virtual host '~ts' does not exist",
?LOG_WARNING("Failed to set permissions for user '~ts': virtual host '~ts' does not exist",
[Username, VirtualHost]),
throw(Error);
throw:{error, {no_such_user, _}} = Error ->
rabbit_log:warning("Failed to set permissions for user '~ts': the user does not exist",
?LOG_WARNING("Failed to set permissions for user '~ts': the user does not exist",
[Username]),
throw(Error);
Class:Error:Stacktrace ->
rabbit_log:warning("Failed to set permissions for user '~ts' in virtual host '~ts': ~tp",
?LOG_WARNING("Failed to set permissions for user '~ts' in virtual host '~ts': ~tp",
[Username, VirtualHost, Error]),
erlang:raise(Class, Error, Stacktrace)
end.
@ -524,19 +525,19 @@ set_permissions(Username, VirtualHost, ConfigurePerm, WritePerm, ReadPerm, Actin
(rabbit_types:username(), rabbit_types:vhost(), rabbit_types:username()) -> 'ok'.
clear_permissions(Username, VirtualHost, ActingUser) ->
rabbit_log:debug("Asked to clear permissions for user '~ts' in virtual host '~ts'",
[Username, VirtualHost]),
?LOG_DEBUG("Asked to clear permissions for user '~ts' in virtual host '~ts'",
[Username, VirtualHost]),
try
R = rabbit_db_user:clear_user_permissions(Username, VirtualHost),
rabbit_log:info("Successfully cleared permissions for user '~ts' in virtual host '~ts'",
[Username, VirtualHost]),
?LOG_INFO("Successfully cleared permissions for user '~ts' in virtual host '~ts'",
[Username, VirtualHost]),
rabbit_event:notify(permission_deleted, [{user, Username},
{vhost, VirtualHost},
{user_who_performed_action, ActingUser}]),
R
catch
Class:Error:Stacktrace ->
rabbit_log:warning("Failed to clear permissions for user '~ts' in virtual host '~ts': ~tp",
?LOG_WARNING("Failed to clear permissions for user '~ts' in virtual host '~ts': ~tp",
[Username, VirtualHost, Error]),
erlang:raise(Class, Error, Stacktrace)
end.
@ -577,7 +578,7 @@ set_permissions_globally(Username, ConfigurePerm, WritePerm, ReadPerm, ActingUse
ok.
set_topic_permissions(Username, VirtualHost, Exchange, WritePerm, ReadPerm, ActingUser) ->
rabbit_log:debug("Asked to set topic permissions on exchange '~ts' for "
?LOG_DEBUG("Asked to set topic permissions on exchange '~ts' for "
"user '~ts' in virtual host '~ts' to '~ts', '~ts'",
[Exchange, Username, VirtualHost, WritePerm, ReadPerm]),
WritePermRegex = rabbit_data_coercion:to_binary(WritePerm),
@ -587,7 +588,7 @@ set_topic_permissions(Username, VirtualHost, Exchange, WritePerm, ReadPerm, Acti
case re:compile(RegexpBin) of
{ok, _} -> ok;
{error, Reason} ->
rabbit_log:warning("Failed to set topic permissions on exchange '~ts' for user "
?LOG_WARNING("Failed to set topic permissions on exchange '~ts' for user "
"'~ts' in virtual host '~ts': regular expression '~ts' is invalid",
[Exchange, Username, VirtualHost, RegexpBin]),
throw({error, {invalid_regexp, RegexpBin, Reason}})
@ -607,7 +608,7 @@ set_topic_permissions(Username, VirtualHost, Exchange, WritePerm, ReadPerm, Acti
}
},
R = rabbit_db_user:set_topic_permissions(TopicPermission),
rabbit_log:info("Successfully set topic permissions on exchange '~ts' for "
?LOG_INFO("Successfully set topic permissions on exchange '~ts' for "
"user '~ts' in virtual host '~ts' to '~ts', '~ts'",
[Exchange, Username, VirtualHost, WritePerm, ReadPerm]),
rabbit_event:notify(topic_permission_created, [
@ -620,52 +621,52 @@ set_topic_permissions(Username, VirtualHost, Exchange, WritePerm, ReadPerm, Acti
R
catch
throw:{error, {no_such_vhost, _}} = Error ->
rabbit_log:warning("Failed to set topic permissions on exchange '~ts' for user '~ts': virtual host '~ts' does not exist.",
?LOG_WARNING("Failed to set topic permissions on exchange '~ts' for user '~ts': virtual host '~ts' does not exist.",
[Exchange, Username, VirtualHost]),
throw(Error);
throw:{error, {no_such_user, _}} = Error ->
rabbit_log:warning("Failed to set topic permissions on exchange '~ts' for user '~ts': the user does not exist.",
?LOG_WARNING("Failed to set topic permissions on exchange '~ts' for user '~ts': the user does not exist.",
[Exchange, Username]),
throw(Error);
Class:Error:Stacktrace ->
rabbit_log:warning("Failed to set topic permissions on exchange '~ts' for user '~ts' in virtual host '~ts': ~tp.",
?LOG_WARNING("Failed to set topic permissions on exchange '~ts' for user '~ts' in virtual host '~ts': ~tp.",
[Exchange, Username, VirtualHost, Error]),
erlang:raise(Class, Error, Stacktrace)
end .
clear_topic_permissions(Username, VirtualHost, ActingUser) ->
rabbit_log:debug("Asked to clear topic permissions for user '~ts' in virtual host '~ts'",
[Username, VirtualHost]),
?LOG_DEBUG("Asked to clear topic permissions for user '~ts' in virtual host '~ts'",
[Username, VirtualHost]),
try
R = rabbit_db_user:clear_topic_permissions(Username, VirtualHost, '_'),
rabbit_log:info("Successfully cleared topic permissions for user '~ts' in virtual host '~ts'",
[Username, VirtualHost]),
?LOG_INFO("Successfully cleared topic permissions for user '~ts' in virtual host '~ts'",
[Username, VirtualHost]),
rabbit_event:notify(topic_permission_deleted, [{user, Username},
{vhost, VirtualHost},
{user_who_performed_action, ActingUser}]),
R
catch
Class:Error:Stacktrace ->
rabbit_log:warning("Failed to clear topic permissions for user '~ts' in virtual host '~ts': ~tp",
?LOG_WARNING("Failed to clear topic permissions for user '~ts' in virtual host '~ts': ~tp",
[Username, VirtualHost, Error]),
erlang:raise(Class, Error, Stacktrace)
end.
clear_topic_permissions(Username, VirtualHost, Exchange, ActingUser) ->
rabbit_log:debug("Asked to clear topic permissions on exchange '~ts' for user '~ts' in virtual host '~ts'",
[Exchange, Username, VirtualHost]),
?LOG_DEBUG("Asked to clear topic permissions on exchange '~ts' for user '~ts' in virtual host '~ts'",
[Exchange, Username, VirtualHost]),
try
R = rabbit_db_user:clear_topic_permissions(
Username, VirtualHost, Exchange),
rabbit_log:info("Successfully cleared topic permissions on exchange '~ts' for user '~ts' in virtual host '~ts'",
[Exchange, Username, VirtualHost]),
?LOG_INFO("Successfully cleared topic permissions on exchange '~ts' for user '~ts' in virtual host '~ts'",
[Exchange, Username, VirtualHost]),
rabbit_event:notify(topic_permission_deleted, [{user, Username},
{vhost, VirtualHost},
{user_who_performed_action, ActingUser}]),
R
catch
Class:Error:Stacktrace ->
rabbit_log:warning("Failed to clear topic permissions on exchange '~ts' for user '~ts' in virtual host '~ts': ~tp",
?LOG_WARNING("Failed to clear topic permissions on exchange '~ts' for user '~ts' in virtual host '~ts': ~tp",
[Exchange, Username, VirtualHost, Error]),
erlang:raise(Class, Error, Stacktrace)
end.
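The hunks above all apply the same mechanical change: rabbit_log:* calls become the ?LOG_* macros from kernel/include/logger.hrl, keeping the format string and argument list as they were. A minimal sketch of the resulting shape, in a hypothetical module that is not part of this commit:

-module(logging_pattern_example).
-include_lib("kernel/include/logger.hrl").
-export([delete_thing/1]).

%% Same format-string/argument convention as the old rabbit_log:warning/2,
%% but the macro also records location metadata (module, function, line)
%% at the call site.
delete_thing(Name) ->
    try
        do_delete(Name)
    catch
        Class:Error:Stacktrace ->
            ?LOG_WARNING("Failed to delete '~ts': ~tp", [Name, Error]),
            erlang:raise(Class, Error, Stacktrace)
    end.

do_delete(_Name) ->
    ok.

Later hunks build on this macro form by attaching a logger domain either per call or per process.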

View File

@ -7,6 +7,9 @@
-module(rabbit_autoheal).
-include_lib("kernel/include/logger.hrl").
-export([init/0, enabled/0, maybe_start/1, rabbit_down/2, node_down/2,
handle_msg/3, process_down/2]).
@ -117,7 +120,7 @@ init() ->
ok = application:unset_env(rabbit, ?AUTOHEAL_STATE_AFTER_RESTART),
case State of
{leader_waiting, Winner, _} ->
rabbit_log:info(
?LOG_INFO(
"Autoheal: in progress, requesting report from ~tp", [Winner]),
_ = send(Winner, report_autoheal_status),
ok;
@ -130,7 +133,7 @@ maybe_start(not_healing) ->
case enabled() of
true -> Leader = leader(),
_ = send(Leader, {request_start, node()}),
rabbit_log:info("Autoheal request sent to ~tp", [Leader]),
?LOG_INFO("Autoheal request sent to ~tp", [Leader]),
not_healing;
false -> not_healing
end;
@ -151,7 +154,7 @@ leader() ->
%% This is the winner receiving its last notification that a node has
%% stopped - all nodes can now start again
rabbit_down(Node, {winner_waiting, [Node], Notify}) ->
rabbit_log:info("Autoheal: final node has stopped, starting...",[]),
?LOG_INFO("Autoheal: final node has stopped, starting...",[]),
winner_finish(Notify);
rabbit_down(Node, {winner_waiting, WaitFor, Notify}) ->
@ -174,24 +177,24 @@ node_down(Node, {winner_waiting, _, Notify}) ->
node_down(Node, {leader_waiting, Node, _Notify}) ->
%% The winner went down, we don't know what to do so we simply abort.
rabbit_log:info("Autoheal: aborting - winner ~tp went down", [Node]),
?LOG_INFO("Autoheal: aborting - winner ~tp went down", [Node]),
not_healing;
node_down(Node, {leader_waiting, _, _} = St) ->
%% If it is a partial partition, the winner might continue with the
%% healing process. If it is a full partition, the winner will also
%% see it and abort. Let's wait for it.
rabbit_log:info("Autoheal: ~tp went down, waiting for winner decision ", [Node]),
?LOG_INFO("Autoheal: ~tp went down, waiting for winner decision ", [Node]),
St;
node_down(Node, _State) ->
rabbit_log:info("Autoheal: aborting - ~tp went down", [Node]),
?LOG_INFO("Autoheal: aborting - ~tp went down", [Node]),
not_healing.
%% If the process that has to restart the node crashes for an unexpected reason,
%% we go back to a not healing state so the node is able to recover.
process_down({'EXIT', Pid, Reason}, {restarting, Pid}) when Reason =/= normal ->
rabbit_log:info("Autoheal: aborting - the process responsible for restarting the "
?LOG_INFO("Autoheal: aborting - the process responsible for restarting the "
"node terminated with reason: ~tp", [Reason]),
not_healing;
@ -204,14 +207,14 @@ handle_msg({request_start, _Node}, not_healing, []) ->
not_healing;
handle_msg({request_start, Node},
not_healing, Partitions) ->
rabbit_log:info("Autoheal request received from ~tp", [Node]),
?LOG_INFO("Autoheal request received from ~tp", [Node]),
case check_other_nodes(Partitions) of
{error, E} ->
rabbit_log:info("Autoheal request denied: ~ts", [fmt_error(E)]),
?LOG_INFO("Autoheal request denied: ~ts", [fmt_error(E)]),
not_healing;
{ok, AllPartitions} ->
{Winner, Losers} = make_decision(AllPartitions),
rabbit_log:info("Autoheal decision~n"
?LOG_INFO("Autoheal decision~n"
" * Partitions: ~tp~n"
" * Winner: ~tp~n"
" * Losers: ~tp",
@ -226,13 +229,13 @@ handle_msg({request_start, Node},
handle_msg({request_start, Node},
State, _Partitions) ->
rabbit_log:info("Autoheal request received from ~tp when healing; "
?LOG_INFO("Autoheal request received from ~tp when healing; "
"ignoring", [Node]),
State;
handle_msg({become_winner, Losers},
not_healing, _Partitions) ->
rabbit_log:info("Autoheal: I am the winner, waiting for ~tp to stop",
?LOG_INFO("Autoheal: I am the winner, waiting for ~tp to stop",
[Losers]),
stop_partition(Losers);
@ -240,7 +243,7 @@ handle_msg({become_winner, Losers},
{winner_waiting, _, Losers}, _Partitions) ->
%% The leader has aborted the healing, might have seen us down but
%% we didn't see the same. Let's try again as it is the same partition.
rabbit_log:info("Autoheal: I am the winner and received a duplicated "
?LOG_INFO("Autoheal: I am the winner and received a duplicated "
"request, waiting again for ~tp to stop", [Losers]),
stop_partition(Losers);
@ -248,7 +251,7 @@ handle_msg({become_winner, _},
{winner_waiting, _, Losers}, _Partitions) ->
%% Something has happened to the leader, it might have seen us down but we
%% are still alive. Partitions have changed, cannot continue.
rabbit_log:info("Autoheal: I am the winner and received another healing "
?LOG_INFO("Autoheal: I am the winner and received another healing "
"request, partitions have changed to ~tp. Aborting ", [Losers]),
winner_finish(Losers),
not_healing;
@ -272,7 +275,7 @@ handle_msg({winner_is, Winner}, State = {winner_waiting, _OutstandingStops, _Not
handle_msg(Request, {restarting, Pid} = St, _Partitions) ->
%% ignore, we can contribute no further
rabbit_log:info("Autoheal: Received the request ~tp while waiting for ~tp "
?LOG_INFO("Autoheal: Received the request ~tp while waiting for ~tp "
"to restart the node. Ignoring it ", [Request, Pid]),
St;
@ -295,21 +298,21 @@ handle_msg({autoheal_finished, Winner},
%% The winner is finished with the autoheal process and notified us
%% (the leader). We can transition to the "not_healing" state and
%% accept new requests.
rabbit_log:info("Autoheal finished according to winner ~tp", [Winner]),
?LOG_INFO("Autoheal finished according to winner ~tp", [Winner]),
not_healing;
handle_msg({autoheal_finished, Winner}, not_healing, _Partitions)
when Winner =:= node() ->
%% We are the leader and the winner. The state already transitioned
%% to "not_healing" at the end of the autoheal process.
rabbit_log:info("Autoheal finished according to winner ~tp", [node()]),
?LOG_INFO("Autoheal finished according to winner ~tp", [node()]),
not_healing;
handle_msg({autoheal_finished, Winner}, not_healing, _Partitions) ->
%% We might have seen the winner down during a partial partition and
%% transitioned to not_healing. However, the winner was still able
%% to finish. Let it pass.
rabbit_log:info("Autoheal finished according to winner ~tp."
?LOG_INFO("Autoheal finished according to winner ~tp."
" Unexpected, I might have previously seen the winner down", [Winner]),
not_healing.
@ -318,7 +321,7 @@ handle_msg({autoheal_finished, Winner}, not_healing, _Partitions) ->
send(Node, Msg) -> {?SERVER, Node} ! {autoheal_msg, Msg}.
abort(Down, Notify) ->
rabbit_log:info("Autoheal: aborting - ~tp down", [Down]),
?LOG_INFO("Autoheal: aborting - ~tp down", [Down]),
%% Make sure any nodes waiting for us start - it won't necessarily
%% heal the partition but at least they won't get stuck.
%% If we are executing this, we are not stopping. Thus, don't wait
@ -362,7 +365,7 @@ wait_for_supervisors(Monitors) ->
after
60000 ->
AliveLosers = [Node || {_, Node} <- pmon:monitored(Monitors)],
rabbit_log:info("Autoheal: mnesia in nodes ~tp is still up, sending "
?LOG_INFO("Autoheal: mnesia in nodes ~tp is still up, sending "
"winner notification again to these ", [AliveLosers]),
_ = [send(L, {winner_is, node()}) || L <- AliveLosers],
wait_for_mnesia_shutdown(AliveLosers)
@ -370,7 +373,7 @@ wait_for_supervisors(Monitors) ->
end.
restart_loser(State, Winner) ->
rabbit_log:warning("Autoheal: we were selected to restart; winner is ~tp", [Winner]),
?LOG_WARNING("Autoheal: we were selected to restart; winner is ~tp", [Winner]),
NextStateTimeout = application:get_env(rabbit, autoheal_state_transition_timeout, 60000),
rabbit_node_monitor:run_outside_applications(
fun () ->
@ -382,7 +385,7 @@ restart_loser(State, Winner) ->
autoheal_safe_to_start ->
State
after NextStateTimeout ->
rabbit_log:warning(
?LOG_WARNING(
"Autoheal: timed out waiting for a safe-to-start message from the winner (~tp); will retry",
[Winner]),
not_healing

View File

@ -8,6 +8,7 @@
-module(rabbit_binding).
-include_lib("rabbit_common/include/rabbit.hrl").
-include("amqqueue.hrl").
-include_lib("kernel/include/logger.hrl").
-export([recover/0, recover/2, exists/1, add/2, add/3, remove/2, remove/3]).
-export([list/1, list_for_source/1, list_for_destination/1,
@ -117,7 +118,7 @@ recover_semi_durable_route(Gatherer, Binding, Src, Dst, ToRecover, Fun) ->
gatherer:finish(Gatherer)
end);
{error, not_found}=Error ->
rabbit_log:warning(
?LOG_WARNING(
"expected exchange ~tp to exist during recovery, "
"error: ~tp", [Src, Error]),
ok


View File

run_boot_steps(Apps) ->
[begin
rabbit_log:info("Running boot step ~ts defined by app ~ts", [Step, App]),
?LOG_INFO("Running boot step ~ts defined by app ~ts", [Step, App]),
ok = run_step(Attrs, mfa)
end || {App, Step, Attrs} <- find_steps(Apps)],
ok.
@ -47,12 +47,12 @@ find_steps(Apps) ->
[Step || {App, _, _} = Step <- All, lists:member(App, Apps)].
run_step(Attributes, AttributeName) ->
[begin
rabbit_log:debug("Applying MFA: M = ~ts, F = ~ts, A = ~tp",
_ = [begin
?LOG_DEBUG("Applying MFA: M = ~ts, F = ~ts, A = ~tp",
[M, F, A]),
case apply(M,F,A) of
ok ->
rabbit_log:debug("Finished MFA: M = ~ts, F = ~ts, A = ~tp",
?LOG_DEBUG("Finished MFA: M = ~ts, F = ~ts, A = ~tp",
[M, F, A]);
{error, Reason} -> exit({error, Reason})
end

View File

@ -41,6 +41,8 @@
-include_lib("rabbit_common/include/rabbit_framing.hrl").
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("rabbit_common/include/rabbit_misc.hrl").
-include_lib("rabbit_common/include/logging.hrl").
-include_lib("kernel/include/logger.hrl").
-include("amqqueue.hrl").
@ -360,7 +362,7 @@ info(Pid) ->
end
catch
exit:{timeout, _} ->
rabbit_log:error("Timed out getting channel ~tp info", [Pid]),
?LOG_ERROR("Timed out getting channel ~tp info", [Pid]),
throw(timeout)
end.
@ -375,7 +377,7 @@ info(Pid, Items) ->
end
catch
exit:{timeout, _} ->
rabbit_log:error("Timed out getting channel ~tp info", [Pid]),
?LOG_ERROR("Timed out getting channel ~tp info", [Pid]),
throw(timeout)
end.
@ -411,9 +413,9 @@ refresh_config_local() ->
try
gen_server2:call(C, refresh_config, infinity)
catch _:Reason ->
rabbit_log:error("Failed to refresh channel config "
"for channel ~tp. Reason ~tp",
[C, Reason])
?LOG_ERROR("Failed to refresh channel config "
"for channel ~tp. Reason ~tp",
[C, Reason])
end
end,
list_local()),
@ -425,9 +427,9 @@ refresh_interceptors() ->
try
gen_server2:call(C, refresh_interceptors, ?REFRESH_TIMEOUT)
catch _:Reason ->
rabbit_log:error("Failed to refresh channel interceptors "
"for channel ~tp. Reason ~tp",
[C, Reason])
?LOG_ERROR("Failed to refresh channel interceptors "
"for channel ~tp. Reason ~tp",
[C, Reason])
end
end,
list_local()),
@ -465,6 +467,7 @@ init([Channel, ReaderPid, WriterPid, ConnPid, ConnName, Protocol, User, VHost,
Capabilities, CollectorPid, LimiterPid, AmqpParams]) ->
process_flag(trap_exit, true),
rabbit_process_flag:adjust_for_message_handling_proc(),
logger:set_process_metadata(#{domain => ?RMQLOG_DOMAIN_CHAN}),
?LG_PROCESS_TYPE(channel),
?store_proc_name({ConnName, Channel}),
@ -643,7 +646,7 @@ handle_cast(terminate, State = #ch{cfg = #conf{writer_pid = WriterPid}}) ->
ok = rabbit_writer:flush(WriterPid)
catch
_Class:Reason ->
rabbit_log:debug("Failed to flush pending writes on a terminating connection, reason: ~tp", [Reason])
?LOG_DEBUG("Failed to flush pending writes on a terminating connection, reason: ~tp", [Reason])
end,
{stop, normal, State};
@ -749,8 +752,8 @@ handle_info({'EXIT', _Pid, Reason}, State) ->
handle_info({{Ref, Node}, LateAnswer},
State = #ch{cfg = #conf{channel = Channel}})
when is_reference(Ref) ->
rabbit_log_channel:warning("Channel ~tp ignoring late answer ~tp from ~tp",
[Channel, LateAnswer, Node]),
?LOG_WARNING("Channel ~tp ignoring late answer ~tp from ~tp",
[Channel, LateAnswer, Node]),
noreply(State);
handle_info(tick, State0 = #ch{queue_states = QueueStates0}) ->
@ -805,7 +808,7 @@ terminate(_Reason,
case rabbit_confirms:size(State#ch.unconfirmed) of
0 -> ok;
NumConfirms ->
rabbit_log:warning("Channel is stopping with ~b pending publisher confirms",
?LOG_WARNING("Channel is stopping with ~b pending publisher confirms",
[NumConfirms])
end.
@ -866,7 +869,7 @@ handle_exception(Reason, State = #ch{cfg = #conf{protocol = Protocol,
{_Result, State1} = notify_queues(State),
case rabbit_binary_generator:map_exception(Channel, Reason, Protocol) of
{Channel, CloseMethod} ->
rabbit_log_channel:error(
?LOG_ERROR(
"Channel error on connection ~tp (~ts, vhost: '~ts',"
" user: '~ts'), channel ~tp:~n~ts",
[ConnPid, ConnName, VHost, User#user.username,
@ -2719,13 +2722,13 @@ evaluate_consumer_timeout1(PA = #pending_ack{delivered_at = Time},
handle_consumer_timed_out(Timeout,#pending_ack{delivery_tag = DeliveryTag, tag = ConsumerTag, queue = QName},
State = #ch{cfg = #conf{channel = Channel}}) ->
rabbit_log_channel:warning("Consumer '~ts' on channel ~w and ~ts has timed out "
"waiting for a consumer acknowledgement of a delivery with delivery tag = ~b. Timeout used: ~tp ms. "
"This timeout value can be configured, see consumers doc guide to learn more",
[ConsumerTag,
Channel,
rabbit_misc:rs(QName),
DeliveryTag, Timeout]),
?LOG_WARNING("Consumer '~ts' on channel ~w and ~ts has timed out "
"waiting for a consumer acknowledgement of a delivery with delivery tag = ~b. Timeout used: ~tp ms. "
"This timeout value can be configured, see consumers doc guide to learn more",
[ConsumerTag,
Channel,
rabbit_misc:rs(QName),
DeliveryTag, Timeout]),
Ex = rabbit_misc:amqp_error(precondition_failed,
"delivery acknowledgement on channel ~w timed out. "
"Timeout value used: ~tp ms. "

View File

@ -34,6 +34,9 @@
-export([count_local_tracked_items_of_user/1]).
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-include_lib("rabbit_common/include/logging.hrl").
-import(rabbit_misc, [pget/2]).
@ -65,11 +68,11 @@ handle_cast({channel_created, Details}) ->
error:{no_exists, _} ->
Msg = "Could not register channel ~tp for tracking, "
"its table is not ready yet or the channel terminated prematurely",
rabbit_log_connection:warning(Msg, [TrackedChId]),
?LOG_WARNING(Msg, [TrackedChId], #{domain => ?RMQLOG_DOMAIN_CHAN}),
ok;
error:Err ->
Msg = "Could not register channel ~tp for tracking: ~tp",
rabbit_log_connection:warning(Msg, [TrackedChId, Err]),
?LOG_WARNING(Msg, [TrackedChId, Err], #{domain => ?RMQLOG_DOMAIN_CHAN}),
ok
end;
_OtherNode ->
@ -88,9 +91,10 @@ handle_cast({connection_closed, ConnDetails}) ->
[] ->
ok;
TrackedChs ->
rabbit_log_channel:debug(
?LOG_DEBUG(
"Closing ~b channel(s) because connection '~ts' has been closed",
[length(TrackedChs), pget(name, ConnDetails)]),
[length(TrackedChs), pget(name, ConnDetails)],
#{domain => ?RMQLOG_DOMAIN_CHAN}),
%% Shutting down channels will take care of unregistering the
%% corresponding tracking.
shutdown_tracked_items(TrackedChs, undefined),
@ -214,14 +218,14 @@ ensure_tracked_tables_for_this_node() ->
%% Create tables
ensure_tracked_channels_table_for_this_node() ->
rabbit_log:info("Setting up a table for channel tracking on this node: ~tp",
[?TRACKED_CHANNEL_TABLE]),
?LOG_INFO("Setting up a table for channel tracking on this node: ~tp",
[?TRACKED_CHANNEL_TABLE]),
ets:new(?TRACKED_CHANNEL_TABLE, [named_table, public, {write_concurrency, true},
{keypos, #tracked_channel.pid}]).
ensure_per_user_tracked_channels_table_for_this_node() ->
rabbit_log:info("Setting up a table for channel tracking on this node: ~tp",
[?TRACKED_CHANNEL_TABLE_PER_USER]),
?LOG_INFO("Setting up a table for channel tracking on this node: ~tp",
[?TRACKED_CHANNEL_TABLE_PER_USER]),
ets:new(?TRACKED_CHANNEL_TABLE_PER_USER, [named_table, public, {write_concurrency, true}]).
get_tracked_channels_by_connection_pid(ConnPid) ->
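Where the old rabbit_log_channel/rabbit_log_connection wrappers implied a log category, the hunks above pass the domain explicitly as per-call metadata, the macros' optional third argument. A small sketch of that form, in a hypothetical module, with a literal domain standing in for the ?RMQLOG_DOMAIN_* values defined in rabbit_common/include/logging.hrl:

-module(domain_metadata_example).
-include_lib("kernel/include/logger.hrl").
-export([warn_unregistered/1]).

warn_unregistered(ChannelId) ->
    %% Format string, argument list, then a metadata map; the domain lets
    %% logger filters route or drop these messages without parsing the text.
    ?LOG_WARNING("Could not register channel ~tp for tracking",
                 [ChannelId],
                 #{domain => [rabbitmq, channel]}).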

View File

@ -4,6 +4,7 @@
-include("amqqueue.hrl").
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
%% TODO possible to use sets / maps instead of lists?
%% Check performance with QoS 1 and 1 million target queues.
@ -177,13 +178,13 @@ delete(Q0, IfUnused, IfEmpty, ActingUser) when ?amqqueue_is_classic(Q0) ->
#resource{name = Name, virtual_host = Vhost} = QName,
case IfEmpty of
true ->
rabbit_log:error("Queue ~ts in vhost ~ts is down. "
"The queue may be non-empty. "
"Refusing to force-delete.",
[Name, Vhost]),
?LOG_ERROR("Queue ~ts in vhost ~ts is down. "
"The queue may be non-empty. "
"Refusing to force-delete.",
[Name, Vhost]),
{error, not_empty};
false ->
rabbit_log:warning("Queue ~ts in vhost ~ts is down. "
?LOG_WARNING("Queue ~ts in vhost ~ts is down. "
"Forcing queue deletion.",
[Name, Vhost]),
case delete_crashed_internal(Q, ActingUser) of
@ -219,7 +220,7 @@ recover(VHost, Queues) ->
FailedQs = find_missing_queues(Queues,RecoveredQs),
{RecoveredQs, FailedQs};
{error, Reason} ->
rabbit_log:error("Failed to start queue supervisor for vhost '~ts': ~ts", [VHost, Reason]),
?LOG_ERROR("Failed to start queue supervisor for vhost '~ts': ~ts", [VHost, Reason]),
throw({error, Reason})
end.
@ -588,8 +589,8 @@ recover_durable_queues(QueuesAndRecoveryTerms) ->
gen_server2:mcall(
[{rabbit_amqqueue_sup_sup:start_queue_process(node(), Q),
{init, {self(), Terms}}} || {Q, Terms} <- QueuesAndRecoveryTerms]),
[rabbit_log:error("Queue ~tp failed to initialise: ~tp",
[Pid, Error]) || {Pid, Error} <- Failures],
_ = [?LOG_ERROR("Queue ~tp failed to initialise: ~tp",
[Pid, Error]) || {Pid, Error} <- Failures],
[Q || {_, {new, Q}} <- Results].
capabilities() ->

View File

@ -42,9 +42,10 @@
-define(ENTRY_SIZE, 32). %% bytes
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
%% Set to true to get an awful lot of debug logs.
-if(false).
-define(DEBUG(X,Y), logger:debug("~0p: " ++ X, [?FUNCTION_NAME|Y])).
-define(DEBUG(X,Y), ?LOG_DEBUG("~0p: " ++ X, [?FUNCTION_NAME|Y])).
-else.
-define(DEBUG(X,Y), _ = X, _ = Y, ok).
-endif.
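The ?DEBUG macro above keeps its compile-time switch; only its active branch changes from logger:debug/2 to ?LOG_DEBUG/2. A standalone sketch of the same toggle in a hypothetical module (the inactive branch is wrapped in begin/end here, a minor variation on the definition in the diff):

-module(debug_toggle_example).
-include_lib("kernel/include/logger.hrl").
-export([lookup/1]).

%% Flip to -if(true). for very verbose local debugging; with false the call
%% sites reduce to ok while still "using" their arguments, which avoids
%% unused-variable warnings.
-if(false).
-define(DEBUG(Fmt, Args), ?LOG_DEBUG("~0p: " ++ Fmt, [?FUNCTION_NAME|Args])).
-else.
-define(DEBUG(Fmt, Args), begin _ = Fmt, _ = Args, ok end).
-endif.

lookup(Key) ->
    ?DEBUG("looking up ~tp", [Key]),
    {ok, Key}.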
@ -255,7 +256,7 @@ recover(#resource{ virtual_host = VHost, name = QueueName } = Name, Terms,
State = recover_segments(State0, Terms, IsMsgStoreClean,
ContainsCheckFun, OnSyncFun, OnSyncMsgFun,
CountersRef, Context),
rabbit_log:warning("Queue ~ts in vhost ~ts dropped ~b/~b/~b persistent messages "
?LOG_WARNING("Queue ~ts in vhost ~ts dropped ~b/~b/~b persistent messages "
"and ~b transient messages after unclean shutdown",
[QueueName, VHost,
counters:get(CountersRef, ?RECOVER_DROPPED_PERSISTENT_PER_VHOST),
@ -329,7 +330,7 @@ recover_segments(State0, ContainsCheckFun, StoreState0, CountersRef, [Segment|Ta
%% File was either empty or the header was invalid.
%% We cannot recover this file.
_ ->
rabbit_log:warning("Deleting invalid v2 segment file ~ts (file has invalid header)",
?LOG_WARNING("Deleting invalid v2 segment file ~ts (file has invalid header)",
[SegmentFile]),
ok = file:close(Fd),
_ = prim_file:delete(SegmentFile),
@ -436,7 +437,7 @@ recover_segment(State, ContainsCheckFun, StoreState0, CountersRef, Fd,
recover_index_v1_clean(State0 = #qi{ queue_name = Name }, Terms, IsMsgStoreClean,
ContainsCheckFun, OnSyncFun, OnSyncMsgFun) ->
#resource{virtual_host = VHost, name = QName} = Name,
rabbit_log:info("Converting queue ~ts in vhost ~ts from v1 to v2 after clean shutdown", [QName, VHost]),
?LOG_INFO("Converting queue ~ts in vhost ~ts from v1 to v2 after clean shutdown", [QName, VHost]),
{_, _, V1State} = rabbit_queue_index:recover(Name, Terms, IsMsgStoreClean,
ContainsCheckFun, OnSyncFun, OnSyncMsgFun,
convert),
@ -445,15 +446,15 @@ recover_index_v1_clean(State0 = #qi{ queue_name = Name }, Terms, IsMsgStoreClean
%% share code with dirty recovery.
CountersRef = counters:new(?RECOVER_COUNTER_SIZE, []),
State = recover_index_v1_common(State0, V1State, CountersRef),
rabbit_log:info("Queue ~ts in vhost ~ts converted ~b total messages from v1 to v2",
[QName, VHost, counters:get(CountersRef, ?RECOVER_COUNT)]),
?LOG_INFO("Queue ~ts in vhost ~ts converted ~b total messages from v1 to v2",
[QName, VHost, counters:get(CountersRef, ?RECOVER_COUNT)]),
State.
recover_index_v1_dirty(State0 = #qi{ queue_name = Name }, Terms, IsMsgStoreClean,
ContainsCheckFun, OnSyncFun, OnSyncMsgFun,
CountersRef) ->
#resource{virtual_host = VHost, name = QName} = Name,
rabbit_log:info("Converting queue ~ts in vhost ~ts from v1 to v2 after unclean shutdown", [QName, VHost]),
?LOG_INFO("Converting queue ~ts in vhost ~ts from v1 to v2 after unclean shutdown", [QName, VHost]),
%% We ignore the count and bytes returned here because we cannot trust
%% rabbit_queue_index: it has a bug that may lead to more bytes being
%% returned than it really has.
@ -464,8 +465,8 @@ recover_index_v1_dirty(State0 = #qi{ queue_name = Name }, Terms, IsMsgStoreClean
ContainsCheckFun, OnSyncFun, OnSyncMsgFun,
convert),
State = recover_index_v1_common(State0, V1State, CountersRef),
rabbit_log:info("Queue ~ts in vhost ~ts converted ~b total messages from v1 to v2",
[QName, VHost, counters:get(CountersRef, ?RECOVER_COUNT)]),
?LOG_INFO("Queue ~ts in vhost ~ts converted ~b total messages from v1 to v2",
[QName, VHost, counters:get(CountersRef, ?RECOVER_COUNT)]),
State.
%% At this point all messages are persistent because transient messages

View File

@ -56,10 +56,11 @@
-define(ENTRY_HEADER_SIZE, 8). %% bytes
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
%% Set to true to get an awful lot of debug logs.
-if(false).
-define(DEBUG(X,Y), logger:debug("~0p: " ++ X, [?FUNCTION_NAME|Y])).
-define(DEBUG(X,Y), ?LOG_DEBUG("~0p: " ++ X, [?FUNCTION_NAME|Y])).
-else.
-define(DEBUG(X,Y), _ = X, _ = Y, ok).
-endif.
@ -317,8 +318,8 @@ read_from_disk(SeqId, {?MODULE, Offset, Size}, State0) ->
CRC32Expected = <<CRC32:16>>,
ok
catch C:E:S ->
rabbit_log:error("Per-queue store CRC32 check failed in ~ts seq id ~b offset ~b size ~b",
[segment_file(Segment, State), SeqId, Offset, Size]),
?LOG_ERROR("Per-queue store CRC32 check failed in ~ts seq id ~b offset ~b size ~b",
[segment_file(Segment, State), SeqId, Offset, Size]),
erlang:raise(C, E, S)
end
end,
@ -415,8 +416,8 @@ parse_many_from_disk([<<Size:32/unsigned, _:7, UseCRC32:1, CRC32Expected:16/bits
CRC32Expected = <<CRC32:16>>,
ok
catch C:E:S ->
rabbit_log:error("Per-queue store CRC32 check failed in ~ts",
[segment_file(Segment, State)]),
?LOG_ERROR("Per-queue store CRC32 check failed in ~ts",
[segment_file(Segment, State)]),
erlang:raise(C, E, S)
end
end,

View File

@ -41,6 +41,7 @@
count_local_tracked_items_of_user/1]).
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-import(rabbit_misc, [pget/2]).
@ -77,11 +78,11 @@ handle_cast({connection_created, Details}) ->
error:{no_exists, _} ->
Msg = "Could not register connection ~tp for tracking, "
"its table is not ready yet or the connection terminated prematurely",
rabbit_log_connection:warning(Msg, [ConnId]),
?LOG_WARNING(Msg, [ConnId]),
ok;
error:Err ->
Msg = "Could not register connection ~tp for tracking: ~tp",
rabbit_log_connection:warning(Msg, [ConnId, Err]),
?LOG_WARNING(Msg, [ConnId, Err]),
ok
end;
_OtherNode ->
@ -106,7 +107,7 @@ handle_cast({vhost_deleted, Details}) ->
%% Schedule vhost entry deletion, allowing time for connections to close
_ = timer:apply_after(?TRACKING_EXECUTION_TIMEOUT, ?MODULE,
delete_tracked_connection_vhost_entry, [VHost]),
rabbit_log_connection:info("Closing all connections in vhost '~ts' because it's being deleted", [VHost]),
?LOG_INFO("Closing all connections in vhost '~ts' because it's being deleted", [VHost]),
shutdown_tracked_items(
list(VHost),
rabbit_misc:format("vhost '~ts' is deleted", [VHost]));
@ -116,9 +117,9 @@ handle_cast({vhost_deleted, Details}) ->
handle_cast({vhost_down, Details}) ->
VHost = pget(name, Details),
Node = pget(node, Details),
rabbit_log_connection:info("Closing all connections in vhost '~ts' on node '~ts'"
" because the vhost is stopping",
[VHost, Node]),
?LOG_INFO("Closing all connections in vhost '~ts' on node '~ts'"
" because the vhost is stopping",
[VHost, Node]),
shutdown_tracked_items(
list_on_node(Node, VHost),
rabbit_misc:format("vhost '~ts' is down", [VHost]));
@ -127,7 +128,7 @@ handle_cast({user_deleted, Details}) ->
%% Schedule user entry deletion, allowing time for connections to close
_ = timer:apply_after(?TRACKING_EXECUTION_TIMEOUT, ?MODULE,
delete_tracked_connection_user_entry, [Username]),
rabbit_log_connection:info("Closing all connections for user '~ts' because the user is being deleted", [Username]),
?LOG_INFO("Closing all connections for user '~ts' because the user is being deleted", [Username]),
shutdown_tracked_items(
list_of_user(Username),
rabbit_misc:format("user '~ts' is deleted", [Username])).
@ -189,18 +190,18 @@ ensure_tracked_tables_for_this_node() ->
ensure_tracked_connections_table_for_this_node() ->
_ = ets:new(?TRACKED_CONNECTION_TABLE, [named_table, public, {write_concurrency, true},
{keypos, #tracked_connection.id}]),
rabbit_log:info("Setting up a table for connection tracking on this node: ~tp",
[?TRACKED_CONNECTION_TABLE]).
?LOG_INFO("Setting up a table for connection tracking on this node: ~tp",
[?TRACKED_CONNECTION_TABLE]).
ensure_per_vhost_tracked_connections_table_for_this_node() ->
rabbit_log:info("Setting up a table for per-vhost connection counting on this node: ~tp",
[?TRACKED_CONNECTION_TABLE_PER_VHOST]),
?LOG_INFO("Setting up a table for per-vhost connection counting on this node: ~tp",
[?TRACKED_CONNECTION_TABLE_PER_VHOST]),
ets:new(?TRACKED_CONNECTION_TABLE_PER_VHOST, [named_table, public, {write_concurrency, true}]).
ensure_per_user_tracked_connections_table_for_this_node() ->
_ = ets:new(?TRACKED_CONNECTION_TABLE_PER_USER, [named_table, public, {write_concurrency, true}]),
rabbit_log:info("Setting up a table for per-user connection counting on this node: ~tp",
[?TRACKED_CONNECTION_TABLE_PER_USER]).
?LOG_INFO("Setting up a table for per-user connection counting on this node: ~tp",
[?TRACKED_CONNECTION_TABLE_PER_USER]).
-spec tracked_connection_table_name_for(node()) -> atom().
@ -420,7 +421,7 @@ close_connection(#tracked_connection{pid = Pid, type = network}, Message) ->
ok;
_:Err ->
%% ignore, don't terminate
rabbit_log:warning("Could not close connection ~tp: ~tp", [Pid, Err]),
?LOG_WARNING("Could not close connection ~tp: ~tp", [Pid, Err]),
ok
end;
close_connection(#tracked_connection{pid = Pid, type = direct}, Message) ->

View File

@ -22,6 +22,7 @@
-export([close_connections/3]).
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("rabbit_common/include/logging.hrl").
-rabbit_boot_step({?MODULE,
[{description, "connection tracking event handler"},
@ -37,6 +38,7 @@
%%
init([]) ->
logger:set_process_metadata(#{domain => ?RMQLOG_DOMAIN_CONN}),
{ok, []}.
handle_event(#event{type = connection_created, props = Details}, State) ->
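Instead of repeating the domain on every call, the handler above sets it once in init/1 via logger:set_process_metadata/1, so every later ?LOG_* call from that process inherits it. A sketch of the same idea in a hypothetical gen_server:

-module(process_domain_example).
-behaviour(gen_server).
-include_lib("kernel/include/logger.hrl").
-export([start_link/0, init/1, handle_call/3, handle_cast/2]).

start_link() ->
    gen_server:start_link(?MODULE, [], []).

init([]) ->
    %% A literal domain stands in for ?RMQLOG_DOMAIN_CONN; every ?LOG_* call
    %% made by this process now carries it automatically.
    logger:set_process_metadata(#{domain => [rabbitmq, connection]}),
    {ok, #{}}.

handle_call(ping, _From, State) ->
    ?LOG_INFO("received ping"),
    {reply, pong, State}.

handle_cast(_Msg, State) ->
    {noreply, State}.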

View File

@ -223,7 +223,7 @@ join(RemoteNode, NodeType)
%% as RemoteNode thinks this node is already in the cluster.
%% Attempt to leave the RemoteNode cluster, the discovery cluster,
%% and simply retry the operation.
rabbit_log:info("Mnesia: node ~tp thinks it's clustered "
?LOG_INFO("Mnesia: node ~tp thinks it's clustered "
"with node ~tp, but ~tp disagrees. ~tp will ask "
"to leave the cluster and try again.",
[RemoteNode, node(), node(), node()]),

View File

@ -11,6 +11,7 @@
-include("mirrored_supervisor.hrl").
-include("include/rabbit_khepri.hrl").
-include_lib("kernel/include/logger.hrl").
-export([
create_tables/0,
@ -96,7 +97,7 @@ create_or_update_in_mnesia(Group, Overall, Delegate, ChildSpec, Id) ->
rabbit_mnesia:execute_mnesia_transaction(
fun() ->
ReadResult = mnesia:wread({?TABLE, {Group, Id}}),
rabbit_log:debug("Mirrored supervisor: check_start table ~ts read for key ~tp returned ~tp",
?LOG_DEBUG("Mirrored supervisor: check_start table ~ts read for key ~tp returned ~tp",
[?TABLE, {Group, Id}, ReadResult]),
case ReadResult of
[] -> _ = write_in_mnesia(Group, Overall, ChildSpec, Id),
@ -105,12 +106,12 @@ create_or_update_in_mnesia(Group, Overall, Delegate, ChildSpec, Id) ->
mirroring_pid = Pid} = S,
case Overall of
Pid ->
rabbit_log:debug("Mirrored supervisor: overall matched mirrored pid ~tp", [Pid]),
?LOG_DEBUG("Mirrored supervisor: overall matched mirrored pid ~tp", [Pid]),
Delegate;
_ ->
rabbit_log:debug("Mirrored supervisor: overall ~tp did not match mirrored pid ~tp", [Overall, Pid]),
?LOG_DEBUG("Mirrored supervisor: overall ~tp did not match mirrored pid ~tp", [Overall, Pid]),
Sup = mirrored_supervisor:supervisor(Pid),
rabbit_log:debug("Mirrored supervisor: supervisor(~tp) returned ~tp", [Pid, Sup]),
?LOG_DEBUG("Mirrored supervisor: supervisor(~tp) returned ~tp", [Pid, Sup]),
case Sup of
dead ->
_ = write_in_mnesia(Group, Overall, ChildSpec, Id),

View File

@ -14,6 +14,7 @@
-include("amqqueue.hrl").
-include("include/rabbit_khepri.hrl").
-include_lib("kernel/include/logger.hrl").
-export([
get/1,
@ -341,7 +342,7 @@ count(VHostName) ->
try
list_for_count(VHostName)
catch _:Err ->
rabbit_log:error("Failed to fetch number of queues in vhost ~p:~n~p",
?LOG_ERROR("Failed to fetch number of queues in vhost ~p:~n~p",
[VHostName, Err]),
0
end.

View File

@ -13,6 +13,7 @@
-include("include/rabbit_khepri.hrl").
-include("vhost.hrl").
-include_lib("kernel/include/logger.hrl").
-export([create_or_get/3,
merge_metadata/2,
@ -102,7 +103,7 @@ create_or_get_in_mnesia_tx(VHostName, VHost) ->
create_or_get_in_khepri(VHostName, VHost) ->
Path = khepri_vhost_path(VHostName),
rabbit_log:debug("Inserting a virtual host record ~tp", [VHost]),
?LOG_DEBUG("Inserting a virtual host record ~tp", [VHost]),
case rabbit_khepri:create(Path, VHost) of
ok ->
{new, VHost};
@ -137,7 +138,7 @@ merge_metadata(VHostName, Metadata)
when is_binary(VHostName) andalso is_map(Metadata) ->
case do_merge_metadata(VHostName, Metadata) of
{ok, VHost} when ?is_vhost(VHost) ->
rabbit_log:debug("Updated a virtual host record ~tp", [VHost]),
?LOG_DEBUG("Updated a virtual host record ~tp", [VHost]),
{ok, VHost};
{error, _} = Error ->
Error
@ -169,7 +170,7 @@ merge_metadata_in_khepri(VHostName, Metadata) ->
case Ret1 of
{ok, #{Path := #{data := VHost0, payload_version := DVersion}}} ->
VHost = vhost:merge_metadata(VHost0, Metadata),
rabbit_log:debug("Updating a virtual host record ~p", [VHost]),
?LOG_DEBUG("Updating a virtual host record ~p", [VHost]),
Path1 = khepri_path:combine_with_conditions(
Path, [#if_payload_version{version = DVersion}]),
Ret2 = rabbit_khepri:put(Path1, VHost),
@ -240,7 +241,7 @@ enable_protection_from_deletion(VHostName) ->
MetadataPatch = #{
protected_from_deletion => true
},
rabbit_log:info("Enabling deletion protection for virtual host '~ts'", [VHostName]),
?LOG_INFO("Enabling deletion protection for virtual host '~ts'", [VHostName]),
merge_metadata(VHostName, MetadataPatch).
-spec disable_protection_from_deletion(VHostName) -> Ret when
@ -253,7 +254,7 @@ disable_protection_from_deletion(VHostName) ->
MetadataPatch = #{
protected_from_deletion => false
},
rabbit_log:info("Disabling deletion protection for virtual host '~ts'", [VHostName]),
?LOG_INFO("Disabling deletion protection for virtual host '~ts'", [VHostName]),
merge_metadata(VHostName, MetadataPatch).
%% -------------------------------------------------------------------

View File

@ -7,6 +7,9 @@
-module(rabbit_db_vhost_defaults).
-include_lib("kernel/include/logger.hrl").
-export([apply/2]).
-export([list_limits/1, list_operator_policies/1, list_users/1]).
@ -36,20 +39,20 @@ apply(VHost, ActingUser) ->
ok;
L ->
ok = rabbit_vhost_limit:set(VHost, L, ActingUser),
rabbit_log:info("Applied default limits to vhost '~tp': ~tp", [VHost, L])
?LOG_INFO("Applied default limits to vhost '~tp': ~tp", [VHost, L])
end,
lists:foreach(
fun(P) ->
ok = rabbit_policy:set_op(VHost, P#seeding_policy.name, P#seeding_policy.queue_pattern, P#seeding_policy.definition,
undefined, undefined, ActingUser),
rabbit_log:info("Applied default operator policy to vhost '~tp': ~tp", [VHost, P])
?LOG_INFO("Applied default operator policy to vhost '~tp': ~tp", [VHost, P])
end,
list_operator_policies(VHost)
),
lists:foreach(
fun(U) ->
ok = add_user(VHost, U, ActingUser),
rabbit_log:info("Added default user to vhost '~tp': ~tp", [VHost, maps:remove(password, U)])
?LOG_INFO("Added default user to vhost '~tp': ~tp", [VHost, maps:remove(password, U)])
end,
list_users(VHost)
),

View File

@ -12,6 +12,7 @@
detect_cycles/3]).
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
%%----------------------------------------------------------------------------
@ -74,7 +75,7 @@ log_cycle_once(Cycle) ->
true ->
ok;
undefined ->
rabbit_log:warning(
?LOG_WARNING(
"Message dropped because the following list of queues (ordered by "
"death recency) contains a dead letter cycle without reason 'rejected'. "
"This list will not be logged again: ~tp",

View File

@ -30,6 +30,7 @@
%% * rabbit_definitions_hashing
-module(rabbit_definitions).
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-export([boot/0]).
%% automatic import on boot
@ -177,7 +178,7 @@ validate_definitions(Body) when is_binary(Body) ->
-spec import_raw(Body :: binary() | iolist()) -> ok | {error, term()}.
import_raw(Body) ->
rabbit_log:info("Asked to import definitions. Acting user: ~ts", [?INTERNAL_USER]),
?LOG_INFO("Asked to import definitions. Acting user: ~ts", [?INTERNAL_USER]),
case decode([], Body) of
{error, E} -> {error, E};
{ok, _, Map} -> apply_defs(Map, ?INTERNAL_USER)
@ -185,7 +186,7 @@ import_raw(Body) ->
-spec import_raw(Body :: binary() | iolist(), VHost :: vhost:name()) -> ok | {error, term()}.
import_raw(Body, VHost) ->
rabbit_log:info("Asked to import definitions. Acting user: ~ts", [?INTERNAL_USER]),
?LOG_INFO("Asked to import definitions. Acting user: ~ts", [?INTERNAL_USER]),
case decode([], Body) of
{error, E} -> {error, E};
{ok, _, Map} -> apply_defs(Map, ?INTERNAL_USER, fun() -> ok end, VHost)
@ -195,7 +196,7 @@ import_raw(Body, VHost) ->
import_parsed(Body0) when is_list(Body0) ->
import_parsed(maps:from_list(Body0));
import_parsed(Body0) when is_map(Body0) ->
rabbit_log:info("Asked to import definitions. Acting user: ~ts", [?INTERNAL_USER]),
?LOG_INFO("Asked to import definitions. Acting user: ~ts", [?INTERNAL_USER]),
Body = atomise_map_keys(Body0),
apply_defs(Body, ?INTERNAL_USER).
@ -203,7 +204,7 @@ import_parsed(Body0) when is_map(Body0) ->
import_parsed(Body0, VHost) when is_list(Body0) ->
import_parsed(maps:from_list(Body0), VHost);
import_parsed(Body0, VHost) ->
rabbit_log:info("Asked to import definitions. Acting user: ~ts", [?INTERNAL_USER]),
?LOG_INFO("Asked to import definitions. Acting user: ~ts", [?INTERNAL_USER]),
Body = atomise_map_keys(Body0),
apply_defs(Body, ?INTERNAL_USER, fun() -> ok end, VHost).
@ -212,7 +213,7 @@ import_parsed(Body0, VHost) ->
import_parsed_with_hashing(Body0) when is_list(Body0) ->
import_parsed(maps:from_list(Body0));
import_parsed_with_hashing(Body0) when is_map(Body0) ->
rabbit_log:info("Asked to import definitions. Acting user: ~ts", [?INTERNAL_USER]),
?LOG_INFO("Asked to import definitions. Acting user: ~ts", [?INTERNAL_USER]),
case should_skip_if_unchanged() of
false ->
import_parsed(Body0);
@ -222,10 +223,10 @@ import_parsed_with_hashing(Body0) when is_map(Body0) ->
Algo = rabbit_definitions_hashing:hashing_algorithm(),
case rabbit_definitions_hashing:hash(Algo, Body) of
PreviousHash ->
rabbit_log:info("Submitted definition content hash matches the stored one: ~ts", [binary:part(rabbit_misc:hexify(PreviousHash), 0, 12)]),
?LOG_INFO("Submitted definition content hash matches the stored one: ~ts", [binary:part(rabbit_misc:hexify(PreviousHash), 0, 12)]),
ok;
Other ->
rabbit_log:debug("Submitted definition content hash: ~ts, stored one: ~ts", [
?LOG_DEBUG("Submitted definition content hash: ~ts, stored one: ~ts", [
binary:part(rabbit_misc:hexify(PreviousHash), 0, 10),
binary:part(rabbit_misc:hexify(Other), 0, 10)
]),
@ -239,7 +240,7 @@ import_parsed_with_hashing(Body0) when is_map(Body0) ->
import_parsed_with_hashing(Body0, VHost) when is_list(Body0) ->
import_parsed(maps:from_list(Body0), VHost);
import_parsed_with_hashing(Body0, VHost) ->
rabbit_log:info("Asked to import definitions for virtual host '~ts'. Acting user: ~ts", [?INTERNAL_USER, VHost]),
?LOG_INFO("Asked to import definitions for virtual host '~ts'. Acting user: ~ts", [?INTERNAL_USER, VHost]),
case should_skip_if_unchanged() of
false ->
@ -250,10 +251,10 @@ import_parsed_with_hashing(Body0, VHost) ->
Algo = rabbit_definitions_hashing:hashing_algorithm(),
case rabbit_definitions_hashing:hash(Algo, Body) of
PreviousHash ->
rabbit_log:info("Submitted definition content hash matches the stored one: ~ts", [binary:part(rabbit_misc:hexify(PreviousHash), 0, 12)]),
?LOG_INFO("Submitted definition content hash matches the stored one: ~ts", [binary:part(rabbit_misc:hexify(PreviousHash), 0, 12)]),
ok;
Other ->
rabbit_log:debug("Submitted definition content hash: ~ts, stored one: ~ts", [
?LOG_DEBUG("Submitted definition content hash: ~ts, stored one: ~ts", [
binary:part(rabbit_misc:hexify(PreviousHash), 0, 10),
binary:part(rabbit_misc:hexify(Other), 0, 10)
]),
@ -340,14 +341,14 @@ maybe_load_definitions_from_local_filesystem(App, Key) ->
undefined -> ok;
{ok, none} -> ok;
{ok, Path} ->
rabbit_log:debug("~ts.~ts is set to '~ts', will discover definition file(s) to import", [App, Key, Path]),
?LOG_DEBUG("~ts.~ts is set to '~ts', will discover definition file(s) to import", [App, Key, Path]),
IsDir = filelib:is_dir(Path),
Mod = rabbit_definitions_import_local_filesystem,
rabbit_log:debug("Will use module ~ts to import definitions", [Mod]),
?LOG_DEBUG("Will use module ~ts to import definitions", [Mod]),
case should_skip_if_unchanged() of
false ->
rabbit_log:debug("Will re-import definitions even if they have not changed"),
?LOG_DEBUG("Will re-import definitions even if they have not changed"),
Mod:load(IsDir, Path);
true ->
maybe_load_definitions_from_local_filesystem_if_unchanged(Mod, IsDir, Path)
@ -356,16 +357,16 @@ maybe_load_definitions_from_local_filesystem(App, Key) ->
maybe_load_definitions_from_local_filesystem_if_unchanged(Mod, IsDir, Path) ->
Algo = rabbit_definitions_hashing:hashing_algorithm(),
rabbit_log:debug("Will import definitions only if definition file/directory has changed, hashing algo: ~ts", [Algo]),
?LOG_DEBUG("Will import definitions only if definition file/directory has changed, hashing algo: ~ts", [Algo]),
CurrentHash = rabbit_definitions_hashing:stored_global_hash(),
rabbit_log:debug("Previously stored hash value of imported definitions: ~ts...", [binary:part(rabbit_misc:hexify(CurrentHash), 0, 12)]),
?LOG_DEBUG("Previously stored hash value of imported definitions: ~ts...", [binary:part(rabbit_misc:hexify(CurrentHash), 0, 12)]),
case Mod:load_with_hashing(IsDir, Path, CurrentHash, Algo) of
{error, Err} ->
{error, Err};
CurrentHash ->
rabbit_log:info("Hash value of imported definitions matches current contents");
?LOG_INFO("Hash value of imported definitions matches current contents");
UpdatedHash ->
rabbit_log:debug("Hash value of imported definitions has changed to ~ts", [binary:part(rabbit_misc:hexify(UpdatedHash), 0, 12)]),
?LOG_DEBUG("Hash value of imported definitions has changed to ~ts", [binary:part(rabbit_misc:hexify(UpdatedHash), 0, 12)]),
rabbit_definitions_hashing:store_global_hash(UpdatedHash)
end.
@ -387,20 +388,20 @@ maybe_load_definitions_from_pluggable_source(App, Key) ->
maybe_load_definitions_from_pluggable_source_if_unchanged(Mod, Proplist) ->
case should_skip_if_unchanged() of
false ->
rabbit_log:debug("Will use module ~ts to import definitions", [Mod]),
?LOG_DEBUG("Will use module ~ts to import definitions", [Mod]),
Mod:load(Proplist);
true ->
rabbit_log:debug("Will use module ~ts to import definitions (if definition file/directory/source has changed)", [Mod]),
?LOG_DEBUG("Will use module ~ts to import definitions (if definition file/directory/source has changed)", [Mod]),
CurrentHash = rabbit_definitions_hashing:stored_global_hash(),
rabbit_log:debug("Previously stored hash value of imported definitions: ~ts...", [binary:part(rabbit_misc:hexify(CurrentHash), 0, 12)]),
?LOG_DEBUG("Previously stored hash value of imported definitions: ~ts...", [binary:part(rabbit_misc:hexify(CurrentHash), 0, 12)]),
Algo = rabbit_definitions_hashing:hashing_algorithm(),
case Mod:load_with_hashing(Proplist, CurrentHash, Algo) of
{error, Err} ->
{error, Err};
CurrentHash ->
rabbit_log:info("Hash value of imported definitions matches current contents");
?LOG_INFO("Hash value of imported definitions matches current contents");
UpdatedHash ->
rabbit_log:debug("Hash value of imported definitions has changed to ~ts...", [binary:part(rabbit_misc:hexify(CurrentHash), 0, 12)]),
?LOG_DEBUG("Hash value of imported definitions has changed to ~ts...", [binary:part(rabbit_misc:hexify(CurrentHash), 0, 12)]),
rabbit_definitions_hashing:store_global_hash(UpdatedHash)
end
end.
@ -467,7 +468,7 @@ should_skip_if_unchanged() ->
OptedIn andalso ReachedTargetClusterSize.
log_an_error_about_orphaned_objects() ->
rabbit_log:error("Definitions import: some queues, exchanges or bindings in the definition file "
?LOG_ERROR("Definitions import: some queues, exchanges or bindings in the definition file "
"are missing the virtual host field. Such files are produced when definitions of "
"a single virtual host are exported. They cannot be used to import definitions at boot time").
@ -524,7 +525,7 @@ apply_defs(Map, ActingUser, SuccessFun) when is_function(SuccessFun) ->
end,
fun() ->
rabbit_log:info("There are fewer than target cluster size (~b) nodes online,"
?LOG_INFO("There are fewer than target cluster size (~b) nodes online,"
" skipping queue and binding import from definitions",
[rabbit_nodes:target_cluster_size_hint()])
end
@ -544,7 +545,7 @@ apply_defs(Map, ActingUser, SuccessFun) when is_function(SuccessFun) ->
VHost :: vhost:name()) -> 'ok' | {error, term()}.
apply_defs(Map, ActingUser, SuccessFun, VHost) when is_function(SuccessFun); is_binary(VHost) ->
rabbit_log:info("Asked to import definitions for a virtual host. Virtual host: ~tp, acting user: ~tp",
?LOG_INFO("Asked to import definitions for a virtual host. Virtual host: ~tp, acting user: ~tp",
[VHost, ActingUser]),
try
validate_limits(Map, VHost),
@ -562,7 +563,7 @@ apply_defs(Map, ActingUser, SuccessFun, VHost) when is_function(SuccessFun); is_
end,
fun() ->
rabbit_log:info("There are fewer than target cluster size (~b) nodes online,"
?LOG_INFO("There are fewer than target cluster size (~b) nodes online,"
" skipping queue and binding import from definitions",
[rabbit_nodes:target_cluster_size_hint()])
end
@ -589,7 +590,7 @@ sequential_for_all0(Category, ActingUser, Definitions, Fun) ->
List ->
case length(List) of
0 -> ok;
N -> rabbit_log:info("Importing sequentially ~tp ~ts...", [N, human_readable_category_name(Category)])
N -> ?LOG_INFO("Importing sequentially ~tp ~ts...", [N, human_readable_category_name(Category)])
end,
[begin
%% keys are expected to be atoms
@ -626,7 +627,7 @@ concurrent_for_all0(Category, ActingUser, Definitions, Fun) ->
List ->
case length(List) of
0 -> ok;
N -> rabbit_log:info("Importing concurrently ~tp ~ts...", [N, human_readable_category_name(Category)])
N -> ?LOG_INFO("Importing concurrently ~tp ~ts...", [N, human_readable_category_name(Category)])
end,
WorkPoolFun = fun(M) ->
Fun(atomize_keys(M), ActingUser)
@ -664,7 +665,7 @@ do_concurrent_for_all(List, WorkPoolFun) ->
WorkPoolFun(M)
catch {error, E} -> gatherer:in(Gatherer, {error, E});
_:E:Stacktrace ->
rabbit_log:debug("Definition import: a work pool operation has thrown an exception ~st, stacktrace: ~p",
?LOG_DEBUG("Definition import: a work pool operation has thrown an exception ~st, stacktrace: ~p",
[E, Stacktrace]),
gatherer:in(Gatherer, {error, E})
end,
@ -706,7 +707,7 @@ format({no_such_vhost, VHost}) ->
format({vhost_limit_exceeded, ErrMsg}) ->
rabbit_data_coercion:to_binary(ErrMsg);
format({shutdown, _} = Error) ->
rabbit_log:debug("Metadata store is unavailable: ~p", [Error]),
?LOG_DEBUG("Metadata store is unavailable: ~p", [Error]),
rabbit_data_coercion:to_binary(
rabbit_misc:format("Metadata store is unavailable. Please try again.", []));
format(E) ->
@ -825,11 +826,11 @@ add_queue(VHost, Queue, ActingUser) ->
add_queue_int(_Queue, R = #resource{kind = queue,
name = <<"amq.", _/binary>>}, ActingUser) ->
Name = R#resource.name,
rabbit_log:warning("Skipping import of a queue whose name begins with 'amq.', "
?LOG_WARNING("Skipping import of a queue whose name begins with 'amq.', "
"name: ~ts, acting user: ~ts", [Name, ActingUser]);
add_queue_int(_Queue, R = #resource{kind = queue, virtual_host = undefined}, ActingUser) ->
Name = R#resource.name,
rabbit_log:warning("Skipping import of a queue with an unset virtual host field, "
?LOG_WARNING("Skipping import of a queue with an unset virtual host field, "
"name: ~ts, acting user: ~ts", [Name, ActingUser]);
add_queue_int(Queue, Name = #resource{virtual_host = VHostName}, ActingUser) ->
case rabbit_amqqueue:exists(Name) of
@ -862,11 +863,11 @@ add_exchange(VHost, Exchange, ActingUser) ->
add_exchange_int(Exchange, rv(VHost, exchange, Exchange), ActingUser).
add_exchange_int(_Exchange, #resource{kind = exchange, name = <<"">>}, ActingUser) ->
rabbit_log:warning("Not importing the default exchange, acting user: ~ts", [ActingUser]);
?LOG_WARNING("Not importing the default exchange, acting user: ~ts", [ActingUser]);
add_exchange_int(_Exchange, R = #resource{kind = exchange,
name = <<"amq.", _/binary>>}, ActingUser) ->
Name = R#resource.name,
rabbit_log:warning("Skipping import of an exchange whose name begins with 'amq.', "
?LOG_WARNING("Skipping import of an exchange whose name begins with 'amq.', "
"name: ~ts, acting user: ~ts", [Name, ActingUser]);
add_exchange_int(Exchange, Name, ActingUser) ->
case rabbit_exchange:exists(Name) of
@ -934,7 +935,7 @@ validate_limits(All) ->
undefined -> ok;
Queues0 ->
{ok, VHostMap} = filter_out_existing_queues(Queues0),
_ = rabbit_log:debug("Definition import. Virtual host map for validation: ~p", [VHostMap]),
_ = ?LOG_DEBUG("Definition import. Virtual host map for validation: ~p", [VHostMap]),
maps:fold(fun validate_vhost_limit/3, ok, VHostMap)
end.

View File

@ -20,6 +20,7 @@
-behaviour(rabbit_runtime_parameter).
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-import(rabbit_misc, [pget/2, pget/3]).
@ -109,7 +110,7 @@ stored_vhost_specific_hash(VHostName) ->
-spec store_global_hash(Value :: term()) -> ok.
store_global_hash(Value) ->
rabbit_log:debug("Storing global imported definitions content hash, hex value: ~ts", [rabbit_misc:hexify(Value)]),
?LOG_DEBUG("Storing global imported definitions content hash, hex value: ~ts", [rabbit_misc:hexify(Value)]),
store_global_hash(Value, ?INTERNAL_USER).
-spec store_global_hash(Value0 :: term(), Username :: rabbit_types:username()) -> ok.

View File

@ -14,6 +14,9 @@
%% * rabbit_definitions_import_local_filesystem
%% * rabbit_definitions_hashing
-module(rabbit_definitions_import_https).
-include_lib("kernel/include/logger.hrl").
-export([
is_enabled/0,
load/1,
@ -47,8 +50,8 @@ is_enabled() ->
-spec load(Proplist :: list() | map()) -> ok | {error, term()}.
load(Proplist) ->
URL = pget(url, Proplist),
rabbit_log:info("Applying definitions from a remote URL"),
rabbit_log:debug("HTTPS URL: ~ts", [URL]),
?LOG_INFO("Applying definitions from a remote URL"),
?LOG_DEBUG("HTTPS URL: ~ts", [URL]),
TLSOptions0 = tls_options_or_default(Proplist),
TLSOptions = rabbit_ssl:wrap_password_opt(TLSOptions0),
HTTPOptions = http_options(TLSOptions),
@ -57,8 +60,8 @@ load(Proplist) ->
-spec load_with_hashing(Proplist :: list() | map(), PreviousHash :: binary() | 'undefined', Algo :: crypto:sha1() | crypto:sha2()) -> binary() | 'undefined'.
load_with_hashing(Proplist, PreviousHash, Algo) ->
URL = pget(url, Proplist),
rabbit_log:info("Applying definitions from a remote URL"),
rabbit_log:debug("Loading definitions with content hashing enabled, HTTPS URL: ~ts, previous hash value: ~ts",
?LOG_INFO("Applying definitions from a remote URL"),
?LOG_DEBUG("Loading definitions with content hashing enabled, HTTPS URL: ~ts, previous hash value: ~ts",
[URL, rabbit_misc:hexify(PreviousHash)]),
TLSOptions = tls_options_or_default(Proplist),
@ -67,20 +70,20 @@ load_with_hashing(Proplist, PreviousHash, Algo) ->
case httpc_get(URL, HTTPOptions) of
%% 2XX
{ok, {{_, Code, _}, _Headers, Body}} when Code div 100 == 2 ->
rabbit_log:debug("Requested definitions from remote URL '~ts', response code: ~b", [URL, Code]),
rabbit_log:debug("Requested definitions from remote URL '~ts', body: ~tp", [URL, Body]),
?LOG_DEBUG("Requested definitions from remote URL '~ts', response code: ~b", [URL, Code]),
?LOG_DEBUG("Requested definitions from remote URL '~ts', body: ~tp", [URL, Body]),
case rabbit_definitions_hashing:hash(Algo, Body) of
PreviousHash -> PreviousHash;
Other ->
rabbit_log:debug("New hash: ~ts", [rabbit_misc:hexify(Other)]),
?LOG_DEBUG("New hash: ~ts", [rabbit_misc:hexify(Other)]),
_ = import_raw(Body),
Other
end;
{ok, {{_, Code, _}, _Headers, _Body}} when Code >= 400 ->
rabbit_log:debug("Requested definitions from remote URL '~ts', response code: ~b", [URL, Code]),
?LOG_DEBUG("Requested definitions from remote URL '~ts', response code: ~b", [URL, Code]),
{error, {could_not_read_defs, {URL, rabbit_misc:format("URL request failed with response code ~b", [Code])}}};
{error, Reason} ->
rabbit_log:error("Requested definitions from remote URL '~ts', error: ~tp", [URL, Reason]),
?LOG_ERROR("Requested definitions from remote URL '~ts', error: ~tp", [URL, Reason]),
{error, {could_not_read_defs, {URL, Reason}}}
end.
@ -93,14 +96,14 @@ load_from_url(URL, HTTPOptions0) ->
case httpc_get(URL, HTTPOptions0) of
%% 2XX
{ok, {{_, Code, _}, _Headers, Body}} when Code div 100 == 2 ->
rabbit_log:debug("Requested definitions from remote URL '~ts', response code: ~b", [URL, Code]),
rabbit_log:debug("Requested definitions from remote URL '~ts', body: ~tp", [URL, Body]),
?LOG_DEBUG("Requested definitions from remote URL '~ts', response code: ~b", [URL, Code]),
?LOG_DEBUG("Requested definitions from remote URL '~ts', body: ~tp", [URL, Body]),
import_raw(Body);
{ok, {{_, Code, _}, _Headers, _Body}} when Code >= 400 ->
rabbit_log:debug("Requested definitions from remote URL '~ts', response code: ~b", [URL, Code]),
?LOG_DEBUG("Requested definitions from remote URL '~ts', response code: ~b", [URL, Code]),
{error, {could_not_read_defs, {URL, rabbit_misc:format("URL request failed with response code ~b", [Code])}}};
{error, Reason} ->
rabbit_log:error("Requested definitions from remote URL '~ts', error: ~tp", [URL, Reason]),
?LOG_ERROR("Requested definitions from remote URL '~ts', error: ~tp", [URL, Reason]),
{error, {could_not_read_defs, {URL, Reason}}}
end.


@ -15,6 +15,9 @@
%% * rabbit_definitions_import_http
%% * rabbit_definitions_hashing
-module(rabbit_definitions_import_local_filesystem).
-include_lib("kernel/include/logger.hrl").
-export([
is_enabled/0,
%% definition source options
@ -48,7 +51,7 @@ load(Proplist) when is_list(Proplist) ->
case pget(local_path, Proplist, undefined) of
undefined -> {error, "local definition file path is not configured: local_path is not set"};
Path ->
rabbit_log:debug("Asked to import definitions from a local file or directory at '~ts'", [Path]),
?LOG_DEBUG("Asked to import definitions from a local file or directory at '~ts'", [Path]),
IsDir = filelib:is_dir(Path),
case IsDir of
true ->
@ -75,7 +78,7 @@ load_with_hashing(Proplist, PreviousHash, Algo) ->
-spec load_with_hashing(IsDir :: boolean(), Path :: file:name_all(), PreviousHash :: binary() | 'undefined', Algo :: crypto:sha1() | crypto:sha2()) -> binary() | 'undefined'.
load_with_hashing(IsDir, Path, PreviousHash, Algo) when is_boolean(IsDir) ->
rabbit_log:debug("Loading definitions with content hashing enabled, path: ~ts, is directory?: ~tp, previous hash value: ~ts",
?LOG_DEBUG("Loading definitions with content hashing enabled, path: ~ts, is directory?: ~tp, previous hash value: ~ts",
[Path, IsDir, rabbit_misc:hexify(PreviousHash)]),
case compiled_definitions_from_local_path(IsDir, Path) of
%% the directory is empty or no files could be read
@ -87,12 +90,12 @@ load_with_hashing(IsDir, Path, PreviousHash, Algo) when is_boolean(IsDir) ->
case rabbit_definitions_hashing:hash(Algo, Defs) of
PreviousHash -> PreviousHash;
Other ->
rabbit_log:debug("New hash: ~ts", [rabbit_misc:hexify(Other)]),
?LOG_DEBUG("New hash: ~ts", [rabbit_misc:hexify(Other)]),
_ = load_from_local_path(IsDir, Path),
Other
end;
false ->
rabbit_log:error("Definitions file at path ~p failed validation. The file must be a valid JSON document "
?LOG_ERROR("Definitions file at path ~p failed validation. The file must be a valid JSON document "
"and all virtual host-scoped resources must have a virtual host field to be set. "
"Definition files exported for a single virtual host CANNOT be imported at boot time", [Path]),
{error, not_json}
@ -107,10 +110,10 @@ location() ->
-spec load_from_local_path(IsDir :: boolean(), Path :: file:name_all()) -> ok | {error, term()}.
load_from_local_path(true, Dir) ->
rabbit_log:info("Applying definitions from directory ~ts", [Dir]),
?LOG_INFO("Applying definitions from directory ~ts", [Dir]),
load_from_files(file:list_dir(Dir), Dir);
load_from_local_path(false, File) ->
rabbit_log:info("Applying definitions from regular file at ~ts", [File]),
?LOG_INFO("Applying definitions from regular file at ~ts", [File]),
load_from_single_file(File).
%%
@ -169,7 +172,7 @@ compiled_definitions_from_local_path(true = _IsDir, Dir) ->
end, ReadResults),
[Body || {ok, Body} <- Successes];
{error, E} ->
rabbit_log:error("Could not list files in '~ts', error: ~tp", [Dir, E]),
?LOG_ERROR("Could not list files in '~ts', error: ~tp", [Dir, E]),
{error, {could_not_read_defs, {Dir, E}}}
end;
compiled_definitions_from_local_path(false = _IsDir, Path) ->
@ -184,7 +187,7 @@ read_file_contents(Path) ->
{ok, Body} ->
Body;
{error, E} ->
rabbit_log:error("Could not read definitions from file at '~ts', error: ~tp", [Path, E]),
?LOG_ERROR("Could not read definitions from file at '~ts', error: ~tp", [Path, E]),
{error, {could_not_read_defs, {Path, E}}}
end.
@ -193,7 +196,7 @@ load_from_files({ok, Filenames0}, Dir) ->
Filenames2 = [filename:join(Dir, F) || F <- Filenames1],
load_from_multiple_files(Filenames2);
load_from_files({error, E}, Dir) ->
rabbit_log:error("Could not read definitions from directory ~ts, Error: ~tp", [Dir, E]),
?LOG_ERROR("Could not read definitions from directory ~ts, Error: ~tp", [Dir, E]),
{error, {could_not_read_defs, E}}.
load_from_multiple_files([]) ->
@ -205,7 +208,7 @@ load_from_multiple_files([File|Rest]) ->
end.
load_from_single_file(Path) ->
rabbit_log:debug("Will try to load definitions from a local file or directory at '~ts'", [Path]),
?LOG_DEBUG("Will try to load definitions from a local file or directory at '~ts'", [Path]),
case file:read_file_info(Path, [raw]) of
{ok, FileInfo} ->
@ -215,10 +218,10 @@ load_from_single_file(Path) ->
true ->
case rabbit_misc:raw_read_file(Path) of
{ok, Body} ->
rabbit_log:info("Applying definitions from file at '~ts'", [Path]),
?LOG_INFO("Applying definitions from file at '~ts'", [Path]),
import_raw(Body);
{error, E} ->
rabbit_log:error("Could not read definitions from file at '~ts', error: ~tp", [Path, E]),
?LOG_ERROR("Could not read definitions from file at '~ts', error: ~tp", [Path, E]),
{error, {could_not_read_defs, {Path, E}}}
end;
false ->


@ -20,6 +20,7 @@
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("rabbit_common/include/rabbit_misc.hrl").
-include_lib("kernel/include/logger.hrl").
%%----------------------------------------------------------------------------
@ -157,7 +158,7 @@ is_vhost_alive(VHost, {Username, _Password}, Pid) ->
case rabbit_vhost_sup_sup:is_vhost_alive(VHost) of
true -> true;
false ->
rabbit_log_connection:error(
?LOG_ERROR(
"Error on direct client connection ~tp~n"
"access to vhost '~ts' refused for user '~ts': "
"vhost '~ts' is down",
@ -173,7 +174,7 @@ is_over_vhost_connection_limit(VHost, {Username, _Password}, Pid) ->
try rabbit_vhost_limit:is_over_connection_limit(VHost) of
false -> false;
{true, Limit} ->
rabbit_log_connection:error(
?LOG_ERROR(
"Error on direct client connection ~tp~n"
"access to vhost '~ts' refused for user '~ts': "
"vhost connection limit (~tp) is reached",
@ -181,7 +182,7 @@ is_over_vhost_connection_limit(VHost, {Username, _Password}, Pid) ->
true
catch
throw:{error, {no_such_vhost, VHost}} ->
rabbit_log_connection:error(
?LOG_ERROR(
"Error on direct client connection ~tp~n"
"vhost ~ts not found", [Pid, VHost]),
true
@ -211,7 +212,7 @@ connect1(User = #user{username = Username}, VHost, Protocol, Pid, Infos) ->
{error, Reason}
end;
{true, Limit} ->
rabbit_log_connection:error(
?LOG_ERROR(
"Error on Direct connection ~tp~n"
"access refused for user '~ts': "
"user connection limit (~tp) is reached",
@ -237,7 +238,7 @@ start_channel(Number, ClientChannelPid, ConnPid, ConnName, Protocol,
User, VHost, Capabilities, Collector, AmqpParams}]),
{ok, ChannelPid};
{true, Limit} ->
rabbit_log_connection:error(
?LOG_ERROR(
"Error on direct connection ~tp~n"
"number of channels opened for user '~ts' has reached the "
"maximum allowed limit of (~w)",


@ -7,6 +7,9 @@
-module(rabbit_disk_monitor).
-include_lib("kernel/include/logger.hrl").
%% Disk monitoring server. Monitors free disk space
%% periodically and sets alarms when it is below a certain
%% watermark (configurable either as an absolute value or
@ -145,7 +148,7 @@ init([Limit]) ->
{ok, State4}.
handle_call({set_disk_free_limit, _}, _From, #state{enabled = false} = State) ->
rabbit_log:info("Cannot set disk free limit: "
?LOG_INFO("Cannot set disk free limit: "
"disabled disk free space monitoring", []),
{reply, ok, State};
@ -163,22 +166,22 @@ handle_call({set_max_check_interval, MaxInterval}, _From, State) ->
handle_call({set_enabled, _Enabled = true}, _From, State = #state{enabled = true}) ->
_ = start_timer(set_disk_limits(State, State#state.limit)),
rabbit_log:info("Free disk space monitor was already enabled"),
?LOG_INFO("Free disk space monitor was already enabled"),
{reply, ok, State#state{enabled = true}};
handle_call({set_enabled, _Enabled = true}, _From, State = #state{enabled = false}) ->
_ = start_timer(set_disk_limits(State, State#state.limit)),
rabbit_log:info("Free disk space monitor was manually enabled"),
?LOG_INFO("Free disk space monitor was manually enabled"),
{reply, ok, State#state{enabled = true}};
handle_call({set_enabled, _Enabled = false}, _From, State = #state{enabled = true}) ->
_ = erlang:cancel_timer(State#state.timer),
rabbit_log:info("Free disk space monitor was manually disabled"),
?LOG_INFO("Free disk space monitor was manually disabled"),
{reply, ok, State#state{enabled = false}};
handle_call({set_enabled, _Enabled = false}, _From, State = #state{enabled = false}) ->
_ = erlang:cancel_timer(State#state.timer),
rabbit_log:info("Free disk space monitor was already disabled"),
?LOG_INFO("Free disk space monitor was already disabled"),
{reply, ok, State#state{enabled = false}};
handle_call(_Request, _From, State) ->
@ -194,7 +197,7 @@ handle_info(update, State) ->
{noreply, start_timer(internal_update(State))};
handle_info(Info, State) ->
rabbit_log:debug("~tp unhandled msg: ~tp", [?MODULE, Info]),
?LOG_DEBUG("~tp unhandled msg: ~tp", [?MODULE, Info]),
{noreply, State}.
terminate(_Reason, _State) ->
@ -271,7 +274,7 @@ set_max_check_interval(MaxInterval, State) ->
set_disk_limits(State, Limit0) ->
Limit = interpret_limit(Limit0),
State1 = State#state { limit = Limit },
rabbit_log:info("Disk free limit set to ~bMB",
?LOG_INFO("Disk free limit set to ~bMB",
[trunc(Limit / 1000000)]),
ets:insert(?ETS_NAME, {disk_free_limit, Limit}),
internal_update(State1).
@ -309,7 +312,7 @@ get_disk_free(Dir, {win32, _}, not_used) ->
% "c:/Users/username/AppData/Roaming/RabbitMQ/db/rabbit2@username-z01-mnesia"
case win32_get_drive_letter(Dir) of
error ->
rabbit_log:warning("Expected the mnesia directory absolute "
?LOG_WARNING("Expected the mnesia directory absolute "
"path to start with a drive letter like "
"'C:'. The path is: '~tp'", [Dir]),
{ok, Free} = win32_get_disk_free_dir(Dir),
@ -340,7 +343,7 @@ get_disk_free(Dir, {win32, _}, not_used) ->
%% could not compute the result
'NaN';
_:Reason:_ ->
rabbit_log:warning("Free disk space monitoring failed to retrieve the amount of available space: ~p", [Reason]),
?LOG_WARNING("Free disk space monitoring failed to retrieve the amount of available space: ~p", [Reason]),
%% could not compute the result
'NaN'
end
@ -405,13 +408,13 @@ interpret_limit(Absolute) ->
case rabbit_resource_monitor_misc:parse_information_unit(Absolute) of
{ok, ParsedAbsolute} -> ParsedAbsolute;
{error, parse_error} ->
rabbit_log:error("Unable to parse disk_free_limit value ~tp",
?LOG_ERROR("Unable to parse disk_free_limit value ~tp",
[Absolute]),
?DEFAULT_DISK_FREE_LIMIT
end.
emit_update_info(StateStr, CurrentFree, Limit) ->
rabbit_log:info(
?LOG_INFO(
"Free disk space is ~ts. Free bytes: ~b. Limit: ~b",
[StateStr, CurrentFree, Limit]).
@ -432,7 +435,7 @@ interval(#state{limit = Limit,
trunc(erlang:max(MinInterval, erlang:min(MaxInterval, IdealInterval))).
enable(#state{retries = 0} = State) ->
rabbit_log:error("Free disk space monitor failed to start!"),
?LOG_ERROR("Free disk space monitor failed to start!"),
State;
enable(#state{dir = Dir, os = OS, port = Port} = State) ->
enable_handle_disk_free(catch get_disk_free(Dir, OS, Port), State).
@ -440,7 +443,7 @@ enable(#state{dir = Dir, os = OS, port = Port} = State) ->
enable_handle_disk_free(DiskFree, State) when is_integer(DiskFree) ->
enable_handle_total_memory(catch vm_memory_monitor:get_total_memory(), DiskFree, State);
enable_handle_disk_free(Error, #state{interval = Interval, retries = Retries} = State) ->
rabbit_log:warning("Free disk space monitor encountered an error "
?LOG_WARNING("Free disk space monitor encountered an error "
"(e.g. failed to parse output from OS tools). "
"Retries left: ~b Error:~n~tp",
[Retries, Error]),
@ -448,11 +451,11 @@ enable_handle_disk_free(Error, #state{interval = Interval, retries = Retries} =
State#state{enabled = false}.
enable_handle_total_memory(TotalMemory, DiskFree, #state{limit = Limit} = State) when is_integer(TotalMemory) ->
rabbit_log:info("Enabling free disk space monitoring "
?LOG_INFO("Enabling free disk space monitoring "
"(disk free space: ~b, total memory: ~b)", [DiskFree, TotalMemory]),
start_timer(set_disk_limits(State, Limit));
enable_handle_total_memory(Error, _DiskFree, #state{interval = Interval, retries = Retries} = State) ->
rabbit_log:warning("Free disk space monitor encountered an error "
?LOG_WARNING("Free disk space monitor encountered an error "
"retrieving total memory. "
"Retries left: ~b Error:~n~tp",
[Retries, Error]),
@ -472,6 +475,6 @@ run_os_cmd(Cmd) ->
CmdResult
after 5000 ->
exit(CmdPid, kill),
rabbit_log:error("Command timed out: '~ts'", [Cmd]),
?LOG_ERROR("Command timed out: '~ts'", [Cmd]),
{error, timeout}
end.
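
The format strings in these log calls use standard io_lib control sequences: the 't' modifier makes ~s and ~p Unicode-aware (hence ~ts and ~tp), ~b prints an integer, and ~W writes a term like ~w but only down to a given depth, passed as an extra argument (as in the [Cmd, 10] argument lists in the rabbit_fifo changes later in this commit). A small, self-contained illustration with made-up values:

%% Illustration only: ~W consumes two arguments, the term and the depth.
format_example() ->
    Cmd = {settle, #{}, [1, 2, 3]},
    io_lib:format("unhandled command ~W; ~b retries left for queue ~ts",
                  [Cmd, 10, 3, <<"orders">>]).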


@ -7,6 +7,9 @@
-module(rabbit_epmd_monitor).
-include_lib("kernel/include/logger.hrl").
-behaviour(gen_server).
-export([start_link/0]).
@ -84,19 +87,19 @@ check_epmd(State = #state{mod = Mod,
{ok, State#state{port = Port1}}.
handle_port_please(init, noport, Me, Port) ->
rabbit_log:info("epmd does not know us, re-registering as ~ts", [Me]),
?LOG_INFO("epmd does not know us, re-registering as ~ts", [Me]),
{ok, Port};
handle_port_please(check, noport, Me, Port) ->
rabbit_log:warning("epmd does not know us, re-registering ~ts at port ~b", [Me, Port]),
?LOG_WARNING("epmd does not know us, re-registering ~ts at port ~b", [Me, Port]),
{ok, Port};
handle_port_please(_, closed, _Me, Port) ->
rabbit_log:error("epmd monitor failed to retrieve our port from epmd: closed"),
?LOG_ERROR("epmd monitor failed to retrieve our port from epmd: closed"),
{ok, Port};
handle_port_please(init, {port, NewPort, _Version}, _Me, _Port) ->
rabbit_log:info("epmd monitor knows us, inter-node communication (distribution) port: ~tp", [NewPort]),
?LOG_INFO("epmd monitor knows us, inter-node communication (distribution) port: ~tp", [NewPort]),
{ok, NewPort};
handle_port_please(check, {port, NewPort, _Version}, _Me, _Port) ->
{ok, NewPort};
handle_port_please(_, {error, Error}, _Me, Port) ->
rabbit_log:error("epmd monitor failed to retrieve our port from epmd: ~tp", [Error]),
?LOG_ERROR("epmd monitor failed to retrieve our port from epmd: ~tp", [Error]),
{ok, Port}.


@ -7,6 +7,7 @@
-module(rabbit_exchange).
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-export([recover/1, policy_changed/2, callback/4, declare/7,
assert_equivalence/6, assert_args_equivalence/2, check_type/1, exists/1,
@ -135,7 +136,7 @@ declare(XName, Type, Durable, AutoDelete, Internal, Args, Username) ->
Err
end;
_ ->
rabbit_log:warning("ignoring exchange.declare for exchange ~tp,
?LOG_WARNING("ignoring exchange.declare for exchange ~tp,
exchange.delete in progress~n.", [XName]),
{ok, X}
end.
@ -531,7 +532,7 @@ peek_serial(XName) ->
rabbit_db_exchange:peek_serial(XName).
invalid_module(T) ->
rabbit_log:warning("Could not find exchange type ~ts.", [T]),
?LOG_WARNING("Could not find exchange type ~ts.", [T]),
put({xtype_to_module, T}, rabbit_exchange_type_invalid),
rabbit_exchange_type_invalid.


@ -14,6 +14,7 @@
-dialyzer(no_improper_lists).
-include("rabbit_fifo.hrl").
-include_lib("kernel/include/logger.hrl").
-define(STATE, ?MODULE).
@ -676,7 +677,7 @@ apply(Meta, {dlx, _} = Cmd,
checkout(Meta, State0, State1, Effects0);
apply(_Meta, Cmd, State) ->
%% handle unhandled commands gracefully
rabbit_log:debug("rabbit_fifo: unhandled command ~W", [Cmd, 10]),
?LOG_DEBUG("rabbit_fifo: unhandled command ~W", [Cmd, 10]),
{State, ok, []}.
convert_v3_to_v4(#{} = _Meta, StateV3) ->
@ -1157,7 +1158,7 @@ handle_aux(_RaState, _, force_checkpoint,
bytes_in = BytesIn} = Aux, RaAux) ->
Ts = erlang:system_time(millisecond),
#?STATE{cfg = #cfg{resource = QR}} = ra_aux:machine_state(RaAux),
rabbit_log:debug("~ts: rabbit_fifo: forcing checkpoint at ~b",
?LOG_DEBUG("~ts: rabbit_fifo: forcing checkpoint at ~b",
[rabbit_misc:rs(QR), ra_aux:last_applied(RaAux)]),
{Check, Effects} = do_checkpoints(Ts, Check0, RaAux, BytesIn, true),
{no_reply, Aux#?AUX{last_checkpoint = Check}, RaAux, Effects};
@ -1178,7 +1179,7 @@ eval_gc(RaAux, MacState,
Mem > ?GC_MEM_LIMIT_B ->
garbage_collect(),
{memory, MemAfter} = erlang:process_info(self(), memory),
rabbit_log:debug("~ts: full GC sweep complete. "
?LOG_DEBUG("~ts: full GC sweep complete. "
"Process memory changed from ~.2fMB to ~.2fMB.",
[rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]),
AuxState#?AUX{gc = Gc#aux_gc{last_raft_idx = Idx}};
@ -1195,7 +1196,7 @@ force_eval_gc(RaAux,
true ->
garbage_collect(),
{memory, MemAfter} = erlang:process_info(self(), memory),
rabbit_log:debug("~ts: full GC sweep complete. "
?LOG_DEBUG("~ts: full GC sweep complete. "
"Process memory changed from ~.2fMB to ~.2fMB.",
[rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]),
AuxState#?AUX{gc = Gc#aux_gc{last_raft_idx = Idx}};


@ -11,6 +11,9 @@
%% Handles command tracking and other non-functional concerns.
-module(rabbit_fifo_client).
-include_lib("kernel/include/logger.hrl").
-export([
init/1,
init/2,
@ -143,13 +146,13 @@ enqueue(QName, Correlation, Msg,
%% to send it
{reject_publish, State0};
{error, {shutdown, delete}} ->
rabbit_log:debug("~ts: QQ ~ts tried to register enqueuer during delete shutdown",
?LOG_DEBUG("~ts: QQ ~ts tried to register enqueuer during delete shutdown",
[?MODULE, rabbit_misc:rs(QName)]),
{reject_publish, State0};
{timeout, _} ->
{reject_publish, State0};
Err ->
rabbit_log:debug("~ts: QQ ~ts error when registering enqueuer ~p",
?LOG_DEBUG("~ts: QQ ~ts error when registering enqueuer ~p",
[?MODULE, rabbit_misc:rs(QName), Err]),
exit(Err)
end;
@ -628,7 +631,7 @@ handle_ra_event(QName, Leader, {applied, Seqs},
{ok, _, ActualLeader}
when ActualLeader =/= OldLeader ->
%% there is a new leader
rabbit_log:debug("~ts: Detected QQ leader change (applied) "
?LOG_DEBUG("~ts: Detected QQ leader change (applied) "
"from ~w to ~w, "
"resending ~b pending commands",
[?MODULE, OldLeader, ActualLeader,
@ -698,7 +701,7 @@ handle_ra_event(QName, Leader, {machine, leader_change},
pending = Pending} = State0) ->
%% we need to update leader
%% and resend any pending commands
rabbit_log:debug("~ts: ~s Detected QQ leader change from ~w to ~w, "
?LOG_DEBUG("~ts: ~s Detected QQ leader change from ~w to ~w, "
"resending ~b pending commands",
[rabbit_misc:rs(QName), ?MODULE, OldLeader,
Leader, maps:size(Pending)]),
@ -710,7 +713,7 @@ handle_ra_event(_QName, _From, {rejected, {not_leader, Leader, _Seq}},
handle_ra_event(QName, _From, {rejected, {not_leader, Leader, _Seq}},
#state{leader = OldLeader,
pending = Pending} = State0) ->
rabbit_log:debug("~ts: ~s Detected QQ leader change (rejection) from ~w to ~w, "
?LOG_DEBUG("~ts: ~s Detected QQ leader change (rejection) from ~w to ~w, "
"resending ~b pending commands",
[rabbit_misc:rs(QName), ?MODULE, OldLeader,
Leader, maps:size(Pending)]),
@ -739,7 +742,7 @@ handle_ra_event(QName, Leader, close_cached_segments,
{_TRef, Last, Cache} ->
case now_ms() > Last + ?CACHE_SEG_TIMEOUT of
true ->
rabbit_log:debug("~ts: closing_cached_segments",
?LOG_DEBUG("~ts: closing_cached_segments",
[rabbit_misc:rs(QName)]),
%% its been long enough, evict all
_ = ra_flru:evict_all(Cache),
@ -982,7 +985,7 @@ add_delivery_count(DelCntIncr, Tag, #state{consumers = CDels0} = State) ->
get_missing_deliveries(State, From, To, ConsumerTag) ->
%% find local server
ConsumerKey = consumer_key(ConsumerTag, State),
rabbit_log:debug("get_missing_deliveries for consumer '~s' from ~b to ~b",
?LOG_DEBUG("get_missing_deliveries for consumer '~s' from ~b to ~b",
[ConsumerTag, From, To]),
Cmd = {get_checked_out, ConsumerKey, lists:seq(From, To)},
ServerId = find_local_or_leader(State),


@ -8,6 +8,7 @@
-include("rabbit_fifo_dlx.hrl").
-include("rabbit_fifo.hrl").
-include_lib("kernel/include/logger.hrl").
-compile({no_auto_import, [apply/3]}).
-export([
@ -123,7 +124,7 @@ apply(_, {dlx, #checkout{consumer = ConsumerPid,
OldConsumerPid ->
ok;
_ ->
rabbit_log:debug("Terminating ~p since ~p becomes active rabbit_fifo_dlx_worker",
?LOG_DEBUG("Terminating ~p since ~p becomes active rabbit_fifo_dlx_worker",
[OldConsumerPid, ConsumerPid]),
ensure_worker_terminated(State0)
end,
@ -144,7 +145,7 @@ apply(_, {dlx, #checkout{consumer = ConsumerPid,
msg_bytes_checkout = BytesCheckout - BytesMoved},
{State, []};
apply(_, Cmd, DLH, State) ->
rabbit_log:debug("Ignoring command ~tp for dead_letter_handler ~tp", [Cmd, DLH]),
?LOG_DEBUG("Ignoring command ~tp for dead_letter_handler ~tp", [Cmd, DLH]),
{State, []}.
-spec discard([msg()], rabbit_dead_letter:reason(), dead_letter_handler(), state()) ->
@ -257,7 +258,7 @@ ensure_worker_started(QRef, #?MODULE{consumer = undefined}) ->
ensure_worker_started(QRef, #?MODULE{consumer = #dlx_consumer{pid = Pid}}) ->
case is_local_and_alive(Pid) of
true ->
rabbit_log:debug("rabbit_fifo_dlx_worker ~tp already started for ~ts",
?LOG_DEBUG("rabbit_fifo_dlx_worker ~tp already started for ~ts",
[Pid, rabbit_misc:rs(QRef)]);
false ->
start_worker(QRef)
@ -269,7 +270,7 @@ ensure_worker_started(QRef, #?MODULE{consumer = #dlx_consumer{pid = Pid}}) ->
%% Ra server process crash in which case another Ra node will become leader.
start_worker(QRef) ->
{ok, Pid} = supervisor:start_child(rabbit_fifo_dlx_sup, [QRef]),
rabbit_log:debug("started rabbit_fifo_dlx_worker ~tp for ~ts",
?LOG_DEBUG("started rabbit_fifo_dlx_worker ~tp for ~ts",
[Pid, rabbit_misc:rs(QRef)]).
ensure_worker_terminated(#?MODULE{consumer = undefined}) ->
@ -280,7 +281,7 @@ ensure_worker_terminated(#?MODULE{consumer = #dlx_consumer{pid = Pid}}) ->
%% Note that we can't return a mod_call effect here
%% because mod_call is executed on the leader only.
ok = supervisor:terminate_child(rabbit_fifo_dlx_sup, Pid),
rabbit_log:debug("terminated rabbit_fifo_dlx_worker ~tp", [Pid]);
?LOG_DEBUG("terminated rabbit_fifo_dlx_worker ~tp", [Pid]);
false ->
ok
end.
@ -315,7 +316,7 @@ update_config(at_least_once, at_least_once, _, State) ->
update_config(SameDLH, SameDLH, _, State) ->
{State, []};
update_config(OldDLH, NewDLH, QRes, State0) ->
LogOnLeader = {mod_call, rabbit_log, debug,
LogOnLeader = {mod_call, logger, debug,
["Switching dead_letter_handler from ~tp to ~tp for ~ts",
[OldDLH, NewDLH, rabbit_misc:rs(QRes)]]},
{State1, Effects0} = switch_from(OldDLH, QRes, State0),
@ -329,7 +330,7 @@ switch_from(at_least_once, QRes, State) ->
ensure_worker_terminated(State),
{Num, Bytes} = stat(State),
%% Log only on leader.
{init(), [{mod_call, rabbit_log, info,
{init(), [{mod_call, logger, info,
["Deleted ~b dead-lettered messages (with total messages size of ~b bytes) in ~ts",
[Num, Bytes, rabbit_misc:rs(QRes)]]}]};
switch_from(_, _, State) ->
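
Within Ra effects this commit substitutes the plain logger module for rabbit_log rather than a ?LOG_* macro: a {mod_call, Module, Function, Args} effect has to name a real exported function to be applied later (the surrounding comments note that mod_call effects run on the leader only), and a macro cannot be deferred that way. A sketch of the resulting effect, with a hypothetical helper name and the arguments taken from the hunk above:

%% Builds the leader-only logging effect used when the dead_letter_handler
%% configuration changes.
dlh_switch_log_effect(OldDLH, NewDLH, QRes) ->
    {mod_call, logger, debug,
     ["Switching dead_letter_handler from ~tp to ~tp for ~ts",
      [OldDLH, NewDLH, rabbit_misc:rs(QRes)]]}.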


@ -6,6 +6,9 @@
-module(rabbit_fifo_dlx_client).
-include_lib("kernel/include/logger.hrl").
-export([checkout/3, settle/2, handle_ra_event/3,
overview/1]).
@ -47,11 +50,11 @@ process_command(Cmd, #state{leader = Leader} = State, Tries) ->
{ok, ok, Leader} ->
{ok, State#state{leader = Leader}};
{ok, ok, NonLocalLeader} ->
rabbit_log:warning("Failed to process command ~tp on quorum queue leader ~tp because actual leader is ~tp.",
?LOG_WARNING("Failed to process command ~tp on quorum queue leader ~tp because actual leader is ~tp.",
[Cmd, Leader, NonLocalLeader]),
{error, non_local_leader};
Err ->
rabbit_log:warning("Failed to process command ~tp on quorum queue leader ~tp: ~tp~n"
?LOG_WARNING("Failed to process command ~tp on quorum queue leader ~tp: ~tp~n"
"Trying ~b more time(s)...",
[Cmd, Leader, Err, Tries]),
process_command(Cmd, State, Tries - 1)
@ -63,7 +66,7 @@ handle_ra_event(Leader, {dlx_delivery, _} = Del,
#state{leader = _Leader} = State) when node(Leader) == node() ->
handle_delivery(Del, State);
handle_ra_event(From, Evt, State) ->
rabbit_log:debug("Ignoring ra event ~tp from ~tp", [Evt, From]),
?LOG_DEBUG("Ignoring ra event ~tp from ~tp", [Evt, From]),
{ok, State, []}.
handle_delivery({dlx_delivery, [{FstId, _} | _] = IdMsgs},


@ -25,6 +25,7 @@
-include("mc.hrl").
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
% -include_lib("rabbit_common/include/rabbit_framing.hrl").
-behaviour(gen_server).
@ -135,7 +136,7 @@ terminate(_Reason, State) ->
cancel_timer(State).
handle_call(Request, From, State) ->
rabbit_log:info("~ts received unhandled call from ~tp: ~tp", [?MODULE, From, Request]),
?LOG_INFO("~ts received unhandled call from ~tp: ~tp", [?MODULE, From, Request]),
{noreply, State}.
handle_cast({dlx_event, _LeaderPid, lookup_topology},
@ -169,7 +170,7 @@ handle_cast(settle_timeout, State0) ->
State = State0#state{timer = undefined},
redeliver_and_ack(State);
handle_cast(Request, State) ->
rabbit_log:info("~ts received unhandled cast ~tp", [?MODULE, Request]),
?LOG_INFO("~ts received unhandled cast ~tp", [?MODULE, Request]),
{noreply, State}.
redeliver_and_ack(State0) ->
@ -183,7 +184,7 @@ handle_info({'DOWN', Ref, process, _, _},
queue_ref = QRef}) ->
%% Source quorum queue is down. Therefore, terminate ourself.
%% The new leader will re-create another dlx_worker.
rabbit_log:debug("~ts terminating itself because leader of ~ts is down...",
?LOG_DEBUG("~ts terminating itself because leader of ~ts is down...",
[?MODULE, rabbit_misc:rs(QRef)]),
supervisor:terminate_child(rabbit_fifo_dlx_sup, self());
handle_info({{'DOWN', QName}, _MRef, process, QPid, Reason},
@ -197,7 +198,7 @@ handle_info({{'DOWN', QName}, _MRef, process, QPid, Reason},
remove_queue(QRef, State0#state{queue_type_state = QTypeState})
end;
handle_info(Info, State) ->
rabbit_log:info("~ts received unhandled info ~tp", [?MODULE, Info]),
?LOG_INFO("~ts received unhandled info ~tp", [?MODULE, Info]),
{noreply, State}.
code_change(_OldVsn, State, _Extra) ->
@ -219,7 +220,7 @@ remove_queue(QRef, #state{pendings = Pendings0,
queue_type_state = QTypeState}}.
wait_for_queue_deleted(QRef, 0) ->
rabbit_log:debug("Received deletion event for ~ts but queue still exists in ETS table.",
?LOG_DEBUG("Received deletion event for ~ts but queue still exists in ETS table.",
[rabbit_misc:rs(QRef)]);
wait_for_queue_deleted(QRef, N) ->
case rabbit_amqqueue:exists(QRef) of
@ -289,7 +290,7 @@ rejected(SeqNo, Qs, Pendings)
end,
Pendings);
false ->
rabbit_log:debug("Ignoring rejection for unknown sequence number ~b "
?LOG_DEBUG("Ignoring rejection for unknown sequence number ~b "
"from target dead letter queues ~tp",
[SeqNo, Qs]),
Pendings
@ -386,7 +387,7 @@ deliver_to_queues(Msg, Options, Qs, #state{queue_type_state = QTypeState0,
%% we won't rely on rabbit_fifo_client to re-deliver on behalf of us
%% (and therefore preventing messages to get stuck in our 'unsettled' state).
QNames = queue_names(Qs),
rabbit_log:debug("Failed to deliver message with seq_no ~b to "
?LOG_DEBUG("Failed to deliver message with seq_no ~b to "
"queues ~tp: ~tp",
[SeqNo, QNames, Reason]),
{State0#state{pendings = rejected(SeqNo, QNames, Pendings)}, []}
@ -419,7 +420,7 @@ handle_settled0(QRef, MsgSeq, #state{pendings = Pendings,
settled = [QRef | Settled]},
State#state{pendings = maps:update(MsgSeq, Pend, Pendings)};
error ->
rabbit_log:debug("Ignoring publisher confirm for unknown sequence number ~b "
?LOG_DEBUG("Ignoring publisher confirm for unknown sequence number ~b "
"from target dead letter ~ts",
[MsgSeq, rabbit_misc:rs(QRef)]),
State
@ -625,7 +626,7 @@ log_missing_dlx_once(#state{exchange_ref = SameDlx,
log_missing_dlx_once(#state{exchange_ref = DlxResource,
queue_ref = QueueResource,
logged = Logged} = State) ->
rabbit_log:warning("Cannot forward any dead-letter messages from source quorum ~ts because "
?LOG_WARNING("Cannot forward any dead-letter messages from source quorum ~ts because "
"its configured dead-letter-exchange ~ts does not exist. "
"Either create the configured dead-letter-exchange or re-configure "
"the dead-letter-exchange policy for the source quorum queue to prevent "
@ -642,7 +643,7 @@ log_no_route_once(#state{queue_ref = QueueResource,
exchange_ref = DlxResource,
routing_key = RoutingKey,
logged = Logged} = State) ->
rabbit_log:warning("Cannot forward any dead-letter messages from source quorum ~ts "
?LOG_WARNING("Cannot forward any dead-letter messages from source quorum ~ts "
"with configured dead-letter-exchange ~ts and configured "
"dead-letter-routing-key '~ts'. This can happen either if the dead-letter "
"routing topology is misconfigured (for example no queue bound to "
@ -663,7 +664,7 @@ log_cycle_once(Queues, _, #state{logged = Logged} = State)
log_cycle_once(Queues, RoutingKeys, #state{exchange_ref = DlxResource,
queue_ref = QueueResource,
logged = Logged} = State) ->
rabbit_log:warning("Dead-letter queues cycle detected for source quorum ~ts "
?LOG_WARNING("Dead-letter queues cycle detected for source quorum ~ts "
"with dead-letter exchange ~ts and routing keys ~tp: ~tp "
"This message will not be logged again.",
[rabbit_misc:rs(QueueResource), rabbit_misc:rs(DlxResource),


@ -15,6 +15,7 @@
-include("rabbit_fifo_v0.hrl").
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-export([
init/1,
@ -673,7 +674,7 @@ eval_gc(Log, #?STATE{cfg = #cfg{resource = QR}} = MacState,
Mem > ?GC_MEM_LIMIT_B ->
garbage_collect(),
{memory, MemAfter} = erlang:process_info(self(), memory),
rabbit_log:debug("~ts: full GC sweep complete. "
?LOG_DEBUG("~ts: full GC sweep complete. "
"Process memory changed from ~.2fMB to ~.2fMB.",
[rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]),
AuxState#aux{gc = Gc#aux_gc{last_raft_idx = Idx}};


@ -15,6 +15,7 @@
-include("rabbit_fifo_v1.hrl").
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-export([
init/1,
@ -533,7 +534,7 @@ apply(_Meta, {machine_version, 0, 1}, V0State) ->
{State, ok, []};
apply(_Meta, Cmd, State) ->
%% handle unhandled commands gracefully
rabbit_log:debug("rabbit_fifo: unhandled command ~W", [Cmd, 10]),
?LOG_DEBUG("rabbit_fifo: unhandled command ~W", [Cmd, 10]),
{State, ok, []}.
convert_v0_to_v1(V0State0) ->
@ -855,7 +856,7 @@ eval_gc(Log, #?STATE{cfg = #cfg{resource = QR}} = MacState,
Mem > ?GC_MEM_LIMIT_B ->
garbage_collect(),
{memory, MemAfter} = erlang:process_info(self(), memory),
rabbit_log:debug("~ts: full GC sweep complete. "
?LOG_DEBUG("~ts: full GC sweep complete. "
"Process memory changed from ~.2fMB to ~.2fMB.",
[rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]),
AuxState#aux{gc = Gc#aux_gc{last_raft_idx = Idx}};
@ -871,7 +872,7 @@ force_eval_gc(Log, #?STATE{cfg = #cfg{resource = QR}},
true ->
garbage_collect(),
{memory, MemAfter} = erlang:process_info(self(), memory),
rabbit_log:debug("~ts: full GC sweep complete. "
?LOG_DEBUG("~ts: full GC sweep complete. "
"Process memory changed from ~.2fMB to ~.2fMB.",
[rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]),
AuxState#aux{gc = Gc#aux_gc{last_raft_idx = Idx}};


@ -15,6 +15,7 @@
-include("rabbit_fifo_v3.hrl").
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-define(STATE, rabbit_fifo).
@ -619,7 +620,7 @@ apply(#{index := IncomingRaftIdx} = Meta, {dlx, _} = Cmd,
update_smallest_raft_index(IncomingRaftIdx, State, Effects);
apply(_Meta, Cmd, State) ->
%% handle unhandled commands gracefully
rabbit_log:debug("rabbit_fifo: unhandled command ~W", [Cmd, 10]),
?LOG_DEBUG("rabbit_fifo: unhandled command ~W", [Cmd, 10]),
{State, ok, []}.
convert_msg({RaftIdx, {Header, empty}}) when is_integer(RaftIdx) ->
@ -1172,7 +1173,7 @@ eval_gc(Log, #?STATE{cfg = #cfg{resource = QR}} = MacState,
Mem > ?GC_MEM_LIMIT_B ->
garbage_collect(),
{memory, MemAfter} = erlang:process_info(self(), memory),
rabbit_log:debug("~ts: full GC sweep complete. "
?LOG_DEBUG("~ts: full GC sweep complete. "
"Process memory changed from ~.2fMB to ~.2fMB.",
[rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]),
AuxState#?AUX{gc = Gc#aux_gc{last_raft_idx = Idx}};
@ -1188,7 +1189,7 @@ force_eval_gc(Log, #?STATE{cfg = #cfg{resource = QR}},
true ->
garbage_collect(),
{memory, MemAfter} = erlang:process_info(self(), memory),
rabbit_log:debug("~ts: full GC sweep complete. "
?LOG_DEBUG("~ts: full GC sweep complete. "
"Process memory changed from ~.2fMB to ~.2fMB.",
[rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]),
AuxState#?AUX{gc = Gc#aux_gc{last_raft_idx = Idx}};


@ -6,6 +6,9 @@
%%
-module(rabbit_health_check).
-include_lib("kernel/include/logger.hrl").
%% External API
-export([node/1, node/2]).
@ -28,7 +31,7 @@ node(Node, Timeout) ->
-spec local() -> ok | {error_string, string()}.
local() ->
rabbit_log:warning("rabbitmqctl node_health_check and its HTTP API counterpart are DEPRECATED. "
?LOG_WARNING("rabbitmqctl node_health_check and its HTTP API counterpart are DEPRECATED. "
"See https://www.rabbitmq.com/docs/monitoring#health-checks for replacement options."),
run_checks([list_channels, list_queues, alarms, rabbit_node_monitor]).


@ -1,120 +0,0 @@
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2007-2025 Broadcom. All Rights Reserved. The term Broadcom refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
%%
%% @doc Compatibility module for the old Lager-based logging API.
-module(rabbit_log_channel).
-export([debug/1, debug/2, debug/3,
info/1, info/2, info/3,
notice/1, notice/2, notice/3,
warning/1, warning/2, warning/3,
error/1, error/2, error/3,
critical/1, critical/2, critical/3,
alert/1, alert/2, alert/3,
emergency/1, emergency/2, emergency/3,
none/1, none/2, none/3]).
-include_lib("rabbit_common/include/logging.hrl").
-compile({no_auto_import, [error/2, error/3]}).
-spec debug(string()) -> 'ok'.
debug(Format) -> debug(Format, []).
-spec debug(string(), [any()]) -> 'ok'.
debug(Format, Args) -> debug(self(), Format, Args).
-spec debug(pid() | [tuple()], string(), [any()]) -> 'ok'.
debug(Pid, Format, Args) ->
logger:debug(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_CHAN}).
-spec info(string()) -> 'ok'.
info(Format) -> info(Format, []).
-spec info(string(), [any()]) -> 'ok'.
info(Format, Args) -> info(self(), Format, Args).
-spec info(pid() | [tuple()], string(), [any()]) -> 'ok'.
info(Pid, Format, Args) ->
logger:info(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_CHAN}).
-spec notice(string()) -> 'ok'.
notice(Format) -> notice(Format, []).
-spec notice(string(), [any()]) -> 'ok'.
notice(Format, Args) -> notice(self(), Format, Args).
-spec notice(pid() | [tuple()], string(), [any()]) -> 'ok'.
notice(Pid, Format, Args) ->
logger:notice(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_CHAN}).
-spec warning(string()) -> 'ok'.
warning(Format) -> warning(Format, []).
-spec warning(string(), [any()]) -> 'ok'.
warning(Format, Args) -> warning(self(), Format, Args).
-spec warning(pid() | [tuple()], string(), [any()]) -> 'ok'.
warning(Pid, Format, Args) ->
logger:warning(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_CHAN}).
-spec error(string()) -> 'ok'.
error(Format) -> error(Format, []).
-spec error(string(), [any()]) -> 'ok'.
error(Format, Args) -> error(self(), Format, Args).
-spec error(pid() | [tuple()], string(), [any()]) -> 'ok'.
error(Pid, Format, Args) ->
logger:error(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_CHAN}).
-spec critical(string()) -> 'ok'.
critical(Format) -> critical(Format, []).
-spec critical(string(), [any()]) -> 'ok'.
critical(Format, Args) -> critical(self(), Format, Args).
-spec critical(pid() | [tuple()], string(), [any()]) -> 'ok'.
critical(Pid, Format, Args) ->
logger:critical(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_CHAN}).
-spec alert(string()) -> 'ok'.
alert(Format) -> alert(Format, []).
-spec alert(string(), [any()]) -> 'ok'.
alert(Format, Args) -> alert(self(), Format, Args).
-spec alert(pid() | [tuple()], string(), [any()]) -> 'ok'.
alert(Pid, Format, Args) ->
logger:alert(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_CHAN}).
-spec emergency(string()) -> 'ok'.
emergency(Format) -> emergency(Format, []).
-spec emergency(string(), [any()]) -> 'ok'.
emergency(Format, Args) -> emergency(self(), Format, Args).
-spec emergency(pid() | [tuple()], string(), [any()]) -> 'ok'.
emergency(Pid, Format, Args) ->
logger:emergency(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_CHAN}).
-spec none(string()) -> 'ok'.
none(_Format) -> ok.
-spec none(string(), [any()]) -> 'ok'.
none(_Format, _Args) -> ok.
-spec none(pid() | [tuple()], string(), [any()]) -> 'ok'.
none(_Pid, _Format, _Args) -> ok.
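
The module deleted above, like the similar rabbit_log_* modules that follow, was a thin compatibility shim: per the removed code, rabbit_log_channel:debug(Format, Args) amounted to the call below, with a fixed log domain and the caller's pid as metadata. Call sites converted in this commit use the ?LOG_* macros directly instead. Sketch only (hypothetical function name; the domain macro comes from rabbit_common's logging.hrl):

-include_lib("rabbit_common/include/logging.hrl").

%% Equivalent of the removed rabbit_log_channel:debug/2.
debug_via_logger(Format, Args) ->
    logger:debug(Format, Args, #{pid => self(),
                                 domain => ?RMQLOG_DOMAIN_CHAN}).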


@ -1,120 +0,0 @@
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2007-2025 Broadcom. All Rights Reserved. The term Broadcom refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
%%
%% @doc Compatibility module for the old Lager-based logging API.
-module(rabbit_log_connection).
-export([debug/1, debug/2, debug/3,
info/1, info/2, info/3,
notice/1, notice/2, notice/3,
warning/1, warning/2, warning/3,
error/1, error/2, error/3,
critical/1, critical/2, critical/3,
alert/1, alert/2, alert/3,
emergency/1, emergency/2, emergency/3,
none/1, none/2, none/3]).
-include_lib("rabbit_common/include/logging.hrl").
-compile({no_auto_import, [error/2, error/3]}).
-spec debug(string()) -> 'ok'.
debug(Format) -> debug(Format, []).
-spec debug(string(), [any()]) -> 'ok'.
debug(Format, Args) -> debug(self(), Format, Args).
-spec debug(pid() | [tuple()], string(), [any()]) -> 'ok'.
debug(Pid, Format, Args) ->
logger:debug(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_CONN}).
-spec info(string()) -> 'ok'.
info(Format) -> info(Format, []).
-spec info(string(), [any()]) -> 'ok'.
info(Format, Args) -> info(self(), Format, Args).
-spec info(pid() | [tuple()], string(), [any()]) -> 'ok'.
info(Pid, Format, Args) ->
logger:info(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_CONN}).
-spec notice(string()) -> 'ok'.
notice(Format) -> notice(Format, []).
-spec notice(string(), [any()]) -> 'ok'.
notice(Format, Args) -> notice(self(), Format, Args).
-spec notice(pid() | [tuple()], string(), [any()]) -> 'ok'.
notice(Pid, Format, Args) ->
logger:notice(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_CONN}).
-spec warning(string()) -> 'ok'.
warning(Format) -> warning(Format, []).
-spec warning(string(), [any()]) -> 'ok'.
warning(Format, Args) -> warning(self(), Format, Args).
-spec warning(pid() | [tuple()], string(), [any()]) -> 'ok'.
warning(Pid, Format, Args) ->
logger:warning(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_CONN}).
-spec error(string()) -> 'ok'.
error(Format) -> error(Format, []).
-spec error(string(), [any()]) -> 'ok'.
error(Format, Args) -> error(self(), Format, Args).
-spec error(pid() | [tuple()], string(), [any()]) -> 'ok'.
error(Pid, Format, Args) ->
logger:error(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_CONN}).
-spec critical(string()) -> 'ok'.
critical(Format) -> critical(Format, []).
-spec critical(string(), [any()]) -> 'ok'.
critical(Format, Args) -> critical(self(), Format, Args).
-spec critical(pid() | [tuple()], string(), [any()]) -> 'ok'.
critical(Pid, Format, Args) ->
logger:critical(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_CONN}).
-spec alert(string()) -> 'ok'.
alert(Format) -> alert(Format, []).
-spec alert(string(), [any()]) -> 'ok'.
alert(Format, Args) -> alert(self(), Format, Args).
-spec alert(pid() | [tuple()], string(), [any()]) -> 'ok'.
alert(Pid, Format, Args) ->
logger:alert(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_CONN}).
-spec emergency(string()) -> 'ok'.
emergency(Format) -> emergency(Format, []).
-spec emergency(string(), [any()]) -> 'ok'.
emergency(Format, Args) -> emergency(self(), Format, Args).
-spec emergency(pid() | [tuple()], string(), [any()]) -> 'ok'.
emergency(Pid, Format, Args) ->
logger:emergency(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_CONN}).
-spec none(string()) -> 'ok'.
none(_Format) -> ok.
-spec none(string(), [any()]) -> 'ok'.
none(_Format, _Args) -> ok.
-spec none(pid() | [tuple()], string(), [any()]) -> 'ok'.
none(_Pid, _Format, _Args) -> ok.


@ -1,122 +0,0 @@
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2007-2025 Broadcom. All Rights Reserved. The term Broadcom refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
%%
%% @doc Compatibility module for the old Lager-based logging API.
-module(rabbit_log_mirroring).
-export([debug/1, debug/2, debug/3,
info/1, info/2, info/3,
notice/1, notice/2, notice/3,
warning/1, warning/2, warning/3,
error/1, error/2, error/3,
critical/1, critical/2, critical/3,
alert/1, alert/2, alert/3,
emergency/1, emergency/2, emergency/3,
none/1, none/2, none/3]).
-include_lib("rabbit_common/include/logging.hrl").
-compile({no_auto_import, [error/2, error/3]}).
%%----------------------------------------------------------------------------
-spec debug(string()) -> 'ok'.
debug(Format) -> debug(Format, []).
-spec debug(string(), [any()]) -> 'ok'.
debug(Format, Args) -> debug(self(), Format, Args).
-spec debug(pid() | [tuple()], string(), [any()]) -> 'ok'.
debug(Pid, Format, Args) ->
logger:debug(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_MIRRORING}).
-spec info(string()) -> 'ok'.
info(Format) -> info(Format, []).
-spec info(string(), [any()]) -> 'ok'.
info(Format, Args) -> info(self(), Format, Args).
-spec info(pid() | [tuple()], string(), [any()]) -> 'ok'.
info(Pid, Format, Args) ->
logger:info(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_MIRRORING}).
-spec notice(string()) -> 'ok'.
notice(Format) -> notice(Format, []).
-spec notice(string(), [any()]) -> 'ok'.
notice(Format, Args) -> notice(self(), Format, Args).
-spec notice(pid() | [tuple()], string(), [any()]) -> 'ok'.
notice(Pid, Format, Args) ->
logger:notice(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_MIRRORING}).
-spec warning(string()) -> 'ok'.
warning(Format) -> warning(Format, []).
-spec warning(string(), [any()]) -> 'ok'.
warning(Format, Args) -> warning(self(), Format, Args).
-spec warning(pid() | [tuple()], string(), [any()]) -> 'ok'.
warning(Pid, Format, Args) ->
logger:warning(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_MIRRORING}).
-spec error(string()) -> 'ok'.
error(Format) -> error(Format, []).
-spec error(string(), [any()]) -> 'ok'.
error(Format, Args) -> error(self(), Format, Args).
-spec error(pid() | [tuple()], string(), [any()]) -> 'ok'.
error(Pid, Format, Args) ->
logger:error(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_MIRRORING}).
-spec critical(string()) -> 'ok'.
critical(Format) -> critical(Format, []).
-spec critical(string(), [any()]) -> 'ok'.
critical(Format, Args) -> critical(self(), Format, Args).
-spec critical(pid() | [tuple()], string(), [any()]) -> 'ok'.
critical(Pid, Format, Args) ->
logger:critical(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_MIRRORING}).
-spec alert(string()) -> 'ok'.
alert(Format) -> alert(Format, []).
-spec alert(string(), [any()]) -> 'ok'.
alert(Format, Args) -> alert(self(), Format, Args).
-spec alert(pid() | [tuple()], string(), [any()]) -> 'ok'.
alert(Pid, Format, Args) ->
logger:alert(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_MIRRORING}).
-spec emergency(string()) -> 'ok'.
emergency(Format) -> emergency(Format, []).
-spec emergency(string(), [any()]) -> 'ok'.
emergency(Format, Args) -> emergency(self(), Format, Args).
-spec emergency(pid() | [tuple()], string(), [any()]) -> 'ok'.
emergency(Pid, Format, Args) ->
logger:emergency(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_MIRRORING}).
-spec none(string()) -> 'ok'.
none(_Format) -> ok.
-spec none(string(), [any()]) -> 'ok'.
none(_Format, _Args) -> ok.
-spec none(pid() | [tuple()], string(), [any()]) -> 'ok'.
none(_Pid, _Format, _Args) -> ok.


@ -1,120 +0,0 @@
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2007-2025 Broadcom. All Rights Reserved. The term Broadcom refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
%%
%% @doc Compatibility module for the old Lager-based logging API.
-module(rabbit_log_prelaunch).
-export([debug/1, debug/2, debug/3,
info/1, info/2, info/3,
notice/1, notice/2, notice/3,
warning/1, warning/2, warning/3,
error/1, error/2, error/3,
critical/1, critical/2, critical/3,
alert/1, alert/2, alert/3,
emergency/1, emergency/2, emergency/3,
none/1, none/2, none/3]).
-include_lib("rabbit_common/include/logging.hrl").
-compile({no_auto_import, [error/2, error/3]}).
-spec debug(string()) -> 'ok'.
debug(Format) -> debug(Format, []).
-spec debug(string(), [any()]) -> 'ok'.
debug(Format, Args) -> debug(self(), Format, Args).
-spec debug(pid() | [tuple()], string(), [any()]) -> 'ok'.
debug(Pid, Format, Args) ->
logger:debug(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_PRELAUNCH}).
-spec info(string()) -> 'ok'.
info(Format) -> info(Format, []).
-spec info(string(), [any()]) -> 'ok'.
info(Format, Args) -> info(self(), Format, Args).
-spec info(pid() | [tuple()], string(), [any()]) -> 'ok'.
info(Pid, Format, Args) ->
logger:info(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_PRELAUNCH}).
-spec notice(string()) -> 'ok'.
notice(Format) -> notice(Format, []).
-spec notice(string(), [any()]) -> 'ok'.
notice(Format, Args) -> notice(self(), Format, Args).
-spec notice(pid() | [tuple()], string(), [any()]) -> 'ok'.
notice(Pid, Format, Args) ->
logger:notice(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_PRELAUNCH}).
-spec warning(string()) -> 'ok'.
warning(Format) -> warning(Format, []).
-spec warning(string(), [any()]) -> 'ok'.
warning(Format, Args) -> warning(self(), Format, Args).
-spec warning(pid() | [tuple()], string(), [any()]) -> 'ok'.
warning(Pid, Format, Args) ->
logger:warning(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_PRELAUNCH}).
-spec error(string()) -> 'ok'.
error(Format) -> error(Format, []).
-spec error(string(), [any()]) -> 'ok'.
error(Format, Args) -> error(self(), Format, Args).
-spec error(pid() | [tuple()], string(), [any()]) -> 'ok'.
error(Pid, Format, Args) ->
logger:error(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_PRELAUNCH}).
-spec critical(string()) -> 'ok'.
critical(Format) -> critical(Format, []).
-spec critical(string(), [any()]) -> 'ok'.
critical(Format, Args) -> critical(self(), Format, Args).
-spec critical(pid() | [tuple()], string(), [any()]) -> 'ok'.
critical(Pid, Format, Args) ->
logger:critical(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_PRELAUNCH}).
-spec alert(string()) -> 'ok'.
alert(Format) -> alert(Format, []).
-spec alert(string(), [any()]) -> 'ok'.
alert(Format, Args) -> alert(self(), Format, Args).
-spec alert(pid() | [tuple()], string(), [any()]) -> 'ok'.
alert(Pid, Format, Args) ->
logger:alert(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_PRELAUNCH}).
-spec emergency(string()) -> 'ok'.
emergency(Format) -> emergency(Format, []).
-spec emergency(string(), [any()]) -> 'ok'.
emergency(Format, Args) -> emergency(self(), Format, Args).
-spec emergency(pid() | [tuple()], string(), [any()]) -> 'ok'.
emergency(Pid, Format, Args) ->
logger:emergency(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_PRELAUNCH}).
-spec none(string()) -> 'ok'.
none(_Format) -> ok.
-spec none(string(), [any()]) -> 'ok'.
none(_Format, _Args) -> ok.
-spec none(pid() | [tuple()], string(), [any()]) -> 'ok'.
none(_Pid, _Format, _Args) -> ok.


@ -1,120 +0,0 @@
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2007-2025 Broadcom. All Rights Reserved. The term Broadcom refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
%%
%% @doc Compatibility module for the old Lager-based logging API.
-module(rabbit_log_queue).
-export([debug/1, debug/2, debug/3,
info/1, info/2, info/3,
notice/1, notice/2, notice/3,
warning/1, warning/2, warning/3,
error/1, error/2, error/3,
critical/1, critical/2, critical/3,
alert/1, alert/2, alert/3,
emergency/1, emergency/2, emergency/3,
none/1, none/2, none/3]).
-include_lib("rabbit_common/include/logging.hrl").
-compile({no_auto_import, [error/2, error/3]}).
-spec debug(string()) -> 'ok'.
debug(Format) -> debug(Format, []).
-spec debug(string(), [any()]) -> 'ok'.
debug(Format, Args) -> debug(self(), Format, Args).
-spec debug(pid() | [tuple()], string(), [any()]) -> 'ok'.
debug(Pid, Format, Args) ->
logger:debug(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_QUEUE}).
-spec info(string()) -> 'ok'.
info(Format) -> info(Format, []).
-spec info(string(), [any()]) -> 'ok'.
info(Format, Args) -> info(self(), Format, Args).
-spec info(pid() | [tuple()], string(), [any()]) -> 'ok'.
info(Pid, Format, Args) ->
logger:info(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_QUEUE}).
-spec notice(string()) -> 'ok'.
notice(Format) -> notice(Format, []).
-spec notice(string(), [any()]) -> 'ok'.
notice(Format, Args) -> notice(self(), Format, Args).
-spec notice(pid() | [tuple()], string(), [any()]) -> 'ok'.
notice(Pid, Format, Args) ->
logger:notice(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_QUEUE}).
-spec warning(string()) -> 'ok'.
warning(Format) -> warning(Format, []).
-spec warning(string(), [any()]) -> 'ok'.
warning(Format, Args) -> warning(self(), Format, Args).
-spec warning(pid() | [tuple()], string(), [any()]) -> 'ok'.
warning(Pid, Format, Args) ->
logger:warning(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_QUEUE}).
-spec error(string()) -> 'ok'.
error(Format) -> error(Format, []).
-spec error(string(), [any()]) -> 'ok'.
error(Format, Args) -> error(self(), Format, Args).
-spec error(pid() | [tuple()], string(), [any()]) -> 'ok'.
error(Pid, Format, Args) ->
logger:error(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_QUEUE}).
-spec critical(string()) -> 'ok'.
critical(Format) -> critical(Format, []).
-spec critical(string(), [any()]) -> 'ok'.
critical(Format, Args) -> critical(self(), Format, Args).
-spec critical(pid() | [tuple()], string(), [any()]) -> 'ok'.
critical(Pid, Format, Args) ->
logger:critical(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_QUEUE}).
-spec alert(string()) -> 'ok'.
alert(Format) -> alert(Format, []).
-spec alert(string(), [any()]) -> 'ok'.
alert(Format, Args) -> alert(self(), Format, Args).
-spec alert(pid() | [tuple()], string(), [any()]) -> 'ok'.
alert(Pid, Format, Args) ->
logger:alert(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_QUEUE}).
-spec emergency(string()) -> 'ok'.
emergency(Format) -> emergency(Format, []).
-spec emergency(string(), [any()]) -> 'ok'.
emergency(Format, Args) -> emergency(self(), Format, Args).
-spec emergency(pid() | [tuple()], string(), [any()]) -> 'ok'.
emergency(Pid, Format, Args) ->
logger:emergency(Format, Args, #{pid => Pid,
domain => ?RMQLOG_DOMAIN_QUEUE}).
-spec none(string()) -> 'ok'.
none(_Format) -> ok.
-spec none(string(), [any()]) -> 'ok'.
none(_Format, _Args) -> ok.
-spec none(pid() | [tuple()], string(), [any()]) -> 'ok'.
none(_Pid, _Format, _Args) -> ok.


@ -8,6 +8,7 @@
-module(rabbit_maintenance).
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
%% FIXME: Ra consistent queries are currently fragile in the sense that the
%% query function may run on a remote node and the function reference or MFA
@ -62,13 +63,13 @@ is_enabled() ->
-spec drain() -> ok.
drain() ->
rabbit_log:warning("This node is being put into maintenance (drain) mode"),
?LOG_WARNING("This node is being put into maintenance (drain) mode"),
mark_as_being_drained(),
rabbit_log:info("Marked this node as undergoing maintenance"),
?LOG_INFO("Marked this node as undergoing maintenance"),
_ = suspend_all_client_listeners(),
rabbit_log:warning("Suspended all listeners and will no longer accept client connections"),
?LOG_WARNING("Suspended all listeners and will no longer accept client connections"),
{ok, NConnections} = close_all_client_connections(),
rabbit_log:warning("Closed ~b local client connections", [NConnections]),
?LOG_WARNING("Closed ~b local client connections", [NConnections]),
%% allow plugins to react e.g. by closing their protocol connections
rabbit_event:notify(maintenance_connections_closed, #{
reason => <<"node is being put into maintenance">>
@ -85,19 +86,19 @@ drain() ->
rabbit_event:notify(maintenance_draining, #{
reason => <<"node is being put into maintenance">>
}),
rabbit_log:info("Node is ready to be shut down for maintenance or upgrade"),
?LOG_INFO("Node is ready to be shut down for maintenance or upgrade"),
ok.
-spec revive() -> ok.
revive() ->
rabbit_log:info("This node is being revived from maintenance (drain) mode"),
?LOG_INFO("This node is being revived from maintenance (drain) mode"),
rabbit_queue_type:revive(),
rabbit_log:info("Resumed all listeners and will accept client connections again"),
?LOG_INFO("Resumed all listeners and will accept client connections again"),
_ = resume_all_client_listeners(),
rabbit_log:info("Resumed all listeners and will accept client connections again"),
?LOG_INFO("Resumed all listeners and will accept client connections again"),
unmark_as_being_drained(),
rabbit_log:info("Marked this node as back from maintenance and ready to serve clients"),
?LOG_INFO("Marked this node as back from maintenance and ready to serve clients"),
%% allow plugins to react
rabbit_event:notify(maintenance_revived, #{}),
@ -106,12 +107,12 @@ revive() ->
-spec mark_as_being_drained() -> boolean().
mark_as_being_drained() ->
rabbit_log:debug("Marking the node as undergoing maintenance"),
?LOG_DEBUG("Marking the node as undergoing maintenance"),
rabbit_db_maintenance:set(?DRAINING_STATUS).
-spec unmark_as_being_drained() -> boolean().
unmark_as_being_drained() ->
rabbit_log:debug("Unmarking the node as undergoing maintenance"),
?LOG_DEBUG("Unmarking the node as undergoing maintenance"),
rabbit_db_maintenance:set(?DEFAULT_STATUS).
-spec is_being_drained_local_read(node()) -> boolean().
@ -157,7 +158,7 @@ filter_out_drained_nodes_consistent_read(Nodes) ->
%% but previously established connections won't be interrupted.
suspend_all_client_listeners() ->
Listeners = rabbit_networking:node_client_listeners(node()),
rabbit_log:info("Asked to suspend ~b client connection listeners. "
?LOG_INFO("Asked to suspend ~b client connection listeners. "
"No new client connections will be accepted until these listeners are resumed!", [length(Listeners)]),
Results = lists:foldl(local_listener_fold_fun(fun ranch:suspend_listener/1), [], Listeners),
lists:foldl(fun ok_or_first_error/2, ok, Results).
@ -168,7 +169,7 @@ suspend_all_client_listeners() ->
%% A resumed listener will accept new client connections.
resume_all_client_listeners() ->
Listeners = rabbit_networking:node_client_listeners(node()),
rabbit_log:info("Asked to resume ~b client connection listeners. "
?LOG_INFO("Asked to resume ~b client connection listeners. "
"New client connections will be accepted from now on", [length(Listeners)]),
Results = lists:foldl(local_listener_fold_fun(fun ranch:resume_listener/1), [], Listeners),
lists:foldl(fun ok_or_first_error/2, ok, Results).
@ -180,15 +181,15 @@ close_all_client_connections() ->
{ok, length(Pids)}.
transfer_leadership_of_metadata_store(TransferCandidates) ->
rabbit_log:info("Will transfer leadership of metadata store with current leader on this node",
?LOG_INFO("Will transfer leadership of metadata store with current leader on this node",
[]),
case rabbit_khepri:transfer_leadership(TransferCandidates) of
{ok, Node} when Node == node(); Node == undefined ->
rabbit_log:info("Skipping leadership transfer of metadata store: current leader is not on this node");
?LOG_INFO("Skipping leadership transfer of metadata store: current leader is not on this node");
{ok, Node} ->
rabbit_log:info("Leadership transfer for metadata store on this node has been done. The new leader is ~p", [Node]);
?LOG_INFO("Leadership transfer for metadata store on this node has been done. The new leader is ~p", [Node]);
Error ->
rabbit_log:warning("Skipping leadership transfer of metadata store: ~p", [Error])
?LOG_WARNING("Skipping leadership transfer of metadata store: ~p", [Error])
end.
-spec primary_replica_transfer_candidate_nodes() -> [node()].


@ -8,6 +8,7 @@
-module(rabbit_mnesia).
-include_lib("rabbit_common/include/logging.hrl").
-include_lib("kernel/include/logger.hrl").
-export([%% Main interface
init/0,
@ -123,7 +124,7 @@ init() ->
NodeType = node_type(),
case is_node_type_permitted(NodeType) of
false ->
rabbit_log:info(
?LOG_INFO(
"RAM nodes are deprecated and not permitted. This "
"node will be converted to a disc node."),
init_db_and_upgrade(cluster_nodes(all), disc,
@ -175,7 +176,7 @@ can_join_cluster(DiscoveryNode) ->
%% do we think so ourselves?
case are_we_clustered_with(DiscoveryNode) of
true ->
rabbit_log:info("Asked to join a cluster but already a member of it: ~tp", [ClusterNodes]),
?LOG_INFO("Asked to join a cluster but already a member of it: ~tp", [ClusterNodes]),
{ok, already_member};
false ->
Msg = format_inconsistent_cluster_message(DiscoveryNode, node()),
@ -195,7 +196,7 @@ join_cluster(ClusterNodes, NodeType) when is_list(ClusterNodes) ->
false -> disc;
true -> NodeType
end,
rabbit_log:info("Clustering with ~tp as ~tp node",
?LOG_INFO("Clustering with ~tp as ~tp node",
[ClusterNodes, NodeType1]),
ok = init_db_with_mnesia(ClusterNodes, NodeType1,
true, true, _Retry = true),
@ -230,7 +231,7 @@ reset() ->
force_reset() ->
ensure_mnesia_not_running(),
rabbit_log:info("Resetting Rabbit forcefully", []),
?LOG_INFO("Resetting Rabbit forcefully", []),
wipe().
reset_gracefully() ->
@ -300,7 +301,7 @@ forget_cluster_node(Node, RemoveWhenOffline) ->
{true, false} -> remove_node_offline_node(Node);
{true, true} -> e(online_node_offline_flag);
{false, false} -> e(offline_node_no_offline_flag);
{false, true} -> rabbit_log:info(
{false, true} -> ?LOG_INFO(
"Removing node ~tp from cluster", [Node]),
case remove_node_if_mnesia_running(Node) of
ok -> ok;
@ -550,7 +551,7 @@ init_db(ClusterNodes, NodeType, CheckOtherNodes) ->
ensure_node_type_is_permitted(NodeType),
NodeIsVirgin = is_virgin_node(),
rabbit_log:debug("Does data directory looks like that of a blank (uninitialised) node? ~tp", [NodeIsVirgin]),
?LOG_DEBUG("Does data directory looks like that of a blank (uninitialised) node? ~tp", [NodeIsVirgin]),
Nodes = change_extra_db_nodes(ClusterNodes, CheckOtherNodes),
%% Note that we use `system_info' here and not the cluster status
%% since when we start rabbit for the first time the cluster
@ -744,7 +745,7 @@ remote_node_info(Node) ->
on_node_up(Node) ->
case running_disc_nodes() of
[Node] -> rabbit_log:info("cluster contains disc nodes again~n");
[Node] -> ?LOG_INFO("cluster contains disc nodes again~n");
_ -> ok
end.
@ -752,7 +753,7 @@ on_node_up(Node) ->
on_node_down(_Node) ->
case running_disc_nodes() of
[] -> rabbit_log:info("only running disc node went down~n");
[] -> ?LOG_INFO("only running disc node went down~n");
_ -> ok
end.
@ -891,17 +892,17 @@ create_schema() ->
false = rabbit_khepri:is_enabled(),
stop_mnesia(),
rabbit_log:debug("Will bootstrap a schema database..."),
?LOG_DEBUG("Will bootstrap a schema database..."),
rabbit_misc:ensure_ok(mnesia:create_schema([node()]), cannot_create_schema),
rabbit_log:debug("Bootstraped a schema database successfully"),
?LOG_DEBUG("Bootstraped a schema database successfully"),
start_mnesia(),
rabbit_log:debug("Will create schema database tables"),
?LOG_DEBUG("Will create schema database tables"),
ok = rabbit_table:create(),
rabbit_log:debug("Created schema database tables successfully"),
rabbit_log:debug("Will check schema database integrity..."),
?LOG_DEBUG("Created schema database tables successfully"),
?LOG_DEBUG("Will check schema database integrity..."),
ensure_schema_integrity(),
rabbit_log:debug("Schema database schema integrity check passed"),
?LOG_DEBUG("Schema database schema integrity check passed"),
ok.
remove_node_if_mnesia_running(Node) ->
@ -945,7 +946,7 @@ leave_cluster(Node) ->
end.
wait_for(Condition) ->
rabbit_log:info("Waiting for ~tp...", [Condition]),
?LOG_INFO("Waiting for ~tp...", [Condition]),
timer:sleep(1000).
start_mnesia(CheckConsistency) ->
@ -1067,10 +1068,10 @@ mnesia_and_msg_store_files() ->
rabbit_feature_flags:enabled_feature_flags_list_file(),
rabbit_khepri:dir()],
IgnoredFiles = [filename:basename(File) || File <- IgnoredFiles0],
rabbit_log:debug("Files and directories found in node's data directory: ~ts, of them to be ignored: ~ts",
?LOG_DEBUG("Files and directories found in node's data directory: ~ts, of them to be ignored: ~ts",
[string:join(lists:usort(List0), ", "), string:join(lists:usort(IgnoredFiles), ", ")]),
List = List0 -- IgnoredFiles,
rabbit_log:debug("Files and directories found in node's data directory sans ignored ones: ~ts", [string:join(lists:usort(List), ", ")]),
?LOG_DEBUG("Files and directories found in node's data directory sans ignored ones: ~ts", [string:join(lists:usort(List), ", ")]),
List
end.


@ -25,6 +25,7 @@
%%----------------------------------------------------------------------------
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-type(msg() :: any()).
@ -792,11 +793,11 @@ init([VHost, Type, BaseDir, ClientRefs, StartupFunState]) ->
true -> "clean";
false -> "unclean"
end,
rabbit_log:debug("Rebuilding message location index after ~ts shutdown...",
?LOG_DEBUG("Rebuilding message location index after ~ts shutdown...",
[Cleanliness]),
{CurOffset, State1 = #msstate { current_file = CurFile }} =
build_index(CleanShutdown, StartupFunState, State),
rabbit_log:debug("Finished rebuilding index", []),
?LOG_DEBUG("Finished rebuilding index", []),
%% Open the most recent file.
{ok, CurHdl} = writer_recover(Dir, CurFile, CurOffset),
{ok, State1 #msstate { current_file_handle = CurHdl,
@ -971,7 +972,7 @@ terminate(Reason, State = #msstate { index_ets = IndexEts,
{shutdown, _} -> {"", []};
_ -> {" with reason ~0p", [Reason]}
end,
rabbit_log:info("Stopping message store for directory '~ts'" ++ ExtraLog, [Dir|ExtraLogArgs]),
?LOG_INFO("Stopping message store for directory '~ts'" ++ ExtraLog, [Dir|ExtraLogArgs]),
%% stop the gc first, otherwise it could be working and we pull
%% out the ets tables from under it.
ok = rabbit_msg_store_gc:stop(GCPid),
@ -984,7 +985,7 @@ terminate(Reason, State = #msstate { index_ets = IndexEts,
case store_file_summary(FileSummaryEts, Dir) of
ok -> ok;
{error, FSErr} ->
rabbit_log:error("Unable to store file summary"
?LOG_ERROR("Unable to store file summary"
" for vhost message store for directory ~tp~n"
"Error: ~tp",
[Dir, FSErr])
@ -994,10 +995,10 @@ terminate(Reason, State = #msstate { index_ets = IndexEts,
index_terminate(IndexEts, Dir),
case store_recovery_terms([{client_refs, maps:keys(Clients)}], Dir) of
ok ->
rabbit_log:info("Message store for directory '~ts' is stopped", [Dir]),
?LOG_INFO("Message store for directory '~ts' is stopped", [Dir]),
ok;
{error, RTErr} ->
rabbit_log:error("Unable to save message store recovery terms"
?LOG_ERROR("Unable to save message store recovery terms"
" for directory ~tp~nError: ~tp",
[Dir, RTErr])
end,
@ -1703,7 +1704,7 @@ index_terminate(IndexEts, Dir) ->
[{extended_info, [object_count]}]) of
ok -> ok;
{error, Err} ->
rabbit_log:error("Unable to save message store index"
?LOG_ERROR("Unable to save message store index"
" for directory ~tp.~nError: ~tp",
[Dir, Err])
end,
@ -1716,11 +1717,11 @@ index_terminate(IndexEts, Dir) ->
recover_index_and_client_refs(_Recover, undefined, Dir, _Name) ->
{false, index_new(Dir), []};
recover_index_and_client_refs(false, _ClientRefs, Dir, Name) ->
rabbit_log:warning("Message store ~tp: rebuilding indices from scratch", [Name]),
?LOG_WARNING("Message store ~tp: rebuilding indices from scratch", [Name]),
{false, index_new(Dir), []};
recover_index_and_client_refs(true, ClientRefs, Dir, Name) ->
Fresh = fun (ErrorMsg, ErrorArgs) ->
rabbit_log:warning("Message store ~tp : " ++ ErrorMsg ++ "~n"
?LOG_WARNING("Message store ~tp : " ++ ErrorMsg ++ "~n"
"rebuilding indices from scratch",
[Name | ErrorArgs]),
{false, index_new(Dir), []}
@ -1813,9 +1814,9 @@ build_index(true, _StartupFunState,
{FileSize, State#msstate{ current_file = File }};
build_index(false, {MsgRefDeltaGen, MsgRefDeltaGenInit},
State = #msstate { dir = Dir }) ->
rabbit_log:debug("Rebuilding message refcount...", []),
?LOG_DEBUG("Rebuilding message refcount...", []),
ok = count_msg_refs(MsgRefDeltaGen, MsgRefDeltaGenInit, State),
rabbit_log:debug("Done rebuilding message refcount", []),
?LOG_DEBUG("Done rebuilding message refcount", []),
{ok, Pid} = gatherer:start_link(),
case [filename_to_num(FileName) ||
FileName <- list_sorted_filenames(Dir, ?FILE_EXTENSION)] of
@ -1829,7 +1830,7 @@ build_index(false, {MsgRefDeltaGen, MsgRefDeltaGenInit},
build_index_worker(Gatherer, #msstate { index_ets = IndexEts, dir = Dir },
File, Files) ->
Path = form_filename(Dir, filenum_to_name(File)),
rabbit_log:debug("Rebuilding message location index from ~ts (~B file(s) remaining)",
?LOG_DEBUG("Rebuilding message location index from ~ts (~B file(s) remaining)",
[Path, length(Files)]),
%% The scan function already dealt with duplicate messages
%% within the file, and only returns valid messages (we do
@ -2001,7 +2002,7 @@ delete_file_if_empty(File, State = #msstate {
compact_file(File, State = #gc_state { file_summary_ets = FileSummaryEts }) ->
case ets:lookup(FileSummaryEts, File) of
[] ->
rabbit_log:debug("File ~tp has already been deleted; no need to compact",
?LOG_DEBUG("File ~tp has already been deleted; no need to compact",
[File]),
ok;
[#file_summary{file_size = FileSize}] ->
@ -2046,7 +2047,7 @@ compact_file(File, FileSize,
%% after truncation. This is a debug message so it doesn't hurt to
%% put out more details around what's happening.
Reclaimed = FileSize - TruncateSize,
rabbit_log:debug("Compacted segment file number ~tp; ~tp bytes can now be reclaimed",
?LOG_DEBUG("Compacted segment file number ~tp; ~tp bytes can now be reclaimed",
[File, Reclaimed]),
%% Tell the message store to update its state.
gen_server2:cast(Server, {compacted_file, File}),
@ -2147,7 +2148,7 @@ truncate_file(File, Size, ThresholdTimestamp, #gc_state{ file_summary_ets = File
case ets:select(FileHandlesEts, [{{{'_', File}, '$1'},
[{'=<', '$1', ThresholdTimestamp}], ['$$']}], 1) of
{[_|_], _Cont} ->
rabbit_log:debug("Asked to truncate file ~p but it has active readers. Deferring.",
?LOG_DEBUG("Asked to truncate file ~p but it has active readers. Deferring.",
[File]),
defer;
_ ->
@ -2158,7 +2159,7 @@ truncate_file(File, Size, ThresholdTimestamp, #gc_state{ file_summary_ets = File
ok = file:close(Fd),
true = ets:update_element(FileSummaryEts, File,
{#file_summary.file_size, Size}),
rabbit_log:debug("Truncated file number ~tp; new size ~tp bytes", [File, Size]),
?LOG_DEBUG("Truncated file number ~tp; new size ~tp bytes", [File, Size]),
ok
end
end.
@ -2170,7 +2171,7 @@ delete_file(File, #gc_state { file_summary_ets = FileSummaryEts,
dir = Dir }) ->
case ets:match_object(FileHandlesEts, {{'_', File}, '_'}, 1) of
{[_|_], _Cont} ->
rabbit_log:debug("Asked to delete file ~p but it has active readers. Deferring.",
?LOG_DEBUG("Asked to delete file ~p but it has active readers. Deferring.",
[File]),
defer;
_ ->
@ -2178,7 +2179,7 @@ delete_file(File, #gc_state { file_summary_ets = FileSummaryEts,
file_size = FileSize }] = ets:lookup(FileSummaryEts, File),
ok = file:delete(form_filename(Dir, filenum_to_name(File))),
true = ets:delete(FileSummaryEts, File),
rabbit_log:debug("Deleted empty file number ~tp; reclaimed ~tp bytes", [File, FileSize]),
?LOG_DEBUG("Deleted empty file number ~tp; reclaimed ~tp bytes", [File, FileSize]),
ok
end.


@ -55,6 +55,7 @@
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("rabbit_common/include/rabbit_misc.hrl").
-include_lib("kernel/include/logger.hrl").
%% IANA-suggested ephemeral port range is 49152 to 65535
-define(FIRST_TEST_BIND_PORT, 49152).
@ -90,7 +91,7 @@
boot() ->
ok = record_distribution_listener(),
_ = application:start(ranch),
rabbit_log:debug("Started Ranch"),
?LOG_DEBUG("Started Ranch"),
%% Failures will throw exceptions
_ = boot_listeners(fun boot_tcp/2, application:get_env(rabbit, num_tcp_acceptors, 10),
application:get_env(rabbit, num_conns_sups, 1), "TCP"),
@ -103,7 +104,7 @@ boot_listeners(Fun, NumAcceptors, ConcurrentConnsSupsCount, Type) ->
ok ->
ok;
{error, {could_not_start_listener, Address, Port, Details}} = Error ->
rabbit_log:error("Failed to start ~ts listener [~ts]:~tp, error: ~tp",
?LOG_ERROR("Failed to start ~ts listener [~ts]:~tp, error: ~tp",
[Type, Address, Port, Details]),
throw(Error)
end.
@ -156,7 +157,7 @@ tcp_listener_addresses({Host, Port, Family0})
[{IPAddress, Port, Family} ||
{IPAddress, Family} <- getaddr(Host, Family0)];
tcp_listener_addresses({_Host, Port, _Family0}) ->
rabbit_log:error("invalid port ~tp - not 0..65535", [Port]),
?LOG_ERROR("invalid port ~tp - not 0..65535", [Port]),
throw({error, {invalid_port, Port}}).
tcp_listener_addresses_auto(Port) ->
@ -264,7 +265,7 @@ stop_ranch_listener_of_protocol(Protocol) ->
case ranch_ref_of_protocol(Protocol) of
undefined -> ok;
Ref ->
rabbit_log:debug("Stopping Ranch listener for protocol ~ts", [Protocol]),
?LOG_DEBUG("Stopping Ranch listener for protocol ~ts", [Protocol]),
ranch:stop_listener(Ref)
end.
@ -404,7 +405,7 @@ epmd_port_please(Name, Host) ->
epmd_port_please(Name, Host, 0) ->
maybe_get_epmd_port(Name, Host);
epmd_port_please(Name, Host, RetriesLeft) ->
rabbit_log:debug("Getting epmd port node '~ts', ~b retries left",
?LOG_DEBUG("Getting epmd port node '~ts', ~b retries left",
[Name, RetriesLeft]),
case catch maybe_get_epmd_port(Name, Host) of
ok -> ok;
@ -520,11 +521,11 @@ emit_connection_info_local(Items, Ref, AggregatorPid) ->
-spec close_connection(pid(), string()) -> 'ok'.
close_connection(Pid, Explanation) ->
rabbit_log:info("Closing connection ~tp because ~tp",
?LOG_INFO("Closing connection ~tp because ~tp",
[Pid, Explanation]),
try rabbit_reader:shutdown(Pid, Explanation)
catch exit:{Reason, _Location} ->
rabbit_log:warning("Could not close connection ~tp (reason: ~tp): ~p",
?LOG_WARNING("Could not close connection ~tp (reason: ~tp): ~p",
[Pid, Explanation, Reason])
end.
@ -561,7 +562,7 @@ failed_to_recv_proxy_header(Ref, Error) ->
closed -> "error when receiving proxy header: TCP socket was ~tp prematurely";
_Other -> "error when receiving proxy header: ~tp"
end,
rabbit_log:debug(Msg, [Error]),
?LOG_DEBUG(Msg, [Error]),
% The following call will clean up resources then exit
_ = try ranch:handshake(Ref) catch
_:_ -> ok
@ -602,7 +603,7 @@ ranch_handshake(Ref) ->
exit:{shutdown, {Reason, {PeerIp, PeerPort}}} = Error:Stacktrace ->
PeerAddress = io_lib:format("~ts:~tp", [rabbit_misc:ntoab(PeerIp), PeerPort]),
Protocol = ranch_ref_to_protocol(Ref),
rabbit_log:error("~p error during handshake for protocol ~p and peer ~ts",
?LOG_ERROR("~p error during handshake for protocol ~p and peer ~ts",
[Reason, Protocol, PeerAddress]),
erlang:raise(exit, Error, Stacktrace)
end.
@ -664,7 +665,7 @@ gethostaddr(Host, Family) ->
-spec host_lookup_error(_, _) -> no_return().
host_lookup_error(Host, Reason) ->
rabbit_log:error("invalid host ~tp - ~tp", [Host, Reason]),
?LOG_ERROR("invalid host ~tp - ~tp", [Host, Reason]),
throw({error, {invalid_host, Host, Reason}}).
resolve_family({_,_,_,_}, auto) -> inet;


@ -7,6 +7,9 @@
-module(rabbit_node_monitor).
-include_lib("kernel/include/logger.hrl").
-behaviour(gen_server).
-export([start_link/0]).
@ -314,7 +317,7 @@ find_blocked_global_peers() ->
Snapshot1 = snapshot_global_dict(),
timer:sleep(10_000),
Snapshot2 = snapshot_global_dict(),
logger:debug("global's sync tags 10s ago: ~p~n"
?LOG_DEBUG("global's sync tags 10s ago: ~p~n"
"global's sync tags now: ~p",
[Snapshot1, Snapshot2]),
find_blocked_global_peers1(Snapshot2, Snapshot1).
@ -341,11 +344,11 @@ unblock_global_peer(PeerNode) ->
PeerToThisCid = connection_id(PeerState, ThisNode),
ThisToPeerCid = connection_id(ThisState, PeerNode),
logger:info(
?LOG_INFO(
"global hang workaround: faking nodedown / nodeup between peer node ~s "
"(connection ID to us: ~p) and our node ~s (connection ID to peer: ~p)",
[PeerNode, PeerToThisCid, ThisNode, ThisToPeerCid]),
logger:debug(
?LOG_DEBUG(
"peer global state: ~tp~nour global state: ~tp",
[erpc:call(PeerNode, sys, get_status, [global_name_server]),
sys:get_status(global_name_server)]),
@ -492,14 +495,14 @@ handle_cast({check_partial_partition, Node, Rep, NodeGUID, MyGUID, RepGUID},
case rpc:call(Node, erlang, system_info, [creation]) of
{badrpc, _} -> ok;
NodeGUID ->
rabbit_log:warning("Received a 'DOWN' message"
?LOG_WARNING("Received a 'DOWN' message"
" from ~tp but still can"
" communicate with it ",
[Node]),
cast(Rep, {partial_partition,
Node, node(), RepGUID});
_ ->
rabbit_log:warning("Node ~tp was restarted", [Node]),
?LOG_WARNING("Node ~tp was restarted", [Node]),
ok
end
end),
@ -530,7 +533,7 @@ handle_cast({partial_partition, NotReallyDown, Proxy, MyGUID},
ArgsBase = [NotReallyDown, Proxy, NotReallyDown],
case application:get_env(rabbit, cluster_partition_handling) of
{ok, pause_minority} ->
rabbit_log:error(
?LOG_ERROR(
FmtBase ++ " * pause_minority mode enabled~n"
"We will therefore pause until the *entire* cluster recovers",
ArgsBase),
@ -538,17 +541,17 @@ handle_cast({partial_partition, NotReallyDown, Proxy, MyGUID},
{noreply, State};
{ok, {pause_if_all_down, PreferredNodes, _}} ->
case in_preferred_partition(PreferredNodes) of
true -> rabbit_log:error(
true -> ?LOG_ERROR(
FmtBase ++ "We will therefore intentionally "
"disconnect from ~ts", ArgsBase ++ [Proxy]),
upgrade_to_full_partition(Proxy);
false -> rabbit_log:info(
false -> ?LOG_INFO(
FmtBase ++ "We are about to pause, no need "
"for further actions", ArgsBase)
end,
{noreply, State};
{ok, _} ->
rabbit_log:error(
?LOG_ERROR(
FmtBase ++ "We will therefore intentionally disconnect from ~ts",
ArgsBase ++ [Proxy]),
upgrade_to_full_partition(Proxy),
@ -562,7 +565,7 @@ handle_cast({partial_partition, _GUID, _Reporter, _Proxy}, State) ->
%% messages reliably when another node disconnects from us. Therefore
%% we are told just before the disconnection so we can reciprocate.
handle_cast({partial_partition_disconnect, Other}, State) ->
rabbit_log:error("Partial partition disconnect from ~ts", [Other]),
?LOG_ERROR("Partial partition disconnect from ~ts", [Other]),
disconnect(Other),
{noreply, State};
@ -571,7 +574,7 @@ handle_cast({partial_partition_disconnect, Other}, State) ->
%% mnesia propagation.
handle_cast({node_up, Node, NodeType},
State = #state{monitors = Monitors}) ->
rabbit_log:info("rabbit on node ~tp up", [Node]),
?LOG_INFO("rabbit on node ~tp up", [Node]),
case rabbit_khepri:is_enabled() of
true ->
ok;
@ -606,7 +609,7 @@ handle_cast({joined_cluster, Node, NodeType}, State) ->
end,
RunningNodes})
end,
rabbit_log:debug("Node '~tp' has joined the cluster", [Node]),
?LOG_DEBUG("Node '~tp' has joined the cluster", [Node]),
rabbit_event:notify(node_added, [{node, Node}]),
{noreply, State};
@ -634,7 +637,7 @@ handle_cast(_Msg, State) ->
handle_info({'DOWN', _MRef, process, {rabbit, Node}, _Reason},
State = #state{monitors = Monitors, subscribers = Subscribers}) ->
rabbit_log:info("rabbit on node ~tp down", [Node]),
?LOG_INFO("rabbit on node ~tp down", [Node]),
case rabbit_khepri:is_enabled() of
true ->
ok;
@ -653,7 +656,7 @@ handle_info({'DOWN', _MRef, process, Pid, _Reason},
{noreply, State#state{subscribers = pmon:erase(Pid, Subscribers)}};
handle_info({nodedown, Node, Info}, State) ->
rabbit_log:info("node ~tp down: ~tp",
?LOG_INFO("node ~tp down: ~tp",
[Node, proplists:get_value(nodedown_reason, Info)]),
case rabbit_khepri:is_enabled() of
true -> {noreply, State};
@ -661,7 +664,7 @@ handle_info({nodedown, Node, Info}, State) ->
end;
handle_info({nodeup, Node, _Info}, State) ->
rabbit_log:info("node ~tp up", [Node]),
?LOG_INFO("node ~tp up", [Node]),
{noreply, State};
handle_info({mnesia_system_event,
@ -781,13 +784,13 @@ handle_dead_node(Node, State = #state{autoheal = Autoheal}) ->
{ok, autoheal} ->
State#state{autoheal = rabbit_autoheal:node_down(Node, Autoheal)};
{ok, Term} ->
rabbit_log:warning("cluster_partition_handling ~tp unrecognised, "
?LOG_WARNING("cluster_partition_handling ~tp unrecognised, "
"assuming 'ignore'", [Term]),
State
end.
await_cluster_recovery(Condition) ->
rabbit_log:warning("Cluster minority/secondary status detected - "
?LOG_WARNING("Cluster minority/secondary status detected - "
"awaiting recovery", []),
run_outside_applications(fun () ->
rabbit:stop(),
@ -838,7 +841,7 @@ do_run_outside_app_fun(Fun) ->
try
Fun()
catch _:E:Stacktrace ->
rabbit_log:error(
?LOG_ERROR(
"rabbit_outside_app_process:~n~tp~n~tp",
[E, Stacktrace])
end.
@ -1048,14 +1051,14 @@ possibly_partitioned_nodes() ->
alive_rabbit_nodes() -- rabbit_mnesia:cluster_nodes(running).
startup_log() ->
rabbit_log:info("Starting rabbit_node_monitor (partition handling strategy unapplicable with Khepri)", []).
?LOG_INFO("Starting rabbit_node_monitor (partition handling strategy unapplicable with Khepri)", []).
startup_log(Nodes) ->
{ok, M} = application:get_env(rabbit, cluster_partition_handling),
startup_log(Nodes, M).
startup_log([], PartitionHandling) ->
rabbit_log:info("Starting rabbit_node_monitor (in ~tp mode)", [PartitionHandling]);
?LOG_INFO("Starting rabbit_node_monitor (in ~tp mode)", [PartitionHandling]);
startup_log(Nodes, PartitionHandling) ->
rabbit_log:info("Starting rabbit_node_monitor (in ~tp mode), might be partitioned from ~tp",
?LOG_INFO("Starting rabbit_node_monitor (in ~tp mode), might be partitioned from ~tp",
[PartitionHandling, Nodes]).


@ -126,7 +126,7 @@ seed_internal_cluster_id() ->
case rabbit_runtime_parameters:lookup_global(?INTERNAL_CLUSTER_ID_PARAM_NAME) of
not_found ->
Id = rabbit_guid:binary(rabbit_guid:gen(), "rabbitmq-cluster-id"),
rabbit_log:info("Initialising internal cluster ID to '~ts'", [Id]),
?LOG_INFO("Initialising internal cluster ID to '~ts'", [Id]),
rabbit_runtime_parameters:set_global(?INTERNAL_CLUSTER_ID_PARAM_NAME, Id, ?INTERNAL_USER),
Id;
Param ->
@ -138,7 +138,7 @@ seed_user_provided_cluster_name() ->
case application:get_env(rabbit, cluster_name) of
undefined -> ok;
{ok, Name} ->
rabbit_log:info("Setting cluster name to '~ts' as configured", [Name]),
?LOG_INFO("Setting cluster name to '~ts' as configured", [Name]),
set_cluster_name(rabbit_data_coercion:to_binary(Name))
end.


@ -6,6 +6,9 @@
%%
-module(rabbit_peer_discovery_classic_config).
-include_lib("kernel/include/logger.hrl").
-behaviour(rabbit_peer_discovery_backend).
-export([list_nodes/0, supports_registration/0, register/0, unregister/0,
@ -42,7 +45,7 @@ check_duplicates(Nodes) ->
true ->
ok;
false ->
rabbit_log:warning("Classic peer discovery backend: list of "
?LOG_WARNING("Classic peer discovery backend: list of "
"nodes contains duplicates ~0tp",
[Nodes])
end.
@ -52,7 +55,7 @@ check_local_node(Nodes) ->
true ->
ok;
false ->
rabbit_log:warning("Classic peer discovery backend: list of "
?LOG_WARNING("Classic peer discovery backend: list of "
"nodes does not contain the local node ~0tp",
[Nodes])
end.
@ -65,7 +68,7 @@ lock(Nodes) ->
Node = node(),
case lists:member(Node, Nodes) of
false when Nodes =/= [] ->
rabbit_log:warning("Local node ~ts is not part of configured nodes ~tp. "
?LOG_WARNING("Local node ~ts is not part of configured nodes ~tp. "
"This might lead to incorrect cluster formation.", [Node, Nodes]);
_ -> ok
end,


@ -6,6 +6,9 @@
%%
-module(rabbit_peer_discovery_dns).
-include_lib("kernel/include/logger.hrl").
-behaviour(rabbit_peer_discovery_backend).
-export([list_nodes/0, supports_registration/0, register/0, unregister/0,
@ -27,7 +30,7 @@ list_nodes() ->
{ok, ClusterFormation} ->
case proplists:get_value(peer_discovery_dns, ClusterFormation) of
undefined ->
rabbit_log:warning("Peer discovery backend is set to ~ts "
?LOG_WARNING("Peer discovery backend is set to ~ts "
"but final config does not contain rabbit.cluster_formation.peer_discovery_dns. "
"Cannot discover any nodes because seed hostname is not configured!",
[?MODULE]),
@ -90,7 +93,7 @@ decode_record(ipv6) ->
lookup(SeedHostname, LongNamesUsed, IPv) ->
IPs = inet_res:lookup(SeedHostname, in, decode_record(IPv)),
rabbit_log:info("Addresses discovered via ~ts records of ~ts: ~ts",
?LOG_INFO("Addresses discovered via ~ts records of ~ts: ~ts",
[string:to_upper(atom_to_list(decode_record(IPv))),
SeedHostname,
string:join([inet_parse:ntoa(IP) || IP <- IPs], ", ")]),
@ -106,6 +109,6 @@ extract_host({ok, {hostent, FQDN, _, _, _, _}}, true, _Address) ->
extract_host({ok, {hostent, FQDN, _, _, _, _}}, false, _Address) ->
lists:nth(1, string:tokens(FQDN, "."));
extract_host({error, Error}, _, Address) ->
rabbit_log:error("Reverse DNS lookup for address ~ts failed: ~tp",
?LOG_ERROR("Reverse DNS lookup for address ~ts failed: ~tp",
[inet_parse:ntoa(Address), Error]),
error.


@ -7,6 +7,7 @@
-module(rabbit_plugins).
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-export([setup/0, active/0, read_enabled/1, list/1, list/2, dependencies/3, running_plugins/0]).
-export([ensure/1]).
-export([validate_plugins/1, format_invalid_plugins/1]).
@ -54,13 +55,13 @@ ensure1(FileJustChanged0) ->
{[], []} ->
ok;
{[], _} ->
rabbit_log:info("Plugins changed; disabled ~tp",
?LOG_INFO("Plugins changed; disabled ~tp",
[Stop]);
{_, []} ->
rabbit_log:info("Plugins changed; enabled ~tp",
?LOG_INFO("Plugins changed; enabled ~tp",
[Start]);
{_, _} ->
rabbit_log:info("Plugins changed; enabled ~tp, disabled ~tp",
?LOG_INFO("Plugins changed; enabled ~tp, disabled ~tp",
[Start, Stop])
end,
{ok, Start, Stop};
@ -271,7 +272,7 @@ maybe_warn_about_invalid_plugins([]) ->
ok;
maybe_warn_about_invalid_plugins(InvalidPlugins) ->
%% TODO: error message formatting
rabbit_log:warning(format_invalid_plugins(InvalidPlugins)).
?LOG_WARNING(format_invalid_plugins(InvalidPlugins)).
format_invalid_plugins(InvalidPlugins) ->
@ -327,7 +328,7 @@ validate_plugins(Plugins, BrokerVersion) ->
true ->
case BrokerVersion of
"0.0.0" ->
rabbit_log:warning(
?LOG_WARNING(
"Running development version of the broker."
" Requirement ~tp for plugin ~tp is ignored.",
[BrokerVersionReqs, Name]);
@ -358,7 +359,7 @@ check_plugins_versions(PluginName, AllPlugins, RequiredVersions) ->
true ->
case Version of
"" ->
rabbit_log:warning(
?LOG_WARNING(
"~tp plugin version is not defined."
" Requirement ~tp for plugin ~tp is ignored",
[Name, Versions, PluginName]);
@ -426,7 +427,7 @@ prepare_dir_plugin(PluginAppDescPath) ->
{module, _} ->
ok;
{error, badfile} ->
rabbit_log:error("Failed to enable plugin \"~ts\": "
?LOG_ERROR("Failed to enable plugin \"~ts\": "
"it may have been built with an "
"incompatible (more recent?) "
"version of Erlang", [Plugin]),
@ -459,11 +460,11 @@ prepare_plugin(#plugin{type = ez, name = Name, location = Location}, ExpandDir)
[PluginAppDescPath|_] ->
prepare_dir_plugin(PluginAppDescPath);
_ ->
rabbit_log:error("Plugin archive '~ts' doesn't contain an .app file", [Location]),
?LOG_ERROR("Plugin archive '~ts' doesn't contain an .app file", [Location]),
throw({app_file_missing, Name, Location})
end;
{error, Reason} ->
rabbit_log:error("Could not unzip plugin archive '~ts': ~tp", [Location, Reason]),
?LOG_ERROR("Could not unzip plugin archive '~ts': ~tp", [Location, Reason]),
throw({failed_to_unzip_plugin, Name, Location, Reason})
end;
prepare_plugin(#plugin{type = dir, location = Location, name = Name},
@ -472,7 +473,7 @@ prepare_plugin(#plugin{type = dir, location = Location, name = Name},
[PluginAppDescPath|_] ->
prepare_dir_plugin(PluginAppDescPath);
_ ->
rabbit_log:error("Plugin directory '~ts' doesn't contain an .app file", [Location]),
?LOG_ERROR("Plugin directory '~ts' doesn't contain an .app file", [Location]),
throw({app_file_missing, Name, Location})
end.
@ -668,12 +669,12 @@ remove_plugins(Plugins) ->
lists:member(Name, PluginDeps),
if
IsOTPApp ->
rabbit_log:debug(
?LOG_DEBUG(
"Plugins discovery: "
"ignoring ~ts, Erlang/OTP application",
[Name]);
not IsAPlugin ->
rabbit_log:debug(
?LOG_DEBUG(
"Plugins discovery: "
"ignoring ~ts, not a RabbitMQ plugin",
[Name]);


@ -29,6 +29,7 @@
-include_lib("rabbit_common/include/rabbit.hrl").
-include("amqqueue.hrl").
-include_lib("kernel/include/logger.hrl").
-import(rabbit_misc, [pget/2, pget/3]).
@ -285,7 +286,7 @@ parse_set0(Type, VHost, Name, Pattern, Defn, Priority, ApplyTo, ActingUser) ->
{<<"priority">>, Priority},
{<<"apply-to">>, ApplyTo}],
ActingUser),
rabbit_log:info("Successfully set policy '~ts' matching ~ts names in virtual host '~ts' using pattern '~ts'",
?LOG_INFO("Successfully set policy '~ts' matching ~ts names in virtual host '~ts' using pattern '~ts'",
[Name, ApplyTo, VHost, Pattern]),
R;
{error, Reason} ->


@ -501,6 +501,8 @@ clear_config_run_number() ->
-spec configure_logger(rabbit_env:context()) -> ok.
configure_logger(Context) ->
_ = logger:set_primary_config(metadata, #{domain => ?RMQLOG_DOMAIN_GLOBAL}),
%% Configure main handlers.
%% We distinguish them by their type and possibly other
%% parameters (file name, syslog settings, etc.).


@ -9,6 +9,7 @@
-include_lib("rabbit_common/include/rabbit.hrl").
-include("amqqueue.hrl").
-include_lib("kernel/include/logger.hrl").
-behaviour(rabbit_backing_queue).
@ -66,7 +67,7 @@ enable() ->
{ok, RealBQ} = application:get_env(rabbit, backing_queue_module),
case RealBQ of
?MODULE -> ok;
_ -> rabbit_log:info("Priority queues enabled, real BQ is ~ts",
_ -> ?LOG_INFO("Priority queues enabled, real BQ is ~ts",
[RealBQ]),
application:set_env(
rabbitmq_priority_queue, backing_queue_module, RealBQ),


@ -223,6 +223,7 @@
}).
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
%%----------------------------------------------------------------------------
@ -556,7 +557,7 @@ start(VHost, DurableQueueNames) ->
ToDelete = [filename:join([rabbit_vhost:msg_store_dir_path(VHost), "queues", Dir])
|| Dir <- lists:subtract(all_queue_directory_names(VHost),
sets:to_list(DurableDirectories))],
rabbit_log:debug("Deleting unknown files/folders: ~p", [ToDelete]),
?LOG_DEBUG("Deleting unknown files/folders: ~p", [ToDelete]),
_ = rabbit_file:recursive_delete(ToDelete),
rabbit_recovery_terms:clear(VHost),
@ -1182,7 +1183,7 @@ load_segment(KeepAcked, #segment { path = Path }) ->
%% was missing above). We also log some information.
case SegBin of
<<0:Size/unit:8>> ->
rabbit_log:warning("Deleting invalid v1 segment file ~ts (file only contains NUL bytes)",
?LOG_WARNING("Deleting invalid v1 segment file ~ts (file only contains NUL bytes)",
[Path]),
_ = rabbit_file:delete(Path),
Empty;


@ -14,6 +14,7 @@
-include("vhost.hrl").
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("amqp10_common/include/amqp10_types.hrl").
-include_lib("kernel/include/logger.hrl").
-export([
init/0,
@ -554,7 +555,7 @@ recover(VHost, Qs) ->
end, ByType0, Qs),
maps:fold(fun (Mod, Queues, {R0, F0}) ->
{Taken, {R, F}} = timer:tc(Mod, recover, [VHost, Queues]),
rabbit_log:info("Recovering ~b queues of type ~ts took ~bms",
?LOG_INFO("Recovering ~b queues of type ~ts took ~bms",
[length(Queues), Mod, Taken div 1000]),
{R0 ++ R, F0 ++ F}
end, {[], []}, ByType).


@ -106,6 +106,7 @@
-include_lib("stdlib/include/qlc.hrl").
-include_lib("rabbit_common/include/rabbit.hrl").
-include("amqqueue.hrl").
-include_lib("kernel/include/logger.hrl").
-rabbit_boot_step(
{rabbit_quorum_queue_type,
@ -129,7 +130,7 @@
-define(DEFAULT_DELIVERY_LIMIT, 20).
-define(INFO(Str, Args),
rabbit_log:info("[~s:~s/~b] " Str,
?LOG_INFO("[~s:~s/~b] " Str,
[?MODULE, ?FUNCTION_NAME, ?FUNCTION_ARITY | Args])).
@ -284,7 +285,7 @@ start_cluster(Q) ->
?RPC_TIMEOUT)],
MinVersion = lists:min([rabbit_fifo:version() | Versions]),
rabbit_log:debug("Will start up to ~w replicas for quorum queue ~ts with "
?LOG_DEBUG("Will start up to ~w replicas for quorum queue ~ts with "
"leader on node '~ts', initial machine version ~b",
[QuorumSize, rabbit_misc:rs(QName), LeaderNode, MinVersion]),
case rabbit_amqqueue:internal_declare(NewQ1, false) of
@ -354,7 +355,7 @@ gather_policy_config(Q, IsQueueDeclaration) ->
undefined ->
case IsQueueDeclaration of
true ->
rabbit_log:info(
?LOG_INFO(
"~ts: delivery_limit not set, defaulting to ~b",
[rabbit_misc:rs(QName), ?DEFAULT_DELIVERY_LIMIT]);
false ->
@ -660,7 +661,7 @@ handle_tick(QName,
ok ->
ok;
repaired ->
rabbit_log:debug("Repaired quorum queue ~ts amqqueue record",
?LOG_DEBUG("Repaired quorum queue ~ts amqqueue record",
[rabbit_misc:rs(QName)])
end,
ExpectedNodes = rabbit_nodes:list_members(),
@ -670,7 +671,7 @@ handle_tick(QName,
Stale when length(ExpectedNodes) > 0 ->
%% rabbit_nodes:list_members/0 returns [] when there
%% is an error so we need to handle that case
rabbit_log:debug("~ts: stale nodes detected in quorum "
?LOG_DEBUG("~ts: stale nodes detected in quorum "
"queue state. Purging ~w",
[rabbit_misc:rs(QName), Stale]),
%% pipeline purge command
@ -684,13 +685,13 @@ handle_tick(QName,
ok
catch
_:Err ->
rabbit_log:debug("~ts: handle tick failed with ~p",
?LOG_DEBUG("~ts: handle tick failed with ~p",
[rabbit_misc:rs(QName), Err]),
ok
end
end);
handle_tick(QName, Config, _Nodes) ->
rabbit_log:debug("~ts: handle tick received unexpected config format ~tp",
?LOG_DEBUG("~ts: handle tick received unexpected config format ~tp",
[rabbit_misc:rs(QName), Config]).
repair_leader_record(Q, Name) ->
@ -701,7 +702,7 @@ repair_leader_record(Q, Name) ->
ok;
_ ->
QName = amqqueue:get_name(Q),
rabbit_log:debug("~ts: updating leader record to current node ~ts",
?LOG_DEBUG("~ts: updating leader record to current node ~ts",
[rabbit_misc:rs(QName), Node]),
ok = become_leader0(QName, Name),
ok
@ -776,7 +777,7 @@ maybe_apply_policies(Q, #{config := CurrentConfig}) ->
ShouldUpdate = NewPolicyConfig =/= CurrentPolicyConfig,
case ShouldUpdate of
true ->
rabbit_log:debug("Re-applying policies to ~ts", [rabbit_misc:rs(amqqueue:get_name(Q))]),
?LOG_DEBUG("Re-applying policies to ~ts", [rabbit_misc:rs(amqqueue:get_name(Q))]),
policy_changed(Q),
ok;
false -> ok
@ -798,7 +799,7 @@ recover(_Vhost, Queues) ->
{error, Err1}
when Err1 == not_started orelse
Err1 == name_not_registered ->
rabbit_log:warning("Quorum queue recovery: configured member of ~ts was not found on this node. Starting member as a new one. "
?LOG_WARNING("Quorum queue recovery: configured member of ~ts was not found on this node. Starting member as a new one. "
"Context: ~s",
[rabbit_misc:rs(QName), Err1]),
% queue was never started on this node
@ -806,7 +807,7 @@ recover(_Vhost, Queues) ->
case start_server(make_ra_conf(Q0, ServerId)) of
ok -> ok;
Err2 ->
rabbit_log:warning("recover: quorum queue ~w could not"
?LOG_WARNING("recover: quorum queue ~w could not"
" be started ~w", [Name, Err2]),
fail
end;
@ -817,7 +818,7 @@ recover(_Vhost, Queues) ->
ok;
Err ->
%% catch all clause to avoid causing the vhost not to start
rabbit_log:warning("recover: quorum queue ~w could not be "
?LOG_WARNING("recover: quorum queue ~w could not be "
"restarted ~w", [Name, Err]),
fail
end,
@ -908,7 +909,7 @@ delete(Q, _IfUnused, _IfEmpty, ActingUser) when ?amqqueue_is_quorum(Q) ->
ok;
false ->
%% attempt forced deletion of all servers
rabbit_log:warning(
?LOG_WARNING(
"Could not delete quorum '~ts', not enough nodes "
" online to reach a quorum: ~255p."
" Attempting force delete.",
@ -929,7 +930,7 @@ force_delete_queue(Servers) ->
case catch(ra:force_delete_server(?RA_SYSTEM, S)) of
ok -> ok;
Err ->
rabbit_log:warning(
?LOG_WARNING(
"Force delete of ~w failed with: ~w"
"This may require manual data clean up",
[S, Err]),
@ -1222,7 +1223,7 @@ policy_changed(Q) ->
ok;
Err ->
FormattedQueueName = rabbit_misc:rs(amqqueue:get_name(Q)),
rabbit_log:warning("~s: policy may not have been successfully applied. Error: ~p",
?LOG_WARNING("~s: policy may not have been successfully applied. Error: ~p",
[FormattedQueueName, Err]),
ok
end.
@ -1340,7 +1341,7 @@ add_member(VHost, Name, Node, Membership, Timeout)
is_binary(Name) andalso
is_atom(Node) ->
QName = #resource{virtual_host = VHost, name = Name, kind = queue},
rabbit_log:debug("Asked to add a replica for queue ~ts on node ~ts",
?LOG_DEBUG("Asked to add a replica for queue ~ts on node ~ts",
[rabbit_misc:rs(QName), Node]),
case rabbit_amqqueue:lookup(QName) of
{ok, Q} when ?amqqueue_is_classic(Q) ->
@ -1354,7 +1355,7 @@ add_member(VHost, Name, Node, Membership, Timeout)
case lists:member(Node, QNodes) of
true ->
%% idempotent by design
rabbit_log:debug("Quorum ~ts already has a replica on node ~ts",
?LOG_DEBUG("Quorum ~ts already has a replica on node ~ts",
[rabbit_misc:rs(QName), Node]),
ok;
false ->
@ -1422,7 +1423,7 @@ do_add_member(Q, Node, Membership, Timeout)
{erlang, is_list, []},
#{condition => {applied, {RaIndex, RaTerm}}}),
_ = rabbit_amqqueue:update(QName, Fun),
rabbit_log:info("Added a replica of quorum ~ts on node ~ts", [rabbit_misc:rs(QName), Node]),
?LOG_INFO("Added a replica of quorum ~ts on node ~ts", [rabbit_misc:rs(QName), Node]),
ok;
{timeout, _} ->
_ = ra:force_delete_server(?RA_SYSTEM, ServerId),
@ -1433,7 +1434,7 @@ do_add_member(Q, Node, Membership, Timeout)
E
end;
E ->
rabbit_log:warning("Could not add a replica of quorum ~ts on node ~ts: ~p",
?LOG_WARNING("Could not add a replica of quorum ~ts on node ~ts: ~p",
[rabbit_misc:rs(QName), Node, E]),
E
end.
@ -1484,7 +1485,7 @@ delete_member(Q, Node) when ?amqqueue_is_quorum(Q) ->
_ = rabbit_amqqueue:update(QName, Fun),
case ra:force_delete_server(?RA_SYSTEM, ServerId) of
ok ->
rabbit_log:info("Deleted a replica of quorum ~ts on node ~ts", [rabbit_misc:rs(QName), Node]),
?LOG_INFO("Deleted a replica of quorum ~ts on node ~ts", [rabbit_misc:rs(QName), Node]),
ok;
{error, {badrpc, nodedown}} ->
ok;
@ -1507,10 +1508,10 @@ delete_member(Q, Node) when ?amqqueue_is_quorum(Q) ->
[{rabbit_amqqueue:name(),
{ok, pos_integer()} | {error, pos_integer(), term()}}].
shrink_all(Node) ->
rabbit_log:info("Asked to remove all quorum queue replicas from node ~ts", [Node]),
?LOG_INFO("Asked to remove all quorum queue replicas from node ~ts", [Node]),
[begin
QName = amqqueue:get_name(Q),
rabbit_log:info("~ts: removing member (replica) on node ~w",
?LOG_INFO("~ts: removing member (replica) on node ~w",
[rabbit_misc:rs(QName), Node]),
Size = length(get_nodes(Q)),
case delete_member(Q, Node) of
@ -1520,7 +1521,7 @@ shrink_all(Node) ->
%% this could be timing related and due to a new leader just being
%% elected but it's noop command not been committed yet.
%% lets sleep and retry once
rabbit_log:info("~ts: failed to remove member (replica) on node ~w "
?LOG_INFO("~ts: failed to remove member (replica) on node ~w "
"as cluster change is not permitted. "
"retrying once in 500ms",
[rabbit_misc:rs(QName), Node]),
@ -1529,12 +1530,12 @@ shrink_all(Node) ->
ok ->
{QName, {ok, Size-1}};
{error, Err} ->
rabbit_log:warning("~ts: failed to remove member (replica) on node ~w, error: ~w",
?LOG_WARNING("~ts: failed to remove member (replica) on node ~w, error: ~w",
[rabbit_misc:rs(QName), Node, Err]),
{QName, {error, Size, Err}}
end;
{error, Err} ->
rabbit_log:warning("~ts: failed to remove member (replica) on node ~w, error: ~w",
?LOG_WARNING("~ts: failed to remove member (replica) on node ~w, error: ~w",
[rabbit_misc:rs(QName), Node, Err]),
{QName, {error, Size, Err}}
end
@ -1554,13 +1555,13 @@ grow(Node, VhostSpec, QueueSpec, Strategy, Membership) ->
[begin
Size = length(get_nodes(Q)),
QName = amqqueue:get_name(Q),
rabbit_log:info("~ts: adding a new member (replica) on node ~w",
?LOG_INFO("~ts: adding a new member (replica) on node ~w",
[rabbit_misc:rs(QName), Node]),
case add_member(Q, Node, Membership) of
ok ->
{QName, {ok, Size + 1}};
{error, Err} ->
rabbit_log:warning(
?LOG_WARNING(
"~ts: failed to add member (replica) on node ~w, error: ~w",
[rabbit_misc:rs(QName), Node, Err]),
{QName, {error, Size, Err}}
@ -1647,19 +1648,19 @@ dead_letter_handler(Q, Overflow) ->
dlh(undefined, undefined, undefined, _, _) ->
undefined;
dlh(undefined, RoutingKey, undefined, _, QName) ->
rabbit_log:warning("Disabling dead-lettering for ~ts despite configured dead-letter-routing-key '~ts' "
?LOG_WARNING("Disabling dead-lettering for ~ts despite configured dead-letter-routing-key '~ts' "
"because dead-letter-exchange is not configured.",
[rabbit_misc:rs(QName), RoutingKey]),
undefined;
dlh(undefined, _, Strategy, _, QName) ->
rabbit_log:warning("Disabling dead-lettering for ~ts despite configured dead-letter-strategy '~ts' "
?LOG_WARNING("Disabling dead-lettering for ~ts despite configured dead-letter-strategy '~ts' "
"because dead-letter-exchange is not configured.",
[rabbit_misc:rs(QName), Strategy]),
undefined;
dlh(_, _, <<"at-least-once">>, reject_publish, _) ->
at_least_once;
dlh(Exchange, RoutingKey, <<"at-least-once">>, drop_head, QName) ->
rabbit_log:warning("Falling back to dead-letter-strategy at-most-once for ~ts "
?LOG_WARNING("Falling back to dead-letter-strategy at-most-once for ~ts "
"because configured dead-letter-strategy at-least-once is incompatible with "
"effective overflow strategy drop-head. To enable dead-letter-strategy "
"at-least-once, set overflow strategy to reject-publish.",
@ -2030,7 +2031,7 @@ overflow(undefined, Def, _QName) -> Def;
overflow(<<"reject-publish">>, _Def, _QName) -> reject_publish;
overflow(<<"drop-head">>, _Def, _QName) -> drop_head;
overflow(<<"reject-publish-dlx">> = V, Def, QName) ->
rabbit_log:warning("Invalid overflow strategy ~tp for quorum queue: ~ts",
?LOG_WARNING("Invalid overflow strategy ~tp for quorum queue: ~ts",
[V, rabbit_misc:rs(QName)]),
Def.
@ -2069,7 +2070,7 @@ force_shrink_member_to_current_member(VHost, Name) ->
Node = node(),
QName = rabbit_misc:r(VHost, queue, Name),
QNameFmt = rabbit_misc:rs(QName),
rabbit_log:warning("Shrinking ~ts to a single node: ~ts", [QNameFmt, Node]),
?LOG_WARNING("Shrinking ~ts to a single node: ~ts", [QNameFmt, Node]),
case rabbit_amqqueue:lookup(QName) of
{ok, Q} when ?is_amqqueue(Q) ->
{RaName, _} = amqqueue:get_pid(Q),
@ -2082,19 +2083,19 @@ force_shrink_member_to_current_member(VHost, Name) ->
end,
_ = rabbit_amqqueue:update(QName, Fun),
_ = [ra:force_delete_server(?RA_SYSTEM, {RaName, N}) || N <- OtherNodes],
rabbit_log:warning("Shrinking ~ts finished", [QNameFmt]);
?LOG_WARNING("Shrinking ~ts finished", [QNameFmt]);
_ ->
rabbit_log:warning("Shrinking failed, ~ts not found", [QNameFmt]),
?LOG_WARNING("Shrinking failed, ~ts not found", [QNameFmt]),
{error, not_found}
end.
force_vhost_queues_shrink_member_to_current_member(VHost) when is_binary(VHost) ->
rabbit_log:warning("Shrinking all quorum queues in vhost '~ts' to a single node: ~ts", [VHost, node()]),
?LOG_WARNING("Shrinking all quorum queues in vhost '~ts' to a single node: ~ts", [VHost, node()]),
ListQQs = fun() -> rabbit_amqqueue:list(VHost) end,
force_all_queues_shrink_member_to_current_member(ListQQs).
force_all_queues_shrink_member_to_current_member() ->
rabbit_log:warning("Shrinking all quorum queues to a single node: ~ts", [node()]),
?LOG_WARNING("Shrinking all quorum queues to a single node: ~ts", [node()]),
ListQQs = fun() -> rabbit_amqqueue:list() end,
force_all_queues_shrink_member_to_current_member(ListQQs).
@ -2104,7 +2105,7 @@ force_all_queues_shrink_member_to_current_member(ListQQFun) when is_function(Lis
QName = amqqueue:get_name(Q),
{RaName, _} = amqqueue:get_pid(Q),
OtherNodes = lists:delete(Node, get_nodes(Q)),
rabbit_log:warning("Shrinking queue ~ts to a single node: ~ts", [rabbit_misc:rs(QName), Node]),
?LOG_WARNING("Shrinking queue ~ts to a single node: ~ts", [rabbit_misc:rs(QName), Node]),
ok = ra_server_proc:force_shrink_members_to_current_member({RaName, Node}),
Fun = fun (QQ) ->
TS0 = amqqueue:get_type_state(QQ),
@ -2114,7 +2115,7 @@ force_all_queues_shrink_member_to_current_member(ListQQFun) when is_function(Lis
_ = rabbit_amqqueue:update(QName, Fun),
_ = [ra:force_delete_server(?RA_SYSTEM, {RaName, N}) || N <- OtherNodes]
end || Q <- ListQQFun(), amqqueue:get_type(Q) == ?MODULE],
rabbit_log:warning("Shrinking finished"),
?LOG_WARNING("Shrinking finished"),
ok.
force_checkpoint_on_queue(QName) ->
@ -2124,7 +2125,7 @@ force_checkpoint_on_queue(QName) ->
{error, classic_queue_not_supported};
{ok, Q} when ?amqqueue_is_quorum(Q) ->
{RaName, _} = amqqueue:get_pid(Q),
rabbit_log:debug("Sending command to force ~ts to take a checkpoint", [QNameFmt]),
?LOG_DEBUG("Sending command to force ~ts to take a checkpoint", [QNameFmt]),
Nodes = amqqueue:get_nodes(Q),
_ = [ra:cast_aux_command({RaName, Node}, force_checkpoint)
|| Node <- Nodes],
@ -2142,7 +2143,7 @@ force_checkpoint(VhostSpec, QueueSpec) ->
ok ->
{QName, {ok}};
{error, Err} ->
rabbit_log:warning("~ts: failed to force checkpoint, error: ~w",
?LOG_WARNING("~ts: failed to force checkpoint, error: ~w",
[rabbit_misc:rs(QName), Err]),
{QName, {error, Err}}
end
@ -2274,7 +2275,7 @@ wait_for_leader_health_checks(Ref, N, UnhealthyAcc) ->
check_process_limit_safety(QCount, ProcessLimitThreshold) ->
case (erlang:system_info(process_count) + QCount) >= ProcessLimitThreshold of
true ->
rabbit_log:warning("Leader health check not permitted, process limit threshold will be exceeded."),
?LOG_WARNING("Leader health check not permitted, process limit threshold will be exceeded."),
throw({error, leader_health_check_process_limit_exceeded});
false ->
ok
@ -2283,7 +2284,7 @@ check_process_limit_safety(QCount, ProcessLimitThreshold) ->
maybe_log_leader_health_check_result([]) -> ok;
maybe_log_leader_health_check_result(Result) ->
Qs = lists:map(fun(R) -> catch maps:get(<<"readable_name">>, R) end, Result),
rabbit_log:warning("Leader health check result (unhealthy leaders detected): ~tp", [Qs]).
?LOG_WARNING("Leader health check result (unhealthy leaders detected): ~tp", [Qs]).
policy_apply_to_name() ->
<<"quorum_queues">>.
@ -2295,52 +2296,52 @@ drain(TransferCandidates) ->
ok.
transfer_leadership([]) ->
rabbit_log:warning("Skipping leadership transfer of quorum queues: no candidate "
?LOG_WARNING("Skipping leadership transfer of quorum queues: no candidate "
"(online, not under maintenance) nodes to transfer to!");
transfer_leadership(_TransferCandidates) ->
%% we only transfer leadership for QQs that have local leaders
Queues = rabbit_amqqueue:list_local_leaders(),
rabbit_log:info("Will transfer leadership of ~b quorum queues with current leader on this node",
?LOG_INFO("Will transfer leadership of ~b quorum queues with current leader on this node",
[length(Queues)]),
[begin
_ = [begin
Name = amqqueue:get_name(Q),
rabbit_log:debug("Will trigger a leader election for local quorum queue ~ts",
?LOG_DEBUG("Will trigger a leader election for local quorum queue ~ts",
[rabbit_misc:rs(Name)]),
%% we trigger an election and exclude this node from the list of candidates
%% by simply shutting its local QQ replica (Ra server)
RaLeader = amqqueue:get_pid(Q),
rabbit_log:debug("Will stop Ra server ~tp", [RaLeader]),
?LOG_DEBUG("Will stop Ra server ~tp", [RaLeader]),
case rabbit_quorum_queue:stop_server(RaLeader) of
ok ->
rabbit_log:debug("Successfully stopped Ra server ~tp", [RaLeader]);
?LOG_DEBUG("Successfully stopped Ra server ~tp", [RaLeader]);
{error, nodedown} ->
rabbit_log:error("Failed to stop Ra server ~tp: target node was reported as down")
?LOG_ERROR("Failed to stop Ra server ~tp: target node was reported as down")
end
end || Q <- Queues],
rabbit_log:info("Leadership transfer for quorum queues hosted on this node has been initiated").
?LOG_INFO("Leadership transfer for quorum queues hosted on this node has been initiated").
%% TODO: I just copied it over, it looks like was always called inside maintenance so...
-spec stop_local_quorum_queue_followers() -> ok.
stop_local_quorum_queue_followers() ->
Queues = rabbit_amqqueue:list_local_followers(),
rabbit_log:info("Will stop local follower replicas of ~b quorum queues on this node",
?LOG_INFO("Will stop local follower replicas of ~b quorum queues on this node",
[length(Queues)]),
[begin
_ = [begin
Name = amqqueue:get_name(Q),
rabbit_log:debug("Will stop a local follower replica of quorum queue ~ts",
?LOG_DEBUG("Will stop a local follower replica of quorum queue ~ts",
[rabbit_misc:rs(Name)]),
%% shut down Ra nodes so that they are not considered for leader election
{RegisteredName, _LeaderNode} = amqqueue:get_pid(Q),
RaNode = {RegisteredName, node()},
rabbit_log:debug("Will stop Ra server ~tp", [RaNode]),
?LOG_DEBUG("Will stop Ra server ~tp", [RaNode]),
case rabbit_quorum_queue:stop_server(RaNode) of
ok ->
rabbit_log:debug("Successfully stopped Ra server ~tp", [RaNode]);
?LOG_DEBUG("Successfully stopped Ra server ~tp", [RaNode]);
{error, nodedown} ->
rabbit_log:error("Failed to stop Ra server ~tp: target node was reported as down")
?LOG_ERROR("Failed to stop Ra server ~tp: target node was reported as down")
end
end || Q <- Queues],
rabbit_log:info("Stopped all local replicas of quorum queues hosted on this node").
?LOG_INFO("Stopped all local replicas of quorum queues hosted on this node").
revive() ->
revive_local_queue_members().
@ -2350,17 +2351,17 @@ revive_local_queue_members() ->
%% NB: this function ignores the first argument so we can just pass the
%% empty binary as the vhost name.
{Recovered, Failed} = rabbit_quorum_queue:recover(<<>>, Queues),
rabbit_log:debug("Successfully revived ~b quorum queue replicas",
?LOG_DEBUG("Successfully revived ~b quorum queue replicas",
[length(Recovered)]),
case length(Failed) of
0 ->
ok;
NumFailed ->
rabbit_log:error("Failed to revive ~b quorum queue replicas",
?LOG_ERROR("Failed to revive ~b quorum queue replicas",
[NumFailed])
end,
rabbit_log:info("Restart of local quorum queue replicas is complete"),
?LOG_INFO("Restart of local quorum queue replicas is complete"),
ok.
queue_vm_stats_sups() ->


@ -43,6 +43,8 @@
-include_lib("rabbit_common/include/rabbit_framing.hrl").
-include_lib("rabbit_common/include/rabbit.hrl").
-include("rabbit_amqp_metrics.hrl").
-include_lib("kernel/include/logger.hrl").
-include_lib("rabbit_common/include/logging.hrl").
-export([start_link/2, info/2, force_event_refresh/2,
shutdown/2]).
@ -157,6 +159,7 @@ shutdown(Pid, Explanation) ->
-spec init(pid(), {pid(), pid()}, ranch:ref()) ->
no_return().
init(Parent, HelperSups, Ref) ->
logger:set_process_metadata(#{domain => ?RMQLOG_DOMAIN_CONN}),
?LG_PROCESS_TYPE(reader),
{ok, Sock} = rabbit_networking:handshake(Ref,
application:get_env(rabbit, proxy_protocol, false),
@ -253,7 +256,7 @@ server_capabilities(_) ->
%%--------------------------------------------------------------------------
socket_error(Reason) when is_atom(Reason) ->
rabbit_log_connection:error("Error on AMQP connection ~tp: ~ts",
?LOG_ERROR("Error on AMQP connection ~tp: ~ts",
[self(), rabbit_misc:format_inet_error(Reason)]);
socket_error(Reason) ->
Fmt = "Error on AMQP connection ~tp:~n~tp",
@ -263,9 +266,9 @@ socket_error(Reason) ->
%% This is presumably a TCP healthcheck, so don't log
%% it unless specified otherwise.
{ssl_upgrade_error, closed} ->
rabbit_log_connection:debug(Fmt, Args);
?LOG_DEBUG(Fmt, Args);
_ ->
rabbit_log_connection:error(Fmt, Args)
?LOG_ERROR(Fmt, Args)
end.
inet_op(F) -> rabbit_misc:throw_on_error(inet_error, F).
@ -347,13 +350,13 @@ start_connection(Parent, HelperSups, RanchRef, Deb, Sock) ->
connected_at = ConnectedAt0}} ->
ConnName = dynamic_connection_name(Name),
ConnDuration = connection_duration(ConnectedAt0),
rabbit_log_connection:info("closing AMQP connection (~ts, vhost: '~ts', user: '~ts', duration: '~ts')",
?LOG_INFO("closing AMQP connection (~ts, vhost: '~ts', user: '~ts', duration: '~ts')",
[ConnName, VHost, Username, ConnDuration]);
%% just to be more defensive
_ ->
ConnName = dynamic_connection_name(Name),
ConnDuration = connection_duration(ConnectedAt),
rabbit_log_connection:info("closing AMQP connection (~ts, duration: '~ts')",
?LOG_INFO("closing AMQP connection (~ts, duration: '~ts')",
[ConnName, ConnDuration])
end
catch
@ -460,9 +463,9 @@ log_connection_exception(Severity, Name, Duration, Ex) ->
log_connection_exception_with_severity(Severity, Fmt, Args) ->
case Severity of
debug -> rabbit_log_connection:debug(Fmt, Args);
warning -> rabbit_log_connection:warning(Fmt, Args);
error -> rabbit_log_connection:error(Fmt, Args)
debug -> ?LOG_DEBUG(Fmt, Args);
warning -> ?LOG_WARNING(Fmt, Args);
error -> ?LOG_ERROR(Fmt, Args)
end.
run({M, F, A}) ->
@ -518,8 +521,8 @@ mainloop(Deb, Buf, BufLen, State = #v1{sock = Sock,
Fmt = "accepting AMQP connection ~ts",
Args = [ConnName],
case Recv of
closed -> _ = rabbit_log_connection:debug(Fmt, Args);
_ -> _ = rabbit_log_connection:info(Fmt, Args)
closed -> _ = ?LOG_DEBUG(Fmt, Args);
_ -> _ = ?LOG_INFO(Fmt, Args)
end;
_ ->
ok
@ -792,7 +795,7 @@ wait_for_channel_termination(N, TimerRef,
{_, controlled} ->
wait_for_channel_termination(N-1, TimerRef, State1);
{_, uncontrolled} ->
rabbit_log_connection:error(
?LOG_ERROR(
"Error on AMQP connection ~tp (~ts, vhost: '~ts',"
" user: '~ts', state: ~tp), channel ~tp:"
"error while terminating:~n~tp",
@ -834,7 +837,7 @@ log_hard_error(#v1{connection_state = CS,
log_name = ConnName,
user = User,
vhost = VHost}}, Channel, Reason) ->
rabbit_log_connection:error(
?LOG_ERROR(
"Error on AMQP connection ~tp (~ts, vhost: '~ts',"
" user: '~ts', state: ~tp), channel ~tp:~n ~ts",
[self(), ConnName, VHost, User#user.username, CS, Channel, format_hard_error(Reason)]).
@ -854,7 +857,7 @@ handle_exception(State = #v1{connection = #connection{protocol = Protocol,
connection_state = starting},
Channel, Reason = #amqp_error{name = access_refused,
explanation = ErrMsg}) ->
rabbit_log_connection:error(
?LOG_ERROR(
"Error on AMQP connection ~tp (~ts, state: ~tp):~n~ts",
[self(), ConnName, starting, ErrMsg]),
%% respect authentication failure notification capability
@ -873,7 +876,7 @@ handle_exception(State = #v1{connection = #connection{protocol = Protocol,
connection_state = opening},
Channel, Reason = #amqp_error{name = not_allowed,
explanation = ErrMsg}) ->
rabbit_log_connection:error(
?LOG_ERROR(
"Error on AMQP connection ~tp (~ts, user: '~ts', state: ~tp):~n~ts",
[self(), ConnName, User#user.username, opening, ErrMsg]),
send_error_on_channel0_and_close(Channel, Protocol, Reason, State);
@ -890,7 +893,7 @@ handle_exception(State = #v1{connection = #connection{protocol = Protocol,
connection_state = tuning},
Channel, Reason = #amqp_error{name = not_allowed,
explanation = ErrMsg}) ->
rabbit_log_connection:error(
?LOG_ERROR(
"Error on AMQP connection ~tp (~ts,"
" user: '~ts', state: ~tp):~n~ts",
[self(), ConnName, User#user.username, tuning, ErrMsg]),
@ -1325,7 +1328,7 @@ handle_method0(#'connection.open'{virtual_host = VHost},
Infos),
rabbit_event:notify(connection_created, Infos),
maybe_emit_stats(State1),
rabbit_log_connection:info(
?LOG_INFO(
"connection ~ts: user '~ts' authenticated and granted access to vhost '~ts'",
[dynamic_connection_name(ConnName), Username, VHost]),
State1;
@ -1350,7 +1353,7 @@ handle_method0(#'connection.update_secret'{new_secret = NewSecret, reason = Reas
user = User = #user{username = Username},
log_name = ConnName} = Conn,
sock = Sock}) when ?IS_RUNNING(State) ->
rabbit_log_connection:debug(
?LOG_DEBUG(
"connection ~ts of user '~ts': "
"asked to update secret, reason: ~ts",
[dynamic_connection_name(ConnName), Username, Reason]),
@ -1363,20 +1366,20 @@ handle_method0(#'connection.update_secret'{new_secret = NewSecret, reason = Reas
%% Any secret update errors coming from the authz backend will be handled in the other branch.
%% Therefore we optimistically do no error handling here. MK.
lists:foreach(fun(Ch) ->
rabbit_log:debug("Updating user/auth backend state for channel ~tp", [Ch]),
?LOG_DEBUG("Updating user/auth backend state for channel ~tp", [Ch]),
_ = rabbit_channel:update_user_state(Ch, User1)
end, all_channels()),
ok = send_on_channel0(Sock, #'connection.update_secret_ok'{}, Protocol),
rabbit_log_connection:info(
?LOG_INFO(
"connection ~ts: user '~ts' updated secret, reason: ~ts",
[dynamic_connection_name(ConnName), Username, Reason]),
State#v1{connection = Conn#connection{user = User1}};
{refused, Message} ->
rabbit_log_connection:error("Secret update was refused for user '~ts': ~tp",
?LOG_ERROR("Secret update was refused for user '~ts': ~tp",
[Username, Message]),
rabbit_misc:protocol_error(not_allowed, "New secret was refused by one of the backends", []);
{error, Message} ->
rabbit_log_connection:error("Secret update for user '~ts' failed: ~tp",
?LOG_ERROR("Secret update for user '~ts' failed: ~tp",
[Username, Message]),
rabbit_misc:protocol_error(not_allowed,
"Secret update failed", [])
@ -1505,7 +1508,7 @@ auth_phase(Response,
auth_state = AuthState,
host = RemoteAddress},
sock = Sock}) ->
rabbit_log:debug("Client address during authN phase: ~tp", [RemoteAddress]),
?LOG_DEBUG("Client address during authN phase: ~tp", [RemoteAddress]),
case AuthMechanism:handle_response(Response, AuthState) of
{refused, Username, Msg, Args} ->
rabbit_core_metrics:auth_attempt_failed(RemoteAddress, Username, amqp091),
@ -1838,7 +1841,7 @@ augment_connection_log_name(#connection{name = Name} = Connection) ->
Connection;
UserSpecifiedName ->
LogName = <<Name/binary, " - ", UserSpecifiedName/binary>>,
rabbit_log_connection:info("connection ~ts has a client-provided name: ~ts",
?LOG_INFO("connection ~ts has a client-provided name: ~ts",
[Name, UserSpecifiedName]),
?store_proc_name(LogName),
Connection#connection{log_name = LogName}

View File

@ -19,6 +19,7 @@
terminate/2, code_change/3]).
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
%%----------------------------------------------------------------------------
@ -36,7 +37,7 @@ start(VHost) ->
%% we can get here if a vhost is added and removed concurrently
%% e.g. some integration tests do it
{error, {no_such_vhost, VHost}} ->
rabbit_log:error("Failed to start a recovery terms manager for vhost ~ts: vhost no longer exists!",
?LOG_ERROR("Failed to start a recovery terms manager for vhost ~ts: vhost no longer exists!",
[VHost]),
{error, {no_such_vhost, VHost}}
end.
@ -52,7 +53,7 @@ stop(VHost) ->
end;
%% see start/1
{error, {no_such_vhost, VHost}} ->
rabbit_log:error("Failed to stop a recovery terms manager for vhost ~ts: vhost no longer exists!",
?LOG_ERROR("Failed to stop a recovery terms manager for vhost ~ts: vhost no longer exists!",
[VHost]),
ok
@ -81,7 +82,7 @@ clear(VHost) ->
ok
%% see start/1
catch _:badarg ->
rabbit_log:error("Failed to clear recovery terms for vhost ~ts: table no longer exists!",
?LOG_ERROR("Failed to clear recovery terms for vhost ~ts: table no longer exists!",
[VHost]),
ok
end,
@ -138,7 +139,7 @@ open_table(VHost, RamFile, RetriesLeft) ->
_ = file:delete(File),
%% Wait before retrying
DelayInMs = 1000,
rabbit_log:warning("Failed to open a recovery terms DETS file at ~tp. Will delete it and retry in ~tp ms (~tp retries left)",
?LOG_WARNING("Failed to open a recovery terms DETS file at ~tp. Will delete it and retry in ~tp ms (~tp retries left)",
[File, DelayInMs, RetriesLeft]),
timer:sleep(DelayInMs),
open_table(VHost, RamFile, RetriesLeft - 1)
@ -152,7 +153,7 @@ flush(VHost) ->
dets:sync(VHost)
%% see clear/1
catch _:badarg ->
rabbit_log:error("Failed to sync recovery terms table for vhost ~ts: the table no longer exists!",
?LOG_ERROR("Failed to sync recovery terms table for vhost ~ts: the table no longer exists!",
[VHost]),
ok
end.
@ -165,7 +166,7 @@ close_table(VHost) ->
ok = dets:close(VHost)
%% see clear/1
catch _:badarg ->
rabbit_log:error("Failed to close recovery terms table for vhost ~ts: the table no longer exists!",
?LOG_ERROR("Failed to close recovery terms table for vhost ~ts: the table no longer exists!",
[VHost]),
ok
end.

View File

@ -41,6 +41,7 @@
%% * rabbit_event
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-export([parse_set/5, set/5, set_any/5, clear/4, clear_any/4, list/0, list/1,
list_component/1, list/2, list_formatted/1, list_formatted/3,
@ -104,7 +105,7 @@ parse_set_global(Name, String, ActingUser) ->
set_global(Name, Term, ActingUser) ->
NameAsAtom = rabbit_data_coercion:to_atom(Name),
rabbit_log:debug("Setting global parameter '~ts' to ~tp", [NameAsAtom, Term]),
?LOG_DEBUG("Setting global parameter '~ts' to ~tp", [NameAsAtom, Term]),
_ = rabbit_db_rtparams:set(NameAsAtom, Term),
event_notify(parameter_set, none, global, [{name, NameAsAtom},
{value, Term},
@ -125,7 +126,7 @@ set_any(VHost, Component, Name, Term, User) ->
end.
set_any0(VHost, Component, Name, Term, User) ->
rabbit_log:debug("Asked to set or update runtime parameter '~ts' in vhost '~ts' "
?LOG_DEBUG("Asked to set or update runtime parameter '~ts' in vhost '~ts' "
"for component '~ts', value: ~tp",
[Name, VHost, Component, Term]),
case lookup_component(Component) of
@ -168,7 +169,7 @@ is_within_limit(Component) ->
false ->
ErrorMsg = "Limit reached: component ~ts is limited to ~tp",
ErrorArgs = [Component, Limit],
rabbit_log:error(ErrorMsg, ErrorArgs),
?LOG_ERROR(ErrorMsg, ErrorArgs),
{errors, [{"component ~ts is limited to ~tp", [Component, Limit]}]}
end.

View File

@ -8,6 +8,7 @@
-module(rabbit_ssl).
-include_lib("public_key/include/public_key.hrl").
-include_lib("kernel/include/logger.hrl").
-export([peer_cert_issuer/1, peer_cert_subject/1, peer_cert_validity/1]).
-export([peer_cert_subject_items/2, peer_cert_auth_name/1, peer_cert_auth_name/2]).
@ -161,7 +162,7 @@ peer_cert_auth_name({subject_alternative_name, Type, Index0}, Cert) ->
%% lists:nth/2 is 1-based
Index = Index0 + 1,
OfType = peer_cert_subject_alternative_names(Cert, otp_san_type(Type)),
rabbit_log:debug("Peer certificate SANs of type ~ts: ~tp, index to use with lists:nth/2: ~b", [Type, OfType, Index]),
?LOG_DEBUG("Peer certificate SANs of type ~ts: ~tp, index to use with lists:nth/2: ~b", [Type, OfType, Index]),
case length(OfType) of
0 -> not_found;
N when N < Index -> not_found;
@ -198,7 +199,7 @@ auth_config_sane() ->
{ok, Opts} = application:get_env(rabbit, ssl_options),
case proplists:get_value(verify, Opts) of
verify_peer -> true;
V -> rabbit_log:warning("TLS peer verification (authentication) is "
V -> ?LOG_WARNING("TLS peer verification (authentication) is "
"disabled, ssl_options.verify value used: ~tp. "
"See https://www.rabbitmq.com/docs/ssl#peer-verification to learn more.", [V]),
false

View File

@ -83,6 +83,7 @@
-include("rabbit_stream_coordinator.hrl").
-include("amqqueue.hrl").
-include_lib("kernel/include/logger.hrl").
-define(REPLICA_FRESHNESS_LIMIT_MS, 10 * 1000). %% 10s
-define(V2_OR_MORE(Vsn), Vsn >= 2).
@ -174,7 +175,7 @@ restart_stream(QRes, Options)
restart_stream(Q, Options)
when ?is_amqqueue(Q) andalso
?amqqueue_is_stream(Q) ->
rabbit_log:info("restarting stream ~s in vhost ~s with options ~p",
?LOG_INFO("restarting stream ~s in vhost ~s with options ~p",
[maps:get(name, amqqueue:get_type_state(Q)), amqqueue:get_vhost(Q), Options]),
#{name := StreamId} = amqqueue:get_type_state(Q),
case process_command({restart_stream, StreamId, Options}) of
@ -217,7 +218,7 @@ add_replica(Q, Node) when ?is_amqqueue(Q) ->
{error, {disallowed, out_of_sync_replica}};
false ->
Name = rabbit_misc:rs(amqqueue:get_name(Q)),
rabbit_log:info("~ts : adding replica ~ts to ~ts Replication State: ~w",
?LOG_INFO("~ts : adding replica ~ts to ~ts Replication State: ~w",
[?MODULE, Node, Name, ReplState0]),
StreamId = maps:get(name, amqqueue:get_type_state(Q)),
case process_command({add_replica, StreamId, #{node => Node}}) of
@ -444,7 +445,7 @@ process_command([Server | Servers], Cmd) ->
_ ->
element(1, Cmd)
end,
rabbit_log:warning("Coordinator timeout on server ~w when processing command ~W",
?LOG_WARNING("Coordinator timeout on server ~w when processing command ~W",
[element(2, Server), CmdLabel, 10]),
process_command(Servers, Cmd);
{error, noproc} ->
@ -516,17 +517,17 @@ start_coordinator_cluster() ->
Versions = [V || {ok, V} <- erpc:multicall(Nodes,
?MODULE, version, [])],
MinVersion = lists:min([version() | Versions]),
rabbit_log:debug("Starting stream coordinator on nodes: ~w, "
?LOG_DEBUG("Starting stream coordinator on nodes: ~w, "
"initial machine version ~b",
[Nodes, MinVersion]),
case ra:start_cluster(?RA_SYSTEM,
[make_ra_conf(Node, Nodes, MinVersion)
|| Node <- Nodes]) of
{ok, Started, _} ->
rabbit_log:debug("Started stream coordinator on ~w", [Started]),
?LOG_DEBUG("Started stream coordinator on ~w", [Started]),
Started;
{error, cluster_not_formed} ->
rabbit_log:warning("Stream coordinator could not be started on nodes ~w",
?LOG_WARNING("Stream coordinator could not be started on nodes ~w",
[Nodes]),
[]
end.
@ -740,7 +741,7 @@ apply(Meta, {nodeup, Node} = Cmd,
streams = Streams,
single_active_consumer = Sac1}, ok, Effects2);
apply(Meta, {machine_version, From, To}, State0) ->
rabbit_log:info("Stream coordinator machine version changes from ~tp to ~tp, "
?LOG_INFO("Stream coordinator machine version changes from ~tp to ~tp, "
++ "applying incremental upgrade.", [From, To]),
%% RA applies machine upgrades from any version to any version, e.g. 0 -> 2.
%% We fill in the gaps here, applying all 1-to-1 machine upgrades.
@ -756,7 +757,7 @@ apply(Meta, {timeout, {sac, node_disconnected, #{connection_pid := Pid}}},
return(Meta, State0#?MODULE{single_active_consumer = SacState1}, ok,
Effects);
apply(Meta, UnkCmd, State) ->
rabbit_log:debug("~ts: unknown command ~W",
?LOG_DEBUG("~ts: unknown command ~W",
[?MODULE, UnkCmd, 10]),
return(Meta, State, {error, unknown_command}, []).
@ -842,7 +843,7 @@ maybe_resize_coordinator_cluster(LeaderPid, SacNodes, MachineVersion) ->
[New | _] ->
%% any remaining members will be added
%% next tick
rabbit_log:info("~ts: New rabbit node(s) detected, "
?LOG_INFO("~ts: New rabbit node(s) detected, "
"adding : ~w",
[?MODULE, New]),
add_member(Members, New)
@ -854,7 +855,7 @@ maybe_resize_coordinator_cluster(LeaderPid, SacNodes, MachineVersion) ->
%% this ought to be rather rare as the stream
%% coordinator member is now removed as part
%% of the forget_cluster_node command
rabbit_log:info("~ts: Rabbit node(s) removed "
?LOG_INFO("~ts: Rabbit node(s) removed "
"from the cluster, "
"deleting: ~w", [?MODULE, Old]),
_ = remove_member(Leader, Members, Old),
@ -874,7 +875,7 @@ maybe_handle_stale_nodes(SacNodes, BrokerNodes,
[] ->
ok;
Stale when length(BrokerNodes) > 0 ->
rabbit_log:debug("Stale nodes detected in stream SAC "
?LOG_DEBUG("Stale nodes detected in stream SAC "
"coordinator: ~w. Purging state.",
[Stale]),
ra:pipeline_command(LeaderPid, sac_make_purge_nodes(Stale)),
@ -903,14 +904,14 @@ add_member(Members, Node) ->
{ok, _, _} ->
ok;
{error, Err} ->
rabbit_log:warning("~ts: Failed to add member, reason ~w"
?LOG_WARNING("~ts: Failed to add member, reason ~w"
"deleting started server on ~w",
[?MODULE, Err, Node]),
case ra:force_delete_server(?RA_SYSTEM, ServerId) of
ok ->
ok;
Err ->
rabbit_log:warning("~ts: Failed to delete server "
?LOG_WARNING("~ts: Failed to delete server "
"on ~w, reason ~w",
[?MODULE, Node, Err]),
ok
@ -926,7 +927,7 @@ add_member(Members, Node) ->
%% there is a server running but is not a member of the
%% stream coordinator cluster
%% In this case it needs to be deleted
rabbit_log:warning("~ts: server already running on ~w but not
?LOG_WARNING("~ts: server already running on ~w but not
part of cluster, "
"deleting started server",
[?MODULE, Node]),
@ -934,14 +935,14 @@ add_member(Members, Node) ->
ok ->
ok;
Err ->
rabbit_log:warning("~ts: Failed to delete server "
?LOG_WARNING("~ts: Failed to delete server "
"on ~w, reason ~w",
[?MODULE, Node, Err]),
ok
end
end;
Error ->
rabbit_log:warning("Stream coordinator server failed to start on node ~ts : ~W",
?LOG_WARNING("Stream coordinator server failed to start on node ~ts : ~W",
[Node, Error, 10]),
ok
end.
@ -983,7 +984,7 @@ handle_aux(leader, _, {down, Pid, _},
handle_aux(leader, _, {start_writer, StreamId,
#{epoch := Epoch, node := Node} = Args, Conf},
Aux, RaAux) ->
rabbit_log:debug("~ts: running action: 'start_writer'"
?LOG_DEBUG("~ts: running action: 'start_writer'"
" for ~ts on node ~w in epoch ~b",
[?MODULE, StreamId, Node, Epoch]),
ActionFun = phase_start_writer(StreamId, Args, Conf),
@ -991,7 +992,7 @@ handle_aux(leader, _, {start_writer, StreamId,
handle_aux(leader, _, {start_replica, StreamId,
#{epoch := Epoch, node := Node} = Args, Conf},
Aux, RaAux) ->
rabbit_log:debug("~ts: running action: 'start_replica'"
?LOG_DEBUG("~ts: running action: 'start_replica'"
" for ~ts on node ~w in epoch ~b",
[?MODULE, StreamId, Node, Epoch]),
ActionFun = phase_start_replica(StreamId, Args, Conf),
@ -999,26 +1000,26 @@ handle_aux(leader, _, {start_replica, StreamId,
handle_aux(leader, _, {stop, StreamId, #{node := Node,
epoch := Epoch} = Args, Conf},
Aux, RaAux) ->
rabbit_log:debug("~ts: running action: 'stop'"
?LOG_DEBUG("~ts: running action: 'stop'"
" for ~ts on node ~w in epoch ~b",
[?MODULE, StreamId, Node, Epoch]),
ActionFun = phase_stop_member(StreamId, Args, Conf),
run_action(stopping, StreamId, Args, ActionFun, Aux, RaAux);
handle_aux(leader, _, {update_mnesia, StreamId, Args, Conf},
#aux{actions = _Monitors} = Aux, RaAux) ->
rabbit_log:debug("~ts: running action: 'update_mnesia'"
?LOG_DEBUG("~ts: running action: 'update_mnesia'"
" for ~ts", [?MODULE, StreamId]),
ActionFun = phase_update_mnesia(StreamId, Args, Conf),
run_action(updating_mnesia, StreamId, Args, ActionFun, Aux, RaAux);
handle_aux(leader, _, {update_retention, StreamId, Args, _Conf},
#aux{actions = _Monitors} = Aux, RaAux) ->
rabbit_log:debug("~ts: running action: 'update_retention'"
?LOG_DEBUG("~ts: running action: 'update_retention'"
" for ~ts", [?MODULE, StreamId]),
ActionFun = phase_update_retention(StreamId, Args),
run_action(update_retention, StreamId, Args, ActionFun, Aux, RaAux);
handle_aux(leader, _, {delete_member, StreamId, #{node := Node} = Args, Conf},
#aux{actions = _Monitors} = Aux, RaAux) ->
rabbit_log:debug("~ts: running action: 'delete_member'"
?LOG_DEBUG("~ts: running action: 'delete_member'"
" for ~ts ~ts", [?MODULE, StreamId, Node]),
ActionFun = phase_delete_member(StreamId, Args, Conf),
run_action(delete_member, StreamId, Args, ActionFun, Aux, RaAux);
@ -1030,7 +1031,7 @@ handle_aux(leader, _, fail_active_actions,
Exclude = maps:from_list([{S, ok}
|| {P, {S, _, _}} <- maps_to_list(Actions),
is_process_alive(P)]),
rabbit_log:debug("~ts: failing actions: ~w", [?MODULE, Exclude]),
?LOG_DEBUG("~ts: failing actions: ~w", [?MODULE, Exclude]),
#?MODULE{streams = Streams} = ra_aux:machine_state(RaAux),
fail_active_actions(Streams, Exclude),
{no_reply, Aux, RaAux, []};
@ -1043,7 +1044,7 @@ handle_aux(leader, _, {down, Pid, Reason},
%% An action has failed - report back to the state machine
case maps:get(Pid, Monitors0, undefined) of
{StreamId, Action, #{node := Node, epoch := Epoch} = Args} ->
rabbit_log:warning("~ts: error while executing action ~w for stream queue ~ts, "
?LOG_WARNING("~ts: error while executing action ~w for stream queue ~ts, "
" node ~ts, epoch ~b Err: ~w",
[?MODULE, Action, StreamId, Node, Epoch, Reason]),
Monitors = maps:remove(Pid, Monitors0),
@ -1110,7 +1111,7 @@ phase_start_replica(StreamId, #{epoch := Epoch,
fun() ->
try osiris_replica:start(Node, Conf0) of
{ok, Pid} ->
rabbit_log:info("~ts: ~ts: replica started on ~ts in ~b pid ~w",
?LOG_INFO("~ts: ~ts: replica started on ~ts in ~b pid ~w",
[?MODULE, StreamId, Node, Epoch, Pid]),
send_self_command({member_started, StreamId,
Args#{pid => Pid}});
@ -1126,12 +1127,12 @@ phase_start_replica(StreamId, #{epoch := Epoch,
send_self_command({member_started, StreamId,
Args#{pid => Pid}});
{error, Reason} ->
rabbit_log:warning("~ts: Error while starting replica for ~ts on node ~ts in ~b : ~W",
?LOG_WARNING("~ts: Error while starting replica for ~ts on node ~ts in ~b : ~W",
[?MODULE, maps:get(name, Conf0), Node, Epoch, Reason, 10]),
maybe_sleep(Reason),
send_action_failed(StreamId, starting, Args)
catch _:Error ->
rabbit_log:warning("~ts: Error while starting replica for ~ts on node ~ts in ~b : ~W",
?LOG_WARNING("~ts: Error while starting replica for ~ts on node ~ts in ~b : ~W",
[?MODULE, maps:get(name, Conf0), Node, Epoch, Error, 10]),
maybe_sleep(Error),
send_action_failed(StreamId, starting, Args)
@ -1152,13 +1153,13 @@ phase_delete_member(StreamId, #{node := Node} = Arg, Conf) ->
true ->
try osiris:delete_member(Node, Conf) of
ok ->
rabbit_log:info("~ts: Member deleted for ~ts : on node ~ts",
?LOG_INFO("~ts: Member deleted for ~ts : on node ~ts",
[?MODULE, StreamId, Node]),
send_self_command({member_deleted, StreamId, Arg});
_ ->
send_action_failed(StreamId, deleting, Arg)
catch _:E ->
rabbit_log:warning("~ts: Error while deleting member for ~ts : on node ~ts ~W",
?LOG_WARNING("~ts: Error while deleting member for ~ts : on node ~ts ~W",
[?MODULE, StreamId, Node, E, 10]),
maybe_sleep(E),
send_action_failed(StreamId, deleting, Arg)
@ -1166,7 +1167,7 @@ phase_delete_member(StreamId, #{node := Node} = Arg, Conf) ->
false ->
%% node is no longer a cluster member, we return success to avoid
%% trying to delete the member indefinitely
rabbit_log:info("~ts: Member deleted/forgotten for ~ts : node ~ts is no longer a cluster member",
?LOG_INFO("~ts: Member deleted/forgotten for ~ts : node ~ts is no longer a cluster member",
[?MODULE, StreamId, Node]),
send_self_command({member_deleted, StreamId, Arg})
end
@ -1180,22 +1181,22 @@ phase_stop_member(StreamId, #{node := Node, epoch := Epoch} = Arg0, Conf) ->
try get_replica_tail(Node, Conf) of
{ok, Tail} ->
Arg = Arg0#{tail => Tail},
rabbit_log:debug("~ts: ~ts: member stopped on ~ts in ~b Tail ~w",
?LOG_DEBUG("~ts: ~ts: member stopped on ~ts in ~b Tail ~w",
[?MODULE, StreamId, Node, Epoch, Tail]),
send_self_command({member_stopped, StreamId, Arg});
Err ->
rabbit_log:warning("~ts: failed to get tail of member ~ts on ~ts in ~b Error: ~w",
?LOG_WARNING("~ts: failed to get tail of member ~ts on ~ts in ~b Error: ~w",
[?MODULE, StreamId, Node, Epoch, Err]),
maybe_sleep(Err),
send_action_failed(StreamId, stopping, Arg0)
catch _:Err ->
rabbit_log:warning("~ts: failed to get tail of member ~ts on ~ts in ~b Error: ~w",
?LOG_WARNING("~ts: failed to get tail of member ~ts on ~ts in ~b Error: ~w",
[?MODULE, StreamId, Node, Epoch, Err]),
maybe_sleep(Err),
send_action_failed(StreamId, stopping, Arg0)
end
catch _:Err ->
rabbit_log:warning("~ts: failed to stop member ~ts ~w Error: ~w",
?LOG_WARNING("~ts: failed to stop member ~ts ~w Error: ~w",
[?MODULE, StreamId, Node, Err]),
maybe_sleep(Err),
send_action_failed(StreamId, stopping, Arg0)
@ -1207,17 +1208,17 @@ phase_start_writer(StreamId, #{epoch := Epoch, node := Node} = Args0, Conf) ->
try osiris:start_writer(Conf) of
{ok, Pid} ->
Args = Args0#{epoch => Epoch, pid => Pid},
rabbit_log:info("~ts: started writer ~ts on ~w in ~b",
?LOG_INFO("~ts: started writer ~ts on ~w in ~b",
[?MODULE, StreamId, Node, Epoch]),
send_self_command({member_started, StreamId, Args});
Err ->
%% no sleep for writer failures as we want to trigger a new
%% election asap
rabbit_log:warning("~ts: failed to start writer ~ts on ~ts in ~b Error: ~w",
?LOG_WARNING("~ts: failed to start writer ~ts on ~ts in ~b Error: ~w",
[?MODULE, StreamId, Node, Epoch, Err]),
send_action_failed(StreamId, starting, Args0)
catch _:Err ->
rabbit_log:warning("~ts: failed to start writer ~ts on ~ts in ~b Error: ~w",
?LOG_WARNING("~ts: failed to start writer ~ts on ~ts in ~b Error: ~w",
[?MODULE, StreamId, Node, Epoch, Err]),
send_action_failed(StreamId, starting, Args0)
end
@ -1230,12 +1231,12 @@ phase_update_retention(StreamId, #{pid := Pid,
ok ->
send_self_command({retention_updated, StreamId, Args});
{error, Reason} = Err ->
rabbit_log:warning("~ts: failed to update retention for ~ts ~w Reason: ~w",
?LOG_WARNING("~ts: failed to update retention for ~ts ~w Reason: ~w",
[?MODULE, StreamId, node(Pid), Reason]),
maybe_sleep(Err),
send_action_failed(StreamId, update_retention, Args)
catch _:Err ->
rabbit_log:warning("~ts: failed to update retention for ~ts ~w Error: ~w",
?LOG_WARNING("~ts: failed to update retention for ~ts ~w Error: ~w",
[?MODULE, StreamId, node(Pid), Err]),
maybe_sleep(Err),
send_action_failed(StreamId, update_retention, Args)
@ -1281,7 +1282,7 @@ is_quorum(NumReplicas, NumAlive) ->
phase_update_mnesia(StreamId, Args, #{reference := QName,
leader_pid := LeaderPid} = Conf) ->
fun() ->
rabbit_log:debug("~ts: running mnesia update for ~ts: ~W",
?LOG_DEBUG("~ts: running mnesia update for ~ts: ~W",
[?MODULE, StreamId, Conf, 10]),
Fun = fun (Q) ->
case amqqueue:get_type_state(Q) of
@ -1293,7 +1294,7 @@ phase_update_mnesia(StreamId, Args, #{reference := QName,
Ts ->
S = maps:get(name, Ts, undefined),
%% TODO log as side-effect
rabbit_log:debug("~ts: refusing mnesia update for stale stream id ~s, current ~s",
?LOG_DEBUG("~ts: refusing mnesia update for stale stream id ~s, current ~s",
[?MODULE, StreamId, S]),
%% if the stream id isn't a match this is a stale
%% update from a previous stream incarnation for the
@ -1303,7 +1304,7 @@ phase_update_mnesia(StreamId, Args, #{reference := QName,
end,
try rabbit_amqqueue:update(QName, Fun) of
not_found ->
rabbit_log:debug("~ts: resource for stream id ~ts not found, "
?LOG_DEBUG("~ts: resource for stream id ~ts not found, "
"recovering from rabbit_durable_queue",
[?MODULE, StreamId]),
%% This can happen during recovery
@ -1316,7 +1317,7 @@ phase_update_mnesia(StreamId, Args, #{reference := QName,
{ok, Q} ->
case amqqueue:get_type_state(Q) of
#{name := S} when S == StreamId ->
rabbit_log:debug("~ts: initializing queue record for stream id ~ts",
?LOG_DEBUG("~ts: initializing queue record for stream id ~ts",
[?MODULE, StreamId]),
ok = rabbit_amqqueue:ensure_rabbit_queue_record_is_initialized(Fun(Q)),
ok;
@ -1328,7 +1329,7 @@ phase_update_mnesia(StreamId, Args, #{reference := QName,
_ ->
send_self_command({mnesia_updated, StreamId, Args})
catch _:E ->
rabbit_log:debug("~ts: failed to update mnesia for ~ts: ~W",
?LOG_DEBUG("~ts: failed to update mnesia for ~ts: ~W",
[?MODULE, StreamId, E, 10]),
send_action_failed(StreamId, updating_mnesia, Args)
end
@ -1364,7 +1365,7 @@ filter_command(_Meta, {delete_replica, _, #{node := Node}}, #stream{id = StreamI
end, Members0),
case maps:size(Members) =< 1 of
true ->
rabbit_log:warning(
?LOG_WARNING(
"~ts failed to delete replica on node ~ts for stream ~ts: refusing to delete the only replica",
[?MODULE, Node, StreamId]),
{error, last_stream_member};
@ -1379,7 +1380,7 @@ update_stream(Meta, Cmd, Stream) ->
update_stream0(Meta, Cmd, Stream)
catch
_:E:Stacktrace ->
rabbit_log:warning(
?LOG_WARNING(
"~ts failed to update stream:~n~W~n~W",
[?MODULE, E, 10, Stacktrace, 10]),
Stream
@ -1495,7 +1496,7 @@ update_stream0(#{system_time := _Ts},
Member ->
%% do we just ignore any members started events from unexpected
%% epochs?
rabbit_log:warning("~ts: member started unexpected ~w ~w",
?LOG_WARNING("~ts: member started unexpected ~w ~w",
[?MODULE, Args, Member]),
Stream0
end;
@ -2056,7 +2057,7 @@ fail_active_actions(Streams, Exclude) ->
end, Members),
case Mnesia of
{updating, E} ->
rabbit_log:debug("~ts: failing stale action to trigger retry. "
?LOG_DEBUG("~ts: failing stale action to trigger retry. "
"Stream ID: ~ts, node: ~w, action: ~w",
[?MODULE, Id, node(), updating_mnesia]),
send_self_command({action_failed, Id,
@ -2076,7 +2077,7 @@ fail_action(_StreamId, _, #member{current = undefined}) ->
ok;
fail_action(StreamId, Node, #member{role = {_, E},
current = {Action, Idx}}) ->
rabbit_log:debug("~ts: failing stale action to trigger retry. "
?LOG_DEBUG("~ts: failing stale action to trigger retry. "
"Stream ID: ~ts, node: ~w, action: ~w",
[?MODULE, StreamId, node(), Action]),
%% if we have an action send failure message
@ -2241,7 +2242,7 @@ update_target(Member, Target) ->
machine_version(1, 2, State = #?MODULE{streams = Streams0,
monitors = Monitors0}) ->
rabbit_log:info("Stream coordinator machine version changes from 1 to 2, updating state."),
?LOG_INFO("Stream coordinator machine version changes from 1 to 2, updating state."),
%% conversion from old state to new state
%% additional operation: the stream listeners are never collected in the previous version
%% so we'll emit monitors for all listener PIDs
@ -2273,13 +2274,13 @@ machine_version(1, 2, State = #?MODULE{streams = Streams0,
monitors = Monitors2,
listeners = undefined}, Effects};
machine_version(2, 3, State) ->
rabbit_log:info("Stream coordinator machine version changes from 2 to 3, "
?LOG_INFO("Stream coordinator machine version changes from 2 to 3, "
"updating state."),
SacState = rabbit_stream_sac_coordinator_v4:init_state(),
{State#?MODULE{single_active_consumer = SacState},
[]};
machine_version(3, 4, #?MODULE{streams = Streams0} = State) ->
rabbit_log:info("Stream coordinator machine version changes from 3 to 4, updating state."),
?LOG_INFO("Stream coordinator machine version changes from 3 to 4, updating state."),
%% the "preferred" field takes the place of the "node" field in this version
%% initializing the "preferred" field to false
Streams = maps:map(
@ -2291,12 +2292,12 @@ machine_version(3, 4, #?MODULE{streams = Streams0} = State) ->
end, Streams0),
{State#?MODULE{streams = Streams}, []};
machine_version(4 = From, 5, #?MODULE{single_active_consumer = Sac0} = State) ->
rabbit_log:info("Stream coordinator machine version changes from 4 to 5, updating state."),
?LOG_INFO("Stream coordinator machine version changes from 4 to 5, updating state."),
SacExport = rabbit_stream_sac_coordinator_v4:state_to_map(Sac0),
Sac1 = rabbit_stream_sac_coordinator:import_state(From, SacExport),
{State#?MODULE{single_active_consumer = Sac1}, []};
machine_version(From, To, State) ->
rabbit_log:info("Stream coordinator machine version changes from ~tp to ~tp, no state changes required.",
?LOG_INFO("Stream coordinator machine version changes from ~tp to ~tp, no state changes required.",
[From, To]),
{State, []}.

View File

@ -70,6 +70,7 @@
-include_lib("rabbit_common/include/rabbit.hrl").
-include("amqqueue.hrl").
-include_lib("kernel/include/logger.hrl").
-define(INFO_KEYS, [name, durable, auto_delete, arguments, leader, members, online, state,
messages, messages_ready, messages_unacknowledged, committed_offset,
@ -332,7 +333,7 @@ consume(Q, Spec, #stream_client{} = QState0)
args := Args,
ok_msg := OkMsg,
acting_user := ActingUser} = Spec,
rabbit_log:debug("~s:~s Local pid resolved ~0p",
?LOG_DEBUG("~s:~s Local pid resolved ~0p",
[?MODULE, ?FUNCTION_NAME, LocalPid]),
case parse_offset_arg(
rabbit_misc:table_lookup(Args, <<"x-stream-offset">>)) of
@ -643,17 +644,17 @@ handle_event(_QName, {stream_local_member_change, Pid},
handle_event(_QName, {stream_local_member_change, Pid},
#stream_client{name = QName,
readers = Readers0} = State) ->
rabbit_log:debug("Local member change event for ~tp", [QName]),
?LOG_DEBUG("Local member change event for ~tp", [QName]),
Readers1 = maps:fold(fun(T, #stream{log = Log0, reader_options = Options} = S0, Acc) ->
Offset = osiris_log:next_offset(Log0),
osiris_log:close(Log0),
CounterSpec = {{?MODULE, QName, self()}, []},
rabbit_log:debug("Re-creating Osiris reader for consumer ~tp at offset ~tp "
?LOG_DEBUG("Re-creating Osiris reader for consumer ~tp at offset ~tp "
" with options ~tp",
[T, Offset, Options]),
{ok, Log1} = osiris:init_reader(Pid, Offset, CounterSpec, Options),
NextOffset = osiris_log:next_offset(Log1) - 1,
rabbit_log:debug("Registering offset listener at offset ~tp", [NextOffset]),
?LOG_DEBUG("Registering offset listener at offset ~tp", [NextOffset]),
osiris:register_offset_listener(Pid, NextOffset),
S1 = S0#stream{listening_offset = NextOffset,
log = Log1},
@ -1000,7 +1001,7 @@ init(Q) when ?is_amqqueue(Q) ->
{ok, stream_not_found, _} ->
{error, stream_not_found};
{error, coordinator_unavailable} = E ->
rabbit_log:warning("Failed to start stream client ~tp: coordinator unavailable",
?LOG_WARNING("Failed to start stream client ~tp: coordinator unavailable",
[rabbit_misc:rs(QName)]),
E
end.
@ -1019,7 +1020,7 @@ update(Q, State)
update_leader_pid(Pid, #stream_client{leader = Pid} = State) ->
State;
update_leader_pid(Pid, #stream_client{} = State) ->
rabbit_log:debug("stream client: new leader detected ~w", [Pid]),
?LOG_DEBUG("stream client: new leader detected ~w", [Pid]),
resend_all(State#stream_client{leader = Pid}).
state_info(_) ->
@ -1080,11 +1081,11 @@ delete_replica(VHost, Name, Node) ->
end.
delete_all_replicas(Node) ->
rabbit_log:info("Asked to remove all stream replicas from node ~ts", [Node]),
?LOG_INFO("Asked to remove all stream replicas from node ~ts", [Node]),
Streams = rabbit_amqqueue:list_stream_queues_on(Node),
lists:map(fun(Q) ->
QName = amqqueue:get_name(Q),
rabbit_log:info("~ts: removing replica on node ~w",
?LOG_INFO("~ts: removing replica on node ~w",
[rabbit_misc:rs(QName), Node]),
#{name := StreamId} = amqqueue:get_type_state(Q),
{ok, Reply, _} = rabbit_stream_coordinator:delete_replica(StreamId, Node),
@ -1092,7 +1093,7 @@ delete_all_replicas(Node) ->
ok ->
{QName, ok};
Err ->
rabbit_log:warning("~ts: failed to remove replica on node ~w, error: ~w",
?LOG_WARNING("~ts: failed to remove replica on node ~w, error: ~w",
[rabbit_misc:rs(QName), Node, Err]),
{QName, {error, Err}}
end
@ -1286,7 +1287,7 @@ chunk_iterator(#stream{credit = Credit,
end,
{end_of_stream, Str};
{error, Err} ->
rabbit_log:info("stream client: failed to create chunk iterator ~p", [Err]),
?LOG_INFO("stream client: failed to create chunk iterator ~p", [Err]),
exit(Err)
end.
@ -1365,7 +1366,7 @@ resend_all(#stream_client{leader = LeaderPid,
case Msgs of
[] -> ok;
[{Seq, _} | _] ->
rabbit_log:debug("stream client: resending from seq ~w num ~b",
?LOG_DEBUG("stream client: resending from seq ~w num ~b",
[Seq, maps:size(Corrs)])
end,
[begin
@ -1444,7 +1445,7 @@ revive() ->
-spec transfer_leadership_of_stream_coordinator([node()]) -> ok.
transfer_leadership_of_stream_coordinator([]) ->
rabbit_log:warning("Skipping leadership transfer of stream coordinator: no candidate "
?LOG_WARNING("Skipping leadership transfer of stream coordinator: no candidate "
"(online, not under maintenance) nodes to transfer to!");
transfer_leadership_of_stream_coordinator(TransferCandidates) ->
% try to transfer to the node with the lowest uptime; the assumption is that
@ -1456,9 +1457,9 @@ transfer_leadership_of_stream_coordinator(TransferCandidates) ->
BestCandidate = element(1, hd(lists:keysort(2, Candidates))),
case rabbit_stream_coordinator:transfer_leadership([BestCandidate]) of
{ok, Node} ->
rabbit_log:info("Leadership transfer for stream coordinator completed. The new leader is ~p", [Node]);
?LOG_INFO("Leadership transfer for stream coordinator completed. The new leader is ~p", [Node]);
Error ->
rabbit_log:warning("Skipping leadership transfer of stream coordinator: ~p", [Error])
?LOG_WARNING("Skipping leadership transfer of stream coordinator: ~p", [Error])
end.
queue_vm_stats_sups() ->

View File

@ -17,6 +17,7 @@
-module(rabbit_stream_sac_coordinator).
-include("rabbit_stream_sac_coordinator.hrl").
-include_lib("kernel/include/logger.hrl").
-opaque command() :: #command_register_consumer{} |
#command_unregister_consumer{} |
@ -148,7 +149,7 @@ process_command(Cmd) ->
{ok, Res, _} ->
Res;
{error, _} = Err ->
rabbit_log:warning("SAC coordinator command ~tp returned error ~tp",
?LOG_WARNING("SAC coordinator command ~tp returned error ~tp",
[Cmd, Err]),
Err
end.
@ -286,7 +287,7 @@ apply(#command_activate_consumer{vhost = VH, stream = S, consumer_name = Name},
{G, Eff} =
case lookup_group(VH, S, Name, StreamGroups0) of
undefined ->
rabbit_log:warning("Trying to activate consumer in group ~tp, but "
?LOG_WARNING("Trying to activate consumer in group ~tp, but "
"the group does not longer exist",
[{VH, S, Name}]),
{undefined, []};
@ -348,7 +349,7 @@ apply(#command_purge_nodes{nodes = Nodes}, State0) ->
apply(#command_update_conf{conf = NewConf}, State) ->
{State#?MODULE{conf = NewConf}, ok, []};
apply(UnkCmd, State) ->
rabbit_log:debug("~ts: unknown SAC command ~W", [?MODULE, UnkCmd, 10]),
?LOG_DEBUG("~ts: unknown SAC command ~W", [?MODULE, UnkCmd, 10]),
{State, {error, unknown_command}, []}.
purge_node(Node, #?MODULE{groups = Groups0} = State0) ->

View File

@ -17,6 +17,7 @@
-module(rabbit_stream_sac_coordinator_v4).
-include("rabbit_stream_sac_coordinator_v4.hrl").
-include_lib("kernel/include/logger.hrl").
-opaque command() ::
#command_register_consumer{} | #command_unregister_consumer{} |
@ -124,7 +125,7 @@ process_command(Cmd) ->
{ok, Res, _} ->
Res;
{error, _} = Err ->
rabbit_log:warning("SAC coordinator command ~tp returned error ~tp",
?LOG_WARNING("SAC coordinator command ~tp returned error ~tp",
[Cmd, Err]),
Err
end.
@ -251,7 +252,7 @@ apply(#command_activate_consumer{vhost = VirtualHost,
{G, Eff} =
case lookup_group(VirtualHost, Stream, ConsumerName, StreamGroups0) of
undefined ->
rabbit_log:warning("Trying to activate consumer in group ~tp, but "
?LOG_WARNING("Trying to activate consumer in group ~tp, but "
"the group does not longer exist",
[{VirtualHost, Stream, ConsumerName}]),
{undefined, []};

View File

@ -23,6 +23,9 @@
-module(rabbit_sysmon_handler).
-include_lib("kernel/include/logger.hrl").
-behaviour(gen_event).
%% API
@ -89,16 +92,16 @@ handle_event({monitor, PidOrPort, Type, Info}, State=#state{timer_ref=TimerRef})
%% Reset the inactivity timeout
NewTimerRef = reset_timer(TimerRef),
{Fmt, Args} = format_pretty_proc_or_port_info(PidOrPort),
rabbit_log:warning("~tp ~w ~w " ++ Fmt ++ " ~w", [?MODULE, Type, PidOrPort] ++ Args ++ [Info]),
?LOG_WARNING("~tp ~w ~w " ++ Fmt ++ " ~w", [?MODULE, Type, PidOrPort] ++ Args ++ [Info]),
{ok, State#state{timer_ref=NewTimerRef}};
handle_event({suppressed, Type, Info}, State=#state{timer_ref=TimerRef}) ->
%% Reset the inactivity timeout
NewTimerRef = reset_timer(TimerRef),
rabbit_log:debug("~tp encountered a suppressed event of type ~w: ~w", [?MODULE, Type, Info]),
?LOG_DEBUG("~tp encountered a suppressed event of type ~w: ~w", [?MODULE, Type, Info]),
{ok, State#state{timer_ref=NewTimerRef}};
handle_event(Event, State=#state{timer_ref=TimerRef}) ->
NewTimerRef = reset_timer(TimerRef),
rabbit_log:warning("~tp unhandled event: ~tp", [?MODULE, Event]),
?LOG_WARNING("~tp unhandled event: ~tp", [?MODULE, Event]),
{ok, State#state{timer_ref=NewTimerRef}}.
%%--------------------------------------------------------------------
@ -136,7 +139,7 @@ handle_info(inactivity_timeout, State) ->
%% so hibernate to free up resources.
{ok, State, hibernate};
handle_info(Info, State) ->
rabbit_log:info("handle_info got ~tp", [Info]),
?LOG_INFO("handle_info got ~tp", [Info]),
{ok, State}.
%%--------------------------------------------------------------------

View File

@ -20,6 +20,7 @@
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-ifdef(TEST).
-export([pre_khepri_definitions/0]).
@ -46,7 +47,7 @@ create() ->
create(TableName, TableDefinition) ->
TableDefinition1 = proplists:delete(match, TableDefinition),
rabbit_log:debug("Will create a schema database table '~ts'", [TableName]),
?LOG_DEBUG("Will create a schema database table '~ts'", [TableName]),
case mnesia:create_table(TableName, TableDefinition1) of
{atomic, ok} -> ok;
{aborted,{already_exists, TableName}} -> ok;
@ -78,7 +79,7 @@ ensure_secondary_index(Table, Field) ->
-spec ensure_table_copy(mnesia_table(), node(), mnesia_storage_type()) ->
ok | {error, any()}.
ensure_table_copy(TableName, Node, StorageType) ->
rabbit_log:debug("Will add a local schema database copy for table '~ts'", [TableName]),
?LOG_DEBUG("Will add a local schema database copy for table '~ts'", [TableName]),
case mnesia:add_table_copy(TableName, Node, StorageType) of
{atomic, ok} -> ok;
{aborted,{already_exists, TableName}} -> ok;
@ -140,7 +141,7 @@ wait1(TableNames, Timeout, Retries, Silent) ->
true ->
ok;
false ->
rabbit_log:info("Waiting for Mnesia tables for ~tp ms, ~tp retries left",
?LOG_INFO("Waiting for Mnesia tables for ~tp ms, ~tp retries left",
[Timeout, Retries - 1])
end,
Result = case mnesia:wait_for_tables(TableNames, Timeout) of
@ -159,7 +160,7 @@ wait1(TableNames, Timeout, Retries, Silent) ->
true ->
ok;
false ->
rabbit_log:info("Successfully synced tables from a peer"),
?LOG_INFO("Successfully synced tables from a peer"),
ok
end;
{1, {error, _} = Error} ->
@ -169,7 +170,7 @@ wait1(TableNames, Timeout, Retries, Silent) ->
true ->
ok;
false ->
rabbit_log:warning("Error while waiting for Mnesia tables: ~tp", [Error])
?LOG_WARNING("Error while waiting for Mnesia tables: ~tp", [Error])
end,
wait1(TableNames, Timeout, Retries - 1, Silent)
end.

View File

@ -17,6 +17,9 @@
%% allowing you to easily figure out what happened.
-module(rabbit_time_travel_dbg).
-include_lib("kernel/include/logger.hrl").
-compile(export_all).
-compile(nowarn_export_all).
@ -62,7 +65,7 @@ loop(Q) ->
[io_lib:format("~0p~n", [E]) || E <- queue:to_list(Q)]),
loop(Q);
print ->
_ = [logger:error("~0p", [E]) || E <- queue:to_list(Q)],
_ = [?LOG_ERROR("~0p", [E]) || E <- queue:to_list(Q)],
loop(Q);
stop ->
ok;

View File

@ -12,6 +12,7 @@
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("rabbit_common/include/rabbit_framing.hrl").
-include_lib("kernel/include/logger.hrl").
-define(TRACE_VHOSTS, trace_vhosts).
-define(XNAME, <<"amq.rabbitmq.trace">>).
@ -103,10 +104,10 @@ start(VHost)
when is_binary(VHost) ->
case enabled(VHost) of
true ->
rabbit_log:info("Tracing is already enabled for vhost '~ts'", [VHost]),
?LOG_INFO("Tracing is already enabled for vhost '~ts'", [VHost]),
ok;
false ->
rabbit_log:info("Enabling tracing for vhost '~ts'", [VHost]),
?LOG_INFO("Enabling tracing for vhost '~ts'", [VHost]),
update_config(fun(VHosts) -> lists:usort([VHost | VHosts]) end)
end.
@ -115,10 +116,10 @@ stop(VHost)
when is_binary(VHost) ->
case enabled(VHost) of
true ->
rabbit_log:info("Disabling tracing for vhost '~ts'", [VHost]),
?LOG_INFO("Disabling tracing for vhost '~ts'", [VHost]),
update_config(fun(VHosts) -> VHosts -- [VHost] end);
false ->
rabbit_log:info("Tracing is already disabled for vhost '~ts'", [VHost]),
?LOG_INFO("Tracing is already disabled for vhost '~ts'", [VHost]),
ok
end.
@ -128,13 +129,13 @@ update_config(Fun) ->
application:set_env(rabbit, ?TRACE_VHOSTS, VHosts),
Sessions = rabbit_amqp_session:list_local(),
NonAmqpPids = rabbit_networking:local_non_amqp_connections(),
rabbit_log:debug("Refreshing state of channels, ~b sessions and ~b non "
?LOG_DEBUG("Refreshing state of channels, ~b sessions and ~b non "
"AMQP 0.9.1 connections after virtual host tracing changes...",
[length(Sessions), length(NonAmqpPids)]),
Pids = Sessions ++ NonAmqpPids,
lists:foreach(fun(Pid) -> gen_server:cast(Pid, refresh_config) end, Pids),
{Time, ok} = timer:tc(fun rabbit_channel:refresh_config_local/0),
rabbit_log:debug("Refreshed channel states in ~fs", [Time / 1_000_000]),
?LOG_DEBUG("Refreshed channel states in ~fs", [Time / 1_000_000]),
ok.
vhosts_with_tracing_enabled() ->

View File

@ -7,6 +7,9 @@
-module(rabbit_tracking).
-include_lib("kernel/include/logger.hrl").
%% Common behaviour and processing functions for tracking components
%%
%% See in use:
@ -45,12 +48,12 @@ count_on_all_nodes(Mod, Fun, Args, ContextMsg) ->
sum_rpc_multicall_result([{ok, Int}|ResL], [_N|Nodes], ContextMsg, Acc) when is_integer(Int) ->
sum_rpc_multicall_result(ResL, Nodes, ContextMsg, Acc + Int);
sum_rpc_multicall_result([{ok, BadValue}|ResL], [BadNode|Nodes], ContextMsg, Acc) ->
rabbit_log:error(
?LOG_ERROR(
"Failed to fetch number of ~ts on node ~tp:~n not an integer ~tp",
[ContextMsg, BadNode, BadValue]),
sum_rpc_multicall_result(ResL, Nodes, ContextMsg, Acc);
sum_rpc_multicall_result([{Class, Reason}|ResL], [BadNode|Nodes], ContextMsg, Acc) ->
rabbit_log:error(
?LOG_ERROR(
"Failed to fetch number of ~ts on node ~tp:~n~tp:~tp",
[ContextMsg, BadNode, Class, Reason]),
sum_rpc_multicall_result(ResL, Nodes, ContextMsg, Acc);

View File

@ -7,6 +7,9 @@
-module(rabbit_upgrade_preparation).
-include_lib("kernel/include/logger.hrl").
-export([await_online_quorum_plus_one/1,
list_with_minimum_quorum_for_cli/0]).
@ -64,12 +67,12 @@ do_await_safe_online_quorum(IterationsLeft) ->
0 ->
case length(EndangeredQueues) of
0 -> ok;
N -> rabbit_log:info("Waiting for ~p queues and streams to have quorum+1 replicas online. "
N -> ?LOG_INFO("Waiting for ~p queues and streams to have quorum+1 replicas online. "
"You can list them with `rabbitmq-diagnostics check_if_node_is_quorum_critical`", [N])
end,
case endangered_critical_components() of
[] -> ok;
_ -> rabbit_log:info("Waiting for the following critical components to have quorum+1 replicas online: ~p.",
_ -> ?LOG_INFO("Waiting for the following critical components to have quorum+1 replicas online: ~p.",
[endangered_critical_components()])
end;
_ ->

View File

@ -268,6 +268,7 @@
-include_lib("rabbit_common/include/rabbit.hrl").
-include("amqqueue.hrl").
-include_lib("kernel/include/logger.hrl").
%%----------------------------------------------------------------------------
@ -382,7 +383,7 @@ stop(VHost) ->
ok = rabbit_classic_queue_index_v2:stop(VHost).
start_msg_store(VHost, Refs, StartFunState) when is_list(Refs); Refs == undefined ->
rabbit_log:info("Starting message stores for vhost '~ts'", [VHost]),
?LOG_INFO("Starting message stores for vhost '~ts'", [VHost]),
do_start_msg_store(VHost, ?TRANSIENT_MSG_STORE, undefined, ?EMPTY_START_FUN_STATE),
do_start_msg_store(VHost, ?PERSISTENT_MSG_STORE, Refs, StartFunState),
ok.
@ -390,13 +391,13 @@ start_msg_store(VHost, Refs, StartFunState) when is_list(Refs); Refs == undefine
do_start_msg_store(VHost, Type, Refs, StartFunState) ->
case rabbit_vhost_msg_store:start(VHost, Type, Refs, StartFunState) of
{ok, _} ->
rabbit_log:info("Started message store of type ~ts for vhost '~ts'", [abbreviated_type(Type), VHost]);
?LOG_INFO("Started message store of type ~ts for vhost '~ts'", [abbreviated_type(Type), VHost]);
{error, {no_such_vhost, VHost}} = Err ->
rabbit_log:error("Failed to start message store of type ~ts for vhost '~ts': the vhost no longer exists!",
?LOG_ERROR("Failed to start message store of type ~ts for vhost '~ts': the vhost no longer exists!",
[Type, VHost]),
exit(Err);
{error, Error} ->
rabbit_log:error("Failed to start message store of type ~ts for vhost '~ts': ~tp",
?LOG_ERROR("Failed to start message store of type ~ts for vhost '~ts': ~tp",
[Type, VHost, Error]),
exit({error, Error})
end.
@ -891,7 +892,7 @@ convert_from_v1_to_v2_loop(QueueName, V1Index0, V2Index0, V2Store0,
%% Log some progress to keep the user aware of what's going on, as moving
%% embedded messages can take quite some time.
#resource{virtual_host = VHost, name = Name} = QueueName,
rabbit_log:info("Queue ~ts in vhost ~ts converted ~b messages from v1 to v2",
?LOG_INFO("Queue ~ts in vhost ~ts converted ~b messages from v1 to v2",
[Name, VHost, length(Messages)]),
convert_from_v1_to_v2_loop(QueueName, V1Index, V2Index, V2Store, Counters, UpSeqId, HiSeqId, SkipFun).

View File

@ -9,6 +9,7 @@
-include_lib("rabbit_common/include/rabbit.hrl").
-include("vhost.hrl").
-include_lib("kernel/include/logger.hrl").
-export([recover/0, recover/1, read_config/1]).
-export([add/2, add/3, add/4, delete/2, delete_ignoring_protection/2, exists/1, assert/1,
@ -40,7 +41,7 @@ recover() ->
{Time, _} = timer:tc(fun() ->
rabbit_binding:recover()
end),
rabbit_log:debug("rabbit_binding:recover/0 completed in ~fs", [Time/1000000]),
?LOG_DEBUG("rabbit_binding:recover/0 completed in ~fs", [Time/1000000]),
%% rabbit_vhost_sup_sup will start the actual recovery.
%% So recovery will be run every time a vhost supervisor is restarted.
@ -51,7 +52,7 @@ recover() ->
recover(VHost) ->
VHostDir = msg_store_dir_path(VHost),
rabbit_log:info("Making sure data directory '~ts' for vhost '~ts' exists",
?LOG_INFO("Making sure data directory '~ts' for vhost '~ts' exists",
[VHostDir, VHost]),
VHostStubFile = filename:join(VHostDir, ".vhost"),
ok = rabbit_file:ensure_dir(VHostStubFile),
@ -65,25 +66,25 @@ recover(VHost) ->
%% we need to add the default type to the metadata
case rabbit_db_vhost:get(VHost) of
undefined ->
rabbit_log:warning("Cannot check metadata for vhost '~ts' during recovery, record not found.",
?LOG_WARNING("Cannot check metadata for vhost '~ts' during recovery, record not found.",
[VHost]);
VHostRecord ->
Metadata = vhost:get_metadata(VHostRecord),
case maps:is_key(default_queue_type, Metadata) of
true ->
rabbit_log:debug("Default queue type for vhost '~ts' is ~p.",
?LOG_DEBUG("Default queue type for vhost '~ts' is ~p.",
[VHost, maps:get(default_queue_type, Metadata)]),
ok;
false ->
DefaultType = rabbit_queue_type:default_alias(),
rabbit_log:info("Setting missing default queue type to '~p' for vhost '~ts'.",
?LOG_INFO("Setting missing default queue type to '~p' for vhost '~ts'.",
[DefaultType, VHost]),
case rabbit_db_vhost:merge_metadata(VHost, #{default_queue_type => DefaultType}) of
{ok, _UpdatedVHostRecord} ->
ok;
{error, Reason} ->
% Log the error but continue recovery
rabbit_log:warning("Failed to set the default queue type for vhost '~ts': ~p",
?LOG_WARNING("Failed to set the default queue type for vhost '~ts': ~p",
[VHost, Reason])
end
end
@ -95,7 +96,7 @@ recover(VHost) ->
{Time, ok} = timer:tc(fun() ->
rabbit_binding:recover(rabbit_exchange:recover(VHost), QNames)
end),
rabbit_log:debug("rabbit_binding:recover/2 for vhost ~ts completed in ~fs", [VHost, Time/1000000]),
?LOG_DEBUG("rabbit_binding:recover/2 for vhost ~ts completed in ~fs", [VHost, Time/1000000]),
ok = rabbit_amqqueue:start(Recovered),
ok.
@ -124,7 +125,7 @@ ensure_config_file(VHost) ->
_ ->
?LEGACY_INDEX_SEGMENT_ENTRY_COUNT
end,
rabbit_log:info("Setting segment_entry_count for vhost '~ts' with ~b queues to '~b'",
?LOG_INFO("Setting segment_entry_count for vhost '~ts' with ~b queues to '~b'",
[VHost, length(QueueDirs), SegmentEntryCount]),
file:write_file(Path, io_lib:format(
"%% This file is auto-generated! Edit at your own risk!~n"
@ -206,7 +207,7 @@ do_add(Name, Metadata0, ActingUser) ->
case Metadata of
#{default_queue_type := DQT} ->
%% check that the queue type is known
rabbit_log:debug("Default queue type of virtual host '~ts' is ~tp",
?LOG_DEBUG("Default queue type of virtual host '~ts' is ~tp",
[Name, DQT]),
try rabbit_queue_type:discover(DQT) of
QueueType when is_atom(QueueType) ->
@ -225,9 +226,9 @@ do_add(Name, Metadata0, ActingUser) ->
case Description of
undefined ->
rabbit_log:info("Adding vhost '~ts' without a description", [Name]);
?LOG_INFO("Adding vhost '~ts' without a description", [Name]);
Description ->
rabbit_log:info("Adding vhost '~ts' (description: '~ts', tags: ~tp)",
?LOG_INFO("Adding vhost '~ts' (description: '~ts', tags: ~tp)",
[Name, Description, Tags])
end,
DefaultLimits = rabbit_db_vhost_defaults:list_limits(Name),
@ -235,7 +236,7 @@ do_add(Name, Metadata0, ActingUser) ->
{NewOrNot, VHost} = rabbit_db_vhost:create_or_get(Name, DefaultLimits, Metadata),
case NewOrNot of
new ->
rabbit_log:debug("Inserted a virtual host record ~tp", [VHost]);
?LOG_DEBUG("Inserted a virtual host record ~tp", [VHost]);
existing ->
ok
end,
@ -280,7 +281,7 @@ declare_default_exchanges(VHostName, ActingUser) ->
rabbit_misc:for_each_while_ok(
fun({ExchangeName, Type, Internal}) ->
Resource = rabbit_misc:r(VHostName, exchange, ExchangeName),
rabbit_log:debug("Will declare an exchange ~tp", [Resource]),
?LOG_DEBUG("Will declare an exchange ~tp", [Resource]),
case rabbit_exchange:declare(
Resource, Type, true, false, Internal, [],
ActingUser) of
@ -342,7 +343,7 @@ delete(Name, ActingUser) ->
case vhost:is_protected_from_deletion(VHost) of
true ->
Msg = "Refusing to delete virtual host '~ts' because it is protected from deletion",
rabbit_log:debug(Msg, [Name]),
?LOG_DEBUG(Msg, [Name]),
{error, protected_from_deletion};
false ->
delete_ignoring_protection(Name, ActingUser)
@ -356,25 +357,25 @@ delete_ignoring_protection(Name, ActingUser) ->
%% process, which in turn results in further database actions and
%% eventually the termination of that process. Exchange deletion causes
%% notifications which must be sent outside the TX
rabbit_log:info("Deleting vhost '~ts'", [Name]),
?LOG_INFO("Deleting vhost '~ts'", [Name]),
%% TODO: This code does a lot of "list resources, walk through the list to
%% delete each resource". This feature should be provided by each called
%% modules, like `rabbit_amqqueue:delete_all_for_vhost(VHost)'. These new
%% calls would be responsible for the atomicity, not this code.
%% Clear the permissions first to prohibit new incoming connections when deleting a vhost
rabbit_log:info("Clearing permissions in vhost '~ts' because it's being deleted", [Name]),
?LOG_INFO("Clearing permissions in vhost '~ts' because it's being deleted", [Name]),
ok = rabbit_auth_backend_internal:clear_all_permissions_for_vhost(Name, ActingUser),
rabbit_log:info("Deleting queues in vhost '~ts' because it's being deleted", [Name]),
?LOG_INFO("Deleting queues in vhost '~ts' because it's being deleted", [Name]),
QDelFun = fun (Q) -> rabbit_amqqueue:delete(Q, false, false, ActingUser) end,
[begin
QName = amqqueue:get_name(Q),
assert_benign(rabbit_amqqueue:with(QName, QDelFun), ActingUser)
end || Q <- rabbit_amqqueue:list(Name)],
rabbit_log:info("Deleting exchanges in vhost '~ts' because it's being deleted", [Name]),
?LOG_INFO("Deleting exchanges in vhost '~ts' because it's being deleted", [Name]),
ok = rabbit_exchange:delete_all(Name, ActingUser),
rabbit_log:info("Clearing policies and runtime parameters in vhost '~ts' because it's being deleted", [Name]),
?LOG_INFO("Clearing policies and runtime parameters in vhost '~ts' because it's being deleted", [Name]),
_ = rabbit_runtime_parameters:clear_vhost(Name, ActingUser),
rabbit_log:debug("Removing vhost '~ts' from the metadata storage because it's being deleted", [Name]),
?LOG_DEBUG("Removing vhost '~ts' from the metadata storage because it's being deleted", [Name]),
Ret = case rabbit_db_vhost:delete(Name) of
true ->
ok = rabbit_event:notify(
@ -407,7 +408,7 @@ put_vhost(Name, Description, Tags0, DefaultQueueType, Trace, Username) ->
Other -> Other
end,
ParsedTags = parse_tags(Tags),
rabbit_log:debug("Parsed virtual host tags ~tp to ~tp", [Tags, ParsedTags]),
?LOG_DEBUG("Parsed virtual host tags ~tp to ~tp", [Tags, ParsedTags]),
Result = case exists(Name) of
true ->
update(Name, Description, ParsedTags, DefaultQueueType, Username);
@ -451,7 +452,7 @@ is_over_vhost_limit(Name, Limit) when is_integer(Limit) ->
ErrorMsg = rabbit_misc:format("cannot create vhost '~ts': "
"vhost limit of ~tp is reached",
[Name, Limit]),
rabbit_log:error(ErrorMsg),
?LOG_ERROR(ErrorMsg),
exit({vhost_limit_exceeded, ErrorMsg})
end.
@ -510,7 +511,7 @@ vhost_cluster_state(VHost) ->
Nodes).
vhost_down(VHost) ->
rabbit_log:info("Virtual host '~ts' is stopping", [VHost]),
?LOG_INFO("Virtual host '~ts' is stopping", [VHost]),
ok = rabbit_event:notify(vhost_down,
[{name, VHost},
{node, node()},
@ -518,16 +519,16 @@ vhost_down(VHost) ->
delete_storage(VHost) ->
VhostDir = msg_store_dir_path(VHost),
rabbit_log:info("Deleting message store directory for vhost '~ts' at '~ts'", [VHost, VhostDir]),
?LOG_INFO("Deleting message store directory for vhost '~ts' at '~ts'", [VHost, VhostDir]),
%% Message store should be closed when vhost supervisor is closed.
case rabbit_file:recursive_delete([VhostDir]) of
ok -> ok;
{error, {_, enoent}} ->
%% a concurrent delete did the job for us
rabbit_log:warning("Tried to delete storage directories for vhost '~ts', it failed with an ENOENT", [VHost]),
?LOG_WARNING("Tried to delete storage directories for vhost '~ts', it failed with an ENOENT", [VHost]),
ok;
Other ->
rabbit_log:warning("Tried to delete storage directories for vhost '~ts': ~tp", [VHost, Other]),
?LOG_WARNING("Tried to delete storage directories for vhost '~ts': ~tp", [VHost, Other]),
Other
end.
@ -642,7 +643,7 @@ update_tags(VHostName, Tags, ActingUser) ->
end,
VHost = rabbit_db_vhost:set_tags(VHostName, Tags),
ConvertedTags = vhost:get_tags(VHost),
rabbit_log:info("Successfully set tags for virtual host '~ts' to ~tp", [VHostName, ConvertedTags]),
?LOG_INFO("Successfully set tags for virtual host '~ts' to ~tp", [VHostName, ConvertedTags]),
rabbit_event:notify_if(are_different(CurrentTags, ConvertedTags),
vhost_tags_set, [{name, VHostName},
{tags, ConvertedTags},
@ -650,13 +651,13 @@ update_tags(VHostName, Tags, ActingUser) ->
VHost
catch
throw:{error, {no_such_vhost, _}} = Error ->
rabbit_log:warning("Failed to set tags for virtual host '~ts': the virtual host does not exist", [VHostName]),
?LOG_WARNING("Failed to set tags for virtual host '~ts': the virtual host does not exist", [VHostName]),
throw(Error);
throw:Error ->
rabbit_log:warning("Failed to set tags for virtual host '~ts': ~tp", [VHostName, Error]),
?LOG_WARNING("Failed to set tags for virtual host '~ts': ~tp", [VHostName, Error]),
throw(Error);
exit:Error ->
rabbit_log:warning("Failed to set tags for virtual host '~ts': ~tp", [VHostName, Error]),
?LOG_WARNING("Failed to set tags for virtual host '~ts': ~tp", [VHostName, Error]),
exit(Error)
end.
@ -718,7 +719,7 @@ i(metadata, VHost) ->
M#{default_queue_type => DQT}
end;
i(Item, VHost) ->
rabbit_log:error("Don't know how to compute a virtual host info item '~ts' for virtual host '~tp'", [Item, VHost]),
?LOG_ERROR("Don't know how to compute a virtual host info item '~ts' for virtual host '~tp'", [Item, VHost]),
throw({bad_argument, Item}).
-spec info(vhost:vhost() | vhost:name()) -> rabbit_types:infos().

View File

@ -8,6 +8,7 @@
-module(rabbit_vhost_msg_store).
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-export([start/4, stop/2, client_init/4, successfully_recovered_state/2]).
-export([vhost_store_pid/2]).
@ -25,7 +26,7 @@ start(VHost, Type, ClientRefs, StartupFunState) when is_list(ClientRefs);
%% we can get here if a vhost is added and removed concurrently
%% e.g. some integration tests do it
{error, {no_such_vhost, VHost}} = E ->
rabbit_log:error("Failed to start a message store for vhost ~ts: vhost no longer exists!",
?LOG_ERROR("Failed to start a message store for vhost ~ts: vhost no longer exists!",
[VHost]),
E
end.
@ -37,7 +38,7 @@ stop(VHost, Type) ->
ok = supervisor:delete_child(VHostSup, Type);
%% see start/4
{error, {no_such_vhost, VHost}} ->
rabbit_log:error("Failed to stop a message store for vhost ~ts: vhost no longer exists!",
?LOG_ERROR("Failed to stop a message store for vhost ~ts: vhost no longer exists!",
[VHost]),
ok

View File

@ -21,6 +21,9 @@
-module(rabbit_vhost_process).
-include_lib("kernel/include/logger.hrl").
-define(VHOST_CHECK_INTERVAL, 5000).
-behaviour(gen_server2).
@ -35,7 +38,7 @@ start_link(VHost) ->
init([VHost]) ->
process_flag(trap_exit, true),
rabbit_log:debug("Recovering data for virtual host ~ts", [VHost]),
?LOG_DEBUG("Recovering data for virtual host ~ts", [VHost]),
try
%% Recover the vhost data and save it to vhost registry.
ok = rabbit_vhost:recover(VHost),
@ -45,7 +48,7 @@ init([VHost]) ->
{ok, VHost}
catch _:Reason:Stacktrace ->
rabbit_amqqueue:mark_local_durable_queues_stopped(VHost),
rabbit_log:error("Unable to recover vhost ~tp data. Reason ~tp~n"
?LOG_ERROR("Unable to recover vhost ~tp data. Reason ~tp~n"
" Stacktrace ~tp",
[VHost, Reason, Stacktrace]),
{stop, Reason}
@ -61,7 +64,7 @@ handle_info(check_vhost, VHost) ->
case rabbit_vhost:exists(VHost) of
true -> {noreply, VHost};
false ->
rabbit_log:warning("Virtual host '~ts' is gone. "
?LOG_WARNING("Virtual host '~ts' is gone. "
"Stopping its top level supervisor.",
[VHost]),
%% Stop vhost's top supervisor in a one-off process to avoid a deadlock:

View File

@ -8,6 +8,7 @@
-module(rabbit_vhost_sup_sup).
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("kernel/include/logger.hrl").
-behaviour(supervisor).
@ -79,18 +80,18 @@ delete_on_all_nodes(VHost) ->
stop_and_delete_vhost(VHost) ->
StopResult = case lookup_vhost_sup_record(VHost) of
not_found ->
rabbit_log:warning("Supervisor for vhost '~ts' not found during deletion procedure",
?LOG_WARNING("Supervisor for vhost '~ts' not found during deletion procedure",
[VHost]),
ok;
#vhost_sup{wrapper_pid = WrapperPid,
vhost_sup_pid = VHostSupPid} ->
case is_process_alive(WrapperPid) of
false ->
rabbit_log:info("Supervisor ~tp for vhost '~ts' already stopped",
?LOG_INFO("Supervisor ~tp for vhost '~ts' already stopped",
[VHostSupPid, VHost]),
ok;
true ->
rabbit_log:info("Stopping vhost supervisor ~tp"
?LOG_INFO("Stopping vhost supervisor ~tp"
" for vhost '~ts'",
[VHostSupPid, VHost]),
case supervisor:terminate_child(?MODULE, WrapperPid) of
@ -112,7 +113,7 @@ stop_and_delete_vhost(VHost, Node) ->
case rabbit_misc:rpc_call(Node, rabbit_vhost_sup_sup, stop_and_delete_vhost, [VHost]) of
ok -> ok;
{badrpc, RpcErr} ->
rabbit_log:error("Failed to stop and delete a vhost ~tp"
?LOG_ERROR("Failed to stop and delete a vhost ~tp"
" on node ~tp."
" Reason: ~tp",
[VHost, Node, RpcErr]),
@ -124,7 +125,7 @@ init_vhost(VHost) ->
case start_vhost(VHost) of
{ok, _} -> ok;
{error, {already_started, _}} ->
rabbit_log:warning(
?LOG_WARNING(
"Attempting to start an already started vhost '~ts'.",
[VHost]),
ok;
@ -133,13 +134,13 @@ init_vhost(VHost) ->
{error, Reason} ->
case vhost_restart_strategy() of
permanent ->
rabbit_log:error(
?LOG_ERROR(
"Unable to initialize vhost data store for vhost '~ts'."
" Reason: ~tp",
[VHost, Reason]),
throw({error, Reason});
transient ->
rabbit_log:warning(
?LOG_WARNING(
"Unable to initialize vhost data store for vhost '~ts'."
" The vhost will be stopped for this node. "
" Reason: ~tp",

Some files were not shown because too many files have changed in this diff.
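
For context, the change applied across the files above follows one mechanical pattern: each module includes kernel's logger.hrl and replaces rabbit_log:* / rabbit_log_connection:* / logger:* calls with the ?LOG_* macros. The sketch below is a minimal, self-contained illustration of that pattern, not part of the commit; the module and function names are hypothetical. Unlike a plain wrapper function, the macros record the caller's module, function, and line in the log event metadata.

-module(logging_pattern_example).
-include_lib("kernel/include/logger.hrl").
-export([start_thing/1]).

start_thing(VHost) ->
    %% Before this commit the equivalent call would have been:
    %%   rabbit_log:info("Starting thing for vhost '~ts'", [VHost]),
    ?LOG_INFO("Starting thing for vhost '~ts'", [VHost]),
    case do_start(VHost) of
        ok ->
            ok;
        {error, Reason} ->
            ?LOG_WARNING("Failed to start thing for vhost '~ts': ~tp",
                         [VHost, Reason]),
            {error, Reason}
    end.

%% Placeholder so the sketch is self-contained; a real module would start
%% an actual process or resource here.
do_start(_VHost) ->
    ok.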