Merge pull request #2861 from rabbitmq/use-builtin-logger

Switch from Lager to the new Erlang Logger API for logging
This commit is contained in:
Jean-Sébastien Pédron 2021-03-11 15:46:21 +01:00 committed by GitHub
commit 132dee6516
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
208 changed files with 5984 additions and 3378 deletions

View File

@ -228,7 +228,7 @@ hdr_sent(_EvtType, {protocol_header_received, 0, 1, 0, 0}, State) ->
end;
hdr_sent(_EvtType, {protocol_header_received, Protocol, Maj, Min,
Rev}, State) ->
error_logger:warning_msg("Unsupported protocol version: ~b ~b.~b.~b~n",
logger:warning("Unsupported protocol version: ~b ~b.~b.~b",
[Protocol, Maj, Min, Rev]),
{stop, normal, State};
hdr_sent({call, From}, begin_session,
@ -291,7 +291,7 @@ opened(info, {'DOWN', MRef, _, _, _Info},
ok = notify_closed(Config, shutdown),
{stop, normal, State};
opened(_EvtType, Frame, State) ->
error_logger:warning_msg("Unexpected connection frame ~p when in state ~p ~n",
logger:warning("Unexpected connection frame ~p when in state ~p ",
[Frame, State]),
{keep_state, State}.
@ -367,7 +367,7 @@ send_open(#state{socket = Socket, config = Config}) ->
end,
Encoded = amqp10_framing:encode_bin(Open),
Frame = amqp10_binary_generator:build_frame(0, Encoded),
?DBG("CONN <- ~p~n", [Open]),
?DBG("CONN <- ~p", [Open]),
socket_send(Socket, Frame).
@ -375,7 +375,7 @@ send_close(#state{socket = Socket}, _Reason) ->
Close = #'v1_0.close'{},
Encoded = amqp10_framing:encode_bin(Close),
Frame = amqp10_binary_generator:build_frame(0, Encoded),
?DBG("CONN <- ~p~n", [Close]),
?DBG("CONN <- ~p", [Close]),
Ret = socket_send(Socket, Frame),
case Ret of
ok -> _ =
@ -397,7 +397,7 @@ send_sasl_init(State, {plain, User, Pass}) ->
send(Record, FrameType, #state{socket = Socket}) ->
Encoded = amqp10_framing:encode_bin(Record),
Frame = amqp10_binary_generator:build_frame(0, FrameType, Encoded),
?DBG("CONN <- ~p~n", [Record]),
?DBG("CONN <- ~p", [Record]),
socket_send(Socket, Frame).
send_heartbeat(#state{socket = Socket}) ->

View File

@ -158,16 +158,16 @@ handle_event(info, {Tcp, _, Packet}, StateName, #state{buffer = Buffer} = State)
handle_event(info, {TcpError, _, Reason}, StateName, State)
when TcpError == tcp_error orelse TcpError == ssl_error ->
error_logger:warning_msg("AMQP 1.0 connection socket errored, connection state: '~s', reason: '~p'~n",
[StateName, Reason]),
logger:warning("AMQP 1.0 connection socket errored, connection state: '~s', reason: '~p'",
[StateName, Reason]),
State1 = State#state{socket = undefined,
buffer = <<>>,
frame_state = undefined},
{stop, {error, Reason}, State1};
handle_event(info, {TcpClosed, _}, StateName, State)
when TcpClosed == tcp_closed orelse TcpClosed == ssl_closed ->
error_logger:warning_msg("AMQP 1.0 connection socket was closed, connection state: '~s'~n",
[StateName]),
logger:warning("AMQP 1.0 connection socket was closed, connection state: '~s'",
[StateName]),
State1 = State#state{socket = undefined,
buffer = <<>>,
frame_state = undefined},
@ -279,7 +279,7 @@ defer_heartbeat_timer(State) -> State.
route_frame(Channel, FrameType, {Performative, Payload} = Frame, State0) ->
{DestinationPid, State} = find_destination(Channel, FrameType, Performative,
State0),
?DBG("FRAME -> ~p ~p~n ~p~n", [Channel, DestinationPid, Performative]),
?DBG("FRAME -> ~p ~p~n ~p", [Channel, DestinationPid, Performative]),
case Payload of
<<>> -> ok = gen_statem:cast(DestinationPid, Performative);
_ -> ok = gen_statem:cast(DestinationPid, Frame)

View File

@ -372,7 +372,7 @@ mapped(cast, {#'v1_0.transfer'{handle = {uint, InHandle},
ok = notify_link(Link, credit_exhausted),
{next_state, mapped, State};
{transfer_limit_exceeded, State} ->
error_logger:info_msg("transfer_limit_exceeded for link ~p~n", [Link]),
logger:warning("transfer_limit_exceeded for link ~p", [Link]),
Link1 = detach_with_error_cond(Link, State,
?V_1_0_LINK_ERROR_TRANSFER_LIMIT_EXCEEDED),
{next_state, mapped, update_link(Link1, State)}
@ -403,7 +403,7 @@ mapped(cast, #'v1_0.disposition'{role = true, settled = true, first = {uint, Fir
{next_state, mapped, State#state{unsettled = Unsettled}};
mapped(cast, Frame, State) ->
error_logger:warning_msg("Unhandled session frame ~p in state ~p~n",
logger:warning("Unhandled session frame ~p in state ~p",
[Frame, State]),
{next_state, mapped, State};
mapped({call, From},
@ -490,7 +490,7 @@ mapped({call, From}, Msg, State) ->
{keep_state, State1, [{reply, From, Reply}]};
mapped(_EvtType, Msg, _State) ->
error_logger:info_msg("amqp10_session: unhandled msg in mapped state ~W",
logger:warning("amqp10_session: unhandled msg in mapped state ~W",
[Msg, 10]),
keep_state_and_data.

View File

@ -511,7 +511,7 @@ handle_info({bump_credit, Msg}, State) ->
%% @private
handle_info(timed_out_flushing_channel, State) ->
?LOG_WARN("Channel (~p) closing: timed out flushing while "
"connection closing~n", [self()]),
"connection closing", [self()]),
{stop, timed_out_flushing_channel, State};
%% @private
handle_info({'DOWN', _, process, ReturnHandler, shutdown},
@ -520,7 +520,7 @@ handle_info({'DOWN', _, process, ReturnHandler, shutdown},
handle_info({'DOWN', _, process, ReturnHandler, Reason},
State = #state{return_handler = {ReturnHandler, _Ref}}) ->
?LOG_WARN("Channel (~p): Unregistering return handler ~p because it died. "
"Reason: ~p~n", [self(), ReturnHandler, Reason]),
"Reason: ~p", [self(), ReturnHandler, Reason]),
{noreply, State#state{return_handler = none}};
%% @private
handle_info({'DOWN', _, process, ConfirmHandler, shutdown},
@ -529,7 +529,7 @@ handle_info({'DOWN', _, process, ConfirmHandler, shutdown},
handle_info({'DOWN', _, process, ConfirmHandler, Reason},
State = #state{confirm_handler = {ConfirmHandler, _Ref}}) ->
?LOG_WARN("Channel (~p): Unregistering confirm handler ~p because it died. "
"Reason: ~p~n", [self(), ConfirmHandler, Reason]),
"Reason: ~p", [self(), ConfirmHandler, Reason]),
{noreply, State#state{confirm_handler = none}};
%% @private
handle_info({'DOWN', _, process, FlowHandler, shutdown},
@ -538,7 +538,7 @@ handle_info({'DOWN', _, process, FlowHandler, shutdown},
handle_info({'DOWN', _, process, FlowHandler, Reason},
State = #state{flow_handler = {FlowHandler, _Ref}}) ->
?LOG_WARN("Channel (~p): Unregistering flow handler ~p because it died. "
"Reason: ~p~n", [self(), FlowHandler, Reason]),
"Reason: ~p", [self(), FlowHandler, Reason]),
{noreply, State#state{flow_handler = none}};
handle_info({'DOWN', _, process, QPid, _Reason}, State) ->
rabbit_amqqueue_common:notify_sent_queue_down(QPid),
@ -588,13 +588,13 @@ handle_method_to_server(Method, AmqpMsg, From, Sender, Flow,
From, Sender, Flow, State1)};
{ok, none, BlockReply} ->
?LOG_WARN("Channel (~p): discarding method ~p in cast.~n"
"Reason: ~p~n", [self(), Method, BlockReply]),
"Reason: ~p", [self(), Method, BlockReply]),
{noreply, State};
{ok, _, BlockReply} ->
{reply, BlockReply, State};
{{_, InvalidMethodMessage}, none, _} ->
?LOG_WARN("Channel (~p): ignoring cast of ~p method. " ++
InvalidMethodMessage ++ "~n", [self(), Method]),
InvalidMethodMessage ++ "", [self(), Method]),
{noreply, State};
{{InvalidMethodReply, _}, _, _} ->
{reply, {error, InvalidMethodReply}, State}
@ -695,7 +695,7 @@ safely_handle_method_from_server(Method, Content,
_ -> false
end,
if Drop -> ?LOG_INFO("Channel (~p): dropping method ~p from "
"server because channel is closing~n",
"server because channel is closing",
[self(), {Method, Content}]),
{noreply, State};
true ->
@ -776,7 +776,7 @@ handle_method_from_server1(
State = #state{return_handler = ReturnHandler}) ->
case ReturnHandler of
none -> ?LOG_WARN("Channel (~p): received {~p, ~p} but there is "
"no return handler registered~n",
"no return handler registered",
[self(), BasicReturn, AmqpMsg]);
{Pid, _Ref} -> Pid ! {BasicReturn, AmqpMsg}
end,
@ -791,7 +791,7 @@ handle_method_from_server1(#'basic.ack'{} = BasicAck, none,
handle_method_from_server1(#'basic.nack'{} = BasicNack, none,
#state{confirm_handler = none} = State) ->
?LOG_WARN("Channel (~p): received ~p but there is no "
"confirm handler registered~n", [self(), BasicNack]),
"confirm handler registered", [self(), BasicNack]),
{noreply, update_confirm_set(BasicNack, State)};
handle_method_from_server1(#'basic.nack'{} = BasicNack, none,
#state{confirm_handler = {CH, _Ref}} = State) ->
@ -835,7 +835,7 @@ handle_connection_closing(CloseType, Reason,
handle_channel_exit(Reason = #amqp_error{name = ErrorName, explanation = Expl},
State = #state{connection = Connection, number = Number}) ->
%% Sent by rabbit_channel for hard errors in the direct case
?LOG_ERR("connection ~p, channel ~p - error:~n~p~n",
?LOG_ERR("connection ~p, channel ~p - error:~n~p",
[Connection, Number, Reason]),
{true, Code, _} = ?PROTOCOL:lookup_amqp_exception(ErrorName),
ReportedReason = {server_initiated_close, Code, Expl},
@ -928,7 +928,7 @@ server_misbehaved(#amqp_error{} = AmqpError, State = #state{number = Number}) ->
handle_shutdown({server_misbehaved, AmqpError}, State);
{_, Close} ->
?LOG_WARN("Channel (~p) flushing and closing due to soft "
"error caused by the server ~p~n", [self(), AmqpError]),
"error caused by the server ~p", [self(), AmqpError]),
Self = self(),
spawn(fun () -> call(Self, Close) end),
{noreply, State}

View File

@ -203,7 +203,7 @@ internal_pass_frame(Number, Frame, State) ->
case internal_lookup_npa(Number, State) of
undefined ->
?LOG_INFO("Dropping frame ~p for invalid or closed "
"channel number ~p~n", [Frame, Number]),
"channel number ~p", [Frame, Number]),
State;
{ChPid, AState} ->
NewAState = process_channel_frame(Frame, Number, ChPid, AState),

View File

@ -203,7 +203,7 @@ handle_cast(channels_terminated, State) ->
handle_cast({hard_error_in_channel, _Pid, Reason}, State) ->
server_initiated_close(Reason, State);
handle_cast({channel_internal_error, Pid, Reason}, State) ->
?LOG_WARN("Connection (~p) closing: internal error in channel (~p): ~p~n",
?LOG_WARN("Connection (~p) closing: internal error in channel (~p): ~p",
[self(), Pid, Reason]),
internal_error(Pid, Reason, State);
handle_cast({server_misbehaved, AmqpError}, State) ->
@ -218,12 +218,12 @@ handle_cast({register_blocked_handler, HandlerPid}, State) ->
handle_info({'DOWN', _, process, BlockHandler, Reason},
State = #state{block_handler = {BlockHandler, _Ref}}) ->
?LOG_WARN("Connection (~p): Unregistering connection.{blocked,unblocked} handler ~p because it died. "
"Reason: ~p~n", [self(), BlockHandler, Reason]),
"Reason: ~p", [self(), BlockHandler, Reason]),
{noreply, State#state{block_handler = none}};
handle_info({'EXIT', BlockHandler, Reason},
State = #state{block_handler = {BlockHandler, Ref}}) ->
?LOG_WARN("Connection (~p): Unregistering connection.{blocked,unblocked} handler ~p because it died. "
"Reason: ~p~n", [self(), BlockHandler, Reason]),
"Reason: ~p", [self(), BlockHandler, Reason]),
erlang:demonitor(Ref, [flush]),
{noreply, State#state{block_handler = none}};
%% propagate the exit to the module that will stop with a sensible reason logged
@ -329,12 +329,12 @@ internal_error(Pid, Reason, State) ->
server_initiated_close(Close, State) ->
?LOG_WARN("Connection (~p) closing: received hard error ~p "
"from server~n", [self(), Close]),
"from server", [self(), Close]),
set_closing_state(abrupt, #closing{reason = server_initiated_close,
close = Close}, State).
server_misbehaved_close(AmqpError, State) ->
?LOG_WARN("Connection (~p) closing: server misbehaved: ~p~n",
?LOG_WARN("Connection (~p) closing: server misbehaved: ~p",
[self(), AmqpError]),
{0, Close} = rabbit_binary_generator:map_exception(0, AmqpError, ?PROTOCOL),
set_closing_state(abrupt, #closing{reason = server_misbehaved,

View File

@ -54,8 +54,8 @@ maybe_add_verify(Options) ->
% NB: user has explicitly set 'verify'
Options;
_ ->
?LOG_WARN("Connection (~p): Certificate chain verification is not enabled for this TLS connection. "
"Please see https://rabbitmq.com/ssl.html for more information.~n", [self()]),
?LOG_WARN("Connection (~p): certificate chain verification is not enabled for this TLS connection. "
"Please see https://rabbitmq.com/ssl.html for more information.", [self()]),
Options
end.

View File

@ -131,14 +131,14 @@ endef
APPS_DIR := $(CURDIR)/apps
LOCAL_DEPS = sasl rabbitmq_prelaunch os_mon inets compiler public_key crypto ssl syntax_tools xmerl
BUILD_DEPS = rabbitmq_cli syslog
DEPS = cuttlefish ranch lager rabbit_common ra sysmon_handler stdout_formatter recon observer_cli osiris amqp10_common
BUILD_DEPS = rabbitmq_cli
DEPS = cuttlefish ranch rabbit_common ra sysmon_handler stdout_formatter recon observer_cli osiris amqp10_common syslog
TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers amqp_client meck proper
PLT_APPS += mnesia
dep_cuttlefish = hex 2.7.0
dep_syslog = git https://github.com/schlagert/syslog 3.4.5
dep_cuttlefish = git https://github.com/Kyorai/cuttlefish master
dep_syslog = git https://github.com/schlagert/syslog 4.0.0
dep_osiris = git https://github.com/rabbitmq/osiris master
define usage_xml_to_erl

View File

@ -5,6 +5,7 @@
*.coverdata
/ebin/
/.erlang.mk/
/logs/
/rabbitmq_prelaunch.d
/xrefr

View File

@ -3,7 +3,7 @@ PROJECT_DESCRIPTION = RabbitMQ prelaunch setup
PROJECT_VERSION = 1.0.0
PROJECT_MOD = rabbit_prelaunch_app
DEPS = rabbit_common lager
DEPS = rabbit_common jsx
DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk

View File

@ -8,8 +8,11 @@
-module(rabbit_boot_state).
-include_lib("kernel/include/logger.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("rabbit_common/include/logging.hrl").
-export([get/0,
set/1,
wait_for/2,
@ -28,7 +31,8 @@ get() ->
-spec set(boot_state()) -> ok.
set(BootState) ->
rabbit_log_prelaunch:debug("Change boot state to `~s`", [BootState]),
?LOG_DEBUG("Change boot state to `~s`", [BootState],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
?assert(is_valid(BootState)),
case BootState of
stopped -> persistent_term:erase(?PT_KEY_BOOT_STATE);

View File

@ -9,6 +9,10 @@
-behaviour(gen_server).
-include_lib("kernel/include/logger.hrl").
-include_lib("rabbit_common/include/logging.hrl").
-export([start_link/0]).
-export([init/1,
@ -65,20 +69,23 @@ code_change(_OldVsn, State, _Extra) ->
notify_boot_state(ready = BootState,
#state{mechanism = legacy, sd_notify_module = SDNotify}) ->
rabbit_log_prelaunch:debug(
?LOG_DEBUG(
?LOG_PREFIX "notifying of state `~s` (via native module)",
[BootState]),
[BootState],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
sd_notify_legacy(SDNotify);
notify_boot_state(ready = BootState,
#state{mechanism = socat, socket = Socket}) ->
rabbit_log_prelaunch:debug(
?LOG_DEBUG(
?LOG_PREFIX "notifying of state `~s` (via socat(1))",
[BootState]),
[BootState],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
sd_notify_socat(Socket);
notify_boot_state(BootState, _) ->
rabbit_log_prelaunch:debug(
?LOG_DEBUG(
?LOG_PREFIX "ignoring state `~s`",
[BootState]),
[BootState],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
ok.
sd_notify_message() ->
@ -99,9 +106,10 @@ sd_notify_legacy(SDNotify) ->
sd_notify_socat(Socket) ->
case sd_current_unit() of
{ok, Unit} ->
rabbit_log_prelaunch:debug(
?LOG_PREFIX "systemd unit for activation check: \"~s\"~n",
[Unit]),
?LOG_DEBUG(
?LOG_PREFIX "systemd unit for activation check: \"~s\"",
[Unit],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
sd_notify_socat(Socket, Unit);
_ ->
ok
@ -116,9 +124,10 @@ sd_notify_socat(Socket, Unit) ->
Result
catch
Class:Reason ->
rabbit_log_prelaunch:debug(
?LOG_PREFIX "Failed to start socat(1): ~p:~p~n",
[Class, Reason]),
?LOG_DEBUG(
?LOG_PREFIX "Failed to start socat(1): ~p:~p",
[Class, Reason],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
false
end.
@ -147,8 +156,10 @@ sd_open_port(Socket) ->
sd_wait_activation(Port, Unit) ->
case os:find_executable("systemctl") of
false ->
rabbit_log_prelaunch:debug(
?LOG_PREFIX "systemctl(1) unavailable, falling back to sleep~n"),
?LOG_DEBUG(
?LOG_PREFIX "systemctl(1) unavailable, falling back to sleep",
[],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
timer:sleep(5000),
ok;
_ ->
@ -156,8 +167,10 @@ sd_wait_activation(Port, Unit) ->
end.
sd_wait_activation(_, _, 0) ->
rabbit_log_prelaunch:debug(
?LOG_PREFIX "service still in 'activating' state, bailing out~n"),
?LOG_DEBUG(
?LOG_PREFIX "service still in 'activating' state, bailing out",
[],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
ok;
sd_wait_activation(Port, Unit, AttemptsLeft) ->
Ret = os:cmd("systemctl show --property=ActiveState -- '" ++ Unit ++ "'"),
@ -168,7 +181,8 @@ sd_wait_activation(Port, Unit, AttemptsLeft) ->
"ActiveState=" ++ _ ->
ok;
_ = Err ->
rabbit_log_prelaunch:debug(
?LOG_PREFIX "unexpected status from systemd: ~p~n", [Err]),
?LOG_DEBUG(
?LOG_PREFIX "unexpected status from systemd: ~p", [Err],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
ok
end.

View File

@ -0,0 +1,127 @@
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2021 VMware, Inc. or its affiliates. All rights reserved.
%%
-module(rabbit_logger_json_fmt).
-export([format/2]).
%% @doc Formats a logger event as a single line of JSON (jsx-encoded),
%% terminated by a newline character.
%%
%% The event's timestamp, level, message and remaining metadata become
%% the top-level JSON fields `time', `level', `msg' and `meta'.
format(
  #{msg := Msg,
    level := Level,
    meta := #{time := Timestamp} = Meta},
  Config) ->
    %% Timestamp and message are rendered to UTF-8 binaries first so
    %% jsx encodes them as JSON strings.
    FormattedTimestamp = unicode:characters_to_binary(
                           format_time(Timestamp, Config)),
    FormattedMsg = unicode:characters_to_binary(
                     format_msg(Msg, Meta, Config)),
    FormattedMeta = format_meta(Meta, Config),
    Json = jsx:encode(
             [{time, FormattedTimestamp},
              {level, Level},
              {msg, FormattedMsg},
              {meta, FormattedMeta}]),
    %% Returned as an iolist; the trailing newline keeps one event per line.
    [Json, $\n].
%% @doc Renders a logger timestamp (microseconds since the epoch) as an
%% RFC 3339 string.
%%
%% If the formatter configuration carries a `time_offset' entry (any
%% offset accepted by calendar:system_time_to_rfc3339/2, e.g. "Z" or an
%% offset in minutes), it is honoured. Without that entry the previous
%% behavior — local-time offset — is preserved, so existing callers are
%% unaffected.
format_time(Timestamp, Config) ->
    Options0 = [{unit, microsecond}],
    Options = case Config of
                  #{time_offset := Offset} -> [{offset, Offset} | Options0];
                  _ -> Options0
              end,
    calendar:system_time_to_rfc3339(Timestamp, Options).
%% @doc Renders the `msg' part of a logger event to iodata.
%%
%% Strings and reports are normalised to a `{Format, Args}' pair and
%% re-dispatched; the final clause performs the actual formatting.
format_msg({string, Chardata}, Meta, Config) ->
    format_msg({"~ts", [Chardata]}, Meta, Config);
format_msg({report, Report}, Meta, Config) ->
    %% format_report/3 returns a {Format, Args} pair which is then
    %% formatted by the clause below.
    FormattedReport = format_report(Report, Meta, Config),
    format_msg(FormattedReport, Meta, Config);
format_msg({Format, Args}, _, _) ->
    io_lib:format(Format, Args).
%% @doc Converts a logger report to a `{Format, Args}' pair.
%%
%% Well-known OTP reports (application controller and supervisor
%% progress reports) get dedicated formatting. Otherwise the report's
%% own `report_cb' callback is tried, and as a last resort the default
%% logger:format_report/1 is used.
format_report(
  #{label := {application_controller, _}} = Report, Meta, Config) ->
    format_application_progress(Report, Meta, Config);
format_report(
  #{label := {supervisor, progress}} = Report, Meta, Config) ->
    format_supervisor_progress(Report, Meta, Config);
format_report(
  Report, #{report_cb := Cb} = Meta, Config) ->
    try
        %% report_cb may be arity 1 (returns {Format, Args}) or
        %% arity 2 (returns chardata) per the logger documentation.
        case erlang:fun_info(Cb, arity) of
            {arity, 1} -> Cb(Report);
            {arity, 2} -> {"~ts", [Cb(Report, #{})]}
        end
    catch
        _:_:_ ->
            %% A crashing report_cb must not take the formatter down;
            %% drop the callback and fall through to the default clause.
            format_report(Report, maps:remove(report_cb, Meta), Config)
    end;
format_report(Report, _, _) ->
    logger:format_report(Report).
%% @doc Turns an application_controller progress or exit report into a
%% `{Format, Args}' pair suitable for io_lib:format/2.
format_application_progress(#{label := {_, progress},
                              report := Report}, _, _) ->
    App = proplists:get_value(application, Report),
    Node = proplists:get_value(started_at, Report),
    {"Application ~w started on ~0p", [App, Node]};
format_application_progress(#{label := {_, exit},
                              report := Report}, _, _) ->
    App = proplists:get_value(application, Report),
    Reason = proplists:get_value(exited, Report),
    {"Application ~w exited with reason: ~0p", [App, Reason]}.
%% @doc Turns a supervisor progress report into a `{Format, Args}' pair
%% describing which child was started and how.
format_supervisor_progress(#{report := Report}, _, _) ->
    Sup = proplists:get_value(supervisor, Report),
    Child = proplists:get_value(started, Report),
    ChildId = proplists:get_value(id, Child),
    ChildPid = proplists:get_value(pid, Child),
    ChildMFA = proplists:get_value(mfargs, Child),
    {"Supervisor ~w: child ~w started (~w): ~0p",
     [Sup, ChildId, ChildPid, ChildMFA]}.
%% @doc Prepares the metadata map for JSON encoding.
%%
%% The `time' key is dropped (it is already emitted as the top-level
%% `time' field), `domain' components are joined into a dotted string,
%% and every other value is converted to a jsx-compatible term.
format_meta(Meta, _) ->
    maps:fold(
      fun
          (time, _, Acc) ->
              Acc;
          (domain = Key, Components, Acc) ->
              %% e.g. [rabbitmq, prelaunch] -> <<"rabbitmq.prelaunch">>
              Term = unicode:characters_to_binary(
                       string:join(
                         [atom_to_list(Cmp) || Cmp <- Components],
                         ".")),
              Acc#{Key => Term};
          (Key, Value, Acc) ->
              %% NOTE(review): convert_to_types_accepted_by_jsx/1 only
              %% returns the atom `false' when the value itself is
              %% `false', so metadata entries whose value is `false'
              %% are silently dropped here — confirm this is intended.
              case convert_to_types_accepted_by_jsx(Value) of
                  false -> Acc;
                  Term -> Acc#{Key => Term}
              end
      end, #{}, Meta).
%% @doc Recursively converts an arbitrary Erlang term into something
%% jsx can encode.
%%
%% Maps are converted value-by-value, character lists become UTF-8
%% binaries, other lists are converted element-wise, tuples become
%% lists, and funs/pids/ports/references are rendered as their textual
%% representation. Anything else is passed through unchanged.
convert_to_types_accepted_by_jsx(V) when is_map(V) ->
    maps:map(fun(_, Inner) -> convert_to_types_accepted_by_jsx(Inner) end, V);
convert_to_types_accepted_by_jsx(V) when is_list(V) ->
    case io_lib:deep_char_list(V) of
        true ->
            %% A printable character list is treated as a string.
            unicode:characters_to_binary(V);
        false ->
            [convert_to_types_accepted_by_jsx(E) || E <- V]
    end;
convert_to_types_accepted_by_jsx(V) when is_tuple(V) ->
    convert_to_types_accepted_by_jsx(erlang:tuple_to_list(V));
convert_to_types_accepted_by_jsx(V) when is_function(V) ->
    unicode:characters_to_binary(erlang:fun_to_list(V));
convert_to_types_accepted_by_jsx(V) when is_pid(V) ->
    unicode:characters_to_binary(erlang:pid_to_list(V));
convert_to_types_accepted_by_jsx(V) when is_port(V) ->
    unicode:characters_to_binary(erlang:port_to_list(V));
convert_to_types_accepted_by_jsx(V) when is_reference(V) ->
    unicode:characters_to_binary(erlang:ref_to_list(V));
convert_to_types_accepted_by_jsx(V) ->
    V.

View File

@ -0,0 +1,841 @@
%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 2017-2020. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% %CopyrightEnd%
%%
-module(rabbit_logger_std_h).
%-include("logger.hrl").
%-include("logger_internal.hrl").
%-include("logger_h_common.hrl").
-ifdef(TEST).
-define(io_put_chars(DEVICE, DATA), begin
%% We log to Common Test log as well.
%% This is the file we use to check
%% the message made it to
%% stdout/stderr.
ct:log("~ts", [DATA]),
io:put_chars(DEVICE, DATA)
end).
-else.
-define(io_put_chars(DEVICE, DATA), io:put_chars(DEVICE, DATA)).
-endif.
-define(file_write(DEVICE, DATA), file:write(DEVICE, DATA)).
-define(file_datasync(DEVICE), file:datasync(DEVICE)).
-include_lib("kernel/include/file.hrl").
%% API
-export([filesync/1]).
-export([is_date_based_rotation_needed/3]).
%% logger_h_common callbacks
-export([init/2, check_config/4, config_changed/3, reset_state/2,
filesync/3, write/4, handle_info/3, terminate/3]).
%% logger callbacks
-export([log/2, adding_handler/1, removing_handler/1, changing_config/3,
filter_config/1]).
-define(DEFAULT_CALL_TIMEOUT, 5000).
%%%===================================================================
%%% API
%%%===================================================================
%%%-----------------------------------------------------------------
%%%
%% @doc Synchronises the handler's log file to disk.
%%
%% Thin wrapper around logger_h_common:filesync/2; the work happens in
%% the handler process.
-spec filesync(Name) -> ok | {error,Reason} when
      Name :: atom(),
      Reason :: handler_busy | {badarg,term()}.

filesync(Name) ->
    logger_h_common:filesync(?MODULE,Name).

%%%===================================================================
%%% logger callbacks - just forward to logger_h_common
%%%===================================================================

%%%-----------------------------------------------------------------
%%% Handler being added
-spec adding_handler(Config) -> {ok,Config} | {error,Reason} when
      Config :: logger:handler_config(),
      Reason :: term().

adding_handler(Config) ->
    logger_h_common:adding_handler(Config).

%%%-----------------------------------------------------------------
%%% Updating handler config
-spec changing_config(SetOrUpdate, OldConfig, NewConfig) ->
          {ok,Config} | {error,Reason} when
      SetOrUpdate :: set | update,
      OldConfig :: logger:handler_config(),
      NewConfig :: logger:handler_config(),
      Config :: logger:handler_config(),
      Reason :: term().

changing_config(SetOrUpdate, OldConfig, NewConfig) ->
    logger_h_common:changing_config(SetOrUpdate, OldConfig, NewConfig).

%%%-----------------------------------------------------------------
%%% Handler being removed
-spec removing_handler(Config) -> ok when
      Config :: logger:handler_config().

removing_handler(Config) ->
    logger_h_common:removing_handler(Config).

%%%-----------------------------------------------------------------
%%% Log a string or report
-spec log(LogEvent, Config) -> ok when
      LogEvent :: logger:log_event(),
      Config :: logger:handler_config().

log(LogEvent, Config) ->
    logger_h_common:log(LogEvent, Config).

%%%-----------------------------------------------------------------
%%% Remove internal fields from configuration
-spec filter_config(Config) -> Config when
      Config :: logger:handler_config().

filter_config(Config) ->
    logger_h_common:filter_config(Config).
%%%===================================================================
%%% logger_h_common callbacks
%%%===================================================================
%% @doc logger_h_common callback: starts the file-controller process
%% for this handler instance.
%%
%% Only the handler-specific keys are kept in the handler state; on
%% success the controller's pid is recorded under `file_ctrl_pid'.
init(Name, Config) ->
    HConfig = maps:with([type, file, modes, file_check, max_no_bytes,
                         rotate_on_date, max_no_files, compress_on_rotate],
                        Config),
    case file_ctrl_start(Name, HConfig) of
        {ok, Pid} ->
            {ok, HConfig#{file_ctrl_pid => Pid}};
        Error ->
            Error
    end.
%% @doc Validates and completes a handler configuration
%% (logger_h_common callback).
%%
%% On the first `set' (no previous config) defaults are merged in. On
%% later changes the write-once fields (`type', `file', `modes') are
%% carried over from the old config and must not be altered.
check_config(Name,set,undefined,NewHConfig) ->
    check_h_config(merge_default_config(Name,normalize_config(NewHConfig)));
check_config(Name,SetOrUpdate,OldHConfig,NewHConfig0) ->
    WriteOnce = maps:with([type,file,modes],OldHConfig),
    Default =
        case SetOrUpdate of
            set ->
                %% Do not reset write-once fields to defaults
                merge_default_config(Name,WriteOnce);
            update ->
                OldHConfig
        end,
    NewHConfig = maps:merge(Default, normalize_config(NewHConfig0)),
    %% Fail if write-once fields are changed
    case maps:with([type,file,modes],NewHConfig) of
        WriteOnce ->
            check_h_config(NewHConfig);
        Other ->
            {error,{illegal_config_change,?MODULE,WriteOnce,Other}}
    end.
%% @doc Validates the handler-specific configuration keys.
%%
%% check_h_config/1 drives the per-key validation and, on success,
%% finalises the file options via fix_file_opts/1.
check_h_config(HConfig) ->
    case check_h_config(maps:get(type,HConfig),maps:to_list(HConfig)) of
        ok ->
            {ok,fix_file_opts(HConfig)};
        {error,{Key,Value}} ->
            {error,{invalid_config,?MODULE,#{Key=>Value}}}
    end.

%% Per-key validation, parameterised by handler type. Each matching
%% clause consumes one key; a key no clause accepts is reported as the
%% offending `{Key,Value}' pair.
check_h_config(Type,[{type,Type} | Config]) when Type =:= standard_io;
                                                 Type =:= standard_error;
                                                 Type =:= file ->
    check_h_config(Type,Config);
check_h_config({device,Device},[{type,{device,Device}} | Config]) ->
    check_h_config({device,Device},Config);
check_h_config(file,[{file,File} | Config]) when is_list(File) ->
    check_h_config(file,Config);
check_h_config(file,[{modes,Modes} | Config]) when is_list(Modes) ->
    check_h_config(file,Config);
check_h_config(file,[{max_no_bytes,Size} | Config])
  when (is_integer(Size) andalso Size>0) orelse Size=:=infinity ->
    check_h_config(file,Config);
check_h_config(file,[{rotate_on_date,DateSpec}=Param | Config])
  when is_list(DateSpec) orelse DateSpec=:=false ->
    %% The date spec is parsed here only to validate it; the parsed
    %% form is recomputed when the file controller starts.
    case parse_date_spec(DateSpec) of
        error -> {error,Param};
        _ -> check_h_config(file,Config)
    end;
check_h_config(file,[{max_no_files,Num} | Config]) when is_integer(Num), Num>=0 ->
    check_h_config(file,Config);
check_h_config(file,[{compress_on_rotate,Bool} | Config]) when is_boolean(Bool) ->
    check_h_config(file,Config);
check_h_config(file,[{file_check,FileCheck} | Config])
  when is_integer(FileCheck), FileCheck>=0 ->
    check_h_config(file,Config);
check_h_config(_Type,[Other | _]) ->
    {error,Other};
check_h_config(_Type,[]) ->
    ok.
%% @doc Normalises the user-facing configuration shapes.
%%
%% The `{file, File}' and `{file, File, Modes}' type tuples are
%% flattened into separate `type'/`file'/`modes' keys, and any file
%% name is made absolute.
normalize_config(#{type := {file, File}} = Config) ->
    normalize_config(Config#{type => file, file => File});
normalize_config(#{type := {file, File, Modes}} = Config) ->
    normalize_config(Config#{type => file, file => File, modes => Modes});
normalize_config(#{file := File} = Config) ->
    Config#{file => filename:absname(File)};
normalize_config(Config) ->
    Config.
%% @doc Fills in defaults for any keys missing from the user-supplied
%% configuration; the handler type is inferred when not given.
merge_default_config(Name, #{type := Type} = Given) ->
    merge_default_config(Name, Type, Given);
merge_default_config(Name, #{file := _} = Given) ->
    %% A file name without an explicit type implies a file handler.
    merge_default_config(Name, file, Given);
merge_default_config(Name, Given) ->
    merge_default_config(Name, standard_io, Given).

%% User-supplied values take precedence over the defaults.
merge_default_config(Name, Type, Given) ->
    maps:merge(get_default_config(Name, Type), Given).
%% @doc Returns the default handler configuration for the given type.
%%
%% For `file' handlers the default file name is derived from the
%% handler name, resolved against the current working directory. For
%% any other type only the type itself is recorded.
get_default_config(Name,file) ->
    #{type => file,
      file => filename:absname(atom_to_list(Name)),
      modes => [raw,append],
      file_check => 0,
      max_no_bytes => infinity,
      rotate_on_date => false,
      max_no_files => 0,
      compress_on_rotate => false};
get_default_config(_Name,Type) ->
    #{type => Type}.
%% @doc Finalises file options after validation.
%%
%% File-backed handlers get their open modes normalised; non-file
%% handlers (no `modes' key) have periodic filesync disabled since
%% there is no file to sync.
fix_file_opts(#{modes:=Modes}=HConfig) ->
    HConfig#{modes=>fix_modes(Modes)};
fix_file_opts(HConfig) ->
    HConfig#{filesync_repeat_interval=>no_repeat}.
%% @doc Normalises the file-open modes so the log file is opened raw,
%% appending, and buffered with delayed_write.
fix_modes(Modes) ->
    %% Ensure write|append|exclusive
    Modes1 =
        case [M || M <- Modes,
                   lists:member(M,[write,append,exclusive])] of
            [] -> [append|Modes];
            _ -> Modes
        end,
    %% Ensure raw
    %% (Checking membership in the original Modes is equivalent here:
    %% the step above can only have prepended `append', never `raw'.)
    Modes2 =
        case lists:member(raw,Modes) of
            false -> [raw|Modes1];
            true -> Modes1
        end,
    %% Ensure delayed_write
    case lists:partition(fun(delayed_write) -> true;
                            ({delayed_write,_,_}) -> true;
                            (_) -> false
                         end, Modes2) of
        {[],_} ->
            [delayed_write|Modes2];
        _ ->
            Modes2
    end.
%% @doc Applies a changed handler configuration to the running state
%% (logger_h_common callback).
%%
%% The first clause matches when none of the rotation/check settings
%% changed, so the state is returned untouched. Otherwise the new
%% settings are forwarded to the file-controller process and mirrored
%% into the state. The final clause covers non-file handlers.
config_changed(_Name,
               #{file_check:=FileCheck,
                 max_no_bytes:=Size,
                 rotate_on_date:=DateSpec,
                 max_no_files:=Count,
                 compress_on_rotate:=Compress},
               #{file_check:=FileCheck,
                 max_no_bytes:=Size,
                 rotate_on_date:=DateSpec,
                 max_no_files:=Count,
                 compress_on_rotate:=Compress}=State) ->
    State;
config_changed(_Name,
               #{file_check:=FileCheck,
                 max_no_bytes:=Size,
                 rotate_on_date:=DateSpec,
                 max_no_files:=Count,
                 compress_on_rotate:=Compress},
               #{file_ctrl_pid := FileCtrlPid} = State) ->
    %% Asynchronous: the controller picks the new settings up from its
    %% mailbox; `:=' updates assert the keys already exist in State.
    FileCtrlPid ! {update_config,#{file_check=>FileCheck,
                                   max_no_bytes=>Size,
                                   rotate_on_date=>DateSpec,
                                   max_no_files=>Count,
                                   compress_on_rotate=>Compress}},
    State#{file_check:=FileCheck,
           max_no_bytes:=Size,
           rotate_on_date:=DateSpec,
           max_no_files:=Count,
           compress_on_rotate:=Compress};
config_changed(_Name,_NewHConfig,State) ->
    State.
%% @doc Syncs the log file (logger_h_common callback); delegated to the
%% file-controller process.
filesync(_Name, SyncAsync, #{file_ctrl_pid := FileCtrlPid} = State) ->
    Result = file_ctrl_filesync(SyncAsync, FileCtrlPid),
    {Result,State}.

%% @doc Writes a formatted log entry (logger_h_common callback) via the
%% file-controller process.
write(_Name, SyncAsync, Bin, #{file_ctrl_pid:=FileCtrlPid} = State) ->
    Result = file_write(SyncAsync, FileCtrlPid, Bin),
    {Result,State}.

%% @doc Nothing to reset for this handler.
reset_state(_Name, State) ->
    State.

%% @doc Handles stray messages delivered to the handler process.
handle_info(_Name, {'EXIT',Pid,Why}, #{file_ctrl_pid := Pid}=State) ->
    %% file_ctrl_pid died, file error, terminate handler
    exit({error,{write_failed,maps:with([type,file,modes],State),Why}});
handle_info(_, _, State) ->
    State.
%% @doc Stops the file-controller process when the handler terminates.
%%
%% A graceful stop is attempted first; if the controller has not exited
%% within ?DEFAULT_CALL_TIMEOUT it is killed.
terminate(_Name, _Reason, #{file_ctrl_pid:=FWPid}) ->
    case is_process_alive(FWPid) of
        true ->
            %% Unlink first so the controller's exit does not take this
            %% process down with it.
            unlink(FWPid),
            _ = file_ctrl_stop(FWPid),
            MRef = erlang:monitor(process, FWPid),
            receive
                {'DOWN',MRef,_,_,_} ->
                    ok
            after
                ?DEFAULT_CALL_TIMEOUT ->
                    exit(FWPid, kill),
                    ok
            end;
        false ->
            ok
    end.
%%%===================================================================
%%% Internal functions
%%%===================================================================
%%%-----------------------------------------------------------------
%%%
%% @doc Opens the log file and builds the file-controller state map.
%%
%% The parent directory is created if needed. The file's inode is
%% remembered so the controller can later detect the file being
%% rotated or removed externally. Any error or exception is returned
%% as `{error,Reason}'.
open_log_file(HandlerName,#{type:=file,
                            file:=FileName,
                            modes:=Modes,
                            file_check:=FileCheck}) ->
    try
        case filelib:ensure_dir(FileName) of
            ok ->
                case file:open(FileName, Modes) of
                    {ok, Fd} ->
                        {ok,#file_info{inode=INode}} =
                            file:read_file_info(FileName,[raw]),
                        %% When the file is re-opened later we must
                        %% always append, never truncate or fail on an
                        %% existing file.
                        UpdateModes = [append | Modes--[write,append,exclusive]],
                        {ok,#{handler_name=>HandlerName,
                              file_name=>FileName,
                              modes=>UpdateModes,
                              file_check=>FileCheck,
                              fd=>Fd,
                              inode=>INode,
                              last_check=>timestamp(),
                              synced=>false,
                              write_res=>ok,
                              sync_res=>ok}};
                    Error ->
                        Error
                end;
            Error ->
                Error
        end
    catch
        _:Reason -> {error,Reason}
    end.
%% @doc Closes the log file, flushing the delayed_write buffer to disk
%% first; a no-op when the state has no open file descriptor.
close_log_file(#{fd:=Fd}) ->
    _ = file:datasync(Fd), %% file:datasync may return error as it will flush the delayed_write buffer
    _ = file:close(Fd),
    ok;
close_log_file(_) ->
    ok.

%% A special close that closes the FD properly when the delayed write close failed
delayed_write_close(#{fd:=Fd}) ->
    case file:close(Fd) of
        %% We got an error while closing, could be a delayed write failing
        %% So we close again in order to make sure the file is closed.
        {error, _} ->
            file:close(Fd);
        Res ->
            Res
    end.
%%%-----------------------------------------------------------------
%%% File control process
%% @doc Spawns and links the file-controller process and waits for it
%% to report the outcome of opening its file or device.
file_ctrl_start(HandlerName, HConfig) ->
    Starter = self(),
    FileCtrlPid =
        spawn_link(fun() ->
                           file_ctrl_init(HandlerName, HConfig, Starter)
                   end),
    receive
        {FileCtrlPid,ok} ->
            {ok,FileCtrlPid};
        {FileCtrlPid,Error} ->
            Error
    after
        ?DEFAULT_CALL_TIMEOUT ->
            {error,file_ctrl_process_not_started}
    end.
%% Ask the file-controller process to terminate (asynchronous, no reply).
file_ctrl_stop(Pid) ->
    erlang:send(Pid, stop).
%% Hand a formatted binary over to the file-controller process, either
%% fire-and-forget (async) or waiting for the write ack (sync).
file_write(async, Pid, Bin) ->
    _ = erlang:send(Pid, {log, Bin}),
    ok;
file_write(sync, Pid, Bin) ->
    file_ctrl_call(Pid, {log, Bin}).
%% Ask the file-controller to fsync the log file, either fire-and-forget
%% (async) or waiting for completion (sync).
file_ctrl_filesync(async, Pid) ->
    _ = erlang:send(Pid, filesync),
    ok;
file_ctrl_filesync(sync, Pid) ->
    file_ctrl_call(Pid, filesync).
%% Synchronous call to the file-controller process: send Msg tagged with a
%% monitor reference and wait for the matching {MRef,Result} reply.
%% Returns the controller's result, {error,Reason} if the controller dies,
%% or {error,{no_response,Pid}} on timeout.
file_ctrl_call(Pid, Msg) ->
    %% The monitor reference doubles as the unique call tag.
    MRef = monitor(process, Pid),
    Pid ! {Msg,{self(),MRef}},
    receive
        {MRef,Result} ->
            demonitor(MRef, [flush]),
            Result;
        {'DOWN',MRef,_Type,_Object,Reason} ->
            {error,Reason}
    after
        ?DEFAULT_CALL_TIMEOUT ->
            %% If this timeout triggers we will get a stray
            %% reply message in our mailbox eventually.
            %% That does not really matter though as it will
            %% end up in this module's handle_info and be ignored
            demonitor(MRef, [flush]),
            {error,{no_response,Pid}}
    end.
%% Entry point of the file-controller process (spawned by file_ctrl_start/2).
%% Acks the starter with {self(),ok} or {self(),{error,...}}, then enters
%% file_ctrl_loop/1.  Three sink types: a regular file (with rotation), a
%% caller-supplied io device, or a standard device (standard_io/error).
file_ctrl_init(HandlerName,
               #{type:=file,
                 max_no_bytes:=Size,
                 rotate_on_date:=DateSpec,
                 max_no_files:=Count,
                 compress_on_rotate:=Compress,
                 file:=FileName} = HConfig,
               Starter) ->
    %% off_heap: log bursts can queue many large binaries; keep them off the
    %% process heap to avoid GC pressure.
    process_flag(message_queue_data, off_heap),
    case open_log_file(HandlerName,HConfig) of
        {ok,State} ->
            Starter ! {self(),ok},
            %% Do the initial rotate (if any) after we ack the starting
            %% process as otherwise startup of the system will be
            %% delayed/crash
            case parse_date_spec(DateSpec) of
                error ->
                    %% NOTE(review): 'ok' was already sent above, so the
                    %% starter never sees this error tuple, and this process
                    %% ends here without entering the loop — confirm this is
                    %% the intended failure mode for an invalid date spec.
                    Starter ! {self(),{error,{invalid_date_spec,DateSpec}}};
                ParsedDS ->
                    RotState = update_rotation({Size,ParsedDS,Count,Compress},State),
                    file_ctrl_loop(RotState)
            end;
        {error,Reason} ->
            Starter ! {self(),{error,{open_failed,FileName,Reason}}}
    end;
%% Caller-supplied io device: no rotation, writes go straight to the device.
file_ctrl_init(HandlerName, #{type:={device,Dev}}, Starter) ->
    Starter ! {self(),ok},
    file_ctrl_loop(#{handler_name=>HandlerName,dev=>Dev});
%% Standard device (standard_io / standard_error).
file_ctrl_init(HandlerName, #{type:=StdDev}, Starter) ->
    Starter ! {self(),ok},
    file_ctrl_loop(#{handler_name=>HandlerName,dev=>StdDev}).
%% Main loop of the file-controller process.  Writes and filesyncs arrive
%% either as plain messages (async) or as {Msg,{From,MRef}} tuples (sync,
%% sent by file_ctrl_call/2 and answered with {MRef,ok}).  Sync requests
%% additionally re-check the file (ensure_file/1) before acting.
file_ctrl_loop(State) ->
    receive
        %% asynchronous event
        {log,Bin} ->
            State1 = write_to_dev(Bin,State),
            file_ctrl_loop(State1);
        %% synchronous event
        {{log,Bin},{From,MRef}} ->
            State1 = ensure_file(State),
            State2 = write_to_dev(Bin,State1),
            From ! {MRef,ok},
            file_ctrl_loop(State2);
        filesync ->
            State1 = sync_dev(State),
            file_ctrl_loop(State1);
        {filesync,{From,MRef}} ->
            State1 = ensure_file(State),
            State2 = sync_dev(State1),
            From ! {MRef,ok},
            file_ctrl_loop(State2);
        %% Runtime reconfiguration of check interval and rotation settings.
        {update_config,#{file_check:=FileCheck,
                         max_no_bytes:=Size,
                         rotate_on_date:=DateSpec,
                         max_no_files:=Count,
                         compress_on_rotate:=Compress}} ->
            case parse_date_spec(DateSpec) of
                error ->
                    %% FIXME: Report parsing error?
                    file_ctrl_loop(State#{file_check=>FileCheck});
                ParsedDS ->
                    State1 = update_rotation({Size,ParsedDS,Count,Compress},State),
                    file_ctrl_loop(State1#{file_check=>FileCheck})
            end;
        stop ->
            %% Terminate the loop; the handler waits for our exit.
            close_log_file(State),
            stopped
    end.
%% Re-check the log file (see ensure_file/1) at most every 'file_check'
%% milliseconds; 0 means check on every write.  States without a file_check
%% interval (plain devices) pass through untouched.
maybe_ensure_file(#{file_check:=0}=State) ->
    ensure_file(State);
maybe_ensure_file(#{last_check:=LastCheck,file_check:=Interval}=State)
  when is_integer(Interval) ->
    Now = timestamp(),
    case Now - LastCheck > Interval of
        true  -> ensure_file(State);
        false -> State
    end;
maybe_ensure_file(State) ->
    State.
%% In order to play well with tools like logrotate, we need to be able
%% to re-create the file if it has disappeared (e.g. if rotated by
%% logrotate).  The inode number is compared to detect that the path now
%% points at a different file; states without an 'inode' key (plain io
%% devices) fall through to the second clause untouched.
ensure_file(#{inode:=INode0,file_name:=FileName,modes:=Modes}=State) ->
    case file:read_file_info(FileName,[raw]) of
        {ok,#file_info{inode=INode0}} ->
            %% Same file as before: just refresh the check timestamp.
            State#{last_check=>timestamp()};
        _ ->
            %% File moved/deleted/replaced: close the stale fd and reopen
            %% the path (modes were rewritten to 'append' at open time).
            close_log_file(State),
            case file:open(FileName,Modes) of
                {ok,Fd} ->
                    {ok,#file_info{inode=INode}} =
                        file:read_file_info(FileName,[raw]),
                    State#{fd=>Fd,inode=>INode,
                           last_check=>timestamp(),
                           synced=>true,sync_res=>ok};
                Error ->
                    %% Cannot continue without a writable file; crash the
                    %% controller so the failure is visible.
                    exit({could_not_reopen_file,Error})
            end
    end;
ensure_file(State) ->
    State.
%% Write one pre-formatted binary to the log sink.  First clause: plain io
%% device (no rotation, no state change).  Second clause: regular file, with
%% periodic re-open check and size/date based rotation.
write_to_dev(Bin,#{dev:=DevName}=State) ->
    ?io_put_chars(DevName, Bin),
    State;
write_to_dev(Bin, State) ->
    State1 = #{fd:=Fd} = maybe_ensure_file(State),
    Result = ?file_write(Fd, Bin),
    %% Rotation is checked after the write so curr_size accounts for Bin.
    State2 = maybe_rotate_file(Bin,State1),
    %% Report the write error only if it differs from the last one.
    maybe_notify_error(write,Result,State2),
    State2#{synced=>false,write_res=>Result}.
%% fsync the log file, but only when there are unsynced writes; a state
%% that is already synced (or a plain device) passes through unchanged.
sync_dev(#{synced:=false}=State) ->
    Checked = #{fd:=Fd} = maybe_ensure_file(State),
    SyncRes = ?file_datasync(Fd),
    %% Report the sync error only if it differs from the last one.
    maybe_notify_error(filesync, SyncRes, Checked),
    Checked#{synced=>true, sync_res=>SyncRes};
sync_dev(State) ->
    State.
%% (Re)initialize the 'rotation' sub-map in State.  {infinity,false,_,_}
%% means rotation is fully disabled (no size limit, no date spec): delete
%% any existing archives and drop the rotation key.  Otherwise record the
%% current file size and immediately apply the new archive count and
%% compression settings (possibly rotating right away).
update_rotation({infinity,false,_,_},State) ->
    maybe_remove_archives(0,State),
    maps:remove(rotation,State);
update_rotation({Size,DateSpec,Count,Compress},#{file_name:=FileName}=State) ->
    %% Drop archives beyond the new count first.
    maybe_remove_archives(Count,State),
    {ok,#file_info{size=CurrSize}} = file:read_file_info(FileName,[raw]),
    State1 = State#{rotation=>#{size=>Size,
                                on_date=>DateSpec,
                                count=>Count,
                                compress=>Compress,
                                curr_size=>CurrSize}},
    %% Re-compress/decompress existing archives to match the new setting.
    maybe_update_compress(0,State1),
    maybe_rotate_file(0,State1).
%% Parse a newsyslog.conf-style date rotation spec:
%%   "$D23"    - every day at 23:00
%%   "$W5D16"  - every Friday at 16:00 (days: 0 = Sunday .. 6 = Saturday)
%%   "$M1D0"   - on the 1st of every month at 00:00 ("$ML"/"$Ml" = last day)
%% Returns false (no date-based rotation), a map describing the spec, or
%% 'error' when the spec cannot be parsed.
%%
%% Fixes vs. previous version: removed leftover io:format/2 debug prints;
%% the day parsers reused the already-bound variable 'Rest' in their case
%% patterns, so any week/month spec with a day component could never match
%% and raised case_clause; parse_hour now skips the "D" separator, accepts
%% an omitted hour (keeping the hour=>0 default set by the callers) and
%% returns 'error' instead of crashing on trailing garbage.
parse_date_spec(false) ->
    false;
parse_date_spec("") ->
    false;
parse_date_spec([$$,$D | DateSpec]) ->
    parse_hour(DateSpec, #{every=>day, hour=>0});
parse_date_spec([$$,$W | DateSpec]) ->
    parse_day_of_week(DateSpec, #{every=>week, hour=>0});
parse_date_spec([$$,$M | DateSpec]) ->
    parse_day_of_month(DateSpec, #{every=>month, hour=>0});
parse_date_spec(_DateSpec) ->
    error.

%% Parse the (optional) trailing hour.  An empty rest keeps the default
%% hour already stored in Result; a leading $D separator (as in "$W5D16")
%% is skipped.
parse_hour([], Result) ->
    Result;
parse_hour([$D | Rest], Result) ->
    parse_hour(Rest, Result);
parse_hour(Rest, Result) ->
    case date_string_to_int(Rest, 0, 23) of
        {Hour, ""} -> Result#{hour => Hour};
        _          -> error
    end.

%% Parse the day-of-week number (0..6, 0 = Sunday) and hand the remainder
%% to parse_hour/2.
parse_day_of_week(Rest, Result) ->
    case date_string_to_int(Rest, 0, 6) of
        {DayOfWeek, Rest1} -> parse_hour(Rest1, Result#{day_of_week => DayOfWeek});
        error              -> error
    end.

%% Parse the day-of-month: 'l'/'L' means the last day of the month,
%% otherwise 1..31 (clamped to the month's length when applied).
parse_day_of_month([Last | Rest], Result)
  when Last =:= $l orelse Last =:= $L ->
    parse_hour(Rest, Result#{day_of_month => last});
parse_day_of_month(Rest, Result) ->
    case date_string_to_int(Rest, 1, 31) of
        {DayOfMonth, Rest1} -> parse_hour(Rest1, Result#{day_of_month => DayOfMonth});
        error               -> error
    end.

%% Read a leading integer in [Min,Max]; returns {Int,Rest} or error.
date_string_to_int(String, Min, Max) ->
    case string:to_integer(String) of
        {Int, Rest} when is_integer(Int) andalso Int >= Min andalso Int =< Max ->
            {Int, Rest};
        _ ->
            error
    end.
%% Delete archive files numbered Count and above (both the plain and the
%% ".gz" form), walking upwards until neither exists.  Used when rotation
%% is disabled (Count = 0) or the archive count was lowered.
maybe_remove_archives(Count,#{file_name:=FileName}=State) ->
    Plain = rot_file_name(FileName, Count, false),
    Gz = rot_file_name(FileName, Count, true),
    PlainInfo = file:read_file_info(Plain, [raw]),
    GzInfo = file:read_file_info(Gz, [raw]),
    case {PlainInfo, GzInfo} of
        {{error,enoent},{error,enoent}} ->
            %% Neither form exists: no higher-numbered archives remain.
            ok;
        _ ->
            _ = file:delete(Plain),
            _ = file:delete(Gz),
            maybe_remove_archives(Count+1, State)
    end.
%% Walk archives .0 .. .(count-1) and make their compression state match
%% the current 'compress' setting (used after a config change).  Note the
%% lookup uses the *opposite* suffix ('not Compress'): an archive found in
%% the wrong form is the one that needs converting.
maybe_update_compress(Count,#{rotation:=#{count:=Count}}) ->
    ok;
maybe_update_compress(N,#{file_name:=FileName,
                          rotation:=#{compress:=Compress}}=State) ->
    Archive = rot_file_name(FileName,N,not Compress),
    case file:read_file_info(Archive,[raw]) of
        {ok,_} when Compress ->
            compress_file(Archive);
        {ok,_} ->
            decompress_file(Archive);
        _ ->
            %% Archive already in the desired form (or missing): skip.
            ok
    end,
    maybe_update_compress(N+1,State).
%% Account for AddSize bytes about to be in the file and rotate when either
%% the size limit is exceeded or a date-based boundary has been crossed.
%% First clause converts a binary into its size; last clause handles states
%% without rotation configured.
maybe_rotate_file(Bin,#{rotation:=_}=State) when is_binary(Bin) ->
    maybe_rotate_file(byte_size(Bin), State);
maybe_rotate_file(AddSize,#{rotation:=#{size:=RotSize,
                                        curr_size:=CurrSize}=Rotation}=State) ->
    {DateRotNeeded, Rotation1} = is_date_based_rotation_needed(Rotation),
    NewSize = CurrSize + AddSize,
    State1 = State#{rotation=>Rotation1#{curr_size=>NewSize}},
    case NewSize > RotSize orelse DateRotNeeded of
        true  -> rotate_file(State1);
        false -> State1
    end;
maybe_rotate_file(_AddSize, State) ->
    State.
%% Check whether a date-based rotation boundary was crossed since the last
%% rotation; returns {NeededBool, Rotation'} with the timestamp refreshed
%% when rotation is due.  The clause without last_rotation_ts is the very
%% first check: record a baseline and report no rotation needed.
is_date_based_rotation_needed(#{last_rotation_ts:=LastTS,
                                on_date:=DateSpec}=Rotation) ->
    Now = rotation_timestamp(),
    case is_date_based_rotation_needed(DateSpec, LastTS, Now) of
        true  -> {true, Rotation#{last_rotation_ts=>Now}};
        false -> {false, Rotation}
    end;
is_date_based_rotation_needed(Rotation) ->
    {false, Rotation#{last_rotation_ts=>rotation_timestamp()}}.
%% is_date_based_rotation_needed(DateSpec, LastRotationDT, CurrentDT):
%% true when the rotation boundary described by DateSpec lies within
%% (LastRotationDT, CurrentDT].  Date/times are calendar {Date,Time} tuples
%% in local time, so plain term comparison orders them chronologically.
%% Fix vs. previous version: removed two leftover io:format/2 debug prints
%% that wrote "TargetDateTime=..." to stderr on every monthly check.
is_date_based_rotation_needed(#{every:=day,hour:=Hour},
                              {Date1,Time1},{Date2,Time2})
  when (Date1<Date2 orelse (Date1=:=Date2 andalso Time1<{Hour,0,0})) andalso
       Time2>={Hour,0,0} ->
    %% Today's boundary (Hour:00:00) was crossed since the last rotation.
    true;
is_date_based_rotation_needed(#{every:=day,hour:=Hour},
                              {Date1,_}=DateTime1,{Date2,Time2}=DateTime2)
  when Date1<Date2 andalso
       Time2<{Hour,0,0} ->
    %% Now is before today's boundary: test yesterday's boundary instead.
    GregDays2 = calendar:date_to_gregorian_days(Date2),
    TargetDate = calendar:gregorian_days_to_date(GregDays2 - 1),
    TargetDateTime = {TargetDate,{Hour,0,0}},
    DateTime1<TargetDateTime andalso DateTime2>=TargetDateTime;
is_date_based_rotation_needed(#{every:=week,day_of_week:=TargetDoW,hour:=Hour},
                              DateTime1,{Date2,_}=DateTime2) ->
    %% Most recent occurrence of TargetDoW (0 = Sunday) at Hour:00:00.
    DoW2 = calendar:day_of_the_week(Date2) rem 7,
    DaysSinceTargetDoW = ((DoW2 - TargetDoW) + 7) rem 7,
    GregDays2 = calendar:date_to_gregorian_days(Date2),
    TargetGregDays = GregDays2 - DaysSinceTargetDoW,
    TargetDate = calendar:gregorian_days_to_date(TargetGregDays),
    TargetDateTime = {TargetDate,{Hour,0,0}},
    DateTime1<TargetDateTime andalso DateTime2>=TargetDateTime;
is_date_based_rotation_needed(#{every:=month,day_of_month:=last,hour:=Hour},
                              DateTime1,{{Year2,Month2,_}=Date2,_}=DateTime2) ->
    %% Most recent "last day of the month": this month's if already
    %% reached, otherwise the previous month's.
    DoMA = calendar:last_day_of_the_month(Year2, Month2),
    DateA = {Year2,Month2,DoMA},
    TargetDate = if
                     DateA>Date2 ->
                         case Month2 - 1 of
                             0 ->
                                 {Year2-1,12,31};
                             MonthB ->
                                 {Year2,MonthB,
                                  calendar:last_day_of_the_month(Year2,MonthB)}
                         end;
                     true ->
                         DateA
                 end,
    TargetDateTime = {TargetDate,{Hour,0,0}},
    DateTime1<TargetDateTime andalso DateTime2>=TargetDateTime;
is_date_based_rotation_needed(#{every:=month,day_of_month:=DoM,hour:=Hour},
                              DateTime1,{{Year2,Month2,_}=Date2,_}=DateTime2) ->
    %% Most recent occurrence of day-of-month DoM, clamped to the month's
    %% actual length (e.g. 31 -> 28 in a non-leap February).
    DateA = {Year2,Month2,adapt_day_of_month(Year2,Month2,DoM)},
    TargetDate = if
                     DateA>Date2 ->
                         case Month2 - 1 of
                             0 ->
                                 {Year2-1,12,31};
                             MonthB ->
                                 {Year2,MonthB,
                                  adapt_day_of_month(Year2,MonthB,DoM)}
                         end;
                     true ->
                         DateA
                 end,
    TargetDateTime = {TargetDate,{Hour,0,0}},
    DateTime1<TargetDateTime andalso DateTime2>=TargetDateTime;
is_date_based_rotation_needed(_,_,_) ->
    false.

%% Clamp Day to the actual number of days in {Year,Month}.
adapt_day_of_month(Year,Month,Day) ->
    LastDay = calendar:last_day_of_the_month(Year,Month),
    erlang:min(Day,LastDay).
%% Perform the actual rotation: flush and close the current file, shift the
%% archive files one step, then reopen a fresh (empty) log file.  Crashes
%% the controller if the file cannot be reopened.
rotate_file(#{file_name:=FileName,modes:=Modes,rotation:=Rotation}=State) ->
    State1 = sync_dev(State),
    _ = delayed_write_close(State),
    rotate_files(FileName,maps:get(count,Rotation),maps:get(compress,Rotation)),
    case file:open(FileName,Modes) of
        {ok,Fd} ->
            {ok,#file_info{inode=INode}} = file:read_file_info(FileName,[raw]),
            CurrTimestamp = rotation_timestamp(),
            %% Reset the byte counter and remember when this rotation
            %% happened for the date-based rotation check.
            State1#{fd=>Fd,inode=>INode,
                    rotation=>Rotation#{curr_size=>0,
                                        last_rotation_ts=>CurrTimestamp}};
        Error ->
            exit({could_not_reopen_file,Error})
    end.
%% Current wall-clock date/time (local timezone) used to time-stamp and
%% compare rotation boundaries.
rotation_timestamp() ->
    Now = erlang:timestamp(),
    calendar:now_to_local_time(Now).
%% Shift the archive chain by one: .(Count-2) -> .(Count-1), ..., live file
%% -> .0 (compressing it if requested).  Count = 0 keeps no archives at
%% all, so the live file is simply deleted.
rotate_files(FileName,0,_Compress) ->
    _ = file:delete(FileName),
    ok;
rotate_files(FileName,1,Compress) ->
    Archive0 = FileName ++ ".0",
    _ = file:rename(FileName, Archive0),
    case Compress of
        true  -> compress_file(Archive0);
        false -> ok
    end,
    ok;
rotate_files(FileName,Count,Compress) ->
    From = rot_file_name(FileName, Count-2, Compress),
    To = rot_file_name(FileName, Count-1, Compress),
    _ = file:rename(From, To),
    rotate_files(FileName, Count-1, Compress).
%% Name of archive number Count for FileName, with an extra ".gz" suffix
%% when the archive is compressed.
rot_file_name(FileName, Count, Compress) ->
    Base = FileName ++ "." ++ integer_to_list(Count),
    case Compress of
        true  -> Base ++ ".gz";
        false -> Base
    end.
%% Gzip-compress FileName into FileName ++ ".gz" and delete the original.
compress_file(FileName) ->
    {ok,In} = file:open(FileName,[read,binary]),
    {ok,Out} = file:open(FileName++".gz",[write]),
    Z = zlib:open(),
    %% WindowBits = 31 selects a gzip (not raw zlib) wrapper; 8 is the
    %% default memory level.
    zlib:deflateInit(Z, default, deflated, 31, 8, default),
    compress_data(Z,In,Out),
    zlib:deflateEnd(Z),
    zlib:close(Z),
    _ = file:close(In),
    _ = file:close(Out),
    _ = file:delete(FileName),
    ok.
%% Stream In through the deflate context Z into Out in 100 kB chunks,
%% finishing the gzip stream on eof.
compress_data(Z,In,Out) ->
    case file:read(In, 100000) of
        {ok, Chunk} ->
            Deflated = zlib:deflate(Z, Chunk),
            _ = file:write(Out, Deflated),
            compress_data(Z, In, Out);
        eof ->
            %% Flush whatever the deflate context still buffers.
            Final = zlib:deflate(Z, <<>>, finish),
            _ = file:write(Out, Final),
            ok
    end.
%% Inverse of compress_file/1: gunzip FileName into the same name without
%% its ".gz" suffix and delete the compressed original.
decompress_file(FileName) ->
    {ok,In} = file:open(FileName,[read,binary]),
    {ok,Out} = file:open(filename:rootname(FileName,".gz"),[write]),
    Z = zlib:open(),
    %% WindowBits = 31: expect a gzip wrapper (matches compress_file/1).
    zlib:inflateInit(Z, 31),
    decompress_data(Z,In,Out),
    zlib:inflateEnd(Z),
    zlib:close(Z),
    _ = file:close(In),
    _ = file:close(Out),
    _ = file:delete(FileName),
    ok.
%% Stream In through the inflate context Z into Out in 1 kB chunks until
%% eof.
decompress_data(Z,In,Out) ->
    case file:read(In, 1000) of
        {ok, Chunk} ->
            Inflated = zlib:inflate(Z, Chunk),
            _ = file:write(Out, Inflated),
            decompress_data(Z, In, Out);
        eof ->
            ok
    end.
%% Report a failed write/filesync to the handler framework, but only when
%% the error differs from the previously recorded result for that
%% operation (write_res / sync_res), so repeated identical failures are
%% reported once.
maybe_notify_error(_Op, ok, _State) ->
    ok;
maybe_notify_error(Op, Result, #{write_res:=WR,sync_res:=SR})
  when (Op==write andalso Result==WR) orelse
       (Op==filesync andalso Result==SR) ->
    %% don't report same error twice
    ok;
maybe_notify_error(Op, Error, #{handler_name:=HandlerName,file_name:=FileName}) ->
    logger_h_common:error_notify({HandlerName,Op,FileName,Error}),
    ok.
%% Monotonic time in milliseconds; used only for interval arithmetic in the
%% file_check logic (maybe_ensure_file/1), never as wall-clock time.
timestamp() ->
    erlang:monotonic_time(millisecond).

View File

@ -0,0 +1,167 @@
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2021 VMware, Inc. or its affiliates. All rights reserved.
%%
-module(rabbit_logger_text_fmt).
-export([format/2]).
%% Logger formatter entry point: render the message, compute the optional
%% "<time> [<level>] <pid>" prefix and colors, and assemble the final,
%% newline-terminated output lines.
format(#{msg := Msg, meta := Meta} = LogEvent, Config) ->
    Colors = pick_color(LogEvent, Config),
    Prefix = format_prefix(LogEvent, Config),
    Rendered = format_msg(Msg, Meta, Config),
    prepend_prefix_to_msg_and_add_color(Prefix, Colors, Rendered, Config).
%% Build the "<time> [<level>] <pid>" prefix, or 'none' when prefixes are
%% disabled in the formatter config.  Assumes the event metadata carries
%% 'time' and 'pid' (both set by logger for regular events).
format_prefix(_, #{prefix := false}) ->
    none;
format_prefix(#{level := Level,
                meta := #{time := Timestamp,
                          pid := Pid}},
              Config) ->
    FormattedTime = format_time(Timestamp, Config),
    FormattedLevel = level_name(Level, Config),
    io_lib:format("~ts [~ts] ~p", [FormattedTime, FormattedLevel, Pid]).
%% Map a logger level atom to the display form selected by the 'level_name'
%% config key: 'full' (the atom itself), 3/4-letter lower/upper-case
%% abbreviations, or the atom again for any other/missing setting.
level_name(Level, #{level_name := Style}) ->
    abbreviated_level_name(Level, Style);
level_name(Level, _) ->
    Level.

%% Look the requested abbreviation up in the per-level table; 'full' and
%% unknown styles fall back to the level atom itself.
abbreviated_level_name(Level, Style) ->
    maps:get(Style, level_name_table(Level), Level).

%% All abbreviation forms for one level.
level_name_table(debug) ->
    #{lc3 => "dbg", uc3 => "DBG", lc4 => "dbug", uc4 => "DBUG"};
level_name_table(info) ->
    #{lc3 => "inf", uc3 => "INF", lc4 => "info", uc4 => "INFO"};
level_name_table(notice) ->
    #{lc3 => "ntc", uc3 => "NTC", lc4 => "noti", uc4 => "NOTI"};
level_name_table(warning) ->
    #{lc3 => "wrn", uc3 => "WRN", lc4 => "warn", uc4 => "WARN"};
level_name_table(error) ->
    #{lc3 => "err", uc3 => "ERR", lc4 => "erro", uc4 => "ERRO"};
level_name_table(critical) ->
    #{lc3 => "crt", uc3 => "CRT", lc4 => "crit", uc4 => "CRIT"};
level_name_table(alert) ->
    #{lc3 => "alt", uc3 => "ALT", lc4 => "alrt", uc4 => "ALRT"};
level_name_table(emergency) ->
    #{lc3 => "emg", uc3 => "EMG", lc4 => "emgc", uc4 => "EMGC"}.
%% Render a microsecond system-time timestamp as an RFC 3339 string in
%% local time, using a space (instead of 'T') between date and time.
format_time(Timestamp, _) ->
    calendar:system_time_to_rfc3339(Timestamp,
                                    [{unit, microsecond},
                                     {time_designator, $\s}]).
%% Normalize the three logger message forms ({string,...}, {report,...}
%% and {Format,Args}) down to formatted iodata.  Clause order matters: the
%% {string,...} and {report,...} tuples would otherwise also match the
%% generic {Format,Args} clause.
format_msg({string, Chardata}, Meta, Config) ->
    format_msg({"~ts", [Chardata]}, Meta, Config);
format_msg({report, Report}, Meta, Config) ->
    %% Reports are first converted to a {Format,Args} pair, then recursed.
    FormattedReport = format_report(Report, Meta, Config),
    format_msg(FormattedReport, Meta, Config);
format_msg({Format, Args}, _, _) ->
    io_lib:format(Format, Args).
%% Turn a logger report into a {Format,Args} pair.  Application-controller
%% and supervisor progress reports get a compact custom rendering; any
%% other report is delegated to its report_cb callback, falling back to
%% logger:format_report/1 when the callback is absent or crashes.
format_report(
  #{label := {application_controller, _}} = Report, Meta, Config) ->
    format_application_progress(Report, Meta, Config);
format_report(
  #{label := {supervisor, progress}} = Report, Meta, Config) ->
    format_supervisor_progress(Report, Meta, Config);
format_report(
  Report, #{report_cb := Cb} = Meta, Config) ->
    try
        %% Per the OTP logger contract, report_cb is either a fun/1
        %% returning {Format,Args} or a fun/2 returning unicode chardata.
        case erlang:fun_info(Cb, arity) of
            {arity, 1} -> Cb(Report);
            {arity, 2} -> {"~ts", [Cb(Report, #{})]}
        end
    catch
        _:_:_ ->
            %% Broken callback: drop it and retry so the default clause
            %% below handles the report.
            format_report(Report, maps:remove(report_cb, Meta), Config)
    end;
format_report(Report, _, _) ->
    logger:format_report(Report).
%% Compact one-line rendering of application-controller progress/exit
%% reports (the report payload is a proplist).
format_application_progress(#{label := {_, progress},
                              report := InternalReport}, _, _) ->
    App = proplists:get_value(application, InternalReport),
    StartedAt = proplists:get_value(started_at, InternalReport),
    {"Application ~w started on ~0p",
     [App, StartedAt]};
format_application_progress(#{label := {_, exit},
                              report := InternalReport}, _, _) ->
    App = proplists:get_value(application, InternalReport),
    ExitReason = proplists:get_value(exited, InternalReport),
    {"Application ~w exited with reason: ~0p",
     [App, ExitReason]}.
%% Compact one-line rendering of supervisor child-start progress reports
%% (report and its 'started' entry are proplists).
format_supervisor_progress(#{report := InternalReport}, _, _) ->
    Sup = proplists:get_value(supervisor, InternalReport),
    StartedInfo = proplists:get_value(started, InternalReport),
    ChildId = proplists:get_value(id, StartedInfo),
    ChildPid = proplists:get_value(pid, StartedInfo),
    ChildMfa = proplists:get_value(mfargs, StartedInfo),
    {"Supervisor ~w: child ~w started (~w): ~0p",
     [Sup, ChildId, ChildPid, ChildMfa]}.
%% Pick {StartSequence, EndSequence} ANSI color codes for a log event:
%% empty strings when coloring is disabled, otherwise a per-level start
%% code and the terminal reset code.
pick_color(_, #{color := false}) ->
    {"", ""};
pick_color(#{level := Level}, #{color := true} = Config) ->
    {level_to_color(Level, Config), "\033[0m"}.

%% ANSI start sequence per level ('info' is intentionally uncolored).
level_to_color(debug, _)     -> "\033[38;5;246m";
level_to_color(info, _)      -> "";
level_to_color(notice, _)    -> "\033[38;5;87m";
level_to_color(warning, _)   -> "\033[38;5;214m";
level_to_color(error, _)     -> "\033[38;5;160m";
level_to_color(critical, _)  -> "\033[1;37m\033[48;5;20m";
level_to_color(alert, _)     -> "\033[1;37m\033[48;5;93m";
level_to_color(emergency, _) -> "\033[1;37m\033[48;5;196m".
%% Split the rendered message into lines and emit each one wrapped in the
%% color start/end sequences and terminated by a newline.  With a prefix,
%% every line (including empty ones) is prefixed; without one, empty lines
%% stay bare.
prepend_prefix_to_msg_and_add_color(
  none, {ColorStart, ColorEnd}, FormattedMsg, Config) ->
    Lines = split_lines(FormattedMsg, Config),
    lists:map(
      fun("")   -> [$\n];
         (Line) -> [ColorStart, Line, ColorEnd, $\n]
      end, Lines);
prepend_prefix_to_msg_and_add_color(
  Prefix, {ColorStart, ColorEnd}, FormattedMsg, Config) ->
    Lines = split_lines(FormattedMsg, Config),
    lists:map(
      fun("")   -> [ColorStart, Prefix, ColorEnd, $\n];
         (Line) -> [ColorStart, Prefix, " ", Line, ColorEnd, $\n]
      end, Lines).

%% Flatten the iodata and split it on newlines (trailing newline yields a
%% final empty line).
split_lines(FormattedMsg, _) ->
    Flat = lists:flatten(FormattedMsg),
    string:split(Flat, [$\n], all).

View File

@ -1,7 +1,10 @@
-module(rabbit_prelaunch).
-include_lib("kernel/include/logger.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("rabbit_common/include/logging.hrl").
-export([run_prelaunch_first_phase/0,
assert_mnesia_is_stopped/0,
get_context/0,
@ -24,6 +27,8 @@
run_prelaunch_first_phase() ->
try
ok = logger:set_process_metadata(
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
do_run()
catch
throw:{error, _} = Error ->
@ -67,26 +72,25 @@ do_run() ->
?assertMatch(#{}, Context0),
%% Setup logging for the prelaunch phase.
ok = rabbit_prelaunch_early_logging:setup_early_logging(Context0, true),
ok = rabbit_prelaunch_early_logging:setup_early_logging(Context0),
IsInitialPass = is_initial_pass(),
case IsInitialPass of
true ->
rabbit_log_prelaunch:debug(""),
rabbit_log_prelaunch:debug(
"== Prelaunch phase [1/2] (initial pass) =="),
rabbit_log_prelaunch:debug("");
?LOG_DEBUG(""),
?LOG_DEBUG("== Prelaunch phase [1/2] (initial pass) =="),
?LOG_DEBUG("");
false ->
rabbit_log_prelaunch:debug(""),
rabbit_log_prelaunch:debug("== Prelaunch phase [1/2] =="),
rabbit_log_prelaunch:debug("")
?LOG_DEBUG(""),
?LOG_DEBUG("== Prelaunch phase [1/2] =="),
?LOG_DEBUG("")
end,
rabbit_env:log_process_env(),
%% Load rabbitmq-env.conf, redo logging setup and continue.
Context1 = rabbit_env:get_context_after_logging_init(Context0),
?assertMatch(#{}, Context1),
ok = rabbit_prelaunch_early_logging:setup_early_logging(Context1, true),
ok = rabbit_prelaunch_early_logging:setup_early_logging(Context1),
rabbit_env:log_process_env(),
%% Complete context now that we have the final environment loaded.
@ -111,7 +115,7 @@ do_run() ->
ok = rabbit_prelaunch_dist:setup(Context),
%% 4. Write PID file.
rabbit_log_prelaunch:debug(""),
?LOG_DEBUG(""),
_ = write_pid_file(Context),
ignore.
@ -138,7 +142,7 @@ get_stop_reason() ->
set_stop_reason(Reason) ->
case get_stop_reason() of
undefined ->
rabbit_log_prelaunch:debug("Set stop reason to: ~p", [Reason]),
?LOG_DEBUG("Set stop reason to: ~p", [Reason]),
persistent_term:put(?PT_KEY_STOP_REASON, Reason);
_ ->
ok
@ -161,7 +165,7 @@ setup_shutdown_func() ->
{ok, {ThisMod, ThisFunc}} ->
ok;
{ok, {ExistingMod, ExistingFunc}} ->
rabbit_log_prelaunch:debug(
?LOG_DEBUG(
"Setting up kernel shutdown function: ~s:~s/1 "
"(chained with ~s:~s/1)",
[ThisMod, ThisFunc, ExistingMod, ExistingFunc]),
@ -170,7 +174,7 @@ setup_shutdown_func() ->
ExistingShutdownFunc),
ok = record_kernel_shutdown_func(ThisMod, ThisFunc);
_ ->
rabbit_log_prelaunch:debug(
?LOG_DEBUG(
"Setting up kernel shutdown function: ~s:~s/1",
[ThisMod, ThisFunc]),
ok = record_kernel_shutdown_func(ThisMod, ThisFunc)
@ -182,7 +186,7 @@ record_kernel_shutdown_func(Mod, Func) ->
[{persistent, true}]).
shutdown_func(Reason) ->
rabbit_log_prelaunch:debug(
?LOG_DEBUG(
"Running ~s:shutdown_func() as part of `kernel` shutdown", [?MODULE]),
Context = get_context(),
remove_pid_file(Context),
@ -195,7 +199,7 @@ shutdown_func(Reason) ->
end.
write_pid_file(#{pid_file := PidFile}) ->
rabbit_log_prelaunch:debug("Writing PID file: ~s", [PidFile]),
?LOG_DEBUG("Writing PID file: ~s", [PidFile]),
case filelib:ensure_dir(PidFile) of
ok ->
OSPid = os:getpid(),
@ -203,13 +207,13 @@ write_pid_file(#{pid_file := PidFile}) ->
ok ->
ok;
{error, Reason} = Error ->
rabbit_log_prelaunch:warning(
?LOG_WARNING(
"Failed to write PID file \"~s\": ~s",
[PidFile, file:format_error(Reason)]),
Error
end;
{error, Reason} = Error ->
rabbit_log_prelaunch:warning(
?LOG_WARNING(
"Failed to create PID file \"~s\" directory: ~s",
[PidFile, file:format_error(Reason)]),
Error
@ -218,10 +222,10 @@ write_pid_file(_) ->
ok.
remove_pid_file(#{pid_file := PidFile, keep_pid_file_on_exit := true}) ->
rabbit_log_prelaunch:debug("Keeping PID file: ~s", [PidFile]),
?LOG_DEBUG("Keeping PID file: ~s", [PidFile]),
ok;
remove_pid_file(#{pid_file := PidFile}) ->
rabbit_log_prelaunch:debug("Deleting PID file: ~s", [PidFile]),
?LOG_DEBUG("Deleting PID file: ~s", [PidFile]),
_ = file:delete(PidFile),
ok;
remove_pid_file(_) ->

View File

@ -1,9 +1,11 @@
-module(rabbit_prelaunch_conf).
-include_lib("kernel/include/file.hrl").
-include_lib("kernel/include/logger.hrl").
-include_lib("stdlib/include/zip.hrl").
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("rabbit_common/include/logging.hrl").
-export([setup/1,
get_config_state/0,
@ -15,8 +17,9 @@
-endif.
setup(Context) ->
rabbit_log_prelaunch:debug(""),
rabbit_log_prelaunch:debug("== Configuration =="),
?LOG_DEBUG(
"\n== Configuration ==",
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
%% TODO: Check if directories/files are inside Mnesia dir.
@ -52,9 +55,10 @@ setup(Context) ->
#{config_files => ConfigFiles,
config_advanced_file => AdvancedConfigFile};
undefined when AdvancedConfigFile =/= undefined ->
rabbit_log_prelaunch:warning(
?LOG_WARNING(
"Using RABBITMQ_ADVANCED_CONFIG_FILE: ~s",
[AdvancedConfigFile]),
[AdvancedConfigFile],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
Config = load_cuttlefish_config_file(Context,
AdditionalConfigFiles,
AdvancedConfigFile),
@ -66,10 +70,10 @@ setup(Context) ->
#{config_files => [],
config_advanced_file => undefined}
end,
ok = override_with_hard_coded_critical_config(),
ok = set_credentials_obfuscation_secret(),
rabbit_log_prelaunch:debug(
"Saving config state to application env: ~p", [State]),
?LOG_DEBUG(
"Saving config state to application env: ~p", [State],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
store_config_state(State).
store_config_state(ConfigState) ->
@ -83,7 +87,8 @@ get_config_state() ->
%% -------------------------------------------------------------------
set_default_config() ->
rabbit_log_prelaunch:debug("Setting default config"),
?LOG_DEBUG("Setting default config",
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
Config = [
{ra,
[
@ -99,6 +104,8 @@ set_default_config() ->
%% goes down it is still immediately detected
{poll_interval, 5000}
]},
{syslog,
[{app_name, "rabbitmq-server"}]},
{sysmon_handler,
[{process_limit, 100},
{port_limit, 100},
@ -126,15 +133,18 @@ find_actual_main_config_file(#{main_config_file := File}) ->
true ->
case filelib:is_regular(NewFormatFile) of
true ->
rabbit_log_prelaunch:warning(
?LOG_WARNING(
"Both old (.config) and new (.conf) format "
"config files exist."),
rabbit_log_prelaunch:warning(
"config files exist.",
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
?LOG_WARNING(
"Using the old format config file: ~s",
[OldFormatFile]),
rabbit_log_prelaunch:warning(
[OldFormatFile],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
?LOG_WARNING(
"Please update your config files to the new "
"format and remove the old file."),
"format and remove the old file.",
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
ok;
false ->
ok
@ -193,15 +203,18 @@ generate_config_from_cuttlefish_files(Context,
SchemaFiles = find_cuttlefish_schemas(Context),
case SchemaFiles of
[] ->
rabbit_log_prelaunch:error(
"No configuration schema found~n", []),
?LOG_ERROR(
"No configuration schema found", [],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
throw({error, no_configuration_schema_found});
_ ->
rabbit_log_prelaunch:debug(
"Configuration schemas found:~n", []),
?LOG_DEBUG(
"Configuration schemas found:~n", [],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
lists:foreach(
fun(SchemaFile) ->
rabbit_log_prelaunch:debug(" - ~ts", [SchemaFile])
?LOG_DEBUG(" - ~ts", [SchemaFile],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH})
end,
SchemaFiles),
ok
@ -209,37 +222,44 @@ generate_config_from_cuttlefish_files(Context,
Schema = cuttlefish_schema:files(SchemaFiles),
%% Load configuration.
rabbit_log_prelaunch:debug(
"Loading configuration files (Cuttlefish based):"),
?LOG_DEBUG(
"Loading configuration files (Cuttlefish based):",
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
lists:foreach(
fun(ConfigFile) ->
rabbit_log_prelaunch:debug(" - ~ts", [ConfigFile])
?LOG_DEBUG(" - ~ts", [ConfigFile],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH})
end, ConfigFiles),
case cuttlefish_conf:files(ConfigFiles) of
{errorlist, Errors} ->
rabbit_log_prelaunch:error("Error parsing configuration:"),
?LOG_ERROR("Error parsing configuration:",
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
lists:foreach(
fun(Error) ->
rabbit_log_prelaunch:error(
?LOG_ERROR(
" - ~ts",
[cuttlefish_error:xlate(Error)])
[cuttlefish_error:xlate(Error)],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH})
end, Errors),
rabbit_log_prelaunch:error(
"Are these files using the Cuttlefish format?"),
?LOG_ERROR(
"Are these files using the Cuttlefish format?",
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
throw({error, failed_to_parse_configuration_file});
Config0 ->
%% Finalize configuration, based on the schema.
Config = case cuttlefish_generator:map(Schema, Config0) of
{error, Phase, {errorlist, Errors}} ->
%% TODO
rabbit_log_prelaunch:error(
?LOG_ERROR(
"Error preparing configuration in phase ~ts:",
[Phase]),
[Phase],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
lists:foreach(
fun(Error) ->
rabbit_log_prelaunch:error(
?LOG_ERROR(
" - ~ts",
[cuttlefish_error:xlate(Error)])
[cuttlefish_error:xlate(Error)],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH})
end, Errors),
throw(
{error, failed_to_prepare_configuration});
@ -253,8 +273,9 @@ generate_config_from_cuttlefish_files(Context,
find_cuttlefish_schemas(Context) ->
Apps = list_apps(Context),
rabbit_log_prelaunch:debug(
"Looking up configuration schemas in the following applications:"),
?LOG_DEBUG(
"Looking up configuration schemas in the following applications:",
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
find_cuttlefish_schemas(Apps, []).
find_cuttlefish_schemas([App | Rest], AllSchemas) ->
@ -281,9 +302,10 @@ list_apps1([Dir | Rest], Apps) ->
Apps1 = lists:umerge(Apps, lists:sort(NewApps)),
list_apps1(Rest, Apps1);
{error, Reason} ->
rabbit_log_prelaunch:debug(
?LOG_DEBUG(
"Failed to list directory \"~ts\" content: ~ts",
[Dir, file:format_error(Reason)]),
[Dir, file:format_error(Reason)],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
list_apps1(Rest, Apps)
end;
list_apps1([], AppInfos) ->
@ -299,17 +321,19 @@ list_schemas_in_app(App) ->
true ->
case code:priv_dir(App) of
{error, bad_name} ->
rabbit_log_prelaunch:debug(
" [ ] ~s (no readable priv dir)", [App]),
?LOG_DEBUG(
" [ ] ~s (no readable priv dir)", [App],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
[];
PrivDir ->
SchemaDir = filename:join([PrivDir, "schema"]),
do_list_schemas_in_app(App, SchemaDir)
end;
Reason1 ->
rabbit_log_prelaunch:debug(
?LOG_DEBUG(
" [ ] ~s (failed to load application: ~p)",
[App, Reason1]),
[App, Reason1],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
[]
end,
case Unload of
@ -322,74 +346,71 @@ list_schemas_in_app(App) ->
do_list_schemas_in_app(App, SchemaDir) ->
case erl_prim_loader:list_dir(SchemaDir) of
{ok, Files} ->
rabbit_log_prelaunch:debug(" [x] ~s", [App]),
?LOG_DEBUG(" [x] ~s", [App],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
[filename:join(SchemaDir, File)
|| [C | _] = File <- Files,
C =/= $.];
error ->
rabbit_log_prelaunch:debug(
" [ ] ~s (no readable schema dir)", [App]),
?LOG_DEBUG(
" [ ] ~s (no readable schema dir)", [App],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
[]
end.
override_with_advanced_config(Config, undefined) ->
Config;
override_with_advanced_config(Config, AdvancedConfigFile) ->
rabbit_log_prelaunch:debug(
?LOG_DEBUG(
"Override with advanced configuration file \"~ts\"",
[AdvancedConfigFile]),
[AdvancedConfigFile],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
case file:consult(AdvancedConfigFile) of
{ok, [AdvancedConfig]} ->
cuttlefish_advanced:overlay(Config, AdvancedConfig);
{ok, OtherTerms} ->
rabbit_log_prelaunch:error(
?LOG_ERROR(
"Failed to load advanced configuration file \"~ts\", "
"incorrect format: ~p",
[AdvancedConfigFile, OtherTerms]),
[AdvancedConfigFile, OtherTerms],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
throw({error, failed_to_parse_advanced_configuration_file});
{error, Reason} ->
rabbit_log_prelaunch:error(
?LOG_ERROR(
"Failed to load advanced configuration file \"~ts\": ~ts",
[AdvancedConfigFile, file:format_error(Reason)]),
[AdvancedConfigFile, file:format_error(Reason)],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
throw({error, failed_to_read_advanced_configuration_file})
end.
override_with_hard_coded_critical_config() ->
rabbit_log_prelaunch:debug("Override with hard-coded critical config"),
Config = [
{ra,
%% Make Ra use a custom logger that dispatches to lager
%% instead of the default OTP logger
[{logger_module, rabbit_log_ra_shim}]},
{osiris,
[{logger_module, rabbit_log_osiris_shim}]}
],
apply_erlang_term_based_config(Config).
apply_erlang_term_based_config([{_, []} | Rest]) ->
apply_erlang_term_based_config(Rest);
apply_erlang_term_based_config([{App, Vars} | Rest]) ->
rabbit_log_prelaunch:debug(" Applying configuration for '~s':", [App]),
?LOG_DEBUG(" Applying configuration for '~s':", [App],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
ok = apply_app_env_vars(App, Vars),
apply_erlang_term_based_config(Rest);
apply_erlang_term_based_config([]) ->
ok.
apply_app_env_vars(App, [{Var, Value} | Rest]) ->
rabbit_log_prelaunch:debug(" - ~s = ~p", [Var, Value]),
?LOG_DEBUG(" - ~s = ~p", [Var, Value],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
ok = application:set_env(App, Var, Value, [{persistent, true}]),
apply_app_env_vars(App, Rest);
apply_app_env_vars(_, []) ->
ok.
set_credentials_obfuscation_secret() ->
rabbit_log_prelaunch:debug(
?LOG_DEBUG(
"Refreshing credentials obfuscation configuration from env: ~p",
[application:get_all_env(credentials_obfuscation)]),
[application:get_all_env(credentials_obfuscation)],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
ok = credentials_obfuscation:refresh_config(),
CookieBin = rabbit_data_coercion:to_binary(erlang:get_cookie()),
rabbit_log_prelaunch:debug(
"Setting credentials obfuscation secret to '~s'", [CookieBin]),
?LOG_DEBUG(
"Setting credentials obfuscation secret to '~s'", [CookieBin],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
ok = credentials_obfuscation:set_secret(CookieBin).
%% -------------------------------------------------------------------
@ -397,7 +418,8 @@ set_credentials_obfuscation_secret() ->
%% -------------------------------------------------------------------
decrypt_config(Apps) ->
rabbit_log_prelaunch:debug("Decoding encrypted config values (if any)"),
?LOG_DEBUG("Decoding encrypted config values (if any)",
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
ConfigEntryDecoder = application:get_env(rabbit, config_entry_decoder, []),
decrypt_config(Apps, ConfigEntryDecoder).
@ -415,8 +437,9 @@ decrypt_app(App, [{Key, Value} | Tail], Algo) ->
{Value, Algo1} ->
Algo1;
{NewValue, Algo1} ->
rabbit_log_prelaunch:debug(
"Value of `~s` decrypted", [Key]),
?LOG_DEBUG(
"Value of `~s` decrypted", [Key],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
ok = application:set_env(App, Key, NewValue,
[{persistent, true}]),
Algo1
@ -474,7 +497,8 @@ config_entry_decoder_to_algo(ConfigEntryDecoder) ->
end.
get_passphrase(ConfigEntryDecoder) ->
rabbit_log_prelaunch:debug("Getting encrypted config passphrase"),
?LOG_DEBUG("Getting encrypted config passphrase",
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
case proplists:get_value(passphrase, ConfigEntryDecoder) of
prompt ->
IoDevice = get_input_iodevice(),

View File

@ -1,12 +1,19 @@
-module(rabbit_prelaunch_dist).
-include_lib("kernel/include/logger.hrl").
-include_lib("rabbit_common/include/logging.hrl").
-export([setup/1]).
setup(#{nodename := Node, nodename_type := NameType} = Context) ->
rabbit_log_prelaunch:debug(""),
rabbit_log_prelaunch:debug("== Erlang distribution =="),
rabbit_log_prelaunch:debug("Rqeuested node name: ~s (type: ~s)",
[Node, NameType]),
?LOG_DEBUG(
"~n== Erlang distribution ==", [],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
?LOG_DEBUG(
"Requested node name: ~s (type: ~s)",
[Node, NameType],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
case node() of
nonode@nohost ->
ok = rabbit_nodes_common:ensure_epmd(),
@ -16,8 +23,9 @@ setup(#{nodename := Node, nodename_type := NameType} = Context) ->
ok = do_setup(Context);
Node ->
rabbit_log_prelaunch:debug(
"Erlang distribution already running", []),
?LOG_DEBUG(
"Erlang distribution already running", [],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
ok;
Unexpected ->
throw({error, {erlang_dist_running_with_unexpected_nodename,
@ -26,7 +34,9 @@ setup(#{nodename := Node, nodename_type := NameType} = Context) ->
ok.
do_setup(#{nodename := Node, nodename_type := NameType}) ->
rabbit_log_prelaunch:debug("Starting Erlang distribution", []),
?LOG_DEBUG(
"Starting Erlang distribution",
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
case application:get_env(kernel, net_ticktime) of
{ok, Ticktime} when is_integer(Ticktime) andalso Ticktime >= 1 ->
%% The value passed to net_kernel:start/1 is the
@ -43,8 +53,9 @@ do_setup(#{nodename := Node, nodename_type := NameType}) ->
%% Check whether a node with the same name is already running
duplicate_node_check(#{split_nodename := {NodeName, NodeHost}}) ->
rabbit_log_prelaunch:debug(
"Checking if node name ~s is already used", [NodeName]),
?LOG_DEBUG(
"Checking if node name ~s is already used", [NodeName],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
PrelaunchName = rabbit_nodes_common:make(
{NodeName ++ "_prelaunch_" ++ os:getpid(),
"localhost"}),
@ -63,8 +74,9 @@ duplicate_node_check(#{split_nodename := {NodeName, NodeHost}}) ->
end.
dist_port_range_check(#{erlang_dist_tcp_port := DistTcpPort}) ->
rabbit_log_prelaunch:debug(
"Checking if TCP port ~b is valid", [DistTcpPort]),
?LOG_DEBUG(
"Checking if TCP port ~b is valid", [DistTcpPort],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
case DistTcpPort of
_ when DistTcpPort < 1 orelse DistTcpPort > 65535 ->
throw({error, {invalid_dist_port_range, DistTcpPort}});
@ -74,8 +86,9 @@ dist_port_range_check(#{erlang_dist_tcp_port := DistTcpPort}) ->
dist_port_use_check(#{split_nodename := {_, NodeHost},
erlang_dist_tcp_port := DistTcpPort}) ->
rabbit_log_prelaunch:debug(
"Checking if TCP port ~b is available", [DistTcpPort]),
?LOG_DEBUG(
"Checking if TCP port ~b is available", [DistTcpPort],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
dist_port_use_check_ipv4(NodeHost, DistTcpPort).
dist_port_use_check_ipv4(NodeHost, Port) ->

View File

@ -1,60 +1,126 @@
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2019-2021 VMware, Inc. or its affiliates. All rights reserved.
%%
-module(rabbit_prelaunch_early_logging).
-include_lib("rabbit_common/include/rabbit_log.hrl").
-include_lib("kernel/include/logger.hrl").
-export([setup_early_logging/2,
-include_lib("rabbit_common/include/logging.hrl").
-export([setup_early_logging/1,
default_formatter/1,
default_console_formatter/1,
default_file_formatter/1,
default_syslog_formatter/1,
enable_quick_dbg/1,
use_colored_logging/0,
use_colored_logging/1,
list_expected_sinks/0]).
use_colored_logging/1]).
-export([filter_log_event/2]).
setup_early_logging(#{log_levels := undefined} = Context,
LagerEventToStdout) ->
setup_early_logging(Context#{log_levels => get_default_log_level()},
LagerEventToStdout);
setup_early_logging(Context, LagerEventToStdout) ->
Configured = lists:member(
lager_util:make_internal_sink_name(rabbit_log_prelaunch),
lager:list_all_sinks()),
case Configured of
-define(CONFIGURED_KEY, {?MODULE, configured}).
setup_early_logging(#{log_levels := undefined} = Context) ->
setup_early_logging(Context#{log_levels => get_default_log_level()});
setup_early_logging(Context) ->
case is_configured() of
true -> ok;
false -> do_setup_early_logging(Context, LagerEventToStdout)
false -> do_setup_early_logging(Context)
end.
get_default_log_level() ->
#{"prelaunch" => warning}.
#{"prelaunch" => notice}.
do_setup_early_logging(#{log_levels := LogLevels} = Context,
LagerEventToStdout) ->
redirect_logger_messages_to_lager(),
Colored = use_colored_logging(Context),
application:set_env(lager, colored, Colored),
ConsoleBackend = lager_console_backend,
case LagerEventToStdout of
true ->
GLogLevel = case LogLevels of
#{global := Level} -> Level;
_ -> warning
end,
_ = lager_app:start_handler(
lager_event, ConsoleBackend, [{level, GLogLevel}]),
ok;
false ->
ok
end,
lists:foreach(
fun(Sink) ->
CLogLevel = get_log_level(LogLevels, Sink),
lager_app:configure_sink(
Sink,
[{handlers, [{ConsoleBackend, [{level, CLogLevel}]}]}])
end, list_expected_sinks()),
%% Configures the `default' logger handler for early (pre-boot) logging:
%% installs the RabbitMQ category filter derived from `LogLevels', then
%% applies the main handler configuration (formatter selection) built
%% from the prelaunch context.
do_setup_early_logging(#{log_levels := LogLevels} = Context) ->
    add_rmqlog_filter(LogLevels),
    ok = logger:update_handler_config(
           default, main_handler_config(Context)).
%% Returns true once add_rmqlog_filter/1 has completed in this VM. The
%% flag is stored in a persistent_term (see ?CONFIGURED_KEY) so the
%% setup is done at most once per node.
is_configured() ->
    persistent_term:get(?CONFIGURED_KEY, false).
%% Builds the filter configuration from `LogLevels' — a map of category
%% name (string or atom) to log level — and installs it as a filter on
%% the `default' handler. Boolean values in the map are non-level flags
%% and are skipped. A `global' fallback level is added when the caller
%% did not provide one. The primary logger level is opened up to `all'
%% so that all filtering happens in filter_log_event/2 itself.
add_rmqlog_filter(LogLevels) ->
    add_erlang_specific_filters(LogLevels),
    FilterConfig0 = lists:foldl(
                      fun
                          ({_, V}, FC) when is_boolean(V) -> FC;
                          ({K, V}, FC) when is_atom(K) -> FC#{K => V};
                          %% NOTE(review): string keys are converted with
                          %% list_to_atom/1; assumes they come from the
                          %% node's own configuration, not untrusted
                          %% input — confirm the source.
                          ({K, V}, FC) -> FC#{list_to_atom(K) => V}
                      end, #{}, maps:to_list(LogLevels)),
    FilterConfig1 = case maps:is_key(global, FilterConfig0) of
                        true -> FilterConfig0;
                        false -> FilterConfig0#{global => ?DEFAULT_LOG_LEVEL}
                    end,
    ok = logger:add_handler_filter(
           default, ?FILTER_NAME, {fun filter_log_event/2, FilterConfig1}),
    ok = logger:set_primary_config(level, all),
    %% Mark early logging as configured (read back by is_configured/0).
    ok = persistent_term:put(?CONFIGURED_KEY, true).
%% Installs Erlang/OTP-specific filters on the `default' handler;
%% currently only one, which drops supervisor/application progress
%% reports. The return value of logger:add_handler_filter/3 is
%% deliberately ignored — presumably because the filter may already be
%% installed from a previous pass (TODO confirm).
add_erlang_specific_filters(_) ->
    _ = logger:add_handler_filter(
          default, progress_reports, {fun logger_filters:progress/2, stop}),
    ok.
redirect_logger_messages_to_lager() ->
io:format(standard_error, "Configuring logger redirection~n", []),
ok = logger:add_handler(rabbit_log, rabbit_log, #{}),
ok = logger:set_primary_config(level, all).
%% logger filter callback installed by add_rmqlog_filter/1. Maps a log
%% event's domain to a category name, looks up that category's minimum
%% level in FilterConfig (see get_min_level/2) and keeps or drops the
%% event accordingly.
%%
%% NOTE: clause order matters — the ?RMQLOG_DOMAIN_GLOBAL and
%% [?RMQLOG_SUPER_DOMAIN_NAME, CatName | _] clauses must come before the
%% generic [CatName | _] clause, which would otherwise match them too.
filter_log_event(
  #{meta := #{domain := ?RMQLOG_DOMAIN_GLOBAL}} = LogEvent,
  FilterConfig) ->
    MinLevel = get_min_level(global, FilterConfig),
    do_filter_log_event(LogEvent, MinLevel);
%% RabbitMQ sub-domain: the second element is the category.
filter_log_event(
  #{meta := #{domain := [?RMQLOG_SUPER_DOMAIN_NAME, CatName | _]}} = LogEvent,
  FilterConfig) ->
    MinLevel = get_min_level(CatName, FilterConfig),
    do_filter_log_event(LogEvent, MinLevel);
%% Non-RabbitMQ domain (e.g. OTP's own): first element is the category.
filter_log_event(
  #{meta := #{domain := [CatName | _]}} = LogEvent,
  FilterConfig) ->
    MinLevel = get_min_level(CatName, FilterConfig),
    do_filter_log_event(LogEvent, MinLevel);
%% No domain in the event metadata: fall back to the global level.
filter_log_event(LogEvent, FilterConfig) ->
    MinLevel = get_min_level(global, FilterConfig),
    do_filter_log_event(LogEvent, MinLevel).
%% Returns the minimum log level configured for category `CatName',
%% falling back to the `global' entry, and `none' when no global level
%% is set either (`none' makes do_filter_log_event/2 drop the event).
get_min_level(global, FilterConfig) ->
    maps:get(global, FilterConfig, none);
get_min_level(CatName, FilterConfig) ->
    %% Single map pattern match instead of maps:is_key/2 followed by
    %% maps:get/2 (one lookup instead of two, same result).
    case FilterConfig of
        #{CatName := MinLevel} -> MinLevel;
        _ -> get_min_level(global, FilterConfig)
    end.
%% Decides whether a log event passes the threshold computed by
%% get_min_level/2: returns `stop' to drop the event, or the event
%% itself to keep it.
do_filter_log_event(_LogEvent, none) ->
    %% `none' mutes the category entirely.
    stop;
do_filter_log_event(#{level := EventLevel} = LogEvent, Threshold) ->
    case logger:compare_levels(EventLevel, Threshold) of
        lt -> stop;
        eq -> LogEvent;
        gt -> LogEvent
    end.
%% Builds the configuration map applied to the `default' logger handler:
%% log everything the rmqlog filter lets through, rendered with the
%% context-appropriate formatter.
main_handler_config(Context) ->
    Formatter = default_formatter(Context),
    #{filter_default => log,
      formatter => Formatter}.
%% Picks the formatter module and its configuration for the context:
%% the JSON formatter when `log_levels' contains `json => true',
%% otherwise the text formatter, colored or not depending on the
%% environment (see use_colored_logging/1).
default_formatter(#{log_levels := #{json := true}}) ->
    {rabbit_logger_json_fmt, #{}};
default_formatter(Context) ->
    Color = use_colored_logging(Context),
    {rabbit_logger_text_fmt, #{color => Color}}.
%% Console output uses the plain default formatter (colors permitted).
default_console_formatter(Context) ->
    default_formatter(Context).
%% Log files never get ANSI color escape sequences, hence the override
%% of `output_supports_colors' before delegating to default_formatter/1.
default_file_formatter(Context) ->
    default_formatter(Context#{output_supports_colors => false}).
%% Syslog reuses the file formatter; for the text formatter the per-line
%% prefix is disabled (syslog prepends its own metadata), while the JSON
%% formatter configuration is passed through unchanged.
default_syslog_formatter(Context) ->
    case default_file_formatter(Context) of
        {rabbit_logger_text_fmt = Mod, Config} ->
            {Mod, Config#{prefix => false}};
        {rabbit_logger_json_fmt, _} = ModAndConfig ->
            ModAndConfig
    end.
%% Convenience wrapper: decides on colored output from the current
%% prelaunch context.
use_colored_logging() ->
    use_colored_logging(rabbit_prelaunch:get_context()).
@ -65,45 +131,6 @@ use_colored_logging(#{log_levels := #{color := true},
use_colored_logging(_) ->
false.
list_expected_sinks() ->
Key = {?MODULE, lager_extra_sinks},
case persistent_term:get(Key, undefined) of
undefined ->
CompileOptions = proplists:get_value(options,
module_info(compile),
[]),
AutoList = [lager_util:make_internal_sink_name(M)
|| M <- proplists:get_value(lager_extra_sinks,
CompileOptions, [])],
List = case lists:member(?LAGER_SINK, AutoList) of
true -> AutoList;
false -> [?LAGER_SINK | AutoList]
end,
%% Store the list in the application environment. If this
%% module is later cover-compiled, the compile option will
%% be lost, so we will be able to retrieve the list from the
%% application environment.
persistent_term:put(Key, List),
List;
List ->
List
end.
sink_to_category(Sink) when is_atom(Sink) ->
re:replace(
atom_to_list(Sink),
"^rabbit_log_(.+)_lager_event$",
"\\1",
[{return, list}]).
get_log_level(LogLevels, Sink) ->
Category = sink_to_category(Sink),
case LogLevels of
#{Category := Level} -> Level;
#{global := Level} -> Level;
_ -> warning
end.
enable_quick_dbg(#{dbg_output := Output, dbg_mods := Mods}) ->
case Output of
stdout -> {ok, _} = dbg:tracer(),

View File

@ -1,25 +1,32 @@
-module(rabbit_prelaunch_erlang_compat).
-include_lib("kernel/include/logger.hrl").
-include_lib("rabbit_common/include/logging.hrl").
-export([check/1]).
-define(OTP_MINIMUM, "23.0").
-define(ERTS_MINIMUM, "11.1").
check(_Context) ->
rabbit_log_prelaunch:debug(""),
rabbit_log_prelaunch:debug("== Erlang/OTP compatibility check =="),
?LOG_DEBUG(
"~n== Erlang/OTP compatibility check ==", [],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
ERTSVer = erlang:system_info(version),
OTPRel = rabbit_misc:otp_release(),
rabbit_log_prelaunch:debug(
"Requiring: Erlang/OTP ~s (ERTS ~s)", [?OTP_MINIMUM, ?ERTS_MINIMUM]),
rabbit_log_prelaunch:debug(
"Running: Erlang/OTP ~s (ERTS ~s)", [OTPRel, ERTSVer]),
?LOG_DEBUG(
"Requiring: Erlang/OTP ~s (ERTS ~s)~n"
"Running: Erlang/OTP ~s (ERTS ~s)",
[?OTP_MINIMUM, ?ERTS_MINIMUM, OTPRel, ERTSVer],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
case rabbit_misc:version_compare(?ERTS_MINIMUM, ERTSVer, lte) of
true when ?ERTS_MINIMUM =/= ERTSVer ->
rabbit_log_prelaunch:debug(
"Erlang/OTP version requirement satisfied"),
?LOG_DEBUG(
"Erlang/OTP version requirement satisfied", [],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
ok;
true when ?ERTS_MINIMUM =:= ERTSVer andalso ?OTP_MINIMUM =< OTPRel ->
%% When a critical regression or bug is found, a new OTP
@ -35,7 +42,7 @@ check(_Context) ->
"This RabbitMQ version cannot run on Erlang ~s (erts ~s): "
"minimum required version is ~s (erts ~s)",
Args = [OTPRel, ERTSVer, ?OTP_MINIMUM, ?ERTS_MINIMUM],
rabbit_log_prelaunch:error(Msg, Args),
?LOG_ERROR(Msg, Args, #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
%% Also print to stderr to make this more visible
io:format(standard_error, "Error: " ++ Msg ++ "~n", Args),

View File

@ -1,5 +1,9 @@
-module(rabbit_prelaunch_errors).
-include_lib("kernel/include/logger.hrl").
-include_lib("rabbit_common/include/logging.hrl").
-export([format_error/1,
format_exception/3,
log_error/1,
@ -94,9 +98,19 @@ log_exception(Class, Exception, Stacktrace) ->
log_message(Message).
format_exception(Class, Exception, Stacktrace) ->
StacktraceStrs = [case proplists:get_value(line, Props) of
undefined ->
io_lib:format(" ~ts:~ts/~b",
[Mod, Fun, Arity]);
Line ->
io_lib:format(" ~ts:~ts/~b, line ~b",
[Mod, Fun, Arity, Line])
end
|| {Mod, Fun, Arity, Props} <- Stacktrace],
ExceptionStr = io_lib:format("~ts:~0p", [Class, Exception]),
rabbit_misc:format(
"Exception during startup:~n~s",
[lager:pr_stacktrace(Stacktrace, {Class, Exception})]).
"Exception during startup:~n~n~s~n~n~s",
[ExceptionStr, string:join(StacktraceStrs, "\n")]).
log_message(Message) ->
Lines = string:split(
@ -105,9 +119,11 @@ log_message(Message) ->
?BOOT_FAILED_FOOTER,
[$\n],
all),
?LOG_ERROR(
"~s", [string:join(Lines, "\n")],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
lists:foreach(
fun(Line) ->
rabbit_log_prelaunch:error("~s", [Line]),
io:format(standard_error, "~s~n", [Line])
end, Lines),
timer:sleep(1000),

View File

@ -69,13 +69,13 @@ handle_event(Signal, State) ->
%% which should stop RabbitMQ.
%
%#{Signal := stop} ->
% error_logger:info_msg(
% "~s received - shutting down~n",
% logger:info(
% "~s received - shutting down",
% [string:uppercase(atom_to_list(Signal))]),
% ok = init:stop();
_ ->
error_logger:info_msg(
"~s received - unhandled signal~n",
logger:info(
"~s received - unhandled signal",
[string:uppercase(atom_to_list(Signal))])
end,
{ok, State}.

View File

@ -0,0 +1,212 @@
-module(rabbit_logger_std_h_SUITE).
-include_lib("common_test/include/ct.hrl").
-include_lib("eunit/include/eunit.hrl").
-export([all/0,
groups/0,
init_per_suite/2,
end_per_suite/2,
init_per_group/2,
end_per_group/2,
init_per_testcase/2,
end_per_testcase/2,
every_day_rotation_is_detected/1,
every_week_rotation_is_detected/1,
every_month_rotation_is_detected/1
]).
%% Common Test entry point: every case lives in the parallel_tests group.
all() ->
    [{group, parallel_tests}].
%% One group running all three rotation-detection cases in parallel.
groups() ->
    Cases = [every_day_rotation_is_detected,
             every_week_rotation_is_detected,
             every_month_rotation_is_detected],
    [{parallel_tests, [parallel], Cases}].
%% Pass-through Common Test callbacks: no per-suite, per-group or
%% per-testcase setup is needed for these pure-function tests.
%%
%% NOTE(review): Common Test calls init_per_suite/1 and end_per_suite/1
%% (arity 1); these arity-2 versions will never be invoked by ct —
%% confirm whether the extra first argument is intentional.
init_per_suite(_, Config) -> Config.
end_per_suite(_, Config) -> Config.
init_per_group(_, Config) -> Config.
end_per_group(_, Config) -> Config.
init_per_testcase(_, Config) -> Config.
end_per_testcase(_, Config) -> Config.
%% Checks is_date_based_rotation_needed/3 for an "every day at 12:00"
%% policy. Arguments are: the rotation spec, the time of the previous
%% rotation and the current time; rotation is expected exactly when the
%% configured hour boundary lies between the two timestamps.
every_day_rotation_is_detected(_) ->
    %% Same day, both timestamps before 12:00: no rotation yet.
    ?assertNot(
       rabbit_logger_std_h:is_date_based_rotation_needed(
         #{every => day, hour => 12},
         {{2021, 01, 15}, {10, 00, 00}},
         {{2021, 01, 15}, {11, 00, 00}})),
    %% Current time reaches or passes 12:00 on the same day: rotate.
    ?assert(
       rabbit_logger_std_h:is_date_based_rotation_needed(
         #{every => day, hour => 12},
         {{2021, 01, 15}, {10, 00, 00}},
         {{2021, 01, 15}, {12, 00, 00}})),
    ?assert(
       rabbit_logger_std_h:is_date_based_rotation_needed(
         #{every => day, hour => 12},
         {{2021, 01, 15}, {10, 00, 00}},
         {{2021, 01, 15}, {13, 00, 00}})),
    ?assert(
       rabbit_logger_std_h:is_date_based_rotation_needed(
         #{every => day, hour => 12},
         {{2021, 01, 15}, {11, 00, 00}},
         {{2021, 01, 15}, {13, 00, 00}})),
    %% Previous rotation already at 12:00 today: nothing to do at 13:00.
    ?assertNot(
       rabbit_logger_std_h:is_date_based_rotation_needed(
         #{every => day, hour => 12},
         {{2021, 01, 15}, {12, 00, 00}},
         {{2021, 01, 15}, {13, 00, 00}})),
    %% A full day elapsed since the previous 12:00 rotation: rotate.
    ?assert(
       rabbit_logger_std_h:is_date_based_rotation_needed(
         #{every => day, hour => 12},
         {{2021, 01, 14}, {12, 00, 00}},
         {{2021, 01, 15}, {12, 00, 00}})),
    %% Not quite a full day (only up to 11:00 the next day): no rotation.
    ?assertNot(
       rabbit_logger_std_h:is_date_based_rotation_needed(
         #{every => day, hour => 12},
         {{2021, 01, 14}, {12, 00, 00}},
         {{2021, 01, 15}, {11, 00, 00}})),
    %% Months elapsed since the previous rotation: rotate regardless of
    %% the current time of day.
    ?assert(
       rabbit_logger_std_h:is_date_based_rotation_needed(
         #{every => day, hour => 12},
         {{2020, 11, 15}, {12, 00, 00}},
         {{2021, 01, 15}, {11, 00, 00}})),
    ?assert(
       rabbit_logger_std_h:is_date_based_rotation_needed(
         #{every => day, hour => 12},
         {{2020, 11, 15}, {12, 00, 00}},
         {{2021, 01, 15}, {12, 00, 00}})).
%% Checks the "every week on day-of-week 3 at 12:00" policy. In these
%% fixtures 2021-01-13 falls on day-of-week 3; rotation is expected when
%% that weekly 12:00 boundary lies between the two timestamps.
every_week_rotation_is_detected(_) ->
    %% Day before the boundary day: no rotation yet.
    ?assertNot(
       rabbit_logger_std_h:is_date_based_rotation_needed(
         #{every => week, day_of_week => 3, hour => 12},
         {{2021, 01, 11}, {12, 00, 00}},
         {{2021, 01, 12}, {12, 00, 00}})),
    %% Boundary day 12:00 reached (or passed): rotate.
    ?assert(
       rabbit_logger_std_h:is_date_based_rotation_needed(
         #{every => week, day_of_week => 3, hour => 12},
         {{2021, 01, 11}, {12, 00, 00}},
         {{2021, 01, 13}, {12, 00, 00}})),
    ?assert(
       rabbit_logger_std_h:is_date_based_rotation_needed(
         #{every => week, day_of_week => 3, hour => 12},
         {{2021, 01, 11}, {12, 00, 00}},
         {{2021, 01, 14}, {12, 00, 00}})),
    %% Previous rotation on the boundary itself: nothing until next week.
    ?assertNot(
       rabbit_logger_std_h:is_date_based_rotation_needed(
         #{every => week, day_of_week => 3, hour => 12},
         {{2021, 01, 13}, {12, 00, 00}},
         {{2021, 01, 14}, {12, 00, 00}})),
    ?assertNot(
       rabbit_logger_std_h:is_date_based_rotation_needed(
         #{every => week, day_of_week => 3, hour => 12},
         {{2021, 01, 14}, {12, 00, 00}},
         {{2021, 01, 15}, {12, 00, 00}})),
    %% Same boundary day, crossing 12:00: rotate.
    ?assert(
       rabbit_logger_std_h:is_date_based_rotation_needed(
         #{every => week, day_of_week => 3, hour => 12},
         {{2021, 01, 13}, {11, 00, 00}},
         {{2021, 01, 13}, {12, 00, 00}})),
    %% Exactly one week elapsed since the previous rotation: rotate.
    ?assert(
       rabbit_logger_std_h:is_date_based_rotation_needed(
         #{every => week, day_of_week => 3, hour => 12},
         {{2021, 01, 06}, {12, 00, 00}},
         {{2021, 01, 13}, {12, 00, 00}})),
    ?assert(
       rabbit_logger_std_h:is_date_based_rotation_needed(
         #{every => week, day_of_week => 3, hour => 12},
         {{2021, 01, 07}, {12, 00, 00}},
         {{2021, 01, 14}, {12, 00, 00}})),
    %% Less than a week, next boundary not reached yet: no rotation.
    ?assertNot(
       rabbit_logger_std_h:is_date_based_rotation_needed(
         #{every => week, day_of_week => 3, hour => 12},
         {{2021, 01, 06}, {12, 00, 00}},
         {{2021, 01, 12}, {12, 00, 00}})),
    %% Previous rotation just before that week's 12:00 boundary: the
    %% missed boundary triggers a rotation.
    ?assert(
       rabbit_logger_std_h:is_date_based_rotation_needed(
         #{every => week, day_of_week => 3, hour => 12},
         {{2021, 01, 06}, {11, 00, 00}},
         {{2021, 01, 12}, {12, 00, 00}})),
    %% One hour short of the next weekly boundary: no rotation.
    ?assertNot(
       rabbit_logger_std_h:is_date_based_rotation_needed(
         #{every => week, day_of_week => 3, hour => 12},
         {{2021, 01, 06}, {12, 00, 00}},
         {{2021, 01, 13}, {11, 00, 00}})).
%% Checks the "every month on day N (or `last') at 12:00" policy,
%% including months shorter than the configured day of month.
every_month_rotation_is_detected(_) ->
    %% Day 15, both timestamps before 12:00: no rotation yet.
    ?assertNot(
       rabbit_logger_std_h:is_date_based_rotation_needed(
         #{every => month, day_of_month => 15, hour => 12},
         {{2021, 01, 15}, {10, 00, 00}},
         {{2021, 01, 15}, {11, 00, 00}})),
    %% Crossing 12:00 on day 15: rotate.
    ?assert(
       rabbit_logger_std_h:is_date_based_rotation_needed(
         #{every => month, day_of_month => 15, hour => 12},
         {{2021, 01, 15}, {10, 00, 00}},
         {{2021, 01, 15}, {12, 00, 00}})),
    %% Both timestamps before the day-15 boundary: no rotation.
    ?assertNot(
       rabbit_logger_std_h:is_date_based_rotation_needed(
         #{every => month, day_of_month => 15, hour => 12},
         {{2021, 01, 13}, {12, 00, 00}},
         {{2021, 01, 14}, {12, 00, 00}})),
    %% Reaching day 15 at 12:00: rotate.
    ?assert(
       rabbit_logger_std_h:is_date_based_rotation_needed(
         #{every => month, day_of_month => 15, hour => 12},
         {{2021, 01, 14}, {12, 00, 00}},
         {{2021, 01, 15}, {12, 00, 00}})),
    %% Previous rotation on day 15: nothing until next month.
    ?assertNot(
       rabbit_logger_std_h:is_date_based_rotation_needed(
         #{every => month, day_of_month => 15, hour => 12},
         {{2021, 01, 15}, {12, 00, 00}},
         {{2021, 01, 16}, {12, 00, 00}})),
    %% A whole month elapsed: rotate even though the last-rotation day
    %% was not the configured day of month.
    ?assert(
       rabbit_logger_std_h:is_date_based_rotation_needed(
         #{every => month, day_of_month => 15, hour => 12},
         {{2021, 01, 14}, {12, 00, 00}},
         {{2021, 02, 14}, {12, 00, 00}})),
    ?assert(
       rabbit_logger_std_h:is_date_based_rotation_needed(
         #{every => month, day_of_month => 15, hour => 12},
         {{2021, 01, 16}, {12, 00, 00}},
         {{2021, 02, 16}, {12, 00, 00}})),
    %% day_of_month => 30: boundary reached on Jan 30.
    ?assert(
       rabbit_logger_std_h:is_date_based_rotation_needed(
         #{every => month, day_of_month => 30, hour => 12},
         {{2021, 01, 29}, {12, 00, 00}},
         {{2021, 01, 30}, {12, 00, 00}})),
    %% Rotation already done on Jan 30: nothing on Jan 31.
    ?assertNot(
       rabbit_logger_std_h:is_date_based_rotation_needed(
         #{every => month, day_of_month => 30, hour => 12},
         {{2021, 01, 30}, {12, 00, 00}},
         {{2021, 01, 31}, {12, 00, 00}})),
    %% February has no day 30: its last day (Feb 28, 2021) triggers the
    %% rotation instead.
    ?assert(
       rabbit_logger_std_h:is_date_based_rotation_needed(
         #{every => month, day_of_month => 30, hour => 12},
         {{2021, 02, 27}, {12, 00, 00}},
         {{2021, 02, 28}, {12, 00, 00}})),
    %% day_of_month => last: Jan 30 is not the last day of January.
    ?assertNot(
       rabbit_logger_std_h:is_date_based_rotation_needed(
         #{every => month, day_of_month => last, hour => 12},
         {{2021, 01, 29}, {12, 00, 00}},
         {{2021, 01, 30}, {12, 00, 00}})),
    %% Jan 31 is: rotate.
    ?assert(
       rabbit_logger_std_h:is_date_based_rotation_needed(
         #{every => month, day_of_month => last, hour => 12},
         {{2021, 01, 30}, {12, 00, 00}},
         {{2021, 01, 31}, {12, 00, 00}})),
    %% Skipping past the month-end boundary entirely also rotates.
    ?assert(
       rabbit_logger_std_h:is_date_based_rotation_needed(
         #{every => month, day_of_month => last, hour => 12},
         {{2021, 01, 30}, {12, 00, 00}},
         {{2021, 02, 01}, {12, 00, 00}})).

View File

@ -883,7 +883,7 @@
## Logging settings.
##
## See https://rabbitmq.com/logging.html and https://github.com/erlang-lager/lager for details.
## See https://rabbitmq.com/logging.html for details.
##
## Log directory, taken from the RABBITMQ_LOG_BASE env variable by default.

View File

@ -146,25 +146,6 @@ For example, to reset the RabbitMQ node:
.sp
.Dl rabbitmqctl reset
.\" ------------------------------------------------------------------
.It Cm rotate_logs
.Pp
Instructs the RabbitMQ node to perform internal log rotation.
.Pp
Log rotation is performed according to lager settings specified in
configuration file.
.Pp
Note that there is no need to call this command in case of external log
rotation (e.g. from logrotate(8)), because lager detects renames and
automatically reopens log files.
.Pp
For example, this command starts internal log rotation
process:
.sp
.Dl rabbitmqctl rotate_logs
.Pp
Rotation is performed asynchronously, so there is no guarantee that it
will be completed when this command returns.
.\" ------------------------------------------------------------------
.It Cm shutdown
.Pp
Shuts down the node, both RabbitMQ and its runtime.

View File

@ -1179,10 +1179,10 @@ end}.
]}.
% ==========================
% Lager section
% Logging section
% ==========================
{mapping, "log.dir", "lager.log_root", [
{mapping, "log.dir", "rabbit.log_root", [
{datatype, string},
{validators, ["dir_writable"]}]}.

View File

@ -117,7 +117,6 @@ dep_accept = hex 0.3.5
dep_cowboy = hex 2.8.0
dep_cowlib = hex 2.9.1
dep_jsx = hex 2.11.0
dep_lager = hex 3.9.1
dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
dep_ra = git https://github.com/rabbitmq/ra.git master
dep_ranch = hex 2.0.0

View File

@ -79,8 +79,8 @@ start_rabbitmq_server() {
${RABBITMQ_SERVER_ERL_ARGS} \
${RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS} \
${RABBITMQ_SERVER_START_ARGS} \
-lager crash_log false \
-lager handlers '[]' \
-syslog logger '[]' \
-syslog syslog_error_logger false \
"$@"
}

View File

@ -68,8 +68,8 @@ if "!RABBITMQ_ALLOW_INPUT!"=="" (
!RABBITMQ_SERVER_ERL_ARGS! ^
!RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS! ^
!RABBITMQ_SERVER_START_ARGS! ^
-lager crash_log false ^
-lager handlers "[]" ^
-syslog logger [] ^
-syslog syslog_error_logger false ^
!STAR!
if ERRORLEVEL 1 (

View File

@ -198,8 +198,8 @@ set ERLANG_SERVICE_ARGUMENTS= ^
!RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS! ^
!RABBITMQ_SERVER_START_ARGS! ^
!RABBITMQ_DIST_ARG! ^
-lager crash_log false ^
-lager handlers "[]" ^
-syslog logger [] ^
-syslog syslog_error_logger false ^
!STARVAR!
set ERLANG_SERVICE_ARGUMENTS=!ERLANG_SERVICE_ARGUMENTS:\=\\!

View File

@ -70,7 +70,7 @@ handle_maybe_call_mfa(true, {Module, Function, Args, Default}, State) ->
error:undef ->
handle_maybe_call_mfa_error(Module, Default, State);
Err:Reason ->
rabbit_log:error("Calling ~p:~p failed: ~p:~p~n",
rabbit_log:error("Calling ~p:~p failed: ~p:~p",
[Module, Function, Err, Reason]),
handle_maybe_call_mfa_error(Module, Default, State)
end.

View File

@ -1,233 +0,0 @@
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2007-2021 VMware, Inc. or its affiliates. All rights reserved.
%%
%% @doc RabbitMQ backend for lager.
%% Configuration is a proplist with the following keys:
%% <ul>
%% <li>`level' - log level to use</li>
%% <li>`formatter' - the module to use when formatting log messages. Defaults to
%% `lager_default_formatter'</li>
%% <li>`formatter_config' - the format configuration string. Defaults to
%% `time [ severity ] message'</li>
%% </ul>
-module(lager_exchange_backend).
-behaviour(gen_event).
-export([init/1, terminate/2, code_change/3,
handle_call/2, handle_event/2, handle_info/2]).
-export([maybe_init_exchange/0]).
-include("rabbit.hrl").
-include("rabbit_framing.hrl").
-include_lib("lager/include/lager.hrl").
-record(state, {level :: {'mask', integer()},
formatter :: atom(),
format_config :: any(),
init_exchange_ts = undefined :: integer() | undefined,
exchange = undefined :: #resource{} | undefined}).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-compile([{parse_transform, lager_transform}]).
-endif.
-define(INIT_EXCHANGE_INTERVAL_SECS, 5).
-define(TERSE_FORMAT, [time, " [", severity, "] ", message]).
-define(DEFAULT_FORMAT_CONFIG, ?TERSE_FORMAT).
-define(FORMAT_CONFIG_OFF, []).
-ifdef(TEST).
-define(DEPRECATED(_Msg), ok).
-else.
-define(DEPRECATED(Msg),
io:format(user, "WARNING: This is a deprecated lager_exchange_backend configuration. Please use \"~w\" instead.~n", [Msg])).
-endif.
-define(LOG_EXCH_NAME, <<"amq.rabbitmq.log">>).
init([Level]) when is_atom(Level) ->
?DEPRECATED([{level, Level}]),
init([{level, Level}]);
init([Level, true]) when is_atom(Level) -> % for backwards compatibility
?DEPRECATED([{level, Level}, {formatter_config, [{eol, "\\r\\n\\"}]}]),
init([{level, Level}, {formatter_config, ?FORMAT_CONFIG_OFF}]);
init([Level, false]) when is_atom(Level) -> % for backwards compatibility
?DEPRECATED([{level, Level}]),
init([{level, Level}]);
init(Options) when is_list(Options) ->
true = validate_options(Options),
Level = get_option(level, Options, undefined),
try lager_util:config_to_mask(Level) of
L ->
DefaultOptions = [{formatter, lager_default_formatter},
{formatter_config, ?DEFAULT_FORMAT_CONFIG}],
[Formatter, Config] = [get_option(K, Options, Default) || {K, Default} <- DefaultOptions],
State0 = #state{level=L,
formatter=Formatter,
format_config=Config},
% NB: this will probably always fail since the / vhost isn't available
State1 = maybe_init_exchange(State0),
{ok, State1}
catch
_:_ ->
{error, {fatal, bad_log_level}}
end;
init(Level) when is_atom(Level) ->
?DEPRECATED([{level, Level}]),
init([{level, Level}]);
init(Other) ->
{error, {fatal, {bad_lager_exchange_backend_config, Other}}}.
% rabbitmq/rabbitmq-server#1973
% This is called immediatly after the / vhost is created
% or recovered
maybe_init_exchange() ->
case lists:member(?MODULE, gen_event:which_handlers(lager_event)) of
true ->
_ = init_exchange(true),
ok;
_ ->
ok
end.
validate_options([]) -> true;
validate_options([{level, L}|T]) when is_atom(L) ->
case lists:member(L, ?LEVELS) of
false ->
throw({error, {fatal, {bad_level, L}}});
true ->
validate_options(T)
end;
validate_options([{formatter, M}|T]) when is_atom(M) ->
validate_options(T);
validate_options([{formatter_config, C}|T]) when is_list(C) ->
validate_options(T);
validate_options([H|_]) ->
throw({error, {fatal, {bad_lager_exchange_backend_config, H}}}).
get_option(K, Options, Default) ->
case lists:keyfind(K, 1, Options) of
{K, V} -> V;
false -> Default
end.
handle_call(get_loglevel, #state{level=Level} = State) ->
{ok, Level, State};
handle_call({set_loglevel, Level}, State) ->
try lager_util:config_to_mask(Level) of
Levels ->
{ok, ok, State#state{level=Levels}}
catch
_:_ ->
{ok, {error, bad_log_level}, State}
end;
handle_call(_Request, State) ->
{ok, ok, State}.
handle_event({log, _Message} = Event, State0) ->
State1 = maybe_init_exchange(State0),
handle_log_event(Event, State1);
handle_event(_Event, State) ->
{ok, State}.
handle_info(_Info, State) ->
{ok, State}.
terminate(_Reason, _State) ->
ok.
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
%% @private
handle_log_event({log, _Message}, #state{exchange=undefined} = State) ->
% NB: tried to define the exchange but still undefined,
% so not logging this message. Note: we can't log this dropped
% message because it will start an infinite loop
{ok, State};
handle_log_event({log, Message},
#state{level=L, exchange=LogExch,
formatter=Formatter, format_config=FormatConfig} = State) ->
case lager_util:is_loggable(Message, L, ?MODULE) of
true ->
%% 0-9-1 says the timestamp is a "64 bit POSIX timestamp". That's
%% second resolution, not millisecond.
RoutingKey = rabbit_data_coercion:to_binary(lager_msg:severity(Message)),
Timestamp = os:system_time(seconds),
Node = rabbit_data_coercion:to_binary(node()),
Headers = [{<<"node">>, longstr, Node}],
AmqpMsg = #'P_basic'{content_type = <<"text/plain">>,
timestamp = Timestamp,
headers = Headers},
Body = rabbit_data_coercion:to_binary(Formatter:format(Message, FormatConfig)),
case rabbit_basic:publish(LogExch, RoutingKey, AmqpMsg, Body) of
ok -> ok;
{error, not_found} -> ok
end,
{ok, State};
false ->
{ok, State}
end.
%% @private
maybe_init_exchange(#state{exchange=undefined, init_exchange_ts=undefined} = State) ->
Now = erlang:monotonic_time(second),
handle_init_exchange(init_exchange(true), Now, State);
maybe_init_exchange(#state{exchange=undefined, init_exchange_ts=Timestamp} = State) ->
Now = erlang:monotonic_time(second),
% NB: since we may try to declare the exchange on every log message, this ensures
% that we only try once every 5 seconds
HasEnoughTimeElapsed = Now - Timestamp > ?INIT_EXCHANGE_INTERVAL_SECS,
Result = init_exchange(HasEnoughTimeElapsed),
handle_init_exchange(Result, Now, State);
maybe_init_exchange(State) ->
State.
%% @private
init_exchange(true) ->
{ok, DefaultVHost} = application:get_env(rabbit, default_vhost),
Exchange = rabbit_misc:r(DefaultVHost, exchange, ?LOG_EXCH_NAME),
try
%% durable
#exchange{} = rabbit_exchange:declare(Exchange, topic, true, false, true, [], ?INTERNAL_USER),
rabbit_log:info("Declared exchange '~s' in vhost '~s'", [?LOG_EXCH_NAME, DefaultVHost]),
{ok, Exchange}
catch
ErrType:Err ->
rabbit_log:error("Could not declare exchange '~s' in vhost '~s', reason: ~p:~p",
[?LOG_EXCH_NAME, DefaultVHost, ErrType, Err]),
{ok, undefined}
end;
init_exchange(_) ->
{ok, undefined}.
%% @private
handle_init_exchange({ok, undefined}, Now, State) ->
State#state{init_exchange_ts=Now};
handle_init_exchange({ok, Exchange}, Now, State) ->
State#state{exchange=Exchange, init_exchange_ts=Now}.
-ifdef(TEST).
console_config_validation_test_() ->
Good = [{level, info}],
Bad1 = [{level, foo}],
Bad2 = [{larval, info}],
AllGood = [{level, info}, {formatter, my_formatter},
{formatter_config, ["blort", "garbage"]}],
[
?_assertEqual(true, validate_options(Good)),
?_assertThrow({error, {fatal, {bad_level, foo}}}, validate_options(Bad1)),
?_assertThrow({error, {fatal, {bad_lager_exchange_backend_config, {larval, info}}}}, validate_options(Bad2)),
?_assertEqual(true, validate_options(AllGood))
].
-endif.

View File

@ -7,6 +7,9 @@
-module(rabbit).
-include_lib("kernel/include/logger.hrl").
-include_lib("rabbit_common/include/logging.hrl").
%% Transitional step until we can require Erlang/OTP 21 and
%% use the now recommended try/catch syntax for obtaining the stack trace.
-compile(nowarn_deprecated_function).
@ -28,7 +31,8 @@
base_product_version/0,
motd_file/0,
motd/0]).
-export([log_locations/0, config_files/0]). %% for testing and mgmt-agent
%% For CLI, testing and mgmt-agent.
-export([set_log_level/1, log_locations/0, config_files/0]).
-export([is_booted/1, is_booted/0, is_booting/1, is_booting/0]).
%%---------------------------------------------------------------------------
@ -261,7 +265,7 @@
-rabbit_boot_step({networking,
[{description, "TCP and TLS listeners (backwards compatibility)"},
{mfa, {rabbit_log, debug, ["'networking' boot step skipped and moved to end of startup", []]}},
{mfa, {logger, debug, ["'networking' boot step skipped and moved to end of startup", [], #{domain => ?RMQLOG_DOMAIN_GLOBAL}]}},
{requires, notify_cluster}]}).
%%---------------------------------------------------------------------------
@ -335,12 +339,12 @@ run_prelaunch_second_phase() ->
case IsInitialPass of
true ->
rabbit_log_prelaunch:debug(""),
rabbit_log_prelaunch:debug(
?LOG_DEBUG(""),
?LOG_DEBUG(
"== Prelaunch phase [2/2] (initial pass) ==");
false ->
rabbit_log_prelaunch:debug(""),
rabbit_log_prelaunch:debug("== Prelaunch phase [2/2] =="),
?LOG_DEBUG(""),
?LOG_DEBUG("== Prelaunch phase [2/2] =="),
ok
end,
@ -357,11 +361,11 @@ run_prelaunch_second_phase() ->
ok = rabbit_prelaunch_cluster:setup(Context),
%% Start Mnesia now that everything is ready.
rabbit_log_prelaunch:debug("Starting Mnesia"),
?LOG_DEBUG("Starting Mnesia"),
ok = mnesia:start(),
rabbit_log_prelaunch:debug(""),
rabbit_log_prelaunch:debug("== Prelaunch DONE =="),
?LOG_DEBUG(""),
?LOG_DEBUG("== Prelaunch DONE =="),
case IsInitialPass of
true -> rabbit_prelaunch:initial_pass_finished();
@ -373,7 +377,8 @@ start_it(StartType) ->
case spawn_boot_marker() of
{ok, Marker} ->
T0 = erlang:timestamp(),
rabbit_log:info("RabbitMQ is asked to start...", []),
?LOG_INFO("RabbitMQ is asked to start...", [],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
try
{ok, _} = application:ensure_all_started(rabbitmq_prelaunch,
StartType),
@ -382,7 +387,7 @@ start_it(StartType) ->
ok = wait_for_ready_or_stopped(),
T1 = erlang:timestamp(),
rabbit_log_prelaunch:debug(
?LOG_DEBUG(
"Time to start RabbitMQ: ~p µs",
[timer:now_diff(T1, T0)]),
stop_boot_marker(Marker),
@ -433,11 +438,13 @@ stop() ->
case rabbit_boot_state:get() of
ready ->
Product = product_name(),
rabbit_log:info("~s is asked to stop...", [Product]),
?LOG_INFO("~s is asked to stop...", [Product],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
do_stop(),
rabbit_log:info(
?LOG_INFO(
"Successfully stopped ~s and its dependencies",
[Product]),
[Product],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
ok;
stopped ->
ok
@ -461,19 +468,22 @@ stop_and_halt() ->
try
stop()
catch Type:Reason ->
rabbit_log:error(
?LOG_ERROR(
"Error trying to stop ~s: ~p:~p",
[product_name(), Type, Reason]),
[product_name(), Type, Reason],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
error({Type, Reason})
after
%% Enclose all the logging in the try block.
%% init:stop() will be called regardless of any errors.
try
AppsLeft = [ A || {A, _, _} <- application:which_applications() ],
rabbit_log:info(
lists:flatten(["Halting Erlang VM with the following applications:~n",
[" ~p~n" || _ <- AppsLeft]]),
AppsLeft),
?LOG_ERROR(
lists:flatten(
["Halting Erlang VM with the following applications:~n",
[" ~p~n" || _ <- AppsLeft]]),
AppsLeft,
#{domain => ?RMQLOG_DOMAIN_GLOBAL}),
%% Also duplicate this information to stderr, so console where
%% foreground broker was running (or systemd journal) will
%% contain information about graceful termination.
@ -518,10 +528,12 @@ start_apps(Apps, RestartTypes) ->
stop_apps([]) ->
ok;
stop_apps(Apps) ->
rabbit_log:info(
lists:flatten(["Stopping ~s applications and their dependencies in the following order:~n",
[" ~p~n" || _ <- Apps]]),
[product_name() | lists:reverse(Apps)]),
?LOG_INFO(
lists:flatten(
["Stopping ~s applications and their dependencies in the following order:~n",
[" ~p~n" || _ <- Apps]]),
[product_name() | lists:reverse(Apps)],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
ok = app_utils:stop_applications(
Apps, handle_app_error(error_during_shutdown)),
case lists:member(rabbit, Apps) of
@ -785,28 +797,10 @@ environment(App) ->
-spec rotate_logs() -> rabbit_types:ok_or_error(any()).
rotate_logs() ->
rabbit_lager:fold_sinks(
fun
(_, [], Acc) ->
Acc;
(SinkName, FileNames, Acc) ->
lager:log(SinkName, info, self(),
"Log file rotation forced", []),
%% FIXME: We use an internal message, understood by
%% lager_file_backend. We should use a proper API, when
%% it's added to Lager.
%%
%% FIXME: This call is effectively asynchronous: at the
%% end of this function, we can't guaranty the rotation
%% is completed.
[ok = gen_event:call(SinkName,
{lager_file_backend, FileName},
rotate,
infinity) || FileName <- FileNames],
lager:log(SinkName, info, self(),
"Log file re-opened after forced rotation", []),
Acc
end, ok).
?LOG_ERROR(
"Forcing log rotation is currently unsupported",
#{domain => ?RMQLOG_DOMAIN_GLOBAL}),
{error, unsupported}.
%%--------------------------------------------------------------------
@ -835,14 +829,18 @@ start(normal, []) ->
#{product_overridden := true,
product_base_name := BaseName,
product_base_version := BaseVersion} ->
rabbit_log:info("~n Starting ~s ~s on Erlang ~s~n Based on ~s ~s~n ~s~n ~s~n",
[product_name(), product_version(), rabbit_misc:otp_release(),
BaseName, BaseVersion,
?COPYRIGHT_MESSAGE, ?INFORMATION_MESSAGE]);
?LOG_INFO(
"~n Starting ~s ~s on Erlang ~s~n Based on ~s ~s~n ~s~n ~s",
[product_name(), product_version(), rabbit_misc:otp_release(),
BaseName, BaseVersion,
?COPYRIGHT_MESSAGE, ?INFORMATION_MESSAGE],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH});
_ ->
rabbit_log:info("~n Starting ~s ~s on Erlang ~s~n ~s~n ~s~n",
[product_name(), product_version(), rabbit_misc:otp_release(),
?COPYRIGHT_MESSAGE, ?INFORMATION_MESSAGE])
?LOG_INFO(
"~n Starting ~s ~s on Erlang ~s~n ~s~n ~s",
[product_name(), product_version(), rabbit_misc:otp_release(),
?COPYRIGHT_MESSAGE, ?INFORMATION_MESSAGE],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH})
end,
log_motd(),
{ok, SupPid} = rabbit_sup:start_link(),
@ -860,7 +858,7 @@ start(normal, []) ->
%%
%% Note that plugins were not taken care of at this point
%% either.
rabbit_log_prelaunch:debug(
?LOG_DEBUG(
"Register `rabbit` process (~p) for rabbit_node_monitor",
[self()]),
true = register(rabbit, self()),
@ -870,15 +868,15 @@ start(normal, []) ->
warn_if_kernel_config_dubious(),
warn_if_disc_io_options_dubious(),
rabbit_log_prelaunch:debug(""),
rabbit_log_prelaunch:debug("== Plugins (prelaunch phase) =="),
?LOG_DEBUG(""),
?LOG_DEBUG("== Plugins (prelaunch phase) =="),
rabbit_log_prelaunch:debug("Setting plugins up"),
?LOG_DEBUG("Setting plugins up"),
%% `Plugins` contains all the enabled plugins, plus their
%% dependencies. The order is important: dependencies appear
%% before plugin which depend on them.
Plugins = rabbit_plugins:setup(),
rabbit_log_prelaunch:debug(
?LOG_DEBUG(
"Loading the following plugins: ~p", [Plugins]),
%% We can load all plugins and refresh their feature flags at
%% once, because it does not involve running code from the
@ -887,8 +885,8 @@ start(normal, []) ->
ok = rabbit_feature_flags:refresh_feature_flags_after_app_load(
Plugins),
rabbit_log_prelaunch:debug(""),
rabbit_log_prelaunch:debug("== Boot steps =="),
?LOG_DEBUG(""),
?LOG_DEBUG("== Boot steps =="),
ok = rabbit_boot_steps:run_boot_steps([rabbit | Plugins]),
run_postlaunch_phase(Plugins),
@ -917,23 +915,22 @@ run_postlaunch_phase(Plugins) ->
do_run_postlaunch_phase(Plugins) ->
%% Once RabbitMQ itself is started, we need to run a few more steps,
%% in particular start plugins.
rabbit_log_prelaunch:debug(""),
rabbit_log_prelaunch:debug("== Postlaunch phase =="),
?LOG_DEBUG(""),
?LOG_DEBUG("== Postlaunch phase =="),
try
%% Successful boot resets node maintenance state.
rabbit_log_prelaunch:debug(""),
rabbit_log_prelaunch:info("Resetting node maintenance status"),
?LOG_DEBUG(""),
?LOG_INFO("Resetting node maintenance status"),
_ = rabbit_maintenance:unmark_as_being_drained(),
rabbit_log_prelaunch:debug(""),
rabbit_log_prelaunch:debug("== Plugins (postlaunch phase) =="),
?LOG_DEBUG(""),
?LOG_DEBUG("== Plugins (postlaunch phase) =="),
%% However, we want to run their boot steps and actually start
%% them one by one, to ensure a dependency is fully started
%% before a plugin which depends on it gets a chance to start.
rabbit_log_prelaunch:debug(
"Starting the following plugins: ~p", [Plugins]),
?LOG_DEBUG("Starting the following plugins: ~p", [Plugins]),
lists:foreach(
fun(Plugin) ->
case application:ensure_all_started(Plugin) of
@ -951,18 +948,16 @@ do_run_postlaunch_phase(Plugins) ->
%% Start listeners after all plugins have been enabled,
%% see rabbitmq/rabbitmq-server#2405.
rabbit_log_prelaunch:info(
"Ready to start client connection listeners"),
?LOG_INFO("Ready to start client connection listeners"),
ok = rabbit_networking:boot(),
%% The node is ready: mark it as such and log it.
%% NOTE: PLEASE DO NOT ADD CRITICAL NODE STARTUP CODE AFTER THIS.
ok = rabbit_lager:broker_is_started(),
ActivePlugins = rabbit_plugins:active(),
StrictlyPlugins = rabbit_plugins:strictly_plugins(ActivePlugins),
ok = log_broker_started(StrictlyPlugins),
rabbit_log_prelaunch:debug("Marking ~s as running", [product_name()]),
?LOG_DEBUG("Marking ~s as running", [product_name()]),
rabbit_boot_state:set(ready)
catch
throw:{error, _} = Error ->
@ -1011,7 +1006,7 @@ boot_delegate() ->
recover() ->
ok = rabbit_policy:recover(),
ok = rabbit_vhost:recover(),
ok = lager_exchange_backend:maybe_init_exchange().
ok.
-spec maybe_insert_default_data() -> 'ok'.
@ -1019,10 +1014,12 @@ maybe_insert_default_data() ->
NoDefsToImport = not rabbit_definitions:has_configured_definitions_to_load(),
case rabbit_table:needs_default_data() andalso NoDefsToImport of
true ->
rabbit_log:info("Will seed default virtual host and user..."),
?LOG_INFO("Will seed default virtual host and user...",
#{domain => ?RMQLOG_DOMAIN_GLOBAL}),
insert_default_data();
false ->
rabbit_log:info("Will not seed default virtual host and user: have definitions to load..."),
?LOG_INFO("Will not seed default virtual host and user: have definitions to load...",
#{domain => ?RMQLOG_DOMAIN_GLOBAL}),
ok
end.
@ -1042,7 +1039,6 @@ insert_default_data() ->
DefaultReadPermBin = rabbit_data_coercion:to_binary(DefaultReadPerm),
ok = rabbit_vhost:add(DefaultVHostBin, <<"Default virtual host">>, [], ?INTERNAL_USER),
ok = lager_exchange_backend:maybe_init_exchange(),
ok = rabbit_auth_backend_internal:add_user(
DefaultUserBin,
DefaultPassBin,
@ -1061,9 +1057,13 @@ insert_default_data() ->
%%---------------------------------------------------------------------------
%% logging
-spec log_locations() -> [rabbit_lager:log_location()].
-spec set_log_level(logger:level()) -> ok.
set_log_level(Level) ->
rabbit_prelaunch_logging:set_log_level(Level).
-spec log_locations() -> [rabbit_prelaunch_logging:log_location()].
log_locations() ->
rabbit_lager:log_locations().
rabbit_prelaunch_logging:log_locations().
-spec config_locations() -> [rabbit_config:config_location()].
config_locations() ->
@ -1094,7 +1094,8 @@ log_broker_started(Plugins) ->
Message = string:strip(rabbit_misc:format(
"Server startup complete; ~b plugins started.~n~s",
[length(Plugins), PluginList]), right, $\n),
rabbit_log:info(Message),
?LOG_INFO(Message,
#{domain => ?RMQLOG_DOMAIN_GLOBAL}),
io:format(" completed with ~p plugins.~n", [length(Plugins)]).
-define(RABBIT_TEXT_LOGO,
@ -1185,7 +1186,8 @@ log_motd() ->
_ -> [" ", Line, "\n"]
end
|| Line <- Lines],
rabbit_log:info("~n~ts", [string:trim(Padded, trailing, [$\r, $\n])])
?LOG_INFO("~n~ts", [string:trim(Padded, trailing, [$\r, $\n])],
#{domain => ?RMQLOG_DOMAIN_GLOBAL})
end.
log_banner() ->
@ -1216,7 +1218,8 @@ log_banner() ->
{K, V} ->
Format(K, V)
end || S <- Settings]), right, $\n),
rabbit_log:info("~n~ts", [Banner]).
?LOG_INFO("~n~ts", [Banner],
#{domain => ?RMQLOG_DOMAIN_GLOBAL}).
warn_if_kernel_config_dubious() ->
case os:type() of
@ -1225,16 +1228,18 @@ warn_if_kernel_config_dubious() ->
_ ->
case erlang:system_info(kernel_poll) of
true -> ok;
false -> rabbit_log:warning(
"Kernel poll (epoll, kqueue, etc) is disabled. Throughput "
"and CPU utilization may worsen.~n")
false -> ?LOG_WARNING(
"Kernel poll (epoll, kqueue, etc) is disabled. "
"Throughput and CPU utilization may worsen.",
#{domain => ?RMQLOG_DOMAIN_GLOBAL})
end
end,
DirtyIOSchedulers = erlang:system_info(dirty_io_schedulers),
case DirtyIOSchedulers < ?DIRTY_IO_SCHEDULERS_WARNING_THRESHOLD of
true -> rabbit_log:warning(
true -> ?LOG_WARNING(
"Erlang VM is running with ~b dirty I/O schedulers, "
"file I/O performance may worsen~n", [DirtyIOSchedulers]);
"file I/O performance may worsen", [DirtyIOSchedulers],
#{domain => ?RMQLOG_DOMAIN_GLOBAL});
false -> ok
end,
IDCOpts = case application:get_env(kernel, inet_default_connect_options) of
@ -1242,8 +1247,9 @@ warn_if_kernel_config_dubious() ->
{ok, Val} -> Val
end,
case proplists:get_value(nodelay, IDCOpts, false) of
false -> rabbit_log:warning("Nagle's algorithm is enabled for sockets, "
"network I/O latency will be higher~n");
false -> ?LOG_WARNING("Nagle's algorithm is enabled for sockets, "
"network I/O latency will be higher",
#{domain => ?RMQLOG_DOMAIN_GLOBAL});
true -> ok
end.
@ -1259,7 +1265,8 @@ warn_if_disc_io_options_dubious() ->
CreditDiscBound, IoBatchSize) of
ok -> ok;
{error, {Reason, Vars}} ->
rabbit_log:warning(Reason, Vars)
?LOG_WARNING(Reason, Vars,
#{domain => ?RMQLOG_DOMAIN_GLOBAL})
end.
validate_msg_store_io_batch_size_and_credit_disc_bound(CreditDiscBound,
@ -1271,7 +1278,7 @@ validate_msg_store_io_batch_size_and_credit_disc_bound(CreditDiscBound,
if IoBatchSize < ?IO_BATCH_SIZE ->
throw({error,
{"io_batch_size of ~b lower than recommended value ~b, "
"paging performance may worsen~n",
"paging performance may worsen",
[IoBatchSize, ?IO_BATCH_SIZE]}});
true ->
ok
@ -1292,7 +1299,7 @@ validate_msg_store_io_batch_size_and_credit_disc_bound(CreditDiscBound,
throw({error,
{"msg_store_credit_disc_bound {~b, ~b} lower than"
"recommended value {~b, ~b},"
" paging performance may worsen~n",
" paging performance may worsen",
[IC, MCA, RIC, RMCA]}});
true ->
ok
@ -1320,7 +1327,7 @@ validate_msg_store_io_batch_size_and_credit_disc_bound(CreditDiscBound,
{error,
{"msg_store_io_batch_size ~b should be bigger than the initial "
"credit value from msg_store_credit_disc_bound ~b,"
" paging performance may worsen~n",
" paging performance may worsen",
[IoBatchSize, InitialCredit]}});
true ->
ok
@ -1498,8 +1505,10 @@ ensure_working_fhc() ->
{ok, true} -> "ON";
{ok, false} -> "OFF"
end,
rabbit_log:info("FHC read buffering: ~s~n", [ReadBuf]),
rabbit_log:info("FHC write buffering: ~s~n", [WriteBuf]),
?LOG_INFO("FHC read buffering: ~s", [ReadBuf],
#{domain => ?RMQLOG_DOMAIN_GLOBAL}),
?LOG_INFO("FHC write buffering: ~s", [WriteBuf],
#{domain => ?RMQLOG_DOMAIN_GLOBAL}),
Filename = filename:join(code:lib_dir(kernel, ebin), "kernel.app"),
{ok, Fd} = file_handle_cache:open(Filename, [raw, binary, read], []),
{ok, _} = file_handle_cache:read(Fd, 1),

View File

@ -85,7 +85,7 @@ try_authenticate(Module, Username, AuthProps) ->
case Module:user_login_authentication(Username, AuthProps) of
{ok, AuthUser} -> {ok, AuthUser};
{error, E} -> {refused, Username,
"~s failed authenticating ~s: ~p~n",
"~s failed authenticating ~s: ~p",
[Module, Username, E]};
{refused, F, A} -> {refused, Username, F, A}
end.
@ -97,7 +97,7 @@ try_authorize(Modules, Username, AuthProps) ->
{ok, Impl, Tags}-> {ok, [{Module, Impl} | ModsImpls], ModsTags ++ Tags};
{ok, Impl} -> {ok, [{Module, Impl} | ModsImpls], ModsTags};
{error, E} -> {refused, Username,
"~s failed authorizing ~s: ~p~n",
"~s failed authorizing ~s: ~p",
[Module, Username, E]};
{refused, F, A} -> {refused, Username, F, A}
end;
@ -215,7 +215,7 @@ check_access(Fun, Module, ErrStr, ErrArgs, ErrName) ->
false ->
rabbit_misc:protocol_error(ErrName, ErrStr, ErrArgs);
{error, E} ->
FullErrStr = ErrStr ++ ", backend ~s returned an error: ~p~n",
FullErrStr = ErrStr ++ ", backend ~s returned an error: ~p",
FullErrArgs = ErrArgs ++ [Module, E],
rabbit_log:error(FullErrStr, FullErrArgs),
rabbit_misc:protocol_error(ErrName, FullErrStr, FullErrArgs)

View File

@ -232,7 +232,7 @@ handle_event({node_down, Node}, #alarms{alarmed_nodes = AN} = State) ->
error -> []
end,
{ok, lists:foldr(fun(Source, AccState) ->
rabbit_log:warning("~s resource limit alarm cleared for dead node ~p~n",
rabbit_log:warning("~s resource limit alarm cleared for dead node ~p",
[Source, Node]),
maybe_alert(fun dict_unappend/3, Node, Source, false, AccState)
end, State, AlarmsForDeadNode)};
@ -284,7 +284,7 @@ maybe_alert(UpdateFun, Node, Source, WasAlertAdded,
StillHasAlerts = lists:any(fun ({_Node, NodeAlerts}) -> lists:member(Source, NodeAlerts) end, dict:to_list(AN1)),
case StillHasAlerts of
true -> ok;
false -> rabbit_log:warning("~s resource limit alarm cleared across the cluster~n", [Source])
false -> rabbit_log:warning("~s resource limit alarm cleared across the cluster", [Source])
end,
Alert = {WasAlertAdded, StillHasAlerts, Node},
case node() of
@ -336,11 +336,11 @@ handle_set_alarm({file_descriptor_limit, []}, State) ->
"********************************************************************~n"),
{ok, State};
handle_set_alarm(Alarm, State) ->
rabbit_log:warning("alarm '~p' set~n", [Alarm]),
rabbit_log:warning("alarm '~p' set", [Alarm]),
{ok, State}.
handle_clear_resource_alarm(Source, Node, State) ->
rabbit_log:warning("~s resource limit alarm cleared on node ~p~n",
rabbit_log:warning("~s resource limit alarm cleared on node ~p",
[Source, Node]),
{ok, maybe_alert(fun dict_unappend/3, Node, Source, false, State)}.
@ -348,7 +348,7 @@ handle_clear_alarm(file_descriptor_limit, State) ->
rabbit_log:warning("file descriptor limit alarm cleared~n"),
{ok, State};
handle_clear_alarm(Alarm, State) ->
rabbit_log:warning("alarm '~p' cleared~n", [Alarm]),
rabbit_log:warning("alarm '~p' cleared", [Alarm]),
{ok, State}.
is_node_alarmed(Source, Node, #alarms{alarmed_nodes = AN}) ->

View File

@ -111,7 +111,7 @@ warn_file_limit() ->
case file_handle_cache:get_limit() < L of
true ->
rabbit_log:warning(
"Recovering ~p queues, available file handles: ~p. Please increase max open file handles limit to at least ~p!~n",
"Recovering ~p queues, available file handles: ~p. Please increase max open file handles limit to at least ~p!",
[L, file_handle_cache:get_limit(), L]);
false ->
ok
@ -626,7 +626,7 @@ retry_wait(Q, F, E, RetriesLeft) ->
% The old check would have crashed here,
% instead, log it and run the exit fun. absent & alive is weird,
% but better than crashing with badmatch,true
rabbit_log:debug("Unexpected alive queue process ~p~n", [QPid]),
rabbit_log:debug("Unexpected alive queue process ~p", [QPid]),
E({absent, Q, alive});
false ->
ok % Expected result
@ -1234,7 +1234,7 @@ count(VHost) ->
%% that requires a proper consensus algorithm.
length(list_for_count(VHost))
catch _:Err ->
rabbit_log:error("Failed to fetch number of queues in vhost ~p:~n~p~n",
rabbit_log:error("Failed to fetch number of queues in vhost ~p:~n~p",
[VHost, Err]),
0
end.

View File

@ -197,7 +197,7 @@ validate_and_alternate_credentials(Username, Password, ActingUser, Fun) ->
ok ->
Fun(Username, Password, ActingUser);
{error, Err} ->
rabbit_log:error("Credential validation for '~s' failed!~n", [Username]),
rabbit_log:error("Credential validation for '~s' failed!", [Username]),
{error, Err}
end.
@ -334,7 +334,7 @@ change_password_sans_validation(Username, Password, ActingUser) ->
-spec clear_password(rabbit_types:username(), rabbit_types:username()) -> 'ok'.
clear_password(Username, ActingUser) ->
rabbit_log:info("Clearing password for '~s'~n", [Username]),
rabbit_log:info("Clearing password for '~s'", [Username]),
R = change_password_hash(Username, <<"">>),
rabbit_event:notify(user_password_cleared,
[{name, Username},

View File

@ -118,7 +118,7 @@ init() ->
case State of
{leader_waiting, Winner, _} ->
rabbit_log:info(
"Autoheal: in progress, requesting report from ~p~n", [Winner]),
"Autoheal: in progress, requesting report from ~p", [Winner]),
send(Winner, report_autoheal_status);
_ ->
ok
@ -129,7 +129,7 @@ maybe_start(not_healing) ->
case enabled() of
true -> Leader = leader(),
send(Leader, {request_start, node()}),
rabbit_log:info("Autoheal request sent to ~p~n", [Leader]),
rabbit_log:info("Autoheal request sent to ~p", [Leader]),
not_healing;
false -> not_healing
end;
@ -150,7 +150,7 @@ leader() ->
%% This is the winner receiving its last notification that a node has
%% stopped - all nodes can now start again
rabbit_down(Node, {winner_waiting, [Node], Notify}) ->
rabbit_log:info("Autoheal: final node has stopped, starting...~n",[]),
rabbit_log:info("Autoheal: final node has stopped, starting...",[]),
winner_finish(Notify);
rabbit_down(Node, {winner_waiting, WaitFor, Notify}) ->
@ -173,25 +173,25 @@ node_down(Node, {winner_waiting, _, Notify}) ->
node_down(Node, {leader_waiting, Node, _Notify}) ->
%% The winner went down, we don't know what to do so we simply abort.
rabbit_log:info("Autoheal: aborting - winner ~p went down~n", [Node]),
rabbit_log:info("Autoheal: aborting - winner ~p went down", [Node]),
not_healing;
node_down(Node, {leader_waiting, _, _} = St) ->
%% If it is a partial partition, the winner might continue with the
%% healing process. If it is a full partition, the winner will also
%% see it and abort. Let's wait for it.
rabbit_log:info("Autoheal: ~p went down, waiting for winner decision ~n", [Node]),
rabbit_log:info("Autoheal: ~p went down, waiting for winner decision ", [Node]),
St;
node_down(Node, _State) ->
rabbit_log:info("Autoheal: aborting - ~p went down~n", [Node]),
rabbit_log:info("Autoheal: aborting - ~p went down", [Node]),
not_healing.
%% If the process that has to restart the node crashes for an unexpected reason,
%% we go back to a not healing state so the node is able to recover.
process_down({'EXIT', Pid, Reason}, {restarting, Pid}) when Reason =/= normal ->
rabbit_log:info("Autoheal: aborting - the process responsible for restarting the "
"node terminated with reason: ~p~n", [Reason]),
"node terminated with reason: ~p", [Reason]),
not_healing;
process_down(_, State) ->
@ -201,17 +201,17 @@ process_down(_, State) ->
%% TODO should we try to debounce this?
handle_msg({request_start, Node},
not_healing, Partitions) ->
rabbit_log:info("Autoheal request received from ~p~n", [Node]),
rabbit_log:info("Autoheal request received from ~p", [Node]),
case check_other_nodes(Partitions) of
{error, E} ->
rabbit_log:info("Autoheal request denied: ~s~n", [fmt_error(E)]),
rabbit_log:info("Autoheal request denied: ~s", [fmt_error(E)]),
not_healing;
{ok, AllPartitions} ->
{Winner, Losers} = make_decision(AllPartitions),
rabbit_log:info("Autoheal decision~n"
" * Partitions: ~p~n"
" * Winner: ~p~n"
" * Losers: ~p~n",
" * Losers: ~p",
[AllPartitions, Winner, Losers]),
case node() =:= Winner of
true -> handle_msg({become_winner, Losers},
@ -224,12 +224,12 @@ handle_msg({request_start, Node},
handle_msg({request_start, Node},
State, _Partitions) ->
rabbit_log:info("Autoheal request received from ~p when healing; "
"ignoring~n", [Node]),
"ignoring", [Node]),
State;
handle_msg({become_winner, Losers},
not_healing, _Partitions) ->
rabbit_log:info("Autoheal: I am the winner, waiting for ~p to stop~n",
rabbit_log:info("Autoheal: I am the winner, waiting for ~p to stop",
[Losers]),
stop_partition(Losers);
@ -238,7 +238,7 @@ handle_msg({become_winner, Losers},
%% The leader has aborted the healing, might have seen us down but
%% we didn't see the same. Let's try again as it is the same partition.
rabbit_log:info("Autoheal: I am the winner and received a duplicated "
"request, waiting again for ~p to stop~n", [Losers]),
"request, waiting again for ~p to stop", [Losers]),
stop_partition(Losers);
handle_msg({become_winner, _},
@ -246,7 +246,7 @@ handle_msg({become_winner, _},
%% Something has happened to the leader, it might have seen us down but we
%% are still alive. Partitions have changed, cannot continue.
rabbit_log:info("Autoheal: I am the winner and received another healing "
"request, partitions have changed to ~p. Aborting ~n", [Losers]),
"request, partitions have changed to ~p. Aborting ", [Losers]),
winner_finish(Losers),
not_healing;
@ -264,7 +264,7 @@ handle_msg({winner_is, Winner}, State = {leader_waiting, Winner, _},
handle_msg(Request, {restarting, Pid} = St, _Partitions) ->
%% ignore, we can contribute no further
rabbit_log:info("Autoheal: Received the request ~p while waiting for ~p "
"to restart the node. Ignoring it ~n", [Request, Pid]),
"to restart the node. Ignoring it ", [Request, Pid]),
St;
handle_msg(report_autoheal_status, not_healing, _Partitions) ->
@ -286,14 +286,14 @@ handle_msg({autoheal_finished, Winner},
%% The winner is finished with the autoheal process and notified us
%% (the leader). We can transition to the "not_healing" state and
%% accept new requests.
rabbit_log:info("Autoheal finished according to winner ~p~n", [Winner]),
rabbit_log:info("Autoheal finished according to winner ~p", [Winner]),
not_healing;
handle_msg({autoheal_finished, Winner}, not_healing, _Partitions)
when Winner =:= node() ->
%% We are the leader and the winner. The state already transitioned
%% to "not_healing" at the end of the autoheal process.
rabbit_log:info("Autoheal finished according to winner ~p~n", [node()]),
rabbit_log:info("Autoheal finished according to winner ~p", [node()]),
not_healing;
handle_msg({autoheal_finished, Winner}, not_healing, _Partitions) ->
@ -301,7 +301,7 @@ handle_msg({autoheal_finished, Winner}, not_healing, _Partitions) ->
%% transitioned to not_healing. However, the winner was still able
%% to finish. Let it pass.
rabbit_log:info("Autoheal finished according to winner ~p."
" Unexpected, I might have previously seen the winner down~n", [Winner]),
" Unexpected, I might have previously seen the winner down", [Winner]),
not_healing.
%%----------------------------------------------------------------------------
@ -309,7 +309,7 @@ handle_msg({autoheal_finished, Winner}, not_healing, _Partitions) ->
send(Node, Msg) -> {?SERVER, Node} ! {autoheal_msg, Msg}.
abort(Down, Notify) ->
rabbit_log:info("Autoheal: aborting - ~p down~n", [Down]),
rabbit_log:info("Autoheal: aborting - ~p down", [Down]),
%% Make sure any nodes waiting for us start - it won't necessarily
%% heal the partition but at least they won't get stuck.
%% If we are executing this, we are not stopping. Thus, don't wait
@ -354,15 +354,14 @@ wait_for_supervisors(Monitors) ->
60000 ->
AliveLosers = [Node || {_, Node} <- pmon:monitored(Monitors)],
rabbit_log:info("Autoheal: mnesia in nodes ~p is still up, sending "
"winner notification again to these ~n", [AliveLosers]),
"winner notification again to these ", [AliveLosers]),
[send(L, {winner_is, node()}) || L <- AliveLosers],
wait_for_mnesia_shutdown(AliveLosers)
end
end.
restart_loser(State, Winner) ->
rabbit_log:warning(
"Autoheal: we were selected to restart; winner is ~p~n", [Winner]),
rabbit_log:warning("Autoheal: we were selected to restart; winner is ~p", [Winner]),
NextStateTimeout = application:get_env(rabbit, autoheal_state_transition_timeout, 60000),
rabbit_node_monitor:run_outside_applications(
fun () ->

View File

@ -948,7 +948,7 @@ handle_exception(Reason, State = #ch{cfg = #conf{protocol = Protocol,
{Channel, CloseMethod} ->
rabbit_log_channel:error(
"Channel error on connection ~p (~s, vhost: '~s',"
" user: '~s'), channel ~p:~n~s~n",
" user: '~s'), channel ~p:~n~s",
[ConnPid, ConnName, VHost, User#user.username,
Channel, format_soft_error(Reason)]),
ok = rabbit_writer:send_command(WriterPid, CloseMethod),

View File

@ -53,8 +53,7 @@ check_no_overlap1(Sets) ->
case sets:size(Is) of
0 -> ok;
_ ->
internal_error("Interceptor: more than one "
"module handles ~p~n", [Is])
internal_error("Interceptor: more than one module handles ~p", [Is])
end,
sets:union(Set, Union)
end,

View File

@ -117,7 +117,7 @@ handle_cast({user_deleted, Details}) ->
ok;
handle_cast({node_deleted, Details}) ->
Node = pget(node, Details),
rabbit_log_connection:info(
rabbit_log_channel:info(
"Node '~s' was removed from the cluster, deleting"
" its channel tracking tables...", [Node]),
delete_tracked_channels_table_for_node(Node),

View File

@ -437,7 +437,7 @@ recover_durable_queues(QueuesAndRecoveryTerms) ->
gen_server2:mcall(
[{rabbit_amqqueue_sup_sup:start_queue_process(node(), Q, recovery),
{init, {self(), Terms}}} || {Q, Terms} <- QueuesAndRecoveryTerms]),
[rabbit_log:error("Queue ~p failed to initialise: ~p~n",
[rabbit_log:error("Queue ~p failed to initialise: ~p",
[Pid, Error]) || {Pid, Error} <- Failures],
[Q || {_, {new, Q}} <- Results].

View File

@ -140,7 +140,7 @@ update_x_death_header(Info, Headers) ->
[{table, rabbit_misc:sort_field_table(Info1)} | Others]);
{<<"x-death">>, InvalidType, Header} ->
rabbit_log:warning("Message has invalid x-death header (type: ~p)."
" Resetting header ~p~n",
" Resetting header ~p",
[InvalidType, Header]),
%% if x-death is something other than an array (list)
%% then we reset it: this happens when some clients consume
@ -247,7 +247,7 @@ log_cycle_once(Queues) ->
true -> ok;
undefined -> rabbit_log:warning(
"Message dropped. Dead-letter queues cycle detected" ++
": ~p~nThis cycle will NOT be reported again.~n",
": ~p~nThis cycle will NOT be reported again.",
[Queues]),
put(Key, true)
end.

View File

@ -199,7 +199,7 @@ dir() -> rabbit_mnesia:dir().
set_disk_limits(State, Limit0) ->
Limit = interpret_limit(Limit0),
State1 = State#state { limit = Limit },
rabbit_log:info("Disk free limit set to ~pMB~n",
rabbit_log:info("Disk free limit set to ~pMB",
[trunc(Limit / 1000000)]),
internal_update(State1).
@ -283,7 +283,7 @@ interpret_limit(Absolute) ->
emit_update_info(StateStr, CurrentFree, Limit) ->
rabbit_log:info(
"Free disk space is ~s. Free bytes: ~p. Limit: ~p~n",
"Free disk space is ~s. Free bytes: ~p. Limit: ~p",
[StateStr, CurrentFree, Limit]).
start_timer(State) ->
@ -306,11 +306,11 @@ enable(#state{dir = Dir, interval = Interval, limit = Limit, retries = Retries}
case {catch get_disk_free(Dir),
vm_memory_monitor:get_total_memory()} of
{N1, N2} when is_integer(N1), is_integer(N2) ->
rabbit_log:info("Enabling free disk space monitoring~n", []),
rabbit_log:info("Enabling free disk space monitoring", []),
start_timer(set_disk_limits(State, Limit));
Err ->
rabbit_log:info("Free disk space monitor encountered an error "
"(e.g. failed to parse output from OS tools): ~p, retries left: ~b~n",
"(e.g. failed to parse output from OS tools): ~p, retries left: ~b",
[Err, Retries]),
erlang:send_after(Interval, self(), try_enable),
State#state{enabled = false}

View File

@ -86,10 +86,10 @@ check_epmd(State = #state{mod = Mod,
{ok, State#state{port = Port1}}.
handle_port_please(init, noport, Me, Port) ->
rabbit_log:info("epmd does not know us, re-registering as ~s~n", [Me]),
rabbit_log:info("epmd does not know us, re-registering as ~s", [Me]),
{ok, Port};
handle_port_please(check, noport, Me, Port) ->
rabbit_log:warning("epmd does not know us, re-registering ~s at port ~b~n", [Me, Port]),
rabbit_log:warning("epmd does not know us, re-registering ~s at port ~b", [Me, Port]),
{ok, Port};
handle_port_please(_, closed, _Me, Port) ->
rabbit_log:error("epmd monitor failed to retrieve our port from epmd: closed"),

View File

@ -574,7 +574,7 @@ peek_serial(XName, LockType) ->
end.
invalid_module(T) ->
rabbit_log:warning("Could not find exchange type ~s.~n", [T]),
rabbit_log:warning("Could not find exchange type ~s.", [T]),
put({xtype_to_module, T}, rabbit_exchange_type_invalid),
rabbit_exchange_type_invalid.

View File

@ -682,7 +682,6 @@ maybe_add_action(Action, Acc, State) ->
{[Action | Acc], State}.
do_resends(From, To, State) when From =< To ->
% ?INFO("rabbit_fifo_client: doing resends From ~w To ~w~n", [From, To]),
lists:foldl(fun resend/2, State, lists:seq(From, To));
do_resends(_, _, State) ->
State.

View File

@ -1,719 +0,0 @@
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2007-2021 VMware, Inc. or its affiliates. All rights reserved.
%%
-module(rabbit_lager).
-include_lib("rabbit_common/include/rabbit_log.hrl").
%% API
-export([start_logger/0, log_locations/0, fold_sinks/2,
broker_is_started/0, set_log_level/1]).
%% For test purposes
-export([configure_lager/0]).
-export_type([log_location/0]).
-type log_location() :: string().
%% @doc Restarts Lager (and syslog) with the RabbitMQ-generated
%% configuration, then verifies that every configured log file can be
%% written to.
%%
%% A "Log file opened with Lager" line is emitted on every sink that
%% has at least one handler, so each log file starts with a marker.
start_logger() ->
    %% Remove the OTP 21+ default 'logger' handler first so Lager is
    %% the only thing writing to the console.
    ok = maybe_remove_logger_handler(),
    %% Stop, reconfigure, restart: the configuration computed by
    %% ensure_lager_configured/0 is only picked up when the lager
    %% application boots.
    ok = app_utils:stop_applications([lager, syslog]),
    ok = ensure_lager_configured(),
    ok = app_utils:start_applications([lager]),
    fold_sinks(
      fun
          (_, [], Acc) ->
              %% Sink with no file handlers: nothing to announce.
              Acc;
          (SinkName, _, Acc) ->
              lager:log(SinkName, info, self(),
                        "Log file opened with Lager", []),
              Acc
      end, ok),
    ensure_log_working().
%% @doc Restores the error_logger high-water mark (HWM) once the
%% broker has finished booting.
%%
%% During boot the HWM is raised (see the error_logger_hwm handling in
%% configure_lager/0) so the burst of startup messages is not dropped;
%% afterwards the original value is reinstated on every running Lager
%% handler. When the most verbose configured level is 'debug', the
%% original value is multiplied by 100 to avoid throttling the
%% verbose output.
broker_is_started() ->
    {ok, HwmCurrent} = application:get_env(lager, error_logger_hwm),
    {ok, HwmOrig0} = application:get_env(lager, error_logger_hwm_original),
    HwmOrig = case get_most_verbose_log_level() of
                  debug -> HwmOrig0 * 100;
                  _ -> HwmOrig0
              end,
    case HwmOrig =:= HwmCurrent of
        false ->
            %% Update both the application environment and the
            %% handlers that are already installed.
            ok = application:set_env(lager, error_logger_hwm, HwmOrig),
            Handlers = gen_event:which_handlers(lager_event),
            lists:foreach(fun(Handler) ->
                              lager:set_loghwm(Handler, HwmOrig)
                          end, Handlers),
            ok;
        _ ->
            ok
    end.
%% @doc Changes the log level of all Lager sinks and their handlers at
%% runtime. Returns ok, or {error, {invalid_log_level, Level}} when
%% Level is not one of the levels known to Lager.
set_log_level(Level) ->
    IsValidLevel = lists:member(Level, lager_util:levels()),
    set_log_level(IsValidLevel, Level).

%% Dispatch on the validity of the requested level. For 'debug' the
%% error_logger high-water mark is raised 100x so the verbose output
%% is not throttled (mirrors broker_is_started/0).
set_log_level(true, Level) ->
    SinksAndHandlers = [{Sink, gen_event:which_handlers(Sink)} ||
                           Sink <- lager:list_all_sinks()],
    DefaultHwm = application:get_env(lager, error_logger_hwm_original, 50),
    Hwm = case Level of
              debug -> DefaultHwm * 100;
              _ -> DefaultHwm
          end,
    application:set_env(lager, error_logger_hwm, Hwm),
    set_sink_log_level(SinksAndHandlers, Level, Hwm);
set_log_level(_, Level) ->
    {error, {invalid_log_level, Level}}.
%% Walks every {Sink, Handlers} pair and applies the new level and
%% high-water mark to each one.
set_sink_log_level([], _Level, _Hwm) ->
    ok;
set_sink_log_level([{Sink, Handlers}|Rest], Level, Hwm) ->
    set_sink_handler_log_level(Sink, Handlers, Level, Hwm),
    set_sink_log_level(Rest, Level, Hwm).

%% Applies Level and Hwm to each handler of one sink. Handlers may be
%% registered as a bare module atom or as a {Module, Id} pair; any
%% other shape is skipped. Clause order matters: the guarded atom
%% clause lets {Module, Id} tuples fall through to the second clause,
%% and everything else falls through to the final no-op clause.
set_sink_handler_log_level(_Sink, [], _Level, _Hwm) ->
    ok;
set_sink_handler_log_level(Sink, [Handler|Rest], Level, Hwm)
  when is_atom(Handler) andalso is_integer(Hwm) ->
    lager:set_loghwm(Sink, Handler, undefined, Hwm),
    ok = lager:set_loglevel(Sink, Handler, undefined, Level),
    set_sink_handler_log_level(Sink, Rest, Level, Hwm);
set_sink_handler_log_level(Sink, [{Handler, Id}|Rest], Level, Hwm) ->
    lager:set_loghwm(Sink, Handler, Id, Hwm),
    ok = lager:set_loglevel(Sink, Handler, Id, Level),
    set_sink_handler_log_level(Sink, Rest, Level, Hwm);
set_sink_handler_log_level(Sink, [_|Rest], Level, Hwm) ->
    set_sink_handler_log_level(Sink, Rest, Level, Hwm).
%% @doc Returns the sorted list of all log locations configured across
%% the default handlers and every extra sink. File backends contribute
%% their file name, console backends contribute "<stdout>"; duplicates
%% are reported once.
log_locations() ->
    ensure_lager_configured(),
    DefaultHandlers = application:get_env(lager, handlers, []),
    ExtraHandlers = [proplists:get_value(handlers, Props, [])
                     || {_, Props} <- application:get_env(lager, extra_sinks, [])],
    lists:sort(log_locations1([DefaultHandlers | ExtraHandlers], [])).

%% Accumulates locations across a list of handler lists.
log_locations1(HandlerLists, Locations) ->
    lists:foldl(fun log_locations2/2, Locations, HandlerLists).

%% Accumulates the locations contributed by one handler list; handlers
%% that are neither file nor console backends contribute nothing.
log_locations2(Handlers, Locations) ->
    lists:foldl(
      fun({lager_file_backend, Settings}, Acc) ->
              record_location(lager_file_name1(Settings), Acc);
         ({lager_console_backend, _}, Acc) ->
              record_location("<stdout>", Acc);
         (_, Acc) ->
              Acc
      end, Locations, Handlers).

%% Prepends Location to Acc unless it is already present.
record_location(Location, Acc) ->
    case lists:member(Location, Acc) of
        true  -> Acc;
        false -> [Location | Acc]
    end.
%% @doc Folds Fun(SinkName, FileNames, Acc) over every known Lager
%% sink, where FileNames is the list of files backing that sink's file
%% handlers (empty for sinks that have none — the callback in
%% start_logger/0 explicitly handles the [] case).
%%
%% Fix: the non-file-handler clause had its branches inverted. It
%% stored an empty list when the sink was ALREADY known (wiping the
%% file names collected so far, depending on handler order) and did
%% nothing when it was unknown, so a sink with only non-file handlers
%% never reached Fun at all. Now an unknown sink is registered with []
%% and an existing entry is left untouched.
fold_sinks(Fun, Acc) ->
    Handlers = lager_config:global_get(handlers),
    %% NB: 'dict' is a legacy module and the order of dict:to_list/1
    %% is unspecified, so Fun must not rely on sink order.
    Sinks = dict:to_list(lists:foldl(
        fun
            ({{lager_file_backend, F}, _, S}, Dict) ->
                dict:append(S, F, Dict);
            ({_, _, S}, Dict) ->
                case dict:is_key(S, Dict) of
                    true  -> Dict;
                    false -> dict:store(S, [], Dict)
                end
        end,
        dict:new(), Handlers)),
    fold_sinks(Sinks, Fun, Acc).
%% Threads Acc through Fun for each prepared {SinkName, FileNames}
%% pair, in list order.
fold_sinks(Sinks, Fun, Acc) ->
    lists:foldl(
      fun({SinkName, FileNames}, AccIn) ->
              Fun(SinkName, FileNames, AccIn)
      end, Acc, Sinks).
%% @doc Verifies that every configured log file — for the default
%% handlers and for every expected extra sink — exists and is
%% readable. Throws {error, {cannot_log_to_file, ...}} otherwise.
ensure_log_working() ->
    {ok, Handlers} = application:get_env(lager, handlers),
    [ ensure_lager_handler_file_exist(Handler)
      || Handler <- Handlers ],
    Sinks = application:get_env(lager, extra_sinks, []),
    ensure_extra_sinks_working(Sinks, list_expected_sinks()).

%% Checks that each expected sink is configured and that its file
%% handlers point at usable files. A missing sink is a configuration
%% error and is reported by throwing.
ensure_extra_sinks_working(Sinks, [SinkName | Rest]) ->
    case proplists:get_value(SinkName, Sinks) of
        undefined -> throw({error, {cannot_log_to_file, unknown,
                                    rabbit_log_lager_event_sink_undefined}});
        Sink ->
            SinkHandlers = proplists:get_value(handlers, Sink, []),
            [ ensure_lager_handler_file_exist(Handler)
              || Handler <- SinkHandlers ]
    end,
    ensure_extra_sinks_working(Sinks, Rest);
ensure_extra_sinks_working(_Sinks, []) ->
    ok.

%% File handlers must point at an existing file; handlers without a
%% file (lager_file_name/1 returns false) need no check.
ensure_lager_handler_file_exist(Handler) ->
    case lager_file_name(Handler) of
        false -> ok;
        FileName -> ensure_logfile_exist(FileName)
    end.
%% Extracts the file name from a handler spec; false for any handler
%% that is not a file backend.
lager_file_name({lager_file_backend, Settings}) ->
    lager_file_name1(Settings);
lager_file_name(_) ->
    false.

%% Supports the settings shapes accepted by lager_file_backend: a
%% proplist (modern form), and the legacy 2- and 5-tuples whose first
%% element is the file name. Clause order matters; anything else is a
%% broken configuration and is reported by throwing.
lager_file_name1(Settings) when is_list(Settings) ->
    {file, FileName} = proplists:lookup(file, Settings),
    FileName;
lager_file_name1({FileName, _}) -> FileName;
lager_file_name1({FileName, _, _, _, _}) -> FileName;
lager_file_name1(_) ->
    throw({error, {cannot_log_to_file, unknown,
                   lager_file_backend_config_invalid}}).

%% Throws unless LogFile exists (read_file_info succeeds); the thrown
%% term carries the file name and the posix error.
ensure_logfile_exist(LogFile) ->
    case rabbit_file:read_file_info(LogFile) of
        {ok,_} -> ok;
        {error, Err} -> throw({error, {cannot_log_to_file, LogFile, Err}})
    end.
%% Configures Lager unless a previous run already did so.
ensure_lager_configured() ->
    case lager_configured() of
        true  -> ok;
        false -> configure_lager()
    end.

%% Lager counts as configured when it has handlers, all expected
%% sinks exist, and the syslog error_logger forwarding setting has
%% been decided (see configure_syslog_error_logger/0).
lager_configured() ->
    Sinks = lager:list_all_sinks(),
    HasHandlers = application:get_env(lager, handlers) =/= undefined,
    HasAllSinks = lists:all(fun(S) -> lists:member(S, Sinks) end,
                            list_expected_sinks()),
    SyslogDecided = application:get_env(syslog, syslog_error_logger) =/= undefined,
    HasHandlers andalso HasAllSinks andalso SyslogDecided.
%% @doc Computes the complete Lager configuration from the 'rabbit'
%% application environment and the prelaunch context, and writes it
%% into the 'lager' (and 'syslog') application environments.
%%
%% Mutates application env throughout; statement order matters because
%% later steps read values written by earlier ones. Takes effect when
%% the lager application (re)starts.
configure_lager() ->
    ok = app_utils:load_applications([lager]),
    %% Turn off reformatting for error_logger messages
    case application:get_env(lager, error_logger_redirect) of
        undefined -> application:set_env(lager, error_logger_redirect, true);
        _         -> ok
    end,
    case application:get_env(lager, error_logger_format_raw) of
        undefined -> application:set_env(lager, error_logger_format_raw, true);
        _         -> ok
    end,
    %% Setting env var to 'undefined' is different from not
    %% setting it at all, and lager is sensitive to this
    %% difference.
    case application:get_env(rabbit, lager_log_root) of
        {ok, Value} ->
            ok = application:set_env(lager, log_root, Value);
        _ ->
            ok
    end,
    case application:get_env(lager, colored) of
        undefined ->
            UseColor = rabbit_prelaunch_early_logging:use_colored_logging(),
            application:set_env(lager, colored, UseColor);
        _ ->
            ok
    end,
    %% Set rabbit.log config variable based on environment.
    prepare_rabbit_log_config(),
    %% Configure syslog library.
    ok = configure_syslog_error_logger(),
    %% At this point we should have rabbit.log application variable
    %% configured to generate RabbitMQ log handlers.
    GeneratedHandlers = generate_lager_handlers(),
    %% If there are lager handlers configured,
    %% both lager and generate RabbitMQ handlers are used.
    %% This is because it's hard to decide clear preference rules.
    %% RabbitMQ handlers can be set to [] to use only lager handlers.
    Handlers = case application:get_env(lager, handlers, undefined) of
                   undefined -> GeneratedHandlers;
                   [] -> GeneratedHandlers;
                   LagerHandlers ->
                       %% Remove handlers generated in previous starts
                       FormerRabbitHandlers = application:get_env(lager, rabbit_handlers, []),
                       GeneratedHandlers ++ remove_rabbit_handlers(LagerHandlers,
                                                                   FormerRabbitHandlers)
               end,
    ok = application:set_env(lager, handlers, Handlers),
    %% Remember which handlers we generated so the next start can tell
    %% them apart from user-configured ones.
    ok = application:set_env(lager, rabbit_handlers, GeneratedHandlers),
    %% Setup extra sink/handlers. If they are not configured, redirect
    %% messages to the default sink. To know the list of expected extra
    %% sinks, we look at the 'lager_extra_sinks' compilation option.
    LogConfig = application:get_env(rabbit, log, []),
    LogLevels = application:get_env(rabbit, log_levels, []),
    Categories = proplists:get_value(categories, LogConfig, []),
    CategoriesConfig0 = case {Categories, LogLevels} of
        {[], []} -> [];
        {[], LogLevels} ->
            io:format("Using deprecated config parameter 'log_levels'. "
                      "Please update your configuration file according to "
                      "https://rabbitmq.com/logging.html"),
            lists:map(fun({Name, Level}) -> {Name, [{level, Level}]} end,
                      LogLevels);
        {Categories, []} ->
            Categories;
        {Categories, _} ->
            io:format("Using the deprecated config parameter 'rabbit.log_levels' together "
                      "with a new parameter for log categories."
                      " 'rabbit.log_levels' will be ignored. Please remove it from the config. More at "
                      "https://rabbitmq.com/logging.html"),
            Categories
    end,
    LogLevelsFromContext = case rabbit_prelaunch:get_context() of
                               #{log_levels := LL} -> LL;
                               _ -> undefined
                           end,
    %% Per-category levels from the prelaunch context override the
    %% level inside the matching category entry. 'global' and 'color'
    %% are not category names and are skipped. (CategoryS comes from
    %% the node's own configuration, not untrusted input, so
    %% list_to_atom/1 is acceptable here.)
    Fun = fun
              (global, _, CC) ->
                  CC;
              (color, _, CC) ->
                  CC;
              (CategoryS, LogLevel, CC) ->
                  Category = list_to_atom(CategoryS),
                  CCEntry = proplists:get_value(
                              Category, CC, []),
                  CCEntry1 = lists:ukeymerge(
                               1,
                               [{level, LogLevel}],
                               lists:ukeysort(1, CCEntry)),
                  lists:keystore(
                    Category, 1, CC, {Category, CCEntry1})
          end,
    CategoriesConfig = case LogLevelsFromContext of
                           undefined ->
                               CategoriesConfig0;
                           _ ->
                               maps:fold(Fun,
                                         CategoriesConfig0,
                                         LogLevelsFromContext)
                       end,
    SinkConfigs = lists:map(
                    fun({Name, Config}) ->
                            {rabbit_log:make_internal_sink_name(Name), Config}
                    end,
                    CategoriesConfig),
    LagerSinks = application:get_env(lager, extra_sinks, []),
    GeneratedSinks = generate_lager_sinks(
                       [error_logger_lager_event | list_expected_sinks()],
                       SinkConfigs),
    Sinks = merge_lager_sink_handlers(LagerSinks, GeneratedSinks, []),
    ok = application:set_env(lager, extra_sinks, Sinks),
    %% Raise the error_logger high-water mark for boot (at least 1000
    %% messages/sec) and remember the configured value so
    %% broker_is_started/0 can restore it afterwards.
    case application:get_env(lager, error_logger_hwm) of
        undefined ->
            ok = application:set_env(lager, error_logger_hwm, 1000),
            % NB: 50 is the default value in lager.app.src
            ok = application:set_env(lager, error_logger_hwm_original, 50);
        {ok, Val} when is_integer(Val) andalso Val < 1000 ->
            ok = application:set_env(lager, error_logger_hwm, 1000),
            ok = application:set_env(lager, error_logger_hwm_original, Val);
        {ok, Val} when is_integer(Val) ->
            ok = application:set_env(lager, error_logger_hwm_original, Val),
            ok
    end,
    ok.
%% Disables error_logger forwarding to syslog unless the user set it
%% explicitly. application:get_env/2 only returns 'undefined' when the
%% key is absent, so an explicit setting is always preserved.
configure_syslog_error_logger() ->
    case application:get_env(syslog, syslog_error_logger) of
        {ok, _} ->
            ok;
        undefined ->
            application:set_env(syslog, syslog_error_logger, false)
    end.
%% Drops from Handlers every entry that appears in FormerHandlers,
%% i.e. strips the handlers RabbitMQ generated on a previous start.
remove_rabbit_handlers(Handlers, FormerHandlers) ->
    [Handler || Handler <- Handlers,
                not lists:member(Handler, FormerHandlers)].
%% Builds the Lager handler specs from the rabbit.log configuration.
%% The per-category settings are excluded here; they become extra
%% sinks instead (see generate_lager_sinks/2).
generate_lager_handlers() ->
    LogConfig = application:get_env(rabbit, log, []),
    generate_lager_handlers(lists:keydelete(categories, 1, LogConfig)).
%% Expands each output entry of the rabbit.log configuration into zero
%% or more Lager handler specs. A 'file' entry is dropped when no file
%% name is configured; console/syslog/exchange entries are dropped
%% unless they carry {enabled, true}.
generate_lager_handlers(LogHandlersConfig) ->
    lists:flatmap(
    fun
        ({file, HandlerConfig}) ->
            case proplists:get_value(file, HandlerConfig, false) of
                false -> [];
                FileName when is_list(FileName) ->
                    Backend = lager_backend(file),
                    generate_handler(Backend, HandlerConfig)
            end;
        ({Other, HandlerConfig}) when
              Other =:= console; Other =:= syslog; Other =:= exchange ->
            case proplists:get_value(enabled, HandlerConfig, false) of
                false -> [];
                true  ->
                    Backend = lager_backend(Other),
                    %% 'enabled' is our flag, not a backend setting:
                    %% strip it before handing the config over.
                    generate_handler(Backend,
                                     lists:keydelete(enabled, 1, HandlerConfig))
            end
    end,
    LogHandlersConfig).
%% Maps an output kind from the rabbit.log configuration to the Lager
%% backend module implementing it.
lager_backend(file) -> lager_file_backend;
lager_backend(console) -> lager_console_backend;
lager_backend(syslog) -> syslog_lager_backend;
lager_backend(exchange) -> lager_exchange_backend.
%% Syslog backend is using an old API for configuration and
%% does not support proplists.
generate_handler(syslog_lager_backend=Backend, HandlerConfig) ->
    %% The default log level is set to `debug` because the actual
    %% filtering is made at the sink level. We want to accept all
    %% messages here.
    DefaultConfigVal = debug,
    Level = proplists:get_value(level, HandlerConfig, DefaultConfigVal),
    ok = configure_handler_backend(Backend),
    %% Positional arguments: level, then {} which leaves the backend's
    %% remaining settings at their defaults (presumably syslog
    %% identity/facility — confirm against syslog_lager_backend),
    %% then the formatter.
    [{Backend,
      [Level,
       {},
       {lager_default_formatter, syslog_formatter_config()}]}];
generate_handler(Backend, HandlerConfig) ->
    %% User-supplied settings win over the backend defaults: on a key
    %% collision, ukeymerge/3 keeps the entry from its first list.
    [{Backend,
      lists:ukeymerge(1, lists:ukeysort(1, HandlerConfig),
                      lists:ukeysort(1, default_handler_config(Backend)))}].
%% The syslog backend needs its application (and dependencies) running
%% before Lager installs the handler; no other backend requires any
%% preparation.
configure_handler_backend(syslog_lager_backend = _Backend) ->
    {ok, _Started} = application:ensure_all_started(syslog),
    ok;
configure_handler_backend(_Backend) ->
    ok.
%% Default settings merged under the user's configuration by
%% generate_handler/2, per backend.
default_handler_config(lager_console_backend) ->
    %% The default log level is set to `debug` because the actual
    %% filtering is made at the sink level. We want to accept all
    %% messages here.
    DefaultConfigVal = debug,
    [{level, DefaultConfigVal},
     {formatter_config, default_config_value({formatter_config, console})}];
default_handler_config(lager_exchange_backend) ->
    %% The default log level is set to `debug` because the actual
    %% filtering is made at the sink level. We want to accept all
    %% messages here.
    DefaultConfigVal = debug,
    [{level, DefaultConfigVal},
     {formatter_config, default_config_value({formatter_config, exchange})}];
default_handler_config(lager_file_backend) ->
    %% The default log level is set to `debug` because the actual
    %% filtering is made at the sink level. We want to accept all
    %% messages here.
    DefaultConfigVal = debug,
    [{level, DefaultConfigVal},
     {formatter_config, default_config_value({formatter_config, file})},
     %% date "" / size 0: values passed straight to lager_file_backend
     %% (presumably disabling date-based rotation and size limits —
     %% confirm against the lager_file_backend documentation).
     {date, ""},
     {size, 0}].
%% Derived defaults shared by several call sites.
%%
%% 'level': the most verbose level found across all rabbit.log
%% entries, falling back to 'info' when none specifies one.
%% {formatter_config, Kind}: the lager_default_formatter spec for the
%% given output kind.
default_config_value(level) ->
    LogConfig = application:get_env(rabbit, log, []),
    FoldFun = fun
                  ({_, Cfg}, LL) when is_list(Cfg) ->
                      NewLL = proplists:get_value(level, Cfg, LL),
                      case LL of
                          undefined ->
                              NewLL;
                          _ ->
                              %% Higher level_to_num means more verbose
                              %% in Lager's numbering; keep the most
                              %% verbose of the two.
                              MoreVerbose = lager_util:level_to_num(NewLL) > lager_util:level_to_num(LL),
                              case MoreVerbose of
                                  true -> NewLL;
                                  false -> LL
                              end
                      end;
                  (_, LL) ->
                      LL
              end,
    FoundLL = lists:foldl(FoldFun, undefined, LogConfig),
    case FoundLL of
        undefined -> info;
        _ -> FoundLL
    end;
default_config_value({formatter_config, console}) ->
    %% When colors are on, reset the color at end of line before the
    %% CRLF.
    EOL = case application:get_env(lager, colored) of
              {ok, true} -> "\e[0m\r\n";
              _ -> "\r\n"
          end,
    [date, " ", time, " ", color, "[", severity, "] ",
     {pid, ""},
     " ", message, EOL];
default_config_value({formatter_config, _}) ->
    [date, " ", time, " ", color, "[", severity, "] ",
     {pid, ""},
     " ", message, "\n"].
%% Formatter for the syslog backend. Unlike the formatters from
%% default_config_value/1 it omits date/time — presumably because
%% syslog stamps messages itself (confirm against syslog output).
syslog_formatter_config() ->
    [color, "[", severity, "] ",
     {pid, ""},
     " ", message, "\n"].
%% @doc Translates the environment-derived settings
%% (rabbit.lager_default_file / rabbit.lager_upgrade_file) into the
%% rabbit.log configuration read by the handler/sink generators.
prepare_rabbit_log_config() ->
    %% If RABBIT_LOGS is not set, we should ignore it.
    DefaultFile = application:get_env(rabbit, lager_default_file, undefined),
    %% If RABBIT_UPGRADE_LOGS is not set, we should ignore it.
    UpgradeFile = application:get_env(rabbit, lager_upgrade_file, undefined),
    case DefaultFile of
        undefined -> ok;
        false ->
            set_env_default_log_disabled();
        tty ->
            set_env_default_log_console();
        FileName when is_list(FileName) ->
            case rabbit_prelaunch:get_context() of
                %% The user explicitly sets $RABBITMQ_LOGS;
                %% we should override a file location even
                %% if it's set in rabbitmq.config
                #{var_origins := #{main_log_file := environment}} ->
                    set_env_default_log_file(FileName, override);
                _ ->
                    set_env_default_log_file(FileName, keep)
            end
    end,
    %% Upgrade log file never overrides the value set in rabbitmq.config
    case UpgradeFile of
        %% No special env for upgrade logs - redirect to the default sink
        undefined -> ok;
        %% Redirect logs to default output. NB: DefaultFile is already
        %% bound, so this clause matches only when the upgrade file
        %% EQUALS the default file.
        DefaultFile -> ok;
        UpgradeFileName when is_list(UpgradeFileName) ->
            set_env_upgrade_log_file(UpgradeFileName)
    end.
%% Clears the rabbit.log configuration entirely so no handlers or
%% sinks are generated.
set_env_default_log_disabled() ->
    %% Disabling all the logs.
    ok = application:set_env(rabbit, log, []).
%% Forces console logging: enables the console entry in rabbit.log
%% and removes any file entry so output goes to the terminal only.
set_env_default_log_console() ->
    LogConfig = application:get_env(rabbit, log, []),
    ConsoleConfig = proplists:get_value(console, LogConfig, []),
    LogConfigConsole =
        lists:keystore(console, 1, LogConfig,
                       {console, lists:keystore(enabled, 1, ConsoleConfig,
                                                {enabled, true})}),
    %% Remove the file handler - disable logging to file
    LogConfigConsoleNoFile = lists:keydelete(file, 1, LogConfigConsole),
    ok = application:set_env(rabbit, log, LogConfigConsoleNoFile).
%% Points the default file handler in rabbit.log at FileName. When the
%% configuration already names a file, Override decides who wins:
%% 'override' (used when $RABBITMQ_LOGS was set explicitly) replaces
%% it, 'keep' leaves the configured value untouched.
set_env_default_log_file(FileName, Override) ->
    LogConfig = application:get_env(rabbit, log, []),
    FileConfig = proplists:get_value(file, LogConfig, []),
    NewLogConfig = case proplists:get_value(file, FileConfig, undefined) of
                       undefined ->
                           %% No file configured yet: just store ours.
                           lists:keystore(file, 1, LogConfig,
                                          {file, lists:keystore(file, 1, FileConfig,
                                                                {file, FileName})});
                       _ConfiguredFileName ->
                           case Override of
                               override ->
                                   lists:keystore(
                                     file, 1, LogConfig,
                                     {file, lists:keystore(file, 1, FileConfig,
                                                           {file, FileName})});
                               keep ->
                                   LogConfig
                           end
                   end,
    ok = application:set_env(rabbit, log, NewLogConfig).
%% Routes the 'upgrade' log category to its own file, unless a file is
%% already configured (the configured value always wins).
set_env_upgrade_log_file(FileName) ->
    LogConfig = application:get_env(rabbit, log, []),
    SinksConfig = proplists:get_value(categories, LogConfig, []),
    UpgradeSinkConfig = proplists:get_value(upgrade, SinksConfig, []),
    %% NOTE(review): 'file' is looked up in the categories list itself,
    %% not in the upgrade category's config — so this would only find a
    %% category literally named 'file'. Looks suspicious; confirm
    %% whether UpgradeSinkConfig was intended here.
    FileConfig = proplists:get_value(file, SinksConfig, []),
    NewLogConfig = case proplists:get_value(file, FileConfig, undefined) of
                       undefined ->
                           lists:keystore(
                             categories, 1, LogConfig,
                             {categories,
                              lists:keystore(
                                upgrade, 1, SinksConfig,
                                {upgrade,
                                 lists:keystore(file, 1, UpgradeSinkConfig,
                                                {file, FileName})})});
                       %% No change. We don't want to override the configured value.
                       _File -> LogConfig
                   end,
    ok = application:set_env(rabbit, log, NewLogConfig).
%% @doc Builds one Lager sink spec per name in SinkNames from the
%% per-category SinkConfigs. A sink with no file of its own forwards
%% to the default sink; a sink with a file gets a copy of the default
%% handlers (file handler redirected to the sink's file, levels capped
%% at the sink's level). Each sink records its generated handlers
%% under 'rabbit_handlers' so later starts can identify them.
generate_lager_sinks(SinkNames, SinkConfigs) ->
    LogLevels = case rabbit_prelaunch:get_context() of
                    #{log_levels := LL} -> LL;
                    _ -> undefined
                end,
    DefaultLogLevel = case LogLevels of
                          #{global := LogLevel} ->
                              LogLevel;
                          _ ->
                              default_config_value(level)
                      end,
    lists:map(fun(SinkName) ->
        SinkConfig = proplists:get_value(SinkName, SinkConfigs, []),
        SinkHandlers = case proplists:get_value(file, SinkConfig, false) of
            %% If no file defined - forward everything to the default backend
            false ->
                ForwarderLevel = proplists:get_value(level,
                                                     SinkConfig,
                                                     DefaultLogLevel),
                [{lager_forwarder_backend,
                  [lager_util:make_internal_sink_name(lager), ForwarderLevel]}];
            %% If a file defined - add a file backend to handlers and remove all default file backends.
            File ->
                %% Use `debug` as a default handler to not override a handler level
                Level = proplists:get_value(level, SinkConfig, DefaultLogLevel),
                DefaultGeneratedHandlers = application:get_env(lager, rabbit_handlers, []),
                SinkFileHandlers = case proplists:get_value(lager_file_backend, DefaultGeneratedHandlers, undefined) of
                    undefined ->
                        %% Create a new file handler.
                        %% `info` is a default level here.
                        FileLevel = proplists:get_value(level, SinkConfig, DefaultLogLevel),
                        generate_lager_handlers([{file, [{file, File}, {level, FileLevel}]}]);
                    FileHandler ->
                        %% Replace a filename in the handler
                        FileHandlerChanges = case handler_level_more_verbose(FileHandler, Level) of
                            true -> [{file, File}, {level, Level}];
                            false -> [{file, File}]
                        end,
                        [{lager_file_backend,
                          lists:ukeymerge(1, FileHandlerChanges,
                                          lists:ukeysort(1, FileHandler))}]
                end,
                %% Remove all file handlers.
                AllLagerHandlers = application:get_env(lager, handlers, []),
                HandlersWithoutFile = lists:filter(
                                          fun({lager_file_backend, _}) -> false;
                                             ({_, _}) -> true
                                          end,
                                          AllLagerHandlers),
                %% Set level for handlers which are more verbose.
                %% We don't increase verbosity in sinks so it works like forwarder backend.
                HandlersWithoutFileWithLevel = lists:map(fun({Name, Handler}) ->
                    case handler_level_more_verbose(Handler, Level) of
                        true -> {Name, lists:keystore(level, 1, Handler, {level, Level})};
                        false -> {Name, Handler}
                    end
                end,
                HandlersWithoutFile),
                HandlersWithoutFileWithLevel ++ SinkFileHandlers
        end,
        {SinkName, [{handlers, SinkHandlers}, {rabbit_handlers, SinkHandlers}]}
    end,
    SinkNames).
%% True when Handler's configured level is chattier (numerically more
%% verbose) than Level; a handler without an explicit level uses the
%% globally derived default.
handler_level_more_verbose(Handler, Level) ->
    Configured = proplists:get_value(level, Handler, default_config_value(level)),
    ConfiguredNum = lager_util:level_to_num(Configured),
    RequestedNum = lager_util:level_to_num(Level),
    ConfiguredNum > RequestedNum.
%% Merges the user-configured Lager sinks with the generated ones.
%% A configured sink that matches a generated sink keeps its own
%% handlers (minus those we generated on a previous start) appended
%% after the freshly generated ones; generated sinks with no
%% configured counterpart are appended as-is at the end.
merge_lager_sink_handlers([{Name, Sink} | RestSinks], GeneratedSinks, Agg) ->
    %% rabbitmq/rabbitmq-server#2044.
    %% We have to take into account that a sink's
    %% handler backend may need additional configuration here.
    %% {rabbit_log_federation_lager_event, [
    %%     {handlers, [
    %%         {lager_forwarder_backend, [lager_event,inherit]},
    %%         {syslog_lager_backend, [debug]}
    %%     ]},
    %%     {rabbit_handlers, [
    %%         {lager_forwarder_backend, [lager_event,inherit]}
    %%     ]}
    %% ]}
    case lists:keytake(Name, 1, GeneratedSinks) of
        {value, {Name, GenSink}, RestGeneratedSinks} ->
            Handlers = proplists:get_value(handlers, Sink, []),
            GenHandlers = proplists:get_value(handlers, GenSink, []),
            FormerRabbitHandlers = proplists:get_value(rabbit_handlers, Sink, []),
            %% Remove handlers defined in previous starts
            ConfiguredHandlers = remove_rabbit_handlers(Handlers, FormerRabbitHandlers),
            NewHandlers = GenHandlers ++ ConfiguredHandlers,
            %% Start any backend application the merged handlers need
            %% (see configure_handler_backend/1).
            ok = maybe_configure_handler_backends(NewHandlers),
            MergedSink = lists:keystore(rabbit_handlers, 1,
                                        lists:keystore(handlers, 1, Sink,
                                                       {handlers, NewHandlers}),
                                        {rabbit_handlers, GenHandlers}),
            merge_lager_sink_handlers(
              RestSinks,
              RestGeneratedSinks,
              [{Name, MergedSink} | Agg]);
        false ->
            merge_lager_sink_handlers(
              RestSinks,
              GeneratedSinks,
              [{Name, Sink} | Agg])
    end;
merge_lager_sink_handlers([], GeneratedSinks, Agg) -> GeneratedSinks ++ Agg.
%% Ensures the backend application of every handler in the list is
%% started; per configure_handler_backend/1 only the syslog backend
%% actually needs this.
maybe_configure_handler_backends(Handlers) ->
    lists:foreach(
      fun({Backend, _Config}) ->
              ok = configure_handler_backend(Backend)
      end, Handlers).
%% The expected sink names come from the early-logging module (derived
%% from the 'lager_extra_sinks' compile option per the comment in
%% configure_lager/0); delegate so there is a single source of truth.
list_expected_sinks() ->
    rabbit_prelaunch_early_logging:list_expected_sinks().
%% Removes the default OTP 'logger' handler so console output is not
%% duplicated once Lager takes over. Tolerates OTP releases without
%% the logger module and CLI contexts where the default handler is
%% already gone.
%%
%% Fix: the catch-all clause matched only `Err:Reason`, silently
%% discarding the stacktrace and making unexpected failures here hard
%% to diagnose; it now captures and logs the stacktrace as well
%% (OTP 21+ `Class:Reason:Stacktrace` syntax, fine since this clause
%% can only run when the OTP 21.1+ logger module exists).
maybe_remove_logger_handler() ->
    M = logger,
    F = remove_handler,
    try
        ok = erlang:apply(M, F, [default])
    catch
        error:undef ->
            % OK since the logger module only exists in OTP 21.1 or later
            ok;
        error:{badmatch, {error, {not_found, default}}} ->
            % OK - this error happens when running a CLI command
            ok;
        Err:Reason:Stacktrace ->
            error_logger:error_msg("calling ~p:~p failed: ~p:~p~n~p~n",
                                   [M, F, Err, Reason, Stacktrace])
    end.
%% Scans every configured handler — the default ones plus those of all
%% extra sinks — and returns the most verbose level found. 'none' is
%% the least verbose value, so it serves as the fold's floor.
get_most_verbose_log_level() ->
    {ok, HandlersA} = application:get_env(lager, handlers),
    {ok, ExtraSinks} = application:get_env(lager, extra_sinks),
    %% The {handlers, H} generator pattern also filters: sink entries
    %% without a handlers key contribute nothing.
    HandlersB = lists:append(
                  [H || {_, Keys} <- ExtraSinks,
                        {handlers, H} <- Keys]),
    get_most_verbose_log_level(HandlersA ++ HandlersB,
                               lager_util:level_to_num(none)).
%% Finds the most verbose (highest-numbered) level among the given
%% handler specs, starting from the numeric floor MostVerbose, and
%% returns it as a level name. A handler without an explicit level
%% counts as 'info'.
get_most_verbose_log_level(Handlers, MostVerbose) ->
    MaxNum = lists:foldl(
               fun({_, Props}, Acc) ->
                       Level = proplists:get_value(level, Props, info),
                       erlang:max(Acc, lager_util:level_to_num(Level))
               end, MostVerbose, Handlers),
    lager_util:num_to_level(MaxNum).

120
deps/rabbit/src/rabbit_log_channel.erl vendored Normal file
View File

@ -0,0 +1,120 @@
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2021 VMware, Inc. or its affiliates. All rights reserved.
%%
%% @doc Compatibility module for the old Lager-based logging API.
%%
%% Callers written against the Lager-era `rabbit_log_channel' API keep
%% working: every function forwards to the standard OTP `logger' with
%% the channel log domain attached, so handlers can filter or route
%% channel events separately.
-module(rabbit_log_channel).

-export([debug/1, debug/2, debug/3,
         info/1, info/2, info/3,
         notice/1, notice/2, notice/3,
         warning/1, warning/2, warning/3,
         error/1, error/2, error/3,
         critical/1, critical/2, critical/3,
         alert/1, alert/2, alert/3,
         emergency/1, emergency/2, emergency/3,
         none/1, none/2, none/3]).

-include_lib("rabbit_common/include/logging.hrl").

%% We define error/2,3 ourselves; stop the compiler from resolving
%% those calls to erlang:error/2,3.
-compile({no_auto_import, [error/2, error/3]}).

%% Each severity comes in three arities: /1 logs a bare format string,
%% /2 adds format arguments, /3 additionally takes the pid (or a
%% proplist) recorded in the event metadata. All funnel into log/4.

-spec debug(string()) -> 'ok'.
debug(Format) -> debug(Format, []).

-spec debug(string(), [any()]) -> 'ok'.
debug(Format, Args) -> debug(self(), Format, Args).

-spec debug(pid() | [tuple()], string(), [any()]) -> 'ok'.
debug(Pid, Format, Args) -> log(debug, Pid, Format, Args).

-spec info(string()) -> 'ok'.
info(Format) -> info(Format, []).

-spec info(string(), [any()]) -> 'ok'.
info(Format, Args) -> info(self(), Format, Args).

-spec info(pid() | [tuple()], string(), [any()]) -> 'ok'.
info(Pid, Format, Args) -> log(info, Pid, Format, Args).

-spec notice(string()) -> 'ok'.
notice(Format) -> notice(Format, []).

-spec notice(string(), [any()]) -> 'ok'.
notice(Format, Args) -> notice(self(), Format, Args).

-spec notice(pid() | [tuple()], string(), [any()]) -> 'ok'.
notice(Pid, Format, Args) -> log(notice, Pid, Format, Args).

-spec warning(string()) -> 'ok'.
warning(Format) -> warning(Format, []).

-spec warning(string(), [any()]) -> 'ok'.
warning(Format, Args) -> warning(self(), Format, Args).

-spec warning(pid() | [tuple()], string(), [any()]) -> 'ok'.
warning(Pid, Format, Args) -> log(warning, Pid, Format, Args).

-spec error(string()) -> 'ok'.
error(Format) -> error(Format, []).

-spec error(string(), [any()]) -> 'ok'.
error(Format, Args) -> error(self(), Format, Args).

-spec error(pid() | [tuple()], string(), [any()]) -> 'ok'.
error(Pid, Format, Args) -> log(error, Pid, Format, Args).

-spec critical(string()) -> 'ok'.
critical(Format) -> critical(Format, []).

-spec critical(string(), [any()]) -> 'ok'.
critical(Format, Args) -> critical(self(), Format, Args).

-spec critical(pid() | [tuple()], string(), [any()]) -> 'ok'.
critical(Pid, Format, Args) -> log(critical, Pid, Format, Args).

-spec alert(string()) -> 'ok'.
alert(Format) -> alert(Format, []).

-spec alert(string(), [any()]) -> 'ok'.
alert(Format, Args) -> alert(self(), Format, Args).

-spec alert(pid() | [tuple()], string(), [any()]) -> 'ok'.
alert(Pid, Format, Args) -> log(alert, Pid, Format, Args).

-spec emergency(string()) -> 'ok'.
emergency(Format) -> emergency(Format, []).

-spec emergency(string(), [any()]) -> 'ok'.
emergency(Format, Args) -> emergency(self(), Format, Args).

-spec emergency(pid() | [tuple()], string(), [any()]) -> 'ok'.
emergency(Pid, Format, Args) -> log(emergency, Pid, Format, Args).

%% 'none' historically meant "logging disabled": discard everything.
-spec none(string()) -> 'ok'.
none(_Format) -> ok.

-spec none(string(), [any()]) -> 'ok'.
none(_Format, _Args) -> ok.

-spec none(pid() | [tuple()], string(), [any()]) -> 'ok'.
none(_Pid, _Format, _Args) -> ok.

%% Single funnel into the OTP logger. logger:log(Level, Format, Args,
%% Meta) is documented as equivalent to the logger:Level/3 convenience
%% functions, so behaviour is unchanged.
-spec log(logger:level(), pid() | [tuple()], string(), [any()]) -> 'ok'.
log(Level, Pid, Format, Args) ->
    logger:log(Level, Format, Args, #{pid => Pid,
                                      domain => ?RMQLOG_DOMAIN_CHAN}).

View File

@ -0,0 +1,120 @@
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2021 VMware, Inc. or its affiliates. All rights reserved.
%%
%% @doc Compatibility module for the old Lager-based logging API.
%%
%% Callers written against the Lager-era `rabbit_log_connection' API
%% keep working: every function forwards to the standard OTP `logger'
%% with the connection log domain attached, so handlers can filter or
%% route connection events separately.
-module(rabbit_log_connection).

-export([debug/1, debug/2, debug/3,
         info/1, info/2, info/3,
         notice/1, notice/2, notice/3,
         warning/1, warning/2, warning/3,
         error/1, error/2, error/3,
         critical/1, critical/2, critical/3,
         alert/1, alert/2, alert/3,
         emergency/1, emergency/2, emergency/3,
         none/1, none/2, none/3]).

-include_lib("rabbit_common/include/logging.hrl").

%% We define error/2,3 ourselves; stop the compiler from resolving
%% those calls to erlang:error/2,3.
-compile({no_auto_import, [error/2, error/3]}).

%% Each severity comes in three arities: /1 logs a bare format string,
%% /2 adds format arguments, /3 additionally takes the pid (or a
%% proplist) recorded in the event metadata. All funnel into log/4.

-spec debug(string()) -> 'ok'.
debug(Format) -> debug(Format, []).

-spec debug(string(), [any()]) -> 'ok'.
debug(Format, Args) -> debug(self(), Format, Args).

-spec debug(pid() | [tuple()], string(), [any()]) -> 'ok'.
debug(Pid, Format, Args) -> log(debug, Pid, Format, Args).

-spec info(string()) -> 'ok'.
info(Format) -> info(Format, []).

-spec info(string(), [any()]) -> 'ok'.
info(Format, Args) -> info(self(), Format, Args).

-spec info(pid() | [tuple()], string(), [any()]) -> 'ok'.
info(Pid, Format, Args) -> log(info, Pid, Format, Args).

-spec notice(string()) -> 'ok'.
notice(Format) -> notice(Format, []).

-spec notice(string(), [any()]) -> 'ok'.
notice(Format, Args) -> notice(self(), Format, Args).

-spec notice(pid() | [tuple()], string(), [any()]) -> 'ok'.
notice(Pid, Format, Args) -> log(notice, Pid, Format, Args).

-spec warning(string()) -> 'ok'.
warning(Format) -> warning(Format, []).

-spec warning(string(), [any()]) -> 'ok'.
warning(Format, Args) -> warning(self(), Format, Args).

-spec warning(pid() | [tuple()], string(), [any()]) -> 'ok'.
warning(Pid, Format, Args) -> log(warning, Pid, Format, Args).

-spec error(string()) -> 'ok'.
error(Format) -> error(Format, []).

-spec error(string(), [any()]) -> 'ok'.
error(Format, Args) -> error(self(), Format, Args).

-spec error(pid() | [tuple()], string(), [any()]) -> 'ok'.
error(Pid, Format, Args) -> log(error, Pid, Format, Args).

-spec critical(string()) -> 'ok'.
critical(Format) -> critical(Format, []).

-spec critical(string(), [any()]) -> 'ok'.
critical(Format, Args) -> critical(self(), Format, Args).

-spec critical(pid() | [tuple()], string(), [any()]) -> 'ok'.
critical(Pid, Format, Args) -> log(critical, Pid, Format, Args).

-spec alert(string()) -> 'ok'.
alert(Format) -> alert(Format, []).

-spec alert(string(), [any()]) -> 'ok'.
alert(Format, Args) -> alert(self(), Format, Args).

-spec alert(pid() | [tuple()], string(), [any()]) -> 'ok'.
alert(Pid, Format, Args) -> log(alert, Pid, Format, Args).

-spec emergency(string()) -> 'ok'.
emergency(Format) -> emergency(Format, []).

-spec emergency(string(), [any()]) -> 'ok'.
emergency(Format, Args) -> emergency(self(), Format, Args).

-spec emergency(pid() | [tuple()], string(), [any()]) -> 'ok'.
emergency(Pid, Format, Args) -> log(emergency, Pid, Format, Args).

%% 'none' historically meant "logging disabled": discard everything.
-spec none(string()) -> 'ok'.
none(_Format) -> ok.

-spec none(string(), [any()]) -> 'ok'.
none(_Format, _Args) -> ok.

-spec none(pid() | [tuple()], string(), [any()]) -> 'ok'.
none(_Pid, _Format, _Args) -> ok.

%% Single funnel into the OTP logger. logger:log(Level, Format, Args,
%% Meta) is documented as equivalent to the logger:Level/3 convenience
%% functions, so behaviour is unchanged.
-spec log(logger:level(), pid() | [tuple()], string(), [any()]) -> 'ok'.
log(Level, Pid, Format, Args) ->
    logger:log(Level, Format, Args, #{pid => Pid,
                                      domain => ?RMQLOG_DOMAIN_CONN}).

View File

@ -0,0 +1,120 @@
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2021 VMware, Inc. or its affiliates. All rights reserved.
%%
%% @doc Compatibility module for the old Lager-based logging API.
%%
%% Callers written against the Lager-era `rabbit_log_feature_flags'
%% API keep working: every function forwards to the standard OTP
%% `logger' with the feature-flags log domain attached, so handlers
%% can filter or route these events separately.
-module(rabbit_log_feature_flags).

-export([debug/1, debug/2, debug/3,
         info/1, info/2, info/3,
         notice/1, notice/2, notice/3,
         warning/1, warning/2, warning/3,
         error/1, error/2, error/3,
         critical/1, critical/2, critical/3,
         alert/1, alert/2, alert/3,
         emergency/1, emergency/2, emergency/3,
         none/1, none/2, none/3]).

-include_lib("rabbit_common/include/logging.hrl").

%% We define error/2,3 ourselves; stop the compiler from resolving
%% those calls to erlang:error/2,3.
-compile({no_auto_import, [error/2, error/3]}).

%% Each severity comes in three arities: /1 logs a bare format string,
%% /2 adds format arguments, /3 additionally takes the pid (or a
%% proplist) recorded in the event metadata. All funnel into log/4.

-spec debug(string()) -> 'ok'.
debug(Format) -> debug(Format, []).

-spec debug(string(), [any()]) -> 'ok'.
debug(Format, Args) -> debug(self(), Format, Args).

-spec debug(pid() | [tuple()], string(), [any()]) -> 'ok'.
debug(Pid, Format, Args) -> log(debug, Pid, Format, Args).

-spec info(string()) -> 'ok'.
info(Format) -> info(Format, []).

-spec info(string(), [any()]) -> 'ok'.
info(Format, Args) -> info(self(), Format, Args).

-spec info(pid() | [tuple()], string(), [any()]) -> 'ok'.
info(Pid, Format, Args) -> log(info, Pid, Format, Args).

-spec notice(string()) -> 'ok'.
notice(Format) -> notice(Format, []).

-spec notice(string(), [any()]) -> 'ok'.
notice(Format, Args) -> notice(self(), Format, Args).

-spec notice(pid() | [tuple()], string(), [any()]) -> 'ok'.
notice(Pid, Format, Args) -> log(notice, Pid, Format, Args).

-spec warning(string()) -> 'ok'.
warning(Format) -> warning(Format, []).

-spec warning(string(), [any()]) -> 'ok'.
warning(Format, Args) -> warning(self(), Format, Args).

-spec warning(pid() | [tuple()], string(), [any()]) -> 'ok'.
warning(Pid, Format, Args) -> log(warning, Pid, Format, Args).

-spec error(string()) -> 'ok'.
error(Format) -> error(Format, []).

-spec error(string(), [any()]) -> 'ok'.
error(Format, Args) -> error(self(), Format, Args).

-spec error(pid() | [tuple()], string(), [any()]) -> 'ok'.
error(Pid, Format, Args) -> log(error, Pid, Format, Args).

-spec critical(string()) -> 'ok'.
critical(Format) -> critical(Format, []).

-spec critical(string(), [any()]) -> 'ok'.
critical(Format, Args) -> critical(self(), Format, Args).

-spec critical(pid() | [tuple()], string(), [any()]) -> 'ok'.
critical(Pid, Format, Args) -> log(critical, Pid, Format, Args).

-spec alert(string()) -> 'ok'.
alert(Format) -> alert(Format, []).

-spec alert(string(), [any()]) -> 'ok'.
alert(Format, Args) -> alert(self(), Format, Args).

-spec alert(pid() | [tuple()], string(), [any()]) -> 'ok'.
alert(Pid, Format, Args) -> log(alert, Pid, Format, Args).

-spec emergency(string()) -> 'ok'.
emergency(Format) -> emergency(Format, []).

-spec emergency(string(), [any()]) -> 'ok'.
emergency(Format, Args) -> emergency(self(), Format, Args).

-spec emergency(pid() | [tuple()], string(), [any()]) -> 'ok'.
emergency(Pid, Format, Args) -> log(emergency, Pid, Format, Args).

%% 'none' historically meant "logging disabled": discard everything.
-spec none(string()) -> 'ok'.
none(_Format) -> ok.

-spec none(string(), [any()]) -> 'ok'.
none(_Format, _Args) -> ok.

-spec none(pid() | [tuple()], string(), [any()]) -> 'ok'.
none(_Pid, _Format, _Args) -> ok.

%% Single funnel into the OTP logger. logger:log(Level, Format, Args,
%% Meta) is documented as equivalent to the logger:Level/3 convenience
%% functions, so behaviour is unchanged.
-spec log(logger:level(), pid() | [tuple()], string(), [any()]) -> 'ok'.
log(Level, Pid, Format, Args) ->
    logger:log(Level, Format, Args, #{pid => Pid,
                                      domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}).

122
deps/rabbit/src/rabbit_log_mirroring.erl vendored Normal file
View File

@ -0,0 +1,122 @@
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2021 VMware, Inc. or its affiliates. All rights reserved.
%%

%% @doc Compatibility module for the old Lager-based logging API.
%%
%% Every call is forwarded to the standard Erlang `logger' API with the
%% queue-mirroring log domain attached as metadata, so events can be
%% filtered and routed like the rest of RabbitMQ's categorised logging.
-module(rabbit_log_mirroring).

-export([debug/1, debug/2, debug/3,
         info/1, info/2, info/3,
         notice/1, notice/2, notice/3,
         warning/1, warning/2, warning/3,
         error/1, error/2, error/3,
         critical/1, critical/2, critical/3,
         alert/1, alert/2, alert/3,
         emergency/1, emergency/2, emergency/3,
         none/1, none/2, none/3]).

-include_lib("rabbit_common/include/logging.hrl").

%% A local error/1,2,3 is defined below, so unqualified calls must
%% resolve to the local functions instead of the auto-imported BIFs.
-compile({no_auto_import, [error/2, error/3]}).

%%----------------------------------------------------------------------------

-spec debug(string()) -> 'ok'.
debug(Format) -> debug(self(), Format, []).

-spec debug(string(), [any()]) -> 'ok'.
debug(Format, Args) -> debug(self(), Format, Args).

-spec debug(pid() | [tuple()], string(), [any()]) -> 'ok'.
debug(Pid, Format, Args) -> log(debug, Pid, Format, Args).

-spec info(string()) -> 'ok'.
info(Format) -> info(self(), Format, []).

-spec info(string(), [any()]) -> 'ok'.
info(Format, Args) -> info(self(), Format, Args).

-spec info(pid() | [tuple()], string(), [any()]) -> 'ok'.
info(Pid, Format, Args) -> log(info, Pid, Format, Args).

-spec notice(string()) -> 'ok'.
notice(Format) -> notice(self(), Format, []).

-spec notice(string(), [any()]) -> 'ok'.
notice(Format, Args) -> notice(self(), Format, Args).

-spec notice(pid() | [tuple()], string(), [any()]) -> 'ok'.
notice(Pid, Format, Args) -> log(notice, Pid, Format, Args).

-spec warning(string()) -> 'ok'.
warning(Format) -> warning(self(), Format, []).

-spec warning(string(), [any()]) -> 'ok'.
warning(Format, Args) -> warning(self(), Format, Args).

-spec warning(pid() | [tuple()], string(), [any()]) -> 'ok'.
warning(Pid, Format, Args) -> log(warning, Pid, Format, Args).

-spec error(string()) -> 'ok'.
error(Format) -> error(self(), Format, []).

-spec error(string(), [any()]) -> 'ok'.
error(Format, Args) -> error(self(), Format, Args).

-spec error(pid() | [tuple()], string(), [any()]) -> 'ok'.
error(Pid, Format, Args) -> log(error, Pid, Format, Args).

-spec critical(string()) -> 'ok'.
critical(Format) -> critical(self(), Format, []).

-spec critical(string(), [any()]) -> 'ok'.
critical(Format, Args) -> critical(self(), Format, Args).

-spec critical(pid() | [tuple()], string(), [any()]) -> 'ok'.
critical(Pid, Format, Args) -> log(critical, Pid, Format, Args).

-spec alert(string()) -> 'ok'.
alert(Format) -> alert(self(), Format, []).

-spec alert(string(), [any()]) -> 'ok'.
alert(Format, Args) -> alert(self(), Format, Args).

-spec alert(pid() | [tuple()], string(), [any()]) -> 'ok'.
alert(Pid, Format, Args) -> log(alert, Pid, Format, Args).

-spec emergency(string()) -> 'ok'.
emergency(Format) -> emergency(self(), Format, []).

-spec emergency(string(), [any()]) -> 'ok'.
emergency(Format, Args) -> emergency(self(), Format, Args).

-spec emergency(pid() | [tuple()], string(), [any()]) -> 'ok'.
emergency(Pid, Format, Args) -> log(emergency, Pid, Format, Args).

%% The "none" level discards everything: all three arities are no-ops.
-spec none(string()) -> 'ok'.
none(_) -> ok.

-spec none(string(), [any()]) -> 'ok'.
none(_, _) -> ok.

-spec none(pid() | [tuple()], string(), [any()]) -> 'ok'.
none(_, _, _) -> ok.

%%----------------------------------------------------------------------------

%% Central dispatch: hand the event to the standard logger with the
%% caller pid and the mirroring log domain attached as metadata.
log(Level, Pid, Format, Args) ->
    logger:log(Level, Format, Args,
               #{pid => Pid,
                 domain => ?RMQLOG_DOMAIN_MIRRORING}).

120
deps/rabbit/src/rabbit_log_prelaunch.erl vendored Normal file
View File

@ -0,0 +1,120 @@
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2021 VMware, Inc. or its affiliates. All rights reserved.
%%

%% @doc Compatibility module for the old Lager-based logging API.
%%
%% Every call is forwarded to the standard Erlang `logger' API with the
%% prelaunch log domain attached as metadata, so events can be filtered
%% and routed like the rest of RabbitMQ's categorised logging.
-module(rabbit_log_prelaunch).

-export([debug/1, debug/2, debug/3,
         info/1, info/2, info/3,
         notice/1, notice/2, notice/3,
         warning/1, warning/2, warning/3,
         error/1, error/2, error/3,
         critical/1, critical/2, critical/3,
         alert/1, alert/2, alert/3,
         emergency/1, emergency/2, emergency/3,
         none/1, none/2, none/3]).

-include_lib("rabbit_common/include/logging.hrl").

%% A local error/1,2,3 is defined below, so unqualified calls must
%% resolve to the local functions instead of the auto-imported BIFs.
-compile({no_auto_import, [error/2, error/3]}).

-spec debug(string()) -> 'ok'.
debug(Format) -> debug(self(), Format, []).

-spec debug(string(), [any()]) -> 'ok'.
debug(Format, Args) -> debug(self(), Format, Args).

-spec debug(pid() | [tuple()], string(), [any()]) -> 'ok'.
debug(Pid, Format, Args) -> log(debug, Pid, Format, Args).

-spec info(string()) -> 'ok'.
info(Format) -> info(self(), Format, []).

-spec info(string(), [any()]) -> 'ok'.
info(Format, Args) -> info(self(), Format, Args).

-spec info(pid() | [tuple()], string(), [any()]) -> 'ok'.
info(Pid, Format, Args) -> log(info, Pid, Format, Args).

-spec notice(string()) -> 'ok'.
notice(Format) -> notice(self(), Format, []).

-spec notice(string(), [any()]) -> 'ok'.
notice(Format, Args) -> notice(self(), Format, Args).

-spec notice(pid() | [tuple()], string(), [any()]) -> 'ok'.
notice(Pid, Format, Args) -> log(notice, Pid, Format, Args).

-spec warning(string()) -> 'ok'.
warning(Format) -> warning(self(), Format, []).

-spec warning(string(), [any()]) -> 'ok'.
warning(Format, Args) -> warning(self(), Format, Args).

-spec warning(pid() | [tuple()], string(), [any()]) -> 'ok'.
warning(Pid, Format, Args) -> log(warning, Pid, Format, Args).

-spec error(string()) -> 'ok'.
error(Format) -> error(self(), Format, []).

-spec error(string(), [any()]) -> 'ok'.
error(Format, Args) -> error(self(), Format, Args).

-spec error(pid() | [tuple()], string(), [any()]) -> 'ok'.
error(Pid, Format, Args) -> log(error, Pid, Format, Args).

-spec critical(string()) -> 'ok'.
critical(Format) -> critical(self(), Format, []).

-spec critical(string(), [any()]) -> 'ok'.
critical(Format, Args) -> critical(self(), Format, Args).

-spec critical(pid() | [tuple()], string(), [any()]) -> 'ok'.
critical(Pid, Format, Args) -> log(critical, Pid, Format, Args).

-spec alert(string()) -> 'ok'.
alert(Format) -> alert(self(), Format, []).

-spec alert(string(), [any()]) -> 'ok'.
alert(Format, Args) -> alert(self(), Format, Args).

-spec alert(pid() | [tuple()], string(), [any()]) -> 'ok'.
alert(Pid, Format, Args) -> log(alert, Pid, Format, Args).

-spec emergency(string()) -> 'ok'.
emergency(Format) -> emergency(self(), Format, []).

-spec emergency(string(), [any()]) -> 'ok'.
emergency(Format, Args) -> emergency(self(), Format, Args).

-spec emergency(pid() | [tuple()], string(), [any()]) -> 'ok'.
emergency(Pid, Format, Args) -> log(emergency, Pid, Format, Args).

%% The "none" level discards everything: all three arities are no-ops.
-spec none(string()) -> 'ok'.
none(_) -> ok.

-spec none(string(), [any()]) -> 'ok'.
none(_, _) -> ok.

-spec none(pid() | [tuple()], string(), [any()]) -> 'ok'.
none(_, _, _) -> ok.

%% Central dispatch: hand the event to the standard logger with the
%% caller pid and the prelaunch log domain attached as metadata.
log(Level, Pid, Format, Args) ->
    logger:log(Level, Format, Args,
               #{pid => Pid,
                 domain => ?RMQLOG_DOMAIN_PRELAUNCH}).

120
deps/rabbit/src/rabbit_log_queue.erl vendored Normal file
View File

@ -0,0 +1,120 @@
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2021 VMware, Inc. or its affiliates. All rights reserved.
%%

%% @doc Compatibility module for the old Lager-based logging API.
%%
%% Every call is forwarded to the standard Erlang `logger' API with the
%% queue log domain attached as metadata, so events can be filtered and
%% routed like the rest of RabbitMQ's categorised logging.
-module(rabbit_log_queue).

-export([debug/1, debug/2, debug/3,
         info/1, info/2, info/3,
         notice/1, notice/2, notice/3,
         warning/1, warning/2, warning/3,
         error/1, error/2, error/3,
         critical/1, critical/2, critical/3,
         alert/1, alert/2, alert/3,
         emergency/1, emergency/2, emergency/3,
         none/1, none/2, none/3]).

-include_lib("rabbit_common/include/logging.hrl").

%% A local error/1,2,3 is defined below, so unqualified calls must
%% resolve to the local functions instead of the auto-imported BIFs.
-compile({no_auto_import, [error/2, error/3]}).

-spec debug(string()) -> 'ok'.
debug(Format) -> debug(self(), Format, []).

-spec debug(string(), [any()]) -> 'ok'.
debug(Format, Args) -> debug(self(), Format, Args).

-spec debug(pid() | [tuple()], string(), [any()]) -> 'ok'.
debug(Pid, Format, Args) -> log(debug, Pid, Format, Args).

-spec info(string()) -> 'ok'.
info(Format) -> info(self(), Format, []).

-spec info(string(), [any()]) -> 'ok'.
info(Format, Args) -> info(self(), Format, Args).

-spec info(pid() | [tuple()], string(), [any()]) -> 'ok'.
info(Pid, Format, Args) -> log(info, Pid, Format, Args).

-spec notice(string()) -> 'ok'.
notice(Format) -> notice(self(), Format, []).

-spec notice(string(), [any()]) -> 'ok'.
notice(Format, Args) -> notice(self(), Format, Args).

-spec notice(pid() | [tuple()], string(), [any()]) -> 'ok'.
notice(Pid, Format, Args) -> log(notice, Pid, Format, Args).

-spec warning(string()) -> 'ok'.
warning(Format) -> warning(self(), Format, []).

-spec warning(string(), [any()]) -> 'ok'.
warning(Format, Args) -> warning(self(), Format, Args).

-spec warning(pid() | [tuple()], string(), [any()]) -> 'ok'.
warning(Pid, Format, Args) -> log(warning, Pid, Format, Args).

-spec error(string()) -> 'ok'.
error(Format) -> error(self(), Format, []).

-spec error(string(), [any()]) -> 'ok'.
error(Format, Args) -> error(self(), Format, Args).

-spec error(pid() | [tuple()], string(), [any()]) -> 'ok'.
error(Pid, Format, Args) -> log(error, Pid, Format, Args).

-spec critical(string()) -> 'ok'.
critical(Format) -> critical(self(), Format, []).

-spec critical(string(), [any()]) -> 'ok'.
critical(Format, Args) -> critical(self(), Format, Args).

-spec critical(pid() | [tuple()], string(), [any()]) -> 'ok'.
critical(Pid, Format, Args) -> log(critical, Pid, Format, Args).

-spec alert(string()) -> 'ok'.
alert(Format) -> alert(self(), Format, []).

-spec alert(string(), [any()]) -> 'ok'.
alert(Format, Args) -> alert(self(), Format, Args).

-spec alert(pid() | [tuple()], string(), [any()]) -> 'ok'.
alert(Pid, Format, Args) -> log(alert, Pid, Format, Args).

-spec emergency(string()) -> 'ok'.
emergency(Format) -> emergency(self(), Format, []).

-spec emergency(string(), [any()]) -> 'ok'.
emergency(Format, Args) -> emergency(self(), Format, Args).

-spec emergency(pid() | [tuple()], string(), [any()]) -> 'ok'.
emergency(Pid, Format, Args) -> log(emergency, Pid, Format, Args).

%% The "none" level discards everything: all three arities are no-ops.
-spec none(string()) -> 'ok'.
none(_) -> ok.

-spec none(string(), [any()]) -> 'ok'.
none(_, _) -> ok.

-spec none(pid() | [tuple()], string(), [any()]) -> 'ok'.
none(_, _, _) -> ok.

%% Central dispatch: hand the event to the standard logger with the
%% caller pid and the queue log domain attached as metadata.
log(Level, Pid, Format, Args) ->
    logger:log(Level, Format, Args,
               #{pid => Pid,
                 domain => ?RMQLOG_DOMAIN_QUEUE}).

122
deps/rabbit/src/rabbit_log_upgrade.erl vendored Normal file
View File

@ -0,0 +1,122 @@
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2021 VMware, Inc. or its affiliates. All rights reserved.
%%

%% @doc Compatibility module for the old Lager-based logging API.
%%
%% Every call is forwarded to the standard Erlang `logger' API with the
%% upgrade log domain attached as metadata, so events can be filtered
%% and routed like the rest of RabbitMQ's categorised logging.
-module(rabbit_log_upgrade).

-export([debug/1, debug/2, debug/3,
         info/1, info/2, info/3,
         notice/1, notice/2, notice/3,
         warning/1, warning/2, warning/3,
         error/1, error/2, error/3,
         critical/1, critical/2, critical/3,
         alert/1, alert/2, alert/3,
         emergency/1, emergency/2, emergency/3,
         none/1, none/2, none/3]).

-include_lib("rabbit_common/include/logging.hrl").

%% A local error/1,2,3 is defined below, so unqualified calls must
%% resolve to the local functions instead of the auto-imported BIFs.
-compile({no_auto_import, [error/2, error/3]}).

%%----------------------------------------------------------------------------

-spec debug(string()) -> 'ok'.
debug(Format) -> debug(self(), Format, []).

-spec debug(string(), [any()]) -> 'ok'.
debug(Format, Args) -> debug(self(), Format, Args).

-spec debug(pid() | [tuple()], string(), [any()]) -> 'ok'.
debug(Pid, Format, Args) -> log(debug, Pid, Format, Args).

-spec info(string()) -> 'ok'.
info(Format) -> info(self(), Format, []).

-spec info(string(), [any()]) -> 'ok'.
info(Format, Args) -> info(self(), Format, Args).

-spec info(pid() | [tuple()], string(), [any()]) -> 'ok'.
info(Pid, Format, Args) -> log(info, Pid, Format, Args).

-spec notice(string()) -> 'ok'.
notice(Format) -> notice(self(), Format, []).

-spec notice(string(), [any()]) -> 'ok'.
notice(Format, Args) -> notice(self(), Format, Args).

-spec notice(pid() | [tuple()], string(), [any()]) -> 'ok'.
notice(Pid, Format, Args) -> log(notice, Pid, Format, Args).

-spec warning(string()) -> 'ok'.
warning(Format) -> warning(self(), Format, []).

-spec warning(string(), [any()]) -> 'ok'.
warning(Format, Args) -> warning(self(), Format, Args).

-spec warning(pid() | [tuple()], string(), [any()]) -> 'ok'.
warning(Pid, Format, Args) -> log(warning, Pid, Format, Args).

-spec error(string()) -> 'ok'.
error(Format) -> error(self(), Format, []).

-spec error(string(), [any()]) -> 'ok'.
error(Format, Args) -> error(self(), Format, Args).

-spec error(pid() | [tuple()], string(), [any()]) -> 'ok'.
error(Pid, Format, Args) -> log(error, Pid, Format, Args).

-spec critical(string()) -> 'ok'.
critical(Format) -> critical(self(), Format, []).

-spec critical(string(), [any()]) -> 'ok'.
critical(Format, Args) -> critical(self(), Format, Args).

-spec critical(pid() | [tuple()], string(), [any()]) -> 'ok'.
critical(Pid, Format, Args) -> log(critical, Pid, Format, Args).

-spec alert(string()) -> 'ok'.
alert(Format) -> alert(self(), Format, []).

-spec alert(string(), [any()]) -> 'ok'.
alert(Format, Args) -> alert(self(), Format, Args).

-spec alert(pid() | [tuple()], string(), [any()]) -> 'ok'.
alert(Pid, Format, Args) -> log(alert, Pid, Format, Args).

-spec emergency(string()) -> 'ok'.
emergency(Format) -> emergency(self(), Format, []).

-spec emergency(string(), [any()]) -> 'ok'.
emergency(Format, Args) -> emergency(self(), Format, Args).

-spec emergency(pid() | [tuple()], string(), [any()]) -> 'ok'.
emergency(Pid, Format, Args) -> log(emergency, Pid, Format, Args).

%% The "none" level discards everything: all three arities are no-ops.
-spec none(string()) -> 'ok'.
none(_) -> ok.

-spec none(string(), [any()]) -> 'ok'.
none(_, _) -> ok.

-spec none(pid() | [tuple()], string(), [any()]) -> 'ok'.
none(_, _, _) -> ok.

%%----------------------------------------------------------------------------

%% Central dispatch: hand the event to the standard logger with the
%% caller pid and the upgrade log domain attached as metadata.
log(Level, Pid, Format, Args) ->
    logger:log(Level, Format, Args,
               #{pid => Pid,
                 domain => ?RMQLOG_DOMAIN_UPGRADE}).

View File

@ -0,0 +1,176 @@
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2021 VMware, Inc. or its affiliates. All rights reserved.
%%

%% @doc Logger handler which publishes log events to the
%% `amq.rabbitmq.log' exchange in the default vhost. The event's log
%% level is used as the routing key, so consumers can bind per level.
-module(rabbit_logger_exchange_h).

-include_lib("kernel/include/logger.hrl").
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("rabbit_common/include/rabbit_framing.hrl").
-include_lib("rabbit_common/include/logging.hrl").

%% logger callbacks
-export([log/2, adding_handler/1, removing_handler/1, changing_config/3,
         filter_config/1]).

%% Delay between attempts to declare the log exchange while the broker
%% is still booting.
-define(DECL_EXCHANGE_INTERVAL_SECS, 5).
-define(LOG_EXCH_NAME, <<"amq.rabbitmq.log">>).
%% Fallback formatter used when the configured formatter crashes.
-define(DEFAULT_FORMATTER, logger_formatter).
-define(DEFAULT_FORMATTER_CONFIG, #{}).

%% -------------------------------------------------------------------
%% Logger handler callbacks.
%% -------------------------------------------------------------------

%% Called by logger when the handler is installed; spawns a background
%% process which (re)tries to declare the log exchange and records it
%% in the handler config.
adding_handler(Config) ->
    Config1 = start_setup_proc(Config),
    {ok, Config1}.

%% Configuration changes at runtime are ignored: the old config is kept.
changing_config(_SetOrUpdate, OldConfig, _NewConfig) ->
    {ok, OldConfig}.

filter_config(Config) ->
    Config.

%% Drop events emitted from this module itself, otherwise publishing a
%% log message could generate more log messages (a feedback loop).
log(#{meta := #{mfa := {?MODULE, _, _}}}, _) ->
    ok;
%% Only publish once the broker is fully booted; events arriving
%% earlier are silently discarded.
log(LogEvent, Config) ->
    case rabbit_boot_state:get() of
        ready -> do_log(LogEvent, Config);
        _     -> ok
    end.

%% Format the event and publish it to the configured exchange. A
%% missing exchange ({error, not_found}) is not an error: it may not
%% have been declared yet, or may have been deleted by the user.
do_log(LogEvent, #{config := #{exchange := Exchange}} = Config) ->
    RoutingKey = make_routing_key(LogEvent, Config),
    AmqpMsg = log_event_to_amqp_msg(LogEvent, Config),
    Body = try_format_body(LogEvent, Config),
    case rabbit_basic:publish(Exchange, RoutingKey, AmqpMsg, Body) of
        ok                 -> ok;
        {error, not_found} -> ok
    end.

%% Called by logger when the handler is removed; stops the setup
%% process and deletes the exchange.
removing_handler(Config) ->
    unconfigure_exchange(Config),
    ok.

%% -------------------------------------------------------------------
%% Internal functions.
%% -------------------------------------------------------------------

%% Build the AMQP properties (content type, timestamp, headers) for a
%% log event.
log_event_to_amqp_msg(LogEvent, Config) ->
    ContentType = guess_content_type(Config),
    Timestamp = make_timestamp(LogEvent, Config),
    Headers = make_headers(LogEvent, Config),
    #'P_basic'{
       content_type = ContentType,
       timestamp = Timestamp,
       headers = Headers
      }.

%% The log level (e.g. <<"info">>) doubles as the routing key.
make_routing_key(#{level := Level}, _) ->
    rabbit_data_coercion:to_binary(Level).

%% Advertise JSON when the JSON formatter is configured, plain text
%% otherwise.
guess_content_type(#{formatter := {rabbit_logger_json_fmt, _}}) ->
    <<"application/json">>;
guess_content_type(_) ->
    <<"text/plain">>.

%% Logger event timestamps are in microseconds; AMQP's basic.timestamp
%% is in seconds. Fall back to "now" if the event carries no time.
make_timestamp(#{meta := #{time := Timestamp}}, _) ->
    erlang:convert_time_unit(Timestamp, microsecond, second);
make_timestamp(_, _) ->
    os:system_time(second).

%% Every message carries the originating node name as a header.
make_headers(_, _) ->
    Node = rabbit_data_coercion:to_binary(node()),
    [{<<"node">>, longstr, Node}].

%% Render the event with the configured formatter and flatten the
%% resulting iolist into the message body.
try_format_body(LogEvent, #{formatter := {Formatter, FormatterConfig}}) ->
    Formatted = try_format_body(LogEvent, Formatter, FormatterConfig),
    erlang:iolist_to_binary(Formatted).

%% Run the formatter; if it crashes, re-format a "FORMATTER CRASH"
%% event with the default formatter. The first clause of the inner
%% case prevents infinite recursion when the crashing formatter *is*
%% the default one.
try_format_body(LogEvent, Formatter, FormatterConfig) ->
    try
        Formatter:format(LogEvent, FormatterConfig)
    catch
        C:R:S ->
            case {?DEFAULT_FORMATTER, ?DEFAULT_FORMATTER_CONFIG} of
                {Formatter, FormatterConfig} ->
                    "DEFAULT FORMATTER CRASHED\n";
                {DefaultFormatter, DefaultFormatterConfig} ->
                    Msg = {"FORMATTER CRASH: ~tp -- ~p:~p:~p",
                           [maps:get(msg, LogEvent), C, R, S]},
                    LogEvent1 = LogEvent#{msg => Msg},
                    try_format_body(
                      LogEvent1,
                      DefaultFormatter,
                      DefaultFormatterConfig)
            end
    end.

%% Resolve the exchange resource in the default vhost and spawn the
%% declaration loop. NOTE(review): this is a bare spawn — the setup
%% process is neither linked nor monitored, so a crash there goes
%% unnoticed; confirm this is intentional.
start_setup_proc(#{config := InternalConfig} = Config) ->
    {ok, DefaultVHost} = application:get_env(rabbit, default_vhost),
    Exchange = rabbit_misc:r(DefaultVHost, exchange, ?LOG_EXCH_NAME),
    InternalConfig1 = InternalConfig#{exchange => Exchange},
    Pid = spawn(fun() -> setup_proc(Config#{config => InternalConfig1}) end),
    InternalConfig2 = InternalConfig1#{setup_proc => Pid},
    Config#{config => InternalConfig2}.

%% Retry loop: keep trying to declare the exchange every
%% ?DECL_EXCHANGE_INTERVAL_SECS until it succeeds or a 'stop' message
%% arrives (sent from unconfigure_exchange/1).
setup_proc(
  #{config := #{exchange := #resource{name = Name,
                                      virtual_host = VHost}}} = Config) ->
    case declare_exchange(Config) of
        ok ->
            ?LOG_INFO(
               "Logging to exchange '~s' in vhost '~s' ready", [Name, VHost],
               #{domain => ?RMQLOG_DOMAIN_GLOBAL});
        error ->
            ?LOG_DEBUG(
               "Logging to exchange '~s' in vhost '~s' not ready, "
               "trying again in ~b second(s)",
               [Name, VHost, ?DECL_EXCHANGE_INTERVAL_SECS],
               #{domain => ?RMQLOG_DOMAIN_GLOBAL}),
            receive
                stop -> ok
            after ?DECL_EXCHANGE_INTERVAL_SECS * 1000 ->
                      setup_proc(Config)
            end
    end.

%% Declare the log exchange as a topic exchange (durable,
%% non-auto-delete, internal — presumably so clients cannot publish to
%% it directly). Any exception (e.g. the vhost's database not being up
%% yet) is mapped to 'error' so setup_proc/1 retries.
declare_exchange(
  #{config := #{exchange := #resource{name = Name,
                                      virtual_host = VHost} = Exchange}}) ->
    try
        %% Durable.
        #exchange{} = rabbit_exchange:declare(
                        Exchange, topic, true, false, true, [],
                        ?INTERNAL_USER),
        ?LOG_DEBUG(
           "Declared exchange '~s' in vhost '~s'",
           [Name, VHost],
           #{domain => ?RMQLOG_DOMAIN_GLOBAL}),
        ok
    catch
        Class:Reason ->
            ?LOG_DEBUG(
               "Could not declare exchange '~s' in vhost '~s', "
               "reason: ~0p:~0p",
               [Name, VHost, Class, Reason],
               #{domain => ?RMQLOG_DOMAIN_GLOBAL}),
            error
    end.

%% Tear-down: stop the setup process (if it is still retrying) and
%% delete the exchange. The delete result is ignored — the exchange
%% may never have been declared.
unconfigure_exchange(
  #{config := #{exchange := #resource{name = Name,
                                      virtual_host = VHost} = Exchange,
                setup_proc := Pid}}) ->
    Pid ! stop,
    rabbit_exchange:delete(Exchange, false, ?INTERNAL_USER),
    ?LOG_INFO(
       "Logging to exchange '~s' in vhost '~s' disabled",
       [Name, VHost],
       #{domain => ?RMQLOG_DOMAIN_GLOBAL}).

View File

@ -150,7 +150,7 @@ sync_mirrors(HandleInfo, EmitStats,
backing_queue_state = BQS }) ->
Log = fun (Fmt, Params) ->
rabbit_mirror_queue_misc:log_info(
QName, "Synchronising: " ++ Fmt ++ "~n", Params)
QName, "Synchronising: " ++ Fmt ++ "", Params)
end,
Log("~p messages to synchronise", [BQ:len(BQS)]),
{ok, Q} = rabbit_amqqueue:lookup(QName),
@ -198,7 +198,7 @@ terminate(Reason,
true -> %% Remove the whole queue to avoid data loss
rabbit_mirror_queue_misc:log_warning(
QName, "Stopping all nodes on master shutdown since no "
"synchronised mirror (replica) is available~n", []),
"synchronised mirror (replica) is available", []),
stop_all_slaves(Reason, State);
false -> %% Just let some other mirror take over.
ok

View File

@ -211,7 +211,7 @@ drop_mirror(QName, MirrorNode) ->
[PrimaryPid] when MirrorPids =:= [] ->
{error, cannot_drop_only_mirror};
[Pid] ->
log_info(Name, "Dropping queue mirror on node ~p~n",
log_info(Name, "Dropping queue mirror on node ~p",
[MirrorNode]),
exit(Pid, {shutdown, dropped}),
{ok, dropped}
@ -238,7 +238,7 @@ add_mirror(QName, MirrorNode, SyncMode) ->
{ok, _} ->
try
MirrorPid = rabbit_amqqueue_sup_sup:start_queue_process(MirrorNode, Q, slave),
log_info(QName, "Adding mirror on node ~p: ~p~n", [MirrorNode, MirrorPid]),
log_info(QName, "Adding mirror on node ~p: ~p", [MirrorNode, MirrorPid]),
rabbit_mirror_queue_slave:go(MirrorPid, SyncMode)
of
_ -> ok
@ -246,13 +246,13 @@ add_mirror(QName, MirrorNode, SyncMode) ->
error:QError ->
log_warning(QName,
"Unable to start queue mirror on node '~p'. "
"Target queue supervisor is not running: ~p~n",
"Target queue supervisor is not running: ~p",
[MirrorNode, QError])
end;
{error, Error} ->
log_warning(QName,
"Unable to start queue mirror on node '~p'. "
"Target virtual host is not running: ~p~n",
"Target virtual host is not running: ~p",
[MirrorNode, Error]),
ok
end
@ -264,7 +264,7 @@ add_mirror(QName, MirrorNode, SyncMode) ->
report_deaths(_MirrorPid, _IsMaster, _QueueName, []) ->
ok;
report_deaths(MirrorPid, IsMaster, QueueName, DeadPids) ->
log_info(QueueName, "~s replica of queue ~s detected replica ~s to be down~n",
log_info(QueueName, "~s replica of queue ~s detected replica ~s to be down",
[case IsMaster of
true -> "Primary";
false -> "Secondary"
@ -342,7 +342,7 @@ stop_all_slaves(Reason, SPids, QName, GM, WaitTimeout) ->
after WaitTimeout ->
rabbit_mirror_queue_misc:log_warning(
QName, "Missing 'DOWN' message from ~p in"
" node ~p~n", [Pid, node(Pid)]),
" node ~p", [Pid, node(Pid)]),
[Pid | Acc]
end;
false ->

View File

@ -137,7 +137,7 @@ handle_go(Q0) when ?is_amqqueue(Q0) ->
{ok, State};
{stale, StalePid} ->
rabbit_mirror_queue_misc:log_warning(
QName, "Detected stale HA master: ~p~n", [StalePid]),
QName, "Detected stale classic mirrored queue leader: ~p", [StalePid]),
gm:leave(GM),
{error, {stale_master_pid, StalePid}};
duplicate_live_master ->
@ -189,7 +189,7 @@ init_it(Self, GM, Node, QName) ->
stop_pending_slaves(QName, Pids) ->
[begin
rabbit_mirror_queue_misc:log_warning(
QName, "Detected a non-responsive classic queue mirror, stopping it: ~p~n", [Pid]),
QName, "Detected a non-responsive classic queue mirror, stopping it: ~p", [Pid]),
case erlang:process_info(Pid, dictionary) of
undefined -> ok;
{dictionary, Dict} ->
@ -633,7 +633,7 @@ promote_me(From, #state { q = Q0,
msg_id_status = MS,
known_senders = KS}) when ?is_amqqueue(Q0) ->
QName = amqqueue:get_name(Q0),
rabbit_mirror_queue_misc:log_info(QName, "Promoting mirror ~s to master~n",
rabbit_mirror_queue_misc:log_info(QName, "Promoting mirror ~s to leader",
[rabbit_misc:pid_to_string(self())]),
Q1 = amqqueue:set_pid(Q0, self()),
DeathFun = rabbit_mirror_queue_master:sender_death_fun(),

View File

@ -69,7 +69,7 @@ init() ->
case is_virgin_node() of
true ->
rabbit_log:info("Node database directory at ~ts is empty. "
"Assuming we need to join an existing cluster or initialise from scratch...~n",
"Assuming we need to join an existing cluster or initialise from scratch...",
[dir()]),
rabbit_peer_discovery:log_configured_backend(),
rabbit_peer_discovery:maybe_init(),
@ -155,7 +155,7 @@ run_peer_discovery_with_retries(RetriesLeft, DelayInterval) ->
e(invalid_cluster_nodes_conf)
end,
DiscoveredNodes = lists:usort(DiscoveredNodes0),
rabbit_log:info("All discovered existing cluster peers: ~s~n",
rabbit_log:info("All discovered existing cluster peers: ~s",
[rabbit_peer_discovery:format_discovered_nodes(DiscoveredNodes)]),
Peers = nodes_excl_me(DiscoveredNodes),
case Peers of
@ -165,7 +165,7 @@ run_peer_discovery_with_retries(RetriesLeft, DelayInterval) ->
"Enabling debug logging might help troubleshoot."),
init_db_and_upgrade([node()], disc, false, _Retry = true);
_ ->
rabbit_log:info("Peer nodes we can cluster with: ~s~n",
rabbit_log:info("Peer nodes we can cluster with: ~s",
[rabbit_peer_discovery:format_discovered_nodes(Peers)]),
join_discovered_peers(Peers, NodeType)
end.
@ -180,13 +180,13 @@ join_discovered_peers(TryNodes, NodeType) ->
join_discovered_peers_with_retries(TryNodes, _NodeType, 0, _DelayInterval) ->
rabbit_log:warning(
"Could not successfully contact any node of: ~s (as in Erlang distribution). "
"Starting as a blank standalone node...~n",
"Starting as a blank standalone node...",
[string:join(lists:map(fun atom_to_list/1, TryNodes), ",")]),
init_db_and_upgrade([node()], disc, false, _Retry = true);
join_discovered_peers_with_retries(TryNodes, NodeType, RetriesLeft, DelayInterval) ->
case find_reachable_peer_to_cluster_with(nodes_excl_me(TryNodes)) of
{ok, Node} ->
rabbit_log:info("Node '~s' selected for auto-clustering~n", [Node]),
rabbit_log:info("Node '~s' selected for auto-clustering", [Node]),
{ok, {_, DiscNodes, _}} = discover_cluster0(Node),
init_db_and_upgrade(DiscNodes, NodeType, true, _Retry = true),
rabbit_connection_tracking:boot(),
@ -237,7 +237,7 @@ join_cluster(DiscoveryNode, NodeType) ->
reset_gracefully(),
%% Join the cluster
rabbit_log:info("Clustering with ~p as ~p node~n",
rabbit_log:info("Clustering with ~p as ~p node",
[ClusterNodes, NodeType]),
ok = init_db_with_mnesia(ClusterNodes, NodeType,
true, true, _Retry = true),
@ -252,7 +252,7 @@ join_cluster(DiscoveryNode, NodeType) ->
%% do we think so ourselves?
case are_we_clustered_with(DiscoveryNode) of
true ->
rabbit_log:info("Asked to join a cluster but already a member of it: ~p~n", [ClusterNodes]),
rabbit_log:info("Asked to join a cluster but already a member of it: ~p", [ClusterNodes]),
{ok, already_member};
false ->
Msg = format_inconsistent_cluster_message(DiscoveryNode, node()),
@ -269,14 +269,14 @@ join_cluster(DiscoveryNode, NodeType) ->
reset() ->
ensure_mnesia_not_running(),
rabbit_log:info("Resetting Rabbit~n", []),
rabbit_log:info("Resetting Rabbit", []),
reset_gracefully().
-spec force_reset() -> 'ok'.
force_reset() ->
ensure_mnesia_not_running(),
rabbit_log:info("Resetting Rabbit forcefully~n", []),
rabbit_log:info("Resetting Rabbit forcefully", []),
wipe().
reset_gracefully() ->
@ -336,7 +336,7 @@ update_cluster_nodes(DiscoveryNode) ->
%% nodes
mnesia:delete_schema([node()]),
rabbit_node_monitor:write_cluster_status(Status),
rabbit_log:info("Updating cluster nodes from ~p~n",
rabbit_log:info("Updating cluster nodes from ~p",
[DiscoveryNode]),
init_db_with_mnesia(AllNodes, node_type(), true, true, _Retry = false);
false ->
@ -367,7 +367,7 @@ forget_cluster_node(Node, RemoveWhenOffline, EmitNodeDeletedEvent) ->
{true, true} -> e(online_node_offline_flag);
{false, false} -> e(offline_node_no_offline_flag);
{false, true} -> rabbit_log:info(
"Removing node ~p from cluster~n", [Node]),
"Removing node ~p from cluster", [Node]),
case remove_node_if_mnesia_running(Node) of
ok when EmitNodeDeletedEvent ->
rabbit_event:notify(node_deleted, [{node, Node}]),
@ -814,7 +814,7 @@ schema_ok_or_move() ->
%% started yet
rabbit_log:warning("schema integrity check failed: ~p~n"
"moving database to backup location "
"and recreating schema from scratch~n",
"and recreating schema from scratch",
[Reason]),
ok = move_db(),
ok = create_schema()
@ -848,7 +848,7 @@ move_db() ->
ok ->
%% NB: we cannot use rabbit_log here since it may not have
%% been started yet
rabbit_log:warning("moved database from ~s to ~s~n",
rabbit_log:warning("moved database from ~s to ~s",
[MnesiaDir, BackupDir]),
ok;
{error, Reason} -> throw({error, {cannot_backup_mnesia,
@ -895,7 +895,7 @@ leave_cluster(Node) ->
end.
wait_for(Condition) ->
rabbit_log:info("Waiting for ~p...~n", [Condition]),
rabbit_log:info("Waiting for ~p...", [Condition]),
timer:sleep(1000).
start_mnesia(CheckConsistency) ->
@ -1040,15 +1040,15 @@ find_reachable_peer_to_cluster_with([Node | Nodes]) ->
end,
case remote_node_info(Node) of
{badrpc, _} = Reason ->
Fail("~p~n", [Reason]);
Fail("~p", [Reason]);
%% old delegate hash check
{_OTP, RMQ, Hash, _} when is_binary(Hash) ->
Fail("version ~s~n", [RMQ]);
Fail("version ~s", [RMQ]);
{_OTP, _RMQ, _Protocol, {error, _} = E} ->
Fail("~p~n", [E]);
Fail("~p", [E]);
{OTP, RMQ, Protocol, _} ->
case check_consistency(Node, OTP, RMQ, Protocol) of
{error, _} -> Fail("versions ~p~n",
{error, _} -> Fail("versions ~p",
[{OTP, RMQ}]);
ok -> {ok, Node}
end

View File

@ -144,7 +144,7 @@ finish(FromNode, ToNode, AllNodes) ->
end;
FromNode ->
rabbit_log:info(
"Abandoning rename from ~s to ~s since we are still ~s~n",
"Abandoning rename from ~s to ~s since we are still ~s",
[FromNode, ToNode, FromNode]),
[{ok, _} = file:copy(backup_of_conf(F), F) || F <- config_files()],
ok = rabbit_file:recursive_delete([rabbit_mnesia:dir()]),
@ -155,18 +155,18 @@ finish(FromNode, ToNode, AllNodes) ->
%% Boot will almost certainly fail but we might as
%% well just log this
rabbit_log:info(
"Rename attempted from ~s to ~s but we are ~s - ignoring.~n",
"Rename attempted from ~s to ~s but we are ~s - ignoring.",
[FromNode, ToNode, node()])
end.
finish_primary(FromNode, ToNode) ->
rabbit_log:info("Restarting as primary after rename from ~s to ~s~n",
rabbit_log:info("Restarting as primary after rename from ~s to ~s",
[FromNode, ToNode]),
delete_rename_files(),
ok.
finish_secondary(FromNode, ToNode, AllNodes) ->
rabbit_log:info("Restarting as secondary after rename from ~s to ~s~n",
rabbit_log:info("Restarting as secondary after rename from ~s to ~s",
[FromNode, ToNode]),
rabbit_upgrade:secondary_upgrade(AllNodes),
rename_in_running_mnesia(FromNode, ToNode),

View File

@ -717,7 +717,7 @@ init([Type, BaseDir, ClientRefs, StartupFunState]) ->
Name = filename:join(filename:basename(BaseDir), atom_to_list(Type)),
{ok, IndexModule} = application:get_env(rabbit, msg_store_index_module),
rabbit_log:info("Message store ~tp: using ~p to provide index~n", [Name, IndexModule]),
rabbit_log:info("Message store ~tp: using ~p to provide index", [Name, IndexModule]),
AttemptFileSummaryRecovery =
case ClientRefs of
@ -794,11 +794,11 @@ init([Type, BaseDir, ClientRefs, StartupFunState]) ->
true -> "clean";
false -> "unclean"
end,
rabbit_log:debug("Rebuilding message location index after ~s shutdown...~n",
rabbit_log:debug("Rebuilding message location index after ~s shutdown...",
[Cleanliness]),
{Offset, State1 = #msstate { current_file = CurFile }} =
build_index(CleanShutdown, StartupFunState, State),
rabbit_log:debug("Finished rebuilding index~n", []),
rabbit_log:debug("Finished rebuilding index", []),
%% read is only needed so that we can seek
{ok, CurHdl} = open_file(Dir, filenum_to_name(CurFile),
[read | ?WRITE_MODE]),
@ -999,7 +999,7 @@ terminate(_Reason, State = #msstate { index_state = IndexState,
{error, FSErr} ->
rabbit_log:error("Unable to store file summary"
" for vhost message store for directory ~p~n"
"Error: ~p~n",
"Error: ~p",
[Dir, FSErr])
end,
[true = ets:delete(T) || T <- [FileSummaryEts, FileHandlesEts,
@ -1012,7 +1012,7 @@ terminate(_Reason, State = #msstate { index_state = IndexState,
ok;
{error, RTErr} ->
rabbit_log:error("Unable to save message store recovery terms"
" for directory ~p~nError: ~p~n",
" for directory ~p~nError: ~p",
[Dir, RTErr])
end,
State3 #msstate { index_state = undefined,
@ -1574,12 +1574,12 @@ index_clean_up_temporary_reference_count_entries(
recover_index_and_client_refs(IndexModule, _Recover, undefined, Dir, _Name) ->
{false, IndexModule:new(Dir), []};
recover_index_and_client_refs(IndexModule, false, _ClientRefs, Dir, Name) ->
rabbit_log:warning("Message store ~tp: rebuilding indices from scratch~n", [Name]),
rabbit_log:warning("Message store ~tp: rebuilding indices from scratch", [Name]),
{false, IndexModule:new(Dir), []};
recover_index_and_client_refs(IndexModule, true, ClientRefs, Dir, Name) ->
Fresh = fun (ErrorMsg, ErrorArgs) ->
rabbit_log:warning("Message store ~tp : " ++ ErrorMsg ++ "~n"
"rebuilding indices from scratch~n",
"rebuilding indices from scratch",
[Name | ErrorArgs]),
{false, IndexModule:new(Dir), []}
end,
@ -1741,9 +1741,9 @@ build_index(true, _StartupFunState,
end, {0, State}, FileSummaryEts);
build_index(false, {MsgRefDeltaGen, MsgRefDeltaGenInit},
State = #msstate { dir = Dir }) ->
rabbit_log:debug("Rebuilding message refcount...~n", []),
rabbit_log:debug("Rebuilding message refcount...", []),
ok = count_msg_refs(MsgRefDeltaGen, MsgRefDeltaGenInit, State),
rabbit_log:debug("Done rebuilding message refcount~n", []),
rabbit_log:debug("Done rebuilding message refcount", []),
{ok, Pid} = gatherer:start_link(),
case [filename_to_num(FileName) ||
FileName <- list_sorted_filenames(Dir, ?FILE_EXTENSION)] of
@ -1757,7 +1757,7 @@ build_index(false, {MsgRefDeltaGen, MsgRefDeltaGenInit},
build_index_worker(Gatherer, State = #msstate { dir = Dir },
Left, File, Files) ->
FileName = filenum_to_name(File),
rabbit_log:debug("Rebuilding message location index from ~p (~B file(s) remaining)~n",
rabbit_log:debug("Rebuilding message location index from ~p (~B file(s) remaining)",
[form_filename(Dir, FileName), length(Files)]),
{ok, Messages, FileSize} =
scan_file_for_valid_messages(Dir, FileName),

View File

@ -70,7 +70,7 @@ terminate(#state { table = MsgLocations, dir = Dir }) ->
ok -> ok;
{error, Err} ->
rabbit_log:error("Unable to save message store index"
" for directory ~p.~nError: ~p~n",
" for directory ~p.~nError: ~p",
[Dir, Err])
end,
ets:delete(MsgLocations).

View File

@ -150,7 +150,7 @@ log_poodle_fail(Context) ->
"better.~n~n"
"If you cannot upgrade now and want to re-enable SSL listeners, you can~n"
"set the config item 'ssl_allow_poodle_attack' to 'true' in the~n"
"'rabbit' section of your configuration file.~n",
"'rabbit' section of your configuration file.",
[rabbit_misc:otp_release(), Context]).
fix_ssl_options(Config) ->
@ -171,7 +171,7 @@ tcp_listener_addresses({Host, Port, Family0})
[{IPAddress, Port, Family} ||
{IPAddress, Family} <- getaddr(Host, Family0)];
tcp_listener_addresses({_Host, Port, _Family0}) ->
rabbit_log:error("invalid port ~p - not 0..65535~n", [Port]),
rabbit_log:error("invalid port ~p - not 0..65535", [Port]),
throw({error, {invalid_port, Port}}).
tcp_listener_addresses_auto(Port) ->
@ -371,11 +371,11 @@ on_node_down(Node) ->
case lists:member(Node, nodes()) of
false ->
rabbit_log:info(
"Node ~s is down, deleting its listeners~n", [Node]),
"Node ~s is down, deleting its listeners", [Node]),
ok = mnesia:dirty_delete(rabbit_listener, Node);
true ->
rabbit_log:info(
"Keeping ~s listeners: the node is already back~n", [Node])
"Keeping ~s listeners: the node is already back", [Node])
end.
-spec register_connection(pid()) -> ok.
@ -457,11 +457,11 @@ close_connection(Pid, Explanation) ->
case lists:member(Pid, connections()) of
true ->
Res = rabbit_reader:shutdown(Pid, Explanation),
rabbit_log:info("Closing connection ~p because ~p~n", [Pid, Explanation]),
rabbit_log:info("Closing connection ~p because ~p", [Pid, Explanation]),
Res;
false ->
rabbit_log:warning("Asked to close connection ~p (reason: ~p) "
"but no running cluster node reported it as an active connection. Was it already closed? ~n",
"but no running cluster node reported it as an active connection. Was it already closed? ",
[Pid, Explanation]),
ok
end.
@ -578,7 +578,7 @@ gethostaddr(Host, Family) ->
-spec host_lookup_error(_, _) -> no_return().
host_lookup_error(Host, Reason) ->
rabbit_log:error("invalid host ~p - ~p~n", [Host, Reason]),
rabbit_log:error("invalid host ~p - ~p", [Host, Reason]),
throw({error, {invalid_host, Host, Reason}}).
resolve_family({_,_,_,_}, auto) -> inet;

View File

@ -322,11 +322,11 @@ find_blocked_global_peers1([], _) ->
unblock_global_peer(PeerNode) ->
ThisNode = node(),
PeerState = rpc:call(PeerNode, sys, get_status, [global_name_server]),
error_logger:info_msg(
logger:info(
"Global hang workaround: global state on ~s seems broken~n"
" * Peer global state: ~p~n"
" * Local global state: ~p~n"
"Faking nodedown/nodeup between ~s and ~s~n",
"Faking nodedown/nodeup between ~s and ~s",
[PeerNode, PeerState, sys:get_status(global_name_server),
PeerNode, ThisNode]),
{global_name_server, ThisNode} ! {nodedown, PeerNode},
@ -434,7 +434,7 @@ handle_cast({check_partial_partition, Node, Rep, NodeGUID, MyGUID, RepGUID},
_ ->
rabbit_log:warning("Received a 'DOWN' message"
" from ~p but still can"
" communicate with it ~n",
" communicate with it ",
[Node]),
cast(Rep, {partial_partition,
Node, node(), RepGUID})
@ -468,7 +468,7 @@ handle_cast({partial_partition, NotReallyDown, Proxy, MyGUID},
{ok, pause_minority} ->
rabbit_log:error(
FmtBase ++ " * pause_minority mode enabled~n"
"We will therefore pause until the *entire* cluster recovers~n",
"We will therefore pause until the *entire* cluster recovers",
ArgsBase),
await_cluster_recovery(fun all_nodes_up/0),
{noreply, State};
@ -476,16 +476,16 @@ handle_cast({partial_partition, NotReallyDown, Proxy, MyGUID},
case in_preferred_partition(PreferredNodes) of
true -> rabbit_log:error(
FmtBase ++ "We will therefore intentionally "
"disconnect from ~s~n", ArgsBase ++ [Proxy]),
"disconnect from ~s", ArgsBase ++ [Proxy]),
upgrade_to_full_partition(Proxy);
false -> rabbit_log:info(
FmtBase ++ "We are about to pause, no need "
"for further actions~n", ArgsBase)
"for further actions", ArgsBase)
end,
{noreply, State};
{ok, _} ->
rabbit_log:error(
FmtBase ++ "We will therefore intentionally disconnect from ~s~n",
FmtBase ++ "We will therefore intentionally disconnect from ~s",
ArgsBase ++ [Proxy]),
upgrade_to_full_partition(Proxy),
{noreply, State}
@ -498,7 +498,7 @@ handle_cast({partial_partition, _GUID, _Reporter, _Proxy}, State) ->
%% messages reliably when another node disconnects from us. Therefore
%% we are told just before the disconnection so we can reciprocate.
handle_cast({partial_partition_disconnect, Other}, State) ->
rabbit_log:error("Partial partition disconnect from ~s~n", [Other]),
rabbit_log:error("Partial partition disconnect from ~s", [Other]),
disconnect(Other),
{noreply, State};
@ -507,7 +507,7 @@ handle_cast({partial_partition_disconnect, Other}, State) ->
%% mnesia propagation.
handle_cast({node_up, Node, NodeType},
State = #state{monitors = Monitors}) ->
rabbit_log:info("rabbit on node ~p up~n", [Node]),
rabbit_log:info("rabbit on node ~p up", [Node]),
{AllNodes, DiscNodes, RunningNodes} = read_cluster_status(),
write_cluster_status({add_node(Node, AllNodes),
case NodeType of
@ -551,7 +551,7 @@ handle_cast(_Msg, State) ->
handle_info({'DOWN', _MRef, process, {rabbit, Node}, _Reason},
State = #state{monitors = Monitors, subscribers = Subscribers}) ->
rabbit_log:info("rabbit on node ~p down~n", [Node]),
rabbit_log:info("rabbit on node ~p down", [Node]),
{AllNodes, DiscNodes, RunningNodes} = read_cluster_status(),
write_cluster_status({AllNodes, DiscNodes, del_node(Node, RunningNodes)}),
[P ! {node_down, Node} || P <- pmon:monitored(Subscribers)],
@ -565,7 +565,7 @@ handle_info({'DOWN', _MRef, process, Pid, _Reason},
handle_info({nodedown, Node, Info}, State = #state{guid = MyGUID,
node_guids = GUIDs}) ->
rabbit_log:info("node ~p down: ~p~n",
rabbit_log:info("node ~p down: ~p",
[Node, proplists:get_value(nodedown_reason, Info)]),
Check = fun (N, CheckGUID, DownGUID) ->
cast(N, {check_partial_partition,
@ -583,7 +583,7 @@ handle_info({nodedown, Node, Info}, State = #state{guid = MyGUID,
{noreply, handle_dead_node(Node, State)};
handle_info({nodeup, Node, _Info}, State) ->
rabbit_log:info("node ~p up~n", [Node]),
rabbit_log:info("node ~p up", [Node]),
{noreply, State};
handle_info({mnesia_system_event,
@ -687,13 +687,13 @@ handle_dead_node(Node, State = #state{autoheal = Autoheal}) ->
State#state{autoheal = rabbit_autoheal:node_down(Node, Autoheal)};
{ok, Term} ->
rabbit_log:warning("cluster_partition_handling ~p unrecognised, "
"assuming 'ignore'~n", [Term]),
"assuming 'ignore'", [Term]),
State
end.
await_cluster_recovery(Condition) ->
rabbit_log:warning("Cluster minority/secondary status detected - "
"awaiting recovery~n", []),
"awaiting recovery", []),
run_outside_applications(fun () ->
rabbit:stop(),
wait_for_cluster_recovery(Condition)
@ -744,7 +744,7 @@ do_run_outside_app_fun(Fun) ->
Fun()
catch _:E:Stacktrace ->
rabbit_log:error(
"rabbit_outside_app_process:~n~p~n~p~n",
"rabbit_outside_app_process:~n~p~n~p",
[E, Stacktrace])
end.
@ -920,7 +920,7 @@ possibly_partitioned_nodes() ->
alive_rabbit_nodes() -- rabbit_nodes:all_running().
startup_log([]) ->
rabbit_log:info("Starting rabbit_node_monitor~n", []);
rabbit_log:info("Starting rabbit_node_monitor", []);
startup_log(Nodes) ->
rabbit_log:info("Starting rabbit_node_monitor, might be partitioned from ~p~n",
rabbit_log:info("Starting rabbit_node_monitor, might be partitioned from ~p",
[Nodes]).

View File

@ -84,7 +84,7 @@ lock_acquisition_failure_mode() ->
-spec log_configured_backend() -> ok.
log_configured_backend() ->
rabbit_log:info("Configured peer discovery backend: ~s~n", [backend()]).
rabbit_log:info("Configured peer discovery backend: ~s", [backend()]).
maybe_init() ->
Backend = backend(),

View File

@ -56,13 +56,13 @@ ensure1(FileJustChanged0) ->
{[], []} ->
ok;
{[], _} ->
rabbit_log:info("Plugins changed; disabled ~p~n",
rabbit_log:info("Plugins changed; disabled ~p",
[Stop]);
{_, []} ->
rabbit_log:info("Plugins changed; enabled ~p~n",
rabbit_log:info("Plugins changed; enabled ~p",
[Start]);
{_, _} ->
rabbit_log:info("Plugins changed; enabled ~p, disabled ~p~n",
rabbit_log:info("Plugins changed; enabled ~p, disabled ~p",
[Start, Stop])
end,
{ok, Start, Stop};
@ -429,7 +429,7 @@ prepare_dir_plugin(PluginAppDescPath) ->
rabbit_log:error("Failed to enable plugin \"~s\": "
"it may have been built with an "
"incompatible (more recent?) "
"version of Erlang~n", [Plugin]),
"version of Erlang", [Plugin]),
throw({plugin_built_with_incompatible_erlang, Plugin});
Error ->
throw({plugin_module_unloadable, Plugin, Error})
@ -459,11 +459,11 @@ prepare_plugin(#plugin{type = ez, name = Name, location = Location}, ExpandDir)
[PluginAppDescPath|_] ->
prepare_dir_plugin(PluginAppDescPath);
_ ->
rabbit_log:error("Plugin archive '~s' doesn't contain an .app file~n", [Location]),
rabbit_log:error("Plugin archive '~s' doesn't contain an .app file", [Location]),
throw({app_file_missing, Name, Location})
end;
{error, Reason} ->
rabbit_log:error("Could not unzip plugin archive '~s': ~p~n", [Location, Reason]),
rabbit_log:error("Could not unzip plugin archive '~s': ~p", [Location, Reason]),
throw({failed_to_unzip_plugin, Name, Location, Reason})
end;
prepare_plugin(#plugin{type = dir, location = Location, name = Name},
@ -472,7 +472,7 @@ prepare_plugin(#plugin{type = dir, location = Location, name = Name},
[PluginAppDescPath|_] ->
prepare_dir_plugin(PluginAppDescPath);
_ ->
rabbit_log:error("Plugin directory '~s' doesn't contain an .app file~n", [Location]),
rabbit_log:error("Plugin directory '~s' doesn't contain an .app file", [Location]),
throw({app_file_missing, Name, Location})
end.

View File

@ -1,15 +1,24 @@
-module(rabbit_prelaunch_cluster).
-include_lib("kernel/include/logger.hrl").
-include_lib("rabbit_common/include/logging.hrl").
-export([setup/1]).
setup(Context) ->
rabbit_log_prelaunch:debug(""),
rabbit_log_prelaunch:debug("== Clustering =="),
rabbit_log_prelaunch:debug("Preparing cluster status files"),
?LOG_DEBUG(
"~n== Clustering ==", [],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
?LOG_DEBUG(
"Preparing cluster status files", [],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
rabbit_node_monitor:prepare_cluster_status_files(),
case Context of
#{initial_pass := true} ->
rabbit_log_prelaunch:debug("Upgrading Mnesia schema"),
?LOG_DEBUG(
"Upgrading Mnesia schema", [],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
ok = rabbit_upgrade:maybe_upgrade_mnesia();
_ ->
ok
@ -17,6 +26,8 @@ setup(Context) ->
%% It's important that the consistency check happens after
%% the upgrade, since if we are a secondary node the
%% primary node will have forgotten us
rabbit_log_prelaunch:debug("Checking cluster consistency"),
?LOG_DEBUG(
"Checking cluster consistency", [],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
rabbit_mnesia:check_cluster_consistency(),
ok.

View File

@ -7,13 +7,17 @@
-module(rabbit_prelaunch_enabled_plugins_file).
-include_lib("kernel/include/logger.hrl").
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("rabbit_common/include/logging.hrl").
-export([setup/1]).
setup(Context) ->
rabbit_log_prelaunch:debug(""),
rabbit_log_prelaunch:debug("== Enabled plugins file =="),
?LOG_DEBUG(
"~n== Enabled plugins file ==", [],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
update_enabled_plugins_file(Context).
%% -------------------------------------------------------------------
@ -33,21 +37,28 @@ do_update_enabled_plugins_file(#{enabled_plugins_file := File}, List) ->
SortedList = lists:usort(List),
case SortedList of
[] ->
rabbit_log_prelaunch:debug("Marking all plugins as disabled");
?LOG_DEBUG(
"Marking all plugins as disabled", [],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH});
_ ->
rabbit_log_prelaunch:debug(
"Marking the following plugins as enabled:"),
[rabbit_log_prelaunch:debug(" - ~s", [P]) || P <- SortedList]
?LOG_DEBUG(
lists:flatten(["Marking the following plugins as enabled:",
["~n - ~s" || _ <- SortedList]]),
SortedList,
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH})
end,
Content = io_lib:format("~p.~n", [SortedList]),
case file:write_file(File, Content) of
ok ->
rabbit_log_prelaunch:debug("Wrote plugins file: ~ts", [File]),
?LOG_DEBUG(
"Wrote plugins file: ~ts", [File],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
ok;
{error, Reason} ->
rabbit_log_prelaunch:error(
?LOG_ERROR(
"Failed to update enabled plugins file \"~ts\" "
"from $RABBITMQ_ENABLED_PLUGINS: ~ts",
[File, file:format_error(Reason)]),
[File, file:format_error(Reason)],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
throw({error, failed_to_update_enabled_plugins_file})
end.

View File

@ -7,26 +7,35 @@
-module(rabbit_prelaunch_feature_flags).
-include_lib("kernel/include/logger.hrl").
-include_lib("rabbit_common/include/logging.hrl").
-export([setup/1]).
setup(#{feature_flags_file := FFFile}) ->
rabbit_log_prelaunch:debug(""),
rabbit_log_prelaunch:debug("== Feature flags =="),
?LOG_DEBUG(
"~n== Feature flags ==", [],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
case filelib:ensure_dir(FFFile) of
ok ->
rabbit_log_prelaunch:debug("Initializing feature flags registry"),
?LOG_DEBUG(
"Initializing feature flags registry", [],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
case rabbit_feature_flags:initialize_registry() of
ok ->
ok;
{error, Reason} ->
rabbit_log_prelaunch:error(
?LOG_ERROR(
"Failed to initialize feature flags registry: ~p",
[Reason]),
[Reason],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
throw({error, failed_to_initialize_feature_flags_registry})
end;
{error, Reason} ->
rabbit_log_prelaunch:error(
?LOG_ERROR(
"Failed to create feature flags file \"~ts\" directory: ~ts",
[FFFile, file:format_error(Reason)]),
[FFFile, file:format_error(Reason)],
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
throw({error, failed_to_create_feature_flags_file_directory})
end.

File diff suppressed because it is too large Load Diff

View File

@ -79,7 +79,7 @@ init(Q0, restart) when ?is_amqqueue(Q0) ->
crash_restart(Q0) when ?is_amqqueue(Q0) ->
QueueName = amqqueue:get_name(Q0),
rabbit_log:error("Restarting crashed ~s.~n", [rabbit_misc:rs(QueueName)]),
rabbit_log:error("Restarting crashed ~s.", [rabbit_misc:rs(QueueName)]),
gen_server2:cast(self(), init),
Q1 = amqqueue:set_pid(Q0, self()),
rabbit_amqqueue_process:init(Q1).

View File

@ -67,7 +67,7 @@ enable() ->
{ok, RealBQ} = application:get_env(rabbit, backing_queue_module),
case RealBQ of
?MODULE -> ok;
_ -> rabbit_log:info("Priority queues enabled, real BQ is ~s~n",
_ -> rabbit_log:info("Priority queues enabled, real BQ is ~s",
[RealBQ]),
application:set_env(
rabbitmq_priority_queue, backing_queue_module, RealBQ),

View File

@ -1478,7 +1478,7 @@ move_to_per_vhost_stores(#resource{virtual_host = VHost} = QueueName) ->
ok = rabbit_file:rename(OldQueueDir, NewQueueDir),
ok = ensure_queue_name_stub_file(NewQueueDir, QueueName);
false ->
Msg = "Queue index directory '~s' not found for ~s~n",
Msg = "Queue index directory '~s' not found for ~s",
Args = [OldQueueDir, rabbit_misc:rs(QueueName)],
rabbit_log_upgrade:error(Msg, Args),
rabbit_log:error(Msg, Args)

View File

@ -419,7 +419,7 @@ handle_tick(QName,
[] ->
ok;
Stale ->
rabbit_log:info("~s: stale nodes detected. Purging ~w~n",
rabbit_log:info("~s: stale nodes detected. Purging ~w",
[rabbit_misc:rs(QName), Stale]),
%% pipeline purge command
{ok, Q} = rabbit_amqqueue:lookup(QName),
@ -618,7 +618,7 @@ force_delete_queue(Servers) ->
Err ->
rabbit_log:warning(
"Force delete of ~w failed with: ~w"
"This may require manual data clean up~n",
"This may require manual data clean up",
[S, Err]),
ok
end

View File

@ -272,19 +272,16 @@ server_capabilities(_) ->
%%--------------------------------------------------------------------------
socket_error(Reason) when is_atom(Reason) ->
rabbit_log_connection:error("Error on AMQP connection ~p: ~s~n",
rabbit_log_connection:error("Error on AMQP connection ~p: ~s",
[self(), rabbit_misc:format_inet_error(Reason)]);
socket_error(Reason) ->
Fmt = "Error on AMQP connection ~p:~n~p~n",
Fmt = "Error on AMQP connection ~p:~n~p",
Args = [self(), Reason],
case Reason of
%% The socket was closed while upgrading to SSL.
%% This is presumably a TCP healthcheck, so don't log
%% it unless specified otherwise.
{ssl_upgrade_error, closed} ->
%% Lager sinks (rabbit_log_connection)
%% are handled by the lager parse_transform.
%% Hence have to define the loglevel as a function call.
rabbit_log_connection:debug(Fmt, Args);
_ ->
rabbit_log_connection:error(Fmt, Args)
@ -365,11 +362,11 @@ start_connection(Parent, HelperSup, Deb, Sock) ->
%% connection was closed cleanly by the client
#v1{connection = #connection{user = #user{username = Username},
vhost = VHost}} ->
rabbit_log_connection:info("closing AMQP connection ~p (~s, vhost: '~s', user: '~s')~n",
rabbit_log_connection:info("closing AMQP connection ~p (~s, vhost: '~s', user: '~s')",
[self(), dynamic_connection_name(Name), VHost, Username]);
%% just to be more defensive
_ ->
rabbit_log_connection:info("closing AMQP connection ~p (~s)~n",
rabbit_log_connection:info("closing AMQP connection ~p (~s)",
[self(), dynamic_connection_name(Name)])
end
catch
@ -419,36 +416,36 @@ log_connection_exception(Severity, Name, {heartbeat_timeout, TimeoutSec}) ->
%% Long line to avoid extra spaces and line breaks in log
log_connection_exception_with_severity(Severity,
"closing AMQP connection ~p (~s):~n"
"missed heartbeats from client, timeout: ~ps~n",
"missed heartbeats from client, timeout: ~ps",
[self(), Name, TimeoutSec]);
log_connection_exception(Severity, Name, {connection_closed_abruptly,
#v1{connection = #connection{user = #user{username = Username},
vhost = VHost}}}) ->
log_connection_exception_with_severity(Severity,
"closing AMQP connection ~p (~s, vhost: '~s', user: '~s'):~nclient unexpectedly closed TCP connection~n",
"closing AMQP connection ~p (~s, vhost: '~s', user: '~s'):~nclient unexpectedly closed TCP connection",
[self(), Name, VHost, Username]);
%% when client abruptly closes connection before connection.open/authentication/authorization
%% succeeded, don't log username and vhost as 'none'
log_connection_exception(Severity, Name, {connection_closed_abruptly, _}) ->
log_connection_exception_with_severity(Severity,
"closing AMQP connection ~p (~s):~nclient unexpectedly closed TCP connection~n",
"closing AMQP connection ~p (~s):~nclient unexpectedly closed TCP connection",
[self(), Name]);
%% failed connection.tune negotiations
log_connection_exception(Severity, Name, {handshake_error, tuning, _Channel,
{exit, #amqp_error{explanation = Explanation},
_Method, _Stacktrace}}) ->
log_connection_exception_with_severity(Severity,
"closing AMQP connection ~p (~s):~nfailed to negotiate connection parameters: ~s~n",
"closing AMQP connection ~p (~s):~nfailed to negotiate connection parameters: ~s",
[self(), Name, Explanation]);
%% old exception structure
log_connection_exception(Severity, Name, connection_closed_abruptly) ->
log_connection_exception_with_severity(Severity,
"closing AMQP connection ~p (~s):~n"
"client unexpectedly closed TCP connection~n",
"client unexpectedly closed TCP connection",
[self(), Name]);
log_connection_exception(Severity, Name, Ex) ->
log_connection_exception_with_severity(Severity,
"closing AMQP connection ~p (~s):~n~p~n",
"closing AMQP connection ~p (~s):~n~p",
[self(), Name, Ex]).
log_connection_exception_with_severity(Severity, Fmt, Args) ->
@ -508,7 +505,7 @@ mainloop(Deb, Buf, BufLen, State = #v1{sock = Sock,
%%
%% The goal is to not log TCP healthchecks (a connection
%% with no data received) unless specified otherwise.
Fmt = "accepting AMQP connection ~p (~s)~n",
Fmt = "accepting AMQP connection ~p (~s)",
Args = [self(), ConnName],
case Recv of
closed -> rabbit_log_connection:debug(Fmt, Args);
@ -756,7 +753,7 @@ wait_for_channel_termination(N, TimerRef,
rabbit_log_connection:error(
"Error on AMQP connection ~p (~s, vhost: '~s',"
" user: '~s', state: ~p), channel ~p:"
"error while terminating:~n~p~n",
"error while terminating:~n~p",
[self(), ConnName, VHost, User#user.username,
CS, Channel, Reason]),
handle_uncontrolled_channel_close(ChPid),
@ -797,7 +794,7 @@ log_hard_error(#v1{connection_state = CS,
vhost = VHost}}, Channel, Reason) ->
rabbit_log_connection:error(
"Error on AMQP connection ~p (~s, vhost: '~s',"
" user: '~s', state: ~p), channel ~p:~n ~s~n",
" user: '~s', state: ~p), channel ~p:~n ~s",
[self(), ConnName, VHost, User#user.username, CS, Channel, format_hard_error(Reason)]).
handle_exception(State = #v1{connection_state = closed}, Channel, Reason) ->
@ -816,7 +813,7 @@ handle_exception(State = #v1{connection = #connection{protocol = Protocol,
Channel, Reason = #amqp_error{name = access_refused,
explanation = ErrMsg}) ->
rabbit_log_connection:error(
"Error on AMQP connection ~p (~s, state: ~p):~n~s~n",
"Error on AMQP connection ~p (~s, state: ~p):~n~s",
[self(), ConnName, starting, ErrMsg]),
%% respect authentication failure notification capability
case rabbit_misc:table_lookup(Capabilities,
@ -835,7 +832,7 @@ handle_exception(State = #v1{connection = #connection{protocol = Protocol,
Channel, Reason = #amqp_error{name = not_allowed,
explanation = ErrMsg}) ->
rabbit_log_connection:error(
"Error on AMQP connection ~p (~s, user: '~s', state: ~p):~n~s~n",
"Error on AMQP connection ~p (~s, user: '~s', state: ~p):~n~s",
[self(), ConnName, User#user.username, opening, ErrMsg]),
send_error_on_channel0_and_close(Channel, Protocol, Reason, State);
handle_exception(State = #v1{connection = #connection{protocol = Protocol},
@ -853,7 +850,7 @@ handle_exception(State = #v1{connection = #connection{protocol = Protocol,
explanation = ErrMsg}) ->
rabbit_log_connection:error(
"Error on AMQP connection ~p (~s,"
" user: '~s', state: ~p):~n~s~n",
" user: '~s', state: ~p):~n~s",
[self(), ConnName, User#user.username, tuning, ErrMsg]),
send_error_on_channel0_and_close(Channel, Protocol, Reason, State);
handle_exception(State, Channel, Reason) ->
@ -1256,7 +1253,7 @@ handle_method0(#'connection.open'{virtual_host = VHost},
maybe_emit_stats(State1),
rabbit_log_connection:info(
"connection ~p (~s): "
"user '~s' authenticated and granted access to vhost '~s'~n",
"user '~s' authenticated and granted access to vhost '~s'",
[self(), dynamic_connection_name(ConnName), Username, VHost]),
State1;
handle_method0(#'connection.close'{}, State) when ?IS_RUNNING(State) ->
@ -1282,7 +1279,7 @@ handle_method0(#'connection.update_secret'{new_secret = NewSecret, reason = Reas
sock = Sock}) when ?IS_RUNNING(State) ->
rabbit_log_connection:debug(
"connection ~p (~s) of user '~s': "
"asked to update secret, reason: ~s~n",
"asked to update secret, reason: ~s",
[self(), dynamic_connection_name(ConnName), Username, Reason]),
case rabbit_access_control:update_state(User, NewSecret) of
{ok, User1} ->
@ -1299,15 +1296,15 @@ handle_method0(#'connection.update_secret'{new_secret = NewSecret, reason = Reas
ok = send_on_channel0(Sock, #'connection.update_secret_ok'{}, Protocol),
rabbit_log_connection:info(
"connection ~p (~s): "
"user '~s' updated secret, reason: ~s~n",
"user '~s' updated secret, reason: ~s",
[self(), dynamic_connection_name(ConnName), Username, Reason]),
State#v1{connection = Conn#connection{user = User1}};
{refused, Message} ->
rabbit_log_connection:error("Secret update was refused for user '~p': ~p",
rabbit_log_connection:error("Secret update was refused for user '~s': ~p",
[Username, Message]),
rabbit_misc:protocol_error(not_allowed, "New secret was refused by one of the backends", []);
{error, Message} ->
rabbit_log_connection:error("Secret update for user '~p' failed: ~p",
rabbit_log_connection:error("Secret update for user '~s' failed: ~p",
[Username, Message]),
rabbit_misc:protocol_error(not_allowed,
"Secret update failed", [])
@ -1772,7 +1769,7 @@ augment_connection_log_name(#connection{name = Name} = Connection) ->
Connection;
UserSpecifiedName ->
LogName = <<Name/binary, " - ", UserSpecifiedName/binary>>,
rabbit_log_connection:info("Connection ~p (~s) has a client-provided name: ~s~n", [self(), Name, UserSpecifiedName]),
rabbit_log_connection:info("Connection ~p (~s) has a client-provided name: ~s", [self(), Name, UserSpecifiedName]),
?store_proc_name(LogName),
Connection#connection{log_name = LogName}
end.

View File

@ -101,7 +101,7 @@ wait(TableNames, Retry) ->
wait(TableNames, Timeout, Retries) ->
%% We might be in ctl here for offline ops, in which case we can't
%% get_env() for the rabbit app.
rabbit_log:info("Waiting for Mnesia tables for ~p ms, ~p retries left~n",
rabbit_log:info("Waiting for Mnesia tables for ~p ms, ~p retries left",
[Timeout, Retries - 1]),
Result = case mnesia:wait_for_tables(TableNames, Timeout) of
ok ->
@ -120,7 +120,7 @@ wait(TableNames, Timeout, Retries) ->
{1, {error, _} = Error} ->
throw(Error);
{_, {error, Error}} ->
rabbit_log:warning("Error while waiting for Mnesia tables: ~p~n", [Error]),
rabbit_log:warning("Error while waiting for Mnesia tables: ~p", [Error]),
wait(TableNames, Timeout, Retries - 1)
end.

View File

@ -74,13 +74,13 @@ tap_out({#resource{name = QName, virtual_host = VHost},
-spec start(rabbit_types:vhost()) -> 'ok'.
start(VHost) ->
rabbit_log:info("Enabling tracing for vhost '~s'~n", [VHost]),
rabbit_log:info("Enabling tracing for vhost '~s'", [VHost]),
update_config(fun (VHosts) -> [VHost | VHosts -- [VHost]] end).
-spec stop(rabbit_types:vhost()) -> 'ok'.
stop(VHost) ->
rabbit_log:info("Disabling tracing for vhost '~s'~n", [VHost]),
rabbit_log:info("Disabling tracing for vhost '~s'", [VHost]),
update_config(fun (VHosts) -> VHosts -- [VHost] end).
update_config(Fun) ->

View File

@ -52,7 +52,7 @@ count_tracked_items(TableNameFun, CountRecPosition, Key, ContextMsg) ->
Acc + N
catch _:Err ->
rabbit_log:error(
"Failed to fetch number of ~p ~p on node ~p:~n~p~n",
"Failed to fetch number of ~p ~p on node ~p:~n~p",
[ContextMsg, Key, Node, Err]),
Acc
end

View File

@ -91,9 +91,9 @@ ensure_backup_taken() ->
take_backup() ->
BackupDir = backup_dir(),
info("upgrades: Backing up mnesia dir to ~p~n", [BackupDir]),
info("upgrades: Backing up mnesia dir to ~p", [BackupDir]),
case rabbit_mnesia:copy_db(BackupDir) of
ok -> info("upgrades: Mnesia dir backed up to ~p~n",
ok -> info("upgrades: Mnesia dir backed up to ~p",
[BackupDir]);
{error, E} -> throw({could_not_back_up_mnesia_dir, E, BackupDir})
end.
@ -106,7 +106,7 @@ ensure_backup_removed() ->
remove_backup() ->
ok = rabbit_file:recursive_delete([backup_dir()]),
info("upgrades: Mnesia backup removed~n", []).
info("upgrades: Mnesia backup removed", []).
-spec maybe_upgrade_mnesia() -> 'ok'.
@ -216,7 +216,7 @@ primary_upgrade(Upgrades, Nodes) ->
rabbit_table:force_load(),
case Others of
[] -> ok;
_ -> info("mnesia upgrades: Breaking cluster~n", []),
_ -> info("mnesia upgrades: Breaking cluster", []),
[{atomic, ok} = mnesia:del_table_copy(schema, Node)
|| Node <- Others]
end
@ -280,16 +280,16 @@ maybe_migrate_queues_to_per_vhost_storage() ->
apply_upgrades(Scope, Upgrades, Fun) ->
ok = rabbit_file:lock_file(lock_filename()),
info("~s upgrades: ~w to apply~n", [Scope, length(Upgrades)]),
info("~s upgrades: ~w to apply", [Scope, length(Upgrades)]),
rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia),
Fun(),
[apply_upgrade(Scope, Upgrade) || Upgrade <- Upgrades],
info("~s upgrades: All upgrades applied successfully~n", [Scope]),
info("~s upgrades: All upgrades applied successfully", [Scope]),
ok = rabbit_version:record_desired_for_scope(Scope),
ok = file:delete(lock_filename()).
apply_upgrade(Scope, {M, F}) ->
info("~s upgrades: Applying ~w:~w~n", [Scope, M, F]),
info("~s upgrades: Applying ~w:~w", [Scope, M, F]),
ok = apply(M, F, []).
%% -------------------------------------------------------------------

View File

@ -426,9 +426,9 @@ cluster_name_tx() ->
case Tl of
[] -> ok;
_ -> {VHost, _, _} = K,
error_logger:warning_msg(
logger:warning(
"Multiple local-nodenames found, picking '~s' "
"from '~s' for cluster name~n", [Name, VHost])
"from '~s' for cluster name", [Name, VHost])
end
end,
[mnesia:delete(T, K, write) || K <- Ks],

View File

@ -484,7 +484,7 @@ stop(VHost) ->
ok = rabbit_queue_index:stop(VHost).
start_msg_store(VHost, Refs, StartFunState) when is_list(Refs); Refs == undefined ->
rabbit_log:info("Starting message stores for vhost '~s'~n", [VHost]),
rabbit_log:info("Starting message stores for vhost '~s'", [VHost]),
do_start_msg_store(VHost, ?TRANSIENT_MSG_STORE, undefined, ?EMPTY_START_FUN_STATE),
do_start_msg_store(VHost, ?PERSISTENT_MSG_STORE, Refs, StartFunState),
ok.
@ -492,13 +492,13 @@ start_msg_store(VHost, Refs, StartFunState) when is_list(Refs); Refs == undefine
do_start_msg_store(VHost, Type, Refs, StartFunState) ->
case rabbit_vhost_msg_store:start(VHost, Type, Refs, StartFunState) of
{ok, _} ->
rabbit_log:info("Started message store of type ~s for vhost '~s'~n", [abbreviated_type(Type), VHost]);
rabbit_log:info("Started message store of type ~s for vhost '~s'", [abbreviated_type(Type), VHost]);
{error, {no_such_vhost, VHost}} = Err ->
rabbit_log:error("Failed to start message store of type ~s for vhost '~s': the vhost no longer exists!~n",
rabbit_log:error("Failed to start message store of type ~s for vhost '~s': the vhost no longer exists!",
[Type, VHost]),
exit(Err);
{error, Error} ->
rabbit_log:error("Failed to start message store of type ~s for vhost '~s': ~p~n",
rabbit_log:error("Failed to start message store of type ~s for vhost '~s': ~p",
[Type, VHost, Error]),
exit({error, Error})
end.
@ -2846,7 +2846,7 @@ move_messages_to_vhost_store(Queues) ->
in_batches(MigrationBatchSize,
{rabbit_variable_queue, migrate_queue, [OldStore, NewMsgStore]},
QueuesWithTerms,
"message_store upgrades: Migrating batch ~p of ~p queues. Out of total ~p ~n",
"message_store upgrades: Migrating batch ~p of ~p queues. Out of total ~p ",
"message_store upgrades: Batch ~p of ~p queues migrated ~n. ~p total left"),
log_upgrade("Message store migration finished"),
@ -2882,7 +2882,7 @@ migrate_queue({QueueName = #resource{virtual_host = VHost, name = Name},
RecoveryTerm},
OldStore, NewStore) ->
log_upgrade_verbose(
"Migrating messages in queue ~s in vhost ~s to per-vhost message store~n",
"Migrating messages in queue ~s in vhost ~s to per-vhost message store",
[Name, VHost]),
OldStoreClient = get_global_store_client(OldStore),
NewStoreClient = get_per_vhost_store_client(QueueName, NewStore),

View File

@ -43,7 +43,7 @@ recover() ->
recover(VHost) ->
VHostDir = msg_store_dir_path(VHost),
rabbit_log:info("Making sure data directory '~ts' for vhost '~s' exists~n",
rabbit_log:info("Making sure data directory '~ts' for vhost '~s' exists",
[VHostDir, VHost]),
VHostStubFile = filename:join(VHostDir, ".vhost"),
ok = rabbit_file:ensure_dir(VHostStubFile),
@ -147,7 +147,7 @@ delete(VHost, ActingUser) ->
%% process, which in turn results in further mnesia actions and
%% eventually the termination of that process. Exchange deletion causes
%% notifications which must be sent outside the TX
rabbit_log:info("Deleting vhost '~s'~n", [VHost]),
rabbit_log:info("Deleting vhost '~s'", [VHost]),
QDelFun = fun (Q) -> rabbit_amqqueue:delete(Q, false, false, ActingUser) end,
[begin
Name = amqqueue:get_name(Q),
@ -257,7 +257,7 @@ vhost_down(VHost) ->
delete_storage(VHost) ->
VhostDir = msg_store_dir_path(VHost),
rabbit_log:info("Deleting message store directory for vhost '~s' at '~s'~n", [VHost, VhostDir]),
rabbit_log:info("Deleting message store directory for vhost '~s' at '~s'", [VHost, VhostDir]),
%% Message store should be closed when vhost supervisor is closed.
case rabbit_file:recursive_delete([VhostDir]) of
ok -> ok;

View File

@ -41,7 +41,7 @@ start_link(VHost) ->
init([VHost]) ->
process_flag(trap_exit, true),
rabbit_log:debug("Recovering data for VHost ~p~n", [VHost]),
rabbit_log:debug("Recovering data for VHost ~p", [VHost]),
try
%% Recover the vhost data and save it to vhost registry.
ok = rabbit_vhost:recover(VHost),

View File

@ -79,7 +79,7 @@ stop_and_delete_vhost(VHost) ->
false -> ok;
true ->
rabbit_log:info("Stopping vhost supervisor ~p"
" for vhost '~s'~n",
" for vhost '~s'",
[VHostSupPid, VHost]),
case supervisor2:terminate_child(?MODULE, WrapperPid) of
ok ->

View File

@ -65,9 +65,7 @@ start_link(IPAddress, Port,
init({IPAddress, Port, {M,F,A} = OnStartup, OnShutdown, Label}) ->
process_flag(trap_exit, true),
error_logger:info_msg(
"started ~s on ~s:~p~n",
[Label, rabbit_misc:ntoab(IPAddress), Port]),
logger:info("started ~s on ~s:~p", [Label, rabbit_misc:ntoab(IPAddress), Port]),
apply(M, F, A ++ [IPAddress, Port]),
{ok, #state{on_startup = OnStartup, on_shutdown = OnShutdown,
label = Label, ip=IPAddress, port=Port}}.
@ -82,8 +80,7 @@ handle_info(_Info, State) ->
{noreply, State}.
terminate(_Reason, #state{on_shutdown = {M,F,A}, label=Label, ip=IPAddress, port=Port}) ->
error_logger:info_msg("stopped ~s on ~s:~p~n",
[Label, rabbit_misc:ntoab(IPAddress), Port]),
logger:info("stopped ~s on ~s:~p", [Label, rabbit_misc:ntoab(IPAddress), Port]),
apply(M, F, A ++ [IPAddress, Port]).
code_change(_OldVsn, State, _Extra) ->

View File

@ -141,20 +141,7 @@ init_per_testcase(Testcase, Config) ->
TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
case ?config(tc_group_properties, Config) of
[{name, registry} | _] ->
application:set_env(lager, colored, true),
application:set_env(
lager,
handlers, [{lager_console_backend, [{level, debug}]}]),
application:set_env(
lager,
extra_sinks,
[{rabbit_log_lager_event,
[{handlers, [{lager_console_backend, [{level, debug}]}]}]
},
{rabbit_log_feature_flags_lager_event,
[{handlers, [{lager_console_backend, [{level, debug}]}]}]
}]),
lager:start(),
logger:set_primary_config(level, debug),
FeatureFlagsFile = filename:join(?config(priv_dir, Config),
rabbit_misc:format(
"feature_flags-~s",

642
deps/rabbit/test/logging_SUITE.erl vendored Normal file
View File

@ -0,0 +1,642 @@
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2016-2021 VMware, Inc. or its affiliates. All rights reserved.
%%
-module(logging_SUITE).
-include_lib("common_test/include/ct.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("kernel/include/logger.hrl").
-include_lib("rabbit_common/include/logging.hrl").
-include_lib("amqp_client/include/amqp_client.hrl").
-export([all/0,
init_per_suite/1,
end_per_suite/1,
init_per_group/2,
end_per_group/2,
init_per_testcase/2,
end_per_testcase/2,
logging_with_default_config_works/1,
logging_to_stdout_configured_in_env_works/1,
logging_to_stdout_configured_in_config_works/1,
logging_to_stderr_configured_in_env_works/1,
logging_to_exchange_works/1,
setting_log_levels_in_env_works/1,
setting_log_levels_in_config_works/1,
format_messages_as_json_works/1]).
%% Common Test callback: the list of testcases this suite runs.
all() ->
    [logging_with_default_config_works,
     logging_to_stdout_configured_in_env_works,
     logging_to_stdout_configured_in_config_works,
     logging_to_stderr_configured_in_env_works,
     logging_to_exchange_works,
     setting_log_levels_in_env_works,
     setting_log_levels_in_config_works,
     format_messages_as_json_works].
%% Common Test suite/group lifecycle callbacks. Only the suite-level
%% setup does real work (logs the environment and runs the shared CT
%% helper setup steps); the group callbacks are pass-throughs.
init_per_suite(Config) ->
    rabbit_ct_helpers:log_environment(),
    rabbit_ct_helpers:run_setup_steps(Config).

end_per_suite(Config) ->
    Config.

init_per_group(_, Config) ->
    Config.

end_per_group(_, Config) ->
    Config.
%% Per-testcase setup.
%%
%% logging_to_exchange_works needs a live broker (the exchange handler is
%% installed by the node), so it starts a one-node cluster with the
%% exchange log output enabled at `info'. Every other testcase exercises
%% rabbit_prelaunch_logging inside this CT process: reset all installed
%% logger handlers and the `log' app env, and point `log_base_dir' at a
%% per-testcase directory under priv_dir.
init_per_testcase(logging_to_exchange_works = Testcase, Config) ->
    rabbit_ct_helpers:testcase_started(Config, Testcase),
    Config1 = rabbit_ct_helpers:set_config(
                Config,
                [{rmq_nodes_count, 1},
                 {rmq_nodename_suffix, Testcase}]),
    Config2 = rabbit_ct_helpers:merge_app_env(
                Config1,
                {rabbit, [{log, [{exchange, [{enabled, true},
                                             {level, info}]},
                                 {file, [{level, info}]}]}]}),
    rabbit_ct_helpers:run_steps(
      Config2,
      rabbit_ct_broker_helpers:setup_steps() ++
      rabbit_ct_client_helpers:setup_steps());
init_per_testcase(Testcase, Config) ->
    remove_all_handlers(),
    application:unset_env(rabbit, log),
    LogBaseDir = filename:join(
                   ?config(priv_dir, Config),
                   atom_to_list(Testcase)),
    Config1 = rabbit_ct_helpers:set_config(
                Config, {log_base_dir, LogBaseDir}),
    %% Fix: this is the *setup* callback, so it must report "testcase
    %% started" (as the first clause does), not "finished". The original
    %% called rabbit_ct_helpers:testcase_finished/2 here, firing the
    %% teardown notification before the testcase had run.
    rabbit_ct_helpers:testcase_started(Config1, Testcase).
%% Per-testcase teardown: tear down the broker started for
%% logging_to_exchange_works; for every other testcase just drop the
%% `log' app env the test may have set.
end_per_testcase(logging_to_exchange_works, Config) ->
    rabbit_ct_helpers:run_steps(
      Config,
      rabbit_ct_client_helpers:teardown_steps() ++
      rabbit_ct_broker_helpers:teardown_steps());
end_per_testcase(_, Config) ->
    application:unset_env(rabbit, log),
    Config.
%% Detaches every installed logger handler so a testcase starts from a
%% clean logger configuration. Returns the list of remove_handler/1
%% results (ignored by callers).
remove_all_handlers() ->
    HandlerIds = [Id || #{id := Id} <- logger:get_handler_config()],
    [logger:remove_handler(HandlerId) || HandlerId <- HandlerIds].
%% With no `log' app env and all context variables at their defaults,
%% setup/1 must install two file handlers: rmq_1_file_1 (main log,
%% filters out the upgrade domain) and rmq_1_file_2 (upgrade log,
%% accepts only the upgrade domain), both using the text formatter at
%% level `info'.
logging_with_default_config_works(Config) ->
    Context = default_context(Config),
    rabbit_prelaunch_logging:clear_config_run_number(),
    rabbit_prelaunch_logging:setup(Context),
    Handlers = logger:get_handler_config(),
    MainFileHandler = get_handler_by_id(Handlers, rmq_1_file_1),
    MainFile = main_log_file_in_context(Context),
    ?assertNotEqual(undefined, MainFileHandler),
    ?assertMatch(
       #{level := info,
         module := rabbit_logger_std_h,
         filter_default := log,
         filters := [{progress_reports, {_, stop}},
                     {rmqlog_filter, {_, #{global := info,
                                           upgrade := none}}}],
         formatter := {rabbit_logger_text_fmt, _},
         config := #{type := file,
                     file := MainFile}},
       MainFileHandler),
    UpgradeFileHandler = get_handler_by_id(Handlers, rmq_1_file_2),
    UpgradeFile = upgrade_log_file_in_context(Context),
    ?assertNotEqual(undefined, UpgradeFileHandler),
    ?assertMatch(
       #{level := info,
         module := rabbit_logger_std_h,
         filter_default := stop,
         filters := [{rmqlog_filter, {_, #{upgrade := info}}}],
         formatter := {rabbit_logger_text_fmt, _},
         config := #{type := file,
                     file := UpgradeFile}},
       UpgradeFileHandler),
    %% Routing checks: undomained, global-domain and 3rd-party messages
    %% land in the main file only; upgrade-domain messages land in the
    %% upgrade file only.
    ?assert(ping_log(rmq_1_file_1, info)),
    ?assert(ping_log(rmq_1_file_1, info,
                     #{domain => ?RMQLOG_DOMAIN_GLOBAL})),
    ?assert(ping_log(rmq_1_file_1, info,
                     #{domain => ['3rd_party']})),
    ?assertNot(ping_log(rmq_1_file_1, info,
                        #{domain => ?RMQLOG_DOMAIN_UPGRADE})),
    ?assert(ping_log(rmq_1_file_2, info,
                     #{domain => ?RMQLOG_DOMAIN_UPGRADE})),
    ?assertNot(ping_log(rmq_1_file_2, info,
                        #{domain => ?RMQLOG_DOMAIN_GLOBAL})),
    ok.
%% Simulates the environment-variable configuration `RABBITMQ_LOGS=-`:
%% the main log file is set to "-" (stdout) with origin `environment',
%% then the shared stdout/stderr scenario is run.
logging_to_stdout_configured_in_env_works(Config) ->
    Ctx0 = default_context(Config),
    #{var_origins := Origins} = Ctx0,
    Ctx = Ctx0#{main_log_file => "-",
                var_origins => Origins#{main_log_file => environment}},
    logging_to_stddev_works(standard_io, rmq_1_stdout, Config, Ctx).
%% Same stdout scenario, but configured through the `rabbit'/`log'
%% application environment ({console, [{enabled, true}]}) instead of an
%% environment variable.
logging_to_stdout_configured_in_config_works(Config) ->
    Ctx = default_context(Config),
    LogEnv = [{console, [{enabled, true}]}],
    ok = application:set_env(rabbit, log, LogEnv, [{persistent, true}]),
    logging_to_stddev_works(standard_io, rmq_1_stdout, Config, Ctx).
%% Simulates `RABBITMQ_LOGS=-stderr': the main log file is redirected to
%% stderr with origin `environment', then the shared stdout/stderr
%% scenario is run.
logging_to_stderr_configured_in_env_works(Config) ->
    Ctx0 = default_context(Config),
    #{var_origins := Origins} = Ctx0,
    Ctx = Ctx0#{main_log_file => "-stderr",
                var_origins => Origins#{main_log_file => environment}},
    logging_to_stddev_works(standard_error, rmq_1_stderr, Config, Ctx).
%% Shared body for the stdout/stderr testcases: runs setup/1 with the
%% prepared context, then checks that handler `Id' targets the given
%% standard device and that the upgrade-domain file handler
%% (rmq_1_file_1 in this configuration) is still installed, before
%% exercising message routing with ping_log.
logging_to_stddev_works(Stddev, Id, Config, Context) ->
    rabbit_prelaunch_logging:clear_config_run_number(),
    rabbit_prelaunch_logging:setup(Context),
    Handlers = logger:get_handler_config(),
    StddevHandler = get_handler_by_id(Handlers, Id),
    ?assertNotEqual(undefined, StddevHandler),
    ?assertMatch(
       #{level := info,
         module := rabbit_logger_std_h,
         filter_default := log,
         filters := [{progress_reports, {_, stop}},
                     {rmqlog_filter, {_, #{global := info,
                                           upgrade := none}}}],
         formatter := {rabbit_logger_text_fmt, _},
         config := #{type := Stddev}},
       StddevHandler),
    UpgradeFileHandler = get_handler_by_id(Handlers, rmq_1_file_1),
    UpgradeFile = upgrade_log_file_in_context(Context),
    ?assertNotEqual(undefined, UpgradeFileHandler),
    ?assertMatch(
       #{level := info,
         module := rabbit_logger_std_h,
         filter_default := stop,
         filters := [{rmqlog_filter, {_, #{upgrade := info}}}],
         formatter := {rabbit_logger_text_fmt, _},
         config := #{type := file,
                     file := UpgradeFile}},
       UpgradeFileHandler),
    %% Non-upgrade messages must reach the standard device; upgrade
    %% messages must go to the upgrade file only.
    ?assert(ping_log(Id, info, Config)),
    ?assert(ping_log(Id, info,
                     #{domain => ?RMQLOG_DOMAIN_GLOBAL}, Config)),
    ?assert(ping_log(Id, info,
                     #{domain => ['3rd_party']}, Config)),
    ?assertNot(ping_log(Id, info,
                        #{domain => ?RMQLOG_DOMAIN_UPGRADE}, Config)),
    ?assert(ping_log(rmq_1_file_1, info,
                     #{domain => ?RMQLOG_DOMAIN_UPGRADE})),
    ?assertNot(ping_log(rmq_1_file_1, info,
                        #{domain => ?RMQLOG_DOMAIN_GLOBAL})),
    ok.
%% Runs against the one-node broker started in init_per_testcase:
%% verifies the exchange log handler (rmq_1_exchange) is installed on
%% the node, binds a queue to the log exchange and checks that log
%% messages are published to it.
logging_to_exchange_works(Config) ->
    Context = rabbit_ct_broker_helpers:rpc(
                Config, 0,
                rabbit_prelaunch, get_context, []),
    Handlers = rabbit_ct_broker_helpers:rpc(
                 Config, 0,
                 logger, get_handler_config, []),
    ExchangeHandler = get_handler_by_id(Handlers, rmq_1_exchange),
    ?assertNotEqual(undefined, ExchangeHandler),
    ?assertMatch(
       #{level := info,
         module := rabbit_logger_exchange_h,
         filter_default := log,
         filters := [{progress_reports, {_, stop}},
                     {rmqlog_filter, {_, #{global := info,
                                           upgrade := none}}}],
         formatter := {rabbit_logger_text_fmt, _},
         config := #{exchange := _}},
       ExchangeHandler),
    #{config :=
      #{exchange := #resource{name = XName} = Exchange}} = ExchangeHandler,
    UpgradeFileHandler = get_handler_by_id(Handlers, rmq_1_file_2),
    UpgradeFile = upgrade_log_file_in_context(Context),
    ?assertNotEqual(undefined, UpgradeFileHandler),
    ?assertMatch(
       #{level := info,
         module := rabbit_logger_std_h,
         filter_default := stop,
         filters := [{rmqlog_filter, {_, #{upgrade := info}}}],
         formatter := {rabbit_logger_text_fmt, _},
         config := #{type := file,
                     file := UpgradeFile}},
       UpgradeFileHandler),
    %% Wait for the expected exchange to be automatically declared.
    %% NOTE(review): the boolean result of this wait (up to 20 x 500 ms)
    %% is discarded; if the exchange never appears, the failure surfaces
    %% later at queue.bind instead — confirm this is intentional.
    lists:any(
      fun(_) ->
              Ret = rabbit_ct_broker_helpers:rpc(
                      Config, 0,
                      rabbit_exchange, lookup, [Exchange]),
              case Ret of
                  {ok, _} -> true;
                  _ -> timer:sleep(500),
                       false
              end
      end, lists:seq(1, 20)),
    %% Declare a queue to collect all logged messages.
    {Conn, Chan} = rabbit_ct_client_helpers:open_connection_and_channel(
                     Config),
    QName = <<"log-messages">>,
    ?assertMatch(
       #'queue.declare_ok'{},
       amqp_channel:call(Chan, #'queue.declare'{queue = QName,
                                                durable = false})),
    ?assertMatch(
       #'queue.bind_ok'{},
       amqp_channel:call(Chan, #'queue.bind'{queue = QName,
                                             exchange = XName,
                                             routing_key = <<"#">>})),
    Config1 = rabbit_ct_helpers:set_config(
                Config, {test_channel_and_queue, {Chan, QName}}),
    ?assert(ping_log(rmq_1_exchange, info, Config1)),
    ?assert(ping_log(rmq_1_exchange, info,
                     #{domain => ?RMQLOG_DOMAIN_GLOBAL}, Config1)),
    ?assert(ping_log(rmq_1_exchange, info,
                     #{domain => ['3rd_party']}, Config1)),
    ?assertNot(ping_log(rmq_1_exchange, info,
                        #{domain => ?RMQLOG_DOMAIN_UPGRADE}, Config1)),
    ?assert(ping_log(rmq_1_file_2, info,
                     #{domain => ?RMQLOG_DOMAIN_UPGRADE}, Config)),
    ?assertNot(ping_log(rmq_1_file_2, info,
                        #{domain => ?RMQLOG_DOMAIN_GLOBAL}, Config)),
    amqp_channel:call(Chan, #'queue.delete'{queue = QName}),
    rabbit_ct_client_helpers:close_connection_and_channel(Conn, Chan),
    ok.
%% Per-category log levels passed through environment variables
%% (context `log_levels' with origin `environment'): global messages at
%% `warning', the "prelaunch" category at `error'. The handler-level
%% threshold must be the less severe of the two so both categories pass
%% the handler and are then discriminated by the rmqlog_filter.
setting_log_levels_in_env_works(Config) ->
    GlobalLevel = warning,
    PrelaunchLevel = error,
    MinLevel = rabbit_prelaunch_logging:get_less_severe_level(
                 GlobalLevel, PrelaunchLevel),
    #{var_origins := Origins0} = Context0 = default_context(Config),
    Context = Context0#{log_levels => #{global => GlobalLevel,
                                        "prelaunch" => PrelaunchLevel},
                        var_origins => Origins0#{log_levels => environment}},
    rabbit_prelaunch_logging:clear_config_run_number(),
    rabbit_prelaunch_logging:setup(Context),
    Handlers = logger:get_handler_config(),
    MainFileHandler = get_handler_by_id(Handlers, rmq_1_file_1),
    MainFile = main_log_file_in_context(Context),
    ?assertNotEqual(undefined, MainFileHandler),
    ?assertMatch(
       #{level := MinLevel,
         module := rabbit_logger_std_h,
         filter_default := log,
         filters := [{progress_reports, {_, stop}},
                     {rmqlog_filter, {_, #{global := GlobalLevel,
                                           prelaunch := PrelaunchLevel,
                                           upgrade := none}}}],
         formatter := {rabbit_logger_text_fmt, _},
         config := #{type := file,
                     file := MainFile}},
       MainFileHandler),
    UpgradeFileHandler = get_handler_by_id(Handlers, rmq_1_file_2),
    UpgradeFile = upgrade_log_file_in_context(Context),
    ?assertNotEqual(undefined, UpgradeFileHandler),
    ?assertMatch(
       #{level := info,
         module := rabbit_logger_std_h,
         filter_default := stop,
         filters := [{rmqlog_filter, {_, #{upgrade := info}}}],
         formatter := {rabbit_logger_text_fmt, _},
         config := #{type := file,
                     file := UpgradeFile}},
       UpgradeFileHandler),
    %% `info' is below both configured levels, so nothing at `info' may
    %% be logged; messages at the configured levels must pass.
    ?assertNot(ping_log(rmq_1_file_1, info)),
    ?assertNot(ping_log(rmq_1_file_1, info,
                        #{domain => ?RMQLOG_DOMAIN_GLOBAL})),
    ?assertNot(ping_log(rmq_1_file_1, info,
                        #{domain => ?RMQLOG_DOMAIN_PRELAUNCH})),
    ?assertNot(ping_log(rmq_1_file_1, GlobalLevel,
                        #{domain => ?RMQLOG_DOMAIN_PRELAUNCH})),
    ?assertNot(ping_log(rmq_1_file_1, info,
                        #{domain => ['3rd_party']})),
    ?assertNot(ping_log(rmq_1_file_1, info,
                        #{domain => ?RMQLOG_DOMAIN_UPGRADE})),
    ?assert(ping_log(rmq_1_file_1, GlobalLevel)),
    ?assert(ping_log(rmq_1_file_1, GlobalLevel,
                     #{domain => ?RMQLOG_DOMAIN_GLOBAL})),
    ?assert(ping_log(rmq_1_file_1, PrelaunchLevel,
                     #{domain => ?RMQLOG_DOMAIN_PRELAUNCH})),
    ?assert(ping_log(rmq_1_file_1, GlobalLevel,
                     #{domain => ['3rd_party']})),
    ?assertNot(ping_log(rmq_1_file_1, GlobalLevel,
                        #{domain => ?RMQLOG_DOMAIN_UPGRADE})),
    ?assert(ping_log(rmq_1_file_2, GlobalLevel,
                     #{domain => ?RMQLOG_DOMAIN_UPGRADE})),
    ?assertNot(ping_log(rmq_1_file_2, GlobalLevel,
                        #{domain => ?RMQLOG_DOMAIN_GLOBAL})),
    ok.
%% Same level scenario as setting_log_levels_in_env_works, but the
%% levels come from the `rabbit'/`log' application environment
%% ({file, ...} and {categories, [{prelaunch, ...}]}) instead of the
%% context's environment variables.
setting_log_levels_in_config_works(Config) ->
    GlobalLevel = warning,
    PrelaunchLevel = error,
    MinLevel = rabbit_prelaunch_logging:get_less_severe_level(
                 GlobalLevel, PrelaunchLevel),
    Context = default_context(Config),
    ok = application:set_env(
           rabbit, log,
           [{file, [{level, GlobalLevel}]},
            {categories, [{prelaunch, [{level, PrelaunchLevel}]}]}],
           [{persistent, true}]),
    rabbit_prelaunch_logging:clear_config_run_number(),
    rabbit_prelaunch_logging:setup(Context),
    Handlers = logger:get_handler_config(),
    MainFileHandler = get_handler_by_id(Handlers, rmq_1_file_1),
    MainFile = main_log_file_in_context(Context),
    ?assertNotEqual(undefined, MainFileHandler),
    ?assertMatch(
       #{level := MinLevel,
         module := rabbit_logger_std_h,
         filter_default := log,
         filters := [{progress_reports, {_, stop}},
                     {rmqlog_filter, {_, #{global := GlobalLevel,
                                           prelaunch := PrelaunchLevel,
                                           upgrade := none}}}],
         formatter := {rabbit_logger_text_fmt, _},
         config := #{type := file,
                     file := MainFile}},
       MainFileHandler),
    UpgradeFileHandler = get_handler_by_id(Handlers, rmq_1_file_2),
    UpgradeFile = upgrade_log_file_in_context(Context),
    ?assertNotEqual(undefined, UpgradeFileHandler),
    ?assertMatch(
       #{level := info,
         module := rabbit_logger_std_h,
         filter_default := stop,
         filters := [{rmqlog_filter, {_, #{upgrade := info}}}],
         formatter := {rabbit_logger_text_fmt, _},
         config := #{type := file,
                     file := UpgradeFile}},
       UpgradeFileHandler),
    %% `info' is below both configured levels, so nothing at `info' may
    %% be logged; messages at the configured levels must pass.
    ?assertNot(ping_log(rmq_1_file_1, info)),
    ?assertNot(ping_log(rmq_1_file_1, info,
                        #{domain => ?RMQLOG_DOMAIN_GLOBAL})),
    ?assertNot(ping_log(rmq_1_file_1, info,
                        #{domain => ?RMQLOG_DOMAIN_PRELAUNCH})),
    ?assertNot(ping_log(rmq_1_file_1, GlobalLevel,
                        #{domain => ?RMQLOG_DOMAIN_PRELAUNCH})),
    ?assertNot(ping_log(rmq_1_file_1, info,
                        #{domain => ['3rd_party']})),
    ?assertNot(ping_log(rmq_1_file_1, info,
                        #{domain => ?RMQLOG_DOMAIN_UPGRADE})),
    ?assert(ping_log(rmq_1_file_1, GlobalLevel)),
    ?assert(ping_log(rmq_1_file_1, GlobalLevel,
                     #{domain => ?RMQLOG_DOMAIN_GLOBAL})),
    ?assert(ping_log(rmq_1_file_1, PrelaunchLevel,
                     #{domain => ?RMQLOG_DOMAIN_PRELAUNCH})),
    ?assert(ping_log(rmq_1_file_1, GlobalLevel,
                     #{domain => ['3rd_party']})),
    ?assertNot(ping_log(rmq_1_file_1, GlobalLevel,
                        #{domain => ?RMQLOG_DOMAIN_UPGRADE})),
    ?assert(ping_log(rmq_1_file_2, GlobalLevel,
                     #{domain => ?RMQLOG_DOMAIN_UPGRADE})),
    ?assertNot(ping_log(rmq_1_file_2, GlobalLevel,
                        #{domain => ?RMQLOG_DOMAIN_GLOBAL})),
    ok.
%% With JSON output requested (conveyed here through the `log_levels'
%% context entry, origin `environment'), the main file handler must use
%% rabbit_logger_json_fmt. Logs one message with metadata covering all
%% major term types and checks how each is serialized to JSON.
format_messages_as_json_works(Config) ->
    #{var_origins := Origins0} = Context0 = default_context(Config),
    Context = Context0#{log_levels => #{json => true},
                        var_origins => Origins0#{log_levels => environment}},
    rabbit_prelaunch_logging:clear_config_run_number(),
    rabbit_prelaunch_logging:setup(Context),
    Handlers = logger:get_handler_config(),
    MainFileHandler = get_handler_by_id(Handlers, rmq_1_file_1),
    MainFile = main_log_file_in_context(Context),
    ?assertNotEqual(undefined, MainFileHandler),
    ?assertMatch(
       #{level := info,
         module := rabbit_logger_std_h,
         filter_default := log,
         filters := [{progress_reports, {_, stop}},
                     {rmqlog_filter, {_, #{global := info,
                                           upgrade := none}}}],
         formatter := {rabbit_logger_json_fmt, _},
         config := #{type := file,
                     file := MainFile}},
       MainFileHandler),
    %% ping_log greps for the bare message at end-of-line; in JSON the
    %% message is embedded mid-line, so this must NOT match.
    ?assertNot(ping_log(rmq_1_file_1, info)),
    RandomMsg = get_random_string(
                  32,
                  "abcdefghijklmnopqrstuvwxyz"
                  "ABCDEFGHIJKLMNOPQRSTUVWXYZ"),
    Metadata = #{atom => rabbit,
                 integer => 1,
                 float => 1.42,
                 string => "string",
                 list => ["s", a, 3],
                 map => #{key => "value"},
                 function => fun get_random_string/2,
                 pid => self(),
                 port => hd(erlang:ports()),
                 ref => erlang:make_ref()},
    ?LOG_INFO(RandomMsg, Metadata),
    rabbit_logger_std_h:filesync(rmq_1_file_1),
    {ok, Content} = file:read_file(MainFile),
    ReOpts = [{capture, first, binary}, multiline],
    {match, [Line]} = re:run(
                        Content,
                        "^.+\"" ++ RandomMsg ++ "\".+$",
                        ReOpts),
    Term = jsx:decode(Line, [return_maps, {labels, attempt_atom}]),
    RandomMsgBin = list_to_binary(RandomMsg),
    ?assertMatch(#{time := _}, Term),
    ?assertMatch(#{level := <<"info">>}, Term),
    ?assertMatch(#{msg := RandomMsgBin}, Term),
    Meta = maps:get(meta, Term),
    %% Non-JSON-native terms (funs, pids, ports, refs) are expected to be
    %% serialized as their printable Erlang representation.
    FunBin = list_to_binary(erlang:fun_to_list(maps:get(function, Metadata))),
    PidBin = list_to_binary(erlang:pid_to_list(maps:get(pid, Metadata))),
    PortBin = list_to_binary(erlang:port_to_list(maps:get(port, Metadata))),
    RefBin = list_to_binary(erlang:ref_to_list(maps:get(ref, Metadata))),
    ?assertMatch(#{atom := <<"rabbit">>}, Meta),
    ?assertMatch(#{integer := 1}, Meta),
    ?assertMatch(#{float := 1.42}, Meta),
    ?assertMatch(#{string := <<"string">>}, Meta),
    ?assertMatch(#{list := [<<"s">>, <<"a">>, 3]}, Meta),
    ?assertMatch(#{map := #{key := <<"value">>}}, Meta),
    ?assertMatch(#{function := FunBin}, Meta),
    ?assertMatch(#{pid := PidBin}, Meta),
    ?assertMatch(#{port := PortBin}, Meta),
    ?assertMatch(#{ref := RefBin}, Meta).
%% -------------------------------------------------------------------
%% Internal functions.
%% -------------------------------------------------------------------
%% Builds a minimal prelaunch context for rabbit_prelaunch_logging:
%% per-testcase log directory from CT config, default file names, no
%% configured levels, and every variable marked as originating from its
%% default value.
default_context(Config) ->
    BaseDir = ?config(log_base_dir, Config),
    DefaultOrigins = #{log_base_dir => default,
                       main_log_file => default,
                       upgrade_log_file => default,
                       log_levels => default},
    #{log_base_dir => BaseDir,
      main_log_file => "rabbit.log",
      upgrade_log_file => "rabbit_upgrade.log",
      log_levels => undefined,
      var_origins => DefaultOrigins}.
%% Absolute path of the main log file described by a context map.
main_log_file_in_context(Context) ->
    #{log_base_dir := BaseDir, main_log_file := File} = Context,
    filename:join(BaseDir, File).
%% Absolute path of the upgrade log file described by a context map.
upgrade_log_file_in_context(Context) ->
    #{log_base_dir := BaseDir, upgrade_log_file := File} = Context,
    filename:join(BaseDir, File).
%% Returns the handler-config map whose `id' equals `Id', or `undefined'
%% if no such handler is in the list. Non-matching elements (including
%% maps without an `id' key) are skipped.
get_handler_by_id(Handlers, Id) ->
    HasId = fun(#{id := HandlerId}) -> HandlerId =:= Id;
               (_)                  -> false
            end,
    case lists:search(HasId, Handlers) of
        {value, Handler} -> Handler;
        false            -> undefined
    end.
%% Logs a random message at `Level' and reports whether it reached the
%% output of handler `Id'. These are convenience wrappers around
%% ping_log/4.
ping_log(Id, Level) ->
    ping_log(Id, Level, #{}, []).

%% The third argument is either logger metadata (a map) or a CT config
%% (a proplist); the wrappers disambiguate on the type.
ping_log(Id, Level, Metadata) when is_map(Metadata) ->
    ping_log(Id, Level, Metadata, []);
ping_log(Id, Level, Config) when is_list(Config) ->
    ping_log(Id, Level, #{}, Config).
%% Logs a fresh 32-character random message at `Level' with `Metadata'
%% (locally, or via RPC on node 0 when the testcase runs a broker) and
%% returns whether the message shows up in handler `Id''s output.
%% The random payload makes each probe uniquely greppable.
ping_log(Id, Level, Metadata, Config) ->
    RandomMsg = get_random_string(
                  32,
                  "abcdefghijklmnopqrstuvwxyz"
                  "ABCDEFGHIJKLMNOPQRSTUVWXYZ"),
    ct:log("Logging \"~ts\" at level ~ts (~p)", [RandomMsg, Level, Metadata]),
    case need_rpc(Config) of
        false -> logger:log(Level, RandomMsg, Metadata);
        true  -> rabbit_ct_broker_helpers:rpc(
                   Config, 0,
                   logger, log, [Level, RandomMsg, Metadata])
    end,
    check_log(Id, RandomMsg, Config).
%% True when the testcase runs against a remote broker node (signalled
%% by `rmq_nodes_count' being present in the CT config), in which case
%% logging and checks must go through RPC.
need_rpc(Config) ->
    NodesCount = rabbit_ct_helpers:get_config(Config, rmq_nodes_count),
    NodesCount =/= undefined.
%% Fetches the live configuration of handler `Id' (over RPC when a
%% remote broker is involved) and dispatches on the handler type to
%% verify that `RandomMsg' actually reached its output.
check_log(Id, RandomMsg, Config) ->
    {ok, Handler} = case need_rpc(Config) of
                        false -> logger:get_handler_config(Id);
                        true -> rabbit_ct_broker_helpers:rpc(
                                  Config, 0,
                                  logger, get_handler_config, [Id])
                    end,
    check_log1(Handler, RandomMsg, Config).
%% Handler-type-specific message lookup; returns a boolean.
%%
%% Clause 1: file handler — force a filesync (possibly over RPC), read
%% the file and grep for the message at end-of-line.
check_log1(#{id := Id,
             module := rabbit_logger_std_h,
             config := #{type := file,
                         file := Filename}},
           RandomMsg,
           Config) ->
    ok = case need_rpc(Config) of
             false -> rabbit_logger_std_h:filesync(Id);
             true -> rabbit_ct_broker_helpers:rpc(
                       Config, 0,
                       rabbit_logger_std_h, filesync, [Id])
         end,
    {ok, Content} = file:read_file(Filename),
    ReOpts = [{capture, none}, multiline],
    match =:= re:run(Content, RandomMsg ++ "$", ReOpts);
%% Clause 2: stdout/stderr handler — CT captures standard output into
%% the HTML report file, so poll that file (up to 10 x 500 ms) until the
%% message appears.
check_log1(#{module := Mod,
             config := #{type := Stddev}},
           RandomMsg,
           Config)
  when ?IS_STD_H_COMPAT(Mod) andalso ?IS_STDDEV(Stddev) ->
    Filename = html_report_filename(Config),
    ReOpts = [{capture, none}, multiline],
    lists:any(
      fun(_) ->
              {ok, Content} = file:read_file(Filename),
              case re:run(Content, RandomMsg ++ "$", ReOpts) of
                  match -> true;
                  _ -> timer:sleep(500),
                       false
              end
      end, lists:seq(1, 10));
%% Clause 3: exchange handler — poll the collector queue bound by the
%% testcase with basic.get (up to 10 x 500 ms).
%% NOTE(review): basic.get uses no_ack = false but the delivered
%% messages are never acked; they stay unacknowledged until the channel
%% is closed — confirm this is intentional.
check_log1(#{module := rabbit_logger_exchange_h},
           RandomMsg,
           Config) ->
    {Chan, QName} = ?config(test_channel_and_queue, Config),
    ReOpts = [{capture, none}, multiline],
    lists:any(
      fun(_) ->
              Ret = amqp_channel:call(
                      Chan, #'basic.get'{queue = QName, no_ack = false}),
              case Ret of
                  {#'basic.get_ok'{}, #amqp_msg{payload = Content}} ->
                      case re:run(Content, RandomMsg ++ "$", ReOpts) of
                          match -> true;
                          _ -> timer:sleep(500),
                               false
                      end;
                  #'basic.get_empty'{} ->
                      timer:sleep(500),
                      false;
                  Other ->
                      io:format(standard_error, "OTHER -> ~p~n", [Other]),
                      timer:sleep(500),
                      false
              end
      end, lists:seq(1, 10)).
%% Returns a string of `Length' characters drawn uniformly, with
%% replacement, from `AllowedChars'. Length = 0 yields "".
%%
%% Improvements over the original: length(AllowedChars) was recomputed
%% inside the fold on every iteration (O(Length * |AllowedChars|)); it
%% is now hoisted, and the fold is replaced by an equivalent, clearer
%% list comprehension.
get_random_string(Length, AllowedChars) ->
    NumChars = length(AllowedChars),
    [lists:nth(rand:uniform(NumChars), AllowedChars)
     || _ <- lists:seq(1, Length)].
%% Path of the current testcase's CT log file; stdout/stderr captured by
%% Common Test ends up in this file (used by check_log1's stddev clause).
html_report_filename(Config) ->
    ?config(tc_logfile, Config).

View File

@ -1,838 +0,0 @@
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2016-2021 VMware, Inc. or its affiliates. All rights reserved.
%%
-module(unit_log_config_SUITE).
-include_lib("common_test/include/ct.hrl").
-include_lib("eunit/include/eunit.hrl").
-compile(export_all).
%% Common Test callback: testcases of this (Lager-era) log
%% configuration suite.
all() ->
    [
     default,
     env_var_tty,
     config_file_handler,
     config_file_handler_level,
     config_file_handler_rotation,
     config_console_handler,
     config_exchange_handler,
     config_syslog_handler,
     config_syslog_handler_options,
     config_multiple_handlers,
     env_var_overrides_config,
     env_var_disable_log,
     config_sinks_level,
     config_sink_file,
     config_sink_file_override_config_handler_file,
     config_handlers_merged_with_lager_handlers,
     sink_handlers_merged_with_lager_extra_sinks_handlers,
     sink_file_rewrites_file_backends
    ].
%% Per-testcase setup: load the apps under test and wipe every
%% rabbit/lager log-related application variable so each testcase starts
%% from a pristine configuration.
init_per_testcase(_, Config) ->
    application:load(rabbit),
    application:load(lager),
    lists:foreach(
      fun({App, Par}) -> application:unset_env(App, Par) end,
      [{rabbit, log},
       {rabbit, lager_log_root},
       {rabbit, lager_default_file},
       {rabbit, lager_upgrade_file},
       {lager, handlers},
       {lager, rabbit_handlers},
       {lager, extra_sinks}]),
    unset_logs_var_origin(),
    Config.
%% Per-testcase teardown: wipe the same rabbit/lager log-related
%% variables as the setup, then unload both applications.
end_per_testcase(_, Config) ->
    lists:foreach(
      fun({App, Par}) -> application:unset_env(App, Par) end,
      [{rabbit, log},
       {rabbit, lager_log_root},
       {rabbit, lager_default_file},
       {rabbit, lager_upgrade_file},
       {lager, handlers},
       {lager, rabbit_handlers},
       {lager, extra_sinks}]),
    unset_logs_var_origin(),
    application:unload(rabbit),
    application:unload(lager),
    Config.
%% When the rabbit `log' config routes a category (federation) to its
%% own file, configure_lager/0 must rewrite the category's sink to carry
%% copies of the user's lager file backends; the expected result is
%% described by sink_rewrite_sinks/0.
sink_file_rewrites_file_backends(_) ->
    application:set_env(rabbit, log, [
        %% Disable rabbit file handler
        {file, [{file, false}]},
        {categories, [{federation, [{file, "federation.log"}, {level, warning}]}]}
    ]),
    LagerHandlers = [
        {lager_file_backend, [{file, "lager_file.log"}, {level, error}]},
        {lager_file_backend, [{file, "lager_file_1.log"}, {level, error}]},
        {lager_console_backend, [{level, info}]},
        {lager_exchange_backend, [{level, info}]}
    ],
    application:set_env(lager, handlers, LagerHandlers),
    rabbit_lager:configure_lager(),
    ExpectedSinks = sort_sinks(sink_rewrite_sinks()),
    ?assertEqual(ExpectedSinks, sort_sinks(application:get_env(lager, extra_sinks, undefined))).
%% Expected `extra_sinks' value for sink_file_rewrites_file_backends/1:
%% every category sink forwards to the default lager_event sink at
%% `info', except the federation sink which gets its own file backend
%% ("federation.log" at `warning') plus console/exchange backends at the
%% same level.
sink_rewrite_sinks() ->
    [{error_logger_lager_event,
      [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
       {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
     {rabbit_log_channel_lager_event,
      [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
       {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
     {rabbit_log_connection_lager_event,
      [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
       {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
     {rabbit_log_feature_flags_lager_event,
      [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
       {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
     {rabbit_log_federation_lager_event,
      [{handlers,[
         {lager_file_backend,
          [{date, ""},
           {file, "federation.log"},
           {formatter_config, formatter_config(file)},
           {level, warning},
           {size, 0}]},
         {lager_console_backend, [{level, warning}]},
         {lager_exchange_backend, [{level, warning}]}
        ]},
       {rabbit_handlers,[
         {lager_file_backend,
          [{date, ""},
           {file, "federation.log"},
           {formatter_config, formatter_config(file)},
           {level, warning},
           {size, 0}]},
         {lager_console_backend, [{level, warning}]},
         {lager_exchange_backend, [{level, warning}]}
        ]}]},
     {rabbit_log_lager_event,
      [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
       {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
     {rabbit_log_ldap_lager_event,
      [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
       {rabbit_handlers,
        [{lager_forwarder_backend,[lager_event,info]}]}]},
     {rabbit_log_mirroring_lager_event,
      [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
       {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
     {rabbit_log_osiris_lager_event,
      [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
       {rabbit_handlers,
        [{lager_forwarder_backend,[lager_event,info]}]}]},
     {rabbit_log_prelaunch_lager_event,
      [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
       {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
     {rabbit_log_queue_lager_event,
      [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
       {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
     {rabbit_log_ra_lager_event,
      [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
       {rabbit_handlers,
        [{lager_forwarder_backend,[lager_event,info]}]}]},
     {rabbit_log_shovel_lager_event,
      [{handlers, [{lager_forwarder_backend,[lager_event,info]}]},
       {rabbit_handlers,
        [{lager_forwarder_backend,[lager_event,info]}]}]},
     {rabbit_log_upgrade_lager_event,
      [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
       {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}
    ].
%% Checks that user-provided `extra_sinks' handlers are merged with the
%% handlers derived from the rabbit `log' config: the channel sink must
%% carry both the rabbit-generated backends (console/exchange at `error',
%% file at `warning') and the user's lager backends, while
%% `rabbit_handlers' records only the rabbit-generated ones; the
%% connection sink keeps its user file backend next to the forwarder.
sink_handlers_merged_with_lager_extra_sinks_handlers(_) ->
    DefaultLevel = debug,
    application:set_env(rabbit, log, [
        {file, [{file, "rabbit_file.log"}, {level, DefaultLevel}]},
        {console, [{enabled, true}, {level, error}]},
        {exchange, [{enabled, true}, {level, error}]},
        {categories, [
            {connection, [{level, debug}]},
            {channel, [{level, warning}, {file, "channel_log.log"}]}
        ]}
    ]),
    LagerSinks = [
        {rabbit_log_connection_lager_event,
         [{handlers,
           [{lager_file_backend,
             [{file, "connection_lager.log"},
              {level, info}]}]}]},
        {rabbit_log_channel_lager_event,
         [{handlers,
           [{lager_console_backend, [{level, debug}]},
            {lager_exchange_backend, [{level, debug}]},
            {lager_file_backend, [{level, error},
                                  {file, "channel_lager.log"}]}]}]}],
    application:set_env(lager, extra_sinks, LagerSinks),
    rabbit_lager:configure_lager(),
    ExpectedSinks = sort_sinks([
        {error_logger_lager_event,
         [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
        {rabbit_log_channel_lager_event,
         [{handlers,[
            {lager_console_backend, [{level, error},
                                     {formatter_config, formatter_config(console)}]},
            {lager_exchange_backend, [{level, error},
                                      {formatter_config, formatter_config(exchange)}]},
            {lager_file_backend,
             [{date, ""},
              {file, "channel_log.log"},
              {formatter_config, formatter_config(file)},
              {level, warning},
              {size, 0}]},
            {lager_console_backend, [{level, debug}]},
            {lager_exchange_backend, [{level, debug}]},
            {lager_file_backend, [{level, error},
                                  {file, "channel_lager.log"}]}
           ]},
          {rabbit_handlers,[
            {lager_console_backend, [{level, error},
                                     {formatter_config, formatter_config(console)}]},
            {lager_exchange_backend, [{level, error},
                                      {formatter_config, formatter_config(exchange)}]},
            {lager_file_backend,
             [{date, ""},
              {file, "channel_log.log"},
              {formatter_config, formatter_config(file)},
              {level, warning},
              {size, 0}]}]}
         ]},
        {rabbit_log_connection_lager_event,
         [{handlers,[{lager_forwarder_backend,[lager_event,debug]},
                     {lager_file_backend, [{file, "connection_lager.log"}, {level, info}]}]},
          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,debug]}]}]},
        {rabbit_log_feature_flags_lager_event,
         [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
        {rabbit_log_federation_lager_event,
         [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
        {rabbit_log_lager_event,
         [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
        {rabbit_log_ldap_lager_event,
         [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
          {rabbit_handlers,
           [{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
        {rabbit_log_mirroring_lager_event,
         [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
        {rabbit_log_osiris_lager_event,
         [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
          {rabbit_handlers,
           [{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
        {rabbit_log_prelaunch_lager_event,
         [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
        {rabbit_log_queue_lager_event,
         [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
        {rabbit_log_ra_lager_event,
         [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
          {rabbit_handlers,
           [{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
        {rabbit_log_shovel_lager_event,
         [{handlers, [{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
          {rabbit_handlers,
           [{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
        {rabbit_log_upgrade_lager_event,
         [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]}]),
    ?assertEqual(ExpectedSinks, sort_sinks(application:get_env(lager, extra_sinks, undefined))).
%% Handlers coming from the `rabbit' app's `log' env must be merged with
%% pre-existing `lager' handlers rather than replace them.
config_handlers_merged_with_lager_handlers(_) ->
    RabbitLog = [{file, [{file, "rabbit_file.log"}, {level, debug}]},
                 {console, [{enabled, true}, {level, error}]},
                 {exchange, [{enabled, true}, {level, error}]},
                 {syslog, [{enabled, true}]}],
    application:set_env(rabbit, log, RabbitLog),
    %% NOTE(review): `lager_exchange_backend' appears twice below; it is
    %% reproduced as-is — confirm the duplicate entry is intentional.
    LagerHandlers = [{lager_file_backend, [{file, "lager_file.log"}, {level, info}]},
                     {lager_console_backend, [{level, info}]},
                     {lager_exchange_backend, [{level, info}]},
                     {lager_exchange_backend, [{level, info}]}],
    application:set_env(lager, handlers, LagerHandlers),
    rabbit_lager:configure_lager(),
    %% rabbit_handlers contain only the rabbit-derived outputs...
    ExpectedRabbitHandlers =
        sort_handlers(default_expected_handlers("rabbit_file.log", debug)
                      ++ expected_console_handler(error)
                      ++ expected_rabbit_handler(error)
                      ++ expected_syslog_handler()),
    %% ...while the global handlers additionally keep the lager ones.
    ExpectedHandlers = sort_handlers(ExpectedRabbitHandlers ++ LagerHandlers),
    ?assertEqual(ExpectedRabbitHandlers,
                 sort_handlers(application:get_env(lager, rabbit_handlers, undefined))),
    ?assertEqual(ExpectedHandlers,
                 sort_handlers(application:get_env(lager, handlers, undefined))).
%% Per-category log levels must be reflected in the generated extra sinks.
config_sinks_level(_) ->
    application:set_env(rabbit, lager_default_file, "rabbit_default.log"),
    Categories = [{connection, [{level, warning}]},
                  {channel, [{level, debug}]},
                  {mirroring, [{level, error}]}],
    application:set_env(rabbit, log, [{categories, Categories}]),
    rabbit_lager:configure_lager(),
    ?assertEqual(sort_sinks(level_sinks()),
                 sort_sinks(application:get_env(lager, extra_sinks, undefined))).
%% Expected extra sinks for config_sinks_level/1: every sink forwards to
%% the default `lager_event' sink; connection/channel/mirroring carry the
%% category-specific levels, everything else the `info' default.
level_sinks() ->
    Sink = fun(Level) ->
                   Fwd = [{lager_forwarder_backend, [lager_event, Level]}],
                   [{handlers, Fwd}, {rabbit_handlers, Fwd}]
           end,
    [{error_logger_lager_event,             Sink(info)},
     {rabbit_log_channel_lager_event,       Sink(debug)},
     {rabbit_log_connection_lager_event,    Sink(warning)},
     {rabbit_log_feature_flags_lager_event, Sink(info)},
     {rabbit_log_federation_lager_event,    Sink(info)},
     {rabbit_log_lager_event,               Sink(info)},
     {rabbit_log_ldap_lager_event,          Sink(info)},
     {rabbit_log_mirroring_lager_event,     Sink(error)},
     {rabbit_log_osiris_lager_event,        Sink(info)},
     {rabbit_log_prelaunch_lager_event,     Sink(info)},
     {rabbit_log_queue_lager_event,         Sink(info)},
     {rabbit_log_ra_lager_event,            Sink(info)},
     {rabbit_log_shovel_lager_event,        Sink(info)},
     {rabbit_log_upgrade_lager_event,       Sink(info)}].
%% A per-category file for `connection' must become a dedicated sink while
%% the other categories forward at the default file level.
config_sink_file(_) ->
    DefaultLevel = error,
    application:set_env(rabbit, lager_default_file, "rabbit_default.log"),
    application:set_env(rabbit, log,
                        [{console, [{enabled, true}]},
                         {exchange, [{enabled, true}]},
                         {file, [{level, DefaultLevel}]},
                         {categories,
                          [{connection, [{file, "connection.log"},
                                         {level, warning}]}]}]),
    rabbit_lager:configure_lager(),
    ?assertEqual(sort_sinks(file_sinks(DefaultLevel)),
                 sort_sinks(application:get_env(lager, extra_sinks, undefined))).

%% Same as config_sink_file/1, but the default file handler points at a
%% non-default file; the category-specific sink must still be produced.
config_sink_file_override_config_handler_file(_) ->
    DefaultLevel = error,
    application:set_env(rabbit, lager_default_file, "rabbit_default.log"),
    application:set_env(rabbit, log,
                        [{file, [{file, "rabbit_not_default.log"},
                                 {level, DefaultLevel}]},
                         {console, [{enabled, true}]},
                         {exchange, [{enabled, true}]},
                         {categories,
                          [{connection, [{file, "connection.log"},
                                         {level, warning}]}]}]),
    rabbit_lager:configure_lager(),
    ?assertEqual(sort_sinks(file_sinks(DefaultLevel)),
                 sort_sinks(application:get_env(lager, extra_sinks, undefined))).
%% Convenience variant of file_sinks/1: expected sinks with the default
%% forwarder level (`info').
file_sinks() ->
file_sinks(info).
%% Expected extra sinks when the `connection' category logs to its own
%% file ("connection.log"): the connection sink gets concrete console,
%% exchange and file backends, while every other sink forwards to the
%% default `lager_event' sink at DefaultLevel.
file_sinks(DefaultLevel) ->
[{error_logger_lager_event,
[{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
{rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
{rabbit_log_channel_lager_event,
[{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
{rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
%% The only sink with concrete backends instead of a forwarder.
{rabbit_log_connection_lager_event,
[{handlers,[
{lager_console_backend, [{level, warning},
{formatter_config, formatter_config(console)}]},
{lager_exchange_backend, [{level, warning},
{formatter_config, formatter_config(exchange)}]},
{lager_file_backend,
[{date, ""},
{file, "connection.log"},
{formatter_config, formatter_config(file)},
{level, error},
{size, 0}]}]},
{rabbit_handlers,[
{lager_console_backend, [{level, warning},
{formatter_config, formatter_config(console)}]},
{lager_exchange_backend, [{level, warning},
{formatter_config, formatter_config(exchange)}]},
{lager_file_backend,
[{date, ""},
{file, "connection.log"},
%% NOTE(review): `formatter_config(backend)' here vs
%% `formatter_config(file)' in the `handlers' list above. Both hit
%% formatter_config/1's catch-all clause today, so the terms are equal —
%% confirm the asymmetry is intentional and not a typo.
{formatter_config, formatter_config(backend)},
{level, error},
{size, 0}]}]}
]},
{rabbit_log_feature_flags_lager_event,
[{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
{rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
{rabbit_log_federation_lager_event,
[{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
{rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
{rabbit_log_lager_event,
[{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
{rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
{rabbit_log_ldap_lager_event,
[{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
{rabbit_handlers,
[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
{rabbit_log_mirroring_lager_event,
[{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
{rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
{rabbit_log_osiris_lager_event,
[{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
{rabbit_handlers,
[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
{rabbit_log_prelaunch_lager_event,
[{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
{rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
{rabbit_log_queue_lager_event,
[{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
{rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
{rabbit_log_ra_lager_event,
[{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
{rabbit_handlers,
[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
{rabbit_log_shovel_lager_event,
[{handlers, [{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
{rabbit_handlers,
[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
{rabbit_log_upgrade_lager_event,
[{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
{rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]}
].
%% File output disabled while console, exchange and syslog are enabled:
%% exactly those three outputs must end up in the handler lists.
config_multiple_handlers(_) ->
    application:set_env(rabbit, lager_default_file, "rabbit_default.log"),
    application:set_env(rabbit, log,
                        [{file, [{file, false}]},       %% disable file output
                         {console, [{enabled, true}]},  %% enable console output
                         {exchange, [{enabled, true}]}, %% enable exchange output
                         {syslog, [{enabled, true}, {level, error}]}]),
    rabbit_lager:configure_lager(),
    Expected = sort_handlers(expected_syslog_handler(error)
                             ++ expected_console_handler()
                             ++ expected_rabbit_handler()),
    ?assertEqual(Expected,
                 sort_handlers(application:get_env(lager, handlers, undefined))),
    ?assertEqual(Expected,
                 sort_handlers(application:get_env(lager, rabbit_handlers, undefined))).
%% Enabling console output adds the console handler next to the default
%% file handler.
config_console_handler(_) ->
    DefaultLogFile = "rabbit_default.log",
    application:set_env(rabbit, lager_default_file, DefaultLogFile),
    application:set_env(rabbit, log, [{console, [{enabled, true}]}]),
    rabbit_lager:configure_lager(),
    Expected = sort_handlers(default_expected_handlers(DefaultLogFile)
                             ++ expected_console_handler()),
    ?assertEqual(Expected,
                 sort_handlers(application:get_env(lager, handlers, undefined))),
    ?assertEqual(Expected,
                 sort_handlers(application:get_env(lager, rabbit_handlers, undefined))).

%% Enabling exchange output adds the exchange handler next to the default
%% file handler.
config_exchange_handler(_) ->
    DefaultLogFile = "rabbit_default.log",
    application:set_env(rabbit, lager_default_file, DefaultLogFile),
    application:set_env(rabbit, log, [{exchange, [{enabled, true}]}]),
    rabbit_lager:configure_lager(),
    Expected = sort_handlers(default_expected_handlers(DefaultLogFile)
                             ++ expected_rabbit_handler()),
    ?assertEqual(Expected,
                 sort_handlers(application:get_env(lager, handlers, undefined))),
    ?assertEqual(Expected,
                 sort_handlers(application:get_env(lager, rabbit_handlers, undefined))).
%% Console handler expected by default (`debug' level).
expected_console_handler() ->
expected_console_handler(debug).
%% Console handler at the given level, using the console formatter.
expected_console_handler(Level) ->
[{lager_console_backend, [{level, Level},
{formatter_config, formatter_config(console)}]}].
%% Exchange ("rabbit") handler expected by default (`debug' level).
expected_rabbit_handler() ->
expected_rabbit_handler(debug).
%% Exchange handler at the given level, using the exchange formatter.
expected_rabbit_handler(Level) ->
[{lager_exchange_backend, [{level, Level},
{formatter_config, formatter_config(exchange)}]}].
%% Syslog enabled without an explicit level: the default level is used.
config_syslog_handler(_) ->
    DefaultLogFile = "rabbit_default.log",
    application:set_env(rabbit, lager_default_file, DefaultLogFile),
    application:set_env(rabbit, log, [{syslog, [{enabled, true}]}]),
    rabbit_lager:configure_lager(),
    Expected = sort_handlers(default_expected_handlers(DefaultLogFile)
                             ++ expected_syslog_handler()),
    ?assertEqual(Expected,
                 sort_handlers(application:get_env(lager, handlers, undefined))),
    ?assertEqual(Expected,
                 sort_handlers(application:get_env(lager, rabbit_handlers, undefined))).

%% Syslog enabled with an explicit level: that level must be honoured.
config_syslog_handler_options(_) ->
    DefaultLogFile = "rabbit_default.log",
    application:set_env(rabbit, lager_default_file, DefaultLogFile),
    application:set_env(rabbit, log, [{syslog, [{enabled, true},
                                                {level, warning}]}]),
    rabbit_lager:configure_lager(),
    Expected = sort_handlers(default_expected_handlers(DefaultLogFile)
                             ++ expected_syslog_handler(warning)),
    ?assertEqual(Expected,
                 sort_handlers(application:get_env(lager, handlers, undefined))),
    ?assertEqual(Expected,
                 sort_handlers(application:get_env(lager, rabbit_handlers, undefined))).
%% Syslog handler expected by default (`debug' level).
expected_syslog_handler() ->
expected_syslog_handler(debug).
%% Syslog handler config. Unlike the other backends this config is
%% positional, not a proplist: level, a placeholder, then the formatter.
%% NOTE(review): the bare `{}' placeholder presumably mirrors
%% syslog_lager_backend's expected config shape — confirm against that
%% backend's documentation.
expected_syslog_handler(Level) ->
[{syslog_lager_backend, [Level,
{},
{lager_default_formatter, syslog_formatter_config()}]}].
%% A log file coming from an environment variable takes precedence over
%% the one named in the configuration file.
env_var_overrides_config(_) ->
    EnvLogFile = "rabbit_default.log",
    application:set_env(rabbit, lager_default_file, EnvLogFile),
    application:set_env(rabbit, log,
                        [{file, [{file, "rabbit_not_default.log"}]}]),
    set_logs_var_origin(environment),
    rabbit_lager:configure_lager(),
    ExpectedHandlers = default_expected_handlers(EnvLogFile),
    ?assertEqual(ExpectedHandlers,
                 sort_handlers(application:get_env(lager, handlers, undefined))),
    ?assertEqual(ExpectedHandlers,
                 sort_handlers(application:get_env(lager, rabbit_handlers, undefined))).

%% An env-var value of `false' disables logging entirely, even when the
%% configuration file names a log file.
env_var_disable_log(_) ->
    application:set_env(rabbit, lager_default_file, false),
    application:set_env(rabbit, log,
                        [{file, [{file, "rabbit_not_default.log"}]}]),
    set_logs_var_origin(environment),
    rabbit_lager:configure_lager(),
    ?assertEqual([],
                 sort_handlers(application:get_env(lager, handlers, undefined))),
    ?assertEqual([],
                 sort_handlers(application:get_env(lager, rabbit_handlers, undefined))).
%% A file name in the configuration replaces the default log file.
config_file_handler(_) ->
    application:set_env(rabbit, lager_default_file, "rabbit_default.log"),
    NonDefaultLogFile = "rabbit_not_default.log",
    application:set_env(rabbit, log, [{file, [{file, NonDefaultLogFile}]}]),
    rabbit_lager:configure_lager(),
    Expected = default_expected_handlers(NonDefaultLogFile),
    ?assertEqual(Expected,
                 sort_handlers(application:get_env(lager, handlers, undefined))),
    ?assertEqual(Expected,
                 sort_handlers(application:get_env(lager, rabbit_handlers, undefined))).

%% A level for the file output is applied to the file handler.
config_file_handler_level(_) ->
    DefaultLogFile = "rabbit_default.log",
    application:set_env(rabbit, lager_default_file, DefaultLogFile),
    application:set_env(rabbit, log, [{file, [{level, warning}]}]),
    rabbit_lager:configure_lager(),
    Expected = default_expected_handlers(DefaultLogFile, warning),
    ?assertEqual(Expected,
                 sort_handlers(application:get_env(lager, handlers, undefined))),
    ?assertEqual(Expected,
                 sort_handlers(application:get_env(lager, rabbit_handlers, undefined))).

%% Rotation options (date/size/count) are passed through to the file
%% handler configuration.
config_file_handler_rotation(_) ->
    DefaultLogFile = "rabbit_default.log",
    application:set_env(rabbit, lager_default_file, DefaultLogFile),
    application:set_env(rabbit, log,
                        [{file, [{date, "$D0"}, {size, 5000}, {count, 10}]}]),
    rabbit_lager:configure_lager(),
    Expected = sort_handlers(default_expected_handlers(DefaultLogFile, debug,
                                                       5000, "$D0",
                                                       [{count, 10}])),
    ?assertEqual(Expected,
                 sort_handlers(application:get_env(lager, handlers, undefined))),
    ?assertEqual(Expected,
                 sort_handlers(application:get_env(lager, rabbit_handlers, undefined))).
%% Default configuration: log root, default file and upgrade file set on
%% the `rabbit' app must be propagated to lager's log_root, handlers and
%% extra sinks.
default(_) ->
    LogRoot = "/tmp/log_base",
    application:set_env(rabbit, lager_log_root, LogRoot),
    LogFile = "rabbit_default.log",
    application:set_env(rabbit, lager_default_file, LogFile),
    LogUpgradeFile = "rabbit_default_upgrade.log",
    application:set_env(rabbit, lager_upgrade_file, LogUpgradeFile),
    ?assertEqual(LogRoot, application:get_env(rabbit, lager_log_root, undefined)),
    rabbit_lager:configure_lager(),
    ?assertEqual(LogRoot, application:get_env(lager, log_root, undefined)),
    ExpectedHandlers = default_expected_handlers(LogFile),
    ?assertEqual(ExpectedHandlers,
                 sort_handlers(application:get_env(lager, handlers, undefined))),
    ?assertEqual(ExpectedHandlers,
                 sort_handlers(application:get_env(lager, rabbit_handlers, undefined))),
    ?assertEqual(default_expected_sinks(LogUpgradeFile),
                 sort_sinks(application:get_env(lager, extra_sinks, undefined))).
%% Expected default handlers: a single lager file backend. The variants
%% default the level to `debug' and rotation to disabled (size 0, empty
%% date spec).
default_expected_handlers(File) ->
default_expected_handlers(File, debug, 0, "").
default_expected_handlers(File, Level) ->
default_expected_handlers(File, Level, 0, "").
default_expected_handlers(File, Level, RotSize, RotDate) ->
default_expected_handlers(File, Level, RotSize, RotDate, []).
%% Extra is appended verbatim to the backend config (e.g. [{count, N}]
%% for the rotation test).
default_expected_handlers(File, Level, RotSize, RotDate, Extra) ->
[{lager_file_backend,
[{date, RotDate},
{file, File},
{formatter_config, formatter_config(file)},
{level, Level},
{size, RotSize}] ++ Extra}].
%% Expected extra sinks under the default configuration: every sink
%% forwards to the default `lager_event' sink at `info', except the
%% upgrade sink, which logs to its own dedicated file (UpgradeFile).
default_expected_sinks(UpgradeFile) ->
[{error_logger_lager_event,
[{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
{rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
{rabbit_log_channel_lager_event,
[{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
{rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
{rabbit_log_connection_lager_event,
[{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
{rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
{rabbit_log_feature_flags_lager_event,
[{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
{rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
{rabbit_log_federation_lager_event,
[{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
{rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
{rabbit_log_lager_event,
[{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
{rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
{rabbit_log_ldap_lager_event,
[{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
{rabbit_handlers,
[{lager_forwarder_backend,[lager_event,info]}]}]},
{rabbit_log_mirroring_lager_event,
[{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
{rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
{rabbit_log_osiris_lager_event,
[{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
{rabbit_handlers,
[{lager_forwarder_backend,[lager_event,info]}]}]},
{rabbit_log_prelaunch_lager_event,
[{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
{rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
{rabbit_log_queue_lager_event,
[{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
{rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
{rabbit_log_ra_lager_event,
[{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
{rabbit_handlers,
[{lager_forwarder_backend,[lager_event,info]}]}]},
{rabbit_log_shovel_lager_event,
[{handlers, [{lager_forwarder_backend,[lager_event,info]}]},
{rabbit_handlers,
[{lager_forwarder_backend,[lager_event,info]}]}]},
%% The upgrade sink is the only one with a real file backend instead of
%% a forwarder; rotation is disabled (empty date, size 0).
{rabbit_log_upgrade_lager_event,
[{handlers,
[{lager_file_backend,
[{date,[]},
{file, UpgradeFile},
{formatter_config, formatter_config(file)},
{level,info},
{size,0}]}]},
{rabbit_handlers,
[{lager_file_backend,
[{date,[]},
{file, UpgradeFile},
{formatter_config, formatter_config(file)},
{level,info},
{size,0}]}]}]}].
%% `tty' as log destination must yield console-only handlers and
%% forwarder-only sinks.
env_var_tty(_) ->
    application:set_env(rabbit, lager_log_root, "/tmp/log_base"),
    application:set_env(rabbit, lager_default_file, tty),
    application:set_env(rabbit, lager_upgrade_file, tty),
    %% tty can only be set explicitly
    set_logs_var_origin(environment),
    rabbit_lager:configure_lager(),
    ExpectedHandlers = tty_expected_handlers(),
    ?assertEqual(ExpectedHandlers,
                 sort_handlers(application:get_env(lager, handlers, undefined))),
    ?assertEqual(ExpectedHandlers,
                 sort_handlers(application:get_env(lager, rabbit_handlers, undefined))),
    %% Upgrade sink will be different.
    ?assertEqual(tty_expected_sinks(),
                 sort_sinks(application:get_env(lager, extra_sinks, undefined))).
%% Record in the prelaunch context where the main log file setting came
%% from (e.g. `environment'), so precedence over the configuration file
%% can be exercised by the tests.
set_logs_var_origin(Origin) ->
Context = #{var_origins => #{main_log_file => Origin}},
rabbit_prelaunch:store_context(Context),
ok.
%% Undo set_logs_var_origin/1 by clearing the cached prelaunch context.
unset_logs_var_origin() ->
rabbit_prelaunch:clear_context_cache(),
ok.
%% Handlers expected in tty mode: a single console backend at `debug'
%% level using the console formatter.
tty_expected_handlers() ->
[{lager_console_backend,
[{formatter_config, formatter_config(console)},
{level, debug}]}].
%% Expected extra sinks in tty mode: every sink — including the upgrade
%% one — carries the same config, forwarding to the default `lager_event'
%% sink at `info' level.
tty_expected_sinks() ->
    Fwd = [{lager_forwarder_backend, [lager_event, info]}],
    Config = [{handlers, Fwd}, {rabbit_handlers, Fwd}],
    [{SinkName, Config}
     || SinkName <- [error_logger_lager_event,
                     rabbit_log_channel_lager_event,
                     rabbit_log_connection_lager_event,
                     rabbit_log_feature_flags_lager_event,
                     rabbit_log_federation_lager_event,
                     rabbit_log_lager_event,
                     rabbit_log_ldap_lager_event,
                     rabbit_log_mirroring_lager_event,
                     rabbit_log_osiris_lager_event,
                     rabbit_log_prelaunch_lager_event,
                     rabbit_log_queue_lager_event,
                     rabbit_log_ra_lager_event,
                     rabbit_log_shovel_lager_event,
                     rabbit_log_upgrade_lager_event]].
%% Normalise a sink list for order-insensitive comparison: sinks are
%% deduplicated and sorted by name, and within each sink config the
%% `handlers' and `rabbit_handlers' entries are replaced by their sorted
%% forms (the rest of the config proplist is ukeysort'ed by key).
sort_sinks(Sinks) ->
    Normalise =
        fun({Name, Config}) ->
                Sorted =
                    [{handlers,
                      sort_handlers(proplists:get_value(handlers, Config))},
                     {rabbit_handlers,
                      sort_handlers(proplists:get_value(rabbit_handlers, Config))}],
                {Name, lists:ukeymerge(1, Sorted, lists:ukeysort(1, Config))}
        end,
    lists:ukeysort(1, lists:map(Normalise, Sinks)).
%% Normalise a handler list for order-insensitive comparison: handlers
%% are sorted by backend name and, when a handler's configuration looks
%% like a proplist ([{Key, _} | _] with an atom key), the config is
%% dedupe-sorted by key. Anything else — e.g. the forwarder backend's
%% positional argument list — passes through unchanged.
sort_handlers(Handlers) ->
    Normalised =
        [case Handler of
             {Name, [{Key, _} | _] = Config} when is_atom(Key) ->
                 {Name, lists:ukeysort(1, Config)};
             Other ->
                 Other
         end || Handler <- Handlers],
    lists:keysort(1, Normalised).
%% Lager formatter spec used by the expected handler configs. The console
%% target ends lines with CRLF; every other target (file, exchange, ...)
%% uses a bare LF.
formatter_config(Target) ->
    EOL = case Target of
              console -> "\r\n";
              _       -> "\n"
          end,
    [date, " ", time, " ", color, "[", severity, "] ", {pid, []}, " ", message, EOL].
%% Formatter spec for the syslog backend: no date/time prefix, since
%% syslog adds its own timestamp.
syslog_formatter_config() ->
    [color, "[", severity, "] ", {pid, []}, " ", message, "\n"].

View File

@ -25,7 +25,6 @@ all() ->
groups() ->
[
{non_parallel_tests, [], [
log_management,
log_file_initialised_during_startup,
log_file_fails_to_initialise_during_startup,
externally_rotated_logs_are_automatically_reopened
@ -113,94 +112,6 @@ wait_for_application(Application, Time) ->
%% Log management.
%% -------------------------------------------------------------------
log_management(Config) ->
passed = rabbit_ct_broker_helpers:rpc(Config, 0,
?MODULE, log_management1, [Config]).
log_management1(_Config) ->
[LogFile|_] = rabbit:log_locations(),
Suffix = ".0",
ok = test_logs_working([LogFile]),
%% prepare basic logs
file:delete(LogFile ++ Suffix),
ok = test_logs_working([LogFile]),
%% simple log rotation
ok = rabbit:rotate_logs(),
%% rabbit:rotate_logs/0 is asynchronous due to a limitation in
%% Lager. Therefore, we have no choice but to wait an arbitrary
%% amount of time.
ok = rabbit_ct_helpers:await_condition(
fun() ->
[true, true] =:=
non_empty_files([LogFile ++ Suffix, LogFile])
end, 5000),
ok = test_logs_working([LogFile]),
%% log rotation on empty files
ok = clean_logs([LogFile], Suffix),
ok = rabbit:rotate_logs(),
ok = rabbit_ct_helpers:await_condition(
fun() ->
[true, true] =:=
non_empty_files([LogFile ++ Suffix, LogFile])
end, 5000),
%% logs with suffix are not writable
ok = rabbit:rotate_logs(),
ok = rabbit_ct_helpers:await_condition(
fun() ->
ok =:= make_files_non_writable([LogFile ++ Suffix])
end, 5000),
ok = rabbit:rotate_logs(),
ok = rabbit_ct_helpers:await_condition(
fun() ->
ok =:= test_logs_working([LogFile])
end, 5000),
%% rotate when original log files are not writable
ok = make_files_non_writable([LogFile]),
ok = rabbit:rotate_logs(),
timer:sleep(2000),
%% logging directed to tty (first, remove handlers)
ok = rabbit:stop(),
ok = make_files_writable([LogFile ++ Suffix]),
ok = clean_logs([LogFile], Suffix),
ok = application:set_env(rabbit, lager_default_file, tty),
application:unset_env(rabbit, log),
application:unset_env(lager, handlers),
application:unset_env(lager, extra_sinks),
ok = rabbit:start(),
timer:sleep(200),
rabbit_log:info("test info"),
%% rotate logs when logging is turned off
ok = rabbit:stop(),
ok = clean_logs([LogFile], Suffix),
ok = application:set_env(rabbit, lager_default_file, false),
application:unset_env(rabbit, log),
application:unset_env(lager, handlers),
application:unset_env(lager, extra_sinks),
ok = rabbit:start(),
timer:sleep(200),
rabbit_log:error("test error"),
timer:sleep(200),
?assertEqual([{error,enoent}], empty_files([LogFile])),
%% cleanup
ok = rabbit:stop(),
ok = clean_logs([LogFile], Suffix),
ok = application:set_env(rabbit, lager_default_file, LogFile),
application:unset_env(rabbit, log),
application:unset_env(lager, handlers),
application:unset_env(lager, extra_sinks),
ok = rabbit:start(),
ok = test_logs_working([LogFile]),
passed.
log_file_initialised_during_startup(Config) ->
passed = rabbit_ct_broker_helpers:rpc(Config, 0,
?MODULE, log_file_initialised_during_startup1, [Config]).
@ -212,10 +123,8 @@ log_file_initialised_during_startup1(_Config) ->
%% start application with simple tty logging
ok = rabbit:stop(),
ok = clean_logs([LogFile], Suffix),
ok = application:set_env(rabbit, lager_default_file, tty),
application:unset_env(rabbit, log),
application:unset_env(lager, handlers),
application:unset_env(lager, extra_sinks),
ok = application:set_env(rabbit, log, [{console, [{enabled, true}]},
{file, [{file, false}]}]),
ok = rabbit:start(),
%% start application with logging to non-existing directory
@ -224,18 +133,14 @@ log_file_initialised_during_startup1(_Config) ->
delete_file(NonExistent),
delete_file(filename:dirname(NonExistent)),
ok = rabbit:stop(),
ct:pal("Setting lager_default_file to \"~s\"", [NonExistent]),
ok = application:set_env(rabbit, lager_default_file, NonExistent),
application:unset_env(rabbit, log),
application:unset_env(lager, handlers),
application:unset_env(lager, extra_sinks),
io:format("Setting log file to \"~s\"~n", [NonExistent]),
ok = application:set_env(rabbit, log, [{console, [{enabled, true}]},
{file, [{file, NonExistent}]}]),
ok = rabbit:start(),
%% clean up
ok = application:set_env(rabbit, lager_default_file, LogFile),
application:unset_env(rabbit, log),
application:unset_env(lager, handlers),
application:unset_env(lager, extra_sinks),
ok = application:set_env(rabbit, log, [{console, [{enabled, true}]},
{file, [{file, LogFile}]}]),
ok = rabbit:start(),
passed.
@ -277,13 +182,12 @@ log_file_fails_to_initialise_during_startup1(_Config, NonWritableDir) ->
delete_file(filename:dirname(NoPermission1)),
ok = rabbit:stop(),
ct:pal("Setting lager_default_file to \"~s\"", [NoPermission1]),
ok = application:set_env(rabbit, lager_default_file, NoPermission1),
application:unset_env(rabbit, log),
application:unset_env(lager, handlers),
application:unset_env(lager, extra_sinks),
io:format("Setting log file to \"~s\"~n", [NoPermission1]),
ok = application:set_env(rabbit, log, [{console, [{enabled, true}]},
{file, [{file, NoPermission1}]}]),
ct:pal("`rabbit` application env.: ~p", [application:get_all_env(rabbit)]),
io:format("rabbit application env.: ~p~n",
[application:get_all_env(rabbit)]),
?assertThrow(
{error, {rabbit, {{cannot_log_to_file, _, _}, _}}},
rabbit:start()),
@ -296,22 +200,19 @@ log_file_fails_to_initialise_during_startup1(_Config, NonWritableDir) ->
delete_file(NoPermission2),
delete_file(filename:dirname(NoPermission2)),
ct:pal("Setting lager_default_file to \"~s\"", [NoPermission2]),
ok = application:set_env(rabbit, lager_default_file, NoPermission2),
application:unset_env(rabbit, log),
application:unset_env(lager, handlers),
application:unset_env(lager, extra_sinks),
io:format("Setting log file to \"~s\"~n", [NoPermission2]),
ok = application:set_env(rabbit, log, [{console, [{enabled, true}]},
{file, [{file, NoPermission2}]}]),
ct:pal("`rabbit` application env.: ~p", [application:get_all_env(rabbit)]),
io:format("rabbit application env.: ~p~n",
[application:get_all_env(rabbit)]),
?assertThrow(
{error, {rabbit, {{cannot_log_to_file, _, _}, _}}},
rabbit:start()),
%% clean up
ok = application:set_env(rabbit, lager_default_file, LogFile),
application:unset_env(rabbit, log),
application:unset_env(lager, handlers),
application:unset_env(lager, extra_sinks),
ok = application:set_env(rabbit, log, [{console, [{enabled, true}]},
{file, [{file, LogFile}]}]),
ok = rabbit:start(),
passed.

View File

@ -16,7 +16,7 @@ define PROJECT_APP_EXTRA_KEYS
endef
LOCAL_DEPS = compiler crypto public_key sasl ssl syntax_tools tools xmerl
DEPS = lager jsx recon credentials_obfuscation
DEPS = jsx recon credentials_obfuscation
dep_credentials_obfuscation = git https://github.com/rabbitmq/credentials-obfuscation.git master

19
deps/rabbit_common/include/logging.hrl vendored Normal file
View File

@ -0,0 +1,19 @@
-define(RMQLOG_SUPER_DOMAIN_NAME, rabbitmq).
-define(RMQLOG_DOMAIN_GLOBAL, [?RMQLOG_SUPER_DOMAIN_NAME]).
-define(DEFINE_RMQLOG_DOMAIN(Domain), [?RMQLOG_SUPER_DOMAIN_NAME, Domain]).
-define(RMQLOG_DOMAIN_CHAN, ?DEFINE_RMQLOG_DOMAIN(channel)).
-define(RMQLOG_DOMAIN_CONN, ?DEFINE_RMQLOG_DOMAIN(connection)).
-define(RMQLOG_DOMAIN_FEAT_FLAGS, ?DEFINE_RMQLOG_DOMAIN(feature_flags)).
-define(RMQLOG_DOMAIN_MIRRORING, ?DEFINE_RMQLOG_DOMAIN(mirroring)).
-define(RMQLOG_DOMAIN_PRELAUNCH, ?DEFINE_RMQLOG_DOMAIN(prelaunch)).
-define(RMQLOG_DOMAIN_QUEUE, ?DEFINE_RMQLOG_DOMAIN(queue)).
-define(RMQLOG_DOMAIN_UPGRADE, ?DEFINE_RMQLOG_DOMAIN(upgrade)).
-define(DEFAULT_LOG_LEVEL, info).
-define(FILTER_NAME, rmqlog_filter).
-define(IS_STD_H_COMPAT(Mod),
Mod =:= logger_std_h orelse Mod =:= rabbit_logger_std_h).
-define(IS_STDDEV(DevName),
DevName =:= standard_io orelse DevName =:= standard_error).

View File

@ -1,8 +0,0 @@
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
%%
-define(LAGER_SINK, rabbit_log_lager_event).

View File

@ -18,25 +18,6 @@ ifneq ($(filter-out rabbit_common amqp_client,$(PROJECT)),)
RMQ_ERLC_OPTS += -pa $(DEPS_DIR)/rabbitmq_cli/_build/dev/lib/rabbitmqctl/ebin
endif
# Add Lager parse_transform module and our default Lager extra sinks.
LAGER_EXTRA_SINKS += rabbit_log \
rabbit_log_channel \
rabbit_log_connection \
rabbit_log_feature_flags \
rabbit_log_federation \
rabbit_log_ldap \
rabbit_log_mirroring \
rabbit_log_osiris \
rabbit_log_prelaunch \
rabbit_log_queue \
rabbit_log_ra \
rabbit_log_shovel \
rabbit_log_upgrade
lager_extra_sinks = $(subst $(space),$(comma),$(LAGER_EXTRA_SINKS))
RMQ_ERLC_OPTS += +'{parse_transform,lager_transform}' \
+'{lager_extra_sinks,[$(lager_extra_sinks)]}'
# Push our compilation options to both the normal and test ERLC_OPTS.
ERLC_OPTS += $(RMQ_ERLC_OPTS)
TEST_ERLC_OPTS += $(RMQ_ERLC_OPTS)

View File

@ -117,7 +117,6 @@ dep_accept = hex 0.3.5
dep_cowboy = hex 2.8.0
dep_cowlib = hex 2.9.1
dep_jsx = hex 2.11.0
dep_lager = hex 3.9.1
dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
dep_ra = git https://github.com/rabbitmq/ra.git master
dep_ranch = hex 2.0.0

View File

@ -164,9 +164,7 @@ define test_rabbitmq_config
[
{rabbit, [
$(if $(RABBITMQ_NODE_PORT), {tcp_listeners$(comma) [$(RABBITMQ_NODE_PORT)]}$(comma),)
{loopback_users, []},
{log, [{file, [{level, debug}]},
{console, [{level, debug}]}]}
{loopback_users, []}
]},
{rabbitmq_management, [
$(if $(RABBITMQ_NODE_PORT), {listener$(comma) [{port$(comma) $(shell echo "$$(($(RABBITMQ_NODE_PORT) + 10000))")}]},)
@ -184,19 +182,6 @@ $(if $(RABBITMQ_NODE_PORT), {tcp_listeners$(comma) [$(shell echo "$$((5551
{data_dir, "$(RABBITMQ_QUORUM_DIR)"},
{wal_sync_method, sync}
]},
{lager, [
{colors, [
%% https://misc.flogisoft.com/bash/tip_colors_and_formatting
{debug, "\\\e[0;34m" },
{info, "\\\e[1;37m" },
{notice, "\\\e[1;36m" },
{warning, "\\\e[1;33m" },
{error, "\\\e[1;31m" },
{critical, "\\\e[1;35m" },
{alert, "\\\e[1;44m" },
{emergency, "\\\e[1;41m" }
]}
]},
{osiris, [
{data_dir, "$(RABBITMQ_STREAM_DIR)"}
]}
@ -209,8 +194,6 @@ define test_rabbitmq_config_with_tls
[
{rabbit, [
{loopback_users, []},
{log, [{file, [{level, debug}]},
{console, [{level, debug}]}]},
{ssl_listeners, [5671]},
{ssl_options, [
{cacertfile, "$(TEST_TLS_CERTS_DIR_in_config)/testca/cacert.pem"},
@ -237,19 +220,6 @@ define test_rabbitmq_config_with_tls
{data_dir, "$(RABBITMQ_QUORUM_DIR)"},
{wal_sync_method, sync}
]},
{lager, [
{colors, [
%% https://misc.flogisoft.com/bash/tip_colors_and_formatting
{debug, "\\\e[0;34m" },
{info, "\\\e[1;37m" },
{notice, "\\\e[1;36m" },
{warning, "\\\e[1;33m" },
{error, "\\\e[1;31m" },
{critical, "\\\e[1;35m" },
{alert, "\\\e[1;44m" },
{emergency, "\\\e[1;41m" }
]}
]},
{osiris, [
{data_dir, "$(RABBITMQ_STREAM_DIR)"}
]}

View File

@ -1083,8 +1083,8 @@ init([AlarmSet, AlarmClear]) ->
end
end,
ObtainLimit = obtain_limit(Limit),
error_logger:info_msg("Limiting to approx ~p file handles (~p sockets)~n",
[Limit, ObtainLimit]),
logger:info("Limiting to approx ~p file handles (~p sockets)",
[Limit, ObtainLimit]),
Clients = ets:new(?CLIENT_ETS_TABLE, [set, private, {keypos, #cstate.pid}]),
Elders = ets:new(?ELDERS_ETS_TABLE, [set, private]),
{ok, #fhc_state { elders = Elders,

View File

@ -1,120 +0,0 @@
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2007-2021 VMware, Inc. or its affiliates. All rights reserved.
%%
%% Lager `gen_event' backend that forwards each log event it receives to
%% another lager sink, optionally filtering by a level mask first
%% (see handle_event/2 below).
-module(lager_forwarder_backend).
-behaviour(gen_event).
-export([init/1, handle_call/2, handle_event/2, handle_info/2, terminate/2,
code_change/3]).
-record(state, {
%% Registered name of the sink that events are forwarded to.
next_sink :: atom(),
%% Level mask to filter on, or 'inherit' to defer to the sink's levels.
level :: {'mask', integer()} | inherit
}).
%% @private
%% gen_event callback. Accepts a bare sink atom, `[Sink]' or
%% `[Sink, Level]'; a missing level defaults to `inherit'. Returns
%% `{error, {fatal, bad_log_level}}' for an unparsable level and
%% `{error, {fatal, bad_config}}' for any other argument shape.
init(Sink) when is_atom(Sink) ->
    init([Sink, inherit]);
init([Sink]) when is_atom(Sink) ->
    init([Sink, inherit]);
init([Sink, inherit]) when is_atom(Sink) ->
    {ok, #state{next_sink = Sink, level = inherit}};
init([Sink, Level]) when is_atom(Sink) ->
    try lager_util:config_to_mask(Level) of
        LevelMask ->
            {ok, #state{next_sink = Sink, level = LevelMask}}
    catch
        _:_ ->
            {error, {fatal, bad_log_level}}
    end;
init(_) ->
    {error, {fatal, bad_config}}.
%% @private
%% gen_event callback. `get_loglevel' on an inheriting handler computes the
%% effective level from the next sink's handlers; `set_loglevel' updates
%% this handler's own mask.
handle_call(get_loglevel, #state{next_sink = Sink, level = inherit} = State) ->
SinkPid = whereis(Sink),
%% NOTE(review): 127 appears to act as a default/"log it" mask here (the
%% inline comment says it defaults to 'info') — confirm against lager_util.
Mask = case self() of
SinkPid ->
%% Avoid direct loops, defaults to 'info'.
127;
_ ->
try
%% Ask every handler installed on the sink for its level and OR
%% the resulting masks together into one combined mask.
Levels = [gen_event:call(SinkPid, Handler, get_loglevel,
infinity)
|| Handler <- gen_event:which_handlers(SinkPid)],
lists:foldl(fun
({mask, Mask}, Acc) ->
Mask bor Acc;
(Level, Acc) when is_integer(Level) ->
%% Old-style numeric level: convert to a mask first.
{mask, Mask} = lager_util:config_to_mask(
lager_util:num_to_level(Level)),
Mask bor Acc;
(_, Acc) ->
Acc
end, 0, Levels)
catch
%% Sink process died between whereis/1 and the call.
exit:noproc ->
127
end
end,
{ok, {mask, Mask}, State};
%% Non-inheriting handler: report its own mask directly.
handle_call(get_loglevel, #state{level = Mask} = State) ->
{ok, Mask, State};
handle_call({set_loglevel, inherit}, State) ->
{ok, ok, State#state{level = inherit}};
handle_call({set_loglevel, Level}, State) ->
try lager_util:config_to_mask(Level) of
Mask ->
{ok, ok, State#state{level = Mask}}
catch
_:_ ->
%% Invalid level: keep the current mask and report the error.
{ok, {error, bad_log_level}, State}
end;
%% Any other request is acknowledged and ignored.
handle_call(_Request, State) ->
{ok, ok, State}.
%% @private
%% Forwards a log event to the configured sink, unless this process *is*
%% the sink (which would loop) or the event does not pass the level mask.
%% Delivery is async or sync depending on the sink's `async' setting.
handle_event({log, Msg} = Event, #state{next_sink = Sink, level = Lvl} = St) ->
    Target = whereis(Sink),
    Self = self(),
    if
        Target =:= Self ->
            %% Avoid direct loops.
            ok;
        true ->
            Loggable = Lvl =:= inherit orelse
                       lager_util:is_loggable(Msg, Lvl, ?MODULE),
            case Loggable of
                true ->
                    case lager_config:get({Sink, async}, false) of
                        true  -> gen_event:notify(Target, Event);
                        false -> gen_event:sync_notify(Target, Event)
                    end;
                false ->
                    ok
            end
    end,
    {ok, St};
handle_event(_Other, St) ->
    {ok, St}.
%% @private
%% Out-of-band messages are ignored; the state is returned untouched.
handle_info(_Msg, CurrentState) ->
    {ok, CurrentState}.
%% @private
%% Nothing to clean up when the handler is removed.
terminate(_Why, _CurrentState) ->
    ok.
%% @private
%% No state transformation is needed across code upgrades.
code_change(_Vsn, CurrentState, _Extra) ->
    {ok, CurrentState}.

View File

@ -14,11 +14,11 @@ amqp_params(ConnPid, Timeout) ->
P = try
gen_server:call(ConnPid, {info, [amqp_params]}, Timeout)
catch exit:{noproc, Error} ->
rabbit_log:debug("file ~p, line ~p - connection process ~p not alive: ~p~n",
rabbit_log:debug("file ~p, line ~p - connection process ~p not alive: ~p",
[?FILE, ?LINE, ConnPid, Error]),
[];
_:Error ->
rabbit_log:debug("file ~p, line ~p - failed to get amqp_params from connection process ~p: ~p~n",
rabbit_log:debug("file ~p, line ~p - failed to get amqp_params from connection process ~p: ~p",
[?FILE, ?LINE, ConnPid, Error]),
[]
end,

View File

@ -223,7 +223,7 @@ lookup_amqp_exception(#amqp_error{name = Name,
ExplBin = amqp_exception_explanation(Text, Expl),
{ShouldClose, Code, ExplBin, Method};
lookup_amqp_exception(Other, Protocol) ->
rabbit_log:warning("Non-AMQP exit reason '~p'~n", [Other]),
rabbit_log:warning("Non-AMQP exit reason '~p'", [Other]),
{ShouldClose, Code, Text} = Protocol:lookup_amqp_exception(internal_error),
{ShouldClose, Code, Text, none}.

Some files were not shown because too many files have changed in this diff Show More