Merge pull request #2861 from rabbitmq/use-builtin-logger
Switch from Lager to the new Erlang Logger API for logging
Commit 132dee6516
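The change repeated throughout the diff below follows one pattern: direct error_logger:*_msg/2 calls and the rabbit_log_* wrappers become either plain logger:*/2 calls or the ?LOG_* macros from kernel/include/logger.hrl, trailing "~n" sequences are dropped because the handler and formatter now decide how an event is terminated, and broker-side messages gain a domain metadata key so handlers can tell subsystems apart. A minimal before/after sketch; the module name, message text and domain value are illustrative, not taken from the PR:

%% Minimal sketch of the migration pattern used throughout this PR.
%% Module name, message and domain are illustrative only.
-module(logger_migration_example).

-include_lib("kernel/include/logger.hrl").

-export([old_style/1, new_style/1]).

%% Before: error_logger (or a rabbit_log_* wrapper backed by Lager).
old_style(Reason) ->
    error_logger:warning_msg("something went wrong: ~p~n", [Reason]).

%% After: the Erlang Logger API. The trailing "~n" is gone (the handler
%% terminates events) and a domain is attached as metadata so handlers
%% and filters can route by subsystem.
new_style(Reason) ->
    ?LOG_WARNING("something went wrong: ~p", [Reason],
                 #{domain => [rabbitmq, example]}).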
@@ -228,7 +228,7 @@ hdr_sent(_EvtType, {protocol_header_received, 0, 1, 0, 0}, State) ->
     end;
 hdr_sent(_EvtType, {protocol_header_received, Protocol, Maj, Min,
                                 Rev}, State) ->
-    error_logger:warning_msg("Unsupported protocol version: ~b ~b.~b.~b~n",
+    logger:warning("Unsupported protocol version: ~b ~b.~b.~b",
                              [Protocol, Maj, Min, Rev]),
     {stop, normal, State};
 hdr_sent({call, From}, begin_session,
@@ -291,7 +291,7 @@ opened(info, {'DOWN', MRef, _, _, _Info},
     ok = notify_closed(Config, shutdown),
     {stop, normal, State};
 opened(_EvtType, Frame, State) ->
-    error_logger:warning_msg("Unexpected connection frame ~p when in state ~p ~n",
+    logger:warning("Unexpected connection frame ~p when in state ~p ",
                              [Frame, State]),
     {keep_state, State}.

@@ -367,7 +367,7 @@ send_open(#state{socket = Socket, config = Config}) ->
            end,
     Encoded = amqp10_framing:encode_bin(Open),
     Frame = amqp10_binary_generator:build_frame(0, Encoded),
-    ?DBG("CONN <- ~p~n", [Open]),
+    ?DBG("CONN <- ~p", [Open]),
     socket_send(Socket, Frame).

@@ -375,7 +375,7 @@ send_close(#state{socket = Socket}, _Reason) ->
     Close = #'v1_0.close'{},
     Encoded = amqp10_framing:encode_bin(Close),
     Frame = amqp10_binary_generator:build_frame(0, Encoded),
-    ?DBG("CONN <- ~p~n", [Close]),
+    ?DBG("CONN <- ~p", [Close]),
     Ret = socket_send(Socket, Frame),
     case Ret of
         ok -> _ =
@@ -397,7 +397,7 @@ send_sasl_init(State, {plain, User, Pass}) ->
 send(Record, FrameType, #state{socket = Socket}) ->
     Encoded = amqp10_framing:encode_bin(Record),
     Frame = amqp10_binary_generator:build_frame(0, FrameType, Encoded),
-    ?DBG("CONN <- ~p~n", [Record]),
+    ?DBG("CONN <- ~p", [Record]),
     socket_send(Socket, Frame).

 send_heartbeat(#state{socket = Socket}) ->

@@ -158,16 +158,16 @@ handle_event(info, {Tcp, _, Packet}, StateName, #state{buffer = Buffer} = State)

 handle_event(info, {TcpError, _, Reason}, StateName, State)
   when TcpError == tcp_error orelse TcpError == ssl_error ->
-    error_logger:warning_msg("AMQP 1.0 connection socket errored, connection state: '~s', reason: '~p'~n",
-                             [StateName, Reason]),
+    logger:warning("AMQP 1.0 connection socket errored, connection state: '~s', reason: '~p'",
+                    [StateName, Reason]),
     State1 = State#state{socket = undefined,
                          buffer = <<>>,
                          frame_state = undefined},
     {stop, {error, Reason}, State1};
 handle_event(info, {TcpClosed, _}, StateName, State)
   when TcpClosed == tcp_closed orelse TcpClosed == ssl_closed ->
-    error_logger:warning_msg("AMQP 1.0 connection socket was closed, connection state: '~s'~n",
-                             [StateName]),
+    logger:warning("AMQP 1.0 connection socket was closed, connection state: '~s'",
+                    [StateName]),
     State1 = State#state{socket = undefined,
                          buffer = <<>>,
                          frame_state = undefined},
@@ -279,7 +279,7 @@ defer_heartbeat_timer(State) -> State.
 route_frame(Channel, FrameType, {Performative, Payload} = Frame, State0) ->
     {DestinationPid, State} = find_destination(Channel, FrameType, Performative,
                                                State0),
-    ?DBG("FRAME -> ~p ~p~n ~p~n", [Channel, DestinationPid, Performative]),
+    ?DBG("FRAME -> ~p ~p~n ~p", [Channel, DestinationPid, Performative]),
     case Payload of
         <<>> -> ok = gen_statem:cast(DestinationPid, Performative);
         _ -> ok = gen_statem:cast(DestinationPid, Frame)

@@ -372,7 +372,7 @@ mapped(cast, {#'v1_0.transfer'{handle = {uint, InHandle},
             ok = notify_link(Link, credit_exhausted),
             {next_state, mapped, State};
         {transfer_limit_exceeded, State} ->
-            error_logger:info_msg("transfer_limit_exceeded for link ~p~n", [Link]),
+            logger:warning("transfer_limit_exceeded for link ~p", [Link]),
             Link1 = detach_with_error_cond(Link, State,
                                            ?V_1_0_LINK_ERROR_TRANSFER_LIMIT_EXCEEDED),
             {next_state, mapped, update_link(Link1, State)}
@@ -403,7 +403,7 @@ mapped(cast, #'v1_0.disposition'{role = true, settled = true, first = {uint, Fir

     {next_state, mapped, State#state{unsettled = Unsettled}};
 mapped(cast, Frame, State) ->
-    error_logger:warning_msg("Unhandled session frame ~p in state ~p~n",
+    logger:warning("Unhandled session frame ~p in state ~p",
                              [Frame, State]),
     {next_state, mapped, State};
 mapped({call, From},
@@ -490,7 +490,7 @@ mapped({call, From}, Msg, State) ->
     {keep_state, State1, [{reply, From, Reply}]};

 mapped(_EvtType, Msg, _State) ->
-    error_logger:info_msg("amqp10_session: unhandled msg in mapped state ~W",
+    logger:warning("amqp10_session: unhandled msg in mapped state ~W",
                           [Msg, 10]),
     keep_state_and_data.

@@ -511,7 +511,7 @@ handle_info({bump_credit, Msg}, State) ->
 %% @private
 handle_info(timed_out_flushing_channel, State) ->
     ?LOG_WARN("Channel (~p) closing: timed out flushing while "
-              "connection closing~n", [self()]),
+              "connection closing", [self()]),
     {stop, timed_out_flushing_channel, State};
 %% @private
 handle_info({'DOWN', _, process, ReturnHandler, shutdown},
@@ -520,7 +520,7 @@ handle_info({'DOWN', _, process, ReturnHandler, shutdown},
 handle_info({'DOWN', _, process, ReturnHandler, Reason},
             State = #state{return_handler = {ReturnHandler, _Ref}}) ->
     ?LOG_WARN("Channel (~p): Unregistering return handler ~p because it died. "
-              "Reason: ~p~n", [self(), ReturnHandler, Reason]),
+              "Reason: ~p", [self(), ReturnHandler, Reason]),
     {noreply, State#state{return_handler = none}};
 %% @private
 handle_info({'DOWN', _, process, ConfirmHandler, shutdown},
@@ -529,7 +529,7 @@ handle_info({'DOWN', _, process, ConfirmHandler, shutdown},
 handle_info({'DOWN', _, process, ConfirmHandler, Reason},
             State = #state{confirm_handler = {ConfirmHandler, _Ref}}) ->
     ?LOG_WARN("Channel (~p): Unregistering confirm handler ~p because it died. "
-              "Reason: ~p~n", [self(), ConfirmHandler, Reason]),
+              "Reason: ~p", [self(), ConfirmHandler, Reason]),
     {noreply, State#state{confirm_handler = none}};
 %% @private
 handle_info({'DOWN', _, process, FlowHandler, shutdown},
@@ -538,7 +538,7 @@ handle_info({'DOWN', _, process, FlowHandler, shutdown},
 handle_info({'DOWN', _, process, FlowHandler, Reason},
             State = #state{flow_handler = {FlowHandler, _Ref}}) ->
     ?LOG_WARN("Channel (~p): Unregistering flow handler ~p because it died. "
-              "Reason: ~p~n", [self(), FlowHandler, Reason]),
+              "Reason: ~p", [self(), FlowHandler, Reason]),
     {noreply, State#state{flow_handler = none}};
 handle_info({'DOWN', _, process, QPid, _Reason}, State) ->
     rabbit_amqqueue_common:notify_sent_queue_down(QPid),
@@ -588,13 +588,13 @@ handle_method_to_server(Method, AmqpMsg, From, Sender, Flow,
                                    From, Sender, Flow, State1)};
         {ok, none, BlockReply} ->
             ?LOG_WARN("Channel (~p): discarding method ~p in cast.~n"
-                      "Reason: ~p~n", [self(), Method, BlockReply]),
+                      "Reason: ~p", [self(), Method, BlockReply]),
             {noreply, State};
         {ok, _, BlockReply} ->
             {reply, BlockReply, State};
         {{_, InvalidMethodMessage}, none, _} ->
             ?LOG_WARN("Channel (~p): ignoring cast of ~p method. " ++
-                      InvalidMethodMessage ++ "~n", [self(), Method]),
+                      InvalidMethodMessage ++ "", [self(), Method]),
             {noreply, State};
         {{InvalidMethodReply, _}, _, _} ->
             {reply, {error, InvalidMethodReply}, State}
@@ -695,7 +695,7 @@ safely_handle_method_from_server(Method, Content,
                             _                                          -> false
                         end,
                  if Drop -> ?LOG_INFO("Channel (~p): dropping method ~p from "
-                                      "server because channel is closing~n",
+                                      "server because channel is closing",
                                       [self(), {Method, Content}]),
                             {noreply, State};
                     true ->
@@ -776,7 +776,7 @@ handle_method_from_server1(
         State = #state{return_handler = ReturnHandler}) ->
     case ReturnHandler of
         none        -> ?LOG_WARN("Channel (~p): received {~p, ~p} but there is "
-                                 "no return handler registered~n",
+                                 "no return handler registered",
                                  [self(), BasicReturn, AmqpMsg]);
         {Pid, _Ref} -> Pid ! {BasicReturn, AmqpMsg}
     end,
@@ -791,7 +791,7 @@ handle_method_from_server1(#'basic.ack'{} = BasicAck, none,
 handle_method_from_server1(#'basic.nack'{} = BasicNack, none,
                            #state{confirm_handler = none} = State) ->
     ?LOG_WARN("Channel (~p): received ~p but there is no "
-              "confirm handler registered~n", [self(), BasicNack]),
+              "confirm handler registered", [self(), BasicNack]),
     {noreply, update_confirm_set(BasicNack, State)};
 handle_method_from_server1(#'basic.nack'{} = BasicNack, none,
                            #state{confirm_handler = {CH, _Ref}} = State) ->
@@ -835,7 +835,7 @@ handle_connection_closing(CloseType, Reason,
 handle_channel_exit(Reason = #amqp_error{name = ErrorName, explanation = Expl},
                     State = #state{connection = Connection, number = Number}) ->
     %% Sent by rabbit_channel for hard errors in the direct case
-    ?LOG_ERR("connection ~p, channel ~p - error:~n~p~n",
+    ?LOG_ERR("connection ~p, channel ~p - error:~n~p",
              [Connection, Number, Reason]),
     {true, Code, _} = ?PROTOCOL:lookup_amqp_exception(ErrorName),
     ReportedReason = {server_initiated_close, Code, Expl},
@@ -928,7 +928,7 @@ server_misbehaved(#amqp_error{} = AmqpError, State = #state{number = Number}) ->
             handle_shutdown({server_misbehaved, AmqpError}, State);
         {_, Close} ->
             ?LOG_WARN("Channel (~p) flushing and closing due to soft "
-                      "error caused by the server ~p~n", [self(), AmqpError]),
+                      "error caused by the server ~p", [self(), AmqpError]),
             Self = self(),
             spawn(fun () -> call(Self, Close) end),
             {noreply, State}

@@ -203,7 +203,7 @@ internal_pass_frame(Number, Frame, State) ->
     case internal_lookup_npa(Number, State) of
         undefined ->
             ?LOG_INFO("Dropping frame ~p for invalid or closed "
-                      "channel number ~p~n", [Frame, Number]),
+                      "channel number ~p", [Frame, Number]),
             State;
         {ChPid, AState} ->
             NewAState = process_channel_frame(Frame, Number, ChPid, AState),

@@ -203,7 +203,7 @@ handle_cast(channels_terminated, State) ->
 handle_cast({hard_error_in_channel, _Pid, Reason}, State) ->
     server_initiated_close(Reason, State);
 handle_cast({channel_internal_error, Pid, Reason}, State) ->
-    ?LOG_WARN("Connection (~p) closing: internal error in channel (~p): ~p~n",
+    ?LOG_WARN("Connection (~p) closing: internal error in channel (~p): ~p",
               [self(), Pid, Reason]),
     internal_error(Pid, Reason, State);
 handle_cast({server_misbehaved, AmqpError}, State) ->
@@ -218,12 +218,12 @@ handle_cast({register_blocked_handler, HandlerPid}, State) ->
 handle_info({'DOWN', _, process, BlockHandler, Reason},
             State = #state{block_handler = {BlockHandler, _Ref}}) ->
     ?LOG_WARN("Connection (~p): Unregistering connection.{blocked,unblocked} handler ~p because it died. "
-              "Reason: ~p~n", [self(), BlockHandler, Reason]),
+              "Reason: ~p", [self(), BlockHandler, Reason]),
     {noreply, State#state{block_handler = none}};
 handle_info({'EXIT', BlockHandler, Reason},
             State = #state{block_handler = {BlockHandler, Ref}}) ->
     ?LOG_WARN("Connection (~p): Unregistering connection.{blocked,unblocked} handler ~p because it died. "
-              "Reason: ~p~n", [self(), BlockHandler, Reason]),
+              "Reason: ~p", [self(), BlockHandler, Reason]),
     erlang:demonitor(Ref, [flush]),
     {noreply, State#state{block_handler = none}};
 %% propagate the exit to the module that will stop with a sensible reason logged
@@ -329,12 +329,12 @@ internal_error(Pid, Reason, State) ->

 server_initiated_close(Close, State) ->
     ?LOG_WARN("Connection (~p) closing: received hard error ~p "
-              "from server~n", [self(), Close]),
+              "from server", [self(), Close]),
     set_closing_state(abrupt, #closing{reason = server_initiated_close,
                                        close = Close}, State).

 server_misbehaved_close(AmqpError, State) ->
-    ?LOG_WARN("Connection (~p) closing: server misbehaved: ~p~n",
+    ?LOG_WARN("Connection (~p) closing: server misbehaved: ~p",
               [self(), AmqpError]),
     {0, Close} = rabbit_binary_generator:map_exception(0, AmqpError, ?PROTOCOL),
     set_closing_state(abrupt, #closing{reason = server_misbehaved,

@@ -54,8 +54,8 @@ maybe_add_verify(Options) ->
             % NB: user has explicitly set 'verify'
             Options;
         _ ->
-            ?LOG_WARN("Connection (~p): Certificate chain verification is not enabled for this TLS connection. "
-                    "Please see https://rabbitmq.com/ssl.html for more information.~n", [self()]),
+            ?LOG_WARN("Connection (~p): certificate chain verification is not enabled for this TLS connection. "
+                    "Please see https://rabbitmq.com/ssl.html for more information.", [self()]),
             Options
     end.

@@ -131,14 +131,14 @@ endef
 APPS_DIR := $(CURDIR)/apps

 LOCAL_DEPS = sasl rabbitmq_prelaunch os_mon inets compiler public_key crypto ssl syntax_tools xmerl
-BUILD_DEPS = rabbitmq_cli syslog
-DEPS = cuttlefish ranch lager rabbit_common ra sysmon_handler stdout_formatter recon observer_cli osiris amqp10_common
+BUILD_DEPS = rabbitmq_cli
+DEPS = cuttlefish ranch rabbit_common ra sysmon_handler stdout_formatter recon observer_cli osiris amqp10_common syslog
 TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers amqp_client meck proper

 PLT_APPS += mnesia

-dep_cuttlefish = hex 2.7.0
-dep_syslog = git https://github.com/schlagert/syslog 3.4.5
+dep_cuttlefish = git https://github.com/Kyorai/cuttlefish master
+dep_syslog = git https://github.com/schlagert/syslog 4.0.0
 dep_osiris = git https://github.com/rabbitmq/osiris master

 define usage_xml_to_erl

@@ -5,6 +5,7 @@
 *.coverdata
 /ebin/
 /.erlang.mk/
+/logs/
 /rabbitmq_prelaunch.d
 /xrefr

@@ -3,7 +3,7 @@ PROJECT_DESCRIPTION = RabbitMQ prelaunch setup
 PROJECT_VERSION = 1.0.0
 PROJECT_MOD = rabbit_prelaunch_app

-DEPS = rabbit_common lager
+DEPS = rabbit_common jsx

 DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk

@@ -8,8 +8,11 @@

 -module(rabbit_boot_state).

+-include_lib("kernel/include/logger.hrl").
 -include_lib("eunit/include/eunit.hrl").

+-include_lib("rabbit_common/include/logging.hrl").
+
 -export([get/0,
          set/1,
          wait_for/2,
@@ -28,7 +31,8 @@ get() ->

 -spec set(boot_state()) -> ok.
 set(BootState) ->
-    rabbit_log_prelaunch:debug("Change boot state to `~s`", [BootState]),
+    ?LOG_DEBUG("Change boot state to `~s`", [BootState],
+               #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
     ?assert(is_valid(BootState)),
     case BootState of
         stopped -> persistent_term:erase(?PT_KEY_BOOT_STATE);

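The hunk above attaches #{domain => ?RMQLOG_DOMAIN_PRELAUNCH} to each prelaunch debug message instead of calling rabbit_log_prelaunch. With Erlang Logger, that domain can drive routing: the stock logger_filters:domain/2 filter keeps or drops events by comparing their domain against a prefix. A hedged sketch of a handler that only receives prelaunch events; the handler id, file name and the literal domain list are assumptions for illustration, the PR itself only shows the metadata being attached:

%% Sketch: a file handler that keeps only events whose domain sits under
%% [rabbitmq, prelaunch] and drops everything else.
ok = logger:add_handler(prelaunch_log, logger_std_h,
                        #{config => #{type => {file, "/tmp/prelaunch.log"}},
                          filter_default => stop,
                          filters =>
                              [{prelaunch_only,
                                {fun logger_filters:domain/2,
                                 {log, sub, [rabbitmq, prelaunch]}}}]}).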
@@ -9,6 +9,10 @@

 -behaviour(gen_server).

+-include_lib("kernel/include/logger.hrl").
+
+-include_lib("rabbit_common/include/logging.hrl").
+
 -export([start_link/0]).

 -export([init/1,
@@ -65,20 +69,23 @@ code_change(_OldVsn, State, _Extra) ->

 notify_boot_state(ready = BootState,
                   #state{mechanism = legacy, sd_notify_module = SDNotify}) ->
-    rabbit_log_prelaunch:debug(
+    ?LOG_DEBUG(
       ?LOG_PREFIX "notifying of state `~s` (via native module)",
-      [BootState]),
+      [BootState],
+      #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
     sd_notify_legacy(SDNotify);
 notify_boot_state(ready = BootState,
                   #state{mechanism = socat, socket = Socket}) ->
-    rabbit_log_prelaunch:debug(
+    ?LOG_DEBUG(
       ?LOG_PREFIX "notifying of state `~s` (via socat(1))",
-      [BootState]),
+      [BootState],
+      #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
     sd_notify_socat(Socket);
 notify_boot_state(BootState, _) ->
-    rabbit_log_prelaunch:debug(
+    ?LOG_DEBUG(
       ?LOG_PREFIX "ignoring state `~s`",
-      [BootState]),
+      [BootState],
+      #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
     ok.

 sd_notify_message() ->
@@ -99,9 +106,10 @@ sd_notify_legacy(SDNotify) ->
 sd_notify_socat(Socket) ->
     case sd_current_unit() of
         {ok, Unit} ->
-            rabbit_log_prelaunch:debug(
-              ?LOG_PREFIX "systemd unit for activation check: \"~s\"~n",
-              [Unit]),
+            ?LOG_DEBUG(
+              ?LOG_PREFIX "systemd unit for activation check: \"~s\"",
+              [Unit],
+              #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
             sd_notify_socat(Socket, Unit);
         _ ->
             ok
@@ -116,9 +124,10 @@ sd_notify_socat(Socket, Unit) ->
             Result
     catch
         Class:Reason ->
-            rabbit_log_prelaunch:debug(
-              ?LOG_PREFIX "Failed to start socat(1): ~p:~p~n",
-              [Class, Reason]),
+            ?LOG_DEBUG(
+              ?LOG_PREFIX "Failed to start socat(1): ~p:~p",
+              [Class, Reason],
+              #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
             false
     end.

@@ -147,8 +156,10 @@ sd_open_port(Socket) ->
 sd_wait_activation(Port, Unit) ->
     case os:find_executable("systemctl") of
         false ->
-            rabbit_log_prelaunch:debug(
-              ?LOG_PREFIX "systemctl(1) unavailable, falling back to sleep~n"),
+            ?LOG_DEBUG(
+              ?LOG_PREFIX "systemctl(1) unavailable, falling back to sleep",
+              [],
+              #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
             timer:sleep(5000),
             ok;
         _ ->
@@ -156,8 +167,10 @@ sd_wait_activation(Port, Unit) ->
     end.

 sd_wait_activation(_, _, 0) ->
-    rabbit_log_prelaunch:debug(
-      ?LOG_PREFIX "service still in 'activating' state, bailing out~n"),
+    ?LOG_DEBUG(
+      ?LOG_PREFIX "service still in 'activating' state, bailing out",
+      [],
+      #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
     ok;
 sd_wait_activation(Port, Unit, AttemptsLeft) ->
     Ret = os:cmd("systemctl show --property=ActiveState -- '" ++ Unit ++ "'"),
@@ -168,7 +181,8 @@ sd_wait_activation(Port, Unit, AttemptsLeft) ->
         "ActiveState=" ++ _ ->
             ok;
         _ = Err ->
-            rabbit_log_prelaunch:debug(
-              ?LOG_PREFIX "unexpected status from systemd: ~p~n", [Err]),
+            ?LOG_DEBUG(
+              ?LOG_PREFIX "unexpected status from systemd: ~p", [Err],
+              #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
             ok
     end.

@@ -0,0 +1,127 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2021 VMware, Inc. or its affiliates.  All rights reserved.
+%%
+
+-module(rabbit_logger_json_fmt).
+
+-export([format/2]).
+
+format(
+  #{msg := Msg,
+    level := Level,
+    meta := #{time := Timestamp} = Meta},
+  Config) ->
+    FormattedTimestamp = unicode:characters_to_binary(
+                           format_time(Timestamp, Config)),
+    FormattedMsg = unicode:characters_to_binary(
+                     format_msg(Msg, Meta, Config)),
+    FormattedMeta = format_meta(Meta, Config),
+    Json = jsx:encode(
+             [{time, FormattedTimestamp},
+              {level, Level},
+              {msg, FormattedMsg},
+              {meta, FormattedMeta}]),
+    [Json, $\n].
+
+format_time(Timestamp, _) ->
+    Options = [{unit, microsecond}],
+    calendar:system_time_to_rfc3339(Timestamp, Options).
+
+format_msg({string, Chardata}, Meta, Config) ->
+    format_msg({"~ts", [Chardata]}, Meta, Config);
+format_msg({report, Report}, Meta, Config) ->
+    FormattedReport = format_report(Report, Meta, Config),
+    format_msg(FormattedReport, Meta, Config);
+format_msg({Format, Args}, _, _) ->
+    io_lib:format(Format, Args).
+
+format_report(
+  #{label := {application_controller, _}} = Report, Meta, Config) ->
+    format_application_progress(Report, Meta, Config);
+format_report(
+  #{label := {supervisor, progress}} = Report, Meta, Config) ->
+    format_supervisor_progress(Report, Meta, Config);
+format_report(
+  Report, #{report_cb := Cb} = Meta, Config) ->
+    try
+        case erlang:fun_info(Cb, arity) of
+            {arity, 1} -> Cb(Report);
+            {arity, 2} -> {"~ts", [Cb(Report, #{})]}
+        end
+    catch
+        _:_:_ ->
+            format_report(Report, maps:remove(report_cb, Meta), Config)
+    end;
+format_report(Report, _, _) ->
+    logger:format_report(Report).
+
+format_application_progress(#{label := {_, progress},
+                              report := InternalReport}, _, _) ->
+    Application = proplists:get_value(application, InternalReport),
+    StartedAt = proplists:get_value(started_at, InternalReport),
+    {"Application ~w started on ~0p",
+     [Application, StartedAt]};
+format_application_progress(#{label := {_, exit},
+                              report := InternalReport}, _, _) ->
+    Application = proplists:get_value(application, InternalReport),
+    Exited = proplists:get_value(exited, InternalReport),
+    {"Application ~w exited with reason: ~0p",
+     [Application, Exited]}.
+
+format_supervisor_progress(#{report := InternalReport}, _, _) ->
+    Supervisor = proplists:get_value(supervisor, InternalReport),
+    Started = proplists:get_value(started, InternalReport),
+    Id = proplists:get_value(id, Started),
+    Pid = proplists:get_value(pid, Started),
+    Mfa = proplists:get_value(mfargs, Started),
+    {"Supervisor ~w: child ~w started (~w): ~0p",
+     [Supervisor, Id, Pid, Mfa]}.
+
+format_meta(Meta, _) ->
+    maps:fold(
+      fun
+          (time, _, Acc) ->
+              Acc;
+          (domain = Key, Components, Acc) ->
+              Term = unicode:characters_to_binary(
+                       string:join(
+                         [atom_to_list(Cmp) || Cmp <- Components],
+                         ".")),
+              Acc#{Key => Term};
+          (Key, Value, Acc) ->
+              case convert_to_types_accepted_by_jsx(Value) of
+                  false -> Acc;
+                  Term  -> Acc#{Key => Term}
+              end
+      end, #{}, Meta).
+
+convert_to_types_accepted_by_jsx(Term) when is_map(Term) ->
+    maps:map(
+      fun(_, Value) -> convert_to_types_accepted_by_jsx(Value) end,
+      Term);
+convert_to_types_accepted_by_jsx(Term) when is_list(Term) ->
+    case io_lib:deep_char_list(Term) of
+        true ->
+            unicode:characters_to_binary(Term);
+        false ->
+            [convert_to_types_accepted_by_jsx(E) || E <- Term]
+    end;
+convert_to_types_accepted_by_jsx(Term) when is_tuple(Term) ->
+    convert_to_types_accepted_by_jsx(erlang:tuple_to_list(Term));
+convert_to_types_accepted_by_jsx(Term) when is_function(Term) ->
+    String = erlang:fun_to_list(Term),
+    unicode:characters_to_binary(String);
+convert_to_types_accepted_by_jsx(Term) when is_pid(Term) ->
+    String = erlang:pid_to_list(Term),
+    unicode:characters_to_binary(String);
+convert_to_types_accepted_by_jsx(Term) when is_port(Term) ->
+    String = erlang:port_to_list(Term),
+    unicode:characters_to_binary(String);
+convert_to_types_accepted_by_jsx(Term) when is_reference(Term) ->
+    String = erlang:ref_to_list(Term),
+    unicode:characters_to_binary(String);
+convert_to_types_accepted_by_jsx(Term) ->
+    Term.
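The formatter above returns one JSON object per event (time, level, msg, meta) followed by a newline, with format_meta/2 converting pids, references, funs and other terms into values jsx can encode. It is plugged into a handler through the standard formatter option. A minimal sketch, assuming the rabbit_logger_std_h handler module added below is in place; the handler id and file path are made up, and the config keys mirror those validated by its check_h_config/2:

%% Sketch: a file-backed handler that emits JSON, one object per line.
ok = logger:add_handler(rmq_json_file, rabbit_logger_std_h,
                        #{config => #{type => file,
                                      file => "/tmp/rabbit-json.log",
                                      max_no_bytes => 10 * 1024 * 1024,
                                      max_no_files => 5},
                          formatter => {rabbit_logger_json_fmt, #{}}}).
%% A call like this then lands in the file as a single line roughly of the
%% form {"time":"...","level":"info","msg":"hello","meta":{...}}.
logger:info("hello").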
|  | @ -0,0 +1,841 @@ | ||||||
|  | %% | ||||||
|  | %% %CopyrightBegin% | ||||||
|  | %% | ||||||
|  | %% Copyright Ericsson AB 2017-2020. All Rights Reserved. | ||||||
|  | %% | ||||||
|  | %% Licensed under the Apache License, Version 2.0 (the "License"); | ||||||
|  | %% you may not use this file except in compliance with the License. | ||||||
|  | %% You may obtain a copy of the License at | ||||||
|  | %% | ||||||
|  | %%     http://www.apache.org/licenses/LICENSE-2.0 | ||||||
|  | %% | ||||||
|  | %% Unless required by applicable law or agreed to in writing, software | ||||||
|  | %% distributed under the License is distributed on an "AS IS" BASIS, | ||||||
|  | %% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||||
|  | %% See the License for the specific language governing permissions and | ||||||
|  | %% limitations under the License. | ||||||
|  | %% | ||||||
|  | %% %CopyrightEnd% | ||||||
|  | %% | ||||||
|  | -module(rabbit_logger_std_h). | ||||||
|  | 
 | ||||||
|  | %-include("logger.hrl"). | ||||||
|  | %-include("logger_internal.hrl"). | ||||||
|  | %-include("logger_h_common.hrl"). | ||||||
|  | -ifdef(TEST). | ||||||
|  | -define(io_put_chars(DEVICE, DATA), begin | ||||||
|  |                                         %% We log to Common Test log as well. | ||||||
|  |                                         %% This is the file we use to check | ||||||
|  |                                         %% the message made it to | ||||||
|  |                                         %% stdout/stderr. | ||||||
|  |                                         ct:log("~ts", [DATA]), | ||||||
|  |                                         io:put_chars(DEVICE, DATA) | ||||||
|  |                                     end). | ||||||
|  | -else. | ||||||
|  | -define(io_put_chars(DEVICE, DATA), io:put_chars(DEVICE, DATA)). | ||||||
|  | -endif. | ||||||
|  | -define(file_write(DEVICE, DATA), file:write(DEVICE, DATA)). | ||||||
|  | -define(file_datasync(DEVICE), file:datasync(DEVICE)). | ||||||
|  | 
 | ||||||
|  | -include_lib("kernel/include/file.hrl"). | ||||||
|  | 
 | ||||||
|  | %% API | ||||||
|  | -export([filesync/1]). | ||||||
|  | -export([is_date_based_rotation_needed/3]). | ||||||
|  | 
 | ||||||
|  | %% logger_h_common callbacks | ||||||
|  | -export([init/2, check_config/4, config_changed/3, reset_state/2, | ||||||
|  |          filesync/3, write/4, handle_info/3, terminate/3]). | ||||||
|  | 
 | ||||||
|  | %% logger callbacks | ||||||
|  | -export([log/2, adding_handler/1, removing_handler/1, changing_config/3, | ||||||
|  |          filter_config/1]). | ||||||
|  | 
 | ||||||
|  | -define(DEFAULT_CALL_TIMEOUT, 5000). | ||||||
|  | 
 | ||||||
|  | %%%=================================================================== | ||||||
|  | %%% API | ||||||
|  | %%%=================================================================== | ||||||
|  | 
 | ||||||
|  | %%%----------------------------------------------------------------- | ||||||
|  | %%% | ||||||
|  | -spec filesync(Name) -> ok | {error,Reason} when | ||||||
|  |       Name :: atom(), | ||||||
|  |       Reason :: handler_busy | {badarg,term()}. | ||||||
|  | 
 | ||||||
|  | filesync(Name) -> | ||||||
|  |     logger_h_common:filesync(?MODULE,Name). | ||||||
|  | 
 | ||||||
|  | %%%=================================================================== | ||||||
|  | %%% logger callbacks - just forward to logger_h_common | ||||||
|  | %%%=================================================================== | ||||||
|  | 
 | ||||||
|  | %%%----------------------------------------------------------------- | ||||||
|  | %%% Handler being added | ||||||
|  | -spec adding_handler(Config) -> {ok,Config} | {error,Reason} when | ||||||
|  |       Config :: logger:handler_config(), | ||||||
|  |       Reason :: term(). | ||||||
|  | 
 | ||||||
|  | adding_handler(Config) -> | ||||||
|  |     logger_h_common:adding_handler(Config). | ||||||
|  | 
 | ||||||
|  | %%%----------------------------------------------------------------- | ||||||
|  | %%% Updating handler config | ||||||
|  | -spec changing_config(SetOrUpdate, OldConfig, NewConfig) -> | ||||||
|  |                               {ok,Config} | {error,Reason} when | ||||||
|  |       SetOrUpdate :: set | update, | ||||||
|  |       OldConfig :: logger:handler_config(), | ||||||
|  |       NewConfig :: logger:handler_config(), | ||||||
|  |       Config :: logger:handler_config(), | ||||||
|  |       Reason :: term(). | ||||||
|  | 
 | ||||||
|  | changing_config(SetOrUpdate, OldConfig, NewConfig) -> | ||||||
|  |     logger_h_common:changing_config(SetOrUpdate, OldConfig, NewConfig). | ||||||
|  | 
 | ||||||
|  | %%%----------------------------------------------------------------- | ||||||
|  | %%% Handler being removed | ||||||
|  | -spec removing_handler(Config) -> ok when | ||||||
|  |       Config :: logger:handler_config(). | ||||||
|  | 
 | ||||||
|  | removing_handler(Config) -> | ||||||
|  |     logger_h_common:removing_handler(Config). | ||||||
|  | 
 | ||||||
|  | %%%----------------------------------------------------------------- | ||||||
|  | %%% Log a string or report | ||||||
|  | -spec log(LogEvent, Config) -> ok when | ||||||
|  |       LogEvent :: logger:log_event(), | ||||||
|  |       Config :: logger:handler_config(). | ||||||
|  | 
 | ||||||
|  | log(LogEvent, Config) -> | ||||||
|  |     logger_h_common:log(LogEvent, Config). | ||||||
|  | 
 | ||||||
|  | %%%----------------------------------------------------------------- | ||||||
|  | %%% Remove internal fields from configuration | ||||||
|  | -spec filter_config(Config) -> Config when | ||||||
|  |       Config :: logger:handler_config(). | ||||||
|  | 
 | ||||||
|  | filter_config(Config) -> | ||||||
|  |     logger_h_common:filter_config(Config). | ||||||
|  | 
 | ||||||
|  | %%%=================================================================== | ||||||
|  | %%% logger_h_common callbacks | ||||||
|  | %%%=================================================================== | ||||||
|  | init(Name, Config) -> | ||||||
|  |     MyConfig = maps:with([type,file,modes,file_check,max_no_bytes, | ||||||
|  |                           rotate_on_date,max_no_files,compress_on_rotate], | ||||||
|  |                          Config), | ||||||
|  |     case file_ctrl_start(Name, MyConfig) of | ||||||
|  |         {ok,FileCtrlPid} -> | ||||||
|  |             {ok,MyConfig#{file_ctrl_pid=>FileCtrlPid}}; | ||||||
|  |         Error -> | ||||||
|  |             Error | ||||||
|  |     end. | ||||||
|  | 
 | ||||||
|  | check_config(Name,set,undefined,NewHConfig) -> | ||||||
|  |     check_h_config(merge_default_config(Name,normalize_config(NewHConfig))); | ||||||
|  | check_config(Name,SetOrUpdate,OldHConfig,NewHConfig0) -> | ||||||
|  |     WriteOnce = maps:with([type,file,modes],OldHConfig), | ||||||
|  |     Default = | ||||||
|  |         case SetOrUpdate of | ||||||
|  |             set -> | ||||||
|  |                 %% Do not reset write-once fields to defaults | ||||||
|  |                 merge_default_config(Name,WriteOnce); | ||||||
|  |             update -> | ||||||
|  |                 OldHConfig | ||||||
|  |         end, | ||||||
|  | 
 | ||||||
|  |     NewHConfig = maps:merge(Default, normalize_config(NewHConfig0)), | ||||||
|  | 
 | ||||||
|  |     %% Fail if write-once fields are changed | ||||||
|  |     case maps:with([type,file,modes],NewHConfig) of | ||||||
|  |         WriteOnce -> | ||||||
|  |             check_h_config(NewHConfig); | ||||||
|  |         Other -> | ||||||
|  |             {error,{illegal_config_change,?MODULE,WriteOnce,Other}} | ||||||
|  |     end. | ||||||
|  | 
 | ||||||
|  | check_h_config(HConfig) -> | ||||||
|  |     case check_h_config(maps:get(type,HConfig),maps:to_list(HConfig)) of | ||||||
|  |         ok -> | ||||||
|  |             {ok,fix_file_opts(HConfig)}; | ||||||
|  |         {error,{Key,Value}} -> | ||||||
|  |             {error,{invalid_config,?MODULE,#{Key=>Value}}} | ||||||
|  |     end. | ||||||
|  | 
 | ||||||
|  | check_h_config(Type,[{type,Type} | Config]) when Type =:= standard_io; | ||||||
|  |                                                  Type =:= standard_error; | ||||||
|  |                                                  Type =:= file -> | ||||||
|  |     check_h_config(Type,Config); | ||||||
|  | check_h_config({device,Device},[{type,{device,Device}} | Config]) -> | ||||||
|  |     check_h_config({device,Device},Config); | ||||||
|  | check_h_config(file,[{file,File} | Config]) when is_list(File) -> | ||||||
|  |     check_h_config(file,Config); | ||||||
|  | check_h_config(file,[{modes,Modes} | Config]) when is_list(Modes) -> | ||||||
|  |     check_h_config(file,Config); | ||||||
|  | check_h_config(file,[{max_no_bytes,Size} | Config]) | ||||||
|  |   when (is_integer(Size) andalso Size>0) orelse Size=:=infinity -> | ||||||
|  |     check_h_config(file,Config); | ||||||
|  | check_h_config(file,[{rotate_on_date,DateSpec}=Param | Config]) | ||||||
|  |   when is_list(DateSpec) orelse DateSpec=:=false -> | ||||||
|  |     case parse_date_spec(DateSpec) of | ||||||
|  |         error -> {error,Param}; | ||||||
|  |         _ -> check_h_config(file,Config) | ||||||
|  |     end; | ||||||
|  | check_h_config(file,[{max_no_files,Num} | Config]) when is_integer(Num), Num>=0 -> | ||||||
|  |     check_h_config(file,Config); | ||||||
|  | check_h_config(file,[{compress_on_rotate,Bool} | Config]) when is_boolean(Bool) -> | ||||||
|  |     check_h_config(file,Config); | ||||||
|  | check_h_config(file,[{file_check,FileCheck} | Config]) | ||||||
|  |   when is_integer(FileCheck), FileCheck>=0 -> | ||||||
|  |     check_h_config(file,Config); | ||||||
|  | check_h_config(_Type,[Other | _]) -> | ||||||
|  |     {error,Other}; | ||||||
|  | check_h_config(_Type,[]) -> | ||||||
|  |     ok. | ||||||
|  | 
 | ||||||
|  | normalize_config(#{type:={file,File}}=HConfig) -> | ||||||
|  |     normalize_config(HConfig#{type=>file,file=>File}); | ||||||
|  | normalize_config(#{type:={file,File,Modes}}=HConfig) -> | ||||||
|  |     normalize_config(HConfig#{type=>file,file=>File,modes=>Modes}); | ||||||
|  | normalize_config(#{file:=File}=HConfig) -> | ||||||
|  |     HConfig#{file=>filename:absname(File)}; | ||||||
|  | normalize_config(HConfig) -> | ||||||
|  |     HConfig. | ||||||
|  | 
 | ||||||
|  | merge_default_config(Name,#{type:=Type}=HConfig) -> | ||||||
|  |     merge_default_config(Name,Type,HConfig); | ||||||
|  | merge_default_config(Name,#{file:=_}=HConfig) -> | ||||||
|  |     merge_default_config(Name,file,HConfig); | ||||||
|  | merge_default_config(Name,HConfig) -> | ||||||
|  |     merge_default_config(Name,standard_io,HConfig). | ||||||
|  | 
 | ||||||
|  | merge_default_config(Name,Type,HConfig) -> | ||||||
|  |     maps:merge(get_default_config(Name,Type),HConfig). | ||||||
|  | 
 | ||||||
|  | get_default_config(Name,file) -> | ||||||
|  |      #{type => file, | ||||||
|  |        file => filename:absname(atom_to_list(Name)), | ||||||
|  |        modes => [raw,append], | ||||||
|  |        file_check => 0, | ||||||
|  |        max_no_bytes => infinity, | ||||||
|  |        rotate_on_date => false, | ||||||
|  |        max_no_files => 0, | ||||||
|  |        compress_on_rotate => false}; | ||||||
|  | get_default_config(_Name,Type) -> | ||||||
|  |      #{type => Type}. | ||||||
|  | 
 | ||||||
|  | fix_file_opts(#{modes:=Modes}=HConfig) -> | ||||||
|  |     HConfig#{modes=>fix_modes(Modes)}; | ||||||
|  | fix_file_opts(HConfig) -> | ||||||
|  |     HConfig#{filesync_repeat_interval=>no_repeat}. | ||||||
|  | 
 | ||||||
|  | fix_modes(Modes) -> | ||||||
|  |     %% Ensure write|append|exclusive | ||||||
|  |     Modes1 = | ||||||
|  |         case [M || M <- Modes, | ||||||
|  |                    lists:member(M,[write,append,exclusive])] of | ||||||
|  |             [] -> [append|Modes]; | ||||||
|  |             _ -> Modes | ||||||
|  |         end, | ||||||
|  |     %% Ensure raw | ||||||
|  |     Modes2 = | ||||||
|  |         case lists:member(raw,Modes) of | ||||||
|  |             false -> [raw|Modes1]; | ||||||
|  |             true -> Modes1 | ||||||
|  |         end, | ||||||
|  |     %% Ensure delayed_write | ||||||
|  |     case lists:partition(fun(delayed_write) -> true; | ||||||
|  |                             ({delayed_write,_,_}) -> true; | ||||||
|  |                             (_) -> false | ||||||
|  |                          end, Modes2) of | ||||||
|  |         {[],_} -> | ||||||
|  |             [delayed_write|Modes2]; | ||||||
|  |         _ -> | ||||||
|  |             Modes2 | ||||||
|  |     end. | ||||||
|  | 
 | ||||||
|  | config_changed(_Name, | ||||||
|  |                #{file_check:=FileCheck, | ||||||
|  |                  max_no_bytes:=Size, | ||||||
|  |                  rotate_on_date:=DateSpec, | ||||||
|  |                  max_no_files:=Count, | ||||||
|  |                  compress_on_rotate:=Compress}, | ||||||
|  |                #{file_check:=FileCheck, | ||||||
|  |                  max_no_bytes:=Size, | ||||||
|  |                  rotate_on_date:=DateSpec, | ||||||
|  |                  max_no_files:=Count, | ||||||
|  |                  compress_on_rotate:=Compress}=State) -> | ||||||
|  |     State; | ||||||
|  | config_changed(_Name, | ||||||
|  |                #{file_check:=FileCheck, | ||||||
|  |                  max_no_bytes:=Size, | ||||||
|  |                  rotate_on_date:=DateSpec, | ||||||
|  |                  max_no_files:=Count, | ||||||
|  |                  compress_on_rotate:=Compress}, | ||||||
|  |                #{file_ctrl_pid := FileCtrlPid} = State) -> | ||||||
|  |     FileCtrlPid ! {update_config,#{file_check=>FileCheck, | ||||||
|  |                                    max_no_bytes=>Size, | ||||||
|  |                                    rotate_on_date=>DateSpec, | ||||||
|  |                                    max_no_files=>Count, | ||||||
|  |                                    compress_on_rotate=>Compress}}, | ||||||
|  |     State#{file_check:=FileCheck, | ||||||
|  |            max_no_bytes:=Size, | ||||||
|  |            rotate_on_date:=DateSpec, | ||||||
|  |            max_no_files:=Count, | ||||||
|  |            compress_on_rotate:=Compress}; | ||||||
|  | config_changed(_Name,_NewHConfig,State) -> | ||||||
|  |     State. | ||||||
|  | 
 | ||||||
|  | filesync(_Name, SyncAsync, #{file_ctrl_pid := FileCtrlPid} = State) -> | ||||||
|  |     Result = file_ctrl_filesync(SyncAsync, FileCtrlPid), | ||||||
|  |     {Result,State}. | ||||||
|  | 
 | ||||||
|  | write(_Name, SyncAsync, Bin, #{file_ctrl_pid:=FileCtrlPid} = State) -> | ||||||
|  |     Result = file_write(SyncAsync, FileCtrlPid, Bin), | ||||||
|  |     {Result,State}. | ||||||
|  | 
 | ||||||
|  | reset_state(_Name, State) -> | ||||||
|  |     State. | ||||||
|  | 
 | ||||||
|  | handle_info(_Name, {'EXIT',Pid,Why}, #{file_ctrl_pid := Pid}=State) -> | ||||||
|  |     %% file_ctrl_pid died, file error, terminate handler | ||||||
|  |     exit({error,{write_failed,maps:with([type,file,modes],State),Why}}); | ||||||
|  | handle_info(_, _, State) -> | ||||||
|  |     State. | ||||||
|  | 
 | ||||||
|  | terminate(_Name, _Reason, #{file_ctrl_pid:=FWPid}) -> | ||||||
|  |     case is_process_alive(FWPid) of | ||||||
|  |         true -> | ||||||
|  |             unlink(FWPid), | ||||||
|  |             _ = file_ctrl_stop(FWPid), | ||||||
|  |             MRef = erlang:monitor(process, FWPid), | ||||||
|  |             receive | ||||||
|  |                 {'DOWN',MRef,_,_,_} -> | ||||||
|  |                     ok | ||||||
|  |             after | ||||||
|  |                 ?DEFAULT_CALL_TIMEOUT -> | ||||||
|  |                     exit(FWPid, kill), | ||||||
|  |                     ok | ||||||
|  |             end; | ||||||
|  |         false -> | ||||||
|  |             ok | ||||||
|  |     end. | ||||||
|  | 
 | ||||||
|  | %%%=================================================================== | ||||||
|  | %%% Internal functions | ||||||
|  | %%%=================================================================== | ||||||
|  | 
 | ||||||
|  | %%%----------------------------------------------------------------- | ||||||
|  | %%% | ||||||
|  | open_log_file(HandlerName,#{type:=file, | ||||||
|  |                             file:=FileName, | ||||||
|  |                             modes:=Modes, | ||||||
|  |                             file_check:=FileCheck}) -> | ||||||
|  |     try | ||||||
|  |         case filelib:ensure_dir(FileName) of | ||||||
|  |             ok -> | ||||||
|  |                 case file:open(FileName, Modes) of | ||||||
|  |                     {ok, Fd} -> | ||||||
|  |                         {ok,#file_info{inode=INode}} = | ||||||
|  |                             file:read_file_info(FileName,[raw]), | ||||||
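|  |                         %% Drop write/exclusive so that later re-opens | ||||||
|  |                         %% (e.g. after an external rotation) append to the | ||||||
|  |                         %% file instead of truncating it or failing. | ||||||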
|  |                         UpdateModes = [append | Modes--[write,append,exclusive]], | ||||||
|  |                         {ok,#{handler_name=>HandlerName, | ||||||
|  |                               file_name=>FileName, | ||||||
|  |                               modes=>UpdateModes, | ||||||
|  |                               file_check=>FileCheck, | ||||||
|  |                               fd=>Fd, | ||||||
|  |                               inode=>INode, | ||||||
|  |                               last_check=>timestamp(), | ||||||
|  |                               synced=>false, | ||||||
|  |                               write_res=>ok, | ||||||
|  |                               sync_res=>ok}}; | ||||||
|  |                     Error -> | ||||||
|  |                         Error | ||||||
|  |                 end; | ||||||
|  |             Error -> | ||||||
|  |                 Error | ||||||
|  |         end | ||||||
|  |     catch | ||||||
|  |         _:Reason -> {error,Reason} | ||||||
|  |     end. | ||||||
|  | 
 | ||||||
|  | close_log_file(#{fd:=Fd}) -> | ||||||
|  |     _ = file:datasync(Fd), %% file:datasync/1 may return an error here because it flushes the delayed_write buffer, surfacing any pending write error | ||||||
|  |     _ = file:close(Fd), | ||||||
|  |     ok; | ||||||
|  | close_log_file(_) -> | ||||||
|  |     ok. | ||||||
|  | 
 | ||||||
|  | %% A special close that makes sure the FD really gets closed, even when the first close fails because of a pending delayed-write error | ||||||
|  | delayed_write_close(#{fd:=Fd}) -> | ||||||
|  |     case file:close(Fd) of | ||||||
|  |         %% The close returned an error, most likely a failed delayed write, | ||||||
|  |         %% so close again to make sure the file descriptor is released. | ||||||
|  |         {error, _} -> | ||||||
|  |             file:close(Fd); | ||||||
|  |         Res -> | ||||||
|  |             Res | ||||||
|  |     end. | ||||||
|  | 
 | ||||||
|  | %%%----------------------------------------------------------------- | ||||||
|  | %%% File control process | ||||||
|  | 
 | ||||||
|  | file_ctrl_start(HandlerName, HConfig) -> | ||||||
|  |     Starter = self(), | ||||||
|  |     FileCtrlPid = | ||||||
|  |         spawn_link(fun() -> | ||||||
|  |                            file_ctrl_init(HandlerName, HConfig, Starter) | ||||||
|  |                    end), | ||||||
|  |     receive | ||||||
|  |         {FileCtrlPid,ok} -> | ||||||
|  |             {ok,FileCtrlPid}; | ||||||
|  |         {FileCtrlPid,Error} -> | ||||||
|  |             Error | ||||||
|  |     after | ||||||
|  |         ?DEFAULT_CALL_TIMEOUT -> | ||||||
|  |             {error,file_ctrl_process_not_started} | ||||||
|  |     end. | ||||||
|  | 
 | ||||||
|  | file_ctrl_stop(Pid) -> | ||||||
|  |     Pid ! stop. | ||||||
|  | 
 | ||||||
|  | file_write(async, Pid, Bin) -> | ||||||
|  |     Pid ! {log,Bin}, | ||||||
|  |     ok; | ||||||
|  | file_write(sync, Pid, Bin) -> | ||||||
|  |     file_ctrl_call(Pid, {log,Bin}). | ||||||
|  | 
 | ||||||
|  | file_ctrl_filesync(async, Pid) -> | ||||||
|  |     Pid ! filesync, | ||||||
|  |     ok; | ||||||
|  | file_ctrl_filesync(sync, Pid) -> | ||||||
|  |     file_ctrl_call(Pid, filesync). | ||||||
|  | 
 | ||||||
|  | file_ctrl_call(Pid, Msg) -> | ||||||
|  |     MRef = monitor(process, Pid), | ||||||
|  |     Pid ! {Msg,{self(),MRef}}, | ||||||
|  |     receive | ||||||
|  |         {MRef,Result} -> | ||||||
|  |             demonitor(MRef, [flush]), | ||||||
|  |             Result; | ||||||
|  |         {'DOWN',MRef,_Type,_Object,Reason} -> | ||||||
|  |             {error,Reason} | ||||||
|  |     after | ||||||
|  |         ?DEFAULT_CALL_TIMEOUT -> | ||||||
|  |             %% If this timeout triggers we will get a stray | ||||||
|  |             %% reply message in our mailbox eventually. | ||||||
|  |             %% That does not really matter though as it will | ||||||
|  |             %% end up in this module's handle_info and be ignored | ||||||
|  |             demonitor(MRef, [flush]), | ||||||
|  |             {error,{no_response,Pid}} | ||||||
|  |     end. | ||||||
|  | 
 | ||||||
|  | file_ctrl_init(HandlerName, | ||||||
|  |                #{type:=file, | ||||||
|  |                  max_no_bytes:=Size, | ||||||
|  |                  rotate_on_date:=DateSpec, | ||||||
|  |                  max_no_files:=Count, | ||||||
|  |                  compress_on_rotate:=Compress, | ||||||
|  |                  file:=FileName} = HConfig, | ||||||
|  |                Starter) -> | ||||||
|  |     process_flag(message_queue_data, off_heap), | ||||||
|  |     case open_log_file(HandlerName,HConfig) of | ||||||
|  |         {ok,State} -> | ||||||
|  |             %% Validate the date spec before acking the starting process | ||||||
|  |             %% so that it receives exactly one reply (ok or an error). | ||||||
|  |             case parse_date_spec(DateSpec) of | ||||||
|  |                 error -> | ||||||
|  |                     Starter ! {self(),{error,{invalid_date_spec,DateSpec}}}; | ||||||
|  |                 ParsedDS -> | ||||||
|  |                     Starter ! {self(),ok}, | ||||||
|  |                     %% Do the initial rotation (if any) only after acking the | ||||||
|  |                     %% starting process, otherwise system startup would be | ||||||
|  |                     %% delayed or could crash. | ||||||
|  |                     RotState = update_rotation({Size,ParsedDS,Count,Compress},State), | ||||||
|  |                     file_ctrl_loop(RotState) | ||||||
|  |             end; | ||||||
|  |         {error,Reason} -> | ||||||
|  |             Starter ! {self(),{error,{open_failed,FileName,Reason}}} | ||||||
|  |     end; | ||||||
|  | file_ctrl_init(HandlerName, #{type:={device,Dev}}, Starter) -> | ||||||
|  |     Starter ! {self(),ok}, | ||||||
|  |     file_ctrl_loop(#{handler_name=>HandlerName,dev=>Dev}); | ||||||
|  | file_ctrl_init(HandlerName, #{type:=StdDev}, Starter) -> | ||||||
|  |     Starter ! {self(),ok}, | ||||||
|  |     file_ctrl_loop(#{handler_name=>HandlerName,dev=>StdDev}). | ||||||
|  | 
 | ||||||
|  | file_ctrl_loop(State) -> | ||||||
|  |     receive | ||||||
|  |         %% asynchronous event | ||||||
|  |         {log,Bin} -> | ||||||
|  |             State1 = write_to_dev(Bin,State), | ||||||
|  |             file_ctrl_loop(State1); | ||||||
|  | 
 | ||||||
|  |         %% synchronous event | ||||||
|  |         {{log,Bin},{From,MRef}} -> | ||||||
|  |             State1 = ensure_file(State), | ||||||
|  |             State2 = write_to_dev(Bin,State1), | ||||||
|  |             From ! {MRef,ok}, | ||||||
|  |             file_ctrl_loop(State2); | ||||||
|  | 
 | ||||||
|  |         filesync -> | ||||||
|  |             State1 = sync_dev(State), | ||||||
|  |             file_ctrl_loop(State1); | ||||||
|  | 
 | ||||||
|  |         {filesync,{From,MRef}} -> | ||||||
|  |             State1 = ensure_file(State), | ||||||
|  |             State2 = sync_dev(State1), | ||||||
|  |             From ! {MRef,ok}, | ||||||
|  |             file_ctrl_loop(State2); | ||||||
|  | 
 | ||||||
|  |         {update_config,#{file_check:=FileCheck, | ||||||
|  |                          max_no_bytes:=Size, | ||||||
|  |                          rotate_on_date:=DateSpec, | ||||||
|  |                          max_no_files:=Count, | ||||||
|  |                          compress_on_rotate:=Compress}} -> | ||||||
|  |             case parse_date_spec(DateSpec) of | ||||||
|  |                 error -> | ||||||
|  |                     %% FIXME: Report parsing error? | ||||||
|  |                     file_ctrl_loop(State#{file_check=>FileCheck}); | ||||||
|  |                 ParsedDS -> | ||||||
|  |                     State1 = update_rotation({Size,ParsedDS,Count,Compress},State), | ||||||
|  |                     file_ctrl_loop(State1#{file_check=>FileCheck}) | ||||||
|  |             end; | ||||||
|  | 
 | ||||||
|  |         stop -> | ||||||
|  |             close_log_file(State), | ||||||
|  |             stopped | ||||||
|  |     end. | ||||||
|  | 
 | ||||||
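|  | %% file_check is the minimum number of milliseconds between two checks that | ||||||
|  | %% the log file still exists (see ensure_file/1 below); 0 means check | ||||||
|  | %% before every write. | ||||||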
|  | maybe_ensure_file(#{file_check:=0}=State) -> | ||||||
|  |     ensure_file(State); | ||||||
|  | maybe_ensure_file(#{last_check:=T0,file_check:=CheckInt}=State) | ||||||
|  |   when is_integer(CheckInt) -> | ||||||
|  |     T = timestamp(), | ||||||
|  |     if T-T0 > CheckInt -> ensure_file(State); | ||||||
|  |        true -> State | ||||||
|  |     end; | ||||||
|  | maybe_ensure_file(State) -> | ||||||
|  |     State. | ||||||
|  | 
 | ||||||
|  | %% In order to play well with tools like logrotate, we need to be able | ||||||
|  | %% to re-create the file if it has disappeared (e.g. if rotated by | ||||||
|  | %% logrotate) | ||||||
|  | ensure_file(#{inode:=INode0,file_name:=FileName,modes:=Modes}=State) -> | ||||||
|  |     case file:read_file_info(FileName,[raw]) of | ||||||
|  |         {ok,#file_info{inode=INode0}} -> | ||||||
|  |             State#{last_check=>timestamp()}; | ||||||
|  |         _ -> | ||||||
|  |             close_log_file(State), | ||||||
|  |             case file:open(FileName,Modes) of | ||||||
|  |                 {ok,Fd} -> | ||||||
|  |                     {ok,#file_info{inode=INode}} = | ||||||
|  |                         file:read_file_info(FileName,[raw]), | ||||||
|  |                     State#{fd=>Fd,inode=>INode, | ||||||
|  |                            last_check=>timestamp(), | ||||||
|  |                            synced=>true,sync_res=>ok}; | ||||||
|  |                 Error -> | ||||||
|  |                     exit({could_not_reopen_file,Error}) | ||||||
|  |             end | ||||||
|  |     end; | ||||||
|  | ensure_file(State) -> | ||||||
|  |     State. | ||||||
|  | 
 | ||||||
|  | write_to_dev(Bin,#{dev:=DevName}=State) -> | ||||||
|  |     ?io_put_chars(DevName, Bin), | ||||||
|  |     State; | ||||||
|  | write_to_dev(Bin, State) -> | ||||||
|  |     State1 = #{fd:=Fd} = maybe_ensure_file(State), | ||||||
|  |     Result = ?file_write(Fd, Bin), | ||||||
|  |     State2 = maybe_rotate_file(Bin,State1), | ||||||
|  |     maybe_notify_error(write,Result,State2), | ||||||
|  |     State2#{synced=>false,write_res=>Result}. | ||||||
|  | 
 | ||||||
|  | sync_dev(#{synced:=false}=State) -> | ||||||
|  |     State1 = #{fd:=Fd} = maybe_ensure_file(State), | ||||||
|  |     Result = ?file_datasync(Fd), | ||||||
|  |     maybe_notify_error(filesync,Result,State1), | ||||||
|  |     State1#{synced=>true,sync_res=>Result}; | ||||||
|  | sync_dev(State) -> | ||||||
|  |     State. | ||||||
|  | 
 | ||||||
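|  | %% A size of `infinity' combined with no date spec means rotation is | ||||||
|  | %% disabled: forget any rotation state and delete existing archives. | ||||||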
|  | update_rotation({infinity,false,_,_},State) -> | ||||||
|  |     maybe_remove_archives(0,State), | ||||||
|  |     maps:remove(rotation,State); | ||||||
|  | update_rotation({Size,DateSpec,Count,Compress},#{file_name:=FileName}=State) -> | ||||||
|  |     maybe_remove_archives(Count,State), | ||||||
|  |     {ok,#file_info{size=CurrSize}} = file:read_file_info(FileName,[raw]), | ||||||
|  |     State1 = State#{rotation=>#{size=>Size, | ||||||
|  |                                 on_date=>DateSpec, | ||||||
|  |                                 count=>Count, | ||||||
|  |                                 compress=>Compress, | ||||||
|  |                                 curr_size=>CurrSize}}, | ||||||
|  |     maybe_update_compress(0,State1), | ||||||
|  |     maybe_rotate_file(0,State1). | ||||||
|  | 
 | ||||||
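|  | %% Parses a newsyslog-style date-based rotation spec: | ||||||
|  | %% | ||||||
|  | %%   "$D<hour>"  - rotate every day; <hour> is 0..23 (e.g. "$D23") | ||||||
|  | %%   "$W<day>..." - rotate every week; <day> is 0..6, where 0 is Sunday | ||||||
|  | %%   "$M<day>..." - rotate every month; <day> is 1..31, or "l"/"L" for the | ||||||
|  | %%                  last day of the month | ||||||
|  | %% | ||||||
|  | %% The exact syntax accepted after the weekly/monthly day (and how an hour | ||||||
|  | %% is given there, if at all) is defined by parse_hour/2 below. | ||||||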
|  | parse_date_spec(false) -> | ||||||
|  |     false; | ||||||
|  | parse_date_spec("") -> | ||||||
|  |     false; | ||||||
|  | parse_date_spec([$$,$D | DateSpec]) -> | ||||||
|  |     io:format(standard_error, "parse_date_spec: ~p (hour)~n", [DateSpec]), | ||||||
|  |     parse_hour(DateSpec, #{every=>day, | ||||||
|  |                            hour=>0}); | ||||||
|  | parse_date_spec([$$,$W | DateSpec]) -> | ||||||
|  |     io:format(standard_error, "parse_date_spec: ~p (week)~n", [DateSpec]), | ||||||
|  |     parse_day_of_week(DateSpec, #{every=>week, | ||||||
|  |                                   hour=>0}); | ||||||
|  | parse_date_spec([$$,$M | DateSpec]) -> | ||||||
|  |     io:format(standard_error, "parse_date_spec: ~p (month)~n", [DateSpec]), | ||||||
|  |     parse_day_of_month(DateSpec, #{every=>month, | ||||||
|  |                                    hour=>0}); | ||||||
|  | parse_date_spec(DateSpec) -> | ||||||
|  |     io:format(standard_error, "parse_date_spec: ~p (error)~n", [DateSpec]), | ||||||
|  |     error. | ||||||
|  | 
 | ||||||
|  | parse_hour(Rest,Result) -> | ||||||
|  |     case date_string_to_int(Rest,0,23) of | ||||||
|  |         {Hour,""} -> Result#{hour=>Hour}; | ||||||
|  |         error -> error | ||||||
|  |     end. | ||||||
|  | 
 | ||||||
|  | parse_day_of_week(Rest,Result) -> | ||||||
|  |     case date_string_to_int(Rest,0,6) of | ||||||
|  |         %% Bind the remainder to a fresh variable; reusing `Rest' here would | ||||||
|  |         %% never match, as the integer prefix has been consumed. | ||||||
|  |         {DayOfWeek,Rest1} -> parse_hour(Rest1,Result#{day_of_week=>DayOfWeek}); | ||||||
|  |         error -> error | ||||||
|  |     end. | ||||||
|  | 
 | ||||||
|  | parse_day_of_month([Last | Rest],Result) | ||||||
|  |   when Last=:=$l orelse Last=:=$L -> | ||||||
|  |     parse_hour(Rest,Result#{day_of_month=>last}); | ||||||
|  | parse_day_of_month(Rest,Result) -> | ||||||
|  |     case date_string_to_int(Rest,1,31) of | ||||||
|  |         {DayOfMonth,Rest1} -> parse_hour(Rest1,Result#{day_of_month=>DayOfMonth}); | ||||||
|  |         error -> error | ||||||
|  |     end. | ||||||
|  | 
 | ||||||
|  | date_string_to_int(String,Min,Max) -> | ||||||
|  |     case string:to_integer(String) of | ||||||
|  |         {Int,Rest} when is_integer(Int) andalso Int>=Min andalso Int=<Max -> | ||||||
|  |             {Int,Rest}; | ||||||
|  |         _ -> | ||||||
|  |             error | ||||||
|  |     end. | ||||||
|  | 
 | ||||||
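|  | %% Deletes any archive whose index is at or above the configured count, so | ||||||
|  | %% that lowering max_no_files also removes the now-excess archives. | ||||||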
|  | maybe_remove_archives(Count,#{file_name:=FileName}=State) -> | ||||||
|  |     Archive = rot_file_name(FileName,Count,false), | ||||||
|  |     CompressedArchive = rot_file_name(FileName,Count,true), | ||||||
|  |     case {file:read_file_info(Archive,[raw]), | ||||||
|  |           file:read_file_info(CompressedArchive,[raw])} of | ||||||
|  |         {{error,enoent},{error,enoent}} -> | ||||||
|  |             ok; | ||||||
|  |         _ -> | ||||||
|  |             _ = file:delete(Archive), | ||||||
|  |             _ = file:delete(CompressedArchive), | ||||||
|  |             maybe_remove_archives(Count+1,State) | ||||||
|  |     end. | ||||||
|  | 
 | ||||||
|  | maybe_update_compress(Count,#{rotation:=#{count:=Count}}) -> | ||||||
|  |     ok; | ||||||
|  | maybe_update_compress(N,#{file_name:=FileName, | ||||||
|  |                           rotation:=#{compress:=Compress}}=State) -> | ||||||
|  |     Archive = rot_file_name(FileName,N,not Compress), | ||||||
|  |     case file:read_file_info(Archive,[raw]) of | ||||||
|  |         {ok,_} when Compress -> | ||||||
|  |             compress_file(Archive); | ||||||
|  |         {ok,_} -> | ||||||
|  |             decompress_file(Archive); | ||||||
|  |         _ -> | ||||||
|  |             ok | ||||||
|  |     end, | ||||||
|  |     maybe_update_compress(N+1,State). | ||||||
|  | 
 | ||||||
|  | maybe_rotate_file(Bin,#{rotation:=_}=State) when is_binary(Bin) -> | ||||||
|  |     maybe_rotate_file(byte_size(Bin),State); | ||||||
|  | maybe_rotate_file(AddSize,#{rotation:=#{size:=RotSize, | ||||||
|  |                                         curr_size:=CurrSize}=Rotation}=State) -> | ||||||
|  |     {DateBasedRotNeeded, Rotation1} = is_date_based_rotation_needed(Rotation), | ||||||
|  |     NewSize = CurrSize + AddSize, | ||||||
|  |     if NewSize>RotSize -> | ||||||
|  |             rotate_file(State#{rotation=>Rotation1#{curr_size=>NewSize}}); | ||||||
|  |        DateBasedRotNeeded -> | ||||||
|  |             rotate_file(State#{rotation=>Rotation1#{curr_size=>NewSize}}); | ||||||
|  |        true -> | ||||||
|  |             State#{rotation=>Rotation1#{curr_size=>NewSize}} | ||||||
|  |     end; | ||||||
|  | maybe_rotate_file(_Bin,State) -> | ||||||
|  |     State. | ||||||
|  | 
 | ||||||
|  | is_date_based_rotation_needed(#{last_rotation_ts:=PrevTimestamp, | ||||||
|  |                                 on_date:=DateSpec}=Rotation) -> | ||||||
|  |     CurrTimestamp = rotation_timestamp(), | ||||||
|  |     case is_date_based_rotation_needed(DateSpec,PrevTimestamp,CurrTimestamp) of | ||||||
|  |         true -> {true,Rotation#{last_rotation_ts=>CurrTimestamp}}; | ||||||
|  |         false -> {false,Rotation} | ||||||
|  |     end; | ||||||
|  | is_date_based_rotation_needed(Rotation) -> | ||||||
|  |     {false,Rotation#{last_rotation_ts=>rotation_timestamp()}}. | ||||||
|  | 
 | ||||||
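|  | %% Computes the most recent date/time matching the spec (the "target") and | ||||||
|  | %% reports that a rotation is needed if the previous rotation happened | ||||||
|  | %% before that target while the current time is at or past it. | ||||||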
|  | is_date_based_rotation_needed(#{every:=day,hour:=Hour}, | ||||||
|  |                               {Date1,Time1},{Date2,Time2}) | ||||||
|  |   when (Date1<Date2 orelse (Date1=:=Date2 andalso Time1<{Hour,0,0})) andalso | ||||||
|  |        Time2>={Hour,0,0} -> | ||||||
|  |     true; | ||||||
|  | is_date_based_rotation_needed(#{every:=day,hour:=Hour}, | ||||||
|  |                               {Date1,_}=DateTime1,{Date2,Time2}=DateTime2) | ||||||
|  |   when Date1<Date2 andalso | ||||||
|  |        Time2<{Hour,0,0} -> | ||||||
|  |     GregDays2 = calendar:date_to_gregorian_days(Date2), | ||||||
|  |     TargetDate = calendar:gregorian_days_to_date(GregDays2 - 1), | ||||||
|  |     TargetDateTime = {TargetDate,{Hour,0,0}}, | ||||||
|  |     DateTime1<TargetDateTime andalso DateTime2>=TargetDateTime; | ||||||
|  | is_date_based_rotation_needed(#{every:=week,day_of_week:=TargetDoW,hour:=Hour}, | ||||||
|  |                               DateTime1,{Date2,_}=DateTime2) -> | ||||||
|  |     DoW2 = calendar:day_of_the_week(Date2) rem 7, | ||||||
|  |     DaysSinceTargetDoW = ((DoW2 - TargetDoW) + 7) rem 7, | ||||||
|  |     GregDays2 = calendar:date_to_gregorian_days(Date2), | ||||||
|  |     TargetGregDays = GregDays2 - DaysSinceTargetDoW, | ||||||
|  |     TargetDate = calendar:gregorian_days_to_date(TargetGregDays), | ||||||
|  |     TargetDateTime = {TargetDate,{Hour,0,0}}, | ||||||
|  |     DateTime1<TargetDateTime andalso DateTime2>=TargetDateTime; | ||||||
|  | is_date_based_rotation_needed(#{every:=month,day_of_month:=last,hour:=Hour}, | ||||||
|  |                               DateTime1,{{Year2,Month2,_}=Date2,_}=DateTime2) -> | ||||||
|  |     DoMA = calendar:last_day_of_the_month(Year2, Month2), | ||||||
|  |     DateA = {Year2,Month2,DoMA}, | ||||||
|  |     TargetDate = if | ||||||
|  |                      DateA>Date2 -> | ||||||
|  |                          case Month2 - 1 of | ||||||
|  |                              0 -> | ||||||
|  |                                  {Year2-1,12,31}; | ||||||
|  |                              MonthB -> | ||||||
|  |                                  {Year2,MonthB, | ||||||
|  |                                   calendar:last_day_of_the_month(Year2,MonthB)} | ||||||
|  |                          end; | ||||||
|  |                      true -> | ||||||
|  |                          DateA | ||||||
|  |                  end, | ||||||
|  |     TargetDateTime = {TargetDate,{Hour,0,0}}, | ||||||
|  |     io:format(standard_error, "TargetDateTime=~p~n", [TargetDateTime]), | ||||||
|  |     DateTime1<TargetDateTime andalso DateTime2>=TargetDateTime; | ||||||
|  | is_date_based_rotation_needed(#{every:=month,day_of_month:=DoM,hour:=Hour}, | ||||||
|  |                               DateTime1,{{Year2,Month2,_}=Date2,_}=DateTime2) -> | ||||||
|  |     DateA = {Year2,Month2,adapt_day_of_month(Year2,Month2,DoM)}, | ||||||
|  |     TargetDate = if | ||||||
|  |                      DateA>Date2 -> | ||||||
|  |                          case Month2 - 1 of | ||||||
|  |                              0 -> | ||||||
|  |                                  {Year2-1,12,31}; | ||||||
|  |                              MonthB -> | ||||||
|  |                                  {Year2,MonthB, | ||||||
|  |                                   adapt_day_of_month(Year2,MonthB,DoM)} | ||||||
|  |                          end; | ||||||
|  |                      true -> | ||||||
|  |                          DateA | ||||||
|  |                  end, | ||||||
|  |     TargetDateTime = {TargetDate,{Hour,0,0}}, | ||||||
|  |     io:format(standard_error, "TargetDateTime=~p~n", [TargetDateTime]), | ||||||
|  |     DateTime1<TargetDateTime andalso DateTime2>=TargetDateTime; | ||||||
|  | is_date_based_rotation_needed(_,_,_) -> | ||||||
|  |     false. | ||||||
|  | 
 | ||||||
|  | adapt_day_of_month(Year,Month,Day) -> | ||||||
|  |     LastDay = calendar:last_day_of_the_month(Year,Month), | ||||||
|  |     erlang:min(Day,LastDay). | ||||||
|  | 
 | ||||||
|  | rotate_file(#{file_name:=FileName,modes:=Modes,rotation:=Rotation}=State) -> | ||||||
|  |     State1 = sync_dev(State), | ||||||
|  |     _ = delayed_write_close(State), | ||||||
|  |     rotate_files(FileName,maps:get(count,Rotation),maps:get(compress,Rotation)), | ||||||
|  |     case file:open(FileName,Modes) of | ||||||
|  |         {ok,Fd} -> | ||||||
|  |             {ok,#file_info{inode=INode}} = file:read_file_info(FileName,[raw]), | ||||||
|  |             CurrTimestamp = rotation_timestamp(), | ||||||
|  |             State1#{fd=>Fd,inode=>INode, | ||||||
|  |                     rotation=>Rotation#{curr_size=>0, | ||||||
|  |                                         last_rotation_ts=>CurrTimestamp}}; | ||||||
|  |         Error -> | ||||||
|  |             exit({could_not_reopen_file,Error}) | ||||||
|  |     end. | ||||||
|  | 
 | ||||||
|  | rotation_timestamp() -> | ||||||
|  |     calendar:now_to_local_time(erlang:timestamp()). | ||||||
|  | 
 | ||||||
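|  | %% Shifts the existing archives up by one index (".0" is always the most | ||||||
|  | %% recent one) before the current log file is renamed to ".0"; with a | ||||||
|  | %% count of 0 the file is simply deleted instead of being archived. | ||||||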
|  | rotate_files(FileName,0,_Compress) -> | ||||||
|  |     _ = file:delete(FileName), | ||||||
|  |     ok; | ||||||
|  | rotate_files(FileName,1,Compress) -> | ||||||
|  |     FileName0 = FileName++".0", | ||||||
|  |     _ = file:rename(FileName,FileName0), | ||||||
|  |     if Compress -> compress_file(FileName0); | ||||||
|  |        true -> ok | ||||||
|  |     end, | ||||||
|  |     ok; | ||||||
|  | rotate_files(FileName,Count,Compress) -> | ||||||
|  |     _ = file:rename(rot_file_name(FileName,Count-2,Compress), | ||||||
|  |                     rot_file_name(FileName,Count-1,Compress)), | ||||||
|  |     rotate_files(FileName,Count-1,Compress). | ||||||
|  | 
 | ||||||
|  | rot_file_name(FileName,Count,false) -> | ||||||
|  |     FileName ++ "." ++ integer_to_list(Count); | ||||||
|  | rot_file_name(FileName,Count,true) -> | ||||||
|  |     rot_file_name(FileName,Count,false) ++ ".gz". | ||||||
|  | 
 | ||||||
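|  | %% Compresses the given archive into <file>.gz and deletes the original; | ||||||
|  | %% window bits 31 (15 + 16) select the gzip format rather than a raw | ||||||
|  | %% zlib stream. | ||||||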
|  | compress_file(FileName) -> | ||||||
|  |     {ok,In} = file:open(FileName,[read,binary]), | ||||||
|  |     {ok,Out} = file:open(FileName++".gz",[write]), | ||||||
|  |     Z = zlib:open(), | ||||||
|  |     zlib:deflateInit(Z, default, deflated, 31, 8, default), | ||||||
|  |     compress_data(Z,In,Out), | ||||||
|  |     zlib:deflateEnd(Z), | ||||||
|  |     zlib:close(Z), | ||||||
|  |     _ = file:close(In), | ||||||
|  |     _ = file:close(Out), | ||||||
|  |     _ = file:delete(FileName), | ||||||
|  |     ok. | ||||||
|  | 
 | ||||||
|  | compress_data(Z,In,Out) -> | ||||||
|  |     case file:read(In,100000) of | ||||||
|  |         {ok,Data} -> | ||||||
|  |             Compressed = zlib:deflate(Z, Data), | ||||||
|  |             _ = file:write(Out,Compressed), | ||||||
|  |             compress_data(Z,In,Out); | ||||||
|  |         eof -> | ||||||
|  |             Compressed = zlib:deflate(Z, <<>>, finish), | ||||||
|  |             _ = file:write(Out,Compressed), | ||||||
|  |             ok | ||||||
|  |     end. | ||||||
|  | 
 | ||||||
|  | decompress_file(FileName) -> | ||||||
|  |     {ok,In} = file:open(FileName,[read,binary]), | ||||||
|  |     {ok,Out} = file:open(filename:rootname(FileName,".gz"),[write]), | ||||||
|  |     Z = zlib:open(), | ||||||
|  |     zlib:inflateInit(Z, 31), | ||||||
|  |     decompress_data(Z,In,Out), | ||||||
|  |     zlib:inflateEnd(Z), | ||||||
|  |     zlib:close(Z), | ||||||
|  |     _ = file:close(In), | ||||||
|  |     _ = file:close(Out), | ||||||
|  |     _ = file:delete(FileName), | ||||||
|  |     ok. | ||||||
|  | 
 | ||||||
|  | decompress_data(Z,In,Out) -> | ||||||
|  |     case file:read(In,1000) of | ||||||
|  |         {ok,Data} -> | ||||||
|  |             Decompressed = zlib:inflate(Z, Data), | ||||||
|  |             _ = file:write(Out,Decompressed), | ||||||
|  |             decompress_data(Z,In,Out); | ||||||
|  |         eof -> | ||||||
|  |             ok | ||||||
|  |     end. | ||||||
|  | 
 | ||||||
|  | maybe_notify_error(_Op, ok, _State) -> | ||||||
|  |     ok; | ||||||
|  | maybe_notify_error(Op, Result, #{write_res:=WR,sync_res:=SR}) | ||||||
|  |   when (Op==write andalso Result==WR) orelse | ||||||
|  |        (Op==filesync andalso Result==SR) -> | ||||||
|  |     %% don't report same error twice | ||||||
|  |     ok; | ||||||
|  | maybe_notify_error(Op, Error, #{handler_name:=HandlerName,file_name:=FileName}) -> | ||||||
|  |     logger_h_common:error_notify({HandlerName,Op,FileName,Error}), | ||||||
|  |     ok. | ||||||
|  | 
 | ||||||
|  | timestamp() -> | ||||||
|  |     erlang:monotonic_time(millisecond). | ||||||
|  | @ -0,0 +1,167 @@ | ||||||
|  | %% This Source Code Form is subject to the terms of the Mozilla Public | ||||||
|  | %% License, v. 2.0. If a copy of the MPL was not distributed with this | ||||||
|  | %% file, You can obtain one at https://mozilla.org/MPL/2.0/. | ||||||
|  | %% | ||||||
|  | %% Copyright (c) 2021 VMware, Inc. or its affiliates.  All rights reserved. | ||||||
|  | %% | ||||||
|  | 
 | ||||||
|  | -module(rabbit_logger_text_fmt). | ||||||
|  | 
 | ||||||
|  | -export([format/2]). | ||||||
|  | 
 | ||||||
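|  | %% Plain-text formatter for the Erlang Logger. The formatter configuration | ||||||
|  | %% keys inspected below are: | ||||||
|  | %%   prefix     - set to `false' to drop the "<time> [<level>] <pid>" prefix | ||||||
|  | %%   color      - `true' to wrap each line in ANSI color escape sequences | ||||||
|  | %%   level_name - `full', `lc3', `uc3', `lc4' or `uc4' (level name style) | ||||||
|  | %% | ||||||
|  | %% A hypothetical way to attach this formatter to a handler (standard | ||||||
|  | %% logger API; the values are only an example): | ||||||
|  | %% | ||||||
|  | %%     logger:update_handler_config( | ||||||
|  | %%       default, formatter, | ||||||
|  | %%       {rabbit_logger_text_fmt, #{color => true, level_name => lc4}}). | ||||||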
|  | format(#{msg := Msg, meta := Meta} = LogEvent, Config) -> | ||||||
|  |     Prefix = format_prefix(LogEvent, Config), | ||||||
|  |     Color = pick_color(LogEvent, Config), | ||||||
|  |     FormattedMsg = format_msg(Msg, Meta, Config), | ||||||
|  |     prepend_prefix_to_msg_and_add_color(Prefix, Color, FormattedMsg, Config). | ||||||
|  | 
 | ||||||
|  | format_prefix(_, #{prefix := false}) -> | ||||||
|  |     none; | ||||||
|  | format_prefix(#{level := Level, | ||||||
|  |                 meta := #{time := Timestamp, | ||||||
|  |                           pid := Pid}}, | ||||||
|  |               Config) -> | ||||||
|  |     Time = format_time(Timestamp, Config), | ||||||
|  |     LevelName = level_name(Level, Config), | ||||||
|  |     io_lib:format("~ts [~ts] ~p", [Time, LevelName, Pid]). | ||||||
|  | 
 | ||||||
|  | level_name(Level, #{level_name := full}) -> | ||||||
|  |     Level; | ||||||
|  | level_name(Level, #{level_name := uc3}) -> | ||||||
|  |     level_3letter_uc_name(Level); | ||||||
|  | level_name(Level, #{level_name := lc3}) -> | ||||||
|  |     level_3letter_lc_name(Level); | ||||||
|  | level_name(Level, #{level_name := uc4}) -> | ||||||
|  |     level_4letter_uc_name(Level); | ||||||
|  | level_name(Level, #{level_name := lc4}) -> | ||||||
|  |     level_4letter_lc_name(Level); | ||||||
|  | level_name(Level, _) -> | ||||||
|  |     Level. | ||||||
|  | 
 | ||||||
|  | level_3letter_lc_name(debug)     -> "dbg"; | ||||||
|  | level_3letter_lc_name(info)      -> "inf"; | ||||||
|  | level_3letter_lc_name(notice)    -> "ntc"; | ||||||
|  | level_3letter_lc_name(warning)   -> "wrn"; | ||||||
|  | level_3letter_lc_name(error)     -> "err"; | ||||||
|  | level_3letter_lc_name(critical)  -> "crt"; | ||||||
|  | level_3letter_lc_name(alert)     -> "alt"; | ||||||
|  | level_3letter_lc_name(emergency) -> "emg". | ||||||
|  | 
 | ||||||
|  | level_3letter_uc_name(debug)     -> "DBG"; | ||||||
|  | level_3letter_uc_name(info)      -> "INF"; | ||||||
|  | level_3letter_uc_name(notice)    -> "NTC"; | ||||||
|  | level_3letter_uc_name(warning)   -> "WRN"; | ||||||
|  | level_3letter_uc_name(error)     -> "ERR"; | ||||||
|  | level_3letter_uc_name(critical)  -> "CRT"; | ||||||
|  | level_3letter_uc_name(alert)     -> "ALT"; | ||||||
|  | level_3letter_uc_name(emergency) -> "EMG". | ||||||
|  | 
 | ||||||
|  | level_4letter_lc_name(debug)     -> "dbug"; | ||||||
|  | level_4letter_lc_name(info)      -> "info"; | ||||||
|  | level_4letter_lc_name(notice)    -> "noti"; | ||||||
|  | level_4letter_lc_name(warning)   -> "warn"; | ||||||
|  | level_4letter_lc_name(error)     -> "erro"; | ||||||
|  | level_4letter_lc_name(critical)  -> "crit"; | ||||||
|  | level_4letter_lc_name(alert)     -> "alrt"; | ||||||
|  | level_4letter_lc_name(emergency) -> "emgc". | ||||||
|  | 
 | ||||||
|  | level_4letter_uc_name(debug)     -> "DBUG"; | ||||||
|  | level_4letter_uc_name(info)      -> "INFO"; | ||||||
|  | level_4letter_uc_name(notice)    -> "NOTI"; | ||||||
|  | level_4letter_uc_name(warning)   -> "WARN"; | ||||||
|  | level_4letter_uc_name(error)     -> "ERRO"; | ||||||
|  | level_4letter_uc_name(critical)  -> "CRIT"; | ||||||
|  | level_4letter_uc_name(alert)     -> "ALRT"; | ||||||
|  | level_4letter_uc_name(emergency) -> "EMGC". | ||||||
|  | 
 | ||||||
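|  | %% Logger event timestamps are microseconds since the Unix epoch, hence the | ||||||
|  | %% explicit {unit, microsecond} option below. | ||||||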
|  | format_time(Timestamp, _) -> | ||||||
|  |     Options = [{unit, microsecond}, | ||||||
|  |                {time_designator, $\s}], | ||||||
|  |     calendar:system_time_to_rfc3339(Timestamp, Options). | ||||||
|  | 
 | ||||||
|  | format_msg({string, Chardata}, Meta, Config) -> | ||||||
|  |     format_msg({"~ts", [Chardata]}, Meta, Config); | ||||||
|  | format_msg({report, Report}, Meta, Config) -> | ||||||
|  |     FormattedReport = format_report(Report, Meta, Config), | ||||||
|  |     format_msg(FormattedReport, Meta, Config); | ||||||
|  | format_msg({Format, Args}, _, _) -> | ||||||
|  |     io_lib:format(Format, Args). | ||||||
|  | 
 | ||||||
|  | format_report( | ||||||
|  |   #{label := {application_controller, _}} = Report, Meta, Config) -> | ||||||
|  |     format_application_progress(Report, Meta, Config); | ||||||
|  | format_report( | ||||||
|  |   #{label := {supervisor, progress}} = Report, Meta, Config) -> | ||||||
|  |     format_supervisor_progress(Report, Meta, Config); | ||||||
|  | format_report( | ||||||
|  |   Report, #{report_cb := Cb} = Meta, Config) -> | ||||||
|  |     try | ||||||
|  |         case erlang:fun_info(Cb, arity) of | ||||||
|  |             {arity, 1} -> Cb(Report); | ||||||
|  |             {arity, 2} -> {"~ts", [Cb(Report, #{})]} | ||||||
|  |         end | ||||||
|  |     catch | ||||||
|  |         _:_:_ -> | ||||||
|  |             format_report(Report, maps:remove(report_cb, Meta), Config) | ||||||
|  |     end; | ||||||
|  | format_report(Report, _, _) -> | ||||||
|  |     logger:format_report(Report). | ||||||
|  | 
 | ||||||
|  | format_application_progress(#{label := {_, progress}, | ||||||
|  |                               report := InternalReport}, _, _) -> | ||||||
|  |     Application = proplists:get_value(application, InternalReport), | ||||||
|  |     StartedAt = proplists:get_value(started_at, InternalReport), | ||||||
|  |     {"Application ~w started on ~0p", | ||||||
|  |      [Application, StartedAt]}; | ||||||
|  | format_application_progress(#{label := {_, exit}, | ||||||
|  |                               report := InternalReport}, _, _) -> | ||||||
|  |     Application = proplists:get_value(application, InternalReport), | ||||||
|  |     Exited = proplists:get_value(exited, InternalReport), | ||||||
|  |     {"Application ~w exited with reason: ~0p", | ||||||
|  |      [Application, Exited]}. | ||||||
|  | 
 | ||||||
|  | format_supervisor_progress(#{report := InternalReport}, _, _) -> | ||||||
|  |     Supervisor = proplists:get_value(supervisor, InternalReport), | ||||||
|  |     Started = proplists:get_value(started, InternalReport), | ||||||
|  |     Id = proplists:get_value(id, Started), | ||||||
|  |     Pid = proplists:get_value(pid, Started), | ||||||
|  |     Mfa = proplists:get_value(mfargs, Started), | ||||||
|  |     {"Supervisor ~w: child ~w started (~w): ~0p", | ||||||
|  |      [Supervisor, Id, Pid, Mfa]}. | ||||||
|  | 
 | ||||||
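|  | %% Returns a {StartSequence, EndSequence} pair of ANSI escape codes: | ||||||
|  | %% 256-color foreground codes for most levels, bold white on a colored | ||||||
|  | %% background for critical and above, and "\033[0m" to reset. | ||||||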
|  | pick_color(_, #{color := false}) -> | ||||||
|  |     {"", ""}; | ||||||
|  | pick_color(#{level := Level}, #{color := true} = Config) -> | ||||||
|  |     ColorStart = level_to_color(Level, Config), | ||||||
|  |     ColorEnd = "\033[0m", | ||||||
|  |     {ColorStart, ColorEnd}. | ||||||
|  | 
 | ||||||
|  | level_to_color(debug, _)     -> "\033[38;5;246m"; | ||||||
|  | level_to_color(info, _)      -> ""; | ||||||
|  | level_to_color(notice, _)    -> "\033[38;5;87m"; | ||||||
|  | level_to_color(warning, _)   -> "\033[38;5;214m"; | ||||||
|  | level_to_color(error, _)     -> "\033[38;5;160m"; | ||||||
|  | level_to_color(critical, _)  -> "\033[1;37m\033[48;5;20m"; | ||||||
|  | level_to_color(alert, _)     -> "\033[1;37m\033[48;5;93m"; | ||||||
|  | level_to_color(emergency, _) -> "\033[1;37m\033[48;5;196m". | ||||||
|  | 
 | ||||||
|  | prepend_prefix_to_msg_and_add_color( | ||||||
|  |   none, {ColorStart, ColorEnd}, FormattedMsg, Config) -> | ||||||
|  |     Lines = split_lines(FormattedMsg, Config), | ||||||
|  |     [case Line of | ||||||
|  |          "" -> [$\n]; | ||||||
|  |          _  -> [ColorStart, Line, ColorEnd, $\n] | ||||||
|  |      end | ||||||
|  |      || Line <- Lines]; | ||||||
|  | prepend_prefix_to_msg_and_add_color( | ||||||
|  |   Prefix, {ColorStart, ColorEnd}, FormattedMsg, Config) -> | ||||||
|  |     Lines = split_lines(FormattedMsg, Config), | ||||||
|  |     [case Line of | ||||||
|  |          "" -> [ColorStart, Prefix, ColorEnd, $\n]; | ||||||
|  |          _  -> [ColorStart, Prefix, " ", Line, ColorEnd, $\n] | ||||||
|  |      end | ||||||
|  |      || Line <- Lines]. | ||||||
|  | 
 | ||||||
|  | split_lines(FormattedMsg, _) -> | ||||||
|  |     FlattenMsg = lists:flatten(FormattedMsg), | ||||||
|  |     string:split(FlattenMsg, [$\n], all). | ||||||
|  | @ -1,7 +1,10 @@ | ||||||
| -module(rabbit_prelaunch). | -module(rabbit_prelaunch). | ||||||
| 
 | 
 | ||||||
|  | -include_lib("kernel/include/logger.hrl"). | ||||||
| -include_lib("eunit/include/eunit.hrl"). | -include_lib("eunit/include/eunit.hrl"). | ||||||
| 
 | 
 | ||||||
|  | -include_lib("rabbit_common/include/logging.hrl"). | ||||||
|  | 
 | ||||||
| -export([run_prelaunch_first_phase/0, | -export([run_prelaunch_first_phase/0, | ||||||
|          assert_mnesia_is_stopped/0, |          assert_mnesia_is_stopped/0, | ||||||
|          get_context/0, |          get_context/0, | ||||||
|  | @ -24,6 +27,8 @@ | ||||||
| 
 | 
 | ||||||
| run_prelaunch_first_phase() -> | run_prelaunch_first_phase() -> | ||||||
|     try |     try | ||||||
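|  |         %% Attach the prelaunch logger domain to this process so that | ||||||
|  |         %% every message it logs can be filtered and routed by domain. | ||||||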
|  |         ok = logger:set_process_metadata( | ||||||
|  |                #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|         do_run() |         do_run() | ||||||
|     catch |     catch | ||||||
|         throw:{error, _} = Error -> |         throw:{error, _} = Error -> | ||||||
|  | @ -67,26 +72,25 @@ do_run() -> | ||||||
|     ?assertMatch(#{}, Context0), |     ?assertMatch(#{}, Context0), | ||||||
| 
 | 
 | ||||||
|     %% Setup logging for the prelaunch phase. |     %% Setup logging for the prelaunch phase. | ||||||
|     ok = rabbit_prelaunch_early_logging:setup_early_logging(Context0, true), |     ok = rabbit_prelaunch_early_logging:setup_early_logging(Context0), | ||||||
| 
 | 
 | ||||||
|     IsInitialPass = is_initial_pass(), |     IsInitialPass = is_initial_pass(), | ||||||
|     case IsInitialPass of |     case IsInitialPass of | ||||||
|         true -> |         true -> | ||||||
|             rabbit_log_prelaunch:debug(""), |             ?LOG_DEBUG(""), | ||||||
|             rabbit_log_prelaunch:debug( |             ?LOG_DEBUG("== Prelaunch phase [1/2] (initial pass) =="), | ||||||
|               "== Prelaunch phase [1/2] (initial pass) =="), |             ?LOG_DEBUG(""); | ||||||
|             rabbit_log_prelaunch:debug(""); |  | ||||||
|         false -> |         false -> | ||||||
|             rabbit_log_prelaunch:debug(""), |             ?LOG_DEBUG(""), | ||||||
|             rabbit_log_prelaunch:debug("== Prelaunch phase [1/2] =="), |             ?LOG_DEBUG("== Prelaunch phase [1/2] =="), | ||||||
|             rabbit_log_prelaunch:debug("") |             ?LOG_DEBUG("") | ||||||
|     end, |     end, | ||||||
|     rabbit_env:log_process_env(), |     rabbit_env:log_process_env(), | ||||||
| 
 | 
 | ||||||
|     %% Load rabbitmq-env.conf, redo logging setup and continue. |     %% Load rabbitmq-env.conf, redo logging setup and continue. | ||||||
|     Context1 = rabbit_env:get_context_after_logging_init(Context0), |     Context1 = rabbit_env:get_context_after_logging_init(Context0), | ||||||
|     ?assertMatch(#{}, Context1), |     ?assertMatch(#{}, Context1), | ||||||
|     ok = rabbit_prelaunch_early_logging:setup_early_logging(Context1, true), |     ok = rabbit_prelaunch_early_logging:setup_early_logging(Context1), | ||||||
|     rabbit_env:log_process_env(), |     rabbit_env:log_process_env(), | ||||||
| 
 | 
 | ||||||
|     %% Complete context now that we have the final environment loaded. |     %% Complete context now that we have the final environment loaded. | ||||||
|  | @ -111,7 +115,7 @@ do_run() -> | ||||||
|     ok = rabbit_prelaunch_dist:setup(Context), |     ok = rabbit_prelaunch_dist:setup(Context), | ||||||
| 
 | 
 | ||||||
|     %% 4. Write PID file. |     %% 4. Write PID file. | ||||||
|     rabbit_log_prelaunch:debug(""), |     ?LOG_DEBUG(""), | ||||||
|     _ = write_pid_file(Context), |     _ = write_pid_file(Context), | ||||||
|     ignore. |     ignore. | ||||||
| 
 | 
 | ||||||
|  | @ -138,7 +142,7 @@ get_stop_reason() -> | ||||||
| set_stop_reason(Reason) -> | set_stop_reason(Reason) -> | ||||||
|     case get_stop_reason() of |     case get_stop_reason() of | ||||||
|         undefined -> |         undefined -> | ||||||
|             rabbit_log_prelaunch:debug("Set stop reason to: ~p", [Reason]), |             ?LOG_DEBUG("Set stop reason to: ~p", [Reason]), | ||||||
|             persistent_term:put(?PT_KEY_STOP_REASON, Reason); |             persistent_term:put(?PT_KEY_STOP_REASON, Reason); | ||||||
|         _ -> |         _ -> | ||||||
|             ok |             ok | ||||||
|  | @ -161,7 +165,7 @@ setup_shutdown_func() -> | ||||||
|         {ok, {ThisMod, ThisFunc}} -> |         {ok, {ThisMod, ThisFunc}} -> | ||||||
|             ok; |             ok; | ||||||
|         {ok, {ExistingMod, ExistingFunc}} -> |         {ok, {ExistingMod, ExistingFunc}} -> | ||||||
|             rabbit_log_prelaunch:debug( |             ?LOG_DEBUG( | ||||||
|               "Setting up kernel shutdown function: ~s:~s/1 " |               "Setting up kernel shutdown function: ~s:~s/1 " | ||||||
|               "(chained with ~s:~s/1)", |               "(chained with ~s:~s/1)", | ||||||
|               [ThisMod, ThisFunc, ExistingMod, ExistingFunc]), |               [ThisMod, ThisFunc, ExistingMod, ExistingFunc]), | ||||||
|  | @ -170,7 +174,7 @@ setup_shutdown_func() -> | ||||||
|                    ExistingShutdownFunc), |                    ExistingShutdownFunc), | ||||||
|             ok = record_kernel_shutdown_func(ThisMod, ThisFunc); |             ok = record_kernel_shutdown_func(ThisMod, ThisFunc); | ||||||
|         _ -> |         _ -> | ||||||
|             rabbit_log_prelaunch:debug( |             ?LOG_DEBUG( | ||||||
|               "Setting up kernel shutdown function: ~s:~s/1", |               "Setting up kernel shutdown function: ~s:~s/1", | ||||||
|               [ThisMod, ThisFunc]), |               [ThisMod, ThisFunc]), | ||||||
|             ok = record_kernel_shutdown_func(ThisMod, ThisFunc) |             ok = record_kernel_shutdown_func(ThisMod, ThisFunc) | ||||||
|  | @ -182,7 +186,7 @@ record_kernel_shutdown_func(Mod, Func) -> | ||||||
|       [{persistent, true}]). |       [{persistent, true}]). | ||||||
| 
 | 
 | ||||||
| shutdown_func(Reason) -> | shutdown_func(Reason) -> | ||||||
|     rabbit_log_prelaunch:debug( |     ?LOG_DEBUG( | ||||||
|       "Running ~s:shutdown_func() as part of `kernel` shutdown", [?MODULE]), |       "Running ~s:shutdown_func() as part of `kernel` shutdown", [?MODULE]), | ||||||
|     Context = get_context(), |     Context = get_context(), | ||||||
|     remove_pid_file(Context), |     remove_pid_file(Context), | ||||||
|  | @ -195,7 +199,7 @@ shutdown_func(Reason) -> | ||||||
|     end. |     end. | ||||||
| 
 | 
 | ||||||
| write_pid_file(#{pid_file := PidFile}) -> | write_pid_file(#{pid_file := PidFile}) -> | ||||||
|     rabbit_log_prelaunch:debug("Writing PID file: ~s", [PidFile]), |     ?LOG_DEBUG("Writing PID file: ~s", [PidFile]), | ||||||
|     case filelib:ensure_dir(PidFile) of |     case filelib:ensure_dir(PidFile) of | ||||||
|         ok -> |         ok -> | ||||||
|             OSPid = os:getpid(), |             OSPid = os:getpid(), | ||||||
|  | @ -203,13 +207,13 @@ write_pid_file(#{pid_file := PidFile}) -> | ||||||
|                 ok -> |                 ok -> | ||||||
|                     ok; |                     ok; | ||||||
|                 {error, Reason} = Error -> |                 {error, Reason} = Error -> | ||||||
|                     rabbit_log_prelaunch:warning( |                     ?LOG_WARNING( | ||||||
|                       "Failed to write PID file \"~s\": ~s", |                       "Failed to write PID file \"~s\": ~s", | ||||||
|                       [PidFile, file:format_error(Reason)]), |                       [PidFile, file:format_error(Reason)]), | ||||||
|                     Error |                     Error | ||||||
|             end; |             end; | ||||||
|         {error, Reason} = Error -> |         {error, Reason} = Error -> | ||||||
|             rabbit_log_prelaunch:warning( |             ?LOG_WARNING( | ||||||
|               "Failed to create PID file \"~s\" directory: ~s", |               "Failed to create PID file \"~s\" directory: ~s", | ||||||
|               [PidFile, file:format_error(Reason)]), |               [PidFile, file:format_error(Reason)]), | ||||||
|             Error |             Error | ||||||
|  | @ -218,10 +222,10 @@ write_pid_file(_) -> | ||||||
|     ok. |     ok. | ||||||
| 
 | 
 | ||||||
| remove_pid_file(#{pid_file := PidFile, keep_pid_file_on_exit := true}) -> | remove_pid_file(#{pid_file := PidFile, keep_pid_file_on_exit := true}) -> | ||||||
|     rabbit_log_prelaunch:debug("Keeping PID file: ~s", [PidFile]), |     ?LOG_DEBUG("Keeping PID file: ~s", [PidFile]), | ||||||
|     ok; |     ok; | ||||||
| remove_pid_file(#{pid_file := PidFile}) -> | remove_pid_file(#{pid_file := PidFile}) -> | ||||||
|     rabbit_log_prelaunch:debug("Deleting PID file: ~s", [PidFile]), |     ?LOG_DEBUG("Deleting PID file: ~s", [PidFile]), | ||||||
|     _ = file:delete(PidFile), |     _ = file:delete(PidFile), | ||||||
|     ok; |     ok; | ||||||
| remove_pid_file(_) -> | remove_pid_file(_) -> | ||||||
|  |  | ||||||
|  | @ -1,9 +1,11 @@ | ||||||
| -module(rabbit_prelaunch_conf). | -module(rabbit_prelaunch_conf). | ||||||
| 
 | 
 | ||||||
| -include_lib("kernel/include/file.hrl"). | -include_lib("kernel/include/file.hrl"). | ||||||
|  | -include_lib("kernel/include/logger.hrl"). | ||||||
| -include_lib("stdlib/include/zip.hrl"). | -include_lib("stdlib/include/zip.hrl"). | ||||||
| 
 | 
 | ||||||
| -include_lib("rabbit_common/include/rabbit.hrl"). | -include_lib("rabbit_common/include/rabbit.hrl"). | ||||||
|  | -include_lib("rabbit_common/include/logging.hrl"). | ||||||
| 
 | 
 | ||||||
| -export([setup/1, | -export([setup/1, | ||||||
|          get_config_state/0, |          get_config_state/0, | ||||||
|  | @ -15,8 +17,9 @@ | ||||||
| -endif. | -endif. | ||||||
| 
 | 
 | ||||||
| setup(Context) -> | setup(Context) -> | ||||||
|     rabbit_log_prelaunch:debug(""), |     ?LOG_DEBUG( | ||||||
|     rabbit_log_prelaunch:debug("== Configuration =="), |        "\n== Configuration ==", | ||||||
|  |        #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
| 
 | 
 | ||||||
|     %% TODO: Check if directories/files are inside Mnesia dir. |     %% TODO: Check if directories/files are inside Mnesia dir. | ||||||
| 
 | 
 | ||||||
|  | @ -52,9 +55,10 @@ setup(Context) -> | ||||||
|                     #{config_files => ConfigFiles, |                     #{config_files => ConfigFiles, | ||||||
|                       config_advanced_file => AdvancedConfigFile}; |                       config_advanced_file => AdvancedConfigFile}; | ||||||
|                 undefined when AdvancedConfigFile =/= undefined -> |                 undefined when AdvancedConfigFile =/= undefined -> | ||||||
|                     rabbit_log_prelaunch:warning( |                     ?LOG_WARNING( | ||||||
|                       "Using RABBITMQ_ADVANCED_CONFIG_FILE: ~s", |                       "Using RABBITMQ_ADVANCED_CONFIG_FILE: ~s", | ||||||
|                       [AdvancedConfigFile]), |                       [AdvancedConfigFile], | ||||||
|  |                       #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|                     Config = load_cuttlefish_config_file(Context, |                     Config = load_cuttlefish_config_file(Context, | ||||||
|                                                          AdditionalConfigFiles, |                                                          AdditionalConfigFiles, | ||||||
|                                                          AdvancedConfigFile), |                                                          AdvancedConfigFile), | ||||||
|  | @ -66,10 +70,10 @@ setup(Context) -> | ||||||
|                     #{config_files => [], |                     #{config_files => [], | ||||||
|                       config_advanced_file => undefined} |                       config_advanced_file => undefined} | ||||||
|             end, |             end, | ||||||
|     ok = override_with_hard_coded_critical_config(), |  | ||||||
|     ok = set_credentials_obfuscation_secret(), |     ok = set_credentials_obfuscation_secret(), | ||||||
|     rabbit_log_prelaunch:debug( |     ?LOG_DEBUG( | ||||||
|       "Saving config state to application env: ~p", [State]), |       "Saving config state to application env: ~p", [State], | ||||||
|  |       #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|     store_config_state(State). |     store_config_state(State). | ||||||
| 
 | 
 | ||||||
| store_config_state(ConfigState) -> | store_config_state(ConfigState) -> | ||||||
|  | @ -83,7 +87,8 @@ get_config_state() -> | ||||||
| %% ------------------------------------------------------------------- | %% ------------------------------------------------------------------- | ||||||
| 
 | 
 | ||||||
| set_default_config() -> | set_default_config() -> | ||||||
|     rabbit_log_prelaunch:debug("Setting default config"), |     ?LOG_DEBUG("Setting default config", | ||||||
|  |                #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|     Config = [ |     Config = [ | ||||||
|               {ra, |               {ra, | ||||||
|                [ |                [ | ||||||
|  | @ -99,6 +104,8 @@ set_default_config() -> | ||||||
|                 %% goes down it is still immediately detected |                 %% goes down it is still immediately detected | ||||||
|                 {poll_interval, 5000} |                 {poll_interval, 5000} | ||||||
|                ]}, |                ]}, | ||||||
|  |               {syslog, | ||||||
|  |                [{app_name, "rabbitmq-server"}]}, | ||||||
|               {sysmon_handler, |               {sysmon_handler, | ||||||
|                [{process_limit, 100}, |                [{process_limit, 100}, | ||||||
|                 {port_limit, 100}, |                 {port_limit, 100}, | ||||||
|  | @ -126,15 +133,18 @@ find_actual_main_config_file(#{main_config_file := File}) -> | ||||||
|                 true -> |                 true -> | ||||||
|                     case filelib:is_regular(NewFormatFile) of |                     case filelib:is_regular(NewFormatFile) of | ||||||
|                         true -> |                         true -> | ||||||
|                             rabbit_log_prelaunch:warning( |                             ?LOG_WARNING( | ||||||
|                               "Both old (.config) and new (.conf) format " |                               "Both old (.config) and new (.conf) format " | ||||||
|                               "config files exist."), |                               "config files exist.", | ||||||
|                             rabbit_log_prelaunch:warning( |                               #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|  |                             ?LOG_WARNING( | ||||||
|                               "Using the old format config file: ~s", |                               "Using the old format config file: ~s", | ||||||
|                               [OldFormatFile]), |                               [OldFormatFile], | ||||||
|                             rabbit_log_prelaunch:warning( |                               #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|  |                             ?LOG_WARNING( | ||||||
|                               "Please update your config files to the new " |                               "Please update your config files to the new " | ||||||
|                               "format and remove the old file."), |                               "format and remove the old file.", | ||||||
|  |                               #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|                             ok; |                             ok; | ||||||
|                         false -> |                         false -> | ||||||
|                             ok |                             ok | ||||||
|  | @ -193,15 +203,18 @@ generate_config_from_cuttlefish_files(Context, | ||||||
|     SchemaFiles = find_cuttlefish_schemas(Context), |     SchemaFiles = find_cuttlefish_schemas(Context), | ||||||
|     case SchemaFiles of |     case SchemaFiles of | ||||||
|         [] -> |         [] -> | ||||||
|             rabbit_log_prelaunch:error( |             ?LOG_ERROR( | ||||||
|               "No configuration schema found~n", []), |               "No configuration schema found", [], | ||||||
|  |               #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|             throw({error, no_configuration_schema_found}); |             throw({error, no_configuration_schema_found}); | ||||||
|         _ -> |         _ -> | ||||||
|             rabbit_log_prelaunch:debug( |             ?LOG_DEBUG( | ||||||
|               "Configuration schemas found:~n", []), |               "Configuration schemas found:~n", [], | ||||||
|  |                #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|             lists:foreach( |             lists:foreach( | ||||||
|               fun(SchemaFile) -> |               fun(SchemaFile) -> | ||||||
|                       rabbit_log_prelaunch:debug("  - ~ts", [SchemaFile]) |                       ?LOG_DEBUG("  - ~ts", [SchemaFile], | ||||||
|  |                                  #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}) | ||||||
|               end, |               end, | ||||||
|               SchemaFiles), |               SchemaFiles), | ||||||
|             ok |             ok | ||||||
|  | @ -209,37 +222,44 @@ generate_config_from_cuttlefish_files(Context, | ||||||
|     Schema = cuttlefish_schema:files(SchemaFiles), |     Schema = cuttlefish_schema:files(SchemaFiles), | ||||||
| 
 | 
 | ||||||
|     %% Load configuration. |     %% Load configuration. | ||||||
|     rabbit_log_prelaunch:debug( |     ?LOG_DEBUG( | ||||||
|       "Loading configuration files (Cuttlefish based):"), |       "Loading configuration files (Cuttlefish based):", | ||||||
|  |       #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|     lists:foreach( |     lists:foreach( | ||||||
|       fun(ConfigFile) -> |       fun(ConfigFile) -> | ||||||
|               rabbit_log_prelaunch:debug("  - ~ts", [ConfigFile]) |               ?LOG_DEBUG("  - ~ts", [ConfigFile], | ||||||
|  |                          #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}) | ||||||
|       end, ConfigFiles), |       end, ConfigFiles), | ||||||
|     case cuttlefish_conf:files(ConfigFiles) of |     case cuttlefish_conf:files(ConfigFiles) of | ||||||
|         {errorlist, Errors} -> |         {errorlist, Errors} -> | ||||||
|             rabbit_log_prelaunch:error("Error parsing configuration:"), |             ?LOG_ERROR("Error parsing configuration:", | ||||||
|  |                        #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|             lists:foreach( |             lists:foreach( | ||||||
|               fun(Error) -> |               fun(Error) -> | ||||||
|                       rabbit_log_prelaunch:error( |                       ?LOG_ERROR( | ||||||
|                         "  - ~ts", |                         "  - ~ts", | ||||||
|                         [cuttlefish_error:xlate(Error)]) |                         [cuttlefish_error:xlate(Error)], | ||||||
|  |                         #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}) | ||||||
|               end, Errors), |               end, Errors), | ||||||
|             rabbit_log_prelaunch:error( |             ?LOG_ERROR( | ||||||
|               "Are these files using the Cuttlefish format?"), |               "Are these files using the Cuttlefish format?", | ||||||
|  |               #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|             throw({error, failed_to_parse_configuration_file}); |             throw({error, failed_to_parse_configuration_file}); | ||||||
|         Config0 -> |         Config0 -> | ||||||
|             %% Finalize configuration, based on the schema. |             %% Finalize configuration, based on the schema. | ||||||
|             Config = case cuttlefish_generator:map(Schema, Config0) of |             Config = case cuttlefish_generator:map(Schema, Config0) of | ||||||
|                          {error, Phase, {errorlist, Errors}} -> |                          {error, Phase, {errorlist, Errors}} -> | ||||||
|                              %% TODO |                              %% TODO | ||||||
|                              rabbit_log_prelaunch:error( |                              ?LOG_ERROR( | ||||||
|                                "Error preparing configuration in phase ~ts:", |                                "Error preparing configuration in phase ~ts:", | ||||||
|                                [Phase]), |                                [Phase], | ||||||
|  |                                #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|                              lists:foreach( |                              lists:foreach( | ||||||
|                                fun(Error) -> |                                fun(Error) -> | ||||||
|                                        rabbit_log_prelaunch:error( |                                        ?LOG_ERROR( | ||||||
|                                          "  - ~ts", |                                          "  - ~ts", | ||||||
|                                          [cuttlefish_error:xlate(Error)]) |                                          [cuttlefish_error:xlate(Error)], | ||||||
|  |                                          #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}) | ||||||
|                                end, Errors), |                                end, Errors), | ||||||
|                              throw( |                              throw( | ||||||
|                                {error, failed_to_prepare_configuration}); |                                {error, failed_to_prepare_configuration}); | ||||||
|  | @ -253,8 +273,9 @@ generate_config_from_cuttlefish_files(Context, | ||||||
| 
 | 
 | ||||||
| find_cuttlefish_schemas(Context) -> | find_cuttlefish_schemas(Context) -> | ||||||
|     Apps = list_apps(Context), |     Apps = list_apps(Context), | ||||||
|     rabbit_log_prelaunch:debug( |     ?LOG_DEBUG( | ||||||
|       "Looking up configuration schemas in the following applications:"), |       "Looking up configuration schemas in the following applications:", | ||||||
|  |       #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|     find_cuttlefish_schemas(Apps, []). |     find_cuttlefish_schemas(Apps, []). | ||||||
| 
 | 
 | ||||||
| find_cuttlefish_schemas([App | Rest], AllSchemas) -> | find_cuttlefish_schemas([App | Rest], AllSchemas) -> | ||||||
|  | @ -281,9 +302,10 @@ list_apps1([Dir | Rest], Apps) -> | ||||||
|             Apps1 = lists:umerge(Apps, lists:sort(NewApps)), |             Apps1 = lists:umerge(Apps, lists:sort(NewApps)), | ||||||
|             list_apps1(Rest, Apps1); |             list_apps1(Rest, Apps1); | ||||||
|         {error, Reason} -> |         {error, Reason} -> | ||||||
|             rabbit_log_prelaunch:debug( |             ?LOG_DEBUG( | ||||||
|               "Failed to list directory \"~ts\" content: ~ts", |               "Failed to list directory \"~ts\" content: ~ts", | ||||||
|               [Dir, file:format_error(Reason)]), |               [Dir, file:format_error(Reason)], | ||||||
|  |               #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|             list_apps1(Rest, Apps) |             list_apps1(Rest, Apps) | ||||||
|     end; |     end; | ||||||
| list_apps1([], AppInfos) -> | list_apps1([], AppInfos) -> | ||||||
|  | @ -299,17 +321,19 @@ list_schemas_in_app(App) -> | ||||||
|                true -> |                true -> | ||||||
|                    case code:priv_dir(App) of |                    case code:priv_dir(App) of | ||||||
|                        {error, bad_name} -> |                        {error, bad_name} -> | ||||||
|                            rabbit_log_prelaunch:debug( |                            ?LOG_DEBUG( | ||||||
|                              "  [ ] ~s (no readable priv dir)", [App]), |                              "  [ ] ~s (no readable priv dir)", [App], | ||||||
|  |                              #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|                            []; |                            []; | ||||||
|                        PrivDir -> |                        PrivDir -> | ||||||
|                            SchemaDir = filename:join([PrivDir, "schema"]), |                            SchemaDir = filename:join([PrivDir, "schema"]), | ||||||
|                            do_list_schemas_in_app(App, SchemaDir) |                            do_list_schemas_in_app(App, SchemaDir) | ||||||
|                    end; |                    end; | ||||||
|                Reason1 -> |                Reason1 -> | ||||||
|                    rabbit_log_prelaunch:debug( |                    ?LOG_DEBUG( | ||||||
|                      "  [ ] ~s (failed to load application: ~p)", |                      "  [ ] ~s (failed to load application: ~p)", | ||||||
|                      [App, Reason1]), |                      [App, Reason1], | ||||||
|  |                      #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|                    [] |                    [] | ||||||
|            end, |            end, | ||||||
|     case Unload of |     case Unload of | ||||||
|  | @ -322,74 +346,71 @@ list_schemas_in_app(App) -> | ||||||
| do_list_schemas_in_app(App, SchemaDir) -> | do_list_schemas_in_app(App, SchemaDir) -> | ||||||
|     case erl_prim_loader:list_dir(SchemaDir) of |     case erl_prim_loader:list_dir(SchemaDir) of | ||||||
|         {ok, Files} -> |         {ok, Files} -> | ||||||
|             rabbit_log_prelaunch:debug("  [x] ~s", [App]), |             ?LOG_DEBUG("  [x] ~s", [App], | ||||||
|  |                        #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|             [filename:join(SchemaDir, File) |             [filename:join(SchemaDir, File) | ||||||
|              || [C | _] = File <- Files, |              || [C | _] = File <- Files, | ||||||
|                 C =/= $.]; |                 C =/= $.]; | ||||||
|         error -> |         error -> | ||||||
|             rabbit_log_prelaunch:debug( |             ?LOG_DEBUG( | ||||||
|               "  [ ] ~s (no readable schema dir)", [App]), |               "  [ ] ~s (no readable schema dir)", [App], | ||||||
|  |               #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|             [] |             [] | ||||||
|     end. |     end. | ||||||
| 
 | 
 | ||||||
| override_with_advanced_config(Config, undefined) -> | override_with_advanced_config(Config, undefined) -> | ||||||
|     Config; |     Config; | ||||||
| override_with_advanced_config(Config, AdvancedConfigFile) -> | override_with_advanced_config(Config, AdvancedConfigFile) -> | ||||||
|     rabbit_log_prelaunch:debug( |     ?LOG_DEBUG( | ||||||
|       "Override with advanced configuration file \"~ts\"", |       "Override with advanced configuration file \"~ts\"", | ||||||
|       [AdvancedConfigFile]), |       [AdvancedConfigFile], | ||||||
|  |       #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|     case file:consult(AdvancedConfigFile) of |     case file:consult(AdvancedConfigFile) of | ||||||
|         {ok, [AdvancedConfig]} -> |         {ok, [AdvancedConfig]} -> | ||||||
|             cuttlefish_advanced:overlay(Config, AdvancedConfig); |             cuttlefish_advanced:overlay(Config, AdvancedConfig); | ||||||
|         {ok, OtherTerms} -> |         {ok, OtherTerms} -> | ||||||
|             rabbit_log_prelaunch:error( |             ?LOG_ERROR( | ||||||
|               "Failed to load advanced configuration file \"~ts\", " |               "Failed to load advanced configuration file \"~ts\", " | ||||||
|               "incorrect format: ~p", |               "incorrect format: ~p", | ||||||
|               [AdvancedConfigFile, OtherTerms]), |               [AdvancedConfigFile, OtherTerms], | ||||||
|  |               #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|             throw({error, failed_to_parse_advanced_configuration_file}); |             throw({error, failed_to_parse_advanced_configuration_file}); | ||||||
|         {error, Reason} -> |         {error, Reason} -> | ||||||
|             rabbit_log_prelaunch:error( |             ?LOG_ERROR( | ||||||
|               "Failed to load advanced configuration file \"~ts\": ~ts", |               "Failed to load advanced configuration file \"~ts\": ~ts", | ||||||
|               [AdvancedConfigFile, file:format_error(Reason)]), |               [AdvancedConfigFile, file:format_error(Reason)], | ||||||
|  |               #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|             throw({error, failed_to_read_advanced_configuration_file}) |             throw({error, failed_to_read_advanced_configuration_file}) | ||||||
|     end. |     end. | ||||||
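Note that the first clause above only accepts {ok, [AdvancedConfig]}, i.e. an advanced configuration file holding exactly one Erlang term (a list of per-application proplists). The following is a hedged sketch, not part of this commit; the file path, the channel_max setting and the advanced_config_example/0 helper are invented for illustration.

    advanced_config_example() ->
        %% A minimal advanced.config-style file: a single Erlang term,
        %% terminated by a dot, holding per-application settings.
        File = "/tmp/advanced.config",
        ok = file:write_file(File, <<"[{rabbit, [{channel_max, 1024}]}].\n">>),
        %% file:consult/1 returns {ok, [Term]}, matching the first clause
        %% of the case expression in override_with_advanced_config/2.
        {ok, [AdvancedConfig]} = file:consult(File),
        [{rabbit, [{channel_max, 1024}]}] = AdvancedConfig,
        ok.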
| 
 | 
 | ||||||
| override_with_hard_coded_critical_config() -> |  | ||||||
|     rabbit_log_prelaunch:debug("Override with hard-coded critical config"), |  | ||||||
|     Config = [ |  | ||||||
|               {ra, |  | ||||||
|                %% Make Ra use a custom logger that dispatches to lager |  | ||||||
|                %% instead of the default OTP logger |  | ||||||
|                [{logger_module, rabbit_log_ra_shim}]}, |  | ||||||
|               {osiris, |  | ||||||
|                [{logger_module, rabbit_log_osiris_shim}]} |  | ||||||
|              ], |  | ||||||
|     apply_erlang_term_based_config(Config). |  | ||||||
| 
 |  | ||||||
| apply_erlang_term_based_config([{_, []} | Rest]) -> | apply_erlang_term_based_config([{_, []} | Rest]) -> | ||||||
|     apply_erlang_term_based_config(Rest); |     apply_erlang_term_based_config(Rest); | ||||||
| apply_erlang_term_based_config([{App, Vars} | Rest]) -> | apply_erlang_term_based_config([{App, Vars} | Rest]) -> | ||||||
|     rabbit_log_prelaunch:debug("  Applying configuration for '~s':", [App]), |     ?LOG_DEBUG("  Applying configuration for '~s':", [App], | ||||||
|  |                #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|     ok = apply_app_env_vars(App, Vars), |     ok = apply_app_env_vars(App, Vars), | ||||||
|     apply_erlang_term_based_config(Rest); |     apply_erlang_term_based_config(Rest); | ||||||
| apply_erlang_term_based_config([]) -> | apply_erlang_term_based_config([]) -> | ||||||
|     ok. |     ok. | ||||||
| 
 | 
 | ||||||
| apply_app_env_vars(App, [{Var, Value} | Rest]) -> | apply_app_env_vars(App, [{Var, Value} | Rest]) -> | ||||||
|     rabbit_log_prelaunch:debug("    - ~s = ~p", [Var, Value]), |     ?LOG_DEBUG("    - ~s = ~p", [Var, Value], | ||||||
|  |                #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|     ok = application:set_env(App, Var, Value, [{persistent, true}]), |     ok = application:set_env(App, Var, Value, [{persistent, true}]), | ||||||
|     apply_app_env_vars(App, Rest); |     apply_app_env_vars(App, Rest); | ||||||
| apply_app_env_vars(_, []) -> | apply_app_env_vars(_, []) -> | ||||||
|     ok. |     ok. | ||||||
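To illustrate how the two helpers above are meant to be driven, here is a hedged sketch; the applications, keys and the example_term_config/0 helper are invented and not part of this commit.

    example_term_config() ->
        %% Each {App, Vars} pair is applied with application:set_env/4 and
        %% [{persistent, true}], so the values survive a later reload of
        %% the application's .app file.
        ok = apply_erlang_term_based_config(
               [{ra,     [{wal_max_size_bytes, 64 * 1024 * 1024}]},
                {osiris, [{data_dir, "/var/lib/rabbitmq/osiris"}]}]),
        {ok, 67108864} = application:get_env(ra, wal_max_size_bytes),
        ok.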
| 
 | 
 | ||||||
| set_credentials_obfuscation_secret() -> | set_credentials_obfuscation_secret() -> | ||||||
|     rabbit_log_prelaunch:debug( |     ?LOG_DEBUG( | ||||||
|       "Refreshing credentials obfuscation configuration from env: ~p", |       "Refreshing credentials obfuscation configuration from env: ~p", | ||||||
|       [application:get_all_env(credentials_obfuscation)]), |       [application:get_all_env(credentials_obfuscation)], | ||||||
|  |       #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|     ok = credentials_obfuscation:refresh_config(), |     ok = credentials_obfuscation:refresh_config(), | ||||||
|     CookieBin = rabbit_data_coercion:to_binary(erlang:get_cookie()), |     CookieBin = rabbit_data_coercion:to_binary(erlang:get_cookie()), | ||||||
|     rabbit_log_prelaunch:debug( |     ?LOG_DEBUG( | ||||||
|       "Setting credentials obfuscation secret to '~s'", [CookieBin]), |       "Setting credentials obfuscation secret to '~s'", [CookieBin], | ||||||
|  |       #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|     ok = credentials_obfuscation:set_secret(CookieBin). |     ok = credentials_obfuscation:set_secret(CookieBin). | ||||||
| 
 | 
 | ||||||
| %% ------------------------------------------------------------------- | %% ------------------------------------------------------------------- | ||||||
|  | @ -397,7 +418,8 @@ set_credentials_obfuscation_secret() -> | ||||||
| %% ------------------------------------------------------------------- | %% ------------------------------------------------------------------- | ||||||
| 
 | 
 | ||||||
| decrypt_config(Apps) -> | decrypt_config(Apps) -> | ||||||
|     rabbit_log_prelaunch:debug("Decoding encrypted config values (if any)"), |     ?LOG_DEBUG("Decoding encrypted config values (if any)", | ||||||
|  |                #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|     ConfigEntryDecoder = application:get_env(rabbit, config_entry_decoder, []), |     ConfigEntryDecoder = application:get_env(rabbit, config_entry_decoder, []), | ||||||
|     decrypt_config(Apps, ConfigEntryDecoder). |     decrypt_config(Apps, ConfigEntryDecoder). | ||||||
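For context, decrypt_config/2 walks the application environments looking for values wrapped in an {encrypted, Binary} tuple (the format documented for RabbitMQ configuration value encryption) and replaces them in place once decrypted with the settings found under config_entry_decoder. A hedged sketch of such an environment follows; the cipher text is a placeholder and encrypted_env_example/0 is a hypothetical helper.

    encrypted_env_example() ->
        %% An encrypted value and the decoder settings used to decrypt it;
        %% the binary below is not real cipher text.
        ok = application:set_env(rabbit, default_pass,
                                 {encrypted, <<"placeholder-ciphertext">>}),
        ok = application:set_env(rabbit, config_entry_decoder,
                                 [{passphrase, <<"mypassphrase">>}]),
        ok.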
| 
 | 
 | ||||||
|  | @ -415,8 +437,9 @@ decrypt_app(App, [{Key, Value} | Tail], Algo) -> | ||||||
|                     {Value, Algo1} -> |                     {Value, Algo1} -> | ||||||
|                         Algo1; |                         Algo1; | ||||||
|                     {NewValue, Algo1} -> |                     {NewValue, Algo1} -> | ||||||
|                         rabbit_log_prelaunch:debug( |                         ?LOG_DEBUG( | ||||||
|                           "Value of `~s` decrypted", [Key]), |                           "Value of `~s` decrypted", [Key], | ||||||
|  |                           #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|                         ok = application:set_env(App, Key, NewValue, |                         ok = application:set_env(App, Key, NewValue, | ||||||
|                                                  [{persistent, true}]), |                                                  [{persistent, true}]), | ||||||
|                         Algo1 |                         Algo1 | ||||||
|  | @ -474,7 +497,8 @@ config_entry_decoder_to_algo(ConfigEntryDecoder) -> | ||||||
|     end. |     end. | ||||||
| 
 | 
 | ||||||
| get_passphrase(ConfigEntryDecoder) -> | get_passphrase(ConfigEntryDecoder) -> | ||||||
|     rabbit_log_prelaunch:debug("Getting encrypted config passphrase"), |     ?LOG_DEBUG("Getting encrypted config passphrase", | ||||||
|  |                #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|     case proplists:get_value(passphrase, ConfigEntryDecoder) of |     case proplists:get_value(passphrase, ConfigEntryDecoder) of | ||||||
|         prompt -> |         prompt -> | ||||||
|             IoDevice = get_input_iodevice(), |             IoDevice = get_input_iodevice(), | ||||||
|  |  | ||||||
|  | @ -1,12 +1,19 @@ | ||||||
| -module(rabbit_prelaunch_dist). | -module(rabbit_prelaunch_dist). | ||||||
| 
 | 
 | ||||||
|  | -include_lib("kernel/include/logger.hrl"). | ||||||
|  | 
 | ||||||
|  | -include_lib("rabbit_common/include/logging.hrl"). | ||||||
|  | 
 | ||||||
| -export([setup/1]). | -export([setup/1]). | ||||||
| 
 | 
 | ||||||
| setup(#{nodename := Node, nodename_type := NameType} = Context) -> | setup(#{nodename := Node, nodename_type := NameType} = Context) -> | ||||||
|     rabbit_log_prelaunch:debug(""), |     ?LOG_DEBUG( | ||||||
|     rabbit_log_prelaunch:debug("== Erlang distribution =="), |        "~n== Erlang distribution ==", [], | ||||||
|     rabbit_log_prelaunch:debug("Rqeuested node name: ~s (type: ~s)", |        #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|                                [Node, NameType]), |     ?LOG_DEBUG( | ||||||
|  |        "Rqeuested node name: ~s (type: ~s)", | ||||||
|  |        [Node, NameType], | ||||||
|  |        #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|     case node() of |     case node() of | ||||||
|         nonode@nohost -> |         nonode@nohost -> | ||||||
|             ok = rabbit_nodes_common:ensure_epmd(), |             ok = rabbit_nodes_common:ensure_epmd(), | ||||||
|  | @ -16,8 +23,9 @@ setup(#{nodename := Node, nodename_type := NameType} = Context) -> | ||||||
| 
 | 
 | ||||||
|             ok = do_setup(Context); |             ok = do_setup(Context); | ||||||
|         Node -> |         Node -> | ||||||
|             rabbit_log_prelaunch:debug( |             ?LOG_DEBUG( | ||||||
|               "Erlang distribution already running", []), |               "Erlang distribution already running", [], | ||||||
|  |               #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|             ok; |             ok; | ||||||
|         Unexpected -> |         Unexpected -> | ||||||
|             throw({error, {erlang_dist_running_with_unexpected_nodename, |             throw({error, {erlang_dist_running_with_unexpected_nodename, | ||||||
|  | @ -26,7 +34,9 @@ setup(#{nodename := Node, nodename_type := NameType} = Context) -> | ||||||
|     ok. |     ok. | ||||||
| 
 | 
 | ||||||
| do_setup(#{nodename := Node, nodename_type := NameType}) -> | do_setup(#{nodename := Node, nodename_type := NameType}) -> | ||||||
|     rabbit_log_prelaunch:debug("Starting Erlang distribution", []), |     ?LOG_DEBUG( | ||||||
|  |        "Starting Erlang distribution", | ||||||
|  |        #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|     case application:get_env(kernel, net_ticktime) of |     case application:get_env(kernel, net_ticktime) of | ||||||
|         {ok, Ticktime} when is_integer(Ticktime) andalso Ticktime >= 1 -> |         {ok, Ticktime} when is_integer(Ticktime) andalso Ticktime >= 1 -> | ||||||
|             %% The value passed to net_kernel:start/1 is the |             %% The value passed to net_kernel:start/1 is the | ||||||
|  | @ -43,8 +53,9 @@ do_setup(#{nodename := Node, nodename_type := NameType}) -> | ||||||
| 
 | 
 | ||||||
| %% Check whether a node with the same name is already running | %% Check whether a node with the same name is already running | ||||||
| duplicate_node_check(#{split_nodename := {NodeName, NodeHost}}) -> | duplicate_node_check(#{split_nodename := {NodeName, NodeHost}}) -> | ||||||
|     rabbit_log_prelaunch:debug( |     ?LOG_DEBUG( | ||||||
|       "Checking if node name ~s is already used", [NodeName]), |       "Checking if node name ~s is already used", [NodeName], | ||||||
|  |       #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|     PrelaunchName = rabbit_nodes_common:make( |     PrelaunchName = rabbit_nodes_common:make( | ||||||
|                       {NodeName ++ "_prelaunch_" ++ os:getpid(), |                       {NodeName ++ "_prelaunch_" ++ os:getpid(), | ||||||
|                        "localhost"}), |                        "localhost"}), | ||||||
|  | @ -63,8 +74,9 @@ duplicate_node_check(#{split_nodename := {NodeName, NodeHost}}) -> | ||||||
|     end. |     end. | ||||||
| 
 | 
 | ||||||
| dist_port_range_check(#{erlang_dist_tcp_port := DistTcpPort}) -> | dist_port_range_check(#{erlang_dist_tcp_port := DistTcpPort}) -> | ||||||
|     rabbit_log_prelaunch:debug( |     ?LOG_DEBUG( | ||||||
|       "Checking if TCP port ~b is valid", [DistTcpPort]), |       "Checking if TCP port ~b is valid", [DistTcpPort], | ||||||
|  |       #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|     case DistTcpPort of |     case DistTcpPort of | ||||||
|         _ when DistTcpPort < 1 orelse DistTcpPort > 65535 -> |         _ when DistTcpPort < 1 orelse DistTcpPort > 65535 -> | ||||||
|             throw({error, {invalid_dist_port_range, DistTcpPort}}); |             throw({error, {invalid_dist_port_range, DistTcpPort}}); | ||||||
|  | @ -74,8 +86,9 @@ dist_port_range_check(#{erlang_dist_tcp_port := DistTcpPort}) -> | ||||||
| 
 | 
 | ||||||
| dist_port_use_check(#{split_nodename := {_, NodeHost}, | dist_port_use_check(#{split_nodename := {_, NodeHost}, | ||||||
|                       erlang_dist_tcp_port := DistTcpPort}) -> |                       erlang_dist_tcp_port := DistTcpPort}) -> | ||||||
|     rabbit_log_prelaunch:debug( |     ?LOG_DEBUG( | ||||||
|       "Checking if TCP port ~b is available", [DistTcpPort]), |        "Checking if TCP port ~b is available", [DistTcpPort], | ||||||
|  |        #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|     dist_port_use_check_ipv4(NodeHost, DistTcpPort). |     dist_port_use_check_ipv4(NodeHost, DistTcpPort). | ||||||
| 
 | 
 | ||||||
| dist_port_use_check_ipv4(NodeHost, Port) -> | dist_port_use_check_ipv4(NodeHost, Port) -> | ||||||
|  |  | ||||||
|  | @ -1,60 +1,126 @@ | ||||||
|  | %% This Source Code Form is subject to the terms of the Mozilla Public | ||||||
|  | %% License, v. 2.0. If a copy of the MPL was not distributed with this | ||||||
|  | %% file, You can obtain one at https://mozilla.org/MPL/2.0/. | ||||||
|  | %% | ||||||
|  | %% Copyright (c) 2019-2021 VMware, Inc. or its affiliates.  All rights reserved. | ||||||
|  | %% | ||||||
|  | 
 | ||||||
| -module(rabbit_prelaunch_early_logging). | -module(rabbit_prelaunch_early_logging). | ||||||
| 
 | 
 | ||||||
| -include_lib("rabbit_common/include/rabbit_log.hrl"). | -include_lib("kernel/include/logger.hrl"). | ||||||
| 
 | 
 | ||||||
| -export([setup_early_logging/2, | -include_lib("rabbit_common/include/logging.hrl"). | ||||||
|  | 
 | ||||||
|  | -export([setup_early_logging/1, | ||||||
|  |          default_formatter/1, | ||||||
|  |          default_console_formatter/1, | ||||||
|  |          default_file_formatter/1, | ||||||
|  |          default_syslog_formatter/1, | ||||||
|          enable_quick_dbg/1, |          enable_quick_dbg/1, | ||||||
|          use_colored_logging/0, |          use_colored_logging/0, | ||||||
|          use_colored_logging/1, |          use_colored_logging/1]). | ||||||
|          list_expected_sinks/0]). | -export([filter_log_event/2]). | ||||||
| 
 | 
 | ||||||
| setup_early_logging(#{log_levels := undefined} = Context, | -define(CONFIGURED_KEY, {?MODULE, configured}). | ||||||
|                          LagerEventToStdout) -> | 
 | ||||||
|     setup_early_logging(Context#{log_levels => get_default_log_level()}, | setup_early_logging(#{log_levels := undefined} = Context) -> | ||||||
|                              LagerEventToStdout); |     setup_early_logging(Context#{log_levels => get_default_log_level()}); | ||||||
| setup_early_logging(Context, LagerEventToStdout) -> | setup_early_logging(Context) -> | ||||||
|     Configured = lists:member( |     case is_configured() of | ||||||
|                    lager_util:make_internal_sink_name(rabbit_log_prelaunch), |  | ||||||
|                    lager:list_all_sinks()), |  | ||||||
|     case Configured of |  | ||||||
|         true  -> ok; |         true  -> ok; | ||||||
|         false -> do_setup_early_logging(Context, LagerEventToStdout) |         false -> do_setup_early_logging(Context) | ||||||
|     end. |     end. | ||||||
| 
 | 
 | ||||||
| get_default_log_level() -> | get_default_log_level() -> | ||||||
|     #{"prelaunch" => warning}. |     #{"prelaunch" => notice}. | ||||||
| 
 | 
 | ||||||
| do_setup_early_logging(#{log_levels := LogLevels} = Context, | do_setup_early_logging(#{log_levels := LogLevels} = Context) -> | ||||||
|                        LagerEventToStdout) -> |     add_rmqlog_filter(LogLevels), | ||||||
|     redirect_logger_messages_to_lager(), |     ok = logger:update_handler_config( | ||||||
|     Colored = use_colored_logging(Context), |            default, main_handler_config(Context)). | ||||||
|     application:set_env(lager, colored, Colored), | 
 | ||||||
|     ConsoleBackend = lager_console_backend, | is_configured() -> | ||||||
|     case LagerEventToStdout of |     persistent_term:get(?CONFIGURED_KEY, false). | ||||||
|         true -> | 
 | ||||||
|             GLogLevel = case LogLevels of | add_rmqlog_filter(LogLevels) -> | ||||||
|                             #{global := Level} -> Level; |     add_erlang_specific_filters(LogLevels), | ||||||
|                             _                  -> warning |     FilterConfig0 = lists:foldl( | ||||||
|                         end, |                       fun | ||||||
|             _ = lager_app:start_handler( |                           ({_, V}, FC) when is_boolean(V) -> FC; | ||||||
|                   lager_event, ConsoleBackend, [{level, GLogLevel}]), |                           ({K, V}, FC) when is_atom(K) -> FC#{K => V}; | ||||||
|             ok; |                           ({K, V}, FC) -> FC#{list_to_atom(K) => V} | ||||||
|         false -> |                       end, #{}, maps:to_list(LogLevels)), | ||||||
|             ok |     FilterConfig1 = case maps:is_key(global, FilterConfig0) of | ||||||
|     end, |                         true  -> FilterConfig0; | ||||||
|     lists:foreach( |                         false -> FilterConfig0#{global => ?DEFAULT_LOG_LEVEL} | ||||||
|       fun(Sink) -> |                     end, | ||||||
|               CLogLevel = get_log_level(LogLevels, Sink), |     ok = logger:add_handler_filter( | ||||||
|               lager_app:configure_sink( |            default, ?FILTER_NAME, {fun filter_log_event/2, FilterConfig1}), | ||||||
|                 Sink, |     ok = logger:set_primary_config(level, all), | ||||||
|                 [{handlers, [{ConsoleBackend, [{level, CLogLevel}]}]}]) |     ok = persistent_term:put(?CONFIGURED_KEY, true). | ||||||
|       end, list_expected_sinks()), | 
 | ||||||
|  | add_erlang_specific_filters(_) -> | ||||||
|  |     _ = logger:add_handler_filter( | ||||||
|  |           default, progress_reports, {fun logger_filters:progress/2, stop}), | ||||||
|     ok. |     ok. | ||||||
| 
 | 
 | ||||||
| redirect_logger_messages_to_lager() -> | filter_log_event( | ||||||
|     io:format(standard_error, "Configuring logger redirection~n", []), |   #{meta := #{domain := ?RMQLOG_DOMAIN_GLOBAL}} = LogEvent, | ||||||
|     ok = logger:add_handler(rabbit_log, rabbit_log, #{}), |   FilterConfig) -> | ||||||
|     ok = logger:set_primary_config(level, all). |     MinLevel = get_min_level(global, FilterConfig), | ||||||
|  |     do_filter_log_event(LogEvent, MinLevel); | ||||||
|  | filter_log_event( | ||||||
|  |   #{meta := #{domain := [?RMQLOG_SUPER_DOMAIN_NAME, CatName | _]}} = LogEvent, | ||||||
|  |   FilterConfig) -> | ||||||
|  |     MinLevel = get_min_level(CatName, FilterConfig), | ||||||
|  |     do_filter_log_event(LogEvent, MinLevel); | ||||||
|  | filter_log_event( | ||||||
|  |   #{meta := #{domain := [CatName | _]}} = LogEvent, | ||||||
|  |   FilterConfig) -> | ||||||
|  |     MinLevel = get_min_level(CatName, FilterConfig), | ||||||
|  |     do_filter_log_event(LogEvent, MinLevel); | ||||||
|  | filter_log_event(LogEvent, FilterConfig) -> | ||||||
|  |     MinLevel = get_min_level(global, FilterConfig), | ||||||
|  |     do_filter_log_event(LogEvent, MinLevel). | ||||||
|  | 
 | ||||||
|  | get_min_level(global, FilterConfig) -> | ||||||
|  |     maps:get(global, FilterConfig, none); | ||||||
|  | get_min_level(CatName, FilterConfig) -> | ||||||
|  |     case maps:is_key(CatName, FilterConfig) of | ||||||
|  |         true  -> maps:get(CatName, FilterConfig); | ||||||
|  |         false -> get_min_level(global, FilterConfig) | ||||||
|  |     end. | ||||||
|  | 
 | ||||||
|  | do_filter_log_event(_, none) -> | ||||||
|  |     stop; | ||||||
|  | do_filter_log_event(#{level := Level} = LogEvent, MinLevel) -> | ||||||
|  |     case logger:compare_levels(Level, MinLevel) of | ||||||
|  |         lt -> stop; | ||||||
|  |         _  -> LogEvent | ||||||
|  |     end. | ||||||
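To make the category resolution concrete, here is a hedged sketch of how the filter above behaves, assuming ?RMQLOG_SUPER_DOMAIN_NAME expands to the atom rabbitmq; filter_example/0 is a hypothetical helper, not part of this commit.

    filter_example() ->
        FC = #{prelaunch => debug, global => notice},
        %% A debug event in the [rabbitmq, prelaunch] domain: the
        %% "prelaunch" category is configured at debug, so it is kept.
        Prelaunch = #{level => debug,
                      msg => {string, <<"prelaunch event">>},
                      meta => #{domain => [rabbitmq, prelaunch]}},
        Prelaunch = rabbit_prelaunch_early_logging:filter_log_event(Prelaunch, FC),
        %% An event without a domain falls back to the global level
        %% (notice here), so an info-level event is dropped.
        Uncategorised = #{level => info,
                          msg => {string, <<"misc event">>},
                          meta => #{}},
        stop = rabbit_prelaunch_early_logging:filter_log_event(Uncategorised, FC),
        ok.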
|  | 
 | ||||||
|  | main_handler_config(Context) -> | ||||||
|  |     #{filter_default => log, | ||||||
|  |       formatter => default_formatter(Context)}. | ||||||
|  | 
 | ||||||
|  | default_formatter(#{log_levels := #{json := true}}) -> | ||||||
|  |     {rabbit_logger_json_fmt, #{}}; | ||||||
|  | default_formatter(Context) -> | ||||||
|  |     Color = use_colored_logging(Context), | ||||||
|  |     {rabbit_logger_text_fmt, #{color => Color}}. | ||||||
|  | 
 | ||||||
|  | default_console_formatter(Context) -> | ||||||
|  |     default_formatter(Context). | ||||||
|  | 
 | ||||||
|  | default_file_formatter(Context) -> | ||||||
|  |     default_formatter(Context#{output_supports_colors => false}). | ||||||
|  | 
 | ||||||
|  | default_syslog_formatter(Context) -> | ||||||
|  |     {Module, Config} = default_file_formatter(Context), | ||||||
|  |     case Module of | ||||||
|  |         rabbit_logger_text_fmt -> {Module, Config#{prefix => false}}; | ||||||
|  |         rabbit_logger_json_fmt -> {Module, Config} | ||||||
|  |     end. | ||||||
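A short hedged sketch of how the formatter selection above resolves for two contexts; the maps and the formatter_example/0 helper are invented for illustration.

    formatter_example() ->
        %% JSON output requested through the log_levels map.
        {rabbit_logger_json_fmt, _} =
            rabbit_prelaunch_early_logging:default_formatter(
              #{log_levels => #{json => true}}),
        %% No color flag in the context, so the text formatter is chosen
        %% with color disabled.
        {rabbit_logger_text_fmt, #{color := false}} =
            rabbit_prelaunch_early_logging:default_formatter(
              #{log_levels => #{"prelaunch" => notice}}),
        ok.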
| 
 | 
 | ||||||
| use_colored_logging() -> | use_colored_logging() -> | ||||||
|     use_colored_logging(rabbit_prelaunch:get_context()). |     use_colored_logging(rabbit_prelaunch:get_context()). | ||||||
|  | @ -65,45 +131,6 @@ use_colored_logging(#{log_levels := #{color := true}, | ||||||
| use_colored_logging(_) -> | use_colored_logging(_) -> | ||||||
|     false. |     false. | ||||||
| 
 | 
 | ||||||
| list_expected_sinks() -> |  | ||||||
|     Key = {?MODULE, lager_extra_sinks}, |  | ||||||
|     case persistent_term:get(Key, undefined) of |  | ||||||
|         undefined -> |  | ||||||
|             CompileOptions = proplists:get_value(options, |  | ||||||
|                                                  module_info(compile), |  | ||||||
|                                                  []), |  | ||||||
|             AutoList = [lager_util:make_internal_sink_name(M) |  | ||||||
|                         || M <- proplists:get_value(lager_extra_sinks, |  | ||||||
|                                                     CompileOptions, [])], |  | ||||||
|             List = case lists:member(?LAGER_SINK, AutoList) of |  | ||||||
|                 true  -> AutoList; |  | ||||||
|                 false -> [?LAGER_SINK | AutoList] |  | ||||||
|             end, |  | ||||||
|             %% Store the list in the application environment. If this |  | ||||||
|             %% module is later cover-compiled, the compile option will |  | ||||||
|             %% be lost, so we will be able to retrieve the list from the |  | ||||||
|             %% application environment. |  | ||||||
|             persistent_term:put(Key, List), |  | ||||||
|             List; |  | ||||||
|         List -> |  | ||||||
|             List |  | ||||||
|     end. |  | ||||||
| 
 |  | ||||||
| sink_to_category(Sink) when is_atom(Sink) -> |  | ||||||
|     re:replace( |  | ||||||
|       atom_to_list(Sink), |  | ||||||
|       "^rabbit_log_(.+)_lager_event$", |  | ||||||
|       "\\1", |  | ||||||
|       [{return, list}]). |  | ||||||
| 
 |  | ||||||
| get_log_level(LogLevels, Sink) -> |  | ||||||
|     Category = sink_to_category(Sink), |  | ||||||
|     case LogLevels of |  | ||||||
|         #{Category := Level} -> Level; |  | ||||||
|         #{global := Level}   -> Level; |  | ||||||
|         _                    -> warning |  | ||||||
|     end. |  | ||||||
| 
 |  | ||||||
| enable_quick_dbg(#{dbg_output := Output, dbg_mods := Mods}) -> | enable_quick_dbg(#{dbg_output := Output, dbg_mods := Mods}) -> | ||||||
|     case Output of |     case Output of | ||||||
|         stdout -> {ok, _} = dbg:tracer(), |         stdout -> {ok, _} = dbg:tracer(), | ||||||
|  |  | ||||||
|  | @ -1,25 +1,32 @@ | ||||||
| -module(rabbit_prelaunch_erlang_compat). | -module(rabbit_prelaunch_erlang_compat). | ||||||
| 
 | 
 | ||||||
|  | -include_lib("kernel/include/logger.hrl"). | ||||||
|  | 
 | ||||||
|  | -include_lib("rabbit_common/include/logging.hrl"). | ||||||
|  | 
 | ||||||
| -export([check/1]). | -export([check/1]). | ||||||
| 
 | 
 | ||||||
| -define(OTP_MINIMUM, "23.0"). | -define(OTP_MINIMUM, "23.0"). | ||||||
| -define(ERTS_MINIMUM, "11.1"). | -define(ERTS_MINIMUM, "11.1"). | ||||||
| 
 | 
 | ||||||
| check(_Context) -> | check(_Context) -> | ||||||
|     rabbit_log_prelaunch:debug(""), |     ?LOG_DEBUG( | ||||||
|     rabbit_log_prelaunch:debug("== Erlang/OTP compatibility check =="), |        "~n== Erlang/OTP compatibility check ==", [], | ||||||
|  |        #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
| 
 | 
 | ||||||
|     ERTSVer = erlang:system_info(version), |     ERTSVer = erlang:system_info(version), | ||||||
|     OTPRel = rabbit_misc:otp_release(), |     OTPRel = rabbit_misc:otp_release(), | ||||||
|     rabbit_log_prelaunch:debug( |     ?LOG_DEBUG( | ||||||
|       "Requiring: Erlang/OTP ~s (ERTS ~s)", [?OTP_MINIMUM, ?ERTS_MINIMUM]), |       "Requiring: Erlang/OTP ~s (ERTS ~s)~n" | ||||||
|     rabbit_log_prelaunch:debug( |       "Running:   Erlang/OTP ~s (ERTS ~s)", | ||||||
|       "Running:   Erlang/OTP ~s (ERTS ~s)", [OTPRel, ERTSVer]), |       [?OTP_MINIMUM, ?ERTS_MINIMUM, OTPRel, ERTSVer], | ||||||
|  |       #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
| 
 | 
 | ||||||
|     case rabbit_misc:version_compare(?ERTS_MINIMUM, ERTSVer, lte) of |     case rabbit_misc:version_compare(?ERTS_MINIMUM, ERTSVer, lte) of | ||||||
|         true when ?ERTS_MINIMUM =/= ERTSVer -> |         true when ?ERTS_MINIMUM =/= ERTSVer -> | ||||||
|             rabbit_log_prelaunch:debug( |             ?LOG_DEBUG( | ||||||
|               "Erlang/OTP version requirement satisfied"), |               "Erlang/OTP version requirement satisfied", [], | ||||||
|  |               #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|             ok; |             ok; | ||||||
|         true when ?ERTS_MINIMUM =:= ERTSVer andalso ?OTP_MINIMUM =< OTPRel -> |         true when ?ERTS_MINIMUM =:= ERTSVer andalso ?OTP_MINIMUM =< OTPRel -> | ||||||
|             %% When a critical regression or bug is found, a new OTP |             %% When a critical regression or bug is found, a new OTP | ||||||
|  | @ -35,7 +42,7 @@ check(_Context) -> | ||||||
|             "This RabbitMQ version cannot run on Erlang ~s (erts ~s): " |             "This RabbitMQ version cannot run on Erlang ~s (erts ~s): " | ||||||
|             "minimum required version is ~s (erts ~s)", |             "minimum required version is ~s (erts ~s)", | ||||||
|             Args = [OTPRel, ERTSVer, ?OTP_MINIMUM, ?ERTS_MINIMUM], |             Args = [OTPRel, ERTSVer, ?OTP_MINIMUM, ?ERTS_MINIMUM], | ||||||
|             rabbit_log_prelaunch:error(Msg, Args), |             ?LOG_ERROR(Msg, Args, #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
| 
 | 
 | ||||||
|             %% Also print to stderr to make this more visible |             %% Also print to stderr to make this more visible | ||||||
|             io:format(standard_error, "Error: " ++ Msg ++ "~n", Args), |             io:format(standard_error, "Error: " ++ Msg ++ "~n", Args), | ||||||
|  |  | ||||||
|  | @ -1,5 +1,9 @@ | ||||||
| -module(rabbit_prelaunch_errors). | -module(rabbit_prelaunch_errors). | ||||||
| 
 | 
 | ||||||
|  | -include_lib("kernel/include/logger.hrl"). | ||||||
|  | 
 | ||||||
|  | -include_lib("rabbit_common/include/logging.hrl"). | ||||||
|  | 
 | ||||||
| -export([format_error/1, | -export([format_error/1, | ||||||
|          format_exception/3, |          format_exception/3, | ||||||
|          log_error/1, |          log_error/1, | ||||||
|  | @ -94,9 +98,19 @@ log_exception(Class, Exception, Stacktrace) -> | ||||||
|     log_message(Message). |     log_message(Message). | ||||||
| 
 | 
 | ||||||
| format_exception(Class, Exception, Stacktrace) -> | format_exception(Class, Exception, Stacktrace) -> | ||||||
|  |     StacktraceStrs = [case proplists:get_value(line, Props) of | ||||||
|  |                           undefined -> | ||||||
|  |                               io_lib:format("    ~ts:~ts/~b", | ||||||
|  |                                             [Mod, Fun, Arity]); | ||||||
|  |                           Line -> | ||||||
|  |                               io_lib:format("    ~ts:~ts/~b, line ~b", | ||||||
|  |                                             [Mod, Fun, Arity, Line]) | ||||||
|  |                       end | ||||||
|  |                       || {Mod, Fun, Arity, Props} <- Stacktrace], | ||||||
|  |     ExceptionStr = io_lib:format("~ts:~0p", [Class, Exception]), | ||||||
|     rabbit_misc:format( |     rabbit_misc:format( | ||||||
|       "Exception during startup:~n~s", |       "Exception during startup:~n~n~s~n~n~s", | ||||||
|       [lager:pr_stacktrace(Stacktrace, {Class, Exception})]). |       [ExceptionStr, string:join(StacktraceStrs, "\n")]). | ||||||
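For reference, a hedged sketch of calling the reworked format_exception/3; the stacktrace entries and the format_exception_example/0 helper are invented.

    format_exception_example() ->
        Stacktrace = [{rabbit, start, 2, [{file, "rabbit.erl"}, {line, 123}]},
                      {init, boot, 1, []}],
        %% Produces "Exception during startup:" followed by "error:badarith"
        %% and one indented Mod:Fun/Arity entry per stack frame, with the
        %% line number appended when it is known.
        Formatted = rabbit_prelaunch_errors:format_exception(
                      error, badarith, Stacktrace),
        io:format("~ts~n", [Formatted]).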
| 
 | 
 | ||||||
| log_message(Message) -> | log_message(Message) -> | ||||||
|     Lines = string:split( |     Lines = string:split( | ||||||
|  | @ -105,9 +119,11 @@ log_message(Message) -> | ||||||
|               ?BOOT_FAILED_FOOTER, |               ?BOOT_FAILED_FOOTER, | ||||||
|               [$\n], |               [$\n], | ||||||
|               all), |               all), | ||||||
|  |     ?LOG_ERROR( | ||||||
|  |        "~s", [string:join(Lines, "\n")], | ||||||
|  |        #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|     lists:foreach( |     lists:foreach( | ||||||
|       fun(Line) -> |       fun(Line) -> | ||||||
|               rabbit_log_prelaunch:error("~s", [Line]), |  | ||||||
|               io:format(standard_error, "~s~n", [Line]) |               io:format(standard_error, "~s~n", [Line]) | ||||||
|       end, Lines), |       end, Lines), | ||||||
|     timer:sleep(1000), |     timer:sleep(1000), | ||||||
|  |  | ||||||
|  | @ -69,13 +69,13 @@ handle_event(Signal, State) -> | ||||||
|         %% which should stop RabbitMQ. |         %% which should stop RabbitMQ. | ||||||
|         % |         % | ||||||
|         %#{Signal := stop} -> |         %#{Signal := stop} -> | ||||||
|         %    error_logger:info_msg( |         %    logger:info( | ||||||
|         %      "~s received - shutting down~n", |         %      "~s received - shutting down", | ||||||
|         %      [string:uppercase(atom_to_list(Signal))]), |         %      [string:uppercase(atom_to_list(Signal))]), | ||||||
|         %    ok = init:stop(); |         %    ok = init:stop(); | ||||||
|         _ -> |         _ -> | ||||||
|             error_logger:info_msg( |             logger:info( | ||||||
|               "~s received - unhandled signal~n", |               "~s received - unhandled signal", | ||||||
|               [string:uppercase(atom_to_list(Signal))]) |               [string:uppercase(atom_to_list(Signal))]) | ||||||
|     end, |     end, | ||||||
|     {ok, State}. |     {ok, State}. | ||||||
|  |  | ||||||
|  | @ -0,0 +1,212 @@ | ||||||
|  | -module(rabbit_logger_std_h_SUITE). | ||||||
|  | 
 | ||||||
|  | -include_lib("common_test/include/ct.hrl"). | ||||||
|  | -include_lib("eunit/include/eunit.hrl"). | ||||||
|  | 
 | ||||||
|  | -export([all/0, | ||||||
|  |          groups/0, | ||||||
|  |          init_per_suite/1, | ||||||
|  |          end_per_suite/1, | ||||||
|  |          init_per_group/2, | ||||||
|  |          end_per_group/2, | ||||||
|  |          init_per_testcase/2, | ||||||
|  |          end_per_testcase/2, | ||||||
|  | 
 | ||||||
|  |          every_day_rotation_is_detected/1, | ||||||
|  |          every_week_rotation_is_detected/1, | ||||||
|  |          every_month_rotation_is_detected/1 | ||||||
|  |         ]). | ||||||
|  | 
 | ||||||
|  | all() -> | ||||||
|  |     [ | ||||||
|  |      {group, parallel_tests} | ||||||
|  |     ]. | ||||||
|  | 
 | ||||||
|  | groups() -> | ||||||
|  |     [ | ||||||
|  |      {parallel_tests, [parallel], [every_day_rotation_is_detected, | ||||||
|  |                                    every_week_rotation_is_detected, | ||||||
|  |                                    every_month_rotation_is_detected]} | ||||||
|  |     ]. | ||||||
|  | 
 | ||||||
|  | init_per_suite(Config) -> Config. | ||||||
|  | end_per_suite(Config) -> Config. | ||||||
|  | 
 | ||||||
|  | init_per_group(_, Config) -> Config. | ||||||
|  | end_per_group(_, Config) -> Config. | ||||||
|  | 
 | ||||||
|  | init_per_testcase(_, Config) -> Config. | ||||||
|  | end_per_testcase(_, Config) -> Config. | ||||||
|  | 
 | ||||||
|  | every_day_rotation_is_detected(_) -> | ||||||
|  |     ?assertNot( | ||||||
|  |       rabbit_logger_std_h:is_date_based_rotation_needed( | ||||||
|  |         #{every => day, hour => 12}, | ||||||
|  |         {{2021, 01, 15}, {10, 00, 00}}, | ||||||
|  |         {{2021, 01, 15}, {11, 00, 00}})), | ||||||
|  |     ?assert( | ||||||
|  |       rabbit_logger_std_h:is_date_based_rotation_needed( | ||||||
|  |         #{every => day, hour => 12}, | ||||||
|  |         {{2021, 01, 15}, {10, 00, 00}}, | ||||||
|  |         {{2021, 01, 15}, {12, 00, 00}})), | ||||||
|  |     ?assert( | ||||||
|  |       rabbit_logger_std_h:is_date_based_rotation_needed( | ||||||
|  |         #{every => day, hour => 12}, | ||||||
|  |         {{2021, 01, 15}, {10, 00, 00}}, | ||||||
|  |         {{2021, 01, 15}, {13, 00, 00}})), | ||||||
|  |     ?assert( | ||||||
|  |       rabbit_logger_std_h:is_date_based_rotation_needed( | ||||||
|  |         #{every => day, hour => 12}, | ||||||
|  |         {{2021, 01, 15}, {11, 00, 00}}, | ||||||
|  |         {{2021, 01, 15}, {13, 00, 00}})), | ||||||
|  |     ?assertNot( | ||||||
|  |       rabbit_logger_std_h:is_date_based_rotation_needed( | ||||||
|  |         #{every => day, hour => 12}, | ||||||
|  |         {{2021, 01, 15}, {12, 00, 00}}, | ||||||
|  |         {{2021, 01, 15}, {13, 00, 00}})), | ||||||
|  |     ?assert( | ||||||
|  |       rabbit_logger_std_h:is_date_based_rotation_needed( | ||||||
|  |         #{every => day, hour => 12}, | ||||||
|  |         {{2021, 01, 14}, {12, 00, 00}}, | ||||||
|  |         {{2021, 01, 15}, {12, 00, 00}})), | ||||||
|  |     ?assertNot( | ||||||
|  |       rabbit_logger_std_h:is_date_based_rotation_needed( | ||||||
|  |         #{every => day, hour => 12}, | ||||||
|  |         {{2021, 01, 14}, {12, 00, 00}}, | ||||||
|  |         {{2021, 01, 15}, {11, 00, 00}})), | ||||||
|  |     ?assert( | ||||||
|  |       rabbit_logger_std_h:is_date_based_rotation_needed( | ||||||
|  |         #{every => day, hour => 12}, | ||||||
|  |         {{2020, 11, 15}, {12, 00, 00}}, | ||||||
|  |         {{2021, 01, 15}, {11, 00, 00}})), | ||||||
|  |     ?assert( | ||||||
|  |       rabbit_logger_std_h:is_date_based_rotation_needed( | ||||||
|  |         #{every => day, hour => 12}, | ||||||
|  |         {{2020, 11, 15}, {12, 00, 00}}, | ||||||
|  |         {{2021, 01, 15}, {12, 00, 00}})). | ||||||
|  | 
 | ||||||
|  | every_week_rotation_is_detected(_) -> | ||||||
|  |     ?assertNot( | ||||||
|  |       rabbit_logger_std_h:is_date_based_rotation_needed( | ||||||
|  |         #{every => week, day_of_week => 3, hour => 12}, | ||||||
|  |         {{2021, 01, 11}, {12, 00, 00}}, | ||||||
|  |         {{2021, 01, 12}, {12, 00, 00}})), | ||||||
|  |     ?assert( | ||||||
|  |       rabbit_logger_std_h:is_date_based_rotation_needed( | ||||||
|  |         #{every => week, day_of_week => 3, hour => 12}, | ||||||
|  |         {{2021, 01, 11}, {12, 00, 00}}, | ||||||
|  |         {{2021, 01, 13}, {12, 00, 00}})), | ||||||
|  |     ?assert( | ||||||
|  |       rabbit_logger_std_h:is_date_based_rotation_needed( | ||||||
|  |         #{every => week, day_of_week => 3, hour => 12}, | ||||||
|  |         {{2021, 01, 11}, {12, 00, 00}}, | ||||||
|  |         {{2021, 01, 14}, {12, 00, 00}})), | ||||||
|  |     ?assertNot( | ||||||
|  |       rabbit_logger_std_h:is_date_based_rotation_needed( | ||||||
|  |         #{every => week, day_of_week => 3, hour => 12}, | ||||||
|  |         {{2021, 01, 13}, {12, 00, 00}}, | ||||||
|  |         {{2021, 01, 14}, {12, 00, 00}})), | ||||||
|  |     ?assertNot( | ||||||
|  |       rabbit_logger_std_h:is_date_based_rotation_needed( | ||||||
|  |         #{every => week, day_of_week => 3, hour => 12}, | ||||||
|  |         {{2021, 01, 14}, {12, 00, 00}}, | ||||||
|  |         {{2021, 01, 15}, {12, 00, 00}})), | ||||||
|  |     ?assert( | ||||||
|  |       rabbit_logger_std_h:is_date_based_rotation_needed( | ||||||
|  |         #{every => week, day_of_week => 3, hour => 12}, | ||||||
|  |         {{2021, 01, 13}, {11, 00, 00}}, | ||||||
|  |         {{2021, 01, 13}, {12, 00, 00}})), | ||||||
|  |     ?assert( | ||||||
|  |       rabbit_logger_std_h:is_date_based_rotation_needed( | ||||||
|  |         #{every => week, day_of_week => 3, hour => 12}, | ||||||
|  |         {{2021, 01, 06}, {12, 00, 00}}, | ||||||
|  |         {{2021, 01, 13}, {12, 00, 00}})), | ||||||
|  |     ?assert( | ||||||
|  |       rabbit_logger_std_h:is_date_based_rotation_needed( | ||||||
|  |         #{every => week, day_of_week => 3, hour => 12}, | ||||||
|  |         {{2021, 01, 07}, {12, 00, 00}}, | ||||||
|  |         {{2021, 01, 14}, {12, 00, 00}})), | ||||||
|  |     ?assertNot( | ||||||
|  |       rabbit_logger_std_h:is_date_based_rotation_needed( | ||||||
|  |         #{every => week, day_of_week => 3, hour => 12}, | ||||||
|  |         {{2021, 01, 06}, {12, 00, 00}}, | ||||||
|  |         {{2021, 01, 12}, {12, 00, 00}})), | ||||||
|  |     ?assert( | ||||||
|  |       rabbit_logger_std_h:is_date_based_rotation_needed( | ||||||
|  |         #{every => week, day_of_week => 3, hour => 12}, | ||||||
|  |         {{2021, 01, 06}, {11, 00, 00}}, | ||||||
|  |         {{2021, 01, 12}, {12, 00, 00}})), | ||||||
|  |     ?assertNot( | ||||||
|  |       rabbit_logger_std_h:is_date_based_rotation_needed( | ||||||
|  |         #{every => week, day_of_week => 3, hour => 12}, | ||||||
|  |         {{2021, 01, 06}, {12, 00, 00}}, | ||||||
|  |         {{2021, 01, 13}, {11, 00, 00}})). | ||||||
|  | 
 | ||||||
|  | every_month_rotation_is_detected(_) -> | ||||||
|  |     ?assertNot( | ||||||
|  |       rabbit_logger_std_h:is_date_based_rotation_needed( | ||||||
|  |         #{every => month, day_of_month => 15, hour => 12}, | ||||||
|  |         {{2021, 01, 15}, {10, 00, 00}}, | ||||||
|  |         {{2021, 01, 15}, {11, 00, 00}})), | ||||||
|  |     ?assert( | ||||||
|  |       rabbit_logger_std_h:is_date_based_rotation_needed( | ||||||
|  |         #{every => month, day_of_month => 15, hour => 12}, | ||||||
|  |         {{2021, 01, 15}, {10, 00, 00}}, | ||||||
|  |         {{2021, 01, 15}, {12, 00, 00}})), | ||||||
|  |     ?assertNot( | ||||||
|  |       rabbit_logger_std_h:is_date_based_rotation_needed( | ||||||
|  |         #{every => month, day_of_month => 15, hour => 12}, | ||||||
|  |         {{2021, 01, 13}, {12, 00, 00}}, | ||||||
|  |         {{2021, 01, 14}, {12, 00, 00}})), | ||||||
|  |     ?assert( | ||||||
|  |       rabbit_logger_std_h:is_date_based_rotation_needed( | ||||||
|  |         #{every => month, day_of_month => 15, hour => 12}, | ||||||
|  |         {{2021, 01, 14}, {12, 00, 00}}, | ||||||
|  |         {{2021, 01, 15}, {12, 00, 00}})), | ||||||
|  |     ?assertNot( | ||||||
|  |       rabbit_logger_std_h:is_date_based_rotation_needed( | ||||||
|  |         #{every => month, day_of_month => 15, hour => 12}, | ||||||
|  |         {{2021, 01, 15}, {12, 00, 00}}, | ||||||
|  |         {{2021, 01, 16}, {12, 00, 00}})), | ||||||
|  |     ?assert( | ||||||
|  |       rabbit_logger_std_h:is_date_based_rotation_needed( | ||||||
|  |         #{every => month, day_of_month => 15, hour => 12}, | ||||||
|  |         {{2021, 01, 14}, {12, 00, 00}}, | ||||||
|  |         {{2021, 02, 14}, {12, 00, 00}})), | ||||||
|  |     ?assert( | ||||||
|  |       rabbit_logger_std_h:is_date_based_rotation_needed( | ||||||
|  |         #{every => month, day_of_month => 15, hour => 12}, | ||||||
|  |         {{2021, 01, 16}, {12, 00, 00}}, | ||||||
|  |         {{2021, 02, 16}, {12, 00, 00}})), | ||||||
|  | 
 | ||||||
|  |     ?assert( | ||||||
|  |       rabbit_logger_std_h:is_date_based_rotation_needed( | ||||||
|  |         #{every => month, day_of_month => 30, hour => 12}, | ||||||
|  |         {{2021, 01, 29}, {12, 00, 00}}, | ||||||
|  |         {{2021, 01, 30}, {12, 00, 00}})), | ||||||
|  |     ?assertNot( | ||||||
|  |       rabbit_logger_std_h:is_date_based_rotation_needed( | ||||||
|  |         #{every => month, day_of_month => 30, hour => 12}, | ||||||
|  |         {{2021, 01, 30}, {12, 00, 00}}, | ||||||
|  |         {{2021, 01, 31}, {12, 00, 00}})), | ||||||
|  |     ?assert( | ||||||
|  |       rabbit_logger_std_h:is_date_based_rotation_needed( | ||||||
|  |         #{every => month, day_of_month => 30, hour => 12}, | ||||||
|  |         {{2021, 02, 27}, {12, 00, 00}}, | ||||||
|  |         {{2021, 02, 28}, {12, 00, 00}})), | ||||||
|  | 
 | ||||||
|  |     ?assertNot( | ||||||
|  |       rabbit_logger_std_h:is_date_based_rotation_needed( | ||||||
|  |         #{every => month, day_of_month => last, hour => 12}, | ||||||
|  |         {{2021, 01, 29}, {12, 00, 00}}, | ||||||
|  |         {{2021, 01, 30}, {12, 00, 00}})), | ||||||
|  |     ?assert( | ||||||
|  |       rabbit_logger_std_h:is_date_based_rotation_needed( | ||||||
|  |         #{every => month, day_of_month => last, hour => 12}, | ||||||
|  |         {{2021, 01, 30}, {12, 00, 00}}, | ||||||
|  |         {{2021, 01, 31}, {12, 00, 00}})), | ||||||
|  |     ?assert( | ||||||
|  |       rabbit_logger_std_h:is_date_based_rotation_needed( | ||||||
|  |         #{every => month, day_of_month => last, hour => 12}, | ||||||
|  |         {{2021, 01, 30}, {12, 00, 00}}, | ||||||
|  |         {{2021, 02, 01}, {12, 00, 00}})). | ||||||
|  | @ -883,7 +883,7 @@ | ||||||
| 
 | 
 | ||||||
| ## Logging settings. | ## Logging settings. | ||||||
| ## | ## | ||||||
| ## See https://rabbitmq.com/logging.html and https://github.com/erlang-lager/lager for details. | ## See https://rabbitmq.com/logging.html for details. | ||||||
| ## | ## | ||||||
| 
 | 
 | ||||||
| ## Log directory, taken from the RABBITMQ_LOG_BASE env variable by default. | ## Log directory, taken from the RABBITMQ_LOG_BASE env variable by default. | ||||||
|  |  | ||||||
|  | @ -146,25 +146,6 @@ For example, to reset the RabbitMQ node: | ||||||
| .sp | .sp | ||||||
| .Dl rabbitmqctl reset | .Dl rabbitmqctl reset | ||||||
| .\" ------------------------------------------------------------------ | .\" ------------------------------------------------------------------ | ||||||
| .It Cm rotate_logs |  | ||||||
| .Pp |  | ||||||
| Instructs the RabbitMQ node to perform internal log rotation. |  | ||||||
| .Pp |  | ||||||
| Log rotation is performed according to lager settings specified in |  | ||||||
| configuration file. |  | ||||||
| .Pp |  | ||||||
| Note that there is no need to call this command in case of external log |  | ||||||
| rotation (e.g. from logrotate(8)), because lager detects renames and |  | ||||||
| automatically reopens log files. |  | ||||||
| .Pp |  | ||||||
| For example, this command starts internal log rotation |  | ||||||
| process: |  | ||||||
| .sp |  | ||||||
| .Dl rabbitmqctl rotate_logs |  | ||||||
| .Pp |  | ||||||
| Rotation is performed asynchronously, so there is no guarantee that it |  | ||||||
| will be completed when this command returns. |  | ||||||
| .\" ------------------------------------------------------------------ |  | ||||||
| .It Cm shutdown | .It Cm shutdown | ||||||
| .Pp | .Pp | ||||||
| Shuts down the node, both RabbitMQ and its runtime. | Shuts down the node, both RabbitMQ and its runtime. | ||||||
|  |  | ||||||
|  | @ -1179,10 +1179,10 @@ end}. | ||||||
| ]}. | ]}. | ||||||
| 
 | 
 | ||||||
| % ========================== | % ========================== | ||||||
| % Lager section | % Logging section | ||||||
| % ========================== | % ========================== | ||||||
| 
 | 
 | ||||||
| {mapping, "log.dir", "lager.log_root", [ | {mapping, "log.dir", "rabbit.log_root", [ | ||||||
|     {datatype, string}, |     {datatype, string}, | ||||||
|     {validators, ["dir_writable"]}]}. |     {validators, ["dir_writable"]}]}. | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -117,7 +117,6 @@ dep_accept = hex 0.3.5 | ||||||
| dep_cowboy = hex 2.8.0 | dep_cowboy = hex 2.8.0 | ||||||
| dep_cowlib = hex 2.9.1 | dep_cowlib = hex 2.9.1 | ||||||
| dep_jsx = hex 2.11.0 | dep_jsx = hex 2.11.0 | ||||||
| dep_lager = hex 3.9.1 |  | ||||||
| dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master | dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master | ||||||
| dep_ra = git https://github.com/rabbitmq/ra.git master | dep_ra = git https://github.com/rabbitmq/ra.git master | ||||||
| dep_ranch = hex 2.0.0 | dep_ranch = hex 2.0.0 | ||||||
|  |  | ||||||
|  | @ -79,8 +79,8 @@ start_rabbitmq_server() { | ||||||
|         ${RABBITMQ_SERVER_ERL_ARGS} \ |         ${RABBITMQ_SERVER_ERL_ARGS} \ | ||||||
|         ${RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS} \ |         ${RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS} \ | ||||||
|         ${RABBITMQ_SERVER_START_ARGS} \ |         ${RABBITMQ_SERVER_START_ARGS} \ | ||||||
|         -lager crash_log false \ |         -syslog logger '[]' \ | ||||||
|         -lager handlers '[]' \ |         -syslog syslog_error_logger false \ | ||||||
|         "$@" |         "$@" | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -68,8 +68,8 @@ if "!RABBITMQ_ALLOW_INPUT!"=="" ( | ||||||
| !RABBITMQ_SERVER_ERL_ARGS! ^ | !RABBITMQ_SERVER_ERL_ARGS! ^ | ||||||
| !RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS! ^ | !RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS! ^ | ||||||
| !RABBITMQ_SERVER_START_ARGS! ^ | !RABBITMQ_SERVER_START_ARGS! ^ | ||||||
| -lager crash_log false ^ | -syslog logger [] ^ | ||||||
| -lager handlers "[]" ^ | -syslog syslog_error_logger false ^ | ||||||
| !STAR! | !STAR! | ||||||
| 
 | 
 | ||||||
| if ERRORLEVEL 1 ( | if ERRORLEVEL 1 ( | ||||||
|  |  | ||||||
|  | @ -198,8 +198,8 @@ set ERLANG_SERVICE_ARGUMENTS= ^ | ||||||
| !RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS! ^ | !RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS! ^ | ||||||
| !RABBITMQ_SERVER_START_ARGS! ^ | !RABBITMQ_SERVER_START_ARGS! ^ | ||||||
| !RABBITMQ_DIST_ARG! ^ | !RABBITMQ_DIST_ARG! ^ | ||||||
| -lager crash_log false ^ | -syslog logger [] ^ | ||||||
| -lager handlers "[]" ^ | -syslog syslog_error_logger false ^ | ||||||
| !STARVAR! | !STARVAR! | ||||||
| 
 | 
 | ||||||
| set ERLANG_SERVICE_ARGUMENTS=!ERLANG_SERVICE_ARGUMENTS:\=\\! | set ERLANG_SERVICE_ARGUMENTS=!ERLANG_SERVICE_ARGUMENTS:\=\\! | ||||||
|  |  | ||||||
|  | @ -70,7 +70,7 @@ handle_maybe_call_mfa(true, {Module, Function, Args, Default}, State) -> | ||||||
|         error:undef -> |         error:undef -> | ||||||
|             handle_maybe_call_mfa_error(Module, Default, State); |             handle_maybe_call_mfa_error(Module, Default, State); | ||||||
|         Err:Reason -> |         Err:Reason -> | ||||||
|             rabbit_log:error("Calling ~p:~p failed: ~p:~p~n", |             rabbit_log:error("Calling ~p:~p failed: ~p:~p", | ||||||
|                              [Module, Function, Err, Reason]), |                              [Module, Function, Err, Reason]), | ||||||
|             handle_maybe_call_mfa_error(Module, Default, State) |             handle_maybe_call_mfa_error(Module, Default, State) | ||||||
|     end. |     end. | ||||||
|  |  | ||||||
|  | @ -1,233 +0,0 @@ | ||||||
| %% This Source Code Form is subject to the terms of the Mozilla Public |  | ||||||
| %% License, v. 2.0. If a copy of the MPL was not distributed with this |  | ||||||
| %% file, You can obtain one at https://mozilla.org/MPL/2.0/. |  | ||||||
| %% |  | ||||||
| %% Copyright (c) 2007-2021 VMware, Inc. or its affiliates.  All rights reserved. |  | ||||||
| %% |  | ||||||
| 
 |  | ||||||
| %% @doc RabbitMQ backend for lager. |  | ||||||
| %% Configuration is a proplist with the following keys: |  | ||||||
| %% <ul> |  | ||||||
| %%    <li>`level' - log level to use</li> |  | ||||||
| %%    <li>`formatter' - the module to use when formatting log messages. Defaults to |  | ||||||
| %%                      `lager_default_formatter'</li> |  | ||||||
| %%    <li>`formatter_config' - the format configuration string. Defaults to |  | ||||||
| %%                             `time [ severity ] message'</li> |  | ||||||
| %% </ul> |  | ||||||
| 
 |  | ||||||
| -module(lager_exchange_backend). |  | ||||||
| 
 |  | ||||||
| -behaviour(gen_event). |  | ||||||
| 
 |  | ||||||
| -export([init/1, terminate/2, code_change/3, |  | ||||||
|          handle_call/2, handle_event/2, handle_info/2]). |  | ||||||
| 
 |  | ||||||
| -export([maybe_init_exchange/0]). |  | ||||||
| 
 |  | ||||||
| -include("rabbit.hrl"). |  | ||||||
| -include("rabbit_framing.hrl"). |  | ||||||
| 
 |  | ||||||
| -include_lib("lager/include/lager.hrl"). |  | ||||||
| 
 |  | ||||||
| -record(state, {level :: {'mask', integer()}, |  | ||||||
|                 formatter :: atom(), |  | ||||||
|                 format_config :: any(), |  | ||||||
|                 init_exchange_ts = undefined :: integer() | undefined, |  | ||||||
|                 exchange = undefined :: #resource{} | undefined}). |  | ||||||
| 
 |  | ||||||
| -ifdef(TEST). |  | ||||||
| -include_lib("eunit/include/eunit.hrl"). |  | ||||||
| -compile([{parse_transform, lager_transform}]). |  | ||||||
| -endif. |  | ||||||
| 
 |  | ||||||
| -define(INIT_EXCHANGE_INTERVAL_SECS, 5). |  | ||||||
| -define(TERSE_FORMAT, [time, " [", severity, "] ", message]). |  | ||||||
| -define(DEFAULT_FORMAT_CONFIG, ?TERSE_FORMAT). |  | ||||||
| -define(FORMAT_CONFIG_OFF, []). |  | ||||||
| 
 |  | ||||||
| -ifdef(TEST). |  | ||||||
| -define(DEPRECATED(_Msg), ok). |  | ||||||
| -else. |  | ||||||
| -define(DEPRECATED(Msg), |  | ||||||
|         io:format(user, "WARNING: This is a deprecated lager_exchange_backend configuration. Please use \"~w\" instead.~n", [Msg])). |  | ||||||
| -endif. |  | ||||||
| 
 |  | ||||||
| -define(LOG_EXCH_NAME, <<"amq.rabbitmq.log">>). |  | ||||||
| 
 |  | ||||||
| init([Level]) when is_atom(Level) -> |  | ||||||
|     ?DEPRECATED([{level, Level}]), |  | ||||||
|     init([{level, Level}]); |  | ||||||
| init([Level, true]) when is_atom(Level) -> % for backwards compatibility |  | ||||||
|     ?DEPRECATED([{level, Level}, {formatter_config, [{eol, "\\r\\n\\"}]}]), |  | ||||||
|     init([{level, Level}, {formatter_config, ?FORMAT_CONFIG_OFF}]); |  | ||||||
| init([Level, false]) when is_atom(Level) -> % for backwards compatibility |  | ||||||
|     ?DEPRECATED([{level, Level}]), |  | ||||||
|     init([{level, Level}]); |  | ||||||
| 
 |  | ||||||
| init(Options) when is_list(Options) -> |  | ||||||
|     true = validate_options(Options), |  | ||||||
|     Level = get_option(level, Options, undefined), |  | ||||||
|     try lager_util:config_to_mask(Level) of |  | ||||||
|         L -> |  | ||||||
|             DefaultOptions = [{formatter, lager_default_formatter}, |  | ||||||
|                               {formatter_config, ?DEFAULT_FORMAT_CONFIG}], |  | ||||||
|             [Formatter, Config] = [get_option(K, Options, Default) || {K, Default} <- DefaultOptions], |  | ||||||
|             State0 = #state{level=L, |  | ||||||
|                             formatter=Formatter, |  | ||||||
|                             format_config=Config}, |  | ||||||
|             % NB: this will probably always fail since the / vhost isn't available |  | ||||||
|             State1 = maybe_init_exchange(State0), |  | ||||||
|             {ok, State1} |  | ||||||
|     catch |  | ||||||
|         _:_ -> |  | ||||||
|             {error, {fatal, bad_log_level}} |  | ||||||
|     end; |  | ||||||
| init(Level) when is_atom(Level) -> |  | ||||||
|     ?DEPRECATED([{level, Level}]), |  | ||||||
|     init([{level, Level}]); |  | ||||||
| init(Other) -> |  | ||||||
|     {error, {fatal, {bad_lager_exchange_backend_config, Other}}}. |  | ||||||
| 
 |  | ||||||
| % rabbitmq/rabbitmq-server#1973 |  | ||||||
| % This is called immediately after the / vhost is created |  | ||||||
| % or recovered |  | ||||||
| maybe_init_exchange() -> |  | ||||||
|     case lists:member(?MODULE, gen_event:which_handlers(lager_event)) of |  | ||||||
|         true -> |  | ||||||
|             _ = init_exchange(true), |  | ||||||
|             ok; |  | ||||||
|         _ -> |  | ||||||
|             ok |  | ||||||
|     end. |  | ||||||
| 
 |  | ||||||
| validate_options([]) -> true; |  | ||||||
| validate_options([{level, L}|T]) when is_atom(L) -> |  | ||||||
|     case lists:member(L, ?LEVELS) of |  | ||||||
|         false -> |  | ||||||
|             throw({error, {fatal, {bad_level, L}}}); |  | ||||||
|         true -> |  | ||||||
|             validate_options(T) |  | ||||||
|     end; |  | ||||||
| validate_options([{formatter, M}|T]) when is_atom(M) -> |  | ||||||
|     validate_options(T); |  | ||||||
| validate_options([{formatter_config, C}|T]) when is_list(C) -> |  | ||||||
|     validate_options(T); |  | ||||||
| validate_options([H|_]) -> |  | ||||||
|     throw({error, {fatal, {bad_lager_exchange_backend_config, H}}}). |  | ||||||
| 
 |  | ||||||
| get_option(K, Options, Default) -> |  | ||||||
|    case lists:keyfind(K, 1, Options) of |  | ||||||
|        {K, V} -> V; |  | ||||||
|        false -> Default |  | ||||||
|    end. |  | ||||||
| 
 |  | ||||||
| handle_call(get_loglevel, #state{level=Level} = State) -> |  | ||||||
|     {ok, Level, State}; |  | ||||||
| handle_call({set_loglevel, Level}, State) -> |  | ||||||
|     try lager_util:config_to_mask(Level) of |  | ||||||
|         Levels -> |  | ||||||
|             {ok, ok, State#state{level=Levels}} |  | ||||||
|     catch |  | ||||||
|         _:_ -> |  | ||||||
|             {ok, {error, bad_log_level}, State} |  | ||||||
|     end; |  | ||||||
| handle_call(_Request, State) -> |  | ||||||
|     {ok, ok, State}. |  | ||||||
| 
 |  | ||||||
| handle_event({log, _Message} = Event, State0) -> |  | ||||||
|     State1 = maybe_init_exchange(State0), |  | ||||||
|     handle_log_event(Event, State1); |  | ||||||
| handle_event(_Event, State) -> |  | ||||||
|     {ok, State}. |  | ||||||
| 
 |  | ||||||
| handle_info(_Info, State) -> |  | ||||||
|     {ok, State}. |  | ||||||
| 
 |  | ||||||
| terminate(_Reason, _State) -> |  | ||||||
|     ok. |  | ||||||
| 
 |  | ||||||
| code_change(_OldVsn, State, _Extra) -> |  | ||||||
|     {ok, State}. |  | ||||||
| 
 |  | ||||||
| %% @private |  | ||||||
| handle_log_event({log, _Message}, #state{exchange=undefined} = State) -> |  | ||||||
|     % NB: tried to define the exchange but still undefined, |  | ||||||
|     % so not logging this message. Note: we can't log this dropped |  | ||||||
|     % message because it will start an infinite loop |  | ||||||
|     {ok, State}; |  | ||||||
| handle_log_event({log, Message}, |  | ||||||
|     #state{level=L, exchange=LogExch, |  | ||||||
|            formatter=Formatter, format_config=FormatConfig} = State) -> |  | ||||||
|     case lager_util:is_loggable(Message, L, ?MODULE) of |  | ||||||
|         true -> |  | ||||||
|             %% 0-9-1 says the timestamp is a "64 bit POSIX timestamp". That's |  | ||||||
|             %% second resolution, not millisecond. |  | ||||||
|             RoutingKey = rabbit_data_coercion:to_binary(lager_msg:severity(Message)), |  | ||||||
|             Timestamp = os:system_time(seconds), |  | ||||||
|             Node = rabbit_data_coercion:to_binary(node()), |  | ||||||
|             Headers = [{<<"node">>, longstr, Node}], |  | ||||||
|             AmqpMsg = #'P_basic'{content_type = <<"text/plain">>, |  | ||||||
|                                  timestamp    = Timestamp, |  | ||||||
|                                  headers      = Headers}, |  | ||||||
|             Body = rabbit_data_coercion:to_binary(Formatter:format(Message, FormatConfig)), |  | ||||||
|             case rabbit_basic:publish(LogExch, RoutingKey, AmqpMsg, Body) of |  | ||||||
|                 ok                 -> ok; |  | ||||||
|                 {error, not_found} -> ok |  | ||||||
|             end, |  | ||||||
|             {ok, State}; |  | ||||||
|         false -> |  | ||||||
|             {ok, State} |  | ||||||
|     end. |  | ||||||
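For reference, messages published by this backend carry the severity as the routing key and the node name in a header, so a client can tap the log stream by binding a queue to amq.rabbitmq.log. A rough sketch using the Erlang AMQP client follows; the connection parameters and the "error" binding key are assumptions for illustration, not part of this module.

    -include_lib("amqp_client/include/amqp_client.hrl").

    consume_error_logs() ->
        %% Connect to a local node with default credentials (assumption).
        {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
        {ok, Ch} = amqp_connection:open_channel(Conn),
        %% Server-named, exclusive queue bound to the log exchange for "error" messages.
        #'queue.declare_ok'{queue = Q} =
            amqp_channel:call(Ch, #'queue.declare'{exclusive = true}),
        #'queue.bind_ok'{} =
            amqp_channel:call(Ch, #'queue.bind'{queue       = Q,
                                                exchange    = <<"amq.rabbitmq.log">>,
                                                routing_key = <<"error">>}),
        %% Deliveries arrive in this process's mailbox as
        %% {#'basic.deliver'{}, #amqp_msg{}} tuples.
        amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q, no_ack = true}, self()).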
| 
 |  | ||||||
| %% @private |  | ||||||
| maybe_init_exchange(#state{exchange=undefined, init_exchange_ts=undefined} = State) -> |  | ||||||
|     Now = erlang:monotonic_time(second), |  | ||||||
|     handle_init_exchange(init_exchange(true), Now, State); |  | ||||||
| maybe_init_exchange(#state{exchange=undefined, init_exchange_ts=Timestamp} = State) -> |  | ||||||
|     Now = erlang:monotonic_time(second), |  | ||||||
|     % NB: since we may try to declare the exchange on every log message, this ensures |  | ||||||
|     % that we only try once every 5 seconds |  | ||||||
|     HasEnoughTimeElapsed = Now - Timestamp > ?INIT_EXCHANGE_INTERVAL_SECS, |  | ||||||
|     Result = init_exchange(HasEnoughTimeElapsed), |  | ||||||
|     handle_init_exchange(Result, Now, State); |  | ||||||
| maybe_init_exchange(State) -> |  | ||||||
|     State. |  | ||||||
| 
 |  | ||||||
| %% @private |  | ||||||
| init_exchange(true) -> |  | ||||||
|     {ok, DefaultVHost} = application:get_env(rabbit, default_vhost), |  | ||||||
|     Exchange = rabbit_misc:r(DefaultVHost, exchange, ?LOG_EXCH_NAME), |  | ||||||
|     try |  | ||||||
|         %% durable |  | ||||||
|         #exchange{} = rabbit_exchange:declare(Exchange, topic, true, false, true, [], ?INTERNAL_USER), |  | ||||||
|         rabbit_log:info("Declared exchange '~s' in vhost '~s'", [?LOG_EXCH_NAME, DefaultVHost]), |  | ||||||
|         {ok, Exchange} |  | ||||||
|     catch |  | ||||||
|         ErrType:Err -> |  | ||||||
|             rabbit_log:error("Could not declare exchange '~s' in vhost '~s', reason: ~p:~p", |  | ||||||
|                              [?LOG_EXCH_NAME, DefaultVHost, ErrType, Err]), |  | ||||||
|             {ok, undefined} |  | ||||||
|     end; |  | ||||||
| init_exchange(_) -> |  | ||||||
|     {ok, undefined}. |  | ||||||
| 
 |  | ||||||
| %% @private |  | ||||||
| handle_init_exchange({ok, undefined}, Now, State) -> |  | ||||||
|     State#state{init_exchange_ts=Now}; |  | ||||||
| handle_init_exchange({ok, Exchange}, Now, State) -> |  | ||||||
|     State#state{exchange=Exchange, init_exchange_ts=Now}. |  | ||||||
| 
 |  | ||||||
| -ifdef(TEST). |  | ||||||
| console_config_validation_test_() -> |  | ||||||
|     Good = [{level, info}], |  | ||||||
|     Bad1 = [{level, foo}], |  | ||||||
|     Bad2 = [{larval, info}], |  | ||||||
|     AllGood = [{level, info}, {formatter, my_formatter}, |  | ||||||
|                {formatter_config, ["blort", "garbage"]}], |  | ||||||
|     [ |  | ||||||
|      ?_assertEqual(true, validate_options(Good)), |  | ||||||
|      ?_assertThrow({error, {fatal, {bad_level, foo}}}, validate_options(Bad1)), |  | ||||||
|      ?_assertThrow({error, {fatal, {bad_lager_exchange_backend_config, {larval, info}}}}, validate_options(Bad2)), |  | ||||||
|      ?_assertEqual(true, validate_options(AllGood)) |  | ||||||
|     ]. |  | ||||||
| -endif. |  | ||||||
|  | @ -7,6 +7,9 @@ | ||||||
| 
 | 
 | ||||||
| -module(rabbit). | -module(rabbit). | ||||||
| 
 | 
 | ||||||
|  | -include_lib("kernel/include/logger.hrl"). | ||||||
|  | -include_lib("rabbit_common/include/logging.hrl"). | ||||||
|  | 
 | ||||||
| %% Transitional step until we can require Erlang/OTP 21 and | %% Transitional step until we can require Erlang/OTP 21 and | ||||||
| %% use the now recommended try/catch syntax for obtaining the stack trace. | %% use the now recommended try/catch syntax for obtaining the stack trace. | ||||||
| -compile(nowarn_deprecated_function). | -compile(nowarn_deprecated_function). | ||||||
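Most of the remaining hunks in this file follow from these two includes: kernel's logger.hrl provides the ?LOG_DEBUG/?LOG_INFO/?LOG_WARNING/?LOG_ERROR macros, while rabbit_common's logging.hrl defines the ?RMQLOG_DOMAIN_* values used as domain metadata. A sketch of the calling convention used throughout; the literal domain value shown is a stand-in, the real one comes from logging.hrl.

    -include_lib("kernel/include/logger.hrl").

    log_example(QueueCount) ->
        %% Format string, format arguments, then a metadata map. The macros
        %% also attach mfa/file/line metadata to every event automatically.
        ?LOG_INFO("Recovered ~b queues", [QueueCount],
                  #{domain => [rabbitmq, global]}).   %% stand-in for ?RMQLOG_DOMAIN_GLOBAL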
|  | @ -28,7 +31,8 @@ | ||||||
|          base_product_version/0, |          base_product_version/0, | ||||||
|          motd_file/0, |          motd_file/0, | ||||||
|          motd/0]). |          motd/0]). | ||||||
| -export([log_locations/0, config_files/0]). %% for testing and mgmt-agent | %% For CLI, testing and mgmt-agent. | ||||||
|  | -export([set_log_level/1, log_locations/0, config_files/0]). | ||||||
| -export([is_booted/1, is_booted/0, is_booting/1, is_booting/0]). | -export([is_booted/1, is_booted/0, is_booting/1, is_booting/0]). | ||||||
| 
 | 
 | ||||||
| %%--------------------------------------------------------------------------- | %%--------------------------------------------------------------------------- | ||||||
|  | @ -261,7 +265,7 @@ | ||||||
| 
 | 
 | ||||||
| -rabbit_boot_step({networking, | -rabbit_boot_step({networking, | ||||||
|                    [{description, "TCP and TLS listeners (backwards compatibility)"}, |                    [{description, "TCP and TLS listeners (backwards compatibility)"}, | ||||||
|                     {mfa,         {rabbit_log, debug, ["'networking' boot step skipped and moved to end of startup", []]}}, |                     {mfa,         {logger, debug, ["'networking' boot step skipped and moved to end of startup", [], #{domain => ?RMQLOG_DOMAIN_GLOBAL}]}}, | ||||||
|                     {requires,    notify_cluster}]}). |                     {requires,    notify_cluster}]}). | ||||||
| 
 | 
 | ||||||
| %%--------------------------------------------------------------------------- | %%--------------------------------------------------------------------------- | ||||||
|  | @ -335,12 +339,12 @@ run_prelaunch_second_phase() -> | ||||||
| 
 | 
 | ||||||
|     case IsInitialPass of |     case IsInitialPass of | ||||||
|         true -> |         true -> | ||||||
|             rabbit_log_prelaunch:debug(""), |             ?LOG_DEBUG(""), | ||||||
|             rabbit_log_prelaunch:debug( |             ?LOG_DEBUG( | ||||||
|               "== Prelaunch phase [2/2] (initial pass) =="); |               "== Prelaunch phase [2/2] (initial pass) =="); | ||||||
|         false -> |         false -> | ||||||
|             rabbit_log_prelaunch:debug(""), |             ?LOG_DEBUG(""), | ||||||
|             rabbit_log_prelaunch:debug("== Prelaunch phase [2/2] =="), |             ?LOG_DEBUG("== Prelaunch phase [2/2] =="), | ||||||
|             ok |             ok | ||||||
|     end, |     end, | ||||||
| 
 | 
 | ||||||
|  | @ -357,11 +361,11 @@ run_prelaunch_second_phase() -> | ||||||
|     ok = rabbit_prelaunch_cluster:setup(Context), |     ok = rabbit_prelaunch_cluster:setup(Context), | ||||||
| 
 | 
 | ||||||
|     %% Start Mnesia now that everything is ready. |     %% Start Mnesia now that everything is ready. | ||||||
|     rabbit_log_prelaunch:debug("Starting Mnesia"), |     ?LOG_DEBUG("Starting Mnesia"), | ||||||
|     ok = mnesia:start(), |     ok = mnesia:start(), | ||||||
| 
 | 
 | ||||||
|     rabbit_log_prelaunch:debug(""), |     ?LOG_DEBUG(""), | ||||||
|     rabbit_log_prelaunch:debug("== Prelaunch DONE =="), |     ?LOG_DEBUG("== Prelaunch DONE =="), | ||||||
| 
 | 
 | ||||||
|     case IsInitialPass of |     case IsInitialPass of | ||||||
|         true  -> rabbit_prelaunch:initial_pass_finished(); |         true  -> rabbit_prelaunch:initial_pass_finished(); | ||||||
|  | @ -373,7 +377,8 @@ start_it(StartType) -> | ||||||
|     case spawn_boot_marker() of |     case spawn_boot_marker() of | ||||||
|         {ok, Marker} -> |         {ok, Marker} -> | ||||||
|             T0 = erlang:timestamp(), |             T0 = erlang:timestamp(), | ||||||
|             rabbit_log:info("RabbitMQ is asked to start...", []), |             ?LOG_INFO("RabbitMQ is asked to start...", [], | ||||||
|  |                       #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|             try |             try | ||||||
|                 {ok, _} = application:ensure_all_started(rabbitmq_prelaunch, |                 {ok, _} = application:ensure_all_started(rabbitmq_prelaunch, | ||||||
|                                                          StartType), |                                                          StartType), | ||||||
|  | @ -382,7 +387,7 @@ start_it(StartType) -> | ||||||
|                 ok = wait_for_ready_or_stopped(), |                 ok = wait_for_ready_or_stopped(), | ||||||
| 
 | 
 | ||||||
|                 T1 = erlang:timestamp(), |                 T1 = erlang:timestamp(), | ||||||
|                 rabbit_log_prelaunch:debug( |                 ?LOG_DEBUG( | ||||||
|                   "Time to start RabbitMQ: ~p µs", |                   "Time to start RabbitMQ: ~p µs", | ||||||
|                   [timer:now_diff(T1, T0)]), |                   [timer:now_diff(T1, T0)]), | ||||||
|                 stop_boot_marker(Marker), |                 stop_boot_marker(Marker), | ||||||
|  | @ -433,11 +438,13 @@ stop() -> | ||||||
|             case rabbit_boot_state:get() of |             case rabbit_boot_state:get() of | ||||||
|                 ready -> |                 ready -> | ||||||
|                     Product = product_name(), |                     Product = product_name(), | ||||||
|                     rabbit_log:info("~s is asked to stop...", [Product]), |                     ?LOG_INFO("~s is asked to stop...", [Product], | ||||||
|  |                               #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|                     do_stop(), |                     do_stop(), | ||||||
|                     rabbit_log:info( |                     ?LOG_INFO( | ||||||
|                       "Successfully stopped ~s and its dependencies", |                       "Successfully stopped ~s and its dependencies", | ||||||
|                       [Product]), |                       [Product], | ||||||
|  |                       #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|                     ok; |                     ok; | ||||||
|                 stopped -> |                 stopped -> | ||||||
|                     ok |                     ok | ||||||
|  | @ -461,19 +468,22 @@ stop_and_halt() -> | ||||||
|     try |     try | ||||||
|         stop() |         stop() | ||||||
|     catch Type:Reason -> |     catch Type:Reason -> | ||||||
|         rabbit_log:error( |         ?LOG_ERROR( | ||||||
|           "Error trying to stop ~s: ~p:~p", |           "Error trying to stop ~s: ~p:~p", | ||||||
|           [product_name(), Type, Reason]), |           [product_name(), Type, Reason], | ||||||
|  |           #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|         error({Type, Reason}) |         error({Type, Reason}) | ||||||
|     after |     after | ||||||
|         %% Enclose all the logging in the try block. |         %% Enclose all the logging in the try block. | ||||||
|         %% init:stop() will be called regardless of any errors. |         %% init:stop() will be called regardless of any errors. | ||||||
|         try |         try | ||||||
|             AppsLeft = [ A || {A, _, _} <- application:which_applications() ], |             AppsLeft = [ A || {A, _, _} <- application:which_applications() ], | ||||||
|             rabbit_log:info( |             ?LOG_ERROR( | ||||||
|                 lists:flatten(["Halting Erlang VM with the following applications:~n", |                 lists:flatten( | ||||||
|                                ["    ~p~n" || _ <- AppsLeft]]), |                   ["Halting Erlang VM with the following applications:~n", | ||||||
|                 AppsLeft), |                    ["    ~p~n" || _ <- AppsLeft]]), | ||||||
|  |                 AppsLeft, | ||||||
|  |                 #{domain => ?RMQLOG_DOMAIN_GLOBAL}), | ||||||
|             %% Also duplicate this information to stderr, so the console where |             %% Also duplicate this information to stderr, so the console where | ||||||
|             %% the foreground broker was running (or the systemd journal) will |             %% the foreground broker was running (or the systemd journal) will | ||||||
|             %% contain information about graceful termination. |             %% contain information about graceful termination. | ||||||
|  | @ -518,10 +528,12 @@ start_apps(Apps, RestartTypes) -> | ||||||
| stop_apps([]) -> | stop_apps([]) -> | ||||||
|     ok; |     ok; | ||||||
| stop_apps(Apps) -> | stop_apps(Apps) -> | ||||||
|     rabbit_log:info( |     ?LOG_INFO( | ||||||
|         lists:flatten(["Stopping ~s applications and their dependencies in the following order:~n", |         lists:flatten( | ||||||
|                        ["    ~p~n" || _ <- Apps]]), |           ["Stopping ~s applications and their dependencies in the following order:~n", | ||||||
|         [product_name() | lists:reverse(Apps)]), |            ["    ~p~n" || _ <- Apps]]), | ||||||
|  |         [product_name() | lists:reverse(Apps)], | ||||||
|  |         #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|     ok = app_utils:stop_applications( |     ok = app_utils:stop_applications( | ||||||
|            Apps, handle_app_error(error_during_shutdown)), |            Apps, handle_app_error(error_during_shutdown)), | ||||||
|     case lists:member(rabbit, Apps) of |     case lists:member(rabbit, Apps) of | ||||||
|  | @ -785,28 +797,10 @@ environment(App) -> | ||||||
| -spec rotate_logs() -> rabbit_types:ok_or_error(any()). | -spec rotate_logs() -> rabbit_types:ok_or_error(any()). | ||||||
| 
 | 
 | ||||||
| rotate_logs() -> | rotate_logs() -> | ||||||
|     rabbit_lager:fold_sinks( |     ?LOG_ERROR( | ||||||
|       fun |        "Forcing log rotation is currently unsupported", | ||||||
|           (_, [], Acc) -> |        #{domain => ?RMQLOG_DOMAIN_GLOBAL}), | ||||||
|               Acc; |     {error, unsupported}. | ||||||
|           (SinkName, FileNames, Acc) -> |  | ||||||
|               lager:log(SinkName, info, self(), |  | ||||||
|                         "Log file rotation forced", []), |  | ||||||
|               %% FIXME: We use an internal message, understood by |  | ||||||
|               %% lager_file_backend. We should use a proper API, when |  | ||||||
|               %% it's added to Lager. |  | ||||||
|               %% |  | ||||||
|               %% FIXME: This call is effectively asynchronous: at the |  | ||||||
|               %% end of this function, we can't guarantee the rotation |  | ||||||
|               %% is completed. |  | ||||||
|               [ok = gen_event:call(SinkName, |  | ||||||
|                                    {lager_file_backend, FileName}, |  | ||||||
|                                    rotate, |  | ||||||
|                                    infinity) || FileName <- FileNames], |  | ||||||
|               lager:log(SinkName, info, self(), |  | ||||||
|                         "Log file re-opened after forced rotation", []), |  | ||||||
|               Acc |  | ||||||
|       end, ok). |  | ||||||
| 
 | 
 | ||||||
| %%-------------------------------------------------------------------- | %%-------------------------------------------------------------------- | ||||||
| 
 | 
 | ||||||
|  | @ -835,14 +829,18 @@ start(normal, []) -> | ||||||
|             #{product_overridden := true, |             #{product_overridden := true, | ||||||
|               product_base_name := BaseName, |               product_base_name := BaseName, | ||||||
|               product_base_version := BaseVersion} -> |               product_base_version := BaseVersion} -> | ||||||
|                 rabbit_log:info("~n Starting ~s ~s on Erlang ~s~n Based on ~s ~s~n ~s~n ~s~n", |                 ?LOG_INFO( | ||||||
|                                 [product_name(), product_version(), rabbit_misc:otp_release(), |                    "~n Starting ~s ~s on Erlang ~s~n Based on ~s ~s~n ~s~n ~s", | ||||||
|                                  BaseName, BaseVersion, |                    [product_name(), product_version(), rabbit_misc:otp_release(), | ||||||
|                                  ?COPYRIGHT_MESSAGE, ?INFORMATION_MESSAGE]); |                     BaseName, BaseVersion, | ||||||
|  |                     ?COPYRIGHT_MESSAGE, ?INFORMATION_MESSAGE], | ||||||
|  |                    #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}); | ||||||
|             _ -> |             _ -> | ||||||
|                 rabbit_log:info("~n Starting ~s ~s on Erlang ~s~n ~s~n ~s~n", |                 ?LOG_INFO( | ||||||
|                                 [product_name(), product_version(), rabbit_misc:otp_release(), |                    "~n Starting ~s ~s on Erlang ~s~n ~s~n ~s", | ||||||
|                                  ?COPYRIGHT_MESSAGE, ?INFORMATION_MESSAGE]) |                    [product_name(), product_version(), rabbit_misc:otp_release(), | ||||||
|  |                     ?COPYRIGHT_MESSAGE, ?INFORMATION_MESSAGE], | ||||||
|  |                    #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}) | ||||||
|         end, |         end, | ||||||
|         log_motd(), |         log_motd(), | ||||||
|         {ok, SupPid} = rabbit_sup:start_link(), |         {ok, SupPid} = rabbit_sup:start_link(), | ||||||
|  | @ -860,7 +858,7 @@ start(normal, []) -> | ||||||
|         %% |         %% | ||||||
|         %% Note that plugins were not taken care of at this point |         %% Note that plugins were not taken care of at this point | ||||||
|         %% either. |         %% either. | ||||||
|         rabbit_log_prelaunch:debug( |         ?LOG_DEBUG( | ||||||
|           "Register `rabbit` process (~p) for rabbit_node_monitor", |           "Register `rabbit` process (~p) for rabbit_node_monitor", | ||||||
|           [self()]), |           [self()]), | ||||||
|         true = register(rabbit, self()), |         true = register(rabbit, self()), | ||||||
|  | @ -870,15 +868,15 @@ start(normal, []) -> | ||||||
|         warn_if_kernel_config_dubious(), |         warn_if_kernel_config_dubious(), | ||||||
|         warn_if_disc_io_options_dubious(), |         warn_if_disc_io_options_dubious(), | ||||||
| 
 | 
 | ||||||
|         rabbit_log_prelaunch:debug(""), |         ?LOG_DEBUG(""), | ||||||
|         rabbit_log_prelaunch:debug("== Plugins (prelaunch phase) =="), |         ?LOG_DEBUG("== Plugins (prelaunch phase) =="), | ||||||
| 
 | 
 | ||||||
|         rabbit_log_prelaunch:debug("Setting plugins up"), |         ?LOG_DEBUG("Setting plugins up"), | ||||||
|         %% `Plugins` contains all the enabled plugins, plus their |         %% `Plugins` contains all the enabled plugins, plus their | ||||||
|         %% dependencies. The order is important: dependencies appear |         %% dependencies. The order is important: dependencies appear | ||||||
|         %% before plugins that depend on them. |         %% before plugins that depend on them. | ||||||
|         Plugins = rabbit_plugins:setup(), |         Plugins = rabbit_plugins:setup(), | ||||||
|         rabbit_log_prelaunch:debug( |         ?LOG_DEBUG( | ||||||
|           "Loading the following plugins: ~p", [Plugins]), |           "Loading the following plugins: ~p", [Plugins]), | ||||||
|         %% We can load all plugins and refresh their feature flags at |         %% We can load all plugins and refresh their feature flags at | ||||||
|         %% once, because it does not involve running code from the |         %% once, because it does not involve running code from the | ||||||
|  | @ -887,8 +885,8 @@ start(normal, []) -> | ||||||
|         ok = rabbit_feature_flags:refresh_feature_flags_after_app_load( |         ok = rabbit_feature_flags:refresh_feature_flags_after_app_load( | ||||||
|                Plugins), |                Plugins), | ||||||
| 
 | 
 | ||||||
|         rabbit_log_prelaunch:debug(""), |         ?LOG_DEBUG(""), | ||||||
|         rabbit_log_prelaunch:debug("== Boot steps =="), |         ?LOG_DEBUG("== Boot steps =="), | ||||||
| 
 | 
 | ||||||
|         ok = rabbit_boot_steps:run_boot_steps([rabbit | Plugins]), |         ok = rabbit_boot_steps:run_boot_steps([rabbit | Plugins]), | ||||||
|         run_postlaunch_phase(Plugins), |         run_postlaunch_phase(Plugins), | ||||||
|  | @ -917,23 +915,22 @@ run_postlaunch_phase(Plugins) -> | ||||||
| do_run_postlaunch_phase(Plugins) -> | do_run_postlaunch_phase(Plugins) -> | ||||||
|     %% Once RabbitMQ itself is started, we need to run a few more steps, |     %% Once RabbitMQ itself is started, we need to run a few more steps, | ||||||
|     %% in particular start plugins. |     %% in particular start plugins. | ||||||
|     rabbit_log_prelaunch:debug(""), |     ?LOG_DEBUG(""), | ||||||
|     rabbit_log_prelaunch:debug("== Postlaunch phase =="), |     ?LOG_DEBUG("== Postlaunch phase =="), | ||||||
| 
 | 
 | ||||||
|     try |     try | ||||||
|         %% Successful boot resets node maintenance state. |         %% Successful boot resets node maintenance state. | ||||||
|         rabbit_log_prelaunch:debug(""), |         ?LOG_DEBUG(""), | ||||||
|         rabbit_log_prelaunch:info("Resetting node maintenance status"), |         ?LOG_INFO("Resetting node maintenance status"), | ||||||
|         _ = rabbit_maintenance:unmark_as_being_drained(), |         _ = rabbit_maintenance:unmark_as_being_drained(), | ||||||
| 
 | 
 | ||||||
|         rabbit_log_prelaunch:debug(""), |         ?LOG_DEBUG(""), | ||||||
|         rabbit_log_prelaunch:debug("== Plugins (postlaunch phase) =="), |         ?LOG_DEBUG("== Plugins (postlaunch phase) =="), | ||||||
| 
 | 
 | ||||||
|         %% However, we want to run their boot steps and actually start |         %% However, we want to run their boot steps and actually start | ||||||
|         %% them one by one, to ensure a dependency is fully started |         %% them one by one, to ensure a dependency is fully started | ||||||
|         %% before a plugin which depends on it gets a chance to start. |         %% before a plugin which depends on it gets a chance to start. | ||||||
|         rabbit_log_prelaunch:debug( |         ?LOG_DEBUG("Starting the following plugins: ~p", [Plugins]), | ||||||
|           "Starting the following plugins: ~p", [Plugins]), |  | ||||||
|         lists:foreach( |         lists:foreach( | ||||||
|           fun(Plugin) -> |           fun(Plugin) -> | ||||||
|                   case application:ensure_all_started(Plugin) of |                   case application:ensure_all_started(Plugin) of | ||||||
|  | @ -951,18 +948,16 @@ do_run_postlaunch_phase(Plugins) -> | ||||||
| 
 | 
 | ||||||
|         %% Start listeners after all plugins have been enabled, |         %% Start listeners after all plugins have been enabled, | ||||||
|         %% see rabbitmq/rabbitmq-server#2405. |         %% see rabbitmq/rabbitmq-server#2405. | ||||||
|         rabbit_log_prelaunch:info( |         ?LOG_INFO("Ready to start client connection listeners"), | ||||||
|           "Ready to start client connection listeners"), |  | ||||||
|         ok = rabbit_networking:boot(), |         ok = rabbit_networking:boot(), | ||||||
| 
 | 
 | ||||||
|         %% The node is ready: mark it as such and log it. |         %% The node is ready: mark it as such and log it. | ||||||
|         %% NOTE: PLEASE DO NOT ADD CRITICAL NODE STARTUP CODE AFTER THIS. |         %% NOTE: PLEASE DO NOT ADD CRITICAL NODE STARTUP CODE AFTER THIS. | ||||||
|         ok = rabbit_lager:broker_is_started(), |  | ||||||
|         ActivePlugins = rabbit_plugins:active(), |         ActivePlugins = rabbit_plugins:active(), | ||||||
|         StrictlyPlugins = rabbit_plugins:strictly_plugins(ActivePlugins), |         StrictlyPlugins = rabbit_plugins:strictly_plugins(ActivePlugins), | ||||||
|         ok = log_broker_started(StrictlyPlugins), |         ok = log_broker_started(StrictlyPlugins), | ||||||
| 
 | 
 | ||||||
|         rabbit_log_prelaunch:debug("Marking ~s as running", [product_name()]), |         ?LOG_DEBUG("Marking ~s as running", [product_name()]), | ||||||
|         rabbit_boot_state:set(ready) |         rabbit_boot_state:set(ready) | ||||||
|     catch |     catch | ||||||
|         throw:{error, _} = Error -> |         throw:{error, _} = Error -> | ||||||
|  | @ -1011,7 +1006,7 @@ boot_delegate() -> | ||||||
| recover() -> | recover() -> | ||||||
|     ok = rabbit_policy:recover(), |     ok = rabbit_policy:recover(), | ||||||
|     ok = rabbit_vhost:recover(), |     ok = rabbit_vhost:recover(), | ||||||
|     ok = lager_exchange_backend:maybe_init_exchange(). |     ok. | ||||||
| 
 | 
 | ||||||
| -spec maybe_insert_default_data() -> 'ok'. | -spec maybe_insert_default_data() -> 'ok'. | ||||||
| 
 | 
 | ||||||
|  | @ -1019,10 +1014,12 @@ maybe_insert_default_data() -> | ||||||
|     NoDefsToImport = not rabbit_definitions:has_configured_definitions_to_load(), |     NoDefsToImport = not rabbit_definitions:has_configured_definitions_to_load(), | ||||||
|     case rabbit_table:needs_default_data() andalso NoDefsToImport of |     case rabbit_table:needs_default_data() andalso NoDefsToImport of | ||||||
|         true  -> |         true  -> | ||||||
|             rabbit_log:info("Will seed default virtual host and user..."), |             ?LOG_INFO("Will seed default virtual host and user...", | ||||||
|  |                       #{domain => ?RMQLOG_DOMAIN_GLOBAL}), | ||||||
|             insert_default_data(); |             insert_default_data(); | ||||||
|         false -> |         false -> | ||||||
|             rabbit_log:info("Will not seed default virtual host and user: have definitions to load..."), |             ?LOG_INFO("Will not seed default virtual host and user: have definitions to load...", | ||||||
|  |                       #{domain => ?RMQLOG_DOMAIN_GLOBAL}), | ||||||
|             ok |             ok | ||||||
|     end. |     end. | ||||||
| 
 | 
 | ||||||
|  | @ -1042,7 +1039,6 @@ insert_default_data() -> | ||||||
|     DefaultReadPermBin = rabbit_data_coercion:to_binary(DefaultReadPerm), |     DefaultReadPermBin = rabbit_data_coercion:to_binary(DefaultReadPerm), | ||||||
| 
 | 
 | ||||||
|     ok = rabbit_vhost:add(DefaultVHostBin, <<"Default virtual host">>, [], ?INTERNAL_USER), |     ok = rabbit_vhost:add(DefaultVHostBin, <<"Default virtual host">>, [], ?INTERNAL_USER), | ||||||
|     ok = lager_exchange_backend:maybe_init_exchange(), |  | ||||||
|     ok = rabbit_auth_backend_internal:add_user( |     ok = rabbit_auth_backend_internal:add_user( | ||||||
|         DefaultUserBin, |         DefaultUserBin, | ||||||
|         DefaultPassBin, |         DefaultPassBin, | ||||||
|  | @ -1061,9 +1057,13 @@ insert_default_data() -> | ||||||
| %%--------------------------------------------------------------------------- | %%--------------------------------------------------------------------------- | ||||||
| %% logging | %% logging | ||||||
| 
 | 
 | ||||||
| -spec log_locations() -> [rabbit_lager:log_location()]. | -spec set_log_level(logger:level()) -> ok. | ||||||
|  | set_log_level(Level) -> | ||||||
|  |     rabbit_prelaunch_logging:set_log_level(Level). | ||||||
|  | 
 | ||||||
|  | -spec log_locations() -> [rabbit_prelaunch_logging:log_location()]. | ||||||
| log_locations() -> | log_locations() -> | ||||||
|     rabbit_lager:log_locations(). |     rabbit_prelaunch_logging:log_locations(). | ||||||
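set_log_level/1 gives the CLI and tests a single entry point for changing the runtime log level; both it and log_locations/0 simply delegate to rabbit_prelaunch_logging. A usage sketch, e.g. from an attached shell or rabbitmqctl eval:

    %% Raise verbosity at runtime, then inspect where log output goes.
    ok = rabbit:set_log_level(debug),
    Locations = rabbit:log_locations().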
| 
 | 
 | ||||||
| -spec config_locations() -> [rabbit_config:config_location()]. | -spec config_locations() -> [rabbit_config:config_location()]. | ||||||
| config_locations() -> | config_locations() -> | ||||||
|  | @ -1094,7 +1094,8 @@ log_broker_started(Plugins) -> | ||||||
|     Message = string:strip(rabbit_misc:format( |     Message = string:strip(rabbit_misc:format( | ||||||
|         "Server startup complete; ~b plugins started.~n~s", |         "Server startup complete; ~b plugins started.~n~s", | ||||||
|         [length(Plugins), PluginList]), right, $\n), |         [length(Plugins), PluginList]), right, $\n), | ||||||
|     rabbit_log:info(Message), |     ?LOG_INFO(Message, | ||||||
|  |               #{domain => ?RMQLOG_DOMAIN_GLOBAL}), | ||||||
|     io:format(" completed with ~p plugins.~n", [length(Plugins)]). |     io:format(" completed with ~p plugins.~n", [length(Plugins)]). | ||||||
| 
 | 
 | ||||||
| -define(RABBIT_TEXT_LOGO, | -define(RABBIT_TEXT_LOGO, | ||||||
|  | @ -1185,7 +1186,8 @@ log_motd() -> | ||||||
|                           _    -> [" ", Line, "\n"] |                           _    -> [" ", Line, "\n"] | ||||||
|                       end |                       end | ||||||
|                       || Line <- Lines], |                       || Line <- Lines], | ||||||
|             rabbit_log:info("~n~ts", [string:trim(Padded, trailing, [$\r, $\n])]) |             ?LOG_INFO("~n~ts", [string:trim(Padded, trailing, [$\r, $\n])], | ||||||
|  |                       #{domain => ?RMQLOG_DOMAIN_GLOBAL}) | ||||||
|     end. |     end. | ||||||
| 
 | 
 | ||||||
| log_banner() -> | log_banner() -> | ||||||
|  | @ -1216,7 +1218,8 @@ log_banner() -> | ||||||
|                     {K, V} -> |                     {K, V} -> | ||||||
|                         Format(K, V) |                         Format(K, V) | ||||||
|                 end || S <- Settings]), right, $\n), |                 end || S <- Settings]), right, $\n), | ||||||
|     rabbit_log:info("~n~ts", [Banner]). |     ?LOG_INFO("~n~ts", [Banner], | ||||||
|  |               #{domain => ?RMQLOG_DOMAIN_GLOBAL}). | ||||||
| 
 | 
 | ||||||
| warn_if_kernel_config_dubious() -> | warn_if_kernel_config_dubious() -> | ||||||
|     case os:type() of |     case os:type() of | ||||||
|  | @ -1225,16 +1228,18 @@ warn_if_kernel_config_dubious() -> | ||||||
|         _ -> |         _ -> | ||||||
|             case erlang:system_info(kernel_poll) of |             case erlang:system_info(kernel_poll) of | ||||||
|                 true  -> ok; |                 true  -> ok; | ||||||
|                 false -> rabbit_log:warning( |                 false -> ?LOG_WARNING( | ||||||
|                            "Kernel poll (epoll, kqueue, etc) is disabled. Throughput " |                            "Kernel poll (epoll, kqueue, etc) is disabled. " | ||||||
|                            "and CPU utilization may worsen.~n") |                            "Throughput and CPU utilization may worsen.", | ||||||
|  |                            #{domain => ?RMQLOG_DOMAIN_GLOBAL}) | ||||||
|             end |             end | ||||||
|     end, |     end, | ||||||
|     DirtyIOSchedulers = erlang:system_info(dirty_io_schedulers), |     DirtyIOSchedulers = erlang:system_info(dirty_io_schedulers), | ||||||
|     case DirtyIOSchedulers < ?DIRTY_IO_SCHEDULERS_WARNING_THRESHOLD of |     case DirtyIOSchedulers < ?DIRTY_IO_SCHEDULERS_WARNING_THRESHOLD of | ||||||
|         true  -> rabbit_log:warning( |         true  -> ?LOG_WARNING( | ||||||
|                    "Erlang VM is running with ~b dirty I/O schedulers, " |                    "Erlang VM is running with ~b dirty I/O schedulers, " | ||||||
|                    "file I/O performance may worsen~n", [DirtyIOSchedulers]); |                    "file I/O performance may worsen", [DirtyIOSchedulers], | ||||||
|  |                    #{domain => ?RMQLOG_DOMAIN_GLOBAL}); | ||||||
|         false -> ok |         false -> ok | ||||||
|     end, |     end, | ||||||
|     IDCOpts = case application:get_env(kernel, inet_default_connect_options) of |     IDCOpts = case application:get_env(kernel, inet_default_connect_options) of | ||||||
|  | @ -1242,8 +1247,9 @@ warn_if_kernel_config_dubious() -> | ||||||
|                   {ok, Val} -> Val |                   {ok, Val} -> Val | ||||||
|               end, |               end, | ||||||
|     case proplists:get_value(nodelay, IDCOpts, false) of |     case proplists:get_value(nodelay, IDCOpts, false) of | ||||||
|         false -> rabbit_log:warning("Nagle's algorithm is enabled for sockets, " |         false -> ?LOG_WARNING("Nagle's algorithm is enabled for sockets, " | ||||||
|                                     "network I/O latency will be higher~n"); |                               "network I/O latency will be higher", | ||||||
|  |                               #{domain => ?RMQLOG_DOMAIN_GLOBAL}); | ||||||
|         true  -> ok |         true  -> ok | ||||||
|     end. |     end. | ||||||
| 
 | 
 | ||||||
|  | @ -1259,7 +1265,8 @@ warn_if_disc_io_options_dubious() -> | ||||||
|                  CreditDiscBound, IoBatchSize) of |                  CreditDiscBound, IoBatchSize) of | ||||||
|         ok -> ok; |         ok -> ok; | ||||||
|         {error, {Reason, Vars}} -> |         {error, {Reason, Vars}} -> | ||||||
|             rabbit_log:warning(Reason, Vars) |             ?LOG_WARNING(Reason, Vars, | ||||||
|  |                          #{domain => ?RMQLOG_DOMAIN_GLOBAL}) | ||||||
|     end. |     end. | ||||||
| 
 | 
 | ||||||
| validate_msg_store_io_batch_size_and_credit_disc_bound(CreditDiscBound, | validate_msg_store_io_batch_size_and_credit_disc_bound(CreditDiscBound, | ||||||
|  | @ -1271,7 +1278,7 @@ validate_msg_store_io_batch_size_and_credit_disc_bound(CreditDiscBound, | ||||||
|             if IoBatchSize < ?IO_BATCH_SIZE -> |             if IoBatchSize < ?IO_BATCH_SIZE -> | ||||||
|                     throw({error, |                     throw({error, | ||||||
|                      {"io_batch_size of ~b lower than recommended value ~b, " |                      {"io_batch_size of ~b lower than recommended value ~b, " | ||||||
|                       "paging performance may worsen~n", |                       "paging performance may worsen", | ||||||
|                       [IoBatchSize, ?IO_BATCH_SIZE]}}); |                       [IoBatchSize, ?IO_BATCH_SIZE]}}); | ||||||
|                true -> |                true -> | ||||||
|                     ok |                     ok | ||||||
|  | @ -1292,7 +1299,7 @@ validate_msg_store_io_batch_size_and_credit_disc_bound(CreditDiscBound, | ||||||
|                     throw({error, |                     throw({error, | ||||||
|                      {"msg_store_credit_disc_bound {~b, ~b} lower than" |                      {"msg_store_credit_disc_bound {~b, ~b} lower than" | ||||||
|                       "recommended value {~b, ~b}," |                       "recommended value {~b, ~b}," | ||||||
|                       " paging performance may worsen~n", |                       " paging performance may worsen", | ||||||
|                       [IC, MCA, RIC, RMCA]}}); |                       [IC, MCA, RIC, RMCA]}}); | ||||||
|                true -> |                true -> | ||||||
|                     ok |                     ok | ||||||
|  | @ -1320,7 +1327,7 @@ validate_msg_store_io_batch_size_and_credit_disc_bound(CreditDiscBound, | ||||||
|                       {error, |                       {error, | ||||||
|                        {"msg_store_io_batch_size ~b should be bigger than the initial " |                        {"msg_store_io_batch_size ~b should be bigger than the initial " | ||||||
|                         "credit value from msg_store_credit_disc_bound ~b," |                         "credit value from msg_store_credit_disc_bound ~b," | ||||||
|                         " paging performance may worsen~n", |                         " paging performance may worsen", | ||||||
|                         [IoBatchSize, InitialCredit]}}); |                         [IoBatchSize, InitialCredit]}}); | ||||||
|                true -> |                true -> | ||||||
|                     ok |                     ok | ||||||
|  | @ -1498,8 +1505,10 @@ ensure_working_fhc() -> | ||||||
|             {ok, true}  -> "ON"; |             {ok, true}  -> "ON"; | ||||||
|             {ok, false} -> "OFF" |             {ok, false} -> "OFF" | ||||||
|         end, |         end, | ||||||
|         rabbit_log:info("FHC read buffering:  ~s~n", [ReadBuf]), |         ?LOG_INFO("FHC read buffering: ~s", [ReadBuf], | ||||||
|         rabbit_log:info("FHC write buffering: ~s~n", [WriteBuf]), |                   #{domain => ?RMQLOG_DOMAIN_GLOBAL}), | ||||||
|  |         ?LOG_INFO("FHC write buffering: ~s", [WriteBuf], | ||||||
|  |                   #{domain => ?RMQLOG_DOMAIN_GLOBAL}), | ||||||
|         Filename = filename:join(code:lib_dir(kernel, ebin), "kernel.app"), |         Filename = filename:join(code:lib_dir(kernel, ebin), "kernel.app"), | ||||||
|         {ok, Fd} = file_handle_cache:open(Filename, [raw, binary, read], []), |         {ok, Fd} = file_handle_cache:open(Filename, [raw, binary, read], []), | ||||||
|         {ok, _} = file_handle_cache:read(Fd, 1), |         {ok, _} = file_handle_cache:read(Fd, 1), | ||||||
|  |  | ||||||
|  | @ -85,7 +85,7 @@ try_authenticate(Module, Username, AuthProps) -> | ||||||
|     case Module:user_login_authentication(Username, AuthProps) of |     case Module:user_login_authentication(Username, AuthProps) of | ||||||
|         {ok, AuthUser}  -> {ok, AuthUser}; |         {ok, AuthUser}  -> {ok, AuthUser}; | ||||||
|         {error, E}      -> {refused, Username, |         {error, E}      -> {refused, Username, | ||||||
|                             "~s failed authenticating ~s: ~p~n", |                             "~s failed authenticating ~s: ~p", | ||||||
|                             [Module, Username, E]}; |                             [Module, Username, E]}; | ||||||
|         {refused, F, A} -> {refused, Username, F, A} |         {refused, F, A} -> {refused, Username, F, A} | ||||||
|     end. |     end. | ||||||
|  | @ -97,7 +97,7 @@ try_authorize(Modules, Username, AuthProps) -> | ||||||
|                   {ok, Impl, Tags}-> {ok, [{Module, Impl} | ModsImpls], ModsTags ++ Tags}; |                   {ok, Impl, Tags}-> {ok, [{Module, Impl} | ModsImpls], ModsTags ++ Tags}; | ||||||
|                   {ok, Impl}      -> {ok, [{Module, Impl} | ModsImpls], ModsTags}; |                   {ok, Impl}      -> {ok, [{Module, Impl} | ModsImpls], ModsTags}; | ||||||
|                   {error, E}      -> {refused, Username, |                   {error, E}      -> {refused, Username, | ||||||
|                                         "~s failed authorizing ~s: ~p~n", |                                         "~s failed authorizing ~s: ~p", | ||||||
|                                         [Module, Username, E]}; |                                         [Module, Username, E]}; | ||||||
|                   {refused, F, A} -> {refused, Username, F, A} |                   {refused, F, A} -> {refused, Username, F, A} | ||||||
|               end; |               end; | ||||||
|  | @ -215,7 +215,7 @@ check_access(Fun, Module, ErrStr, ErrArgs, ErrName) -> | ||||||
|         false -> |         false -> | ||||||
|             rabbit_misc:protocol_error(ErrName, ErrStr, ErrArgs); |             rabbit_misc:protocol_error(ErrName, ErrStr, ErrArgs); | ||||||
|         {error, E}  -> |         {error, E}  -> | ||||||
|             FullErrStr = ErrStr ++ ", backend ~s returned an error: ~p~n", |             FullErrStr = ErrStr ++ ", backend ~s returned an error: ~p", | ||||||
|             FullErrArgs = ErrArgs ++ [Module, E], |             FullErrArgs = ErrArgs ++ [Module, E], | ||||||
|             rabbit_log:error(FullErrStr, FullErrArgs), |             rabbit_log:error(FullErrStr, FullErrArgs), | ||||||
|             rabbit_misc:protocol_error(ErrName, FullErrStr, FullErrArgs) |             rabbit_misc:protocol_error(ErrName, FullErrStr, FullErrArgs) | ||||||
|  |  | ||||||
|  | @ -232,7 +232,7 @@ handle_event({node_down, Node}, #alarms{alarmed_nodes = AN} = State) -> | ||||||
|                             error   -> [] |                             error   -> [] | ||||||
|                         end, |                         end, | ||||||
|     {ok, lists:foldr(fun(Source, AccState) -> |     {ok, lists:foldr(fun(Source, AccState) -> | ||||||
|                              rabbit_log:warning("~s resource limit alarm cleared for dead node ~p~n", |                              rabbit_log:warning("~s resource limit alarm cleared for dead node ~p", | ||||||
|                                                 [Source, Node]), |                                                 [Source, Node]), | ||||||
|                              maybe_alert(fun dict_unappend/3, Node, Source, false, AccState) |                              maybe_alert(fun dict_unappend/3, Node, Source, false, AccState) | ||||||
|                      end, State, AlarmsForDeadNode)}; |                      end, State, AlarmsForDeadNode)}; | ||||||
|  | @ -284,7 +284,7 @@ maybe_alert(UpdateFun, Node, Source, WasAlertAdded, | ||||||
|     StillHasAlerts = lists:any(fun ({_Node, NodeAlerts}) -> lists:member(Source, NodeAlerts) end, dict:to_list(AN1)), |     StillHasAlerts = lists:any(fun ({_Node, NodeAlerts}) -> lists:member(Source, NodeAlerts) end, dict:to_list(AN1)), | ||||||
|     case StillHasAlerts of |     case StillHasAlerts of | ||||||
|         true -> ok; |         true -> ok; | ||||||
|         false -> rabbit_log:warning("~s resource limit alarm cleared across the cluster~n", [Source]) |         false -> rabbit_log:warning("~s resource limit alarm cleared across the cluster", [Source]) | ||||||
|     end, |     end, | ||||||
|     Alert = {WasAlertAdded, StillHasAlerts, Node}, |     Alert = {WasAlertAdded, StillHasAlerts, Node}, | ||||||
|     case node() of |     case node() of | ||||||
|  | @ -336,11 +336,11 @@ handle_set_alarm({file_descriptor_limit, []}, State) -> | ||||||
|       "********************************************************************~n"), |       "********************************************************************~n"), | ||||||
|     {ok, State}; |     {ok, State}; | ||||||
| handle_set_alarm(Alarm, State) -> | handle_set_alarm(Alarm, State) -> | ||||||
|     rabbit_log:warning("alarm '~p' set~n", [Alarm]), |     rabbit_log:warning("alarm '~p' set", [Alarm]), | ||||||
|     {ok, State}. |     {ok, State}. | ||||||
| 
 | 
 | ||||||
| handle_clear_resource_alarm(Source, Node, State) -> | handle_clear_resource_alarm(Source, Node, State) -> | ||||||
|     rabbit_log:warning("~s resource limit alarm cleared on node ~p~n", |     rabbit_log:warning("~s resource limit alarm cleared on node ~p", | ||||||
|                        [Source, Node]), |                        [Source, Node]), | ||||||
|     {ok, maybe_alert(fun dict_unappend/3, Node, Source, false, State)}. |     {ok, maybe_alert(fun dict_unappend/3, Node, Source, false, State)}. | ||||||
| 
 | 
 | ||||||
|  | @ -348,7 +348,7 @@ handle_clear_alarm(file_descriptor_limit, State) -> | ||||||
|     rabbit_log:warning("file descriptor limit alarm cleared~n"), |     rabbit_log:warning("file descriptor limit alarm cleared~n"), | ||||||
|     {ok, State}; |     {ok, State}; | ||||||
| handle_clear_alarm(Alarm, State) -> | handle_clear_alarm(Alarm, State) -> | ||||||
|     rabbit_log:warning("alarm '~p' cleared~n", [Alarm]), |     rabbit_log:warning("alarm '~p' cleared", [Alarm]), | ||||||
|     {ok, State}. |     {ok, State}. | ||||||
| 
 | 
 | ||||||
| is_node_alarmed(Source, Node, #alarms{alarmed_nodes = AN}) -> | is_node_alarmed(Source, Node, #alarms{alarmed_nodes = AN}) -> | ||||||
|  |  | ||||||
|  | @ -111,7 +111,7 @@ warn_file_limit() -> | ||||||
|     case file_handle_cache:get_limit() < L of |     case file_handle_cache:get_limit() < L of | ||||||
|         true -> |         true -> | ||||||
|             rabbit_log:warning( |             rabbit_log:warning( | ||||||
|               "Recovering ~p queues, available file handles: ~p. Please increase max open file handles limit to at least ~p!~n", |               "Recovering ~p queues, available file handles: ~p. Please increase max open file handles limit to at least ~p!", | ||||||
|               [L, file_handle_cache:get_limit(), L]); |               [L, file_handle_cache:get_limit(), L]); | ||||||
|         false -> |         false -> | ||||||
|             ok |             ok | ||||||
|  | @ -626,7 +626,7 @@ retry_wait(Q, F, E, RetriesLeft) -> | ||||||
|                     % The old check would have crashed here, |                     % The old check would have crashed here, | ||||||
|                     % instead, log it and run the exit fun. absent & alive is weird, |                     % instead, log it and run the exit fun. absent & alive is weird, | ||||||
|                     % but better than crashing with badmatch,true |                     % but better than crashing with badmatch,true | ||||||
|                     rabbit_log:debug("Unexpected alive queue process ~p~n", [QPid]), |                     rabbit_log:debug("Unexpected alive queue process ~p", [QPid]), | ||||||
|                     E({absent, Q, alive}); |                     E({absent, Q, alive}); | ||||||
|                 false -> |                 false -> | ||||||
|                     ok % Expected result |                     ok % Expected result | ||||||
|  | @ -1234,7 +1234,7 @@ count(VHost) -> | ||||||
|     %% that requires a proper consensus algorithm. |     %% that requires a proper consensus algorithm. | ||||||
|     length(list_for_count(VHost)) |     length(list_for_count(VHost)) | ||||||
|   catch _:Err -> |   catch _:Err -> | ||||||
|     rabbit_log:error("Failed to fetch number of queues in vhost ~p:~n~p~n", |     rabbit_log:error("Failed to fetch number of queues in vhost ~p:~n~p", | ||||||
|                      [VHost, Err]), |                      [VHost, Err]), | ||||||
|     0 |     0 | ||||||
|   end. |   end. | ||||||
|  |  | ||||||
|  | @ -197,7 +197,7 @@ validate_and_alternate_credentials(Username, Password, ActingUser, Fun) -> | ||||||
|         ok           -> |         ok           -> | ||||||
|             Fun(Username, Password, ActingUser); |             Fun(Username, Password, ActingUser); | ||||||
|         {error, Err} -> |         {error, Err} -> | ||||||
|             rabbit_log:error("Credential validation for '~s' failed!~n", [Username]), |             rabbit_log:error("Credential validation for '~s' failed!", [Username]), | ||||||
|             {error, Err} |             {error, Err} | ||||||
|     end. |     end. | ||||||
| 
 | 
 | ||||||
|  | @ -334,7 +334,7 @@ change_password_sans_validation(Username, Password, ActingUser) -> | ||||||
| -spec clear_password(rabbit_types:username(), rabbit_types:username()) -> 'ok'. | -spec clear_password(rabbit_types:username(), rabbit_types:username()) -> 'ok'. | ||||||
| 
 | 
 | ||||||
| clear_password(Username, ActingUser) -> | clear_password(Username, ActingUser) -> | ||||||
|     rabbit_log:info("Clearing password for '~s'~n", [Username]), |     rabbit_log:info("Clearing password for '~s'", [Username]), | ||||||
|     R = change_password_hash(Username, <<"">>), |     R = change_password_hash(Username, <<"">>), | ||||||
|     rabbit_event:notify(user_password_cleared, |     rabbit_event:notify(user_password_cleared, | ||||||
|                         [{name, Username}, |                         [{name, Username}, | ||||||
|  |  | ||||||
|  | @ -118,7 +118,7 @@ init() -> | ||||||
|     case State of |     case State of | ||||||
|         {leader_waiting, Winner, _} -> |         {leader_waiting, Winner, _} -> | ||||||
|             rabbit_log:info( |             rabbit_log:info( | ||||||
|               "Autoheal: in progress, requesting report from ~p~n", [Winner]), |               "Autoheal: in progress, requesting report from ~p", [Winner]), | ||||||
|             send(Winner, report_autoheal_status); |             send(Winner, report_autoheal_status); | ||||||
|         _ -> |         _ -> | ||||||
|             ok |             ok | ||||||
|  | @ -129,7 +129,7 @@ maybe_start(not_healing) -> | ||||||
|     case enabled() of |     case enabled() of | ||||||
|         true  -> Leader = leader(), |         true  -> Leader = leader(), | ||||||
|                  send(Leader, {request_start, node()}), |                  send(Leader, {request_start, node()}), | ||||||
|                  rabbit_log:info("Autoheal request sent to ~p~n", [Leader]), |                  rabbit_log:info("Autoheal request sent to ~p", [Leader]), | ||||||
|                  not_healing; |                  not_healing; | ||||||
|         false -> not_healing |         false -> not_healing | ||||||
|     end; |     end; | ||||||
|  | @ -150,7 +150,7 @@ leader() -> | ||||||
| %% This is the winner receiving its last notification that a node has | %% This is the winner receiving its last notification that a node has | ||||||
| %% stopped - all nodes can now start again | %% stopped - all nodes can now start again | ||||||
| rabbit_down(Node, {winner_waiting, [Node], Notify}) -> | rabbit_down(Node, {winner_waiting, [Node], Notify}) -> | ||||||
|     rabbit_log:info("Autoheal: final node has stopped, starting...~n",[]), |     rabbit_log:info("Autoheal: final node has stopped, starting...",[]), | ||||||
|     winner_finish(Notify); |     winner_finish(Notify); | ||||||
| 
 | 
 | ||||||
| rabbit_down(Node, {winner_waiting, WaitFor, Notify}) -> | rabbit_down(Node, {winner_waiting, WaitFor, Notify}) -> | ||||||
|  | @ -173,25 +173,25 @@ node_down(Node, {winner_waiting, _, Notify}) -> | ||||||
| 
 | 
 | ||||||
| node_down(Node, {leader_waiting, Node, _Notify}) -> | node_down(Node, {leader_waiting, Node, _Notify}) -> | ||||||
|     %% The winner went down, we don't know what to do so we simply abort. |     %% The winner went down, we don't know what to do so we simply abort. | ||||||
|     rabbit_log:info("Autoheal: aborting - winner ~p went down~n", [Node]), |     rabbit_log:info("Autoheal: aborting - winner ~p went down", [Node]), | ||||||
|     not_healing; |     not_healing; | ||||||
| 
 | 
 | ||||||
| node_down(Node, {leader_waiting, _, _} = St) -> | node_down(Node, {leader_waiting, _, _} = St) -> | ||||||
|     %% If it is a partial partition, the winner might continue with the |     %% If it is a partial partition, the winner might continue with the | ||||||
|     %% healing process. If it is a full partition, the winner will also |     %% healing process. If it is a full partition, the winner will also | ||||||
|     %% see it and abort. Let's wait for it. |     %% see it and abort. Let's wait for it. | ||||||
|     rabbit_log:info("Autoheal: ~p went down, waiting for winner decision ~n", [Node]), |     rabbit_log:info("Autoheal: ~p went down, waiting for winner decision ", [Node]), | ||||||
|     St; |     St; | ||||||
| 
 | 
 | ||||||
| node_down(Node, _State) -> | node_down(Node, _State) -> | ||||||
|     rabbit_log:info("Autoheal: aborting - ~p went down~n", [Node]), |     rabbit_log:info("Autoheal: aborting - ~p went down", [Node]), | ||||||
|     not_healing. |     not_healing. | ||||||
| 
 | 
 | ||||||
| %% If the process that has to restart the node crashes for an unexpected reason, | %% If the process that has to restart the node crashes for an unexpected reason, | ||||||
| %% we go back to a not healing state so the node is able to recover. | %% we go back to a not healing state so the node is able to recover. | ||||||
| process_down({'EXIT', Pid, Reason}, {restarting, Pid}) when Reason =/= normal -> | process_down({'EXIT', Pid, Reason}, {restarting, Pid}) when Reason =/= normal -> | ||||||
|     rabbit_log:info("Autoheal: aborting - the process responsible for restarting the " |     rabbit_log:info("Autoheal: aborting - the process responsible for restarting the " | ||||||
|                     "node terminated with reason: ~p~n", [Reason]), |                     "node terminated with reason: ~p", [Reason]), | ||||||
|     not_healing; |     not_healing; | ||||||
| 
 | 
 | ||||||
| process_down(_, State) -> | process_down(_, State) -> | ||||||
|  | @ -201,17 +201,17 @@ process_down(_, State) -> | ||||||
| %% TODO should we try to debounce this? | %% TODO should we try to debounce this? | ||||||
| handle_msg({request_start, Node}, | handle_msg({request_start, Node}, | ||||||
|            not_healing, Partitions) -> |            not_healing, Partitions) -> | ||||||
|     rabbit_log:info("Autoheal request received from ~p~n", [Node]), |     rabbit_log:info("Autoheal request received from ~p", [Node]), | ||||||
|     case check_other_nodes(Partitions) of |     case check_other_nodes(Partitions) of | ||||||
|         {error, E} -> |         {error, E} -> | ||||||
|             rabbit_log:info("Autoheal request denied: ~s~n", [fmt_error(E)]), |             rabbit_log:info("Autoheal request denied: ~s", [fmt_error(E)]), | ||||||
|             not_healing; |             not_healing; | ||||||
|         {ok, AllPartitions} -> |         {ok, AllPartitions} -> | ||||||
|             {Winner, Losers} = make_decision(AllPartitions), |             {Winner, Losers} = make_decision(AllPartitions), | ||||||
|             rabbit_log:info("Autoheal decision~n" |             rabbit_log:info("Autoheal decision~n" | ||||||
|                             "  * Partitions: ~p~n" |                             "  * Partitions: ~p~n" | ||||||
|                             "  * Winner:     ~p~n" |                             "  * Winner:     ~p~n" | ||||||
|                             "  * Losers:     ~p~n", |                             "  * Losers:     ~p", | ||||||
|                             [AllPartitions, Winner, Losers]), |                             [AllPartitions, Winner, Losers]), | ||||||
|             case node() =:= Winner of |             case node() =:= Winner of | ||||||
|                 true  -> handle_msg({become_winner, Losers}, |                 true  -> handle_msg({become_winner, Losers}, | ||||||
|  | @ -224,12 +224,12 @@ handle_msg({request_start, Node}, | ||||||
| handle_msg({request_start, Node}, | handle_msg({request_start, Node}, | ||||||
|            State, _Partitions) -> |            State, _Partitions) -> | ||||||
|     rabbit_log:info("Autoheal request received from ~p when healing; " |     rabbit_log:info("Autoheal request received from ~p when healing; " | ||||||
|                     "ignoring~n", [Node]), |                     "ignoring", [Node]), | ||||||
|     State; |     State; | ||||||
| 
 | 
 | ||||||
| handle_msg({become_winner, Losers}, | handle_msg({become_winner, Losers}, | ||||||
|            not_healing, _Partitions) -> |            not_healing, _Partitions) -> | ||||||
|     rabbit_log:info("Autoheal: I am the winner, waiting for ~p to stop~n", |     rabbit_log:info("Autoheal: I am the winner, waiting for ~p to stop", | ||||||
|                     [Losers]), |                     [Losers]), | ||||||
|     stop_partition(Losers); |     stop_partition(Losers); | ||||||
| 
 | 
 | ||||||
|  | @ -238,7 +238,7 @@ handle_msg({become_winner, Losers}, | ||||||
|     %% The leader has aborted the healing, might have seen us down but |     %% The leader has aborted the healing, might have seen us down but | ||||||
|     %% we didn't see the same. Let's try again as it is the same partition. |     %% we didn't see the same. Let's try again as it is the same partition. | ||||||
|     rabbit_log:info("Autoheal: I am the winner and received a duplicated " |     rabbit_log:info("Autoheal: I am the winner and received a duplicated " | ||||||
| 		    "request, waiting again for ~p to stop~n", [Losers]), | 		    "request, waiting again for ~p to stop", [Losers]), | ||||||
|     stop_partition(Losers); |     stop_partition(Losers); | ||||||
| 
 | 
 | ||||||
| handle_msg({become_winner, _}, | handle_msg({become_winner, _}, | ||||||
|  | @ -246,7 +246,7 @@ handle_msg({become_winner, _}, | ||||||
|     %% Something has happened to the leader, it might have seen us down but we |     %% Something has happened to the leader, it might have seen us down but we | ||||||
|     %% are still alive. Partitions have changed, cannot continue. |     %% are still alive. Partitions have changed, cannot continue. | ||||||
|     rabbit_log:info("Autoheal: I am the winner and received another healing " |     rabbit_log:info("Autoheal: I am the winner and received another healing " | ||||||
| 		    "request, partitions have changed to ~p. Aborting ~n", [Losers]), | 		    "request, partitions have changed to ~p. Aborting", [Losers]), | ||||||
|     winner_finish(Losers), |     winner_finish(Losers), | ||||||
|     not_healing; |     not_healing; | ||||||
| 
 | 
 | ||||||
|  | @ -264,7 +264,7 @@ handle_msg({winner_is, Winner}, State = {leader_waiting, Winner, _}, | ||||||
| handle_msg(Request, {restarting, Pid} = St, _Partitions) -> | handle_msg(Request, {restarting, Pid} = St, _Partitions) -> | ||||||
|     %% ignore, we can contribute no further |     %% ignore, we can contribute no further | ||||||
|     rabbit_log:info("Autoheal: Received the request ~p while waiting for ~p " |     rabbit_log:info("Autoheal: Received the request ~p while waiting for ~p " | ||||||
|                     "to restart the node. Ignoring it ~n", [Request, Pid]), |                     "to restart the node. Ignoring it", [Request, Pid]), | ||||||
|     St; |     St; | ||||||
| 
 | 
 | ||||||
| handle_msg(report_autoheal_status, not_healing, _Partitions) -> | handle_msg(report_autoheal_status, not_healing, _Partitions) -> | ||||||
|  | @ -286,14 +286,14 @@ handle_msg({autoheal_finished, Winner}, | ||||||
|     %% The winner is finished with the autoheal process and notified us |     %% The winner is finished with the autoheal process and notified us | ||||||
|     %% (the leader). We can transition to the "not_healing" state and |     %% (the leader). We can transition to the "not_healing" state and | ||||||
|     %% accept new requests. |     %% accept new requests. | ||||||
|     rabbit_log:info("Autoheal finished according to winner ~p~n", [Winner]), |     rabbit_log:info("Autoheal finished according to winner ~p", [Winner]), | ||||||
|     not_healing; |     not_healing; | ||||||
| 
 | 
 | ||||||
| handle_msg({autoheal_finished, Winner}, not_healing, _Partitions) | handle_msg({autoheal_finished, Winner}, not_healing, _Partitions) | ||||||
|            when Winner =:= node() -> |            when Winner =:= node() -> | ||||||
|     %% We are the leader and the winner. The state already transitioned |     %% We are the leader and the winner. The state already transitioned | ||||||
|     %% to "not_healing" at the end of the autoheal process. |     %% to "not_healing" at the end of the autoheal process. | ||||||
|     rabbit_log:info("Autoheal finished according to winner ~p~n", [node()]), |     rabbit_log:info("Autoheal finished according to winner ~p", [node()]), | ||||||
|     not_healing; |     not_healing; | ||||||
| 
 | 
 | ||||||
| handle_msg({autoheal_finished, Winner}, not_healing, _Partitions) -> | handle_msg({autoheal_finished, Winner}, not_healing, _Partitions) -> | ||||||
|  | @ -301,7 +301,7 @@ handle_msg({autoheal_finished, Winner}, not_healing, _Partitions) -> | ||||||
|     %% transitioned to not_healing. However, the winner was still able |     %% transitioned to not_healing. However, the winner was still able | ||||||
|     %% to finish. Let it pass. |     %% to finish. Let it pass. | ||||||
|     rabbit_log:info("Autoheal finished according to winner ~p." |     rabbit_log:info("Autoheal finished according to winner ~p." | ||||||
| 		    " Unexpected, I might have previously seen the winner down~n", [Winner]), | 		    " Unexpected, I might have previously seen the winner down", [Winner]), | ||||||
|     not_healing. |     not_healing. | ||||||
| 
 | 
 | ||||||
| %%---------------------------------------------------------------------------- | %%---------------------------------------------------------------------------- | ||||||
|  | @ -309,7 +309,7 @@ handle_msg({autoheal_finished, Winner}, not_healing, _Partitions) -> | ||||||
| send(Node, Msg) -> {?SERVER, Node} ! {autoheal_msg, Msg}. | send(Node, Msg) -> {?SERVER, Node} ! {autoheal_msg, Msg}. | ||||||
| 
 | 
 | ||||||
| abort(Down, Notify) -> | abort(Down, Notify) -> | ||||||
|     rabbit_log:info("Autoheal: aborting - ~p down~n", [Down]), |     rabbit_log:info("Autoheal: aborting - ~p down", [Down]), | ||||||
|     %% Make sure any nodes waiting for us start - it won't necessarily |     %% Make sure any nodes waiting for us start - it won't necessarily | ||||||
|     %% heal the partition but at least they won't get stuck. |     %% heal the partition but at least they won't get stuck. | ||||||
|     %% If we are executing this, we are not stopping. Thus, don't wait |     %% If we are executing this, we are not stopping. Thus, don't wait | ||||||
|  | @ -354,15 +354,14 @@ wait_for_supervisors(Monitors) -> | ||||||
| 		60000 -> | 		60000 -> | ||||||
| 		    AliveLosers = [Node || {_, Node} <- pmon:monitored(Monitors)], | 		    AliveLosers = [Node || {_, Node} <- pmon:monitored(Monitors)], | ||||||
| 		    rabbit_log:info("Autoheal: mnesia in nodes ~p is still up, sending " | 		    rabbit_log:info("Autoheal: mnesia in nodes ~p is still up, sending " | ||||||
| 				    "winner notification again to these ~n", [AliveLosers]), | 				    "winner notification again to these", [AliveLosers]), | ||||||
| 		    [send(L, {winner_is, node()}) || L <- AliveLosers], | 		    [send(L, {winner_is, node()}) || L <- AliveLosers], | ||||||
| 		    wait_for_mnesia_shutdown(AliveLosers) | 		    wait_for_mnesia_shutdown(AliveLosers) | ||||||
| 	    end | 	    end | ||||||
|     end. |     end. | ||||||
| 
 | 
 | ||||||
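The hunks in this file follow the mechanical pattern applied throughout this change: the trailing "~n" is removed from log format strings, on the assumption that the Logger handler's formatter terminates each event with its own newline, so an explicit "~n" would only produce blank lines. A minimal, hypothetical sketch of the call-shape change (module name, message and arguments below are illustrative, not taken from this diff):

    -module(logging_style_example).
    -export([lager_era/1, logger_era/1]).

    %% Old convention: an explicit "~n" at the end of every format string.
    lager_era(Node) ->
        rabbit_log:info("Example event on node ~p~n", [Node]).

    %% New convention: no trailing "~n"; the Logger formatter adds the newline.
    logger_era(Node) ->
        rabbit_log:info("Example event on node ~p", [Node]).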
| restart_loser(State, Winner) -> | restart_loser(State, Winner) -> | ||||||
|     rabbit_log:warning( |     rabbit_log:warning("Autoheal: we were selected to restart; winner is ~p", [Winner]), | ||||||
|       "Autoheal: we were selected to restart; winner is ~p~n", [Winner]), |  | ||||||
|     NextStateTimeout = application:get_env(rabbit, autoheal_state_transition_timeout, 60000), |     NextStateTimeout = application:get_env(rabbit, autoheal_state_transition_timeout, 60000), | ||||||
|     rabbit_node_monitor:run_outside_applications( |     rabbit_node_monitor:run_outside_applications( | ||||||
|       fun () -> |       fun () -> | ||||||
|  |  | ||||||
|  | @ -948,7 +948,7 @@ handle_exception(Reason, State = #ch{cfg = #conf{protocol = Protocol, | ||||||
|         {Channel, CloseMethod} -> |         {Channel, CloseMethod} -> | ||||||
|             rabbit_log_channel:error( |             rabbit_log_channel:error( | ||||||
|                 "Channel error on connection ~p (~s, vhost: '~s'," |                 "Channel error on connection ~p (~s, vhost: '~s'," | ||||||
|                 " user: '~s'), channel ~p:~n~s~n", |                 " user: '~s'), channel ~p:~n~s", | ||||||
|                 [ConnPid, ConnName, VHost, User#user.username, |                 [ConnPid, ConnName, VHost, User#user.username, | ||||||
|                  Channel, format_soft_error(Reason)]), |                  Channel, format_soft_error(Reason)]), | ||||||
|             ok = rabbit_writer:send_command(WriterPid, CloseMethod), |             ok = rabbit_writer:send_command(WriterPid, CloseMethod), | ||||||
|  |  | ||||||
|  | @ -53,8 +53,7 @@ check_no_overlap1(Sets) -> | ||||||
|                     case sets:size(Is) of |                     case sets:size(Is) of | ||||||
|                         0 -> ok; |                         0 -> ok; | ||||||
|                         _ -> |                         _ -> | ||||||
|                             internal_error("Interceptor: more than one " |                             internal_error("Interceptor: more than one module handles ~p", [Is]) | ||||||
|                                                 "module handles ~p~n", [Is]) |  | ||||||
|                       end, |                       end, | ||||||
|                     sets:union(Set, Union) |                     sets:union(Set, Union) | ||||||
|                 end, |                 end, | ||||||
|  |  | ||||||
|  | @ -117,7 +117,7 @@ handle_cast({user_deleted, Details}) -> | ||||||
|     ok; |     ok; | ||||||
| handle_cast({node_deleted, Details}) -> | handle_cast({node_deleted, Details}) -> | ||||||
|     Node = pget(node, Details), |     Node = pget(node, Details), | ||||||
|     rabbit_log_connection:info( |     rabbit_log_channel:info( | ||||||
|         "Node '~s' was removed from the cluster, deleting" |         "Node '~s' was removed from the cluster, deleting" | ||||||
|         " its channel tracking tables...", [Node]), |         " its channel tracking tables...", [Node]), | ||||||
|     delete_tracked_channels_table_for_node(Node), |     delete_tracked_channels_table_for_node(Node), | ||||||
|  |  | ||||||
|  | @ -437,7 +437,7 @@ recover_durable_queues(QueuesAndRecoveryTerms) -> | ||||||
|         gen_server2:mcall( |         gen_server2:mcall( | ||||||
|           [{rabbit_amqqueue_sup_sup:start_queue_process(node(), Q, recovery), |           [{rabbit_amqqueue_sup_sup:start_queue_process(node(), Q, recovery), | ||||||
|             {init, {self(), Terms}}} || {Q, Terms} <- QueuesAndRecoveryTerms]), |             {init, {self(), Terms}}} || {Q, Terms} <- QueuesAndRecoveryTerms]), | ||||||
|     [rabbit_log:error("Queue ~p failed to initialise: ~p~n", |     [rabbit_log:error("Queue ~p failed to initialise: ~p", | ||||||
|                       [Pid, Error]) || {Pid, Error} <- Failures], |                       [Pid, Error]) || {Pid, Error} <- Failures], | ||||||
|     [Q || {_, {new, Q}} <- Results]. |     [Q || {_, {new, Q}} <- Results]. | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -140,7 +140,7 @@ update_x_death_header(Info, Headers) -> | ||||||
|               [{table, rabbit_misc:sort_field_table(Info1)} | Others]); |               [{table, rabbit_misc:sort_field_table(Info1)} | Others]); | ||||||
|         {<<"x-death">>, InvalidType, Header} -> |         {<<"x-death">>, InvalidType, Header} -> | ||||||
|             rabbit_log:warning("Message has invalid x-death header (type: ~p)." |             rabbit_log:warning("Message has invalid x-death header (type: ~p)." | ||||||
|                                " Resetting header ~p~n", |                                " Resetting header ~p", | ||||||
|                                [InvalidType, Header]), |                                [InvalidType, Header]), | ||||||
|             %% if x-death is something other than an array (list) |             %% if x-death is something other than an array (list) | ||||||
|             %% then we reset it: this happens when some clients consume |             %% then we reset it: this happens when some clients consume | ||||||
|  | @ -247,7 +247,7 @@ log_cycle_once(Queues) -> | ||||||
|         true      -> ok; |         true      -> ok; | ||||||
|         undefined -> rabbit_log:warning( |         undefined -> rabbit_log:warning( | ||||||
|                        "Message dropped. Dead-letter queues cycle detected" ++ |                        "Message dropped. Dead-letter queues cycle detected" ++ | ||||||
|                            ": ~p~nThis cycle will NOT be reported again.~n", |                            ": ~p~nThis cycle will NOT be reported again.", | ||||||
|                        [Queues]), |                        [Queues]), | ||||||
|                      put(Key, true) |                      put(Key, true) | ||||||
|     end. |     end. | ||||||
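Note that embedded "~n" sequences, such as the one in the dead-letter cycle warning above, are kept: they format deliberately multi-line messages, and only the final "~n" becomes redundant once the formatter supplies the terminating newline. A small hypothetical sketch of that convention (function and variable names are illustrative):

    %% Keep internal "~n" for intentional line breaks; drop only the final one.
    log_multiline_example(First, Second) ->
        rabbit_log:warning("Example multi-line report:~n"
                           "  * first detail:  ~p~n"
                           "  * second detail: ~p",
                           [First, Second]).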
|  |  | ||||||
|  | @ -199,7 +199,7 @@ dir() -> rabbit_mnesia:dir(). | ||||||
| set_disk_limits(State, Limit0) -> | set_disk_limits(State, Limit0) -> | ||||||
|     Limit = interpret_limit(Limit0), |     Limit = interpret_limit(Limit0), | ||||||
|     State1 = State#state { limit = Limit }, |     State1 = State#state { limit = Limit }, | ||||||
|     rabbit_log:info("Disk free limit set to ~pMB~n", |     rabbit_log:info("Disk free limit set to ~pMB", | ||||||
|                     [trunc(Limit / 1000000)]), |                     [trunc(Limit / 1000000)]), | ||||||
|     internal_update(State1). |     internal_update(State1). | ||||||
| 
 | 
 | ||||||
|  | @ -283,7 +283,7 @@ interpret_limit(Absolute) -> | ||||||
| 
 | 
 | ||||||
| emit_update_info(StateStr, CurrentFree, Limit) -> | emit_update_info(StateStr, CurrentFree, Limit) -> | ||||||
|     rabbit_log:info( |     rabbit_log:info( | ||||||
|       "Free disk space is ~s. Free bytes: ~p. Limit: ~p~n", |       "Free disk space is ~s. Free bytes: ~p. Limit: ~p", | ||||||
|       [StateStr, CurrentFree, Limit]). |       [StateStr, CurrentFree, Limit]). | ||||||
| 
 | 
 | ||||||
| start_timer(State) -> | start_timer(State) -> | ||||||
|  | @ -306,11 +306,11 @@ enable(#state{dir = Dir, interval = Interval, limit = Limit, retries = Retries} | ||||||
|     case {catch get_disk_free(Dir), |     case {catch get_disk_free(Dir), | ||||||
|           vm_memory_monitor:get_total_memory()} of |           vm_memory_monitor:get_total_memory()} of | ||||||
|         {N1, N2} when is_integer(N1), is_integer(N2) -> |         {N1, N2} when is_integer(N1), is_integer(N2) -> | ||||||
|             rabbit_log:info("Enabling free disk space monitoring~n", []), |             rabbit_log:info("Enabling free disk space monitoring", []), | ||||||
|             start_timer(set_disk_limits(State, Limit)); |             start_timer(set_disk_limits(State, Limit)); | ||||||
|         Err -> |         Err -> | ||||||
|             rabbit_log:info("Free disk space monitor encountered an error " |             rabbit_log:info("Free disk space monitor encountered an error " | ||||||
|                             "(e.g. failed to parse output from OS tools): ~p, retries left: ~b~n", |                             "(e.g. failed to parse output from OS tools): ~p, retries left: ~b", | ||||||
|                             [Err, Retries]), |                             [Err, Retries]), | ||||||
|             erlang:send_after(Interval, self(), try_enable), |             erlang:send_after(Interval, self(), try_enable), | ||||||
|             State#state{enabled = false} |             State#state{enabled = false} | ||||||
|  |  | ||||||
|  | @ -86,10 +86,10 @@ check_epmd(State = #state{mod  = Mod, | ||||||
|     {ok, State#state{port = Port1}}. |     {ok, State#state{port = Port1}}. | ||||||
| 
 | 
 | ||||||
| handle_port_please(init, noport, Me, Port) -> | handle_port_please(init, noport, Me, Port) -> | ||||||
|     rabbit_log:info("epmd does not know us, re-registering as ~s~n", [Me]), |     rabbit_log:info("epmd does not know us, re-registering as ~s", [Me]), | ||||||
|     {ok, Port}; |     {ok, Port}; | ||||||
| handle_port_please(check, noport, Me, Port) -> | handle_port_please(check, noport, Me, Port) -> | ||||||
|     rabbit_log:warning("epmd does not know us, re-registering ~s at port ~b~n", [Me, Port]), |     rabbit_log:warning("epmd does not know us, re-registering ~s at port ~b", [Me, Port]), | ||||||
|     {ok, Port}; |     {ok, Port}; | ||||||
| handle_port_please(_, closed, _Me, Port) -> | handle_port_please(_, closed, _Me, Port) -> | ||||||
|     rabbit_log:error("epmd monitor failed to retrieve our port from epmd: closed"), |     rabbit_log:error("epmd monitor failed to retrieve our port from epmd: closed"), | ||||||
|  |  | ||||||
|  | @ -574,7 +574,7 @@ peek_serial(XName, LockType) -> | ||||||
|     end. |     end. | ||||||
| 
 | 
 | ||||||
| invalid_module(T) -> | invalid_module(T) -> | ||||||
|     rabbit_log:warning("Could not find exchange type ~s.~n", [T]), |     rabbit_log:warning("Could not find exchange type ~s.", [T]), | ||||||
|     put({xtype_to_module, T}, rabbit_exchange_type_invalid), |     put({xtype_to_module, T}, rabbit_exchange_type_invalid), | ||||||
|     rabbit_exchange_type_invalid. |     rabbit_exchange_type_invalid. | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -682,7 +682,6 @@ maybe_add_action(Action, Acc, State) -> | ||||||
|     {[Action | Acc], State}. |     {[Action | Acc], State}. | ||||||
| 
 | 
 | ||||||
| do_resends(From, To, State) when From =< To -> | do_resends(From, To, State) when From =< To -> | ||||||
|     % ?INFO("rabbit_fifo_client: doing resends From ~w  To ~w~n", [From, To]), |  | ||||||
|     lists:foldl(fun resend/2, State, lists:seq(From, To)); |     lists:foldl(fun resend/2, State, lists:seq(From, To)); | ||||||
| do_resends(_, _, State) -> | do_resends(_, _, State) -> | ||||||
|     State. |     State. | ||||||
|  |  | ||||||
|  | @ -1,719 +0,0 @@ | ||||||
| %% This Source Code Form is subject to the terms of the Mozilla Public |  | ||||||
| %% License, v. 2.0. If a copy of the MPL was not distributed with this |  | ||||||
| %% file, You can obtain one at https://mozilla.org/MPL/2.0/. |  | ||||||
| %% |  | ||||||
| %% Copyright (c) 2007-2021 VMware, Inc. or its affiliates.  All rights reserved. |  | ||||||
| %% |  | ||||||
| 
 |  | ||||||
| -module(rabbit_lager). |  | ||||||
| 
 |  | ||||||
| -include_lib("rabbit_common/include/rabbit_log.hrl"). |  | ||||||
| 
 |  | ||||||
| %% API |  | ||||||
| -export([start_logger/0, log_locations/0, fold_sinks/2, |  | ||||||
|          broker_is_started/0, set_log_level/1]). |  | ||||||
| 
 |  | ||||||
| %% For test purposes |  | ||||||
| -export([configure_lager/0]). |  | ||||||
| 
 |  | ||||||
| -export_type([log_location/0]). |  | ||||||
| 
 |  | ||||||
| -type log_location() :: string(). |  | ||||||
| 
 |  | ||||||
| start_logger() -> |  | ||||||
|     ok = maybe_remove_logger_handler(), |  | ||||||
|     ok = app_utils:stop_applications([lager, syslog]), |  | ||||||
|     ok = ensure_lager_configured(), |  | ||||||
|     ok = app_utils:start_applications([lager]), |  | ||||||
|     fold_sinks( |  | ||||||
|       fun |  | ||||||
|           (_, [], Acc) -> |  | ||||||
|               Acc; |  | ||||||
|           (SinkName, _, Acc) -> |  | ||||||
|               lager:log(SinkName, info, self(), |  | ||||||
|                         "Log file opened with Lager", []), |  | ||||||
|               Acc |  | ||||||
|       end, ok), |  | ||||||
|     ensure_log_working(). |  | ||||||
| 
 |  | ||||||
| broker_is_started() -> |  | ||||||
|     {ok, HwmCurrent} = application:get_env(lager, error_logger_hwm), |  | ||||||
|     {ok, HwmOrig0} = application:get_env(lager, error_logger_hwm_original), |  | ||||||
|     HwmOrig = case get_most_verbose_log_level() of |  | ||||||
|                   debug -> HwmOrig0 * 100; |  | ||||||
|                   _     -> HwmOrig0 |  | ||||||
|               end, |  | ||||||
|     case HwmOrig =:= HwmCurrent of |  | ||||||
|         false -> |  | ||||||
|             ok = application:set_env(lager, error_logger_hwm, HwmOrig), |  | ||||||
|             Handlers = gen_event:which_handlers(lager_event), |  | ||||||
|             lists:foreach(fun(Handler) -> |  | ||||||
|                               lager:set_loghwm(Handler, HwmOrig) |  | ||||||
|                           end, Handlers), |  | ||||||
|             ok; |  | ||||||
|         _ -> |  | ||||||
|             ok |  | ||||||
|     end. |  | ||||||
| 
 |  | ||||||
| set_log_level(Level) -> |  | ||||||
|     IsValidLevel = lists:member(Level, lager_util:levels()), |  | ||||||
|     set_log_level(IsValidLevel, Level). |  | ||||||
| 
 |  | ||||||
| set_log_level(true, Level) -> |  | ||||||
|     SinksAndHandlers = [{Sink, gen_event:which_handlers(Sink)} || |  | ||||||
|                         Sink <- lager:list_all_sinks()], |  | ||||||
|     DefaultHwm = application:get_env(lager, error_logger_hwm_original, 50), |  | ||||||
|     Hwm = case Level of |  | ||||||
|         debug -> DefaultHwm * 100; |  | ||||||
|         _     -> DefaultHwm |  | ||||||
|     end, |  | ||||||
|     application:set_env(lager, error_logger_hwm, Hwm), |  | ||||||
|     set_sink_log_level(SinksAndHandlers, Level, Hwm); |  | ||||||
| set_log_level(_, Level) -> |  | ||||||
|     {error, {invalid_log_level, Level}}. |  | ||||||
| 
 |  | ||||||
| set_sink_log_level([], _Level, _Hwm) -> |  | ||||||
|     ok; |  | ||||||
| set_sink_log_level([{Sink, Handlers}|Rest], Level, Hwm) -> |  | ||||||
|     set_sink_handler_log_level(Sink, Handlers, Level, Hwm), |  | ||||||
|     set_sink_log_level(Rest, Level, Hwm). |  | ||||||
| 
 |  | ||||||
| set_sink_handler_log_level(_Sink, [], _Level, _Hwm) -> |  | ||||||
|     ok; |  | ||||||
| set_sink_handler_log_level(Sink, [Handler|Rest], Level, Hwm) |  | ||||||
|   when is_atom(Handler) andalso is_integer(Hwm) -> |  | ||||||
|     lager:set_loghwm(Sink, Handler, undefined, Hwm), |  | ||||||
|     ok = lager:set_loglevel(Sink, Handler, undefined, Level), |  | ||||||
|     set_sink_handler_log_level(Sink, Rest, Level, Hwm); |  | ||||||
| set_sink_handler_log_level(Sink, [{Handler, Id}|Rest], Level, Hwm) -> |  | ||||||
|     lager:set_loghwm(Sink, Handler, Id, Hwm), |  | ||||||
|     ok = lager:set_loglevel(Sink, Handler, Id, Level), |  | ||||||
|     set_sink_handler_log_level(Sink, Rest, Level, Hwm); |  | ||||||
| set_sink_handler_log_level(Sink, [_|Rest], Level, Hwm) -> |  | ||||||
|     set_sink_handler_log_level(Sink, Rest, Level, Hwm). |  | ||||||
| 
 |  | ||||||
| log_locations() -> |  | ||||||
|     ensure_lager_configured(), |  | ||||||
|     DefaultHandlers = application:get_env(lager, handlers, []), |  | ||||||
|     Sinks = application:get_env(lager, extra_sinks, []), |  | ||||||
|     ExtraHandlers = [proplists:get_value(handlers, Props, []) |  | ||||||
|                      || {_, Props} <- Sinks], |  | ||||||
|     lists:sort(log_locations1([DefaultHandlers | ExtraHandlers], [])). |  | ||||||
| 
 |  | ||||||
| log_locations1([Handlers | Rest], Locations) -> |  | ||||||
|     Locations1 = log_locations2(Handlers, Locations), |  | ||||||
|     log_locations1(Rest, Locations1); |  | ||||||
| log_locations1([], Locations) -> |  | ||||||
|     Locations. |  | ||||||
| 
 |  | ||||||
| log_locations2([{lager_file_backend, Settings} | Rest], Locations) -> |  | ||||||
|     FileName = lager_file_name1(Settings), |  | ||||||
|     Locations1 = case lists:member(FileName, Locations) of |  | ||||||
|         false -> [FileName | Locations]; |  | ||||||
|         true  -> Locations |  | ||||||
|     end, |  | ||||||
|     log_locations2(Rest, Locations1); |  | ||||||
| log_locations2([{lager_console_backend, _} | Rest], Locations) -> |  | ||||||
|     Locations1 = case lists:member("<stdout>", Locations) of |  | ||||||
|         false -> ["<stdout>" | Locations]; |  | ||||||
|         true  -> Locations |  | ||||||
|     end, |  | ||||||
|     log_locations2(Rest, Locations1); |  | ||||||
| log_locations2([_ | Rest], Locations) -> |  | ||||||
|     log_locations2(Rest, Locations); |  | ||||||
| log_locations2([], Locations) -> |  | ||||||
|     Locations. |  | ||||||
| 
 |  | ||||||
| fold_sinks(Fun, Acc) -> |  | ||||||
|     Handlers = lager_config:global_get(handlers), |  | ||||||
|     Sinks = dict:to_list(lists:foldl( |  | ||||||
|         fun |  | ||||||
|             ({{lager_file_backend, F}, _, S}, Dict) -> |  | ||||||
|                 dict:append(S, F, Dict); |  | ||||||
|             ({_, _, S}, Dict) -> |  | ||||||
|                 case dict:is_key(S, Dict) of |  | ||||||
|                     true  -> dict:store(S, [], Dict); |  | ||||||
|                     false -> Dict |  | ||||||
|                 end |  | ||||||
|         end, |  | ||||||
|         dict:new(), Handlers)), |  | ||||||
|     fold_sinks(Sinks, Fun, Acc). |  | ||||||
| 
 |  | ||||||
| fold_sinks([{SinkName, FileNames} | Rest], Fun, Acc) -> |  | ||||||
|     Acc1 = Fun(SinkName, FileNames, Acc), |  | ||||||
|     fold_sinks(Rest, Fun, Acc1); |  | ||||||
| fold_sinks([], _, Acc) -> |  | ||||||
|     Acc. |  | ||||||
| 
 |  | ||||||
| ensure_log_working() -> |  | ||||||
|     {ok, Handlers} = application:get_env(lager, handlers), |  | ||||||
|     [ ensure_lager_handler_file_exist(Handler) |  | ||||||
|       || Handler <- Handlers ], |  | ||||||
|     Sinks = application:get_env(lager, extra_sinks, []), |  | ||||||
|     ensure_extra_sinks_working(Sinks, list_expected_sinks()). |  | ||||||
| 
 |  | ||||||
| ensure_extra_sinks_working(Sinks, [SinkName | Rest]) -> |  | ||||||
|     case proplists:get_value(SinkName, Sinks) of |  | ||||||
|         undefined -> throw({error, {cannot_log_to_file, unknown, |  | ||||||
|                                     rabbit_log_lager_event_sink_undefined}}); |  | ||||||
|         Sink -> |  | ||||||
|             SinkHandlers = proplists:get_value(handlers, Sink, []), |  | ||||||
|             [ ensure_lager_handler_file_exist(Handler) |  | ||||||
|               || Handler <- SinkHandlers ] |  | ||||||
|     end, |  | ||||||
|     ensure_extra_sinks_working(Sinks, Rest); |  | ||||||
| ensure_extra_sinks_working(_Sinks, []) -> |  | ||||||
|     ok. |  | ||||||
| 
 |  | ||||||
| ensure_lager_handler_file_exist(Handler) -> |  | ||||||
|     case lager_file_name(Handler) of |  | ||||||
|         false    -> ok; |  | ||||||
|         FileName -> ensure_logfile_exist(FileName) |  | ||||||
|     end. |  | ||||||
| 
 |  | ||||||
| lager_file_name({lager_file_backend, Settings}) -> |  | ||||||
|     lager_file_name1(Settings); |  | ||||||
| lager_file_name(_) -> |  | ||||||
|     false. |  | ||||||
| 
 |  | ||||||
| lager_file_name1(Settings) when is_list(Settings) -> |  | ||||||
|     {file, FileName} = proplists:lookup(file, Settings), |  | ||||||
|     FileName; |  | ||||||
| lager_file_name1({FileName, _}) -> FileName; |  | ||||||
| lager_file_name1({FileName, _, _, _, _}) -> FileName; |  | ||||||
| lager_file_name1(_) -> |  | ||||||
|     throw({error, {cannot_log_to_file, unknown, |  | ||||||
|                    lager_file_backend_config_invalid}}). |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| ensure_logfile_exist(LogFile) -> |  | ||||||
|     case rabbit_file:read_file_info(LogFile) of |  | ||||||
|         {ok,_} -> ok; |  | ||||||
|         {error, Err} -> throw({error, {cannot_log_to_file, LogFile, Err}}) |  | ||||||
|     end. |  | ||||||
| 
 |  | ||||||
| ensure_lager_configured() -> |  | ||||||
|     case lager_configured() of |  | ||||||
|         false -> configure_lager(); |  | ||||||
|         true -> ok |  | ||||||
|     end. |  | ||||||
| 
 |  | ||||||
| %% Lager should have handlers and sinks |  | ||||||
| %% Error logger forwarding to syslog should be disabled |  | ||||||
| lager_configured() -> |  | ||||||
|     Sinks = lager:list_all_sinks(), |  | ||||||
|     ExpectedSinks = list_expected_sinks(), |  | ||||||
|     application:get_env(lager, handlers) =/= undefined |  | ||||||
|     andalso |  | ||||||
|     lists:all(fun(S) -> lists:member(S, Sinks) end, ExpectedSinks) |  | ||||||
|     andalso |  | ||||||
|     application:get_env(syslog, syslog_error_logger) =/= undefined. |  | ||||||
| 
 |  | ||||||
| configure_lager() -> |  | ||||||
|     ok = app_utils:load_applications([lager]), |  | ||||||
|     %% Turn off reformatting for error_logger messages |  | ||||||
|     case application:get_env(lager, error_logger_redirect) of |  | ||||||
|         undefined -> application:set_env(lager, error_logger_redirect, true); |  | ||||||
|         _         -> ok |  | ||||||
|     end, |  | ||||||
|     case application:get_env(lager, error_logger_format_raw) of |  | ||||||
|         undefined -> application:set_env(lager, error_logger_format_raw, true); |  | ||||||
|         _         -> ok |  | ||||||
|     end, |  | ||||||
|     %% Setting env var to 'undefined' is different from not |  | ||||||
|     %% setting it at all, and lager is sensitive to this |  | ||||||
|     %% difference. |  | ||||||
|     case application:get_env(rabbit, lager_log_root) of |  | ||||||
|         {ok, Value} -> |  | ||||||
|             ok = application:set_env(lager, log_root, Value); |  | ||||||
|         _ -> |  | ||||||
|             ok |  | ||||||
|     end, |  | ||||||
|     case application:get_env(lager, colored) of |  | ||||||
|         undefined -> |  | ||||||
|             UseColor = rabbit_prelaunch_early_logging:use_colored_logging(), |  | ||||||
|             application:set_env(lager, colored, UseColor); |  | ||||||
|         _ -> |  | ||||||
|             ok |  | ||||||
|     end, |  | ||||||
|     %% Set rabbit.log config variable based on environment. |  | ||||||
|     prepare_rabbit_log_config(), |  | ||||||
|     %% Configure syslog library. |  | ||||||
|     ok = configure_syslog_error_logger(), |  | ||||||
|     %% At this point we should have rabbit.log application variable |  | ||||||
|     %% configured to generate RabbitMQ log handlers. |  | ||||||
|     GeneratedHandlers = generate_lager_handlers(), |  | ||||||
| 
 |  | ||||||
|     %% If there are lager handlers configured, |  | ||||||
|     %% both lager and generated RabbitMQ handlers are used. |  | ||||||
|     %% This is because it's hard to decide clear preference rules. |  | ||||||
|     %% RabbitMQ handlers can be set to [] to use only lager handlers. |  | ||||||
|     Handlers = case application:get_env(lager, handlers, undefined) of |  | ||||||
|         undefined -> GeneratedHandlers; |  | ||||||
|         []        -> GeneratedHandlers; |  | ||||||
|         LagerHandlers -> |  | ||||||
|             %% Remove handlers generated in previous starts |  | ||||||
|             FormerRabbitHandlers = application:get_env(lager, rabbit_handlers, []), |  | ||||||
|             GeneratedHandlers ++ remove_rabbit_handlers(LagerHandlers, |  | ||||||
|                                                         FormerRabbitHandlers) |  | ||||||
|     end, |  | ||||||
| 
 |  | ||||||
|     ok = application:set_env(lager, handlers, Handlers), |  | ||||||
|     ok = application:set_env(lager, rabbit_handlers, GeneratedHandlers), |  | ||||||
| 
 |  | ||||||
|     %% Setup extra sink/handlers. If they are not configured, redirect |  | ||||||
|     %% messages to the default sink. To know the list of expected extra |  | ||||||
|     %% sinks, we look at the 'lager_extra_sinks' compilation option. |  | ||||||
|     LogConfig = application:get_env(rabbit, log, []), |  | ||||||
|     LogLevels = application:get_env(rabbit, log_levels, []), |  | ||||||
|     Categories = proplists:get_value(categories, LogConfig, []), |  | ||||||
|     CategoriesConfig0 = case {Categories, LogLevels} of |  | ||||||
|         {[], []} -> []; |  | ||||||
|         {[], LogLevels} -> |  | ||||||
|             io:format("Using deprecated config parameter 'log_levels'. " |  | ||||||
|                       "Please update your configuration file according to " |  | ||||||
|                       "https://rabbitmq.com/logging.html"), |  | ||||||
|             lists:map(fun({Name, Level}) -> {Name, [{level, Level}]} end, |  | ||||||
|                       LogLevels); |  | ||||||
|         {Categories, []} -> |  | ||||||
|             Categories; |  | ||||||
|         {Categories, _} -> |  | ||||||
|             io:format("Using the deprecated config parameter 'rabbit.log_levels' together " |  | ||||||
|                       "with a new parameter for log categories." |  | ||||||
|                       " 'rabbit.log_levels' will be ignored. Please remove it from the config. More at " |  | ||||||
|                       "https://rabbitmq.com/logging.html"), |  | ||||||
|             Categories |  | ||||||
|     end, |  | ||||||
|     LogLevelsFromContext = case rabbit_prelaunch:get_context() of |  | ||||||
|                                #{log_levels := LL} -> LL; |  | ||||||
|                                _                   -> undefined |  | ||||||
|                            end, |  | ||||||
|     Fun = fun |  | ||||||
|               (global, _, CC) -> |  | ||||||
|                   CC; |  | ||||||
|               (color, _, CC) -> |  | ||||||
|                   CC; |  | ||||||
|               (CategoryS, LogLevel, CC) -> |  | ||||||
|                   Category = list_to_atom(CategoryS), |  | ||||||
|                   CCEntry = proplists:get_value( |  | ||||||
|                               Category, CC, []), |  | ||||||
|                   CCEntry1 = lists:ukeymerge( |  | ||||||
|                                1, |  | ||||||
|                                [{level, LogLevel}], |  | ||||||
|                                lists:ukeysort(1, CCEntry)), |  | ||||||
|                   lists:keystore( |  | ||||||
|                     Category, 1, CC, {Category, CCEntry1}) |  | ||||||
|           end, |  | ||||||
|     CategoriesConfig = case LogLevelsFromContext of |  | ||||||
|                            undefined -> |  | ||||||
|                                CategoriesConfig0; |  | ||||||
|                            _ -> |  | ||||||
|                                maps:fold(Fun, |  | ||||||
|                                          CategoriesConfig0, |  | ||||||
|                                          LogLevelsFromContext) |  | ||||||
|                        end, |  | ||||||
|     SinkConfigs = lists:map( |  | ||||||
|         fun({Name, Config}) -> |  | ||||||
|             {rabbit_log:make_internal_sink_name(Name), Config} |  | ||||||
|         end, |  | ||||||
|         CategoriesConfig), |  | ||||||
|     LagerSinks = application:get_env(lager, extra_sinks, []), |  | ||||||
|     GeneratedSinks = generate_lager_sinks( |  | ||||||
|         [error_logger_lager_event | list_expected_sinks()], |  | ||||||
|         SinkConfigs), |  | ||||||
|     Sinks = merge_lager_sink_handlers(LagerSinks, GeneratedSinks, []), |  | ||||||
|     ok = application:set_env(lager, extra_sinks, Sinks), |  | ||||||
| 
 |  | ||||||
|     case application:get_env(lager, error_logger_hwm) of |  | ||||||
|         undefined -> |  | ||||||
|             ok = application:set_env(lager, error_logger_hwm, 1000), |  | ||||||
|             % NB: 50 is the default value in lager.app.src |  | ||||||
|             ok = application:set_env(lager, error_logger_hwm_original, 50); |  | ||||||
|         {ok, Val} when is_integer(Val) andalso Val < 1000 -> |  | ||||||
|             ok = application:set_env(lager, error_logger_hwm, 1000), |  | ||||||
|             ok = application:set_env(lager, error_logger_hwm_original, Val); |  | ||||||
|         {ok, Val} when is_integer(Val) -> |  | ||||||
|             ok = application:set_env(lager, error_logger_hwm_original, Val), |  | ||||||
|             ok |  | ||||||
|     end, |  | ||||||
|     ok. |  | ||||||
| 
 |  | ||||||
| configure_syslog_error_logger() -> |  | ||||||
|     %% Disable error_logger forwarding to syslog if it's not configured |  | ||||||
|     case application:get_env(syslog, syslog_error_logger) of |  | ||||||
|         undefined -> |  | ||||||
|             application:set_env(syslog, syslog_error_logger, false); |  | ||||||
|         _ -> ok |  | ||||||
|     end. |  | ||||||
| 
 |  | ||||||
| remove_rabbit_handlers(Handlers, FormerHandlers) -> |  | ||||||
|     lists:filter(fun(Handler) -> |  | ||||||
|         not lists:member(Handler, FormerHandlers) |  | ||||||
|     end, |  | ||||||
|     Handlers). |  | ||||||
| 
 |  | ||||||
| generate_lager_handlers() -> |  | ||||||
|     LogConfig = application:get_env(rabbit, log, []), |  | ||||||
|     LogHandlersConfig = lists:keydelete(categories, 1, LogConfig), |  | ||||||
|     generate_lager_handlers(LogHandlersConfig). |  | ||||||
| 
 |  | ||||||
| generate_lager_handlers(LogHandlersConfig) -> |  | ||||||
|     lists:flatmap( |  | ||||||
|     fun |  | ||||||
|         ({file, HandlerConfig}) -> |  | ||||||
|             case proplists:get_value(file, HandlerConfig, false) of |  | ||||||
|                 false -> []; |  | ||||||
|                 FileName when is_list(FileName) -> |  | ||||||
|                     Backend = lager_backend(file), |  | ||||||
|                     generate_handler(Backend, HandlerConfig) |  | ||||||
|             end; |  | ||||||
|         ({Other, HandlerConfig}) when |  | ||||||
|               Other =:= console; Other =:= syslog; Other =:= exchange -> |  | ||||||
|             case proplists:get_value(enabled, HandlerConfig, false) of |  | ||||||
|                 false -> []; |  | ||||||
|                 true  -> |  | ||||||
|                     Backend = lager_backend(Other), |  | ||||||
|                     generate_handler(Backend, |  | ||||||
|                                      lists:keydelete(enabled, 1, HandlerConfig)) |  | ||||||
|             end |  | ||||||
|     end, |  | ||||||
|     LogHandlersConfig). |  | ||||||
| 
 |  | ||||||
| lager_backend(file)     -> lager_file_backend; |  | ||||||
| lager_backend(console)  -> lager_console_backend; |  | ||||||
| lager_backend(syslog)   -> syslog_lager_backend; |  | ||||||
| lager_backend(exchange) -> lager_exchange_backend. |  | ||||||
| 
 |  | ||||||
| %% Syslog backend is using an old API for configuration and |  | ||||||
| %% does not support proplists. |  | ||||||
| generate_handler(syslog_lager_backend=Backend, HandlerConfig) -> |  | ||||||
|     %% The default log level is set to `debug` because the actual |  | ||||||
|     %% filtering is made at the sink level. We want to accept all |  | ||||||
|     %% messages here. |  | ||||||
|     DefaultConfigVal = debug, |  | ||||||
|     Level = proplists:get_value(level, HandlerConfig, DefaultConfigVal), |  | ||||||
|     ok = configure_handler_backend(Backend), |  | ||||||
|     [{Backend, |  | ||||||
|      [Level, |  | ||||||
|       {}, |  | ||||||
|       {lager_default_formatter, syslog_formatter_config()}]}]; |  | ||||||
| generate_handler(Backend, HandlerConfig) -> |  | ||||||
|     [{Backend, |  | ||||||
|         lists:ukeymerge(1, lists:ukeysort(1, HandlerConfig), |  | ||||||
|                            lists:ukeysort(1, default_handler_config(Backend)))}]. |  | ||||||
| 
 |  | ||||||
| configure_handler_backend(syslog_lager_backend) -> |  | ||||||
|     {ok, _} = application:ensure_all_started(syslog), |  | ||||||
|     ok; |  | ||||||
| configure_handler_backend(_Backend) -> |  | ||||||
|     ok. |  | ||||||
| 
 |  | ||||||
| default_handler_config(lager_console_backend) -> |  | ||||||
|     %% The default log level is set to `debug` because the actual |  | ||||||
|     %% filtering is made at the sink level. We want to accept all |  | ||||||
|     %% messages here. |  | ||||||
|     DefaultConfigVal = debug, |  | ||||||
|     [{level, DefaultConfigVal}, |  | ||||||
|      {formatter_config, default_config_value({formatter_config, console})}]; |  | ||||||
| default_handler_config(lager_exchange_backend) -> |  | ||||||
|     %% The default log level is set to `debug` because the actual |  | ||||||
|     %% filtering is made at the sink level. We want to accept all |  | ||||||
|     %% messages here. |  | ||||||
|     DefaultConfigVal = debug, |  | ||||||
|     [{level, DefaultConfigVal}, |  | ||||||
|      {formatter_config, default_config_value({formatter_config, exchange})}]; |  | ||||||
| default_handler_config(lager_file_backend) -> |  | ||||||
|     %% The default log level is set to `debug` because the actual |  | ||||||
|     %% filtering is made at the sink level. We want to accept all |  | ||||||
|     %% messages here. |  | ||||||
|     DefaultConfigVal = debug, |  | ||||||
|     [{level, DefaultConfigVal}, |  | ||||||
|      {formatter_config, default_config_value({formatter_config, file})}, |  | ||||||
|      {date, ""}, |  | ||||||
|      {size, 0}]. |  | ||||||
| 
 |  | ||||||
| default_config_value(level) -> |  | ||||||
|     LogConfig = application:get_env(rabbit, log, []), |  | ||||||
|     FoldFun = fun |  | ||||||
|                   ({_, Cfg}, LL) when is_list(Cfg) -> |  | ||||||
|                       NewLL = proplists:get_value(level, Cfg, LL), |  | ||||||
|                       case LL of |  | ||||||
|                           undefined -> |  | ||||||
|                               NewLL; |  | ||||||
|                           _ -> |  | ||||||
|                               MoreVerbose = lager_util:level_to_num(NewLL) > lager_util:level_to_num(LL), |  | ||||||
|                               case MoreVerbose of |  | ||||||
|                                   true  -> NewLL; |  | ||||||
|                                   false -> LL |  | ||||||
|                               end |  | ||||||
|                       end; |  | ||||||
|                   (_, LL) -> |  | ||||||
|                       LL |  | ||||||
|               end, |  | ||||||
|     FoundLL = lists:foldl(FoldFun, undefined, LogConfig), |  | ||||||
|     case FoundLL of |  | ||||||
|         undefined -> info; |  | ||||||
|         _         -> FoundLL |  | ||||||
|     end; |  | ||||||
| default_config_value({formatter_config, console}) -> |  | ||||||
|     EOL = case application:get_env(lager, colored) of |  | ||||||
|               {ok, true}  -> "\e[0m\r\n"; |  | ||||||
|               _           -> "\r\n" |  | ||||||
|           end, |  | ||||||
|     [date, " ", time, " ", color, "[", severity, "] ", |  | ||||||
|        {pid, ""}, |  | ||||||
|        " ", message, EOL]; |  | ||||||
| default_config_value({formatter_config, _}) -> |  | ||||||
|     [date, " ", time, " ", color, "[", severity, "] ", |  | ||||||
|        {pid, ""}, |  | ||||||
|        " ", message, "\n"]. |  | ||||||
| 
 |  | ||||||
| syslog_formatter_config() -> |  | ||||||
|     [color, "[", severity, "] ", |  | ||||||
|        {pid, ""}, |  | ||||||
|        " ", message, "\n"]. |  | ||||||
| 
 |  | ||||||
| prepare_rabbit_log_config() -> |  | ||||||
|     %% If RABBIT_LOGS is not set, we should ignore it. |  | ||||||
|     DefaultFile = application:get_env(rabbit, lager_default_file, undefined), |  | ||||||
|     %% If RABBIT_UPGRADE_LOGS is not set, we should ignore it. |  | ||||||
|     UpgradeFile = application:get_env(rabbit, lager_upgrade_file, undefined), |  | ||||||
|     case DefaultFile of |  | ||||||
|         undefined -> ok; |  | ||||||
|         false -> |  | ||||||
|             set_env_default_log_disabled(); |  | ||||||
|         tty -> |  | ||||||
|             set_env_default_log_console(); |  | ||||||
|         FileName when is_list(FileName) -> |  | ||||||
|             case rabbit_prelaunch:get_context() of |  | ||||||
|                 %% The user explicitly sets $RABBITMQ_LOGS; |  | ||||||
|                 %% we should override a file location even |  | ||||||
|                 %% if it's set in rabbitmq.config |  | ||||||
|                 #{var_origins := #{main_log_file := environment}} -> |  | ||||||
|                     set_env_default_log_file(FileName, override); |  | ||||||
|                 _ -> |  | ||||||
|                     set_env_default_log_file(FileName, keep) |  | ||||||
|             end |  | ||||||
|     end, |  | ||||||
| 
 |  | ||||||
|     %% Upgrade log file never overrides the value set in rabbitmq.config |  | ||||||
|     case UpgradeFile of |  | ||||||
|         %% No special env for upgrade logs - redirect to the default sink |  | ||||||
|         undefined -> ok; |  | ||||||
|         %% Redirect logs to default output. |  | ||||||
|         DefaultFile -> ok; |  | ||||||
|         UpgradeFileName when is_list(UpgradeFileName) -> |  | ||||||
|             set_env_upgrade_log_file(UpgradeFileName) |  | ||||||
|     end. |  | ||||||
| 
 |  | ||||||
| set_env_default_log_disabled() -> |  | ||||||
|     %% Disabling all the logs. |  | ||||||
|     ok = application:set_env(rabbit, log, []). |  | ||||||
| 
 |  | ||||||
| set_env_default_log_console() -> |  | ||||||
|     LogConfig = application:get_env(rabbit, log, []), |  | ||||||
|     ConsoleConfig = proplists:get_value(console, LogConfig, []), |  | ||||||
|     LogConfigConsole = |  | ||||||
|         lists:keystore(console, 1, LogConfig, |  | ||||||
|                        {console, lists:keystore(enabled, 1, ConsoleConfig, |  | ||||||
|                                                 {enabled, true})}), |  | ||||||
|     %% Remove the file handler - disable logging to file |  | ||||||
|     LogConfigConsoleNoFile = lists:keydelete(file, 1, LogConfigConsole), |  | ||||||
|     ok = application:set_env(rabbit, log, LogConfigConsoleNoFile). |  | ||||||
| 
 |  | ||||||
| set_env_default_log_file(FileName, Override) -> |  | ||||||
|     LogConfig = application:get_env(rabbit, log, []), |  | ||||||
|     FileConfig = proplists:get_value(file, LogConfig, []), |  | ||||||
|     NewLogConfig = case proplists:get_value(file, FileConfig, undefined) of |  | ||||||
|         undefined -> |  | ||||||
|             lists:keystore(file, 1, LogConfig, |  | ||||||
|                            {file, lists:keystore(file, 1, FileConfig, |  | ||||||
|                                                  {file, FileName})}); |  | ||||||
|         _ConfiguredFileName -> |  | ||||||
|             case Override of |  | ||||||
|                 override -> |  | ||||||
|                     lists:keystore( |  | ||||||
|                         file, 1, LogConfig, |  | ||||||
|                         {file, lists:keystore(file, 1, FileConfig, |  | ||||||
|                                               {file, FileName})}); |  | ||||||
|                 keep -> |  | ||||||
|                     LogConfig |  | ||||||
|             end |  | ||||||
|     end, |  | ||||||
|     ok = application:set_env(rabbit, log, NewLogConfig). |  | ||||||
| 
 |  | ||||||
| set_env_upgrade_log_file(FileName) -> |  | ||||||
|     LogConfig = application:get_env(rabbit, log, []), |  | ||||||
|     SinksConfig = proplists:get_value(categories, LogConfig, []), |  | ||||||
|     UpgradeSinkConfig = proplists:get_value(upgrade, SinksConfig, []), |  | ||||||
|     FileConfig = proplists:get_value(file, SinksConfig, []), |  | ||||||
|     NewLogConfig = case proplists:get_value(file, FileConfig, undefined) of |  | ||||||
|         undefined -> |  | ||||||
|             lists:keystore( |  | ||||||
|                 categories, 1, LogConfig, |  | ||||||
|                 {categories, |  | ||||||
|                     lists:keystore( |  | ||||||
|                         upgrade, 1, SinksConfig, |  | ||||||
|                         {upgrade, |  | ||||||
|                             lists:keystore(file, 1, UpgradeSinkConfig, |  | ||||||
|                                            {file, FileName})})}); |  | ||||||
|         %% No change. We don't want to override the configured value. |  | ||||||
|         _File -> LogConfig |  | ||||||
|     end, |  | ||||||
|     ok = application:set_env(rabbit, log, NewLogConfig). |  | ||||||
| 
 |  | ||||||
| generate_lager_sinks(SinkNames, SinkConfigs) -> |  | ||||||
|     LogLevels = case rabbit_prelaunch:get_context() of |  | ||||||
|                     #{log_levels := LL} -> LL; |  | ||||||
|                     _                   -> undefined |  | ||||||
|                 end, |  | ||||||
|     DefaultLogLevel = case LogLevels of |  | ||||||
|                           #{global := LogLevel} -> |  | ||||||
|                               LogLevel; |  | ||||||
|                           _ -> |  | ||||||
|                               default_config_value(level) |  | ||||||
|                       end, |  | ||||||
|     lists:map(fun(SinkName) -> |  | ||||||
|         SinkConfig = proplists:get_value(SinkName, SinkConfigs, []), |  | ||||||
|         SinkHandlers = case proplists:get_value(file, SinkConfig, false) of |  | ||||||
|             %% If no file defined - forward everything to the default backend |  | ||||||
|             false -> |  | ||||||
|                 ForwarderLevel = proplists:get_value(level, |  | ||||||
|                                                      SinkConfig, |  | ||||||
|                                                      DefaultLogLevel), |  | ||||||
|                 [{lager_forwarder_backend, |  | ||||||
|                     [lager_util:make_internal_sink_name(lager), ForwarderLevel]}]; |  | ||||||
|             %% If a file defined - add a file backend to handlers and remove all default file backends. |  | ||||||
|             File -> |  | ||||||
|                 %% Use `debug` as a default handler to not override a handler level |  | ||||||
|                 Level = proplists:get_value(level, SinkConfig, DefaultLogLevel), |  | ||||||
|                 DefaultGeneratedHandlers = application:get_env(lager, rabbit_handlers, []), |  | ||||||
|                 SinkFileHandlers = case proplists:get_value(lager_file_backend, DefaultGeneratedHandlers, undefined) of |  | ||||||
|                     undefined -> |  | ||||||
|                         %% Create a new file handler. |  | ||||||
|                         %% `info` is a default level here. |  | ||||||
|                         FileLevel = proplists:get_value(level, SinkConfig, DefaultLogLevel), |  | ||||||
|                         generate_lager_handlers([{file, [{file, File}, {level, FileLevel}]}]); |  | ||||||
|                     FileHandler -> |  | ||||||
|                         %% Replace a filename in the handler |  | ||||||
|                         FileHandlerChanges = case handler_level_more_verbose(FileHandler, Level) of |  | ||||||
|                             true  -> [{file, File}, {level, Level}]; |  | ||||||
|                             false -> [{file, File}] |  | ||||||
|                         end, |  | ||||||
| 
 |  | ||||||
|                         [{lager_file_backend, |  | ||||||
|                             lists:ukeymerge(1, FileHandlerChanges, |  | ||||||
|                                             lists:ukeysort(1, FileHandler))}] |  | ||||||
|                 end, |  | ||||||
|                 %% Remove all file handlers. |  | ||||||
|                 AllLagerHandlers = application:get_env(lager, handlers, []), |  | ||||||
|                 HandlersWithoutFile = lists:filter( |  | ||||||
|                     fun({lager_file_backend, _}) -> false; |  | ||||||
|                        ({_, _}) -> true |  | ||||||
|                     end, |  | ||||||
|                     AllLagerHandlers), |  | ||||||
|                 %% Set level for handlers which are more verbose. |  | ||||||
|                 %% We don't increase verbosity in sinks so it works like forwarder backend. |  | ||||||
|                 HandlersWithoutFileWithLevel = lists:map(fun({Name, Handler}) -> |  | ||||||
|                     case handler_level_more_verbose(Handler, Level) of |  | ||||||
|                         true  -> {Name, lists:keystore(level, 1, Handler, {level, Level})}; |  | ||||||
|                         false -> {Name, Handler} |  | ||||||
|                     end |  | ||||||
|                 end, |  | ||||||
|                 HandlersWithoutFile), |  | ||||||
| 
 |  | ||||||
|                 HandlersWithoutFileWithLevel ++ SinkFileHandlers |  | ||||||
|         end, |  | ||||||
|         {SinkName, [{handlers, SinkHandlers}, {rabbit_handlers, SinkHandlers}]} |  | ||||||
|     end, |  | ||||||
|     SinkNames). |  | ||||||
| 
 |  | ||||||
| handler_level_more_verbose(Handler, Level) -> |  | ||||||
|     HandlerLevel = proplists:get_value(level, Handler, default_config_value(level)), |  | ||||||
|     lager_util:level_to_num(HandlerLevel) > lager_util:level_to_num(Level). |  | ||||||
| 
 |  | ||||||
| merge_lager_sink_handlers([{Name, Sink} | RestSinks], GeneratedSinks, Agg) -> |  | ||||||
|     %% rabbitmq/rabbitmq-server#2044. |  | ||||||
|     %% We have to take into account that a sink's |  | ||||||
|     %% handler backend may need additional configuration here. |  | ||||||
|     %% {rabbit_log_federation_lager_event, [ |  | ||||||
|     %%     {handlers, [ |  | ||||||
|     %%         {lager_forwarder_backend, [lager_event,inherit]}, |  | ||||||
|     %%         {syslog_lager_backend, [debug]} |  | ||||||
|     %%     ]}, |  | ||||||
|     %%     {rabbit_handlers, [ |  | ||||||
|     %%         {lager_forwarder_backend, [lager_event,inherit]} |  | ||||||
|     %%     ]} |  | ||||||
|     %% ]} |  | ||||||
|     case lists:keytake(Name, 1, GeneratedSinks) of |  | ||||||
|         {value, {Name, GenSink}, RestGeneratedSinks} -> |  | ||||||
|             Handlers = proplists:get_value(handlers, Sink, []), |  | ||||||
|             GenHandlers = proplists:get_value(handlers, GenSink, []), |  | ||||||
|             FormerRabbitHandlers = proplists:get_value(rabbit_handlers, Sink, []), |  | ||||||
| 
 |  | ||||||
|             %% Remove handlers defined in previous starts |  | ||||||
|             ConfiguredHandlers = remove_rabbit_handlers(Handlers, FormerRabbitHandlers), |  | ||||||
|             NewHandlers = GenHandlers ++ ConfiguredHandlers, |  | ||||||
|             ok = maybe_configure_handler_backends(NewHandlers), |  | ||||||
|             MergedSink = lists:keystore(rabbit_handlers, 1, |  | ||||||
|                                         lists:keystore(handlers, 1, Sink, |  | ||||||
|                                                        {handlers, NewHandlers}), |  | ||||||
|                                         {rabbit_handlers, GenHandlers}), |  | ||||||
|             merge_lager_sink_handlers( |  | ||||||
|                 RestSinks, |  | ||||||
|                 RestGeneratedSinks, |  | ||||||
|                 [{Name, MergedSink} | Agg]); |  | ||||||
|         false -> |  | ||||||
|             merge_lager_sink_handlers( |  | ||||||
|                 RestSinks, |  | ||||||
|                 GeneratedSinks, |  | ||||||
|                 [{Name, Sink} | Agg]) |  | ||||||
|     end; |  | ||||||
| merge_lager_sink_handlers([], GeneratedSinks, Agg) -> GeneratedSinks ++ Agg. |  | ||||||
| 
 |  | ||||||
| maybe_configure_handler_backends([]) -> |  | ||||||
|     ok; |  | ||||||
| maybe_configure_handler_backends([{Backend, _}|Backends]) -> |  | ||||||
|     ok = configure_handler_backend(Backend), |  | ||||||
|     maybe_configure_handler_backends(Backends). |  | ||||||
| 
 |  | ||||||
| list_expected_sinks() -> |  | ||||||
|     rabbit_prelaunch_early_logging:list_expected_sinks(). |  | ||||||
| 
 |  | ||||||
| maybe_remove_logger_handler() -> |  | ||||||
|     M = logger, |  | ||||||
|     F = remove_handler, |  | ||||||
|     try |  | ||||||
|         ok = erlang:apply(M, F, [default]) |  | ||||||
|     catch |  | ||||||
|         error:undef -> |  | ||||||
|             % OK since the logger module only exists in OTP 21.1 or later |  | ||||||
|             ok; |  | ||||||
|         error:{badmatch, {error, {not_found, default}}} -> |  | ||||||
|             % OK - this error happens when running a CLI command |  | ||||||
|             ok; |  | ||||||
|         Err:Reason -> |  | ||||||
|             error_logger:error_msg("calling ~p:~p failed: ~p:~p~n", |  | ||||||
|                                    [M, F, Err, Reason]) |  | ||||||
|     end. |  | ||||||
| 
 |  | ||||||
| get_most_verbose_log_level() -> |  | ||||||
|     {ok, HandlersA} = application:get_env(lager, handlers), |  | ||||||
|     {ok, ExtraSinks} = application:get_env(lager, extra_sinks), |  | ||||||
|     HandlersB = lists:append( |  | ||||||
|                   [H || {_, Keys} <- ExtraSinks, |  | ||||||
|                         {handlers, H} <- Keys]), |  | ||||||
|     get_most_verbose_log_level(HandlersA ++ HandlersB, |  | ||||||
|                                lager_util:level_to_num(none)). |  | ||||||
| 
 |  | ||||||
| get_most_verbose_log_level([{_, Props} | Rest], MostVerbose) -> |  | ||||||
|     LogLevel = proplists:get_value(level, Props, info), |  | ||||||
|     LogLevelNum = lager_util:level_to_num(LogLevel), |  | ||||||
|     case LogLevelNum > MostVerbose of |  | ||||||
|         true -> |  | ||||||
|             get_most_verbose_log_level(Rest, LogLevelNum); |  | ||||||
|         false -> |  | ||||||
|             get_most_verbose_log_level(Rest, MostVerbose) |  | ||||||
|     end; |  | ||||||
| get_most_verbose_log_level([], MostVerbose) -> |  | ||||||
|     lager_util:num_to_level(MostVerbose). |  | ||||||
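For contrast with the removed Lager helpers above, here is a minimal sketch of the equivalent checks against the new Logger API. The 'default' handler id is an assumption, and logger:compare_levels/2 orders levels by severity, so the level that compares as lt is the more verbose one:

    %% Sketch only: Logger-side analogue of handler_level_more_verbose/2.
    -module(logger_level_sketch).
    -export([more_verbose/2, raise_default_handler/0]).

    %% true when Candidate logs more than Current (e.g. debug vs. info).
    more_verbose(Candidate, Current) ->
        logger:compare_levels(Candidate, Current) =:= lt.

    %% Raise the verbosity of a single handler; the 'default' id is assumed.
    raise_default_handler() ->
        ok = logger:set_handler_config(default, level, debug).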
|  | @ -0,0 +1,120 @@ | ||||||
|  | %% This Source Code Form is subject to the terms of the Mozilla Public | ||||||
|  | %% License, v. 2.0. If a copy of the MPL was not distributed with this | ||||||
|  | %% file, You can obtain one at https://mozilla.org/MPL/2.0/. | ||||||
|  | %% | ||||||
|  | %% Copyright (c) 2021 VMware, Inc. or its affiliates.  All rights reserved. | ||||||
|  | %% | ||||||
|  | 
 | ||||||
|  | %% @doc Compatibility module for the old Lager-based logging API. | ||||||
|  | -module(rabbit_log_channel). | ||||||
|  | 
 | ||||||
|  | -export([debug/1, debug/2, debug/3, | ||||||
|  |          info/1, info/2, info/3, | ||||||
|  |          notice/1, notice/2, notice/3, | ||||||
|  |          warning/1, warning/2, warning/3, | ||||||
|  |          error/1, error/2, error/3, | ||||||
|  |          critical/1, critical/2, critical/3, | ||||||
|  |          alert/1, alert/2, alert/3, | ||||||
|  |          emergency/1, emergency/2, emergency/3, | ||||||
|  |          none/1, none/2, none/3]). | ||||||
|  | 
 | ||||||
|  | -include_lib("rabbit_common/include/logging.hrl"). | ||||||
|  | 
 | ||||||
|  | -compile({no_auto_import, [error/2, error/3]}). | ||||||
|  | 
 | ||||||
|  | -spec debug(string()) -> 'ok'. | ||||||
|  | debug(Format) -> debug(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec debug(string(), [any()]) -> 'ok'. | ||||||
|  | debug(Format, Args) -> debug(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec debug(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | debug(Pid, Format, Args) -> | ||||||
|  |     logger:debug(Format, Args, #{pid => Pid, | ||||||
|  |                                  domain => ?RMQLOG_DOMAIN_CHAN}). | ||||||
|  | 
 | ||||||
|  | -spec info(string()) -> 'ok'. | ||||||
|  | info(Format) -> info(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec info(string(), [any()]) -> 'ok'. | ||||||
|  | info(Format, Args) -> info(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec info(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | info(Pid, Format, Args) -> | ||||||
|  |     logger:info(Format, Args, #{pid => Pid, | ||||||
|  |                                 domain => ?RMQLOG_DOMAIN_CHAN}). | ||||||
|  | 
 | ||||||
|  | -spec notice(string()) -> 'ok'. | ||||||
|  | notice(Format) -> notice(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec notice(string(), [any()]) -> 'ok'. | ||||||
|  | notice(Format, Args) -> notice(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec notice(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | notice(Pid, Format, Args) -> | ||||||
|  |     logger:notice(Format, Args, #{pid => Pid, | ||||||
|  |                                   domain => ?RMQLOG_DOMAIN_CHAN}). | ||||||
|  | 
 | ||||||
|  | -spec warning(string()) -> 'ok'. | ||||||
|  | warning(Format) -> warning(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec warning(string(), [any()]) -> 'ok'. | ||||||
|  | warning(Format, Args) -> warning(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec warning(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | warning(Pid, Format, Args) -> | ||||||
|  |     logger:warning(Format, Args, #{pid => Pid, | ||||||
|  |                                    domain => ?RMQLOG_DOMAIN_CHAN}). | ||||||
|  | 
 | ||||||
|  | -spec error(string()) -> 'ok'. | ||||||
|  | error(Format) -> error(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec error(string(), [any()]) -> 'ok'. | ||||||
|  | error(Format, Args) -> error(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec error(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | error(Pid, Format, Args) -> | ||||||
|  |     logger:error(Format, Args, #{pid => Pid, | ||||||
|  |                                  domain => ?RMQLOG_DOMAIN_CHAN}). | ||||||
|  | 
 | ||||||
|  | -spec critical(string()) -> 'ok'. | ||||||
|  | critical(Format) -> critical(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec critical(string(), [any()]) -> 'ok'. | ||||||
|  | critical(Format, Args) -> critical(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec critical(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | critical(Pid, Format, Args) -> | ||||||
|  |     logger:critical(Format, Args, #{pid => Pid, | ||||||
|  |                                     domain => ?RMQLOG_DOMAIN_CHAN}). | ||||||
|  | 
 | ||||||
|  | -spec alert(string()) -> 'ok'. | ||||||
|  | alert(Format) -> alert(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec alert(string(), [any()]) -> 'ok'. | ||||||
|  | alert(Format, Args) -> alert(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec alert(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | alert(Pid, Format, Args) -> | ||||||
|  |     logger:alert(Format, Args, #{pid => Pid, | ||||||
|  |                                  domain => ?RMQLOG_DOMAIN_CHAN}). | ||||||
|  | 
 | ||||||
|  | -spec emergency(string()) -> 'ok'. | ||||||
|  | emergency(Format) -> emergency(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec emergency(string(), [any()]) -> 'ok'. | ||||||
|  | emergency(Format, Args) -> emergency(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec emergency(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | emergency(Pid, Format, Args) -> | ||||||
|  |     logger:emergency(Format, Args, #{pid => Pid, | ||||||
|  |                                      domain => ?RMQLOG_DOMAIN_CHAN}). | ||||||
|  | 
 | ||||||
|  | -spec none(string()) -> 'ok'. | ||||||
|  | none(_Format) -> ok. | ||||||
|  | 
 | ||||||
|  | -spec none(string(), [any()]) -> 'ok'. | ||||||
|  | none(_Format, _Args) -> ok. | ||||||
|  | 
 | ||||||
|  | -spec none(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | none(_Pid, _Format, _Args) -> ok. | ||||||
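The wrapper above only attaches the channel logging domain and the caller's pid as metadata before delegating to logger. A hypothetical call site, assuming the rabbit_common logging.hrl include for the domain macro (the message text and function name are made up for illustration):

    %% Sketch: both calls are expected to produce the same log event,
    %% tagged with the channel domain.
    -include_lib("rabbit_common/include/logging.hrl").

    log_consumer_timeout(ConsumerTag) ->
        %% via the compatibility module
        rabbit_log_channel:warning("Consumer ~p timed out", [ConsumerTag]),
        %% roughly equivalent direct Logger call
        logger:warning("Consumer ~p timed out", [ConsumerTag],
                       #{pid => self(), domain => ?RMQLOG_DOMAIN_CHAN}).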
|  | @ -0,0 +1,120 @@ | ||||||
|  | %% This Source Code Form is subject to the terms of the Mozilla Public | ||||||
|  | %% License, v. 2.0. If a copy of the MPL was not distributed with this | ||||||
|  | %% file, You can obtain one at https://mozilla.org/MPL/2.0/. | ||||||
|  | %% | ||||||
|  | %% Copyright (c) 2021 VMware, Inc. or its affiliates.  All rights reserved. | ||||||
|  | %% | ||||||
|  | 
 | ||||||
|  | %% @doc Compatibility module for the old Lager-based logging API. | ||||||
|  | -module(rabbit_log_connection). | ||||||
|  | 
 | ||||||
|  | -export([debug/1, debug/2, debug/3, | ||||||
|  |          info/1, info/2, info/3, | ||||||
|  |          notice/1, notice/2, notice/3, | ||||||
|  |          warning/1, warning/2, warning/3, | ||||||
|  |          error/1, error/2, error/3, | ||||||
|  |          critical/1, critical/2, critical/3, | ||||||
|  |          alert/1, alert/2, alert/3, | ||||||
|  |          emergency/1, emergency/2, emergency/3, | ||||||
|  |          none/1, none/2, none/3]). | ||||||
|  | 
 | ||||||
|  | -include_lib("rabbit_common/include/logging.hrl"). | ||||||
|  | 
 | ||||||
|  | -compile({no_auto_import, [error/2, error/3]}). | ||||||
|  | 
 | ||||||
|  | -spec debug(string()) -> 'ok'. | ||||||
|  | debug(Format) -> debug(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec debug(string(), [any()]) -> 'ok'. | ||||||
|  | debug(Format, Args) -> debug(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec debug(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | debug(Pid, Format, Args) -> | ||||||
|  |     logger:debug(Format, Args, #{pid => Pid, | ||||||
|  |                                  domain => ?RMQLOG_DOMAIN_CONN}). | ||||||
|  | 
 | ||||||
|  | -spec info(string()) -> 'ok'. | ||||||
|  | info(Format) -> info(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec info(string(), [any()]) -> 'ok'. | ||||||
|  | info(Format, Args) -> info(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec info(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | info(Pid, Format, Args) -> | ||||||
|  |     logger:info(Format, Args, #{pid => Pid, | ||||||
|  |                                 domain => ?RMQLOG_DOMAIN_CONN}). | ||||||
|  | 
 | ||||||
|  | -spec notice(string()) -> 'ok'. | ||||||
|  | notice(Format) -> notice(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec notice(string(), [any()]) -> 'ok'. | ||||||
|  | notice(Format, Args) -> notice(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec notice(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | notice(Pid, Format, Args) -> | ||||||
|  |     logger:notice(Format, Args, #{pid => Pid, | ||||||
|  |                                   domain => ?RMQLOG_DOMAIN_CONN}). | ||||||
|  | 
 | ||||||
|  | -spec warning(string()) -> 'ok'. | ||||||
|  | warning(Format) -> warning(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec warning(string(), [any()]) -> 'ok'. | ||||||
|  | warning(Format, Args) -> warning(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec warning(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | warning(Pid, Format, Args) -> | ||||||
|  |     logger:warning(Format, Args, #{pid => Pid, | ||||||
|  |                                    domain => ?RMQLOG_DOMAIN_CONN}). | ||||||
|  | 
 | ||||||
|  | -spec error(string()) -> 'ok'. | ||||||
|  | error(Format) -> error(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec error(string(), [any()]) -> 'ok'. | ||||||
|  | error(Format, Args) -> error(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec error(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | error(Pid, Format, Args) -> | ||||||
|  |     logger:error(Format, Args, #{pid => Pid, | ||||||
|  |                                  domain => ?RMQLOG_DOMAIN_CONN}). | ||||||
|  | 
 | ||||||
|  | -spec critical(string()) -> 'ok'. | ||||||
|  | critical(Format) -> critical(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec critical(string(), [any()]) -> 'ok'. | ||||||
|  | critical(Format, Args) -> critical(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec critical(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | critical(Pid, Format, Args) -> | ||||||
|  |     logger:critical(Format, Args, #{pid => Pid, | ||||||
|  |                                     domain => ?RMQLOG_DOMAIN_CONN}). | ||||||
|  | 
 | ||||||
|  | -spec alert(string()) -> 'ok'. | ||||||
|  | alert(Format) -> alert(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec alert(string(), [any()]) -> 'ok'. | ||||||
|  | alert(Format, Args) -> alert(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec alert(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | alert(Pid, Format, Args) -> | ||||||
|  |     logger:alert(Format, Args, #{pid => Pid, | ||||||
|  |                                  domain => ?RMQLOG_DOMAIN_CONN}). | ||||||
|  | 
 | ||||||
|  | -spec emergency(string()) -> 'ok'. | ||||||
|  | emergency(Format) -> emergency(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec emergency(string(), [any()]) -> 'ok'. | ||||||
|  | emergency(Format, Args) -> emergency(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec emergency(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | emergency(Pid, Format, Args) -> | ||||||
|  |     logger:emergency(Format, Args, #{pid => Pid, | ||||||
|  |                                      domain => ?RMQLOG_DOMAIN_CONN}). | ||||||
|  | 
 | ||||||
|  | -spec none(string()) -> 'ok'. | ||||||
|  | none(_Format) -> ok. | ||||||
|  | 
 | ||||||
|  | -spec none(string(), [any()]) -> 'ok'. | ||||||
|  | none(_Format, _Args) -> ok. | ||||||
|  | 
 | ||||||
|  | -spec none(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | none(_Pid, _Format, _Args) -> ok. | ||||||
|  | @ -0,0 +1,120 @@ | ||||||
|  | %% This Source Code Form is subject to the terms of the Mozilla Public | ||||||
|  | %% License, v. 2.0. If a copy of the MPL was not distributed with this | ||||||
|  | %% file, You can obtain one at https://mozilla.org/MPL/2.0/. | ||||||
|  | %% | ||||||
|  | %% Copyright (c) 2021 VMware, Inc. or its affiliates.  All rights reserved. | ||||||
|  | %% | ||||||
|  | 
 | ||||||
|  | %% @doc Compatibility module for the old Lager-based logging API. | ||||||
|  | -module(rabbit_log_feature_flags). | ||||||
|  | 
 | ||||||
|  | -export([debug/1, debug/2, debug/3, | ||||||
|  |          info/1, info/2, info/3, | ||||||
|  |          notice/1, notice/2, notice/3, | ||||||
|  |          warning/1, warning/2, warning/3, | ||||||
|  |          error/1, error/2, error/3, | ||||||
|  |          critical/1, critical/2, critical/3, | ||||||
|  |          alert/1, alert/2, alert/3, | ||||||
|  |          emergency/1, emergency/2, emergency/3, | ||||||
|  |          none/1, none/2, none/3]). | ||||||
|  | 
 | ||||||
|  | -include_lib("rabbit_common/include/logging.hrl"). | ||||||
|  | 
 | ||||||
|  | -compile({no_auto_import, [error/2, error/3]}). | ||||||
|  | 
 | ||||||
|  | -spec debug(string()) -> 'ok'. | ||||||
|  | debug(Format) -> debug(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec debug(string(), [any()]) -> 'ok'. | ||||||
|  | debug(Format, Args) -> debug(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec debug(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | debug(Pid, Format, Args) -> | ||||||
|  |     logger:debug(Format, Args, #{pid => Pid, | ||||||
|  |                                  domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}). | ||||||
|  | 
 | ||||||
|  | -spec info(string()) -> 'ok'. | ||||||
|  | info(Format) -> info(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec info(string(), [any()]) -> 'ok'. | ||||||
|  | info(Format, Args) -> info(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec info(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | info(Pid, Format, Args) -> | ||||||
|  |     logger:info(Format, Args, #{pid => Pid, | ||||||
|  |                                 domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}). | ||||||
|  | 
 | ||||||
|  | -spec notice(string()) -> 'ok'. | ||||||
|  | notice(Format) -> notice(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec notice(string(), [any()]) -> 'ok'. | ||||||
|  | notice(Format, Args) -> notice(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec notice(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | notice(Pid, Format, Args) -> | ||||||
|  |     logger:notice(Format, Args, #{pid => Pid, | ||||||
|  |                                   domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}). | ||||||
|  | 
 | ||||||
|  | -spec warning(string()) -> 'ok'. | ||||||
|  | warning(Format) -> warning(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec warning(string(), [any()]) -> 'ok'. | ||||||
|  | warning(Format, Args) -> warning(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec warning(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | warning(Pid, Format, Args) -> | ||||||
|  |     logger:warning(Format, Args, #{pid => Pid, | ||||||
|  |                                    domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}). | ||||||
|  | 
 | ||||||
|  | -spec error(string()) -> 'ok'. | ||||||
|  | error(Format) -> error(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec error(string(), [any()]) -> 'ok'. | ||||||
|  | error(Format, Args) -> error(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec error(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | error(Pid, Format, Args) -> | ||||||
|  |     logger:error(Format, Args, #{pid => Pid, | ||||||
|  |                                  domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}). | ||||||
|  | 
 | ||||||
|  | -spec critical(string()) -> 'ok'. | ||||||
|  | critical(Format) -> critical(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec critical(string(), [any()]) -> 'ok'. | ||||||
|  | critical(Format, Args) -> critical(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec critical(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | critical(Pid, Format, Args) -> | ||||||
|  |     logger:critical(Format, Args, #{pid => Pid, | ||||||
|  |                                     domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}). | ||||||
|  | 
 | ||||||
|  | -spec alert(string()) -> 'ok'. | ||||||
|  | alert(Format) -> alert(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec alert(string(), [any()]) -> 'ok'. | ||||||
|  | alert(Format, Args) -> alert(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec alert(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | alert(Pid, Format, Args) -> | ||||||
|  |     logger:alert(Format, Args, #{pid => Pid, | ||||||
|  |                                  domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}). | ||||||
|  | 
 | ||||||
|  | -spec emergency(string()) -> 'ok'. | ||||||
|  | emergency(Format) -> emergency(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec emergency(string(), [any()]) -> 'ok'. | ||||||
|  | emergency(Format, Args) -> emergency(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec emergency(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | emergency(Pid, Format, Args) -> | ||||||
|  |     logger:emergency(Format, Args, #{pid => Pid, | ||||||
|  |                                      domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}). | ||||||
|  | 
 | ||||||
|  | -spec none(string()) -> 'ok'. | ||||||
|  | none(_Format) -> ok. | ||||||
|  | 
 | ||||||
|  | -spec none(string(), [any()]) -> 'ok'. | ||||||
|  | none(_Format, _Args) -> ok. | ||||||
|  | 
 | ||||||
|  | -spec none(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | none(_Pid, _Format, _Args) -> ok. | ||||||
|  | @ -0,0 +1,122 @@ | ||||||
|  | %% This Source Code Form is subject to the terms of the Mozilla Public | ||||||
|  | %% License, v. 2.0. If a copy of the MPL was not distributed with this | ||||||
|  | %% file, You can obtain one at https://mozilla.org/MPL/2.0/. | ||||||
|  | %% | ||||||
|  | %% Copyright (c) 2021 VMware, Inc. or its affiliates.  All rights reserved. | ||||||
|  | %% | ||||||
|  | 
 | ||||||
|  | %% @doc Compatibility module for the old Lager-based logging API. | ||||||
|  | -module(rabbit_log_mirroring). | ||||||
|  | 
 | ||||||
|  | -export([debug/1, debug/2, debug/3, | ||||||
|  |          info/1, info/2, info/3, | ||||||
|  |          notice/1, notice/2, notice/3, | ||||||
|  |          warning/1, warning/2, warning/3, | ||||||
|  |          error/1, error/2, error/3, | ||||||
|  |          critical/1, critical/2, critical/3, | ||||||
|  |          alert/1, alert/2, alert/3, | ||||||
|  |          emergency/1, emergency/2, emergency/3, | ||||||
|  |          none/1, none/2, none/3]). | ||||||
|  | 
 | ||||||
|  | -include_lib("rabbit_common/include/logging.hrl"). | ||||||
|  | 
 | ||||||
|  | -compile({no_auto_import, [error/2, error/3]}). | ||||||
|  | 
 | ||||||
|  | %%---------------------------------------------------------------------------- | ||||||
|  | 
 | ||||||
|  | -spec debug(string()) -> 'ok'. | ||||||
|  | debug(Format) -> debug(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec debug(string(), [any()]) -> 'ok'. | ||||||
|  | debug(Format, Args) -> debug(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec debug(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | debug(Pid, Format, Args) -> | ||||||
|  |     logger:debug(Format, Args, #{pid => Pid, | ||||||
|  |                                  domain => ?RMQLOG_DOMAIN_MIRRORING}). | ||||||
|  | 
 | ||||||
|  | -spec info(string()) -> 'ok'. | ||||||
|  | info(Format) -> info(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec info(string(), [any()]) -> 'ok'. | ||||||
|  | info(Format, Args) -> info(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec info(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | info(Pid, Format, Args) -> | ||||||
|  |     logger:info(Format, Args, #{pid => Pid, | ||||||
|  |                                 domain => ?RMQLOG_DOMAIN_MIRRORING}). | ||||||
|  | 
 | ||||||
|  | -spec notice(string()) -> 'ok'. | ||||||
|  | notice(Format) -> notice(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec notice(string(), [any()]) -> 'ok'. | ||||||
|  | notice(Format, Args) -> notice(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec notice(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | notice(Pid, Format, Args) -> | ||||||
|  |     logger:notice(Format, Args, #{pid => Pid, | ||||||
|  |                                   domain => ?RMQLOG_DOMAIN_MIRRORING}). | ||||||
|  | 
 | ||||||
|  | -spec warning(string()) -> 'ok'. | ||||||
|  | warning(Format) -> warning(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec warning(string(), [any()]) -> 'ok'. | ||||||
|  | warning(Format, Args) -> warning(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec warning(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | warning(Pid, Format, Args) -> | ||||||
|  |     logger:warning(Format, Args, #{pid => Pid, | ||||||
|  |                                    domain => ?RMQLOG_DOMAIN_MIRRORING}). | ||||||
|  | 
 | ||||||
|  | -spec error(string()) -> 'ok'. | ||||||
|  | error(Format) -> error(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec error(string(), [any()]) -> 'ok'. | ||||||
|  | error(Format, Args) -> error(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec error(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | error(Pid, Format, Args) -> | ||||||
|  |     logger:error(Format, Args, #{pid => Pid, | ||||||
|  |                                  domain => ?RMQLOG_DOMAIN_MIRRORING}). | ||||||
|  | 
 | ||||||
|  | -spec critical(string()) -> 'ok'. | ||||||
|  | critical(Format) -> critical(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec critical(string(), [any()]) -> 'ok'. | ||||||
|  | critical(Format, Args) -> critical(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec critical(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | critical(Pid, Format, Args) -> | ||||||
|  |     logger:critical(Format, Args, #{pid => Pid, | ||||||
|  |                                     domain => ?RMQLOG_DOMAIN_MIRRORING}). | ||||||
|  | 
 | ||||||
|  | -spec alert(string()) -> 'ok'. | ||||||
|  | alert(Format) -> alert(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec alert(string(), [any()]) -> 'ok'. | ||||||
|  | alert(Format, Args) -> alert(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec alert(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | alert(Pid, Format, Args) -> | ||||||
|  |     logger:alert(Format, Args, #{pid => Pid, | ||||||
|  |                                  domain => ?RMQLOG_DOMAIN_MIRRORING}). | ||||||
|  | 
 | ||||||
|  | -spec emergency(string()) -> 'ok'. | ||||||
|  | emergency(Format) -> emergency(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec emergency(string(), [any()]) -> 'ok'. | ||||||
|  | emergency(Format, Args) -> emergency(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec emergency(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | emergency(Pid, Format, Args) -> | ||||||
|  |     logger:emergency(Format, Args, #{pid => Pid, | ||||||
|  |                                      domain => ?RMQLOG_DOMAIN_MIRRORING}). | ||||||
|  | 
 | ||||||
|  | -spec none(string()) -> 'ok'. | ||||||
|  | none(_Format) -> ok. | ||||||
|  | 
 | ||||||
|  | -spec none(string(), [any()]) -> 'ok'. | ||||||
|  | none(_Format, _Args) -> ok. | ||||||
|  | 
 | ||||||
|  | -spec none(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | none(_Pid, _Format, _Args) -> ok. | ||||||
|  | @ -0,0 +1,120 @@ | ||||||
|  | %% This Source Code Form is subject to the terms of the Mozilla Public | ||||||
|  | %% License, v. 2.0. If a copy of the MPL was not distributed with this | ||||||
|  | %% file, You can obtain one at https://mozilla.org/MPL/2.0/. | ||||||
|  | %% | ||||||
|  | %% Copyright (c) 2021 VMware, Inc. or its affiliates.  All rights reserved. | ||||||
|  | %% | ||||||
|  | 
 | ||||||
|  | %% @doc Compatibility module for the old Lager-based logging API. | ||||||
|  | -module(rabbit_log_prelaunch). | ||||||
|  | 
 | ||||||
|  | -export([debug/1, debug/2, debug/3, | ||||||
|  |          info/1, info/2, info/3, | ||||||
|  |          notice/1, notice/2, notice/3, | ||||||
|  |          warning/1, warning/2, warning/3, | ||||||
|  |          error/1, error/2, error/3, | ||||||
|  |          critical/1, critical/2, critical/3, | ||||||
|  |          alert/1, alert/2, alert/3, | ||||||
|  |          emergency/1, emergency/2, emergency/3, | ||||||
|  |          none/1, none/2, none/3]). | ||||||
|  | 
 | ||||||
|  | -include_lib("rabbit_common/include/logging.hrl"). | ||||||
|  | 
 | ||||||
|  | -compile({no_auto_import, [error/2, error/3]}). | ||||||
|  | 
 | ||||||
|  | -spec debug(string()) -> 'ok'. | ||||||
|  | debug(Format) -> debug(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec debug(string(), [any()]) -> 'ok'. | ||||||
|  | debug(Format, Args) -> debug(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec debug(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | debug(Pid, Format, Args) -> | ||||||
|  |     logger:debug(Format, Args, #{pid => Pid, | ||||||
|  |                                  domain => ?RMQLOG_DOMAIN_PRELAUNCH}). | ||||||
|  | 
 | ||||||
|  | -spec info(string()) -> 'ok'. | ||||||
|  | info(Format) -> info(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec info(string(), [any()]) -> 'ok'. | ||||||
|  | info(Format, Args) -> info(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec info(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | info(Pid, Format, Args) -> | ||||||
|  |     logger:info(Format, Args, #{pid => Pid, | ||||||
|  |                                 domain => ?RMQLOG_DOMAIN_PRELAUNCH}). | ||||||
|  | 
 | ||||||
|  | -spec notice(string()) -> 'ok'. | ||||||
|  | notice(Format) -> notice(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec notice(string(), [any()]) -> 'ok'. | ||||||
|  | notice(Format, Args) -> notice(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec notice(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | notice(Pid, Format, Args) -> | ||||||
|  |     logger:notice(Format, Args, #{pid => Pid, | ||||||
|  |                                   domain => ?RMQLOG_DOMAIN_PRELAUNCH}). | ||||||
|  | 
 | ||||||
|  | -spec warning(string()) -> 'ok'. | ||||||
|  | warning(Format) -> warning(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec warning(string(), [any()]) -> 'ok'. | ||||||
|  | warning(Format, Args) -> warning(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec warning(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | warning(Pid, Format, Args) -> | ||||||
|  |     logger:warning(Format, Args, #{pid => Pid, | ||||||
|  |                                    domain => ?RMQLOG_DOMAIN_PRELAUNCH}). | ||||||
|  | 
 | ||||||
|  | -spec error(string()) -> 'ok'. | ||||||
|  | error(Format) -> error(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec error(string(), [any()]) -> 'ok'. | ||||||
|  | error(Format, Args) -> error(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec error(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | error(Pid, Format, Args) -> | ||||||
|  |     logger:error(Format, Args, #{pid => Pid, | ||||||
|  |                                  domain => ?RMQLOG_DOMAIN_PRELAUNCH}). | ||||||
|  | 
 | ||||||
|  | -spec critical(string()) -> 'ok'. | ||||||
|  | critical(Format) -> critical(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec critical(string(), [any()]) -> 'ok'. | ||||||
|  | critical(Format, Args) -> critical(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec critical(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | critical(Pid, Format, Args) -> | ||||||
|  |     logger:critical(Format, Args, #{pid => Pid, | ||||||
|  |                                     domain => ?RMQLOG_DOMAIN_PRELAUNCH}). | ||||||
|  | 
 | ||||||
|  | -spec alert(string()) -> 'ok'. | ||||||
|  | alert(Format) -> alert(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec alert(string(), [any()]) -> 'ok'. | ||||||
|  | alert(Format, Args) -> alert(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec alert(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | alert(Pid, Format, Args) -> | ||||||
|  |     logger:alert(Format, Args, #{pid => Pid, | ||||||
|  |                                  domain => ?RMQLOG_DOMAIN_PRELAUNCH}). | ||||||
|  | 
 | ||||||
|  | -spec emergency(string()) -> 'ok'. | ||||||
|  | emergency(Format) -> emergency(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec emergency(string(), [any()]) -> 'ok'. | ||||||
|  | emergency(Format, Args) -> emergency(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec emergency(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | emergency(Pid, Format, Args) -> | ||||||
|  |     logger:emergency(Format, Args, #{pid => Pid, | ||||||
|  |                                      domain => ?RMQLOG_DOMAIN_PRELAUNCH}). | ||||||
|  | 
 | ||||||
|  | -spec none(string()) -> 'ok'. | ||||||
|  | none(_Format) -> ok. | ||||||
|  | 
 | ||||||
|  | -spec none(string(), [any()]) -> 'ok'. | ||||||
|  | none(_Format, _Args) -> ok. | ||||||
|  | 
 | ||||||
|  | -spec none(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | none(_Pid, _Format, _Args) -> ok. | ||||||
|  | @ -0,0 +1,120 @@ | ||||||
|  | %% This Source Code Form is subject to the terms of the Mozilla Public | ||||||
|  | %% License, v. 2.0. If a copy of the MPL was not distributed with this | ||||||
|  | %% file, You can obtain one at https://mozilla.org/MPL/2.0/. | ||||||
|  | %% | ||||||
|  | %% Copyright (c) 2021 VMware, Inc. or its affiliates.  All rights reserved. | ||||||
|  | %% | ||||||
|  | 
 | ||||||
|  | %% @doc Compatibility module for the old Lager-based logging API. | ||||||
|  | -module(rabbit_log_queue). | ||||||
|  | 
 | ||||||
|  | -export([debug/1, debug/2, debug/3, | ||||||
|  |          info/1, info/2, info/3, | ||||||
|  |          notice/1, notice/2, notice/3, | ||||||
|  |          warning/1, warning/2, warning/3, | ||||||
|  |          error/1, error/2, error/3, | ||||||
|  |          critical/1, critical/2, critical/3, | ||||||
|  |          alert/1, alert/2, alert/3, | ||||||
|  |          emergency/1, emergency/2, emergency/3, | ||||||
|  |          none/1, none/2, none/3]). | ||||||
|  | 
 | ||||||
|  | -include_lib("rabbit_common/include/logging.hrl"). | ||||||
|  | 
 | ||||||
|  | -compile({no_auto_import, [error/2, error/3]}). | ||||||
|  | 
 | ||||||
|  | -spec debug(string()) -> 'ok'. | ||||||
|  | debug(Format) -> debug(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec debug(string(), [any()]) -> 'ok'. | ||||||
|  | debug(Format, Args) -> debug(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec debug(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | debug(Pid, Format, Args) -> | ||||||
|  |     logger:debug(Format, Args, #{pid => Pid, | ||||||
|  |                                  domain => ?RMQLOG_DOMAIN_QUEUE}). | ||||||
|  | 
 | ||||||
|  | -spec info(string()) -> 'ok'. | ||||||
|  | info(Format) -> info(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec info(string(), [any()]) -> 'ok'. | ||||||
|  | info(Format, Args) -> info(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec info(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | info(Pid, Format, Args) -> | ||||||
|  |     logger:info(Format, Args, #{pid => Pid, | ||||||
|  |                                 domain => ?RMQLOG_DOMAIN_QUEUE}). | ||||||
|  | 
 | ||||||
|  | -spec notice(string()) -> 'ok'. | ||||||
|  | notice(Format) -> notice(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec notice(string(), [any()]) -> 'ok'. | ||||||
|  | notice(Format, Args) -> notice(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec notice(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | notice(Pid, Format, Args) -> | ||||||
|  |     logger:notice(Format, Args, #{pid => Pid, | ||||||
|  |                                   domain => ?RMQLOG_DOMAIN_QUEUE}). | ||||||
|  | 
 | ||||||
|  | -spec warning(string()) -> 'ok'. | ||||||
|  | warning(Format) -> warning(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec warning(string(), [any()]) -> 'ok'. | ||||||
|  | warning(Format, Args) -> warning(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec warning(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | warning(Pid, Format, Args) -> | ||||||
|  |     logger:warning(Format, Args, #{pid => Pid, | ||||||
|  |                                    domain => ?RMQLOG_DOMAIN_QUEUE}). | ||||||
|  | 
 | ||||||
|  | -spec error(string()) -> 'ok'. | ||||||
|  | error(Format) -> error(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec error(string(), [any()]) -> 'ok'. | ||||||
|  | error(Format, Args) -> error(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec error(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | error(Pid, Format, Args) -> | ||||||
|  |     logger:error(Format, Args, #{pid => Pid, | ||||||
|  |                                  domain => ?RMQLOG_DOMAIN_QUEUE}). | ||||||
|  | 
 | ||||||
|  | -spec critical(string()) -> 'ok'. | ||||||
|  | critical(Format) -> critical(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec critical(string(), [any()]) -> 'ok'. | ||||||
|  | critical(Format, Args) -> critical(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec critical(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | critical(Pid, Format, Args) -> | ||||||
|  |     logger:critical(Format, Args, #{pid => Pid, | ||||||
|  |                                     domain => ?RMQLOG_DOMAIN_QUEUE}). | ||||||
|  | 
 | ||||||
|  | -spec alert(string()) -> 'ok'. | ||||||
|  | alert(Format) -> alert(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec alert(string(), [any()]) -> 'ok'. | ||||||
|  | alert(Format, Args) -> alert(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec alert(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | alert(Pid, Format, Args) -> | ||||||
|  |     logger:alert(Format, Args, #{pid => Pid, | ||||||
|  |                                  domain => ?RMQLOG_DOMAIN_QUEUE}). | ||||||
|  | 
 | ||||||
|  | -spec emergency(string()) -> 'ok'. | ||||||
|  | emergency(Format) -> emergency(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec emergency(string(), [any()]) -> 'ok'. | ||||||
|  | emergency(Format, Args) -> emergency(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec emergency(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | emergency(Pid, Format, Args) -> | ||||||
|  |     logger:emergency(Format, Args, #{pid => Pid, | ||||||
|  |                                      domain => ?RMQLOG_DOMAIN_QUEUE}). | ||||||
|  | 
 | ||||||
|  | -spec none(string()) -> 'ok'. | ||||||
|  | none(_Format) -> ok. | ||||||
|  | 
 | ||||||
|  | -spec none(string(), [any()]) -> 'ok'. | ||||||
|  | none(_Format, _Args) -> ok. | ||||||
|  | 
 | ||||||
|  | -spec none(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | none(_Pid, _Format, _Args) -> ok. | ||||||
|  | @ -0,0 +1,122 @@ | ||||||
|  | %% This Source Code Form is subject to the terms of the Mozilla Public | ||||||
|  | %% License, v. 2.0. If a copy of the MPL was not distributed with this | ||||||
|  | %% file, You can obtain one at https://mozilla.org/MPL/2.0/. | ||||||
|  | %% | ||||||
|  | %% Copyright (c) 2021 VMware, Inc. or its affiliates.  All rights reserved. | ||||||
|  | %% | ||||||
|  | 
 | ||||||
|  | %% @doc Compatibility module for the old Lager-based logging API. | ||||||
|  | -module(rabbit_log_upgrade). | ||||||
|  | 
 | ||||||
|  | -export([debug/1, debug/2, debug/3, | ||||||
|  |          info/1, info/2, info/3, | ||||||
|  |          notice/1, notice/2, notice/3, | ||||||
|  |          warning/1, warning/2, warning/3, | ||||||
|  |          error/1, error/2, error/3, | ||||||
|  |          critical/1, critical/2, critical/3, | ||||||
|  |          alert/1, alert/2, alert/3, | ||||||
|  |          emergency/1, emergency/2, emergency/3, | ||||||
|  |          none/1, none/2, none/3]). | ||||||
|  | 
 | ||||||
|  | -include_lib("rabbit_common/include/logging.hrl"). | ||||||
|  | 
 | ||||||
|  | -compile({no_auto_import, [error/2, error/3]}). | ||||||
|  | 
 | ||||||
|  | %%---------------------------------------------------------------------------- | ||||||
|  | 
 | ||||||
|  | -spec debug(string()) -> 'ok'. | ||||||
|  | debug(Format) -> debug(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec debug(string(), [any()]) -> 'ok'. | ||||||
|  | debug(Format, Args) -> debug(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec debug(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | debug(Pid, Format, Args) -> | ||||||
|  |     logger:debug(Format, Args, #{pid => Pid, | ||||||
|  |                                  domain => ?RMQLOG_DOMAIN_UPGRADE}). | ||||||
|  | 
 | ||||||
|  | -spec info(string()) -> 'ok'. | ||||||
|  | info(Format) -> info(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec info(string(), [any()]) -> 'ok'. | ||||||
|  | info(Format, Args) -> info(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec info(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | info(Pid, Format, Args) -> | ||||||
|  |     logger:info(Format, Args, #{pid => Pid, | ||||||
|  |                                 domain => ?RMQLOG_DOMAIN_UPGRADE}). | ||||||
|  | 
 | ||||||
|  | -spec notice(string()) -> 'ok'. | ||||||
|  | notice(Format) -> notice(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec notice(string(), [any()]) -> 'ok'. | ||||||
|  | notice(Format, Args) -> notice(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec notice(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | notice(Pid, Format, Args) -> | ||||||
|  |     logger:notice(Format, Args, #{pid => Pid, | ||||||
|  |                                   domain => ?RMQLOG_DOMAIN_UPGRADE}). | ||||||
|  | 
 | ||||||
|  | -spec warning(string()) -> 'ok'. | ||||||
|  | warning(Format) -> warning(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec warning(string(), [any()]) -> 'ok'. | ||||||
|  | warning(Format, Args) -> warning(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec warning(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | warning(Pid, Format, Args) -> | ||||||
|  |     logger:warning(Format, Args, #{pid => Pid, | ||||||
|  |                                    domain => ?RMQLOG_DOMAIN_UPGRADE}). | ||||||
|  | 
 | ||||||
|  | -spec error(string()) -> 'ok'. | ||||||
|  | error(Format) -> error(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec error(string(), [any()]) -> 'ok'. | ||||||
|  | error(Format, Args) -> error(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec error(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | error(Pid, Format, Args) -> | ||||||
|  |     logger:error(Format, Args, #{pid => Pid, | ||||||
|  |                                  domain => ?RMQLOG_DOMAIN_UPGRADE}). | ||||||
|  | 
 | ||||||
|  | -spec critical(string()) -> 'ok'. | ||||||
|  | critical(Format) -> critical(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec critical(string(), [any()]) -> 'ok'. | ||||||
|  | critical(Format, Args) -> critical(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec critical(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | critical(Pid, Format, Args) -> | ||||||
|  |     logger:critical(Format, Args, #{pid => Pid, | ||||||
|  |                                     domain => ?RMQLOG_DOMAIN_UPGRADE}). | ||||||
|  | 
 | ||||||
|  | -spec alert(string()) -> 'ok'. | ||||||
|  | alert(Format) -> alert(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec alert(string(), [any()]) -> 'ok'. | ||||||
|  | alert(Format, Args) -> alert(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec alert(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | alert(Pid, Format, Args) -> | ||||||
|  |     logger:alert(Format, Args, #{pid => Pid, | ||||||
|  |                                  domain => ?RMQLOG_DOMAIN_UPGRADE}). | ||||||
|  | 
 | ||||||
|  | -spec emergency(string()) -> 'ok'. | ||||||
|  | emergency(Format) -> emergency(Format, []). | ||||||
|  | 
 | ||||||
|  | -spec emergency(string(), [any()]) -> 'ok'. | ||||||
|  | emergency(Format, Args) -> emergency(self(), Format, Args). | ||||||
|  | 
 | ||||||
|  | -spec emergency(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | emergency(Pid, Format, Args) -> | ||||||
|  |     logger:emergency(Format, Args, #{pid => Pid, | ||||||
|  |                                      domain => ?RMQLOG_DOMAIN_UPGRADE}). | ||||||
|  | 
 | ||||||
|  | -spec none(string()) -> 'ok'. | ||||||
|  | none(_Format) -> ok. | ||||||
|  | 
 | ||||||
|  | -spec none(string(), [any()]) -> 'ok'. | ||||||
|  | none(_Format, _Args) -> ok. | ||||||
|  | 
 | ||||||
|  | -spec none(pid() | [tuple()], string(), [any()]) -> 'ok'. | ||||||
|  | none(_Pid, _Format, _Args) -> ok. | ||||||
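These compatibility modules differ only in the domain macro they attach, which is what enables domain-based filtering on the Logger side. As an illustration (the handler and filter ids are assumptions, and the domain macro again comes from logging.hrl), a standard logger_filters:domain/2 filter could drop upgrade-domain events from a handler:

    %% Sketch: stop events tagged with the upgrade domain (or a sub-domain
    %% of it) on the 'default' handler; other events are unaffected.
    ok = logger:add_handler_filter(default, drop_upgrade_logs,
                                   {fun logger_filters:domain/2,
                                    {stop, sub, ?RMQLOG_DOMAIN_UPGRADE}}).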
|  | @ -0,0 +1,176 @@ | ||||||
|  | %% This Source Code Form is subject to the terms of the Mozilla Public | ||||||
|  | %% License, v. 2.0. If a copy of the MPL was not distributed with this | ||||||
|  | %% file, You can obtain one at https://mozilla.org/MPL/2.0/. | ||||||
|  | %% | ||||||
|  | %% Copyright (c) 2021 VMware, Inc. or its affiliates.  All rights reserved. | ||||||
|  | %% | ||||||
|  | 
 | ||||||
|  | -module(rabbit_logger_exchange_h). | ||||||
|  | 
 | ||||||
|  | -include_lib("kernel/include/logger.hrl"). | ||||||
|  | 
 | ||||||
|  | -include_lib("rabbit_common/include/rabbit.hrl"). | ||||||
|  | -include_lib("rabbit_common/include/rabbit_framing.hrl"). | ||||||
|  | -include_lib("rabbit_common/include/logging.hrl"). | ||||||
|  | 
 | ||||||
|  | %% logger callbacks | ||||||
|  | -export([log/2, adding_handler/1, removing_handler/1, changing_config/3, | ||||||
|  |          filter_config/1]). | ||||||
|  | 
 | ||||||
|  | -define(DECL_EXCHANGE_INTERVAL_SECS, 5). | ||||||
|  | -define(LOG_EXCH_NAME, <<"amq.rabbitmq.log">>). | ||||||
|  | -define(DEFAULT_FORMATTER, logger_formatter). | ||||||
|  | -define(DEFAULT_FORMATTER_CONFIG, #{}). | ||||||
|  | 
 | ||||||
|  | %% ------------------------------------------------------------------- | ||||||
|  | %% Logger handler callbacks. | ||||||
|  | %% ------------------------------------------------------------------- | ||||||
|  | 
 | ||||||
|  | adding_handler(Config) -> | ||||||
|  |     Config1 = start_setup_proc(Config), | ||||||
|  |     {ok, Config1}. | ||||||
|  | 
 | ||||||
|  | changing_config(_SetOrUpdate, OldConfig, _NewConfig) -> | ||||||
|  |     {ok, OldConfig}. | ||||||
|  | 
 | ||||||
|  | filter_config(Config) -> | ||||||
|  |     Config. | ||||||
|  | 
 | ||||||
|  | log(#{meta := #{mfa := {?MODULE, _, _}}}, _) -> | ||||||
|  |     ok; | ||||||
|  | log(LogEvent, Config) -> | ||||||
|  |     case rabbit_boot_state:get() of | ||||||
|  |         ready -> do_log(LogEvent, Config); | ||||||
|  |         _     -> ok | ||||||
|  |     end. | ||||||
|  | 
 | ||||||
|  | do_log(LogEvent, #{config := #{exchange := Exchange}} = Config) -> | ||||||
|  |     RoutingKey = make_routing_key(LogEvent, Config), | ||||||
|  |     AmqpMsg = log_event_to_amqp_msg(LogEvent, Config), | ||||||
|  |     Body = try_format_body(LogEvent, Config), | ||||||
|  |     case rabbit_basic:publish(Exchange, RoutingKey, AmqpMsg, Body) of | ||||||
|  |         ok                 -> ok; | ||||||
|  |         {error, not_found} -> ok | ||||||
|  |     end. | ||||||
|  | 
 | ||||||
|  | removing_handler(Config) -> | ||||||
|  |     unconfigure_exchange(Config), | ||||||
|  |     ok. | ||||||
|  | 
 | ||||||
|  | %% ------------------------------------------------------------------- | ||||||
|  | %% Internal functions. | ||||||
|  | %% ------------------------------------------------------------------- | ||||||
|  | 
 | ||||||
|  | log_event_to_amqp_msg(LogEvent, Config) -> | ||||||
|  |     ContentType = guess_content_type(Config), | ||||||
|  |     Timestamp = make_timestamp(LogEvent, Config), | ||||||
|  |     Headers = make_headers(LogEvent, Config), | ||||||
|  |     #'P_basic'{ | ||||||
|  |        content_type = ContentType, | ||||||
|  |        timestamp = Timestamp, | ||||||
|  |        headers = Headers | ||||||
|  |       }. | ||||||
|  | 
 | ||||||
|  | make_routing_key(#{level := Level}, _) -> | ||||||
|  |     rabbit_data_coercion:to_binary(Level). | ||||||
|  | 
 | ||||||
|  | guess_content_type(#{formatter := {rabbit_logger_json_fmt, _}}) -> | ||||||
|  |     <<"application/json">>; | ||||||
|  | guess_content_type(_) -> | ||||||
|  |     <<"text/plain">>. | ||||||
|  | 
 | ||||||
|  | make_timestamp(#{meta := #{time := Timestamp}}, _) -> | ||||||
|  |     erlang:convert_time_unit(Timestamp, microsecond, second); | ||||||
|  | make_timestamp(_, _) -> | ||||||
|  |      os:system_time(second). | ||||||
|  | 
 | ||||||
|  | make_headers(_, _) -> | ||||||
|  |     Node = rabbit_data_coercion:to_binary(node()), | ||||||
|  |     [{<<"node">>, longstr, Node}]. | ||||||
|  | 
 | ||||||
|  | try_format_body(LogEvent, #{formatter := {Formatter, FormatterConfig}}) -> | ||||||
|  |     Formatted = try_format_body(LogEvent, Formatter, FormatterConfig), | ||||||
|  |     erlang:iolist_to_binary(Formatted). | ||||||
|  | 
 | ||||||
|  | try_format_body(LogEvent, Formatter, FormatterConfig) -> | ||||||
|  |     try | ||||||
|  |         Formatter:format(LogEvent, FormatterConfig) | ||||||
|  |     catch | ||||||
|  |         C:R:S -> | ||||||
|  |             case {?DEFAULT_FORMATTER, ?DEFAULT_FORMATTER_CONFIG} of | ||||||
|  |                 {Formatter, FormatterConfig} -> | ||||||
|  |                     "DEFAULT FORMATTER CRASHED\n"; | ||||||
|  |                 {DefaultFormatter, DefaultFormatterConfig} -> | ||||||
|  |                     Msg = {"FORMATTER CRASH: ~tp -- ~p:~p:~p", | ||||||
|  |                            [maps:get(msg, LogEvent), C, R, S]}, | ||||||
|  |                     LogEvent1 = LogEvent#{msg => Msg}, | ||||||
|  |                     try_format_body( | ||||||
|  |                       LogEvent1, | ||||||
|  |                       DefaultFormatter, | ||||||
|  |                       DefaultFormatterConfig) | ||||||
|  |             end | ||||||
|  |     end. | ||||||
|  | 
 | ||||||
|  | start_setup_proc(#{config := InternalConfig} = Config) -> | ||||||
|  |     {ok, DefaultVHost} = application:get_env(rabbit, default_vhost), | ||||||
|  |     Exchange = rabbit_misc:r(DefaultVHost, exchange, ?LOG_EXCH_NAME), | ||||||
|  |     InternalConfig1 = InternalConfig#{exchange => Exchange}, | ||||||
|  | 
 | ||||||
|  |     Pid = spawn(fun() -> setup_proc(Config#{config => InternalConfig1}) end), | ||||||
|  |     InternalConfig2 = InternalConfig1#{setup_proc => Pid}, | ||||||
|  |     Config#{config => InternalConfig2}. | ||||||
|  | 
 | ||||||
|  | setup_proc( | ||||||
|  |   #{config := #{exchange := #resource{name = Name, | ||||||
|  |                                       virtual_host = VHost}}} = Config) -> | ||||||
|  |     case declare_exchange(Config) of | ||||||
|  |         ok -> | ||||||
|  |             ?LOG_INFO( | ||||||
|  |                "Logging to exchange '~s' in vhost '~s' ready", [Name, VHost], | ||||||
|  |                #{domain => ?RMQLOG_DOMAIN_GLOBAL}); | ||||||
|  |         error -> | ||||||
|  |             ?LOG_DEBUG( | ||||||
|  |                "Logging to exchange '~s' in vhost '~s' not ready, " | ||||||
|  |                "trying again in ~b second(s)", | ||||||
|  |                [Name, VHost, ?DECL_EXCHANGE_INTERVAL_SECS], | ||||||
|  |                #{domain => ?RMQLOG_DOMAIN_GLOBAL}), | ||||||
|  |             receive | ||||||
|  |                 stop -> ok | ||||||
|  |             after ?DECL_EXCHANGE_INTERVAL_SECS * 1000 -> | ||||||
|  |                       setup_proc(Config) | ||||||
|  |             end | ||||||
|  |     end. | ||||||
|  | 
 | ||||||
|  | declare_exchange( | ||||||
|  |   #{config := #{exchange := #resource{name = Name, | ||||||
|  |                                       virtual_host = VHost} = Exchange}}) -> | ||||||
|  |     try | ||||||
|  |         %% Declare a durable, internal topic exchange (not auto-deleted). | ||||||
|  |         #exchange{} = rabbit_exchange:declare( | ||||||
|  |                         Exchange, topic, true, false, true, [], | ||||||
|  |                         ?INTERNAL_USER), | ||||||
|  |         ?LOG_DEBUG( | ||||||
|  |            "Declared exchange '~s' in vhost '~s'", | ||||||
|  |            [Name, VHost], | ||||||
|  |            #{domain => ?RMQLOG_DOMAIN_GLOBAL}), | ||||||
|  |         ok | ||||||
|  |     catch | ||||||
|  |         Class:Reason -> | ||||||
|  |             ?LOG_DEBUG( | ||||||
|  |                "Could not declare exchange '~s' in vhost '~s', " | ||||||
|  |                "reason: ~0p:~0p", | ||||||
|  |                [Name, VHost, Class, Reason], | ||||||
|  |                #{domain => ?RMQLOG_DOMAIN_GLOBAL}), | ||||||
|  |            error | ||||||
|  |     end. | ||||||
|  | 
 | ||||||
|  | unconfigure_exchange( | ||||||
|  |   #{config := #{exchange := #resource{name = Name, | ||||||
|  |                                       virtual_host = VHost} = Exchange, | ||||||
|  |                 setup_proc := Pid}}) -> | ||||||
|  |     Pid ! stop, | ||||||
|  |     rabbit_exchange:delete(Exchange, false, ?INTERNAL_USER), | ||||||
|  |     ?LOG_INFO( | ||||||
|  |        "Logging to exchange '~s' in vhost '~s' disabled", | ||||||
|  |        [Name, VHost], | ||||||
|  |        #{domain => ?RMQLOG_DOMAIN_GLOBAL}). | ||||||
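For completeness, a handler such as rabbit_logger_exchange_h is normally attached through the standard Logger API. A minimal sketch follows; the handler id and the JSON formatter choice are assumptions and not taken from this commit. The setup process spawned in adding_handler/1 fills in the exchange resource, so an empty handler-specific config map is enough here:

    %% Sketch: attach the exchange handler with a JSON formatter.
    ok = logger:add_handler(rabbitmq_log_exchange, rabbit_logger_exchange_h,
                            #{config    => #{},
                              formatter => {rabbit_logger_json_fmt, #{}}}),
    %% Detaching it triggers removing_handler/1, which stops the setup
    %% process and deletes the exchange.
    ok = logger:remove_handler(rabbitmq_log_exchange).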
|  | @ -150,7 +150,7 @@ sync_mirrors(HandleInfo, EmitStats, | ||||||
|                               backing_queue_state = BQS }) -> |                               backing_queue_state = BQS }) -> | ||||||
|     Log = fun (Fmt, Params) -> |     Log = fun (Fmt, Params) -> | ||||||
|                   rabbit_mirror_queue_misc:log_info( |                   rabbit_mirror_queue_misc:log_info( | ||||||
|                     QName, "Synchronising: " ++ Fmt ++ "~n", Params) |                     QName, "Synchronising: " ++ Fmt, Params) | ||||||
|           end, |           end, | ||||||
|     Log("~p messages to synchronise", [BQ:len(BQS)]), |     Log("~p messages to synchronise", [BQ:len(BQS)]), | ||||||
|     {ok, Q} = rabbit_amqqueue:lookup(QName), |     {ok, Q} = rabbit_amqqueue:lookup(QName), | ||||||
|  | @ -198,7 +198,7 @@ terminate(Reason, | ||||||
|         true  -> %% Remove the whole queue to avoid data loss |         true  -> %% Remove the whole queue to avoid data loss | ||||||
|                  rabbit_mirror_queue_misc:log_warning( |                  rabbit_mirror_queue_misc:log_warning( | ||||||
|                    QName, "Stopping all nodes on master shutdown since no " |                    QName, "Stopping all nodes on master shutdown since no " | ||||||
|                    "synchronised mirror (replica) is available~n", []), |                    "synchronised mirror (replica) is available", []), | ||||||
|                  stop_all_slaves(Reason, State); |                  stop_all_slaves(Reason, State); | ||||||
|         false -> %% Just let some other mirror take over. |         false -> %% Just let some other mirror take over. | ||||||
|                  ok |                  ok | ||||||
|  |  | ||||||
|  | @ -211,7 +211,7 @@ drop_mirror(QName, MirrorNode) -> | ||||||
|                 [PrimaryPid] when MirrorPids =:= [] -> |                 [PrimaryPid] when MirrorPids =:= [] -> | ||||||
|                     {error, cannot_drop_only_mirror}; |                     {error, cannot_drop_only_mirror}; | ||||||
|                 [Pid] -> |                 [Pid] -> | ||||||
|                     log_info(Name, "Dropping queue mirror on node ~p~n", |                     log_info(Name, "Dropping queue mirror on node ~p", | ||||||
|                              [MirrorNode]), |                              [MirrorNode]), | ||||||
|                     exit(Pid, {shutdown, dropped}), |                     exit(Pid, {shutdown, dropped}), | ||||||
|                     {ok, dropped} |                     {ok, dropped} | ||||||
|  | @ -238,7 +238,7 @@ add_mirror(QName, MirrorNode, SyncMode) -> | ||||||
|                         {ok, _} -> |                         {ok, _} -> | ||||||
|                             try |                             try | ||||||
|                                 MirrorPid = rabbit_amqqueue_sup_sup:start_queue_process(MirrorNode, Q, slave), |                                 MirrorPid = rabbit_amqqueue_sup_sup:start_queue_process(MirrorNode, Q, slave), | ||||||
|                                 log_info(QName, "Adding mirror on node ~p: ~p~n", [MirrorNode, MirrorPid]), |                                 log_info(QName, "Adding mirror on node ~p: ~p", [MirrorNode, MirrorPid]), | ||||||
|                                 rabbit_mirror_queue_slave:go(MirrorPid, SyncMode) |                                 rabbit_mirror_queue_slave:go(MirrorPid, SyncMode) | ||||||
|                             of |                             of | ||||||
|                                 _ -> ok |                                 _ -> ok | ||||||
|  | @ -246,13 +246,13 @@ add_mirror(QName, MirrorNode, SyncMode) -> | ||||||
|                                 error:QError -> |                                 error:QError -> | ||||||
|                                     log_warning(QName, |                                     log_warning(QName, | ||||||
|                                         "Unable to start queue mirror on node '~p'. " |                                         "Unable to start queue mirror on node '~p'. " | ||||||
|                                         "Target queue supervisor is not running: ~p~n", |                                         "Target queue supervisor is not running: ~p", | ||||||
|                                         [MirrorNode, QError]) |                                         [MirrorNode, QError]) | ||||||
|                             end; |                             end; | ||||||
|                         {error, Error} -> |                         {error, Error} -> | ||||||
|                             log_warning(QName, |                             log_warning(QName, | ||||||
|                                         "Unable to start queue mirror on node '~p'. " |                                         "Unable to start queue mirror on node '~p'. " | ||||||
|                                         "Target virtual host is not running: ~p~n", |                                         "Target virtual host is not running: ~p", | ||||||
|                                         [MirrorNode, Error]), |                                         [MirrorNode, Error]), | ||||||
|                             ok |                             ok | ||||||
|                     end |                     end | ||||||
|  | @ -264,7 +264,7 @@ add_mirror(QName, MirrorNode, SyncMode) -> | ||||||
| report_deaths(_MirrorPid, _IsMaster, _QueueName, []) -> | report_deaths(_MirrorPid, _IsMaster, _QueueName, []) -> | ||||||
|     ok; |     ok; | ||||||
| report_deaths(MirrorPid, IsMaster, QueueName, DeadPids) -> | report_deaths(MirrorPid, IsMaster, QueueName, DeadPids) -> | ||||||
|     log_info(QueueName, "~s replica of queue ~s detected replica ~s to be down~n", |     log_info(QueueName, "~s replica of queue ~s detected replica ~s to be down", | ||||||
|                     [case IsMaster of |                     [case IsMaster of | ||||||
|                          true  -> "Primary"; |                          true  -> "Primary"; | ||||||
|                          false -> "Secondary" |                          false -> "Secondary" | ||||||
|  | @ -342,7 +342,7 @@ stop_all_slaves(Reason, SPids, QName, GM, WaitTimeout) -> | ||||||
|                 after WaitTimeout -> |                 after WaitTimeout -> | ||||||
|                         rabbit_mirror_queue_misc:log_warning( |                         rabbit_mirror_queue_misc:log_warning( | ||||||
|                           QName, "Missing 'DOWN' message from ~p in" |                           QName, "Missing 'DOWN' message from ~p in" | ||||||
|                           " node ~p~n", [Pid, node(Pid)]), |                           " node ~p", [Pid, node(Pid)]), | ||||||
|                         [Pid | Acc] |                         [Pid | Acc] | ||||||
|                 end; |                 end; | ||||||
|             false -> |             false -> | ||||||
|  |  | ||||||
|  | @ -137,7 +137,7 @@ handle_go(Q0) when ?is_amqqueue(Q0) -> | ||||||
|             {ok, State}; |             {ok, State}; | ||||||
|         {stale, StalePid} -> |         {stale, StalePid} -> | ||||||
|             rabbit_mirror_queue_misc:log_warning( |             rabbit_mirror_queue_misc:log_warning( | ||||||
|               QName, "Detected stale HA master: ~p~n", [StalePid]), |               QName, "Detected stale classic mirrored queue leader: ~p", [StalePid]), | ||||||
|             gm:leave(GM), |             gm:leave(GM), | ||||||
|             {error, {stale_master_pid, StalePid}}; |             {error, {stale_master_pid, StalePid}}; | ||||||
|         duplicate_live_master -> |         duplicate_live_master -> | ||||||
|  | @ -189,7 +189,7 @@ init_it(Self, GM, Node, QName) -> | ||||||
| stop_pending_slaves(QName, Pids) -> | stop_pending_slaves(QName, Pids) -> | ||||||
|     [begin |     [begin | ||||||
|          rabbit_mirror_queue_misc:log_warning( |          rabbit_mirror_queue_misc:log_warning( | ||||||
|            QName, "Detected a non-responsive classic queue mirror, stopping it: ~p~n", [Pid]), |            QName, "Detected a non-responsive classic queue mirror, stopping it: ~p", [Pid]), | ||||||
|          case erlang:process_info(Pid, dictionary) of |          case erlang:process_info(Pid, dictionary) of | ||||||
|              undefined -> ok; |              undefined -> ok; | ||||||
|              {dictionary, Dict} -> |              {dictionary, Dict} -> | ||||||
|  | @ -633,7 +633,7 @@ promote_me(From, #state { q                   = Q0, | ||||||
|                           msg_id_status       = MS, |                           msg_id_status       = MS, | ||||||
|                           known_senders       = KS}) when ?is_amqqueue(Q0) -> |                           known_senders       = KS}) when ?is_amqqueue(Q0) -> | ||||||
|     QName = amqqueue:get_name(Q0), |     QName = amqqueue:get_name(Q0), | ||||||
|     rabbit_mirror_queue_misc:log_info(QName, "Promoting mirror ~s to master~n", |     rabbit_mirror_queue_misc:log_info(QName, "Promoting mirror ~s to leader", | ||||||
|                                       [rabbit_misc:pid_to_string(self())]), |                                       [rabbit_misc:pid_to_string(self())]), | ||||||
|     Q1 = amqqueue:set_pid(Q0, self()), |     Q1 = amqqueue:set_pid(Q0, self()), | ||||||
|     DeathFun = rabbit_mirror_queue_master:sender_death_fun(), |     DeathFun = rabbit_mirror_queue_master:sender_death_fun(), | ||||||
|  |  | ||||||
|  | @ -69,7 +69,7 @@ init() -> | ||||||
|     case is_virgin_node() of |     case is_virgin_node() of | ||||||
|         true  -> |         true  -> | ||||||
|             rabbit_log:info("Node database directory at ~ts is empty. " |             rabbit_log:info("Node database directory at ~ts is empty. " | ||||||
|                             "Assuming we need to join an existing cluster or initialise from scratch...~n", |                             "Assuming we need to join an existing cluster or initialise from scratch...", | ||||||
|                             [dir()]), |                             [dir()]), | ||||||
|             rabbit_peer_discovery:log_configured_backend(), |             rabbit_peer_discovery:log_configured_backend(), | ||||||
|             rabbit_peer_discovery:maybe_init(), |             rabbit_peer_discovery:maybe_init(), | ||||||
|  | @ -155,7 +155,7 @@ run_peer_discovery_with_retries(RetriesLeft, DelayInterval) -> | ||||||
|                 e(invalid_cluster_nodes_conf) |                 e(invalid_cluster_nodes_conf) | ||||||
|         end, |         end, | ||||||
|     DiscoveredNodes = lists:usort(DiscoveredNodes0), |     DiscoveredNodes = lists:usort(DiscoveredNodes0), | ||||||
|     rabbit_log:info("All discovered existing cluster peers: ~s~n", |     rabbit_log:info("All discovered existing cluster peers: ~s", | ||||||
|                     [rabbit_peer_discovery:format_discovered_nodes(DiscoveredNodes)]), |                     [rabbit_peer_discovery:format_discovered_nodes(DiscoveredNodes)]), | ||||||
|     Peers = nodes_excl_me(DiscoveredNodes), |     Peers = nodes_excl_me(DiscoveredNodes), | ||||||
|     case Peers of |     case Peers of | ||||||
|  | @ -165,7 +165,7 @@ run_peer_discovery_with_retries(RetriesLeft, DelayInterval) -> | ||||||
|                             "Enabling debug logging might help troubleshoot."), |                             "Enabling debug logging might help troubleshoot."), | ||||||
|             init_db_and_upgrade([node()], disc, false, _Retry = true); |             init_db_and_upgrade([node()], disc, false, _Retry = true); | ||||||
|         _  -> |         _  -> | ||||||
|             rabbit_log:info("Peer nodes we can cluster with: ~s~n", |             rabbit_log:info("Peer nodes we can cluster with: ~s", | ||||||
|                 [rabbit_peer_discovery:format_discovered_nodes(Peers)]), |                 [rabbit_peer_discovery:format_discovered_nodes(Peers)]), | ||||||
|             join_discovered_peers(Peers, NodeType) |             join_discovered_peers(Peers, NodeType) | ||||||
|     end. |     end. | ||||||
|  | @ -180,13 +180,13 @@ join_discovered_peers(TryNodes, NodeType) -> | ||||||
| join_discovered_peers_with_retries(TryNodes, _NodeType, 0, _DelayInterval) -> | join_discovered_peers_with_retries(TryNodes, _NodeType, 0, _DelayInterval) -> | ||||||
|     rabbit_log:warning( |     rabbit_log:warning( | ||||||
|               "Could not successfully contact any node of: ~s (as in Erlang distribution). " |               "Could not successfully contact any node of: ~s (as in Erlang distribution). " | ||||||
|                "Starting as a blank standalone node...~n", |                "Starting as a blank standalone node...", | ||||||
|                 [string:join(lists:map(fun atom_to_list/1, TryNodes), ",")]), |                 [string:join(lists:map(fun atom_to_list/1, TryNodes), ",")]), | ||||||
|             init_db_and_upgrade([node()], disc, false, _Retry = true); |             init_db_and_upgrade([node()], disc, false, _Retry = true); | ||||||
| join_discovered_peers_with_retries(TryNodes, NodeType, RetriesLeft, DelayInterval) -> | join_discovered_peers_with_retries(TryNodes, NodeType, RetriesLeft, DelayInterval) -> | ||||||
|     case find_reachable_peer_to_cluster_with(nodes_excl_me(TryNodes)) of |     case find_reachable_peer_to_cluster_with(nodes_excl_me(TryNodes)) of | ||||||
|         {ok, Node} -> |         {ok, Node} -> | ||||||
|             rabbit_log:info("Node '~s' selected for auto-clustering~n", [Node]), |             rabbit_log:info("Node '~s' selected for auto-clustering", [Node]), | ||||||
|             {ok, {_, DiscNodes, _}} = discover_cluster0(Node), |             {ok, {_, DiscNodes, _}} = discover_cluster0(Node), | ||||||
|             init_db_and_upgrade(DiscNodes, NodeType, true, _Retry = true), |             init_db_and_upgrade(DiscNodes, NodeType, true, _Retry = true), | ||||||
|             rabbit_connection_tracking:boot(), |             rabbit_connection_tracking:boot(), | ||||||
|  | @ -237,7 +237,7 @@ join_cluster(DiscoveryNode, NodeType) -> | ||||||
|                     reset_gracefully(), |                     reset_gracefully(), | ||||||
| 
 | 
 | ||||||
|                     %% Join the cluster |                     %% Join the cluster | ||||||
|                     rabbit_log:info("Clustering with ~p as ~p node~n", |                     rabbit_log:info("Clustering with ~p as ~p node", | ||||||
|                                     [ClusterNodes, NodeType]), |                                     [ClusterNodes, NodeType]), | ||||||
|                     ok = init_db_with_mnesia(ClusterNodes, NodeType, |                     ok = init_db_with_mnesia(ClusterNodes, NodeType, | ||||||
|                                              true, true, _Retry = true), |                                              true, true, _Retry = true), | ||||||
|  | @ -252,7 +252,7 @@ join_cluster(DiscoveryNode, NodeType) -> | ||||||
|             %% do we think so ourselves? |             %% do we think so ourselves? | ||||||
|             case are_we_clustered_with(DiscoveryNode) of |             case are_we_clustered_with(DiscoveryNode) of | ||||||
|                 true -> |                 true -> | ||||||
|                     rabbit_log:info("Asked to join a cluster but already a member of it: ~p~n", [ClusterNodes]), |                     rabbit_log:info("Asked to join a cluster but already a member of it: ~p", [ClusterNodes]), | ||||||
|                     {ok, already_member}; |                     {ok, already_member}; | ||||||
|                 false -> |                 false -> | ||||||
|                     Msg = format_inconsistent_cluster_message(DiscoveryNode, node()), |                     Msg = format_inconsistent_cluster_message(DiscoveryNode, node()), | ||||||
|  | @ -269,14 +269,14 @@ join_cluster(DiscoveryNode, NodeType) -> | ||||||
| 
 | 
 | ||||||
| reset() -> | reset() -> | ||||||
|     ensure_mnesia_not_running(), |     ensure_mnesia_not_running(), | ||||||
|     rabbit_log:info("Resetting Rabbit~n", []), |     rabbit_log:info("Resetting Rabbit", []), | ||||||
|     reset_gracefully(). |     reset_gracefully(). | ||||||
| 
 | 
 | ||||||
| -spec force_reset() -> 'ok'. | -spec force_reset() -> 'ok'. | ||||||
| 
 | 
 | ||||||
| force_reset() -> | force_reset() -> | ||||||
|     ensure_mnesia_not_running(), |     ensure_mnesia_not_running(), | ||||||
|     rabbit_log:info("Resetting Rabbit forcefully~n", []), |     rabbit_log:info("Resetting Rabbit forcefully", []), | ||||||
|     wipe(). |     wipe(). | ||||||
| 
 | 
 | ||||||
| reset_gracefully() -> | reset_gracefully() -> | ||||||
|  | @ -336,7 +336,7 @@ update_cluster_nodes(DiscoveryNode) -> | ||||||
|             %% nodes |             %% nodes | ||||||
|             mnesia:delete_schema([node()]), |             mnesia:delete_schema([node()]), | ||||||
|             rabbit_node_monitor:write_cluster_status(Status), |             rabbit_node_monitor:write_cluster_status(Status), | ||||||
|             rabbit_log:info("Updating cluster nodes from ~p~n", |             rabbit_log:info("Updating cluster nodes from ~p", | ||||||
|                             [DiscoveryNode]), |                             [DiscoveryNode]), | ||||||
|             init_db_with_mnesia(AllNodes, node_type(), true, true, _Retry = false); |             init_db_with_mnesia(AllNodes, node_type(), true, true, _Retry = false); | ||||||
|         false -> |         false -> | ||||||
|  | @ -367,7 +367,7 @@ forget_cluster_node(Node, RemoveWhenOffline, EmitNodeDeletedEvent) -> | ||||||
|         {true,   true} -> e(online_node_offline_flag); |         {true,   true} -> e(online_node_offline_flag); | ||||||
|         {false, false} -> e(offline_node_no_offline_flag); |         {false, false} -> e(offline_node_no_offline_flag); | ||||||
|         {false,  true} -> rabbit_log:info( |         {false,  true} -> rabbit_log:info( | ||||||
|                             "Removing node ~p from cluster~n", [Node]), |                             "Removing node ~p from cluster", [Node]), | ||||||
|                           case remove_node_if_mnesia_running(Node) of |                           case remove_node_if_mnesia_running(Node) of | ||||||
|                               ok when EmitNodeDeletedEvent -> |                               ok when EmitNodeDeletedEvent -> | ||||||
|                                   rabbit_event:notify(node_deleted, [{node, Node}]), |                                   rabbit_event:notify(node_deleted, [{node, Node}]), | ||||||
|  | @ -814,7 +814,7 @@ schema_ok_or_move() -> | ||||||
|             %% started yet |             %% started yet | ||||||
|             rabbit_log:warning("schema integrity check failed: ~p~n" |             rabbit_log:warning("schema integrity check failed: ~p~n" | ||||||
|                                "moving database to backup location " |                                "moving database to backup location " | ||||||
|                                "and recreating schema from scratch~n", |                                "and recreating schema from scratch", | ||||||
|                                [Reason]), |                                [Reason]), | ||||||
|             ok = move_db(), |             ok = move_db(), | ||||||
|             ok = create_schema() |             ok = create_schema() | ||||||
|  | @ -848,7 +848,7 @@ move_db() -> | ||||||
|         ok -> |         ok -> | ||||||
|             %% NB: we cannot use rabbit_log here since it may not have |             %% NB: we cannot use rabbit_log here since it may not have | ||||||
|             %% been started yet |             %% been started yet | ||||||
|             rabbit_log:warning("moved database from ~s to ~s~n", |             rabbit_log:warning("moved database from ~s to ~s", | ||||||
|                                [MnesiaDir, BackupDir]), |                                [MnesiaDir, BackupDir]), | ||||||
|             ok; |             ok; | ||||||
|         {error, Reason} -> throw({error, {cannot_backup_mnesia, |         {error, Reason} -> throw({error, {cannot_backup_mnesia, | ||||||
|  | @ -895,7 +895,7 @@ leave_cluster(Node) -> | ||||||
|     end. |     end. | ||||||
| 
 | 
 | ||||||
| wait_for(Condition) -> | wait_for(Condition) -> | ||||||
|     rabbit_log:info("Waiting for ~p...~n", [Condition]), |     rabbit_log:info("Waiting for ~p...", [Condition]), | ||||||
|     timer:sleep(1000). |     timer:sleep(1000). | ||||||
| 
 | 
 | ||||||
| start_mnesia(CheckConsistency) -> | start_mnesia(CheckConsistency) -> | ||||||
|  | @ -1040,15 +1040,15 @@ find_reachable_peer_to_cluster_with([Node | Nodes]) -> | ||||||
|            end, |            end, | ||||||
|     case remote_node_info(Node) of |     case remote_node_info(Node) of | ||||||
|         {badrpc, _} = Reason -> |         {badrpc, _} = Reason -> | ||||||
|             Fail("~p~n", [Reason]); |             Fail("~p", [Reason]); | ||||||
|         %% old delegate hash check |         %% old delegate hash check | ||||||
|         {_OTP, RMQ, Hash, _} when is_binary(Hash) -> |         {_OTP, RMQ, Hash, _} when is_binary(Hash) -> | ||||||
|             Fail("version ~s~n", [RMQ]); |             Fail("version ~s", [RMQ]); | ||||||
|         {_OTP, _RMQ, _Protocol, {error, _} = E} -> |         {_OTP, _RMQ, _Protocol, {error, _} = E} -> | ||||||
|             Fail("~p~n", [E]); |             Fail("~p", [E]); | ||||||
|         {OTP, RMQ, Protocol, _} -> |         {OTP, RMQ, Protocol, _} -> | ||||||
|             case check_consistency(Node, OTP, RMQ, Protocol) of |             case check_consistency(Node, OTP, RMQ, Protocol) of | ||||||
|                 {error, _} -> Fail("versions ~p~n", |                 {error, _} -> Fail("versions ~p", | ||||||
|                                    [{OTP, RMQ}]); |                                    [{OTP, RMQ}]); | ||||||
|                 ok         -> {ok, Node} |                 ok         -> {ok, Node} | ||||||
|             end |             end | ||||||
|  |  | ||||||
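The hunks above all make the same mechanical change: the message text is kept and only the trailing "~n" is dropped from the format string, since the Logger-based pipeline is expected to append the line break when it formats the event. A minimal sketch of the before/after call shape, reusing the rabbit_log:info/2 call from the hunks above (the module and function names below are invented for illustration):

-module(log_newline_sketch).

-export([old_style/1, new_style/1]).

%% Pre-switch shape: the caller embeds the end-of-line in the format string.
old_style(Node) ->
    rabbit_log:info("Removing node ~p from cluster~n", [Node]).

%% Post-switch shape: no trailing "~n"; the logging pipeline appends the
%% newline when the event is formatted.
new_style(Node) ->
    rabbit_log:info("Removing node ~p from cluster", [Node]).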
|  | @ -144,7 +144,7 @@ finish(FromNode, ToNode, AllNodes) -> | ||||||
|             end; |             end; | ||||||
|         FromNode -> |         FromNode -> | ||||||
|             rabbit_log:info( |             rabbit_log:info( | ||||||
|               "Abandoning rename from ~s to ~s since we are still ~s~n", |               "Abandoning rename from ~s to ~s since we are still ~s", | ||||||
|               [FromNode, ToNode, FromNode]), |               [FromNode, ToNode, FromNode]), | ||||||
|             [{ok, _} = file:copy(backup_of_conf(F), F) || F <- config_files()], |             [{ok, _} = file:copy(backup_of_conf(F), F) || F <- config_files()], | ||||||
|             ok = rabbit_file:recursive_delete([rabbit_mnesia:dir()]), |             ok = rabbit_file:recursive_delete([rabbit_mnesia:dir()]), | ||||||
|  | @ -155,18 +155,18 @@ finish(FromNode, ToNode, AllNodes) -> | ||||||
|             %% Boot will almost certainly fail but we might as |             %% Boot will almost certainly fail but we might as | ||||||
|             %% well just log this |             %% well just log this | ||||||
|             rabbit_log:info( |             rabbit_log:info( | ||||||
|               "Rename attempted from ~s to ~s but we are ~s - ignoring.~n", |               "Rename attempted from ~s to ~s but we are ~s - ignoring.", | ||||||
|               [FromNode, ToNode, node()]) |               [FromNode, ToNode, node()]) | ||||||
|     end. |     end. | ||||||
| 
 | 
 | ||||||
| finish_primary(FromNode, ToNode) -> | finish_primary(FromNode, ToNode) -> | ||||||
|     rabbit_log:info("Restarting as primary after rename from ~s to ~s~n", |     rabbit_log:info("Restarting as primary after rename from ~s to ~s", | ||||||
|                     [FromNode, ToNode]), |                     [FromNode, ToNode]), | ||||||
|     delete_rename_files(), |     delete_rename_files(), | ||||||
|     ok. |     ok. | ||||||
| 
 | 
 | ||||||
| finish_secondary(FromNode, ToNode, AllNodes) -> | finish_secondary(FromNode, ToNode, AllNodes) -> | ||||||
|     rabbit_log:info("Restarting as secondary after rename from ~s to ~s~n", |     rabbit_log:info("Restarting as secondary after rename from ~s to ~s", | ||||||
|                     [FromNode, ToNode]), |                     [FromNode, ToNode]), | ||||||
|     rabbit_upgrade:secondary_upgrade(AllNodes), |     rabbit_upgrade:secondary_upgrade(AllNodes), | ||||||
|     rename_in_running_mnesia(FromNode, ToNode), |     rename_in_running_mnesia(FromNode, ToNode), | ||||||
|  |  | ||||||
|  | @ -717,7 +717,7 @@ init([Type, BaseDir, ClientRefs, StartupFunState]) -> | ||||||
|     Name = filename:join(filename:basename(BaseDir), atom_to_list(Type)), |     Name = filename:join(filename:basename(BaseDir), atom_to_list(Type)), | ||||||
| 
 | 
 | ||||||
|     {ok, IndexModule} = application:get_env(rabbit, msg_store_index_module), |     {ok, IndexModule} = application:get_env(rabbit, msg_store_index_module), | ||||||
|     rabbit_log:info("Message store ~tp: using ~p to provide index~n", [Name, IndexModule]), |     rabbit_log:info("Message store ~tp: using ~p to provide index", [Name, IndexModule]), | ||||||
| 
 | 
 | ||||||
|     AttemptFileSummaryRecovery = |     AttemptFileSummaryRecovery = | ||||||
|         case ClientRefs of |         case ClientRefs of | ||||||
|  | @ -794,11 +794,11 @@ init([Type, BaseDir, ClientRefs, StartupFunState]) -> | ||||||
|                       true -> "clean"; |                       true -> "clean"; | ||||||
|                       false -> "unclean" |                       false -> "unclean" | ||||||
|                   end, |                   end, | ||||||
|     rabbit_log:debug("Rebuilding message location index after ~s shutdown...~n", |     rabbit_log:debug("Rebuilding message location index after ~s shutdown...", | ||||||
|                      [Cleanliness]), |                      [Cleanliness]), | ||||||
|     {Offset, State1 = #msstate { current_file = CurFile }} = |     {Offset, State1 = #msstate { current_file = CurFile }} = | ||||||
|         build_index(CleanShutdown, StartupFunState, State), |         build_index(CleanShutdown, StartupFunState, State), | ||||||
|     rabbit_log:debug("Finished rebuilding index~n", []), |     rabbit_log:debug("Finished rebuilding index", []), | ||||||
|     %% read is only needed so that we can seek |     %% read is only needed so that we can seek | ||||||
|     {ok, CurHdl} = open_file(Dir, filenum_to_name(CurFile), |     {ok, CurHdl} = open_file(Dir, filenum_to_name(CurFile), | ||||||
|                              [read | ?WRITE_MODE]), |                              [read | ?WRITE_MODE]), | ||||||
|  | @ -999,7 +999,7 @@ terminate(_Reason, State = #msstate { index_state         = IndexState, | ||||||
|         {error, FSErr} -> |         {error, FSErr} -> | ||||||
|             rabbit_log:error("Unable to store file summary" |             rabbit_log:error("Unable to store file summary" | ||||||
|                              " for vhost message store for directory ~p~n" |                              " for vhost message store for directory ~p~n" | ||||||
|                              "Error: ~p~n", |                              "Error: ~p", | ||||||
|                              [Dir, FSErr]) |                              [Dir, FSErr]) | ||||||
|     end, |     end, | ||||||
|     [true = ets:delete(T) || T <- [FileSummaryEts, FileHandlesEts, |     [true = ets:delete(T) || T <- [FileSummaryEts, FileHandlesEts, | ||||||
|  | @ -1012,7 +1012,7 @@ terminate(_Reason, State = #msstate { index_state         = IndexState, | ||||||
|             ok; |             ok; | ||||||
|         {error, RTErr} -> |         {error, RTErr} -> | ||||||
|             rabbit_log:error("Unable to save message store recovery terms" |             rabbit_log:error("Unable to save message store recovery terms" | ||||||
|                              " for directory ~p~nError: ~p~n", |                              " for directory ~p~nError: ~p", | ||||||
|                              [Dir, RTErr]) |                              [Dir, RTErr]) | ||||||
|     end, |     end, | ||||||
|     State3 #msstate { index_state         = undefined, |     State3 #msstate { index_state         = undefined, | ||||||
|  | @ -1574,12 +1574,12 @@ index_clean_up_temporary_reference_count_entries( | ||||||
| recover_index_and_client_refs(IndexModule, _Recover, undefined, Dir, _Name) -> | recover_index_and_client_refs(IndexModule, _Recover, undefined, Dir, _Name) -> | ||||||
|     {false, IndexModule:new(Dir), []}; |     {false, IndexModule:new(Dir), []}; | ||||||
| recover_index_and_client_refs(IndexModule, false, _ClientRefs, Dir, Name) -> | recover_index_and_client_refs(IndexModule, false, _ClientRefs, Dir, Name) -> | ||||||
|     rabbit_log:warning("Message store ~tp: rebuilding indices from scratch~n", [Name]), |     rabbit_log:warning("Message store ~tp: rebuilding indices from scratch", [Name]), | ||||||
|     {false, IndexModule:new(Dir), []}; |     {false, IndexModule:new(Dir), []}; | ||||||
| recover_index_and_client_refs(IndexModule, true, ClientRefs, Dir, Name) -> | recover_index_and_client_refs(IndexModule, true, ClientRefs, Dir, Name) -> | ||||||
|     Fresh = fun (ErrorMsg, ErrorArgs) -> |     Fresh = fun (ErrorMsg, ErrorArgs) -> | ||||||
|                     rabbit_log:warning("Message store ~tp : " ++ ErrorMsg ++ "~n" |                     rabbit_log:warning("Message store ~tp : " ++ ErrorMsg ++ "~n" | ||||||
|                                        "rebuilding indices from scratch~n", |                                        "rebuilding indices from scratch", | ||||||
|                                        [Name | ErrorArgs]), |                                        [Name | ErrorArgs]), | ||||||
|                     {false, IndexModule:new(Dir), []} |                     {false, IndexModule:new(Dir), []} | ||||||
|             end, |             end, | ||||||
|  | @ -1741,9 +1741,9 @@ build_index(true, _StartupFunState, | ||||||
|       end, {0, State}, FileSummaryEts); |       end, {0, State}, FileSummaryEts); | ||||||
| build_index(false, {MsgRefDeltaGen, MsgRefDeltaGenInit}, | build_index(false, {MsgRefDeltaGen, MsgRefDeltaGenInit}, | ||||||
|             State = #msstate { dir = Dir }) -> |             State = #msstate { dir = Dir }) -> | ||||||
|     rabbit_log:debug("Rebuilding message refcount...~n", []), |     rabbit_log:debug("Rebuilding message refcount...", []), | ||||||
|     ok = count_msg_refs(MsgRefDeltaGen, MsgRefDeltaGenInit, State), |     ok = count_msg_refs(MsgRefDeltaGen, MsgRefDeltaGenInit, State), | ||||||
|     rabbit_log:debug("Done rebuilding message refcount~n", []), |     rabbit_log:debug("Done rebuilding message refcount", []), | ||||||
|     {ok, Pid} = gatherer:start_link(), |     {ok, Pid} = gatherer:start_link(), | ||||||
|     case [filename_to_num(FileName) || |     case [filename_to_num(FileName) || | ||||||
|              FileName <- list_sorted_filenames(Dir, ?FILE_EXTENSION)] of |              FileName <- list_sorted_filenames(Dir, ?FILE_EXTENSION)] of | ||||||
|  | @ -1757,7 +1757,7 @@ build_index(false, {MsgRefDeltaGen, MsgRefDeltaGenInit}, | ||||||
| build_index_worker(Gatherer, State = #msstate { dir = Dir }, | build_index_worker(Gatherer, State = #msstate { dir = Dir }, | ||||||
|                    Left, File, Files) -> |                    Left, File, Files) -> | ||||||
|     FileName = filenum_to_name(File), |     FileName = filenum_to_name(File), | ||||||
|     rabbit_log:debug("Rebuilding message location index from ~p (~B file(s) remaining)~n", |     rabbit_log:debug("Rebuilding message location index from ~p (~B file(s) remaining)", | ||||||
|                      [form_filename(Dir, FileName), length(Files)]), |                      [form_filename(Dir, FileName), length(Files)]), | ||||||
|     {ok, Messages, FileSize} = |     {ok, Messages, FileSize} = | ||||||
|         scan_file_for_valid_messages(Dir, FileName), |         scan_file_for_valid_messages(Dir, FileName), | ||||||
|  |  | ||||||
|  | @ -70,7 +70,7 @@ terminate(#state { table = MsgLocations, dir = Dir }) -> | ||||||
|         ok           -> ok; |         ok           -> ok; | ||||||
|         {error, Err} -> |         {error, Err} -> | ||||||
|             rabbit_log:error("Unable to save message store index" |             rabbit_log:error("Unable to save message store index" | ||||||
|                              " for directory ~p.~nError: ~p~n", |                              " for directory ~p.~nError: ~p", | ||||||
|                              [Dir, Err]) |                              [Dir, Err]) | ||||||
|     end, |     end, | ||||||
|     ets:delete(MsgLocations). |     ets:delete(MsgLocations). | ||||||
|  |  | ||||||
|  | @ -150,7 +150,7 @@ log_poodle_fail(Context) -> | ||||||
|       "better.~n~n" |       "better.~n~n" | ||||||
|       "If you cannot upgrade now and want to re-enable SSL listeners, you can~n" |       "If you cannot upgrade now and want to re-enable SSL listeners, you can~n" | ||||||
|       "set the config item 'ssl_allow_poodle_attack' to 'true' in the~n" |       "set the config item 'ssl_allow_poodle_attack' to 'true' in the~n" | ||||||
|       "'rabbit' section of your configuration file.~n", |       "'rabbit' section of your configuration file.", | ||||||
|       [rabbit_misc:otp_release(), Context]). |       [rabbit_misc:otp_release(), Context]). | ||||||
| 
 | 
 | ||||||
| fix_ssl_options(Config) -> | fix_ssl_options(Config) -> | ||||||
|  | @ -171,7 +171,7 @@ tcp_listener_addresses({Host, Port, Family0}) | ||||||
|     [{IPAddress, Port, Family} || |     [{IPAddress, Port, Family} || | ||||||
|         {IPAddress, Family} <- getaddr(Host, Family0)]; |         {IPAddress, Family} <- getaddr(Host, Family0)]; | ||||||
| tcp_listener_addresses({_Host, Port, _Family0}) -> | tcp_listener_addresses({_Host, Port, _Family0}) -> | ||||||
|     rabbit_log:error("invalid port ~p - not 0..65535~n", [Port]), |     rabbit_log:error("invalid port ~p - not 0..65535", [Port]), | ||||||
|     throw({error, {invalid_port, Port}}). |     throw({error, {invalid_port, Port}}). | ||||||
| 
 | 
 | ||||||
| tcp_listener_addresses_auto(Port) -> | tcp_listener_addresses_auto(Port) -> | ||||||
|  | @ -371,11 +371,11 @@ on_node_down(Node) -> | ||||||
|     case lists:member(Node, nodes()) of |     case lists:member(Node, nodes()) of | ||||||
|         false -> |         false -> | ||||||
|             rabbit_log:info( |             rabbit_log:info( | ||||||
|                    "Node ~s is down, deleting its listeners~n", [Node]), |                    "Node ~s is down, deleting its listeners", [Node]), | ||||||
|             ok = mnesia:dirty_delete(rabbit_listener, Node); |             ok = mnesia:dirty_delete(rabbit_listener, Node); | ||||||
|         true  -> |         true  -> | ||||||
|             rabbit_log:info( |             rabbit_log:info( | ||||||
|                    "Keeping ~s listeners: the node is already back~n", [Node]) |                    "Keeping ~s listeners: the node is already back", [Node]) | ||||||
|     end. |     end. | ||||||
| 
 | 
 | ||||||
| -spec register_connection(pid()) -> ok. | -spec register_connection(pid()) -> ok. | ||||||
|  | @ -457,11 +457,11 @@ close_connection(Pid, Explanation) -> | ||||||
|     case lists:member(Pid, connections()) of |     case lists:member(Pid, connections()) of | ||||||
|         true  -> |         true  -> | ||||||
|             Res = rabbit_reader:shutdown(Pid, Explanation), |             Res = rabbit_reader:shutdown(Pid, Explanation), | ||||||
|             rabbit_log:info("Closing connection ~p because ~p~n", [Pid, Explanation]), |             rabbit_log:info("Closing connection ~p because ~p", [Pid, Explanation]), | ||||||
|             Res; |             Res; | ||||||
|         false -> |         false -> | ||||||
|             rabbit_log:warning("Asked to close connection ~p (reason: ~p) " |             rabbit_log:warning("Asked to close connection ~p (reason: ~p) " | ||||||
|                                "but no running cluster node reported it as an active connection. Was it already closed? ~n", |                                "but no running cluster node reported it as an active connection. Was it already closed? ", | ||||||
|                                [Pid, Explanation]), |                                [Pid, Explanation]), | ||||||
|             ok |             ok | ||||||
|     end. |     end. | ||||||
|  | @ -578,7 +578,7 @@ gethostaddr(Host, Family) -> | ||||||
| 
 | 
 | ||||||
| -spec host_lookup_error(_, _) -> no_return(). | -spec host_lookup_error(_, _) -> no_return(). | ||||||
| host_lookup_error(Host, Reason) -> | host_lookup_error(Host, Reason) -> | ||||||
|     rabbit_log:error("invalid host ~p - ~p~n", [Host, Reason]), |     rabbit_log:error("invalid host ~p - ~p", [Host, Reason]), | ||||||
|     throw({error, {invalid_host, Host, Reason}}). |     throw({error, {invalid_host, Host, Reason}}). | ||||||
| 
 | 
 | ||||||
| resolve_family({_,_,_,_},         auto) -> inet; | resolve_family({_,_,_,_},         auto) -> inet; | ||||||
|  |  | ||||||
|  | @ -322,11 +322,11 @@ find_blocked_global_peers1([], _) -> | ||||||
| unblock_global_peer(PeerNode) -> | unblock_global_peer(PeerNode) -> | ||||||
|     ThisNode = node(), |     ThisNode = node(), | ||||||
|     PeerState = rpc:call(PeerNode, sys, get_status, [global_name_server]), |     PeerState = rpc:call(PeerNode, sys, get_status, [global_name_server]), | ||||||
|     error_logger:info_msg( |     logger:info( | ||||||
|       "Global hang workaround: global state on ~s seems broken~n" |       "Global hang workaround: global state on ~s seems broken~n" | ||||||
|       " * Peer global state:  ~p~n" |       " * Peer global state:  ~p~n" | ||||||
|       " * Local global state: ~p~n" |       " * Local global state: ~p~n" | ||||||
|       "Faking nodedown/nodeup between ~s and ~s~n", |       "Faking nodedown/nodeup between ~s and ~s", | ||||||
|       [PeerNode, PeerState, sys:get_status(global_name_server), |       [PeerNode, PeerState, sys:get_status(global_name_server), | ||||||
|        PeerNode, ThisNode]), |        PeerNode, ThisNode]), | ||||||
|     {global_name_server, ThisNode} ! {nodedown, PeerNode}, |     {global_name_server, ThisNode} ! {nodedown, PeerNode}, | ||||||
|  | @ -434,7 +434,7 @@ handle_cast({check_partial_partition, Node, Rep, NodeGUID, MyGUID, RepGUID}, | ||||||
|                                _           -> |                                _           -> | ||||||
|                                    rabbit_log:warning("Received a 'DOWN' message" |                                    rabbit_log:warning("Received a 'DOWN' message" | ||||||
|                                                       " from ~p but still can" |                                                       " from ~p but still can" | ||||||
|                                                       " communicate with it ~n", |                                                       " communicate with it ", | ||||||
|                                                       [Node]), |                                                       [Node]), | ||||||
|                                    cast(Rep, {partial_partition, |                                    cast(Rep, {partial_partition, | ||||||
|                                                          Node, node(), RepGUID}) |                                                          Node, node(), RepGUID}) | ||||||
|  | @ -468,7 +468,7 @@ handle_cast({partial_partition, NotReallyDown, Proxy, MyGUID}, | ||||||
|         {ok, pause_minority} -> |         {ok, pause_minority} -> | ||||||
|             rabbit_log:error( |             rabbit_log:error( | ||||||
|               FmtBase ++ " * pause_minority mode enabled~n" |               FmtBase ++ " * pause_minority mode enabled~n" | ||||||
|               "We will therefore pause until the *entire* cluster recovers~n", |               "We will therefore pause until the *entire* cluster recovers", | ||||||
|               ArgsBase), |               ArgsBase), | ||||||
|             await_cluster_recovery(fun all_nodes_up/0), |             await_cluster_recovery(fun all_nodes_up/0), | ||||||
|             {noreply, State}; |             {noreply, State}; | ||||||
|  | @ -476,16 +476,16 @@ handle_cast({partial_partition, NotReallyDown, Proxy, MyGUID}, | ||||||
|             case in_preferred_partition(PreferredNodes) of |             case in_preferred_partition(PreferredNodes) of | ||||||
|                 true  -> rabbit_log:error( |                 true  -> rabbit_log:error( | ||||||
|                            FmtBase ++ "We will therefore intentionally " |                            FmtBase ++ "We will therefore intentionally " | ||||||
|                            "disconnect from ~s~n", ArgsBase ++ [Proxy]), |                            "disconnect from ~s", ArgsBase ++ [Proxy]), | ||||||
|                          upgrade_to_full_partition(Proxy); |                          upgrade_to_full_partition(Proxy); | ||||||
|                 false -> rabbit_log:info( |                 false -> rabbit_log:info( | ||||||
|                            FmtBase ++ "We are about to pause, no need " |                            FmtBase ++ "We are about to pause, no need " | ||||||
|                            "for further actions~n", ArgsBase) |                            "for further actions", ArgsBase) | ||||||
|             end, |             end, | ||||||
|             {noreply, State}; |             {noreply, State}; | ||||||
|         {ok, _} -> |         {ok, _} -> | ||||||
|             rabbit_log:error( |             rabbit_log:error( | ||||||
|               FmtBase ++ "We will therefore intentionally disconnect from ~s~n", |               FmtBase ++ "We will therefore intentionally disconnect from ~s", | ||||||
|               ArgsBase ++ [Proxy]), |               ArgsBase ++ [Proxy]), | ||||||
|             upgrade_to_full_partition(Proxy), |             upgrade_to_full_partition(Proxy), | ||||||
|             {noreply, State} |             {noreply, State} | ||||||
|  | @ -498,7 +498,7 @@ handle_cast({partial_partition, _GUID, _Reporter, _Proxy}, State) -> | ||||||
| %% messages reliably when another node disconnects from us. Therefore | %% messages reliably when another node disconnects from us. Therefore | ||||||
| %% we are told just before the disconnection so we can reciprocate. | %% we are told just before the disconnection so we can reciprocate. | ||||||
| handle_cast({partial_partition_disconnect, Other}, State) -> | handle_cast({partial_partition_disconnect, Other}, State) -> | ||||||
|     rabbit_log:error("Partial partition disconnect from ~s~n", [Other]), |     rabbit_log:error("Partial partition disconnect from ~s", [Other]), | ||||||
|     disconnect(Other), |     disconnect(Other), | ||||||
|     {noreply, State}; |     {noreply, State}; | ||||||
| 
 | 
 | ||||||
|  | @ -507,7 +507,7 @@ handle_cast({partial_partition_disconnect, Other}, State) -> | ||||||
| %% mnesia propagation. | %% mnesia propagation. | ||||||
| handle_cast({node_up, Node, NodeType}, | handle_cast({node_up, Node, NodeType}, | ||||||
|             State = #state{monitors = Monitors}) -> |             State = #state{monitors = Monitors}) -> | ||||||
|     rabbit_log:info("rabbit on node ~p up~n", [Node]), |     rabbit_log:info("rabbit on node ~p up", [Node]), | ||||||
|     {AllNodes, DiscNodes, RunningNodes} = read_cluster_status(), |     {AllNodes, DiscNodes, RunningNodes} = read_cluster_status(), | ||||||
|     write_cluster_status({add_node(Node, AllNodes), |     write_cluster_status({add_node(Node, AllNodes), | ||||||
|                           case NodeType of |                           case NodeType of | ||||||
|  | @ -551,7 +551,7 @@ handle_cast(_Msg, State) -> | ||||||
| 
 | 
 | ||||||
| handle_info({'DOWN', _MRef, process, {rabbit, Node}, _Reason}, | handle_info({'DOWN', _MRef, process, {rabbit, Node}, _Reason}, | ||||||
|             State = #state{monitors = Monitors, subscribers = Subscribers}) -> |             State = #state{monitors = Monitors, subscribers = Subscribers}) -> | ||||||
|     rabbit_log:info("rabbit on node ~p down~n", [Node]), |     rabbit_log:info("rabbit on node ~p down", [Node]), | ||||||
|     {AllNodes, DiscNodes, RunningNodes} = read_cluster_status(), |     {AllNodes, DiscNodes, RunningNodes} = read_cluster_status(), | ||||||
|     write_cluster_status({AllNodes, DiscNodes, del_node(Node, RunningNodes)}), |     write_cluster_status({AllNodes, DiscNodes, del_node(Node, RunningNodes)}), | ||||||
|     [P ! {node_down, Node} || P <- pmon:monitored(Subscribers)], |     [P ! {node_down, Node} || P <- pmon:monitored(Subscribers)], | ||||||
|  | @ -565,7 +565,7 @@ handle_info({'DOWN', _MRef, process, Pid, _Reason}, | ||||||
| 
 | 
 | ||||||
| handle_info({nodedown, Node, Info}, State = #state{guid       = MyGUID, | handle_info({nodedown, Node, Info}, State = #state{guid       = MyGUID, | ||||||
|                                                    node_guids = GUIDs}) -> |                                                    node_guids = GUIDs}) -> | ||||||
|     rabbit_log:info("node ~p down: ~p~n", |     rabbit_log:info("node ~p down: ~p", | ||||||
|                     [Node, proplists:get_value(nodedown_reason, Info)]), |                     [Node, proplists:get_value(nodedown_reason, Info)]), | ||||||
|     Check = fun (N, CheckGUID, DownGUID) -> |     Check = fun (N, CheckGUID, DownGUID) -> | ||||||
|                     cast(N, {check_partial_partition, |                     cast(N, {check_partial_partition, | ||||||
|  | @ -583,7 +583,7 @@ handle_info({nodedown, Node, Info}, State = #state{guid       = MyGUID, | ||||||
|     {noreply, handle_dead_node(Node, State)}; |     {noreply, handle_dead_node(Node, State)}; | ||||||
| 
 | 
 | ||||||
| handle_info({nodeup, Node, _Info}, State) -> | handle_info({nodeup, Node, _Info}, State) -> | ||||||
|     rabbit_log:info("node ~p up~n", [Node]), |     rabbit_log:info("node ~p up", [Node]), | ||||||
|     {noreply, State}; |     {noreply, State}; | ||||||
| 
 | 
 | ||||||
| handle_info({mnesia_system_event, | handle_info({mnesia_system_event, | ||||||
|  | @ -687,13 +687,13 @@ handle_dead_node(Node, State = #state{autoheal = Autoheal}) -> | ||||||
|             State#state{autoheal = rabbit_autoheal:node_down(Node, Autoheal)}; |             State#state{autoheal = rabbit_autoheal:node_down(Node, Autoheal)}; | ||||||
|         {ok, Term} -> |         {ok, Term} -> | ||||||
|             rabbit_log:warning("cluster_partition_handling ~p unrecognised, " |             rabbit_log:warning("cluster_partition_handling ~p unrecognised, " | ||||||
|                                "assuming 'ignore'~n", [Term]), |                                "assuming 'ignore'", [Term]), | ||||||
|             State |             State | ||||||
|     end. |     end. | ||||||
| 
 | 
 | ||||||
| await_cluster_recovery(Condition) -> | await_cluster_recovery(Condition) -> | ||||||
|     rabbit_log:warning("Cluster minority/secondary status detected - " |     rabbit_log:warning("Cluster minority/secondary status detected - " | ||||||
|                        "awaiting recovery~n", []), |                        "awaiting recovery", []), | ||||||
|     run_outside_applications(fun () -> |     run_outside_applications(fun () -> | ||||||
|                                      rabbit:stop(), |                                      rabbit:stop(), | ||||||
|                                      wait_for_cluster_recovery(Condition) |                                      wait_for_cluster_recovery(Condition) | ||||||
|  | @ -744,7 +744,7 @@ do_run_outside_app_fun(Fun) -> | ||||||
|         Fun() |         Fun() | ||||||
|     catch _:E:Stacktrace -> |     catch _:E:Stacktrace -> | ||||||
|             rabbit_log:error( |             rabbit_log:error( | ||||||
|               "rabbit_outside_app_process:~n~p~n~p~n", |               "rabbit_outside_app_process:~n~p~n~p", | ||||||
|               [E, Stacktrace]) |               [E, Stacktrace]) | ||||||
|     end. |     end. | ||||||
| 
 | 
 | ||||||
|  | @ -920,7 +920,7 @@ possibly_partitioned_nodes() -> | ||||||
|     alive_rabbit_nodes() -- rabbit_nodes:all_running(). |     alive_rabbit_nodes() -- rabbit_nodes:all_running(). | ||||||
| 
 | 
 | ||||||
| startup_log([]) -> | startup_log([]) -> | ||||||
|     rabbit_log:info("Starting rabbit_node_monitor~n", []); |     rabbit_log:info("Starting rabbit_node_monitor", []); | ||||||
| startup_log(Nodes) -> | startup_log(Nodes) -> | ||||||
|     rabbit_log:info("Starting rabbit_node_monitor, might be partitioned from ~p~n", |     rabbit_log:info("Starting rabbit_node_monitor, might be partitioned from ~p", | ||||||
|                     [Nodes]). |                     [Nodes]). | ||||||
|  |  | ||||||
|  | @ -84,7 +84,7 @@ lock_acquisition_failure_mode() -> | ||||||
| -spec log_configured_backend() -> ok. | -spec log_configured_backend() -> ok. | ||||||
| 
 | 
 | ||||||
| log_configured_backend() -> | log_configured_backend() -> | ||||||
|   rabbit_log:info("Configured peer discovery backend: ~s~n", [backend()]). |   rabbit_log:info("Configured peer discovery backend: ~s", [backend()]). | ||||||
| 
 | 
 | ||||||
| maybe_init() -> | maybe_init() -> | ||||||
|     Backend = backend(), |     Backend = backend(), | ||||||
|  |  | ||||||
|  | @ -56,13 +56,13 @@ ensure1(FileJustChanged0) -> | ||||||
|                 {[], []} -> |                 {[], []} -> | ||||||
|                     ok; |                     ok; | ||||||
|                 {[], _} -> |                 {[], _} -> | ||||||
|                     rabbit_log:info("Plugins changed; disabled ~p~n", |                     rabbit_log:info("Plugins changed; disabled ~p", | ||||||
|                                     [Stop]); |                                     [Stop]); | ||||||
|                 {_, []} -> |                 {_, []} -> | ||||||
|                     rabbit_log:info("Plugins changed; enabled ~p~n", |                     rabbit_log:info("Plugins changed; enabled ~p", | ||||||
|                                     [Start]); |                                     [Start]); | ||||||
|                 {_, _} -> |                 {_, _} -> | ||||||
|                     rabbit_log:info("Plugins changed; enabled ~p, disabled ~p~n", |                     rabbit_log:info("Plugins changed; enabled ~p, disabled ~p", | ||||||
|                                     [Start, Stop]) |                                     [Start, Stop]) | ||||||
|             end, |             end, | ||||||
|             {ok, Start, Stop}; |             {ok, Start, Stop}; | ||||||
|  | @ -429,7 +429,7 @@ prepare_dir_plugin(PluginAppDescPath) -> | ||||||
|                     rabbit_log:error("Failed to enable plugin \"~s\": " |                     rabbit_log:error("Failed to enable plugin \"~s\": " | ||||||
|                                      "it may have been built with an " |                                      "it may have been built with an " | ||||||
|                                      "incompatible (more recent?) " |                                      "incompatible (more recent?) " | ||||||
|                                      "version of Erlang~n", [Plugin]), |                                      "version of Erlang", [Plugin]), | ||||||
|                     throw({plugin_built_with_incompatible_erlang, Plugin}); |                     throw({plugin_built_with_incompatible_erlang, Plugin}); | ||||||
|                 Error -> |                 Error -> | ||||||
|                     throw({plugin_module_unloadable, Plugin, Error}) |                     throw({plugin_module_unloadable, Plugin, Error}) | ||||||
|  | @ -459,11 +459,11 @@ prepare_plugin(#plugin{type = ez, name = Name, location = Location}, ExpandDir) | ||||||
|                 [PluginAppDescPath|_] -> |                 [PluginAppDescPath|_] -> | ||||||
|                     prepare_dir_plugin(PluginAppDescPath); |                     prepare_dir_plugin(PluginAppDescPath); | ||||||
|                 _ -> |                 _ -> | ||||||
|                     rabbit_log:error("Plugin archive '~s' doesn't contain an .app file~n", [Location]), |                     rabbit_log:error("Plugin archive '~s' doesn't contain an .app file", [Location]), | ||||||
|                     throw({app_file_missing, Name, Location}) |                     throw({app_file_missing, Name, Location}) | ||||||
|             end; |             end; | ||||||
|         {error, Reason} -> |         {error, Reason} -> | ||||||
|             rabbit_log:error("Could not unzip plugin archive '~s': ~p~n", [Location, Reason]), |             rabbit_log:error("Could not unzip plugin archive '~s': ~p", [Location, Reason]), | ||||||
|             throw({failed_to_unzip_plugin, Name, Location, Reason}) |             throw({failed_to_unzip_plugin, Name, Location, Reason}) | ||||||
|     end; |     end; | ||||||
| prepare_plugin(#plugin{type = dir, location = Location, name = Name}, | prepare_plugin(#plugin{type = dir, location = Location, name = Name}, | ||||||
|  | @ -472,7 +472,7 @@ prepare_plugin(#plugin{type = dir, location = Location, name = Name}, | ||||||
|         [PluginAppDescPath|_] -> |         [PluginAppDescPath|_] -> | ||||||
|             prepare_dir_plugin(PluginAppDescPath); |             prepare_dir_plugin(PluginAppDescPath); | ||||||
|         _ -> |         _ -> | ||||||
|             rabbit_log:error("Plugin directory '~s' doesn't contain an .app file~n", [Location]), |             rabbit_log:error("Plugin directory '~s' doesn't contain an .app file", [Location]), | ||||||
|             throw({app_file_missing, Name, Location}) |             throw({app_file_missing, Name, Location}) | ||||||
|     end. |     end. | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -1,15 +1,24 @@ | ||||||
| -module(rabbit_prelaunch_cluster). | -module(rabbit_prelaunch_cluster). | ||||||
| 
 | 
 | ||||||
|  | -include_lib("kernel/include/logger.hrl"). | ||||||
|  | 
 | ||||||
|  | -include_lib("rabbit_common/include/logging.hrl"). | ||||||
|  | 
 | ||||||
| -export([setup/1]). | -export([setup/1]). | ||||||
| 
 | 
 | ||||||
| setup(Context) -> | setup(Context) -> | ||||||
|     rabbit_log_prelaunch:debug(""), |     ?LOG_DEBUG( | ||||||
|     rabbit_log_prelaunch:debug("== Clustering =="), |        "~n== Clustering ==", [], | ||||||
|     rabbit_log_prelaunch:debug("Preparing cluster status files"), |        #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|  |     ?LOG_DEBUG( | ||||||
|  |        "Preparing cluster status files", [], | ||||||
|  |        #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|     rabbit_node_monitor:prepare_cluster_status_files(), |     rabbit_node_monitor:prepare_cluster_status_files(), | ||||||
|     case Context of |     case Context of | ||||||
|         #{initial_pass := true} -> |         #{initial_pass := true} -> | ||||||
|             rabbit_log_prelaunch:debug("Upgrading Mnesia schema"), |             ?LOG_DEBUG( | ||||||
|  |                "Upgrading Mnesia schema", [], | ||||||
|  |                #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|             ok = rabbit_upgrade:maybe_upgrade_mnesia(); |             ok = rabbit_upgrade:maybe_upgrade_mnesia(); | ||||||
|         _ -> |         _ -> | ||||||
|             ok |             ok | ||||||
|  | @ -17,6 +26,8 @@ setup(Context) -> | ||||||
|     %% It's important that the consistency check happens after |     %% It's important that the consistency check happens after | ||||||
|     %% the upgrade, since if we are a secondary node the |     %% the upgrade, since if we are a secondary node the | ||||||
|     %% primary node will have forgotten us |     %% primary node will have forgotten us | ||||||
|     rabbit_log_prelaunch:debug("Checking cluster consistency"), |     ?LOG_DEBUG( | ||||||
|  |        "Checking cluster consistency", [], | ||||||
|  |        #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|     rabbit_mnesia:check_cluster_consistency(), |     rabbit_mnesia:check_cluster_consistency(), | ||||||
|     ok. |     ok. | ||||||
|  |  | ||||||
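The prelaunch module above shows the second half of the switch: rabbit_log_prelaunch:debug/1,2 calls are replaced by the standard ?LOG_DEBUG macro from kernel's logger.hrl, and each event carries a domain from rabbit_common/include/logging.hrl so handlers and filters can recognise prelaunch messages as a group. A minimal sketch of that shape, assuming only the two include files shown in the hunk (the module and function names are invented for illustration):

-module(prelaunch_log_sketch).

-include_lib("kernel/include/logger.hrl").
-include_lib("rabbit_common/include/logging.hrl").

-export([setup/0]).

setup() ->
    %% ?LOG_DEBUG(Format, Args, Metadata): the domain key tags the event so
    %% logger filters can route or drop prelaunch output selectively.
    ?LOG_DEBUG("== Example prelaunch step ==", [],
               #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
    ok.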
|  | @ -7,13 +7,17 @@ | ||||||
| 
 | 
 | ||||||
| -module(rabbit_prelaunch_enabled_plugins_file). | -module(rabbit_prelaunch_enabled_plugins_file). | ||||||
| 
 | 
 | ||||||
|  | -include_lib("kernel/include/logger.hrl"). | ||||||
|  | 
 | ||||||
| -include_lib("rabbit_common/include/rabbit.hrl"). | -include_lib("rabbit_common/include/rabbit.hrl"). | ||||||
|  | -include_lib("rabbit_common/include/logging.hrl"). | ||||||
| 
 | 
 | ||||||
| -export([setup/1]). | -export([setup/1]). | ||||||
| 
 | 
 | ||||||
| setup(Context) -> | setup(Context) -> | ||||||
|     rabbit_log_prelaunch:debug(""), |     ?LOG_DEBUG( | ||||||
|     rabbit_log_prelaunch:debug("== Enabled plugins file =="), |        "~n== Enabled plugins file ==", [], | ||||||
|  |        #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|     update_enabled_plugins_file(Context). |     update_enabled_plugins_file(Context). | ||||||
| 
 | 
 | ||||||
| %% ------------------------------------------------------------------- | %% ------------------------------------------------------------------- | ||||||
|  | @ -33,21 +37,28 @@ do_update_enabled_plugins_file(#{enabled_plugins_file := File}, List) -> | ||||||
|     SortedList = lists:usort(List), |     SortedList = lists:usort(List), | ||||||
|     case SortedList of |     case SortedList of | ||||||
|         [] -> |         [] -> | ||||||
|             rabbit_log_prelaunch:debug("Marking all plugins as disabled"); |             ?LOG_DEBUG( | ||||||
|  |                "Marking all plugins as disabled", [], | ||||||
|  |                #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}); | ||||||
|         _ -> |         _ -> | ||||||
|             rabbit_log_prelaunch:debug( |             ?LOG_DEBUG( | ||||||
|               "Marking the following plugins as enabled:"), |               lists:flatten(["Marking the following plugins as enabled:", | ||||||
|             [rabbit_log_prelaunch:debug("  - ~s", [P]) || P <- SortedList] |                              ["~n  - ~s" || _ <- SortedList]]), | ||||||
|  |               SortedList, | ||||||
|  |               #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}) | ||||||
|     end, |     end, | ||||||
|     Content = io_lib:format("~p.~n", [SortedList]), |     Content = io_lib:format("~p.~n", [SortedList]), | ||||||
|     case file:write_file(File, Content) of |     case file:write_file(File, Content) of | ||||||
|         ok -> |         ok -> | ||||||
|             rabbit_log_prelaunch:debug("Wrote plugins file: ~ts", [File]), |             ?LOG_DEBUG( | ||||||
|  |                "Wrote plugins file: ~ts", [File], | ||||||
|  |                #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|             ok; |             ok; | ||||||
|         {error, Reason} -> |         {error, Reason} -> | ||||||
|             rabbit_log_prelaunch:error( |             ?LOG_ERROR( | ||||||
|               "Failed to update enabled plugins file \"~ts\" " |               "Failed to update enabled plugins file \"~ts\" " | ||||||
|               "from $RABBITMQ_ENABLED_PLUGINS: ~ts", |               "from $RABBITMQ_ENABLED_PLUGINS: ~ts", | ||||||
|               [File, file:format_error(Reason)]), |               [File, file:format_error(Reason)], | ||||||
|  |               #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|             throw({error, failed_to_update_enabled_plugins_file}) |             throw({error, failed_to_update_enabled_plugins_file}) | ||||||
|     end. |     end. | ||||||
|  |  | ||||||
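The enabled-plugins hunk above also changes how a list is logged: instead of one debug call per plugin, the new code builds a single format string with one "~n  - ~s" placeholder per element, so the whole list is emitted as one log event. A short sketch of that idiom (module and function names are made up; the format construction mirrors the hunk):

-module(multiline_log_sketch).

-include_lib("kernel/include/logger.hrl").

-export([log_enabled/1]).

log_enabled([]) ->
    ?LOG_DEBUG("Marking all plugins as disabled", []);
log_enabled(Plugins) ->
    %% One "~n  - ~s" per plugin, flattened into a single format string;
    %% the plugin names themselves become the format arguments.
    Format = lists:flatten(
               ["Marking the following plugins as enabled:",
                ["~n  - ~s" || _ <- Plugins]]),
    ?LOG_DEBUG(Format, Plugins).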
|  | @ -7,26 +7,35 @@ | ||||||
| 
 | 
 | ||||||
| -module(rabbit_prelaunch_feature_flags). | -module(rabbit_prelaunch_feature_flags). | ||||||
| 
 | 
 | ||||||
|  | -include_lib("kernel/include/logger.hrl"). | ||||||
|  | 
 | ||||||
|  | -include_lib("rabbit_common/include/logging.hrl"). | ||||||
|  | 
 | ||||||
| -export([setup/1]). | -export([setup/1]). | ||||||
| 
 | 
 | ||||||
| setup(#{feature_flags_file := FFFile}) -> | setup(#{feature_flags_file := FFFile}) -> | ||||||
|     rabbit_log_prelaunch:debug(""), |     ?LOG_DEBUG( | ||||||
|     rabbit_log_prelaunch:debug("== Feature flags =="), |        "~n== Feature flags ==", [], | ||||||
|  |        #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|     case filelib:ensure_dir(FFFile) of |     case filelib:ensure_dir(FFFile) of | ||||||
|         ok -> |         ok -> | ||||||
|             rabbit_log_prelaunch:debug("Initializing feature flags registry"), |             ?LOG_DEBUG( | ||||||
|  |                "Initializing feature flags registry", [], | ||||||
|  |                #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|             case rabbit_feature_flags:initialize_registry() of |             case rabbit_feature_flags:initialize_registry() of | ||||||
|                 ok -> |                 ok -> | ||||||
|                     ok; |                     ok; | ||||||
|                 {error, Reason} -> |                 {error, Reason} -> | ||||||
|                     rabbit_log_prelaunch:error( |                     ?LOG_ERROR( | ||||||
|                       "Failed to initialize feature flags registry: ~p", |                       "Failed to initialize feature flags registry: ~p", | ||||||
|                       [Reason]), |                       [Reason], | ||||||
|  |                       #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|                     throw({error, failed_to_initialize_feature_flags_registry}) |                     throw({error, failed_to_initialize_feature_flags_registry}) | ||||||
|             end; |             end; | ||||||
|         {error, Reason} -> |         {error, Reason} -> | ||||||
|             rabbit_log_prelaunch:error( |             ?LOG_ERROR( | ||||||
|               "Failed to create feature flags file \"~ts\" directory: ~ts", |               "Failed to create feature flags file \"~ts\" directory: ~ts", | ||||||
|               [FFFile, file:format_error(Reason)]), |               [FFFile, file:format_error(Reason)], | ||||||
|  |               #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), | ||||||
|             throw({error, failed_to_create_feature_flags_file_directory}) |             throw({error, failed_to_create_feature_flags_file_directory}) | ||||||
|     end. |     end. | ||||||
|  |  | ||||||
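The feature-flags hunk shows the two includes the migrated prelaunch modules rely on: kernel/include/logger.hrl for the ?LOG_* macros and rabbit_common/include/logging.hrl for the ?RMQLOG_DOMAIN_* macros. A skeleton of that combination, with an illustrative module name:

    -module(prelaunch_logging_skeleton).

    -include_lib("kernel/include/logger.hrl").
    -include_lib("rabbit_common/include/logging.hrl").

    -export([announce/0]).

    announce() ->
        %% An explicit (possibly empty) argument list is required when
        %% metadata is passed as the third argument.
        ?LOG_DEBUG("~n== Feature flags ==", [],
                   #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}).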
										
											
File diff suppressed because it is too large
|  | @ -79,7 +79,7 @@ init(Q0, restart) when ?is_amqqueue(Q0) -> | ||||||
| 
 | 
 | ||||||
| crash_restart(Q0) when ?is_amqqueue(Q0) -> | crash_restart(Q0) when ?is_amqqueue(Q0) -> | ||||||
|     QueueName = amqqueue:get_name(Q0), |     QueueName = amqqueue:get_name(Q0), | ||||||
|     rabbit_log:error("Restarting crashed ~s.~n", [rabbit_misc:rs(QueueName)]), |     rabbit_log:error("Restarting crashed ~s.", [rabbit_misc:rs(QueueName)]), | ||||||
|     gen_server2:cast(self(), init), |     gen_server2:cast(self(), init), | ||||||
|     Q1 = amqqueue:set_pid(Q0, self()), |     Q1 = amqqueue:set_pid(Q0, self()), | ||||||
|     rabbit_amqqueue_process:init(Q1). |     rabbit_amqqueue_process:init(Q1). | ||||||
|  |  | ||||||
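A recurring change across these hunks is the removal of the trailing "~n": the Logger handlers terminate every event with a line break of their own, so the explicit newline would only produce blank lines. A sketch of the equivalent call made directly against the Logger API; the module is illustrative and the real code keeps going through the rabbit_log facade shown above.

    -module(newline_example).
    -export([demo/1]).

    demo(QueueNameString) ->
        %% No trailing "~n": the handler appends the line break.
        logger:error("Restarting crashed ~s.", [QueueNameString]).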
|  | @ -67,7 +67,7 @@ enable() -> | ||||||
|     {ok, RealBQ} = application:get_env(rabbit, backing_queue_module), |     {ok, RealBQ} = application:get_env(rabbit, backing_queue_module), | ||||||
|     case RealBQ of |     case RealBQ of | ||||||
|         ?MODULE -> ok; |         ?MODULE -> ok; | ||||||
|         _       -> rabbit_log:info("Priority queues enabled, real BQ is ~s~n", |         _       -> rabbit_log:info("Priority queues enabled, real BQ is ~s", | ||||||
|                                    [RealBQ]), |                                    [RealBQ]), | ||||||
|                    application:set_env( |                    application:set_env( | ||||||
|                      rabbitmq_priority_queue, backing_queue_module, RealBQ), |                      rabbitmq_priority_queue, backing_queue_module, RealBQ), | ||||||
|  |  | ||||||
|  | @ -1478,7 +1478,7 @@ move_to_per_vhost_stores(#resource{virtual_host = VHost} = QueueName) -> | ||||||
|             ok = rabbit_file:rename(OldQueueDir, NewQueueDir), |             ok = rabbit_file:rename(OldQueueDir, NewQueueDir), | ||||||
|             ok = ensure_queue_name_stub_file(NewQueueDir, QueueName); |             ok = ensure_queue_name_stub_file(NewQueueDir, QueueName); | ||||||
|         false -> |         false -> | ||||||
|             Msg  = "Queue index directory '~s' not found for ~s~n", |             Msg  = "Queue index directory '~s' not found for ~s", | ||||||
|             Args = [OldQueueDir, rabbit_misc:rs(QueueName)], |             Args = [OldQueueDir, rabbit_misc:rs(QueueName)], | ||||||
|             rabbit_log_upgrade:error(Msg, Args), |             rabbit_log_upgrade:error(Msg, Args), | ||||||
|             rabbit_log:error(Msg, Args) |             rabbit_log:error(Msg, Args) | ||||||
|  |  | ||||||
|  | @ -419,7 +419,7 @@ handle_tick(QName, | ||||||
|                           [] -> |                           [] -> | ||||||
|                               ok; |                               ok; | ||||||
|                           Stale -> |                           Stale -> | ||||||
|                               rabbit_log:info("~s: stale nodes detected. Purging ~w~n", |                               rabbit_log:info("~s: stale nodes detected. Purging ~w", | ||||||
|                                               [rabbit_misc:rs(QName), Stale]), |                                               [rabbit_misc:rs(QName), Stale]), | ||||||
|                               %% pipeline purge command |                               %% pipeline purge command | ||||||
|                               {ok, Q} = rabbit_amqqueue:lookup(QName), |                               {ok, Q} = rabbit_amqqueue:lookup(QName), | ||||||
|  | @ -618,7 +618,7 @@ force_delete_queue(Servers) -> | ||||||
|              Err -> |              Err -> | ||||||
|                  rabbit_log:warning( |                  rabbit_log:warning( | ||||||
|                    "Force delete of ~w failed with: ~w" |                    "Force delete of ~w failed with: ~w" | ||||||
|                    "This may require manual data clean up~n", |                    "This may require manual data clean up", | ||||||
|                    [S, Err]), |                    [S, Err]), | ||||||
|                  ok |                  ok | ||||||
|          end |          end | ||||||
|  |  | ||||||
|  | @ -272,19 +272,16 @@ server_capabilities(_) -> | ||||||
| %%-------------------------------------------------------------------------- | %%-------------------------------------------------------------------------- | ||||||
| 
 | 
 | ||||||
| socket_error(Reason) when is_atom(Reason) -> | socket_error(Reason) when is_atom(Reason) -> | ||||||
|     rabbit_log_connection:error("Error on AMQP connection ~p: ~s~n", |     rabbit_log_connection:error("Error on AMQP connection ~p: ~s", | ||||||
|         [self(), rabbit_misc:format_inet_error(Reason)]); |         [self(), rabbit_misc:format_inet_error(Reason)]); | ||||||
| socket_error(Reason) -> | socket_error(Reason) -> | ||||||
|     Fmt = "Error on AMQP connection ~p:~n~p~n", |     Fmt = "Error on AMQP connection ~p:~n~p", | ||||||
|     Args = [self(), Reason], |     Args = [self(), Reason], | ||||||
|     case Reason of |     case Reason of | ||||||
|         %% The socket was closed while upgrading to SSL. |         %% The socket was closed while upgrading to SSL. | ||||||
|         %% This is presumably a TCP healthcheck, so don't log |         %% This is presumably a TCP healthcheck, so don't log | ||||||
|         %% it unless specified otherwise. |         %% it unless specified otherwise. | ||||||
|         {ssl_upgrade_error, closed} -> |         {ssl_upgrade_error, closed} -> | ||||||
|             %% Lager sinks (rabbit_log_connection) |  | ||||||
|             %% are handled by the lager parse_transform. |  | ||||||
|             %% Hence have to define the loglevel as a function call. |  | ||||||
|             rabbit_log_connection:debug(Fmt, Args); |             rabbit_log_connection:debug(Fmt, Args); | ||||||
|         _ -> |         _ -> | ||||||
|             rabbit_log_connection:error(Fmt, Args) |             rabbit_log_connection:error(Fmt, Args) | ||||||
|  | @ -365,11 +362,11 @@ start_connection(Parent, HelperSup, Deb, Sock) -> | ||||||
|             %% connection was closed cleanly by the client |             %% connection was closed cleanly by the client | ||||||
|             #v1{connection = #connection{user  = #user{username = Username}, |             #v1{connection = #connection{user  = #user{username = Username}, | ||||||
|                                          vhost = VHost}} -> |                                          vhost = VHost}} -> | ||||||
|                 rabbit_log_connection:info("closing AMQP connection ~p (~s, vhost: '~s', user: '~s')~n", |                 rabbit_log_connection:info("closing AMQP connection ~p (~s, vhost: '~s', user: '~s')", | ||||||
|                     [self(), dynamic_connection_name(Name), VHost, Username]); |                     [self(), dynamic_connection_name(Name), VHost, Username]); | ||||||
|             %% just to be more defensive |             %% just to be more defensive | ||||||
|             _ -> |             _ -> | ||||||
|                 rabbit_log_connection:info("closing AMQP connection ~p (~s)~n", |                 rabbit_log_connection:info("closing AMQP connection ~p (~s)", | ||||||
|                     [self(), dynamic_connection_name(Name)]) |                     [self(), dynamic_connection_name(Name)]) | ||||||
|             end |             end | ||||||
|     catch |     catch | ||||||
|  | @ -419,36 +416,36 @@ log_connection_exception(Severity, Name, {heartbeat_timeout, TimeoutSec}) -> | ||||||
|     %% Long line to avoid extra spaces and line breaks in log |     %% Long line to avoid extra spaces and line breaks in log | ||||||
|     log_connection_exception_with_severity(Severity, |     log_connection_exception_with_severity(Severity, | ||||||
|         "closing AMQP connection ~p (~s):~n" |         "closing AMQP connection ~p (~s):~n" | ||||||
|         "missed heartbeats from client, timeout: ~ps~n", |         "missed heartbeats from client, timeout: ~ps", | ||||||
|         [self(), Name, TimeoutSec]); |         [self(), Name, TimeoutSec]); | ||||||
| log_connection_exception(Severity, Name, {connection_closed_abruptly, | log_connection_exception(Severity, Name, {connection_closed_abruptly, | ||||||
|                                           #v1{connection = #connection{user  = #user{username = Username}, |                                           #v1{connection = #connection{user  = #user{username = Username}, | ||||||
|                                                                        vhost = VHost}}}) -> |                                                                        vhost = VHost}}}) -> | ||||||
|     log_connection_exception_with_severity(Severity, |     log_connection_exception_with_severity(Severity, | ||||||
|         "closing AMQP connection ~p (~s, vhost: '~s', user: '~s'):~nclient unexpectedly closed TCP connection~n", |         "closing AMQP connection ~p (~s, vhost: '~s', user: '~s'):~nclient unexpectedly closed TCP connection", | ||||||
|         [self(), Name, VHost, Username]); |         [self(), Name, VHost, Username]); | ||||||
| %% when client abruptly closes connection before connection.open/authentication/authorization | %% when client abruptly closes connection before connection.open/authentication/authorization | ||||||
| %% succeeded, don't log username and vhost as 'none' | %% succeeded, don't log username and vhost as 'none' | ||||||
| log_connection_exception(Severity, Name, {connection_closed_abruptly, _}) -> | log_connection_exception(Severity, Name, {connection_closed_abruptly, _}) -> | ||||||
|     log_connection_exception_with_severity(Severity, |     log_connection_exception_with_severity(Severity, | ||||||
|         "closing AMQP connection ~p (~s):~nclient unexpectedly closed TCP connection~n", |         "closing AMQP connection ~p (~s):~nclient unexpectedly closed TCP connection", | ||||||
|         [self(), Name]); |         [self(), Name]); | ||||||
| %% failed connection.tune negotiations | %% failed connection.tune negotiations | ||||||
| log_connection_exception(Severity, Name, {handshake_error, tuning, _Channel, | log_connection_exception(Severity, Name, {handshake_error, tuning, _Channel, | ||||||
|                                           {exit, #amqp_error{explanation = Explanation}, |                                           {exit, #amqp_error{explanation = Explanation}, | ||||||
|                                            _Method, _Stacktrace}}) -> |                                            _Method, _Stacktrace}}) -> | ||||||
|     log_connection_exception_with_severity(Severity, |     log_connection_exception_with_severity(Severity, | ||||||
|         "closing AMQP connection ~p (~s):~nfailed to negotiate connection parameters: ~s~n", |         "closing AMQP connection ~p (~s):~nfailed to negotiate connection parameters: ~s", | ||||||
|         [self(), Name, Explanation]); |         [self(), Name, Explanation]); | ||||||
| %% old exception structure | %% old exception structure | ||||||
| log_connection_exception(Severity, Name, connection_closed_abruptly) -> | log_connection_exception(Severity, Name, connection_closed_abruptly) -> | ||||||
|     log_connection_exception_with_severity(Severity, |     log_connection_exception_with_severity(Severity, | ||||||
|         "closing AMQP connection ~p (~s):~n" |         "closing AMQP connection ~p (~s):~n" | ||||||
|         "client unexpectedly closed TCP connection~n", |         "client unexpectedly closed TCP connection", | ||||||
|         [self(), Name]); |         [self(), Name]); | ||||||
| log_connection_exception(Severity, Name, Ex) -> | log_connection_exception(Severity, Name, Ex) -> | ||||||
|     log_connection_exception_with_severity(Severity, |     log_connection_exception_with_severity(Severity, | ||||||
|         "closing AMQP connection ~p (~s):~n~p~n", |         "closing AMQP connection ~p (~s):~n~p", | ||||||
|         [self(), Name, Ex]). |         [self(), Name, Ex]). | ||||||
| 
 | 
 | ||||||
| log_connection_exception_with_severity(Severity, Fmt, Args) -> | log_connection_exception_with_severity(Severity, Fmt, Args) -> | ||||||
|  | @ -508,7 +505,7 @@ mainloop(Deb, Buf, BufLen, State = #v1{sock = Sock, | ||||||
|             %% |             %% | ||||||
|             %% The goal is to not log TCP healthchecks (a connection |             %% The goal is to not log TCP healthchecks (a connection | ||||||
|             %% with no data received) unless specified otherwise. |             %% with no data received) unless specified otherwise. | ||||||
|             Fmt = "accepting AMQP connection ~p (~s)~n", |             Fmt = "accepting AMQP connection ~p (~s)", | ||||||
|             Args = [self(), ConnName], |             Args = [self(), ConnName], | ||||||
|             case Recv of |             case Recv of | ||||||
|                 closed -> rabbit_log_connection:debug(Fmt, Args); |                 closed -> rabbit_log_connection:debug(Fmt, Args); | ||||||
|  | @ -756,7 +753,7 @@ wait_for_channel_termination(N, TimerRef, | ||||||
|                     rabbit_log_connection:error( |                     rabbit_log_connection:error( | ||||||
|                         "Error on AMQP connection ~p (~s, vhost: '~s'," |                         "Error on AMQP connection ~p (~s, vhost: '~s'," | ||||||
|                         " user: '~s', state: ~p), channel ~p:" |                         " user: '~s', state: ~p), channel ~p:" | ||||||
|                         "error while terminating:~n~p~n", |                         "error while terminating:~n~p", | ||||||
|                         [self(), ConnName, VHost, User#user.username, |                         [self(), ConnName, VHost, User#user.username, | ||||||
|                          CS, Channel, Reason]), |                          CS, Channel, Reason]), | ||||||
|                     handle_uncontrolled_channel_close(ChPid), |                     handle_uncontrolled_channel_close(ChPid), | ||||||
|  | @ -797,7 +794,7 @@ log_hard_error(#v1{connection_state = CS, | ||||||
|                                    vhost = VHost}}, Channel, Reason) -> |                                    vhost = VHost}}, Channel, Reason) -> | ||||||
|     rabbit_log_connection:error( |     rabbit_log_connection:error( | ||||||
|         "Error on AMQP connection ~p (~s, vhost: '~s'," |         "Error on AMQP connection ~p (~s, vhost: '~s'," | ||||||
|         " user: '~s', state: ~p), channel ~p:~n ~s~n", |         " user: '~s', state: ~p), channel ~p:~n ~s", | ||||||
|         [self(), ConnName, VHost, User#user.username, CS, Channel, format_hard_error(Reason)]). |         [self(), ConnName, VHost, User#user.username, CS, Channel, format_hard_error(Reason)]). | ||||||
| 
 | 
 | ||||||
| handle_exception(State = #v1{connection_state = closed}, Channel, Reason) -> | handle_exception(State = #v1{connection_state = closed}, Channel, Reason) -> | ||||||
|  | @ -816,7 +813,7 @@ handle_exception(State = #v1{connection = #connection{protocol = Protocol, | ||||||
|                  Channel, Reason = #amqp_error{name = access_refused, |                  Channel, Reason = #amqp_error{name = access_refused, | ||||||
|                                                explanation = ErrMsg}) -> |                                                explanation = ErrMsg}) -> | ||||||
|     rabbit_log_connection:error( |     rabbit_log_connection:error( | ||||||
|         "Error on AMQP connection ~p (~s, state: ~p):~n~s~n", |         "Error on AMQP connection ~p (~s, state: ~p):~n~s", | ||||||
|         [self(), ConnName, starting, ErrMsg]), |         [self(), ConnName, starting, ErrMsg]), | ||||||
|     %% respect authentication failure notification capability |     %% respect authentication failure notification capability | ||||||
|     case rabbit_misc:table_lookup(Capabilities, |     case rabbit_misc:table_lookup(Capabilities, | ||||||
|  | @ -835,7 +832,7 @@ handle_exception(State = #v1{connection = #connection{protocol = Protocol, | ||||||
|                  Channel, Reason = #amqp_error{name = not_allowed, |                  Channel, Reason = #amqp_error{name = not_allowed, | ||||||
|                                                explanation = ErrMsg}) -> |                                                explanation = ErrMsg}) -> | ||||||
|     rabbit_log_connection:error( |     rabbit_log_connection:error( | ||||||
|         "Error on AMQP connection ~p (~s, user: '~s', state: ~p):~n~s~n", |         "Error on AMQP connection ~p (~s, user: '~s', state: ~p):~n~s", | ||||||
|         [self(), ConnName, User#user.username, opening, ErrMsg]), |         [self(), ConnName, User#user.username, opening, ErrMsg]), | ||||||
|     send_error_on_channel0_and_close(Channel, Protocol, Reason, State); |     send_error_on_channel0_and_close(Channel, Protocol, Reason, State); | ||||||
| handle_exception(State = #v1{connection = #connection{protocol = Protocol}, | handle_exception(State = #v1{connection = #connection{protocol = Protocol}, | ||||||
|  | @ -853,7 +850,7 @@ handle_exception(State = #v1{connection = #connection{protocol = Protocol, | ||||||
|                                                explanation = ErrMsg}) -> |                                                explanation = ErrMsg}) -> | ||||||
|     rabbit_log_connection:error( |     rabbit_log_connection:error( | ||||||
|         "Error on AMQP connection ~p (~s," |         "Error on AMQP connection ~p (~s," | ||||||
|         " user: '~s', state: ~p):~n~s~n", |         " user: '~s', state: ~p):~n~s", | ||||||
|         [self(), ConnName, User#user.username, tuning, ErrMsg]), |         [self(), ConnName, User#user.username, tuning, ErrMsg]), | ||||||
|     send_error_on_channel0_and_close(Channel, Protocol, Reason, State); |     send_error_on_channel0_and_close(Channel, Protocol, Reason, State); | ||||||
| handle_exception(State, Channel, Reason) -> | handle_exception(State, Channel, Reason) -> | ||||||
|  | @ -1256,7 +1253,7 @@ handle_method0(#'connection.open'{virtual_host = VHost}, | ||||||
|     maybe_emit_stats(State1), |     maybe_emit_stats(State1), | ||||||
|     rabbit_log_connection:info( |     rabbit_log_connection:info( | ||||||
|         "connection ~p (~s): " |         "connection ~p (~s): " | ||||||
|         "user '~s' authenticated and granted access to vhost '~s'~n", |         "user '~s' authenticated and granted access to vhost '~s'", | ||||||
|         [self(), dynamic_connection_name(ConnName), Username, VHost]), |         [self(), dynamic_connection_name(ConnName), Username, VHost]), | ||||||
|     State1; |     State1; | ||||||
| handle_method0(#'connection.close'{}, State) when ?IS_RUNNING(State) -> | handle_method0(#'connection.close'{}, State) when ?IS_RUNNING(State) -> | ||||||
|  | @ -1282,7 +1279,7 @@ handle_method0(#'connection.update_secret'{new_secret = NewSecret, reason = Reas | ||||||
|                            sock       = Sock}) when ?IS_RUNNING(State) -> |                            sock       = Sock}) when ?IS_RUNNING(State) -> | ||||||
|     rabbit_log_connection:debug( |     rabbit_log_connection:debug( | ||||||
|         "connection ~p (~s) of user '~s': " |         "connection ~p (~s) of user '~s': " | ||||||
|         "asked to update secret, reason: ~s~n", |         "asked to update secret, reason: ~s", | ||||||
|         [self(), dynamic_connection_name(ConnName), Username, Reason]), |         [self(), dynamic_connection_name(ConnName), Username, Reason]), | ||||||
|     case rabbit_access_control:update_state(User, NewSecret) of |     case rabbit_access_control:update_state(User, NewSecret) of | ||||||
|       {ok, User1} -> |       {ok, User1} -> | ||||||
|  | @ -1299,15 +1296,15 @@ handle_method0(#'connection.update_secret'{new_secret = NewSecret, reason = Reas | ||||||
|         ok = send_on_channel0(Sock, #'connection.update_secret_ok'{}, Protocol), |         ok = send_on_channel0(Sock, #'connection.update_secret_ok'{}, Protocol), | ||||||
|         rabbit_log_connection:info( |         rabbit_log_connection:info( | ||||||
|             "connection ~p (~s): " |             "connection ~p (~s): " | ||||||
|             "user '~s' updated secret, reason: ~s~n", |             "user '~s' updated secret, reason: ~s", | ||||||
|             [self(), dynamic_connection_name(ConnName), Username, Reason]), |             [self(), dynamic_connection_name(ConnName), Username, Reason]), | ||||||
|         State#v1{connection = Conn#connection{user = User1}}; |         State#v1{connection = Conn#connection{user = User1}}; | ||||||
|       {refused, Message} -> |       {refused, Message} -> | ||||||
|         rabbit_log_connection:error("Secret update was refused for user '~p': ~p", |         rabbit_log_connection:error("Secret update was refused for user '~s': ~p", | ||||||
|                                     [Username, Message]), |                                     [Username, Message]), | ||||||
|         rabbit_misc:protocol_error(not_allowed, "New secret was refused by one of the backends", []); |         rabbit_misc:protocol_error(not_allowed, "New secret was refused by one of the backends", []); | ||||||
|       {error, Message} -> |       {error, Message} -> | ||||||
|         rabbit_log_connection:error("Secret update for user '~p' failed: ~p", |         rabbit_log_connection:error("Secret update for user '~s' failed: ~p", | ||||||
|                                     [Username, Message]), |                                     [Username, Message]), | ||||||
|         rabbit_misc:protocol_error(not_allowed, |         rabbit_misc:protocol_error(not_allowed, | ||||||
|                                   "Secret update failed", []) |                                   "Secret update failed", []) | ||||||
|  | @ -1772,7 +1769,7 @@ augment_connection_log_name(#connection{name = Name} = Connection) -> | ||||||
|             Connection; |             Connection; | ||||||
|         UserSpecifiedName -> |         UserSpecifiedName -> | ||||||
|             LogName = <<Name/binary, " - ", UserSpecifiedName/binary>>, |             LogName = <<Name/binary, " - ", UserSpecifiedName/binary>>, | ||||||
|             rabbit_log_connection:info("Connection ~p (~s) has a client-provided name: ~s~n", [self(), Name, UserSpecifiedName]), |             rabbit_log_connection:info("Connection ~p (~s) has a client-provided name: ~s", [self(), Name, UserSpecifiedName]), | ||||||
|             ?store_proc_name(LogName), |             ?store_proc_name(LogName), | ||||||
|             Connection#connection{log_name = LogName} |             Connection#connection{log_name = LogName} | ||||||
|     end. |     end. | ||||||
|  |  | ||||||
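The reader module above often picks the severity at runtime and routes it through log_connection_exception_with_severity/3. With the Logger API the same idea maps onto logger:log/3, where the level is an ordinary term. A hedged sketch, not the helper the diff actually uses:

    -module(dynamic_severity_example).
    -export([log_close/3]).

    log_close(Severity, ConnName, Reason) when is_atom(Severity) ->
        %% Severity is one of the standard levels (debug, info, warning, ...).
        logger:log(Severity, "closing AMQP connection ~p (~s):~n~p",
                   [self(), ConnName, Reason]).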
|  | @ -101,7 +101,7 @@ wait(TableNames, Retry) -> | ||||||
| wait(TableNames, Timeout, Retries) -> | wait(TableNames, Timeout, Retries) -> | ||||||
|     %% We might be in ctl here for offline ops, in which case we can't |     %% We might be in ctl here for offline ops, in which case we can't | ||||||
|     %% get_env() for the rabbit app. |     %% get_env() for the rabbit app. | ||||||
|     rabbit_log:info("Waiting for Mnesia tables for ~p ms, ~p retries left~n", |     rabbit_log:info("Waiting for Mnesia tables for ~p ms, ~p retries left", | ||||||
|                     [Timeout, Retries - 1]), |                     [Timeout, Retries - 1]), | ||||||
|     Result = case mnesia:wait_for_tables(TableNames, Timeout) of |     Result = case mnesia:wait_for_tables(TableNames, Timeout) of | ||||||
|                  ok -> |                  ok -> | ||||||
|  | @ -120,7 +120,7 @@ wait(TableNames, Timeout, Retries) -> | ||||||
|         {1, {error, _} = Error} -> |         {1, {error, _} = Error} -> | ||||||
|             throw(Error); |             throw(Error); | ||||||
|         {_, {error, Error}} -> |         {_, {error, Error}} -> | ||||||
|             rabbit_log:warning("Error while waiting for Mnesia tables: ~p~n", [Error]), |             rabbit_log:warning("Error while waiting for Mnesia tables: ~p", [Error]), | ||||||
|             wait(TableNames, Timeout, Retries - 1) |             wait(TableNames, Timeout, Retries - 1) | ||||||
|     end. |     end. | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
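The wait/3 clause above logs each attempt before calling mnesia:wait_for_tables/2 and logs a warning when the wait fails. An illustrative retry loop in the same spirit, written against the plain logger API; the function name and return values are placeholders, not the module's API.

    %% Stops with an error once the retry budget is exhausted.
    wait_with_logging(_Tables, _Timeout, 0) ->
        {error, retries_exhausted};
    wait_with_logging(Tables, Timeout, Retries) ->
        logger:info("Waiting for Mnesia tables for ~p ms, ~p retries left",
                    [Timeout, Retries - 1]),
        case mnesia:wait_for_tables(Tables, Timeout) of
            ok ->
                ok;
            {timeout, Missing} ->
                logger:warning("Error while waiting for Mnesia tables: ~p",
                               [{timeout_waiting_for_tables, Missing}]),
                wait_with_logging(Tables, Timeout, Retries - 1);
            {error, _} = Err ->
                Err
        end.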
|  | @ -74,13 +74,13 @@ tap_out({#resource{name = QName, virtual_host = VHost}, | ||||||
| -spec start(rabbit_types:vhost()) -> 'ok'. | -spec start(rabbit_types:vhost()) -> 'ok'. | ||||||
| 
 | 
 | ||||||
| start(VHost) -> | start(VHost) -> | ||||||
|     rabbit_log:info("Enabling tracing for vhost '~s'~n", [VHost]), |     rabbit_log:info("Enabling tracing for vhost '~s'", [VHost]), | ||||||
|     update_config(fun (VHosts) -> [VHost | VHosts -- [VHost]] end). |     update_config(fun (VHosts) -> [VHost | VHosts -- [VHost]] end). | ||||||
| 
 | 
 | ||||||
| -spec stop(rabbit_types:vhost()) -> 'ok'. | -spec stop(rabbit_types:vhost()) -> 'ok'. | ||||||
| 
 | 
 | ||||||
| stop(VHost) -> | stop(VHost) -> | ||||||
|     rabbit_log:info("Disabling tracing for vhost '~s'~n", [VHost]), |     rabbit_log:info("Disabling tracing for vhost '~s'", [VHost]), | ||||||
|     update_config(fun (VHosts) -> VHosts -- [VHost] end). |     update_config(fun (VHosts) -> VHosts -- [VHost] end). | ||||||
| 
 | 
 | ||||||
| update_config(Fun) -> | update_config(Fun) -> | ||||||
|  |  | ||||||
|  | @ -52,7 +52,7 @@ count_tracked_items(TableNameFun, CountRecPosition, Key, ContextMsg) -> | ||||||
|                             Acc + N |                             Acc + N | ||||||
|                         catch _:Err  -> |                         catch _:Err  -> | ||||||
|                                 rabbit_log:error( |                                 rabbit_log:error( | ||||||
|                                   "Failed to fetch number of ~p ~p on node ~p:~n~p~n", |                                   "Failed to fetch number of ~p ~p on node ~p:~n~p", | ||||||
|                                   [ContextMsg, Key, Node, Err]), |                                   [ContextMsg, Key, Node, Err]), | ||||||
|                                 Acc |                                 Acc | ||||||
|                         end |                         end | ||||||
|  |  | ||||||
|  | @ -91,9 +91,9 @@ ensure_backup_taken() -> | ||||||
| 
 | 
 | ||||||
| take_backup() -> | take_backup() -> | ||||||
|     BackupDir = backup_dir(), |     BackupDir = backup_dir(), | ||||||
|     info("upgrades: Backing up mnesia dir to ~p~n", [BackupDir]), |     info("upgrades: Backing up mnesia dir to ~p", [BackupDir]), | ||||||
|     case rabbit_mnesia:copy_db(BackupDir) of |     case rabbit_mnesia:copy_db(BackupDir) of | ||||||
|         ok         -> info("upgrades: Mnesia dir backed up to ~p~n", |         ok         -> info("upgrades: Mnesia dir backed up to ~p", | ||||||
|                            [BackupDir]); |                            [BackupDir]); | ||||||
|         {error, E} -> throw({could_not_back_up_mnesia_dir, E, BackupDir}) |         {error, E} -> throw({could_not_back_up_mnesia_dir, E, BackupDir}) | ||||||
|     end. |     end. | ||||||
|  | @ -106,7 +106,7 @@ ensure_backup_removed() -> | ||||||
| 
 | 
 | ||||||
| remove_backup() -> | remove_backup() -> | ||||||
|     ok = rabbit_file:recursive_delete([backup_dir()]), |     ok = rabbit_file:recursive_delete([backup_dir()]), | ||||||
|     info("upgrades: Mnesia backup removed~n", []). |     info("upgrades: Mnesia backup removed", []). | ||||||
| 
 | 
 | ||||||
| -spec maybe_upgrade_mnesia() -> 'ok'. | -spec maybe_upgrade_mnesia() -> 'ok'. | ||||||
| 
 | 
 | ||||||
|  | @ -216,7 +216,7 @@ primary_upgrade(Upgrades, Nodes) -> | ||||||
|                    rabbit_table:force_load(), |                    rabbit_table:force_load(), | ||||||
|                    case Others of |                    case Others of | ||||||
|                        [] -> ok; |                        [] -> ok; | ||||||
|                        _  -> info("mnesia upgrades: Breaking cluster~n", []), |                        _  -> info("mnesia upgrades: Breaking cluster", []), | ||||||
|                              [{atomic, ok} = mnesia:del_table_copy(schema, Node) |                              [{atomic, ok} = mnesia:del_table_copy(schema, Node) | ||||||
|                               || Node <- Others] |                               || Node <- Others] | ||||||
|                    end |                    end | ||||||
|  | @ -280,16 +280,16 @@ maybe_migrate_queues_to_per_vhost_storage() -> | ||||||
| 
 | 
 | ||||||
| apply_upgrades(Scope, Upgrades, Fun) -> | apply_upgrades(Scope, Upgrades, Fun) -> | ||||||
|     ok = rabbit_file:lock_file(lock_filename()), |     ok = rabbit_file:lock_file(lock_filename()), | ||||||
|     info("~s upgrades: ~w to apply~n", [Scope, length(Upgrades)]), |     info("~s upgrades: ~w to apply", [Scope, length(Upgrades)]), | ||||||
|     rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), |     rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), | ||||||
|     Fun(), |     Fun(), | ||||||
|     [apply_upgrade(Scope, Upgrade) || Upgrade <- Upgrades], |     [apply_upgrade(Scope, Upgrade) || Upgrade <- Upgrades], | ||||||
|     info("~s upgrades: All upgrades applied successfully~n", [Scope]), |     info("~s upgrades: All upgrades applied successfully", [Scope]), | ||||||
|     ok = rabbit_version:record_desired_for_scope(Scope), |     ok = rabbit_version:record_desired_for_scope(Scope), | ||||||
|     ok = file:delete(lock_filename()). |     ok = file:delete(lock_filename()). | ||||||
| 
 | 
 | ||||||
| apply_upgrade(Scope, {M, F}) -> | apply_upgrade(Scope, {M, F}) -> | ||||||
|     info("~s upgrades: Applying ~w:~w~n", [Scope, M, F]), |     info("~s upgrades: Applying ~w:~w", [Scope, M, F]), | ||||||
|     ok = apply(M, F, []). |     ok = apply(M, F, []). | ||||||
| 
 | 
 | ||||||
| %% ------------------------------------------------------------------- | %% ------------------------------------------------------------------- | ||||||
|  |  | ||||||
|  | @ -426,9 +426,9 @@ cluster_name_tx() -> | ||||||
|                   case Tl of |                   case Tl of | ||||||
|                       [] -> ok; |                       [] -> ok; | ||||||
|                       _  -> {VHost, _, _} = K, |                       _  -> {VHost, _, _} = K, | ||||||
|                             error_logger:warning_msg( |                             logger:warning( | ||||||
|                               "Multiple local-nodenames found, picking '~s' " |                               "Multiple local-nodenames found, picking '~s' " | ||||||
|                               "from '~s' for cluster name~n", [Name, VHost]) |                               "from '~s' for cluster name", [Name, VHost]) | ||||||
|                   end |                   end | ||||||
|     end, |     end, | ||||||
|     [mnesia:delete(T, K, write) || K <- Ks], |     [mnesia:delete(T, K, write) || K <- Ks], | ||||||
|  |  | ||||||
|  | @ -484,7 +484,7 @@ stop(VHost) -> | ||||||
|     ok = rabbit_queue_index:stop(VHost). |     ok = rabbit_queue_index:stop(VHost). | ||||||
| 
 | 
 | ||||||
| start_msg_store(VHost, Refs, StartFunState) when is_list(Refs); Refs == undefined -> | start_msg_store(VHost, Refs, StartFunState) when is_list(Refs); Refs == undefined -> | ||||||
|     rabbit_log:info("Starting message stores for vhost '~s'~n", [VHost]), |     rabbit_log:info("Starting message stores for vhost '~s'", [VHost]), | ||||||
|     do_start_msg_store(VHost, ?TRANSIENT_MSG_STORE, undefined, ?EMPTY_START_FUN_STATE), |     do_start_msg_store(VHost, ?TRANSIENT_MSG_STORE, undefined, ?EMPTY_START_FUN_STATE), | ||||||
|     do_start_msg_store(VHost, ?PERSISTENT_MSG_STORE, Refs, StartFunState), |     do_start_msg_store(VHost, ?PERSISTENT_MSG_STORE, Refs, StartFunState), | ||||||
|     ok. |     ok. | ||||||
|  | @ -492,13 +492,13 @@ start_msg_store(VHost, Refs, StartFunState) when is_list(Refs); Refs == undefine | ||||||
| do_start_msg_store(VHost, Type, Refs, StartFunState) -> | do_start_msg_store(VHost, Type, Refs, StartFunState) -> | ||||||
|     case rabbit_vhost_msg_store:start(VHost, Type, Refs, StartFunState) of |     case rabbit_vhost_msg_store:start(VHost, Type, Refs, StartFunState) of | ||||||
|         {ok, _} -> |         {ok, _} -> | ||||||
|             rabbit_log:info("Started message store of type ~s for vhost '~s'~n", [abbreviated_type(Type), VHost]); |             rabbit_log:info("Started message store of type ~s for vhost '~s'", [abbreviated_type(Type), VHost]); | ||||||
|         {error, {no_such_vhost, VHost}} = Err -> |         {error, {no_such_vhost, VHost}} = Err -> | ||||||
|             rabbit_log:error("Failed to start message store of type ~s for vhost '~s': the vhost no longer exists!~n", |             rabbit_log:error("Failed to start message store of type ~s for vhost '~s': the vhost no longer exists!", | ||||||
|                              [Type, VHost]), |                              [Type, VHost]), | ||||||
|             exit(Err); |             exit(Err); | ||||||
|         {error, Error} -> |         {error, Error} -> | ||||||
|             rabbit_log:error("Failed to start message store of type ~s for vhost '~s': ~p~n", |             rabbit_log:error("Failed to start message store of type ~s for vhost '~s': ~p", | ||||||
|                              [Type, VHost, Error]), |                              [Type, VHost, Error]), | ||||||
|             exit({error, Error}) |             exit({error, Error}) | ||||||
|     end. |     end. | ||||||
|  | @ -2846,7 +2846,7 @@ move_messages_to_vhost_store(Queues) -> | ||||||
|     in_batches(MigrationBatchSize, |     in_batches(MigrationBatchSize, | ||||||
|         {rabbit_variable_queue, migrate_queue, [OldStore, NewMsgStore]}, |         {rabbit_variable_queue, migrate_queue, [OldStore, NewMsgStore]}, | ||||||
|         QueuesWithTerms, |         QueuesWithTerms, | ||||||
|         "message_store upgrades: Migrating batch ~p of ~p queues. Out of total ~p ~n", |         "message_store upgrades: Migrating batch ~p of ~p queues. Out of total ~p ", | ||||||
|         "message_store upgrades: Batch ~p of ~p queues migrated ~n. ~p total left"), |         "message_store upgrades: Batch ~p of ~p queues migrated ~n. ~p total left"), | ||||||
| 
 | 
 | ||||||
|     log_upgrade("Message store migration finished"), |     log_upgrade("Message store migration finished"), | ||||||
|  | @ -2882,7 +2882,7 @@ migrate_queue({QueueName = #resource{virtual_host = VHost, name = Name}, | ||||||
|                RecoveryTerm}, |                RecoveryTerm}, | ||||||
|               OldStore, NewStore) -> |               OldStore, NewStore) -> | ||||||
|     log_upgrade_verbose( |     log_upgrade_verbose( | ||||||
|         "Migrating messages in queue ~s in vhost ~s to per-vhost message store~n", |         "Migrating messages in queue ~s in vhost ~s to per-vhost message store", | ||||||
|         [Name, VHost]), |         [Name, VHost]), | ||||||
|     OldStoreClient = get_global_store_client(OldStore), |     OldStoreClient = get_global_store_client(OldStore), | ||||||
|     NewStoreClient = get_per_vhost_store_client(QueueName, NewStore), |     NewStoreClient = get_per_vhost_store_client(QueueName, NewStore), | ||||||
|  |  | ||||||
|  | @ -43,7 +43,7 @@ recover() -> | ||||||
| 
 | 
 | ||||||
| recover(VHost) -> | recover(VHost) -> | ||||||
|     VHostDir = msg_store_dir_path(VHost), |     VHostDir = msg_store_dir_path(VHost), | ||||||
|     rabbit_log:info("Making sure data directory '~ts' for vhost '~s' exists~n", |     rabbit_log:info("Making sure data directory '~ts' for vhost '~s' exists", | ||||||
|                     [VHostDir, VHost]), |                     [VHostDir, VHost]), | ||||||
|     VHostStubFile = filename:join(VHostDir, ".vhost"), |     VHostStubFile = filename:join(VHostDir, ".vhost"), | ||||||
|     ok = rabbit_file:ensure_dir(VHostStubFile), |     ok = rabbit_file:ensure_dir(VHostStubFile), | ||||||
|  | @ -147,7 +147,7 @@ delete(VHost, ActingUser) -> | ||||||
|     %% process, which in turn results in further mnesia actions and |     %% process, which in turn results in further mnesia actions and | ||||||
|     %% eventually the termination of that process. Exchange deletion causes |     %% eventually the termination of that process. Exchange deletion causes | ||||||
|     %% notifications which must be sent outside the TX |     %% notifications which must be sent outside the TX | ||||||
|     rabbit_log:info("Deleting vhost '~s'~n", [VHost]), |     rabbit_log:info("Deleting vhost '~s'", [VHost]), | ||||||
|     QDelFun = fun (Q) -> rabbit_amqqueue:delete(Q, false, false, ActingUser) end, |     QDelFun = fun (Q) -> rabbit_amqqueue:delete(Q, false, false, ActingUser) end, | ||||||
|     [begin |     [begin | ||||||
|          Name = amqqueue:get_name(Q), |          Name = amqqueue:get_name(Q), | ||||||
|  | @ -257,7 +257,7 @@ vhost_down(VHost) -> | ||||||
| 
 | 
 | ||||||
| delete_storage(VHost) -> | delete_storage(VHost) -> | ||||||
|     VhostDir = msg_store_dir_path(VHost), |     VhostDir = msg_store_dir_path(VHost), | ||||||
|     rabbit_log:info("Deleting message store directory for vhost '~s' at '~s'~n", [VHost, VhostDir]), |     rabbit_log:info("Deleting message store directory for vhost '~s' at '~s'", [VHost, VhostDir]), | ||||||
|     %% Message store should be closed when vhost supervisor is closed. |     %% Message store should be closed when vhost supervisor is closed. | ||||||
|     case rabbit_file:recursive_delete([VhostDir]) of |     case rabbit_file:recursive_delete([VhostDir]) of | ||||||
|         ok                   -> ok; |         ok                   -> ok; | ||||||
|  |  | ||||||
|  | @ -41,7 +41,7 @@ start_link(VHost) -> | ||||||
| 
 | 
 | ||||||
| init([VHost]) -> | init([VHost]) -> | ||||||
|     process_flag(trap_exit, true), |     process_flag(trap_exit, true), | ||||||
|     rabbit_log:debug("Recovering data for VHost ~p~n", [VHost]), |     rabbit_log:debug("Recovering data for VHost ~p", [VHost]), | ||||||
|     try |     try | ||||||
|         %% Recover the vhost data and save it to vhost registry. |         %% Recover the vhost data and save it to vhost registry. | ||||||
|         ok = rabbit_vhost:recover(VHost), |         ok = rabbit_vhost:recover(VHost), | ||||||
|  |  | ||||||
|  | @ -79,7 +79,7 @@ stop_and_delete_vhost(VHost) -> | ||||||
|                 false -> ok; |                 false -> ok; | ||||||
|                 true  -> |                 true  -> | ||||||
|                     rabbit_log:info("Stopping vhost supervisor ~p" |                     rabbit_log:info("Stopping vhost supervisor ~p" | ||||||
|                                     " for vhost '~s'~n", |                                     " for vhost '~s'", | ||||||
|                                     [VHostSupPid, VHost]), |                                     [VHostSupPid, VHost]), | ||||||
|                     case supervisor2:terminate_child(?MODULE, WrapperPid) of |                     case supervisor2:terminate_child(?MODULE, WrapperPid) of | ||||||
|                         ok -> |                         ok -> | ||||||
|  |  | ||||||
|  | @ -65,9 +65,7 @@ start_link(IPAddress, Port, | ||||||
| 
 | 
 | ||||||
| init({IPAddress, Port, {M,F,A} = OnStartup, OnShutdown, Label}) -> | init({IPAddress, Port, {M,F,A} = OnStartup, OnShutdown, Label}) -> | ||||||
|     process_flag(trap_exit, true), |     process_flag(trap_exit, true), | ||||||
|     error_logger:info_msg( |     logger:info("started ~s on ~s:~p", [Label, rabbit_misc:ntoab(IPAddress), Port]), | ||||||
|       "started ~s on ~s:~p~n", |  | ||||||
|       [Label, rabbit_misc:ntoab(IPAddress), Port]), |  | ||||||
|     apply(M, F, A ++ [IPAddress, Port]), |     apply(M, F, A ++ [IPAddress, Port]), | ||||||
|     {ok, #state{on_startup = OnStartup, on_shutdown = OnShutdown, |     {ok, #state{on_startup = OnStartup, on_shutdown = OnShutdown, | ||||||
|                 label = Label, ip=IPAddress, port=Port}}. |                 label = Label, ip=IPAddress, port=Port}}. | ||||||
|  | @ -82,8 +80,7 @@ handle_info(_Info, State) -> | ||||||
|     {noreply, State}. |     {noreply, State}. | ||||||
| 
 | 
 | ||||||
| terminate(_Reason, #state{on_shutdown = {M,F,A}, label=Label, ip=IPAddress, port=Port}) -> | terminate(_Reason, #state{on_shutdown = {M,F,A}, label=Label, ip=IPAddress, port=Port}) -> | ||||||
|     error_logger:info_msg("stopped ~s on ~s:~p~n", |     logger:info("stopped ~s on ~s:~p", [Label, rabbit_misc:ntoab(IPAddress), Port]), | ||||||
|                           [Label, rabbit_misc:ntoab(IPAddress), Port]), |  | ||||||
|     apply(M, F, A ++ [IPAddress, Port]). |     apply(M, F, A ++ [IPAddress, Port]). | ||||||
| 
 | 
 | ||||||
| code_change(_OldVsn, State, _Extra) -> | code_change(_OldVsn, State, _Extra) -> | ||||||
|  |  | ||||||
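The tcp_listener hunk above is the simplest form of the migration: error_logger:info_msg/2 becomes logger:info/2 and the trailing "~n" disappears. The same substitution in isolation (module name illustrative; rabbit_misc:ntoab/1 is the address formatter already used by the original code):

    -module(listener_log_example).
    -export([started/3]).

    started(Label, IPAddress, Port) ->
        logger:info("started ~s on ~s:~p",
                    [Label, rabbit_misc:ntoab(IPAddress), Port]).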
|  | @ -141,20 +141,7 @@ init_per_testcase(Testcase, Config) -> | ||||||
|     TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase), |     TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase), | ||||||
|     case ?config(tc_group_properties, Config) of |     case ?config(tc_group_properties, Config) of | ||||||
|         [{name, registry} | _] -> |         [{name, registry} | _] -> | ||||||
|             application:set_env(lager, colored, true), |             logger:set_primary_config(level, debug), | ||||||
|             application:set_env( |  | ||||||
|               lager, |  | ||||||
|               handlers, [{lager_console_backend, [{level, debug}]}]), |  | ||||||
|             application:set_env( |  | ||||||
|               lager, |  | ||||||
|               extra_sinks, |  | ||||||
|               [{rabbit_log_lager_event, |  | ||||||
|                 [{handlers, [{lager_console_backend, [{level, debug}]}]}] |  | ||||||
|                }, |  | ||||||
|                {rabbit_log_feature_flags_lager_event, |  | ||||||
|                 [{handlers, [{lager_console_backend, [{level, debug}]}]}] |  | ||||||
|                }]), |  | ||||||
|             lager:start(), |  | ||||||
|             FeatureFlagsFile = filename:join(?config(priv_dir, Config), |             FeatureFlagsFile = filename:join(?config(priv_dir, Config), | ||||||
|                                              rabbit_misc:format( |                                              rabbit_misc:format( | ||||||
|                                                "feature_flags-~s", |                                                "feature_flags-~s", | ||||||
|  |  | ||||||
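In the test-suite hunk above, the block of lager environment and sink configuration reduces to a single logger:set_primary_config(level, debug) call. A small sketch of the same idea; the second call, which lowers the built-in `default` handler's level via logger:set_handler_config/3, is an optional extra step and not something the new test code does:

    raise_test_log_level() ->
        ok = logger:set_primary_config(level, debug),
        ok = logger:set_handler_config(default, level, debug).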
|  | @ -0,0 +1,642 @@ | ||||||
|  | %% This Source Code Form is subject to the terms of the Mozilla Public | ||||||
|  | %% License, v. 2.0. If a copy of the MPL was not distributed with this | ||||||
|  | %% file, You can obtain one at https://mozilla.org/MPL/2.0/. | ||||||
|  | %% | ||||||
|  | %% Copyright (c) 2016-2021 VMware, Inc. or its affiliates.  All rights reserved. | ||||||
|  | %% | ||||||
|  | 
 | ||||||
|  | -module(logging_SUITE). | ||||||
|  | 
 | ||||||
|  | -include_lib("common_test/include/ct.hrl"). | ||||||
|  | -include_lib("eunit/include/eunit.hrl"). | ||||||
|  | 
 | ||||||
|  | -include_lib("kernel/include/logger.hrl"). | ||||||
|  | -include_lib("rabbit_common/include/logging.hrl"). | ||||||
|  | -include_lib("amqp_client/include/amqp_client.hrl"). | ||||||
|  | 
 | ||||||
|  | -export([all/0, | ||||||
|  |          init_per_suite/1, | ||||||
|  |          end_per_suite/1, | ||||||
|  |          init_per_group/2, | ||||||
|  |          end_per_group/2, | ||||||
|  |          init_per_testcase/2, | ||||||
|  |          end_per_testcase/2, | ||||||
|  | 
 | ||||||
|  |          logging_with_default_config_works/1, | ||||||
|  |          logging_to_stdout_configured_in_env_works/1, | ||||||
|  |          logging_to_stdout_configured_in_config_works/1, | ||||||
|  |          logging_to_stderr_configured_in_env_works/1, | ||||||
|  |          logging_to_exchange_works/1, | ||||||
|  |          setting_log_levels_in_env_works/1, | ||||||
|  |          setting_log_levels_in_config_works/1, | ||||||
|  |          format_messages_as_json_works/1]). | ||||||
|  | 
 | ||||||
|  | all() -> | ||||||
|  |     [logging_with_default_config_works, | ||||||
|  |      logging_to_stdout_configured_in_env_works, | ||||||
|  |      logging_to_stdout_configured_in_config_works, | ||||||
|  |      logging_to_stderr_configured_in_env_works, | ||||||
|  |      logging_to_exchange_works, | ||||||
|  |      setting_log_levels_in_env_works, | ||||||
|  |      setting_log_levels_in_config_works, | ||||||
|  |      format_messages_as_json_works]. | ||||||
|  | 
 | ||||||
|  | init_per_suite(Config) -> | ||||||
|  |     rabbit_ct_helpers:log_environment(), | ||||||
|  |     rabbit_ct_helpers:run_setup_steps(Config). | ||||||
|  | 
 | ||||||
|  | end_per_suite(Config) -> | ||||||
|  |     Config. | ||||||
|  | 
 | ||||||
|  | init_per_group(_, Config) -> | ||||||
|  |     Config. | ||||||
|  | 
 | ||||||
|  | end_per_group(_, Config) -> | ||||||
|  |     Config. | ||||||
|  | 
 | ||||||
|  | init_per_testcase(logging_to_exchange_works = Testcase, Config) -> | ||||||
|  |     rabbit_ct_helpers:testcase_started(Config, Testcase), | ||||||
|  |     Config1 = rabbit_ct_helpers:set_config( | ||||||
|  |                 Config, | ||||||
|  |                 [{rmq_nodes_count, 1}, | ||||||
|  |                  {rmq_nodename_suffix, Testcase}]), | ||||||
|  |     Config2 = rabbit_ct_helpers:merge_app_env( | ||||||
|  |                 Config1, | ||||||
|  |                 {rabbit, [{log, [{exchange, [{enabled, true}, | ||||||
|  |                                              {level, info}]}, | ||||||
|  |                                  {file, [{level, info}]}]}]}), | ||||||
|  |     rabbit_ct_helpers:run_steps( | ||||||
|  |       Config2, | ||||||
|  |       rabbit_ct_broker_helpers:setup_steps() ++ | ||||||
|  |       rabbit_ct_client_helpers:setup_steps()); | ||||||
|  | init_per_testcase(Testcase, Config) -> | ||||||
|  |     remove_all_handlers(), | ||||||
|  |     application:unset_env(rabbit, log), | ||||||
|  |     LogBaseDir = filename:join( | ||||||
|  |                    ?config(priv_dir, Config), | ||||||
|  |                    atom_to_list(Testcase)), | ||||||
|  |     Config1 = rabbit_ct_helpers:set_config( | ||||||
|  |                 Config, {log_base_dir, LogBaseDir}), | ||||||
|  |     rabbit_ct_helpers:testcase_finished(Config1, Testcase). | ||||||
|  | 
 | ||||||
|  | end_per_testcase(logging_to_exchange_works, Config) -> | ||||||
|  |     rabbit_ct_helpers:run_steps( | ||||||
|  |       Config, | ||||||
|  |       rabbit_ct_client_helpers:teardown_steps() ++ | ||||||
|  |       rabbit_ct_broker_helpers:teardown_steps()); | ||||||
|  | end_per_testcase(_, Config) -> | ||||||
|  |     application:unset_env(rabbit, log), | ||||||
|  |     Config. | ||||||
|  | 
 | ||||||
|  | remove_all_handlers() -> | ||||||
|  |     _ = [logger:remove_handler(Id) | ||||||
|  |          || #{id := Id} <- logger:get_handler_config()]. | ||||||
|  | 
 | ||||||
|  | logging_with_default_config_works(Config) -> | ||||||
|  |     Context = default_context(Config), | ||||||
|  |     rabbit_prelaunch_logging:clear_config_run_number(), | ||||||
|  |     rabbit_prelaunch_logging:setup(Context), | ||||||
|  | 
 | ||||||
|  |     Handlers = logger:get_handler_config(), | ||||||
|  | 
 | ||||||
|  |     MainFileHandler = get_handler_by_id(Handlers, rmq_1_file_1), | ||||||
|  |     MainFile = main_log_file_in_context(Context), | ||||||
|  |     ?assertNotEqual(undefined, MainFileHandler), | ||||||
|  |     ?assertMatch( | ||||||
|  |        #{level := info, | ||||||
|  |          module := rabbit_logger_std_h, | ||||||
|  |          filter_default := log, | ||||||
|  |          filters := [{progress_reports, {_, stop}}, | ||||||
|  |                      {rmqlog_filter, {_, #{global := info, | ||||||
|  |                                            upgrade := none}}}], | ||||||
|  |          formatter := {rabbit_logger_text_fmt, _}, | ||||||
|  |          config := #{type := file, | ||||||
|  |                      file := MainFile}}, | ||||||
|  |        MainFileHandler), | ||||||
|  | 
 | ||||||
|  |     UpgradeFileHandler = get_handler_by_id(Handlers, rmq_1_file_2), | ||||||
|  |     UpgradeFile = upgrade_log_file_in_context(Context), | ||||||
|  |     ?assertNotEqual(undefined, UpgradeFileHandler), | ||||||
|  |     ?assertMatch( | ||||||
|  |        #{level := info, | ||||||
|  |          module := rabbit_logger_std_h, | ||||||
|  |          filter_default := stop, | ||||||
|  |          filters := [{rmqlog_filter, {_, #{upgrade := info}}}], | ||||||
|  |          formatter := {rabbit_logger_text_fmt, _}, | ||||||
|  |          config := #{type := file, | ||||||
|  |                      file := UpgradeFile}}, | ||||||
|  |        UpgradeFileHandler), | ||||||
|  | 
 | ||||||
|  |     ?assert(ping_log(rmq_1_file_1, info)), | ||||||
|  |     ?assert(ping_log(rmq_1_file_1, info, | ||||||
|  |                      #{domain => ?RMQLOG_DOMAIN_GLOBAL})), | ||||||
|  |     ?assert(ping_log(rmq_1_file_1, info, | ||||||
|  |                      #{domain => ['3rd_party']})), | ||||||
|  |     ?assertNot(ping_log(rmq_1_file_1, info, | ||||||
|  |                         #{domain => ?RMQLOG_DOMAIN_UPGRADE})), | ||||||
|  | 
 | ||||||
|  |     ?assert(ping_log(rmq_1_file_2, info, | ||||||
|  |                      #{domain => ?RMQLOG_DOMAIN_UPGRADE})), | ||||||
|  |     ?assertNot(ping_log(rmq_1_file_2, info, | ||||||
|  |                         #{domain => ?RMQLOG_DOMAIN_GLOBAL})), | ||||||
|  |     ok. | ||||||
|  | 
 | ||||||
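    %% A minimal sketch (not used by this suite): the assertions above scan
    %% the full logger:get_handler_config/0 list, but a single handler can
    %% also be fetched by id with logger:get_handler_config/1, which returns
    %% {ok, Config} for a known handler id such as rmq_1_file_1.
    inspect_handler(HandlerId) ->
        case logger:get_handler_config(HandlerId) of
            {ok, #{level := Level, module := Mod}} -> {Level, Mod};
            {error, _} = Err                       -> Err
        end.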
|  | logging_to_stdout_configured_in_env_works(Config) -> | ||||||
|  |     #{var_origins := Origins0} = Context0 = default_context(Config), | ||||||
|  |     Context = Context0#{main_log_file => "-", | ||||||
|  |                         var_origins => Origins0#{ | ||||||
|  |                                          main_log_file => environment}}, | ||||||
|  |     logging_to_stddev_works(standard_io, rmq_1_stdout, Config, Context). | ||||||
|  | 
 | ||||||
|  | logging_to_stdout_configured_in_config_works(Config) -> | ||||||
|  |     Context = default_context(Config), | ||||||
|  |     ok = application:set_env( | ||||||
|  |            rabbit, log, | ||||||
|  |            [{console, [{enabled, true}]}], | ||||||
|  |            [{persistent, true}]), | ||||||
|  |     logging_to_stddev_works(standard_io, rmq_1_stdout, Config, Context). | ||||||
|  | 
 | ||||||
|  | logging_to_stderr_configured_in_env_works(Config) -> | ||||||
|  |     #{var_origins := Origins0} = Context0 = default_context(Config), | ||||||
|  |     Context = Context0#{main_log_file => "-stderr", | ||||||
|  |                         var_origins => Origins0#{ | ||||||
|  |                                          main_log_file => environment}}, | ||||||
|  |     logging_to_stddev_works(standard_error, rmq_1_stderr, Config, Context). | ||||||
|  | 
 | ||||||
|  | logging_to_stddev_works(Stddev, Id, Config, Context) -> | ||||||
|  |     rabbit_prelaunch_logging:clear_config_run_number(), | ||||||
|  |     rabbit_prelaunch_logging:setup(Context), | ||||||
|  | 
 | ||||||
|  |     Handlers = logger:get_handler_config(), | ||||||
|  | 
 | ||||||
|  |     StddevHandler = get_handler_by_id(Handlers, Id), | ||||||
|  |     ?assertNotEqual(undefined, StddevHandler), | ||||||
|  |     ?assertMatch( | ||||||
|  |        #{level := info, | ||||||
|  |          module := rabbit_logger_std_h, | ||||||
|  |          filter_default := log, | ||||||
|  |          filters := [{progress_reports, {_, stop}}, | ||||||
|  |                      {rmqlog_filter, {_, #{global := info, | ||||||
|  |                                            upgrade := none}}}], | ||||||
|  |          formatter := {rabbit_logger_text_fmt, _}, | ||||||
|  |          config := #{type := Stddev}}, | ||||||
|  |        StddevHandler), | ||||||
|  | 
 | ||||||
|  |     UpgradeFileHandler = get_handler_by_id(Handlers, rmq_1_file_1), | ||||||
|  |     UpgradeFile = upgrade_log_file_in_context(Context), | ||||||
|  |     ?assertNotEqual(undefined, UpgradeFileHandler), | ||||||
|  |     ?assertMatch( | ||||||
|  |        #{level := info, | ||||||
|  |          module := rabbit_logger_std_h, | ||||||
|  |          filter_default := stop, | ||||||
|  |          filters := [{rmqlog_filter, {_, #{upgrade := info}}}], | ||||||
|  |          formatter := {rabbit_logger_text_fmt, _}, | ||||||
|  |          config := #{type := file, | ||||||
|  |                      file := UpgradeFile}}, | ||||||
|  |        UpgradeFileHandler), | ||||||
|  | 
 | ||||||
|  |     ?assert(ping_log(Id, info, Config)), | ||||||
|  |     ?assert(ping_log(Id, info, | ||||||
|  |                      #{domain => ?RMQLOG_DOMAIN_GLOBAL}, Config)), | ||||||
|  |     ?assert(ping_log(Id, info, | ||||||
|  |                      #{domain => ['3rd_party']}, Config)), | ||||||
|  |     ?assertNot(ping_log(Id, info, | ||||||
|  |                         #{domain => ?RMQLOG_DOMAIN_UPGRADE}, Config)), | ||||||
|  | 
 | ||||||
|  |     ?assert(ping_log(rmq_1_file_1, info, | ||||||
|  |                      #{domain => ?RMQLOG_DOMAIN_UPGRADE})), | ||||||
|  |     ?assertNot(ping_log(rmq_1_file_1, info, | ||||||
|  |                         #{domain => ?RMQLOG_DOMAIN_GLOBAL})), | ||||||
|  |     ok. | ||||||
|  | 
 | ||||||
|  | logging_to_exchange_works(Config) -> | ||||||
|  |     Context = rabbit_ct_broker_helpers:rpc( | ||||||
|  |                 Config, 0, | ||||||
|  |                 rabbit_prelaunch, get_context, []), | ||||||
|  |     Handlers = rabbit_ct_broker_helpers:rpc( | ||||||
|  |                  Config, 0, | ||||||
|  |                  logger, get_handler_config, []), | ||||||
|  | 
 | ||||||
|  |     ExchangeHandler = get_handler_by_id(Handlers, rmq_1_exchange), | ||||||
|  |     ?assertNotEqual(undefined, ExchangeHandler), | ||||||
|  |     ?assertMatch( | ||||||
|  |        #{level := info, | ||||||
|  |          module := rabbit_logger_exchange_h, | ||||||
|  |          filter_default := log, | ||||||
|  |          filters := [{progress_reports, {_, stop}}, | ||||||
|  |                      {rmqlog_filter, {_, #{global := info, | ||||||
|  |                                            upgrade := none}}}], | ||||||
|  |          formatter := {rabbit_logger_text_fmt, _}, | ||||||
|  |          config := #{exchange := _}}, | ||||||
|  |        ExchangeHandler), | ||||||
|  |     #{config := | ||||||
|  |       #{exchange := #resource{name = XName} = Exchange}} = ExchangeHandler, | ||||||
|  | 
 | ||||||
|  |     UpgradeFileHandler = get_handler_by_id(Handlers, rmq_1_file_2), | ||||||
|  |     UpgradeFile = upgrade_log_file_in_context(Context), | ||||||
|  |     ?assertNotEqual(undefined, UpgradeFileHandler), | ||||||
|  |     ?assertMatch( | ||||||
|  |        #{level := info, | ||||||
|  |          module := rabbit_logger_std_h, | ||||||
|  |          filter_default := stop, | ||||||
|  |          filters := [{rmqlog_filter, {_, #{upgrade := info}}}], | ||||||
|  |          formatter := {rabbit_logger_text_fmt, _}, | ||||||
|  |          config := #{type := file, | ||||||
|  |                      file := UpgradeFile}}, | ||||||
|  |        UpgradeFileHandler), | ||||||
|  | 
 | ||||||
|  |     %% Wait for the expected exchange to be automatically declared. | ||||||
|  |     lists:any( | ||||||
|  |       fun(_) -> | ||||||
|  |               Ret = rabbit_ct_broker_helpers:rpc( | ||||||
|  |                       Config, 0, | ||||||
|  |                       rabbit_exchange, lookup, [Exchange]), | ||||||
|  |               case Ret of | ||||||
|  |                   {ok, _} -> true; | ||||||
|  |                   _       -> timer:sleep(500), | ||||||
|  |                              false | ||||||
|  |               end | ||||||
|  |       end, lists:seq(1, 20)), | ||||||
|  | 
 | ||||||
|  |     %% Declare a queue to collect all logged messages. | ||||||
|  |     {Conn, Chan} = rabbit_ct_client_helpers:open_connection_and_channel( | ||||||
|  |                      Config), | ||||||
|  |     QName = <<"log-messages">>, | ||||||
|  |     ?assertMatch( | ||||||
|  |        #'queue.declare_ok'{}, | ||||||
|  |        amqp_channel:call(Chan, #'queue.declare'{queue = QName, | ||||||
|  |                                                 durable = false})), | ||||||
|  |     ?assertMatch( | ||||||
|  |        #'queue.bind_ok'{}, | ||||||
|  |        amqp_channel:call(Chan, #'queue.bind'{queue = QName, | ||||||
|  |                                              exchange = XName, | ||||||
|  |                                              routing_key = <<"#">>})), | ||||||
|  |     Config1 = rabbit_ct_helpers:set_config( | ||||||
|  |                 Config, {test_channel_and_queue, {Chan, QName}}), | ||||||
|  | 
 | ||||||
|  |     ?assert(ping_log(rmq_1_exchange, info, Config1)), | ||||||
|  |     ?assert(ping_log(rmq_1_exchange, info, | ||||||
|  |                      #{domain => ?RMQLOG_DOMAIN_GLOBAL}, Config1)), | ||||||
|  |     ?assert(ping_log(rmq_1_exchange, info, | ||||||
|  |                      #{domain => ['3rd_party']}, Config1)), | ||||||
|  |     ?assertNot(ping_log(rmq_1_exchange, info, | ||||||
|  |                         #{domain => ?RMQLOG_DOMAIN_UPGRADE}, Config1)), | ||||||
|  | 
 | ||||||
|  |     ?assert(ping_log(rmq_1_file_2, info, | ||||||
|  |                      #{domain => ?RMQLOG_DOMAIN_UPGRADE}, Config)), | ||||||
|  |     ?assertNot(ping_log(rmq_1_file_2, info, | ||||||
|  |                         #{domain => ?RMQLOG_DOMAIN_GLOBAL}, Config)), | ||||||
|  | 
 | ||||||
|  |     amqp_channel:call(Chan, #'queue.delete'{queue = QName}), | ||||||
|  |     rabbit_ct_client_helpers:close_connection_and_channel(Conn, Chan), | ||||||
|  |     ok. | ||||||
|  | 
 | ||||||
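|  | %% When log levels come from the environment (var_origins says `environment'), | ||||||
|  | %% the main handler's own level is expected to be the least severe of the | ||||||
|  | %% configured levels, while the rmqlog filter enforces the per-category ones. | ||||||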
|  | setting_log_levels_in_env_works(Config) -> | ||||||
|  |     GlobalLevel = warning, | ||||||
|  |     PrelaunchLevel = error, | ||||||
|  |     MinLevel = rabbit_prelaunch_logging:get_less_severe_level( | ||||||
|  |                  GlobalLevel, PrelaunchLevel), | ||||||
|  |     #{var_origins := Origins0} = Context0 = default_context(Config), | ||||||
|  |     Context = Context0#{log_levels => #{global => GlobalLevel, | ||||||
|  |                                         "prelaunch" => PrelaunchLevel}, | ||||||
|  |                         var_origins => Origins0#{log_levels => environment}}, | ||||||
|  |     rabbit_prelaunch_logging:clear_config_run_number(), | ||||||
|  |     rabbit_prelaunch_logging:setup(Context), | ||||||
|  | 
 | ||||||
|  |     Handlers = logger:get_handler_config(), | ||||||
|  | 
 | ||||||
|  |     MainFileHandler = get_handler_by_id(Handlers, rmq_1_file_1), | ||||||
|  |     MainFile = main_log_file_in_context(Context), | ||||||
|  |     ?assertNotEqual(undefined, MainFileHandler), | ||||||
|  |     ?assertMatch( | ||||||
|  |        #{level := MinLevel, | ||||||
|  |          module := rabbit_logger_std_h, | ||||||
|  |          filter_default := log, | ||||||
|  |          filters := [{progress_reports, {_, stop}}, | ||||||
|  |                      {rmqlog_filter, {_, #{global := GlobalLevel, | ||||||
|  |                                            prelaunch := PrelaunchLevel, | ||||||
|  |                                            upgrade := none}}}], | ||||||
|  |          formatter := {rabbit_logger_text_fmt, _}, | ||||||
|  |          config := #{type := file, | ||||||
|  |                      file := MainFile}}, | ||||||
|  |        MainFileHandler), | ||||||
|  | 
 | ||||||
|  |     UpgradeFileHandler = get_handler_by_id(Handlers, rmq_1_file_2), | ||||||
|  |     UpgradeFile = upgrade_log_file_in_context(Context), | ||||||
|  |     ?assertNotEqual(undefined, UpgradeFileHandler), | ||||||
|  |     ?assertMatch( | ||||||
|  |        #{level := info, | ||||||
|  |          module := rabbit_logger_std_h, | ||||||
|  |          filter_default := stop, | ||||||
|  |          filters := [{rmqlog_filter, {_, #{upgrade := info}}}], | ||||||
|  |          formatter := {rabbit_logger_text_fmt, _}, | ||||||
|  |          config := #{type := file, | ||||||
|  |                      file := UpgradeFile}}, | ||||||
|  |        UpgradeFileHandler), | ||||||
|  | 
 | ||||||
|  |     ?assertNot(ping_log(rmq_1_file_1, info)), | ||||||
|  |     ?assertNot(ping_log(rmq_1_file_1, info, | ||||||
|  |                         #{domain => ?RMQLOG_DOMAIN_GLOBAL})), | ||||||
|  |     ?assertNot(ping_log(rmq_1_file_1, info, | ||||||
|  |                         #{domain => ?RMQLOG_DOMAIN_PRELAUNCH})), | ||||||
|  |     ?assertNot(ping_log(rmq_1_file_1, GlobalLevel, | ||||||
|  |                         #{domain => ?RMQLOG_DOMAIN_PRELAUNCH})), | ||||||
|  |     ?assertNot(ping_log(rmq_1_file_1, info, | ||||||
|  |                         #{domain => ['3rd_party']})), | ||||||
|  |     ?assertNot(ping_log(rmq_1_file_1, info, | ||||||
|  |                         #{domain => ?RMQLOG_DOMAIN_UPGRADE})), | ||||||
|  | 
 | ||||||
|  |     ?assert(ping_log(rmq_1_file_1, GlobalLevel)), | ||||||
|  |     ?assert(ping_log(rmq_1_file_1, GlobalLevel, | ||||||
|  |                      #{domain => ?RMQLOG_DOMAIN_GLOBAL})), | ||||||
|  |     ?assert(ping_log(rmq_1_file_1, PrelaunchLevel, | ||||||
|  |                      #{domain => ?RMQLOG_DOMAIN_PRELAUNCH})), | ||||||
|  |     ?assert(ping_log(rmq_1_file_1, GlobalLevel, | ||||||
|  |                      #{domain => ['3rd_party']})), | ||||||
|  |     ?assertNot(ping_log(rmq_1_file_1, GlobalLevel, | ||||||
|  |                         #{domain => ?RMQLOG_DOMAIN_UPGRADE})), | ||||||
|  | 
 | ||||||
|  |     ?assert(ping_log(rmq_1_file_2, GlobalLevel, | ||||||
|  |                      #{domain => ?RMQLOG_DOMAIN_UPGRADE})), | ||||||
|  |     ?assertNot(ping_log(rmq_1_file_2, GlobalLevel, | ||||||
|  |                         #{domain => ?RMQLOG_DOMAIN_GLOBAL})), | ||||||
|  |     ok. | ||||||
|  | 
 | ||||||
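|  | %% Same expectations as setting_log_levels_in_env_works/1, except the levels | ||||||
|  | %% are taken from the `log' key of the `rabbit' application environment. | ||||||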
|  | setting_log_levels_in_config_works(Config) -> | ||||||
|  |     GlobalLevel = warning, | ||||||
|  |     PrelaunchLevel = error, | ||||||
|  |     MinLevel = rabbit_prelaunch_logging:get_less_severe_level( | ||||||
|  |                  GlobalLevel, PrelaunchLevel), | ||||||
|  |     Context = default_context(Config), | ||||||
|  |     ok = application:set_env( | ||||||
|  |            rabbit, log, | ||||||
|  |            [{file, [{level, GlobalLevel}]}, | ||||||
|  |             {categories, [{prelaunch, [{level, PrelaunchLevel}]}]}], | ||||||
|  |            [{persistent, true}]), | ||||||
|  |     rabbit_prelaunch_logging:clear_config_run_number(), | ||||||
|  |     rabbit_prelaunch_logging:setup(Context), | ||||||
|  | 
 | ||||||
|  |     Handlers = logger:get_handler_config(), | ||||||
|  | 
 | ||||||
|  |     MainFileHandler = get_handler_by_id(Handlers, rmq_1_file_1), | ||||||
|  |     MainFile = main_log_file_in_context(Context), | ||||||
|  |     ?assertNotEqual(undefined, MainFileHandler), | ||||||
|  |     ?assertMatch( | ||||||
|  |        #{level := MinLevel, | ||||||
|  |          module := rabbit_logger_std_h, | ||||||
|  |          filter_default := log, | ||||||
|  |          filters := [{progress_reports, {_, stop}}, | ||||||
|  |                      {rmqlog_filter, {_, #{global := GlobalLevel, | ||||||
|  |                                            prelaunch := PrelaunchLevel, | ||||||
|  |                                            upgrade := none}}}], | ||||||
|  |          formatter := {rabbit_logger_text_fmt, _}, | ||||||
|  |          config := #{type := file, | ||||||
|  |                      file := MainFile}}, | ||||||
|  |        MainFileHandler), | ||||||
|  | 
 | ||||||
|  |     UpgradeFileHandler = get_handler_by_id(Handlers, rmq_1_file_2), | ||||||
|  |     UpgradeFile = upgrade_log_file_in_context(Context), | ||||||
|  |     ?assertNotEqual(undefined, UpgradeFileHandler), | ||||||
|  |     ?assertMatch( | ||||||
|  |        #{level := info, | ||||||
|  |          module := rabbit_logger_std_h, | ||||||
|  |          filter_default := stop, | ||||||
|  |          filters := [{rmqlog_filter, {_, #{upgrade := info}}}], | ||||||
|  |          formatter := {rabbit_logger_text_fmt, _}, | ||||||
|  |          config := #{type := file, | ||||||
|  |                      file := UpgradeFile}}, | ||||||
|  |        UpgradeFileHandler), | ||||||
|  | 
 | ||||||
|  |     ?assertNot(ping_log(rmq_1_file_1, info)), | ||||||
|  |     ?assertNot(ping_log(rmq_1_file_1, info, | ||||||
|  |                         #{domain => ?RMQLOG_DOMAIN_GLOBAL})), | ||||||
|  |     ?assertNot(ping_log(rmq_1_file_1, info, | ||||||
|  |                         #{domain => ?RMQLOG_DOMAIN_PRELAUNCH})), | ||||||
|  |     ?assertNot(ping_log(rmq_1_file_1, GlobalLevel, | ||||||
|  |                         #{domain => ?RMQLOG_DOMAIN_PRELAUNCH})), | ||||||
|  |     ?assertNot(ping_log(rmq_1_file_1, info, | ||||||
|  |                         #{domain => ['3rd_party']})), | ||||||
|  |     ?assertNot(ping_log(rmq_1_file_1, info, | ||||||
|  |                         #{domain => ?RMQLOG_DOMAIN_UPGRADE})), | ||||||
|  | 
 | ||||||
|  |     ?assert(ping_log(rmq_1_file_1, GlobalLevel)), | ||||||
|  |     ?assert(ping_log(rmq_1_file_1, GlobalLevel, | ||||||
|  |                      #{domain => ?RMQLOG_DOMAIN_GLOBAL})), | ||||||
|  |     ?assert(ping_log(rmq_1_file_1, PrelaunchLevel, | ||||||
|  |                      #{domain => ?RMQLOG_DOMAIN_PRELAUNCH})), | ||||||
|  |     ?assert(ping_log(rmq_1_file_1, GlobalLevel, | ||||||
|  |                      #{domain => ['3rd_party']})), | ||||||
|  |     ?assertNot(ping_log(rmq_1_file_1, GlobalLevel, | ||||||
|  |                         #{domain => ?RMQLOG_DOMAIN_UPGRADE})), | ||||||
|  | 
 | ||||||
|  |     ?assert(ping_log(rmq_1_file_2, GlobalLevel, | ||||||
|  |                      #{domain => ?RMQLOG_DOMAIN_UPGRADE})), | ||||||
|  |     ?assertNot(ping_log(rmq_1_file_2, GlobalLevel, | ||||||
|  |                         #{domain => ?RMQLOG_DOMAIN_GLOBAL})), | ||||||
|  |     ok. | ||||||
|  | 
 | ||||||
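|  | %% With `json => true', the main handler is expected to use | ||||||
|  | %% rabbit_logger_json_fmt; the testcase logs a message with rich metadata and | ||||||
|  | %% decodes the resulting JSON line to check how each term type is encoded. | ||||||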
|  | format_messages_as_json_works(Config) -> | ||||||
|  |     #{var_origins := Origins0} = Context0 = default_context(Config), | ||||||
|  |     Context = Context0#{log_levels => #{json => true}, | ||||||
|  |                         var_origins => Origins0#{log_levels => environment}}, | ||||||
|  |     rabbit_prelaunch_logging:clear_config_run_number(), | ||||||
|  |     rabbit_prelaunch_logging:setup(Context), | ||||||
|  | 
 | ||||||
|  |     Handlers = logger:get_handler_config(), | ||||||
|  | 
 | ||||||
|  |     MainFileHandler = get_handler_by_id(Handlers, rmq_1_file_1), | ||||||
|  |     MainFile = main_log_file_in_context(Context), | ||||||
|  |     ?assertNotEqual(undefined, MainFileHandler), | ||||||
|  |     ?assertMatch( | ||||||
|  |        #{level := info, | ||||||
|  |          module := rabbit_logger_std_h, | ||||||
|  |          filter_default := log, | ||||||
|  |          filters := [{progress_reports, {_, stop}}, | ||||||
|  |                      {rmqlog_filter, {_, #{global := info, | ||||||
|  |                                            upgrade := none}}}], | ||||||
|  |          formatter := {rabbit_logger_json_fmt, _}, | ||||||
|  |          config := #{type := file, | ||||||
|  |                      file := MainFile}}, | ||||||
|  |        MainFileHandler), | ||||||
|  | 
 | ||||||
|  |     ?assertNot(ping_log(rmq_1_file_1, info)), | ||||||
|  | 
 | ||||||
|  |     RandomMsg = get_random_string( | ||||||
|  |                   32, | ||||||
|  |                   "abcdefghijklmnopqrstuvwxyz" | ||||||
|  |                   "ABCDEFGHIJKLMNOPQRSTUVWXYZ"), | ||||||
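|  |     %% One metadata entry per term type; the assertions below check how the | ||||||
|  |     %% JSON formatter encodes each of them (atoms and strings become binaries; | ||||||
|  |     %% funs, pids, ports and refs are turned into their text representation). | ||||||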
|  |     Metadata = #{atom => rabbit, | ||||||
|  |                  integer => 1, | ||||||
|  |                  float => 1.42, | ||||||
|  |                  string => "string", | ||||||
|  |                  list => ["s", a, 3], | ||||||
|  |                  map => #{key => "value"}, | ||||||
|  |                  function => fun get_random_string/2, | ||||||
|  |                  pid => self(), | ||||||
|  |                  port => hd(erlang:ports()), | ||||||
|  |                  ref => erlang:make_ref()}, | ||||||
|  |     ?LOG_INFO(RandomMsg, Metadata), | ||||||
|  | 
 | ||||||
|  |     rabbit_logger_std_h:filesync(rmq_1_file_1), | ||||||
|  |     {ok, Content} = file:read_file(MainFile), | ||||||
|  |     ReOpts = [{capture, first, binary}, multiline], | ||||||
|  |     {match, [Line]} = re:run( | ||||||
|  |                         Content, | ||||||
|  |                         "^.+\"" ++ RandomMsg ++ "\".+$", | ||||||
|  |                         ReOpts), | ||||||
|  |     Term = jsx:decode(Line, [return_maps, {labels, attempt_atom}]), | ||||||
|  | 
 | ||||||
|  |     RandomMsgBin = list_to_binary(RandomMsg), | ||||||
|  |     ?assertMatch(#{time := _}, Term), | ||||||
|  |     ?assertMatch(#{level := <<"info">>}, Term), | ||||||
|  |     ?assertMatch(#{msg := RandomMsgBin}, Term), | ||||||
|  | 
 | ||||||
|  |     Meta = maps:get(meta, Term), | ||||||
|  |     FunBin = list_to_binary(erlang:fun_to_list(maps:get(function, Metadata))), | ||||||
|  |     PidBin = list_to_binary(erlang:pid_to_list(maps:get(pid, Metadata))), | ||||||
|  |     PortBin = list_to_binary(erlang:port_to_list(maps:get(port, Metadata))), | ||||||
|  |     RefBin = list_to_binary(erlang:ref_to_list(maps:get(ref, Metadata))), | ||||||
|  |     ?assertMatch(#{atom := <<"rabbit">>}, Meta), | ||||||
|  |     ?assertMatch(#{integer := 1}, Meta), | ||||||
|  |     ?assertMatch(#{float := 1.42}, Meta), | ||||||
|  |     ?assertMatch(#{string := <<"string">>}, Meta), | ||||||
|  |     ?assertMatch(#{list := [<<"s">>, <<"a">>, 3]}, Meta), | ||||||
|  |     ?assertMatch(#{map := #{key := <<"value">>}}, Meta), | ||||||
|  |     ?assertMatch(#{function := FunBin}, Meta), | ||||||
|  |     ?assertMatch(#{pid := PidBin}, Meta), | ||||||
|  |     ?assertMatch(#{port := PortBin}, Meta), | ||||||
|  |     ?assertMatch(#{ref := RefBin}, Meta). | ||||||
|  | 
 | ||||||
|  | %% ------------------------------------------------------------------- | ||||||
|  | %% Internal functions. | ||||||
|  | %% ------------------------------------------------------------------- | ||||||
|  | 
 | ||||||
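|  | %% Context map used by the local (non-broker) testcases: log files live under | ||||||
|  | %% the log_base_dir stored in the CT config, no log levels are set explicitly, | ||||||
|  | %% and every variable is marked as having its default origin so individual | ||||||
|  | %% testcases can override it. | ||||||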
|  | default_context(Config) -> | ||||||
|  |     LogBaseDir = ?config(log_base_dir, Config), | ||||||
|  |     MainFile = "rabbit.log", | ||||||
|  |     UpgradeFile = "rabbit_upgrade.log", | ||||||
|  |     #{log_base_dir => LogBaseDir, | ||||||
|  |       main_log_file => MainFile, | ||||||
|  |       upgrade_log_file => UpgradeFile, | ||||||
|  |       log_levels => undefined, | ||||||
|  |       var_origins => #{log_base_dir => default, | ||||||
|  |                        main_log_file => default, | ||||||
|  |                        upgrade_log_file => default, | ||||||
|  |                        log_levels => default}}. | ||||||
|  | 
 | ||||||
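|  | %% Expected absolute paths of the main and upgrade log files for a context. | ||||||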
|  | main_log_file_in_context(#{log_base_dir := LogBaseDir, | ||||||
|  |                            main_log_file := MainLogFile}) -> | ||||||
|  |     filename:join(LogBaseDir, MainLogFile). | ||||||
|  | 
 | ||||||
|  | upgrade_log_file_in_context(#{log_base_dir := LogBaseDir, | ||||||
|  |                               upgrade_log_file := UpgradeLogFile}) -> | ||||||
|  |     filename:join(LogBaseDir, UpgradeLogFile). | ||||||
|  | 
 | ||||||
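|  | %% Look up a Logger handler configuration by its id; returns `undefined' if | ||||||
|  | %% no such handler is installed. | ||||||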
|  | get_handler_by_id([#{id := Id} = Handler | _], Id) -> | ||||||
|  |     Handler; | ||||||
|  | get_handler_by_id([_ | Rest], Id) -> | ||||||
|  |     get_handler_by_id(Rest, Id); | ||||||
|  | get_handler_by_id([], _) -> | ||||||
|  |     undefined. | ||||||
|  | 
 | ||||||
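|  | %% Log a random marker message at the given level and metadata, then check | ||||||
|  | %% whether it reached the output of handler `Id'. When the testcase runs | ||||||
|  | %% against a started broker (see need_rpc/1), the call goes through RPC on | ||||||
|  | %% node 0. Returns a boolean. | ||||||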
|  | ping_log(Id, Level) -> | ||||||
|  |     ping_log(Id, Level, #{}, []). | ||||||
|  | 
 | ||||||
|  | ping_log(Id, Level, Metadata) when is_map(Metadata) -> | ||||||
|  |     ping_log(Id, Level, Metadata, []); | ||||||
|  | ping_log(Id, Level, Config) when is_list(Config) -> | ||||||
|  |     ping_log(Id, Level, #{}, Config). | ||||||
|  | 
 | ||||||
|  | ping_log(Id, Level, Metadata, Config) -> | ||||||
|  |     RandomMsg = get_random_string( | ||||||
|  |                   32, | ||||||
|  |                   "abcdefghijklmnopqrstuvwxyz" | ||||||
|  |                   "ABCDEFGHIJKLMNOPQRSTUVWXYZ"), | ||||||
|  |     ct:log("Logging \"~ts\" at level ~ts (~p)", [RandomMsg, Level, Metadata]), | ||||||
|  |     case need_rpc(Config) of | ||||||
|  |         false -> logger:log(Level, RandomMsg, Metadata); | ||||||
|  |         true  -> rabbit_ct_broker_helpers:rpc( | ||||||
|  |                    Config, 0, | ||||||
|  |                    logger, log, [Level, RandomMsg, Metadata]) | ||||||
|  |     end, | ||||||
|  |     check_log(Id, RandomMsg, Config). | ||||||
|  | 
 | ||||||
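|  | %% The broker-based groups set `rmq_nodes_count' in the CT config; its | ||||||
|  | %% presence is taken as a hint that logging and checks must be performed on | ||||||
|  | %% the remote node via RPC rather than locally. | ||||||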
|  | need_rpc(Config) -> | ||||||
|  |     rabbit_ct_helpers:get_config( | ||||||
|  |       Config, rmq_nodes_count) =/= undefined. | ||||||
|  | 
 | ||||||
|  | check_log(Id, RandomMsg, Config) -> | ||||||
|  |     {ok, Handler} = case need_rpc(Config) of | ||||||
|  |                         false -> logger:get_handler_config(Id); | ||||||
|  |                         true  -> rabbit_ct_broker_helpers:rpc( | ||||||
|  |                                    Config, 0, | ||||||
|  |                                    logger, get_handler_config, [Id]) | ||||||
|  |                     end, | ||||||
|  |     check_log1(Handler, RandomMsg, Config). | ||||||
|  | 
 | ||||||
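|  | %% Per-handler verification: for a file handler, sync the file and grep it | ||||||
|  | %% for the marker; for a standard_io/standard_error handler, poll the | ||||||
|  | %% common_test HTML log which captures that output; for the exchange handler, | ||||||
|  | %% poll the test queue bound to the log exchange with basic.get. | ||||||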
|  | check_log1(#{id := Id, | ||||||
|  |              module := rabbit_logger_std_h, | ||||||
|  |              config := #{type := file, | ||||||
|  |                          file := Filename}}, | ||||||
|  |            RandomMsg, | ||||||
|  |            Config) -> | ||||||
|  |     ok = case need_rpc(Config) of | ||||||
|  |              false -> rabbit_logger_std_h:filesync(Id); | ||||||
|  |              true  -> rabbit_ct_broker_helpers:rpc( | ||||||
|  |                         Config, 0, | ||||||
|  |                         rabbit_logger_std_h, filesync, [Id]) | ||||||
|  |          end, | ||||||
|  |     {ok, Content} = file:read_file(Filename), | ||||||
|  |     ReOpts = [{capture, none}, multiline], | ||||||
|  |     match =:= re:run(Content, RandomMsg ++ "$", ReOpts); | ||||||
|  | check_log1(#{module := Mod, | ||||||
|  |              config := #{type := Stddev}}, | ||||||
|  |            RandomMsg, | ||||||
|  |            Config) | ||||||
|  |   when ?IS_STD_H_COMPAT(Mod) andalso ?IS_STDDEV(Stddev) -> | ||||||
|  |     Filename = html_report_filename(Config), | ||||||
|  |     ReOpts = [{capture, none}, multiline], | ||||||
|  |     lists:any( | ||||||
|  |       fun(_) -> | ||||||
|  |               {ok, Content} = file:read_file(Filename), | ||||||
|  |               case re:run(Content, RandomMsg ++ "$", ReOpts) of | ||||||
|  |                   match -> true; | ||||||
|  |                   _     -> timer:sleep(500), | ||||||
|  |                            false | ||||||
|  |               end | ||||||
|  |       end, lists:seq(1, 10)); | ||||||
|  | check_log1(#{module := rabbit_logger_exchange_h}, | ||||||
|  |            RandomMsg, | ||||||
|  |            Config) -> | ||||||
|  |     {Chan, QName} = ?config(test_channel_and_queue, Config), | ||||||
|  |     ReOpts = [{capture, none}, multiline], | ||||||
|  |     lists:any( | ||||||
|  |       fun(_) -> | ||||||
|  |               Ret = amqp_channel:call( | ||||||
|  |                       Chan, #'basic.get'{queue = QName, no_ack = false}), | ||||||
|  |               case Ret of | ||||||
|  |                   {#'basic.get_ok'{}, #amqp_msg{payload = Content}} -> | ||||||
|  |                       case re:run(Content, RandomMsg ++ "$", ReOpts) of | ||||||
|  |                           match -> true; | ||||||
|  |                           _     -> timer:sleep(500), | ||||||
|  |                                    false | ||||||
|  |                       end; | ||||||
|  |                   #'basic.get_empty'{} -> | ||||||
|  |                       timer:sleep(500), | ||||||
|  |                       false; | ||||||
|  |                   Other -> | ||||||
|  |                       io:format(standard_error, "OTHER -> ~p~n", [Other]), | ||||||
|  |                       timer:sleep(500), | ||||||
|  |                       false | ||||||
|  |               end | ||||||
|  |       end, lists:seq(1, 10)). | ||||||
|  | 
 | ||||||
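|  | %% Random marker string: logged by ping_log/4 and grepped for in the | ||||||
|  | %% corresponding handler output, so each ping is unambiguous. | ||||||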
|  | get_random_string(Length, AllowedChars) -> | ||||||
|  |     lists:foldl(fun(_, Acc) -> | ||||||
|  |                         [lists:nth(rand:uniform(length(AllowedChars)), | ||||||
|  |                                    AllowedChars)] | ||||||
|  |                         ++ Acc | ||||||
|  |                 end, [], lists:seq(1, Length)). | ||||||
|  | 
 | ||||||
|  | html_report_filename(Config) -> | ||||||
|  |     ?config(tc_logfile, Config). | ||||||
|  | @ -1,838 +0,0 @@ | ||||||
| %% This Source Code Form is subject to the terms of the Mozilla Public |  | ||||||
| %% License, v. 2.0. If a copy of the MPL was not distributed with this |  | ||||||
| %% file, You can obtain one at https://mozilla.org/MPL/2.0/. |  | ||||||
| %% |  | ||||||
| %% Copyright (c) 2016-2021 VMware, Inc. or its affiliates.  All rights reserved. |  | ||||||
| %% |  | ||||||
| 
 |  | ||||||
| -module(unit_log_config_SUITE). |  | ||||||
| 
 |  | ||||||
| -include_lib("common_test/include/ct.hrl"). |  | ||||||
| -include_lib("eunit/include/eunit.hrl"). |  | ||||||
| 
 |  | ||||||
| -compile(export_all). |  | ||||||
| 
 |  | ||||||
| all() -> |  | ||||||
|     [ |  | ||||||
|     default, |  | ||||||
|     env_var_tty, |  | ||||||
|     config_file_handler, |  | ||||||
|     config_file_handler_level, |  | ||||||
|     config_file_handler_rotation, |  | ||||||
|     config_console_handler, |  | ||||||
|     config_exchange_handler, |  | ||||||
|     config_syslog_handler, |  | ||||||
|     config_syslog_handler_options, |  | ||||||
|     config_multiple_handlers, |  | ||||||
| 
 |  | ||||||
|     env_var_overrides_config, |  | ||||||
|     env_var_disable_log, |  | ||||||
| 
 |  | ||||||
|     config_sinks_level, |  | ||||||
|     config_sink_file, |  | ||||||
|     config_sink_file_override_config_handler_file, |  | ||||||
| 
 |  | ||||||
|     config_handlers_merged_with_lager_handlers, |  | ||||||
|     sink_handlers_merged_with_lager_extra_sinks_handlers, |  | ||||||
|     sink_file_rewrites_file_backends |  | ||||||
|     ]. |  | ||||||
| 
 |  | ||||||
| init_per_testcase(_, Config) -> |  | ||||||
|     application:load(rabbit), |  | ||||||
|     application:load(lager), |  | ||||||
|     application:unset_env(rabbit, log), |  | ||||||
|     application:unset_env(rabbit, lager_log_root), |  | ||||||
|     application:unset_env(rabbit, lager_default_file), |  | ||||||
|     application:unset_env(rabbit, lager_upgrade_file), |  | ||||||
|     application:unset_env(lager, handlers), |  | ||||||
|     application:unset_env(lager, rabbit_handlers), |  | ||||||
|     application:unset_env(lager, extra_sinks), |  | ||||||
|     unset_logs_var_origin(), |  | ||||||
|     Config. |  | ||||||
| 
 |  | ||||||
| end_per_testcase(_, Config) -> |  | ||||||
|     application:unset_env(rabbit, log), |  | ||||||
|     application:unset_env(rabbit, lager_log_root), |  | ||||||
|     application:unset_env(rabbit, lager_default_file), |  | ||||||
|     application:unset_env(rabbit, lager_upgrade_file), |  | ||||||
|     application:unset_env(lager, handlers), |  | ||||||
|     application:unset_env(lager, rabbit_handlers), |  | ||||||
|     application:unset_env(lager, extra_sinks), |  | ||||||
|     unset_logs_var_origin(), |  | ||||||
|     application:unload(rabbit), |  | ||||||
|     application:unload(lager), |  | ||||||
|     Config. |  | ||||||
| 
 |  | ||||||
| sink_file_rewrites_file_backends(_) -> |  | ||||||
|     application:set_env(rabbit, log, [ |  | ||||||
|         %% Disable rabbit file handler |  | ||||||
|         {file, [{file, false}]}, |  | ||||||
|         {categories, [{federation, [{file, "federation.log"}, {level, warning}]}]} |  | ||||||
|     ]), |  | ||||||
| 
 |  | ||||||
|     LagerHandlers = [ |  | ||||||
|         {lager_file_backend, [{file, "lager_file.log"}, {level, error}]}, |  | ||||||
|         {lager_file_backend, [{file, "lager_file_1.log"}, {level, error}]}, |  | ||||||
|         {lager_console_backend, [{level, info}]}, |  | ||||||
|         {lager_exchange_backend, [{level, info}]} |  | ||||||
|     ], |  | ||||||
|     application:set_env(lager, handlers, LagerHandlers), |  | ||||||
|     rabbit_lager:configure_lager(), |  | ||||||
| 
 |  | ||||||
|     ExpectedSinks = sort_sinks(sink_rewrite_sinks()), |  | ||||||
|     ?assertEqual(ExpectedSinks, sort_sinks(application:get_env(lager, extra_sinks, undefined))). |  | ||||||
| 
 |  | ||||||
| sink_rewrite_sinks() -> |  | ||||||
|     [{error_logger_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_channel_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_connection_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_feature_flags_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_federation_lager_event, |  | ||||||
|         [{handlers,[ |  | ||||||
|             {lager_file_backend, |  | ||||||
|                     [{date, ""}, |  | ||||||
|                      {file, "federation.log"}, |  | ||||||
|                      {formatter_config, formatter_config(file)}, |  | ||||||
|                      {level, warning}, |  | ||||||
|                      {size, 0}]}, |  | ||||||
|             {lager_console_backend, [{level, warning}]}, |  | ||||||
|             {lager_exchange_backend, [{level, warning}]} |  | ||||||
|         ]}, |  | ||||||
|          {rabbit_handlers,[ |  | ||||||
|             {lager_file_backend, |  | ||||||
|                     [{date, ""}, |  | ||||||
|                      {file, "federation.log"}, |  | ||||||
|                      {formatter_config, formatter_config(file)}, |  | ||||||
|                      {level, warning}, |  | ||||||
|                      {size, 0}]}, |  | ||||||
|             {lager_console_backend, [{level, warning}]}, |  | ||||||
|             {lager_exchange_backend, [{level, warning}]} |  | ||||||
|         ]}]}, |  | ||||||
|      {rabbit_log_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_ldap_lager_event, |  | ||||||
|                [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|                 {rabbit_handlers, |  | ||||||
|                  [{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_mirroring_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_osiris_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers, |  | ||||||
|         [{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_prelaunch_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_queue_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_ra_lager_event, |  | ||||||
|       [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|        {rabbit_handlers, |  | ||||||
|         [{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_shovel_lager_event, |  | ||||||
|       [{handlers, [{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|        {rabbit_handlers, |  | ||||||
|         [{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_upgrade_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]} |  | ||||||
|      ]. |  | ||||||
| 
 |  | ||||||
| sink_handlers_merged_with_lager_extra_sinks_handlers(_) -> |  | ||||||
|     DefaultLevel = debug, |  | ||||||
|     application:set_env(rabbit, log, [ |  | ||||||
|         {file,     [{file, "rabbit_file.log"}, {level, DefaultLevel}]}, |  | ||||||
|         {console,  [{enabled, true}, {level, error}]}, |  | ||||||
|         {exchange, [{enabled, true}, {level, error}]}, |  | ||||||
|         {categories, [ |  | ||||||
|             {connection, [{level, debug}]}, |  | ||||||
|             {channel, [{level, warning}, {file, "channel_log.log"}]} |  | ||||||
|         ]} |  | ||||||
|     ]), |  | ||||||
| 
 |  | ||||||
|     LagerSinks = [ |  | ||||||
|         {rabbit_log_connection_lager_event, |  | ||||||
|             [{handlers, |  | ||||||
|                 [{lager_file_backend, |  | ||||||
|                     [{file, "connection_lager.log"}, |  | ||||||
|                      {level, info}]}]}]}, |  | ||||||
|         {rabbit_log_channel_lager_event, |  | ||||||
|             [{handlers, |  | ||||||
|                 [{lager_console_backend, [{level, debug}]}, |  | ||||||
|                  {lager_exchange_backend, [{level, debug}]}, |  | ||||||
|                  {lager_file_backend, [{level, error}, |  | ||||||
|                                        {file, "channel_lager.log"}]}]}]}], |  | ||||||
| 
 |  | ||||||
|     application:set_env(lager, extra_sinks, LagerSinks), |  | ||||||
|     rabbit_lager:configure_lager(), |  | ||||||
| 
 |  | ||||||
|     ExpectedSinks = sort_sinks([ |  | ||||||
|         {error_logger_lager_event, |  | ||||||
|             [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}, |  | ||||||
|              {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]}, |  | ||||||
|          {rabbit_log_channel_lager_event, |  | ||||||
|             [{handlers,[ |  | ||||||
|                 {lager_console_backend, [{level, error}, |  | ||||||
|                                          {formatter_config, formatter_config(console)}]}, |  | ||||||
|                 {lager_exchange_backend, [{level, error}, |  | ||||||
|                                         {formatter_config, formatter_config(exchange)}]}, |  | ||||||
|                 {lager_file_backend, |  | ||||||
|                     [{date, ""}, |  | ||||||
|                      {file, "channel_log.log"}, |  | ||||||
|                      {formatter_config, formatter_config(file)}, |  | ||||||
|                      {level, warning}, |  | ||||||
|                      {size, 0}]}, |  | ||||||
|                 {lager_console_backend, [{level, debug}]}, |  | ||||||
|                 {lager_exchange_backend, [{level, debug}]}, |  | ||||||
|                 {lager_file_backend, [{level, error}, |  | ||||||
|                                       {file, "channel_lager.log"}]} |  | ||||||
|                 ]}, |  | ||||||
|              {rabbit_handlers,[ |  | ||||||
|                 {lager_console_backend, [{level, error}, |  | ||||||
|                                          {formatter_config, formatter_config(console)}]}, |  | ||||||
|                 {lager_exchange_backend, [{level, error}, |  | ||||||
|                                         {formatter_config, formatter_config(exchange)}]}, |  | ||||||
|                 {lager_file_backend, |  | ||||||
|                     [{date, ""}, |  | ||||||
|                      {file, "channel_log.log"}, |  | ||||||
|                      {formatter_config, formatter_config(file)}, |  | ||||||
|                      {level, warning}, |  | ||||||
|                      {size, 0}]}]} |  | ||||||
|              ]}, |  | ||||||
|          {rabbit_log_connection_lager_event, |  | ||||||
|             [{handlers,[{lager_forwarder_backend,[lager_event,debug]}, |  | ||||||
|                         {lager_file_backend, [{file, "connection_lager.log"}, {level, info}]}]}, |  | ||||||
|              {rabbit_handlers,[{lager_forwarder_backend,[lager_event,debug]}]}]}, |  | ||||||
|          {rabbit_log_feature_flags_lager_event, |  | ||||||
|             [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}, |  | ||||||
|              {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]}, |  | ||||||
|          {rabbit_log_federation_lager_event, |  | ||||||
|             [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}, |  | ||||||
|              {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]}, |  | ||||||
|          {rabbit_log_lager_event, |  | ||||||
|             [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}, |  | ||||||
|              {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]}, |  | ||||||
|          {rabbit_log_ldap_lager_event, |  | ||||||
|                    [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}, |  | ||||||
|                     {rabbit_handlers, |  | ||||||
|                      [{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]}, |  | ||||||
|          {rabbit_log_mirroring_lager_event, |  | ||||||
|             [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}, |  | ||||||
|              {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]}, |  | ||||||
|          {rabbit_log_osiris_lager_event, |  | ||||||
|             [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}, |  | ||||||
|              {rabbit_handlers, |  | ||||||
|             [{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]}, |  | ||||||
|          {rabbit_log_prelaunch_lager_event, |  | ||||||
|             [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}, |  | ||||||
|              {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]}, |  | ||||||
|          {rabbit_log_queue_lager_event, |  | ||||||
|             [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}, |  | ||||||
|              {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]}, |  | ||||||
|          {rabbit_log_ra_lager_event, |  | ||||||
|             [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}, |  | ||||||
|              {rabbit_handlers, |  | ||||||
|             [{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]}, |  | ||||||
|          {rabbit_log_shovel_lager_event, |  | ||||||
|             [{handlers, [{lager_forwarder_backend,[lager_event,DefaultLevel]}]}, |  | ||||||
|              {rabbit_handlers, |  | ||||||
|               [{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]}, |  | ||||||
|          {rabbit_log_upgrade_lager_event, |  | ||||||
|             [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}, |  | ||||||
|              {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]}]), |  | ||||||
| 
 |  | ||||||
|     ?assertEqual(ExpectedSinks, sort_sinks(application:get_env(lager, extra_sinks, undefined))). |  | ||||||
| 
 |  | ||||||
| config_handlers_merged_with_lager_handlers(_) -> |  | ||||||
|     application:set_env(rabbit, log, [ |  | ||||||
|         {file,    [{file, "rabbit_file.log"}, {level, debug}]}, |  | ||||||
|         {console, [{enabled, true}, {level, error}]}, |  | ||||||
|         {exchange,  [{enabled, true}, {level, error}]}, |  | ||||||
|         {syslog,  [{enabled, true}]} |  | ||||||
|     ]), |  | ||||||
| 
 |  | ||||||
|     LagerHandlers = [ |  | ||||||
|         {lager_file_backend, [{file, "lager_file.log"}, {level, info}]}, |  | ||||||
|         {lager_console_backend, [{level, info}]}, |  | ||||||
|         {lager_exchange_backend, [{level, info}]}, |  | ||||||
|         {lager_exchange_backend, [{level, info}]} |  | ||||||
|     ], |  | ||||||
|     application:set_env(lager, handlers, LagerHandlers), |  | ||||||
|     rabbit_lager:configure_lager(), |  | ||||||
| 
 |  | ||||||
|     FileHandlers = default_expected_handlers("rabbit_file.log", debug), |  | ||||||
|     ConsoleHandlers = expected_console_handler(error), |  | ||||||
|     RabbitHandlers = expected_rabbit_handler(error), |  | ||||||
|     SyslogHandlers = expected_syslog_handler(), |  | ||||||
| 
 |  | ||||||
|     ExpectedRabbitHandlers = sort_handlers(FileHandlers ++ ConsoleHandlers ++ RabbitHandlers ++ SyslogHandlers), |  | ||||||
|     ExpectedHandlers = sort_handlers(ExpectedRabbitHandlers ++ LagerHandlers), |  | ||||||
| 
 |  | ||||||
|     ?assertEqual(ExpectedRabbitHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))), |  | ||||||
|     ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))). |  | ||||||
| 
 |  | ||||||
| config_sinks_level(_) -> |  | ||||||
|     DefaultLogFile = "rabbit_default.log", |  | ||||||
|     application:set_env(rabbit, lager_default_file, DefaultLogFile), |  | ||||||
| 
 |  | ||||||
|     application:set_env(rabbit, log, [ |  | ||||||
|         {categories, [ |  | ||||||
|             {connection, [{level, warning}]}, |  | ||||||
|             {channel, [{level, debug}]}, |  | ||||||
|             {mirroring, [{level, error}]} |  | ||||||
|         ]} |  | ||||||
|     ]), |  | ||||||
| 
 |  | ||||||
|     rabbit_lager:configure_lager(), |  | ||||||
| 
 |  | ||||||
|     ExpectedSinks = sort_sinks(level_sinks()), |  | ||||||
|     ?assertEqual(ExpectedSinks, sort_sinks(application:get_env(lager, extra_sinks, undefined))). |  | ||||||
| 
 |  | ||||||
| level_sinks() -> |  | ||||||
|     [{error_logger_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_channel_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,debug]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,debug]}]}]}, |  | ||||||
|      {rabbit_log_connection_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,warning]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,warning]}]}]}, |  | ||||||
|      {rabbit_log_feature_flags_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_federation_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_ldap_lager_event, |  | ||||||
|                [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|                 {rabbit_handlers, |  | ||||||
|                  [{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_mirroring_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,error]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,error]}]}]}, |  | ||||||
|      {rabbit_log_osiris_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers, |  | ||||||
|         [{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_prelaunch_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_queue_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_ra_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers, |  | ||||||
|         [{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_shovel_lager_event, |  | ||||||
|         [{handlers, [{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers, |  | ||||||
|              [{lager_forwarder_backend, |  | ||||||
|                   [lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_upgrade_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]} |  | ||||||
|      ]. |  | ||||||
| 
 |  | ||||||
| config_sink_file(_) -> |  | ||||||
|     DefaultLogFile = "rabbit_default.log", |  | ||||||
|     application:set_env(rabbit, lager_default_file, DefaultLogFile), |  | ||||||
| 
 |  | ||||||
|     DefaultLevel = error, |  | ||||||
|     application:set_env(rabbit, log, [ |  | ||||||
|         {console, [{enabled, true}]}, |  | ||||||
|         {exchange, [{enabled, true}]}, |  | ||||||
|         {file, [{level, DefaultLevel}]}, |  | ||||||
|         {categories, [ |  | ||||||
|             {connection, [{file, "connection.log"}, {level, warning}]} |  | ||||||
|         ]} |  | ||||||
|     ]), |  | ||||||
| 
 |  | ||||||
|     rabbit_lager:configure_lager(), |  | ||||||
| 
 |  | ||||||
|     ExpectedSinks = sort_sinks(file_sinks(DefaultLevel)), |  | ||||||
|     ?assertEqual(ExpectedSinks, sort_sinks(application:get_env(lager, extra_sinks, undefined))). |  | ||||||
| 
 |  | ||||||
| config_sink_file_override_config_handler_file(_) -> |  | ||||||
|     DefaultLogFile = "rabbit_default.log", |  | ||||||
|     application:set_env(rabbit, lager_default_file, DefaultLogFile), |  | ||||||
| 
 |  | ||||||
|     NonDefaultLogFile = "rabbit_not_default.log", |  | ||||||
| 
 |  | ||||||
|     DefaultLevel = error, |  | ||||||
|     application:set_env(rabbit, log, [ |  | ||||||
|         {file, [{file, NonDefaultLogFile}, {level, DefaultLevel}]}, |  | ||||||
|         {console, [{enabled, true}]}, |  | ||||||
|         {exchange, [{enabled, true}]}, |  | ||||||
|         {categories, [ |  | ||||||
|             {connection, [{file, "connection.log"}, {level, warning}]} |  | ||||||
|         ]} |  | ||||||
|     ]), |  | ||||||
| 
 |  | ||||||
|     rabbit_lager:configure_lager(), |  | ||||||
| 
 |  | ||||||
|     ExpectedSinks = sort_sinks(file_sinks(DefaultLevel)), |  | ||||||
|     ?assertEqual(ExpectedSinks, sort_sinks(application:get_env(lager, extra_sinks, undefined))). |  | ||||||
| 
 |  | ||||||
| file_sinks() -> |  | ||||||
|     file_sinks(info). |  | ||||||
| 
 |  | ||||||
| file_sinks(DefaultLevel) -> |  | ||||||
|     [{error_logger_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]}, |  | ||||||
|      {rabbit_log_channel_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]}, |  | ||||||
|      {rabbit_log_connection_lager_event, |  | ||||||
|         [{handlers,[ |  | ||||||
|             {lager_console_backend, [{level, warning}, |  | ||||||
|                                      {formatter_config, formatter_config(console)}]}, |  | ||||||
|             {lager_exchange_backend, [{level, warning}, |  | ||||||
|                                     {formatter_config, formatter_config(exchange)}]}, |  | ||||||
|             {lager_file_backend, |  | ||||||
|                 [{date, ""}, |  | ||||||
|                  {file, "connection.log"}, |  | ||||||
|                  {formatter_config, formatter_config(file)}, |  | ||||||
|                  {level, error}, |  | ||||||
|                  {size, 0}]}]}, |  | ||||||
|          {rabbit_handlers,[ |  | ||||||
|             {lager_console_backend, [{level, warning}, |  | ||||||
|                                      {formatter_config, formatter_config(console)}]}, |  | ||||||
|             {lager_exchange_backend, [{level, warning}, |  | ||||||
|                                     {formatter_config, formatter_config(exchange)}]}, |  | ||||||
|             {lager_file_backend, |  | ||||||
|                 [{date, ""}, |  | ||||||
|                  {file, "connection.log"}, |  | ||||||
|                  {formatter_config, formatter_config(backend)}, |  | ||||||
|                  {level, error}, |  | ||||||
|                  {size, 0}]}]} |  | ||||||
|          ]}, |  | ||||||
|      {rabbit_log_feature_flags_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]}, |  | ||||||
|      {rabbit_log_federation_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]}, |  | ||||||
|      {rabbit_log_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]}, |  | ||||||
|      {rabbit_log_ldap_lager_event, |  | ||||||
|                [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}, |  | ||||||
|                 {rabbit_handlers, |  | ||||||
|                  [{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]}, |  | ||||||
|      {rabbit_log_mirroring_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]}, |  | ||||||
|      {rabbit_log_osiris_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}, |  | ||||||
|          {rabbit_handlers, |  | ||||||
|         [{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]}, |  | ||||||
|      {rabbit_log_prelaunch_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]}, |  | ||||||
|      {rabbit_log_queue_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]}, |  | ||||||
|      {rabbit_log_ra_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}, |  | ||||||
|          {rabbit_handlers, |  | ||||||
|         [{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]}, |  | ||||||
|      {rabbit_log_shovel_lager_event, |  | ||||||
|         [{handlers, [{lager_forwarder_backend,[lager_event,DefaultLevel]}]}, |  | ||||||
|          {rabbit_handlers, |  | ||||||
|           [{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]}, |  | ||||||
|      {rabbit_log_upgrade_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]} |  | ||||||
|      ]. |  | ||||||
| 
 |  | ||||||
| config_multiple_handlers(_) -> |  | ||||||
|     DefaultLogFile = "rabbit_default.log", |  | ||||||
|     application:set_env(rabbit, lager_default_file, DefaultLogFile), |  | ||||||
| 
 |  | ||||||
|     application:set_env(rabbit, log, [ |  | ||||||
|         %% Disable file output |  | ||||||
|         {file, [{file, false}]}, |  | ||||||
|         %% Enable console output |  | ||||||
|         {console, [{enabled, true}]}, |  | ||||||
|         %% Enable exchange output |  | ||||||
|         {exchange, [{enabled, true}]}, |  | ||||||
|         %% Enable a syslog output |  | ||||||
|         {syslog, [{enabled, true}, {level, error}]}]), |  | ||||||
| 
 |  | ||||||
|     rabbit_lager:configure_lager(), |  | ||||||
| 
 |  | ||||||
|     ConsoleHandlers = expected_console_handler(), |  | ||||||
|     RabbitHandlers = expected_rabbit_handler(), |  | ||||||
|     SyslogHandlers = expected_syslog_handler(error), |  | ||||||
| 
 |  | ||||||
|     ExpectedHandlers = sort_handlers(SyslogHandlers ++ ConsoleHandlers ++ RabbitHandlers), |  | ||||||
| 
 |  | ||||||
|     ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))), |  | ||||||
|     ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))). |  | ||||||
| 
 |  | ||||||
| config_console_handler(_) -> |  | ||||||
|     DefaultLogFile = "rabbit_default.log", |  | ||||||
|     application:set_env(rabbit, lager_default_file, DefaultLogFile), |  | ||||||
|     application:set_env(rabbit, log, [{console, [{enabled, true}]}]), |  | ||||||
| 
 |  | ||||||
|     rabbit_lager:configure_lager(), |  | ||||||
| 
 |  | ||||||
|     FileHandlers = default_expected_handlers(DefaultLogFile), |  | ||||||
|     ConsoleHandlers = expected_console_handler(), |  | ||||||
| 
 |  | ||||||
|     ExpectedHandlers = sort_handlers(FileHandlers ++ ConsoleHandlers), |  | ||||||
| 
 |  | ||||||
|     ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))), |  | ||||||
|     ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))). |  | ||||||
| 
 |  | ||||||
| config_exchange_handler(_) -> |  | ||||||
|     DefaultLogFile = "rabbit_default.log", |  | ||||||
|     application:set_env(rabbit, lager_default_file, DefaultLogFile), |  | ||||||
|     application:set_env(rabbit, log, [{exchange, [{enabled, true}]}]), |  | ||||||
| 
 |  | ||||||
|     rabbit_lager:configure_lager(), |  | ||||||
| 
 |  | ||||||
|     FileHandlers = default_expected_handlers(DefaultLogFile), |  | ||||||
|     ExchangeHandlers = expected_rabbit_handler(), |  | ||||||
| 
 |  | ||||||
|     ExpectedHandlers = sort_handlers(FileHandlers ++ ExchangeHandlers), |  | ||||||
| 
 |  | ||||||
|     ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))), |  | ||||||
|     ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))). |  | ||||||
| 
 |  | ||||||
| expected_console_handler() -> |  | ||||||
|     expected_console_handler(debug). |  | ||||||
| 
 |  | ||||||
| expected_console_handler(Level) -> |  | ||||||
|     [{lager_console_backend, [{level, Level}, |  | ||||||
|                               {formatter_config, formatter_config(console)}]}]. |  | ||||||
| 
 |  | ||||||
| expected_rabbit_handler() -> |  | ||||||
|     expected_rabbit_handler(debug). |  | ||||||
| 
 |  | ||||||
| expected_rabbit_handler(Level) -> |  | ||||||
|     [{lager_exchange_backend, [{level, Level}, |  | ||||||
|                              {formatter_config, formatter_config(exchange)}]}]. |  | ||||||
| 
 |  | ||||||
| config_syslog_handler(_) -> |  | ||||||
|     DefaultLogFile = "rabbit_default.log", |  | ||||||
|     application:set_env(rabbit, lager_default_file, DefaultLogFile), |  | ||||||
|     application:set_env(rabbit, log, [{syslog, [{enabled, true}]}]), |  | ||||||
| 
 |  | ||||||
|     rabbit_lager:configure_lager(), |  | ||||||
| 
 |  | ||||||
|     FileHandlers = default_expected_handlers(DefaultLogFile), |  | ||||||
|     SyslogHandlers = expected_syslog_handler(), |  | ||||||
| 
 |  | ||||||
|     ExpectedHandlers = sort_handlers(FileHandlers ++ SyslogHandlers), |  | ||||||
| 
 |  | ||||||
|     ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))), |  | ||||||
|     ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))). |  | ||||||
| 
 |  | ||||||
| config_syslog_handler_options(_) -> |  | ||||||
|     DefaultLogFile = "rabbit_default.log", |  | ||||||
|     application:set_env(rabbit, lager_default_file, DefaultLogFile), |  | ||||||
|     application:set_env(rabbit, log, [{syslog, [{enabled, true}, |  | ||||||
|                                                 {level, warning}]}]), |  | ||||||
| 
 |  | ||||||
|     rabbit_lager:configure_lager(), |  | ||||||
| 
 |  | ||||||
|     FileHandlers = default_expected_handlers(DefaultLogFile), |  | ||||||
|     SyslogHandlers = expected_syslog_handler(warning), |  | ||||||
| 
 |  | ||||||
|     ExpectedHandlers = sort_handlers(FileHandlers ++ SyslogHandlers), |  | ||||||
| 
 |  | ||||||
|     ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))), |  | ||||||
|     ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))). |  | ||||||
| 
 |  | ||||||
| expected_syslog_handler() -> |  | ||||||
|     expected_syslog_handler(debug). |  | ||||||
| 
 |  | ||||||
| expected_syslog_handler(Level) -> |  | ||||||
|     [{syslog_lager_backend, [Level, |  | ||||||
|                              {}, |  | ||||||
|                              {lager_default_formatter, syslog_formatter_config()}]}]. |  | ||||||
| 
 |  | ||||||
| env_var_overrides_config(_) -> |  | ||||||
|     EnvLogFile = "rabbit_default.log", |  | ||||||
|     application:set_env(rabbit, lager_default_file, EnvLogFile), |  | ||||||
| 
 |  | ||||||
|     ConfigLogFile = "rabbit_not_default.log", |  | ||||||
|     application:set_env(rabbit, log, [{file, [{file, ConfigLogFile}]}]), |  | ||||||
| 
 |  | ||||||
|     set_logs_var_origin(environment), |  | ||||||
|     rabbit_lager:configure_lager(), |  | ||||||
| 
 |  | ||||||
|     ExpectedHandlers = default_expected_handlers(EnvLogFile), |  | ||||||
|     ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))), |  | ||||||
|     ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))). |  | ||||||
| 
 |  | ||||||
| env_var_disable_log(_) -> |  | ||||||
|     application:set_env(rabbit, lager_default_file, false), |  | ||||||
| 
 |  | ||||||
|     ConfigLogFile = "rabbit_not_default.log", |  | ||||||
|     application:set_env(rabbit, log, [{file, [{file, ConfigLogFile}]}]), |  | ||||||
| 
 |  | ||||||
|     set_logs_var_origin(environment), |  | ||||||
|     rabbit_lager:configure_lager(), |  | ||||||
| 
 |  | ||||||
|     ExpectedHandlers = [], |  | ||||||
|     ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))), |  | ||||||
|     ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))). |  | ||||||
| 
 |  | ||||||
| config_file_handler(_) -> |  | ||||||
|     DefaultLogFile = "rabbit_default.log", |  | ||||||
|     application:set_env(rabbit, lager_default_file, DefaultLogFile), |  | ||||||
| 
 |  | ||||||
|     NonDefaultLogFile = "rabbit_not_default.log", |  | ||||||
|     application:set_env(rabbit, log, [{file, [{file, NonDefaultLogFile}]}]), |  | ||||||
| 
 |  | ||||||
|     rabbit_lager:configure_lager(), |  | ||||||
| 
 |  | ||||||
|     ExpectedHandlers = default_expected_handlers(NonDefaultLogFile), |  | ||||||
|     ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))), |  | ||||||
|     ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))). |  | ||||||
| 
 |  | ||||||
| config_file_handler_level(_) -> |  | ||||||
|     DefaultLogFile = "rabbit_default.log", |  | ||||||
|     application:set_env(rabbit, lager_default_file, DefaultLogFile), |  | ||||||
| 
 |  | ||||||
|     application:set_env(rabbit, log, [{file, [{level, warning}]}]), |  | ||||||
|     rabbit_lager:configure_lager(), |  | ||||||
| 
 |  | ||||||
|     ExpectedHandlers = default_expected_handlers(DefaultLogFile, warning), |  | ||||||
|     ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))), |  | ||||||
|     ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))). |  | ||||||
| 
 |  | ||||||
| config_file_handler_rotation(_) -> |  | ||||||
|     DefaultLogFile = "rabbit_default.log", |  | ||||||
|     application:set_env(rabbit, lager_default_file, DefaultLogFile), |  | ||||||
| 
 |  | ||||||
|     application:set_env(rabbit, log, [{file, [{date, "$D0"}, {size, 5000}, {count, 10}]}]), |  | ||||||
|     rabbit_lager:configure_lager(), |  | ||||||
| 
 |  | ||||||
|     ExpectedHandlers = sort_handlers(default_expected_handlers(DefaultLogFile, debug, 5000, "$D0", [{count, 10}])), |  | ||||||
|     ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))), |  | ||||||
|     ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))). |  | ||||||
| 
 |  | ||||||
| default(_) -> |  | ||||||
|     LogRoot = "/tmp/log_base", |  | ||||||
|     application:set_env(rabbit, lager_log_root, LogRoot), |  | ||||||
|     LogFile = "rabbit_default.log", |  | ||||||
|     application:set_env(rabbit, lager_default_file, LogFile), |  | ||||||
|     LogUpgradeFile = "rabbit_default_upgrade.log", |  | ||||||
|     application:set_env(rabbit, lager_upgrade_file, LogUpgradeFile), |  | ||||||
| 
 |  | ||||||
|     ?assertEqual(LogRoot, application:get_env(rabbit, lager_log_root, undefined)), |  | ||||||
|     rabbit_lager:configure_lager(), |  | ||||||
| 
 |  | ||||||
|     ExpectedHandlers = default_expected_handlers(LogFile), |  | ||||||
|     ?assertEqual(LogRoot, application:get_env(lager, log_root, undefined)), |  | ||||||
|     ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))), |  | ||||||
|     ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))), |  | ||||||
| 
 |  | ||||||
|     ExpectedSinks = default_expected_sinks(LogUpgradeFile), |  | ||||||
|     ?assertEqual(ExpectedSinks, sort_sinks(application:get_env(lager, extra_sinks, undefined))). |  | ||||||
| 
 |  | ||||||
| default_expected_handlers(File) -> |  | ||||||
|     default_expected_handlers(File, debug, 0, ""). |  | ||||||
| default_expected_handlers(File, Level) -> |  | ||||||
|     default_expected_handlers(File, Level, 0, ""). |  | ||||||
| default_expected_handlers(File, Level, RotSize, RotDate) -> |  | ||||||
|     default_expected_handlers(File, Level, RotSize, RotDate, []). |  | ||||||
| default_expected_handlers(File, Level, RotSize, RotDate, Extra) -> |  | ||||||
|     [{lager_file_backend, |  | ||||||
|         [{date, RotDate}, |  | ||||||
|          {file, File}, |  | ||||||
|          {formatter_config, formatter_config(file)}, |  | ||||||
|          {level, Level}, |  | ||||||
|          {size, RotSize}] ++ Extra}]. |  | ||||||
| 
 |  | ||||||
| default_expected_sinks(UpgradeFile) -> |  | ||||||
|     [{error_logger_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_channel_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_connection_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_feature_flags_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_federation_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_ldap_lager_event, |  | ||||||
|                [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|                 {rabbit_handlers, |  | ||||||
|                  [{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_mirroring_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_osiris_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers, |  | ||||||
|         [{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_prelaunch_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_queue_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_ra_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers, |  | ||||||
|         [{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_shovel_lager_event, |  | ||||||
|         [{handlers, [{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers, |  | ||||||
|           [{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_upgrade_lager_event, |  | ||||||
|         [{handlers, |  | ||||||
|             [{lager_file_backend, |  | ||||||
|                 [{date,[]}, |  | ||||||
|                  {file, UpgradeFile}, |  | ||||||
|                  {formatter_config, formatter_config(file)}, |  | ||||||
|                  {level,info}, |  | ||||||
|                  {size,0}]}]}, |  | ||||||
|          {rabbit_handlers, |  | ||||||
|             [{lager_file_backend, |  | ||||||
|                 [{date,[]}, |  | ||||||
|                  {file, UpgradeFile}, |  | ||||||
|                  {formatter_config, formatter_config(file)}, |  | ||||||
|                  {level,info}, |  | ||||||
|                  {size,0}]}]}]}]. |  | ||||||
| 
 |  | ||||||
| env_var_tty(_) -> |  | ||||||
|     application:set_env(rabbit, lager_log_root, "/tmp/log_base"), |  | ||||||
|     application:set_env(rabbit, lager_default_file, tty), |  | ||||||
|     application:set_env(rabbit, lager_upgrade_file, tty), |  | ||||||
|     %% tty can only be set explicitly |  | ||||||
|     set_logs_var_origin(environment), |  | ||||||
| 
 |  | ||||||
|     rabbit_lager:configure_lager(), |  | ||||||
| 
 |  | ||||||
|     ExpectedHandlers = tty_expected_handlers(), |  | ||||||
|     ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))), |  | ||||||
|     ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))), |  | ||||||
| 
 |  | ||||||
|     %% Upgrade sink will be different. |  | ||||||
|     ExpectedSinks = tty_expected_sinks(), |  | ||||||
|     ?assertEqual(ExpectedSinks, sort_sinks(application:get_env(lager, extra_sinks, undefined))). |  | ||||||
| 
 |  | ||||||
| set_logs_var_origin(Origin) -> |  | ||||||
|     Context = #{var_origins => #{main_log_file => Origin}}, |  | ||||||
|     rabbit_prelaunch:store_context(Context), |  | ||||||
|     ok. |  | ||||||
| 
 |  | ||||||
| unset_logs_var_origin() -> |  | ||||||
|     rabbit_prelaunch:clear_context_cache(), |  | ||||||
|     ok. |  | ||||||
| 
 |  | ||||||
| tty_expected_handlers() -> |  | ||||||
|     [{lager_console_backend, |  | ||||||
|         [{formatter_config, formatter_config(console)}, |  | ||||||
|          {level, debug}]}]. |  | ||||||
| 
 |  | ||||||
| tty_expected_sinks() -> |  | ||||||
|     [{error_logger_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_channel_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_connection_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_feature_flags_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_federation_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_lager_event, |  | ||||||
|         [{handlers, [{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers, [{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_ldap_lager_event, |  | ||||||
|                [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|                 {rabbit_handlers, |  | ||||||
|                  [{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_mirroring_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_osiris_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers, |  | ||||||
|         [{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_prelaunch_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_queue_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_ra_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers, |  | ||||||
|         [{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_shovel_lager_event, |  | ||||||
|         [{handlers, [{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers, |  | ||||||
|           [{lager_forwarder_backend,[lager_event,info]}]}]}, |  | ||||||
|      {rabbit_log_upgrade_lager_event, |  | ||||||
|         [{handlers,[{lager_forwarder_backend,[lager_event,info]}]}, |  | ||||||
|          {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}]. |  | ||||||
| 
 |  | ||||||
| sort_sinks(Sinks) -> |  | ||||||
|     lists:ukeysort(1, |  | ||||||
|         lists:map( |  | ||||||
|             fun({Name, Config}) -> |  | ||||||
|                 Handlers = proplists:get_value(handlers, Config), |  | ||||||
|                 RabbitHandlers = proplists:get_value(rabbit_handlers, Config), |  | ||||||
|                 {Name, lists:ukeymerge(1, |  | ||||||
|                             [{handlers, sort_handlers(Handlers)}, |  | ||||||
|                              {rabbit_handlers, sort_handlers(RabbitHandlers)}], |  | ||||||
|                             lists:ukeysort(1, Config))} |  | ||||||
|             end, |  | ||||||
|             Sinks)). |  | ||||||
| 
 |  | ||||||
| sort_handlers(Handlers) -> |  | ||||||
|     lists:keysort(1, |  | ||||||
|         lists:map( |  | ||||||
|             fun |  | ||||||
|             ({Name, [{Atom, _}|_] = Config}) when is_atom(Atom) -> |  | ||||||
|                 {Name, lists:ukeysort(1, Config)}; |  | ||||||
|             %% Non-proplist configuration. forwarder backend |  | ||||||
|             (Other) -> |  | ||||||
|                 Other |  | ||||||
|             end, |  | ||||||
|             Handlers)). |  | ||||||
| 
 |  | ||||||
| formatter_config(console) -> |  | ||||||
|     [date," ",time," ",color,"[",severity, "] ", {pid,[]}, " ",message,"\r\n"]; |  | ||||||
| formatter_config(_) -> |  | ||||||
|     [date," ",time," ",color,"[",severity, "] ", {pid,[]}, " ",message,"\n"]. |  | ||||||
| 
 |  | ||||||
| syslog_formatter_config() -> |  | ||||||
|     [color,"[",severity, "] ", {pid,[]}, " ",message,"\n"]. |  | ||||||
|  | @ -25,7 +25,6 @@ all() -> | ||||||
| groups() -> | groups() -> | ||||||
|     [ |     [ | ||||||
|       {non_parallel_tests, [], [ |       {non_parallel_tests, [], [ | ||||||
|           log_management, |  | ||||||
|           log_file_initialised_during_startup, |           log_file_initialised_during_startup, | ||||||
|           log_file_fails_to_initialise_during_startup, |           log_file_fails_to_initialise_during_startup, | ||||||
|           externally_rotated_logs_are_automatically_reopened |           externally_rotated_logs_are_automatically_reopened | ||||||
|  | @ -113,94 +112,6 @@ wait_for_application(Application, Time) -> | ||||||
| %% Log management. | %% Log management. | ||||||
| %% ------------------------------------------------------------------- | %% ------------------------------------------------------------------- | ||||||
| 
 | 
 | ||||||
| log_management(Config) -> |  | ||||||
|     passed = rabbit_ct_broker_helpers:rpc(Config, 0, |  | ||||||
|       ?MODULE, log_management1, [Config]). |  | ||||||
| 
 |  | ||||||
| log_management1(_Config) -> |  | ||||||
|     [LogFile|_] = rabbit:log_locations(), |  | ||||||
|     Suffix = ".0", |  | ||||||
| 
 |  | ||||||
|     ok = test_logs_working([LogFile]), |  | ||||||
| 
 |  | ||||||
|     %% prepare basic logs |  | ||||||
|     file:delete(LogFile ++ Suffix), |  | ||||||
|     ok = test_logs_working([LogFile]), |  | ||||||
| 
 |  | ||||||
|     %% simple log rotation |  | ||||||
|     ok = rabbit:rotate_logs(), |  | ||||||
|     %% rabbit:rotate_logs/0 is asynchronous due to a limitation in |  | ||||||
|     %% Lager. Therefore, we have no choice but to wait an arbitrary |  | ||||||
|     %% amount of time. |  | ||||||
|     ok = rabbit_ct_helpers:await_condition( |  | ||||||
|            fun() -> |  | ||||||
|                    [true, true] =:= |  | ||||||
|                        non_empty_files([LogFile ++ Suffix, LogFile]) |  | ||||||
|            end, 5000), |  | ||||||
|     ok = test_logs_working([LogFile]), |  | ||||||
| 
 |  | ||||||
|     %% log rotation on empty files |  | ||||||
|     ok = clean_logs([LogFile], Suffix), |  | ||||||
|     ok = rabbit:rotate_logs(), |  | ||||||
|     ok = rabbit_ct_helpers:await_condition( |  | ||||||
|            fun() -> |  | ||||||
|                    [true, true] =:= |  | ||||||
|                        non_empty_files([LogFile ++ Suffix, LogFile]) |  | ||||||
|            end, 5000), |  | ||||||
| 
 |  | ||||||
|     %% logs with suffix are not writable |  | ||||||
|     ok = rabbit:rotate_logs(), |  | ||||||
|     ok = rabbit_ct_helpers:await_condition( |  | ||||||
|            fun() -> |  | ||||||
|                    ok =:= make_files_non_writable([LogFile ++ Suffix]) |  | ||||||
|            end, 5000), |  | ||||||
|     ok = rabbit:rotate_logs(), |  | ||||||
|     ok = rabbit_ct_helpers:await_condition( |  | ||||||
|            fun() -> |  | ||||||
|                    ok =:= test_logs_working([LogFile]) |  | ||||||
|            end, 5000), |  | ||||||
| 
 |  | ||||||
|     %% rotate when original log files are not writable |  | ||||||
|     ok = make_files_non_writable([LogFile]), |  | ||||||
|     ok = rabbit:rotate_logs(), |  | ||||||
|     timer:sleep(2000), |  | ||||||
| 
 |  | ||||||
|     %% logging directed to tty (first, remove handlers) |  | ||||||
|     ok = rabbit:stop(), |  | ||||||
|     ok = make_files_writable([LogFile ++ Suffix]), |  | ||||||
|     ok = clean_logs([LogFile], Suffix), |  | ||||||
|     ok = application:set_env(rabbit, lager_default_file, tty), |  | ||||||
|     application:unset_env(rabbit, log), |  | ||||||
|     application:unset_env(lager, handlers), |  | ||||||
|     application:unset_env(lager, extra_sinks), |  | ||||||
|     ok = rabbit:start(), |  | ||||||
|     timer:sleep(200), |  | ||||||
|     rabbit_log:info("test info"), |  | ||||||
| 
 |  | ||||||
|     %% rotate logs when logging is turned off |  | ||||||
|     ok = rabbit:stop(), |  | ||||||
|     ok = clean_logs([LogFile], Suffix), |  | ||||||
|     ok = application:set_env(rabbit, lager_default_file, false), |  | ||||||
|     application:unset_env(rabbit, log), |  | ||||||
|     application:unset_env(lager, handlers), |  | ||||||
|     application:unset_env(lager, extra_sinks), |  | ||||||
|     ok = rabbit:start(), |  | ||||||
|     timer:sleep(200), |  | ||||||
|     rabbit_log:error("test error"), |  | ||||||
|     timer:sleep(200), |  | ||||||
|     ?assertEqual([{error,enoent}], empty_files([LogFile])), |  | ||||||
| 
 |  | ||||||
|     %% cleanup |  | ||||||
|     ok = rabbit:stop(), |  | ||||||
|     ok = clean_logs([LogFile], Suffix), |  | ||||||
|     ok = application:set_env(rabbit, lager_default_file, LogFile), |  | ||||||
|     application:unset_env(rabbit, log), |  | ||||||
|     application:unset_env(lager, handlers), |  | ||||||
|     application:unset_env(lager, extra_sinks), |  | ||||||
|     ok = rabbit:start(), |  | ||||||
|     ok = test_logs_working([LogFile]), |  | ||||||
|     passed. |  | ||||||
| 
 |  | ||||||
| log_file_initialised_during_startup(Config) -> | log_file_initialised_during_startup(Config) -> | ||||||
|     passed = rabbit_ct_broker_helpers:rpc(Config, 0, |     passed = rabbit_ct_broker_helpers:rpc(Config, 0, | ||||||
|       ?MODULE, log_file_initialised_during_startup1, [Config]). |       ?MODULE, log_file_initialised_during_startup1, [Config]). | ||||||
|  | @ -212,10 +123,8 @@ log_file_initialised_during_startup1(_Config) -> | ||||||
|     %% start application with simple tty logging |     %% start application with simple tty logging | ||||||
|     ok = rabbit:stop(), |     ok = rabbit:stop(), | ||||||
|     ok = clean_logs([LogFile], Suffix), |     ok = clean_logs([LogFile], Suffix), | ||||||
|     ok = application:set_env(rabbit, lager_default_file, tty), |     ok = application:set_env(rabbit, log, [{console, [{enabled, true}]}, | ||||||
|     application:unset_env(rabbit, log), |                                            {file, [{file, false}]}]), | ||||||
|     application:unset_env(lager, handlers), |  | ||||||
|     application:unset_env(lager, extra_sinks), |  | ||||||
|     ok = rabbit:start(), |     ok = rabbit:start(), | ||||||
| 
 | 
 | ||||||
|     %% start application with logging to non-existing directory |     %% start application with logging to non-existing directory | ||||||
|  | @ -224,18 +133,14 @@ log_file_initialised_during_startup1(_Config) -> | ||||||
|     delete_file(NonExistent), |     delete_file(NonExistent), | ||||||
|     delete_file(filename:dirname(NonExistent)), |     delete_file(filename:dirname(NonExistent)), | ||||||
|     ok = rabbit:stop(), |     ok = rabbit:stop(), | ||||||
|     ct:pal("Setting lager_default_file to \"~s\"", [NonExistent]), |     io:format("Setting log file to \"~s\"~n", [NonExistent]), | ||||||
|     ok = application:set_env(rabbit, lager_default_file, NonExistent), |     ok = application:set_env(rabbit, log, [{console, [{enabled, true}]}, | ||||||
|     application:unset_env(rabbit, log), |                                            {file, [{file, NonExistent}]}]), | ||||||
|     application:unset_env(lager, handlers), |  | ||||||
|     application:unset_env(lager, extra_sinks), |  | ||||||
|     ok = rabbit:start(), |     ok = rabbit:start(), | ||||||
| 
 | 
 | ||||||
|     %% clean up |     %% clean up | ||||||
|     ok = application:set_env(rabbit, lager_default_file, LogFile), |     ok = application:set_env(rabbit, log, [{console, [{enabled, true}]}, | ||||||
|     application:unset_env(rabbit, log), |                                            {file, [{file, LogFile}]}]), | ||||||
|     application:unset_env(lager, handlers), |  | ||||||
|     application:unset_env(lager, extra_sinks), |  | ||||||
|     ok = rabbit:start(), |     ok = rabbit:start(), | ||||||
|     passed. |     passed. | ||||||
| 
 | 
 | ||||||
|  | @ -277,13 +182,12 @@ log_file_fails_to_initialise_during_startup1(_Config, NonWritableDir) -> | ||||||
|     delete_file(filename:dirname(NoPermission1)), |     delete_file(filename:dirname(NoPermission1)), | ||||||
| 
 | 
 | ||||||
|     ok = rabbit:stop(), |     ok = rabbit:stop(), | ||||||
|     ct:pal("Setting lager_default_file to \"~s\"", [NoPermission1]), |     io:format("Setting log file to \"~s\"~n", [NoPermission1]), | ||||||
|     ok = application:set_env(rabbit, lager_default_file, NoPermission1), |     ok = application:set_env(rabbit, log, [{console, [{enabled, true}]}, | ||||||
|     application:unset_env(rabbit, log), |                                            {file, [{file, NoPermission1}]}]), | ||||||
|     application:unset_env(lager, handlers), |  | ||||||
|     application:unset_env(lager, extra_sinks), |  | ||||||
| 
 | 
 | ||||||
|     ct:pal("`rabbit` application env.: ~p", [application:get_all_env(rabbit)]), |     io:format("rabbit application env.: ~p~n", | ||||||
|  |               [application:get_all_env(rabbit)]), | ||||||
|     ?assertThrow( |     ?assertThrow( | ||||||
|        {error, {rabbit, {{cannot_log_to_file, _, _}, _}}}, |        {error, {rabbit, {{cannot_log_to_file, _, _}, _}}}, | ||||||
|        rabbit:start()), |        rabbit:start()), | ||||||
|  | @ -296,22 +200,19 @@ log_file_fails_to_initialise_during_startup1(_Config, NonWritableDir) -> | ||||||
|     delete_file(NoPermission2), |     delete_file(NoPermission2), | ||||||
|     delete_file(filename:dirname(NoPermission2)), |     delete_file(filename:dirname(NoPermission2)), | ||||||
| 
 | 
 | ||||||
|     ct:pal("Setting lager_default_file to \"~s\"", [NoPermission2]), |     io:format("Setting log file to \"~s\"~n", [NoPermission2]), | ||||||
|     ok = application:set_env(rabbit, lager_default_file, NoPermission2), |     ok = application:set_env(rabbit, log, [{console, [{enabled, true}]}, | ||||||
|     application:unset_env(rabbit, log), |                                            {file, [{file, NoPermission2}]}]), | ||||||
|     application:unset_env(lager, handlers), |  | ||||||
|     application:unset_env(lager, extra_sinks), |  | ||||||
| 
 | 
 | ||||||
|     ct:pal("`rabbit` application env.: ~p", [application:get_all_env(rabbit)]), |     io:format("rabbit application env.: ~p~n", | ||||||
|  |               [application:get_all_env(rabbit)]), | ||||||
|     ?assertThrow( |     ?assertThrow( | ||||||
|        {error, {rabbit, {{cannot_log_to_file, _, _}, _}}}, |        {error, {rabbit, {{cannot_log_to_file, _, _}, _}}}, | ||||||
|        rabbit:start()), |        rabbit:start()), | ||||||
| 
 | 
 | ||||||
|     %% clean up |     %% clean up | ||||||
|     ok = application:set_env(rabbit, lager_default_file, LogFile), |     ok = application:set_env(rabbit, log, [{console, [{enabled, true}]}, | ||||||
|     application:unset_env(rabbit, log), |                                            {file, [{file, LogFile}]}]), | ||||||
|     application:unset_env(lager, handlers), |  | ||||||
|     application:unset_env(lager, extra_sinks), |  | ||||||
|     ok = rabbit:start(), |     ok = rabbit:start(), | ||||||
|     passed. |     passed. | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
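Note on the hunks above: the updated test cases stop driving logging through the removed lager_default_file key and instead set the rabbit application's "log" environment key. A minimal sketch of the same configuration evaluated outside the suite (the info level is an assumed example; console/file structure and the file name are taken from the diff):

    %% Sketch: enable console logging and point the file handler at a log
    %% file via the "log" env key, mirroring the application:set_env/3
    %% calls in the updated test cases above.
    ok = application:set_env(rabbit, log,
                             [{console, [{enabled, true}]},
                              {file, [{file, "rabbit_default.log"},
                                      {level, info}]}]).   %% level is illustrative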
|  | @ -16,7 +16,7 @@ define PROJECT_APP_EXTRA_KEYS | ||||||
| endef | endef | ||||||
| 
 | 
 | ||||||
| LOCAL_DEPS = compiler crypto public_key sasl ssl syntax_tools tools xmerl | LOCAL_DEPS = compiler crypto public_key sasl ssl syntax_tools tools xmerl | ||||||
| DEPS = lager jsx recon credentials_obfuscation | DEPS = jsx recon credentials_obfuscation | ||||||
| 
 | 
 | ||||||
| dep_credentials_obfuscation = git https://github.com/rabbitmq/credentials-obfuscation.git  master | dep_credentials_obfuscation = git https://github.com/rabbitmq/credentials-obfuscation.git  master | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -0,0 +1,19 @@ | ||||||
|  | -define(RMQLOG_SUPER_DOMAIN_NAME, rabbitmq). | ||||||
|  | -define(RMQLOG_DOMAIN_GLOBAL,         [?RMQLOG_SUPER_DOMAIN_NAME]). | ||||||
|  | -define(DEFINE_RMQLOG_DOMAIN(Domain), [?RMQLOG_SUPER_DOMAIN_NAME, Domain]). | ||||||
|  | 
 | ||||||
|  | -define(RMQLOG_DOMAIN_CHAN,       ?DEFINE_RMQLOG_DOMAIN(channel)). | ||||||
|  | -define(RMQLOG_DOMAIN_CONN,       ?DEFINE_RMQLOG_DOMAIN(connection)). | ||||||
|  | -define(RMQLOG_DOMAIN_FEAT_FLAGS, ?DEFINE_RMQLOG_DOMAIN(feature_flags)). | ||||||
|  | -define(RMQLOG_DOMAIN_MIRRORING,  ?DEFINE_RMQLOG_DOMAIN(mirroring)). | ||||||
|  | -define(RMQLOG_DOMAIN_PRELAUNCH,  ?DEFINE_RMQLOG_DOMAIN(prelaunch)). | ||||||
|  | -define(RMQLOG_DOMAIN_QUEUE,      ?DEFINE_RMQLOG_DOMAIN(queue)). | ||||||
|  | -define(RMQLOG_DOMAIN_UPGRADE,    ?DEFINE_RMQLOG_DOMAIN(upgrade)). | ||||||
|  | 
 | ||||||
|  | -define(DEFAULT_LOG_LEVEL, info). | ||||||
|  | -define(FILTER_NAME, rmqlog_filter). | ||||||
|  | 
 | ||||||
|  | -define(IS_STD_H_COMPAT(Mod), | ||||||
|  |         Mod =:= logger_std_h orelse Mod =:= rabbit_logger_std_h). | ||||||
|  | -define(IS_STDDEV(DevName), | ||||||
|  |         DevName =:= standard_io orelse DevName =:= standard_error). | ||||||
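The header added above centralizes the Logger domain names used by RabbitMQ subsystems. A hypothetical call site, not part of this diff (the module name, the message text, and the include path of the header are assumptions), showing how one of these domain macros can be attached to a Logger event:

    -module(rmqlog_domain_example).
    %% Assumed path/name for the header added above.
    -include("logging.hrl").
    -include_lib("kernel/include/logger.hrl").
    -export([queue_declared/1]).

    %% Tag the event with the queue domain so handlers and filters can
    %% route or drop it per subsystem.
    queue_declared(QName) ->
        ?LOG_INFO("queue ~ts declared", [QName],
                  #{domain => ?RMQLOG_DOMAIN_QUEUE}).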
|  | @ -1,8 +0,0 @@ | ||||||
| %% This Source Code Form is subject to the terms of the Mozilla Public |  | ||||||
| %% License, v. 2.0. If a copy of the MPL was not distributed with this |  | ||||||
| %% file, You can obtain one at https://mozilla.org/MPL/2.0/. |  | ||||||
| %% |  | ||||||
| %% Copyright (c) 2017-2020 VMware, Inc. or its affiliates.  All rights reserved. |  | ||||||
| %% |  | ||||||
| 
 |  | ||||||
| -define(LAGER_SINK, rabbit_log_lager_event). |  | ||||||
|  | @ -18,25 +18,6 @@ ifneq ($(filter-out rabbit_common amqp_client,$(PROJECT)),) | ||||||
| RMQ_ERLC_OPTS += -pa $(DEPS_DIR)/rabbitmq_cli/_build/dev/lib/rabbitmqctl/ebin | RMQ_ERLC_OPTS += -pa $(DEPS_DIR)/rabbitmq_cli/_build/dev/lib/rabbitmqctl/ebin | ||||||
| endif | endif | ||||||
| 
 | 
 | ||||||
| # Add Lager parse_transform module and our default Lager extra sinks.
 |  | ||||||
| LAGER_EXTRA_SINKS += rabbit_log \
 |  | ||||||
| 		     rabbit_log_channel \
 |  | ||||||
| 		     rabbit_log_connection \
 |  | ||||||
| 		     rabbit_log_feature_flags \
 |  | ||||||
| 		     rabbit_log_federation \
 |  | ||||||
| 		     rabbit_log_ldap \
 |  | ||||||
| 		     rabbit_log_mirroring \
 |  | ||||||
| 		     rabbit_log_osiris \
 |  | ||||||
| 		     rabbit_log_prelaunch \
 |  | ||||||
| 		     rabbit_log_queue \
 |  | ||||||
| 		     rabbit_log_ra \
 |  | ||||||
| 		     rabbit_log_shovel \
 |  | ||||||
| 		     rabbit_log_upgrade |  | ||||||
| lager_extra_sinks = $(subst $(space),$(comma),$(LAGER_EXTRA_SINKS)) |  | ||||||
| 
 |  | ||||||
| RMQ_ERLC_OPTS += +'{parse_transform,lager_transform}' \
 |  | ||||||
| 		 +'{lager_extra_sinks,[$(lager_extra_sinks)]}' |  | ||||||
| 
 |  | ||||||
| # Push our compilation options to both the normal and test ERLC_OPTS.
 | # Push our compilation options to both the normal and test ERLC_OPTS.
 | ||||||
| ERLC_OPTS += $(RMQ_ERLC_OPTS) | ERLC_OPTS += $(RMQ_ERLC_OPTS) | ||||||
| TEST_ERLC_OPTS += $(RMQ_ERLC_OPTS) | TEST_ERLC_OPTS += $(RMQ_ERLC_OPTS) | ||||||
|  |  | ||||||
|  | @ -117,7 +117,6 @@ dep_accept = hex 0.3.5 | ||||||
| dep_cowboy = hex 2.8.0 | dep_cowboy = hex 2.8.0 | ||||||
| dep_cowlib = hex 2.9.1 | dep_cowlib = hex 2.9.1 | ||||||
| dep_jsx = hex 2.11.0 | dep_jsx = hex 2.11.0 | ||||||
| dep_lager = hex 3.9.1 |  | ||||||
| dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master | dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master | ||||||
| dep_ra = git https://github.com/rabbitmq/ra.git master | dep_ra = git https://github.com/rabbitmq/ra.git master | ||||||
| dep_ranch = hex 2.0.0 | dep_ranch = hex 2.0.0 | ||||||
|  |  | ||||||
|  | @ -164,9 +164,7 @@ define test_rabbitmq_config | ||||||
| [ | [ | ||||||
|   {rabbit, [ |   {rabbit, [ | ||||||
| $(if $(RABBITMQ_NODE_PORT),      {tcp_listeners$(comma) [$(RABBITMQ_NODE_PORT)]}$(comma),) | $(if $(RABBITMQ_NODE_PORT),      {tcp_listeners$(comma) [$(RABBITMQ_NODE_PORT)]}$(comma),) | ||||||
|       {loopback_users, []}, |       {loopback_users, []} | ||||||
|       {log, [{file, [{level, debug}]}, |  | ||||||
|              {console, [{level, debug}]}]} |  | ||||||
|     ]}, |     ]}, | ||||||
|   {rabbitmq_management, [ |   {rabbitmq_management, [ | ||||||
| $(if $(RABBITMQ_NODE_PORT),      {listener$(comma) [{port$(comma) $(shell echo "$$(($(RABBITMQ_NODE_PORT) + 10000))")}]},) | $(if $(RABBITMQ_NODE_PORT),      {listener$(comma) [{port$(comma) $(shell echo "$$(($(RABBITMQ_NODE_PORT) + 10000))")}]},) | ||||||
|  | @ -184,19 +182,6 @@ $(if $(RABBITMQ_NODE_PORT),      {tcp_listeners$(comma) [$(shell echo "$$((5551 | ||||||
|       {data_dir, "$(RABBITMQ_QUORUM_DIR)"}, |       {data_dir, "$(RABBITMQ_QUORUM_DIR)"}, | ||||||
|       {wal_sync_method, sync} |       {wal_sync_method, sync} | ||||||
|     ]}, |     ]}, | ||||||
|   {lager, [ |  | ||||||
|       {colors, [ |  | ||||||
|           %% https://misc.flogisoft.com/bash/tip_colors_and_formatting |  | ||||||
|           {debug,     "\\\e[0;34m" }, |  | ||||||
|           {info,      "\\\e[1;37m" }, |  | ||||||
|           {notice,    "\\\e[1;36m" }, |  | ||||||
|           {warning,   "\\\e[1;33m" }, |  | ||||||
|           {error,     "\\\e[1;31m" }, |  | ||||||
|           {critical,  "\\\e[1;35m" }, |  | ||||||
|           {alert,     "\\\e[1;44m" }, |  | ||||||
|           {emergency, "\\\e[1;41m" } |  | ||||||
|       ]} |  | ||||||
|     ]}, |  | ||||||
|   {osiris, [ |   {osiris, [ | ||||||
|       {data_dir, "$(RABBITMQ_STREAM_DIR)"} |       {data_dir, "$(RABBITMQ_STREAM_DIR)"} | ||||||
|     ]} |     ]} | ||||||
|  | @ -209,8 +194,6 @@ define test_rabbitmq_config_with_tls | ||||||
| [ | [ | ||||||
|   {rabbit, [ |   {rabbit, [ | ||||||
|       {loopback_users, []}, |       {loopback_users, []}, | ||||||
|       {log, [{file, [{level, debug}]}, |  | ||||||
|              {console, [{level, debug}]}]}, |  | ||||||
|       {ssl_listeners, [5671]}, |       {ssl_listeners, [5671]}, | ||||||
|       {ssl_options, [ |       {ssl_options, [ | ||||||
|           {cacertfile, "$(TEST_TLS_CERTS_DIR_in_config)/testca/cacert.pem"}, |           {cacertfile, "$(TEST_TLS_CERTS_DIR_in_config)/testca/cacert.pem"}, | ||||||
|  | @ -237,19 +220,6 @@ define test_rabbitmq_config_with_tls | ||||||
|       {data_dir, "$(RABBITMQ_QUORUM_DIR)"}, |       {data_dir, "$(RABBITMQ_QUORUM_DIR)"}, | ||||||
|       {wal_sync_method, sync} |       {wal_sync_method, sync} | ||||||
|     ]}, |     ]}, | ||||||
|   {lager, [ |  | ||||||
|       {colors, [ |  | ||||||
|           %% https://misc.flogisoft.com/bash/tip_colors_and_formatting |  | ||||||
|           {debug,     "\\\e[0;34m" }, |  | ||||||
|           {info,      "\\\e[1;37m" }, |  | ||||||
|           {notice,    "\\\e[1;36m" }, |  | ||||||
|           {warning,   "\\\e[1;33m" }, |  | ||||||
|           {error,     "\\\e[1;31m" }, |  | ||||||
|           {critical,  "\\\e[1;35m" }, |  | ||||||
|           {alert,     "\\\e[1;44m" }, |  | ||||||
|           {emergency, "\\\e[1;41m" } |  | ||||||
|       ]} |  | ||||||
|     ]}, |  | ||||||
|   {osiris, [ |   {osiris, [ | ||||||
|       {data_dir, "$(RABBITMQ_STREAM_DIR)"} |       {data_dir, "$(RABBITMQ_STREAM_DIR)"} | ||||||
|     ]} |     ]} | ||||||
|  |  | ||||||
|  | @ -1083,8 +1083,8 @@ init([AlarmSet, AlarmClear]) -> | ||||||
|                     end |                     end | ||||||
|             end, |             end, | ||||||
|     ObtainLimit = obtain_limit(Limit), |     ObtainLimit = obtain_limit(Limit), | ||||||
|     error_logger:info_msg("Limiting to approx ~p file handles (~p sockets)~n", |     logger:info("Limiting to approx ~p file handles (~p sockets)", | ||||||
|                           [Limit, ObtainLimit]), |                  [Limit, ObtainLimit]), | ||||||
|     Clients = ets:new(?CLIENT_ETS_TABLE, [set, private, {keypos, #cstate.pid}]), |     Clients = ets:new(?CLIENT_ETS_TABLE, [set, private, {keypos, #cstate.pid}]), | ||||||
|     Elders = ets:new(?ELDERS_ETS_TABLE, [set, private]), |     Elders = ets:new(?ELDERS_ETS_TABLE, [set, private]), | ||||||
|     {ok, #fhc_state { elders                = Elders, |     {ok, #fhc_state { elders                = Elders, | ||||||
|  |  | ||||||
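The hunk above shows the mechanical part of the migration: legacy error_logger:*_msg/2 calls become logger:*/2, and the trailing "~n" is dropped because the default Logger formatter appends the line break itself. A small shell-style sketch with the same call shape (the bound values are illustrative, not from the diff):

    %% error_logger:info_msg(Fmt ++ "~n", Args) becomes logger:info(Fmt, Args).
    Limit = 1024,          %% illustrative values
    ObtainLimit = 922,
    logger:info("Limiting to approx ~p file handles (~p sockets)",
                [Limit, ObtainLimit]).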
|  | @ -1,120 +0,0 @@ | ||||||
| %% This Source Code Form is subject to the terms of the Mozilla Public |  | ||||||
| %% License, v. 2.0. If a copy of the MPL was not distributed with this |  | ||||||
| %% file, You can obtain one at https://mozilla.org/MPL/2.0/. |  | ||||||
| %% |  | ||||||
| %% Copyright (c) 2007-2021 VMware, Inc. or its affiliates.  All rights reserved. |  | ||||||
| %% |  | ||||||
| 
 |  | ||||||
| -module(lager_forwarder_backend). |  | ||||||
| 
 |  | ||||||
| -behaviour(gen_event). |  | ||||||
| 
 |  | ||||||
| -export([init/1, handle_call/2, handle_event/2, handle_info/2, terminate/2, |  | ||||||
|         code_change/3]). |  | ||||||
| 
 |  | ||||||
| -record(state, { |  | ||||||
|     next_sink :: atom(), |  | ||||||
|     level :: {'mask', integer()} | inherit |  | ||||||
|   }). |  | ||||||
| 
 |  | ||||||
| %% @private |  | ||||||
| init(Sink) when is_atom(Sink) -> |  | ||||||
|     init([Sink]); |  | ||||||
| init([Sink]) when is_atom(Sink) -> |  | ||||||
|     init([Sink, inherit]); |  | ||||||
| init([Sink, inherit]) when is_atom(Sink) -> |  | ||||||
|     {ok, #state{ |  | ||||||
|         next_sink = Sink, |  | ||||||
|         level = inherit |  | ||||||
|       }}; |  | ||||||
| init([Sink, Level]) when is_atom(Sink) -> |  | ||||||
|     try |  | ||||||
|         Mask = lager_util:config_to_mask(Level), |  | ||||||
|         {ok, #state{ |  | ||||||
|             next_sink = Sink, |  | ||||||
|             level = Mask |  | ||||||
|           }} |  | ||||||
|     catch |  | ||||||
|         _:_ -> |  | ||||||
|             {error, {fatal, bad_log_level}} |  | ||||||
|     end; |  | ||||||
| init(_) -> |  | ||||||
|     {error, {fatal, bad_config}}. |  | ||||||
| 
 |  | ||||||
| %% @private |  | ||||||
| handle_call(get_loglevel, #state{next_sink = Sink, level = inherit} = State) -> |  | ||||||
|     SinkPid = whereis(Sink), |  | ||||||
|     Mask = case self() of |  | ||||||
|         SinkPid -> |  | ||||||
|             %% Avoid direct loops, defaults to 'info'. |  | ||||||
|             127; |  | ||||||
|         _ -> |  | ||||||
|             try |  | ||||||
|                 Levels = [gen_event:call(SinkPid, Handler, get_loglevel, |  | ||||||
|                                          infinity) |  | ||||||
|                           || Handler <- gen_event:which_handlers(SinkPid)], |  | ||||||
|                 lists:foldl(fun |  | ||||||
|                       ({mask, Mask}, Acc) -> |  | ||||||
|                           Mask bor Acc; |  | ||||||
|                       (Level, Acc) when is_integer(Level) -> |  | ||||||
|                           {mask, Mask} = lager_util:config_to_mask( |  | ||||||
|                             lager_util:num_to_level(Level)), |  | ||||||
|                           Mask bor Acc; |  | ||||||
|                       (_, Acc) -> |  | ||||||
|                           Acc |  | ||||||
|                   end, 0, Levels) |  | ||||||
|             catch |  | ||||||
|                 exit:noproc -> |  | ||||||
|                     127 |  | ||||||
|             end |  | ||||||
|     end, |  | ||||||
|     {ok, {mask, Mask}, State}; |  | ||||||
| handle_call(get_loglevel, #state{level = Mask} = State) -> |  | ||||||
|     {ok, Mask, State}; |  | ||||||
| handle_call({set_loglevel, inherit}, State) -> |  | ||||||
|     {ok, ok, State#state{level = inherit}}; |  | ||||||
| handle_call({set_loglevel, Level}, State) -> |  | ||||||
|     try lager_util:config_to_mask(Level) of |  | ||||||
|         Mask -> |  | ||||||
|             {ok, ok, State#state{level = Mask}} |  | ||||||
|     catch |  | ||||||
|         _:_ -> |  | ||||||
|             {ok, {error, bad_log_level}, State} |  | ||||||
|     end; |  | ||||||
| handle_call(_Request, State) -> |  | ||||||
|     {ok, ok, State}. |  | ||||||
| 
 |  | ||||||
| %% @private |  | ||||||
| handle_event({log, LagerMsg}, #state{next_sink = Sink, level = Mask} = State) -> |  | ||||||
|     SinkPid = whereis(Sink), |  | ||||||
|     case self() of |  | ||||||
|         SinkPid -> |  | ||||||
|             %% Avoid direct loops. |  | ||||||
|             ok; |  | ||||||
|         _ -> |  | ||||||
|             case Mask =:= inherit orelse |  | ||||||
|                  lager_util:is_loggable(LagerMsg, Mask, ?MODULE) of |  | ||||||
|                 true -> |  | ||||||
|                     case lager_config:get({Sink, async}, false) of |  | ||||||
|                         true  -> gen_event:notify(SinkPid, {log, LagerMsg}); |  | ||||||
|                         false -> gen_event:sync_notify(SinkPid, {log, LagerMsg}) |  | ||||||
|                     end; |  | ||||||
|                 false -> |  | ||||||
|                     ok |  | ||||||
|             end |  | ||||||
|     end, |  | ||||||
|     {ok, State}; |  | ||||||
| handle_event(_Event, State) -> |  | ||||||
|     {ok, State}. |  | ||||||
| 
 |  | ||||||
| %% @private |  | ||||||
| handle_info(_Info, State) -> |  | ||||||
|     {ok, State}. |  | ||||||
| 
 |  | ||||||
| %% @private |  | ||||||
| terminate(_Reason, _State) -> |  | ||||||
|     ok. |  | ||||||
| 
 |  | ||||||
| %% @private |  | ||||||
| code_change(_OldVsn, State, _Extra) -> |  | ||||||
|     {ok, State}. |  | ||||||
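The lager_forwarder_backend deleted above existed only to relay events from the per-category Lager sinks (rabbit_log_*_lager_event) into the main sink. With the Erlang Logger, that routing role is played by domain metadata plus handler filters. A hedged sketch of such a filter, reusing the rmqlog_filter name and the rabbitmq super domain defined in the new header, but not code taken from this PR:

    %% Accept events whose domain is [rabbitmq] or a sub-domain of it;
    %% events with other domains fall through to the handler's
    %% filter_default setting.
    ok = logger:add_handler_filter(default, rmqlog_filter,
             {fun logger_filters:domain/2, {log, sub, [rabbitmq]}}).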
|  | @ -14,11 +14,11 @@ amqp_params(ConnPid, Timeout) -> | ||||||
|     P = try |     P = try | ||||||
|             gen_server:call(ConnPid, {info, [amqp_params]}, Timeout) |             gen_server:call(ConnPid, {info, [amqp_params]}, Timeout) | ||||||
|         catch exit:{noproc, Error} -> |         catch exit:{noproc, Error} -> | ||||||
|                 rabbit_log:debug("file ~p, line ~p - connection process ~p not alive: ~p~n", |                 rabbit_log:debug("file ~p, line ~p - connection process ~p not alive: ~p", | ||||||
|                                  [?FILE, ?LINE, ConnPid, Error]), |                                  [?FILE, ?LINE, ConnPid, Error]), | ||||||
|             []; |             []; | ||||||
|               _:Error -> |               _:Error -> | ||||||
|                 rabbit_log:debug("file ~p, line ~p - failed to get amqp_params from connection process ~p: ~p~n", |                 rabbit_log:debug("file ~p, line ~p - failed to get amqp_params from connection process ~p: ~p", | ||||||
|                                  [?FILE, ?LINE, ConnPid, Error]), |                                  [?FILE, ?LINE, ConnPid, Error]), | ||||||
|             [] |             [] | ||||||
|         end, |         end, | ||||||
|  |  | ||||||
|  | @ -223,7 +223,7 @@ lookup_amqp_exception(#amqp_error{name        = Name, | ||||||
|     ExplBin = amqp_exception_explanation(Text, Expl), |     ExplBin = amqp_exception_explanation(Text, Expl), | ||||||
|     {ShouldClose, Code, ExplBin, Method}; |     {ShouldClose, Code, ExplBin, Method}; | ||||||
| lookup_amqp_exception(Other, Protocol) -> | lookup_amqp_exception(Other, Protocol) -> | ||||||
|     rabbit_log:warning("Non-AMQP exit reason '~p'~n", [Other]), |     rabbit_log:warning("Non-AMQP exit reason '~p'", [Other]), | ||||||
|     {ShouldClose, Code, Text} = Protocol:lookup_amqp_exception(internal_error), |     {ShouldClose, Code, Text} = Protocol:lookup_amqp_exception(internal_error), | ||||||
|     {ShouldClose, Code, Text, none}. |     {ShouldClose, Code, Text, none}. | ||||||
| 
 | 
 | ||||||
|  |  | ||||||