This commit is contained in:
kjnilsson 2018-06-04 21:56:06 +01:00
commit e08ee11ca3
21 changed files with 573 additions and 105 deletions

View File

@ -1,5 +1,5 @@
This package, the RabbitMQ server is licensed under the MPL. For the
MPL, please see LICENSE-MPL-RabbitMQ.
This package, the RabbitMQ server is licensed under the MPL 1.1. For the
MPL 1.1, please see LICENSE-MPL-RabbitMQ.
If you have any questions regarding licensing, please contact us at
info@rabbitmq.com.

View File

@ -24,7 +24,8 @@ define PROJECT_ENV
%% 0 ("no limit") would make a better default, but that
%% breaks the QPid Java client
{frame_max, 131072},
{channel_max, 0},
%% see rabbitmq-server#1593
{channel_max, 2047},
{connection_max, infinity},
{heartbeat, 60},
{msg_store_file_size_limit, 16777216},
@ -62,7 +63,7 @@ define PROJECT_ENV
]},
{halt_on_upgrade_failure, true},
{hipe_compile, false},
%% see bug 24513 for how this list was created
%% see bug 24513 [in legacy Bugzilla] for how this list was created
{hipe_modules,
[rabbit_reader, rabbit_channel, gen_server2, rabbit_exchange,
rabbit_command_assembler, rabbit_framing_amqp_0_9_1, rabbit_basic,
@ -134,9 +135,11 @@ endef
LOCAL_DEPS = sasl mnesia os_mon inets
BUILD_DEPS = rabbitmq_cli
DEPS = ranch lager rabbit_common ra
DEPS = ranch syslog lager rabbit_common ra
TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers amqp_client meck proper
dep_syslog = git https://github.com/schlagert/syslog 3.4.2
define usage_xml_to_erl
$(subst __,_,$(patsubst $(DOCS_DIR)/rabbitmq%.1.xml, src/rabbit_%_usage.erl, $(subst -,_,$(1))))
endef

View File

@ -33,9 +33,9 @@
See [CONTRIBUTING.md](./CONTRIBUTING.md) and our [development process overview](https://rabbitmq.com/github.html).
## License
## Licensing
RabbitMQ server is [licensed under the MPL](LICENSE-MPL-RabbitMQ).
RabbitMQ server is [licensed under the MPL 1.1](LICENSE-MPL-RabbitMQ).
## Building From Source and Packaging

View File

@ -213,7 +213,7 @@
## * http://rabbitmq.com/heartbeats.html
## * http://rabbitmq.com/networking.html
##
# heartbeat = 600
# heartbeat = 60
## Set the max permissible size of an AMQP frame (in bytes).
##

View File

@ -1070,13 +1070,168 @@ end}.
{mapping, "log.syslog.level", "rabbit.log.syslog.level", [
{datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
]}.
{mapping, "log.syslog.identity", "rabbit.log.syslog.identity", [
{mapping, "log.syslog.identity", "syslog.app_name", [
{datatype, string}
]}.
{mapping, "log.syslog.facility", "rabbit.log.syslog.facility", [
{datatype, atom}
{mapping, "log.syslog.facility", "syslog.facility", [
{datatype, {enum, [kern, kernel, user, mail, daemon, auth, syslog, lpr,
news, uucp, cron, authpriv, ftp, ntp, audit, alert,
clock, local0, local1, local2, local3, local4,
local5, local6, local7]}}
]}.
{mapping, "log.syslog.multiline_mode", "syslog.multiline_mode", [
{datatype, {enum, [true, false]}}
]}.
{mapping, "log.syslog.ip", "syslog.dest_host", [
{datatype, string},
{validators, ["is_ip"]}
]}.
{mapping, "log.syslog.port", "syslog.dest_port", [
{datatype, integer}
]}.
{mapping, "log.syslog.transport", "syslog.protocol", [
{datatype, {enum, [udp, tcp, tls, ssl]}}
]}.
{mapping, "log.syslog.protocol", "syslog.protocol", [
{datatype, {enum, [rfc3164, rfc5424]}}
]}.
{mapping, "log.syslog.ssl_options.verify", "syslog.protocol", [
{datatype, {enum, [verify_peer, verify_none]}}]}.
{mapping, "log.syslog.ssl_options.fail_if_no_peer_cert", "syslog.protocol", [
{datatype, {enum, [true, false]}}]}.
{mapping, "log.syslog.ssl_options.cacertfile", "syslog.protocol",
[{datatype, string}, {validators, ["file_accessible"]}]}.
{mapping, "log.syslog.ssl_options.certfile", "syslog.protocol",
[{datatype, string}, {validators, ["file_accessible"]}]}.
{mapping, "log.syslog.ssl_options.cacerts.$name", "syslog.protocol",
[{datatype, string}]}.
{mapping, "log.syslog.ssl_options.cert", "syslog.protocol",
[{datatype, string}]}.
{mapping, "log.syslog.ssl_options.client_renegotiation", "syslog.protocol",
[{datatype, {enum, [true, false]}}]}.
{mapping, "log.syslog.ssl_options.crl_check", "syslog.protocol",
[{datatype, [{enum, [true, false, peer, best_effort]}]}]}.
{mapping, "log.syslog.ssl_options.depth", "syslog.protocol",
[{datatype, integer}, {validators, ["byte"]}]}.
{mapping, "log.syslog.ssl_options.dh", "syslog.protocol",
[{datatype, string}]}.
{mapping, "log.syslog.ssl_options.dhfile", "syslog.protocol",
[{datatype, string}, {validators, ["file_accessible"]}]}.
{mapping, "log.syslog.ssl_options.honor_cipher_order", "syslog.protocol",
[{datatype, {enum, [true, false]}}]}.
{mapping, "log.syslog.ssl_options.honor_ecc_order", "syslog.protocol",
[{datatype, {enum, [true, false]}}]}.
{mapping, "log.syslog.ssl_options.key.RSAPrivateKey", "syslog.protocol",
[{datatype, string}]}.
{mapping, "log.syslog.ssl_options.key.DSAPrivateKey", "syslog.protocol",
[{datatype, string}]}.
{mapping, "log.syslog.ssl_options.key.PrivateKeyInfo", "syslog.protocol",
[{datatype, string}]}.
{mapping, "log.syslog.ssl_options.keyfile", "syslog.protocol",
[{datatype, string}, {validators, ["file_accessible"]}]}.
{mapping, "log.syslog.ssl_options.log_alert", "syslog.protocol",
[{datatype, {enum, [true, false]}}]}.
{mapping, "log.syslog.ssl_options.password", "syslog.protocol",
[{datatype, string}]}.
{mapping, "log.syslog.ssl_options.psk_identity", "syslog.protocol",
[{datatype, string}]}.
{mapping, "log.syslog.ssl_options.reuse_sessions", "syslog.protocol",
[{datatype, {enum, [true, false]}}]}.
{mapping, "log.syslog.ssl_options.secure_renegotiate", "syslog.protocol",
[{datatype, {enum, [true, false]}}]}.
{mapping, "log.syslog.ssl_options.versions.$version", "syslog.protocol",
[{datatype, atom}]}.
%% Translates the various log.syslog.* settings into the single
%% 'protocol' configuration term understood by the syslog application:
%% either {Protocol, Transport} or {rfc5424, tls, SslOptions}.
%% rfc3164 framing cannot be used over TLS; TLS always implies rfc5424.
{translation, "syslog.protocol",
fun(Conf) ->
    %% Gather the user-supplied TLS options. Values the ssl application
    %% expects as binaries (dh, cert, cacerts, key) are converted from
    %% strings; options left unset are omitted from the result entirely.
    ParseSslOptions = fun() ->
        RawSettings = [
            {verify, cuttlefish:conf_get("log.syslog.ssl_options.verify", Conf, undefined)},
            {fail_if_no_peer_cert, cuttlefish:conf_get("log.syslog.ssl_options.fail_if_no_peer_cert", Conf, undefined)},
            {cacertfile, cuttlefish:conf_get("log.syslog.ssl_options.cacertfile", Conf, undefined)},
            {certfile, cuttlefish:conf_get("log.syslog.ssl_options.certfile", Conf, undefined)},
            {cert, cuttlefish:conf_get("log.syslog.ssl_options.cert", Conf, undefined)},
            {client_renegotiation, cuttlefish:conf_get("log.syslog.ssl_options.client_renegotiation", Conf, undefined)},
            {crl_check, cuttlefish:conf_get("log.syslog.ssl_options.crl_check", Conf, undefined)},
            {depth, cuttlefish:conf_get("log.syslog.ssl_options.depth", Conf, undefined)},
            {dh, cuttlefish:conf_get("log.syslog.ssl_options.dh", Conf, undefined)},
            {dhfile, cuttlefish:conf_get("log.syslog.ssl_options.dhfile", Conf, undefined)},
            {honor_cipher_order, cuttlefish:conf_get("log.syslog.ssl_options.honor_cipher_order", Conf, undefined)},
            {honor_ecc_order, cuttlefish:conf_get("log.syslog.ssl_options.honor_ecc_order", Conf, undefined)},
            {keyfile, cuttlefish:conf_get("log.syslog.ssl_options.keyfile", Conf, undefined)},
            {log_alert, cuttlefish:conf_get("log.syslog.ssl_options.log_alert", Conf, undefined)},
            {password, cuttlefish:conf_get("log.syslog.ssl_options.password", Conf, undefined)},
            {psk_identity, cuttlefish:conf_get("log.syslog.ssl_options.psk_identity", Conf, undefined)},
            {reuse_sessions, cuttlefish:conf_get("log.syslog.ssl_options.reuse_sessions", Conf, undefined)},
            {secure_renegotiate, cuttlefish:conf_get("log.syslog.ssl_options.secure_renegotiate", Conf, undefined)}
        ],
        DefinedSettings = [{K, V} || {K, V} <- RawSettings, V =/= undefined],
        lists:map(
            fun({K, Val}) when K == dh; K == cert -> {K, list_to_binary(Val)};
               ({K, Val}) -> {K, Val}
            end,
            DefinedSettings) ++
        %% Multi-valued settings (cacerts, versions) and the private key
        %% are collected by prefix; empty/unset ones are dropped.
        [ {K, V}
          || {K, V} <-
             [{cacerts, [ list_to_binary(V) || {_, V} <- cuttlefish_variable:filter_by_prefix("log.syslog.ssl_options.cacerts", Conf)]},
              {versions, [ V || {_, V} <- cuttlefish_variable:filter_by_prefix("log.syslog.ssl_options.versions", Conf) ]},
              {key, case cuttlefish_variable:filter_by_prefix("log.syslog.ssl_options.key", Conf) of
                        [{[_,_,Key], Val}|_] -> {list_to_atom(Key), list_to_binary(Val)};
                        _ -> undefined
                    end}],
             V =/= undefined,
             V =/= []]
    end,
    Proto = cuttlefish:conf_get("log.syslog.protocol", Conf, undefined),
    Transport = cuttlefish:conf_get("log.syslog.transport", Conf, udp),
    case Transport of
        TLS when TLS == tls; TLS == ssl ->
            case Proto of
                rfc3164 ->
                    cuttlefish:invalid("Syslog protocol rfc3164 is not compatible with TLS");
                _ ->
                    {rfc5424, tls, ParseSslOptions()}
            end;
        _ when Transport == udp; Transport == tcp ->
            %% Plain transports default to the older rfc3164 framing.
            case Proto of
                undefined -> {rfc3164, Transport};
                _ -> {Proto, Transport}
            end;
        _ ->
            %% BUG FIX: cuttlefish:invalid/1 takes a plain string, not a
            %% format string plus argument list, so render the message
            %% before raising the validation error.
            cuttlefish:invalid(
                lists:flatten(
                    io_lib:format("Invalid syslog transport ~p", [Transport])))
    end
end}.
{mapping, "log.file", "rabbit.log.file.file", [
{datatype, [{enum, [false]}, string]}
]}.

View File

@ -120,6 +120,25 @@ if [ ! -f "${RABBITMQ_SCHEMA_DIR}/rabbit.schema" ]; then
cp "${RABBITMQ_HOME}/priv/schema/rabbit.schema" "${RABBITMQ_SCHEMA_DIR}"
fi
# The default allocation strategy RabbitMQ is using was introduced
# in Erlang/OTP 20.2.3. Earlier Erlang versions fail to start with
# this configuration. We therefore need to ensure that erl accepts
# these values before we can use them.
#
# The defaults are meant to reduce RabbitMQ's memory usage and help
# it reclaim memory at the cost of a slight decrease in performance
# (due to an increase in memory operations). These defaults can be
# overridden using the RABBITMQ_SERVER_ERL_ARGS variable.
RABBITMQ_DEFAULT_ALLOC_ARGS="+MBas ageffcbf +MHas ageffcbf +MBlmbcs 512 +MHlmbcs 512 +MMmcs 30"
${ERL_DIR}erl ${RABBITMQ_DEFAULT_ALLOC_ARGS} \
-boot "${CLEAN_BOOT_FILE}" \
-noinput -eval 'halt(0)' 2>/dev/null
if [ $? != 0 ] ; then
RABBITMQ_DEFAULT_ALLOC_ARGS=
fi
set -e
RABBITMQ_CONFIG_FILE_NOEX="${RABBITMQ_CONFIG_FILE%.*}"
@ -213,6 +232,7 @@ start_rabbitmq_server() {
${RABBITMQ_CONFIG_ARG} \
+W w \
+A ${RABBITMQ_IO_THREAD_POOL_SIZE} \
${RABBITMQ_DEFAULT_ALLOC_ARGS} \
${RABBITMQ_SERVER_ERL_ARGS} \
+K true \
-kernel inet_default_connect_options "[{nodelay,true}]" \

View File

@ -72,6 +72,27 @@ if ERRORLEVEL 2 (
set RABBITMQ_DIST_ARG=-kernel inet_dist_listen_min !RABBITMQ_DIST_PORT! -kernel inet_dist_listen_max !RABBITMQ_DIST_PORT!
)
rem The default allocation strategy RabbitMQ is using was introduced
rem in Erlang/OTP 20.2.3. Earlier Erlang versions fail to start with
rem this configuration. We therefore need to ensure that erl accepts
rem these values before we can use them.
rem
rem The defaults are meant to reduce RabbitMQ's memory usage and help
rem it reclaim memory at the cost of a slight decrease in performance
rem (due to an increase in memory operations). These defaults can be
rem overridden using the RABBITMQ_SERVER_ERL_ARGS variable.
set RABBITMQ_DEFAULT_ALLOC_ARGS=+MBas ageffcbf +MHas ageffcbf +MBlmbcs 512 +MHlmbcs 512 +MMmcs 30
"!ERLANG_HOME!\bin\erl.exe" ^
!RABBITMQ_DEFAULT_ALLOC_ARGS! ^
-boot !CLEAN_BOOT_FILE! ^
-noinput -eval "halt(0)"
if ERRORLEVEL 1 (
set RABBITMQ_DEFAULT_ALLOC_ARGS=
)
if not exist "!RABBITMQ_SCHEMA_DIR!" (
mkdir "!RABBITMQ_SCHEMA_DIR!"
)
@ -173,6 +194,7 @@ if "!ENV_OK!"=="false" (
!RABBITMQ_NAME_TYPE! !RABBITMQ_NODENAME! ^
+W w ^
+A "!RABBITMQ_IO_THREAD_POOL_SIZE!" ^
!RABBITMQ_DEFAULT_ALLOC_ARGS! ^
!RABBITMQ_SERVER_ERL_ARGS! ^
!RABBITMQ_LISTEN_ARG! ^
-kernel inet_default_connect_options "[{nodelay, true}]" ^

View File

@ -160,6 +160,27 @@ if ERRORLEVEL 3 (
set RABBITMQ_DIST_ARG=-kernel inet_dist_listen_min !RABBITMQ_DIST_PORT! -kernel inet_dist_listen_max !RABBITMQ_DIST_PORT!
)
rem The default allocation strategy RabbitMQ is using was introduced
rem in Erlang/OTP 20.2.3. Earlier Erlang versions fail to start with
rem this configuration. We therefore need to ensure that erl accepts
rem these values before we can use them.
rem
rem The defaults are meant to reduce RabbitMQ's memory usage and help
rem it reclaim memory at the cost of a slight decrease in performance
rem (due to an increase in memory operations). These defaults can be
rem overridden using the RABBITMQ_SERVER_ERL_ARGS variable.
set RABBITMQ_DEFAULT_ALLOC_ARGS=+MBas ageffcbf +MHas ageffcbf +MBlmbcs 512 +MHlmbcs 512 +MMmcs 30
"!ERLANG_HOME!\bin\erl.exe" ^
!RABBITMQ_DEFAULT_ALLOC_ARGS! ^
-boot !CLEAN_BOOT_FILE! ^
-noinput -eval "halt(0)"
if ERRORLEVEL 1 (
set RABBITMQ_DEFAULT_ALLOC_ARGS=
)
if not exist "!RABBITMQ_SCHEMA_DIR!" (
mkdir "!RABBITMQ_SCHEMA_DIR!"
)
@ -254,6 +275,7 @@ set ERLANG_SERVICE_ARGUMENTS= ^
!RABBITMQ_CONFIG_ARG! ^
+W w ^
+A "!RABBITMQ_IO_THREAD_POOL_SIZE!" ^
!RABBITMQ_DEFAULT_ALLOC_ARGS! ^
!RABBITMQ_SERVER_ERL_ARGS! ^
!RABBITMQ_LISTEN_ARG! ^
-kernel inet_default_connect_options "[{nodelay,true}]" ^

View File

@ -626,7 +626,7 @@ declare_args() ->
{<<"x-dead-letter-routing-key">>, fun check_dlxrk_arg/2},
{<<"x-max-length">>, fun check_non_neg_int_arg/2},
{<<"x-max-length-bytes">>, fun check_non_neg_int_arg/2},
{<<"x-max-priority">>, fun check_non_neg_int_arg/2},
{<<"x-max-priority">>, fun check_max_priority_arg/2},
{<<"x-overflow">>, fun check_overflow/2},
{<<"x-queue-mode">>, fun check_queue_mode/2},
{<<"x-queue-type">>, fun check_queue_type/2}].
@ -663,6 +663,13 @@ check_message_ttl_arg({Type, Val}, Args) ->
Error -> Error
end.
%% Validate the "x-max-priority" queue argument: it must be a
%% non-negative integer no greater than ?MAX_SUPPORTED_PRIORITY.
%% Returns ok, {error, {max_value_exceeded, Val}}, or the error from
%% the underlying non-negative-integer check.
check_max_priority_arg({_Type, Val} = Arg, Args) ->
    case check_non_neg_int_arg(Arg, Args) of
        ok when Val > ?MAX_SUPPORTED_PRIORITY ->
            {error, {max_value_exceeded, Val}};
        OkOrError ->
            OkOrError
    end.
%% Note that the validity of x-dead-letter-exchange is already verified
%% by rabbit_channel's queue.declare handler.
check_dlxname_arg({longstr, _}, _) -> ok;
@ -1123,7 +1130,7 @@ internal_delete(QueueName, ActingUser) ->
?INTERNAL_USER),
fun() ->
ok = T(),
rabbit_core_metrics:queue_deleted(QueueName),
rabbit_core_metrics:queue_deleted(QueueName),
ok = rabbit_event:notify(queue_deleted,
[{name, QueueName},
{user_who_performed_action, ActingUser}])

View File

@ -311,8 +311,14 @@ terminate_delete(EmitStats, Reason,
fun() -> emit_stats(State) end);
true -> ok
end,
%% don't care if the internal delete doesn't return 'ok'.
rabbit_amqqueue:internal_delete(QName, ActingUser),
%% This try-catch block transforms throws to errors since throws are not
%% logged.
try
%% don't care if the internal delete doesn't return 'ok'.
rabbit_amqqueue:internal_delete(QName, ActingUser)
catch
{error, Reason} -> error(Reason)
end,
BQS1
end.

View File

@ -166,6 +166,8 @@ add(Binding, InnerFun, ActingUser) ->
fun (Src, Dst, B) ->
case rabbit_exchange:validate_binding(Src, B) of
ok ->
lock_resource(Src),
lock_resource(Dst),
%% this argument is used to check queue exclusivity;
%% in general, we want to fail on that in preference to
%% anything else
@ -184,6 +186,8 @@ add(Binding, InnerFun, ActingUser) ->
end, fun not_found_or_absent_errs/1).
add(Src, Dst, B, ActingUser) ->
lock_resource(Src),
lock_resource(Dst),
[SrcDurable, DstDurable] = [durable(E) || E <- [Src, Dst]],
case (SrcDurable andalso DstDurable andalso
mnesia:read({rabbit_durable_route, B}) =/= []) of
@ -206,6 +210,8 @@ remove(Binding, InnerFun, ActingUser) ->
binding_action(
Binding,
fun (Src, Dst, B) ->
lock_resource(Src),
lock_resource(Dst),
case mnesia:read(rabbit_route, B, write) of
[] -> case mnesia:read(rabbit_durable_route, B, write) of
[] -> rabbit_misc:const(ok);
@ -219,6 +225,8 @@ remove(Binding, InnerFun, ActingUser) ->
end, fun absent_errs_only/1).
remove(Src, Dst, B, ActingUser) ->
lock_resource(Src),
lock_resource(Dst),
ok = sync_route(#route{binding = B}, durable(Src), durable(Dst),
fun mnesia:delete_object/3),
Deletions = maybe_auto_delete(
@ -303,12 +311,12 @@ has_for_source(SrcName) ->
contains(rabbit_semi_durable_route, Match).
remove_for_source(SrcName) ->
lock_route_tables(),
lock_resource(SrcName),
Match = #route{binding = #binding{source = SrcName, _ = '_'}},
remove_routes(
lists:usort(
mnesia:match_object(rabbit_route, Match, write) ++
mnesia:match_object(rabbit_semi_durable_route, Match, write))).
mnesia:match_object(rabbit_route, Match, read) ++
mnesia:match_object(rabbit_semi_durable_route, Match, read))).
remove_for_destination(DstName, OnlyDurable) ->
remove_for_destination(DstName, OnlyDurable, fun remove_routes/1).
@ -331,8 +339,8 @@ binding_action(Binding = #binding{source = SrcName,
Fun(Src, Dst, Binding#binding{args = SortedArgs})
end, ErrFun).
%% Transactional delete of an exact record, passing LockKind straight
%% through to mnesia:delete_object/3.
delete_object(Table, Record, LockKind) ->
mnesia:delete_object(Table, Record, LockKind).
%% Dirty (non-transactional) delete of an exact record. Accepts and
%% ignores a LockKind argument so it has the same arity as
%% delete_object/3 and mnesia:delete_object/3 and can be passed
%% interchangeably as a fun of arity 3 (see sync_route callers).
dirty_delete_object(Table, Record, _LockKind) ->
mnesia:dirty_delete_object(Table, Record).
sync_route(Route, true, true, Fun) ->
ok = Fun(rabbit_durable_route, Route, write),
@ -393,66 +401,56 @@ continue('$end_of_table') -> false;
continue({[_|_], _}) -> true;
continue({[], Continuation}) -> continue(mnesia:select(Continuation)).
%% For bulk operations we lock the tables we are operating on in order
%% to reduce the time complexity. Without the table locks we end up
%% with num_tables*num_bulk_bindings row-level locks. Taking each lock
%% takes time proportional to the number of existing locks, thus
%% resulting in O(num_bulk_bindings^2) complexity.
%%
%% The locks need to be write locks since ultimately we end up
%% removing all these rows.
%%
%% The downside of all this is that no other binding operations except
%% lookup/routing (which uses dirty ops) can take place
%% concurrently. However, that is the case already since the bulk
%% operations involve mnesia:match_object calls with a partial key,
%% which entails taking a table lock.
lock_route_tables() ->
[mnesia:lock({table, T}, write) || T <- [rabbit_route,
rabbit_reverse_route,
rabbit_semi_durable_route,
rabbit_durable_route]].
remove_routes(Routes) ->
%% This partitioning allows us to suppress unnecessary delete
%% operations on disk tables, which require an fsync.
{RamRoutes, DiskRoutes} =
lists:partition(fun (R) -> mnesia:match_object(
rabbit_durable_route, R, write) == [] end,
rabbit_durable_route, R, read) == [] end,
Routes),
%% Of course the destination might not really be durable but it's
%% just as easy to try to delete it from the semi-durable table
%% than check first
[ok = sync_route(R, false, true, fun mnesia:delete_object/3) ||
[ok = sync_route(R, false, true, fun dirty_delete_object/3) ||
R <- RamRoutes],
[ok = sync_route(R, true, true, fun mnesia:delete_object/3) ||
[ok = sync_route(R, true, true, fun dirty_delete_object/3) ||
R <- DiskRoutes],
[R#route.binding || R <- Routes].
remove_transient_routes(Routes) ->
[begin
ok = sync_transient_route(R, fun delete_object/3),
ok = sync_transient_route(R, fun dirty_delete_object/3),
R#route.binding
end || R <- Routes].
remove_for_destination(DstName, OnlyDurable, Fun) ->
lock_route_tables(),
lock_resource(DstName),
MatchFwd = #route{binding = #binding{destination = DstName, _ = '_'}},
MatchRev = reverse_route(MatchFwd),
Routes = case OnlyDurable of
false -> [reverse_route(R) ||
false ->
[reverse_route(R) ||
R <- mnesia:match_object(
rabbit_reverse_route, MatchRev, write)];
rabbit_reverse_route, MatchRev, read)];
true -> lists:usort(
mnesia:match_object(
rabbit_durable_route, MatchFwd, write) ++
rabbit_durable_route, MatchFwd, read) ++
mnesia:match_object(
rabbit_semi_durable_route, MatchFwd, write))
rabbit_semi_durable_route, MatchFwd, read))
end,
Bindings = Fun(Routes),
group_bindings_fold(fun maybe_auto_delete/4, new_deletions(),
lists:keysort(#binding.source, Bindings), OnlyDurable).
%% Instead of locking an entire table on remove operations we lock only
%% the affected resource. This allows dirty_match_object to be used for
%% a faster search of the records to delete.
%% This works better when multiple resources are deleted at once, for
%% example when exclusive queues are deleted.
%% Takes a global mnesia write lock keyed on the resource name, scoped
%% to the nodes where rabbit_route is written.
lock_resource(Name) ->
mnesia:lock({global, Name, mnesia:table_info(rabbit_route, where_to_write)},
write).
%% Requires that its input binding list is sorted in exchange-name
%% order, so that the grouping of bindings (for passing to
%% group_bindings_and_auto_delete1) works properly.

View File

@ -27,7 +27,9 @@
start_logger() ->
application:stop(lager),
application:stop(syslog),
ensure_lager_configured(),
application:ensure_all_started(syslog),
lager:start(),
fold_sinks(
fun
@ -163,15 +165,19 @@ ensure_lager_configured() ->
end.
%% Lager should have handlers and sinks
%% Error logger forwarding to syslog should be disabled
lager_configured() ->
Sinks = lager:list_all_sinks(),
ExpectedSinks = list_expected_sinks(),
application:get_env(lager, handlers) =/= undefined
andalso
lists:all(fun(S) -> lists:member(S, Sinks) end, ExpectedSinks).
lists:all(fun(S) -> lists:member(S, Sinks) end, ExpectedSinks)
andalso
application:get_env(syslog, syslog_error_logger) =/= undefined.
configure_lager() ->
application:load(lager),
application:load(syslog),
%% Turn off reformatting for error_logger messages
case application:get_env(lager, error_logger_format_raw) of
undefined -> application:set_env(lager, error_logger_format_raw, true);
@ -192,6 +198,8 @@ configure_lager() ->
end,
%% Set rabbit.log config variable based on environment.
prepare_rabbit_log_config(),
%% Configure syslog library.
configure_syslog(),
%% At this point we should have rabbit.log application variable
%% configured to generate RabbitMQ log handlers.
GeneratedHandlers = generate_lager_handlers(),
@ -261,6 +269,13 @@ configure_lager() ->
end,
ok.
%% Ensure the syslog application's error_logger forwarding is explicitly
%% configured: when the user has not set syslog_error_logger, default it
%% to 'false' so error_logger messages are not forwarded to syslog.
%% An explicitly configured value is left untouched.
configure_syslog() ->
    case application:get_env(syslog, syslog_error_logger) of
        {ok, _Explicit} -> ok;
        undefined -> application:set_env(syslog, syslog_error_logger, false)
    end.
remove_rabbit_handlers(Handlers, FormerHandlers) ->
lists:filter(fun(Handler) ->
not lists:member(Handler, FormerHandlers)
@ -296,19 +311,23 @@ generate_lager_handlers(LogHandlersConfig) ->
lager_backend(file) -> lager_file_backend;
lager_backend(console) -> lager_console_backend;
lager_backend(syslog) -> lager_syslog_backend;
lager_backend(syslog) -> syslog_lager_backend;
lager_backend(exchange) -> lager_exchange_backend.
%% Syslog backend is using an old API for configuration and
%% does not support proplists.
%% Build lager handler specs from the normalised handler config.
%% syslog_lager_backend takes positional arguments [Level, SyslogOpts,
%% Formatter]; only the 'level' key of HandlerConfig is honoured here.
%% NOTE(review): the empty tuple {} appears to be a placeholder for the
%% backend's syslog options -- confirm against syslog_lager_backend docs.
generate_handler(syslog_lager_backend, HandlerConfig) ->
Level = proplists:get_value(level, HandlerConfig,
default_config_value(level)),
[{syslog_lager_backend,
[Level,
{},
{lager_default_formatter, syslog_formatter_config()}]}];
%% All other backends take a proplist. User-supplied HandlerConfig
%% entries win over the backend defaults: ukeymerge/3 keeps the element
%% from the first (user) list when the key appears in both.
generate_handler(Backend, HandlerConfig) ->
[{Backend,
lists:ukeymerge(1, lists:ukeysort(1, HandlerConfig),
lists:ukeysort(1, default_handler_config(Backend)))}].
default_handler_config(lager_syslog_backend) ->
[{level, default_config_value(level)},
{identity, "rabbitmq"},
{facility, daemon},
{formatter_config, default_config_value(formatter_config)}];
default_handler_config(lager_console_backend) ->
[{level, default_config_value(level)},
{formatter_config, default_config_value(formatter_config)}];
@ -327,6 +346,11 @@ default_config_value(formatter_config) ->
{pid, ""},
" ", message, "\n"].
%% lager_default_formatter configuration used for the syslog backend:
%% colour marker, "[severity]", pid, then the message. It carries no
%% date/time elements -- presumably because syslog adds its own
%% timestamp to each message (NOTE(review): confirm).
syslog_formatter_config() ->
[color, "[", severity, "] ",
{pid, ""},
" ", message, "\n"].
prepare_rabbit_log_config() ->
%% If RABBIT_LOGS is not set, we should ignore it.
DefaultFile = application:get_env(rabbit, lager_default_file, undefined),

View File

@ -20,10 +20,13 @@
-export([names/1, diagnostics/1, make/1, parts/1, cookie_hash/0,
is_running/2, is_process_running/2,
cluster_name/0, set_cluster_name/2, ensure_epmd/0,
all_running/0, name_type/0]).
all_running/0, name_type/0, running_count/0,
await_running_count/2]).
-include_lib("kernel/include/inet.hrl").
-define(SAMPLING_INTERVAL, 1000).
%%----------------------------------------------------------------------------
%% Specs
%%----------------------------------------------------------------------------
@ -37,6 +40,7 @@
-spec cluster_name() -> binary().
-spec set_cluster_name(binary(), rabbit_types:username()) -> 'ok'.
-spec all_running() -> [node()].
-spec running_count() -> integer().
%%----------------------------------------------------------------------------
@ -85,3 +89,22 @@ ensure_epmd() ->
rabbit_nodes_common:ensure_epmd().
%% Nodes currently reported as running by the mnesia cluster state.
all_running() -> rabbit_mnesia:cluster_nodes(running).
%% Number of cluster nodes currently reported as running.
running_count() -> length(all_running()).
-spec await_running_count(integer(), integer()) -> 'ok' | {'error', atom()}.
%% Poll every ?SAMPLING_INTERVAL ms until at least TargetCount cluster
%% nodes are running, or roughly Timeout ms have elapsed (the timeout is
%% converted to a retry count, so it is approximate).
await_running_count(TargetCount, Timeout) ->
Retries = round(Timeout/?SAMPLING_INTERVAL),
await_running_count_with_retries(TargetCount, Retries).
%% A target of 1 succeeds immediately -- presumably because the local
%% node always counts itself as running. NOTE(review): confirm this
%% holds while the rabbit application is still starting.
await_running_count_with_retries(1, _Retries) -> ok;
await_running_count_with_retries(_TargetCount, Retries) when Retries =:= 0 ->
{error, timeout};
await_running_count_with_retries(TargetCount, Retries) ->
case running_count() >= TargetCount of
true -> ok;
false ->
timer:sleep(?SAMPLING_INTERVAL),
await_running_count_with_retries(TargetCount, Retries - 1)
end.

View File

@ -117,4 +117,3 @@ merge_policy_value(<<"message-ttl">>, Val, OpVal) -> min(Val, OpVal);
merge_policy_value(<<"max-length">>, Val, OpVal) -> min(Val, OpVal);
merge_policy_value(<<"max-length-bytes">>, Val, OpVal) -> min(Val, OpVal);
merge_policy_value(<<"expires">>, Val, OpVal) -> min(Val, OpVal).

View File

@ -128,11 +128,14 @@ collapse_recovery(QNames, DupNames, Recovery) ->
priorities(#amqqueue{arguments = Args}) ->
Ints = [long, short, signedint, byte, unsignedbyte, unsignedshort, unsignedint],
case rabbit_misc:table_lookup(Args, <<"x-max-priority">>) of
{Type, Max} -> case lists:member(Type, Ints) of
false -> none;
true -> lists:reverse(lists:seq(0, Max))
end;
_ -> none
{Type, RequestedMax} ->
case lists:member(Type, Ints) of
false -> none;
true ->
Max = min(RequestedMax, ?MAX_SUPPORTED_PRIORITY),
lists:reverse(lists:seq(0, Max))
end;
_ -> none
end.
%%----------------------------------------------------------------------------

View File

@ -387,10 +387,24 @@ start_connection(Parent, HelperSup, Deb, Sock) ->
%% socket w/o delay before termination.
rabbit_net:fast_close(RealSocket),
rabbit_networking:unregister_connection(self()),
rabbit_core_metrics:connection_closed(self()),
rabbit_event:notify(connection_closed, [{name, Name},
{pid, self()},
{node, node()}])
rabbit_core_metrics:connection_closed(self()),
ClientProperties = case get(client_properties) of
undefined ->
[];
Properties ->
Properties
end,
EventProperties = [{name, Name},
{pid, self()},
{node, node()},
{client_properties, ClientProperties}],
EventProperties1 = case get(connection_user_provided_name) of
undefined ->
EventProperties;
ConnectionUserProvidedName ->
[{user_provided_name, ConnectionUserProvidedName} | EventProperties]
end,
rabbit_event:notify(connection_closed, EventProperties1)
end,
done.
@ -607,7 +621,9 @@ handle_other({'$gen_cast', {force_event_refresh, Ref}}, State)
when ?IS_RUNNING(State) ->
rabbit_event:notify(
connection_created,
[{type, network} | infos(?CREATION_EVENT_KEYS, State)], Ref),
augment_infos_with_user_provided_connection_name(
[{type, network} | infos(?CREATION_EVENT_KEYS, State)], State),
Ref),
rabbit_event:init_stats_timer(State, #v1.stats_timer);
handle_other({'$gen_cast', {force_event_refresh, _Ref}}, State) ->
%% Ignore, we will emit a created event once we start running.
@ -1130,6 +1146,15 @@ handle_method0(#'connection.start_ok'{mechanism = Mechanism,
Connection2 = augment_connection_log_name(Connection1),
State = State0#v1{connection_state = securing,
connection = Connection2},
% adding client properties to process dictionary to send them later
% in the connection_closed event
put(client_properties, ClientProperties),
case user_provided_connection_name(Connection2) of
undefined ->
undefined;
UserProvidedConnectionName ->
put(connection_user_provided_name, UserProvidedConnectionName)
end,
auth_phase(Response, State);
handle_method0(#'connection.secure_ok'{response = Response},
@ -1202,7 +1227,10 @@ handle_method0(#'connection.open'{virtual_host = VHost},
connection = NewConnection,
channel_sup_sup_pid = ChannelSupSupPid,
throttle = Throttle1}),
Infos = [{type, network} | infos(?CREATION_EVENT_KEYS, State1)],
Infos = augment_infos_with_user_provided_connection_name(
[{type, network} | infos(?CREATION_EVENT_KEYS, State1)],
State1
),
rabbit_core_metrics:connection_created(proplists:get_value(pid, Infos),
Infos),
rabbit_event:notify(connection_created, Infos),
@ -1661,16 +1689,31 @@ control_throttle(State = #v1{connection_state = CS,
_ -> State1
end.
augment_connection_log_name(#connection{client_properties = ClientProperties,
name = Name} = Connection) ->
case rabbit_misc:table_lookup(ClientProperties, <<"connection_name">>) of
{longstr, UserSpecifiedName} ->
augment_connection_log_name(#connection{name = Name} = Connection) ->
case user_provided_connection_name(Connection) of
undefined ->
Connection;
UserSpecifiedName ->
LogName = <<Name/binary, " - ", UserSpecifiedName/binary>>,
rabbit_log_connection:info("Connection ~p (~s) has a client-provided name: ~s~n", [self(), Name, UserSpecifiedName]),
?store_proc_name(LogName),
Connection#connection{log_name = LogName};
Connection#connection{log_name = LogName}
end.
augment_infos_with_user_provided_connection_name(Infos, #v1{connection = Connection}) ->
case user_provided_connection_name(Connection) of
undefined ->
Infos;
UserProvidedConnectionName ->
[{user_provided_name, UserProvidedConnectionName} | Infos]
end.
user_provided_connection_name(#connection{client_properties = ClientProperties}) ->
case rabbit_misc:table_lookup(ClientProperties, <<"connection_name">>) of
{longstr, UserSpecifiedName} ->
UserSpecifiedName;
_ ->
Connection
undefined
end.
dynamic_connection_name(Default) ->

View File

@ -22,7 +22,7 @@
-export([recover/0, recover/1]).
-export([add/2, delete/2, exists/1, list/0, with/2, with_user_and_vhost/3, assert/1, update/2,
set_limits/2, limits_of/1]).
set_limits/2, limits_of/1, vhost_cluster_state/1, is_running_on_all_nodes/1, await_running_on_all_nodes/2]).
-export([info/1, info/2, info_all/0, info_all/1, info_all/2, info_all/3]).
-export([dir/1, msg_store_dir_path/1, msg_store_dir_wildcard/0]).
-export([delete_storage/1]).
@ -93,7 +93,7 @@ add(VHostPath, ActingUser) ->
fun (ok, true) ->
ok;
(ok, false) ->
[rabbit_exchange:declare(
[_ = rabbit_exchange:declare(
rabbit_misc:r(VHostPath, exchange, Name),
Type, true, false, Internal, [], ActingUser) ||
{Name, Type, Internal} <-
@ -148,6 +148,45 @@ delete(VHostPath, ActingUser) ->
rabbit_vhost_sup_sup:delete_on_all_nodes(VHostPath),
ok.
%% 50 ms
-define(AWAIT_SAMPLE_INTERVAL, 50).
-spec await_running_on_all_nodes(rabbit_types:vhost(), integer()) -> ok | {error, timeout}.
%% Poll every ?AWAIT_SAMPLE_INTERVAL ms until the vhost is running on
%% every currently-running cluster node, or roughly Timeout ms elapse
%% (the timeout is converted to an attempt count, so it is approximate).
await_running_on_all_nodes(VHost, Timeout) ->
Attempts = round(Timeout / ?AWAIT_SAMPLE_INTERVAL),
await_running_on_all_nodes0(VHost, Attempts).
await_running_on_all_nodes0(_VHost, 0) ->
{error, timeout};
await_running_on_all_nodes0(VHost, Attempts) ->
case is_running_on_all_nodes(VHost) of
true -> ok;
_ ->
timer:sleep(?AWAIT_SAMPLE_INTERVAL),
await_running_on_all_nodes0(VHost, Attempts - 1)
end.
-spec is_running_on_all_nodes(rabbit_types:vhost()) -> boolean().
%% True iff every node in vhost_cluster_state/1 reports 'running'
%% (so 'nodedown' and 'stopped' both count as not running).
is_running_on_all_nodes(VHost) ->
States = vhost_cluster_state(VHost),
lists:all(fun ({_Node, State}) -> State =:= running end,
States).
-spec vhost_cluster_state(rabbit_types:vhost()) -> [{atom(), atom()}].
%% Ask each running cluster node (via RPC) whether the vhost's
%% supervision tree is alive there. Per node the state is one of:
%% running | stopped | nodedown (node went down between listing and RPC).
vhost_cluster_state(VHost) ->
Nodes = rabbit_nodes:all_running(),
lists:map(fun(Node) ->
State = case rabbit_misc:rpc_call(Node,
rabbit_vhost_sup_sup, is_vhost_alive,
[VHost]) of
{badrpc, nodedown} -> nodedown;
true -> running;
false -> stopped
end,
{Node, State}
end,
Nodes).
vhost_down(VHostPath) ->
ok = rabbit_event:notify(vhost_down,
[{name, VHostPath},
@ -264,19 +303,7 @@ infos(Items, X) -> [{Item, i(Item, X)} || Item <- Items].
i(name, VHost) -> VHost;
i(tracing, VHost) -> rabbit_trace:enabled(VHost);
i(cluster_state, VHost) ->
Nodes = rabbit_nodes:all_running(),
lists:map(fun(Node) ->
State = case rabbit_misc:rpc_call(Node,
rabbit_vhost_sup_sup, is_vhost_alive,
[VHost]) of
{badrpc, nodedown} -> nodedown;
true -> running;
false -> stopped
end,
{Node, State}
end,
Nodes);
i(cluster_state, VHost) -> vhost_cluster_state(VHost);
i(Item, _) -> throw({bad_argument, Item}).
info(VHost) -> infos(?INFO_KEYS, VHost).

View File

@ -18,6 +18,7 @@
-include_lib("common_test/include/ct.hrl").
-include_lib("amqp_client/include/amqp_client.hrl").
-include_lib("eunit/include/eunit.hrl").
-compile(export_all).
@ -53,7 +54,8 @@ groups() ->
forget_offline_removes_things,
force_boot,
status_with_alarm,
wait_fails_when_cluster_fails
pid_file_and_await_node_startup,
await_running_count
]},
{cluster_size_4, [], [
forget_promotes_offline_slave
@ -611,7 +613,7 @@ status_with_alarm(Config) ->
ok = alarm_information_on_each_node(R, Rabbit, Hare).
wait_fails_when_cluster_fails(Config) ->
pid_file_and_await_node_startup(Config) ->
[Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config,
nodename),
RabbitConfig = rabbit_ct_broker_helpers:get_node_config(Config,Rabbit),
@ -636,6 +638,46 @@ wait_fails_when_cluster_fails(Config) ->
{error, _, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Rabbit,
["wait", RabbitPidFile]).
await_running_count(Config) ->
[Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config,
nodename),
RabbitConfig = rabbit_ct_broker_helpers:get_node_config(Config,Rabbit),
RabbitPidFile = ?config(pid_file, RabbitConfig),
{ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Rabbit,
["wait", RabbitPidFile]),
%% stop both nodes
ok = rabbit_ct_broker_helpers:stop_node(Config, Hare),
ok = rabbit_ct_broker_helpers:stop_node(Config, Rabbit),
%% start one node in the background
rabbit_ct_broker_helpers:start_node(Config, Rabbit),
{ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Rabbit,
["wait", RabbitPidFile]),
?assertEqual(ok, rabbit_ct_broker_helpers:rpc(Config, Rabbit,
rabbit_nodes,
await_running_count, [1, 30000])),
?assertEqual({error, timeout},
rabbit_ct_broker_helpers:rpc(Config, Rabbit,
rabbit_nodes,
await_running_count, [2, 1000])),
?assertEqual({error, timeout},
rabbit_ct_broker_helpers:rpc(Config, Rabbit,
rabbit_nodes,
await_running_count, [5, 1000])),
rabbit_ct_broker_helpers:start_node(Config, Hare),
%% this now succeeds
?assertEqual(ok, rabbit_ct_broker_helpers:rpc(Config, Rabbit,
rabbit_nodes,
await_running_count, [2, 30000])),
%% this still succeeds
?assertEqual(ok, rabbit_ct_broker_helpers:rpc(Config, Rabbit,
rabbit_nodes,
await_running_count, [1, 30000])),
%% this still fails
?assertEqual({error, timeout},
rabbit_ct_broker_helpers:rpc(Config, Rabbit,
rabbit_nodes,
await_running_count, [5, 1000])).
%% ----------------------------------------------------------------------------
%% Internal utils
%% ----------------------------------------------------------------------------

View File

@ -534,5 +534,54 @@ credential_validator.regexp = ^abc\\d+",
[{kernel, [
{net_ticktime, 20}
]}],
[]}
[]},
{log_syslog_settings,
"log.syslog = true
log.syslog.identity = rabbitmq
log.syslog.facility = user
log.syslog.multiline_mode = true
log.syslog.ip = 10.10.10.10
log.syslog.port = 123",
[
{rabbit,[{log, [{syslog, [{enabled, true}]}]}]},
{syslog, [{app_name, "rabbitmq"},
{facility, user},
{multiline_mode, true},
{dest_host, "10.10.10.10"},
{dest_port, 123}]}
],
[]},
{log_syslog_tcp,
"log.syslog = true
log.syslog.transport = tcp
log.syslog.protocol = rfc5424",
[
{rabbit,[{log, [{syslog, [{enabled, true}]}]}]},
{syslog, [{protocol, {rfc5424, tcp}}]}
],
[]},
{log_syslog_udp_default,
"log.syslog = true
log.syslog.protocol = rfc3164",
[
{rabbit,[{log, [{syslog, [{enabled, true}]}]}]},
{syslog, [{protocol, {rfc3164, udp}}]}
],
[]},
{log_syslog_tls,
"log.syslog = true
log.syslog.transport = tls
log.syslog.ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
log.syslog.ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
log.syslog.ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
log.syslog.ssl_options.verify = verify_peer
log.syslog.ssl_options.fail_if_no_peer_cert = false",
[{rabbit, [{log, [{syslog, [{enabled, true}]}]}]},
{syslog, [{protocol, {rfc5424, tls,
[{verify,verify_peer},
{fail_if_no_peer_cert,false},
{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
{certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
{keyfile,"test/config_schema_SUITE_data/certs/key.pem"}]}}]}],
[]}
].

View File

@ -17,6 +17,7 @@
-module(priority_queue_SUITE).
-include_lib("common_test/include/ct.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("amqp_client/include/amqp_client.hrl").
-compile(export_all).
@ -46,7 +47,9 @@ groups() ->
simple_order,
straight_through,
invoke,
gen_server2_stats
gen_server2_stats,
negative_max_priorities,
max_priorities_above_hard_limit
]},
{cluster_size_3, [], [
mirror_queue_auto_ack,
@ -192,6 +195,28 @@ straight_through(Config) ->
rabbit_ct_client_helpers:close_connection(Conn),
passed.
max_priorities_above_hard_limit(Config) ->
{Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
Q = <<"max_priorities_above_hard_limit">>,
?assertExit(
{{shutdown, {server_initiated_close, 406, _}}, _},
%% Note that lower values (e.g. 300) will overflow the byte type here.
%% However, values >= 256 would still be rejected when used by
%% other clients
declare(Ch, Q, 3000)),
rabbit_ct_client_helpers:close_connection(Conn),
passed.
negative_max_priorities(Config) ->
{Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
Q = <<"negative_max_priorities">>,
?assertExit(
{{shutdown, {server_initiated_close, 406, _}}, _},
declare(Ch, Q, -10)),
rabbit_ct_client_helpers:close_connection(Conn),
passed.
invoke(Config) ->
%% Synthetic test to check the invoke callback, as the bug tested here
%% is only triggered with a race condition.
@ -669,7 +694,7 @@ get_ok(Ch, Q, Ack, PBin) ->
{#'basic.get_ok'{delivery_tag = DTag}, #amqp_msg{payload = PBin2}} =
amqp_channel:call(Ch, #'basic.get'{queue = Q,
no_ack = Ack =:= no_ack}),
PBin = PBin2,
?assertEqual(PBin, PBin2),
maybe_ack(Ch, Ack, DTag).
get_payload(Ch, Q, Ack, Ps) ->

View File

@ -394,7 +394,7 @@ config_multiple_handlers(_) ->
ConsoleHandlers = expected_console_handler(),
RabbitHandlers = expected_rabbit_handler(),
SyslogHandlers = expected_syslog_handler(error, "rabbitmq", daemon),
SyslogHandlers = expected_syslog_handler(error),
ExpectedHandlers = sort_handlers(SyslogHandlers ++ ConsoleHandlers ++ RabbitHandlers),
@ -464,14 +464,12 @@ config_syslog_handler_options(_) ->
DefaultLogFile = "rabbit_default.log",
application:set_env(rabbit, lager_default_file, DefaultLogFile),
application:set_env(rabbit, log, [{syslog, [{enabled, true},
{identity, "foo"},
{facility, local1},
{level, warning}]}]),
rabbit_lager:configure_lager(),
FileHandlers = default_expected_handlers(DefaultLogFile),
SyslogHandlers = expected_syslog_handler(warning, "foo", local1),
SyslogHandlers = expected_syslog_handler(warning),
ExpectedHandlers = sort_handlers(FileHandlers ++ SyslogHandlers),
@ -479,13 +477,12 @@ config_syslog_handler_options(_) ->
?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))).
expected_syslog_handler() ->
expected_syslog_handler(info, "rabbitmq", daemon).
expected_syslog_handler(info).
expected_syslog_handler(Level, Identity, Facility) ->
[{lager_syslog_backend, [{level, Level},
{facility, Facility},
{formatter_config, formatter_config()},
{identity, Identity}]}].
expected_syslog_handler(Level) ->
[{syslog_lager_backend, [Level,
{},
{lager_default_formatter, syslog_formatter_config()}]}].
env_var_overrides_config(_) ->
EnvLogFile = "rabbit_default.log",
@ -694,3 +691,6 @@ sort_handlers(Handlers) ->
formatter_config() ->
[date," ",time," ",color,"[",severity, "] ", {pid,[]}, " ",message,"\n"].
syslog_formatter_config() ->
[color,"[",severity, "] ", {pid,[]}, " ",message,"\n"].