Remove classic mirror queues

parent 6c86442a06
commit 3bbda5bdba
@@ -87,9 +87,8 @@ _APP_ENV = """[
]},
{halt_on_upgrade_failure, true},
{ssl_apps, [asn1, crypto, public_key, ssl]},
%% see rabbitmq-server#114
{mirroring_flow_control, true},
{mirroring_sync_batch_size, 4096},
%% classic queue storage implementation version
{classic_queue_default_version, 2},
%% see rabbitmq-server#227 and related tickets.
%% msg_store_credit_disc_bound only takes effect when
%% messages are persisted to the message store. If messages
@@ -397,7 +396,7 @@ rabbitmq_integration_suite(
additional_beam = [
":test_queue_utils_beam",
],
shard_count = 8,
shard_count = 6,
)

rabbitmq_integration_suite(
@@ -429,17 +428,6 @@ rabbitmq_integration_suite(
size = "medium",
)

rabbitmq_integration_suite(
name = "dynamic_ha_SUITE",
size = "large",
flaky = True,
shard_count = 20,
sharding_method = "case",
deps = [
"@proper//:erlang_app",
],
)

rabbitmq_integration_suite(
name = "dynamic_qq_SUITE",
size = "large",
@@ -451,18 +439,6 @@ rabbitmq_integration_suite(
],
)

rabbitmq_integration_suite(
name = "eager_sync_SUITE",
size = "large",
additional_beam = [
":sync_detection_SUITE_beam_files",
],
flaky = True,
shard_count = 5,
sharding_method = "case",
tags = ["classic-queue"],
)

rabbitmq_integration_suite(
name = "feature_flags_SUITE",
size = "large",
@@ -530,15 +506,6 @@ rabbitmq_integration_suite(
],
)

rabbitmq_integration_suite(
name = "many_node_ha_SUITE",
size = "medium",
additional_beam = [
":test_rabbit_ha_test_consumer_beam",
":test_rabbit_ha_test_producer_beam",
],
)

rabbitmq_integration_suite(
name = "rabbit_message_interceptor_SUITE",
size = "medium",
@@ -677,7 +644,7 @@ rabbitmq_integration_suite(
rabbitmq_integration_suite(
name = "queue_master_location_SUITE",
size = "large",
shard_count = 3,
shard_count = 2,
)

rabbitmq_integration_suite(
@@ -686,7 +653,7 @@ rabbitmq_integration_suite(
additional_beam = [
":test_queue_utils_beam",
],
shard_count = 6,
shard_count = 5,
)

rabbitmq_integration_suite(
@@ -871,16 +838,6 @@ rabbitmq_integration_suite(
size = "medium",
)

rabbitmq_integration_suite(
name = "simple_ha_SUITE",
size = "large",
additional_beam = [
":test_rabbit_ha_test_consumer_beam",
":test_rabbit_ha_test_producer_beam",
],
shard_count = 4,
)

rabbitmq_integration_suite(
name = "single_active_consumer_SUITE",
size = "medium",
@@ -889,11 +846,6 @@ rabbitmq_integration_suite(
],
)

rabbitmq_integration_suite(
name = "sync_detection_SUITE",
size = "medium",
)

rabbitmq_integration_suite(
name = "term_to_binary_compat_prop_SUITE",
deps = [
@@ -1018,15 +970,6 @@ rabbitmq_integration_suite(
size = "medium",
)

rabbitmq_suite(
name = "unit_gm_SUITE",
size = "small",
deps = [
"//deps/rabbitmq_ct_helpers:erlang_app",
"@meck//:erlang_app",
],
)

rabbitmq_integration_suite(
name = "unit_log_management_SUITE",
size = "medium",
@@ -1107,22 +1050,6 @@ rabbitmq_integration_suite(
],
)

rabbitmq_suite(
name = "unit_classic_mirrored_queue_sync_throttling_SUITE",
size = "small",
deps = [
"//deps/rabbit_common:erlang_app",
],
)

rabbitmq_suite(
name = "unit_classic_mirrored_queue_throughput_SUITE",
size = "small",
deps = [
"//deps/rabbit_common:erlang_app",
],
)

rabbitmq_integration_suite(
name = "direct_exchange_routing_v2_SUITE",
size = "medium",
@@ -1387,8 +1314,6 @@ eunit(
":test_rabbit_auth_backend_context_propagation_mock_beam",
":test_rabbit_dummy_protocol_connection_info_beam",
":test_rabbit_foo_protocol_connection_info_beam",
":test_rabbit_ha_test_consumer_beam",
":test_rabbit_ha_test_producer_beam",
":test_test_util_beam",
":test_test_rabbit_event_handler_beam",
":test_clustering_utils_beam",
@@ -67,9 +67,8 @@ define PROJECT_ENV
]},
{halt_on_upgrade_failure, true},
{ssl_apps, [asn1, crypto, public_key, ssl]},
%% see rabbitmq-server#114
{mirroring_flow_control, true},
{mirroring_sync_batch_size, 4096},
%% classic queue storage implementation version
{classic_queue_default_version, 2},
%% see rabbitmq-server#227 and related tickets.
%% msg_store_credit_disc_bound only takes effect when
%% messages are persisted to the message store. If messages
@@ -9,12 +9,10 @@ def all_beam_files(name = "all_beam_files"):
erlang_bytecode(
name = "behaviours",
srcs = [
"src/gm.erl",
"src/mc.erl",
"src/rabbit_backing_queue.erl",
"src/rabbit_credential_validator.erl",
"src/rabbit_exchange_type.erl",
"src/rabbit_mirror_queue_mode.erl",
"src/rabbit_policy_merge_strategy.erl",
"src/rabbit_queue_master_locator.erl",
"src/rabbit_queue_type.erl",
@@ -167,14 +165,7 @@ def all_beam_files(name = "all_beam_files"):
"src/rabbit_memory_monitor.erl",
"src/rabbit_message_interceptor.erl",
"src/rabbit_metrics.erl",
"src/rabbit_mirror_queue_coordinator.erl",
"src/rabbit_mirror_queue_master.erl",
"src/rabbit_mirror_queue_misc.erl",
"src/rabbit_mirror_queue_mode_all.erl",
"src/rabbit_mirror_queue_mode_exactly.erl",
"src/rabbit_mirror_queue_mode_nodes.erl",
"src/rabbit_mirror_queue_slave.erl",
"src/rabbit_mirror_queue_sync.erl",
"src/rabbit_mnesia.erl",
"src/rabbit_msg_store.erl",
"src/rabbit_msg_store_ets_index.erl",
@@ -198,7 +189,6 @@ def all_beam_files(name = "all_beam_files"):
"src/rabbit_prelaunch_enabled_plugins_file.erl",
"src/rabbit_prelaunch_feature_flags.erl",
"src/rabbit_prelaunch_logging.erl",
"src/rabbit_prequeue.erl",
"src/rabbit_priority_queue.erl",
"src/rabbit_process.erl",
"src/rabbit_queue_consumers.erl",
@@ -276,12 +266,10 @@ def all_test_beam_files(name = "all_test_beam_files"):
name = "test_behaviours",
testonly = True,
srcs = [
"src/gm.erl",
"src/mc.erl",
"src/rabbit_backing_queue.erl",
"src/rabbit_credential_validator.erl",
"src/rabbit_exchange_type.erl",
"src/rabbit_mirror_queue_mode.erl",
"src/rabbit_policy_merge_strategy.erl",
"src/rabbit_queue_master_locator.erl",
"src/rabbit_queue_type.erl",
@@ -435,14 +423,7 @@ def all_test_beam_files(name = "all_test_beam_files"):
"src/rabbit_memory_monitor.erl",
"src/rabbit_message_interceptor.erl",
"src/rabbit_metrics.erl",
"src/rabbit_mirror_queue_coordinator.erl",
"src/rabbit_mirror_queue_master.erl",
"src/rabbit_mirror_queue_misc.erl",
"src/rabbit_mirror_queue_mode_all.erl",
"src/rabbit_mirror_queue_mode_exactly.erl",
"src/rabbit_mirror_queue_mode_nodes.erl",
"src/rabbit_mirror_queue_slave.erl",
"src/rabbit_mirror_queue_sync.erl",
"src/rabbit_mnesia.erl",
"src/rabbit_msg_store.erl",
"src/rabbit_msg_store_ets_index.erl",
@@ -466,7 +447,6 @@ def all_test_beam_files(name = "all_test_beam_files"):
"src/rabbit_prelaunch_enabled_plugins_file.erl",
"src/rabbit_prelaunch_feature_flags.erl",
"src/rabbit_prelaunch_logging.erl",
"src/rabbit_prequeue.erl",
"src/rabbit_priority_queue.erl",
"src/rabbit_process.erl",
"src/rabbit_queue_consumers.erl",
@@ -548,7 +528,6 @@ def all_srcs(name = "all_srcs"):
srcs = [
"include/amqqueue.hrl",
"include/amqqueue_v2.hrl",
"include/gm_specs.hrl",
"include/internal_user.hrl",
"include/mc.hrl",
"include/rabbit_amqp.hrl",
@@ -582,7 +561,6 @@ def all_srcs(name = "all_srcs"):
"src/background_gc.erl",
"src/code_server_cache.erl",
"src/gatherer.erl",
"src/gm.erl",
"src/internal_user.erl",
"src/lqueue.erl",
"src/mc.erl",
@@ -721,15 +699,7 @@ def all_srcs(name = "all_srcs"):
"src/rabbit_memory_monitor.erl",
"src/rabbit_message_interceptor.erl",
"src/rabbit_metrics.erl",
"src/rabbit_mirror_queue_coordinator.erl",
"src/rabbit_mirror_queue_master.erl",
"src/rabbit_mirror_queue_misc.erl",
"src/rabbit_mirror_queue_mode.erl",
"src/rabbit_mirror_queue_mode_all.erl",
"src/rabbit_mirror_queue_mode_exactly.erl",
"src/rabbit_mirror_queue_mode_nodes.erl",
"src/rabbit_mirror_queue_slave.erl",
"src/rabbit_mirror_queue_sync.erl",
"src/rabbit_mnesia.erl",
"src/rabbit_msg_store.erl",
"src/rabbit_msg_store_ets_index.erl",
@@ -754,7 +724,6 @@ def all_srcs(name = "all_srcs"):
"src/rabbit_prelaunch_enabled_plugins_file.erl",
"src/rabbit_prelaunch_feature_flags.erl",
"src/rabbit_prelaunch_logging.erl",
"src/rabbit_prequeue.erl",
"src/rabbit_priority_queue.erl",
"src/rabbit_process.erl",
"src/rabbit_queue_consumers.erl",
@@ -990,15 +959,6 @@ def test_suite_beam_files(name = "test_suite_beam_files"):
app_name = "rabbit",
erlc_opts = "//:test_erlc_opts",
)
erlang_bytecode(
name = "dynamic_ha_SUITE_beam_files",
testonly = True,
srcs = ["test/dynamic_ha_SUITE.erl"],
outs = ["test/dynamic_ha_SUITE.beam"],
app_name = "rabbit",
erlc_opts = "//:test_erlc_opts",
deps = ["//deps/amqp_client:erlang_app", "//deps/rabbitmq_ct_helpers:erlang_app", "@proper//:erlang_app"],
)
erlang_bytecode(
name = "dynamic_qq_SUITE_beam_files",
testonly = True,
@@ -1008,15 +968,6 @@ def test_suite_beam_files(name = "test_suite_beam_files"):
erlc_opts = "//:test_erlc_opts",
deps = ["//deps/amqp_client:erlang_app", "//deps/rabbitmq_ct_helpers:erlang_app"],
)
erlang_bytecode(
name = "eager_sync_SUITE_beam_files",
testonly = True,
srcs = ["test/eager_sync_SUITE.erl"],
outs = ["test/eager_sync_SUITE.beam"],
app_name = "rabbit",
erlc_opts = "//:test_erlc_opts",
deps = ["//deps/amqp_client:erlang_app"],
)
erlang_bytecode(
name = "feature_flags_SUITE_beam_files",
testonly = True,
@@ -1087,15 +1038,6 @@ def test_suite_beam_files(name = "test_suite_beam_files"):
erlc_opts = "//:test_erlc_opts",
deps = ["//deps/amqp_client:erlang_app", "//deps/rabbitmq_ct_helpers:erlang_app"],
)
erlang_bytecode(
name = "many_node_ha_SUITE_beam_files",
testonly = True,
srcs = ["test/many_node_ha_SUITE.erl"],
outs = ["test/many_node_ha_SUITE.beam"],
app_name = "rabbit",
erlc_opts = "//:test_erlc_opts",
deps = ["//deps/amqp_client:erlang_app"],
)
erlang_bytecode(
name = "message_size_limit_SUITE_beam_files",
testonly = True,
@@ -1485,15 +1427,6 @@ def test_suite_beam_files(name = "test_suite_beam_files"):
app_name = "rabbit",
erlc_opts = "//:test_erlc_opts",
)
erlang_bytecode(
name = "simple_ha_SUITE_beam_files",
testonly = True,
srcs = ["test/simple_ha_SUITE.erl"],
outs = ["test/simple_ha_SUITE.beam"],
app_name = "rabbit",
erlc_opts = "//:test_erlc_opts",
deps = ["//deps/amqp_client:erlang_app"],
)
erlang_bytecode(
name = "single_active_consumer_SUITE_beam_files",
testonly = True,
@@ -1503,15 +1436,6 @@ def test_suite_beam_files(name = "test_suite_beam_files"):
erlc_opts = "//:test_erlc_opts",
deps = ["//deps/amqp_client:erlang_app"],
)
erlang_bytecode(
name = "sync_detection_SUITE_beam_files",
testonly = True,
srcs = ["test/sync_detection_SUITE.erl"],
outs = ["test/sync_detection_SUITE.beam"],
app_name = "rabbit",
erlc_opts = "//:test_erlc_opts",
deps = ["//deps/amqp_client:erlang_app"],
)
erlang_bytecode(
name = "term_to_binary_compat_prop_SUITE_beam_files",
testonly = True,
@@ -1619,24 +1543,6 @@ def test_suite_beam_files(name = "test_suite_beam_files"):
app_name = "rabbit",
erlc_opts = "//:test_erlc_opts",
)
erlang_bytecode(
name = "test_rabbit_ha_test_consumer_beam",
testonly = True,
srcs = ["test/rabbit_ha_test_consumer.erl"],
outs = ["test/rabbit_ha_test_consumer.beam"],
app_name = "rabbit",
erlc_opts = "//:test_erlc_opts",
deps = ["//deps/amqp_client:erlang_app"],
)
erlang_bytecode(
name = "test_rabbit_ha_test_producer_beam",
testonly = True,
srcs = ["test/rabbit_ha_test_producer.erl"],
outs = ["test/rabbit_ha_test_producer.beam"],
app_name = "rabbit",
erlc_opts = "//:test_erlc_opts",
deps = ["//deps/amqp_client:erlang_app"],
)
erlang_bytecode(
name = "test_test_util_beam",
testonly = True,
@@ -1725,23 +1631,6 @@ def test_suite_beam_files(name = "test_suite_beam_files"):
erlc_opts = "//:test_erlc_opts",
deps = ["//deps/amqp_client:erlang_app"],
)
erlang_bytecode(
name = "unit_classic_mirrored_queue_sync_throttling_SUITE_beam_files",
testonly = True,
srcs = ["test/unit_classic_mirrored_queue_sync_throttling_SUITE.erl"],
outs = ["test/unit_classic_mirrored_queue_sync_throttling_SUITE.beam"],
app_name = "rabbit",
erlc_opts = "//:test_erlc_opts",
deps = ["//deps/rabbit_common:erlang_app"],
)
erlang_bytecode(
name = "unit_classic_mirrored_queue_throughput_SUITE_beam_files",
testonly = True,
srcs = ["test/unit_classic_mirrored_queue_throughput_SUITE.erl"],
outs = ["test/unit_classic_mirrored_queue_throughput_SUITE.beam"],
app_name = "rabbit",
erlc_opts = "//:test_erlc_opts",
)
erlang_bytecode(
name = "unit_cluster_formation_locking_mocks_SUITE_beam_files",
testonly = True,
@@ -1816,15 +1705,6 @@ def test_suite_beam_files(name = "test_suite_beam_files"):
app_name = "rabbit",
erlc_opts = "//:test_erlc_opts",
)
erlang_bytecode(
name = "unit_gm_SUITE_beam_files",
testonly = True,
srcs = ["test/unit_gm_SUITE.erl"],
outs = ["test/unit_gm_SUITE.beam"],
app_name = "rabbit",
beam = ["ebin/gm.beam"],
erlc_opts = "//:test_erlc_opts",
)
erlang_bytecode(
name = "unit_log_management_SUITE_beam_files",
testonly = True,
@@ -1,10 +0,0 @@
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
%%

-type callback_result() :: 'ok' | {'stop', any()} | {'become', atom(), args()}.
-type args() :: any().
-type members() :: [pid()].
@@ -28,9 +28,6 @@
set_decorators/2,
% exclusive_owner
get_exclusive_owner/1,
% gm_pids
get_gm_pids/1,
set_gm_pids/2,
get_leader/1,
% name (#resource)
get_name/1,
@@ -53,21 +50,9 @@
% type_state
get_type_state/1,
set_type_state/2,
% recoverable_slaves
get_recoverable_slaves/1,
set_recoverable_slaves/2,
% slave_pids
get_slave_pids/1,
set_slave_pids/2,
% slave_pids_pending_shutdown
get_slave_pids_pending_shutdown/1,
set_slave_pids_pending_shutdown/2,
% state
get_state/1,
set_state/2,
% sync_slave_pids
get_sync_slave_pids/1,
set_sync_slave_pids/2,
get_type/1,
get_vhost/1,
is_amqqueue/1,
@@ -81,7 +66,7 @@
pattern_match_on_type/1,
pattern_match_on_durable/1,
pattern_match_on_type_and_durable/2,
reset_mirroring_and_decorators/1,
reset_decorators/1,
set_immutable/1,
qnode/1,
to_printable/1,
@@ -104,12 +89,9 @@
arguments = [] :: rabbit_framing:amqp_table() | ets:match_pattern(),
%% durable (just so we know home node)
pid :: pid() | ra_server_id() | none | ets:match_pattern(),
%% transient
slave_pids = [] :: [pid()] | none | ets:match_pattern(),
%% transient
sync_slave_pids = [] :: [pid()] | none| ets:match_pattern(),
%% durable
recoverable_slaves = [] :: [atom()] | none | ets:match_pattern(),
slave_pids = [], %% reserved
sync_slave_pids = [], %% reserved
recoverable_slaves = [], %% reserved
%% durable, implicit update as above
policy :: proplists:proplist() | none | undefined | ets:match_pattern(),
%% durable, implicit update as above
@@ -121,7 +103,7 @@
%% durable (have we crashed?)
state = live :: atom() | none | ets:match_pattern(),
policy_version = 0 :: non_neg_integer() | ets:match_pattern(),
slave_pids_pending_shutdown = [] :: [pid()] | ets:match_pattern(),
slave_pids_pending_shutdown = [], %% reserved
%% secondary index
vhost :: rabbit_types:vhost() | undefined | ets:match_pattern(),
options = #{} :: map() | ets:match_pattern(),
@@ -384,18 +366,6 @@ set_decorators(#amqqueue{} = Queue, Decorators) ->
get_exclusive_owner(#amqqueue{exclusive_owner = Owner}) ->
Owner.

% gm_pids

-spec get_gm_pids(amqqueue()) -> [{pid(), pid()}] | none.

get_gm_pids(#amqqueue{gm_pids = GMPids}) ->
GMPids.

-spec set_gm_pids(amqqueue(), [{pid(), pid()}] | none) -> amqqueue().

set_gm_pids(#amqqueue{} = Queue, GMPids) ->
Queue#amqqueue{gm_pids = GMPids}.

-spec get_leader(amqqueue_v2()) -> node().

get_leader(#amqqueue{type = rabbit_quorum_queue, pid = {_, Leader}}) -> Leader.
@@ -466,18 +436,6 @@ get_policy_version(#amqqueue{policy_version = PV}) ->
set_policy_version(#amqqueue{} = Queue, PV) ->
Queue#amqqueue{policy_version = PV}.

% recoverable_slaves

-spec get_recoverable_slaves(amqqueue()) -> [atom()] | none.

get_recoverable_slaves(#amqqueue{recoverable_slaves = Slaves}) ->
Slaves.

-spec set_recoverable_slaves(amqqueue(), [atom()] | none) -> amqqueue().

set_recoverable_slaves(#amqqueue{} = Queue, Slaves) ->
Queue#amqqueue{recoverable_slaves = Slaves}.

% type_state (new in v2)

-spec get_type_state(amqqueue()) -> map().
@@ -492,31 +450,6 @@ set_type_state(#amqqueue{} = Queue, TState) ->
set_type_state(Queue, _TState) ->
Queue.

% slave_pids

-spec get_slave_pids(amqqueue()) -> [pid()] | none.

get_slave_pids(#amqqueue{slave_pids = Slaves}) ->
Slaves.

-spec set_slave_pids(amqqueue(), [pid()] | none) -> amqqueue().

set_slave_pids(#amqqueue{} = Queue, SlavePids) ->
Queue#amqqueue{slave_pids = SlavePids}.

% slave_pids_pending_shutdown

-spec get_slave_pids_pending_shutdown(amqqueue()) -> [pid()].

get_slave_pids_pending_shutdown(
#amqqueue{slave_pids_pending_shutdown = Slaves}) ->
Slaves.

-spec set_slave_pids_pending_shutdown(amqqueue(), [pid()]) -> amqqueue().

set_slave_pids_pending_shutdown(#amqqueue{} = Queue, SlavePids) ->
Queue#amqqueue{slave_pids_pending_shutdown = SlavePids}.

% state

-spec get_state(amqqueue()) -> atom() | none.
@@ -528,18 +461,6 @@ get_state(#amqqueue{state = State}) -> State.
set_state(#amqqueue{} = Queue, State) ->
Queue#amqqueue{state = State}.

% sync_slave_pids

-spec get_sync_slave_pids(amqqueue()) -> [pid()] | none.

get_sync_slave_pids(#amqqueue{sync_slave_pids = Pids}) ->
Pids.

-spec set_sync_slave_pids(amqqueue(), [pid()] | none) -> amqqueue().

set_sync_slave_pids(#amqqueue{} = Queue, Pids) ->
Queue#amqqueue{sync_slave_pids = Pids}.

%% New in v2.

-spec get_type(amqqueue()) -> atom().
@@ -610,22 +531,15 @@ pattern_match_on_durable(IsDurable) ->
pattern_match_on_type_and_durable(Type, IsDurable) ->
#amqqueue{type = Type, durable = IsDurable, _ = '_'}.

-spec reset_mirroring_and_decorators(amqqueue()) -> amqqueue().
-spec reset_decorators(amqqueue()) -> amqqueue().

reset_mirroring_and_decorators(#amqqueue{} = Queue) ->
Queue#amqqueue{slave_pids = [],
sync_slave_pids = [],
gm_pids = [],
decorators = undefined}.
reset_decorators(#amqqueue{} = Queue) ->
Queue#amqqueue{decorators = undefined}.

-spec set_immutable(amqqueue()) -> amqqueue().

set_immutable(#amqqueue{} = Queue) ->
Queue#amqqueue{pid = none,
slave_pids = [],
sync_slave_pids = none,
recoverable_slaves = none,
gm_pids = none,
policy = none,
decorators = none,
state = none}.
File diff suppressed because it is too large
@@ -36,16 +36,12 @@
-export([notify_down_all/2, notify_down_all/3, activate_limit_all/2]).
-export([on_node_up/1, on_node_down/1]).
-export([update/2, store_queue/1, update_decorators/2, policy_changed/2]).
-export([update_mirroring/1, sync_mirrors/1, cancel_sync_mirrors/1]).
-export([emit_unresponsive/6, emit_unresponsive_local/5, is_unresponsive/2]).
-export([has_synchronised_mirrors_online/1, is_match/2, is_in_virtual_host/2]).
-export([is_match/2, is_in_virtual_host/2]).
-export([is_replicated/1, is_exclusive/1, is_not_exclusive/1, is_dead_exclusive/1]).
-export([list_local_quorum_queues/0, list_local_quorum_queue_names/0,
list_local_stream_queues/0, list_stream_queues_on/1,
list_local_mirrored_classic_queues/0, list_local_mirrored_classic_names/0,
list_local_leaders/0, list_local_followers/0, get_quorum_nodes/1,
list_local_mirrored_classic_without_synchronised_mirrors/0,
list_local_mirrored_classic_without_synchronised_mirrors_for_cli/0,
list_local_quorum_queues_with_name_matching/1,
list_local_quorum_queues_with_name_matching/2]).
-export([is_local_to_node/2, is_local_to_node_set/2]).
@@ -311,7 +307,6 @@ update_decorators(Name, Decorators) ->
policy_changed(Q1, Q2) ->
Decorators1 = amqqueue:get_decorators(Q1),
Decorators2 = amqqueue:get_decorators(Q2),
rabbit_mirror_queue_misc:update_mirrors(Q1, Q2),
D1 = rabbit_queue_decorator:select(Decorators1),
D2 = rabbit_queue_decorator:select(Decorators2),
[ok = M:policy_changed(Q1, Q2) || M <- lists:usort(D1 ++ D2)],
@@ -395,7 +390,7 @@ get_rebalance_lock(Pid) when is_pid(Pid) ->
false
end.

-spec rebalance('all' | 'quorum' | 'classic', binary(), binary()) ->
-spec rebalance('all' | 'quorum', binary(), binary()) ->
{ok, [{node(), pos_integer()}]} | {error, term()}.
rebalance(Type, VhostSpec, QueueSpec) ->
%% We have not yet acquired the rebalance_queues global lock.
@@ -428,7 +423,7 @@ maybe_rebalance(false, _Type, _VhostSpec, _QueueSpec) ->

%% Stream queues don't yet support rebalance
filter_per_type(all, Q) ->
?amqqueue_is_quorum(Q) or ?amqqueue_is_classic(Q) or ?amqqueue_is_stream(Q);
?amqqueue_is_quorum(Q) or ?amqqueue_is_stream(Q);
filter_per_type(quorum, Q) ->
?amqqueue_is_quorum(Q);
filter_per_type(stream, Q) ->
@@ -439,9 +434,7 @@ filter_per_type(classic, Q) ->
rebalance_module(Q) when ?amqqueue_is_quorum(Q) ->
rabbit_quorum_queue;
rebalance_module(Q) when ?amqqueue_is_stream(Q) ->
rabbit_stream_queue;
rebalance_module(Q) when ?amqqueue_is_classic(Q) ->
rabbit_mirror_queue_misc.
rabbit_stream_queue.

get_resource_name(#resource{name = Name}) ->
Name.
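
With mirrored classic queues removed, rebalancing now only moves quorum and stream queue leaders, as the narrowed spec and rebalance_module/1 above show. A minimal usage sketch (illustrative only, not part of the diff; the vhost and queue-name patterns are examples and the call is assumed to be run on a cluster node where it succeeds):

    %% Rebalance quorum queue leaders; per the spec above the result is
    %% {ok, [{Node, MovedLeaders}]} on success.
    {ok, Summary} = rabbit_amqqueue:rebalance(quorum, <<".*">>, <<".*">>),
    io:format("rebalanced: ~p~n", [Summary]).
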
@@ -552,23 +545,15 @@ with(#resource{} = Name, F, E, RetriesLeft) ->
%% Something bad happened to that queue, we are bailing out
%% on processing current request.
E({absent, Q, timeout});
{ok, Q} when ?amqqueue_state_is(Q, stopped) andalso RetriesLeft =:= 0 ->
%% The queue was stopped and not migrated
{ok, Q} when ?amqqueue_state_is(Q, stopped) ->
%% The queue was stopped
E({absent, Q, stopped});
%% The queue process has crashed with unknown error
{ok, Q} when ?amqqueue_state_is(Q, crashed) ->
E({absent, Q, crashed});
%% The queue process has been stopped by a supervisor.
%% In that case a synchronised mirror can take over
%% so we should retry.
{ok, Q} when ?amqqueue_state_is(Q, stopped) ->
%% The queue process was stopped by the supervisor
rabbit_misc:with_exit_handler(
fun () -> retry_wait(Q, F, E, RetriesLeft) end,
fun () -> F(Q) end);
%% The queue is supposed to be active.
%% The leader node can go away or queue can be killed
%% so we retry, waiting for a mirror to take over.
%% The node can go away or queue can be killed so we retry.
%% TODO review this: why to retry when mirroring is gone?
{ok, Q} when ?amqqueue_state_is(Q, live) ->
%% We check is_process_alive(QPid) in case we receive a
%% nodedown (for example) in F() that has nothing to do
@@ -592,27 +577,19 @@ with(#resource{} = Name, F, E, RetriesLeft) ->
retry_wait(Q, F, E, RetriesLeft) ->
Name = amqqueue:get_name(Q),
QPid = amqqueue:get_pid(Q),
QState = amqqueue:get_state(Q),
case {QState, is_replicated(Q)} of
%% We don't want to repeat an operation if
%% there are no mirrors to migrate to
{stopped, false} ->
E({absent, Q, stopped});
_ ->
case rabbit_process:is_process_alive(QPid) of
true ->
% rabbitmq-server#1682
% The old check would have crashed here,
% instead, log it and run the exit fun. absent & alive is weird,
% but better than crashing with badmatch,true
%% rabbitmq-server#1682
%% The old check would have crashed here,
%% instead, log it and run the exit fun. absent & alive is weird,
%% but better than crashing with badmatch,true
rabbit_log:debug("Unexpected alive queue process ~tp", [QPid]),
E({absent, Q, alive});
false ->
ok % Expected result
end,
timer:sleep(30),
with(Name, F, E, RetriesLeft - 1)
end.
with(Name, F, E, RetriesLeft - 1).

-spec with(name(), qfun(A)) ->
A | rabbit_types:error(not_found_or_absent()).
@@ -1248,48 +1225,6 @@ list_local_followers() ->
rabbit_quorum_queue:is_recoverable(Q)
].

-spec list_local_mirrored_classic_queues() -> [amqqueue:amqqueue()].
list_local_mirrored_classic_queues() ->
[ Q || Q <- list(),
amqqueue:get_state(Q) =/= crashed,
amqqueue:is_classic(Q),
is_local_to_node(amqqueue:get_pid(Q), node()),
is_replicated(Q)].

-spec list_local_mirrored_classic_names() -> [name()].
list_local_mirrored_classic_names() ->
[ amqqueue:get_name(Q) || Q <- list(),
amqqueue:get_state(Q) =/= crashed,
amqqueue:is_classic(Q),
is_local_to_node(amqqueue:get_pid(Q), node()),
is_replicated(Q)].

-spec list_local_mirrored_classic_without_synchronised_mirrors() ->
[amqqueue:amqqueue()].
list_local_mirrored_classic_without_synchronised_mirrors() ->
[ Q || Q <- list(),
amqqueue:get_state(Q) =/= crashed,
amqqueue:is_classic(Q),
%% filter out exclusive queues as they won't actually be mirrored
is_not_exclusive(Q),
is_local_to_node(amqqueue:get_pid(Q), node()),
is_replicated(Q),
not has_synchronised_mirrors_online(Q)].

-spec list_local_mirrored_classic_without_synchronised_mirrors_for_cli() ->
[#{binary => any()}].
list_local_mirrored_classic_without_synchronised_mirrors_for_cli() ->
ClassicQs = list_local_mirrored_classic_without_synchronised_mirrors(),
[begin
#resource{name = Name} = amqqueue:get_name(Q),
#{
<<"readable_name">> => rabbit_data_coercion:to_binary(rabbit_misc:rs(amqqueue:get_name(Q))),
<<"name">> => Name,
<<"virtual_host">> => amqqueue:get_vhost(Q),
<<"type">> => <<"classic">>
}
end || Q <- ClassicQs].

-spec list_local_quorum_queues_with_name_matching(binary()) -> [amqqueue:amqqueue()].
list_local_quorum_queues_with_name_matching(Pattern) ->
[ Q || Q <- list_by_type(quorum),
@@ -1819,64 +1754,24 @@ internal_delete(Queue, ActingUser, Reason) ->
%% Does it make any sense once mnesia is not used/removed?
forget_all_durable(Node) ->
UpdateFun = fun(Q) ->
forget_node_for_queue(Node, Q)
forget_node_for_queue(Q)
end,
FilterFun = fun(Q) ->
is_local_to_node(amqqueue:get_pid(Q), Node)
end,
rabbit_db_queue:foreach_durable(UpdateFun, FilterFun).

%% Try to promote a mirror while down - it should recover as a
%% leader. We try to take the oldest mirror here for best chance of
%% recovery.
forget_node_for_queue(_DeadNode, Q)
forget_node_for_queue(Q)
when ?amqqueue_is_quorum(Q) ->
ok;
forget_node_for_queue(_DeadNode, Q)
forget_node_for_queue(Q)
when ?amqqueue_is_stream(Q) ->
ok;
forget_node_for_queue(DeadNode, Q) ->
RS = amqqueue:get_recoverable_slaves(Q),
forget_node_for_queue(DeadNode, RS, Q).

forget_node_for_queue(_DeadNode, [], Q) ->
%% No mirrors to recover from, queue is gone.
forget_node_for_queue(Q) ->
%% Don't process_deletions since that just calls callbacks and we
%% are not really up.
Name = amqqueue:get_name(Q),
rabbit_db_queue:internal_delete(Name, true, normal);

%% Should not happen, but let's be conservative.
forget_node_for_queue(DeadNode, [DeadNode | T], Q) ->
forget_node_for_queue(DeadNode, T, Q);

forget_node_for_queue(DeadNode, [H|T], Q) when ?is_amqqueue(Q) ->
Type = amqqueue:get_type(Q),
case {node_permits_offline_promotion(H), Type} of
{false, _} -> forget_node_for_queue(DeadNode, T, Q);
{true, rabbit_classic_queue} ->
Q1 = amqqueue:set_pid(Q, rabbit_misc:node_to_fake_pid(H)),
%% rabbit_db_queue:set_many/1 just stores a durable queue record,
%% that is the only one required here.
%% rabbit_db_queue:set/1 writes both durable and transient, thus
%% can't be used for this operation.
ok = rabbit_db_queue:set_many([Q1]);
{true, rabbit_quorum_queue} ->
ok
end.

node_permits_offline_promotion(Node) ->
case node() of
Node -> not rabbit:is_running(); %% [1]
_ -> NotRunning = rabbit_nodes:list_not_running(),
lists:member(Node, NotRunning) %% [2]
end.
%% [1] In this case if we are a real running node (i.e. rabbitmqctl
%% has RPCed into us) then we cannot allow promotion. If on the other
%% hand we *are* rabbitmqctl impersonating the node for offline
%% node-forgetting then we can.
%%
%% [2] This is simpler; as long as it's down that's OK
rabbit_db_queue:internal_delete(Name, true, normal).
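
forget_node_for_queue/1 no longer attempts to promote a recoverable mirror: for a classic queue that lived on the forgotten node, the durable record is simply deleted, while quorum and stream queues are left untouched. A hedged sketch of the call path (illustrative only, not part of the diff; the node name is made up and forget_all_durable/1 is assumed to remain the entry point used when a node is forgotten):

    %% Remove the durable records of classic queues that lived on a node
    %% that is being forgotten from the cluster.
    DeadNode = 'rabbit@forgotten-host',
    rabbit_amqqueue:forget_all_durable(DeadNode).
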
-spec run_backing_queue
(pid(), atom(), (fun ((atom(), A) -> {[rabbit_types:msg_id()], A}))) ->
@@ -1895,33 +1790,10 @@ set_ram_duration_target(QPid, Duration) ->
set_maximum_since_use(QPid, Age) ->
gen_server2:cast(QPid, {set_maximum_since_use, Age}).

-spec update_mirroring(pid()) -> 'ok'.

update_mirroring(QPid) ->
ok = delegate:invoke_no_result(QPid, {gen_server2, cast, [update_mirroring]}).

-spec sync_mirrors(amqqueue:amqqueue() | pid()) ->
'ok' | rabbit_types:error('not_mirrored').

sync_mirrors(Q) when ?is_amqqueue(Q) ->
QPid = amqqueue:get_pid(Q),
delegate:invoke(QPid, {gen_server2, call, [sync_mirrors, infinity]});
sync_mirrors(QPid) ->
delegate:invoke(QPid, {gen_server2, call, [sync_mirrors, infinity]}).

-spec cancel_sync_mirrors(amqqueue:amqqueue() | pid()) ->
'ok' | {'ok', 'not_syncing'}.

cancel_sync_mirrors(Q) when ?is_amqqueue(Q) ->
QPid = amqqueue:get_pid(Q),
delegate:invoke(QPid, {gen_server2, call, [cancel_sync_mirrors, infinity]});
cancel_sync_mirrors(QPid) ->
delegate:invoke(QPid, {gen_server2, call, [cancel_sync_mirrors, infinity]}).

-spec is_replicated(amqqueue:amqqueue()) -> boolean().

is_replicated(Q) when ?amqqueue_is_classic(Q) ->
rabbit_mirror_queue_misc:is_mirrored(Q);
false;
is_replicated(_Q) ->
%% streams and quorum queues are all replicated
true.
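
After this change a classic queue is never considered replicated, so is_replicated/1 reduces to a queue-type check. A small sketch of the resulting behaviour (illustrative only, not part of the diff; QClassic, QQuorum and QStream are assumed to be amqqueue records of the corresponding types):

    false = rabbit_amqqueue:is_replicated(QClassic), %% classic queues have no mirrors any more
    true  = rabbit_amqqueue:is_replicated(QQuorum),  %% quorum queues are replicated
    true  = rabbit_amqqueue:is_replicated(QStream).  %% streams are replicated
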
@@ -1940,50 +1812,10 @@ is_dead_exclusive(Q) when ?amqqueue_exclusive_owner_is_pid(Q) ->
Pid = amqqueue:get_pid(Q),
not rabbit_process:is_process_alive(Pid).

-spec has_synchronised_mirrors_online(amqqueue:amqqueue()) -> boolean().
has_synchronised_mirrors_online(Q) ->
%% a queue with all mirrors down would have no mirror pids.
%% We treat these as in sync intentionally to avoid false positives.
MirrorPids = amqqueue:get_sync_slave_pids(Q),
MirrorPids =/= [] andalso lists:any(fun rabbit_misc:is_process_alive/1, MirrorPids).

-spec on_node_up(node()) -> 'ok'.

on_node_up(Node) ->
rabbit_db_queue:foreach_transient(maybe_clear_recoverable_node(Node)).

maybe_clear_recoverable_node(Node) ->
fun(Q) ->
SPids = amqqueue:get_sync_slave_pids(Q),
RSs = amqqueue:get_recoverable_slaves(Q),
case lists:member(Node, RSs) of
true ->
%% There is a race with
%% rabbit_mirror_queue_slave:record_synchronised/1 called
%% by the incoming mirror node and this function, called
%% by the leader node. If this function is executed after
%% record_synchronised/1, the node is erroneously removed
%% from the recoverable mirror list.
%%
%% We check if the mirror node's queue PID is alive. If it is
%% the case, then this function is executed after. In this
%% situation, we don't touch the queue record, it is already
%% correct.
DoClearNode =
case [SP || SP <- SPids, node(SP) =:= Node] of
[SPid] -> not rabbit_misc:is_process_alive(SPid);
_ -> true
end,
if
DoClearNode -> RSs1 = RSs -- [Node],
store_queue(
amqqueue:set_recoverable_slaves(Q, RSs1));
true -> ok
end;
false ->
ok
end
end.
on_node_up(_Node) ->
ok.

-spec on_node_down(node()) -> 'ok'.
@@ -19,6 +19,7 @@

-export([init_with_backing_queue_state/7]).

-export([start_link/2]).
-export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2,
handle_info/2, handle_pre_hibernate/1, prioritise_call/4,
prioritise_cast/3, prioritise_info/3, format_message_queue/2]).
@@ -35,8 +36,7 @@
%% This is used to determine when to delete auto-delete queues.
has_had_consumers,
%% backing queue module.
%% for mirrored queues, this will be rabbit_mirror_queue_master.
%% for non-priority and non-mirrored queues, rabbit_variable_queue.
%% for non-priority queues, this will be rabbit_variable_queue.
%% see rabbit_backing_queue.
backing_queue,
%% backing queue state.
@@ -81,11 +81,7 @@
%% e.g. message expiration messages from previously set up timers
%% that may or may not be still valid
args_policy_version,
%% used to discard outdated/superseded policy updates,
%% e.g. when policies are applied concurrently. See
%% https://github.com/rabbitmq/rabbitmq-server/issues/803 for one
%% example.
mirroring_policy_version = 0,
mirroring_policy_version = 0, %% reserved
%% running | flow | idle
status,
%% boolean()
@@ -111,9 +107,6 @@
consumer_utilisation,
consumer_capacity,
memory,
slave_pids,
synchronised_slave_pids,
recoverable_slaves,
state,
garbage_collection
]).
@@ -140,6 +133,26 @@ statistics_keys() -> ?STATISTICS_KEYS ++ rabbit_backing_queue:info_keys().

%%----------------------------------------------------------------------------

-spec start_link(amqqueue:amqqueue(), pid())
-> rabbit_types:ok_pid_or_error().

start_link(Q, Marker) ->
gen_server2:start_link(?MODULE, {Q, Marker}, []).

init({Q, Marker}) ->
case is_process_alive(Marker) of
true ->
%% start
init(Q);
false ->
%% restart
QueueName = amqqueue:get_name(Q),
{ok, Q1} = rabbit_amqqueue:lookup(QueueName),
rabbit_log:error("Restarting crashed ~ts.", [rabbit_misc:rs(QueueName)]),
gen_server2:cast(self(), init),
init(Q1)
end;

init(Q) ->
process_flag(trap_exit, true),
?store_proc_name(amqqueue:get_name(Q)),
@@ -177,7 +190,7 @@ init_it(Recover, From, State = #q{q = Q0}) ->
#q{backing_queue = undefined,
backing_queue_state = undefined,
q = Q} = State,
BQ = backing_queue_module(Q),
BQ = backing_queue_module(),
{_, Terms} = recovery_status(Recover),
BQS = bq_init(BQ, Q, Terms),
%% Rely on terminate to delete the queue.
@@ -202,7 +215,7 @@ init_it2(Recover, From, State = #q{q = Q,
ok = rabbit_memory_monitor:register(
self(), {rabbit_amqqueue,
set_ram_duration_target, [self()]}),
BQ = backing_queue_module(Q1),
BQ = backing_queue_module(),
BQS = bq_init(BQ, Q, TermsOrNew),
send_reply(From, {new, Q}),
recovery_barrier(Barrier),
@@ -235,8 +248,7 @@ matches(new, Q1, Q2) ->
amqqueue:is_auto_delete(Q1) =:= amqqueue:is_auto_delete(Q2) andalso
amqqueue:get_exclusive_owner(Q1) =:= amqqueue:get_exclusive_owner(Q2) andalso
amqqueue:get_arguments(Q1) =:= amqqueue:get_arguments(Q2) andalso
amqqueue:get_pid(Q1) =:= amqqueue:get_pid(Q2) andalso
amqqueue:get_slave_pids(Q1) =:= amqqueue:get_slave_pids(Q2);
amqqueue:get_pid(Q1) =:= amqqueue:get_pid(Q2);
%% FIXME: Should v1 vs. v2 of the same record match?
matches(_, Q, Q) -> true;
matches(_, _Q, _Q1) -> false.
@@ -505,12 +517,9 @@ next_state(State = #q{q = Q,
timed -> {ensure_sync_timer(State1), 0 }
end.

backing_queue_module(Q) ->
case rabbit_mirror_queue_misc:is_mirrored(Q) of
false -> {ok, BQM} = application:get_env(backing_queue_module),
BQM;
true -> rabbit_mirror_queue_master
end.
backing_queue_module() ->
{ok, BQM} = application:get_env(backing_queue_module),
BQM.
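
backing_queue_module/0 no longer special-cases mirrored queues: every classic queue process now uses whatever backing_queue_module is configured in the application environment (rabbit_variable_queue for non-priority queues, per the record comment earlier in this diff). A hedged sketch of the lookup from outside the queue process (illustrative only; it reads the env of the rabbit application directly):

    {ok, BQM} = application:get_env(rabbit, backing_queue_module),
    true = is_atom(BQM). %% typically rabbit_variable_queue
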
ensure_sync_timer(State) ->
rabbit_misc:ensure_timer(State, #q.sync_timer_ref,
@@ -620,28 +629,15 @@ send_or_record_confirm(#delivery{confirm = true,
{immediately, State}
end.

%% This feature was used by `rabbit_amqqueue_process` and
%% `rabbit_mirror_queue_slave` up-to and including RabbitMQ 3.7.x. It is
%% unused in 3.8.x and thus deprecated. We keep it to support in-place
%% upgrades to 3.8.x (i.e. mixed-version clusters), but it is a no-op
%% starting with that version.
send_mandatory(#delivery{mandatory = false}) ->
ok;
send_mandatory(#delivery{mandatory = true,
sender = SenderPid,
msg_seq_no = MsgSeqNo}) ->
gen_server2:cast(SenderPid, {mandatory_received, MsgSeqNo}).

discard(#delivery{confirm = Confirm,
sender = SenderPid,
flow = Flow,
message = Msg}, BQ, BQS, MTC, QName) ->
MsgId = mc:get_annotation(id, Msg),
MTC1 = case Confirm of
true -> confirm_messages([MsgId], MTC, QName);
false -> MTC
end,
BQS1 = BQ:discard(MsgId, SenderPid, Flow, BQS),
BQS1 = BQ:discard(MsgId, SenderPid, BQS),
{BQS1, MTC1}.

run_message_queue(State) -> run_message_queue(false, State).
@@ -665,7 +661,6 @@ run_message_queue(ActiveConsumersChanged, State) ->
end.

attempt_delivery(Delivery = #delivery{sender = SenderPid,
flow = Flow,
message = Message},
Props, Delivered, State = #q{q = Q,
backing_queue = BQ,
@@ -674,7 +669,7 @@ attempt_delivery(Delivery = #delivery{sender = SenderPid,
case rabbit_queue_consumers:deliver(
fun (true) -> {AckTag, BQS1} =
BQ:publish_delivered(
Message, Props, SenderPid, Flow, BQS),
Message, Props, SenderPid, BQS),
{{Message, Delivered, AckTag}, {BQS1, MTC}};
(false) -> {{Message, Delivered, undefined},
discard(Delivery, BQ, BQS, MTC, amqqueue:get_name(Q))}
@@ -698,11 +693,10 @@ maybe_deliver_or_enqueue(Delivery = #delivery{message = Message},
backing_queue_state = BQS,
dlx = DLX,
dlx_routing_key = RK}) ->
send_mandatory(Delivery), %% must do this before confirms
case {will_overflow(Delivery, State), Overflow} of
{true, 'reject-publish'} ->
%% Drop publish and nack to publisher
send_reject_publish(Delivery, Delivered, State);
send_reject_publish(Delivery, State);
{true, 'reject-publish-dlx'} ->
%% Publish to DLX
_ = with_dlx(
@@ -717,7 +711,7 @@ maybe_deliver_or_enqueue(Delivery = #delivery{message = Message},
disabled, 1)
end),
%% Drop publish and nack to publisher
send_reject_publish(Delivery, Delivered, State);
send_reject_publish(Delivery, State);
_ ->
{IsDuplicate, BQS1} = BQ:is_duplicate(Message, BQS),
State1 = State#q{backing_queue_state = BQS1},
@@ -726,7 +720,7 @@ maybe_deliver_or_enqueue(Delivery = #delivery{message = Message},
{true, drop} -> State1;
%% Drop publish and nack to publisher
{true, reject} ->
send_reject_publish(Delivery, Delivered, State1);
send_reject_publish(Delivery, State1);
%% Enqueue and maybe drop head later
false ->
deliver_or_enqueue(Delivery, Delivered, State1)
@@ -734,8 +728,7 @@ maybe_deliver_or_enqueue(Delivery = #delivery{message = Message},
end.

deliver_or_enqueue(Delivery = #delivery{message = Message,
sender = SenderPid,
flow = Flow},
sender = SenderPid},
Delivered,
State = #q{q = Q, backing_queue = BQ}) ->
{Confirm, State1} = send_or_record_confirm(Delivery, State),
@@ -753,7 +746,7 @@ deliver_or_enqueue(Delivery = #delivery{message = Message,
State2#q{backing_queue_state = BQS1, msg_id_to_channel = MTC1};
{undelivered, State2 = #q{backing_queue_state = BQS}} ->

BQS1 = BQ:publish(Message, Props, Delivered, SenderPid, Flow, BQS),
BQS1 = BQ:publish(Message, Props, Delivered, SenderPid, BQS),
{Dropped, State3 = #q{backing_queue_state = BQS2}} =
maybe_drop_head(State2#q{backing_queue_state = BQS1}),
QLen = BQ:len(BQS2),
@@ -802,10 +795,8 @@ maybe_drop_head(AlreadyDropped, State = #q{backing_queue = BQ,

send_reject_publish(#delivery{confirm = true,
sender = SenderPid,
flow = Flow,
msg_seq_no = MsgSeqNo,
message = Msg},
_Delivered,
State = #q{ q = Q,
backing_queue = BQ,
backing_queue_state = BQS,
@@ -815,10 +806,9 @@ send_reject_publish(#delivery{confirm = true,
amqqueue:get_name(Q), MsgSeqNo),

MTC1 = maps:remove(MsgId, MTC),
BQS1 = BQ:discard(MsgId, SenderPid, Flow, BQS),
BQS1 = BQ:discard(MsgId, SenderPid, BQS),
State#q{ backing_queue_state = BQS1, msg_id_to_channel = MTC1 };
send_reject_publish(#delivery{confirm = false},
_Delivered, State) ->
send_reject_publish(#delivery{confirm = false}, State) ->
State.

will_overflow(_, #q{max_length = undefined,
@@ -1136,40 +1126,6 @@ i(consumer_capacity, #q{consumers = Consumers}) ->
i(memory, _) ->
{memory, M} = process_info(self(), memory),
M;
i(slave_pids, #q{q = Q0}) ->
Name = amqqueue:get_name(Q0),
case rabbit_amqqueue:lookup(Name) of
{ok, Q} ->
case rabbit_mirror_queue_misc:is_mirrored(Q) of
false -> '';
true -> amqqueue:get_slave_pids(Q)
end;
{error, not_found} ->
''
end;
i(synchronised_slave_pids, #q{q = Q0}) ->
Name = amqqueue:get_name(Q0),
case rabbit_amqqueue:lookup(Name) of
{ok, Q} ->
case rabbit_mirror_queue_misc:is_mirrored(Q) of
false -> '';
true -> amqqueue:get_sync_slave_pids(Q)
end;
{error, not_found} ->
''
end;
i(recoverable_slaves, #q{q = Q0}) ->
Name = amqqueue:get_name(Q0),
Durable = amqqueue:is_durable(Q0),
case rabbit_amqqueue:lookup(Name) of
{ok, Q} ->
case Durable andalso rabbit_mirror_queue_misc:is_mirrored(Q) of
false -> '';
true -> amqqueue:get_recoverable_slaves(Q)
end;
{error, not_found} ->
''
end;
i(state, #q{status = running}) -> credit_flow:state();
i(state, #q{status = State}) -> State;
i(garbage_collection, _State) ->
@@ -1275,17 +1231,7 @@ prioritise_info(Msg, _Len, #q{q = Q}) ->
end.

handle_call({init, Recover}, From, State) ->
try
init_it(Recover, From, State)
catch
{coordinator_not_started, Reason} ->
%% The GM can shutdown before the coordinator has started up
%% (lost membership or missing group), thus the start_link of
%% the coordinator returns {error, shutdown} as rabbit_amqqueue_process
%% is trapping exists. The master captures this return value and
%% throws the current exception.
{stop, Reason, State}
end;
init_it(Recover, From, State);

handle_call(info, _From, State) ->
reply({ok, infos(info_keys(), State)}, State);
@@ -1457,36 +1403,7 @@ handle_call(purge, _From, State = #q{backing_queue = BQ,

handle_call({requeue, AckTags, ChPid}, From, State) ->
gen_server2:reply(From, ok),
noreply(requeue(AckTags, ChPid, State));

handle_call(sync_mirrors, _From,
State = #q{backing_queue = rabbit_mirror_queue_master,
backing_queue_state = BQS}) ->
S = fun(BQSN) -> State#q{backing_queue_state = BQSN} end,
HandleInfo = fun (Status) ->
receive {'$gen_call', From, {info, Items}} ->
Infos = infos(Items, State#q{status = Status}),
gen_server2:reply(From, {ok, Infos})
after 0 ->
ok
end
end,
EmitStats = fun (Status) ->
rabbit_event:if_enabled(
State, #q.stats_timer,
fun() -> emit_stats(State#q{status = Status}) end)
end,
case rabbit_mirror_queue_master:sync_mirrors(HandleInfo, EmitStats, BQS) of
{ok, BQS1} -> reply(ok, S(BQS1));
{stop, Reason, BQS1} -> {stop, Reason, S(BQS1)}
end;

handle_call(sync_mirrors, _From, State) ->
reply({error, not_mirrored}, State);

%% By definition if we get this message here we do not have to do anything.
handle_call(cancel_sync_mirrors, _From, State) ->
reply({ok, not_syncing}, State).
noreply(requeue(AckTags, ChPid, State)).

new_single_active_consumer_after_basic_cancel(ChPid, ConsumerTag, CurrentSingleActiveConsumer,
_SingleActiveConsumerIsOn = true, Consumers) ->
@@ -1525,17 +1442,7 @@ maybe_notify_consumer_updated(#q{single_active_consumer_on = true} = State, _Pre
end.

handle_cast(init, State) ->
try
init_it({no_barrier, non_clean_shutdown}, none, State)
catch
{coordinator_not_started, Reason} ->
%% The GM can shutdown before the coordinator has started up
%% (lost membership or missing group), thus the start_link of
%% the coordinator returns {error, shutdown} as rabbit_amqqueue_process
%% is trapping exists. The master captures this return value and
%% throws the current exception.
{stop, Reason, State}
end;
init_it({no_barrier, non_clean_shutdown}, none, State);

handle_cast({run_backing_queue, Mod, Fun},
State = #q{backing_queue = BQ, backing_queue_state = BQS}) ->
@@ -1544,25 +1451,18 @@ handle_cast({run_backing_queue, Mod, Fun},
handle_cast({deliver,
Delivery = #delivery{sender = Sender,
flow = Flow},
SlaveWhenPublished},
Delivered},
State = #q{senders = Senders}) ->
Senders1 = case Flow of
%% In both credit_flow:ack/1 we are acking messages to the channel
%% process that sent us the message delivery. See handle_ch_down
%% for more info.
flow -> credit_flow:ack(Sender),
case SlaveWhenPublished of
true -> credit_flow:ack(Sender); %% [0]
false -> ok
end,
pmon:monitor(Sender, Senders);
noflow -> Senders
end,
State1 = State#q{senders = Senders1},
noreply(maybe_deliver_or_enqueue(Delivery, SlaveWhenPublished, State1));
%% [0] The second ack is since the channel thought we were a mirror at
%% the time it published this message, so it used two credits (see
%% rabbit_queue_type:deliver/2).
noreply(maybe_deliver_or_enqueue(Delivery, Delivered, State1));

handle_cast({ack, AckTags, ChPid}, State) ->
noreply(ack(AckTags, ChPid, State));
@@ -1614,16 +1514,6 @@ handle_cast({set_maximum_since_use, Age}, State) ->
ok = file_handle_cache:set_maximum_since_use(Age),
noreply(State);

handle_cast(update_mirroring, State = #q{q = Q,
mirroring_policy_version = Version}) ->
case needs_update_mirroring(Q, Version) of
false ->
noreply(State);
{Policy, NewVersion} ->
State1 = State#q{mirroring_policy_version = NewVersion},
noreply(update_mirroring(Policy, State1))
end;

handle_cast({credit, SessionPid, CTag, Credit, Drain},
#q{q = Q,
backing_queue = BQ,
@@ -1700,7 +1590,7 @@ handle_cast(notify_decorators, State) ->
handle_cast(policy_changed, State = #q{q = Q0}) ->
Name = amqqueue:get_name(Q0),
%% We depend on the #q.q field being up to date at least WRT
%% policy (but not mirror pids) in various places, so when it
%% policy in various places, so when it
%% changes we go and read it from the database again.
%%
%% This also has the side effect of waking us up so we emit a
@@ -1712,7 +1602,7 @@ handle_cast({policy_changed, Q0}, State) ->
Name = amqqueue:get_name(Q0),
PolicyVersion0 = amqqueue:get_policy_version(Q0),
%% We depend on the #q.q field being up to date at least WRT
%% policy (but not mirror pids) in various places, so when it
%% policy in various places, so when it
%% changes we go and read it from the database again.
%%
%% This also has the side effect of waking us up so we emit a
@@ -1723,21 +1613,8 @@ handle_cast({policy_changed, Q0}, State) ->
true ->
noreply(process_args_policy(State#q{q = Q}));
false ->
%% Update just the policy, as pids and mirrors could have been
%% updated simultaneously. A testcase on the `confirm_rejects_SUITE`
%% fails consistently if the internal state is updated directly to `Q0`.
Q1 = amqqueue:set_policy(Q, amqqueue:get_policy(Q0)),
Q2 = amqqueue:set_operator_policy(Q1, amqqueue:get_operator_policy(Q0)),
Q3 = amqqueue:set_policy_version(Q2, PolicyVersion0),
noreply(process_args_policy(State#q{q = Q3}))
end;

handle_cast({sync_start, _, _}, State = #q{q = Q}) ->
Name = amqqueue:get_name(Q),
%% Only a mirror should receive this, it means we are a duplicated master
rabbit_mirror_queue_misc:log_warning(
Name, "Stopping after receiving sync_start from another master", []),
stop(State).
noreply(process_args_policy(State#q{q = Q0}))
end.

handle_info({maybe_expire, Vsn}, State = #q{q = Q, expires = Expiry, args_policy_version = Vsn}) ->
case is_unused(State) of
@@ -1840,16 +1717,7 @@ format_message_queue(Opt, MQ) -> rabbit_misc:format_message_queue(Opt, MQ).

%% TODO: this can be removed after 3.13
format(Q) when ?is_amqqueue(Q) ->
case rabbit_mirror_queue_misc:is_mirrored(Q) of
false ->
[{node, node(amqqueue:get_pid(Q))}];
true ->
Slaves = amqqueue:get_slave_pids(Q),
SSlaves = amqqueue:get_sync_slave_pids(Q),
[{slave_nodes, [node(S) || S <- Slaves]},
{synchronised_slave_nodes, [node(S) || S <- SSlaves]},
{node, node(amqqueue:get_pid(Q))}]
end.
[{node, node(amqqueue:get_pid(Q))}].

-spec is_policy_applicable(amqqueue:amqqueue(), any()) -> boolean().
is_policy_applicable(_Q, _Policy) ->
@@ -1871,58 +1739,6 @@ log_auto_delete(Reason, #q{ q = Q }) ->
Reason,
[QName, VHost]).

needs_update_mirroring(Q, Version) ->
{ok, UpQ} = rabbit_amqqueue:lookup(amqqueue:get_name(Q)),
DBVersion = amqqueue:get_policy_version(UpQ),
case DBVersion > Version of
true -> {rabbit_policy:get(<<"ha-mode">>, UpQ), DBVersion};
false -> false
end.


update_mirroring(Policy, State = #q{backing_queue = BQ}) ->
case update_to(Policy, BQ) of
start_mirroring ->
start_mirroring(State);
stop_mirroring ->
stop_mirroring(State);
ignore ->
State;
update_ha_mode ->
update_ha_mode(State)
end.

update_to(undefined, rabbit_mirror_queue_master) ->
stop_mirroring;
update_to(_, rabbit_mirror_queue_master) ->
update_ha_mode;
update_to(undefined, BQ) when BQ =/= rabbit_mirror_queue_master ->
ignore;
update_to(_, BQ) when BQ =/= rabbit_mirror_queue_master ->
start_mirroring.

start_mirroring(State = #q{backing_queue = BQ,
backing_queue_state = BQS}) ->
%% lookup again to get policy for init_with_existing_bq
{ok, Q} = rabbit_amqqueue:lookup(qname(State)),
true = BQ =/= rabbit_mirror_queue_master, %% assertion
BQ1 = rabbit_mirror_queue_master,
BQS1 = BQ1:init_with_existing_bq(Q, BQ, BQS),
State#q{backing_queue = BQ1,
backing_queue_state = BQS1}.

stop_mirroring(State = #q{backing_queue = BQ,
backing_queue_state = BQS}) ->
BQ = rabbit_mirror_queue_master, %% assertion
{BQ1, BQS1} = BQ:stop_mirroring(BQS),
State#q{backing_queue = BQ1,
backing_queue_state = BQS1}.

update_ha_mode(State) ->
{ok, Q} = rabbit_amqqueue:lookup(qname(State)),
ok = rabbit_mirror_queue_misc:update_mirrors(Q),
State.

confirm_to_sender(Pid, QName, MsgSeqNos) ->
rabbit_classic_queue:confirm_to_sender(Pid, QName, MsgSeqNos).
@@ -9,7 +9,7 @@

-behaviour(supervisor).

-export([start_link/2]).
-export([start_link/1]).

-export([init/1]).

@@ -17,19 +17,19 @@

%%----------------------------------------------------------------------------

-spec start_link(amqqueue:amqqueue(), rabbit_prequeue:start_mode()) ->
-spec start_link(amqqueue:amqqueue()) ->
{'ok', pid(), pid()}.

start_link(Q, StartMode) ->
start_link(Q) ->
Marker = spawn_link(fun() -> receive stop -> ok end end),
StartMFA = {rabbit_prequeue, start_link, [Q, StartMode, Marker]},
StartMFA = {rabbit_amqqueue_process, start_link, [Q, Marker]},
ChildSpec = #{id => rabbit_amqqueue,
start => StartMFA,
restart => transient,
significant => true,
shutdown => ?CLASSIC_QUEUE_WORKER_WAIT,
type => worker,
modules => [rabbit_amqqueue_process, rabbit_mirror_queue_slave]},
modules => [rabbit_amqqueue_process]},
{ok, SupPid} = supervisor:start_link(?MODULE, []),
{ok, QPid} = supervisor:start_child(SupPid, ChildSpec),
unlink(Marker),
@@ -9,7 +9,7 @@

-behaviour(supervisor).

-export([start_link/0, start_queue_process/3]).
-export([start_link/0, start_queue_process/2]).
-export([start_for_vhost/1, stop_for_vhost/1,
find_for_vhost/2, find_for_vhost/1]).

@@ -27,13 +27,12 @@ start_link() ->
supervisor:start_link(?MODULE, []).

-spec start_queue_process
(node(), amqqueue:amqqueue(), 'declare' | 'recovery' | 'slave') ->
pid().
(node(), amqqueue:amqqueue()) -> pid().

start_queue_process(Node, Q, StartMode) ->
start_queue_process(Node, Q) ->
#resource{virtual_host = VHost} = amqqueue:get_name(Q),
{ok, Sup} = find_for_vhost(VHost, Node),
{ok, _SupPid, QPid} = supervisor:start_child(Sup, [Q, StartMode]),
{ok, _SupPid, QPid} = supervisor:start_child(Sup, [Q]),
QPid.

init([]) ->
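Taken together, the two supervisor changes above collapse the classic queue start path: the rabbit_prequeue indirection and the 'declare' | 'recovery' | 'slave' start mode are gone, and rabbit_amqqueue_process is started directly. A minimal sketch of the resulting call chain, not part of this commit; Q is assumed to be a classic-queue amqqueue record and start_and_init_classic_queue is a hypothetical helper:

%% Illustrative sketch only (not in the diff).
start_and_init_classic_queue(Q) ->
    %% The per-vhost supervisor now starts rabbit_amqqueue_process
    %% directly under rabbit_amqqueue_sup; no start mode is passed.
    QPid = rabbit_amqqueue_sup_sup:start_queue_process(node(), Q),
    %% A freshly declared queue is then initialised exactly as the
    %% declare/2 clause shown further down does it.
    gen_server2:call(QPid, {init, new}, infinity).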
@@ -21,10 +21,7 @@
-type ack() :: any().
-type state() :: any().

-type flow() :: 'flow' | 'noflow'.
-type msg_ids() :: [rabbit_types:msg_id()].
-type publish() :: {mc:state(),
rabbit_types:message_properties(), boolean()}.
-type delivered_publish() :: {mc:state(),
rabbit_types:message_properties()}.
-type fetch_result(Ack) ::

@@ -96,28 +93,20 @@

%% Publish a message.
-callback publish(mc:state(),
rabbit_types:message_properties(), boolean(), pid(), flow(),
rabbit_types:message_properties(), boolean(), pid(),
state()) -> state().

%% Like publish/6 but for batches of publishes.
-callback batch_publish([publish()], pid(), flow(), state()) -> state().

%% Called for messages which have already been passed straight
%% out to a client. The queue will be empty for these calls
%% (i.e. saves the round trip through the backing queue).
-callback publish_delivered(mc:state(),
rabbit_types:message_properties(), pid(), flow(),
rabbit_types:message_properties(), pid(),
state())
-> {ack(), state()}.

%% Like publish_delivered/5 but for batches of publishes.
-callback batch_publish_delivered([delivered_publish()], pid(), flow(),
state())
-> {[ack()], state()}.

%% Called to inform the BQ about messages which have reached the
%% queue, but are not going to be further passed to BQ.
-callback discard(rabbit_types:msg_id(), pid(), flow(), state()) -> state().
-callback discard(rabbit_types:msg_id(), pid(), state()) -> state().

%% Return ids of messages which have been confirmed since the last
%% invocation of this function (or initialisation).
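The net effect of this hunk is that the flow() argument disappears from the publish-side callbacks: publish/6 becomes publish/5, publish_delivered/5 becomes publish_delivered/4 and discard/4 becomes discard/3. A minimal sketch of what that means for a module implementing the behaviour (illustrative only, not part of the diff; the bodies are placeholders that simply return the state):

%% Hypothetical stubs showing the new arities; real implementations
%% such as rabbit_variable_queue carry actual logic here.
publish(_Msg, _MsgProps, _IsDelivered, _ChPid, State) ->   %% was publish/6
    State.

publish_delivered(_Msg, _MsgProps, _ChPid, State) ->       %% was publish_delivered/5
    {make_ref(), State}.                                   %% placeholder ack()

discard(_MsgId, _ChPid, State) ->                          %% was discard/4
    State.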
@@ -159,8 +159,7 @@
rejected,
%% used by "one shot RPC" (amq.
reply_consumer,
%% flow | noflow, see rabbitmq-server#114
delivery_flow,
delivery_flow, %% Deprecated since removal of CMQ in 4.0
interceptor_state,
queue_states,
tick_timer,

@@ -489,10 +488,6 @@ init([Channel, ReaderPid, WriterPid, ConnPid, ConnName, Protocol, User, VHost,
?LG_PROCESS_TYPE(channel),
?store_proc_name({ConnName, Channel}),
ok = pg_local:join(rabbit_channels, self()),
Flow = case rabbit_misc:get_env(rabbit, mirroring_flow_control, true) of
true -> flow;
false -> noflow
end,
{ok, {Global0, Prefetch}} = application:get_env(rabbit, default_consumer_prefetch),
Limiter0 = rabbit_limiter:new(LimiterPid),
Global = Global0 andalso is_global_qos_permitted(),

@@ -541,7 +536,6 @@ init([Channel, ReaderPid, WriterPid, ConnPid, ConnName, Protocol, User, VHost,
rejected = [],
confirmed = [],
reply_consumer = none,
delivery_flow = Flow,
interceptor_state = undefined,
queue_states = rabbit_queue_type:init()
},

@@ -700,16 +694,6 @@ handle_cast({force_event_refresh, Ref}, State) ->
Ref),
noreply(rabbit_event:init_stats_timer(State, #ch.stats_timer));

handle_cast({mandatory_received, _MsgSeqNo}, State) ->
%% This feature was used by `rabbit_amqqueue_process` and
%% `rabbit_mirror_queue_slave` up-to and including RabbitMQ 3.7.x.
%% It is unused in 3.8.x and thus deprecated. We keep it to support
%% in-place upgrades to 3.8.x (i.e. mixed-version clusters), but it
%% is a no-op starting with that version.
%%
%% NB: don't call noreply/1 since we don't want to send confirms.
noreply_coalesce(State);

handle_cast({queue_event, QRef, Evt},
#ch{queue_states = QueueStates0} = State0) ->
case rabbit_queue_type:handle_event(QRef, Evt, QueueStates0) of
@@ -84,41 +84,42 @@ declare(Q, Node) when ?amqqueue_is_classic(Q) ->
_ -> Node
end
end,
Node2 = rabbit_mirror_queue_misc:initial_queue_node(Q, Node1),
case rabbit_vhost_sup_sup:get_vhost_sup(VHost, Node2) of
case rabbit_vhost_sup_sup:get_vhost_sup(VHost, Node1) of
{ok, _} ->
gen_server2:call(
rabbit_amqqueue_sup_sup:start_queue_process(Node2, Q, declare),
rabbit_amqqueue_sup_sup:start_queue_process(Node1, Q),
{init, new}, infinity);
{error, Error} ->
{protocol_error, internal_error, "Cannot declare a queue '~ts' on node '~ts': ~255p",
[rabbit_misc:rs(QName), Node2, Error]}
[rabbit_misc:rs(QName), Node1, Error]}
end.

delete(Q, IfUnused, IfEmpty, ActingUser) when ?amqqueue_is_classic(Q) ->
case wait_for_promoted_or_stopped(Q) of
{promoted, Q1} ->
QPid = amqqueue:get_pid(Q1),
delete(Q0, IfUnused, IfEmpty, ActingUser) when ?amqqueue_is_classic(Q0) ->
QName = amqqueue:get_name(Q0),
case rabbit_amqqueue:lookup(QName) of
{ok, Q} ->
QPid = amqqueue:get_pid(Q),
case rabbit_process:is_process_alive(QPid) of
true ->
delegate:invoke(QPid, {gen_server2, call,
[{delete, IfUnused, IfEmpty, ActingUser},
infinity]});
{stopped, Q1} ->
#resource{name = Name, virtual_host = Vhost} = amqqueue:get_name(Q1),
false ->
#resource{name = Name, virtual_host = Vhost} = QName,
case IfEmpty of
true ->
rabbit_log:error("Queue ~ts in vhost ~ts has its master node down and "
"no mirrors available or eligible for promotion. "
rabbit_log:error("Queue ~ts in vhost ~ts is down. "
"The queue may be non-empty. "
"Refusing to force-delete.",
[Name, Vhost]),
{error, not_empty};
false ->
rabbit_log:warning("Queue ~ts in vhost ~ts has its master node is down and "
"no mirrors available or eligible for promotion. "
rabbit_log:warning("Queue ~ts in vhost ~ts is down. "
"Forcing queue deletion.",
[Name, Vhost]),
delete_crashed_internal(Q1, ActingUser),
delete_crashed_internal(Q, ActingUser),
{ok, 0}
end
end;
{error, not_found} ->
%% Assume the queue was deleted

@@ -128,10 +129,6 @@ delete(Q, IfUnused, IfEmpty, ActingUser) when ?amqqueue_is_classic(Q) ->
is_recoverable(Q) when ?is_amqqueue(Q) and ?amqqueue_is_classic(Q) ->
Node = node(),
Node =:= amqqueue:qnode(Q) andalso
%% Terminations on node down will not remove the rabbit_queue
%% record if it is a mirrored queue (such info is now obtained from
%% the policy). Thus, we must check if the local pid is alive
%% - if the record is present - in order to restart.
(not rabbit_db_queue:consistent_exists(amqqueue:get_name(Q))
orelse not rabbit_process:is_process_alive(amqqueue:get_pid(Q))).

@@ -199,20 +196,9 @@ format(Q, _Ctx) when ?is_amqqueue(Q) ->
S ->
S
end,
case rabbit_mirror_queue_misc:is_mirrored(Q) of
false ->
[{type, classic},
{state, State},
{node, node(amqqueue:get_pid(Q))}];
true ->
Slaves = amqqueue:get_slave_pids(Q),
SSlaves = amqqueue:get_sync_slave_pids(Q),
[{type, classic},
{state, State},
{slave_nodes, [node(S) || S <- Slaves]},
{synchronised_slave_nodes, [node(S) || S <- SSlaves]},
{node, node(amqqueue:get_pid(Q))}]
end.
{node, node(amqqueue:get_pid(Q))}].

-spec init(amqqueue:amqqueue()) -> {ok, state()}.
init(Q) when ?amqqueue_is_classic(Q) ->

@@ -332,21 +318,14 @@ handle_event(QName, {reject_publish, SeqNo, _QPid},
Actions = [{rejected, QName, Rejected}],
{ok, State#?STATE{unconfirmed = U}, Actions};
handle_event(QName, {down, Pid, Info}, #?STATE{monitored = Monitored,
pid = MasterPid,
unconfirmed = U0} = State0) ->
State = State0#?STATE{monitored = maps:remove(Pid, Monitored)},
Actions0 = case Pid =:= MasterPid of
true ->
[{queue_down, QName}];
false ->
[]
end,
Actions0 = [{queue_down, QName}],
case rabbit_misc:is_abnormal_exit(Info) of
false when Info =:= normal andalso Pid == MasterPid ->
%% queue was deleted and masterpid is down
false when Info =:= normal ->
%% queue was deleted
{eol, []};
false ->
%% this assumes the mirror isn't part of the active set
MsgSeqNos = maps:keys(
maps:filter(fun (_, #msg_status{pending = Pids}) ->
lists:member(Pid, Pids)

@@ -359,8 +338,7 @@ handle_event(QName, {down, Pid, Info}, #?STATE{monitored = Monitored,
{ok, State#?STATE{unconfirmed = Unconfirmed}, Actions};
true ->
%% any abnormal exit should be considered a full reject of the
%% oustanding message ids - If the message didn't get to all
%% mirrors we have to assume it will never get there
%% oustanding message ids
MsgIds = maps:fold(
fun (SeqNo, Status, Acc) ->
case lists:member(Pid, Status#msg_status.pending) of

@@ -401,8 +379,8 @@ deliver(Qs0, Msg0, Options) ->
Flow = maps:get(flow, Options, noflow),
Confirm = MsgSeqNo /= undefined,

{MPids, SPids, Qs} = qpids(Qs0, Confirm, MsgSeqNo),
Delivery = rabbit_basic:delivery(Mandatory, Confirm, Msg, MsgSeqNo, Flow),
{MPids, Qs} = qpids(Qs0, Confirm, MsgSeqNo),
Delivery = rabbit_basic:delivery(Mandatory, Confirm, Msg, MsgSeqNo),

case Flow of
%% Here we are tracking messages sent by the rabbit_channel

@@ -410,14 +388,11 @@ deliver(Qs0, Msg0, Options) ->
%% dictionary.
flow ->
_ = [credit_flow:send(QPid) || QPid <- MPids],
_ = [credit_flow:send(QPid) || QPid <- SPids],
ok;
noflow -> ok
end,
MMsg = {deliver, Delivery, false},
SMsg = {deliver, Delivery, true},
delegate:invoke_no_result(MPids, {gen_server2, cast, [MMsg]}),
delegate:invoke_no_result(SPids, {gen_server2, cast, [SMsg]}),
{Qs, []}.

-spec dequeue(rabbit_amqqueue:name(), NoAck :: boolean(),

@@ -466,62 +441,27 @@ purge(Q) when ?is_amqqueue(Q) ->

qpids(Qs, Confirm, MsgNo) ->
lists:foldl(
fun ({Q, S0}, {MPidAcc, SPidAcc, Qs0}) ->
fun ({Q, S0}, {MPidAcc, Qs0}) ->
QPid = amqqueue:get_pid(Q),
SPids = amqqueue:get_slave_pids(Q),
QRef = amqqueue:get_name(Q),
S1 = ensure_monitor(QPid, QRef, S0),
S2 = lists:foldl(fun(SPid, Acc) ->
ensure_monitor(SPid, QRef, Acc)
end, S1, SPids),
%% confirm record only if necessary
S = case S2 of
S = case S1 of
#?STATE{unconfirmed = U0} ->
Rec = [QPid | SPids],
Rec = [QPid],
U = case Confirm of
false ->
U0;
true ->
U0#{MsgNo => #msg_status{pending = Rec}}
end,
S2#?STATE{pid = QPid,
S1#?STATE{pid = QPid,
unconfirmed = U};
stateless ->
S2
S1
end,
{[QPid | MPidAcc], SPidAcc ++ SPids,
[{Q, S} | Qs0]}
end, {[], [], []}, Qs).

%% internal-ish
-spec wait_for_promoted_or_stopped(amqqueue:amqqueue()) ->
{promoted, amqqueue:amqqueue()} |
{stopped, amqqueue:amqqueue()} |
{error, not_found}.
wait_for_promoted_or_stopped(Q0) ->
QName = amqqueue:get_name(Q0),
case rabbit_amqqueue:lookup(QName) of
{ok, Q} ->
QPid = amqqueue:get_pid(Q),
SPids = amqqueue:get_slave_pids(Q),
case rabbit_process:is_process_alive(QPid) of
true -> {promoted, Q};
false ->
case lists:any(fun(Pid) ->
rabbit_process:is_process_alive(Pid)
end, SPids) of
%% There is a live slave. May be promoted
true ->
timer:sleep(100),
wait_for_promoted_or_stopped(Q);
%% All slave pids are stopped.
%% No process left for the queue
false -> {stopped, Q}
end
end;
{error, not_found} ->
{error, not_found}
end.
{[QPid | MPidAcc], [{Q, S} | Qs0]}
end, {[], []}, Qs).
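With the slave pids gone, an unconfirmed message is tracked against the single queue pid only, so the pending list that used to be [QPid | SPids] collapses to [QPid]. Illustrative only (not part of the diff):

%% For a confirmed publish with sequence number MsgNo the state now holds
%%     U0#{MsgNo => #msg_status{pending = [QPid]}}
%% so one 'confirm' (or one 'down' for QPid) settles the message, rather
%% than waiting on a confirmation from every mirror.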
-spec delete_crashed(amqqueue:amqqueue()) -> ok.
delete_crashed(Q) ->

@@ -555,7 +495,7 @@ delete_crashed_in_backing_queue(Q) ->
recover_durable_queues(QueuesAndRecoveryTerms) ->
{Results, Failures} =
gen_server2:mcall(
[{rabbit_amqqueue_sup_sup:start_queue_process(node(), Q, recovery),
[{rabbit_amqqueue_sup_sup:start_queue_process(node(), Q),
{init, {self(), Terms}}} || {Q, Terms} <- QueuesAndRecoveryTerms]),
[rabbit_log:error("Queue ~tp failed to initialise: ~tp",
[Pid, Error]) || {Pid, Error} <- Failures],
@@ -317,10 +317,7 @@ list_for_count(VHostName) ->

list_for_count_in_mnesia(VHostName) ->
%% this is certainly suboptimal but there is no way to count
%% things using a secondary index in Mnesia. Our counter-table-per-node
%% won't work here because with master migration of mirrored queues
%% the "ownership" of queues by nodes becomes a non-trivial problem
%% that requires a proper consensus algorithm.
%% things using a secondary index in Mnesia.
list_with_possible_retry_in_mnesia(
fun() ->
length(mnesia:dirty_index_read(?MNESIA_TABLE,

@@ -601,8 +598,7 @@ update_decorators_in_khepri(QName, Decorators) ->
Path = khepri_queue_path(QName),
Ret1 = rabbit_khepri:adv_get(Path),
case Ret1 of
{ok, #{data := Q0, payload_version := Vsn}} ->
Q1 = amqqueue:reset_mirroring_and_decorators(Q0),
{ok, #{data := Q1, payload_version := Vsn}} ->
Q2 = amqqueue:set_decorators(Q1, Decorators),
UpdatePath = khepri_path:combine_with_conditions(
Path, [#if_payload_version{version = Vsn}]),

@@ -842,7 +838,7 @@ create_or_get(Q) ->
}).

create_or_get_in_mnesia(Q) ->
DurableQ = amqqueue:reset_mirroring_and_decorators(Q),
DurableQ = amqqueue:reset_decorators(Q),
QueueName = amqqueue:get_name(Q),
rabbit_mnesia:execute_mnesia_transaction(
fun () ->

@@ -879,7 +875,7 @@ create_or_get_in_khepri(Q) ->
-spec set(Queue) -> ok when
Queue :: amqqueue:amqqueue().
%% @doc Writes a queue record. If the queue is durable, it writes both instances:
%% durable and transient. For the durable one, it resets mirrors and decorators.
%% durable and transient. For the durable one, it resets decorators.
%% The transient one is left as it is.
%%
%% @private

@@ -891,7 +887,7 @@ set(Q) ->
}).

set_in_mnesia(Q) ->
DurableQ = amqqueue:reset_mirroring_and_decorators(Q),
DurableQ = amqqueue:reset_decorators(Q),
rabbit_mnesia:execute_mnesia_transaction(
fun () ->
set_in_mnesia_tx(DurableQ, Q)

@@ -1220,7 +1216,6 @@ get_durable_in_mnesia_tx(Name) ->
[Q] -> {ok, Q}
end.

%% TODO this should be internal, it's here because of mirrored queues
get_in_khepri_tx(Name) ->
case khepri_tx:get(khepri_queue_path(Name)) of
{ok, X} -> [X];
@@ -30,9 +30,8 @@ clear_queue_read_cache([]) ->
ok;
clear_queue_read_cache([Q | Rest]) when ?is_amqqueue(Q) ->
MPid = amqqueue:get_pid(Q),
SPids = amqqueue:get_slave_pids(Q),
%% Limit the action to the current node.
Pids = [P || P <- [MPid | SPids], node(P) =:= node()],
Pids = [P || P <- [MPid], node(P) =:= node()],
%% This function is executed in the context of the backing queue
%% process because the read buffer is stored in the process
%% dictionary.
@@ -34,7 +34,6 @@
primary_replica_transfer_candidate_nodes/0,
random_primary_replica_transfer_candidate_node/2,
transfer_leadership_of_quorum_queues/1,
transfer_leadership_of_classic_mirrored_queues/1,
table_definitions/0
]).

@@ -225,44 +224,6 @@ transfer_leadership_of_metadata_store(TransferCandidates) ->
rabbit_log:warning("Skipping leadership transfer of metadata store: ~p", [Error])
end.

-spec transfer_leadership_of_classic_mirrored_queues([node()]) -> ok.
%% This function is no longer used by maintenance mode. We retain it in case
%% classic mirrored queue leadership transfer would be reconsidered.
%%
%% With a lot of CMQs in a cluster, the transfer procedure can take prohibitively long
%% for a pre-upgrade task.
transfer_leadership_of_classic_mirrored_queues([]) ->
rabbit_log:warning("Skipping leadership transfer of classic mirrored queues: no candidate "
"(online, not under maintenance) nodes to transfer to!");
transfer_leadership_of_classic_mirrored_queues(TransferCandidates) ->
Queues = rabbit_amqqueue:list_local_mirrored_classic_queues(),
ReadableCandidates = readable_candidate_list(TransferCandidates),
rabbit_log:info("Will transfer leadership of ~b classic mirrored queues hosted on this node to these peer nodes: ~ts",
[length(Queues), ReadableCandidates]),
[begin
Name = amqqueue:get_name(Q),
ExistingReplicaNodes = [node(Pid) || Pid <- amqqueue:get_sync_slave_pids(Q)],
rabbit_log:debug("Local ~ts has replicas on nodes ~ts",
[rabbit_misc:rs(Name), readable_candidate_list(ExistingReplicaNodes)]),
case random_primary_replica_transfer_candidate_node(TransferCandidates, ExistingReplicaNodes) of
{ok, Pick} ->
rabbit_log:debug("Will transfer leadership of local ~ts. Planned target node: ~ts",
[rabbit_misc:rs(Name), Pick]),
case rabbit_mirror_queue_misc:migrate_leadership_to_existing_replica(Q, Pick) of
{migrated, NewPrimary} ->
rabbit_log:debug("Successfully transferred leadership of queue ~ts to node ~ts",
[rabbit_misc:rs(Name), NewPrimary]);
Other ->
rabbit_log:warning("Could not transfer leadership of queue ~ts: ~tp",
[rabbit_misc:rs(Name), Other])
end;
undefined ->
rabbit_log:warning("Could not transfer leadership of queue ~ts: no suitable candidates?",
[Name])
end
end || Q <- Queues],
rabbit_log:info("Leadership transfer for local classic mirrored queues is complete").

-spec transfer_leadership_of_stream_coordinator([node()]) -> ok.
transfer_leadership_of_stream_coordinator([]) ->
rabbit_log:warning("Skipping leadership transfer of stream coordinator: no candidate "

@@ -365,6 +326,3 @@ ok_or_first_error(ok, Acc) ->
Acc;
ok_or_first_error({error, _} = Err, _Acc) ->
Err.

readable_candidate_list(Nodes) ->
string:join(lists:map(fun rabbit_data_coercion:to_list/1, Nodes), ", ").
@ -1,468 +0,0 @@
|
|||
%% This Source Code Form is subject to the terms of the Mozilla Public
|
||||
%% License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
%%
|
||||
%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
|
||||
%%
|
||||
|
||||
-module(rabbit_mirror_queue_coordinator).
|
||||
|
||||
-export([start_link/4, get_gm/1, ensure_monitoring/2]).
|
||||
|
||||
-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
|
||||
code_change/3, handle_pre_hibernate/1]).
|
||||
|
||||
-export([joined/2, members_changed/3, handle_msg/3, handle_terminate/2]).
|
||||
|
||||
-behaviour(gen_server2).
|
||||
-behaviour(gm).
|
||||
|
||||
-include_lib("rabbit_common/include/rabbit.hrl").
|
||||
-include("amqqueue.hrl").
|
||||
-include("gm_specs.hrl").
|
||||
|
||||
-record(state, { q,
|
||||
gm,
|
||||
monitors,
|
||||
death_fun,
|
||||
depth_fun
|
||||
}).
|
||||
|
||||
%%----------------------------------------------------------------------------
|
||||
%%
|
||||
%% Mirror Queues
|
||||
%%
|
||||
%% A queue with mirrors consists of the following:
|
||||
%%
|
||||
%% #amqqueue{ pid, slave_pids }
|
||||
%% | |
|
||||
%% +----------+ +-------+--------------+-----------...etc...
|
||||
%% | | |
|
||||
%% V V V
|
||||
%% amqqueue_process---+ mirror-----+ mirror-----+ ...etc...
|
||||
%% | BQ = master----+ | | BQ = vq | | BQ = vq |
|
||||
%% | | BQ = vq | | +-+-------+ +-+-------+
|
||||
%% | +-+-------+ | | |
|
||||
%% +-++-----|---------+ | | (some details elided)
|
||||
%% || | | |
|
||||
%% || coordinator-+ | |
|
||||
%% || +-+---------+ | |
|
||||
%% || | | |
|
||||
%% || gm-+ -- -- -- -- gm-+- -- -- -- gm-+- -- --...etc...
|
||||
%% || +--+ +--+ +--+
|
||||
%% ||
|
||||
%% consumers
|
||||
%%
|
||||
%% The master is merely an implementation of bq, and thus is invoked
|
||||
%% through the normal bq interface by the amqqueue_process. The mirrors
|
||||
%% meanwhile are processes in their own right (as is the
|
||||
%% coordinator). The coordinator and all mirrors belong to the same gm
|
||||
%% group. Every member of a gm group receives messages sent to the gm
|
||||
%% group. Because the master is the bq of amqqueue_process, it doesn't
|
||||
%% have sole control over its mailbox, and as a result, the master
|
||||
%% itself cannot be passed messages directly (well, it could by via
|
||||
%% the amqqueue:run_backing_queue callback but that would induce
|
||||
%% additional unnecessary loading on the master queue process), yet it
|
||||
%% needs to react to gm events, such as the death of mirrors. Thus the
|
||||
%% master creates the coordinator, and it is the coordinator that is
|
||||
%% the gm callback module and event handler for the master.
|
||||
%%
|
||||
%% Consumers are only attached to the master. Thus the master is
|
||||
%% responsible for informing all mirrors when messages are fetched from
|
||||
%% the bq, when they're acked, and when they're requeued.
|
||||
%%
|
||||
%% The basic goal is to ensure that all mirrors performs actions on
|
||||
%% their bqs in the same order as the master. Thus the master
|
||||
%% intercepts all events going to its bq, and suitably broadcasts
|
||||
%% these events on the gm. The mirrors thus receive two streams of
|
||||
%% events: one stream is via the gm, and one stream is from channels
|
||||
%% directly. Whilst the stream via gm is guaranteed to be consistently
|
||||
%% seen by all mirrors , the same is not true of the stream via
|
||||
%% channels. For example, in the event of an unexpected death of a
|
||||
%% channel during a publish, only some of the mirrors may receive that
|
||||
%% publish. As a result of this problem, the messages broadcast over
|
||||
%% the gm contain published content, and thus mirrors can operate
|
||||
%% successfully on messages that they only receive via the gm.
|
||||
%%
|
||||
%% The key purpose of also sending messages directly from the channels
|
||||
%% to the mirrors is that without this, in the event of the death of
|
||||
%% the master, messages could be lost until a suitable mirror is
|
||||
%% promoted. However, that is not the only reason. A mirror cannot send
|
||||
%% confirms for a message until it has seen it from the
|
||||
%% channel. Otherwise, it might send a confirm to a channel for a
|
||||
%% message that it might *never* receive from that channel. This can
|
||||
%% happen because new mirrors join the gm ring (and thus receive
|
||||
%% messages from the master) before inserting themselves in the
|
||||
%% queue's mnesia record (which is what channels look at for routing).
|
||||
%% As it turns out, channels will simply ignore such bogus confirms,
|
||||
%% but relying on that would introduce a dangerously tight coupling.
|
||||
%%
|
||||
%% Hence the mirrors have to wait until they've seen both the publish
|
||||
%% via gm, and the publish via the channel before they issue the
|
||||
%% confirm. Either form of publish can arrive first, and a mirror can
|
||||
%% be upgraded to the master at any point during this
|
||||
%% process. Confirms continue to be issued correctly, however.
|
||||
%%
|
||||
%% Because the mirror is a full process, it impersonates parts of the
|
||||
%% amqqueue API. However, it does not need to implement all parts: for
|
||||
%% example, no ack or consumer-related message can arrive directly at
|
||||
%% a mirror from a channel: it is only publishes that pass both
|
||||
%% directly to the mirrors and go via gm.
|
||||
%%
|
||||
%% Slaves can be added dynamically. When this occurs, there is no
|
||||
%% attempt made to sync the current contents of the master with the
|
||||
%% new mirror, thus the mirror will start empty, regardless of the state
|
||||
%% of the master. Thus the mirror needs to be able to detect and ignore
|
||||
%% operations which are for messages it has not received: because of
|
||||
%% the strict FIFO nature of queues in general, this is
|
||||
%% straightforward - all new publishes that the new mirror receives via
|
||||
%% gm should be processed as normal, but fetches which are for
|
||||
%% messages the mirror has never seen should be ignored. Similarly,
|
||||
%% acks for messages the mirror never fetched should be
|
||||
%% ignored. Similarly, we don't republish rejected messages that we
|
||||
%% haven't seen. Eventually, as the master is consumed from, the
|
||||
%% messages at the head of the queue which were there before the slave
|
||||
%% joined will disappear, and the mirror will become fully synced with
|
||||
%% the state of the master.
|
||||
%%
|
||||
%% The detection of the sync-status is based on the depth of the BQs,
|
||||
%% where the depth is defined as the sum of the length of the BQ (as
|
||||
%% per BQ:len) and the messages pending an acknowledgement. When the
|
||||
%% depth of the mirror is equal to the master's, then the mirror is
|
||||
%% synchronised. We only store the difference between the two for
|
||||
%% simplicity. Comparing the length is not enough since we need to
|
||||
%% take into account rejected messages which will make it back into
|
||||
%% the master queue but can't go back in the mirror, since we don't
|
||||
%% want "holes" in the mirror queue. Note that the depth, and the
|
||||
%% length likewise, must always be shorter on the mirror - we assert
|
||||
%% that in various places. In case mirrors are joined to an empty queue
|
||||
%% which only goes on to receive publishes, they start by asking the
|
||||
%% master to broadcast its depth. This is enough for mirrors to always
|
||||
%% be able to work out when their head does not differ from the master
|
||||
%% (and is much simpler and cheaper than getting the master to hang on
|
||||
%% to the guid of the msg at the head of its queue). When a mirror is
|
||||
%% promoted to a master, it unilaterally broadcasts its depth, in
|
||||
%% order to solve the problem of depth requests from new mirrors being
|
||||
%% unanswered by a dead master.
|
||||
%%
|
||||
%% Obviously, due to the async nature of communication across gm, the
|
||||
%% mirrors can fall behind. This does not matter from a sync pov: if
|
||||
%% they fall behind and the master dies then a) no publishes are lost
|
||||
%% because all publishes go to all mirrors anyway; b) the worst that
|
||||
%% happens is that acks get lost and so messages come back to
|
||||
%% life. This is no worse than normal given you never get confirmation
|
||||
%% that an ack has been received (not quite true with QoS-prefetch,
|
||||
%% but close enough for jazz).
|
||||
%%
|
||||
%% Because acktags are issued by the bq independently, and because
|
||||
%% there is no requirement for the master and all mirrors to use the
|
||||
%% same bq, all references to msgs going over gm is by msg_id. Thus
|
||||
%% upon acking, the master must convert the acktags back to msg_ids
|
||||
%% (which happens to be what bq:ack returns), then sends the msg_ids
|
||||
%% over gm, the mirrors must convert the msg_ids to acktags (a mapping
|
||||
%% the mirrors themselves must maintain).
|
||||
%%
|
||||
%% When the master dies, a mirror gets promoted. This will be the
|
||||
%% eldest mirror, and thus the hope is that that mirror is most likely
|
||||
%% to be sync'd with the master. The design of gm is that the
|
||||
%% notification of the death of the master will only appear once all
|
||||
%% messages in-flight from the master have been fully delivered to all
|
||||
%% members of the gm group. Thus at this point, the mirror that gets
|
||||
%% promoted cannot broadcast different events in a different order
|
||||
%% than the master for the same msgs: there is no possibility for the
|
||||
%% same msg to be processed by the old master and the new master - if
|
||||
%% it was processed by the old master then it will have been processed
|
||||
%% by the mirror before the mirror was promoted, and vice versa.
|
||||
%%
|
||||
%% Upon promotion, all msgs pending acks are requeued as normal, the
|
||||
%% mirror constructs state suitable for use in the master module, and
|
||||
%% then dynamically changes into an amqqueue_process with the master
|
||||
%% as the bq, and the slave's bq as the master's bq. Thus the very
|
||||
%% same process that was the mirror is now a full amqqueue_process.
|
||||
%%
|
||||
%% It is important that we avoid memory leaks due to the death of
|
||||
%% senders (i.e. channels) and partial publications. A sender
|
||||
%% publishing a message may fail mid way through the publish and thus
|
||||
%% only some of the mirrors will receive the message. We need the
|
||||
%% mirrors to be able to detect this and tidy up as necessary to avoid
|
||||
%% leaks. If we just had the master monitoring all senders then we
|
||||
%% would have the possibility that a sender appears and only sends the
|
||||
%% message to a few of the mirrors before dying. Those mirrors would
|
||||
%% then hold on to the message, assuming they'll receive some
|
||||
%% instruction eventually from the master. Thus we have both mirrors
|
||||
%% and the master monitor all senders they become aware of. But there
|
||||
%% is a race: if the mirror receives a DOWN of a sender, how does it
|
||||
%% know whether or not the master is going to send it instructions
|
||||
%% regarding those messages?
|
||||
%%
|
||||
%% Whilst the master monitors senders, it can't access its mailbox
|
||||
%% directly, so it delegates monitoring to the coordinator. When the
|
||||
%% coordinator receives a DOWN message from a sender, it informs the
|
||||
%% master via a callback. This allows the master to do any tidying
|
||||
%% necessary, but more importantly allows the master to broadcast a
|
||||
%% sender_death message to all the mirrors , saying the sender has
|
||||
%% died. Once the mirrors receive the sender_death message, they know
|
||||
%% that they're not going to receive any more instructions from the gm
|
||||
%% regarding that sender. However, it is possible that the coordinator
|
||||
%% receives the DOWN and communicates that to the master before the
|
||||
%% master has finished receiving and processing publishes from the
|
||||
%% sender. This turns out not to be a problem: the sender has actually
|
||||
%% died, and so will not need to receive confirms or other feedback,
|
||||
%% and should further messages be "received" from the sender, the
|
||||
%% master will ask the coordinator to set up a new monitor, and
|
||||
%% will continue to process the messages normally. Slaves may thus
|
||||
%% receive publishes via gm from previously declared "dead" senders,
|
||||
%% but again, this is fine: should the mirror have just thrown out the
|
||||
%% message it had received directly from the sender (due to receiving
|
||||
%% a sender_death message via gm), it will be able to cope with the
|
||||
%% publication purely from the master via gm.
|
||||
%%
|
||||
%% When a mirror receives a DOWN message for a sender, if it has not
|
||||
%% received the sender_death message from the master via gm already,
|
||||
%% then it will wait 20 seconds before broadcasting a request for
|
||||
%% confirmation from the master that the sender really has died.
|
||||
%% Should a sender have only sent a publish to mirrors , this allows
|
||||
%% mirrors to inform the master of the previous existence of the
|
||||
%% sender. The master will thus monitor the sender, receive the DOWN,
|
||||
%% and subsequently broadcast the sender_death message, allowing the
|
||||
%% mirrors to tidy up. This process can repeat for the same sender:
|
||||
%% consider one mirror receives the publication, then the DOWN, then
|
||||
%% asks for confirmation of death, then the master broadcasts the
|
||||
%% sender_death message. Only then does another mirror receive the
|
||||
%% publication and thus set up its monitoring. Eventually that slave
|
||||
%% too will receive the DOWN, ask for confirmation and the master will
|
||||
%% monitor the sender again, receive another DOWN, and send out
|
||||
%% another sender_death message. Given the 20 second delay before
|
||||
%% requesting death confirmation, this is highly unlikely, but it is a
|
||||
%% possibility.
|
||||
%%
|
||||
%% When the 20 second timer expires, the mirror first checks to see
|
||||
%% whether it still needs confirmation of the death before requesting
|
||||
%% it. This prevents unnecessary traffic on gm as it allows one
|
||||
%% broadcast of the sender_death message to satisfy many mirrors.
|
||||
%%
|
||||
%% If we consider the promotion of a mirror at this point, we have two
|
||||
%% possibilities: that of the mirror that has received the DOWN and is
|
||||
%% thus waiting for confirmation from the master that the sender
|
||||
%% really is down; and that of the mirror that has not received the
|
||||
%% DOWN. In the first case, in the act of promotion to master, the new
|
||||
%% master will monitor again the dead sender, and after it has
|
||||
%% finished promoting itself, it should find another DOWN waiting,
|
||||
%% which it will then broadcast. This will allow mirrors to tidy up as
|
||||
%% normal. In the second case, we have the possibility that
|
||||
%% confirmation-of-sender-death request has been broadcast, but that
|
||||
%% it was broadcast before the master failed, and that the mirror being
|
||||
%% promoted does not know anything about that sender, and so will not
|
||||
%% monitor it on promotion. Thus a mirror that broadcasts such a
|
||||
%% request, at the point of broadcasting it, recurses, setting another
|
||||
%% 20 second timer. As before, on expiry of the timer, the mirrors
|
||||
%% checks to see whether it still has not received a sender_death
|
||||
%% message for the dead sender, and if not, broadcasts a death
|
||||
%% confirmation request. Thus this ensures that even when a master
|
||||
%% dies and the new mirror has no knowledge of the dead sender, it will
|
||||
%% eventually receive a death confirmation request, shall monitor the
|
||||
%% dead sender, receive the DOWN and broadcast the sender_death
|
||||
%% message.
|
||||
%%
|
||||
%% The preceding commentary deals with the possibility of mirrors
|
||||
%% receiving publications from senders which the master does not, and
|
||||
%% the need to prevent memory leaks in such scenarios. The inverse is
|
||||
%% also possible: a partial publication may cause only the master to
|
||||
%% receive a publication. It will then publish the message via gm. The
|
||||
%% mirrors will receive it via gm, will publish it to their BQ and will
|
||||
%% set up monitoring on the sender. They will then receive the DOWN
|
||||
%% message and the master will eventually publish the corresponding
|
||||
%% sender_death message. The mirror will then be able to tidy up its
|
||||
%% state as normal.
|
||||
%%
|
||||
%% Recovery of mirrored queues is straightforward: as nodes die, the
|
||||
%% remaining nodes record this, and eventually a situation is reached
|
||||
%% in which only one node is alive, which is the master. This is the
|
||||
%% only node which, upon recovery, will resurrect a mirrored queue:
|
||||
%% nodes which die and then rejoin as a mirror will start off empty as
|
||||
%% if they have no mirrored content at all. This is not surprising: to
|
||||
%% achieve anything more sophisticated would require the master and
|
||||
%% recovering mirror to be able to check to see whether they agree on
|
||||
%% the last seen state of the queue: checking depth alone is not
|
||||
%% sufficient in this case.
|
||||
%%
|
||||
%% For more documentation see the comments in bug 23554.
|
||||
%%
|
||||
%%----------------------------------------------------------------------------
|
||||
|
||||
-spec start_link
|
||||
(amqqueue:amqqueue(), pid() | 'undefined',
|
||||
rabbit_mirror_queue_master:death_fun(),
|
||||
rabbit_mirror_queue_master:depth_fun()) ->
|
||||
rabbit_types:ok_pid_or_error().
|
||||
|
||||
start_link(Queue, GM, DeathFun, DepthFun) ->
|
||||
gen_server2:start_link(?MODULE, [Queue, GM, DeathFun, DepthFun], []).
|
||||
|
||||
-spec get_gm(pid()) -> pid().
|
||||
|
||||
get_gm(CPid) ->
|
||||
gen_server2:call(CPid, get_gm, infinity).
|
||||
|
||||
-spec ensure_monitoring(pid(), [pid()]) -> 'ok'.
|
||||
|
||||
ensure_monitoring(CPid, Pids) ->
|
||||
gen_server2:cast(CPid, {ensure_monitoring, Pids}).
|
||||
|
||||
%% ---------------------------------------------------------------------------
|
||||
%% gen_server
|
||||
%% ---------------------------------------------------------------------------
|
||||
|
||||
init([Q, GM, DeathFun, DepthFun]) when ?is_amqqueue(Q) ->
|
||||
QueueName = amqqueue:get_name(Q),
|
||||
?store_proc_name(QueueName),
|
||||
GM1 = case GM of
|
||||
undefined ->
|
||||
{ok, GM2} = gm:start_link(
|
||||
QueueName, ?MODULE, [self()],
|
||||
fun rabbit_mnesia:execute_mnesia_transaction/1),
|
||||
receive {joined, GM2, _Members} ->
|
||||
ok
|
||||
end,
|
||||
GM2;
|
||||
_ ->
|
||||
true = link(GM),
|
||||
GM
|
||||
end,
|
||||
{ok, #state { q = Q,
|
||||
gm = GM1,
|
||||
monitors = pmon:new(),
|
||||
death_fun = DeathFun,
|
||||
depth_fun = DepthFun },
|
||||
hibernate,
|
||||
{backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
|
||||
|
||||
handle_call(get_gm, _From, State = #state { gm = GM }) ->
|
||||
reply(GM, State).
|
||||
|
||||
handle_cast({gm_deaths, DeadGMPids}, State = #state{q = Q}) when ?amqqueue_pid_runs_on_local_node(Q) ->
|
||||
QueueName = amqqueue:get_name(Q),
|
||||
MPid = amqqueue:get_pid(Q),
|
||||
case rabbit_mirror_queue_misc:remove_from_queue(
|
||||
QueueName, MPid, DeadGMPids) of
|
||||
{ok, MPid, DeadPids, ExtraNodes} ->
|
||||
rabbit_mirror_queue_misc:report_deaths(MPid, true, QueueName,
|
||||
DeadPids),
|
||||
rabbit_mirror_queue_misc:add_mirrors(QueueName, ExtraNodes, async),
|
||||
noreply(State);
|
||||
{ok, _MPid0, DeadPids, _ExtraNodes} ->
|
||||
%% see rabbitmq-server#914;
|
||||
%% Different mirror is now master, stop current coordinator normally.
|
||||
%% Initiating queue is now mirror and the least we could do is report
|
||||
%% deaths which we 'think' we saw.
|
||||
%% NOTE: Reported deaths here, could be inconsistent.
|
||||
rabbit_mirror_queue_misc:report_deaths(MPid, false, QueueName,
|
||||
DeadPids),
|
||||
{stop, shutdown, State};
|
||||
{error, not_found} ->
|
||||
{stop, normal, State};
|
||||
{error, {not_synced, _}} ->
|
||||
rabbit_log:error("Mirror queue ~tp in unexpected state."
|
||||
" Promoted to master but already a master.",
|
||||
[QueueName]),
|
||||
error(unexpected_mirrored_state)
|
||||
end;
|
||||
|
||||
handle_cast(request_depth, State = #state{depth_fun = DepthFun, q = QArg}) when ?is_amqqueue(QArg) ->
|
||||
QName = amqqueue:get_name(QArg),
|
||||
MPid = amqqueue:get_pid(QArg),
|
||||
case rabbit_amqqueue:lookup(QName) of
|
||||
{ok, QFound} when ?amqqueue_pid_equals(QFound, MPid) ->
|
||||
ok = DepthFun(),
|
||||
noreply(State);
|
||||
_ ->
|
||||
{stop, shutdown, State}
|
||||
end;
|
||||
|
||||
handle_cast({ensure_monitoring, Pids}, State = #state { monitors = Mons }) ->
|
||||
noreply(State #state { monitors = pmon:monitor_all(Pids, Mons) });
|
||||
|
||||
handle_cast({delete_and_terminate, {shutdown, ring_shutdown}}, State) ->
|
||||
{stop, normal, State};
|
||||
handle_cast({delete_and_terminate, Reason}, State) ->
|
||||
{stop, Reason, State}.
|
||||
|
||||
handle_info({'DOWN', _MonitorRef, process, Pid, _Reason},
|
||||
State = #state { monitors = Mons,
|
||||
death_fun = DeathFun }) ->
|
||||
noreply(case pmon:is_monitored(Pid, Mons) of
|
||||
false -> State;
|
||||
true -> ok = DeathFun(Pid),
|
||||
State #state { monitors = pmon:erase(Pid, Mons) }
|
||||
end);
|
||||
|
||||
handle_info(Msg, State) ->
|
||||
{stop, {unexpected_info, Msg}, State}.
|
||||
|
||||
terminate(_Reason, #state{}) ->
|
||||
ok.
|
||||
|
||||
code_change(_OldVsn, State, _Extra) ->
|
||||
{ok, State}.
|
||||
|
||||
handle_pre_hibernate(State = #state { gm = GM }) ->
|
||||
%% Since GM notifications of deaths are lazy we might not get a
|
||||
%% timely notification of mirror death if policy changes when
|
||||
%% everything is idle. So cause some activity just before we
|
||||
%% sleep. This won't cause us to go into perpetual motion as the
|
||||
%% heartbeat does not wake up coordinator or mirrors.
|
||||
gm:broadcast(GM, hibernate_heartbeat),
|
||||
{hibernate, State}.
|
||||
|
||||
%% ---------------------------------------------------------------------------
|
||||
%% GM
|
||||
%% ---------------------------------------------------------------------------
|
||||
|
||||
-spec joined(args(), members()) -> callback_result().
|
||||
|
||||
joined([CPid], Members) ->
|
||||
CPid ! {joined, self(), Members},
|
||||
ok.
|
||||
|
||||
-spec members_changed(args(), members(),members()) -> callback_result().
|
||||
|
||||
members_changed([_CPid], _Births, []) ->
|
||||
ok;
|
||||
members_changed([CPid], _Births, Deaths) ->
|
||||
ok = gen_server2:cast(CPid, {gm_deaths, Deaths}).
|
||||
|
||||
-spec handle_msg(args(), pid(), any()) -> callback_result().
|
||||
|
||||
handle_msg([CPid], _From, request_depth = Msg) ->
|
||||
ok = gen_server2:cast(CPid, Msg);
|
||||
handle_msg([CPid], _From, {ensure_monitoring, _Pids} = Msg) ->
|
||||
ok = gen_server2:cast(CPid, Msg);
|
||||
handle_msg([_CPid], _From, {delete_and_terminate, _Reason}) ->
|
||||
%% We tell GM to stop, but we don't instruct the coordinator to
|
||||
%% stop yet. The GM will first make sure all pending messages were
|
||||
%% actually delivered. Then it calls handle_terminate/2 below so the
|
||||
%% coordinator is stopped.
|
||||
%%
|
||||
%% If we stop the coordinator right now, remote mirrors could see the
|
||||
%% coordinator DOWN before delete_and_terminate was delivered to all
|
||||
%% GMs. One of those GM would be promoted as the master, and this GM
|
||||
%% would hang forever, waiting for other GMs to stop.
|
||||
{stop, {shutdown, ring_shutdown}};
|
||||
handle_msg([_CPid], _From, _Msg) ->
|
||||
ok.
|
||||
|
||||
-spec handle_terminate(args(), term()) -> any().
|
||||
|
||||
handle_terminate([CPid], Reason) ->
|
||||
ok = gen_server2:cast(CPid, {delete_and_terminate, Reason}),
|
||||
ok.
|
||||
|
||||
%% ---------------------------------------------------------------------------
|
||||
%% Others
|
||||
%% ---------------------------------------------------------------------------
|
||||
|
||||
noreply(State) ->
|
||||
{noreply, State, hibernate}.
|
||||
|
||||
reply(Reply, State) ->
|
||||
{reply, Reply, State, hibernate}.
|
|
@ -1,624 +0,0 @@
|
|||
%% This Source Code Form is subject to the terms of the Mozilla Public
|
||||
%% License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
%%
|
||||
%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
|
||||
%%
|
||||
|
||||
-module(rabbit_mirror_queue_master).
|
||||
|
||||
-export([init/3, terminate/2, delete_and_terminate/2,
|
||||
purge/1, purge_acks/1, publish/6, publish_delivered/5,
|
||||
batch_publish/4, batch_publish_delivered/4,
|
||||
discard/4, fetch/2, drop/2, ack/2, requeue/2, ackfold/4, fold/3,
|
||||
len/1, is_empty/1, depth/1, drain_confirmed/1,
|
||||
dropwhile/2, fetchwhile/4, set_ram_duration_target/2, ram_duration/1,
|
||||
needs_timeout/1, timeout/1, handle_pre_hibernate/1, resume/1,
|
||||
msg_rates/1, info/2, invoke/3, is_duplicate/2, set_queue_mode/2,
|
||||
set_queue_version/2,
|
||||
zip_msgs_and_acks/4]).
|
||||
|
||||
-export([start/2, stop/1, delete_crashed/1]).
|
||||
|
||||
-export([promote_backing_queue_state/8, sender_death_fun/0, depth_fun/0]).
|
||||
|
||||
-export([init_with_existing_bq/3, stop_mirroring/1, sync_mirrors/3]).
|
||||
|
||||
-behaviour(rabbit_backing_queue).
|
||||
|
||||
-include("amqqueue.hrl").
|
||||
|
||||
-record(state, { name,
|
||||
gm,
|
||||
coordinator,
|
||||
backing_queue,
|
||||
backing_queue_state,
|
||||
seen_status,
|
||||
confirmed,
|
||||
known_senders,
|
||||
wait_timeout
|
||||
}).
|
||||
|
||||
-export_type([death_fun/0, depth_fun/0, stats_fun/0]).
|
||||
|
||||
-type death_fun() :: fun ((pid()) -> 'ok').
|
||||
-type depth_fun() :: fun (() -> 'ok').
|
||||
-type stats_fun() :: fun ((any()) -> 'ok').
|
||||
-type master_state() :: #state { name :: rabbit_amqqueue:name(),
|
||||
gm :: pid(),
|
||||
coordinator :: pid(),
|
||||
backing_queue :: atom(),
|
||||
backing_queue_state :: any(),
|
||||
seen_status :: map(),
|
||||
confirmed :: [rabbit_guid:guid()],
|
||||
known_senders :: sets:set()
|
||||
}.
|
||||
|
||||
%% For general documentation of HA design, see
|
||||
%% rabbit_mirror_queue_coordinator
|
||||
|
||||
%% ---------------------------------------------------------------------------
|
||||
%% Backing queue
|
||||
%% ---------------------------------------------------------------------------
|
||||
|
||||
-spec start(_, _) -> no_return().
|
||||
start(_Vhost, _DurableQueues) ->
|
||||
%% This will never get called as this module will never be
|
||||
%% installed as the default BQ implementation.
|
||||
exit({not_valid_for_generic_backing_queue, ?MODULE}).
|
||||
|
||||
-spec stop(_) -> no_return().
|
||||
stop(_Vhost) ->
|
||||
%% Same as start/1.
|
||||
exit({not_valid_for_generic_backing_queue, ?MODULE}).
|
||||
|
||||
-spec delete_crashed(_) -> no_return().
|
||||
delete_crashed(_QName) ->
|
||||
exit({not_valid_for_generic_backing_queue, ?MODULE}).
|
||||
|
||||
init(Q, Recover, AsyncCallback) ->
|
||||
{ok, BQ} = application:get_env(backing_queue_module),
|
||||
BQS = BQ:init(Q, Recover, AsyncCallback),
|
||||
State = #state{gm = GM} = init_with_existing_bq(Q, BQ, BQS),
|
||||
ok = gm:broadcast(GM, {depth, BQ:depth(BQS)}),
|
||||
State.
|
||||
|
||||
-spec init_with_existing_bq(amqqueue:amqqueue(), atom(), any()) ->
|
||||
master_state().
|
||||
|
||||
init_with_existing_bq(Q0, BQ, BQS) when ?is_amqqueue(Q0) ->
|
||||
QName = amqqueue:get_name(Q0),
|
||||
case rabbit_mirror_queue_coordinator:start_link(
|
||||
Q0, undefined, sender_death_fun(), depth_fun()) of
|
||||
{ok, CPid} ->
|
||||
GM = rabbit_mirror_queue_coordinator:get_gm(CPid),
|
||||
Self = self(),
|
||||
migrate_queue_record(QName, GM, Self),
|
||||
{_MNode, SNodes} = rabbit_mirror_queue_misc:suggested_queue_nodes(Q0),
|
||||
%% We need synchronous add here (i.e. do not return until the
|
||||
%% mirror is running) so that when queue declaration is finished
|
||||
%% all mirrors are up; we don't want to end up with unsynced mirrors
|
||||
%% just by declaring a new queue. But add can't be synchronous all
|
||||
%% the time as it can be called by mirrors and that's
|
||||
%% deadlock-prone.
|
||||
rabbit_mirror_queue_misc:add_mirrors(QName, SNodes, sync),
|
||||
#state{name = QName,
|
||||
gm = GM,
|
||||
coordinator = CPid,
|
||||
backing_queue = BQ,
|
||||
backing_queue_state = BQS,
|
||||
seen_status = #{},
|
||||
confirmed = [],
|
||||
known_senders = sets:new([{version, 2}]),
|
||||
wait_timeout = rabbit_misc:get_env(rabbit, slave_wait_timeout, 15000)};
|
||||
{error, Reason} ->
|
||||
%% The GM can shutdown before the coordinator has started up
|
||||
%% (lost membership or missing group), thus the start_link of
|
||||
%% the coordinator returns {error, shutdown} as rabbit_amqqueue_process
|
||||
% is trapping exists
|
||||
throw({coordinator_not_started, Reason})
|
||||
end.
|
||||
|
||||
migrate_queue_record(QName, GM, Self) ->
|
||||
rabbit_khepri:handle_fallback(
|
||||
#{mnesia => fun() -> migrate_queue_record_in_mnesia(QName, GM, Self) end,
|
||||
khepri => fun() -> migrate_queue_record_in_khepri(QName, GM, Self) end
|
||||
}).
|
||||
|
||||
migrate_queue_record_in_mnesia(QName, GM, Self) ->
|
||||
Fun = fun () ->
|
||||
[Q1] = mnesia:read({rabbit_queue, QName}),
|
||||
true = amqqueue:is_amqqueue(Q1),
|
||||
GMPids0 = amqqueue:get_gm_pids(Q1),
|
||||
GMPids1 = [{GM, Self} | GMPids0],
|
||||
Q2 = amqqueue:set_gm_pids(Q1, GMPids1),
|
||||
Q3 = amqqueue:set_state(Q2, live),
|
||||
%% amqqueue migration:
|
||||
%% The amqqueue was read from this transaction, no
|
||||
%% need to handle migration.
|
||||
ok = rabbit_amqqueue:store_queue(Q3)
|
||||
end,
|
||||
ok = rabbit_mnesia:execute_mnesia_transaction(Fun).
|
||||
|
||||
migrate_queue_record_in_khepri(QName, GM, Self) ->
|
||||
Fun = fun () ->
|
||||
rabbit_db_queue:update_in_khepri_tx(
|
||||
QName,
|
||||
fun(Q1) ->
|
||||
GMPids0 = amqqueue:get_gm_pids(Q1),
|
||||
GMPids1 = [{GM, Self} | GMPids0],
|
||||
Q2 = amqqueue:set_gm_pids(Q1, GMPids1),
|
||||
amqqueue:set_state(Q2, live)
|
||||
%% Todo it's missing the decorators, but HA is not supported
|
||||
%% in khepri. This just makes things compile and maybe
|
||||
%% start HA queues
|
||||
end)
|
||||
end,
|
||||
_ = rabbit_khepri:transaction(Fun, rw),
|
||||
ok.
|
||||
|
||||
-spec stop_mirroring(master_state()) -> {atom(), any()}.
|
||||
|
||||
stop_mirroring(State = #state { coordinator = CPid,
|
||||
backing_queue = BQ,
|
||||
backing_queue_state = BQS }) ->
|
||||
unlink(CPid),
|
||||
stop_all_slaves(shutdown, State),
|
||||
{BQ, BQS}.
|
||||
|
||||
-spec sync_mirrors(stats_fun(), stats_fun(), master_state()) ->
|
||||
{'ok', master_state()} | {stop, any(), master_state()}.
|
||||
|
||||
sync_mirrors(HandleInfo, EmitStats,
|
||||
State = #state { name = QName,
|
||||
gm = GM,
|
||||
backing_queue = BQ,
|
||||
backing_queue_state = BQS }) ->
|
||||
Log = fun (Fmt, Params) ->
|
||||
rabbit_mirror_queue_misc:log_info(
|
||||
QName, "Synchronising: " ++ Fmt ++ "", Params)
|
||||
end,
|
||||
Log("~tp messages to synchronise", [BQ:len(BQS)]),
|
||||
{ok, Q} = rabbit_amqqueue:lookup(QName),
|
||||
SPids = amqqueue:get_slave_pids(Q),
|
||||
SyncBatchSize = rabbit_mirror_queue_misc:sync_batch_size(Q),
|
||||
SyncThroughput = rabbit_mirror_queue_misc:default_max_sync_throughput(),
|
||||
log_mirror_sync_config(Log, SyncBatchSize, SyncThroughput),
|
||||
Ref = make_ref(),
|
||||
Syncer = rabbit_mirror_queue_sync:master_prepare(Ref, QName, Log, SPids),
|
||||
gm:broadcast(GM, {sync_start, Ref, Syncer, SPids}),
|
||||
S = fun(BQSN) -> State#state{backing_queue_state = BQSN} end,
|
||||
case rabbit_mirror_queue_sync:master_go(
|
||||
Syncer, Ref, Log, HandleInfo, EmitStats, SyncBatchSize, SyncThroughput, BQ, BQS) of
|
||||
{cancelled, BQS1} -> Log(" synchronisation cancelled ", []),
|
||||
{ok, S(BQS1)};
|
||||
{shutdown, R, BQS1} -> {stop, R, S(BQS1)};
|
||||
{sync_died, R, BQS1} -> Log("~tp", [R]),
|
||||
{ok, S(BQS1)};
|
||||
{already_synced, BQS1} -> {ok, S(BQS1)};
|
||||
{ok, BQS1} -> Log("complete", []),
|
||||
{ok, S(BQS1)}
|
||||
end.
|
||||
|
||||
log_mirror_sync_config(Log, SyncBatchSize, 0) ->
|
||||
Log("batch size: ~tp", [SyncBatchSize]);
|
||||
log_mirror_sync_config(Log, SyncBatchSize, SyncThroughput) ->
|
||||
Log("max batch size: ~tp; max sync throughput: ~tp bytes/s", [SyncBatchSize, SyncThroughput]).
|
||||
|
||||
terminate({shutdown, dropped} = Reason,
|
||||
State = #state { backing_queue = BQ,
|
||||
backing_queue_state = BQS }) ->
|
||||
%% Backing queue termination - this node has been explicitly
|
||||
%% dropped. Normally, non-durable queues would be tidied up on
|
||||
%% startup, but there's a possibility that we will be added back
|
||||
%% in without this node being restarted. Thus we must do the full
|
||||
%% blown delete_and_terminate now, but only locally: we do not
|
||||
%% broadcast delete_and_terminate.
|
||||
State#state{backing_queue_state = BQ:delete_and_terminate(Reason, BQS)};
|
||||
|
||||
terminate(Reason,
|
||||
State = #state { name = QName,
|
||||
backing_queue = BQ,
|
||||
backing_queue_state = BQS }) ->
|
||||
%% Backing queue termination. The queue is going down but
|
||||
%% shouldn't be deleted. Most likely safe shutdown of this
|
||||
%% node.
|
||||
{ok, Q} = rabbit_amqqueue:lookup(QName),
|
||||
SSPids = amqqueue:get_sync_slave_pids(Q),
|
||||
case SSPids =:= [] andalso
|
||||
rabbit_policy:get(<<"ha-promote-on-shutdown">>, Q) =/= <<"always">> of
|
||||
true -> %% Remove the whole queue to avoid data loss
|
||||
rabbit_mirror_queue_misc:log_warning(
|
||||
QName, "Stopping all nodes on master shutdown since no "
|
||||
"synchronised mirror (replica) is available", []),
|
||||
stop_all_slaves(Reason, State);
|
||||
false -> %% Just let some other mirror take over.
|
||||
ok
|
||||
end,
|
||||
State #state { backing_queue_state = BQ:terminate(Reason, BQS) }.
|
||||
|
||||
delete_and_terminate(Reason, State = #state { backing_queue = BQ,
|
||||
backing_queue_state = BQS }) ->
|
||||
stop_all_slaves(Reason, State),
|
||||
State#state{backing_queue_state = BQ:delete_and_terminate(Reason, BQS)}.
|
||||
|
||||
stop_all_slaves(Reason, #state{name = QName, gm = GM, wait_timeout = WT}) ->
|
||||
{ok, Q} = rabbit_amqqueue:lookup(QName),
|
||||
SPids = amqqueue:get_slave_pids(Q),
|
||||
rabbit_mirror_queue_misc:stop_all_slaves(Reason, SPids, QName, GM, WT).
|
||||
|
||||
purge(State = #state { gm = GM,
|
||||
backing_queue = BQ,
|
||||
backing_queue_state = BQS }) ->
|
||||
ok = gm:broadcast(GM, {drop, 0, BQ:len(BQS), false}),
|
||||
{Count, BQS1} = BQ:purge(BQS),
|
||||
{Count, State #state { backing_queue_state = BQS1 }}.
|
||||
|
||||
-spec purge_acks(_) -> no_return().
|
||||
purge_acks(_State) -> exit({not_implemented, {?MODULE, purge_acks}}).
|
||||
|
||||
publish(Msg, MsgProps, IsDelivered, ChPid, Flow,
|
||||
State = #state { gm = GM,
|
||||
seen_status = SS,
|
||||
backing_queue = BQ,
|
||||
backing_queue_state = BQS }) ->
|
||||
MsgId = mc:get_annotation(id, Msg),
|
||||
{_, Size} = mc:size(Msg),
|
||||
|
||||
false = maps:is_key(MsgId, SS), %% ASSERTION
|
||||
ok = gm:broadcast(GM, {publish, ChPid, Flow, MsgProps, Msg},
|
||||
Size),
|
||||
BQS1 = BQ:publish(Msg, MsgProps, IsDelivered, ChPid, Flow, BQS),
|
||||
ensure_monitoring(ChPid, State #state { backing_queue_state = BQS1 }).
|
||||
|
||||
batch_publish(Publishes, ChPid, Flow,
|
||||
State = #state { gm = GM,
|
||||
seen_status = SS,
|
||||
backing_queue = BQ,
|
||||
backing_queue_state = BQS }) ->
|
||||
{Publishes1, false, MsgSizes} =
|
||||
lists:foldl(fun ({Msg,
|
||||
MsgProps, _IsDelivered}, {Pubs, false, Sizes}) ->
|
||||
MsgId = mc:get_annotation(id, Msg),
|
||||
{_, Size} = mc:size(Msg),
|
||||
{[{Msg, MsgProps, true} | Pubs], %% [0]
|
||||
false = maps:is_key(MsgId, SS), %% ASSERTION
|
||||
Sizes + Size}
|
||||
end, {[], false, 0}, Publishes),
|
||||
Publishes2 = lists:reverse(Publishes1),
|
||||
ok = gm:broadcast(GM, {batch_publish, ChPid, Flow, Publishes2},
|
||||
MsgSizes),
|
||||
BQS1 = BQ:batch_publish(Publishes2, ChPid, Flow, BQS),
|
||||
ensure_monitoring(ChPid, State #state { backing_queue_state = BQS1 }).
|
||||
%% [0] When the mirror process handles the publish command, it sets the
|
||||
%% IsDelivered flag to true, so to avoid iterating over the messages
|
||||
%% again at the mirror, we do it here.
|
||||
|
||||
publish_delivered(Msg, MsgProps,
|
||||
ChPid, Flow, State = #state { gm = GM,
|
||||
seen_status = SS,
|
||||
backing_queue = BQ,
|
||||
backing_queue_state = BQS }) ->
|
||||
MsgId = mc:get_annotation(id, Msg),
|
||||
{_, Size} = mc:size(Msg),
|
||||
false = maps:is_key(MsgId, SS), %% ASSERTION
|
||||
ok = gm:broadcast(GM, {publish_delivered, ChPid, Flow, MsgProps, Msg},
|
||||
Size),
|
||||
{AckTag, BQS1} = BQ:publish_delivered(Msg, MsgProps, ChPid, Flow, BQS),
|
||||
State1 = State #state { backing_queue_state = BQS1 },
|
||||
{AckTag, ensure_monitoring(ChPid, State1)}.
|
||||
|
||||
batch_publish_delivered(Publishes, ChPid, Flow,
|
||||
State = #state { gm = GM,
|
||||
seen_status = SS,
|
||||
backing_queue = BQ,
|
||||
backing_queue_state = BQS }) ->
|
||||
{false, MsgSizes} =
|
||||
lists:foldl(fun ({Msg, _MsgProps},
|
||||
{false, Sizes}) ->
|
||||
MsgId = mc:get_annotation(id, Msg),
|
||||
{_, Size} = mc:size(Msg),
|
||||
{false = maps:is_key(MsgId, SS), %% ASSERTION
|
||||
Sizes + Size}
|
||||
end, {false, 0}, Publishes),
|
||||
ok = gm:broadcast(GM, {batch_publish_delivered, ChPid, Flow, Publishes},
|
||||
MsgSizes),
|
||||
{AckTags, BQS1} = BQ:batch_publish_delivered(Publishes, ChPid, Flow, BQS),
|
||||
State1 = State #state { backing_queue_state = BQS1 },
|
||||
{AckTags, ensure_monitoring(ChPid, State1)}.
|
||||
|
||||
discard(MsgId, ChPid, Flow, State = #state { gm = GM,
|
||||
backing_queue = BQ,
|
||||
backing_queue_state = BQS,
|
||||
seen_status = SS }) ->
|
||||
false = maps:is_key(MsgId, SS), %% ASSERTION
|
||||
ok = gm:broadcast(GM, {discard, ChPid, Flow, MsgId}),
|
||||
ensure_monitoring(ChPid,
|
||||
State #state { backing_queue_state =
|
||||
BQ:discard(MsgId, ChPid, Flow, BQS) }).
|
||||
|
||||
dropwhile(Pred, State = #state{backing_queue = BQ,
|
||||
backing_queue_state = BQS }) ->
|
||||
Len = BQ:len(BQS),
|
||||
{Next, BQS1} = BQ:dropwhile(Pred, BQS),
|
||||
{Next, drop(Len, false, State #state { backing_queue_state = BQS1 })}.
|
||||
|
||||
fetchwhile(Pred, Fun, Acc, State = #state{backing_queue = BQ,
|
||||
backing_queue_state = BQS }) ->
|
||||
Len = BQ:len(BQS),
|
||||
{Next, Acc1, BQS1} = BQ:fetchwhile(Pred, Fun, Acc, BQS),
|
||||
{Next, Acc1, drop(Len, true, State #state { backing_queue_state = BQS1 })}.
|
||||
|
||||
drain_confirmed(State = #state { backing_queue = BQ,
|
||||
backing_queue_state = BQS,
|
||||
seen_status = SS,
|
||||
confirmed = Confirmed }) ->
|
||||
{MsgIds, BQS1} = BQ:drain_confirmed(BQS),
|
||||
{MsgIds1, SS1} =
|
||||
lists:foldl(
|
||||
fun (MsgId, {MsgIdsN, SSN}) ->
|
||||
%% We will never see 'discarded' here
|
||||
case maps:find(MsgId, SSN) of
|
||||
error ->
|
||||
{[MsgId | MsgIdsN], SSN};
|
||||
{ok, published} ->
|
||||
%% It was published when we were a mirror,
|
||||
%% and we were promoted before we saw the
|
||||
%% publish from the channel. We still
|
||||
%% haven't seen the channel publish, and
|
||||
%% consequently we need to filter out the
|
||||
%% confirm here. We will issue the confirm
|
||||
%% when we see the publish from the channel.
|
||||
{MsgIdsN, maps:put(MsgId, confirmed, SSN)};
|
||||
{ok, confirmed} ->
|
||||
%% Well, confirms are racy by definition.
|
||||
{[MsgId | MsgIdsN], SSN}
|
||||
end
|
||||
end, {[], SS}, MsgIds),
|
||||
{Confirmed ++ MsgIds1, State #state { backing_queue_state = BQS1,
|
||||
seen_status = SS1,
|
||||
confirmed = [] }}.
|
||||
|
||||
fetch(AckRequired, State = #state { backing_queue = BQ,
|
||||
backing_queue_state = BQS }) ->
|
||||
{Result, BQS1} = BQ:fetch(AckRequired, BQS),
|
||||
State1 = State #state { backing_queue_state = BQS1 },
|
||||
{Result, case Result of
|
||||
empty -> State1;
|
||||
{_MsgId, _IsDelivered, _AckTag} -> drop_one(AckRequired, State1)
|
||||
end}.
|
||||
|
||||
drop(AckRequired, State = #state { backing_queue = BQ,
|
||||
backing_queue_state = BQS }) ->
|
||||
{Result, BQS1} = BQ:drop(AckRequired, BQS),
|
||||
State1 = State #state { backing_queue_state = BQS1 },
|
||||
{Result, case Result of
|
||||
empty -> State1;
|
||||
{_MsgId, _AckTag} -> drop_one(AckRequired, State1)
|
||||
end}.
|
||||
|
||||
ack(AckTags, State = #state { gm = GM,
|
||||
backing_queue = BQ,
|
||||
backing_queue_state = BQS }) ->
|
||||
{MsgIds, BQS1} = BQ:ack(AckTags, BQS),
|
||||
case MsgIds of
|
||||
[] -> ok;
|
||||
_ -> ok = gm:broadcast(GM, {ack, MsgIds})
|
||||
end,
|
||||
{MsgIds, State #state { backing_queue_state = BQS1 }}.
|
||||
|
||||
requeue(AckTags, State = #state { gm = GM,
|
||||
backing_queue = BQ,
|
||||
backing_queue_state = BQS }) ->
|
||||
{MsgIds, BQS1} = BQ:requeue(AckTags, BQS),
|
||||
ok = gm:broadcast(GM, {requeue, MsgIds}),
|
||||
{MsgIds, State #state { backing_queue_state = BQS1 }}.
|
||||
|
||||
ackfold(MsgFun, Acc, State = #state { backing_queue = BQ,
|
||||
backing_queue_state = BQS }, AckTags) ->
|
||||
{Acc1, BQS1} = BQ:ackfold(MsgFun, Acc, BQS, AckTags),
|
||||
{Acc1, State #state { backing_queue_state = BQS1 }}.
|
||||
|
||||
fold(Fun, Acc, State = #state { backing_queue = BQ,
|
||||
backing_queue_state = BQS }) ->
|
||||
{Result, BQS1} = BQ:fold(Fun, Acc, BQS),
|
||||
{Result, State #state { backing_queue_state = BQS1 }}.
|
||||
|
||||
len(#state { backing_queue = BQ, backing_queue_state = BQS }) ->
|
||||
BQ:len(BQS).
|
||||
|
||||
is_empty(#state { backing_queue = BQ, backing_queue_state = BQS }) ->
|
||||
BQ:is_empty(BQS).
|
||||
|
||||
depth(#state { backing_queue = BQ, backing_queue_state = BQS }) ->
|
||||
BQ:depth(BQS).
|
||||
|
||||
set_ram_duration_target(Target, State = #state { backing_queue = BQ,
|
||||
backing_queue_state = BQS }) ->
|
||||
State #state { backing_queue_state =
|
||||
BQ:set_ram_duration_target(Target, BQS) }.
|
||||
|
||||
ram_duration(State = #state { backing_queue = BQ, backing_queue_state = BQS }) ->
|
||||
{Result, BQS1} = BQ:ram_duration(BQS),
|
||||
{Result, State #state { backing_queue_state = BQS1 }}.
|
||||
|
||||
needs_timeout(#state { backing_queue = BQ, backing_queue_state = BQS }) ->
|
||||
BQ:needs_timeout(BQS).
|
||||
|
||||
timeout(State = #state { backing_queue = BQ, backing_queue_state = BQS }) ->
|
||||
State #state { backing_queue_state = BQ:timeout(BQS) }.
|
||||
|
||||
handle_pre_hibernate(State = #state { backing_queue = BQ,
|
||||
backing_queue_state = BQS }) ->
|
||||
State #state { backing_queue_state = BQ:handle_pre_hibernate(BQS) }.
|
||||
|
||||
resume(State = #state { backing_queue = BQ,
|
||||
backing_queue_state = BQS }) ->
|
||||
State #state { backing_queue_state = BQ:resume(BQS) }.
|
||||
|
||||
msg_rates(#state { backing_queue = BQ, backing_queue_state = BQS }) ->
|
||||
BQ:msg_rates(BQS).
|
||||
|
||||
info(backing_queue_status,
|
||||
State = #state { backing_queue = BQ, backing_queue_state = BQS }) ->
|
||||
BQ:info(backing_queue_status, BQS) ++
|
||||
[ {mirror_seen, maps:size(State #state.seen_status)},
|
||||
{mirror_senders, sets:size(State #state.known_senders)} ];
|
||||
info(Item, #state { backing_queue = BQ, backing_queue_state = BQS }) ->
|
||||
BQ:info(Item, BQS).
|
||||
|
||||
invoke(?MODULE, Fun, State) ->
|
||||
Fun(?MODULE, State);
|
||||
invoke(Mod, Fun, State = #state { backing_queue = BQ,
|
||||
backing_queue_state = BQS }) ->
|
||||
State #state { backing_queue_state = BQ:invoke(Mod, Fun, BQS) }.
|
||||
|
||||
is_duplicate(Message,
|
||||
State = #state { seen_status = SS,
|
||||
backing_queue = BQ,
|
||||
backing_queue_state = BQS,
|
||||
confirmed = Confirmed }) ->
|
||||
MsgId = mc:get_annotation(id, Message),
|
||||
%% Here, we need to deal with the possibility that we're about to
|
||||
%% receive a message that we've already seen when we were a mirror
|
||||
%% (we received it via gm). Thus if we do receive such message now
|
||||
%% via the channel, there may be a confirm waiting to issue for
|
||||
%% it.
|
||||
|
||||
%% We will never see {published, ChPid, MsgSeqNo} here.
|
||||
case maps:find(MsgId, SS) of
|
||||
error ->
|
||||
%% We permit the underlying BQ to have a peek at it, but
|
||||
%% only if we ourselves are not filtering out the msg.
|
||||
{Result, BQS1} = BQ:is_duplicate(Message, BQS),
|
||||
{Result, State #state { backing_queue_state = BQS1 }};
|
||||
{ok, published} ->
|
||||
%% It already got published when we were a mirror and no
|
||||
%% confirmation is waiting. amqqueue_process will have, in
|
||||
%% its msg_id_to_channel mapping, the entry for dealing
|
||||
%% with the confirm when that comes back in (it's added
|
||||
%% immediately after calling is_duplicate). The msg is
|
||||
%% invalid. We will not see this again, nor will we be
|
||||
%% further involved in confirming this message, so erase.
|
||||
{{true, drop}, State #state { seen_status = maps:remove(MsgId, SS) }};
|
||||
{ok, Disposition}
|
||||
when Disposition =:= confirmed
|
||||
%% It got published when we were a mirror via gm, and
|
||||
%% confirmed some time after that (maybe even after
|
||||
%% promotion), but before we received the publish from the
|
||||
%% channel, so couldn't previously know what the
|
||||
%% msg_seq_no was (and thus confirm as a mirror). So we
|
||||
%% need to confirm now. As above, amqqueue_process will
|
||||
%% have the entry for the msg_id_to_channel mapping added
|
||||
%% immediately after calling is_duplicate/2.
|
||||
orelse Disposition =:= discarded ->
|
||||
%% Message was discarded while we were a mirror. Confirm now.
|
||||
%% As above, amqqueue_process will have the entry for the
|
||||
%% msg_id_to_channel mapping.
|
||||
{{true, drop}, State #state { seen_status = maps:remove(MsgId, SS),
|
||||
confirmed = [MsgId | Confirmed] }}
|
||||
end.
|
||||
|
||||
set_queue_mode(Mode, State = #state { gm = GM,
|
||||
backing_queue = BQ,
|
||||
backing_queue_state = BQS }) ->
|
||||
ok = gm:broadcast(GM, {set_queue_mode, Mode}),
|
||||
BQS1 = BQ:set_queue_mode(Mode, BQS),
|
||||
State #state { backing_queue_state = BQS1 }.
|
||||
|
||||
set_queue_version(Version, State = #state { gm = GM,
|
||||
backing_queue = BQ,
|
||||
backing_queue_state = BQS }) ->
|
||||
ok = gm:broadcast(GM, {set_queue_version, Version}),
|
||||
BQS1 = BQ:set_queue_version(Version, BQS),
|
||||
State #state { backing_queue_state = BQS1 }.
|
||||
|
||||
zip_msgs_and_acks(Msgs, AckTags, Accumulator,
|
||||
#state { backing_queue = BQ,
|
||||
backing_queue_state = BQS }) ->
|
||||
BQ:zip_msgs_and_acks(Msgs, AckTags, Accumulator, BQS).
|
||||
|
||||
%% ---------------------------------------------------------------------------
|
||||
%% Other exported functions
|
||||
%% ---------------------------------------------------------------------------
|
||||
|
||||
-spec promote_backing_queue_state
|
||||
(rabbit_amqqueue:name(), pid(), atom(), any(), pid(), [any()],
|
||||
map(), [pid()]) ->
|
||||
master_state().
|
||||
|
||||
promote_backing_queue_state(QName, CPid, BQ, BQS, GM, AckTags, Seen, KS) ->
|
||||
{MsgIds, BQS1} = BQ:requeue(AckTags, BQS),
|
||||
ok = gm:broadcast(GM, {requeue, MsgIds}),
|
||||
Len = BQ:len(BQS1),
|
||||
Depth = BQ:depth(BQS1),
|
||||
true = Len == Depth, %% ASSERTION: everything must have been requeued
|
||||
ok = gm:broadcast(GM, {depth, Depth}),
|
||||
WaitTimeout = rabbit_misc:get_env(rabbit, slave_wait_timeout, 15000),
|
||||
#state { name = QName,
|
||||
gm = GM,
|
||||
coordinator = CPid,
|
||||
backing_queue = BQ,
|
||||
backing_queue_state = BQS1,
|
||||
seen_status = Seen,
|
||||
confirmed = [],
|
||||
known_senders = sets:from_list(KS),
|
||||
wait_timeout = WaitTimeout }.
|
||||
|
||||
-spec sender_death_fun() -> death_fun().
|
||||
|
||||
sender_death_fun() ->
|
||||
Self = self(),
|
||||
fun (DeadPid) ->
|
||||
rabbit_amqqueue:run_backing_queue(
|
||||
Self, ?MODULE,
|
||||
fun (?MODULE, State = #state { gm = GM, known_senders = KS }) ->
|
||||
ok = gm:broadcast(GM, {sender_death, DeadPid}),
|
||||
KS1 = sets:del_element(DeadPid, KS),
|
||||
State #state { known_senders = KS1 }
|
||||
end)
|
||||
end.
|
||||
|
||||
-spec depth_fun() -> depth_fun().
|
||||
|
||||
depth_fun() ->
|
||||
Self = self(),
|
||||
fun () ->
|
||||
rabbit_amqqueue:run_backing_queue(
|
||||
Self, ?MODULE,
|
||||
fun (?MODULE, State = #state { gm = GM,
|
||||
backing_queue = BQ,
|
||||
backing_queue_state = BQS }) ->
|
||||
ok = gm:broadcast(GM, {depth, BQ:depth(BQS)}),
|
||||
State
|
||||
end)
|
||||
end.
|
||||
|
||||
%% ---------------------------------------------------------------------------
|
||||
%% Helpers
|
||||
%% ---------------------------------------------------------------------------
|
||||
|
||||
drop_one(AckRequired, State = #state { gm = GM,
|
||||
backing_queue = BQ,
|
||||
backing_queue_state = BQS }) ->
|
||||
ok = gm:broadcast(GM, {drop, BQ:len(BQS), 1, AckRequired}),
|
||||
State.
|
||||
|
||||
drop(PrevLen, AckRequired, State = #state { gm = GM,
|
||||
backing_queue = BQ,
|
||||
backing_queue_state = BQS }) ->
|
||||
Len = BQ:len(BQS),
|
||||
case PrevLen - Len of
|
||||
0 -> State;
|
||||
Dropped -> ok = gm:broadcast(GM, {drop, Len, Dropped, AckRequired}),
|
||||
State
|
||||
end.
|
||||
|
||||
ensure_monitoring(ChPid, State = #state { coordinator = CPid,
|
||||
known_senders = KS }) ->
|
||||
case sets:is_element(ChPid, KS) of
|
||||
true -> State;
|
||||
false -> ok = rabbit_mirror_queue_coordinator:ensure_monitoring(
|
||||
CPid, [ChPid]),
|
||||
State #state { known_senders = sets:add_element(ChPid, KS) }
|
||||
end.
|
File diff suppressed because it is too large
|
@ -1,42 +0,0 @@
|
|||
%% This Source Code Form is subject to the terms of the Mozilla Public
|
||||
%% License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
%%
|
||||
%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
|
||||
%%
|
||||
|
||||
-module(rabbit_mirror_queue_mode).
|
||||
|
||||
-behaviour(rabbit_registry_class).
|
||||
|
||||
-export([added_to_rabbit_registry/2, removed_from_rabbit_registry/1]).
|
||||
|
||||
-type master() :: node().
|
||||
-type slave() :: node().
|
||||
-type params() :: any().
|
||||
|
||||
-callback description() -> [proplists:property()].
|
||||
|
||||
%% Called whenever we think we might need to change nodes for a
|
||||
%% mirrored queue. Note that this is called from a variety of
|
||||
%% contexts, both inside and outside Mnesia transactions. Ideally it
|
||||
%% will be pure-functional.
|
||||
%%
|
||||
%% Takes: parameters set in the policy,
|
||||
%% current master,
|
||||
%% current mirrors,
|
||||
%% current synchronised mirrors,
|
||||
%% all nodes to consider
|
||||
%%
|
||||
%% Returns: tuple of new master, new mirrors
|
||||
%%
|
||||
-callback suggested_queue_nodes(
|
||||
params(), master(), [slave()], [slave()], [node()]) ->
|
||||
{master(), [slave()]}.
|
||||
|
||||
%% Are the parameters valid for this mode?
|
||||
-callback validate_policy(params()) ->
|
||||
rabbit_policy_validator:validate_results().
|
||||
|
||||
added_to_rabbit_registry(_Type, _ModuleName) -> ok.
|
||||
removed_from_rabbit_registry(_Type) -> ok.
|
|
@ -1,30 +0,0 @@
|
|||
%% This Source Code Form is subject to the terms of the Mozilla Public
|
||||
%% License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
%%
|
||||
%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
|
||||
%%
|
||||
|
||||
-module(rabbit_mirror_queue_mode_all).
|
||||
|
||||
-behaviour(rabbit_mirror_queue_mode).
|
||||
|
||||
-export([description/0, suggested_queue_nodes/5, validate_policy/1]).
|
||||
|
||||
-rabbit_boot_step({?MODULE,
|
||||
[{description, "mirror mode all"},
|
||||
{mfa, {rabbit_registry, register,
|
||||
[ha_mode, <<"all">>, ?MODULE]}},
|
||||
{requires, rabbit_registry},
|
||||
{enables, kernel_ready}]}).
|
||||
|
||||
description() ->
|
||||
[{description, <<"Mirror queue to all nodes">>}].
|
||||
|
||||
suggested_queue_nodes(_Params, MNode, _SNodes, _SSNodes, Poss) ->
|
||||
{MNode, Poss -- [MNode]}.
|
||||
|
||||
validate_policy(none) ->
|
||||
ok;
|
||||
validate_policy(_Params) ->
|
||||
{error, "ha-mode=\"all\" does not take parameters", []}.
|
|
@ -1,43 +0,0 @@
|
|||
%% This Source Code Form is subject to the terms of the Mozilla Public
|
||||
%% License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
%%
|
||||
%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
|
||||
%%
|
||||
|
||||
-module(rabbit_mirror_queue_mode_exactly).
|
||||
|
||||
-behaviour(rabbit_mirror_queue_mode).
|
||||
|
||||
-export([description/0, suggested_queue_nodes/5, validate_policy/1]).
|
||||
|
||||
-rabbit_boot_step({?MODULE,
|
||||
[{description, "mirror mode exactly"},
|
||||
{mfa, {rabbit_registry, register,
|
||||
[ha_mode, <<"exactly">>, ?MODULE]}},
|
||||
{requires, rabbit_registry},
|
||||
{enables, kernel_ready}]}).
|
||||
|
||||
description() ->
|
||||
[{description, <<"Mirror queue to a specified number of nodes">>}].
|
||||
|
||||
%% When we need to add nodes, we randomise our candidate list as a
|
||||
%% crude form of load-balancing. TODO it would also be nice to
|
||||
%% randomise the list of ones to remove when we have too many - we
|
||||
%% would have to take account of synchronisation though.
|
||||
suggested_queue_nodes(Count, MNode, SNodes, _SSNodes, Poss) ->
|
||||
SCount = Count - 1,
|
||||
{MNode, case SCount > length(SNodes) of
|
||||
true -> Cand = shuffle((Poss -- [MNode]) -- SNodes),
|
||||
SNodes ++ lists:sublist(Cand, SCount - length(SNodes));
|
||||
false -> lists:sublist(SNodes, SCount)
|
||||
end}.
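%% Worked example (illustrative values, not from the original module): with a
%% policy of ha-params = 3, MNode = rabbit@a, SNodes = [rabbit@b] and
%% Poss = [rabbit@a, rabbit@b, rabbit@c, rabbit@d], SCount is 2, so one extra
%% mirror is picked at random from [rabbit@c, rabbit@d] and the result is
%% {rabbit@a, [rabbit@b, rabbit@c]} or {rabbit@a, [rabbit@b, rabbit@d]}.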
|
||||
|
||||
shuffle(L) ->
|
||||
{_, L1} = lists:unzip(lists:keysort(1, [{rand:uniform(), N} || N <- L])),
|
||||
L1.
|
||||
|
||||
validate_policy(N) when is_integer(N) andalso N > 0 ->
|
||||
ok;
|
||||
validate_policy(Params) ->
|
||||
{error, "ha-mode=\"exactly\" takes an integer, ~tp given", [Params]}.
|
|
@ -1,67 +0,0 @@
|
|||
%% This Source Code Form is subject to the terms of the Mozilla Public
|
||||
%% License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
%%
|
||||
%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
|
||||
%%
|
||||
|
||||
-module(rabbit_mirror_queue_mode_nodes).
|
||||
|
||||
-behaviour(rabbit_mirror_queue_mode).
|
||||
|
||||
-export([description/0, suggested_queue_nodes/5, validate_policy/1]).
|
||||
|
||||
-rabbit_boot_step({?MODULE,
|
||||
[{description, "mirror mode nodes"},
|
||||
{mfa, {rabbit_registry, register,
|
||||
[ha_mode, <<"nodes">>, ?MODULE]}},
|
||||
{requires, rabbit_registry},
|
||||
{enables, kernel_ready}]}).
|
||||
|
||||
description() ->
|
||||
[{description, <<"Mirror queue to specified nodes">>}].
|
||||
|
||||
suggested_queue_nodes(PolicyNodes0, CurrentMaster, _SNodes, SSNodes, NodesRunningRabbitMQ) ->
|
||||
PolicyNodes1 = [list_to_atom(binary_to_list(Node)) || Node <- PolicyNodes0],
|
||||
%% If the current master is not in the nodes specified, then what we want
|
||||
%% to do depends on whether there are any synchronised mirrors. If there
|
||||
%% are then we can just kill the current master - the admin has asked for
|
||||
%% a migration and we should give it to them. If there are not however
|
||||
%% then we must keep the master around so as not to lose messages.
|
||||
|
||||
PolicyNodes = case SSNodes of
|
||||
[] -> lists:usort([CurrentMaster | PolicyNodes1]);
|
||||
_ -> PolicyNodes1
|
||||
end,
|
||||
Unavailable = PolicyNodes -- NodesRunningRabbitMQ,
|
||||
AvailablePolicyNodes = PolicyNodes -- Unavailable,
|
||||
case AvailablePolicyNodes of
|
||||
[] -> %% We have never heard of anything? Not much we can do but
|
||||
%% keep the master alive.
|
||||
{CurrentMaster, []};
|
||||
_ -> case lists:member(CurrentMaster, AvailablePolicyNodes) of
|
||||
true -> {CurrentMaster,
|
||||
AvailablePolicyNodes -- [CurrentMaster]};
|
||||
false -> %% Make sure the new master is synced! In order to
|
||||
%% get here SSNodes must not be empty.
|
||||
SyncPolicyNodes = [Node ||
|
||||
Node <- AvailablePolicyNodes,
|
||||
lists:member(Node, SSNodes)],
|
||||
NewMaster = case SyncPolicyNodes of
|
||||
[Node | _] -> Node;
|
||||
[] -> erlang:hd(SSNodes)
|
||||
end,
|
||||
{NewMaster, AvailablePolicyNodes -- [NewMaster]}
|
||||
end
|
||||
end.
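%% Worked example (illustrative values, assumed here): ha-params =
%% [<<"rabbit@b">>, <<"rabbit@c">>], CurrentMaster = rabbit@a,
%% SSNodes = [rabbit@c], and all three nodes running. SSNodes is non-empty, so
%% the current master is not forced into the candidate list; rabbit@a is not
%% among the policy nodes, so a synchronised policy node is promoted and the
%% result is {rabbit@c, [rabbit@b]}.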
|
||||
|
||||
validate_policy([]) ->
|
||||
{error, "ha-mode=\"nodes\" list must be non-empty", []};
|
||||
validate_policy(Nodes) when is_list(Nodes) ->
|
||||
case [I || I <- Nodes, not is_binary(I)] of
|
||||
[] -> ok;
|
||||
Invalid -> {error, "ha-mode=\"nodes\" takes a list of strings, "
|
||||
"~tp was not a string", [Invalid]}
|
||||
end;
|
||||
validate_policy(Params) ->
|
||||
{error, "ha-mode=\"nodes\" takes a list, ~tp given", [Params]}.
|
File diff suppressed because it is too large
|
@ -1,469 +0,0 @@
|
|||
%% This Source Code Form is subject to the terms of the Mozilla Public
|
||||
%% License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
%%
|
||||
%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
|
||||
%%
|
||||
|
||||
-module(rabbit_mirror_queue_sync).
|
||||
|
||||
-include_lib("rabbit_common/include/rabbit.hrl").
|
||||
|
||||
-export([master_prepare/4, master_go/9, slave/7, conserve_resources/3]).
|
||||
|
||||
%% Export for UTs
|
||||
-export([maybe_master_batch_send/2, get_time_diff/3, append_to_acc/4]).
|
||||
|
||||
-define(SYNC_PROGRESS_INTERVAL, 1000000).
|
||||
|
||||
-define(SYNC_THROUGHPUT_EVAL_INTERVAL_MILLIS, 50).
|
||||
|
||||
%% There are three processes around, the master, the syncer and the
|
||||
%% slave(s). The syncer is an intermediary, linked to the master in
|
||||
%% order to make sure we do not mess with the master's credit flow or
|
||||
%% set of monitors.
|
||||
%%
|
||||
%% Interactions
|
||||
%% ------------
|
||||
%%
|
||||
%% '*' indicates repeating messages. All are standard Erlang messages
|
||||
%% except sync_start which is sent over GM to flush out any other
|
||||
%% messages that we might have sent that way already. (credit) is the
|
||||
%% usual credit_flow bump message every so often.
|
||||
%%
|
||||
%% Master Syncer Slave(s)
|
||||
%% sync_mirrors -> || ||
|
||||
%% || -- (spawns) --> || ||
|
||||
%% || --------- sync_start (over GM) -------> ||
|
||||
%% || || <--- sync_ready ---- ||
|
||||
%% || || (or) ||
|
||||
%% || || <--- sync_deny ----- ||
|
||||
%% || <--- ready ---- || ||
|
||||
%% || <--- next* ---- || || }
|
||||
%% || ---- msg* ----> || || } loop
|
||||
%% || || ---- sync_msgs* ---> || }
|
||||
%% || || <--- (credit)* ----- || }
|
||||
%% || <--- next ---- || ||
|
||||
%% || ---- done ----> || ||
|
||||
%% || || -- sync_complete --> ||
|
||||
%% || (Dies) ||
|
||||
|
||||
-type log_fun() :: fun ((string(), [any()]) -> 'ok').
|
||||
-type bq() :: atom().
|
||||
-type bqs() :: any().
|
||||
-type ack() :: any().
|
||||
-type slave_sync_state() :: {[{rabbit_types:msg_id(), ack()}], timer:tref(),
|
||||
bqs()}.
|
||||
|
||||
%% ---------------------------------------------------------------------------
|
||||
%% Master
|
||||
|
||||
-spec master_prepare(reference(), rabbit_amqqueue:name(),
|
||||
log_fun(), [pid()]) -> pid().
|
||||
|
||||
master_prepare(Ref, QName, Log, SPids) ->
|
||||
MPid = self(),
|
||||
spawn_link(fun () ->
|
||||
?store_proc_name(QName),
|
||||
syncer(Ref, Log, MPid, SPids)
|
||||
end).
|
||||
|
||||
-spec master_go(pid(), reference(), log_fun(),
|
||||
rabbit_mirror_queue_master:stats_fun(),
|
||||
rabbit_mirror_queue_master:stats_fun(),
|
||||
non_neg_integer(),
|
||||
non_neg_integer(),
|
||||
bq(), bqs()) ->
|
||||
{'already_synced', bqs()} | {'ok', bqs()} |
|
||||
{'cancelled', bqs()} |
|
||||
{'shutdown', any(), bqs()} |
|
||||
{'sync_died', any(), bqs()}.
|
||||
|
||||
master_go(Syncer, Ref, Log, HandleInfo, EmitStats, SyncBatchSize, SyncThroughput, BQ, BQS) ->
|
||||
Args = {Syncer, Ref, Log, HandleInfo, EmitStats, rabbit_misc:get_parent()},
|
||||
receive
|
||||
{'EXIT', Syncer, normal} -> {already_synced, BQS};
|
||||
{'EXIT', Syncer, Reason} -> {sync_died, Reason, BQS};
|
||||
{ready, Syncer} -> EmitStats({syncing, 0}),
|
||||
master_batch_go0(Args, SyncBatchSize, SyncThroughput,
|
||||
BQ, BQS)
|
||||
end.
|
||||
|
||||
master_batch_go0(Args, BatchSize, SyncThroughput, BQ, BQS) ->
|
||||
FoldFun =
|
||||
fun (Msg, MsgProps, Unacked, Acc) ->
|
||||
Acc1 = append_to_acc(Msg, MsgProps, Unacked, Acc),
|
||||
case maybe_master_batch_send(Acc1, BatchSize) of
|
||||
true -> master_batch_send(Args, Acc1);
|
||||
false -> {cont, Acc1}
|
||||
end
|
||||
end,
|
||||
FoldAcc = {[], 0, {0, erlang:monotonic_time(), SyncThroughput}, {0, BQ:depth(BQS)}, erlang:monotonic_time()},
|
||||
bq_fold(FoldFun, FoldAcc, Args, BQ, BQS).
|
||||
|
||||
master_batch_send({Syncer, Ref, Log, HandleInfo, EmitStats, Parent},
|
||||
{Batch, I, {TotalBytes, LastCheck, SyncThroughput}, {Curr, Len}, Last}) ->
|
||||
T = maybe_emit_stats(Last, I, EmitStats, Log),
|
||||
HandleInfo({syncing, I}),
|
||||
handle_set_maximum_since_use(),
|
||||
SyncMsg = {msgs, Ref, lists:reverse(Batch)},
|
||||
NewAcc = {[], I + length(Batch), {TotalBytes, LastCheck, SyncThroughput}, {Curr, Len}, T},
|
||||
master_send_receive(SyncMsg, NewAcc, Syncer, Ref, Parent).
|
||||
|
||||
%% Either send messages when we reach the last one in the queue or
|
||||
%% whenever we have accumulated BatchSize messages.
|
||||
maybe_master_batch_send({_, _, _, {Len, Len}, _}, _BatchSize) ->
|
||||
true;
|
||||
maybe_master_batch_send({_, _, _, {Curr, _Len}, _}, BatchSize)
|
||||
when Curr rem BatchSize =:= 0 ->
|
||||
true;
|
||||
maybe_master_batch_send({_, _, {TotalBytes, _, SyncThroughput}, {_Curr, _Len}, _}, _BatchSize)
|
||||
when TotalBytes > SyncThroughput ->
|
||||
true;
|
||||
maybe_master_batch_send(_Acc, _BatchSize) ->
|
||||
false.
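%% Illustrative reading of the clauses above (batch size assumed): with
%% SyncBatchSize = 4096, a batch is flushed when Curr reaches Len (end of the
%% queue), when Curr rem 4096 =:= 0, or as soon as TotalBytes exceeds the
%% configured max sync throughput; otherwise accumulation continues.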
|
||||
|
||||
bq_fold(FoldFun, FoldAcc, Args, BQ, BQS) ->
|
||||
case BQ:fold(FoldFun, FoldAcc, BQS) of
|
||||
{{shutdown, Reason}, BQS1} -> {shutdown, Reason, BQS1};
|
||||
{{sync_died, Reason}, BQS1} -> {sync_died, Reason, BQS1};
|
||||
{_, BQS1} -> master_done(Args, BQS1)
|
||||
end.
|
||||
|
||||
append_to_acc(Msg, MsgProps, Unacked, {Batch, I, {_, _, 0}, {Curr, Len}, T}) ->
|
||||
{[{Msg, MsgProps, Unacked} | Batch], I, {0, 0, 0}, {Curr + 1, Len}, T};
|
||||
append_to_acc(Msg, MsgProps, Unacked, {Batch, I, {TotalBytes, LastCheck, SyncThroughput}, {Curr, Len}, T}) ->
|
||||
{_, MsgSize} = mc:size(Msg),
|
||||
{[{Msg, MsgProps, Unacked} | Batch], I, {TotalBytes + MsgSize, LastCheck, SyncThroughput}, {Curr + 1, Len}, T}.
|
||||
|
||||
master_send_receive(SyncMsg, NewAcc, Syncer, Ref, Parent) ->
|
||||
receive
|
||||
{'$gen_call', From,
|
||||
cancel_sync_mirrors} -> stop_syncer(Syncer, {cancel, Ref}),
|
||||
gen_server2:reply(From, ok),
|
||||
{stop, cancelled};
|
||||
{next, Ref} -> Syncer ! SyncMsg,
|
||||
{Msgs, I , {TotalBytes, LastCheck, SyncThroughput}, {Curr, Len}, T} = NewAcc,
|
||||
{NewTotalBytes, NewLastCheck} = maybe_throttle_sync_throughput(TotalBytes, LastCheck, SyncThroughput),
|
||||
{cont, {Msgs, I, {NewTotalBytes, NewLastCheck, SyncThroughput}, {Curr, Len}, T}};
|
||||
{'EXIT', Parent, Reason} -> {stop, {shutdown, Reason}};
|
||||
{'EXIT', Syncer, Reason} -> {stop, {sync_died, Reason}}
|
||||
end.
|
||||
|
||||
maybe_throttle_sync_throughput(_ , _, 0) ->
|
||||
{0, erlang:monotonic_time()};
|
||||
maybe_throttle_sync_throughput(TotalBytes, LastCheck, SyncThroughput) ->
|
||||
Interval = erlang:convert_time_unit(erlang:monotonic_time() - LastCheck, native, milli_seconds),
|
||||
case Interval > ?SYNC_THROUGHPUT_EVAL_INTERVAL_MILLIS of
|
||||
true -> maybe_pause_sync(TotalBytes, Interval, SyncThroughput),
|
||||
{0, erlang:monotonic_time()}; %% reset TotalBytes counter and LastCheck.
|
||||
false -> {TotalBytes, LastCheck}
|
||||
end.
|
||||
|
||||
maybe_pause_sync(TotalBytes, Interval, SyncThroughput) ->
|
||||
Delta = get_time_diff(TotalBytes, Interval, SyncThroughput),
|
||||
pause_queue_sync(Delta).
|
||||
|
||||
pause_queue_sync(0) ->
|
||||
rabbit_log_mirroring:debug("Sync throughput is ok.");
|
||||
pause_queue_sync(Delta) ->
|
||||
rabbit_log_mirroring:debug("Sync throughput exceeds threshold. Pause queue sync for ~tp ms", [Delta]),
|
||||
timer:sleep(Delta).
|
||||
|
||||
%% Sync throughput computation:
|
||||
%% - Total bytes have been sent since last check: TotalBytes
|
||||
%% - Used/Elapsed time since last check: Interval (in milliseconds)
|
||||
%% - Effective/Used throughput in bytes/s: TotalBytes/Interval * 1000.
|
||||
%% - When UsedThroughput > SyncThroughput -> we need to slow down to compensate for the over-used rate.
|
||||
%% The amount of time to pause queue sync is the difference between the time needed to broadcast TotalBytes at max throughput
|
||||
%% and the elapsed time (Interval).
|
||||
get_time_diff(TotalBytes, Interval, SyncThroughput) ->
|
||||
rabbit_log_mirroring:debug("Total ~tp bytes has been sent over last ~tp ms. Effective sync througput: ~tp", [TotalBytes, Interval, round(TotalBytes * 1000 / Interval)]),
|
||||
max(round(TotalBytes/SyncThroughput * 1000 - Interval), 0).
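%% Worked example (illustrative numbers, assumed here): with TotalBytes = 2000000,
%% Interval = 50 ms and SyncThroughput = 10000000 bytes/s, the effective rate is
%% 40 MB/s, four times the ceiling. Sending 2000000 bytes at the configured rate
%% needs 200 ms, so get_time_diff/3 returns max(round(200 - 50), 0) = 150 and
%% the sync pauses for 150 ms.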
|
||||
|
||||
master_done({Syncer, Ref, _Log, _HandleInfo, _EmitStats, Parent}, BQS) ->
|
||||
receive
|
||||
{'$gen_call', From,
|
||||
cancel_sync_mirrors} ->
|
||||
stop_syncer(Syncer, {cancel, Ref}),
|
||||
gen_server2:reply(From, ok),
|
||||
{cancelled, BQS};
|
||||
{cancelled, Ref} ->
|
||||
{cancelled, BQS};
|
||||
{next, Ref} ->
|
||||
stop_syncer(Syncer, {done, Ref}),
|
||||
{ok, BQS};
|
||||
{'EXIT', Parent, Reason} ->
|
||||
{shutdown, Reason, BQS};
|
||||
{'EXIT', Syncer, Reason} ->
|
||||
{sync_died, Reason, BQS}
|
||||
end.
|
||||
|
||||
stop_syncer(Syncer, Msg) ->
|
||||
unlink(Syncer),
|
||||
Syncer ! Msg,
|
||||
receive {'EXIT', Syncer, _} -> ok
|
||||
after 0 -> ok
|
||||
end.
|
||||
|
||||
maybe_emit_stats(Last, I, EmitStats, Log) ->
|
||||
Interval = erlang:convert_time_unit(
|
||||
erlang:monotonic_time() - Last, native, micro_seconds),
|
||||
case Interval > ?SYNC_PROGRESS_INTERVAL of
|
||||
true -> EmitStats({syncing, I}),
|
||||
Log("~tp messages", [I]),
|
||||
erlang:monotonic_time();
|
||||
false -> Last
|
||||
end.
|
||||
|
||||
handle_set_maximum_since_use() ->
|
||||
receive
|
||||
{'$gen_cast', {set_maximum_since_use, Age}} ->
|
||||
ok = file_handle_cache:set_maximum_since_use(Age)
|
||||
after 0 ->
|
||||
ok
|
||||
end.
|
||||
|
||||
%% Master
|
||||
%% ---------------------------------------------------------------------------
|
||||
%% Syncer
|
||||
|
||||
syncer(Ref, Log, MPid, SPids) ->
|
||||
[erlang:monitor(process, SPid) || SPid <- SPids],
|
||||
%% We wait for a reply from the mirrors so that we know they are in
|
||||
%% a receive block and will thus receive messages we send to them
|
||||
%% *without* those messages ending up in their gen_server2 pqueue.
|
||||
case await_slaves(Ref, SPids) of
|
||||
[] -> Log("all mirrors already synced", []);
|
||||
SPids1 -> MPid ! {ready, self()},
|
||||
Log("mirrors ~tp to sync", [[node(SPid) || SPid <- SPids1]]),
|
||||
syncer_check_resources(Ref, MPid, SPids1)
|
||||
end.
|
||||
|
||||
await_slaves(Ref, SPids) ->
|
||||
[SPid || SPid <- SPids,
|
||||
rabbit_mnesia:on_running_node(SPid) andalso %% [0]
|
||||
receive
|
||||
{sync_ready, Ref, SPid} -> true;
|
||||
{sync_deny, Ref, SPid} -> false;
|
||||
{'DOWN', _, process, SPid, _} -> false
|
||||
end].
|
||||
%% [0] This check is in case there's been a partition which has then
|
||||
%% healed in between the master retrieving the mirror pids from Mnesia
|
||||
%% and sending 'sync_start' over GM. If so there might be mirrors on the
|
||||
%% other side of the partition which we can monitor (since they have
|
||||
%% rejoined the distributed system with us) but which did not get the
|
||||
%% 'sync_start' and so will not reply. We need to act as though they are
|
||||
%% down.
|
||||
|
||||
syncer_check_resources(Ref, MPid, SPids) ->
|
||||
_ = rabbit_alarm:register(self(), {?MODULE, conserve_resources, []}),
|
||||
%% Before we ask the master node to send the first batch of messages
|
||||
%% over here, we check if one node is already short on memory. If
|
||||
%% that's the case, we wait for the alarm to be cleared before
|
||||
%% starting the syncer loop.
|
||||
AlarmedNodes = lists:any(
|
||||
fun
|
||||
({{resource_limit, memory, _}, _}) -> true;
|
||||
({_, _}) -> false
|
||||
end, rabbit_alarm:get_alarms()),
|
||||
if
|
||||
not AlarmedNodes ->
|
||||
MPid ! {next, Ref},
|
||||
syncer_loop(Ref, MPid, SPids);
|
||||
true ->
|
||||
case wait_for_resources(Ref, SPids) of
|
||||
cancel -> MPid ! {cancelled, Ref};
|
||||
SPids1 -> MPid ! {next, Ref},
|
||||
syncer_loop(Ref, MPid, SPids1)
|
||||
end
|
||||
end.
|
||||
|
||||
syncer_loop(Ref, MPid, SPids) ->
|
||||
receive
|
||||
{conserve_resources, memory, true} ->
|
||||
case wait_for_resources(Ref, SPids) of
|
||||
cancel -> MPid ! {cancelled, Ref};
|
||||
SPids1 -> syncer_loop(Ref, MPid, SPids1)
|
||||
end;
|
||||
{conserve_resources, _, _} ->
|
||||
%% Ignore other alerts.
|
||||
syncer_loop(Ref, MPid, SPids);
|
||||
{msgs, Ref, Msgs} ->
|
||||
SPids1 = wait_for_credit(SPids),
|
||||
case SPids1 of
|
||||
[] ->
|
||||
% Die silently because there are no mirrors left.
|
||||
ok;
|
||||
_ ->
|
||||
_ = broadcast(SPids1, {sync_msgs, Ref, Msgs}),
|
||||
MPid ! {next, Ref},
|
||||
syncer_loop(Ref, MPid, SPids1)
|
||||
end;
|
||||
{cancel, Ref} ->
|
||||
%% We don't tell the mirrors we will die - so when we do
|
||||
%% they interpret that as a failure, which is what we
|
||||
%% want.
|
||||
ok;
|
||||
{done, Ref} ->
|
||||
[SPid ! {sync_complete, Ref} || SPid <- SPids]
|
||||
end.
|
||||
|
||||
broadcast(SPids, Msg) ->
|
||||
[begin
|
||||
credit_flow:send(SPid),
|
||||
SPid ! Msg
|
||||
end || SPid <- SPids].
|
||||
|
||||
-spec conserve_resources(pid(),
|
||||
rabbit_alarm:resource_alarm_source(),
|
||||
rabbit_alarm:resource_alert()) -> ok.
|
||||
conserve_resources(Pid, Source, {_, Conserve, _}) ->
|
||||
Pid ! {conserve_resources, Source, Conserve},
|
||||
ok.
|
||||
|
||||
wait_for_credit(SPids) ->
|
||||
case credit_flow:blocked() of
|
||||
true -> receive
|
||||
{bump_credit, Msg} ->
|
||||
credit_flow:handle_bump_msg(Msg),
|
||||
wait_for_credit(SPids);
|
||||
{'DOWN', _, process, SPid, _} ->
|
||||
credit_flow:peer_down(SPid),
|
||||
wait_for_credit(lists:delete(SPid, SPids))
|
||||
end;
|
||||
false -> SPids
|
||||
end.
|
||||
|
||||
wait_for_resources(Ref, SPids) ->
|
||||
erlang:garbage_collect(),
|
||||
receive
|
||||
{conserve_resources, memory, false} ->
|
||||
SPids;
|
||||
{conserve_resources, _, _} ->
|
||||
%% Ignore other alerts.
|
||||
wait_for_resources(Ref, SPids);
|
||||
{cancel, Ref} ->
|
||||
%% We don't tell the mirrors we will die - so when we do
|
||||
%% they interpret that as a failure, which is what we
|
||||
%% want.
|
||||
cancel;
|
||||
{'DOWN', _, process, SPid, _} ->
|
||||
credit_flow:peer_down(SPid),
|
||||
SPids1 = wait_for_credit(lists:delete(SPid, SPids)),
|
||||
wait_for_resources(Ref, SPids1)
|
||||
end.
|
||||
|
||||
%% Syncer
|
||||
%% ---------------------------------------------------------------------------
|
||||
%% Slave
|
||||
|
||||
-spec slave(non_neg_integer(), reference(), timer:tref(), pid(),
|
||||
bq(), bqs(), fun((bq(), bqs()) -> {timer:tref(), bqs()})) ->
|
||||
'denied' |
|
||||
{'ok' | 'failed', slave_sync_state()} |
|
||||
{'stop', any(), slave_sync_state()}.
|
||||
|
||||
slave(0, Ref, _TRef, Syncer, _BQ, _BQS, _UpdateRamDuration) ->
|
||||
Syncer ! {sync_deny, Ref, self()},
|
||||
denied;
|
||||
|
||||
slave(_DD, Ref, TRef, Syncer, BQ, BQS, UpdateRamDuration) ->
|
||||
MRef = erlang:monitor(process, Syncer),
|
||||
Syncer ! {sync_ready, Ref, self()},
|
||||
{_MsgCount, BQS1} = BQ:purge(BQ:purge_acks(BQS)),
|
||||
slave_sync_loop({Ref, MRef, Syncer, BQ, UpdateRamDuration,
|
||||
rabbit_misc:get_parent()}, {[], TRef, BQS1}).
|
||||
|
||||
slave_sync_loop(Args = {Ref, MRef, Syncer, BQ, UpdateRamDuration, Parent},
|
||||
State = {MA, TRef, BQS}) ->
|
||||
receive
|
||||
{'DOWN', MRef, process, Syncer, _Reason} ->
|
||||
%% If the master dies half way we are not in the usual
|
||||
%% half-synced state (with messages nearer the tail of the
|
||||
%% queue); instead we have ones nearer the head. If we then
|
||||
%% sync with a newly promoted master, or even just receive
|
||||
%% messages from it, we have a hole in the middle. So the
|
||||
%% only thing to do here is purge.
|
||||
{_MsgCount, BQS1} = BQ:purge(BQ:purge_acks(BQS)),
|
||||
credit_flow:peer_down(Syncer),
|
||||
{failed, {[], TRef, BQS1}};
|
||||
{bump_credit, Msg} ->
|
||||
credit_flow:handle_bump_msg(Msg),
|
||||
slave_sync_loop(Args, State);
|
||||
{sync_complete, Ref} ->
|
||||
erlang:demonitor(MRef, [flush]),
|
||||
credit_flow:peer_down(Syncer),
|
||||
{ok, State};
|
||||
{'$gen_cast', {set_maximum_since_use, Age}} ->
|
||||
ok = file_handle_cache:set_maximum_since_use(Age),
|
||||
slave_sync_loop(Args, State);
|
||||
{'$gen_cast', {set_ram_duration_target, Duration}} ->
|
||||
BQS1 = BQ:set_ram_duration_target(Duration, BQS),
|
||||
slave_sync_loop(Args, {MA, TRef, BQS1});
|
||||
{'$gen_cast', {run_backing_queue, Mod, Fun}} ->
|
||||
BQS1 = BQ:invoke(Mod, Fun, BQS),
|
||||
slave_sync_loop(Args, {MA, TRef, BQS1});
|
||||
update_ram_duration ->
|
||||
{TRef1, BQS1} = UpdateRamDuration(BQ, BQS),
|
||||
slave_sync_loop(Args, {MA, TRef1, BQS1});
|
||||
{sync_msgs, Ref, Batch} ->
|
||||
credit_flow:ack(Syncer),
|
||||
{MA1, BQS1} = process_batch(Batch, MA, BQ, BQS),
|
||||
slave_sync_loop(Args, {MA1, TRef, BQS1});
|
||||
{'EXIT', Parent, Reason} ->
|
||||
{stop, Reason, State};
|
||||
%% If the master throws an exception
|
||||
{'$gen_cast', {gm, {delete_and_terminate, Reason}}} ->
|
||||
BQ:delete_and_terminate(Reason, BQS),
|
||||
{stop, Reason, {[], TRef, undefined}}
|
||||
end.
|
||||
|
||||
%% We are partitioning messages by the Unacked element in the tuple.
|
||||
%% when unacked = true, then it's a publish_delivered message,
|
||||
%% otherwise it's a publish message.
|
||||
%%
|
||||
%% Note that we can't first partition the batch and then publish each
|
||||
%% part, since that would result in re-ordering messages, which we
|
||||
%% don't want to do.
|
||||
process_batch([], MA, _BQ, BQS) ->
|
||||
{MA, BQS};
|
||||
process_batch(Batch, MA, BQ, BQS) ->
|
||||
{_Msg, _MsgProps, Unacked} = hd(Batch),
|
||||
process_batch(Batch, Unacked, [], MA, BQ, BQS).
|
||||
|
||||
process_batch([{Msg, Props, true = Unacked} | Rest], true = Unacked,
|
||||
Acc, MA, BQ, BQS) ->
|
||||
%% publish_delivered messages don't need the IsDelivered flag,
|
||||
%% therefore we just add {Msg, Props} to the accumulator.
|
||||
process_batch(Rest, Unacked, [{Msg, props(Props)} | Acc],
|
||||
MA, BQ, BQS);
|
||||
process_batch([{Msg, Props, false = Unacked} | Rest], false = Unacked,
|
||||
Acc, MA, BQ, BQS) ->
|
||||
%% publish messages need the IsDelivered flag, which is set to true
|
||||
%% here.
|
||||
process_batch(Rest, Unacked, [{Msg, props(Props), true} | Acc],
|
||||
MA, BQ, BQS);
|
||||
process_batch(Batch, Unacked, Acc, MA, BQ, BQS) ->
|
||||
{MA1, BQS1} = publish_batch(Unacked, lists:reverse(Acc), MA, BQ, BQS),
|
||||
process_batch(Batch, MA1, BQ, BQS1).
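%% Worked example (illustrative batch, assumed here): a synced batch
%% [{M1, P1, false}, {M2, P2, false}, {M3, P3, true}] is split in order into
%% two runs: [{M1, props(P1), true}, {M2, props(P2), true}] goes through
%% batch_publish/4 first, then [{M3, props(P3)}] goes through
%% batch_publish_delivered/4, so relative message order is preserved.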
|
||||
|
||||
%% Unacked msgs are published via batch_publish.
|
||||
publish_batch(false, Batch, MA, BQ, BQS) ->
|
||||
batch_publish(Batch, MA, BQ, BQS);
|
||||
%% Acked msgs are published via batch_publish_delivered.
|
||||
publish_batch(true, Batch, MA, BQ, BQS) ->
|
||||
batch_publish_delivered(Batch, MA, BQ, BQS).
|
||||
|
||||
|
||||
batch_publish(Batch, MA, BQ, BQS) ->
|
||||
BQS1 = BQ:batch_publish(Batch, none, noflow, BQS),
|
||||
{MA, BQS1}.
|
||||
|
||||
batch_publish_delivered(Batch, MA, BQ, BQS) ->
|
||||
{AckTags, BQS1} = BQ:batch_publish_delivered(Batch, none, noflow, BQS),
|
||||
MA1 = BQ:zip_msgs_and_acks(Batch, AckTags, MA, BQS1),
|
||||
{MA1, BQS1}.
|
||||
|
||||
props(Props) ->
|
||||
Props#message_properties{needs_confirming = false}.
|
|
@ -25,7 +25,7 @@
|
|||
|
||||
register() ->
|
||||
%% Note: there are more validators registered from other modules,
|
||||
%% such as rabbit_mirror_queue_misc
|
||||
%% such as rabbit_quorum_queue
|
||||
[rabbit_registry:register(Class, Name, ?MODULE) ||
|
||||
{Class, Name} <- [{policy_validator, <<"alternate-exchange">>},
|
||||
{policy_validator, <<"consumer-timeout">>},
|
||||
|
|
|
@ -1,99 +0,0 @@
|
|||
%% This Source Code Form is subject to the terms of the Mozilla Public
|
||||
%% License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
%%
|
||||
%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
|
||||
%%
|
||||
|
||||
-module(rabbit_prequeue).
|
||||
|
||||
%% This is the initial gen_server that all queue processes start off
|
||||
%% as. It handles the decision as to whether we need to start a new
|
||||
%% mirror, a new master/unmirrored, or whether we are restarting (and
|
||||
%% if so, as what). Thus a crashing queue process can restart from here
|
||||
%% and always do the right thing.
|
||||
|
||||
-export([start_link/3]).
|
||||
|
||||
-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
|
||||
code_change/3]).
|
||||
|
||||
-behaviour(gen_server2).
|
||||
|
||||
-include("amqqueue.hrl").
|
||||
|
||||
%%----------------------------------------------------------------------------
|
||||
|
||||
-export_type([start_mode/0]).
|
||||
|
||||
-type start_mode() :: 'declare' | 'recovery' | 'slave'.
|
||||
|
||||
%%----------------------------------------------------------------------------
|
||||
|
||||
-spec start_link(amqqueue:amqqueue(), start_mode(), pid())
|
||||
-> rabbit_types:ok_pid_or_error().
|
||||
|
||||
start_link(Q, StartMode, Marker) ->
|
||||
gen_server2:start_link(?MODULE, {Q, StartMode, Marker}, []).
|
||||
|
||||
%%----------------------------------------------------------------------------
|
||||
|
||||
init({Q, StartMode, Marker}) ->
|
||||
init(Q, case {is_process_alive(Marker), StartMode} of
|
||||
{true, slave} -> slave;
|
||||
{true, _} -> master;
|
||||
{false, _} -> restart
|
||||
end).
|
||||
|
||||
init(Q, master) -> rabbit_amqqueue_process:init(Q);
|
||||
init(Q, slave) -> rabbit_mirror_queue_slave:init(Q);
|
||||
|
||||
init(Q0, restart) when ?is_amqqueue(Q0) ->
|
||||
QueueName = amqqueue:get_name(Q0),
|
||||
{ok, Q1} = rabbit_amqqueue:lookup(QueueName),
|
||||
QPid = amqqueue:get_pid(Q1),
|
||||
SPids = amqqueue:get_slave_pids(Q1),
|
||||
LocalOrMasterDown = node(QPid) =:= node()
|
||||
orelse not rabbit_process:on_running_node(QPid),
|
||||
Slaves = [SPid || SPid <- SPids, rabbit_process:is_process_alive(SPid)],
|
||||
case rabbit_process:is_process_alive(QPid) of
|
||||
true -> false = LocalOrMasterDown, %% assertion
|
||||
rabbit_mirror_queue_slave:go(self(), async),
|
||||
rabbit_mirror_queue_slave:init(Q1); %% [1]
|
||||
false -> case LocalOrMasterDown andalso Slaves =:= [] of
|
||||
true -> crash_restart(Q1); %% [2]
|
||||
false -> timer:sleep(25),
|
||||
init(Q1, restart) %% [3]
|
||||
end
|
||||
end.
|
||||
%% [1] There is a master on another node. Regardless of whether we
|
||||
%% were originally a master or a mirror, we are now a new slave.
|
||||
%%
|
||||
%% [2] Nothing is alive. We are the last best hope. Try to restart as a master.
|
||||
%%
|
||||
%% [3] The current master is dead but either there are alive mirrors to
|
||||
%% take over or it's all happening on a different node anyway. This is
|
||||
%% not a stable situation. Sleep and wait for somebody else to make a
|
||||
%% move.
|
||||
|
||||
crash_restart(Q0) when ?is_amqqueue(Q0) ->
|
||||
QueueName = amqqueue:get_name(Q0),
|
||||
rabbit_log:error("Restarting crashed ~ts.", [rabbit_misc:rs(QueueName)]),
|
||||
gen_server2:cast(self(), init),
|
||||
Q1 = amqqueue:set_pid(Q0, self()),
|
||||
rabbit_amqqueue_process:init(Q1).
|
||||
|
||||
%%----------------------------------------------------------------------------
|
||||
|
||||
%% This gen_server2 always hands over to some other module at the end
|
||||
%% of init/1.
|
||||
-spec handle_call(_, _, _) -> no_return().
|
||||
handle_call(_Msg, _From, _State) -> exit(unreachable).
|
||||
-spec handle_cast(_, _) -> no_return().
|
||||
handle_cast(_Msg, _State) -> exit(unreachable).
|
||||
-spec handle_info(_, _) -> no_return().
|
||||
handle_info(_Msg, _State) -> exit(unreachable).
|
||||
-spec terminate(_, _) -> no_return().
|
||||
terminate(_Reason, _State) -> exit(unreachable).
|
||||
-spec code_change(_, _, _) -> no_return().
|
||||
code_change(_OldVsn, _State, _Extra) -> exit(unreachable).
|
|
@ -26,8 +26,7 @@
|
|||
|
||||
-export([init/3, terminate/2, delete_and_terminate/2, delete_crashed/1,
|
||||
purge/1, purge_acks/1,
|
||||
publish/6, publish_delivered/5, discard/4, drain_confirmed/1,
|
||||
batch_publish/4, batch_publish_delivered/4,
|
||||
publish/5, publish_delivered/4, discard/3, drain_confirmed/1,
|
||||
dropwhile/2, fetchwhile/4, fetch/2, drop/2, ack/2, requeue/2,
|
||||
ackfold/4, fold/3, len/1, is_empty/1, depth/1,
|
||||
set_ram_duration_target/2, ram_duration/1, needs_timeout/1, timeout/1,
|
||||
|
@ -199,54 +198,23 @@ purge_acks(State = #state{bq = BQ}) ->
|
|||
purge_acks(State = #passthrough{bq = BQ, bqs = BQS}) ->
|
||||
?passthrough1(purge_acks(BQS)).
|
||||
|
||||
publish(Msg, MsgProps, IsDelivered, ChPid, Flow, State = #state{bq = BQ}) ->
|
||||
publish(Msg, MsgProps, IsDelivered, ChPid, State = #state{bq = BQ}) ->
|
||||
pick1(fun (_P, BQSN) ->
|
||||
BQ:publish(Msg, MsgProps, IsDelivered, ChPid, Flow, BQSN)
|
||||
BQ:publish(Msg, MsgProps, IsDelivered, ChPid, BQSN)
|
||||
end, Msg, State);
|
||||
publish(Msg, MsgProps, IsDelivered, ChPid, Flow,
|
||||
publish(Msg, MsgProps, IsDelivered, ChPid,
|
||||
State = #passthrough{bq = BQ, bqs = BQS}) ->
|
||||
?passthrough1(publish(Msg, MsgProps, IsDelivered, ChPid, Flow, BQS)).
|
||||
?passthrough1(publish(Msg, MsgProps, IsDelivered, ChPid, BQS)).
|
||||
|
||||
batch_publish(Publishes, ChPid, Flow, State = #state{bq = BQ, bqss = [{MaxP, _} |_]}) ->
|
||||
PubMap = partition_publish_batch(Publishes, MaxP),
|
||||
lists:foldl(
|
||||
fun ({Priority, Pubs}, St) ->
|
||||
pick1(fun (_P, BQSN) ->
|
||||
BQ:batch_publish(Pubs, ChPid, Flow, BQSN)
|
||||
end, Priority, St)
|
||||
end, State, maps:to_list(PubMap));
|
||||
batch_publish(Publishes, ChPid, Flow,
|
||||
State = #passthrough{bq = BQ, bqs = BQS}) ->
|
||||
?passthrough1(batch_publish(Publishes, ChPid, Flow, BQS)).
|
||||
|
||||
publish_delivered(Msg, MsgProps, ChPid, Flow, State = #state{bq = BQ}) ->
|
||||
publish_delivered(Msg, MsgProps, ChPid, State = #state{bq = BQ}) ->
|
||||
pick2(fun (P, BQSN) ->
|
||||
{AckTag, BQSN1} = BQ:publish_delivered(
|
||||
Msg, MsgProps, ChPid, Flow, BQSN),
|
||||
Msg, MsgProps, ChPid, BQSN),
|
||||
{{P, AckTag}, BQSN1}
|
||||
end, Msg, State);
|
||||
publish_delivered(Msg, MsgProps, ChPid, Flow,
|
||||
publish_delivered(Msg, MsgProps, ChPid,
|
||||
State = #passthrough{bq = BQ, bqs = BQS}) ->
|
||||
?passthrough2(publish_delivered(Msg, MsgProps, ChPid, Flow, BQS)).
|
||||
|
||||
batch_publish_delivered(Publishes, ChPid, Flow, State = #state{bq = BQ, bqss = [{MaxP, _} |_]}) ->
|
||||
PubMap = partition_publish_delivered_batch(Publishes, MaxP),
|
||||
{PrioritiesAndAcks, State1} =
|
||||
lists:foldl(
|
||||
fun ({Priority, Pubs}, {PriosAndAcks, St}) ->
|
||||
{PriosAndAcks1, St1} =
|
||||
pick2(fun (P, BQSN) ->
|
||||
{AckTags, BQSN1} =
|
||||
BQ:batch_publish_delivered(
|
||||
Pubs, ChPid, Flow, BQSN),
|
||||
{priority_on_acktags(P, AckTags), BQSN1}
|
||||
end, Priority, St),
|
||||
{[PriosAndAcks1 | PriosAndAcks], St1}
|
||||
end, {[], State}, maps:to_list(PubMap)),
|
||||
{lists:reverse(PrioritiesAndAcks), State1};
|
||||
batch_publish_delivered(Publishes, ChPid, Flow,
|
||||
State = #passthrough{bq = BQ, bqs = BQS}) ->
|
||||
?passthrough2(batch_publish_delivered(Publishes, ChPid, Flow, BQS)).
|
||||
?passthrough2(publish_delivered(Msg, MsgProps, ChPid, BQS)).
|
||||
|
||||
%% TODO this is a hack. The BQ api does not give us enough information
|
||||
%% here - if we had the Msg we could look at its priority and forward
|
||||
|
@ -256,14 +224,14 @@ batch_publish_delivered(Publishes, ChPid, Flow,
|
|||
%% are talking to VQ*. discard/4 is used by HA, but that's "above" us
|
||||
%% (if in use) so we don't break that either, just some hypothetical
|
||||
%% alternate BQ implementation.
|
||||
discard(_MsgId, _ChPid, _Flow, State = #state{}) ->
|
||||
discard(_MsgId, _ChPid, State = #state{}) ->
|
||||
State;
|
||||
%% We should have something a bit like this here:
|
||||
%% pick1(fun (_P, BQSN) ->
|
||||
%% BQ:discard(MsgId, ChPid, Flow, BQSN)
|
||||
%% BQ:discard(MsgId, ChPid, BQSN)
|
||||
%% end, Msg, State);
|
||||
discard(MsgId, ChPid, Flow, State = #passthrough{bq = BQ, bqs = BQS}) ->
|
||||
?passthrough1(discard(MsgId, ChPid, Flow, BQS)).
|
||||
discard(MsgId, ChPid, State = #passthrough{bq = BQ, bqs = BQS}) ->
|
||||
?passthrough1(discard(MsgId, ChPid, BQS)).
|
||||
|
||||
drain_confirmed(State = #state{bq = BQ}) ->
|
||||
fold_append2(fun (_P, BQSN) -> BQ:drain_confirmed(BQSN) end, State);
|
||||
|
@ -599,10 +567,6 @@ a(State = #state{bqss = BQSs}) ->
|
|||
end.
|
||||
|
||||
%%----------------------------------------------------------------------------
|
||||
partition_publish_batch(Publishes, MaxP) ->
|
||||
partition_publishes(
|
||||
Publishes, fun ({Msg, _, _}) -> Msg end, MaxP).
|
||||
|
||||
partition_publish_delivered_batch(Publishes, MaxP) ->
|
||||
partition_publishes(
|
||||
Publishes, fun ({Msg, _}) -> Msg end, MaxP).
|
||||
|
|
|
@ -89,19 +89,4 @@ get_location_mod_by_config(Queue) when ?is_amqqueue(Queue) ->
|
|||
end.
|
||||
|
||||
all_nodes(Queue) when ?is_amqqueue(Queue) ->
|
||||
handle_is_mirrored_ha_nodes(rabbit_mirror_queue_misc:is_mirrored_ha_nodes(Queue), Queue).
|
||||
|
||||
handle_is_mirrored_ha_nodes(false, _Queue) ->
|
||||
% Note: ha-mode is NOT 'nodes' - it is either exactly or all, which means
|
||||
% that any node in the cluster is eligible to be the new queue master node
|
||||
rabbit_nodes:list_serving();
|
||||
handle_is_mirrored_ha_nodes(true, Queue) ->
|
||||
% Note: ha-mode is 'nodes', which explicitly specifies allowed nodes.
|
||||
% We must use suggested_queue_nodes to get that list of nodes as the
|
||||
% starting point for finding the queue master location
|
||||
handle_suggested_queue_nodes(rabbit_mirror_queue_misc:suggested_queue_nodes(Queue)).
|
||||
|
||||
handle_suggested_queue_nodes({_MNode, []}) ->
|
||||
rabbit_nodes:list_serving();
|
||||
handle_suggested_queue_nodes({MNode, SNodes}) ->
|
||||
[MNode | SNodes].
|
||||
rabbit_nodes:list_serving().
|
||||
|
|
|
@ -77,9 +77,7 @@
|
|||
|
||||
-define(STATE, ?MODULE).
|
||||
|
||||
%% Recoverable mirrors shouldn't really be a generic one, but let's keep it here until
|
||||
%% mirrored queues are deprecated.
|
||||
-define(DOWN_KEYS, [name, durable, auto_delete, arguments, pid, recoverable_slaves, type, state]).
|
||||
-define(DOWN_KEYS, [name, durable, auto_delete, arguments, pid, type, state]).
|
||||
|
||||
%% TODO resolve all registered queue types from registry
|
||||
-define(QUEUE_MODULES, [rabbit_classic_queue, rabbit_quorum_queue, rabbit_stream_queue]).
|
||||
|
@ -395,7 +393,6 @@ i_down(durable, Q, _) -> amqqueue:is_durable(Q);
|
|||
i_down(auto_delete, Q, _) -> amqqueue:is_auto_delete(Q);
|
||||
i_down(arguments, Q, _) -> amqqueue:get_arguments(Q);
|
||||
i_down(pid, Q, _) -> amqqueue:get_pid(Q);
|
||||
i_down(recoverable_slaves, Q, _) -> amqqueue:get_recoverable_slaves(Q);
|
||||
i_down(type, Q, _) -> amqqueue:get_type(Q);
|
||||
i_down(state, _Q, DownReason) -> DownReason;
|
||||
i_down(_K, _Q, _DownReason) -> ''.
|
||||
|
|
|
@ -359,7 +359,6 @@ definitions() ->
|
|||
|
||||
mandatory_definitions() ->
|
||||
pre_khepri_definitions()
|
||||
++ gm:table_definitions()
|
||||
++ mirrored_supervisor:table_definitions()
|
||||
++ rabbit_maintenance:table_definitions().
|
||||
|
||||
|
|
|
@ -8,7 +8,6 @@
|
|||
-module(rabbit_upgrade_preparation).
|
||||
|
||||
-export([await_online_quorum_plus_one/1,
|
||||
await_online_synchronised_mirrors/1,
|
||||
list_with_minimum_quorum_for_cli/0]).
|
||||
|
||||
%%
|
||||
|
@ -21,12 +20,6 @@ await_online_quorum_plus_one(Timeout) ->
|
|||
Iterations = ceil(Timeout / ?SAMPLING_INTERVAL),
|
||||
do_await_safe_online_quorum(Iterations).
|
||||
|
||||
|
||||
await_online_synchronised_mirrors(Timeout) ->
|
||||
Iterations = ceil(Timeout / ?SAMPLING_INTERVAL),
|
||||
do_await_online_synchronised_mirrors(Iterations).
|
||||
|
||||
|
||||
%%
|
||||
%% Implementation
|
||||
%%
|
||||
|
@ -68,17 +61,6 @@ do_await_safe_online_quorum(IterationsLeft) ->
|
|||
do_await_safe_online_quorum(IterationsLeft - 1)
|
||||
end.
|
||||
|
||||
|
||||
do_await_online_synchronised_mirrors(0) ->
|
||||
false;
|
||||
do_await_online_synchronised_mirrors(IterationsLeft) ->
|
||||
case rabbit_amqqueue:list_local_mirrored_classic_without_synchronised_mirrors() of
|
||||
[] -> true;
|
||||
List when is_list(List) ->
|
||||
timer:sleep(?SAMPLING_INTERVAL),
|
||||
do_await_online_synchronised_mirrors(IterationsLeft - 1)
|
||||
end.
|
||||
|
||||
-spec list_with_minimum_quorum_for_cli() -> [#{binary() => term()}].
|
||||
list_with_minimum_quorum_for_cli() ->
|
||||
EndangeredQueues = lists:append(
|
||||
|
|
|
@ -9,9 +9,8 @@
|
|||
|
||||
-export([init/3, terminate/2, delete_and_terminate/2, delete_crashed/1,
|
||||
purge/1, purge_acks/1,
|
||||
publish/6, publish_delivered/5,
|
||||
batch_publish/4, batch_publish_delivered/4,
|
||||
discard/4, drain_confirmed/1,
|
||||
publish/5, publish_delivered/4,
|
||||
discard/3, drain_confirmed/1,
|
||||
dropwhile/2, fetchwhile/4, fetch/2, drop/2, ack/2, requeue/2,
|
||||
ackfold/4, fold/3, len/1, is_empty/1, depth/1,
|
||||
set_ram_duration_target/2, ram_duration/1, needs_timeout/1, timeout/1,
|
||||
|
@@ -531,34 +530,21 @@ purge(State = #vqstate { len = Len }) ->
purge_acks(State) -> a(purge_pending_ack(false, State)).

publish(Msg, MsgProps, IsDelivered, ChPid, Flow, State) ->
publish(Msg, MsgProps, IsDelivered, ChPid, State) ->
    State1 =
        publish1(Msg, MsgProps, IsDelivered, ChPid, Flow,
        publish1(Msg, MsgProps, IsDelivered, ChPid,
                 fun maybe_write_to_disk/4,
                 State),
    a(maybe_update_rates(State1)).

batch_publish(Publishes, ChPid, Flow, State) ->
    {ChPid, Flow, State1} =
        lists:foldl(fun batch_publish1/2, {ChPid, Flow, State}, Publishes),
    State2 = ui(State1),
    a(maybe_update_rates(State2)).

publish_delivered(Msg, MsgProps, ChPid, Flow, State) ->
publish_delivered(Msg, MsgProps, ChPid, State) ->
    {SeqId, State1} =
        publish_delivered1(Msg, MsgProps, ChPid, Flow,
        publish_delivered1(Msg, MsgProps, ChPid,
                           fun maybe_write_to_disk/4,
                           State),
    {SeqId, a(maybe_update_rates(State1))}.

batch_publish_delivered(Publishes, ChPid, Flow, State) ->
    {ChPid, Flow, SeqIds, State1} =
        lists:foldl(fun batch_publish_delivered1/2,
                    {ChPid, Flow, [], State}, Publishes),
    State2 = ui(State1),
    {lists:reverse(SeqIds), a(maybe_update_rates(State2))}.

discard(_MsgId, _ChPid, _Flow, State) -> State.
discard(_MsgId, _ChPid, State) -> State.

drain_confirmed(State = #vqstate { confirmed = C }) ->
    case sets:is_empty(C) of

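The public backing-queue API above loses its credit-flow argument: publish/6 becomes publish/5, publish_delivered/5 becomes publish_delivered/4, and the batch variants disappear. A hedged before/after sketch of a caller; the variable names are placeholders, not taken from the commit.

%% Hedged caller sketch (placeholder names); not taken from the commit.
publish_one(Msg, Props, ChPid, BQS0) ->
    %% Previously a flow token was threaded through:
    %%   rabbit_variable_queue:publish(Msg, Props, false, ChPid, noflow, BQS0)
    %% With mirroring and its credit flow gone, the call is simply:
    rabbit_variable_queue:publish(Msg, Props, false, ChPid, BQS0).
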
@@ -1695,7 +1681,7 @@ process_delivers_and_acks_fun(_) ->
publish1(Msg,
         MsgProps = #message_properties { needs_confirming = NeedsConfirming },
         IsDelivered, _ChPid, _Flow, PersistFun,
         IsDelivered, _ChPid, PersistFun,
         State = #vqstate { q3 = Q3, delta = Delta = #delta { count = DeltaCount },
                            len = Len,
                            qi_embed_msgs_below = IndexMaxSize,

@@ -1740,14 +1726,10 @@ maybe_next_deliver_seq_id(SeqId, NextDeliverSeqId, true) ->
maybe_next_deliver_seq_id(_, NextDeliverSeqId, false) ->
    NextDeliverSeqId.

batch_publish1({Msg, MsgProps, IsDelivered}, {ChPid, Flow, State}) ->
    {ChPid, Flow, publish1(Msg, MsgProps, IsDelivered, ChPid, Flow,
                           fun maybe_prepare_write_to_disk/4, State)}.

publish_delivered1(Msg,
                   MsgProps = #message_properties {
                     needs_confirming = NeedsConfirming },
                   _ChPid, _Flow, PersistFun,
                   _ChPid, PersistFun,
                   State = #vqstate { qi_embed_msgs_below = IndexMaxSize,
                                      next_seq_id = SeqId,
                                      next_deliver_seq_id = NextDeliverSeqId,

@@ -1783,13 +1765,6 @@ maybe_needs_confirming(true, queue_store, MsgId, UC, UCS) ->
maybe_needs_confirming(true, _, MsgId, UC, UCS) ->
    {sets:add_element(MsgId, UC), UCS}.

batch_publish_delivered1({Msg, MsgProps}, {ChPid, Flow, SeqIds, State}) ->
    {SeqId, State1} =
        publish_delivered1(Msg, MsgProps, ChPid, Flow,
                           fun maybe_prepare_write_to_disk/4,
                           State),
    {ChPid, Flow, [SeqId | SeqIds], State1}.

maybe_write_msg_to_disk(Force, MsgStatus = #msg_status {
                                  seq_id = SeqId,
                                  msg = Msg, msg_id = MsgId,

@@ -68,8 +68,6 @@ recover(VHost) ->
    rabbit_log:debug("rabbit_binding:recover/2 for vhost ~ts completed in ~fs", [VHost, Time/1000000]),

    ok = rabbit_amqqueue:start(Recovered),
    %% Start queue mirrors.
    ok = rabbit_mirror_queue_misc:on_vhost_up(VHost),
    ok.

ensure_config_file(VHost) ->

@ -20,7 +20,7 @@ memory() ->
|
|||
{Sums, _Other} = sum_processes(
|
||||
lists:append(All), distinguishers(), [memory]),
|
||||
|
||||
[Qs, QsSlave, Qqs, DlxWorkers, Ssqs, Srqs, SCoor, ConnsReader, ConnsWriter, ConnsChannel,
|
||||
[Qs, Qqs, DlxWorkers, Ssqs, Srqs, SCoor, ConnsReader, ConnsWriter, ConnsChannel,
|
||||
ConnsOther, MsgIndexProc, MgmtDbProc, Plugins] =
|
||||
[aggregate(Names, Sums, memory, fun (X) -> X end)
|
||||
|| Names <- distinguished_interesting_sups()],
|
||||
|
@ -63,7 +63,7 @@ memory() ->
|
|||
|
||||
OtherProc = Processes
|
||||
- ConnsReader - ConnsWriter - ConnsChannel - ConnsOther
|
||||
- Qs - QsSlave - Qqs - DlxWorkers - Ssqs - Srqs - SCoor - MsgIndexProc - Plugins
|
||||
- Qs - Qqs - DlxWorkers - Ssqs - Srqs - SCoor - MsgIndexProc - Plugins
|
||||
- MgmtDbProc - MetricsProc - MetadataStoreProc,
|
||||
[
|
||||
%% Connections
|
||||
|
@ -74,7 +74,6 @@ memory() ->
|
|||
|
||||
%% Queues
|
||||
{queue_procs, Qs},
|
||||
{queue_slave_procs, QsSlave},
|
||||
{quorum_queue_procs, Qqs},
|
||||
{quorum_queue_dlx_procs, DlxWorkers},
|
||||
{stream_queue_procs, Ssqs},
|
||||
|
@ -128,7 +127,7 @@ binary() ->
|
|||
sets:add_element({Ptr, Sz}, Acc0)
|
||||
end, Acc, Info)
|
||||
end, distinguishers(), [{binary, sets:new()}]),
|
||||
[Other, Qs, QsSlave, Qqs, DlxWorkers, Ssqs, Srqs, Scoor, ConnsReader, ConnsWriter,
|
||||
[Other, Qs, Qqs, DlxWorkers, Ssqs, Srqs, Scoor, ConnsReader, ConnsWriter,
|
||||
ConnsChannel, ConnsOther, MsgIndexProc, MgmtDbProc, Plugins] =
|
||||
[aggregate(Names, [{other, Rest} | Sums], binary, fun sum_binary/1)
|
||||
|| Names <- [[other] | distinguished_interesting_sups()]],
|
||||
|
@ -146,7 +145,6 @@ binary() ->
|
|||
{connection_channels, ConnsChannel},
|
||||
{connection_other, ConnsOther},
|
||||
{queue_procs, Qs},
|
||||
{queue_slave_procs, QsSlave},
|
||||
{quorum_queue_procs, Qqs},
|
||||
{quorum_queue_dlx_procs, DlxWorkers},
|
||||
{stream_queue_procs, Ssqs},
|
||||
|
@@ -254,13 +252,11 @@ ranch_server_sups() ->
with(Sups, With) -> [{Sup, With} || Sup <- Sups].

distinguishers() -> with(queue_sups(), fun queue_type/1) ++
                    with(conn_sups(), fun conn_type/1).
distinguishers() -> with(conn_sups(), fun conn_type/1).

distinguished_interesting_sups() ->
    [
     with(queue_sups(), master),
     with(queue_sups(), slave),
     queue_sups(),
     quorum_sups(),
     dlx_sups(),
     stream_server_sups(),

@@ -308,12 +304,6 @@ extract(Name, Sums, Key, Fun) ->
sum_binary(Set) ->
    sets:fold(fun({_Pt, Sz}, Acc) -> Acc + Sz end, 0, Set).

queue_type(PDict) ->
    case keyfind(process_name, PDict) of
        {value, {rabbit_mirror_queue_slave, _}} -> slave;
        _ -> master
    end.

conn_type(PDict) ->
    case keyfind(process_name, PDict) of
        {value, {rabbit_reader, _}} -> reader;

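Without the master/slave distinction, the memory and binary breakdowns only classify connection processes, using the registered process_name entry from each process dictionary as shown above. A hedged sketch of that classification; the keyfind/2 stand-in and the writer/channel clauses are assumptions extrapolated from the reader clause in the hunk.

%% Hedged sketch; keyfind/2 below is a local stand-in for the module's
%% helper, and the writer/channel clauses are assumed by analogy with
%% the rabbit_reader clause shown above.
keyfind(Key, PDict) ->
    case lists:keyfind(Key, 1, PDict) of
        {Key, Value} -> {value, Value};
        false        -> false
    end.

conn_type_sketch(PDict) ->
    case keyfind(process_name, PDict) of
        {value, {rabbit_reader, _}}  -> reader;
        {value, {rabbit_writer, _}}  -> writer;
        {value, {rabbit_channel, _}} -> channel;
        _                            -> other
    end.
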
@@ -35,9 +35,7 @@
     variable_queue_purge,
     variable_queue_requeue,
     variable_queue_requeue_ram_beta,
     variable_queue_fold,
     variable_queue_batch_publish,
     variable_queue_batch_publish_delivered
     variable_queue_fold
    ]).

-define(BACKING_QUEUE_TESTCASES, [

@ -1551,36 +1549,6 @@ test_variable_queue_fold(Cut, Msgs, PendingMsgs, VQ0) ->
|
|||
Expected = lists:reverse(Acc), %% assertion
|
||||
VQ1.
|
||||
|
||||
variable_queue_batch_publish(Config) ->
|
||||
passed = rabbit_ct_broker_helpers:rpc(Config, 0,
|
||||
?MODULE, variable_queue_batch_publish1, [Config]).
|
||||
|
||||
variable_queue_batch_publish1(Config) ->
|
||||
with_fresh_variable_queue(
|
||||
fun variable_queue_batch_publish2/2,
|
||||
?config(variable_queue_type, Config)).
|
||||
|
||||
variable_queue_batch_publish2(VQ, _Config) ->
|
||||
Count = 10,
|
||||
VQ1 = variable_queue_batch_publish(true, Count, VQ),
|
||||
Count = rabbit_variable_queue:len(VQ1),
|
||||
VQ1.
|
||||
|
||||
variable_queue_batch_publish_delivered(Config) ->
|
||||
passed = rabbit_ct_broker_helpers:rpc(Config, 0,
|
||||
?MODULE, variable_queue_batch_publish_delivered1, [Config]).
|
||||
|
||||
variable_queue_batch_publish_delivered1(Config) ->
|
||||
with_fresh_variable_queue(
|
||||
fun variable_queue_batch_publish_delivered2/2,
|
||||
?config(variable_queue_type, Config)).
|
||||
|
||||
variable_queue_batch_publish_delivered2(VQ, _Config) ->
|
||||
Count = 10,
|
||||
VQ1 = variable_queue_batch_publish_delivered(true, Count, VQ),
|
||||
Count = rabbit_variable_queue:depth(VQ1),
|
||||
VQ1.
|
||||
|
||||
%% same as test_variable_queue_requeue_ram_beta but randomly changing
|
||||
%% the queue mode after every step.
|
||||
variable_queue_mode_change(Config) ->
|
||||
|
@ -1830,44 +1798,9 @@ variable_queue_publish(IsPersistent, Start, Count, PropFun, PayloadFun, VQ) ->
|
|||
rabbit_variable_queue:publish(
|
||||
Msg,
|
||||
PropFun(N, #message_properties{size = 10}),
|
||||
false, self(), noflow, VQN)
|
||||
false, self(), VQN)
|
||||
end, VQ, lists:seq(Start, Start + Count - 1))).
|
||||
|
||||
variable_queue_batch_publish(IsPersistent, Count, VQ) ->
|
||||
variable_queue_batch_publish(IsPersistent, Count, fun (_N, P) -> P end, VQ).
|
||||
|
||||
variable_queue_batch_publish(IsPersistent, Count, PropFun, VQ) ->
|
||||
variable_queue_batch_publish(IsPersistent, 1, Count, PropFun,
|
||||
fun (_N) -> <<>> end, VQ).
|
||||
|
||||
variable_queue_batch_publish(IsPersistent, Start, Count, PropFun, PayloadFun, VQ) ->
|
||||
variable_queue_batch_publish0(IsPersistent, Start, Count, PropFun,
|
||||
PayloadFun, fun make_publish/4,
|
||||
fun rabbit_variable_queue:batch_publish/4,
|
||||
VQ).
|
||||
|
||||
variable_queue_batch_publish_delivered(IsPersistent, Count, VQ) ->
|
||||
variable_queue_batch_publish_delivered(IsPersistent, Count, fun (_N, P) -> P end, VQ).
|
||||
|
||||
variable_queue_batch_publish_delivered(IsPersistent, Count, PropFun, VQ) ->
|
||||
variable_queue_batch_publish_delivered(IsPersistent, 1, Count, PropFun,
|
||||
fun (_N) -> <<>> end, VQ).
|
||||
|
||||
variable_queue_batch_publish_delivered(IsPersistent, Start, Count, PropFun, PayloadFun, VQ) ->
|
||||
variable_queue_batch_publish0(IsPersistent, Start, Count, PropFun,
|
||||
PayloadFun, fun make_publish_delivered/4,
|
||||
fun rabbit_variable_queue:batch_publish_delivered/4,
|
||||
VQ).
|
||||
|
||||
variable_queue_batch_publish0(IsPersistent, Start, Count, PropFun, PayloadFun,
|
||||
MakePubFun, PubFun, VQ) ->
|
||||
Publishes =
|
||||
[MakePubFun(IsPersistent, PayloadFun, PropFun, N)
|
||||
|| N <- lists:seq(Start, Start + Count - 1)],
|
||||
Res = PubFun(Publishes, self(), noflow, VQ),
|
||||
VQ1 = pub_res(Res),
|
||||
variable_queue_wait_for_shuffling_end(VQ1).
|
||||
|
||||
variable_queue_fetch(Count, IsPersistent, IsDelivered, Len, VQ) ->
|
||||
lists:foldl(fun (N, {VQN, AckTagsAcc}) ->
|
||||
Rem = Len - N,
|
||||
|
|
|
@@ -10,9 +10,8 @@
-export([init/3, terminate/2, delete_and_terminate/2, delete_crashed/1,
         purge/1, purge_acks/1,
         publish/6, publish_delivered/5,
         batch_publish/4, batch_publish_delivered/4,
         discard/4, drain_confirmed/1,
         publish/5, publish_delivered/4,
         discard/3, drain_confirmed/1,
         dropwhile/2, fetchwhile/4, fetch/2, drop/2, ack/2, requeue/2,
         ackfold/4, fold/3, len/1, is_empty/1, depth/1,
         set_ram_duration_target/2, ram_duration/1, needs_timeout/1, timeout/1,

@@ -224,19 +223,13 @@ purge(State) ->
purge_acks(State) ->
    rabbit_variable_queue:purge_acks(State).

publish(Msg, MsgProps, IsDelivered, ChPid, Flow, State) ->
    rabbit_variable_queue:publish(Msg, MsgProps, IsDelivered, ChPid, Flow, State).
publish(Msg, MsgProps, IsDelivered, ChPid, State) ->
    rabbit_variable_queue:publish(Msg, MsgProps, IsDelivered, ChPid, State).

batch_publish(Publishes, ChPid, Flow, State) ->
    rabbit_variable_queue:batch_publish(Publishes, ChPid, Flow, State).
publish_delivered(Msg, MsgProps, ChPid, State) ->
    rabbit_variable_queue:publish_delivered(Msg, MsgProps, ChPid, State).

publish_delivered(Msg, MsgProps, ChPid, Flow, State) ->
    rabbit_variable_queue:publish_delivered(Msg, MsgProps, ChPid, Flow, State).

batch_publish_delivered(Publishes, ChPid, Flow, State) ->
    rabbit_variable_queue:batch_publish_delivered(Publishes, ChPid, Flow, State).

discard(_MsgId, _ChPid, _Flow, State) -> State.
discard(_MsgId, _ChPid, State) -> State.

drain_confirmed(State) ->
    rabbit_variable_queue:drain_confirmed(State).

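The wrapper module above forwards every backing-queue callback to rabbit_variable_queue at the new, flow-free arity. A hedged sketch of that pass-through pattern for two callbacks in a hypothetical wrapper (my_bq_wrapper is a placeholder module name, not something introduced by this commit).

%% Hedged sketch of the pass-through pattern; my_bq_wrapper is a
%% placeholder, not a module introduced by this commit.
-module(my_bq_wrapper).
-export([publish/5, discard/3]).

publish(Msg, MsgProps, IsDelivered, ChPid, State) ->
    %% Delegate verbatim; no credit-flow token any more.
    rabbit_variable_queue:publish(Msg, MsgProps, IsDelivered, ChPid, State).

discard(_MsgId, _ChPid, State) ->
    State.
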
@ -557,19 +557,19 @@ reset_removes_things(Config) ->
|
|||
test_removes_things(Config, fun (R, _H) -> ok = reset(Config, R) end).
|
||||
|
||||
test_removes_things(Config, LoseRabbit) ->
|
||||
Unmirrored = <<"unmirrored-queue">>,
|
||||
Classic = <<"classic-queue">>,
|
||||
[Rabbit, Hare | _] = cluster_members(Config),
|
||||
RCh = rabbit_ct_client_helpers:open_channel(Config, Rabbit),
|
||||
declare(RCh, Unmirrored),
|
||||
declare(RCh, Classic),
|
||||
ok = stop_app(Config, Rabbit),
|
||||
|
||||
HCh = rabbit_ct_client_helpers:open_channel(Config, Hare),
|
||||
{'EXIT',{{shutdown,{server_initiated_close,404,_}}, _}} =
|
||||
(catch declare(HCh, Unmirrored)),
|
||||
(catch declare(HCh, Classic)),
|
||||
|
||||
ok = LoseRabbit(Rabbit, Hare),
|
||||
HCh2 = rabbit_ct_client_helpers:open_channel(Config, Hare),
|
||||
declare(HCh2, Unmirrored),
|
||||
declare(HCh2, Classic),
|
||||
ok.
|
||||
|
||||
forget_node_in_khepri(Config) ->
|
||||
|
@ -752,21 +752,21 @@ reset_last_disc_node(Config) ->
|
|||
forget_offline_removes_things(Config) ->
|
||||
[Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config,
|
||||
nodename),
|
||||
Unmirrored = <<"unmirrored-queue">>,
|
||||
Classic = <<"classic-queue">>,
|
||||
X = <<"X">>,
|
||||
RCh = rabbit_ct_client_helpers:open_channel(Config, Rabbit),
|
||||
declare(RCh, Unmirrored),
|
||||
declare(RCh, Classic),
|
||||
|
||||
amqp_channel:call(RCh, #'exchange.declare'{durable = true,
|
||||
exchange = X,
|
||||
auto_delete = true}),
|
||||
amqp_channel:call(RCh, #'queue.bind'{queue = Unmirrored,
|
||||
amqp_channel:call(RCh, #'queue.bind'{queue = Classic,
|
||||
exchange = X}),
|
||||
ok = rabbit_ct_broker_helpers:stop_broker(Config, Rabbit),
|
||||
|
||||
HCh = rabbit_ct_client_helpers:open_channel(Config, Hare),
|
||||
{'EXIT',{{shutdown,{server_initiated_close,404,_}}, _}} =
|
||||
(catch declare(HCh, Unmirrored)),
|
||||
(catch declare(HCh, Classic)),
|
||||
|
||||
ok = rabbit_ct_broker_helpers:stop_node(Config, Hare),
|
||||
ok = rabbit_ct_broker_helpers:stop_node(Config, Rabbit),
|
||||
|
@ -774,7 +774,7 @@ forget_offline_removes_things(Config) ->
|
|||
ok = rabbit_ct_broker_helpers:start_node(Config, Hare),
|
||||
|
||||
HCh2 = rabbit_ct_client_helpers:open_channel(Config, Hare),
|
||||
declare(HCh2, Unmirrored),
|
||||
declare(HCh2, Classic),
|
||||
{'EXIT',{{shutdown,{server_initiated_close,404,_}}, _}} =
|
||||
(catch amqp_channel:call(HCh2,#'exchange.declare'{durable = true,
|
||||
exchange = X,
|
||||
|
@ -782,49 +782,6 @@ forget_offline_removes_things(Config) ->
|
|||
passive = true})),
|
||||
ok.
|
||||
|
||||
set_ha_policy(Config, QName, Master, Slaves) ->
|
||||
Nodes = [list_to_binary(atom_to_list(N)) || N <- [Master | Slaves]],
|
||||
HaPolicy = {<<"nodes">>, Nodes},
|
||||
rabbit_ct_broker_helpers:set_ha_policy(Config, Master, QName, HaPolicy),
|
||||
await_followers(QName, Master, Slaves).
|
||||
|
||||
await_followers(QName, Master, Slaves) ->
|
||||
await_followers_0(QName, Master, Slaves, 10).
|
||||
|
||||
await_followers_0(QName, Master, Slaves0, Tries) ->
|
||||
{ok, Queue} = await_followers_lookup_queue(QName, Master),
|
||||
SPids = amqqueue:get_slave_pids(Queue),
|
||||
ActMaster = amqqueue:qnode(Queue),
|
||||
ActSlaves = lists:usort([node(P) || P <- SPids]),
|
||||
Slaves1 = lists:usort(Slaves0),
|
||||
await_followers_1(QName, ActMaster, ActSlaves, Master, Slaves1, Tries).
|
||||
|
||||
await_followers_1(QName, _ActMaster, _ActSlaves, _Master, _Slaves, 0) ->
|
||||
error({timeout_waiting_for_followers, QName});
|
||||
await_followers_1(QName, ActMaster, ActSlaves, Master, Slaves, Tries) ->
|
||||
case {Master, Slaves} of
|
||||
{ActMaster, ActSlaves} ->
|
||||
ok;
|
||||
_ ->
|
||||
timer:sleep(250),
|
||||
await_followers_0(QName, Master, Slaves, Tries - 1)
|
||||
end.
|
||||
|
||||
await_followers_lookup_queue(QName, Master) ->
|
||||
await_followers_lookup_queue(QName, Master, 10).
|
||||
|
||||
await_followers_lookup_queue(QName, _Master, 0) ->
|
||||
error({timeout_looking_up_queue, QName});
|
||||
await_followers_lookup_queue(QName, Master, Tries) ->
|
||||
RpcArgs = [rabbit_misc:r(<<"/">>, queue, QName)],
|
||||
case rpc:call(Master, rabbit_amqqueue, lookup, RpcArgs) of
|
||||
{error, not_found} ->
|
||||
timer:sleep(250),
|
||||
await_followers_lookup_queue(QName, Master, Tries - 1);
|
||||
{ok, Q} ->
|
||||
{ok, Q}
|
||||
end.
|
||||
|
||||
force_boot(Config) ->
|
||||
[Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config,
|
||||
nodename),
|
||||
|
|
|
@ -42,7 +42,6 @@ groups() ->
|
|||
|
||||
AllTestsParallel = [
|
||||
{classic_queue, [parallel], AllTests},
|
||||
{mirrored_queue, [parallel], AllTests},
|
||||
{quorum_queue, [parallel], AllTests}
|
||||
],
|
||||
[
|
||||
|
@ -79,20 +78,6 @@ init_per_group(quorum_queue, Config) ->
|
|||
[{policy_type, <<"quorum_queues">>},
|
||||
{queue_args, [{<<"x-queue-type">>, longstr, <<"quorum">>}]},
|
||||
{queue_durable, true}]);
|
||||
init_per_group(mirrored_queue, Config) ->
|
||||
case rabbit_ct_broker_helpers:configured_metadata_store(Config) of
|
||||
{khepri, _} ->
|
||||
{skip, <<"Classic queue mirroring not supported by Khepri">>};
|
||||
mnesia ->
|
||||
rabbit_ct_broker_helpers:set_ha_policy(Config, 0, <<"^max_length.*queue">>,
|
||||
<<"all">>, [{<<"ha-sync-mode">>, <<"automatic">>}]),
|
||||
Config1 = rabbit_ct_helpers:set_config(
|
||||
Config, [{policy_type, <<"classic_queues">>},
|
||||
{is_mirrored, true},
|
||||
{queue_args, [{<<"x-queue-type">>, longstr, <<"classic">>}]},
|
||||
{queue_durable, true}]),
|
||||
rabbit_ct_helpers:run_steps(Config1, [])
|
||||
end;
|
||||
init_per_group(Group, Config0) ->
|
||||
case lists:member({group, Group}, all()) of
|
||||
true ->
|
||||
|
|
|
@ -20,10 +20,9 @@ all() ->
|
|||
groups() ->
|
||||
[
|
||||
{cluster_size_2, [], [
|
||||
crashing_unmirrored_durable,
|
||||
crashing_mirrored,
|
||||
crashing_durable,
|
||||
give_up_after_repeated_crashes,
|
||||
crashing_unmirrored_transient
|
||||
crashing_transient
|
||||
]}
|
||||
].
|
||||
|
||||
|
@ -46,14 +45,7 @@ init_per_group(cluster_size_2, Config) ->
|
|||
end_per_group(_, Config) ->
|
||||
Config.
|
||||
|
||||
init_per_testcase(crashing_mirrored = Testcase, Config) ->
|
||||
case rabbit_ct_broker_helpers:configured_metadata_store(Config) of
|
||||
mnesia ->
|
||||
init_per_testcase0(Testcase, Config);
|
||||
_ ->
|
||||
{skip, "Classic queue mirroring not supported by Khepri"}
|
||||
end;
|
||||
init_per_testcase(crashing_unmirrored_transient = Testcase, Config) ->
|
||||
init_per_testcase(crashing_transient = Testcase, Config) ->
|
||||
case rabbit_ct_broker_helpers:configured_metadata_store(Config) of
|
||||
mnesia ->
|
||||
init_per_testcase0(Testcase, Config);
|
||||
|
@ -83,37 +75,26 @@ end_per_testcase(Testcase, Config) ->
|
|||
%% Testcases.
|
||||
%% -------------------------------------------------------------------
|
||||
|
||||
crashing_unmirrored_durable(Config) ->
|
||||
crashing_durable(Config) ->
|
||||
[A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
|
||||
ChA = rabbit_ct_client_helpers:open_channel(Config, A),
|
||||
ConnB = rabbit_ct_client_helpers:open_connection(Config, B),
|
||||
QName = <<"crashing_unmirrored-q">>,
|
||||
QName = <<"crashing-q">>,
|
||||
amqp_channel:call(ChA, #'confirm.select'{}),
|
||||
test_queue_failure(A, ChA, ConnB, 1, 0,
|
||||
#'queue.declare'{queue = QName, durable = true}),
|
||||
ok.
|
||||
|
||||
crashing_unmirrored_transient(Config) ->
|
||||
crashing_transient(Config) ->
|
||||
[A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
|
||||
ChA = rabbit_ct_client_helpers:open_channel(Config, A),
|
||||
ConnB = rabbit_ct_client_helpers:open_connection(Config, B),
|
||||
QName = <<"crashing_unmirrored-q">>,
|
||||
QName = <<"crashing-q">>,
|
||||
amqp_channel:call(ChA, #'confirm.select'{}),
|
||||
test_queue_failure(A, ChA, ConnB, 0, 0,
|
||||
#'queue.declare'{queue = QName, durable = false}),
|
||||
ok.
|
||||
|
||||
crashing_mirrored(Config) ->
|
||||
[A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
|
||||
rabbit_ct_broker_helpers:set_ha_policy(Config, A, <<".*">>, <<"all">>),
|
||||
ChA = rabbit_ct_client_helpers:open_channel(Config, A),
|
||||
ConnB = rabbit_ct_client_helpers:open_connection(Config, B),
|
||||
QName = <<"crashing_mirrored-q">>,
|
||||
amqp_channel:call(ChA, #'confirm.select'{}),
|
||||
test_queue_failure(A, ChA, ConnB, 2, 1,
|
||||
#'queue.declare'{queue = QName, durable = true}),
|
||||
ok.
|
||||
|
||||
test_queue_failure(Node, Ch, RaceConn, MsgCount, FollowerCount, Decl) ->
|
||||
#'queue.declare_ok'{queue = QName} = amqp_channel:call(Ch, Decl),
|
||||
try
|
||||
|
@ -123,7 +104,6 @@ test_queue_failure(Node, Ch, RaceConn, MsgCount, FollowerCount, Decl) ->
|
|||
QRes = rabbit_misc:r(<<"/">>, queue, QName),
|
||||
rabbit_amqqueue:kill_queue(Node, QRes),
|
||||
assert_message_count(MsgCount, Ch, QName),
|
||||
assert_follower_count(FollowerCount, Node, QName),
|
||||
stop_declare_racer(Racer)
|
||||
after
|
||||
amqp_channel:call(Ch, #'queue.delete'{queue = QName})
|
||||
|
@ -207,20 +187,3 @@ assert_message_count(Count, Ch, QName) ->
|
|||
#'queue.declare_ok'{message_count = Count} =
|
||||
amqp_channel:call(Ch, #'queue.declare'{queue = QName,
|
||||
passive = true}).
|
||||
|
||||
assert_follower_count(Count, Node, QName) ->
|
||||
Q = lookup(Node, QName),
|
||||
[{_, Pids}] = rpc:call(Node, rabbit_amqqueue, info, [Q, [slave_pids]]),
|
||||
RealCount = case Pids of
|
||||
'' -> 0;
|
||||
_ -> length(Pids)
|
||||
end,
|
||||
case RealCount of
|
||||
Count ->
|
||||
ok;
|
||||
_ when RealCount < Count ->
|
||||
timer:sleep(10),
|
||||
assert_follower_count(Count, Node, QName);
|
||||
_ ->
|
||||
exit({too_many_replicas, Count, RealCount})
|
||||
end.
|
||||
|
|
|
@ -67,8 +67,6 @@ groups() ->
|
|||
[
|
||||
{classic_queue, Opts, [{at_most_once, Opts, [dead_letter_max_length_reject_publish_dlx | DeadLetterTests]},
|
||||
{disabled, Opts, DisabledMetricTests}]},
|
||||
{mirrored_queue, Opts, [{at_most_once, Opts, [dead_letter_max_length_reject_publish_dlx | DeadLetterTests]},
|
||||
{disabled, Opts, DisabledMetricTests}]},
|
||||
{quorum_queue, Opts, [{at_most_once, Opts, DeadLetterTests},
|
||||
{disabled, Opts, DisabledMetricTests},
|
||||
{at_least_once, Opts, DeadLetterTests --
|
||||
|
@ -108,19 +106,6 @@ init_per_group(classic_queue, Config) ->
|
|||
Config,
|
||||
[{queue_args, [{<<"x-queue-type">>, longstr, <<"classic">>}]},
|
||||
{queue_durable, false}]);
|
||||
init_per_group(mirrored_queue, Config) ->
|
||||
case rabbit_ct_broker_helpers:configured_metadata_store(Config) of
|
||||
mnesia ->
|
||||
rabbit_ct_broker_helpers:set_ha_policy(Config, 0, <<"^max_length.*queue">>,
|
||||
<<"all">>, [{<<"ha-sync-mode">>, <<"automatic">>}]),
|
||||
Config1 = rabbit_ct_helpers:set_config(
|
||||
Config, [{is_mirrored, true},
|
||||
{queue_args, [{<<"x-queue-type">>, longstr, <<"classic">>}]},
|
||||
{queue_durable, false}]),
|
||||
rabbit_ct_helpers:run_steps(Config1, []);
|
||||
_ ->
|
||||
{skip, "Classic mirroring not supported by Khepri"}
|
||||
end;
|
||||
init_per_group(quorum_queue, Config) ->
|
||||
rabbit_ct_helpers:set_config(
|
||||
Config,
|
||||
|
@ -1787,13 +1772,6 @@ consume(Ch, QName, Payloads) ->
|
|||
consume_empty(Ch, QName) ->
|
||||
#'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}).
|
||||
|
||||
sync_mirrors(QName, Config) ->
|
||||
case ?config(is_mirrored, Config) of
|
||||
true ->
|
||||
rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, [<<"sync_queue">>, QName]);
|
||||
_ -> ok
|
||||
end.
|
||||
|
||||
get_global_counters(Config) ->
|
||||
rabbit_ct_broker_helpers:rpc(Config, rabbit_global_counters, overview, []).
|
||||
|
||||
|
|
|
@@ -396,7 +396,7 @@ import_invalid_file_case(Config, CaseName) ->

import_invalid_file_case_in_khepri(Config, CaseName) ->
    CasePath = filename:join(?config(data_dir, Config), CaseName ++ ".json"),
    rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, run_invalid_import_case_in_khepri, [CasePath]),
    rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, run_invalid_import_case, [CasePath]),
    ok.

import_invalid_file_case_if_unchanged(Config, CaseName) ->

@@ -480,28 +480,6 @@ run_invalid_import_case_if_unchanged(Path) ->
        {error, _E} -> ok
    end.

run_invalid_import_case_in_khepri(Path) ->
    case rabbit_khepri:is_enabled() of
        true ->
            run_invalid_import_case_in_khepri0(Path);
        false ->
            run_import_case(Path)
    end.

run_invalid_import_case_in_khepri0(Path) ->
    {ok, Body} = file:read_file(Path),
    ct:pal("Successfully loaded a definition file at ~tp~n", [Path]),
    case rabbit_definitions:import_raw(Body) of
        ok ->
            ct:pal("Expected import case ~tp to fail~n", [Path]),
            ct:fail({expected_failure, Path});
        {error, E} ->
            case re:run(E, ".*mirrored queues are deprecated.*", [{capture, none}]) of
                match -> ok;
                _ -> ct:fail({expected_failure, Path, E})
            end
    end.

queue_lookup(Config, VHost, Name) ->
    rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue, lookup, [rabbit_misc:r(VHost, queue, Name)]).

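Invalid definition files are now expected to fail through the ordinary import path on every metadata store. The body of run_invalid_import_case/1 is not shown in this hunk, so the sketch below is a hedged reconstruction modelled on the removed Khepri variant above.

%% Hedged reconstruction; the real run_invalid_import_case/1 may differ.
run_invalid_import_case_sketch(Path) ->
    {ok, Body} = file:read_file(Path),
    case rabbit_definitions:import_raw(Body) of
        ok          -> ct:fail({expected_failure, Path});
        {error, _E} -> ok
    end.
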
File diff suppressed because it is too large
@ -1,284 +0,0 @@
|
|||
%% This Source Code Form is subject to the terms of the Mozilla Public
|
||||
%% License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
%%
|
||||
%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
|
||||
%%
|
||||
|
||||
-module(eager_sync_SUITE).
|
||||
|
||||
-include_lib("amqp_client/include/amqp_client.hrl").
|
||||
|
||||
-compile(export_all).
|
||||
|
||||
-define(QNAME, <<"ha.two.test">>).
|
||||
-define(QNAME_AUTO, <<"ha.auto.test">>).
|
||||
-define(MESSAGE_COUNT, 200000).
|
||||
|
||||
all() ->
|
||||
[
|
||||
{group, non_parallel_tests}
|
||||
].
|
||||
|
||||
groups() ->
|
||||
[
|
||||
{non_parallel_tests, [], [
|
||||
eager_sync,
|
||||
eager_sync_cancel,
|
||||
eager_sync_auto,
|
||||
eager_sync_auto_on_policy_change,
|
||||
eager_sync_requeue
|
||||
]}
|
||||
].
|
||||
|
||||
suite() ->
|
||||
[
|
||||
%% If a test hangs, no need to wait for 30 minutes.
|
||||
{timetrap, {minutes, 15}}
|
||||
].
|
||||
|
||||
%% -------------------------------------------------------------------
|
||||
%% Testsuite setup/teardown.
|
||||
%% -------------------------------------------------------------------
|
||||
|
||||
init_per_suite(Config) ->
|
||||
case rabbit_ct_broker_helpers:configured_metadata_store(Config) of
|
||||
mnesia ->
|
||||
rabbit_ct_helpers:log_environment(),
|
||||
rabbit_ct_helpers:run_setup_steps(Config);
|
||||
_ ->
|
||||
{skip, "Classic mirroring not supported by Khepri"}
|
||||
end.
|
||||
|
||||
end_per_suite(Config) ->
|
||||
rabbit_ct_helpers:run_teardown_steps(Config).
|
||||
|
||||
init_per_group(_, Config) ->
|
||||
Config.
|
||||
|
||||
end_per_group(_, Config) ->
|
||||
Config.
|
||||
|
||||
init_per_testcase(Testcase, Config) ->
|
||||
rabbit_ct_helpers:testcase_started(Config, Testcase),
|
||||
ClusterSize = 3,
|
||||
TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
|
||||
Config1 = rabbit_ct_helpers:set_config(
|
||||
Config, [
|
||||
{rmq_nodes_count, ClusterSize},
|
||||
{rmq_nodes_clustered, true},
|
||||
{rmq_nodename_suffix, Testcase},
|
||||
{tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
|
||||
]),
|
||||
rabbit_ct_helpers:run_steps(
|
||||
Config1,
|
||||
rabbit_ct_broker_helpers:setup_steps() ++
|
||||
rabbit_ct_client_helpers:setup_steps() ++
|
||||
[
|
||||
fun rabbit_ct_broker_helpers:set_ha_policy_two_pos/1,
|
||||
fun rabbit_ct_broker_helpers:set_ha_policy_two_pos_batch_sync/1
|
||||
]).
|
||||
|
||||
end_per_testcase(Testcase, Config) ->
|
||||
Config1 = rabbit_ct_helpers:run_steps(Config,
|
||||
rabbit_ct_client_helpers:teardown_steps() ++
|
||||
rabbit_ct_broker_helpers:teardown_steps()),
|
||||
rabbit_ct_helpers:testcase_finished(Config1, Testcase).
|
||||
|
||||
%% -------------------------------------------------------------------
|
||||
%% Testcases.
|
||||
%% -------------------------------------------------------------------
|
||||
|
||||
eager_sync(Config) ->
|
||||
[A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
|
||||
%% Queue is on AB but not C.
|
||||
ACh = rabbit_ct_client_helpers:open_channel(Config, A),
|
||||
Ch = rabbit_ct_client_helpers:open_channel(Config, C),
|
||||
amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME,
|
||||
durable = true}),
|
||||
|
||||
%% Don't sync, lose messages
|
||||
rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
|
||||
restart(Config, A),
|
||||
restart(Config, B),
|
||||
rabbit_ct_client_helpers:consume(Ch, ?QNAME, 0),
|
||||
|
||||
%% Sync, keep messages
|
||||
rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
|
||||
restart(Config, A),
|
||||
ok = sync(C, ?QNAME),
|
||||
restart(Config, B),
|
||||
rabbit_ct_client_helpers:consume(Ch, ?QNAME, ?MESSAGE_COUNT),
|
||||
|
||||
%% Check the no-need-to-sync path
|
||||
rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
|
||||
ok = sync(C, ?QNAME),
|
||||
rabbit_ct_client_helpers:consume(Ch, ?QNAME, ?MESSAGE_COUNT),
|
||||
|
||||
%% keep unacknowledged messages
|
||||
rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
|
||||
rabbit_ct_client_helpers:fetch(Ch, ?QNAME, 2),
|
||||
restart(Config, A),
|
||||
rabbit_ct_client_helpers:fetch(Ch, ?QNAME, 3),
|
||||
sync(C, ?QNAME),
|
||||
restart(Config, B),
|
||||
rabbit_ct_client_helpers:consume(Ch, ?QNAME, ?MESSAGE_COUNT),
|
||||
|
||||
ok.
|
||||
|
||||
eager_sync_cancel(Config) ->
|
||||
[A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
|
||||
%% Queue is on AB but not C.
|
||||
ACh = rabbit_ct_client_helpers:open_channel(Config, A),
|
||||
Ch = rabbit_ct_client_helpers:open_channel(Config, C),
|
||||
|
||||
set_app_sync_batch_size(A),
|
||||
set_app_sync_batch_size(B),
|
||||
set_app_sync_batch_size(C),
|
||||
|
||||
amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME,
|
||||
durable = true}),
|
||||
{ok, not_syncing} = sync_cancel(C, ?QNAME), %% Idempotence
|
||||
eager_sync_cancel_test2(Config, A, B, C, Ch, 100).
|
||||
|
||||
eager_sync_cancel_test2(_, _, _, _, _, 0) ->
|
||||
error(no_more_attempts_left);
|
||||
eager_sync_cancel_test2(Config, A, B, C, Ch, Attempts) ->
|
||||
%% Sync then cancel
|
||||
rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
|
||||
restart(Config, A),
|
||||
set_app_sync_batch_size(A),
|
||||
spawn_link(fun() -> ok = sync_nowait(C, ?QNAME) end),
|
||||
case wait_for_syncing(C, ?QNAME, 1) of
|
||||
ok ->
|
||||
case sync_cancel(C, ?QNAME) of
|
||||
ok ->
|
||||
wait_for_running(C, ?QNAME),
|
||||
restart(Config, B),
|
||||
set_app_sync_batch_size(B),
|
||||
rabbit_ct_client_helpers:consume(Ch, ?QNAME, 0),
|
||||
|
||||
{ok, not_syncing} = sync_cancel(C, ?QNAME), %% Idempotence
|
||||
ok;
|
||||
{ok, not_syncing} ->
|
||||
%% Damn. Syncing finished between wait_for_syncing/3 and
|
||||
%% sync_cancel/2 above. Start again.
|
||||
amqp_channel:call(Ch, #'queue.purge'{queue = ?QNAME}),
|
||||
eager_sync_cancel_test2(Config, A, B, C, Ch, Attempts - 1)
|
||||
end;
|
||||
synced_already ->
|
||||
%% Damn. Syncing finished before wait_for_syncing/3. Start again.
|
||||
amqp_channel:call(Ch, #'queue.purge'{queue = ?QNAME}),
|
||||
eager_sync_cancel_test2(Config, A, B, C, Ch, Attempts - 1)
|
||||
end.
|
||||
|
||||
eager_sync_auto(Config) ->
|
||||
[A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
|
||||
ACh = rabbit_ct_client_helpers:open_channel(Config, A),
|
||||
Ch = rabbit_ct_client_helpers:open_channel(Config, C),
|
||||
amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME_AUTO,
|
||||
durable = true}),
|
||||
|
||||
%% Sync automatically, don't lose messages
|
||||
rabbit_ct_client_helpers:publish(Ch, ?QNAME_AUTO, ?MESSAGE_COUNT),
|
||||
restart(Config, A),
|
||||
wait_for_sync(C, ?QNAME_AUTO),
|
||||
restart(Config, B),
|
||||
wait_for_sync(C, ?QNAME_AUTO),
|
||||
rabbit_ct_client_helpers:consume(Ch, ?QNAME_AUTO, ?MESSAGE_COUNT),
|
||||
|
||||
ok.
|
||||
|
||||
eager_sync_auto_on_policy_change(Config) ->
|
||||
[A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
|
||||
%% Queue is on AB but not C.
|
||||
ACh = rabbit_ct_client_helpers:open_channel(Config, A),
|
||||
Ch = rabbit_ct_client_helpers:open_channel(Config, C),
|
||||
amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME,
|
||||
durable = true}),
|
||||
|
||||
%% Sync automatically once the policy is changed to tell us to.
|
||||
rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
|
||||
restart(Config, A),
|
||||
Params = [atom_to_binary(N) || N <- [A, B]],
|
||||
rabbit_ct_broker_helpers:set_ha_policy(Config,
|
||||
A, <<"^ha.two.">>, {<<"nodes">>, Params},
|
||||
[{<<"ha-sync-mode">>, <<"automatic">>}]),
|
||||
wait_for_sync(C, ?QNAME),
|
||||
|
||||
ok.
|
||||
|
||||
eager_sync_requeue(Config) ->
|
||||
[A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
|
||||
%% Queue is on AB but not C.
|
||||
ACh = rabbit_ct_client_helpers:open_channel(Config, A),
|
||||
Ch = rabbit_ct_client_helpers:open_channel(Config, C),
|
||||
amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME,
|
||||
durable = true}),
|
||||
|
||||
rabbit_ct_client_helpers:publish(Ch, ?QNAME, 2),
|
||||
{#'basic.get_ok'{delivery_tag = TagA}, _} =
|
||||
amqp_channel:call(Ch, #'basic.get'{queue = ?QNAME}),
|
||||
{#'basic.get_ok'{delivery_tag = TagB}, _} =
|
||||
amqp_channel:call(Ch, #'basic.get'{queue = ?QNAME}),
|
||||
amqp_channel:cast(Ch, #'basic.reject'{delivery_tag = TagA, requeue = true}),
|
||||
restart(Config, B),
|
||||
ok = sync(C, ?QNAME),
|
||||
amqp_channel:cast(Ch, #'basic.reject'{delivery_tag = TagB, requeue = true}),
|
||||
rabbit_ct_client_helpers:consume(Ch, ?QNAME, 2),
|
||||
|
||||
ok.
|
||||
|
||||
restart(Config, Node) ->
|
||||
rabbit_ct_broker_helpers:restart_broker(Config, Node).
|
||||
|
||||
sync(Node, QName) ->
|
||||
case sync_nowait(Node, QName) of
|
||||
ok -> wait_for_sync(Node, QName),
|
||||
ok;
|
||||
R -> R
|
||||
end.
|
||||
|
||||
sync_nowait(Node, QName) -> action(Node, sync_queue, QName).
|
||||
sync_cancel(Node, QName) -> action(Node, cancel_sync_queue, QName).
|
||||
|
||||
wait_for_sync(Node, QName) ->
|
||||
sync_detection_SUITE:wait_for_sync_status(true, Node, QName).
|
||||
|
||||
action(Node, Action, QName) ->
|
||||
rabbit_control_helper:command_with_output(
|
||||
Action, Node, [binary_to_list(QName)], [{"-p", "/"}]).
|
||||
|
||||
queue(Node, QName) ->
|
||||
QNameRes = rabbit_misc:r(<<"/">>, queue, QName),
|
||||
{ok, Q} = rpc:call(Node, rabbit_amqqueue, lookup, [QNameRes]),
|
||||
Q.
|
||||
|
||||
wait_for_syncing(Node, QName, Target) ->
|
||||
case state(Node, QName) of
|
||||
{{syncing, _}, _} -> ok;
|
||||
{running, Target} -> synced_already;
|
||||
_ -> timer:sleep(100),
|
||||
wait_for_syncing(Node, QName, Target)
|
||||
end.
|
||||
|
||||
wait_for_running(Node, QName) ->
|
||||
case state(Node, QName) of
|
||||
{running, _} -> ok;
|
||||
_ -> timer:sleep(100),
|
||||
wait_for_running(Node, QName)
|
||||
end.
|
||||
|
||||
state(Node, QName) ->
|
||||
[{state, State}, {synchronised_slave_pids, Pids}] =
|
||||
rpc:call(Node, rabbit_amqqueue, info,
|
||||
[queue(Node, QName), [state, synchronised_slave_pids]]),
|
||||
{State, length(Pids)}.
|
||||
|
||||
%% eager_sync_cancel_test needs a batch size that's < ?MESSAGE_COUNT
|
||||
%% in order to pass, because a SyncBatchSize >= ?MESSAGE_COUNT will
|
||||
%% always finish before the test is able to cancel the sync.
|
||||
set_app_sync_batch_size(Node) ->
|
||||
rabbit_control_helper:command(
|
||||
eval, Node,
|
||||
["application:set_env(rabbit, mirroring_sync_batch_size, 1)."]).
|
|
@@ -91,16 +91,10 @@ init_per_testcase(Testcase, Config) ->
        {rmq_nodename_suffix, Testcase},
        {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
      ]),
    ExtraSteps =
        case rabbit_ct_broker_helpers:configured_metadata_store(Config) of
            {khepri, []} -> [];
            mnesia -> [fun rabbit_ct_broker_helpers:set_ha_policy_all/1]
        end,
    rabbit_ct_helpers:run_steps(
      Config1,
      rabbit_ct_broker_helpers:setup_steps() ++
      rabbit_ct_client_helpers:setup_steps() ++
      ExtraSteps).
      rabbit_ct_client_helpers:setup_steps()).

end_per_testcase(Testcase, Config) ->
    Config1 = rabbit_ct_helpers:run_steps(Config,

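Per-testcase setup no longer branches on the metadata store to append an HA policy step; broker and client setup steps are composed unconditionally. A hedged sketch of the simplified shape (config values other than those shown in the hunk are illustrative).

%% Hedged sketch of the simplified init_per_testcase shape; config
%% values other than those shown in the hunk are illustrative.
init_per_testcase_sketch(Testcase, Config) ->
    Config1 = rabbit_ct_helpers:set_config(
                Config, [{rmq_nodename_suffix, Testcase}]),
    rabbit_ct_helpers:run_steps(
      Config1,
      rabbit_ct_broker_helpers:setup_steps() ++
      rabbit_ct_client_helpers:setup_steps()).
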
@ -1,117 +0,0 @@
|
|||
%% This Source Code Form is subject to the terms of the Mozilla Public
|
||||
%% License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
%%
|
||||
%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
|
||||
%%
|
||||
|
||||
-module(many_node_ha_SUITE).
|
||||
|
||||
-include_lib("common_test/include/ct.hrl").
|
||||
-include_lib("amqp_client/include/amqp_client.hrl").
|
||||
|
||||
-compile(export_all).
|
||||
|
||||
suite() ->
|
||||
[
|
||||
{timetrap, {minutes, 5}}
|
||||
].
|
||||
|
||||
all() ->
|
||||
[
|
||||
{group, cluster_size_6}
|
||||
].
|
||||
|
||||
groups() ->
|
||||
[
|
||||
{cluster_size_6, [], [
|
||||
kill_intermediate
|
||||
]}
|
||||
].
|
||||
|
||||
%% -------------------------------------------------------------------
|
||||
%% Testsuite setup/teardown.
|
||||
%% -------------------------------------------------------------------
|
||||
|
||||
init_per_suite(Config) ->
|
||||
rabbit_ct_helpers:log_environment(),
|
||||
case rabbit_ct_broker_helpers:configured_metadata_store(Config) of
|
||||
mnesia ->
|
||||
rabbit_ct_helpers:run_setup_steps(Config);
|
||||
{khepri, _} ->
|
||||
{skip, "Classic queue mirroring not supported by Khepri"}
|
||||
end.
|
||||
|
||||
end_per_suite(Config) ->
|
||||
rabbit_ct_helpers:run_teardown_steps(Config).
|
||||
|
||||
init_per_group(cluster_size_6, Config) ->
|
||||
rabbit_ct_helpers:set_config(Config, [
|
||||
{rmq_nodes_count, 6}
|
||||
]).
|
||||
|
||||
end_per_group(_, Config) ->
|
||||
Config.
|
||||
|
||||
init_per_testcase(Testcase, Config) ->
|
||||
rabbit_ct_helpers:testcase_started(Config, Testcase),
|
||||
ClusterSize = ?config(rmq_nodes_count, Config),
|
||||
TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
|
||||
Config1 = rabbit_ct_helpers:set_config(Config, [
|
||||
{rmq_nodes_clustered, true},
|
||||
{rmq_nodename_suffix, Testcase},
|
||||
{tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
|
||||
]),
|
||||
rabbit_ct_helpers:run_steps(Config1,
|
||||
rabbit_ct_broker_helpers:setup_steps() ++
|
||||
rabbit_ct_client_helpers:setup_steps() ++ [
|
||||
fun rabbit_ct_broker_helpers:set_ha_policy_all/1
|
||||
]).
|
||||
|
||||
end_per_testcase(Testcase, Config) ->
|
||||
Config1 = rabbit_ct_helpers:run_steps(Config,
|
||||
rabbit_ct_client_helpers:teardown_steps() ++
|
||||
rabbit_ct_broker_helpers:teardown_steps()),
|
||||
rabbit_ct_helpers:testcase_finished(Config1, Testcase).
|
||||
|
||||
%% -------------------------------------------------------------------
|
||||
%% Test Cases
|
||||
%% -------------------------------------------------------------------
|
||||
|
||||
kill_intermediate(Config) ->
|
||||
[A, B, C, D, E, F] = rabbit_ct_broker_helpers:get_node_configs(Config,
|
||||
nodename),
|
||||
Msgs = rabbit_ct_helpers:cover_work_factor(Config, 20000),
|
||||
MasterChannel = rabbit_ct_client_helpers:open_channel(Config, A),
|
||||
ConsumerChannel = rabbit_ct_client_helpers:open_channel(Config, E),
|
||||
ProducerChannel = rabbit_ct_client_helpers:open_channel(Config, F),
|
||||
Queue = <<"test">>,
|
||||
amqp_channel:call(MasterChannel, #'queue.declare'{queue = Queue,
|
||||
auto_delete = false}),
|
||||
|
||||
%% TODO: this seems *highly* timing dependant - the assumption being
|
||||
%% that the kill will work quickly enough that there will still be
|
||||
%% some messages in-flight that we *must* receive despite the intervening
|
||||
%% node deaths. It would be nice if we could find a means to do this
|
||||
%% in a way that is not actually timing dependent.
|
||||
|
||||
%% Worse still, it assumes that killing the master will cause a
|
||||
%% failover to Slave1, and so on. Nope.
|
||||
|
||||
ConsumerPid = rabbit_ha_test_consumer:create(ConsumerChannel,
|
||||
Queue, self(), false, Msgs),
|
||||
|
||||
ProducerPid = rabbit_ha_test_producer:create(ProducerChannel,
|
||||
Queue, self(), false, Msgs),
|
||||
|
||||
%% create a killer for the master and the first 3 mirrors
|
||||
[rabbit_ct_broker_helpers:kill_node_after(Config, Node, Time) ||
|
||||
{Node, Time} <- [{A, 50},
|
||||
{B, 50},
|
||||
{C, 100},
|
||||
{D, 100}]],
|
||||
|
||||
%% verify that the consumer got all msgs, or die, or time out
|
||||
rabbit_ha_test_producer:await_response(ProducerPid),
|
||||
rabbit_ha_test_consumer:await_response(ConsumerPid),
|
||||
ok.
|
|
@ -24,7 +24,7 @@ all() ->
|
|||
|
||||
groups() ->
|
||||
[
|
||||
{mnesia_store, [], [target_count_policy] ++ all_tests()},
|
||||
{mnesia_store, [], all_tests()},
|
||||
{khepri_store, [], all_tests()},
|
||||
{khepri_migration, [], [
|
||||
from_mnesia_to_khepri
|
||||
|
@ -48,8 +48,7 @@ all_tests() ->
|
|||
is_supported_operator_policy_max_in_memory_bytes,
|
||||
is_supported_operator_policy_delivery_limit,
|
||||
is_supported_operator_policy_target_group_size,
|
||||
is_supported_operator_policy_overflow,
|
||||
is_supported_operator_policy_ha
|
||||
is_supported_operator_policy_overflow
|
||||
].
|
||||
|
||||
%% -------------------------------------------------------------------
|
||||
|
@ -191,63 +190,6 @@ operator_retroactive_policy_publish_ttl(Config) ->
|
|||
rabbit_ct_client_helpers:close_connection(Conn),
|
||||
passed.
|
||||
|
||||
target_count_policy(Config) ->
|
||||
[Server | _] = Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
|
||||
{Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
|
||||
QName = <<"policy_ha">>,
|
||||
declare(Ch, QName),
|
||||
BNodes = [atom_to_binary(N) || N <- Nodes],
|
||||
|
||||
AllPolicy = [{<<"ha-mode">>, <<"all">>}],
|
||||
ExactlyPolicyOne = [{<<"ha-mode">>, <<"exactly">>},
|
||||
{<<"ha-params">>, 1}],
|
||||
ExactlyPolicyTwo = [{<<"ha-mode">>, <<"exactly">>},
|
||||
{<<"ha-params">>, 2}],
|
||||
NodesPolicyAll = [{<<"ha-mode">>, <<"nodes">>},
|
||||
{<<"ha-params">>, BNodes}],
|
||||
NodesPolicyOne = [{<<"ha-mode">>, <<"nodes">>},
|
||||
{<<"ha-params">>, [hd(BNodes)]}],
|
||||
SyncModePolicyAuto = [{<<"ha-mode">>, <<"all">>}, {<<"ha-sync-mode">>, <<"automatic">>}],
|
||||
SyncModePolicyMan = [{<<"ha-mode">>, <<"all">>}, {<<"ha-sync-mode">>, <<"manual">>}],
|
||||
|
||||
%% ALL has precedence
|
||||
Opts = #{config => Config,
|
||||
server => Server,
|
||||
qname => QName},
|
||||
verify_policies(AllPolicy, ExactlyPolicyTwo, [{<<"ha-mode">>, <<"all">>}], Opts),
|
||||
|
||||
verify_policies(ExactlyPolicyTwo, AllPolicy, [{<<"ha-mode">>, <<"all">>}], Opts),
|
||||
|
||||
verify_policies(AllPolicy, NodesPolicyAll, [{<<"ha-mode">>, <<"all">>}], Opts),
|
||||
|
||||
verify_policies(NodesPolicyAll, AllPolicy, [{<<"ha-mode">>, <<"all">>}], Opts),
|
||||
|
||||
%% %% Sync mode OperPolicy has precedence
|
||||
verify_policies(SyncModePolicyMan, SyncModePolicyAuto, [{<<"ha-sync-mode">>, <<"automatic">>}], Opts),
|
||||
verify_policies(SyncModePolicyAuto, SyncModePolicyMan, [{<<"ha-sync-mode">>, <<"manual">>}], Opts),
|
||||
|
||||
%% exactly has precedence over nodes
|
||||
verify_policies(ExactlyPolicyTwo, NodesPolicyAll,[{<<"ha-mode">>, <<"exactly">>}, {<<"ha-params">>, 2}], Opts),
|
||||
|
||||
verify_policies(NodesPolicyAll, ExactlyPolicyTwo, [{<<"ha-mode">>, <<"exactly">>}, {<<"ha-params">>, 2}], Opts),
|
||||
|
||||
%% Highest exactly value has precedence
|
||||
verify_policies(ExactlyPolicyTwo, ExactlyPolicyOne, [{<<"ha-mode">>, <<"exactly">>}, {<<"ha-params">>, 2}], Opts),
|
||||
|
||||
verify_policies(ExactlyPolicyOne, ExactlyPolicyTwo, [{<<"ha-mode">>, <<"exactly">>}, {<<"ha-params">>, 2}], Opts),
|
||||
|
||||
%% Longest node count has precedence
|
||||
SortedNodes = lists:sort(BNodes),
|
||||
verify_policies(NodesPolicyAll, NodesPolicyOne, [{<<"ha-mode">>, <<"nodes">>}, {<<"ha-params">>, SortedNodes}], Opts),
|
||||
verify_policies(NodesPolicyOne, NodesPolicyAll, [{<<"ha-mode">>, <<"nodes">>}, {<<"ha-params">>, SortedNodes}], Opts),
|
||||
|
||||
delete(Ch, QName),
|
||||
rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"policy">>),
|
||||
rabbit_ct_broker_helpers:clear_operator_policy(Config, 0, <<"op_policy">>),
|
||||
rabbit_ct_client_helpers:close_channel(Ch),
|
||||
rabbit_ct_client_helpers:close_connection(Conn),
|
||||
passed.
|
||||
|
||||
queue_type_specific_policies(Config) ->
|
||||
[Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
|
||||
{Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
|
||||
|
@ -381,51 +323,6 @@ is_supported_operator_policy_overflow(Config) ->
|
|||
effective_operator_policy_per_queue_type(
|
||||
Config, <<"overflow">>, Value, Value, Value, undefined).
|
||||
|
||||
|
||||
is_supported_operator_policy_ha(Config) ->
|
||||
[Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
|
||||
{Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
|
||||
ClassicQ = <<"classic_queue">>,
|
||||
QuorumQ = <<"quorum_queue">>,
|
||||
StreamQ = <<"stream_queue">>,
|
||||
|
||||
declare(Ch, ClassicQ, [{<<"x-queue-type">>, longstr, <<"classic">>}]),
|
||||
declare(Ch, QuorumQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}]),
|
||||
declare(Ch, StreamQ, [{<<"x-queue-type">>, longstr, <<"stream">>}]),
|
||||
|
||||
case ?config(metadata_store, Config) of
|
||||
mnesia ->
|
||||
rabbit_ct_broker_helpers:set_operator_policy(
|
||||
Config, 0, <<"operator-policy">>, <<".*">>, <<"all">>,
|
||||
[{<<"ha-mode">>, <<"exactly">>},
|
||||
{<<"ha-params">>, 2},
|
||||
{<<"ha-sync-mode">>, <<"automatic">>}]),
|
||||
|
||||
?awaitMatch(<<"exactly">>, check_policy_value(Server, ClassicQ, <<"ha-mode">>), 30_000),
|
||||
?awaitMatch(2, check_policy_value(Server, ClassicQ, <<"ha-params">>), 30_000),
|
||||
?awaitMatch(<<"automatic">>, check_policy_value(Server, ClassicQ, <<"ha-sync-mode">>), 30_000),
|
||||
?awaitMatch(undefined, check_policy_value(Server, QuorumQ, <<"ha-mode">>), 30_000),
|
||||
?awaitMatch(undefined, check_policy_value(Server, StreamQ, <<"ha-mode">>), 30_000),
|
||||
|
||||
rabbit_ct_broker_helpers:clear_operator_policy(Config, 0, <<"operator-policy">>);
|
||||
khepri ->
|
||||
?assertError(
|
||||
{badmatch, _},
|
||||
rabbit_ct_broker_helpers:set_operator_policy(
|
||||
Config, 0, <<"operator-policy">>, <<".*">>, <<"all">>,
|
||||
[{<<"ha-mode">>, <<"exactly">>},
|
||||
{<<"ha-params">>, 2},
|
||||
{<<"ha-sync-mode">>, <<"automatic">>}]))
|
||||
end,
|
||||
|
||||
delete(Ch, ClassicQ),
|
||||
delete(Ch, QuorumQ),
|
||||
delete(Ch, StreamQ),
|
||||
|
||||
rabbit_ct_client_helpers:close_channel(Ch),
|
||||
rabbit_ct_client_helpers:close_connection(Conn),
|
||||
passed.
|
||||
|
||||
effective_operator_policy_per_queue_type(Config, Name, Value, ClassicValue, QuorumValue, StreamValue) ->
|
||||
[Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
|
||||
{Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
|
||||
|
|
|
@ -367,15 +367,13 @@ info_head_message_timestamp1(_Config) ->
|
|||
timestamp = 1000},
|
||||
payload_fragments_rev = []},
|
||||
{ok, Msg1} = mc_amqpl:message(ExName, <<>>, Content1, #{id => <<"msg1">>}),
|
||||
BQS2 = PQ:publish(Msg1, #message_properties{size = 0}, false, self(),
|
||||
noflow, BQS1),
|
||||
BQS2 = PQ:publish(Msg1, #message_properties{size = 0}, false, self(), BQS1),
|
||||
1000 = PQ:info(head_message_timestamp, BQS2),
|
||||
%% Publish a higher priority message with no timestamp.
|
||||
Content2 = #content{properties = #'P_basic'{priority = 2},
|
||||
payload_fragments_rev = []},
|
||||
{ok, Msg2} = mc_amqpl:message(ExName, <<>>, Content2, #{id => <<"msg2">>}),
|
||||
BQS3 = PQ:publish(Msg2, #message_properties{size = 0}, false, self(),
|
||||
noflow, BQS2),
|
||||
BQS3 = PQ:publish(Msg2, #message_properties{size = 0}, false, self(), BQS2),
|
||||
'' = PQ:info(head_message_timestamp, BQS3),
|
||||
%% Consume message with no timestamp.
|
||||
{{Msg2, _, _}, BQS4} = PQ:fetch(false, BQS3),
|
||||
|
@ -435,7 +433,7 @@ info_oldest_message_received_timestamp1(_Config) ->
|
|||
payload_fragments_rev = []},
|
||||
{ok, Msg1} = mc_amqpl:message(ExName, <<>>, Content1, #{id => <<"msg1">>}),
|
||||
BQS2 = PQ:publish(Msg1, #message_properties{size = 0}, false, self(),
|
||||
noflow, BQS1),
|
||||
BQS1),
|
||||
Ts1 = PQ:info(oldest_message_received_timestamp, BQS2),
|
||||
?assert(is_integer(Ts1)),
|
||||
%% Publish a higher priority message.
|
||||
|
@@ -443,7 +441,7 @@ info_oldest_message_received_timestamp1(_Config) ->
                        payload_fragments_rev = []},
    {ok, Msg2} = mc_amqpl:message(ExName, <<>>, Content2, #{id => <<"msg2">>}),
    BQS3 = PQ:publish(Msg2, #message_properties{size = 0}, false, self(),
                      noflow, BQS2),
                      BQS2),
    %% Even though is highest priority, the lower priority message is older.
    %% Timestamp hasn't changed.
    ?assertEqual(Ts1, PQ:info(oldest_message_received_timestamp, BQS3)),

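These tests now publish directly into the backing-queue state with publish/5. A hedged sketch of publishing a single test message and reading back an info item, following the construction pattern in the hunk; PQ, ExName and the record values are assumed to be set up as in the surrounding test.

%% Hedged test-style sketch; PQ is the backing-queue module under test,
%% ExName an exchange resource and BQS0 an initialised queue state.
publish_one_test_msg(PQ, ExName, BQS0) ->
    Content = #content{properties = #'P_basic'{priority = 1},
                       payload_fragments_rev = []},
    {ok, Msg} = mc_amqpl:message(ExName, <<>>, Content, #{id => <<"m1">>}),
    BQS1 = PQ:publish(Msg, #message_properties{size = 0}, false, self(), BQS0),
    {PQ:info(oldest_message_received_timestamp, BQS1), BQS1}.
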
@ -36,7 +36,6 @@ groups() ->
|
|||
{mnesia_store, [],
|
||||
[
|
||||
{classic_queue, [parallel], PublisherConfirmTests ++ [confirm_nack]},
|
||||
{mirrored_queue, [parallel], PublisherConfirmTests ++ [confirm_nack]},
|
||||
{quorum_queue, [parallel], PublisherConfirmTests},
|
||||
{quorum_queue, [], [confirm_minority]}
|
||||
]},
|
||||
|
@ -73,14 +72,6 @@ init_per_group(quorum_queue, Config) ->
|
|||
Config,
|
||||
[{queue_args, [{<<"x-queue-type">>, longstr, <<"quorum">>}]},
|
||||
{queue_durable, true}]);
|
||||
init_per_group(mirrored_queue, Config) ->
|
||||
rabbit_ct_broker_helpers:set_ha_policy(Config, 0, <<"^max_length.*queue">>,
|
||||
<<"all">>, [{<<"ha-sync-mode">>, <<"automatic">>}]),
|
||||
Config1 = rabbit_ct_helpers:set_config(
|
||||
Config, [{is_mirrored, true},
|
||||
{queue_args, [{<<"x-queue-type">>, longstr, <<"classic">>}]},
|
||||
{queue_durable, true}]),
|
||||
rabbit_ct_helpers:run_steps(Config1, []);
|
||||
init_per_group(mnesia_store = Group, Config0) ->
|
||||
Config = rabbit_ct_helpers:set_config(Config0, [{metadata_store, mnesia}]),
|
||||
init_per_group0(Group, Config);
|
||||
|
@ -380,13 +371,6 @@ consume(Ch, QName, Payloads) ->
|
|||
consume_empty(Ch, QName) ->
|
||||
#'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}).
|
||||
|
||||
sync_mirrors(QName, Config) ->
|
||||
case ?config(is_mirrored, Config) of
|
||||
true ->
|
||||
rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, [<<"sync_queue">>, QName]);
|
||||
_ -> ok
|
||||
end.
|
||||
|
||||
receive_many([]) ->
|
||||
ok;
|
||||
receive_many(DTags) ->
|
||||
|
|
|
@ -29,8 +29,7 @@ groups() ->
|
|||
[
|
||||
{mnesia_parallel_tests, [parallel], [
|
||||
{max_length_classic, [], max_length_tests()},
|
||||
{max_length_quorum, [], max_length_quorum_tests()},
|
||||
{max_length_mirrored, [], max_length_tests()}
|
||||
{max_length_quorum, [], max_length_quorum_tests()}
|
||||
]},
|
||||
{khepri_parallel_tests, [parallel], [
|
||||
{max_length_classic, [], max_length_tests()},
|
||||
|
@ -79,14 +78,6 @@ init_per_group(max_length_quorum, Config) ->
|
|||
Config,
|
||||
[{queue_args, [{<<"x-queue-type">>, longstr, <<"quorum">>}]},
|
||||
{queue_durable, true}]);
|
||||
init_per_group(max_length_mirrored, Config) ->
|
||||
rabbit_ct_broker_helpers:set_ha_policy(Config, 0, <<"^max_length.*queue">>,
|
||||
<<"all">>, [{<<"ha-sync-mode">>, <<"automatic">>}]),
|
||||
Config1 = rabbit_ct_helpers:set_config(
|
||||
Config, [{is_mirrored, true},
|
||||
{queue_args, [{<<"x-queue-type">>, longstr, <<"classic">>}]},
|
||||
{queue_durable, false}]),
|
||||
rabbit_ct_helpers:run_steps(Config1, []);
|
||||
init_per_group(mnesia_parallel_tests = Group, Config0) ->
|
||||
Config = rabbit_ct_helpers:set_config(Config0, [{metadata_store, mnesia}]),
|
||||
init_per_group0(Group, Config);
|
||||
|
@ -109,10 +100,6 @@ init_per_group0(Group, Config) ->
|
|||
rabbit_ct_helpers:run_steps(Config, [])
|
||||
end.
|
||||
|
||||
end_per_group(max_length_mirrored, Config) ->
|
||||
rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"^max_length.*queue">>),
|
||||
Config1 = rabbit_ct_helpers:set_config(Config, [{is_mirrored, false}]),
|
||||
Config1;
|
||||
end_per_group(queue_max_length, Config) ->
|
||||
Config;
|
||||
end_per_group(Group, Config) ->
|
||||
|
@ -169,7 +156,7 @@ max_length_bytes_drop_head(Config, ExtraArgs) ->
|
|||
Payload1 = << <<"1">> || _ <- lists:seq(1, 80) >>,
|
||||
Payload2 = << <<"2">> || _ <- lists:seq(1, 80) >>,
|
||||
Payload3 = << <<"3">> || _ <- lists:seq(1, 80) >>,
|
||||
check_max_length_drops_head(Config, QName, Ch, Payload1, Payload2, Payload3).
|
||||
check_max_length_drops_head(QName, Ch, Payload1, Payload2, Payload3).
|
||||
|
||||
max_length_drop_head(Config) ->
|
||||
max_length_drop_head(Config, [{<<"x-overflow">>, longstr, <<"drop-head">>}]).
|
||||
|
@ -187,7 +174,7 @@ max_length_drop_head(Config, ExtraArgs) ->
|
|||
MaxLengthArgs = [{<<"x-max-length">>, long, 1}],
|
||||
#'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = MaxLengthArgs ++ Args ++ ExtraArgs, durable = Durable}),
|
||||
|
||||
check_max_length_drops_head(Config, QName, Ch, <<"1">>, <<"2">>, <<"3">>).
|
||||
check_max_length_drops_head(QName, Ch, <<"1">>, <<"2">>, <<"3">>).
|
||||
|
||||
max_length_reject_confirm(Config) ->
|
||||
{_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
|
||||
|
@ -198,8 +185,8 @@ max_length_reject_confirm(Config) ->
|
|||
OverflowArgs = [{<<"x-overflow">>, longstr, <<"reject-publish">>}],
|
||||
#'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = MaxLengthArgs ++ OverflowArgs ++ Args, durable = Durable}),
|
||||
#'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
|
||||
check_max_length_drops_publish(Config, QName, Ch, <<"1">>, <<"2">>, <<"3">>),
|
||||
check_max_length_rejects(Config, QName, Ch, <<"1">>, <<"2">>, <<"3">>).
|
||||
check_max_length_drops_publish(QName, Ch, <<"1">>, <<"2">>, <<"3">>),
|
||||
check_max_length_rejects(QName, Ch, <<"1">>, <<"2">>, <<"3">>).
|
||||
|
||||
max_length_bytes_reject_confirm(Config) ->
|
||||
{_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
|
||||
|
@ -216,8 +203,8 @@ max_length_bytes_reject_confirm(Config) ->
|
|||
Payload2 = << <<"2">> || _ <- lists:seq(1, 80) >>,
|
||||
Payload3 = << <<"3">> || _ <- lists:seq(1, 80) >>,
|
||||
|
||||
check_max_length_drops_publish(Config, QNameBytes, Ch, Payload1, Payload2, Payload3),
|
||||
check_max_length_rejects(Config, QNameBytes, Ch, Payload1, Payload2, Payload3).
|
||||
check_max_length_drops_publish(QNameBytes, Ch, Payload1, Payload2, Payload3),
|
||||
check_max_length_rejects(QNameBytes, Ch, Payload1, Payload2, Payload3).
|
||||
|
||||
max_length_drop_publish(Config) ->
|
||||
{_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
|
||||
|
@ -228,7 +215,7 @@ max_length_drop_publish(Config) ->
|
|||
OverflowArgs = [{<<"x-overflow">>, longstr, <<"reject-publish">>}],
|
||||
#'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = MaxLengthArgs ++ OverflowArgs ++ Args, durable = Durable}),
|
||||
%% If confirms are not enable, publishes will still be dropped in reject-publish mode.
|
||||
check_max_length_drops_publish(Config, QName, Ch, <<"1">>, <<"2">>, <<"3">>).
|
||||
check_max_length_drops_publish(QName, Ch, <<"1">>, <<"2">>, <<"3">>).
|
||||
|
||||
max_length_drop_publish_requeue(Config) ->
|
||||
{_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
|
||||
|
@@ -239,7 +226,7 @@ max_length_drop_publish_requeue(Config) ->
    OverflowArgs = [{<<"x-overflow">>, longstr, <<"reject-publish">>}],
    #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = MaxLengthArgs ++ OverflowArgs ++ Args, durable = Durable}),
    %% If confirms are not enabled, publishes will still be dropped in reject-publish mode.
-    check_max_length_requeue(Config, QName, Ch, <<"1">>, <<"2">>).
+    check_max_length_requeue(QName, Ch, <<"1">>, <<"2">>).

max_length_bytes_drop_publish(Config) ->
    {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
@@ -255,15 +242,13 @@ max_length_bytes_drop_publish(Config) ->
    Payload2 = << <<"2">> || _ <- lists:seq(1, 80) >>,
    Payload3 = << <<"3">> || _ <- lists:seq(1, 80) >>,

-    check_max_length_drops_publish(Config, QNameBytes, Ch, Payload1, Payload2, Payload3).
+    check_max_length_drops_publish(QNameBytes, Ch, Payload1, Payload2, Payload3).

%% -------------------------------------------------------------------
%% Implementation
%% -------------------------------------------------------------------

-check_max_length_requeue(Config, QName, Ch, Payload1, Payload2) ->
-    sync_mirrors(QName, Config),
-
+check_max_length_requeue(QName, Ch, Payload1, Payload2) ->
    #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
    amqp_channel:register_confirm_handler(Ch, self()),

@@ -287,9 +272,7 @@ check_max_length_requeue(Config, QName, Ch, Payload1, Payload2) ->
    {#'basic.get_ok'{}, #amqp_msg{payload = Payload2}} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
    #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}).

-check_max_length_drops_publish(Config, QName, Ch, Payload1, Payload2, Payload3) ->
-    sync_mirrors(QName, Config),
-
+check_max_length_drops_publish(QName, Ch, Payload1, Payload2, Payload3) ->
    #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
    amqp_channel:register_confirm_handler(Ch, self()),

@@ -316,8 +299,7 @@ check_max_length_drops_publish(Config, QName, Ch, Payload1, Payload2, Payload3)
    {#'basic.get_ok'{}, #amqp_msg{payload = Payload1}} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
    #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}).

-check_max_length_rejects(Config, QName, Ch, Payload1, Payload2, Payload3) ->
-    sync_mirrors(QName, Config),
+check_max_length_rejects(QName, Ch, Payload1, Payload2, Payload3) ->
    amqp_channel:register_confirm_handler(Ch, self()),
    flush(),
    #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
@@ -349,9 +331,7 @@ check_max_length_rejects(Config, QName, Ch, Payload1, Payload2, Payload3) ->

    {#'basic.get_ok'{}, #amqp_msg{payload = Payload2}} = amqp_channel:call(Ch, #'basic.get'{queue = QName}).

-check_max_length_drops_head(Config, QName, Ch, Payload1, Payload2, Payload3) ->
-    sync_mirrors(QName, Config),
-
+check_max_length_drops_head(QName, Ch, Payload1, Payload2, Payload3) ->
    #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
    amqp_channel:register_confirm_handler(Ch, self()),

@@ -379,13 +359,6 @@ check_max_length_drops_head(Config, QName, Ch, Payload1, Payload2, Payload3) ->
    {#'basic.get_ok'{}, #amqp_msg{payload = Payload3}} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
    #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}).

-sync_mirrors(QName, Config) ->
-    case rabbit_ct_helpers:get_config(Config, is_mirrored) of
-        true ->
-            rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, [<<"sync_queue">>, QName]);
-        _ -> ok
-    end.
-
flush() ->
    receive _ -> flush()
    after 10 -> ok
|
|
|
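With sync_mirrors/2 and the is_mirrored plumbing gone, the helpers above only need a channel and a queue name. A minimal usage sketch following the same suite conventions (the test-case and queue names here are made up for illustration):

max_length_drop_head_smoke(Config) ->
    {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
    QName = <<"max_length_smoke">>,   %% hypothetical queue name
    Args = [{<<"x-max-length">>, long, 1},
            {<<"x-overflow">>, longstr, <<"drop-head">>}],
    #'queue.declare_ok'{} =
        amqp_channel:call(Ch, #'queue.declare'{queue = QName,
                                               arguments = Args,
                                               durable = true}),
    check_max_length_drops_head(QName, Ch, <<"1">>, <<"2">>, <<"3">>).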
@@ -41,7 +41,7 @@ all() ->

groups() ->
    [
-     {cluster_size_3, [], [{non_mirrored, [], [
+     {cluster_size_3, [], [
        declare_args,
        declare_policy,
        declare_config,
@@ -49,11 +49,7 @@ groups() ->
        calculate_min_master_with_bindings,
        calculate_random,
        calculate_client_local
       ]},
       {mirrored, [], [declare_invalid_policy,
                       declare_policy_nodes,
                       declare_policy_all,
                       declare_policy_exactly]}]
      ]
     },

     {maintenance_mode, [], [
@@ -84,15 +80,6 @@ init_per_suite(Config) ->
|
|||
end_per_suite(Config) ->
|
||||
rabbit_ct_helpers:run_teardown_steps(Config).
|
||||
|
||||
init_per_group(mirrored, Config) ->
|
||||
case rabbit_ct_broker_helpers:configured_metadata_store(Config) of
|
||||
mnesia ->
|
||||
Config;
|
||||
{khepri, _} ->
|
||||
{skip, "Classic queue mirroring not supported by Khepri"}
|
||||
end;
|
||||
init_per_group(non_mirrored, Config) ->
|
||||
Config;
|
||||
init_per_group(cluster_size_3, Config) ->
|
||||
rabbit_ct_helpers:set_config(Config, [
|
||||
%% Replaced with a list of node names later
|
||||
|
@@ -155,80 +142,6 @@ declare_policy(Config) ->
|
|||
declare(Config, QueueName, false, false, _Args=[], none),
|
||||
verify_min_master(Config, Q).
|
||||
|
||||
declare_invalid_policy(Config) ->
|
||||
%% Tests that the queue-master-location validation returns 'ok'; otherwise the validation of
%% any other parameter might be skipped and an invalid policy accepted.
|
||||
setup_test_environment(Config),
|
||||
unset_location_config(Config),
|
||||
Policy = [{<<"queue-master-locator">>, <<"min-masters">>},
|
||||
{<<"ha-mode">>, <<"exactly">>},
|
||||
%% this field is expected to be an integer
|
||||
{<<"ha-params">>, <<"2">>}],
|
||||
{error_string, _} = rabbit_ct_broker_helpers:rpc(
|
||||
Config, 0, rabbit_policy, set,
|
||||
[<<"/">>, ?POLICY, <<".*">>, Policy, 0, <<"queues">>, <<"acting-user">>]).
|
||||
|
||||
declare_policy_nodes(Config) ->
|
||||
setup_test_environment(Config),
|
||||
unset_location_config(Config),
|
||||
% Note:
|
||||
% Node0 has 15 queues, Node1 has 8 and Node2 has 1
|
||||
Node0Name = rabbit_data_coercion:to_binary(
|
||||
rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename)),
|
||||
Node1 = rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename),
|
||||
Node1Name = rabbit_data_coercion:to_binary(Node1),
|
||||
Nodes = [Node1Name, Node0Name],
|
||||
Policy = [{<<"queue-master-locator">>, <<"min-masters">>},
|
||||
{<<"ha-mode">>, <<"nodes">>},
|
||||
{<<"ha-params">>, Nodes}],
|
||||
ok = rabbit_ct_broker_helpers:set_policy(Config, 0, ?POLICY,
|
||||
<<".*">>, <<"queues">>, Policy),
|
||||
QueueName = rabbit_misc:r(<<"/">>, queue, Q = <<"qm.test">>),
|
||||
declare(Config, QueueName, false, false, _Args=[], none),
|
||||
verify_min_master(Config, Q, Node1).
|
||||
|
||||
declare_policy_all(Config) ->
|
||||
setup_test_environment(Config),
|
||||
unset_location_config(Config),
|
||||
% Note:
|
||||
% Node0 has 15 queues, Node1 has 8 and Node2 has 1
|
||||
Policy = [{<<"queue-master-locator">>, <<"min-masters">>},
|
||||
{<<"ha-mode">>, <<"all">>}],
|
||||
ok = rabbit_ct_broker_helpers:set_policy(Config, 0, ?POLICY,
|
||||
<<".*">>, <<"queues">>, Policy),
|
||||
QueueName = rabbit_misc:r(<<"/">>, queue, Q = <<"qm.test">>),
|
||||
declare(Config, QueueName, false, false, _Args=[], none),
|
||||
verify_min_master(Config, Q).
|
||||
|
||||
declare_policy_exactly(Config) ->
|
||||
setup_test_environment(Config),
|
||||
unset_location_config(Config),
|
||||
Policy = [{<<"queue-master-locator">>, <<"min-masters">>},
|
||||
{<<"ha-mode">>, <<"exactly">>},
|
||||
{<<"ha-params">>, 2}],
|
||||
ok = rabbit_ct_broker_helpers:set_policy(Config, 0, ?POLICY,
|
||||
<<".*">>, <<"queues">>, Policy),
|
||||
QueueRes = rabbit_misc:r(<<"/">>, queue, Q = <<"qm.test">>),
|
||||
declare(Config, QueueRes, false, false, _Args=[], none),
|
||||
|
||||
Node0 = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
|
||||
rabbit_ct_broker_helpers:control_action(sync_queue, Node0,
|
||||
[binary_to_list(Q)], [{"-p", "/"}]),
|
||||
?awaitMatch(true, synced(Config, Node0, QueueRes, 1), 60000),
|
||||
|
||||
{ok, Queue} = rabbit_ct_broker_helpers:rpc(Config, Node0,
|
||||
rabbit_amqqueue, lookup, [QueueRes]),
|
||||
{MNode0, [SNode], [SSNode]} = rabbit_ct_broker_helpers:rpc(Config, Node0,
|
||||
rabbit_mirror_queue_misc,
|
||||
actual_queue_nodes, [Queue]),
|
||||
?assertEqual(SNode, SSNode),
|
||||
{ok, MNode1} = rabbit_ct_broker_helpers:rpc(Config, 0,
|
||||
rabbit_queue_master_location_misc,
|
||||
lookup_master, [Q, ?DEFAULT_VHOST_PATH]),
|
||||
?assertEqual(MNode0, MNode1),
|
||||
Node2 = rabbit_ct_broker_helpers:get_node_config(Config, 2, nodename),
|
||||
?assertEqual(MNode1, Node2).
|
||||
|
||||
declare_config(Config) ->
|
||||
setup_test_environment(Config),
|
||||
set_location_config(Config, <<"min-masters">>),
|
||||
|
@@ -469,10 +382,3 @@ verify_client_local(Config, Q) ->
|
|||
set_location_policy(Config, Name, Strategy) ->
|
||||
ok = rabbit_ct_broker_helpers:set_policy(Config, 0,
|
||||
Name, <<".*">>, <<"queues">>, [{<<"queue-master-locator">>, Strategy}]).
|
||||
|
||||
synced(Config, Nodename, Q, ExpectedSSPidLen) ->
|
||||
Args = [<<"/">>, [name, synchronised_slave_pids]],
|
||||
Info = rabbit_ct_broker_helpers:rpc(Config, Nodename,
|
||||
rabbit_amqqueue, info_all, Args),
|
||||
[SSPids] = [Pids || [{name, Q1}, {synchronised_slave_pids, Pids}] <- Info, Q =:= Q1],
|
||||
length(SSPids) =:= ExpectedSSPidLen.
|
||||
|
|
|
@@ -67,8 +67,6 @@ groups() ->
|
|||
{parallel_tests, [], [
|
||||
{classic_queue, GroupOptions, AllTests ++ [delete_immediately_by_pid_succeeds,
|
||||
trigger_message_store_compaction]},
|
||||
{mirrored_queue, GroupOptions, AllTests ++ [delete_immediately_by_pid_succeeds,
|
||||
trigger_message_store_compaction]},
|
||||
{quorum_queue, GroupOptions, AllTests ++ ExtraBccTests ++ [delete_immediately_by_pid_fails]},
|
||||
{quorum_queue_in_memory_limit, GroupOptions, AllTests ++ [delete_immediately_by_pid_fails]},
|
||||
{quorum_queue_in_memory_bytes, GroupOptions, AllTests ++ [delete_immediately_by_pid_fails]},
|
||||
|
@@ -118,21 +116,6 @@ init_per_group(quorum_queue_in_memory_bytes, Config) ->
|
|||
{<<"x-max-in-memory-bytes">>, long, 1}]},
|
||||
{consumer_args, []},
|
||||
{queue_durable, true}]);
|
||||
init_per_group(mirrored_queue, Config) ->
|
||||
case rabbit_ct_broker_helpers:configured_metadata_store(Config) of
|
||||
mnesia ->
|
||||
rabbit_ct_broker_helpers:set_ha_policy(
|
||||
Config, 0, <<"^max_length.*queue">>,
|
||||
<<"all">>, [{<<"ha-sync-mode">>, <<"automatic">>}]),
|
||||
Config1 = rabbit_ct_helpers:set_config(
|
||||
Config, [{is_mirrored, true},
|
||||
{queue_args, [{<<"x-queue-type">>, longstr, <<"classic">>}]},
|
||||
{consumer_args, []},
|
||||
{queue_durable, true}]),
|
||||
rabbit_ct_helpers:run_steps(Config1, []);
|
||||
{khepri, _} ->
|
||||
{skip, "Classic queue mirroring not supported by Khepri"}
|
||||
end;
|
||||
init_per_group(stream_queue, Config) ->
|
||||
rabbit_ct_helpers:set_config(
|
||||
Config,
|
||||
|
|
|
@@ -44,24 +44,7 @@ end_per_suite(Config) ->
|
|||
rabbit_ct_helpers:run_teardown_steps(Config),
|
||||
ok.
|
||||
|
||||
init_per_group(classic = Group, Config0) ->
|
||||
ct:pal("init per group ~p", [Group]),
|
||||
case rabbit_ct_broker_helpers:configured_metadata_store(Config0) of
|
||||
mnesia ->
|
||||
Config = init_per_group0(classic, Config0),
|
||||
rabbit_ct_broker_helpers:set_policy(
|
||||
Config, 0,
|
||||
<<"ha-policy">>, <<".*">>, <<"queues">>,
|
||||
[{<<"ha-mode">>, <<"all">>}]),
|
||||
Config;
|
||||
{khepri, _} ->
|
||||
{skip, "Classic queue mirroring not supported by Khepri"}
|
||||
end;
|
||||
init_per_group(Group, Config) ->
|
||||
ct:pal("init per group ~p", [Group]),
|
||||
init_per_group0(Group, Config).
|
||||
|
||||
init_per_group0(Group, Config) ->
|
||||
ClusterSize = 3,
|
||||
Config1 = rabbit_ct_helpers:set_config(Config,
|
||||
[{rmq_nodes_count, ClusterSize},
|
||||
|
|
|
@@ -15,8 +15,7 @@
|
|||
|
||||
all() ->
|
||||
[
|
||||
{group, non_parallel_tests},
|
||||
{group, cluster_tests}
|
||||
{group, non_parallel_tests}
|
||||
].
|
||||
|
||||
groups() ->
|
||||
|
@@ -29,8 +28,7 @@ groups() ->
|
|||
gen_server2_metrics,
|
||||
consumer_metrics
|
||||
]
|
||||
},
|
||||
{cluster_tests, [], [cluster_queue_metrics]}
|
||||
}
|
||||
].
|
||||
|
||||
%% -------------------------------------------------------------------
|
||||
|
@@ -43,16 +41,6 @@ merge_app_env(Config) ->
|
|||
{collect_statistics, fine}]},
|
||||
rabbit_ct_helpers:merge_app_env(Config, AppEnv).
|
||||
|
||||
init_per_group(cluster_tests, Config) ->
|
||||
rabbit_ct_helpers:log_environment(),
|
||||
case rabbit_ct_broker_helpers:configured_metadata_store(Config) of
|
||||
mnesia ->
|
||||
Conf = [{rmq_nodename_suffix, cluster_tests}, {rmq_nodes_count, 2}],
|
||||
Config1 = rabbit_ct_helpers:set_config(Config, Conf),
|
||||
rabbit_ct_helpers:run_setup_steps(Config1, setup_steps());
|
||||
{khepri, _} ->
|
||||
{skip, "Classic queue mirroring not supported by Khepri"}
|
||||
end;
|
||||
init_per_group(non_parallel_tests, Config) ->
|
||||
rabbit_ct_helpers:log_environment(),
|
||||
Conf = [{rmq_nodename_suffix, non_parallel_tests}],
|
||||
|
@@ -329,72 +317,3 @@ x(Name) ->
|
|||
#resource{ virtual_host = <<"/">>,
|
||||
kind = exchange,
|
||||
name = Name }.
|
||||
|
||||
%% -------------------------------------------------------------------
|
||||
%% Cluster Testcases.
|
||||
%% -------------------------------------------------------------------
|
||||
|
||||
cluster_queue_metrics(Config) ->
|
||||
VHost = <<"/">>,
|
||||
QueueName = <<"cluster_queue_metrics">>,
|
||||
PolicyName = <<"ha-policy-1">>,
|
||||
PolicyPattern = <<".*">>,
|
||||
PolicyAppliesTo = <<"queues">>,
|
||||
|
||||
Node0 = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
|
||||
Node1 = rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename),
|
||||
|
||||
Ch = rabbit_ct_client_helpers:open_channel(Config, Node0),
|
||||
|
||||
Node0Name = rabbit_data_coercion:to_binary(Node0),
|
||||
Definition0 = [{<<"ha-mode">>, <<"nodes">>}, {<<"ha-params">>, [Node0Name]}],
|
||||
ok = rabbit_ct_broker_helpers:set_policy(Config, 0,
|
||||
PolicyName, PolicyPattern,
|
||||
PolicyAppliesTo, Definition0),
|
||||
|
||||
amqp_channel:call(Ch, #'queue.declare'{queue = QueueName}),
|
||||
amqp_channel:call(Ch, #'basic.publish'{routing_key = QueueName},
|
||||
#amqp_msg{payload = <<"hello">>}),
|
||||
|
||||
% Update policy to point to other node
|
||||
Node1Name = rabbit_data_coercion:to_binary(Node1),
|
||||
Definition1 = [{<<"ha-mode">>, <<"nodes">>}, {<<"ha-params">>, [Node1Name]}],
|
||||
ok = rabbit_ct_broker_helpers:set_policy(Config, 0,
|
||||
PolicyName, PolicyPattern,
|
||||
PolicyAppliesTo, Definition1),
|
||||
|
||||
% Synchronize
|
||||
Name = rabbit_misc:r(VHost, queue, QueueName),
|
||||
[Q] = rabbit_ct_broker_helpers:rpc(Config, Node0, ets, lookup, [rabbit_queue, Name]),
|
||||
QPid = amqqueue:get_pid(Q),
|
||||
ok = rabbit_ct_broker_helpers:rpc(Config, Node0, rabbit_amqqueue, sync_mirrors, [QPid]),
|
||||
|
||||
% Check ETS table for data
|
||||
wait_for(fun () ->
|
||||
[] =:= rabbit_ct_broker_helpers:rpc(
|
||||
Config, Node0, ets, tab2list,
|
||||
[queue_coarse_metrics])
|
||||
end, 60),
|
||||
|
||||
wait_for(fun () ->
|
||||
Ret = rabbit_ct_broker_helpers:rpc(
|
||||
Config, Node1, ets, tab2list,
|
||||
[queue_coarse_metrics]),
|
||||
case Ret of
|
||||
[{Name, 1, 0, 1, _}] -> true;
|
||||
_ -> false
|
||||
end
|
||||
end, 60),
|
||||
|
||||
amqp_channel:call(Ch, #'queue.delete'{queue=QueueName}),
|
||||
rabbit_ct_client_helpers:close_channel(Ch),
|
||||
Config.
|
||||
|
||||
wait_for(_Fun, 0) -> false;
|
||||
wait_for(Fun, Seconds) ->
|
||||
case Fun() of
|
||||
true -> ok;
|
||||
false ->
|
||||
timer:sleep(1000),
|
||||
wait_for(Fun, Seconds - 1)
|
||||
end.
|
||||
|
|
|
@@ -125,8 +125,6 @@ init_per_testcase(Testcase, Config) ->
|
|||
{single_dlx_worker, true, _} ->
|
||||
{skip, "single_dlx_worker is not mixed version compatible because process "
|
||||
"rabbit_fifo_dlx_sup does not exist in 3.9"};
|
||||
{many_target_queues, _, true} ->
|
||||
{skip, "Classic queue mirroring not supported by Khepri"};
|
||||
_ ->
|
||||
Config1 = rabbit_ct_helpers:testcase_started(Config, Testcase),
|
||||
T = rabbit_data_coercion:to_binary(Testcase),
|
||||
|
@@ -811,32 +809,25 @@ target_quorum_queue_delete_create(Config) ->
|
|||
%% 2. Target queue can be classic queue, quorum queue, or stream queue.
|
||||
%%
|
||||
%% Lesson learnt by writing this test:
|
||||
%% If there are multiple target queues, messages will not be sent / routed to target non-mirrored durable classic queues
|
||||
%% If there are multiple target queues, messages will not be sent / routed to target durable classic queues
|
||||
%% when their host node is temporarily down because these queues get temporarily deleted from the rabbit_queue RAM table
|
||||
%% (but will still be present in the rabbit_durable_queue DISC table). See:
|
||||
%% https://github.com/rabbitmq/rabbitmq-server/blob/cf76b479300b767b8ea450293d096cbf729ed734/deps/rabbit/src/rabbit_amqqueue.erl#L1955-L1964
|
||||
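The RAM-table versus disc-table behaviour described in the comment above can be spot-checked with two lookups from a surviving node while the classic queue's host node is down; a rough sketch (the queue name and node index are hypothetical, the table access mirrors what other suites touched by this commit already do):

QRes = rabbit_misc:r(<<"/">>, queue, <<"some-classic-queue">>),
%% Gone from the rabbit_queue RAM table on a surviving node ...
[] = rabbit_ct_broker_helpers:rpc(Config, 0, ets, lookup, [rabbit_queue, QRes]),
%% ... but still present in the durable (disc) table.
[_] = rabbit_ct_broker_helpers:rpc(Config, 0, mnesia, dirty_read, [rabbit_durable_queue, QRes]).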
many_target_queues(Config) ->
|
||||
[Server1, Server2, Server3] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
|
||||
Ch = rabbit_ct_client_helpers:open_channel(Config, Server1),
|
||||
Ch2 = rabbit_ct_client_helpers:open_channel(Config, Server2),
|
||||
SourceQ = ?config(source_queue, Config),
|
||||
RaName = ra_name(SourceQ),
|
||||
TargetQ1 = ?config(target_queue_1, Config),
|
||||
TargetQ2 = ?config(target_queue_2, Config),
|
||||
TargetQ3 = ?config(target_queue_3, Config),
|
||||
TargetQ4 = ?config(target_queue_4, Config),
|
||||
TargetQ5 = ?config(target_queue_5, Config),
|
||||
TargetQ6 = ?config(target_queue_6, Config),
|
||||
DLX = ?config(dead_letter_exchange, Config),
|
||||
DLRKey = <<"k1">>,
|
||||
%% Create topology:
|
||||
%% * source quorum queue with 1 replica on node 1
|
||||
%% * target non-mirrored classic queue on node 1
|
||||
%% * target classic queue on node 1
|
||||
%% * target quorum queue with 3 replicas
|
||||
%% * target stream queue with 3 replicas
|
||||
%% * target mirrored classic queue with 3 replicas (leader on node 1)
|
||||
%% * target mirrored classic queue with 1 replica (leader on node 2)
|
||||
%% * target mirrored classic queue with 3 replica (leader on node 2)
|
||||
declare_queue(Ch, SourceQ, [{<<"x-dead-letter-exchange">>, longstr, DLX},
|
||||
{<<"x-dead-letter-routing-key">>, longstr, DLRKey},
|
||||
{<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>},
|
||||
|
@@ -855,22 +846,6 @@ many_target_queues(Config) ->
|
|||
{<<"x-initial-cluster-size">>, long, 3}
|
||||
]),
|
||||
bind_queue(Ch, TargetQ3, DLX, DLRKey),
|
||||
ok = rabbit_ct_broker_helpers:set_policy(Config, Server1, <<"mirror-q4">>, TargetQ4, <<"queues">>,
|
||||
[{<<"ha-mode">>, <<"all">>},
|
||||
{<<"queue-master-locator">>, <<"client-local">>}]),
|
||||
declare_queue(Ch, TargetQ4, []),
|
||||
bind_queue(Ch, TargetQ4, DLX, DLRKey),
|
||||
ok = rabbit_ct_broker_helpers:set_policy(Config, Server1, <<"mirror-q5">>, TargetQ5, <<"queues">>,
|
||||
[{<<"ha-mode">>, <<"exactly">>},
|
||||
{<<"ha-params">>, 1},
|
||||
{<<"queue-master-locator">>, <<"client-local">>}]),
|
||||
declare_queue(Ch2, TargetQ5, []),
|
||||
bind_queue(Ch2, TargetQ5, DLX, DLRKey),
|
||||
ok = rabbit_ct_broker_helpers:set_policy(Config, Server1, <<"mirror-q6">>, TargetQ6, <<"queues">>,
|
||||
[{<<"ha-mode">>, <<"all">>},
|
||||
{<<"queue-master-locator">>, <<"client-local">>}]),
|
||||
declare_queue(Ch2, TargetQ6, []),
|
||||
bind_queue(Ch2, TargetQ6, DLX, DLRKey),
|
||||
Msg1 = <<"m1">>,
|
||||
ok = amqp_channel:cast(Ch,
|
||||
#'basic.publish'{routing_key = SourceQ},
|
||||
|
@@ -904,15 +879,6 @@ many_target_queues(Config) ->
|
|||
after 2000 ->
|
||||
exit(deliver_timeout)
|
||||
end,
|
||||
?awaitMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg1}},
|
||||
amqp_channel:call(Ch, #'basic.get'{queue = TargetQ4}),
|
||||
?DEFAULT_WAIT, ?DEFAULT_INTERVAL),
|
||||
?awaitMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg1}},
|
||||
amqp_channel:call(Ch2, #'basic.get'{queue = TargetQ5}),
|
||||
?DEFAULT_WAIT, ?DEFAULT_INTERVAL),
|
||||
?awaitMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg1}},
|
||||
amqp_channel:call(Ch2, #'basic.get'{queue = TargetQ6}),
|
||||
?DEFAULT_WAIT, ?DEFAULT_INTERVAL),
|
||||
?awaitMatch([{0, 0}],
|
||||
dirty_query([Server1], RaName, fun rabbit_fifo:query_stat_dlx/1),
|
||||
?DEFAULT_WAIT, ?DEFAULT_INTERVAL),
|
||||
|
@@ -949,16 +915,6 @@ many_target_queues(Config) ->
|
|||
after 0 ->
|
||||
exit(deliver_timeout)
|
||||
end,
|
||||
?awaitMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg2}},
|
||||
amqp_channel:call(Ch, #'basic.get'{queue = TargetQ4}),
|
||||
?DEFAULT_WAIT, ?DEFAULT_INTERVAL),
|
||||
?awaitMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg2}},
|
||||
amqp_channel:call(Ch, #'basic.get'{queue = TargetQ5}),
|
||||
?DEFAULT_WAIT, ?DEFAULT_INTERVAL),
|
||||
%%TODO why is the 1st message (m1) a duplicate?
|
||||
?awaitMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg2}},
|
||||
amqp_channel:call(Ch, #'basic.get'{queue = TargetQ6}),
|
||||
?DEFAULT_WAIT, ?DEFAULT_INTERVAL),
|
||||
?assertEqual(2, counted(messages_dead_lettered_expired_total, Config)),
|
||||
?assertEqual(2, counted(messages_dead_lettered_confirmed_total, Config)).
|
||||
|
||||
|
|
|
@@ -1,102 +0,0 @@
|
|||
%% This Source Code Form is subject to the terms of the Mozilla Public
|
||||
%% License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
%%
|
||||
%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
|
||||
%%
|
||||
-module(rabbit_ha_test_consumer).
|
||||
|
||||
-include_lib("amqp_client/include/amqp_client.hrl").
|
||||
|
||||
-export([await_response/1, create/5, start/6]).
|
||||
|
||||
await_response(ConsumerPid) ->
|
||||
case receive {ConsumerPid, Response} -> Response end of
|
||||
{error, Reason} -> erlang:error(Reason);
|
||||
ok -> ok
|
||||
end.
|
||||
|
||||
create(Channel, Queue, TestPid, CancelOnFailover, ExpectingMsgs) ->
|
||||
ConsumerPid = spawn_link(?MODULE, start,
|
||||
[TestPid, Channel, Queue, CancelOnFailover,
|
||||
ExpectingMsgs + 1, ExpectingMsgs]),
|
||||
amqp_channel:subscribe(
|
||||
Channel, consume_method(Queue, CancelOnFailover), ConsumerPid),
|
||||
ConsumerPid.
|
||||
|
||||
start(TestPid, Channel, Queue, CancelOnFailover, LowestSeen, MsgsToConsume) ->
|
||||
error_logger:info_msg("consumer ~tp on ~tp awaiting ~w messages "
|
||||
"(lowest seen = ~w, cancel-on-failover = ~w)~n",
|
||||
[self(), Channel, MsgsToConsume, LowestSeen,
|
||||
CancelOnFailover]),
|
||||
run(TestPid, Channel, Queue, CancelOnFailover, LowestSeen, MsgsToConsume).
|
||||
|
||||
run(TestPid, _Channel, _Queue, _CancelOnFailover, _LowestSeen, 0) ->
|
||||
consumer_reply(TestPid, ok);
|
||||
run(TestPid, Channel, Queue, CancelOnFailover, LowestSeen, MsgsToConsume) ->
|
||||
receive
|
||||
#'basic.consume_ok'{} ->
|
||||
run(TestPid, Channel, Queue,
|
||||
CancelOnFailover, LowestSeen, MsgsToConsume);
|
||||
{Delivery = #'basic.deliver'{ redelivered = Redelivered },
|
||||
#amqp_msg{payload = Payload}} ->
|
||||
MsgNum = list_to_integer(binary_to_list(Payload)),
|
||||
|
||||
ack(Delivery, Channel),
|
||||
|
||||
%% we can receive any message we've already seen and,
|
||||
%% because of the possibility of multiple requeuings, we
|
||||
%% might see these messages in any order. If we are seeing
|
||||
%% a message again, we don't decrement the MsgsToConsume
|
||||
%% counter.
|
||||
if
|
||||
MsgNum + 1 == LowestSeen ->
|
||||
run(TestPid, Channel, Queue,
|
||||
CancelOnFailover, MsgNum, MsgsToConsume - 1);
|
||||
MsgNum >= LowestSeen ->
|
||||
true = Redelivered, %% ASSERTION
|
||||
run(TestPid, Channel, Queue,
|
||||
CancelOnFailover, LowestSeen, MsgsToConsume);
|
||||
true ->
|
||||
%% We received a message we haven't seen before,
|
||||
%% but it is not the next message in the expected
|
||||
%% sequence.
|
||||
consumer_reply(TestPid,
|
||||
{error, {unexpected_message, MsgNum}})
|
||||
end;
|
||||
#'basic.cancel'{} when CancelOnFailover ->
|
||||
error_logger:info_msg("consumer ~tp on ~tp received basic.cancel: "
|
||||
"resubscribing to ~tp on ~tp~n",
|
||||
[self(), Channel, Queue, Channel]),
|
||||
resubscribe(TestPid, Channel, Queue, CancelOnFailover,
|
||||
LowestSeen, MsgsToConsume);
|
||||
#'basic.cancel'{} ->
|
||||
exit(cancel_received_without_cancel_on_failover)
|
||||
end.
|
||||
|
||||
%%
|
||||
%% Private API
|
||||
%%
|
||||
|
||||
resubscribe(TestPid, Channel, Queue, CancelOnFailover, LowestSeen,
|
||||
MsgsToConsume) ->
|
||||
amqp_channel:subscribe(
|
||||
Channel, consume_method(Queue, CancelOnFailover), self()),
|
||||
ok = receive #'basic.consume_ok'{} -> ok
|
||||
end,
|
||||
error_logger:info_msg("re-subscripting consumer ~tp on ~tp complete "
|
||||
"(received basic.consume_ok)",
|
||||
[self(), Channel]),
|
||||
start(TestPid, Channel, Queue, CancelOnFailover, LowestSeen, MsgsToConsume).
|
||||
|
||||
consume_method(Queue, CancelOnFailover) ->
|
||||
Args = [{<<"x-cancel-on-ha-failover">>, bool, CancelOnFailover}],
|
||||
#'basic.consume'{queue = Queue,
|
||||
arguments = Args}.
|
||||
|
||||
ack(#'basic.deliver'{delivery_tag = DeliveryTag}, Channel) ->
|
||||
amqp_channel:call(Channel, #'basic.ack'{delivery_tag = DeliveryTag}),
|
||||
ok.
|
||||
|
||||
consumer_reply(TestPid, Reply) ->
|
||||
TestPid ! {self(), Reply}.
|
|
@@ -1,131 +0,0 @@
|
|||
%% This Source Code Form is subject to the terms of the Mozilla Public
|
||||
%% License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
%%
|
||||
%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
|
||||
%%
|
||||
-module(rabbit_ha_test_producer).
|
||||
|
||||
-export([await_response/1, start/6, create/5, create/6]).
|
||||
|
||||
-include_lib("amqp_client/include/amqp_client.hrl").
|
||||
|
||||
await_response(ProducerPid) ->
|
||||
error_logger:info_msg("waiting for producer pid ~tp~n", [ProducerPid]),
|
||||
case receive {ProducerPid, Response} -> Response end of
|
||||
ok -> ok;
|
||||
{error, _} = Else -> exit(Else);
|
||||
Else -> exit({weird_response, Else})
|
||||
end.
|
||||
|
||||
create(Channel, Queue, TestPid, Confirm, MsgsToSend) ->
|
||||
create(Channel, Queue, TestPid, Confirm, MsgsToSend, acks).
|
||||
|
||||
create(Channel, Queue, TestPid, Confirm, MsgsToSend, Mode) ->
|
||||
AckNackMsgs = case Mode of
|
||||
acks -> {ok, {error, received_nacks}};
|
||||
nacks -> {{error, received_acks}, ok}
|
||||
end,
|
||||
ProducerPid = spawn_link(?MODULE, start, [Channel, Queue, TestPid,
|
||||
Confirm, MsgsToSend, AckNackMsgs]),
|
||||
receive
|
||||
{ProducerPid, started} -> ProducerPid
|
||||
end.
|
||||
|
||||
start(Channel, Queue, TestPid, Confirm, MsgsToSend, AckNackMsgs) ->
|
||||
ConfirmState =
|
||||
case Confirm of
|
||||
true -> amqp_channel:register_confirm_handler(Channel, self()),
|
||||
#'confirm.select_ok'{} =
|
||||
amqp_channel:call(Channel, #'confirm.select'{}),
|
||||
gb_trees:empty();
|
||||
false -> none
|
||||
end,
|
||||
TestPid ! {self(), started},
|
||||
error_logger:info_msg("publishing ~w msgs on ~tp~n", [MsgsToSend, Channel]),
|
||||
producer(Channel, Queue, TestPid, ConfirmState, MsgsToSend, AckNackMsgs).
|
||||
|
||||
%%
|
||||
%% Private API
|
||||
%%
|
||||
|
||||
producer(_Channel, _Queue, TestPid, none, 0, _AckNackMsgs) ->
|
||||
TestPid ! {self(), ok};
|
||||
producer(Channel, _Queue, TestPid, ConfirmState, 0, {AckMsg, NackMsg}) ->
|
||||
error_logger:info_msg("awaiting confirms on channel ~tp~n", [Channel]),
|
||||
Msg = case drain_confirms(none, ConfirmState) of
|
||||
%% No acks or nacks
|
||||
acks -> AckMsg;
|
||||
nacks -> NackMsg;
|
||||
mix -> {error, received_both_acks_and_nacks};
|
||||
{Nacks, CS} -> {error, {missing_confirms, Nacks,
|
||||
lists:sort(gb_trees:keys(CS))}}
|
||||
end,
|
||||
TestPid ! {self(), Msg};
|
||||
|
||||
producer(Channel, Queue, TestPid, ConfirmState, MsgsToSend, AckNackMsgs) ->
|
||||
Method = #'basic.publish'{exchange = <<"">>,
|
||||
routing_key = Queue,
|
||||
mandatory = false,
|
||||
immediate = false},
|
||||
|
||||
ConfirmState1 = maybe_record_confirm(ConfirmState, Channel, MsgsToSend),
|
||||
|
||||
amqp_channel:call(Channel, Method,
|
||||
#amqp_msg{props = #'P_basic'{delivery_mode = 2},
|
||||
payload = list_to_binary(
|
||||
integer_to_list(MsgsToSend))}),
|
||||
|
||||
producer(Channel, Queue, TestPid, ConfirmState1, MsgsToSend - 1, AckNackMsgs).
|
||||
|
||||
maybe_record_confirm(none, _, _) ->
|
||||
none;
|
||||
maybe_record_confirm(ConfirmState, Channel, MsgsToSend) ->
|
||||
SeqNo = amqp_channel:next_publish_seqno(Channel),
|
||||
gb_trees:insert(SeqNo, MsgsToSend, ConfirmState).
|
||||
|
||||
drain_confirms(Collected, ConfirmState) ->
|
||||
case gb_trees:is_empty(ConfirmState) of
|
||||
true -> Collected;
|
||||
false -> receive
|
||||
#'basic.ack'{delivery_tag = DeliveryTag,
|
||||
multiple = IsMulti} ->
|
||||
Collected1 = case Collected of
|
||||
none -> acks;
|
||||
acks -> acks;
|
||||
nacks -> mix;
|
||||
mix -> mix
|
||||
end,
|
||||
drain_confirms(Collected1,
|
||||
delete_confirms(DeliveryTag, IsMulti,
|
||||
ConfirmState));
|
||||
#'basic.nack'{delivery_tag = DeliveryTag,
|
||||
multiple = IsMulti} ->
|
||||
Collected1 = case Collected of
|
||||
none -> nacks;
|
||||
nacks -> nacks;
|
||||
acks -> mix;
|
||||
mix -> mix
|
||||
end,
|
||||
drain_confirms(Collected1,
|
||||
delete_confirms(DeliveryTag, IsMulti,
|
||||
ConfirmState))
|
||||
after
|
||||
60000 -> {Collected, ConfirmState}
|
||||
end
|
||||
end.
|
||||
|
||||
delete_confirms(DeliveryTag, false, ConfirmState) ->
|
||||
gb_trees:delete(DeliveryTag, ConfirmState);
|
||||
delete_confirms(DeliveryTag, true, ConfirmState) ->
|
||||
multi_confirm(DeliveryTag, ConfirmState).
|
||||
|
||||
multi_confirm(DeliveryTag, ConfirmState) ->
|
||||
case gb_trees:is_empty(ConfirmState) of
|
||||
true -> ConfirmState;
|
||||
false -> {Key, _, ConfirmState1} = gb_trees:take_smallest(ConfirmState),
|
||||
case Key =< DeliveryTag of
|
||||
true -> multi_confirm(DeliveryTag, ConfirmState1);
|
||||
false -> ConfirmState
|
||||
end
|
||||
end.
|
|
@@ -384,77 +384,27 @@ get_disc_nodes(Config, Node) ->
|
|||
%% -------------------------------------------------------------------
|
||||
|
||||
set_policy_when_cmq_is_permitted_by_default(Config) ->
|
||||
case ?config(metadata_store, Config) of
|
||||
mnesia ->
|
||||
set_policy_when_cmq_is_permitted_by_default_mnesia(Config);
|
||||
khepri ->
|
||||
set_policy_when_cmq_is_permitted_by_default_khepri(Config)
|
||||
end.
|
||||
|
||||
set_policy_when_cmq_is_permitted_by_default_mnesia(Config) ->
|
||||
?assertEqual(
|
||||
ok,
|
||||
rabbit_ct_broker_helpers:set_ha_policy(
|
||||
Config, 0, <<".*">>, <<"all">>)),
|
||||
|
||||
[NodeA] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
|
||||
|
||||
?assert(
|
||||
log_file_contains_message(
|
||||
Config, NodeA,
|
||||
["Deprecated features: `classic_queue_mirroring`: Classic mirrored "
|
||||
"queues are deprecated.",
|
||||
"By default, they can still be used for now."])),
|
||||
|
||||
%% Change the advanced configuration file to turn off classic queue
|
||||
%% mirroring.
|
||||
ConfigFilename0 = rabbit_ct_broker_helpers:get_node_config(
|
||||
Config, NodeA, erlang_node_config_filename),
|
||||
ConfigFilename = ConfigFilename0 ++ ".config",
|
||||
{ok, [ConfigContent0]} = file:consult(ConfigFilename),
|
||||
ConfigContent1 = rabbit_ct_helpers:merge_app_env_in_erlconf(
|
||||
ConfigContent0,
|
||||
{rabbit, [{permit_deprecated_features,
|
||||
#{classic_queue_mirroring => false}}]}),
|
||||
ConfigContent2 = lists:flatten(io_lib:format("~p.~n", [ConfigContent1])),
|
||||
ok = file:write_file(ConfigFilename, ConfigContent2),
|
||||
?assertEqual({ok, [ConfigContent1]}, file:consult(ConfigFilename)),
|
||||
|
||||
%% Restart the node and see if it was correctly converted to a disc node.
|
||||
{ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(
|
||||
Config, NodeA, ["stop_app"]),
|
||||
{error, 69, Message} = rabbit_ct_broker_helpers:rabbitmqctl(
|
||||
Config, NodeA, ["start_app"]),
|
||||
Ret = re:run(
|
||||
Message,
|
||||
":failed_to_deny_deprecated_features, "
|
||||
"\\[:classic_queue_mirroring\\]",
|
||||
[{capture, none}]),
|
||||
?assertEqual(match, Ret).
|
||||
|
||||
set_policy_when_cmq_is_permitted_by_default_khepri(Config) ->
|
||||
?assertError(
|
||||
{badmatch,
|
||||
{error_string,
|
||||
"Validation failed\n\nClassic mirrored queues are deprecated." ++ _}},
|
||||
rabbit_ct_broker_helpers:set_ha_policy(
|
||||
Config, 0, <<".*">>, <<"all">>)).
|
||||
set_cmq_policy(Config).
|
||||
|
||||
set_policy_when_cmq_is_not_permitted_from_conf(Config) ->
|
||||
set_cmq_policy(Config).
|
||||
|
||||
set_cmq_policy(Config) ->
|
||||
%% CMQ have been removed, any attempt to set a policy
|
||||
%% should fail as any other unknown policy.
|
||||
?assertError(
|
||||
{badmatch,
|
||||
{error_string,
|
||||
"Validation failed\n\nClassic mirrored queues are deprecated." ++ _}},
|
||||
rabbit_ct_broker_helpers:set_ha_policy(
|
||||
Config, 0, <<".*">>, <<"all">>)),
|
||||
"Validation failed\n\n[{<<\"ha-mode\">>,<<\"all\">>}] are not recognised policy settings" ++ _}},
|
||||
rabbit_ct_broker_helpers:set_policy(
|
||||
Config, 0, <<"ha">>, <<".*">>, <<"queues">>, [{<<"ha-mode">>, <<"all">>}])),
|
||||
|
||||
[NodeA] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
|
||||
|
||||
?assert(
|
||||
?assertNot(
|
||||
log_file_contains_message(
|
||||
Config, NodeA,
|
||||
["Deprecated features: `classic_queue_mirroring`: Classic mirrored queues are deprecated.",
|
||||
"Their use is not permitted per the configuration"])).
|
||||
["Deprecated features: `classic_queue_mirroring`: Classic mirrored queues have been removed."])).
|
||||
|
||||
%% -------------------------------------------------------------------
|
||||
%% Transient non-exclusive queues.
|
||||
|
|
|
@@ -1,338 +0,0 @@
|
|||
%% This Source Code Form is subject to the terms of the Mozilla Public
|
||||
%% License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
%%
|
||||
%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
|
||||
%%
|
||||
|
||||
-module(simple_ha_SUITE).
|
||||
|
||||
-include_lib("common_test/include/ct.hrl").
|
||||
-include_lib("amqp_client/include/amqp_client.hrl").
|
||||
-include_lib("eunit/include/eunit.hrl").
|
||||
|
||||
-compile(export_all).
|
||||
|
||||
-define(DELAY, 8000).
|
||||
|
||||
all() ->
|
||||
[
|
||||
{group, cluster_size_2},
|
||||
{group, cluster_size_3}
|
||||
].
|
||||
|
||||
groups() ->
|
||||
RejectTests = [
|
||||
rejects_survive_stop,
|
||||
rejects_survive_policy
|
||||
],
|
||||
[
|
||||
{cluster_size_2, [], [
|
||||
rapid_redeclare,
|
||||
declare_synchrony,
|
||||
clean_up_exclusive_queues
|
||||
]},
|
||||
{cluster_size_3, [], [
|
||||
consume_survives_stop,
|
||||
consume_survives_policy,
|
||||
auto_resume,
|
||||
auto_resume_no_ccn_client,
|
||||
confirms_survive_stop,
|
||||
confirms_survive_policy,
|
||||
{overflow_reject_publish, [], RejectTests},
|
||||
{overflow_reject_publish_dlx, [], RejectTests}
|
||||
]}
|
||||
].
|
||||
|
||||
%% -------------------------------------------------------------------
|
||||
%% Testsuite setup/teardown.
|
||||
%% -------------------------------------------------------------------
|
||||
|
||||
init_per_suite(Config) ->
|
||||
rabbit_ct_helpers:log_environment(),
|
||||
case rabbit_ct_broker_helpers:configured_metadata_store(Config) of
|
||||
mnesia ->
|
||||
rabbit_ct_helpers:run_setup_steps(Config);
|
||||
{khepri, _} ->
|
||||
{skip, "Classic queue mirroring not supported by Khepri"}
|
||||
end.
|
||||
|
||||
end_per_suite(Config) ->
|
||||
rabbit_ct_helpers:run_teardown_steps(Config).
|
||||
|
||||
init_per_group(cluster_size_2, Config) ->
|
||||
rabbit_ct_helpers:set_config(Config, [
|
||||
{rmq_nodes_count, 2}
|
||||
]);
|
||||
init_per_group(cluster_size_3, Config) ->
|
||||
rabbit_ct_helpers:set_config(Config, [
|
||||
{rmq_nodes_count, 3}
|
||||
]);
|
||||
init_per_group(overflow_reject_publish, Config) ->
|
||||
rabbit_ct_helpers:set_config(Config, [
|
||||
{overflow, <<"reject-publish">>}
|
||||
]);
|
||||
init_per_group(overflow_reject_publish_dlx, Config) ->
|
||||
rabbit_ct_helpers:set_config(Config, [
|
||||
{overflow, <<"reject-publish-dlx">>}
|
||||
]).
|
||||
|
||||
end_per_group(_, Config) ->
|
||||
Config.
|
||||
|
||||
init_per_testcase(Testcase, Config) ->
|
||||
rabbit_ct_helpers:testcase_started(Config, Testcase),
|
||||
ClusterSize = ?config(rmq_nodes_count, Config),
|
||||
TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
|
||||
Config1 = rabbit_ct_helpers:set_config(Config, [
|
||||
{rmq_nodes_clustered, true},
|
||||
{rmq_nodename_suffix, Testcase},
|
||||
{tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
|
||||
]),
|
||||
rabbit_ct_helpers:run_steps(Config1,
|
||||
rabbit_ct_broker_helpers:setup_steps() ++
|
||||
rabbit_ct_client_helpers:setup_steps() ++ [
|
||||
fun rabbit_ct_broker_helpers:set_ha_policy_all/1
|
||||
]).
|
||||
|
||||
end_per_testcase(Testcase, Config) ->
|
||||
Config1 = rabbit_ct_helpers:run_steps(Config,
|
||||
rabbit_ct_client_helpers:teardown_steps() ++
|
||||
rabbit_ct_broker_helpers:teardown_steps()),
|
||||
rabbit_ct_helpers:testcase_finished(Config1, Testcase).
|
||||
|
||||
%% -------------------------------------------------------------------
|
||||
%% Testcases.
|
||||
%% -------------------------------------------------------------------
|
||||
|
||||
rapid_redeclare(Config) ->
|
||||
A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
|
||||
Ch = rabbit_ct_client_helpers:open_channel(Config, A),
|
||||
Queue = <<"test">>,
|
||||
[begin
|
||||
amqp_channel:call(Ch, #'queue.declare'{queue = Queue,
|
||||
durable = true}),
|
||||
amqp_channel:call(Ch, #'queue.delete'{queue = Queue})
|
||||
end || _I <- lists:seq(1, 20)],
|
||||
ok.
|
||||
|
||||
%% Check that by the time we get a declare-ok back, the mirrors are up
|
||||
%% and in Mnesia.
|
||||
declare_synchrony(Config) ->
|
||||
[Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config,
|
||||
nodename),
|
||||
RabbitCh = rabbit_ct_client_helpers:open_channel(Config, Rabbit),
|
||||
HareCh = rabbit_ct_client_helpers:open_channel(Config, Hare),
|
||||
Q = <<"mirrored-queue">>,
|
||||
declare(RabbitCh, Q),
|
||||
amqp_channel:call(RabbitCh, #'confirm.select'{}),
|
||||
amqp_channel:cast(RabbitCh, #'basic.publish'{routing_key = Q},
|
||||
#amqp_msg{props = #'P_basic'{delivery_mode = 2}}),
|
||||
amqp_channel:wait_for_confirms(RabbitCh),
|
||||
rabbit_ct_broker_helpers:kill_node(Config, Rabbit),
|
||||
|
||||
#'queue.declare_ok'{message_count = 1} = declare(HareCh, Q),
|
||||
ok.
|
||||
|
||||
declare(Ch, Name) ->
|
||||
amqp_channel:call(Ch, #'queue.declare'{durable = true, queue = Name}).
|
||||
|
||||
%% Ensure that exclusive queues are cleaned up when part of ha cluster
|
||||
%% and node is killed abruptly then restarted
|
||||
clean_up_exclusive_queues(Config) ->
|
||||
QName = <<"excl">>,
|
||||
rabbit_ct_broker_helpers:set_ha_policy(Config, 0, <<".*">>, <<"all">>),
|
||||
[A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
|
||||
ChA = rabbit_ct_client_helpers:open_channel(Config, A),
|
||||
amqp_channel:call(ChA, #'queue.declare'{queue = QName,
|
||||
exclusive = true}),
|
||||
ok = rabbit_ct_broker_helpers:kill_node(Config, A),
|
||||
timer:sleep(?DELAY),
|
||||
[] = rabbit_ct_broker_helpers:rpc(Config, B, rabbit_amqqueue, list, []),
|
||||
ok = rabbit_ct_broker_helpers:start_node(Config, A),
|
||||
timer:sleep(?DELAY),
|
||||
[[],[]] = rabbit_ct_broker_helpers:rpc_all(Config, rabbit_amqqueue, list, []),
|
||||
ok.
|
||||
|
||||
consume_survives_stop(Cf) -> consume_survives(Cf, fun stop/2, true).
|
||||
consume_survives_sigkill(Cf) -> consume_survives(Cf, fun sigkill/2, true).
|
||||
consume_survives_policy(Cf) -> consume_survives(Cf, fun policy/2, true).
|
||||
auto_resume(Cf) -> consume_survives(Cf, fun sigkill/2, false).
|
||||
auto_resume_no_ccn_client(Cf) -> consume_survives(Cf, fun sigkill/2, false,
|
||||
false).
|
||||
|
||||
confirms_survive_stop(Cf) -> confirms_survive(Cf, fun stop/2).
|
||||
confirms_survive_policy(Cf) -> confirms_survive(Cf, fun policy/2).
|
||||
|
||||
rejects_survive_stop(Cf) -> rejects_survive(Cf, fun stop/2).
|
||||
rejects_survive_policy(Cf) -> rejects_survive(Cf, fun policy/2).
|
||||
|
||||
%%----------------------------------------------------------------------------
|
||||
|
||||
consume_survives(Config, DeathFun, CancelOnFailover) ->
|
||||
consume_survives(Config, DeathFun, CancelOnFailover, true).
|
||||
|
||||
consume_survives(Config,
|
||||
DeathFun, CancelOnFailover, CCNSupported) ->
|
||||
[A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
|
||||
Msgs = rabbit_ct_helpers:cover_work_factor(Config, 20000),
|
||||
Channel1 = rabbit_ct_client_helpers:open_channel(Config, A),
|
||||
Channel2 = rabbit_ct_client_helpers:open_channel(Config, B),
|
||||
Channel3 = rabbit_ct_client_helpers:open_channel(Config, C),
|
||||
|
||||
%% declare the queue on the master, mirrored to the two mirrors
|
||||
Queue = <<"test">>,
|
||||
amqp_channel:call(Channel1, #'queue.declare'{queue = Queue,
|
||||
auto_delete = false}),
|
||||
|
||||
%% start up a consumer
|
||||
ConsCh = case CCNSupported of
|
||||
true -> Channel2;
|
||||
false -> Port = rabbit_ct_broker_helpers:get_node_config(
|
||||
Config, B, tcp_port_amqp),
|
||||
open_incapable_channel(Port)
|
||||
end,
|
||||
ConsumerPid = rabbit_ha_test_consumer:create(
|
||||
ConsCh, Queue, self(), CancelOnFailover, Msgs),
|
||||
|
||||
%% send a bunch of messages from the producer
|
||||
ProducerPid = rabbit_ha_test_producer:create(Channel3, Queue,
|
||||
self(), false, Msgs),
|
||||
DeathFun(Config, A),
|
||||
%% verify that the consumer got all msgs, or die - the await_response
|
||||
%% calls throw an exception if anything goes wrong....
|
||||
ct:pal("awaiting produce ~w", [ProducerPid]),
|
||||
rabbit_ha_test_producer:await_response(ProducerPid),
|
||||
ct:pal("awaiting consumer ~w", [ConsumerPid]),
|
||||
rabbit_ha_test_consumer:await_response(ConsumerPid),
|
||||
ok.
|
||||
|
||||
confirms_survive(Config, DeathFun) ->
|
||||
[A, B, _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
|
||||
Msgs = rabbit_ct_helpers:cover_work_factor(Config, 20000),
|
||||
Node1Channel = rabbit_ct_client_helpers:open_channel(Config, A),
|
||||
Node2Channel = rabbit_ct_client_helpers:open_channel(Config, B),
|
||||
|
||||
%% declare the queue on the master, mirrored to the two mirrors
|
||||
Queue = <<"test">>,
|
||||
amqp_channel:call(Node1Channel,#'queue.declare'{queue = Queue,
|
||||
auto_delete = false,
|
||||
durable = true}),
|
||||
|
||||
%% send one message to ensure the channel is flowing
|
||||
amqp_channel:register_confirm_handler(Node1Channel, self()),
|
||||
#'confirm.select_ok'{} = amqp_channel:call(Node1Channel, #'confirm.select'{}),
|
||||
|
||||
Payload = <<"initial message">>,
|
||||
ok = amqp_channel:call(Node1Channel,
|
||||
#'basic.publish'{routing_key = Queue},
|
||||
#amqp_msg{payload = Payload}),
|
||||
|
||||
ok = receive
|
||||
#'basic.ack'{multiple = false} -> ok;
|
||||
#'basic.nack'{multiple = false} -> message_nacked
|
||||
after
|
||||
5000 -> confirm_not_received
|
||||
end,
|
||||
|
||||
%% send a bunch of messages from the producer
|
||||
ProducerPid = rabbit_ha_test_producer:create(Node2Channel, Queue,
|
||||
self(), true, Msgs),
|
||||
DeathFun(Config, A),
|
||||
rabbit_ha_test_producer:await_response(ProducerPid),
|
||||
ok.
|
||||
|
||||
rejects_survive(Config, DeathFun) ->
|
||||
[A, B, _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
|
||||
Msgs = rabbit_ct_helpers:cover_work_factor(Config, 20000),
|
||||
Node1Channel = rabbit_ct_client_helpers:open_channel(Config, A),
|
||||
Node2Channel = rabbit_ct_client_helpers:open_channel(Config, B),
|
||||
|
||||
%% declare the queue on the master, mirrored to the two mirrors
|
||||
XOverflow = ?config(overflow, Config),
|
||||
Queue = <<"test_rejects", "_", XOverflow/binary>>,
|
||||
amqp_channel:call(Node1Channel,#'queue.declare'{queue = Queue,
|
||||
auto_delete = false,
|
||||
durable = true,
|
||||
arguments = [{<<"x-max-length">>, long, 1},
|
||||
{<<"x-overflow">>, longstr, XOverflow}]}),
|
||||
|
||||
amqp_channel:register_confirm_handler(Node1Channel, self()),
|
||||
#'confirm.select_ok'{} = amqp_channel:call(Node1Channel, #'confirm.select'{}),
|
||||
|
||||
Payload = <<"there can be only one">>,
|
||||
ok = amqp_channel:call(Node1Channel,
|
||||
#'basic.publish'{routing_key = Queue},
|
||||
#amqp_msg{payload = Payload}),
|
||||
|
||||
ok = receive
|
||||
#'basic.ack'{multiple = false} -> ok;
|
||||
#'basic.nack'{multiple = false} -> message_nacked
|
||||
after
|
||||
5000 -> confirm_not_received
|
||||
end,
|
||||
|
||||
%% send a bunch of messages from the producer. They should all be nacked, as the queue is full.
|
||||
ProducerPid = rabbit_ha_test_producer:create(Node2Channel, Queue,
|
||||
self(), true, Msgs, nacks),
|
||||
DeathFun(Config, A),
|
||||
rabbit_ha_test_producer:await_response(ProducerPid),
|
||||
|
||||
{#'basic.get_ok'{}, #amqp_msg{payload = Payload}} =
|
||||
amqp_channel:call(Node2Channel, #'basic.get'{queue = Queue}),
|
||||
%% There is only one message.
|
||||
#'basic.get_empty'{} = amqp_channel:call(Node2Channel, #'basic.get'{queue = Queue}),
|
||||
ok.
|
||||
|
||||
|
||||
|
||||
stop(Config, Node) ->
|
||||
rabbit_ct_broker_helpers:stop_node_after(Config, Node, 50).
|
||||
|
||||
sigkill(Config, Node) ->
|
||||
rabbit_ct_broker_helpers:kill_node_after(Config, Node, 50).
|
||||
|
||||
policy(Config, Node)->
|
||||
Nodes = [
|
||||
atom_to_binary(N)
|
||||
|| N <- rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
|
||||
N =/= Node],
|
||||
rabbit_ct_broker_helpers:set_ha_policy(Config, Node, <<".*">>,
|
||||
{<<"nodes">>, Nodes}).
|
||||
|
||||
open_incapable_channel(NodePort) ->
|
||||
Props = [{<<"capabilities">>, table, []}],
|
||||
{ok, ConsConn} =
|
||||
amqp_connection:start(#amqp_params_network{port = NodePort,
|
||||
client_properties = Props}),
|
||||
{ok, Ch} = amqp_connection:open_channel(ConsConn),
|
||||
Ch.
|
||||
|
||||
declare_exclusive(Ch, QueueName, Args) ->
|
||||
Declare = #'queue.declare'{queue = QueueName,
|
||||
exclusive = true,
|
||||
arguments = Args
|
||||
},
|
||||
#'queue.declare_ok'{} = amqp_channel:call(Ch, Declare).
|
||||
|
||||
subscribe(Ch, QueueName) ->
|
||||
ConsumeOk = amqp_channel:call(Ch, #'basic.consume'{queue = QueueName,
|
||||
no_ack = true}),
|
||||
#'basic.consume_ok'{} = ConsumeOk,
|
||||
receive ConsumeOk -> ok after ?DELAY -> throw(consume_ok_timeout) end.
|
||||
|
||||
receive_cancels(Cancels) ->
|
||||
receive
|
||||
#'basic.cancel'{} = C ->
|
||||
receive_cancels([C|Cancels])
|
||||
after ?DELAY ->
|
||||
Cancels
|
||||
end.
|
||||
|
||||
receive_messages(All) ->
|
||||
receive
|
||||
{#'basic.deliver'{}, Msg} ->
|
||||
receive_messages([Msg|All])
|
||||
after ?DELAY ->
|
||||
lists:reverse(All)
|
||||
end.
|
|
@@ -1,248 +0,0 @@
|
|||
%% This Source Code Form is subject to the terms of the Mozilla Public
|
||||
%% License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
%%
|
||||
%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
|
||||
%%
|
||||
|
||||
-module(sync_detection_SUITE).
|
||||
|
||||
-include_lib("common_test/include/ct.hrl").
|
||||
-include_lib("amqp_client/include/amqp_client.hrl").
|
||||
|
||||
-compile(export_all).
|
||||
|
||||
-define(LOOP_RECURSION_DELAY, 100).
|
||||
|
||||
all() ->
|
||||
[
|
||||
{group, cluster_size_2},
|
||||
{group, cluster_size_3}
|
||||
].
|
||||
|
||||
groups() ->
|
||||
[
|
||||
{cluster_size_2, [], [
|
||||
follower_synchronization
|
||||
]},
|
||||
{cluster_size_3, [], [
|
||||
follower_synchronization_ttl
|
||||
]}
|
||||
].
|
||||
|
||||
%% -------------------------------------------------------------------
|
||||
%% Testsuite setup/teardown.
|
||||
%% -------------------------------------------------------------------
|
||||
|
||||
init_per_suite(Config) ->
|
||||
rabbit_ct_helpers:log_environment(),
|
||||
case rabbit_ct_broker_helpers:configured_metadata_store(Config) of
|
||||
mnesia ->
|
||||
rabbit_ct_helpers:run_setup_steps(Config);
|
||||
{khepri, _} ->
|
||||
{skip, "Classic queue mirroring not supported by Khepri"}
|
||||
end.
|
||||
|
||||
end_per_suite(Config) ->
|
||||
rabbit_ct_helpers:run_teardown_steps(Config).
|
||||
|
||||
init_per_group(cluster_size_2, Config) ->
|
||||
rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 2}]);
|
||||
init_per_group(cluster_size_3, Config) ->
|
||||
rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 3}]).
|
||||
|
||||
end_per_group(_, Config) ->
|
||||
Config.
|
||||
|
||||
init_per_testcase(Testcase, Config) ->
|
||||
rabbit_ct_helpers:testcase_started(Config, Testcase),
|
||||
ClusterSize = ?config(rmq_nodes_count, Config),
|
||||
TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
|
||||
Config1 = rabbit_ct_helpers:set_config(Config, [
|
||||
{rmq_nodes_count, ClusterSize},
|
||||
{rmq_nodes_clustered, true},
|
||||
{rmq_nodename_suffix, Testcase},
|
||||
{tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
|
||||
]),
|
||||
rabbit_ct_helpers:run_steps(Config1,
|
||||
rabbit_ct_broker_helpers:setup_steps() ++
|
||||
rabbit_ct_client_helpers:setup_steps() ++ [
|
||||
fun rabbit_ct_broker_helpers:set_ha_policy_two_pos/1,
|
||||
fun rabbit_ct_broker_helpers:set_ha_policy_two_pos_batch_sync/1
|
||||
]).
|
||||
|
||||
end_per_testcase(Testcase, Config) ->
|
||||
Config1 = rabbit_ct_helpers:run_steps(Config,
|
||||
rabbit_ct_client_helpers:teardown_steps() ++
|
||||
rabbit_ct_broker_helpers:teardown_steps()),
|
||||
rabbit_ct_helpers:testcase_finished(Config1, Testcase).
|
||||
|
||||
%% -------------------------------------------------------------------
|
||||
%% Testcases.
|
||||
%% -------------------------------------------------------------------
|
||||
|
||||
follower_synchronization(Config) ->
|
||||
[Master, Slave] = rabbit_ct_broker_helpers:get_node_configs(Config,
|
||||
nodename),
|
||||
Channel = rabbit_ct_client_helpers:open_channel(Config, Master),
|
||||
Queue = <<"ha.two.test">>,
|
||||
#'queue.declare_ok'{} =
|
||||
amqp_channel:call(Channel, #'queue.declare'{queue = Queue,
|
||||
auto_delete = false}),
|
||||
|
||||
%% The comments on the right are the queue length and the pending acks on
|
||||
%% the master.
|
||||
rabbit_ct_broker_helpers:stop_broker(Config, Slave),
|
||||
|
||||
%% We get and ack one message when the mirror is down, and check that when we
|
||||
%% start the mirror it's not marked as synced until ack the message. We also
|
||||
%% publish another message when the mirror is up.
|
||||
send_dummy_message(Channel, Queue), % 1 - 0
|
||||
{#'basic.get_ok'{delivery_tag = Tag1}, _} =
|
||||
amqp_channel:call(Channel, #'basic.get'{queue = Queue}), % 0 - 1
|
||||
|
||||
rabbit_ct_broker_helpers:start_broker(Config, Slave),
|
||||
|
||||
follower_unsynced(Master, Queue),
|
||||
send_dummy_message(Channel, Queue), % 1 - 1
|
||||
follower_unsynced(Master, Queue),
|
||||
|
||||
amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = Tag1}), % 1 - 0
|
||||
|
||||
follower_synced(Master, Queue),
|
||||
|
||||
%% We restart the mirror and we send a message, so that the mirror will only
|
||||
%% have one of the messages.
|
||||
rabbit_ct_broker_helpers:stop_broker(Config, Slave),
|
||||
rabbit_ct_broker_helpers:start_broker(Config, Slave),
|
||||
|
||||
send_dummy_message(Channel, Queue), % 2 - 0
|
||||
|
||||
follower_unsynced(Master, Queue),
|
||||
|
||||
%% We reject the message that the mirror doesn't have, and verify that it's
|
||||
%% still unsynced
|
||||
{#'basic.get_ok'{delivery_tag = Tag2}, _} =
|
||||
amqp_channel:call(Channel, #'basic.get'{queue = Queue}), % 1 - 1
|
||||
follower_unsynced(Master, Queue),
|
||||
amqp_channel:cast(Channel, #'basic.reject'{ delivery_tag = Tag2,
|
||||
requeue = true }), % 2 - 0
|
||||
follower_unsynced(Master, Queue),
|
||||
{#'basic.get_ok'{delivery_tag = Tag3}, _} =
|
||||
amqp_channel:call(Channel, #'basic.get'{queue = Queue}), % 1 - 1
|
||||
amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = Tag3}), % 1 - 0
|
||||
follower_synced(Master, Queue),
|
||||
{#'basic.get_ok'{delivery_tag = Tag4}, _} =
|
||||
amqp_channel:call(Channel, #'basic.get'{queue = Queue}), % 0 - 1
|
||||
amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = Tag4}), % 0 - 0
|
||||
follower_synced(Master, Queue).
|
||||
|
||||
follower_synchronization_ttl(Config) ->
    [Master, Slave, DLX] = rabbit_ct_broker_helpers:get_node_configs(Config,
                                                                     nodename),
    Channel = rabbit_ct_client_helpers:open_channel(Config, Master),
    DLXChannel = rabbit_ct_client_helpers:open_channel(Config, DLX),

    %% We declare a DLX queue to wait for messages to be TTL'ed
    DLXQueue = <<"dlx-queue">>,
    #'queue.declare_ok'{} =
        amqp_channel:call(Channel, #'queue.declare'{queue = DLXQueue,
                                                    auto_delete = false}),

    TestMsgTTL = 5000,
    Queue = <<"ha.two.test">>,
    %% Sadly we need fairly high numbers for the TTL because starting/stopping
    %% nodes takes a fair amount of time.
    Args = [{<<"x-message-ttl">>, long, TestMsgTTL},
            {<<"x-dead-letter-exchange">>, longstr, <<>>},
            {<<"x-dead-letter-routing-key">>, longstr, DLXQueue}],
    #'queue.declare_ok'{} =
        amqp_channel:call(Channel, #'queue.declare'{queue = Queue,
                                                    auto_delete = false,
                                                    arguments = Args}),

    follower_synced(Master, Queue),

    %% All unknown
    rabbit_ct_broker_helpers:stop_broker(Config, Slave),
    send_dummy_message(Channel, Queue),
    send_dummy_message(Channel, Queue),
    rabbit_ct_broker_helpers:start_broker(Config, Slave),
    follower_unsynced(Master, Queue),
    wait_for_messages(DLXQueue, DLXChannel, 2),
    follower_synced(Master, Queue),

    %% 1 unknown, 1 known
    rabbit_ct_broker_helpers:stop_broker(Config, Slave),
    send_dummy_message(Channel, Queue),
    rabbit_ct_broker_helpers:start_broker(Config, Slave),
    follower_unsynced(Master, Queue),
    send_dummy_message(Channel, Queue),
    follower_unsynced(Master, Queue),
    wait_for_messages(DLXQueue, DLXChannel, 2),
    follower_synced(Master, Queue),

    %% %% both known
    send_dummy_message(Channel, Queue),
    send_dummy_message(Channel, Queue),
    follower_synced(Master, Queue),
    wait_for_messages(DLXQueue, DLXChannel, 2),
    follower_synced(Master, Queue),

    ok.

send_dummy_message(Channel, Queue) ->
    Payload = <<"foo">>,
    Publish = #'basic.publish'{exchange = <<>>, routing_key = Queue},
    amqp_channel:cast(Channel, Publish, #amqp_msg{payload = Payload}).

follower_pids(Node, Queue) ->
    {ok, Q} = rpc:call(Node, rabbit_amqqueue, lookup,
                       [rabbit_misc:r(<<"/">>, queue, Queue)]),
    SSP = synchronised_slave_pids,
    [{SSP, Pids}] = rpc:call(Node, rabbit_amqqueue, info, [Q, [SSP]]),
    case Pids of
        '' -> [];
        _  -> Pids
    end.

%% The mnesia synchronization takes a while, but we don't want to wait for the
%% test to fail, since the timetrap is quite high.
wait_for_sync_status(Status, Node, Queue) ->
    Max = 90000 / ?LOOP_RECURSION_DELAY,
    wait_for_sync_status(0, Max, Status, Node, Queue).

wait_for_sync_status(N, Max, Status, Node, Queue) when N >= Max ->
    erlang:error({sync_status_max_tries_failed,
                  [{queue, Queue},
                   {node, Node},
                   {expected_status, Status},
                   {max_tried, Max}]});
wait_for_sync_status(N, Max, Status, Node, Queue) ->
    Synced = length(follower_pids(Node, Queue)) =:= 1,
    case Synced =:= Status of
        true  -> ok;
        false -> timer:sleep(?LOOP_RECURSION_DELAY),
                 wait_for_sync_status(N + 1, Max, Status, Node, Queue)
    end.

follower_synced(Node, Queue) ->
    wait_for_sync_status(true, Node, Queue).

follower_unsynced(Node, Queue) ->
    wait_for_sync_status(false, Node, Queue).

wait_for_messages(Queue, Channel, N) ->
    Sub = #'basic.consume'{queue = Queue},
    #'basic.consume_ok'{consumer_tag = CTag} = amqp_channel:call(Channel, Sub),
    receive
        #'basic.consume_ok'{} -> ok
    end,
    lists:foreach(
      fun (_) -> receive
                     {#'basic.deliver'{delivery_tag = Tag}, _Content} ->
                         amqp_channel:cast(Channel,
                                           #'basic.ack'{delivery_tag = Tag})
                 end
      end, lists:seq(1, N)),
    amqp_channel:call(Channel, #'basic.cancel'{consumer_tag = CTag}).
|
|
@@ -1,83 +0,0 @@
|
|||
-module(unit_classic_mirrored_queue_sync_throttling_SUITE).
|
||||
|
||||
-include_lib("rabbit_common/include/rabbit.hrl").
|
||||
-include_lib("rabbit_common/include/rabbit_framing.hrl").
|
||||
|
||||
-include_lib("eunit/include/eunit.hrl").
|
||||
|
||||
-compile(export_all).
|
||||
|
||||
all() ->
|
||||
[
|
||||
maybe_master_batch_send,
|
||||
get_time_diff,
|
||||
append_to_acc
|
||||
].
|
||||
|
||||
maybe_master_batch_send(_Config) ->
|
||||
SyncBatchSize = 4096,
|
||||
SyncThroughput = 2000,
|
||||
QueueLen = 10000,
|
||||
?assertEqual(
|
||||
true, %% Message reach the last one in the queue
|
||||
rabbit_mirror_queue_sync:maybe_master_batch_send({[], 0, {0, 0, SyncThroughput}, {QueueLen, QueueLen}, 0}, SyncBatchSize)),
|
||||
?assertEqual(
|
||||
true, %% # messages batched is less than batch size; and total message size has reached the batch size
|
||||
rabbit_mirror_queue_sync:maybe_master_batch_send({[], 0, {0, 0, SyncThroughput}, {SyncBatchSize, QueueLen}, 0}, SyncBatchSize)),
|
||||
TotalBytes0 = SyncThroughput + 1,
|
||||
Curr0 = 1,
|
||||
?assertEqual(
|
||||
true, %% Total batch size exceed max sync throughput
|
||||
rabbit_mirror_queue_sync:maybe_master_batch_send({[], 0, {TotalBytes0, 0, SyncThroughput}, {Curr0, QueueLen}, 0}, SyncBatchSize)),
|
||||
TotalBytes1 = 1,
|
||||
Curr1 = 1,
|
||||
?assertEqual(
|
||||
false, %% # messages batched is less than batch size; and total bytes is less than sync throughput
|
||||
rabbit_mirror_queue_sync:maybe_master_batch_send({[], 0, {TotalBytes1, 0, SyncThroughput}, {Curr1, QueueLen}, 0}, SyncBatchSize)),
|
||||
ok.
|
||||
|
||||
get_time_diff(_Config) ->
|
||||
TotalBytes0 = 100,
|
||||
Interval0 = 1000, %% ms
|
||||
MaxSyncThroughput0 = 100, %% bytes/s
|
||||
?assertEqual(%% Used throughput = 100 / 1000 * 1000 = 100 bytes/s; matched max throughput
|
||||
0, %% => no need to pause queue sync
|
||||
rabbit_mirror_queue_sync:get_time_diff(TotalBytes0, Interval0, MaxSyncThroughput0)),
|
||||
|
||||
TotalBytes1 = 100,
|
||||
Interval1 = 1000, %% ms
|
||||
MaxSyncThroughput1 = 200, %% bytes/s
|
||||
?assertEqual( %% Used throughput = 100 / 1000 * 1000 = 100 bytes/s; less than max throughput
|
||||
0, %% => no need to pause queue sync
|
||||
rabbit_mirror_queue_sync:get_time_diff(TotalBytes1, Interval1, MaxSyncThroughput1)),
|
||||
|
||||
TotalBytes2 = 100,
|
||||
Interval2 = 1000, %% ms
|
||||
MaxSyncThroughput2 = 50, %% bytes/s
|
||||
?assertEqual( %% Used throughput = 100 / 1000 * 1000 = 100 bytes/s; greater than max throughput
|
||||
1000, %% => pause queue sync for 1000 ms
|
||||
rabbit_mirror_queue_sync:get_time_diff(TotalBytes2, Interval2, MaxSyncThroughput2)),
|
||||
ok.
|
||||
|
||||
append_to_acc(_Config) ->
|
||||
Content = #content{properties = #'P_basic'{delivery_mode = 2,
|
||||
priority = 2},
|
||||
payload_fragments_rev = [[<<"1234567890">>]] %% 10 bytes
|
||||
},
|
||||
ExName = rabbit_misc:r(<<>>, exchange, <<>>),
|
||||
{ok, Msg} = mc_amqpl:message(ExName, <<>>, Content, #{id => 1}),
|
||||
BQDepth = 10,
|
||||
SyncThroughput_0 = 0,
|
||||
FoldAcc1 = {[], 0, {0, erlang:monotonic_time(), SyncThroughput_0}, {0, BQDepth}, erlang:monotonic_time()},
|
||||
{_, _, {TotalBytes1, _, _}, _, _} = rabbit_mirror_queue_sync:append_to_acc(Msg, {}, false, FoldAcc1),
|
||||
?assertEqual(0, TotalBytes1), %% Skipping calculating TotalBytes for the pending batch as SyncThroughput is 0.
|
||||
|
||||
SyncThroughput = 100,
|
||||
FoldAcc2 = {[], 0, {0, erlang:monotonic_time(), SyncThroughput}, {0, BQDepth}, erlang:monotonic_time()},
|
||||
{_, _, {TotalBytes2, _, _}, _, _} = rabbit_mirror_queue_sync:append_to_acc(Msg, {}, false, FoldAcc2),
|
||||
?assertEqual(10, TotalBytes2), %% Message size is added to existing TotalBytes
|
||||
|
||||
FoldAcc3 = {[], 0, {TotalBytes2, erlang:monotonic_time(), SyncThroughput}, {0, BQDepth}, erlang:monotonic_time()},
|
||||
{_, _, {TotalBytes3, _, _}, _, _} = rabbit_mirror_queue_sync:append_to_acc(Msg, {}, false, FoldAcc3),
|
||||
?assertEqual(TotalBytes2 + 10, TotalBytes3), %% Message size is added to existing TotalBytes
|
||||
ok.
|
|
@@ -1,28 +0,0 @@
-module(unit_classic_mirrored_queue_throughput_SUITE).

-include_lib("eunit/include/eunit.hrl").

-compile(export_all).

all() ->
    [
     default_max_sync_throughput
    ].

default_max_sync_throughput(_Config) ->
    ?assertEqual(
       0,
       rabbit_mirror_queue_misc:default_max_sync_throughput()),
    application:set_env(rabbit, mirroring_sync_max_throughput, 100),
    ?assertEqual(
       100,
       rabbit_mirror_queue_misc:default_max_sync_throughput()),
    application:set_env(rabbit, mirroring_sync_max_throughput, "100MiB"),
    ?assertEqual(
       100*1024*1024,
       rabbit_mirror_queue_misc:default_max_sync_throughput()),
    application:set_env(rabbit, mirroring_sync_max_throughput, "100MB"),
    ?assertEqual(
       100000000,
       rabbit_mirror_queue_misc:default_max_sync_throughput()),
    ok.
|
|
@@ -1,240 +0,0 @@
|
|||
%% This Source Code Form is subject to the terms of the Mozilla Public
|
||||
%% License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
%%
|
||||
%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
|
||||
%%
|
||||
|
||||
-module(unit_gm_SUITE).
|
||||
|
||||
-behaviour(gm).
|
||||
|
||||
-include_lib("common_test/include/ct.hrl").
|
||||
|
||||
-compile(export_all).
|
||||
|
||||
-define(RECEIVE_OR_THROW(Body, Bool, Error),
|
||||
receive Body ->
|
||||
true = Bool,
|
||||
passed
|
||||
after 5000 ->
|
||||
throw(Error)
|
||||
end).
|
||||
|
||||
all() ->
|
||||
[
|
||||
join_leave,
|
||||
broadcast,
|
||||
confirmed_broadcast,
|
||||
member_death,
|
||||
receive_in_order,
|
||||
unexpected_msg,
|
||||
down_in_members_change
|
||||
].
|
||||
|
||||
init_per_suite(Config) ->
|
||||
ok = application:set_env(mnesia, dir, ?config(priv_dir, Config)),
|
||||
ok = application:start(mnesia),
|
||||
{ok, FHC} = file_handle_cache:start_link(),
|
||||
unlink(FHC),
|
||||
{ok, WPS} = worker_pool_sup:start_link(),
|
||||
unlink(WPS),
|
||||
rabbit_ct_helpers:set_config(Config, [
|
||||
{file_handle_cache_pid, FHC},
|
||||
{worker_pool_sup_pid, WPS}
|
||||
]).
|
||||
|
||||
end_per_suite(Config) ->
|
||||
exit(?config(worker_pool_sup_pid, Config), shutdown),
|
||||
exit(?config(file_handle_cache_pid, Config), shutdown),
|
||||
ok = application:stop(mnesia),
|
||||
Config.
|
||||
|
||||
%% ---------------------------------------------------------------------------
|
||||
%% Functional tests
|
||||
%% ---------------------------------------------------------------------------
|
||||
|
||||
join_leave(_Config) ->
|
||||
passed = with_two_members(fun (_Pid, _Pid2) -> passed end).
|
||||
|
||||
broadcast(_Config) ->
|
||||
passed = do_broadcast(fun gm:broadcast/2).
|
||||
|
||||
confirmed_broadcast(_Config) ->
|
||||
passed = do_broadcast(fun gm:confirmed_broadcast/2).
|
||||
|
||||
member_death(_Config) ->
|
||||
passed = with_two_members(
|
||||
fun (Pid, Pid2) ->
|
||||
{ok, Pid3} = gm:start_link(
|
||||
?MODULE, ?MODULE, self(),
|
||||
fun rabbit_mnesia:execute_mnesia_transaction/1),
|
||||
passed = receive_joined(Pid3, [Pid, Pid2, Pid3],
|
||||
timeout_joining_gm_group_3),
|
||||
passed = receive_birth(Pid, Pid3, timeout_waiting_for_birth_3_1),
|
||||
passed = receive_birth(Pid2, Pid3, timeout_waiting_for_birth_3_2),
|
||||
|
||||
unlink(Pid3),
|
||||
exit(Pid3, kill),
|
||||
|
||||
%% Have to do some broadcasts to ensure that all members
|
||||
%% find out about the death.
|
||||
BFun = broadcast_fun(fun gm:confirmed_broadcast/2),
|
||||
passed = BFun(Pid, Pid2),
|
||||
passed = BFun(Pid, Pid2),
|
||||
|
||||
passed = receive_death(Pid, Pid3, timeout_waiting_for_death_3_1),
|
||||
passed = receive_death(Pid2, Pid3, timeout_waiting_for_death_3_2),
|
||||
|
||||
passed
|
||||
end).
|
||||
|
||||
receive_in_order(_Config) ->
|
||||
passed = with_two_members(
|
||||
fun (Pid, Pid2) ->
|
||||
Numbers = lists:seq(1,1000),
|
||||
[begin ok = gm:broadcast(Pid, N), ok = gm:broadcast(Pid2, N) end
|
||||
|| N <- Numbers],
|
||||
passed = receive_numbers(
|
||||
Pid, Pid, {timeout_for_msgs, Pid, Pid}, Numbers),
|
||||
passed = receive_numbers(
|
||||
Pid, Pid2, {timeout_for_msgs, Pid, Pid2}, Numbers),
|
||||
passed = receive_numbers(
|
||||
Pid2, Pid, {timeout_for_msgs, Pid2, Pid}, Numbers),
|
||||
passed = receive_numbers(
|
||||
Pid2, Pid2, {timeout_for_msgs, Pid2, Pid2}, Numbers),
|
||||
passed
|
||||
end).
|
||||
|
||||
unexpected_msg(_Config) ->
|
||||
passed = with_two_members(
|
||||
fun(Pid, _) ->
|
||||
Pid ! {make_ref(), old_gen_server_answer},
|
||||
true = erlang:is_process_alive(Pid),
|
||||
passed
|
||||
end).
|
||||
|
||||
down_in_members_change(_Config) ->
|
||||
%% Setup
|
||||
ok = gm:create_tables(),
|
||||
{ok, Pid} = gm:start_link(?MODULE, ?MODULE, self(),
|
||||
fun rabbit_mnesia:execute_mnesia_transaction/1),
|
||||
passed = receive_joined(Pid, [Pid], timeout_joining_gm_group_1),
|
||||
{ok, Pid2} = gm:start_link(?MODULE, ?MODULE, self(),
|
||||
fun rabbit_mnesia:execute_mnesia_transaction/1),
|
||||
passed = receive_joined(Pid2, [Pid, Pid2], timeout_joining_gm_group_2),
|
||||
passed = receive_birth(Pid, Pid2, timeout_waiting_for_birth_2),
|
||||
|
||||
%% Test. Simulate that the gm group is deleted (forget_group) while
|
||||
%% processing the 'DOWN' message from the neighbour
|
||||
process_flag(trap_exit, true),
|
||||
ok = meck:new(mnesia, [passthrough]),
|
||||
ok = meck:expect(mnesia, read, fun({gm_group, ?MODULE}) ->
|
||||
[];
|
||||
(Key) ->
|
||||
meck:passthrough([Key])
|
||||
end),
|
||||
gm:leave(Pid2),
|
||||
Passed = receive
|
||||
{'EXIT', Pid, shutdown} ->
|
||||
passed;
|
||||
{'EXIT', Pid, _} ->
|
||||
crashed
|
||||
after 15000 ->
|
||||
timeout
|
||||
end,
|
||||
%% Cleanup
|
||||
meck:unload(mnesia),
|
||||
process_flag(trap_exit, false),
|
||||
passed = Passed.
|
||||
|
||||
|
||||
do_broadcast(Fun) ->
|
||||
with_two_members(broadcast_fun(Fun)).
|
||||
|
||||
broadcast_fun(Fun) ->
|
||||
fun (Pid, Pid2) ->
|
||||
ok = Fun(Pid, magic_message),
|
||||
passed = receive_or_throw({msg, Pid, Pid, magic_message},
|
||||
timeout_waiting_for_msg),
|
||||
passed = receive_or_throw({msg, Pid2, Pid, magic_message},
|
||||
timeout_waiting_for_msg)
|
||||
end.
|
||||
|
||||
with_two_members(Fun) ->
|
||||
ok = gm:create_tables(),
|
||||
|
||||
{ok, Pid} = gm:start_link(?MODULE, ?MODULE, self(),
|
||||
fun rabbit_mnesia:execute_mnesia_transaction/1),
|
||||
passed = receive_joined(Pid, [Pid], timeout_joining_gm_group_1),
|
||||
|
||||
{ok, Pid2} = gm:start_link(?MODULE, ?MODULE, self(),
|
||||
fun rabbit_mnesia:execute_mnesia_transaction/1),
|
||||
passed = receive_joined(Pid2, [Pid, Pid2], timeout_joining_gm_group_2),
|
||||
passed = receive_birth(Pid, Pid2, timeout_waiting_for_birth_2),
|
||||
|
||||
passed = Fun(Pid, Pid2),
|
||||
|
||||
ok = gm:leave(Pid),
|
||||
passed = receive_death(Pid2, Pid, timeout_waiting_for_death_1),
|
||||
passed =
|
||||
receive_termination(Pid, normal, timeout_waiting_for_termination_1),
|
||||
|
||||
ok = gm:leave(Pid2),
|
||||
passed =
|
||||
receive_termination(Pid2, normal, timeout_waiting_for_termination_2),
|
||||
|
||||
receive X -> throw({unexpected_message, X})
|
||||
after 0 -> passed
|
||||
end.
|
||||
|
||||
receive_or_throw(Pattern, Error) ->
|
||||
?RECEIVE_OR_THROW(Pattern, true, Error).
|
||||
|
||||
receive_birth(From, Born, Error) ->
|
||||
?RECEIVE_OR_THROW({members_changed, From, Birth, Death},
|
||||
([Born] == Birth) andalso ([] == Death),
|
||||
Error).
|
||||
|
||||
receive_death(From, Died, Error) ->
|
||||
?RECEIVE_OR_THROW({members_changed, From, Birth, Death},
|
||||
([] == Birth) andalso ([Died] == Death),
|
||||
Error).
|
||||
|
||||
receive_joined(From, Members, Error) ->
|
||||
?RECEIVE_OR_THROW({joined, From, Members1},
|
||||
lists:usort(Members) == lists:usort(Members1),
|
||||
Error).
|
||||
|
||||
receive_termination(From, Reason, Error) ->
|
||||
?RECEIVE_OR_THROW({termination, From, Reason1},
|
||||
Reason == Reason1,
|
||||
Error).
|
||||
|
||||
receive_numbers(_Pid, _Sender, _Error, []) ->
|
||||
passed;
|
||||
receive_numbers(Pid, Sender, Error, [N | Numbers]) ->
|
||||
?RECEIVE_OR_THROW({msg, Pid, Sender, M},
|
||||
M == N,
|
||||
Error),
|
||||
receive_numbers(Pid, Sender, Error, Numbers).
|
||||
|
||||
%% -------------------------------------------------------------------
|
||||
%% gm behavior callbacks.
|
||||
%% -------------------------------------------------------------------
|
||||
|
||||
joined(Pid, Members) ->
|
||||
Pid ! {joined, self(), Members},
|
||||
ok.
|
||||
|
||||
members_changed(Pid, Births, Deaths) ->
|
||||
Pid ! {members_changed, self(), Births, Deaths},
|
||||
ok.
|
||||
|
||||
handle_msg(Pid, From, Msg) ->
|
||||
Pid ! {msg, self(), From, Msg},
|
||||
ok.
|
||||
|
||||
handle_terminate(Pid, Reason) ->
|
||||
Pid ! {termination, self(), Reason},
|
||||
ok.
|
|
@ -13,8 +13,7 @@
|
|||
|
||||
all() ->
|
||||
[
|
||||
{group, core_validators},
|
||||
{group, classic_queue_mirroring_validators}
|
||||
{group, core_validators}
|
||||
].
|
||||
|
||||
groups() ->
|
||||
|
@ -32,11 +31,6 @@ groups() ->
|
|||
delivery_limit,
|
||||
classic_queue_lazy_mode,
|
||||
length_limit_overflow_mode
|
||||
]},
|
||||
|
||||
{classic_queue_mirroring_validators, [parallel], [
|
||||
classic_queue_ha_mode,
|
||||
classic_queue_ha_params
|
||||
]}
|
||||
].
|
||||
|
||||
|
@ -51,26 +45,9 @@ init_per_suite(Config) ->
|
|||
end_per_suite(Config) ->
|
||||
rabbit_ct_helpers:run_teardown_steps(Config).
|
||||
|
||||
init_per_group(Group = classic_queue_mirroring_validators, Config) ->
|
||||
case rabbit_ct_broker_helpers:configured_metadata_store(Config) of
|
||||
mnesia ->
|
||||
Config1 = rabbit_ct_helpers:set_config(
|
||||
Config, [
|
||||
{rmq_nodename_suffix, Group},
|
||||
{rmq_nodes_count, 1}
|
||||
]),
|
||||
rabbit_ct_helpers:run_steps(
|
||||
Config1,
|
||||
rabbit_ct_broker_helpers:setup_steps());
|
||||
{khepri, _} ->
|
||||
{skip, "Classic queue mirroring not supported by Khepri"}
|
||||
end;
|
||||
init_per_group(_, Config) ->
|
||||
Config.
|
||||
|
||||
end_per_group(classic_queue_mirroring_validators, Config) ->
|
||||
rabbit_ct_helpers:run_steps(Config,
|
||||
rabbit_ct_broker_helpers:teardown_steps());
|
||||
end_per_group(_, Config) ->
|
||||
Config.
|
||||
|
||||
|
@ -132,60 +109,6 @@ length_limit_overflow_mode(_Config) ->
|
|||
%% invalid values
|
||||
[<<"unknown">>, <<"publish">>, <<"overflow">>, <<"mode">>]).
|
||||
|
||||
|
||||
%% -------------------------------------------------------------------
|
||||
%% CMQ Validators
|
||||
%% -------------------------------------------------------------------
|
||||
|
||||
classic_queue_ha_mode(Config) ->
|
||||
rabbit_ct_broker_helpers:rpc(Config, 0,
|
||||
?MODULE, classic_queue_ha_mode1, [Config]).
|
||||
|
||||
classic_queue_ha_mode1(_Config) ->
|
||||
?assertEqual(ok, rabbit_mirror_queue_misc:validate_policy([
|
||||
{<<"ha-mode">>, <<"exactly">>},
|
||||
{<<"ha-params">>, 2}
|
||||
])),
|
||||
|
||||
?assertEqual(ok, rabbit_mirror_queue_misc:validate_policy([
|
||||
{<<"ha-mode">>, <<"nodes">>},
|
||||
{<<"ha-params">>, [<<"rabbit@host1">>, <<"rabbit@host2">>]}
|
||||
])),
|
||||
|
||||
?assertEqual(ok, rabbit_mirror_queue_misc:validate_policy([
|
||||
{<<"ha-mode">>, <<"all">>}
|
||||
])),
|
||||
|
||||
?assertMatch({error, _, _}, rabbit_mirror_queue_misc:validate_policy([
|
||||
{<<"ha-mode">>, <<"lolwut">>},
|
||||
{<<"ha-params">>, 2}
|
||||
])).
|
||||
|
||||
classic_queue_ha_params(Config) ->
|
||||
rabbit_ct_broker_helpers:rpc(Config, 0,
|
||||
?MODULE, classic_queue_ha_mode1, [Config]).
|
||||
|
||||
classic_queue_ha_params1(_Config) ->
|
||||
?assertMatch({error, _, _}, rabbit_mirror_queue_misc:validate_policy([
|
||||
{<<"ha-mode">>, <<"exactly">>},
|
||||
{<<"ha-params">>, <<"2">>}
|
||||
])),
|
||||
|
||||
?assertEqual(ok, rabbit_mirror_queue_misc:validate_policy([
|
||||
{<<"ha-mode">>, <<"nodes">>},
|
||||
{<<"ha-params">>, <<"lolwut">>}
|
||||
])),
|
||||
|
||||
?assertEqual(ok, rabbit_mirror_queue_misc:validate_policy([
|
||||
{<<"ha-mode">>, <<"all">>},
|
||||
{<<"ha-params">>, <<"lolwut">>}
|
||||
])),
|
||||
|
||||
?assertMatch({error, _, _}, rabbit_mirror_queue_misc:validate_policy([
|
||||
{<<"ha-mode">>, <<"lolwut">>},
|
||||
{<<"ha-params">>, 2}
|
||||
])).
|
||||
|
||||
%%
|
||||
%% Implementation
|
||||
%%
|
||||
|
|
|
@ -41,7 +41,6 @@ groups() ->
|
|||
vhost_failure_forces_connection_closure,
|
||||
vhost_failure_forces_connection_closure_on_failure_node,
|
||||
node_starts_with_dead_vhosts,
|
||||
node_starts_with_dead_vhosts_with_mirrors,
|
||||
vhost_creation_idempotency,
|
||||
vhost_deletion
|
||||
],
|
||||
|
@ -101,17 +100,6 @@ end_per_group(_Group, Config) ->
|
|||
rabbit_ct_client_helpers:teardown_steps() ++
|
||||
rabbit_ct_broker_helpers:teardown_steps()).
|
||||
|
||||
init_per_testcase(node_starts_with_dead_vhosts_with_mirrors = Testcase, Config) ->
|
||||
case lists:any(fun(B) -> B end,
|
||||
rabbit_ct_broker_helpers:rpc_all(
|
||||
Config, rabbit_feature_flags, is_enabled,
|
||||
[khepri_db])) of
|
||||
true ->
|
||||
{skip, "Classic queue mirroring not supported by Khepri"};
|
||||
false ->
|
||||
rabbit_ct_helpers:testcase_started(Config, Testcase),
|
||||
Config
|
||||
end;
|
||||
init_per_testcase(Testcase, Config) ->
|
||||
rabbit_ct_helpers:testcase_started(Config, Testcase),
|
||||
Config.
|
||||
|
@ -262,71 +250,6 @@ node_starts_with_dead_vhosts(Config) ->
|
|||
rabbit_vhost_sup_sup, is_vhost_alive, [VHost2]),
|
||||
?AWAIT_TIMEOUT).
|
||||
|
||||
node_starts_with_dead_vhosts_with_mirrors(Config) ->
|
||||
VHost1 = <<"vhost1">>,
|
||||
VHost2 = <<"vhost2">>,
|
||||
|
||||
set_up_vhost(Config, VHost1),
|
||||
set_up_vhost(Config, VHost2),
|
||||
|
||||
true = rabbit_ct_broker_helpers:rpc(Config, 1,
|
||||
rabbit_vhost_sup_sup, is_vhost_alive, [VHost1]),
|
||||
true = rabbit_ct_broker_helpers:rpc(Config, 1,
|
||||
rabbit_vhost_sup_sup, is_vhost_alive, [VHost2]),
|
||||
[] = rabbit_ct_broker_helpers:rpc(Config, 1,
|
||||
rabbit_vhost_sup_sup, check, []),
|
||||
|
||||
Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 0, VHost1),
|
||||
{ok, Chan} = amqp_connection:open_channel(Conn),
|
||||
|
||||
QName = <<"node_starts_with_dead_vhosts_with_mirrors-q-0">>,
|
||||
amqp_channel:call(Chan, #'queue.declare'{queue = QName, durable = true}),
|
||||
ok = rabbit_ct_broker_helpers:rpc(Config, 0,
|
||||
rabbit_policy, set,
|
||||
[VHost1, <<"mirror">>, <<".*">>, [{<<"ha-mode">>, <<"all">>}],
|
||||
0, <<"queues">>, <<"acting-user">>]),
|
||||
|
||||
%% Wait for the queue to start a mirror
|
||||
?awaitMatch([_],
|
||||
begin
|
||||
{ok, Q0} = rabbit_ct_broker_helpers:rpc(
|
||||
Config, 0,
|
||||
rabbit_amqqueue, lookup,
|
||||
[rabbit_misc:r(VHost1, queue, QName)], infinity),
|
||||
amqqueue:get_sync_slave_pids(Q0)
|
||||
end,
|
||||
?AWAIT_TIMEOUT),
|
||||
|
||||
rabbit_ct_client_helpers:publish(Chan, QName, 10),
|
||||
|
||||
{ok, Q} = rabbit_ct_broker_helpers:rpc(
|
||||
Config, 0,
|
||||
rabbit_amqqueue, lookup,
|
||||
[rabbit_misc:r(VHost1, queue, QName)], infinity),
|
||||
|
||||
Node1 = rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename),
|
||||
|
||||
[Pid] = amqqueue:get_sync_slave_pids(Q),
|
||||
|
||||
Node1 = node(Pid),
|
||||
|
||||
DataStore1 = rabbit_ct_broker_helpers:rpc(
|
||||
Config, 1, rabbit_vhost, msg_store_dir_path, [VHost1]),
|
||||
|
||||
rabbit_ct_broker_helpers:stop_node(Config, 1),
|
||||
|
||||
file:write_file(filename:join(DataStore1, "recovery.dets"), <<"garbage">>),
|
||||
|
||||
%% The node should start without a vhost
|
||||
ok = rabbit_ct_broker_helpers:start_node(Config, 1),
|
||||
|
||||
?awaitMatch(true,
|
||||
rabbit_ct_broker_helpers:rpc(Config, 1, rabbit, is_running, []),
|
||||
?AWAIT_TIMEOUT),
|
||||
|
||||
?assertEqual(true, rabbit_ct_broker_helpers:rpc(Config, 1,
|
||||
rabbit_vhost_sup_sup, is_vhost_alive, [VHost2])).
|
||||
|
||||
vhost_creation_idempotency(Config) ->
|
||||
VHost = <<"idempotency-test">>,
|
||||
try
|
||||
|
|
|
@@ -47,7 +47,6 @@ defmodule RabbitMQ.CLI.Core.DocGuide do
  Macros.defguide("erlang_versions", path_segment: "which-erlang")
  Macros.defguide("feature_flags")
  Macros.defguide("firehose")
  Macros.defguide("mirroring", path_segment: "ha")
  Macros.defguide("logging")
  Macros.defguide("management")
  Macros.defguide("memory_use")
|
||||
|
|
|
@@ -1,52 +0,0 @@
|
|||
## This Source Code Form is subject to the terms of the Mozilla Public
|
||||
## License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
## file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
##
|
||||
## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
|
||||
|
||||
defmodule RabbitMQ.CLI.Ctl.Commands.CancelSyncQueueCommand do
|
||||
alias RabbitMQ.CLI.Core.DocGuide
|
||||
|
||||
@behaviour RabbitMQ.CLI.CommandBehaviour
|
||||
use RabbitMQ.CLI.DefaultOutput
|
||||
|
||||
def merge_defaults(args, opts) do
|
||||
{args, Map.merge(%{vhost: "/"}, opts)}
|
||||
end
|
||||
|
||||
use RabbitMQ.CLI.Core.AcceptsOnePositionalArgument
|
||||
|
||||
use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
|
||||
|
||||
def run([queue], %{vhost: vhost, node: node_name}) do
|
||||
:rpc.call(
|
||||
node_name,
|
||||
:rabbit_mirror_queue_misc,
|
||||
:cancel_sync_queue,
|
||||
[:rabbit_misc.r(vhost, :queue, queue)],
|
||||
:infinity
|
||||
)
|
||||
end
|
||||
|
||||
def usage, do: "cancel_sync_queue [--vhost <vhost>] <queue>"
|
||||
|
||||
def usage_additional() do
|
||||
[
|
||||
["<queue>", "Queue name"]
|
||||
]
|
||||
end
|
||||
|
||||
def usage_doc_guides() do
|
||||
[
|
||||
DocGuide.mirroring()
|
||||
]
|
||||
end
|
||||
|
||||
def help_section(), do: :replication
|
||||
|
||||
def description(), do: "Instructs a synchronising mirrored queue to stop synchronising itself"
|
||||
|
||||
def banner([queue], %{vhost: vhost, node: _node}) do
|
||||
"Stopping synchronising queue '#{queue}' in vhost '#{vhost}' ..."
|
||||
end
|
||||
end
|
|
@@ -23,13 +23,8 @@ defmodule RabbitMQ.CLI.Ctl.Commands.ListQueuesCommand do
|
|||
message_bytes_unacknowledged message_bytes_ram message_bytes_persistent
|
||||
head_message_timestamp disk_reads disk_writes consumers
|
||||
consumer_utilisation consumer_capacity
|
||||
memory slave_pids synchronised_slave_pids state type
|
||||
leader members online
|
||||
mirror_pids synchronised_mirror_pids)a
|
||||
@info_key_aliases [
|
||||
{:mirror_pids, :slave_pids},
|
||||
{:synchronised_mirror_pids, :synchronised_slave_pids}
|
||||
]
|
||||
memory state type
|
||||
leader members online)a
|
||||
|
||||
def description(), do: "Lists queues and their properties"
|
||||
|
||||
|
@ -67,7 +62,7 @@ defmodule RabbitMQ.CLI.Ctl.Commands.ListQueuesCommand do
|
|||
end
|
||||
|
||||
def validate(args, _opts) do
|
||||
case InfoKeys.validate_info_keys(args, @info_keys, @info_key_aliases) do
|
||||
case InfoKeys.validate_info_keys(args, @info_keys) do
|
||||
{:ok, _} -> :ok
|
||||
err -> err
|
||||
end
|
||||
|
@ -91,7 +86,7 @@ defmodule RabbitMQ.CLI.Ctl.Commands.ListQueuesCommand do
|
|||
other -> other
|
||||
end
|
||||
|
||||
info_keys = InfoKeys.prepare_info_keys(args, @info_key_aliases)
|
||||
info_keys = InfoKeys.prepare_info_keys(args)
|
||||
broker_keys = InfoKeys.broker_keys(info_keys)
|
||||
|
||||
Helpers.with_nodes_in_cluster(node_name, fn nodes ->
|
||||
|
|
|
@@ -14,9 +14,7 @@ defmodule RabbitMQ.CLI.Ctl.Commands.ListUnresponsiveQueuesCommand do
  @behaviour RabbitMQ.CLI.CommandBehaviour

  @info_keys ~w(name durable auto_delete
                arguments pid recoverable_slaves
                recoverable_mirrors type)a
  @info_key_aliases [recoverable_mirrors: :recoverable_slaves]
                arguments pid)a

  def info_keys(), do: @info_keys

@@ -41,7 +39,7 @@ defmodule RabbitMQ.CLI.Ctl.Commands.ListUnresponsiveQueuesCommand do
  end

  def validate(args, _opts) do
    case InfoKeys.validate_info_keys(args, @info_keys, @info_key_aliases) do
    case InfoKeys.validate_info_keys(args, @info_keys) do
      {:ok, _} -> :ok
      err -> err
    end
@@ -56,7 +54,7 @@ defmodule RabbitMQ.CLI.Ctl.Commands.ListUnresponsiveQueuesCommand do
        queue_timeout: qtimeout,
        local: local_opt
      }) do
    info_keys = InfoKeys.prepare_info_keys(args, @info_key_aliases)
    info_keys = InfoKeys.prepare_info_keys(args)
    broker_keys = InfoKeys.broker_keys(info_keys)
    queue_timeout = qtimeout * 1000
|
||||
|
||||
|
|
|
@@ -5,8 +5,6 @@
## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.

defmodule RabbitMQ.CLI.Ctl.Commands.RemoveClassicQueueMirroringFromPoliciesCommand do
  alias RabbitMQ.CLI.Core.DocGuide

  @behaviour RabbitMQ.CLI.CommandBehaviour

  use RabbitMQ.CLI.Core.MergesNoDefaults
@@ -28,9 +26,7 @@ defmodule RabbitMQ.CLI.Ctl.Commands.RemoveClassicQueueMirroringFromPoliciesComma
  def usage, do: "remove_classic_queue_mirroring_from_policies"

  def usage_doc_guides() do
    [
      DocGuide.mirroring()
    ]
    []
  end

  def help_section(), do: :operations
|
||||
|
|
|
@@ -1,56 +0,0 @@
|
|||
## This Source Code Form is subject to the terms of the Mozilla Public
|
||||
## License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
## file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
##
|
||||
## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
|
||||
|
||||
defmodule RabbitMQ.CLI.Ctl.Commands.SyncQueueCommand do
|
||||
alias RabbitMQ.CLI.Core.DocGuide
|
||||
|
||||
@behaviour RabbitMQ.CLI.CommandBehaviour
|
||||
|
||||
def merge_defaults(args, opts) do
|
||||
{args, Map.merge(%{vhost: "/"}, opts)}
|
||||
end
|
||||
|
||||
use RabbitMQ.CLI.Core.AcceptsOnePositionalArgument
|
||||
use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
|
||||
|
||||
def run([queue], %{vhost: vhost, node: node_name}) do
|
||||
:rpc.call(
|
||||
node_name,
|
||||
:rabbit_mirror_queue_misc,
|
||||
:sync_queue,
|
||||
[:rabbit_misc.r(vhost, :queue, queue)],
|
||||
:infinity
|
||||
)
|
||||
end
|
||||
|
||||
use RabbitMQ.CLI.DefaultOutput
|
||||
|
||||
def usage do
|
||||
"sync_queue [--vhost <vhost>] <queue>"
|
||||
end
|
||||
|
||||
def usage_additional() do
|
||||
[
|
||||
["<queue>", "Name of the queue to synchronise"]
|
||||
]
|
||||
end
|
||||
|
||||
def usage_doc_guides() do
|
||||
[
|
||||
DocGuide.mirroring()
|
||||
]
|
||||
end
|
||||
|
||||
def help_section(), do: :replication
|
||||
|
||||
def description(),
|
||||
do:
|
||||
"Instructs a mirrored queue with unsynchronised mirrors (follower replicas) to synchronise them"
|
||||
|
||||
def banner([queue], %{vhost: vhost, node: _node}) do
|
||||
"Synchronising queue '#{queue}' in vhost '#{vhost}' ..."
|
||||
end
|
||||
end
|
|
@@ -12,7 +12,6 @@ defmodule RabbitMQ.CLI.Queues.Commands.RebalanceCommand do

  @known_types [
    "all",
    "classic",
    "quorum",
    "stream"
  ]
@@ -45,7 +44,7 @@ defmodule RabbitMQ.CLI.Queues.Commands.RebalanceCommand do
        :ok

      false ->
        {:error, "type #{type} is not supported. Try one of all, classic, quorum, stream."}
        {:error, "type #{type} is not supported. Try one of all, quorum, stream."}
    end
  end

@@ -58,11 +57,11 @@ defmodule RabbitMQ.CLI.Queues.Commands.RebalanceCommand do

  def usage,
    do:
      "rebalance < all | classic | quorum | stream > [--vhost-pattern <pattern>] [--queue-pattern <pattern>]"
      "rebalance < all | quorum | stream > [--vhost-pattern <pattern>] [--queue-pattern <pattern>]"

  def usage_additional do
    [
      ["<type>", "queue type, must be one of: all, classic, quorum, stream"],
      ["<type>", "queue type, must be one of: all, quorum, stream"],
      ["--queue-pattern <pattern>", "regular expression to match queue names"],
      ["--vhost-pattern <pattern>", "regular expression to match virtual host names"]
    ]
@@ -83,10 +82,6 @@ defmodule RabbitMQ.CLI.Queues.Commands.RebalanceCommand do
    "Re-balancing leaders of all replicated queues..."
  end

  def banner([:classic], _) do
    "Re-balancing leaders of replicated (mirrored, non-exclusive) classic queues..."
  end

  def banner([:quorum], _) do
    "Re-balancing leaders of quorum queues..."
  end
|
||||
|
|
|
@@ -1,113 +0,0 @@
|
|||
## This Source Code Form is subject to the terms of the Mozilla Public
|
||||
## License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
## file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
##
|
||||
## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
|
||||
|
||||
defmodule RabbitMQ.CLI.Upgrade.Commands.AwaitOnlineSynchronizedMirrorCommand do
|
||||
alias RabbitMQ.CLI.Core.DocGuide
|
||||
import RabbitMQ.CLI.Core.Config, only: [output_less?: 1]
|
||||
|
||||
@behaviour RabbitMQ.CLI.CommandBehaviour
|
||||
|
||||
@default_timeout 120_000
|
||||
|
||||
use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
|
||||
use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
|
||||
|
||||
def merge_defaults(args, opts) do
|
||||
timeout =
|
||||
case opts[:timeout] do
|
||||
nil -> @default_timeout
|
||||
:infinity -> @default_timeout
|
||||
val -> val
|
||||
end
|
||||
|
||||
{args, Map.put(opts, :timeout, timeout)}
|
||||
end
|
||||
|
||||
def run([], %{node: node_name, timeout: timeout}) do
|
||||
rpc_timeout = timeout + 500
|
||||
|
||||
case :rabbit_misc.rpc_call(node_name, :rabbit_nodes, :is_single_node_cluster, [], rpc_timeout) do
|
||||
# if target node is the only one in the cluster, the command makes little sense
|
||||
# and false positives can be misleading
|
||||
true ->
|
||||
{:ok, :single_node_cluster}
|
||||
|
||||
false ->
|
||||
case :rabbit_misc.rpc_call(
|
||||
node_name,
|
||||
:rabbit_upgrade_preparation,
|
||||
:await_online_synchronised_mirrors,
|
||||
[timeout],
|
||||
rpc_timeout
|
||||
) do
|
||||
{:error, _} = err ->
|
||||
err
|
||||
|
||||
{:error, _, _} = err ->
|
||||
err
|
||||
|
||||
{:badrpc, _} = err ->
|
||||
err
|
||||
|
||||
true ->
|
||||
:ok
|
||||
|
||||
false ->
|
||||
{:error,
|
||||
"time is up, no synchronised mirror came online for at least some classic mirrored queues"}
|
||||
end
|
||||
|
||||
other ->
|
||||
other
|
||||
end
|
||||
end
|
||||
|
||||
def output({:ok, :single_node_cluster}, %{formatter: "json"}) do
|
||||
{:ok,
|
||||
%{
|
||||
"result" => "ok",
|
||||
"message" =>
|
||||
"Target node seems to be the only one in a single node cluster, the check does not apply"
|
||||
}}
|
||||
end
|
||||
|
||||
def output({:error, msg}, %{node: node_name, formatter: "json"}) do
|
||||
{:error, %{"result" => "error", "node" => node_name, "message" => msg}}
|
||||
end
|
||||
|
||||
def output({:ok, :single_node_cluster}, opts) do
|
||||
case output_less?(opts) do
|
||||
true ->
|
||||
:ok
|
||||
|
||||
false ->
|
||||
{:ok,
|
||||
"Target node seems to be the only one in a single node cluster, the command does not apply"}
|
||||
end
|
||||
end
|
||||
|
||||
use RabbitMQ.CLI.DefaultOutput
|
||||
|
||||
def usage, do: "await_online_synchronized_mirror"
|
||||
|
||||
def usage_doc_guides() do
|
||||
[
|
||||
DocGuide.mirroring(),
|
||||
DocGuide.upgrade()
|
||||
]
|
||||
end
|
||||
|
||||
def help_section, do: :upgrade
|
||||
|
||||
def description() do
|
||||
"Waits for all classic mirrored queues hosted on the target node to have at least one synchronized mirror online. " <>
|
||||
"This makes sure that if target node is shut down, there will be an up-to-date mirror to promote."
|
||||
end
|
||||
|
||||
def banner([], %{timeout: timeout}) do
|
||||
"Will wait for a synchronised mirror be online for all classic mirrored queues for #{round(timeout / 1000)} seconds..."
|
||||
end
|
||||
end
|
|
@@ -1,65 +0,0 @@
|
|||
## This Source Code Form is subject to the terms of the Mozilla Public
|
||||
## License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
## file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
##
|
||||
## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
|
||||
|
||||
defmodule CancelSyncQueueCommandTest do
|
||||
use ExUnit.Case, async: false
|
||||
import TestHelper
|
||||
|
||||
@command RabbitMQ.CLI.Ctl.Commands.CancelSyncQueueCommand
|
||||
|
||||
@vhost "/"
|
||||
|
||||
setup_all do
|
||||
RabbitMQ.CLI.Core.Distribution.start()
|
||||
|
||||
start_rabbitmq_app()
|
||||
|
||||
on_exit([], fn ->
|
||||
start_rabbitmq_app()
|
||||
end)
|
||||
|
||||
:ok
|
||||
end
|
||||
|
||||
setup do
|
||||
{:ok,
|
||||
opts: %{
|
||||
node: get_rabbit_hostname(),
|
||||
vhost: @vhost
|
||||
}}
|
||||
end
|
||||
|
||||
test "validate: specifying no queue name is reported as an error", context do
|
||||
assert @command.validate([], context[:opts]) ==
|
||||
{:validation_failure, :not_enough_args}
|
||||
end
|
||||
|
||||
test "validate: specifying two queue names is reported as an error", context do
|
||||
assert @command.validate(["q1", "q2"], context[:opts]) ==
|
||||
{:validation_failure, :too_many_args}
|
||||
end
|
||||
|
||||
test "validate: specifying three queue names is reported as an error", context do
|
||||
assert @command.validate(["q1", "q2", "q3"], context[:opts]) ==
|
||||
{:validation_failure, :too_many_args}
|
||||
end
|
||||
|
||||
test "validate: specifying one queue name succeeds", context do
|
||||
assert @command.validate(["q1"], context[:opts]) == :ok
|
||||
end
|
||||
|
||||
test "run: request to a non-existent RabbitMQ node returns a nodedown" do
|
||||
opts = %{node: :jake@thedog, vhost: @vhost, timeout: 200}
|
||||
assert match?({:badrpc, _}, @command.run(["q1"], opts))
|
||||
end
|
||||
|
||||
test "banner", context do
|
||||
s = @command.banner(["q1"], context[:opts])
|
||||
|
||||
assert s =~ ~r/Stopping synchronising queue/
|
||||
assert s =~ ~r/q1/
|
||||
end
|
||||
end
|
|
@@ -156,25 +156,7 @@ defmodule SetPolicyCommandTest do
  test "ha policy validation", context do
    vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
    context = Map.put(context, :opts, vhost_opts)
    pass_validation(context, "{\"ha-mode\":\"all\"}")
    fail_validation(context, "{\"ha-mode\":\"made_up\"}")

    fail_validation(context, "{\"ha-mode\":\"nodes\"}")
    fail_validation(context, "{\"ha-mode\":\"nodes\",\"ha-params\":2}")
    fail_validation(context, "{\"ha-mode\":\"nodes\",\"ha-params\":[\"a\",2]}")
    pass_validation(context, "{\"ha-mode\":\"nodes\",\"ha-params\":[\"a\",\"b\"]}")
    fail_validation(context, "{\"ha-params\":[\"a\",\"b\"]}")

    fail_validation(context, "{\"ha-mode\":\"exactly\"}")
    fail_validation(context, "{\"ha-mode\":\"exactly\",\"ha-params\":[\"a\",\"b\"]}")
    pass_validation(context, "{\"ha-mode\":\"exactly\",\"ha-params\":2}")
    fail_validation(context, "{\"ha-params\":2}")

    pass_validation(context, "{\"ha-mode\":\"all\",\"ha-sync-mode\":\"manual\"}")
    pass_validation(context, "{\"ha-mode\":\"all\",\"ha-sync-mode\":\"automatic\"}")
    fail_validation(context, "{\"ha-mode\":\"all\",\"ha-sync-mode\":\"made_up\"}")
    fail_validation(context, "{\"ha-sync-mode\":\"manual\"}")
    fail_validation(context, "{\"ha-sync-mode\":\"automatic\"}")
    fail_validation(context, "{\"ha-mode\":\"all\"}")
  end

  @tag pattern: "ha_", key: "ha_policy_test", vhost: @vhost
|
||||
|
|
|
@@ -1,65 +0,0 @@
|
|||
## This Source Code Form is subject to the terms of the Mozilla Public
|
||||
## License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
## file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
##
|
||||
## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
|
||||
|
||||
defmodule SyncQueueCommandTest do
|
||||
use ExUnit.Case, async: false
|
||||
import TestHelper
|
||||
|
||||
@command RabbitMQ.CLI.Ctl.Commands.SyncQueueCommand
|
||||
|
||||
@vhost "/"
|
||||
|
||||
setup_all do
|
||||
RabbitMQ.CLI.Core.Distribution.start()
|
||||
|
||||
start_rabbitmq_app()
|
||||
|
||||
on_exit([], fn ->
|
||||
start_rabbitmq_app()
|
||||
end)
|
||||
|
||||
:ok
|
||||
end
|
||||
|
||||
setup do
|
||||
{:ok,
|
||||
opts: %{
|
||||
node: get_rabbit_hostname(),
|
||||
vhost: @vhost
|
||||
}}
|
||||
end
|
||||
|
||||
test "validate: specifying no queue name is reported as an error", context do
|
||||
assert @command.validate([], context[:opts]) ==
|
||||
{:validation_failure, :not_enough_args}
|
||||
end
|
||||
|
||||
test "validate: specifying two queue names is reported as an error", context do
|
||||
assert @command.validate(["q1", "q2"], context[:opts]) ==
|
||||
{:validation_failure, :too_many_args}
|
||||
end
|
||||
|
||||
test "validate: specifying three queue names is reported as an error", context do
|
||||
assert @command.validate(["q1", "q2", "q3"], context[:opts]) ==
|
||||
{:validation_failure, :too_many_args}
|
||||
end
|
||||
|
||||
test "validate: specifying one queue name succeeds", context do
|
||||
assert @command.validate(["q1"], context[:opts]) == :ok
|
||||
end
|
||||
|
||||
test "run: request to a non-existent RabbitMQ node returns a nodedown" do
|
||||
opts = %{node: :jake@thedog, vhost: @vhost, timeout: 200}
|
||||
assert match?({:badrpc, _}, @command.run(["q1"], opts))
|
||||
end
|
||||
|
||||
test "banner", context do
|
||||
s = @command.banner(["q1"], context[:opts])
|
||||
|
||||
assert s =~ ~r/Synchronising queue/
|
||||
assert s =~ ~r/q1/
|
||||
end
|
||||
end
|
|
@@ -1,45 +0,0 @@
|
|||
## This Source Code Form is subject to the terms of the Mozilla Public
|
||||
## License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
## file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
##
|
||||
## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
|
||||
|
||||
defmodule RabbitMQ.CLI.Queues.Commands.CheckIfNodeIsMirrorSyncCriticalCommandTest do
|
||||
use ExUnit.Case, async: false
|
||||
import TestHelper
|
||||
|
||||
@command RabbitMQ.CLI.Queues.Commands.CheckIfNodeIsMirrorSyncCriticalCommand
|
||||
|
||||
setup_all do
|
||||
RabbitMQ.CLI.Core.Distribution.start()
|
||||
|
||||
:ok
|
||||
end
|
||||
|
||||
setup context do
|
||||
{:ok,
|
||||
opts: %{
|
||||
node: get_rabbit_hostname(),
|
||||
timeout: context[:test_timeout] || 30000
|
||||
}}
|
||||
end
|
||||
|
||||
test "validate: accepts no positional arguments" do
|
||||
assert @command.validate([], %{}) == :ok
|
||||
end
|
||||
|
||||
test "validate: any positional arguments fail validation" do
|
||||
assert @command.validate(["quorum-queue-a"], %{}) == {:validation_failure, :too_many_args}
|
||||
|
||||
assert @command.validate(["quorum-queue-a", "two"], %{}) ==
|
||||
{:validation_failure, :too_many_args}
|
||||
|
||||
assert @command.validate(["quorum-queue-a", "two", "three"], %{}) ==
|
||||
{:validation_failure, :too_many_args}
|
||||
end
|
||||
|
||||
@tag test_timeout: 3000
|
||||
test "run: targeting an unreachable node throws a badrpc" do
|
||||
assert match?({:badrpc, _}, @command.run([], %{node: :jake@thedog, vhost: "/", timeout: 200}))
|
||||
end
|
||||
end
|
|
@@ -1,44 +0,0 @@
|
|||
## This Source Code Form is subject to the terms of the Mozilla Public
|
||||
## License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
## file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
##
|
||||
## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
|
||||
|
||||
defmodule AwaitOnlineSynchronizedMirrorsCommandTest do
|
||||
use ExUnit.Case, async: false
|
||||
import TestHelper
|
||||
|
||||
@command RabbitMQ.CLI.Upgrade.Commands.AwaitOnlineSynchronizedMirrorCommand
|
||||
|
||||
setup_all do
|
||||
RabbitMQ.CLI.Core.Distribution.start()
|
||||
|
||||
:ok
|
||||
end
|
||||
|
||||
setup context do
|
||||
{:ok,
|
||||
opts: %{
|
||||
node: get_rabbit_hostname(),
|
||||
timeout: context[:test_timeout] || 5000
|
||||
}}
|
||||
end
|
||||
|
||||
test "merge_defaults: overrides a timeout" do
|
||||
assert @command.merge_defaults([], %{}) == {[], %{timeout: 120_000}}
|
||||
end
|
||||
|
||||
test "validate: accepts no positional arguments" do
|
||||
assert @command.validate(["extra-arg"], %{}) == {:validation_failure, :too_many_args}
|
||||
end
|
||||
|
||||
test "validate: succeeds with no positional arguments" do
|
||||
assert @command.validate([], %{}) == :ok
|
||||
end
|
||||
|
||||
@tag test_timeout: 3000
|
||||
test "run: targeting an unreachable node throws a badrpc", context do
|
||||
opts = %{node: :jake@thedog, timeout: 200}
|
||||
assert match?({:badrpc, _}, @command.run([], Map.merge(context[:opts], opts)))
|
||||
end
|
||||
end
|
|
@@ -95,11 +95,6 @@
         clear_policy/4,
         set_operator_policy/6,
         clear_operator_policy/3,
         set_ha_policy/4, set_ha_policy/5,
         set_ha_policy_all/1,
         set_ha_policy_all/2,
         set_ha_policy_two_pos/1,
         set_ha_policy_two_pos_batch_sync/1,

         set_parameter/5,
         set_parameter/6,
|
||||
|
@@ -2093,50 +2088,6 @@ clear_operator_policy(Config, Node, Name) ->
|
|||
rpc(Config, Node,
|
||||
rabbit_policy, delete_op, [<<"/">>, Name, <<"acting-user">>]).
|
||||
|
||||
set_ha_policy(Config, Node, Pattern, Policy) ->
|
||||
set_ha_policy(Config, Node, Pattern, Policy, []).
|
||||
|
||||
set_ha_policy(Config, Node, Pattern, Policy, Extra) ->
|
||||
set_policy(Config, Node, Pattern, Pattern, <<"queues">>,
|
||||
ha_policy(Policy) ++ Extra).
|
||||
|
||||
ha_policy(<<"all">>) -> [{<<"ha-mode">>, <<"all">>}];
|
||||
ha_policy({Mode, Params}) -> [{<<"ha-mode">>, Mode},
|
||||
{<<"ha-params">>, Params}].
|
||||
|
||||
set_ha_policy_all(Config) ->
|
||||
set_ha_policy(Config, 0, <<".*">>, <<"all">>),
|
||||
Config.
|
||||
|
||||
set_ha_policy_all(Config, Extra) ->
|
||||
set_ha_policy(Config, 0, <<".*">>, <<"all">>, Extra),
|
||||
Config.
|
||||
|
||||
set_ha_policy_two_pos(Config) ->
|
||||
Members =
|
||||
[atom_to_binary(N)
|
||||
|| N <- get_node_configs(Config, nodename)],
|
||||
TwoNodes = [M || M <- lists:sublist(Members, 2)],
|
||||
set_ha_policy(Config, 0, <<"^ha.two.">>, {<<"nodes">>, TwoNodes},
|
||||
[{<<"ha-promote-on-shutdown">>, <<"always">>}]),
|
||||
set_ha_policy(Config, 0, <<"^ha.auto.">>, {<<"nodes">>, TwoNodes},
|
||||
[{<<"ha-sync-mode">>, <<"automatic">>},
|
||||
{<<"ha-promote-on-shutdown">>, <<"always">>}]),
|
||||
Config.
|
||||
|
||||
set_ha_policy_two_pos_batch_sync(Config) ->
|
||||
Members =
|
||||
[atom_to_binary(N)
|
||||
|| N <- get_node_configs(Config, nodename)],
|
||||
TwoNodes = [M || M <- lists:sublist(Members, 2)],
|
||||
set_ha_policy(Config, 0, <<"^ha.two.">>, {<<"nodes">>, TwoNodes},
|
||||
[{<<"ha-promote-on-shutdown">>, <<"always">>}]),
|
||||
set_ha_policy(Config, 0, <<"^ha.auto.">>, {<<"nodes">>, TwoNodes},
|
||||
[{<<"ha-sync-mode">>, <<"automatic">>},
|
||||
{<<"ha-sync-batch-size">>, 200},
|
||||
{<<"ha-promote-on-shutdown">>, <<"always">>}]),
|
||||
Config.
|
||||
|
||||
%% -------------------------------------------------------------------
|
||||
%% Parameter helpers.
|
||||
%% -------------------------------------------------------------------
|
||||
|
|
|
@ -66,7 +66,6 @@ def all_beam_files(name = "all_beam_files"):
|
|||
"src/rabbit_mgmt_wm_health_check_alarms.erl",
|
||||
"src/rabbit_mgmt_wm_health_check_certificate_expiration.erl",
|
||||
"src/rabbit_mgmt_wm_health_check_local_alarms.erl",
|
||||
"src/rabbit_mgmt_wm_health_check_node_is_mirror_sync_critical.erl",
|
||||
"src/rabbit_mgmt_wm_health_check_node_is_quorum_critical.erl",
|
||||
"src/rabbit_mgmt_wm_health_check_port_listener.erl",
|
||||
"src/rabbit_mgmt_wm_health_check_protocol_listener.erl",
|
||||
|
@ -199,7 +198,6 @@ def all_test_beam_files(name = "all_test_beam_files"):
|
|||
"src/rabbit_mgmt_wm_health_check_alarms.erl",
|
||||
"src/rabbit_mgmt_wm_health_check_certificate_expiration.erl",
|
||||
"src/rabbit_mgmt_wm_health_check_local_alarms.erl",
|
||||
"src/rabbit_mgmt_wm_health_check_node_is_mirror_sync_critical.erl",
|
||||
"src/rabbit_mgmt_wm_health_check_node_is_quorum_critical.erl",
|
||||
"src/rabbit_mgmt_wm_health_check_port_listener.erl",
|
||||
"src/rabbit_mgmt_wm_health_check_protocol_listener.erl",
|
||||
|
@ -422,7 +420,6 @@ def all_srcs(name = "all_srcs"):
|
|||
"src/rabbit_mgmt_wm_health_check_alarms.erl",
|
||||
"src/rabbit_mgmt_wm_health_check_certificate_expiration.erl",
|
||||
"src/rabbit_mgmt_wm_health_check_local_alarms.erl",
|
||||
"src/rabbit_mgmt_wm_health_check_node_is_mirror_sync_critical.erl",
|
||||
"src/rabbit_mgmt_wm_health_check_node_is_quorum_critical.erl",
|
||||
"src/rabbit_mgmt_wm_health_check_port_listener.erl",
|
||||
"src/rabbit_mgmt_wm_health_check_protocol_listener.erl",
|
||||
|
|
|
@@ -559,9 +559,7 @@ vary: accept, accept-encoding, origin</pre>
    <td>X</td>
    <td class="path">/api/queues/<i>vhost</i>/<i>name</i>/actions</td>
    <td>
      Actions that can be taken on a queue. POST a body like:
      <pre>{"action":"sync"}</pre> Currently the actions which are
      supported are <code>sync</code> and <code>cancel_sync</code>.
      Actions that can be taken on a queue. Currently no actions are supported.
    </td>
  </tr>
|
||||
|
||||
|
@@ -1071,19 +1069,6 @@ or:
      otherwise responds with a 503 Service Unavailable.
    </td>
  </tr>
  <tr>
    <td>X</td>
    <td></td>
    <td></td>
    <td></td>
    <td class="path">/api/health/checks/node-is-mirror-sync-critical</td>
    <td>
      Checks if there are classic mirrored queues without synchronised mirrors online
      (queues that would potentially lose data if the target node is shut down).
      Responds a 200 OK if there are no such classic mirrored queues,
      otherwise responds with a 503 Service Unavailable.
    </td>
  </tr>
|
||||
<tr>
|
||||
<td>X</td>
|
||||
<td></td>
|
||||
|
|
|
@ -205,36 +205,6 @@ function args_to_features(obj) {
|
|||
return res;
|
||||
}
|
||||
|
||||
function fmt_mirrors(queue) {
|
||||
var synced = queue.synchronised_slave_nodes || [];
|
||||
var unsynced = queue.slave_nodes || [];
|
||||
unsynced = jQuery.grep(unsynced,
|
||||
function (node, i) {
|
||||
return jQuery.inArray(node, synced) == -1;
|
||||
});
|
||||
var res = '';
|
||||
if (synced.length > 0) {
|
||||
res += ' <abbr title="Synchronised mirrors: ' + synced + '">+' +
|
||||
synced.length + '</abbr>';
|
||||
}
|
||||
if (synced.length == 0 && unsynced.length > 0) {
|
||||
res += ' <abbr title="There are no synchronised mirrors">+0</abbr>';
|
||||
}
|
||||
if (unsynced.length > 0) {
|
||||
res += ' <abbr class="warning" title="Unsynchronised mirrors: ' +
|
||||
unsynced + '">+' + unsynced.length + '</abbr>';
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
function fmt_sync_state(queue) {
|
||||
var res = '<p><b>Syncing: ';
|
||||
res += (queue.messages == 0) ? 100 : Math.round(100 * queue.sync_messages /
|
||||
queue.messages);
|
||||
res += '%</b></p>';
|
||||
return res;
|
||||
}
|
||||
|
||||
function fmt_members(queue) {
|
||||
var res = '';
|
||||
var isMajority = (queue.online.length >= (Math.floor(queue.members.length / 2) + 1));
|
||||
|
|
|
@ -471,18 +471,6 @@ var HELP = {
|
|||
|
||||
'binary-use' : '<p>Binary accounting is not exact; binaries are shared between processes (and thus the same binary might be counted in more than one section), and the VM does not allow us to track binaries that are not associated with processes (so some binary use might not appear at all).</p>',
|
||||
|
||||
'policy-ha-mode' : 'One of <code>all</code> (mirror to all nodes in the cluster), <code>exactly</code> (mirror to a set number of nodes) or <code>nodes</code> (mirror to an explicit list of nodes). If you choose one of the latter two, you must also set <code>ha-params</code>.',
|
||||
|
||||
'policy-ha-params' : 'Absent if <code>ha-mode</code> is <code>all</code>, a number\
|
||||
if <code>ha-mode</code> is <code>exactly</code>, or a list\
|
||||
of strings if <code>ha-mode</code> is <code>nodes</code>.',
|
||||
|
||||
'policy-ha-sync-mode' : 'One of <code>manual</code> or <code>automatic</code>. <a target="_blank" href="https://www.rabbitmq.com/ha.html#unsynchronised-mirrors">Learn more</a>',
|
||||
|
||||
'policy-ha-promote-on-shutdown' : 'One of <code>when-synced</code> or <code>always</code>. <a target="_blank" href="https://www.rabbitmq.com/ha.html#unsynchronised-mirrors">Learn more</a>',
|
||||
|
||||
'policy-ha-promote-on-failure' : 'One of <code>when-synced</code> or <code>always</code>. <a target="_blank" href="https://www.rabbitmq.com/ha.html#unsynchronised-mirrors">Learn more</a>',
|
||||
|
||||
'policy-federation-upstream-set' :
|
||||
'A string; only if the federation plugin is enabled. Chooses the name of a set of upstreams to use with federation, or "all" to use all upstreams. Incompatible with <code>federation-upstream</code>.',
|
||||
|
||||
|
|
|
@ -6,8 +6,7 @@
|
|||
</p>
|
||||
<% } else { %>
|
||||
<%
|
||||
var sections = {'queue_procs' : ['classic', 'Classic queues (masters)'],
|
||||
'queue_slave_procs' : ['classic', 'Classic queues (mirrors)'],
|
||||
var sections = {'queue_procs' : ['classic', 'Classic queues'],
|
||||
'quorum_queue_procs' : ['quorum', 'Quorum queues'],
|
||||
'quorum_queue_dlx_procs' : ['quorum', 'Dead letter workers'],
|
||||
'stream_queue_procs' : ['stream', 'Stream queues'],
|
||||
|
@ -29,8 +28,7 @@
|
|||
<div class="box">
|
||||
<%
|
||||
var key = [[{name: 'Classic Queues', colour: 'classic',
|
||||
keys: [['queue_procs', 'queues'],
|
||||
['queue_slave_procs', 'mirrors']]},
|
||||
keys: [['queue_procs', 'queues']]},
|
||||
{name: 'Quorum Queues', colour: 'quorum',
|
||||
keys: [['quorum_queue_procs', 'quorum'],
|
||||
['quorum_queue_dlx_procs', 'dead letter workers']]},
|
||||
|
|
|
@ -6,8 +6,7 @@
|
|||
</p>
|
||||
<% } else { %>
|
||||
<%
|
||||
var sections = {'queue_procs' : ['classic', 'Classic queues (masters)'],
|
||||
'queue_slave_procs' : ['classic', 'Classic queues (mirrors)'],
|
||||
var sections = {'queue_procs' : ['classic', 'Classic queues'],
|
||||
'quorum_queue_procs' : ['quorum', 'Quorum queues'],
|
||||
'quorum_queue_dlx_procs' : ['quorum', 'Dead letter workers'],
|
||||
'stream_queue_procs' : ['stream', 'Stream queues'],
|
||||
|
@ -36,8 +35,7 @@
|
|||
<div class="box">
|
||||
<%
|
||||
var key = [[{name: 'Classic Queues', colour: 'classic',
|
||||
keys: [['queue_procs', 'queues'],
|
||||
['queue_slave_procs', 'mirrors']]},
|
||||
keys: [['queue_procs', 'queues']]},
|
||||
{name: 'Quorum Queues', colour: 'quorum',
|
||||
keys: [['quorum_queue_procs','quorum'],
|
||||
['quorum_queue_dlx_procs', 'dead letter workers']]},
|
||||
|
|
|
@ -107,18 +107,12 @@
|
|||
<span class="argument-link" field="definition" key="expires" type="number">Auto expire</span> </br>
|
||||
<span class="argument-link" field="definition" key="dead-letter-exchange" type="string">Dead letter exchange</span> |
|
||||
<span class="argument-link" field="definition" key="dead-letter-routing-key" type="string">Dead letter routing key</span><br/>
|
||||
<span class="argument-link" field="definition" key="message-ttl" type="number">Message TTL</span><span class="help" id="queue-message-ttl"></span></br>
|
||||
<span class="argument-link" field="definition" key="message-ttl" type="number">Message TTL</span><span class="help" id="queue-message-ttl"></span> |
|
||||
<span class="argument-link" field="definition" key="consumer-timeout" type="number">Consumer Timeout</span><span class="help" id="queue-consumer-timeout"></span></br>
|
||||
</td>
|
||||
<tr>
|
||||
<td>Queues [Classic]</td>
|
||||
<td>
|
||||
<span class="argument-link" field="definition" key="ha-mode" type="string">HA mode</span> <span class="help" id="policy-ha-mode"></span> |
|
||||
<span class="argument-link" field="definition" key="ha-params" type="number">HA params</span> <span class="help" id="policy-ha-params"></span> |
|
||||
<span class="argument-link" field="definition" key="ha-sync-mode" type="string">HA sync mode</span> <span class="help" id="policy-ha-sync-mode"></span> </br>
|
||||
<span class="argument-link" field="definition" key="ha-promote-on-shutdown" type="string" value="">HA mirror promotion on shutdown</span> <span class="help" id="policy-ha-promote-on-shutdown"></span> |
|
||||
<span class="argument-link" field="definition" key="ha-promote-on-failure" type="string" value="">HA mirror promotion on failure</span> <span class="help" id="policy-ha-promote-on-failure"></span>
|
||||
</br>
|
||||
<span class="argument-link" field="definition" key="queue-version" type="number">Version</span> <span class="help" id="queue-version"></span> |
|
||||
<span class="argument-link" field="definition" key="queue-master-locator" type="string">Master locator</span></br>
|
||||
</td>
|
||||
|
@ -281,9 +275,6 @@
|
|||
<td>Queues [Classic]</td>
|
||||
<td>
|
||||
<span class="argument-link" field="definitionop" key="expires" type="number">Auto expire</span> |
|
||||
<span class="argument-link" field="definitionop" key="ha-mode" type="string">HA mode</span> <span class="help" id="policy-ha-mode"></span> |
|
||||
<span class="argument-link" field="definitionop" key="ha-params" type="number">HA params</span> <span class="help" id="policy-ha-params"></span> |
|
||||
<span class="argument-link" field="definitionop" key="ha-sync-mode" type="string">HA sync mode</span> <span class="help" id="policy-ha-sync-mode"></span> </br>
|
||||
<span class="argument-link" field="definitionop" key="max-length" type="number">Max length</span> |
|
||||
<span class="argument-link" field="definitionop" key="max-length-bytes" type="number">Max length bytes</span> |
|
||||
<span class="argument-link" field="definitionop" key="message-ttl" type="number">Message TTL</span> |
|
||||
|
|
|
@@ -71,53 +71,6 @@
<% } %>
</td>
</tr>
<% } else { %>
<% if (!queue.exclusive) { %>
<tr>
<th>Mirrors</th>
<td>
<%
var has_unsynced_node = false;
for (var i in queue.slave_nodes) {
var node = queue.slave_nodes[i];
%>
<%
if (jQuery.inArray(node, queue.synchronised_slave_nodes) == -1) {
has_unsynced_node = true;
%>
<%= fmt_node(node) %> <b>(unsynchronised)</b>
<% } else { %>
<%= fmt_node(node) %>
<% } %>
<br/>
<% } %>
<% if (queue.state == 'syncing') { %>
<table>
<tr>
<td>
<%= fmt_sync_state(queue) %>
</td>
<td>
<form action="#/queues/actions" method="post">
<input type="hidden" name="vhost" value="<%= fmt_string(queue.vhost) %>"/>
<input type="hidden" name="name" value="<%= fmt_string(queue.name) %>"/>
<input type="hidden" name="action" value="cancel_sync"/>
<input type="submit" value="Cancel" id="action-button" />
</form>
</td>
</tr>
</table>
<% } else if (has_unsynced_node) { %>
<form action="#/queues/actions" method="post">
<input type="hidden" name="vhost" value="<%= fmt_string(queue.vhost) %>"/>
<input type="hidden" name="name" value="<%= fmt_string(queue.name) %>"/>
<input type="hidden" name="action" value="sync"/>
<input type="submit" value="Synchronise" id="action-button" />
</form>
<% } %>
</td>
</tr>
<% } %>
<% } %>
<% } %>
</table>

@@ -129,11 +129,6 @@
<% } %>
<% if (queue.hasOwnProperty('members')) { %>
<%= fmt_members(queue) %>
<% } else { %>
<%= fmt_mirrors(queue) %>
<% if (queue.state == 'syncing') { %>
<%= fmt_sync_state(queue) %>
<% } %>
<% } %>
</td>
<% } %>

@@ -197,7 +197,6 @@ dispatcher() ->
     {"/health/checks/port-listener/:port", rabbit_mgmt_wm_health_check_port_listener, []},
     {"/health/checks/protocol-listener/:protocol", rabbit_mgmt_wm_health_check_protocol_listener, []},
     {"/health/checks/virtual-hosts", rabbit_mgmt_wm_health_check_virtual_hosts, []},
     {"/health/checks/node-is-mirror-sync-critical", rabbit_mgmt_wm_health_check_node_is_mirror_sync_critical, []},
     {"/health/checks/node-is-quorum-critical", rabbit_mgmt_wm_health_check_node_is_quorum_critical, []},
     {"/reset", rabbit_mgmt_wm_reset, []},
     {"/reset/:node", rabbit_mgmt_wm_reset, []},

@@ -1,54 +0,0 @@
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
%%

%% An HTTP API counterpart of 'rabbitmq-diagnostics check_if_node_is_quorum_critical'
-module(rabbit_mgmt_wm_health_check_node_is_mirror_sync_critical).

-export([init/2, to_json/2, content_types_provided/2, is_authorized/2]).
-export([resource_exists/2]).
-export([variances/2]).

-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").

%%--------------------------------------------------------------------

init(Req, _State) ->
    {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.

variances(Req, Context) ->
    {[<<"accept-encoding">>, <<"origin">>], Req, Context}.

content_types_provided(ReqData, Context) ->
    {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.

resource_exists(ReqData, Context) ->
    {true, ReqData, Context}.

to_json(ReqData, Context) ->
    case rabbit_nodes:is_single_node_cluster() of
        true ->
            rabbit_mgmt_util:reply([{status, ok},
                                    {reason, <<"single node cluster">>}], ReqData, Context);
        false ->
            case rabbit_amqqueue:list_local_mirrored_classic_without_synchronised_mirrors_for_cli() of
                [] ->
                    rabbit_mgmt_util:reply([{status, ok}], ReqData, Context);
                Qs when length(Qs) > 0 ->
                    Msg = <<"There are classic mirrored queues without online synchronised mirrors">>,
                    failure(Msg, Qs, ReqData, Context)
            end
    end.

failure(Message, Qs, ReqData, Context) ->
    {Response, ReqData1, Context1} = rabbit_mgmt_util:reply([{status, failed},
                                                             {reason, Message},
                                                             {queues, Qs}],
                                                            ReqData, Context),
    {stop, cowboy_req:reply(503, #{}, Response, ReqData1), Context1}.

is_authorized(ReqData, Context) ->
    rabbit_mgmt_util:is_authorized(ReqData, Context).
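
A minimal, stand-alone sketch of how the endpoint served by the module above could be probed over HTTP before its removal. The host, port 15672 and guest/guest credentials are assumptions for illustration, not part of the commit.

#!/usr/bin/env escript
%% Probe the (now removed) mirror-sync health check and print the result.
%% Per the handler above, a 200 with {"status":"ok"} is expected when no
%% classic mirrored queue is missing a synchronised mirror, and a 503 with
%% {"status":"failed", ...} otherwise.
main(_) ->
    {ok, _} = application:ensure_all_started(inets),
    URL = "http://localhost:15672/api/health/checks/node-is-mirror-sync-critical",
    Auth = "Basic " ++ base64:encode_to_string("guest:guest"),
    {ok, {{_, Code, _}, _, Body}} =
        httpc:request(get, {URL, [{"authorization", Auth}]}, [], []),
    io:format("HTTP ~p: ~s~n", [Code, Body]).
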
@@ -67,15 +67,5 @@ raise_not_found(ReqData, Context) ->
                           Context).
%%--------------------------------------------------------------------

action(<<"sync">>, Q, ReqData, Context) when ?is_amqqueue(Q) ->
    QPid = amqqueue:get_pid(Q),
    spawn(fun() -> rabbit_amqqueue:sync_mirrors(QPid) end),
    {true, ReqData, Context};

action(<<"cancel_sync">>, Q, ReqData, Context) when ?is_amqqueue(Q) ->
    QPid = amqqueue:get_pid(Q),
    _ = rabbit_amqqueue:cancel_sync_mirrors(QPid),
    {true, ReqData, Context};

action(Else, _Q, ReqData, Context) ->
    rabbit_mgmt_util:bad_request({unknown, Else}, ReqData, Context).
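
With the two clauses above removed, a "sync" or "cancel_sync" action posted to the queue actions endpoint presumably falls through to the remaining bad_request clause. A hedged, stand-alone sketch of such a request; the host, port, credentials and the queue name "q" are assumptions, not taken from the commit.

#!/usr/bin/env escript
%% POST an action to the management API and print the response; a 400 is
%% expected for actions the handler no longer recognises.
main(_) ->
    {ok, _} = application:ensure_all_started(inets),
    URL = "http://localhost:15672/api/queues/%2F/q/actions",
    Auth = "Basic " ++ base64:encode_to_string("guest:guest"),
    Req = {URL, [{"authorization", Auth}], "application/json", "{\"action\":\"sync\"}"},
    {ok, {{_, Code, _}, _, Body}} = httpc:request(post, Req, [], []),
    io:format("HTTP ~p: ~s~n", [Code, Body]).
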
@@ -22,8 +22,7 @@

all() ->
    [
     {group, non_parallel_tests},
     {group, non_parallel_tests_mirroring}
     {group, non_parallel_tests}
    ].

groups() ->
@@ -55,11 +54,6 @@ groups() ->
        qq_replicas_delete,
        qq_replicas_grow,
        qq_replicas_shrink
       ]},
     {non_parallel_tests_mirroring, [
        multi_node_case1_test,
        ha_queue_hosted_on_other_node,
        ha_queue_with_multiple_consumers
       ]}
    ].

@@ -97,21 +91,12 @@ end_per_suite(Config) ->
    rabbit_ct_helpers:run_teardown_steps(Config,
                                         rabbit_ct_broker_helpers:teardown_steps()).

init_per_group(non_parallel_tests_mirroring, Config) ->
    case rabbit_ct_broker_helpers:configured_metadata_store(Config) of
        mnesia ->
            Config;
        {khepri, _} ->
            {skip, "Classic queue mirroring not supported by Khepri"}
    end;
init_per_group(_, Config) ->
    Config.

end_per_group(_, Config) ->
    Config.

init_per_testcase(multi_node_case1_test = Testcase, Config) ->
    rabbit_ct_helpers:testcase_started(Config, Testcase);
init_per_testcase(Testcase, Config) ->
    rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, clear_all_table_data, []),
    rabbit_ct_broker_helpers:rpc(Config, 1, ?MODULE, clear_all_table_data, []),

@@ -120,9 +105,6 @@ init_per_testcase(Testcase, Config) ->
    Config1 = rabbit_ct_helpers:set_config(Config, {conn, Conn}),
    rabbit_ct_helpers:testcase_started(Config1, Testcase).

end_per_testcase(multi_node_case1_test = Testcase, Config) ->
    rabbit_ct_broker_helpers:close_all_connections(Config, 0, <<"clustering_SUITE:end_per_testcase">>),
    rabbit_ct_helpers:testcase_finished(Config, Testcase);
end_per_testcase(Testcase, Config) ->
    rabbit_ct_client_helpers:close_connection(?config(conn, Config)),
    rabbit_ct_broker_helpers:close_all_connections(Config, 0, <<"clustering_SUITE:end_per_testcase">>),

@@ -137,107 +119,6 @@ list_cluster_nodes_test(Config) ->
    ?assertEqual(2, length(http_get(Config, "/nodes"))),
    passed.

multi_node_case1_test(Config) ->
    Nodename1 = rabbit_data_coercion:to_binary(get_node_config(Config, 0, nodename)),
    Nodename2 = rabbit_data_coercion:to_binary(get_node_config(Config, 1, nodename)),
    Policy = [{pattern, <<".*">>},
              {definition, [{'ha-mode', <<"all">>}]}],
    http_put(Config, "/policies/%2F/HA", Policy, [?CREATED, ?NO_CONTENT]),
    http_delete(Config, "/queues/%2F/multi-node-test-queue", [?NO_CONTENT, ?NOT_FOUND]),

    Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 1),
    {ok, Chan} = amqp_connection:open_channel(Conn),
    _ = queue_declare(Chan, <<"multi-node-test-queue">>),
    Q = wait_for_mirrored_queue(Config, "/queues/%2F/multi-node-test-queue"),

    ?assert(lists:member(maps:get(node, Q), [Nodename1, Nodename2])),
    [Mirror] = maps:get(slave_nodes, Q),
    [Mirror] = maps:get(synchronised_slave_nodes, Q),
    ?assert(lists:member(Mirror, [Nodename1, Nodename2])),

    %% restart node2 so that queue master migrates
    restart_node(Config, 1),

    Q2 = wait_for_mirrored_queue(Config, "/queues/%2F/multi-node-test-queue"),
    http_delete(Config, "/queues/%2F/multi-node-test-queue", ?NO_CONTENT),
    http_delete(Config, "/policies/%2F/HA", ?NO_CONTENT),

    ?assert(lists:member(maps:get(node, Q2), [Nodename1, Nodename2])),

    rabbit_ct_client_helpers:close_connection(Conn),

    passed.

ha_queue_hosted_on_other_node(Config) ->
    Policy = [{pattern, <<".*">>},
              {definition, [{'ha-mode', <<"all">>}]}],
    http_put(Config, "/policies/%2F/HA", Policy, [?CREATED, ?NO_CONTENT]),

    Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 1),
    {ok, Chan} = amqp_connection:open_channel(Conn),
    _ = queue_declare_durable(Chan, <<"ha-queue">>),
    _ = wait_for_mirrored_queue(Config, "/queues/%2F/ha-queue"),

    {ok, Chan2} = amqp_connection:open_channel(?config(conn, Config)),
    consume(Chan, <<"ha-queue">>),

    timer:sleep(5100),
    force_stats(),
    Res = http_get(Config, "/queues/%2F/ha-queue"),

    % assert some basic data is there
    [Cons] = maps:get(consumer_details, Res),
    #{} = maps:get(channel_details, Cons), % channel details proplist must not be empty
    0 = maps:get(prefetch_count, Cons), % check one of the augmented properties
    <<"ha-queue">> = maps:get(name, Res),

    amqp_channel:close(Chan),
    amqp_channel:close(Chan2),
    rabbit_ct_client_helpers:close_connection(Conn),

    http_delete(Config, "/queues/%2F/ha-queue", ?NO_CONTENT),
    http_delete(Config, "/policies/%2F/HA", ?NO_CONTENT),

    ok.

ha_queue_with_multiple_consumers(Config) ->
    Policy = [{pattern, <<".*">>},
              {definition, [{'ha-mode', <<"all">>}]}],
    http_put(Config, "/policies/%2F/HA", Policy, [?CREATED, ?NO_CONTENT]),

    {ok, Chan} = amqp_connection:open_channel(?config(conn, Config)),
    _ = queue_declare_durable(Chan, <<"ha-queue3">>),
    _ = wait_for_mirrored_queue(Config, "/queues/%2F/ha-queue3"),

    consume(Chan, <<"ha-queue3">>),
    force_stats(),

    {ok, Chan2} = amqp_connection:open_channel(?config(conn, Config)),
    consume(Chan2, <<"ha-queue3">>),

    timer:sleep(5100),
    force_stats(),

    Res = http_get(Config, "/queues/%2F/ha-queue3"),

    % assert some basic data is there
    [C1, C2] = maps:get(consumer_details, Res),
    % channel details proplist must not be empty
    #{} = maps:get(channel_details, C1),
    #{} = maps:get(channel_details, C2),
    % check one of the augmented properties
    0 = maps:get(prefetch_count, C1),
    0 = maps:get(prefetch_count, C2),
    <<"ha-queue3">> = maps:get(name, Res),

    amqp_channel:close(Chan),
    amqp_channel:close(Chan2),

    http_delete(Config, "/queues/%2F/ha-queue3", ?NO_CONTENT),
    http_delete(Config, "/policies/%2F/HA", ?NO_CONTENT),

    ok.

qq_replicas_add(Config) ->
    Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 0),
    {ok, Chan} = amqp_connection:open_channel(Conn),

@@ -903,9 +784,6 @@ queue_bind(Chan, Ex, Q, Key) ->
                                  routing_key = Key},
    #'queue.bind_ok'{} = amqp_channel:call(Chan, Binding).

wait_for_mirrored_queue(Config, Path) ->
    wait_for_queue(Config, Path, [slave_nodes, synchronised_slave_nodes]).

wait_for_queue(Config, Path) ->
    wait_for_queue(Config, Path, []).

@@ -218,23 +218,6 @@ queue_bind(Chan, Ex, Q, Key) ->
                                  routing_key = Key},
    #'queue.bind_ok'{} = amqp_channel:call(Chan, Binding).

wait_for(Config, Path) ->
    wait_for(Config, Path, [slave_nodes, synchronised_slave_nodes]).

wait_for(Config, Path, Keys) ->
    wait_for(Config, Path, Keys, 1000).

wait_for(_Config, Path, Keys, 0) ->
    exit({timeout, {Path, Keys}});

wait_for(Config, Path, Keys, Count) ->
    Res = http_get(Config, Path),
    case present(Keys, Res) of
        false -> timer:sleep(10),
                 wait_for(Config, Path, Keys, Count - 1);
        true -> Res
    end.

present(Keys, Res) ->
    lists:all(fun (Key) ->
                      X = pget(Key, Res),

@@ -363,7 +363,7 @@ memory_test(Config) ->
    Result = http_get(Config, Path, ?OK),
    assert_keys([memory], Result),
    Keys = [total, connection_readers, connection_writers, connection_channels,
            connection_other, queue_procs, queue_slave_procs, plugins,
            connection_other, queue_procs, plugins,
            other_proc, mnesia, mgmt_db, msg_index, other_ets, binary, code,
            atom, other_system, allocated_unused, reserved_unallocated],
    assert_keys(Keys, maps:get(memory, Result)),

@@ -2139,8 +2139,6 @@ queue_purge_test(Config) ->

queue_actions_test(Config) ->
    http_put(Config, "/queues/%2F/q", #{}, {group, '2xx'}),
    http_post(Config, "/queues/%2F/q/actions", [{action, sync}], {group, '2xx'}),
    http_post(Config, "/queues/%2F/q/actions", [{action, cancel_sync}], {group, '2xx'}),
    http_post(Config, "/queues/%2F/q/actions", [{action, change_colour}], ?BAD_REQUEST),
    http_delete(Config, "/queues/%2F/q", {group, '2xx'}),
    passed.

@@ -2703,8 +2701,7 @@ format_output_test(Config) ->
    assert_list([#{name => <<"test0">>,
                   consumer_capacity => 0,
                   consumer_utilisation => 0,
                   exclusive_consumer_tag => null,
                   recoverable_slaves => null}], http_get(Config, "/queues", ?OK)),
                   exclusive_consumer_tag => null}], http_get(Config, "/queues", ?OK)),
    http_delete(Config, "/queues/%2F/test0", {group, '2xx'}),
    http_delete(Config, "/vhosts/vh129", {group, '2xx'}),
    passed.

@@ -35,13 +35,11 @@ groups() ->
     {single_node, [], [
        alarms_test,
        local_alarms_test,
        is_quorum_critical_single_node_test,
        is_mirror_sync_critical_single_node_test]}
        is_quorum_critical_single_node_test]}
    ].

all_tests() -> [
    health_checks_test,
    is_mirror_sync_critical_test,
    virtual_hosts_test,
    protocol_listener_test,
    port_listener_test,

@@ -85,30 +83,9 @@ init_per_testcase(Testcase, Config) when Testcase == is_quorum_critical_test ->
        _ ->
            rabbit_ct_helpers:testcase_started(Config, Testcase)
    end;
init_per_testcase(Testcase, Config)
  when Testcase == is_mirror_sync_critical_single_node_test
       orelse Testcase == is_mirror_sync_critical_test ->
    case rabbit_ct_helpers:is_mixed_versions() of
        true ->
            {skip, "not mixed versions compatible"};
        _ ->
            case rabbit_ct_broker_helpers:configured_metadata_store(Config) of
                mnesia ->
                    rabbit_ct_helpers:testcase_started(Config, Testcase);
                {khepri, _} ->
                    {skip, "Classic queue mirroring not supported by Khepri"}
            end
    end;
init_per_testcase(Testcase, Config) ->
    rabbit_ct_helpers:testcase_started(Config, Testcase).

end_per_testcase(is_mirror_sync_critical_test = Testcase, Config) ->
    [_, Server2, Server3] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
    _ = rabbit_ct_broker_helpers:start_node(Config, Server2),
    _ = rabbit_ct_broker_helpers:start_node(Config, Server3),
    ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"ha">>),
    rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_queues, []),
    rabbit_ct_helpers:testcase_finished(Config, Testcase);
end_per_testcase(Testcase, Config) ->
    rabbit_ct_helpers:testcase_finished(Config, Testcase).

@@ -122,7 +99,6 @@ health_checks_test(Config) ->
    http_get(Config, io_lib:format("/health/checks/port-listener/~tp", [Port]), ?OK),
    http_get(Config, "/health/checks/protocol-listener/http", ?OK),
    http_get(Config, "/health/checks/virtual-hosts", ?OK),
    http_get(Config, "/health/checks/node-is-mirror-sync-critical", ?OK),
    http_get(Config, "/health/checks/node-is-quorum-critical", ?OK),
    passed.

@@ -227,63 +203,6 @@ is_quorum_critical_test(Config) ->

    passed.

is_mirror_sync_critical_single_node_test(Config) ->
    Check0 = http_get(Config, "/health/checks/node-is-mirror-sync-critical", ?OK),
    ?assertEqual(<<"single node cluster">>, maps:get(reason, Check0)),
    ?assertEqual(<<"ok">>, maps:get(status, Check0)),

    ok = rabbit_ct_broker_helpers:set_policy(
           Config, 0, <<"ha">>, <<"is_mirror_sync.*">>, <<"queues">>,
           [{<<"ha-mode">>, <<"all">>}]),
    Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
    Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
    QName = <<"is_mirror_sync_critical_single_node_test">>,
    ?assertEqual({'queue.declare_ok', QName, 0, 0},
                 amqp_channel:call(Ch, #'queue.declare'{queue = QName,
                                                        durable = true,
                                                        auto_delete = false,
                                                        arguments = []})),
    Check1 = http_get(Config, "/health/checks/node-is-mirror-sync-critical", ?OK),
    ?assertEqual(<<"single node cluster">>, maps:get(reason, Check1)),

    passed.

is_mirror_sync_critical_test(Config) ->
    Path = "/health/checks/node-is-mirror-sync-critical",
    Check0 = http_get(Config, Path, ?OK),
    ?assertEqual(false, maps:is_key(reason, Check0)),
    ?assertEqual(<<"ok">>, maps:get(status, Check0)),

    ok = rabbit_ct_broker_helpers:set_policy(
           Config, 0, <<"ha">>, <<"is_mirror_sync.*">>, <<"queues">>,
           [{<<"ha-mode">>, <<"all">>}]),
    [Server1, Server2, Server3] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
    Ch = rabbit_ct_client_helpers:open_channel(Config, Server1),
    QName = <<"is_mirror_sync_critical_test">>,
    ?assertEqual({'queue.declare_ok', QName, 0, 0},
                 amqp_channel:call(Ch, #'queue.declare'{queue = QName,
                                                        durable = true,
                                                        auto_delete = false,
                                                        arguments = []})),
    rabbit_ct_helpers:await_condition(
      fun() ->
              {ok, {{_, Code, _}, _, _}} = req(Config, get, Path, [auth_header("guest", "guest")]),
              Code == ?OK
      end),
    Check1 = http_get(Config, Path, ?OK),
    ?assertEqual(false, maps:is_key(reason, Check1)),

    ok = rabbit_ct_broker_helpers:stop_node(Config, Server2),
    ok = rabbit_ct_broker_helpers:stop_node(Config, Server3),

    Body = http_get_failed(Config, Path),
    ?assertEqual(<<"failed">>, maps:get(<<"status">>, Body)),
    ?assertEqual(true, maps:is_key(<<"reason">>, Body)),
    [Queue] = maps:get(<<"queues">>, Body),
    ?assertEqual(QName, maps:get(<<"name">>, Queue)),

    passed.

virtual_hosts_test(Config) ->
    VHost1 = <<"vhost1">>,
    VHost2 = <<"vhost2">>,

@@ -55,7 +55,6 @@ all_tests() -> [
    connections_test,
    exchanges_test,
    queues_test,
    mirrored_queues_test,
    quorum_queues_test,
    permissions_vhost_test,
    permissions_connection_channel_consumer_test,

@@ -139,14 +138,6 @@ init_per_testcase(Testcase = permissions_vhost_test, Config) ->
    rabbit_ct_broker_helpers:delete_vhost(Config, <<"myvhost2">>),
    rabbit_ct_helpers:testcase_started(Config, Testcase);

init_per_testcase(mirrored_queues_test = Testcase, Config) ->
    case rabbit_ct_broker_helpers:configured_metadata_store(Config) of
        mnesia ->
            rabbit_ct_broker_helpers:close_all_connections(Config, 0, <<"rabbit_mgmt_only_http_SUITE:init_per_testcase">>),
            rabbit_ct_helpers:testcase_started(Config, Testcase);
        {khepri, _} ->
            {skip, "Classic queue mirroring not supported by Khepri"}
    end;
init_per_testcase(Testcase, Config) ->
    rabbit_ct_broker_helpers:close_all_connections(Config, 0, <<"rabbit_mgmt_only_http_SUITE:init_per_testcase">>),
    rabbit_ct_helpers:testcase_started(Config, Testcase).

@@ -533,41 +524,6 @@ queues_enable_totals_test(Config) ->

    passed.

mirrored_queues_test(Config) ->
    Policy = [{pattern, <<".*">>},
              {definition, [{<<"ha-mode">>, <<"all">>}]}],
    http_put(Config, "/policies/%2F/HA", Policy, {group, '2xx'}),

    Good = [{durable, true}, {arguments, []}],
    http_get(Config, "/queues/%2f/ha", ?NOT_FOUND),
    http_put(Config, "/queues/%2f/ha", Good, {group, '2xx'}),

    {Conn, Ch} = open_connection_and_channel(Config),
    Publish = fun() ->
                      amqp_channel:call(
                        Ch, #'basic.publish'{exchange = <<"">>,
                                             routing_key = <<"ha">>},
                        #amqp_msg{payload = <<"message">>})
              end,
    Publish(),
    Publish(),

    Queue = http_get(Config, "/queues/%2f/ha?lengths_age=60&lengths_incr=5&msg_rates_age=60&msg_rates_incr=5&data_rates_age=60&data_rates_incr=5"),

    %% It's really only one node, but the only thing that matters in this test is to verify the
    %% key exists
    Nodes = lists:sort(rabbit_ct_broker_helpers:get_node_configs(Config, nodename)),

    ?assert(not maps:is_key(messages, Queue)),
    ?assert(not maps:is_key(messages_details, Queue)),
    ?assert(not maps:is_key(reductions_details, Queue)),
    ?assert(true, lists:member(maps:get(node, Queue), Nodes)),
    ?assertEqual([], get_nodes(slave_nodes, Queue)),
    ?assertEqual([], get_nodes(synchronised_slave_nodes, Queue)),

    http_delete(Config, "/queues/%2f/ha", {group, '2xx'}),
    close_connection(Conn).

quorum_queues_test(Config) ->
    Good = [{durable, true}, {arguments, [{'x-queue-type', 'quorum'}]}],
    http_get(Config, "/queues/%2f/qq", ?NOT_FOUND),

@@ -895,8 +851,6 @@ table_hash(Table) ->

queue_actions_test(Config) ->
    http_put(Config, "/queues/%2F/q", #{}, {group, '2xx'}),
    http_post(Config, "/queues/%2F/q/actions", [{action, sync}], {group, '2xx'}),
    http_post(Config, "/queues/%2F/q/actions", [{action, cancel_sync}], {group, '2xx'}),
    http_post(Config, "/queues/%2F/q/actions", [{action, change_colour}], ?BAD_REQUEST),
    http_delete(Config, "/queues/%2F/q", {group, '2xx'}),
    passed.

@@ -53,18 +53,10 @@ format_queue_stats({exclusive_consumer_pid, _}) ->
    [];
format_queue_stats({single_active_consumer_pid, _}) ->
    [];
format_queue_stats({slave_pids, ''}) ->
    [];
format_queue_stats({slave_pids, Pids}) ->
    [{slave_nodes, [node(Pid) || Pid <- Pids]}];
format_queue_stats({leader, Leader}) ->
    [{node, Leader}];
format_queue_stats({synchronised_slave_pids, ''}) ->
    [];
format_queue_stats({effective_policy_definition, []}) ->
    [{effective_policy_definition, #{}}];
format_queue_stats({synchronised_slave_pids, Pids}) ->
    [{synchronised_slave_nodes, [node(Pid) || Pid <- Pids]}];
format_queue_stats({backing_queue_status, Value}) ->
    case proplists:get_value(version, Value, undefined) of
        undefined -> [];

@@ -513,14 +505,6 @@ strip_pids([{channel_pid, _} | T], Acc) ->
    strip_pids(T, Acc);
strip_pids([{exclusive_consumer_pid, _} | T], Acc) ->
    strip_pids(T, Acc);
strip_pids([{slave_pids, ''} | T], Acc) ->
    strip_pids(T, Acc);
strip_pids([{slave_pids, Pids} | T], Acc) ->
    strip_pids(T, [{slave_nodes, [node(Pid) || Pid <- Pids]} | Acc]);
strip_pids([{synchronised_slave_pids, ''} | T], Acc) ->
    strip_pids(T, Acc);
strip_pids([{synchronised_slave_pids, Pids} | T], Acc) ->
    strip_pids(T, [{synchronised_slave_nodes, [node(Pid) || Pid <- Pids]} | Acc]);
strip_pids([{K, [P|_] = Nested} | T], Acc) when is_tuple(P) -> % recurse
    strip_pids(T, [{K, strip_pids(Nested)} | Acc]);
strip_pids([{K, [L|_] = Nested} | T], Acc) when is_list(L) -> % recurse
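
The clauses removed above translated classic-mirror pids into node names for the HTTP API payloads. A small stand-alone sketch of the same translation applied to invented sample data; the proplist and the use of self() as a stand-in pid are not from the commit.

#!/usr/bin/env escript
%% Map *_pids entries to *_nodes entries the way the removed clauses did.
main(_) ->
    Stats = [{name, <<"q1">>}, {slave_pids, [self()]}, {messages, 0}],
    Stripped =
        lists:flatmap(
          fun({slave_pids, Pids}) ->
                  [{slave_nodes, [node(P) || P <- Pids]}];
             ({synchronised_slave_pids, Pids}) ->
                  [{synchronised_slave_nodes, [node(P) || P <- Pids]}];
             (Other) ->
                  [Other]
          end, Stats),
    io:format("~p~n", [Stripped]).
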
@@ -66,7 +66,7 @@
          published = false :: boolean(),
          ssl_login_name :: none | binary(),
          retainer_pid :: pid(),
          delivery_flow :: flow | noflow,
          delivery_flow, %% Deprecated since removal of CMQ in 4.0
          trace_state :: rabbit_trace:state(),
          prefetch :: non_neg_integer(),
          vhost :: rabbit_types:vhost(),

@@ -144,10 +144,6 @@ process_connect(
      "protocol version: ~p, keepalive: ~p, property names: ~p",
      [ClientId0, Username0, CleanStart, ProtoVer, KeepaliveSecs, maps:keys(ConnectProps)]),
    SslLoginName = ssl_login_name(Socket),
    Flow = case application:get_env(rabbit, mirroring_flow_control) of
               {ok, true} -> flow;
               {ok, false} -> noflow
           end,
    MaxPacketSize = maps:get('Maximum-Packet-Size', ConnectProps, ?MAX_PACKET_SIZE),
    TopicAliasMax = persistent_term:get(?PERSISTENT_TERM_TOPIC_ALIAS_MAXIMUM),
    TopicAliasMaxOutbound = min(maps:get('Topic-Alias-Maximum', ConnectProps, 0), TopicAliasMax),

@@ -208,7 +204,6 @@ process_connect(
               clean_start = CleanStart,
               session_expiry_interval_secs = SessionExpiry,
               ssl_login_name = SslLoginName,
               delivery_flow = Flow,
               trace_state = TraceState,
               prefetch = prefetch(ConnectProps),
               conn_name = ConnName,

@@ -1541,7 +1536,6 @@ publish_to_queues(
    #mqtt_msg{topic = Topic,
              packet_id = PacketId} = MqttMsg,
    #state{cfg = #cfg{exchange = ExchangeName = #resource{name = ExchangeNameBin},
                      delivery_flow = Flow,
                      conn_name = ConnName,
                      trace_state = TraceState},
           auth_state = #auth_state{user = #user{username = Username}}} = State) ->

@@ -1554,7 +1548,7 @@ publish_to_queues(
            QNames0 = rabbit_exchange:route(Exchange, Msg, #{return_binding_keys => true}),
            QNames = drop_local(QNames0, State),
            rabbit_trace:tap_in(Msg, QNames, ConnName, Username, TraceState),
            Opts = maps_put_truthy(flow, Flow, maps_put_truthy(correlation, PacketId, #{})),
            Opts = maps_put_truthy(correlation, PacketId, #{}),
            deliver_to_queues(Msg, Opts, QNames, State);
        {error, not_found} ->
            ?LOG_ERROR("~s not found", [rabbit_misc:rs(ExchangeName)]),

@@ -2491,7 +2485,6 @@ format_status(
      published = Published,
      ssl_login_name = SSLLoginName,
      retainer_pid = RetainerPid,
      delivery_flow = DeliveryFlow,
      trace_state = TraceState,
      prefetch = Prefetch,
      client_id = ClientID,

@@ -2513,7 +2506,6 @@ format_status(
      ssl_login_name => SSLLoginName,
      retainer_pid => RetainerPid,
      delivery_flow => DeliveryFlow,
      trace_state => TraceState,
      prefetch => Prefetch,
      client_id => ClientID,

@@ -153,8 +153,6 @@ cluster_size_3_tests() ->

mnesia_store_tests() ->
    [
     consuming_classic_mirrored_queue_down,
     flow_classic_mirrored_queue,
     publish_to_all_queue_types_qos0,
     publish_to_all_queue_types_qos1
    ].

@@ -399,7 +397,6 @@ publish_to_all_queue_types(Config, QoS) ->
    Ch = rabbit_ct_client_helpers:open_channel(Config),

    CQ = <<"classic-queue">>,
    CMQ = <<"classic-mirrored-queue">>,
    QQ = <<"quorum-queue">>,
    SQ = <<"stream-queue">>,
    Topic = <<"mytopic">>,

@@ -407,10 +404,6 @@ publish_to_all_queue_types(Config, QoS) ->
    declare_queue(Ch, CQ, []),
    bind(Ch, CQ, Topic),

    ok = rabbit_ct_broker_helpers:set_ha_policy(Config, 0, CMQ, <<"all">>),
    declare_queue(Ch, CMQ, []),
    bind(Ch, CMQ, Topic),

    declare_queue(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}]),
    bind(Ch, QQ, Topic),

@@ -433,7 +426,7 @@ publish_to_all_queue_types(Config, QoS) ->
    eventually(?_assert(
                  begin
                      L = rabbitmqctl_list(Config, 0, ["list_queues", "messages", "--no-table-headers"]),
                      length(L) =:= 4 andalso
                      length(L) =:= 3 andalso
                      lists:all(fun([Bin]) ->
                                        N = binary_to_integer(Bin),
                                        case QoS of

@@ -448,8 +441,7 @@ publish_to_all_queue_types(Config, QoS) ->
                                end, L)
                  end), 2000, 10),

    delete_queue(Ch, [CQ, CMQ, QQ, SQ]),
    ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, CMQ),
    delete_queue(Ch, [CQ, QQ, SQ]),
    ok = emqtt:disconnect(C),
    ?awaitMatch([],
                all_connection_pids(Config), 10_000, 1000).

@@ -513,23 +505,6 @@ publish_to_all_non_deprecated_queue_types(Config, QoS) ->
    ?awaitMatch([],
                all_connection_pids(Config), 10_000, 1000).

flow_classic_mirrored_queue(Config) ->
    QueueName = <<"flow">>,
    ok = rabbit_ct_broker_helpers:set_ha_policy(Config, 0, QueueName, <<"all">>),
    %% New nodes lookup via persistent_term:get/1.
    %% Old nodes lookup via application:get_env/2.
    %% Therefore, we set both persistent_term and application.
    Key = credit_flow_default_credit,
    Val = {2, 1},
    DefaultVal = rabbit_ct_broker_helpers:rpc(Config, persistent_term, get, [Key]),
    Result = rpc_all(Config, persistent_term, put, [Key, Val]),
    ?assert(lists:all(fun(R) -> R =:= ok end, Result)),

    flow(Config, {rabbit, Key, Val}, <<"classic">>),

    ?assertEqual(Result, rpc_all(Config, persistent_term, put, [Key, DefaultVal])),
    ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, QueueName).

flow_quorum_queue(Config) ->
    flow(Config, {rabbit, quorum_commands_soft_limit, 1}, <<"quorum">>).
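
The comment in flow_classic_mirrored_queue above explains why the credit limit is written to both lookup locations: newer nodes read credit_flow_default_credit via persistent_term:get/1, older nodes via application:get_env/2. A minimal sketch of that idea outside the test helpers; the module name and the use of plain rpc:call are assumptions for illustration only.

%% set_credit_everywhere.erl - write a credit_flow limit where both old and
%% new RabbitMQ nodes in a mixed-version cluster will find it.
-module(set_credit_everywhere).
-export([set/3]).

set(Node, Key, Val) ->
    %% Newer nodes: persistent_term lookup.
    ok = rpc:call(Node, persistent_term, put, [Key, Val]),
    %% Older nodes: application environment lookup.
    ok = rpc:call(Node, application, set_env, [rabbit, Key, Val]).
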
@@ -828,48 +803,6 @@ queue_down_qos1(Config) ->
    delete_queue(Ch0, CQ),
    ok = emqtt:disconnect(C).

%% Even though classic mirrored queues are deprecated, we know that some users have set up
%% a policy to mirror MQTT queues. So, we need to support that use case in RabbitMQ 3.x
%% and failover consumption when the classic mirrored queue leader fails.
consuming_classic_mirrored_queue_down(Config) ->
    [Server1, Server2, _Server3] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
    ClientId = Topic = PolicyName = atom_to_binary(?FUNCTION_NAME),

    ok = rabbit_ct_broker_helpers:set_policy(
           Config, Server1, PolicyName, <<".*">>, <<"queues">>,
           [{<<"ha-mode">>, <<"all">>},
            {<<"queue-master-locator">>, <<"client-local">>}]),

    %% Declare queue leader on Server1.
    C1 = connect(ClientId, Config, Server1, non_clean_sess_opts()),
    {ok, _, _} = emqtt:subscribe(C1, Topic, qos1),
    ok = emqtt:disconnect(C1),

    %% Consume from Server2.
    C2 = connect(ClientId, Config, Server2, non_clean_sess_opts()),

    %% Sanity check that consumption works.
    {ok, _} = emqtt:publish(C2, Topic, <<"m1">>, qos1),
    ok = expect_publishes(C2, Topic, [<<"m1">>]),

    %% Let's stop the queue leader node.
    ok = rabbit_ct_broker_helpers:stop_node(Config, Server1),

    %% Consumption should continue to work.
    {ok, _} = emqtt:publish(C2, Topic, <<"m2">>, qos1),
    ok = expect_publishes(C2, Topic, [<<"m2">>]),

    %% Cleanup
    ok = emqtt:disconnect(C2),
    ok = rabbit_ct_broker_helpers:start_node(Config, Server1),
    ?assertMatch([_Q],
                 rpc(Config, Server1, rabbit_amqqueue, list, [])),
    C3 = connect(ClientId, Config, Server2, [{clean_start, true}]),
    ok = emqtt:disconnect(C3),
    ?assertEqual([],
                 rpc(Config, Server1, rabbit_amqqueue, list, [])),
    ok = rabbit_ct_broker_helpers:clear_policy(Config, Server1, PolicyName).

%% Consuming classic queue on a different node goes down.
consuming_classic_queue_down(Config) ->
    [Server1, _Server2, Server3] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),

@@ -891,7 +824,7 @@ consuming_classic_queue_down(Config) ->
    process_flag(trap_exit, true),
    ok = rabbit_ct_broker_helpers:stop_node(Config, Server1),

    %% When the dedicated MQTT connection (non-mirrored classic) queue goes down, it is reasonable
    %% When the dedicated MQTT connection queue goes down, it is reasonable
    %% that the server closes the MQTT connection because the MQTT client cannot consume anymore.
    eventually(?_assertMatch(#{consumers := 0},
                             get_global_counters(Config, ProtoVer, Server3)),