%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved.
%%
|
%% Common Test suite for the classic-queue backing queue machinery:
%% the shared message store, the queue index (v1 and v2) and
%% rabbit_variable_queue.
-module(backing_queue_SUITE).

-include_lib("common_test/include/ct.hrl").
-include_lib("amqp_client/include/amqp_client.hrl").
-include("amqqueue.hrl").

-compile(export_all).

%% Registered names of the two per-vhost message stores.
-define(PERSISTENT_MSG_STORE, msg_store_persistent).
-define(TRANSIENT_MSG_STORE, msg_store_transient).

%% Default await timeout (ms) used by the on-disk capture process.
-define(TIMEOUT, 30000).
%% All tests run against the default vhost.
-define(VHOST, <<"/">>).

%% Testcases exercising rabbit_variable_queue directly.
-define(VARIABLE_QUEUE_TESTCASES, [
    variable_queue_partial_segments_delta_thing,
    variable_queue_all_the_bits_not_covered_elsewhere_A,
    variable_queue_all_the_bits_not_covered_elsewhere_B,
    variable_queue_drop,
    variable_queue_fold_msg_on_disk,
    variable_queue_dropfetchwhile,
    variable_queue_dropwhile_varying_ram_duration,
    variable_queue_dropwhile_restart,
    variable_queue_dropwhile_sync_restart,
    variable_queue_fetchwhile_varying_ram_duration,
    variable_queue_ack_limiting,
    variable_queue_purge,
    variable_queue_requeue,
    variable_queue_requeue_ram_beta,
    variable_queue_fold,
    variable_queue_batch_publish,
    variable_queue_batch_publish_delivered
  ]).

%% Testcases exercising the queue index plus the variable-queue group.
-define(BACKING_QUEUE_TESTCASES, [
    bq_queue_index,
    bq_queue_index_props,
    {variable_queue_default, [parallel], ?VARIABLE_QUEUE_TESTCASES},
    bq_variable_queue_delete_msg_store_files_callback,
    bq_queue_recover
  ]).
|
|
|
|
|
|
|
|
|
|
%% Top-level CT callback: the whole suite is a single test group.
all() ->
    [{group, backing_queue_tests}].
|
|
|
|
|
|
|
|
|
|
%% Group tree: the common backing-queue testcases run against both
%% classic queue index versions; a few testcases are v2-specific.
groups() ->
    CommonTests = [
        {backing_queue_embed_limit_0, [], ?BACKING_QUEUE_TESTCASES},
        {backing_queue_embed_limit_1024, [], ?BACKING_QUEUE_TESTCASES}
    ],
    V2Specific = [
        v2_delete_segment_file_completely_acked,
        v2_delete_segment_file_partially_acked,
        v2_delete_segment_file_partially_acked_with_holes
    ],
    [{backing_queue_tests, [], [
        msg_store,
        {backing_queue_v2, [], CommonTests ++ V2Specific},
        {backing_queue_v1, [], CommonTests}
    ]}].
|
|
|
|
|
|
2021-10-11 18:30:39 +08:00
|
|
|
%% Per-group CT properties.
%% Several tests based on lazy queues may take more than 30 minutes,
%% so the main group gets a one-hour timetrap; other groups use defaults.
group(backing_queue_tests) ->
    [{timetrap, {hours, 1}}];
group(_Other) ->
    [].
|
|
|
|
|
|
2017-03-07 00:13:57 +08:00
|
|
|
%% -------------------------------------------------------------------
|
|
|
|
|
%% Testsuite setup/teardown.
|
|
|
|
|
%% -------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
%% Suite setup: log the test environment, then run the standard
%% rabbit_ct_helpers setup steps (returns the augmented Config).
init_per_suite(Config) ->
    rabbit_ct_helpers:log_environment(),
    rabbit_ct_helpers:run_setup_steps(Config).
|
|
|
|
|
|
|
|
|
|
%% Suite teardown: undo whatever init_per_suite's setup steps did.
end_per_suite(Config) ->
    rabbit_ct_helpers:run_teardown_steps(Config).
|
|
|
|
|
|
|
|
|
|
%% Group setup. Top-level groups (the ones listed in all/0) get a
%% 2-node broker cluster plus client helpers before running the
%% group-specific hook; nested groups only run the hook itself.
init_per_group(Group, Config) ->
    case lists:member({group, Group}, all()) of
        true ->
            NodeCount = 2,
            Config1 = rabbit_ct_helpers:set_config(Config, [
                {rmq_nodename_suffix, Group},
                {rmq_nodes_count, NodeCount}
            ]),
            Steps = rabbit_ct_broker_helpers:setup_steps() ++
                    rabbit_ct_client_helpers:setup_steps() ++
                    [fun(C) -> init_per_group1(Group, C) end,
                     fun setup_file_handle_cache/1],
            rabbit_ct_helpers:run_steps(Config1, Steps);
        false ->
            rabbit_ct_helpers:run_steps(
              Config, [fun(C) -> init_per_group1(Group, C) end])
    end.
|
|
|
|
|
|
|
|
|
|
%% Group-specific setup, executed as an extra run_steps step.
init_per_group1(backing_queue_tests, Config) ->
    %% @todo Is that test still relevant?
    %% The suite only makes sense with the default (priority) backing
    %% queue module; skip the whole group otherwise.
    Module = rabbit_ct_broker_helpers:rpc(Config, 0,
      application, get_env, [rabbit, backing_queue_module]),
    case Module of
        {ok, rabbit_priority_queue} ->
            %% Runs setup_backing_queue_test_group/1 on the broker node.
            rabbit_ct_broker_helpers:rpc(Config, 0,
              ?MODULE, setup_backing_queue_test_group, [Config]);
        _ ->
            {skip, rabbit_misc:format(
               "Backing queue module not supported by this test group: ~tp~n",
               [Module])}
    end;
%% Force classic queue index v1 on the broker node for this group.
init_per_group1(backing_queue_v1, Config) ->
    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
      application, set_env, [rabbit, classic_queue_default_version, 1]),
    Config;
%% Force classic queue index v2 on the broker node for this group.
init_per_group1(backing_queue_v2, Config) ->
    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
      application, set_env, [rabbit, classic_queue_default_version, 2]),
    Config;
%% Never embed message payloads in the queue index.
init_per_group1(backing_queue_embed_limit_0, Config) ->
    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
      application, set_env, [rabbit, queue_index_embed_msgs_below, 0]),
    Config;
%% Embed payloads smaller than 1024 bytes in the queue index.
init_per_group1(backing_queue_embed_limit_1024, Config) ->
    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
      application, set_env, [rabbit, queue_index_embed_msgs_below, 1024]),
    Config;
init_per_group1(variable_queue_default, Config) ->
    rabbit_ct_helpers:set_config(Config, {variable_queue_type, default});
%% @todo These groups are no longer used?
init_per_group1(from_cluster_node1, Config) ->
    rabbit_ct_helpers:set_config(Config, {test_direction, {0, 1}});
init_per_group1(from_cluster_node2, Config) ->
    rabbit_ct_helpers:set_config(Config, {test_direction, {1, 0}});
init_per_group1(_, Config) ->
    Config.
|
|
|
|
|
|
|
|
|
|
%% run_steps hook: configure the file handle cache on the broker node.
setup_file_handle_cache(Config) ->
    ok = rabbit_ct_broker_helpers:rpc(
           Config, 0, ?MODULE, setup_file_handle_cache1, []),
    Config.
|
|
|
|
|
|
|
|
|
|
%% Broker-side helper: cap the file handle cache at 100 handles.
setup_file_handle_cache1() ->
    %% FIXME: Why are we doing this?
    application:set_env(rabbit, file_handles_high_watermark, 100),
    ok = file_handle_cache:set_limit(100),
    ok.
|
|
|
|
|
|
|
|
|
|
%% Group teardown — mirror image of init_per_group/2: top-level groups
%% tear down client helpers and the broker cluster; nested groups only
%% run their own end_per_group1/2 hook indirectly via the parent.
end_per_group(Group, Config) ->
    case lists:member({group, Group}, all()) of
        false ->
            Config;
        true ->
            TeardownSteps =
                [fun(C) -> end_per_group1(Group, C) end] ++
                rabbit_ct_client_helpers:teardown_steps() ++
                rabbit_ct_broker_helpers:teardown_steps(),
            rabbit_ct_helpers:run_steps(Config, TeardownSteps)
    end.
|
|
|
|
|
|
|
|
|
|
%% Group-specific teardown; undoes the corresponding init_per_group1/2.
end_per_group1(backing_queue_tests, Config) ->
    rabbit_ct_broker_helpers:rpc(Config, 0,
      ?MODULE, teardown_backing_queue_test_group, [Config]);
%% Restore the default classic queue index version.
end_per_group1(Group, Config)
  when Group =:= backing_queue_v1
    orelse Group =:= backing_queue_v2 ->
    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
      application, unset_env, [rabbit, classic_queue_default_version]),
    Config;
%% Restore the embed threshold saved by setup_backing_queue_test_group/1.
end_per_group1(Group, Config)
  when Group =:= backing_queue_embed_limit_0
    orelse Group =:= backing_queue_embed_limit_1024 ->
    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
      application, set_env, [rabbit, queue_index_embed_msgs_below,
                             ?config(rmq_queue_index_embed_msgs_below, Config)]),
    Config;
end_per_group1(_, Config) ->
    Config.
|
|
|
|
|
|
2020-04-28 03:38:58 +08:00
|
|
|
%% Per-testcase setup. The requeue/fold tests need the explicit GC to
%% run on every operation, so drop its threshold to 0 on the broker.
init_per_testcase(Testcase, Config) when Testcase == variable_queue_requeue;
                                         Testcase == variable_queue_fold ->
    ok = rabbit_ct_broker_helpers:rpc(
           Config, 0, application, set_env,
           [rabbit, queue_explicit_gc_run_operation_threshold, 0]),
    rabbit_ct_helpers:testcase_started(Config, Testcase);
init_per_testcase(Testcase, Config) ->
    rabbit_ct_helpers:testcase_started(Config, Testcase).
|
|
|
|
|
|
2020-04-28 03:38:58 +08:00
|
|
|
%% Per-testcase teardown. Restore the explicit GC threshold changed by
%% init_per_testcase/2 (1000 is the value the suite resets it to).
end_per_testcase(Testcase, Config) when Testcase == variable_queue_requeue;
                                        Testcase == variable_queue_fold ->
    ok = rabbit_ct_broker_helpers:rpc(
           Config, 0, application, set_env,
           [rabbit, queue_explicit_gc_run_operation_threshold, 1000]),
    rabbit_ct_helpers:testcase_finished(Config, Testcase);
end_per_testcase(Testcase, Config) ->
    rabbit_ct_helpers:testcase_finished(Config, Testcase).
|
|
|
|
|
|
|
|
|
|
%% -------------------------------------------------------------------
|
|
|
|
|
%% Message store.
|
|
|
|
|
%% -------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
%% Testcase entry point: the real work runs on broker node 0 so the
%% message store under test is the broker's own.
msg_store(Config) ->
    Result = rabbit_ct_broker_helpers:rpc(
               Config, 0, ?MODULE, msg_store1, [Config]),
    passed = Result.
|
|
|
|
|
|
|
|
|
|
%% Broker-side body of the msg_store testcase: end-to-end exercise of
%% the message store — write/read/remove, confirms, restart recovery
%% and file GC. Returns 'passed' on success; any mismatch crashes.
msg_store1(_Config) ->
    %% We simulate the SeqId (used as a message ref for the flying optimisation)
    %% using the process dictionary.
    GenRefFun = fun(Key) -> V = case get(Key) of undefined -> 0; V0 -> V0 end, put(Key, V + 1), V end,
    GenRef = fun() -> GenRefFun(msc) end,
    restart_msg_store_empty(),
    %% 100 distinct {SeqId, MsgId} pairs to publish.
    MsgIds = [{GenRef(), msg_id_bin(M)} || M <- lists:seq(1,100)],
    {MsgIds1stHalf, MsgIds2ndHalf} = lists:split(length(MsgIds) div 2, MsgIds),
    Ref = rabbit_guid:gen(),
    {Cap, MSCState} = msg_store_client_init_capture(
                        ?PERSISTENT_MSG_STORE, Ref),
    Ref2 = rabbit_guid:gen(),
    {Cap2, MSC2State} = msg_store_client_init_capture(
                          ?PERSISTENT_MSG_STORE, Ref2),
    %% check we don't contain any of the msgs we're about to publish
    false = msg_store_contains(false, MsgIds, MSCState),
    %% test confirm logic
    passed = test_msg_store_confirms([hd(MsgIds)], Cap, GenRef, MSCState),
    %% check we don't contain any of the msgs we're about to publish
    false = msg_store_contains(false, MsgIds, MSCState),
    %% publish the first half
    ok = msg_store_write(MsgIds1stHalf, MSCState),
    %% sync on the first half
    ok = on_disk_await(Cap, MsgIds1stHalf),
    %% publish the second half
    ok = msg_store_write(MsgIds2ndHalf, MSCState),
    %% check they're all in there
    true = msg_store_contains(true, MsgIds, MSCState),
    %% publish the latter half twice so we hit the caching and ref
    %% count code. We need to do this through a 2nd client since a
    %% single client is not supposed to write the same message more
    %% than once without first removing it.
    ok = msg_store_write([{GenRefFun(msc2), MsgId} || {_, MsgId} <- MsgIds2ndHalf], MSC2State),
    %% check they're still all in there
    true = msg_store_contains(true, MsgIds, MSCState),
    %% sync on the 2nd half
    ok = on_disk_await(Cap2, MsgIds2ndHalf),
    %% cleanup
    ok = on_disk_stop(Cap2),
    ok = rabbit_msg_store:client_delete_and_terminate(MSC2State),
    ok = on_disk_stop(Cap),
    %% read them all
    MSCState1 = msg_store_read(MsgIds, MSCState),
    %% read them all again - this will hit the cache, not disk
    MSCState2 = msg_store_read(MsgIds, MSCState1),
    %% remove them all
    {ok, _} = msg_store_remove(MsgIds, MSCState2),
    %% check first half doesn't exist
    false = msg_store_contains(false, MsgIds1stHalf, MSCState2),
    %% check second half does exist — the 2nd client's duplicate writes
    %% left a positive ref count on those messages.
    true = msg_store_contains(true, MsgIds2ndHalf, MSCState2),
    %% read the second half again
    MSCState3 = msg_store_read(MsgIds2ndHalf, MSCState2),
    %% read the second half again, just for fun (aka code coverage)
    MSCState4 = msg_store_read(MsgIds2ndHalf, MSCState3),
    ok = rabbit_msg_store:client_terminate(MSCState4),
    %% stop and restart, preserving every other msg in 2nd half
    ok = rabbit_variable_queue:stop_msg_store(?VHOST),
    ok = rabbit_variable_queue:start_msg_store(?VHOST,
           %% Recovery fun: report ref count 1 for every other message,
           %% 0 for the rest, so only half survive the restart.
           [], {fun ([]) -> finished;
                    ([{_, MsgId}|MsgIdsTail])
                      when length(MsgIdsTail) rem 2 == 0 ->
                        {MsgId, 1, MsgIdsTail};
                    ([{_, MsgId}|MsgIdsTail]) ->
                        {MsgId, 0, MsgIdsTail}
                end, MsgIds2ndHalf}),
    MSCState5 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref),
    %% check we have the right msgs left: containment must strictly
    %% alternate along the list (Bool flips on every element).
    lists:foldl(
      fun ({_, MsgId}, Bool) ->
              not(Bool = rabbit_msg_store:contains(MsgId, MSCState5))
      end, false, MsgIds2ndHalf),
    ok = rabbit_msg_store:client_terminate(MSCState5),
    %% restart empty
    restart_msg_store_empty(),
    MSCState6 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref),
    %% check we don't contain any of the msgs
    false = msg_store_contains(false, MsgIds, MSCState6),
    %% publish the first half again
    ok = msg_store_write(MsgIds1stHalf, MSCState6),
    %% this should force some sort of sync internally otherwise misread
    ok = rabbit_msg_store:client_terminate(
           msg_store_read(MsgIds1stHalf, MSCState6)),
    MSCState7 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref),
    {ok, _} = msg_store_remove(MsgIds1stHalf, MSCState7),
    ok = rabbit_msg_store:client_terminate(MSCState7),
    %% restart empty
    restart_msg_store_empty(), %% now safe to reuse msg_ids
    %% push a lot of msgs in... at least 100 files worth
    {ok, FileSize} = application:get_env(rabbit, msg_store_file_size_limit),
    PayloadSizeBits = 65536,
    BigCount = trunc(100 * FileSize / (PayloadSizeBits div 8)),
    MsgIdsBig = [{GenRef(), msg_id_bin(X)} || X <- lists:seq(1, BigCount)],
    Payload = << 0:PayloadSizeBits >>,
    ok = with_msg_store_client(
           ?PERSISTENT_MSG_STORE, Ref,
           fun (MSCStateM) ->
                   [ok = rabbit_msg_store:write(SeqId, MsgId, Payload, MSCStateM) ||
                       {SeqId, MsgId} <- MsgIdsBig],
                   MSCStateM
           end),
    %% now read them to ensure we hit the fast client-side reading
    ok = foreach_with_msg_store_client(
           ?PERSISTENT_MSG_STORE, Ref,
           fun ({_, MsgId}, MSCStateM) ->
                   {{ok, Payload}, MSCStateN} = rabbit_msg_store:read(
                                                  MsgId, MSCStateM),
                   MSCStateN
           end, MsgIdsBig),
    %% We remove every other other message first, then do it again a second
    %% time with another set of messages and then a third time. We start
    %% with younger messages on purpose. So we split the list in three
    %% lists keeping the message reference.
    Part = fun
        PartFun([], _, Acc) ->
            Acc;
        PartFun([E|Tail], N, Acc) ->
            Pos = 1 + (N rem 3),
            AccL = element(Pos, Acc),
            PartFun(Tail, N + 1, setelement(Pos, Acc, [E|AccL]))
    end,
    {One, Two, Three} = Part(MsgIdsBig, 0, {[], [], []}),
    ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref, One),
    %% This is likely to hit GC (under 50% good data left in files, but no empty files).
    ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref, Two),
    %% Files are empty now and will get removed.
    ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref, Three),
    %% ensure empty
    ok = with_msg_store_client(
           ?PERSISTENT_MSG_STORE, Ref,
           fun (MSCStateM) ->
                   false = msg_store_contains(false, MsgIdsBig, MSCStateM),
                   MSCStateM
           end),
    %%
    passed = test_msg_store_client_delete_and_terminate(fun() -> GenRefFun(msc_cdat) end),
    %% restart empty
    restart_msg_store_empty(),
    passed.
|
|
|
|
|
|
|
|
|
|
%% Stop and restart the vhost message store with no client refs and a
%% trivial recovery fun, so it comes back up empty.
restart_msg_store_empty() ->
    ok = rabbit_variable_queue:stop_msg_store(?VHOST),
    ok = rabbit_variable_queue:start_msg_store(?VHOST,
           undefined, {fun (ok) -> finished end, ok}).
|
|
|
|
|
|
|
|
|
|
%% Deterministically derive a 16-byte message id from an arbitrary term.
msg_id_bin(X) ->
    Serialized = term_to_binary(X),
    erlang:md5(Serialized).
|
|
|
|
|
|
|
|
|
|
%% Idle state of the capture process spawned by
%% msg_store_client_init_capture/2: wait for an await request (then
%% start tracking) or a stop request.
on_disk_capture() ->
    receive
        stop ->
            done;
        {await, MsgIds, Pid} ->
            on_disk_capture([], MsgIds, Pid)
    end.
|
|
|
|
|
|
|
|
|
|
%% Active state of the capture process: collect {on_disk, MsgIds}
%% confirms until Awaiting is empty. Any confirm not in Awaiting is
%% a surplus and fails the await.
on_disk_capture([_|_], _Awaiting, Pid) ->
    %% Something arrived that we were not waiting for.
    Pid ! {self(), surplus};
on_disk_capture(OnDisk, Awaiting, Pid) ->
    receive
        {on_disk, MsgIdsS} ->
            MsgIds = sets:to_list(MsgIdsS),
            on_disk_capture(OnDisk ++ (MsgIds -- Awaiting), Awaiting -- MsgIds,
                            Pid);
        stop ->
            done
    %% Once nothing is awaited, linger briefly (200ms) to catch late
    %% surplus confirms; otherwise wait up to ?TIMEOUT for the rest.
    after (case Awaiting of [] -> 200; _ -> ?TIMEOUT end) ->
            case Awaiting of
                [] -> Pid ! {self(), arrived}, on_disk_capture();
                _  -> Pid ! {self(), timeout}
            end
    end.
|
|
|
|
|
|
2023-03-10 16:48:43 +08:00
|
|
|
%% Block until the capture process Pid has seen on-disk confirms for
%% all of MsgIds0 (a list of {SeqId, MsgId} pairs). Returns ok on
%% success, or the capture's error atom (surplus | timeout).
on_disk_await(Pid, MsgIds0) when is_list(MsgIds0) ->
    %% The capture process tracks raw msg ids, not {SeqId, MsgId} pairs.
    {_SeqIds, MsgIds} = lists:unzip(MsgIds0),
    Pid ! {await, MsgIds, self()},
    receive
        {Pid, arrived} -> ok;
        {Pid, Error}   -> Error
    end.
|
|
|
|
|
|
|
|
|
|
%% Synchronously stop a capture process: send stop and wait (via a
%% monitor) until it has actually terminated.
on_disk_stop(Pid) ->
    Monitor = erlang:monitor(process, Pid),
    Pid ! stop,
    receive
        {'DOWN', Monitor, process, Pid, _Reason} -> ok
    end.
|
|
|
|
|
|
|
|
|
|
%% Start a message store client whose confirm callback forwards the
%% confirmed msg ids to a freshly spawned capture process. Returns
%% {CapturePid, ClientState}.
msg_store_client_init_capture(MsgStore, Ref) ->
    Pid = spawn(fun on_disk_capture/0),
    {Pid, rabbit_vhost_msg_store:client_init(?VHOST, MsgStore, Ref,
                                             fun (MsgIds, _ActionTaken) ->
                                                 Pid ! {on_disk, MsgIds}
                                             end)}.
|
2017-03-07 00:13:57 +08:00
|
|
|
|
|
|
|
|
%% Assert that contains/2 returns Atom (true or false) for EVERY
%% {_, MsgId} in MsgIds. The guard `Atom1 =:= Atom` makes the fold
%% crash (function_clause) as soon as one lookup disagrees, and the
%% outer `Atom =` match checks the final result too.
msg_store_contains(Atom, MsgIds, MSCState) ->
    Atom = lists:foldl(
             fun ({_, MsgId}, Atom1) when Atom1 =:= Atom ->
                     rabbit_msg_store:contains(MsgId, MSCState) end,
             Atom, MsgIds).
|
|
|
|
|
|
|
|
|
|
%% Read every message, asserting each read returns its own MsgId as
%% payload (the suite writes MsgId as the body). Threads and returns
%% the client state.
msg_store_read(MsgIds, MSCState) ->
    ReadOne =
        fun ({_SeqId, MsgId}, StateIn) ->
                {{ok, MsgId}, StateOut} = rabbit_msg_store:read(MsgId, StateIn),
                StateOut
        end,
    lists:foldl(ReadOne, MSCState, MsgIds).
|
|
|
|
|
|
|
|
|
|
msg_store_write(MsgIds, MSCState) ->
|
2023-03-10 16:48:43 +08:00
|
|
|
ok = lists:foldl(fun ({SeqId, MsgId}, ok) ->
|
|
|
|
|
rabbit_msg_store:write(SeqId, MsgId, MsgId, MSCState)
|
2017-03-07 00:13:57 +08:00
|
|
|
end, ok, MsgIds).
|
|
|
|
|
|
|
|
|
|
msg_store_write_flow(MsgIds, MSCState) ->
|
2023-03-10 16:48:43 +08:00
|
|
|
ok = lists:foldl(fun ({SeqId, MsgId}, ok) ->
|
|
|
|
|
rabbit_msg_store:write_flow(SeqId, MsgId, MsgId, MSCState)
|
2017-03-07 00:13:57 +08:00
|
|
|
end, ok, MsgIds).
|
|
|
|
|
|
|
|
|
|
%% Remove the given {SeqId, MsgId} pairs; returns {ok, ConfirmedIds}
%% from rabbit_msg_store:remove/2.
msg_store_remove(MsgIds, MSCState) ->
    rabbit_msg_store:remove(MsgIds, MSCState).
|
|
|
|
|
|
|
|
|
|
%% Remove MsgIds using a short-lived client for (MsgStore, Ref).
msg_store_remove(MsgStore, Ref, MsgIds) ->
    RemoveAll =
        fun (MSCStateM) ->
                {ok, _} = msg_store_remove(MsgIds, MSCStateM),
                MSCStateM
        end,
    with_msg_store_client(MsgStore, Ref, RemoveAll).
|
|
|
|
|
|
|
|
|
|
%% Run Fun with a fresh message store client state, then cleanly
%% terminate the state Fun returns.
with_msg_store_client(MsgStore, Ref, Fun) ->
    MSCState = msg_store_client_init(MsgStore, Ref),
    rabbit_msg_store:client_terminate(Fun(MSCState)).
|
|
|
|
|
|
|
|
|
|
%% Fold Fun over L, threading a fresh message store client state
%% through every call, then terminate the final state.
foreach_with_msg_store_client(MsgStore, Ref, Fun, L) ->
    InitialState = msg_store_client_init(MsgStore, Ref),
    FinalState = lists:foldl(Fun, InitialState, L),
    rabbit_msg_store:client_terminate(FinalState).
|
|
|
|
|
|
2023-03-10 16:48:43 +08:00
|
|
|
%% Exercise the confirm semantics of the store for every combination
%% of write/remove orderings. Each scenario re-publishes the same
%% MsgIds under fresh SeqIds (via GenRef) and checks which confirms
%% reach the capture process Cap.
test_msg_store_confirms(MsgIds, Cap, GenRef, MSCState) ->
    %% write -> confirmed
    MsgIds1 = [{GenRef(), MsgId} || {_, MsgId} <- MsgIds],
    ok = msg_store_write(MsgIds1, MSCState),
    ok = on_disk_await(Cap, MsgIds1),
    %% remove -> _
    {ok, []} = msg_store_remove(MsgIds1, MSCState),
    ok = on_disk_await(Cap, []),
    %% write, remove -> confirmed
    MsgIds2 = [{GenRef(), MsgId} || {_, MsgId} <- MsgIds],
    ok = msg_store_write(MsgIds2, MSCState),
    %% remove/2 may confirm some ids synchronously; only await the rest.
    {ok, ConfirmedMsgIds2} = msg_store_remove(MsgIds2, MSCState),
    ok = on_disk_await(Cap, lists:filter(fun({_, MsgId}) -> not lists:member(MsgId, ConfirmedMsgIds2) end, MsgIds2)),
    %% write, remove, write -> confirmed, confirmed
    MsgIds3 = [{GenRef(), MsgId} || {_, MsgId} <- MsgIds],
    ok = msg_store_write(MsgIds3, MSCState),
    {ok, ConfirmedMsgIds3} = msg_store_remove(MsgIds3, MSCState),
    MsgIds4 = [{GenRef(), MsgId} || {_, MsgId} <- MsgIds],
    ok = msg_store_write(MsgIds4, MSCState),
    ok = on_disk_await(Cap, lists:filter(fun({_, MsgId}) -> not lists:member(MsgId, ConfirmedMsgIds3) end, MsgIds3) ++ MsgIds4),
    %% remove, write -> confirmed
    {ok, []} = msg_store_remove(MsgIds4, MSCState),
    MsgIds5 = [{GenRef(), MsgId} || {_, MsgId} <- MsgIds],
    ok = msg_store_write(MsgIds5, MSCState),
    ok = on_disk_await(Cap, MsgIds5),
    %% remove, write, remove -> confirmed
    {ok, []} = msg_store_remove(MsgIds5, MSCState),
    MsgIds6 = [{GenRef(), MsgId} || {_, MsgId} <- MsgIds],
    ok = msg_store_write(MsgIds6, MSCState),
    {ok, ConfirmedMsgIds6} = msg_store_remove(MsgIds6, MSCState),
    ok = on_disk_await(Cap, lists:filter(fun({_, MsgId}) -> not lists:member(MsgId, ConfirmedMsgIds6) end, MsgIds6)),
    %% confirmation on timer-based sync
    passed = test_msg_store_confirm_timer(GenRef),
    passed.
|
|
|
|
|
|
2023-03-10 16:48:43 +08:00
|
|
|
%% Check that a written message is eventually confirmed by the store's
%% timer-based sync, while the client keeps the store busy with
%% unrelated traffic.
test_msg_store_confirm_timer(GenRef) ->
    Ref = rabbit_guid:gen(),
    MsgId = msg_id_bin(1),
    Self = self(),
    %% The confirm callback signals this process only when the watched
    %% MsgId is among the confirmed set.
    MSCState = rabbit_vhost_msg_store:client_init(
                 ?VHOST,
                 ?PERSISTENT_MSG_STORE,
                 Ref,
                 fun (MsgIds, _ActionTaken) ->
                         case sets:is_element(MsgId, MsgIds) of
                             true -> Self ! on_disk;
                             false -> ok
                         end
                 end),
    MsgIdsChecked = [{GenRef(), MsgId}],
    ok = msg_store_write(MsgIdsChecked, MSCState),
    %% Churn writes/removes of a different message until 'on_disk' arrives.
    ok = msg_store_keep_busy_until_confirm([msg_id_bin(2)], GenRef, MSCState, false),
    {ok, _} = msg_store_remove(MsgIdsChecked, MSCState),
    ok = rabbit_msg_store:client_delete_and_terminate(MSCState),
    passed.
|
|
|
|
|
|
2023-03-10 16:48:43 +08:00
|
|
|
%% Keep the store busy (write_flow + remove of MsgIds under fresh
%% SeqIds) until an 'on_disk' confirm message arrives, cooperating
%% with credit_flow: when blocked, wait for credit instead of writing.
msg_store_keep_busy_until_confirm(MsgIds, GenRef, MSCState, Blocked) ->
    %% NOTE(review): ?MAX_WAIT is defined elsewhere in this file/headers
    %% — presumably a long timeout used while credit-flow blocked.
    After = case Blocked of
                false -> 0;
                true  -> ?MAX_WAIT
            end,
    Recurse = fun () -> msg_store_keep_busy_until_confirm(
                          MsgIds, GenRef, MSCState, credit_flow:blocked()) end,
    receive
        on_disk -> ok;
        {bump_credit, Msg} -> credit_flow:handle_bump_msg(Msg),
                              Recurse()
    after After ->
            MsgIds1 = [{GenRef(), MsgId} || MsgId <- MsgIds],
            ok = msg_store_write_flow(MsgIds1, MSCState),
            {ok, _} = msg_store_remove(MsgIds1, MSCState),
            Recurse()
    end.
|
|
|
|
|
|
2023-03-10 16:48:43 +08:00
|
|
|
%% Write a handful of messages and then delete-and-terminate the
%% client, exercising the 'dying client' fast path for writes.
test_msg_store_client_delete_and_terminate(GenRef) ->
    restart_msg_store_empty(),
    Ids = [{GenRef(), msg_id_bin(N)} || N <- lists:seq(1, 10)],
    ClientRef = rabbit_guid:gen(),
    MSCState = msg_store_client_init(?PERSISTENT_MSG_STORE, ClientRef),
    ok = msg_store_write(Ids, MSCState),
    %% test the 'dying client' fast path for writes
    ok = rabbit_msg_store:client_delete_and_terminate(MSCState),
    passed.
|
|
|
|
|
|
|
|
|
|
%% -------------------------------------------------------------------
|
|
|
|
|
%% Backing queue.
|
|
|
|
|
%% -------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
%% Broker-side group setup: shrink the queue index journal so journal
%% roll-over is hit quickly, saving the original settings in Config so
%% teardown/end_per_group1 can restore them.
setup_backing_queue_test_group(Config) ->
    {ok, OrigMaxJournal} =
        application:get_env(rabbit, queue_index_max_journal_entries),
    application:set_env(rabbit, queue_index_max_journal_entries, 128),
    {ok, OrigEmbedBytes} =
        application:get_env(rabbit, queue_index_embed_msgs_below),
    rabbit_ct_helpers:set_config(Config, [
        {rmq_queue_index_max_journal_entries, OrigMaxJournal},
        {rmq_queue_index_embed_msgs_below, OrigEmbedBytes}
    ]).
|
2017-03-07 00:13:57 +08:00
|
|
|
|
|
|
|
|
%% Broker-side group teardown: restore the saved journal setting and
%% restart the rabbit application.
teardown_backing_queue_test_group(Config) ->
    %% FIXME: Undo all the setup function did.
    application:set_env(rabbit, queue_index_max_journal_entries,
                        ?config(rmq_queue_index_max_journal_entries, Config)),
    %% We will have restarted the message store, and thus changed
    %% the order of the children of rabbit_sup. This will cause
    %% problems if there are subsequent failures - see bug 24262.
    ok = restart_app(),
    Config.
|
|
|
|
|
|
|
|
|
|
%% Testcase entry point: run the queue-index checks on broker node 0.
bq_queue_index(Config) ->
    Result = rabbit_ct_broker_helpers:rpc(
               Config, 0, ?MODULE, bq_queue_index1, [Config]),
    passed = Result.
|
|
|
|
|
|
2021-10-11 17:49:11 +08:00
|
|
|
%% Resolve the queue index module under test from the version the
%% current group configured (crashes if the env var is unset or has
%% an unexpected value — the v1/v2 groups always set it to 1 or 2).
index_mod() ->
    case application:get_env(rabbit, classic_queue_default_version) of
        {ok, 2} -> rabbit_classic_queue_index_v2;
        {ok, 1} -> rabbit_queue_index
    end.
|
|
|
|
|
|
2017-03-07 00:13:57 +08:00
|
|
|
bq_queue_index1(_Config) ->
|
2021-04-07 20:45:43 +08:00
|
|
|
init_queue_index(),
|
2021-10-11 17:49:11 +08:00
|
|
|
IndexMod = index_mod(),
|
|
|
|
|
SegmentSize = IndexMod:next_segment_boundary(0),
|
2017-03-07 00:13:57 +08:00
|
|
|
TwoSegs = SegmentSize + SegmentSize,
|
|
|
|
|
MostOfASegment = trunc(SegmentSize*0.75),
|
|
|
|
|
SeqIdsA = lists:seq(0, MostOfASegment-1),
|
|
|
|
|
SeqIdsB = lists:seq(MostOfASegment, 2*MostOfASegment),
|
|
|
|
|
SeqIdsC = lists:seq(0, trunc(SegmentSize/2)),
|
|
|
|
|
SeqIdsD = lists:seq(0, SegmentSize*4),
|
|
|
|
|
|
2021-11-09 18:41:31 +08:00
|
|
|
VerifyReadWithPublishedFun = case IndexMod of
|
|
|
|
|
rabbit_queue_index -> fun verify_read_with_published_v1/3;
|
|
|
|
|
rabbit_classic_queue_index_v2 -> fun verify_read_with_published_v2/3
|
|
|
|
|
end,
|
|
|
|
|
|
2017-03-07 00:13:57 +08:00
|
|
|
with_empty_test_queue(
|
2017-03-08 22:46:25 +08:00
|
|
|
fun (Qi0, QName) ->
|
2021-10-11 17:49:11 +08:00
|
|
|
{0, 0, Qi1} = IndexMod:bounds(Qi0),
|
2017-03-07 00:13:57 +08:00
|
|
|
{Qi2, SeqIdsMsgIdsA} = queue_index_publish(SeqIdsA, false, Qi1),
|
2021-10-11 17:49:11 +08:00
|
|
|
{0, SegmentSize, Qi3} = IndexMod:bounds(Qi2),
|
|
|
|
|
{ReadA, Qi4} = IndexMod:read(0, SegmentSize, Qi3),
|
2021-11-09 18:41:31 +08:00
|
|
|
ok = VerifyReadWithPublishedFun(false, ReadA,
|
2017-03-07 00:13:57 +08:00
|
|
|
lists:reverse(SeqIdsMsgIdsA)),
|
|
|
|
|
%% should get length back as 0, as all the msgs were transient
|
2017-03-08 22:46:25 +08:00
|
|
|
{0, 0, Qi6} = restart_test_queue(Qi4, QName),
|
2021-10-11 17:49:11 +08:00
|
|
|
{0, 0, Qi7} = IndexMod:bounds(Qi6),
|
2017-03-07 00:13:57 +08:00
|
|
|
{Qi8, SeqIdsMsgIdsB} = queue_index_publish(SeqIdsB, true, Qi7),
|
2021-10-11 17:49:11 +08:00
|
|
|
{0, TwoSegs, Qi9} = IndexMod:bounds(Qi8),
|
|
|
|
|
{ReadB, Qi10} = IndexMod:read(0, SegmentSize, Qi9),
|
2021-11-09 18:41:31 +08:00
|
|
|
ok = VerifyReadWithPublishedFun(true, ReadB,
|
2017-03-07 00:13:57 +08:00
|
|
|
lists:reverse(SeqIdsMsgIdsB)),
|
|
|
|
|
%% should get length back as MostOfASegment
|
|
|
|
|
LenB = length(SeqIdsB),
|
|
|
|
|
BytesB = LenB * 10,
|
2017-03-08 22:46:25 +08:00
|
|
|
{LenB, BytesB, Qi12} = restart_test_queue(Qi10, QName),
|
2021-10-11 17:49:11 +08:00
|
|
|
{0, TwoSegs, Qi13} = IndexMod:bounds(Qi12),
|
|
|
|
|
Qi15 = case IndexMod of
|
|
|
|
|
rabbit_queue_index ->
|
|
|
|
|
Qi14 = IndexMod:deliver(SeqIdsB, Qi13),
|
|
|
|
|
{ReadC, Qi14b} = IndexMod:read(0, SegmentSize, Qi14),
|
2021-11-09 18:41:31 +08:00
|
|
|
ok = VerifyReadWithPublishedFun(true, ReadC,
|
2021-10-11 17:49:11 +08:00
|
|
|
lists:reverse(SeqIdsMsgIdsB)),
|
|
|
|
|
Qi14b;
|
|
|
|
|
_ ->
|
|
|
|
|
Qi13
|
|
|
|
|
end,
|
|
|
|
|
{_DeletedSegments, Qi16} = IndexMod:ack(SeqIdsB, Qi15),
|
|
|
|
|
Qi17 = IndexMod:flush(Qi16),
|
2017-03-07 00:13:57 +08:00
|
|
|
%% Everything will have gone now because #pubs == #acks
|
2021-10-11 17:49:11 +08:00
|
|
|
{0, 0, Qi18} = IndexMod:bounds(Qi17),
|
2017-03-07 00:13:57 +08:00
|
|
|
%% should get length back as 0 because all persistent
|
|
|
|
|
%% msgs have been acked
|
2017-03-08 22:46:25 +08:00
|
|
|
{0, 0, Qi19} = restart_test_queue(Qi18, QName),
|
2017-03-07 00:13:57 +08:00
|
|
|
Qi19
|
|
|
|
|
end),
|
|
|
|
|
|
|
|
|
|
%% These next bits are just to hit the auto deletion of segment files.
|
|
|
|
|
%% First, partials:
|
2021-10-11 18:30:39 +08:00
|
|
|
%% a) partial pub+del+ack, then move to new segment
|
2017-03-07 00:13:57 +08:00
|
|
|
with_empty_test_queue(
|
2017-03-08 22:46:25 +08:00
|
|
|
fun (Qi0, _QName) ->
|
2017-03-07 00:13:57 +08:00
|
|
|
{Qi1, _SeqIdsMsgIdsC} = queue_index_publish(SeqIdsC,
|
|
|
|
|
false, Qi0),
|
2021-10-11 18:30:39 +08:00
|
|
|
Qi2 = case IndexMod of
|
|
|
|
|
rabbit_queue_index -> IndexMod:deliver(SeqIdsC, Qi1);
|
|
|
|
|
_ -> Qi1
|
|
|
|
|
end,
|
|
|
|
|
{_DeletedSegments, Qi3} = IndexMod:ack(SeqIdsC, Qi2),
|
2021-10-11 17:49:11 +08:00
|
|
|
Qi4 = IndexMod:flush(Qi3),
|
2017-03-07 00:13:57 +08:00
|
|
|
{Qi5, _SeqIdsMsgIdsC1} = queue_index_publish([SegmentSize],
|
|
|
|
|
false, Qi4),
|
|
|
|
|
Qi5
|
|
|
|
|
end),
|
|
|
|
|
|
2021-10-11 18:30:39 +08:00
|
|
|
%% b) partial pub+del, then move to new segment, then ack all in old segment
|
2017-03-07 00:13:57 +08:00
|
|
|
with_empty_test_queue(
|
2017-03-08 22:46:25 +08:00
|
|
|
fun (Qi0, _QName) ->
|
2017-03-07 00:13:57 +08:00
|
|
|
{Qi1, _SeqIdsMsgIdsC2} = queue_index_publish(SeqIdsC,
|
|
|
|
|
false, Qi0),
|
2021-10-11 18:30:39 +08:00
|
|
|
Qi2 = case IndexMod of
|
|
|
|
|
rabbit_queue_index -> IndexMod:deliver(SeqIdsC, Qi1);
|
|
|
|
|
_ -> Qi1
|
|
|
|
|
end,
|
2017-03-07 00:13:57 +08:00
|
|
|
{Qi3, _SeqIdsMsgIdsC3} = queue_index_publish([SegmentSize],
|
2021-10-11 18:30:39 +08:00
|
|
|
false, Qi2),
|
2021-10-11 17:49:11 +08:00
|
|
|
{_DeletedSegments, Qi4} = IndexMod:ack(SeqIdsC, Qi3),
|
|
|
|
|
IndexMod:flush(Qi4)
|
2017-03-07 00:13:57 +08:00
|
|
|
end),
|
|
|
|
|
|
2021-08-30 20:10:47 +08:00
|
|
|
%% c) just fill up several segments of all pubs, then +acks
|
2017-03-07 00:13:57 +08:00
|
|
|
with_empty_test_queue(
|
2017-03-08 22:46:25 +08:00
|
|
|
fun (Qi0, _QName) ->
|
2017-03-07 00:13:57 +08:00
|
|
|
{Qi1, _SeqIdsMsgIdsD} = queue_index_publish(SeqIdsD,
|
|
|
|
|
false, Qi0),
|
2021-10-11 18:30:39 +08:00
|
|
|
Qi2 = case IndexMod of
|
|
|
|
|
rabbit_queue_index -> IndexMod:deliver(SeqIdsD, Qi1);
|
|
|
|
|
_ -> Qi1
|
|
|
|
|
end,
|
|
|
|
|
{_DeletedSegments, Qi3} = IndexMod:ack(SeqIdsD, Qi2),
|
2021-10-11 17:49:11 +08:00
|
|
|
IndexMod:flush(Qi3)
|
2017-03-07 00:13:57 +08:00
|
|
|
end),
|
|
|
|
|
|
2021-09-15 21:09:11 +08:00
|
|
|
%% d) get messages in all states to a segment, then flush, then do
|
2021-10-11 18:30:39 +08:00
|
|
|
%% the same again, don't flush and read. CQ v1: this will hit all
|
|
|
|
|
%% possibilities in combining the segment with the journal.
|
2021-09-15 21:09:11 +08:00
|
|
|
with_empty_test_queue(
|
|
|
|
|
fun (Qi0, _QName) ->
|
|
|
|
|
{Qi1, [Seven,Five,Four|_]} = queue_index_publish([0,1,2,4,5,7],
|
|
|
|
|
false, Qi0),
|
2021-10-11 17:49:11 +08:00
|
|
|
Qi2 = case IndexMod of
|
|
|
|
|
rabbit_queue_index -> IndexMod:deliver([0,1,4], Qi1);
|
|
|
|
|
_ -> Qi1
|
|
|
|
|
end,
|
|
|
|
|
{_DeletedSegments3, Qi3} = IndexMod:ack([0], Qi2),
|
|
|
|
|
Qi4 = IndexMod:flush(Qi3),
|
2021-09-15 21:09:11 +08:00
|
|
|
{Qi5, [Eight,Six|_]} = queue_index_publish([3,6,8], false, Qi4),
|
2021-10-11 17:49:11 +08:00
|
|
|
Qi6 = case IndexMod of
|
|
|
|
|
rabbit_queue_index -> IndexMod:deliver([2,3,5,6], Qi5);
|
|
|
|
|
_ -> Qi5
|
|
|
|
|
end,
|
|
|
|
|
{_DeletedSegments7, Qi7} = IndexMod:ack([1,2,3], Qi6),
|
|
|
|
|
{[], Qi8} = IndexMod:read(0, 4, Qi7),
|
|
|
|
|
{ReadD, Qi9} = IndexMod:read(4, 7, Qi8),
|
2021-11-09 18:41:31 +08:00
|
|
|
ok = VerifyReadWithPublishedFun(false, ReadD,
|
2021-09-15 21:09:11 +08:00
|
|
|
[Four, Five, Six]),
|
2021-10-11 17:49:11 +08:00
|
|
|
{ReadE, Qi10} = IndexMod:read(7, 9, Qi9),
|
2021-11-09 18:41:31 +08:00
|
|
|
ok = VerifyReadWithPublishedFun(false, ReadE,
|
2021-09-15 21:09:11 +08:00
|
|
|
[Seven, Eight]),
|
|
|
|
|
Qi10
|
|
|
|
|
end),
|
|
|
|
|
|
2021-10-11 18:30:39 +08:00
|
|
|
%% e) as for (d), but use terminate instead of read, which (CQ v1) will
|
|
|
|
|
%% exercise journal_minus_segment, not segment_plus_journal.
|
2021-09-15 21:09:11 +08:00
|
|
|
with_empty_test_queue(
|
|
|
|
|
fun (Qi0, QName) ->
|
|
|
|
|
{Qi1, _SeqIdsMsgIdsE} = queue_index_publish([0,1,2,4,5,7],
|
|
|
|
|
true, Qi0),
|
2021-10-11 17:49:11 +08:00
|
|
|
Qi2 = case IndexMod of
|
|
|
|
|
rabbit_queue_index -> IndexMod:deliver([0,1,4], Qi1);
|
|
|
|
|
_ -> Qi1
|
|
|
|
|
end,
|
|
|
|
|
{_DeletedSegments3, Qi3} = IndexMod:ack([0], Qi2),
|
2021-09-15 21:09:11 +08:00
|
|
|
{5, 50, Qi4} = restart_test_queue(Qi3, QName),
|
|
|
|
|
{Qi5, _SeqIdsMsgIdsF} = queue_index_publish([3,6,8], true, Qi4),
|
2021-10-11 17:49:11 +08:00
|
|
|
Qi6 = case IndexMod of
|
|
|
|
|
rabbit_queue_index -> IndexMod:deliver([2,3,5,6], Qi5);
|
|
|
|
|
_ -> Qi5
|
|
|
|
|
end,
|
|
|
|
|
{_DeletedSegments7, Qi7} = IndexMod:ack([1,2,3], Qi6),
|
2021-09-15 21:09:11 +08:00
|
|
|
{5, 50, Qi8} = restart_test_queue(Qi7, QName),
|
|
|
|
|
Qi8
|
|
|
|
|
end),
|
2017-03-07 00:13:57 +08:00
|
|
|
|
2017-03-23 02:30:08 +08:00
|
|
|
ok = rabbit_variable_queue:stop(?VHOST),
|
|
|
|
|
{ok, _} = rabbit_variable_queue:start(?VHOST, []),
|
2017-03-07 00:13:57 +08:00
|
|
|
|
|
|
|
|
passed.
|
|
|
|
|
|
2021-11-09 18:41:31 +08:00
|
|
|
verify_read_with_published_v1(_Persistent, [], _) ->
|
|
|
|
|
ok;
|
|
|
|
|
verify_read_with_published_v1(Persistent,
|
|
|
|
|
[{MsgId, SeqId, _Location, _Props, Persistent}|Read],
|
|
|
|
|
[{SeqId, MsgId}|Published]) ->
|
|
|
|
|
verify_read_with_published_v1(Persistent, Read, Published);
|
|
|
|
|
verify_read_with_published_v1(_Persistent, _Read, _Published) ->
|
|
|
|
|
ko.
|
|
|
|
|
|
|
|
|
|
%% The v2 index does not store the MsgId unless required.
|
|
|
|
|
%% We therefore do not check it.
|
|
|
|
|
verify_read_with_published_v2(_Persistent, [], _) ->
|
|
|
|
|
ok;
|
|
|
|
|
verify_read_with_published_v2(Persistent,
|
|
|
|
|
[{_MsgId1, SeqId, _Location, _Props, Persistent}|Read],
|
|
|
|
|
[{SeqId, _MsgId2}|Published]) ->
|
|
|
|
|
verify_read_with_published_v2(Persistent, Read, Published);
|
|
|
|
|
verify_read_with_published_v2(_Persistent, _Read, _Published) ->
|
|
|
|
|
ko.
|
|
|
|
|
|
2017-03-07 00:13:57 +08:00
|
|
|
bq_queue_index_props(Config) ->
|
|
|
|
|
passed = rabbit_ct_broker_helpers:rpc(Config, 0,
|
|
|
|
|
?MODULE, bq_queue_index_props1, [Config]).
|
|
|
|
|
|
|
|
|
|
bq_queue_index_props1(_Config) ->
|
2021-10-11 17:49:11 +08:00
|
|
|
IndexMod = index_mod(),
|
|
|
|
|
|
2017-03-07 00:13:57 +08:00
|
|
|
with_empty_test_queue(
|
2017-03-08 22:46:25 +08:00
|
|
|
fun(Qi0, _QName) ->
|
2017-03-07 00:13:57 +08:00
|
|
|
MsgId = rabbit_guid:gen(),
|
|
|
|
|
Props = #message_properties{expiry=12345, size = 10},
|
2021-10-11 17:49:11 +08:00
|
|
|
Qi1 = IndexMod:publish(
|
2021-08-27 21:57:09 +08:00
|
|
|
MsgId, 0, memory, Props, true, infinity, Qi0),
|
2021-08-30 20:10:47 +08:00
|
|
|
{[{MsgId, 0, _, Props, _}], Qi2} =
|
2021-10-11 17:49:11 +08:00
|
|
|
IndexMod:read(0, 1, Qi1),
|
2017-03-07 00:13:57 +08:00
|
|
|
Qi2
|
|
|
|
|
end),
|
|
|
|
|
|
2017-03-23 02:30:08 +08:00
|
|
|
ok = rabbit_variable_queue:stop(?VHOST),
|
|
|
|
|
{ok, _} = rabbit_variable_queue:start(?VHOST, []),
|
2017-03-07 00:13:57 +08:00
|
|
|
|
|
|
|
|
passed.
|
|
|
|
|
|
2021-11-10 21:58:35 +08:00
|
|
|
v2_delete_segment_file_completely_acked(Config) ->
|
|
|
|
|
passed = rabbit_ct_broker_helpers:rpc(Config, 0,
|
|
|
|
|
?MODULE, v2_delete_segment_file_completely_acked1, [Config]).
|
|
|
|
|
|
|
|
|
|
v2_delete_segment_file_completely_acked1(_Config) ->
|
|
|
|
|
IndexMod = rabbit_classic_queue_index_v2,
|
|
|
|
|
SegmentSize = IndexMod:next_segment_boundary(0),
|
|
|
|
|
SeqIds = lists:seq(0, SegmentSize - 1),
|
|
|
|
|
|
|
|
|
|
with_empty_test_queue(
|
2021-11-10 23:21:40 +08:00
|
|
|
fun (Qi0, _QName) ->
|
2021-11-10 21:58:35 +08:00
|
|
|
%% Publish a full segment file.
|
|
|
|
|
{Qi1, SeqIdsMsgIds} = queue_index_publish(SeqIds, true, Qi0),
|
|
|
|
|
SegmentSize = length(SeqIdsMsgIds),
|
|
|
|
|
{0, SegmentSize, Qi2} = IndexMod:bounds(Qi1),
|
|
|
|
|
%% Confirm that the file exists on disk.
|
|
|
|
|
Path = IndexMod:segment_file(0, Qi2),
|
|
|
|
|
true = filelib:is_file(Path),
|
|
|
|
|
%% Ack the full segment file.
|
|
|
|
|
{[0], Qi3} = IndexMod:ack(SeqIds, Qi2),
|
|
|
|
|
%% Confirm that the file was deleted.
|
|
|
|
|
false = filelib:is_file(Path),
|
|
|
|
|
Qi3
|
|
|
|
|
end),
|
|
|
|
|
|
|
|
|
|
passed.
|
|
|
|
|
|
|
|
|
|
v2_delete_segment_file_partially_acked(Config) ->
|
|
|
|
|
passed = rabbit_ct_broker_helpers:rpc(Config, 0,
|
|
|
|
|
?MODULE, v2_delete_segment_file_partially_acked1, [Config]).
|
|
|
|
|
|
|
|
|
|
v2_delete_segment_file_partially_acked1(_Config) ->
|
|
|
|
|
IndexMod = rabbit_classic_queue_index_v2,
|
|
|
|
|
SegmentSize = IndexMod:next_segment_boundary(0),
|
|
|
|
|
SeqIds = lists:seq(0, SegmentSize div 2),
|
|
|
|
|
SeqIdsLen = length(SeqIds),
|
|
|
|
|
|
|
|
|
|
with_empty_test_queue(
|
2021-11-10 23:21:40 +08:00
|
|
|
fun (Qi0, _QName) ->
|
2021-11-10 21:58:35 +08:00
|
|
|
%% Publish a partial segment file.
|
|
|
|
|
{Qi1, SeqIdsMsgIds} = queue_index_publish(SeqIds, true, Qi0),
|
|
|
|
|
SeqIdsLen = length(SeqIdsMsgIds),
|
|
|
|
|
{0, SegmentSize, Qi2} = IndexMod:bounds(Qi1),
|
|
|
|
|
%% Confirm that the file exists on disk.
|
|
|
|
|
Path = IndexMod:segment_file(0, Qi2),
|
|
|
|
|
true = filelib:is_file(Path),
|
|
|
|
|
%% Ack the partial segment file.
|
|
|
|
|
{[0], Qi3} = IndexMod:ack(SeqIds, Qi2),
|
|
|
|
|
%% Confirm that the file was deleted.
|
|
|
|
|
false = filelib:is_file(Path),
|
|
|
|
|
Qi3
|
|
|
|
|
end),
|
|
|
|
|
|
|
|
|
|
passed.
|
|
|
|
|
|
|
|
|
|
v2_delete_segment_file_partially_acked_with_holes(Config) ->
|
|
|
|
|
passed = rabbit_ct_broker_helpers:rpc(Config, 0,
|
|
|
|
|
?MODULE, v2_delete_segment_file_partially_acked_with_holes1, [Config]).
|
|
|
|
|
|
|
|
|
|
v2_delete_segment_file_partially_acked_with_holes1(_Config) ->
|
|
|
|
|
IndexMod = rabbit_classic_queue_index_v2,
|
|
|
|
|
SegmentSize = IndexMod:next_segment_boundary(0),
|
|
|
|
|
SeqIdsA = lists:seq(0, SegmentSize div 2),
|
|
|
|
|
SeqIdsB = lists:seq(11 + SegmentSize div 2, SegmentSize - 1),
|
|
|
|
|
SeqIdsLen = length(SeqIdsA) + length(SeqIdsB),
|
|
|
|
|
|
|
|
|
|
with_empty_test_queue(
|
2021-11-10 23:21:40 +08:00
|
|
|
fun (Qi0, _QName) ->
|
2021-11-10 21:58:35 +08:00
|
|
|
%% Publish a partial segment file with holes.
|
|
|
|
|
{Qi1, SeqIdsMsgIdsA} = queue_index_publish(SeqIdsA, true, Qi0),
|
|
|
|
|
{Qi2, SeqIdsMsgIdsB} = queue_index_publish(SeqIdsB, true, Qi1),
|
|
|
|
|
SeqIdsLen = length(SeqIdsMsgIdsA) + length(SeqIdsMsgIdsB),
|
|
|
|
|
{0, SegmentSize, Qi3} = IndexMod:bounds(Qi2),
|
|
|
|
|
%% Confirm that the file exists on disk.
|
|
|
|
|
Path = IndexMod:segment_file(0, Qi3),
|
|
|
|
|
true = filelib:is_file(Path),
|
|
|
|
|
%% Ack the partial segment file with holes.
|
|
|
|
|
{[], Qi4} = IndexMod:ack(SeqIdsA, Qi3),
|
|
|
|
|
{[0], Qi5} = IndexMod:ack(SeqIdsB, Qi4),
|
|
|
|
|
%% Confirm that the file was deleted.
|
|
|
|
|
false = filelib:is_file(Path),
|
|
|
|
|
Qi5
|
|
|
|
|
end),
|
|
|
|
|
|
|
|
|
|
passed.
|
|
|
|
|
|
2017-03-07 00:13:57 +08:00
|
|
|
bq_variable_queue_delete_msg_store_files_callback(Config) ->
|
|
|
|
|
passed = rabbit_ct_broker_helpers:rpc(Config, 0,
|
|
|
|
|
?MODULE, bq_variable_queue_delete_msg_store_files_callback1, [Config]).
|
|
|
|
|
|
|
|
|
|
bq_variable_queue_delete_msg_store_files_callback1(Config) ->
|
|
|
|
|
ok = restart_msg_store_empty(),
|
2018-10-11 18:12:39 +08:00
|
|
|
QName0 = queue_name(Config, <<"bq_variable_queue_delete_msg_store_files_callback-q">>),
|
|
|
|
|
{new, Q} = rabbit_amqqueue:declare(QName0, true, false, [], none, <<"acting-user">>),
|
|
|
|
|
QName = amqqueue:get_name(Q),
|
|
|
|
|
QPid = amqqueue:get_pid(Q),
|
2017-03-07 00:13:57 +08:00
|
|
|
Payload = <<0:8388608>>, %% 1MB
|
|
|
|
|
Count = 30,
|
2020-09-29 18:43:24 +08:00
|
|
|
QTState = publish_and_confirm(Q, Payload, Count),
|
2017-03-07 00:13:57 +08:00
|
|
|
|
|
|
|
|
rabbit_amqqueue:set_ram_duration_target(QPid, 0),
|
|
|
|
|
|
|
|
|
|
{ok, Limiter} = rabbit_limiter:start_link(no_id),
|
|
|
|
|
|
|
|
|
|
CountMinusOne = Count - 1,
|
2020-09-29 18:43:24 +08:00
|
|
|
{ok, CountMinusOne, {QName, QPid, _AckTag, false, _Msg}, _} =
|
|
|
|
|
rabbit_amqqueue:basic_get(Q, true, Limiter,
|
Quorum queues (#1706)
* Test queue.declare method with quorum type
[#154472130]
* Cosmetics
[#154472130]
* Start quorum queue
Includes ra as a rabbit dependency
[#154472152]
* Update info and list operations to use quorum queues
Basic implementation. Might need an update when more functionality
is added to the quorum queues.
[#154472152]
* Stop quorum queue
[#154472158]
* Restart quorum queue
[#154472164]
* Introduce UId in ra config to support newer version of ra
Improved ra stop
[#154472158]
* Put data inside VHost specific subdirs
[#154472164]
* Include ra in rabbit deps to support stop_app/start_app command
[#154472164]
* Stop quorum queues in `rabbit_amqqueue:stop/1`
[#154472158]
* Revert creation of fifo ets table inside rabbit
Now supported by ra
[#154472158]
* Filter quorum queues
[#154472158]
* Test restart node with quorum queues
[#154472164]
* Publish to quorum queues
[#154472174]
* Use `ra:restart_node/1`
[#154472164]
* Wait for stats to be published when querying quorum queues
[#154472174]
* Test publish and queue length after restart
[#154472174]
* Consume messages from quorum queues with basic.get
[#154472211]
* Autoack messages from quorum queues on basic.get
[#154472211]
* Fix no_ack meaning
no_ack = true is equivalent to autoack
[#154472211]
* Use data_dir as provided in the config
If we modify the data_dir, ra is not able to delete the data
when a queue is deleted
[#154472158]
* Remove unused code/variables
[#154472158]
* Subscribe to a quorum queue
Supports auto-ack
[#154472215]
* Ack messages consumed from quorum queues
[#154472221]
* Nack messages consumed from quorum queues
[#154804608]
* Use delivery tag as consumer tag for basic.get in quorum queues
[#154472221]
* Support for publisher confirms in quorum queues
[#154472198]
* Integrate with ra_fifo_client
* Clear queue state on queue.delete
[#154472158]
* Fix quorum nack
[#154804608]
* Test redelivery after nack
[#154804608]
* Nack without requeueing
[#154472225]
* Test multiple acks
[#154804208]
* Test multiple nacks
[#154804314]
* Configure dead letter exchange with queue declare
[#155076661]
* Use a per-vhost process to handle dead-lettering
Needs to hold state for quorum queues
[#155401802]
* Implement dead-lettering on nack'ed messages
[#154804620]
* Use queue name as a resource on message delivery
Fixes a previously introduced bug
[#154804608]
* Handle ra events on dead letter process
[#155401802]
* Pass empty queue states to queue delete
Queue deletion on vhost deletion calls directly to rabbit_amqqueue.
Queue states are not available, but we can provide an empty map as
in deletion the states are only needed for cleanup.
* Generate quorum queue stats and events
Consumer delete events are still pending, as depend on basic.cancel
(not implemented yet), ra terminating or ra detecting channel down
[#154472241]
* Ensure quorum mapping entries are available before metric emission
[#154472241]
* Configure data_dir, uses new RABBITMQ_QUORUM_BASE env var
[#154472152]
* Use untracked enqueues when sending wihtout channel
Updated several other calls missed during the quorum implementation
* Revert "Configure data_dir, uses new RABBITMQ_QUORUM_BASE env var"
This reverts commit f2261212410affecb238fcbd1fb451381aee4036.
* Configure data_dir, uses new RABBITMQ_QUORUM_DIR based on mnesia dir
[#154472152]
* Fix get_quorum_state
* Fix calculation of quorum pids
* Move all quorum queues code to its own module
[#154472241]
* Return an error when declaring a quorum queue with an incompatible argument
[#154521696]
* Cleanup of quorum queue state after queue delete
Also fixes some existing problems where the state wasn't properly
stored
[#155458625]
* Revert Revert "Declare a quorum queue using the queue.declare method"
* Remove duplicated state info
[#154472241]
* Start/stop multi-node quorum queue
[#154472231]
[#154472236]
* Restart nodes in a multi-node quorum cluster
[#154472238]
* Test restart and leadership takeover on multiple nodes
[#154472238]
* Wait for leader down after deleting a quorum cluster
It ensures an smooth delete-declare sequence without race
conditions. The test included here detected the situation before
the fix.
[#154472236]
* Populate quorum_mapping from mnesia when not available
Ensures that leader nodes that don't have direct requests can get
the mapping ra name -> queue name
* Cosmetics
* Do not emit core metrics if queue has just been deleted
* Use rabbit_mnesia:is_process_alive
Fixes bug introduced by cac9583e1bb2705be7f06c2ab7f416a75d11c875
[#154472231]
* Only try to report stats if quorum process is alive
* Implement cancel consumer callback
Deletes metrics and sends consumer deleted event
* Remove unnecessary trigger election call
ra:restart_node has already been called during the recovery
* Apply cancellation callback on node hosting the channel
* Cosmetics
* Read new fifo metrics which store directly total, ready and unack
* Implement basic.cancel for quorum queues
* Store leader in amqqueue record, report all in stats
[#154472407]
* Declare quorum queue in mnesia before starting the ra cluster
Record needs to be stored first to update the leader on ra effects
* Revert
* Purge quorum queues
[#154472182]
* Improve use of untracked_enqueue
Choose the persisted leader id instead of just using the id of the
leader at point of creation.
* Store quorum leader in the pid field of amqqueue record
Same as mirrored queues, no real need for an additional field
* Improve recovery
When a ra node has never been started on a rabbit node ensure it doesn't
fail but instead rebuilds the config and starts the node as a new node.
Also fix issue when a quorum queue is declared when one of it's rabbit
nodes are unavailable.
[#157054606]
* Cleanup core metrics after leader change
[#157054473]
* Return an error on sync_queue on quorum queues
[#154472334]
* Return an error on cancel_sync_queue on quorum queues
[#154472337]
* Fix basic_cancel and basic_consume return values
Ensure the quorum queue state is always returned by these functions.
* Restore arity of amqqeueu delete and purge functions.
This avoids some breaking changes in the cli.
* Fix bug returning consumers.
* remove rogue debug log
* Integrate ingress flow control with quorum queues
[#157000583]
* Configure commands soft limit
[#157000583]
* Support quorum pids on rabbit_mnesia:is_process_alive
* Publish consumers metric for quorum queues
* Whitelist quorum directory in is_virgin_node
Allow the quorum directoy to exist without affecting the status of the
Rabbit node.
* Delete queue_metrics on leader change.
Also run the become_leader handler in a separate process to avoid
blocking.
[#157424225]
* Report cluster status in quorum queue infos. New per node status command.
Related to
[#157146500]
* Remove quorum_mapping table
As we can store the full queue name resource as the cluster id of the
ra_fifo_client state we can avoid needed the quorum_mapping table.
* Fix xref issue
* Provide quorum members information in stats
[#157146500]
* fix unused variable
* quorum queue multiple declare handling
Extend rabbit_amqqueue:internal_declare/2 to indicate if the queue
record was created or exisiting. From this we can then provide a code
path that should handle concurrent queue declares of the same quorum
queue.
* Return an error when declaring exclusive/auto-delete quorum queue
[#157472160]
* Restore lost changes
from 79c9bd201e1eac006a42bd162e7c86df96496629
* recover another part of commit
* fixup cherry pick
* Ra io/file metrics handler and stats publishing
[#157193081]
* Revert "Ra io/file metrics handler and stats publishing"
This reverts commit 05d15c786540322583fc655709825db215b70952.
* Do not issue confirms on node down for quorum queues.
Only a ra_event should be used to issue positive confirms for a quorum
queue.
* Ra stats publishing
[#157193081]
* Pick consumer utilisation from ra data
[#155402726]
* Handle error when deleting a quorum queue and all nodes are already down
This is in fact a successful deletion as all raft nodes are already 'stopped'
[#158656366]
* Return an error when declaring non-durable quorum queues
[#158656454]
* Rename dirty_query to committed_query
* Delete stats on leader node
[#158661152]
* Give full list of nodes to fifo client
* Handle timeout in quorum basic_get
* Fix unused variable error
* Handle timeout in basic get
[#158656366]
* Force GC after purge
[#158789389]
* Increase `ra:delete_cluster` timeout to 120s
* Revert "Force GC after purge"
This reverts commit 5c98bf22994eb39004760799d3a2c5041d16e9d4.
* Add quorum member command
[#157481599]
* Delete quorum member command
[#157481599]
* Implement basic.recover for quorum queues
[#157597411]
* Change concumer utilisation
to use the new ra_fifo table and api.
* Set max quorum queue size limit
Defaults to 7, can be configured per queue on queue.declare
Nodes are selected randomly from the list of nodes, but the one
that is executing the queue.declare command
[#159338081]
* remove potentially unrelated changes to rabbit_networking
* Move ra_fifo to rabbit
Copied ra_fifo to rabbit and renamed it rabbit_fifo.
[#159338031]
* rabbit_fifo tidy up
* rabbit_fifo tidy up
* rabbit_fifo: customer -> consumer rename
* Move ra_fifo tests
[#159338031]
* Tweak quorum_queue defaults
* quorum_queue test reliability
* Optimise quorum_queue test suite.
By only starting a rabbit cluster per group rather than test.
[#160612638]
* Renamings in line with ra API changes
* rabbit_fifo fixes
* Update with ra API changes
Ra has consolidated and simplified it's api. These changes update to
confirm to that.
* Update rabbit_fifo with latest ra changes
* Clean up out of date comment
* Return map of states
* Add test case for basic.get on an empty queue
Before the previous patch, any subsequent basic.get would crash as
the map of states had been replaced by a single state.
* Clarify use of deliver tags on record_sent
* Clean up queues after testcase
* Remove erlang monitor of quorum queues in rabbit_channel
The eol event can be used instead
* Use macros to make clearer distinctions between quorum/classic queues
Cosmetic only
* Erase queue stats on 'eol' event
* Update to follow Ra's cluster_id -> cluster_name rename.
* Rename qourum-cluster-size
To quorum-initial-group-size
* Issue confirms on quorum queue eol
Also avoid creating quorum queue session state on queue operation
methods.
* Only classic queues should be notified on channel down
* Quorum queues do not support global qos
Exit with protocol error of a basic.consume for a quorum queue is issued
on a channel with global qos enabled.
* unused variable name
* Refactoring
Strictly enfornce that channels do not monitor quorum queues.
* Refactor foreach_per_queue in the channel.
To make it call classic and quorum queues the same way.
[#161314899]
* rename function
* Query classic and quorum queues separately
during recovery as they should not be marked as stopped during failed
vhost recovery.
* Remove force_event_refresh function
As the only user of this function, the management API no longer requires
it.
* fix errors
* Remove created_at from amqqueue record
[#161343680]
* rabbit_fifo: support AMQP 1.0 consumer credit
This change implements an alternative consumer credit mechanism similar
to AMQP 1.0 link credit where the credit (prefetch) isn't automatically
topped up as deliveries are settled and instead needs to be manually
increased using a credit command. This is to be integrated with the AMQP
1.0 plugin.
[#161256187]
* Add basic.credit support for quorum queues.
Added support for AMQP 1.0 transfer flow control.
[#161256187]
* Make quorum queue recover idempotent
So that if a vhost crashes and runs the recover steps it doesn't fail
because ra servers are still running.
[#161343651]
* Add tests for vhost deletion
To ensure quorum queues are cleaned up on vhost removal.
Also fix xref issue.
[#161343673]
* remove unused clause
* always return latest value of queue
* Add rabbitmq-queues scripts. Remove ra config from .bat scripts.
* Return error if trying to get quorum status of a classic queue.
2018-10-29 17:47:29 +08:00
|
|
|
<<"bq_variable_queue_delete_msg_store_files_callback1">>,
|
2020-09-29 18:43:24 +08:00
|
|
|
QTState),
|
2017-03-07 00:13:57 +08:00
|
|
|
{ok, CountMinusOne} = rabbit_amqqueue:purge(Q),
|
|
|
|
|
|
|
|
|
|
%% give the queue a second to receive the close_fds callback msg
|
|
|
|
|
timer:sleep(1000),
|
|
|
|
|
|
2017-03-09 23:05:37 +08:00
|
|
|
rabbit_amqqueue:delete(Q, false, false, <<"acting-user">>),
|
2017-03-07 00:13:57 +08:00
|
|
|
passed.
|
|
|
|
|
|
|
|
|
|
bq_queue_recover(Config) ->
|
|
|
|
|
passed = rabbit_ct_broker_helpers:rpc(Config, 0,
|
|
|
|
|
?MODULE, bq_queue_recover1, [Config]).
|
|
|
|
|
|
|
|
|
|
bq_queue_recover1(Config) ->
|
2021-04-07 20:45:43 +08:00
|
|
|
init_queue_index(),
|
2021-10-11 17:49:11 +08:00
|
|
|
IndexMod = index_mod(),
|
|
|
|
|
Count = 2 * IndexMod:next_segment_boundary(0),
|
2018-10-11 18:12:39 +08:00
|
|
|
QName0 = queue_name(Config, <<"bq_queue_recover-q">>),
|
|
|
|
|
{new, Q} = rabbit_amqqueue:declare(QName0, true, false, [], none, <<"acting-user">>),
|
|
|
|
|
QName = amqqueue:get_name(Q),
|
|
|
|
|
QPid = amqqueue:get_pid(Q),
|
2020-09-29 18:43:24 +08:00
|
|
|
QT = publish_and_confirm(Q, <<>>, Count),
|
2019-01-24 21:04:42 +08:00
|
|
|
SupPid = get_queue_sup_pid(Q),
|
2017-03-07 00:13:57 +08:00
|
|
|
true = is_pid(SupPid),
|
|
|
|
|
exit(SupPid, kill),
|
|
|
|
|
exit(QPid, kill),
|
|
|
|
|
MRef = erlang:monitor(process, QPid),
|
|
|
|
|
receive {'DOWN', MRef, process, QPid, _Info} -> ok
|
|
|
|
|
after 10000 -> exit(timeout_waiting_for_queue_death)
|
|
|
|
|
end,
|
2017-03-23 02:30:08 +08:00
|
|
|
rabbit_amqqueue:stop(?VHOST),
|
2020-09-29 18:43:24 +08:00
|
|
|
{Recovered, []} = rabbit_amqqueue:recover(?VHOST),
|
2019-02-13 06:26:47 +08:00
|
|
|
rabbit_amqqueue:start(Recovered),
|
2017-03-07 00:13:57 +08:00
|
|
|
{ok, Limiter} = rabbit_limiter:start_link(no_id),
|
|
|
|
|
rabbit_amqqueue:with_or_die(
|
|
|
|
|
QName,
|
2018-10-11 18:12:39 +08:00
|
|
|
fun (Q1) when ?is_amqqueue(Q1) ->
|
|
|
|
|
QPid1 = amqqueue:get_pid(Q1),
|
2017-03-07 00:13:57 +08:00
|
|
|
CountMinusOne = Count - 1,
|
2020-09-29 18:43:24 +08:00
|
|
|
{ok, CountMinusOne, {QName, QPid1, _AckTag, true, _Msg}, _} =
|
|
|
|
|
rabbit_amqqueue:basic_get(Q1, false, Limiter,
|
|
|
|
|
<<"bq_queue_recover1">>, QT),
|
2017-03-07 00:13:57 +08:00
|
|
|
exit(QPid1, shutdown),
|
|
|
|
|
VQ1 = variable_queue_init(Q, true),
|
|
|
|
|
{{_Msg1, true, _AckTag1}, VQ2} =
|
|
|
|
|
rabbit_variable_queue:fetch(true, VQ1),
|
|
|
|
|
CountMinusOne = rabbit_variable_queue:len(VQ2),
|
|
|
|
|
_VQ3 = rabbit_variable_queue:delete_and_terminate(shutdown, VQ2),
|
2023-03-17 16:08:15 +08:00
|
|
|
ok = rabbit_amqqueue:internal_delete(Q1, <<"acting-user">>)
|
2017-03-07 00:13:57 +08:00
|
|
|
end),
|
|
|
|
|
passed.
|
|
|
|
|
|
2019-01-24 21:04:42 +08:00
|
|
|
%% Return the PID of the given queue's supervisor.
|
2018-10-11 18:12:39 +08:00
|
|
|
get_queue_sup_pid(Q) when ?is_amqqueue(Q) ->
|
|
|
|
|
QName = amqqueue:get_name(Q),
|
|
|
|
|
QPid = amqqueue:get_pid(Q),
|
2019-01-24 21:04:42 +08:00
|
|
|
VHost = QName#resource.virtual_host,
|
|
|
|
|
{ok, AmqSup} = rabbit_amqqueue_sup_sup:find_for_vhost(VHost, node(QPid)),
|
|
|
|
|
Sups = supervisor:which_children(AmqSup),
|
|
|
|
|
get_queue_sup_pid(Sups, QPid).
|
|
|
|
|
|
|
|
|
|
get_queue_sup_pid([{_, SupPid, _, _} | Rest], QueuePid) ->
|
|
|
|
|
WorkerPids = [Pid || {_, Pid, _, _} <- supervisor:which_children(SupPid)],
|
|
|
|
|
case lists:member(QueuePid, WorkerPids) of
|
|
|
|
|
true -> SupPid;
|
|
|
|
|
false -> get_queue_sup_pid(Rest, QueuePid)
|
|
|
|
|
end;
|
|
|
|
|
get_queue_sup_pid([], _QueuePid) ->
|
|
|
|
|
undefined.
|
|
|
|
|
|
2017-03-07 00:13:57 +08:00
|
|
|
variable_queue_partial_segments_delta_thing(Config) ->
|
|
|
|
|
passed = rabbit_ct_broker_helpers:rpc(Config, 0,
|
|
|
|
|
?MODULE, variable_queue_partial_segments_delta_thing1, [Config]).
|
|
|
|
|
|
|
|
|
|
variable_queue_partial_segments_delta_thing1(Config) ->
|
|
|
|
|
with_fresh_variable_queue(
|
2017-03-08 22:46:25 +08:00
|
|
|
fun variable_queue_partial_segments_delta_thing2/2,
|
2017-03-07 00:13:57 +08:00
|
|
|
?config(variable_queue_type, Config)).
|
|
|
|
|
|
2017-03-08 22:46:25 +08:00
|
|
|
variable_queue_partial_segments_delta_thing2(VQ0, _QName) ->
|
2021-10-11 17:49:11 +08:00
|
|
|
IndexMod = index_mod(),
|
|
|
|
|
SegmentSize = IndexMod:next_segment_boundary(0),
|
2017-03-07 00:13:57 +08:00
|
|
|
HalfSegment = SegmentSize div 2,
|
|
|
|
|
OneAndAHalfSegment = SegmentSize + HalfSegment,
|
|
|
|
|
VQ1 = variable_queue_publish(true, OneAndAHalfSegment, VQ0),
|
|
|
|
|
{_Duration, VQ2} = rabbit_variable_queue:ram_duration(VQ1),
|
2022-09-06 17:51:34 +08:00
|
|
|
VQ3 = check_variable_queue_status(
|
|
|
|
|
variable_queue_set_ram_duration_target(0, VQ2),
|
|
|
|
|
%% We only have one message in memory because the amount in memory
|
|
|
|
|
%% depends on the consume rate, which is nil in this test.
|
|
|
|
|
[{delta, {delta, 1, OneAndAHalfSegment - 1, 0, OneAndAHalfSegment}},
|
|
|
|
|
{q3, 1},
|
|
|
|
|
{len, OneAndAHalfSegment}]),
|
2017-03-07 00:13:57 +08:00
|
|
|
VQ4 = variable_queue_set_ram_duration_target(infinity, VQ3),
|
2022-09-06 17:51:34 +08:00
|
|
|
VQ5 = check_variable_queue_status(
|
|
|
|
|
variable_queue_publish(true, 1, VQ4),
|
|
|
|
|
%% one alpha, but it's in the same segment as the deltas
|
|
|
|
|
%% @todo That's wrong now! v1/v2
|
|
|
|
|
[{delta, {delta, 1, OneAndAHalfSegment, 0, OneAndAHalfSegment + 1}},
|
|
|
|
|
{q3, 1},
|
|
|
|
|
{len, OneAndAHalfSegment + 1}]),
|
2017-03-07 00:13:57 +08:00
|
|
|
{VQ6, AckTags} = variable_queue_fetch(SegmentSize, true, false,
|
|
|
|
|
SegmentSize + HalfSegment + 1, VQ5),
|
2022-09-06 17:51:34 +08:00
|
|
|
VQ7 = check_variable_queue_status(
|
|
|
|
|
VQ6,
|
|
|
|
|
%% We only read from delta up to the end of the segment, so
|
|
|
|
|
%% after fetching exactly one segment, we should have no
|
|
|
|
|
%% messages in memory.
|
|
|
|
|
[{delta, {delta, SegmentSize, HalfSegment + 1, 0, OneAndAHalfSegment + 1}},
|
|
|
|
|
{q3, 0},
|
|
|
|
|
{len, HalfSegment + 1}]),
|
2017-03-07 00:13:57 +08:00
|
|
|
{VQ8, AckTags1} = variable_queue_fetch(HalfSegment + 1, true, false,
|
|
|
|
|
HalfSegment + 1, VQ7),
|
|
|
|
|
{_Guids, VQ9} = rabbit_variable_queue:ack(AckTags ++ AckTags1, VQ8),
|
|
|
|
|
%% should be empty now
|
|
|
|
|
{empty, VQ10} = rabbit_variable_queue:fetch(true, VQ9),
|
|
|
|
|
VQ10.
|
|
|
|
|
|
|
|
|
|
%% CT entry point: runs the actual test body on broker node 0 via RPC.
variable_queue_all_the_bits_not_covered_elsewhere_A(Config) ->
    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
      ?MODULE, variable_queue_all_the_bits_not_covered_elsewhere_A1, [Config]).

%% Runs on the broker node: sets up a fresh variable queue of the
%% configured type and hands it to the test body.
variable_queue_all_the_bits_not_covered_elsewhere_A1(Config) ->
    with_fresh_variable_queue(
      fun variable_queue_all_the_bits_not_covered_elsewhere_A2/2,
      ?config(variable_queue_type, Config)).

%% Publishes two segments' worth of persistent then transient messages,
%% fetches them all, terminates the queue and re-initialises it from the
%% (non-clean-shutdown) recovery path, then exercises fetch/publish again
%% on the recovered queue.
variable_queue_all_the_bits_not_covered_elsewhere_A2(VQ0, QName) ->
    IndexMod = index_mod(),
    Count = 2 * IndexMod:next_segment_boundary(0),
    VQ1 = variable_queue_publish(true, Count, VQ0),
    VQ2 = variable_queue_publish(false, Count, VQ1),
    %% Force messages out of RAM.
    VQ3 = variable_queue_set_ram_duration_target(0, VQ2),
    {VQ4, _AckTags} = variable_queue_fetch(Count, true, false,
                                           Count + Count, VQ3),
    {VQ5, _AckTags1} = variable_queue_fetch(Count, false, false,
                                            Count, VQ4),
    _VQ6 = rabbit_variable_queue:terminate(shutdown, VQ5),
    %% Recover as from a non-clean shutdown (second argument 'true').
    VQ7 = variable_queue_init(test_amqqueue(QName, true), true),
    %% The first recovered message must be flagged as redelivered.
    {{_Msg1, true, _AckTag1}, VQ8} = rabbit_variable_queue:fetch(true, VQ7),
    Count1 = rabbit_variable_queue:len(VQ8),
    VQ9 = variable_queue_publish(false, 1, VQ8),
    VQ10 = variable_queue_set_ram_duration_target(0, VQ9),
    {VQ11, _AckTags2} = variable_queue_fetch(Count1, true, true, Count, VQ10),
    {VQ12, _AckTags3} = variable_queue_fetch(1, false, false, 1, VQ11),
    VQ12.
|
|
|
|
|
|
|
|
|
|
%% CT entry point: runs the actual test body on broker node 0 via RPC.
variable_queue_all_the_bits_not_covered_elsewhere_B(Config) ->
    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
      ?MODULE, variable_queue_all_the_bits_not_covered_elsewhere_B1, [Config]).

%% Runs on the broker node with a fresh variable queue.
variable_queue_all_the_bits_not_covered_elsewhere_B1(Config) ->
    with_fresh_variable_queue(
      fun variable_queue_all_the_bits_not_covered_elsewhere_B2/2,
      ?config(variable_queue_type, Config)).

%% Publishes transient messages with RAM target 0, fetches and requeues
%% some, then terminates and re-initialises: transient messages must not
%% survive the restart, so the recovered queue is empty.
variable_queue_all_the_bits_not_covered_elsewhere_B2(VQ0, QName) ->
    VQ1 = variable_queue_set_ram_duration_target(0, VQ0),
    VQ2 = variable_queue_publish(false, 4, VQ1),
    {VQ3, AckTags} = variable_queue_fetch(2, false, false, 4, VQ2),
    {_Guids, VQ4} =
        rabbit_variable_queue:requeue(AckTags, VQ3),
    VQ5 = rabbit_variable_queue:timeout(VQ4),
    _VQ6 = rabbit_variable_queue:terminate(shutdown, VQ5),
    VQ7 = variable_queue_init(test_amqqueue(QName, true), true),
    %% Transient messages are dropped on restart: queue must be empty.
    {empty, VQ8} = rabbit_variable_queue:fetch(false, VQ7),
    VQ8.
|
|
|
|
|
|
|
|
|
|
%% CT entry point: runs the actual test body on broker node 0 via RPC.
variable_queue_drop(Config) ->
    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
      ?MODULE, variable_queue_drop1, [Config]).

%% Runs on the broker node with a fresh variable queue.
variable_queue_drop1(Config) ->
    with_fresh_variable_queue(
      fun variable_queue_drop2/2,
      ?config(variable_queue_type, Config)).

%% Exercises drop/2 with and without AckRequired, plus requeue of a
%% dropped-but-acked message.
variable_queue_drop2(VQ0, _QName) ->
    %% start by sending a message
    VQ1 = variable_queue_publish(false, 1, VQ0),
    %% drop message with AckRequired = true
    {{MsgId, AckTag}, VQ2} = rabbit_variable_queue:drop(true, VQ1),
    true = rabbit_variable_queue:is_empty(VQ2),
    %% BUGFIX: previously compared against the misspelled atom
    %% 'undefinded', which made this assertion vacuously true.
    true = AckTag =/= undefined,
    %% drop again -> empty
    {empty, VQ3} = rabbit_variable_queue:drop(false, VQ2),
    %% requeue
    {[MsgId], VQ4} = rabbit_variable_queue:requeue([AckTag], VQ3),
    %% drop message with AckRequired = false: no ack tag is returned
    {{MsgId, undefined}, VQ5} = rabbit_variable_queue:drop(false, VQ4),
    true = rabbit_variable_queue:is_empty(VQ5),
    VQ5.
|
|
|
|
|
|
|
|
|
|
%% CT entry point: runs the actual test body on broker node 0 via RPC.
variable_queue_fold_msg_on_disk(Config) ->
    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
      ?MODULE, variable_queue_fold_msg_on_disk1, [Config]).

%% Runs on the broker node with a fresh variable queue.
variable_queue_fold_msg_on_disk1(Config) ->
    with_fresh_variable_queue(
      fun variable_queue_fold_msg_on_disk2/2,
      ?config(variable_queue_type, Config)).

%% Publishes one persistent message, fetches it, then ackfolds over the
%% pending ack — covering the fold path for a message stored on disk.
variable_queue_fold_msg_on_disk2(VQ0, _QName) ->
    VQ1 = variable_queue_publish(true, 1, VQ0),
    {VQ2, AckTags} = variable_queue_fetch(1, true, false, 1, VQ1),
    {ok, VQ3} = rabbit_variable_queue:ackfold(fun (_M, _A, ok) -> ok end,
                                              ok, VQ2, AckTags),
    VQ3.
|
|
|
|
|
|
|
|
|
|
%% CT entry point: runs the actual test body on broker node 0 via RPC.
variable_queue_dropfetchwhile(Config) ->
    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
      ?MODULE, variable_queue_dropfetchwhile1, [Config]).

%% Runs on the broker node with a fresh variable queue.
variable_queue_dropfetchwhile1(Config) ->
    with_fresh_variable_queue(
      fun variable_queue_dropfetchwhile2/2,
      ?config(variable_queue_type, Config)).

%% Exercises fetchwhile/4 and dropwhile/2 against messages carrying
%% sequential expiry properties 1..Count.
variable_queue_dropfetchwhile2(VQ0, _QName) ->
    Count = 10,

    %% add messages with sequential expiry
    VQ1 = variable_queue_publish(
            false, 1, Count,
            fun (N, Props) -> Props#message_properties{expiry = N} end,
            fun erlang:term_to_binary/1, VQ0),

    %% fetch the first 5 messages; the returned properties belong to the
    %% first message that failed the predicate (expiry = 6)
    {#message_properties{expiry = 6}, {Msgs, AckTags}, VQ2} =
        rabbit_variable_queue:fetchwhile(
          fun (#message_properties{expiry = Expiry}) -> Expiry =< 5 end,
          fun (Msg, AckTag, {MsgAcc, AckAcc}) ->
                  {[Msg | MsgAcc], [AckTag | AckAcc]}
          end, {[], []}, VQ1),
    true = lists:seq(1, 5) == [msg2int(M) || M <- lists:reverse(Msgs)],

    %% requeue them
    {_MsgIds, VQ3} = rabbit_variable_queue:requeue(AckTags, VQ2),

    %% drop the first 5 messages
    {#message_properties{expiry = 6}, VQ4} =
        rabbit_variable_queue:dropwhile(
          fun (#message_properties {expiry = Expiry}) -> Expiry =< 5 end, VQ3),

    %% fetch the remaining messages (6..Count), checking their order
    VQ5 = lists:foldl(fun (N, VQN) ->
                              {{Msg, _, _}, VQM} =
                                  rabbit_variable_queue:fetch(false, VQN),
                              true = msg2int(Msg) == N,
                              VQM
                      end, VQ4, lists:seq(6, Count)),

    %% should be empty now
    true = rabbit_variable_queue:is_empty(VQ5),

    VQ5.
|
|
|
|
|
|
2021-08-10 23:10:00 +08:00
|
|
|
%% CT entry point: runs the actual test body on broker node 0 via RPC.
variable_queue_dropwhile_restart(Config) ->
    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
      ?MODULE, variable_queue_dropwhile_restart1, [Config]).

%% Runs on the broker node with a fresh variable queue.
variable_queue_dropwhile_restart1(Config) ->
    with_fresh_variable_queue(
      fun variable_queue_dropwhile_restart2/2,
      ?config(variable_queue_type, Config)).

%% Drops the head of a large persistent queue, restarts the queue using
%% its recovery terms, and checks only the non-dropped messages survive.
variable_queue_dropwhile_restart2(VQ0, QName) ->
    Count = 10000,

    %% add messages with sequential expiry
    VQ1 = variable_queue_publish(
            true, 1, Count,
            fun (N, Props) -> Props#message_properties{expiry = N} end,
            fun erlang:term_to_binary/1, VQ0),

    %% drop the first 5 messages
    {#message_properties{expiry = 6}, VQ2} =
        rabbit_variable_queue:dropwhile(
          fun (#message_properties {expiry = Expiry}) -> Expiry =< 5 end, VQ1),

    %% terminate cleanly and re-init from the persisted recovery terms
    _VQ3 = rabbit_variable_queue:terminate(shutdown, VQ2),
    Terms = variable_queue_read_terms(QName),
    VQ4 = variable_queue_init(test_amqqueue(QName, true), Terms),

    %% fetch the remaining messages (6..Count)
    VQ5 = lists:foldl(fun (_, VQN) ->
                              {{_, _, _}, VQM} =
                                  rabbit_variable_queue:fetch(false, VQN),
                              VQM
                      end, VQ4, lists:seq(6, Count)),

    %% should be empty now
    true = rabbit_variable_queue:is_empty(VQ5),

    VQ5.
|
|
|
|
|
|
|
|
|
|
%% CT entry point: runs the actual test body on broker node 0 via RPC.
variable_queue_dropwhile_sync_restart(Config) ->
    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
      ?MODULE, variable_queue_dropwhile_sync_restart1, [Config]).

%% Runs on the broker node with a fresh variable queue.
variable_queue_dropwhile_sync_restart1(Config) ->
    with_fresh_variable_queue(
      fun variable_queue_dropwhile_sync_restart2/2,
      ?config(variable_queue_type, Config)).

%% Same as variable_queue_dropwhile_restart2/2, but additionally forces a
%% queue index sync (via handle_pre_hibernate) before the restart.
variable_queue_dropwhile_sync_restart2(VQ0, QName) ->
    Count = 10000,

    %% add messages with sequential expiry
    VQ1 = variable_queue_publish(
            true, 1, Count,
            fun (N, Props) -> Props#message_properties{expiry = N} end,
            fun erlang:term_to_binary/1, VQ0),

    %% drop the first 5 messages
    {#message_properties{expiry = 6}, VQ2} =
        rabbit_variable_queue:dropwhile(
          fun (#message_properties {expiry = Expiry}) -> Expiry =< 5 end, VQ1),

    %% Queue index sync.
    VQ2b = rabbit_variable_queue:handle_pre_hibernate(VQ2),

    %% terminate cleanly and re-init from the persisted recovery terms
    _VQ3 = rabbit_variable_queue:terminate(shutdown, VQ2b),
    Terms = variable_queue_read_terms(QName),
    VQ4 = variable_queue_init(test_amqqueue(QName, true), Terms),

    %% fetch the remaining messages (6..Count)
    VQ5 = lists:foldl(fun (_, VQN) ->
                              {{_, _, _}, VQM} =
                                  rabbit_variable_queue:fetch(false, VQN),
                              VQM
                      end, VQ4, lists:seq(6, Count)),

    %% should be empty now
    true = rabbit_variable_queue:is_empty(VQ5),

    VQ5.
|
|
|
|
|
|
2017-03-07 00:13:57 +08:00
|
|
|
%% CT entry point: runs the actual test body on broker node 0 via RPC.
variable_queue_dropwhile_varying_ram_duration(Config) ->
    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
      ?MODULE, variable_queue_dropwhile_varying_ram_duration1, [Config]).

%% Runs on the broker node with a fresh variable queue.
variable_queue_dropwhile_varying_ram_duration1(Config) ->
    with_fresh_variable_queue(
      fun variable_queue_dropwhile_varying_ram_duration2/2,
      ?config(variable_queue_type, Config)).

%% Runs dropwhile (with an always-false predicate, so nothing is dropped)
%% under both zero and infinite RAM duration targets.
variable_queue_dropwhile_varying_ram_duration2(VQ0, _QName) ->
    test_dropfetchwhile_varying_ram_duration(
      fun (VQ1) ->
              {_, VQ2} = rabbit_variable_queue:dropwhile(
                           fun (_) -> false end, VQ1),
              VQ2
      end, VQ0).
|
|
|
|
|
|
|
|
|
|
%% CT entry point: runs the actual test body on broker node 0 via RPC.
variable_queue_fetchwhile_varying_ram_duration(Config) ->
    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
      ?MODULE, variable_queue_fetchwhile_varying_ram_duration1, [Config]).

%% Runs on the broker node with a fresh variable queue.
variable_queue_fetchwhile_varying_ram_duration1(Config) ->
    with_fresh_variable_queue(
      fun variable_queue_fetchwhile_varying_ram_duration2/2,
      ?config(variable_queue_type, Config)).

%% Runs fetchwhile (with an always-false predicate, so nothing is
%% fetched) under both zero and infinite RAM duration targets.
variable_queue_fetchwhile_varying_ram_duration2(VQ0, _QName) ->
    test_dropfetchwhile_varying_ram_duration(
      fun (VQ1) ->
              {_, ok, VQ2} = rabbit_variable_queue:fetchwhile(
                               fun (_) -> false end,
                               fun (_, _, A) -> A end,
                               ok, VQ1),
              VQ2
      end, VQ0).
|
|
|
|
|
|
|
|
|
|
%% Applies Fun to the queue twice: once with the RAM duration target set
%% to 0 (everything pushed to disk) and once with it set to infinity
%% (everything allowed in memory), publishing one transient message
%% before each application. Returns the final queue state.
test_dropfetchwhile_varying_ram_duration(Fun, VQ0) ->
    Steps = [fun (VQ) -> variable_queue_publish(false, 1, VQ) end,
             fun (VQ) -> variable_queue_set_ram_duration_target(0, VQ) end,
             Fun,
             fun (VQ) -> variable_queue_set_ram_duration_target(infinity, VQ) end,
             fun (VQ) -> variable_queue_publish(false, 1, VQ) end,
             Fun],
    lists:foldl(fun (Step, VQ) -> Step(VQ) end, VQ0, Steps).
|
|
|
|
|
|
|
|
|
|
%% CT entry point: runs the actual test body on broker node 0 via RPC.
variable_queue_ack_limiting(Config) ->
    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
      ?MODULE, variable_queue_ack_limiting1, [Config]).

%% Runs on the broker node with a fresh variable queue.
variable_queue_ack_limiting1(Config) ->
    with_fresh_variable_queue(
      fun variable_queue_ack_limiting2/2,
      ?config(variable_queue_type, Config)).

%% Publishes a batch, churns publish/fetch/ack cycles, then fetches half
%% and checks the remaining queue length.
variable_queue_ack_limiting2(VQ0, _Config) ->
    %% start by sending in a bunch of messages
    Len = 1024,
    VQ1 = variable_queue_publish(false, Len, VQ0),

    %% squeeze and relax queue
    Churn = Len div 32,
    VQ2 = publish_fetch_and_ack(Churn, Len, VQ1),

    %% update stats for duration
    {_Duration, VQ3} = rabbit_variable_queue:ram_duration(VQ2),

    %% fetch half the messages
    {VQ4, _AckTags} = variable_queue_fetch(Len div 2, false, false, Len, VQ3),

    %% We only check the length anymore because
    %% that's the only predictable stats we got.
    VQ5 = check_variable_queue_status(VQ4, [{len, Len div 2}]),

    VQ6 = variable_queue_set_ram_duration_target(0, VQ5),

    VQ6.
|
|
|
|
|
|
|
|
|
|
%% CT entry point: runs the actual test body on broker node 0 via RPC.
variable_queue_purge(Config) ->
    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
      ?MODULE, variable_queue_purge1, [Config]).

%% Runs on the broker node with a fresh variable queue.
variable_queue_purge1(Config) ->
    with_fresh_variable_queue(
      fun variable_queue_purge2/2,
      ?config(variable_queue_type, Config)).

%% Checks purge/1 and purge_acks/1 against len (ready messages) and
%% depth (ready + pending acks) at each step.
variable_queue_purge2(VQ0, _Config) ->
    LenDepth = fun (VQ) ->
                       {rabbit_variable_queue:len(VQ),
                        rabbit_variable_queue:depth(VQ)}
               end,
    VQ1 = variable_queue_publish(false, 10, VQ0),
    {VQ2, Acks} = variable_queue_fetch(6, false, false, 10, VQ1),
    %% purge removes the 4 ready messages; the 6 pending acks remain
    {4, VQ3} = rabbit_variable_queue:purge(VQ2),
    {0, 6} = LenDepth(VQ3),
    {_, VQ4} = rabbit_variable_queue:requeue(lists:sublist(Acks, 2), VQ3),
    {2, 6} = LenDepth(VQ4),
    %% purge_acks discards the remaining pending acks, leaving only the
    %% 2 requeued (ready) messages
    VQ5 = rabbit_variable_queue:purge_acks(VQ4),
    {2, 2} = LenDepth(VQ5),
    VQ5.
|
|
|
|
|
|
|
|
|
|
%% CT entry point: runs the actual test body on broker node 0 via RPC.
variable_queue_requeue(Config) ->
    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
      ?MODULE, variable_queue_requeue1, [Config]).

%% Runs on the broker node with a fresh variable queue.
variable_queue_requeue1(Config) ->
    with_fresh_variable_queue(
      fun variable_queue_requeue2/2,
      ?config(variable_queue_type, Config)).

%% Builds a queue with requeued and fresh messages interleaved, then
%% fetches everything, asserting the redelivered flag and ordering:
%% requeued messages first (flagged true), then fresh ones (false).
variable_queue_requeue2(VQ0, _Config) ->
    {_PendingMsgs, RequeuedMsgs, FreshMsgs, VQ1} =
        variable_queue_with_holes(VQ0),
    Msgs =
        lists:zip(RequeuedMsgs,
                  lists:duplicate(length(RequeuedMsgs), true)) ++
        lists:zip(FreshMsgs,
                  lists:duplicate(length(FreshMsgs), false)),
    VQ2 = lists:foldl(fun ({I, Requeued}, VQa) ->
                              {{M, MRequeued, _}, VQb} =
                                  rabbit_variable_queue:fetch(true, VQa),
                              Requeued = MRequeued, %% assertion
                              I = msg2int(M),       %% assertion
                              VQb
                      end, VQ1, Msgs),
    {empty, VQ3} = rabbit_variable_queue:fetch(true, VQ2),
    VQ3.
|
|
|
|
|
|
|
|
|
|
%% requeue from ram_pending_ack into q3, move to delta and then empty queue
variable_queue_requeue_ram_beta(Config) ->
    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
      ?MODULE, variable_queue_requeue_ram_beta1, [Config]).

%% Runs on the broker node with a fresh variable queue.
variable_queue_requeue_ram_beta1(Config) ->
    with_fresh_variable_queue(
      fun variable_queue_requeue_ram_beta2/2,
      ?config(variable_queue_type, Config)).

%% Fetches everything, requeues in pieces around a RAM target change to
%% move acks through the queue's internal stages, then drains and acks.
variable_queue_requeue_ram_beta2(VQ0, _Config) ->
    IndexMod = index_mod(),
    %% Just over two segments' worth of messages.
    Count = IndexMod:next_segment_boundary(0)*2 + 2,
    VQ1 = variable_queue_publish(false, Count, VQ0),
    {VQ2, AcksR} = variable_queue_fetch(Count, false, false, Count, VQ1),
    {Back, Front} = lists:split(Count div 2, AcksR),
    {_, VQ3} = rabbit_variable_queue:requeue(erlang:tl(Back), VQ2),
    VQ4 = variable_queue_set_ram_duration_target(0, VQ3),
    {_, VQ5} = rabbit_variable_queue:requeue([erlang:hd(Back)], VQ4),
    VQ6 = requeue_one_by_one(Front, VQ5),
    {VQ7, AcksAll} = variable_queue_fetch(Count, false, true, Count, VQ6),
    {_, VQ8} = rabbit_variable_queue:ack(AcksAll, VQ7),
    VQ8.
|
|
|
|
|
|
2020-04-28 03:38:58 +08:00
|
|
|
%% CT entry point: runs the actual test body on broker node 0 via RPC.
variable_queue_fold(Config) ->
    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
      ?MODULE, variable_queue_fold1, [Config]).

%% Runs on the broker node with a fresh variable queue.
variable_queue_fold1(Config) ->
    with_fresh_variable_queue(
      fun variable_queue_fold2/2,
      ?config(variable_queue_type, Config)).

%% Exercises rabbit_variable_queue:fold/3 with cut-off points at, below
%% and beyond the queue depth (including 0 and 2*Count edge cases).
variable_queue_fold2(VQ0, _Config) ->
    {PendingMsgs, RequeuedMsgs, FreshMsgs, VQ1} =
        variable_queue_with_holes(VQ0),
    Count = rabbit_variable_queue:depth(VQ1),
    Msgs = lists:sort(PendingMsgs ++ RequeuedMsgs ++ FreshMsgs),
    lists:foldl(fun (Cut, VQ2) ->
                        test_variable_queue_fold(Cut, Msgs, PendingMsgs, VQ2)
                end, VQ1, [0, 1, 2, Count div 2,
                           Count - 1, Count, Count + 1, Count * 2]).
|
|
|
|
|
|
|
|
|
|
%% Folds over the queue, accumulating message ints up to Cut and then
%% stopping; verifies the accumulated prefix matches the sorted message
%% list and that the Pending flag agrees with PendingMsgs membership.
test_variable_queue_fold(Cut, Msgs, PendingMsgs, VQ0) ->
    {Acc, VQ1} = rabbit_variable_queue:fold(
                   fun (M, _, Pending, A) ->
                           MInt = msg2int(M),
                           Pending = lists:member(MInt, PendingMsgs), %% assertion
                           case MInt =< Cut of
                               true  -> {cont, [MInt | A]};
                               false -> {stop, A}
                           end
                   end, [], VQ0),
    Expected = lists:takewhile(fun (I) -> I =< Cut end, Msgs),
    Expected = lists:reverse(Acc), %% assertion
    VQ1.
|
|
|
|
|
|
2017-03-07 00:13:57 +08:00
|
|
|
%% CT entry point: runs the actual test body on broker node 0 via RPC.
variable_queue_batch_publish(Config) ->
    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
      ?MODULE, variable_queue_batch_publish1, [Config]).

%% Runs on the broker node with a fresh variable queue.
variable_queue_batch_publish1(Config) ->
    with_fresh_variable_queue(
      fun variable_queue_batch_publish2/2,
      ?config(variable_queue_type, Config)).

%% Batch-publishes Count persistent messages and checks the queue length.
variable_queue_batch_publish2(VQ, _Config) ->
    Count = 10,
    VQ1 = variable_queue_batch_publish(true, Count, VQ),
    Count = rabbit_variable_queue:len(VQ1),
    VQ1.
|
|
|
|
|
|
|
|
|
|
%% CT entry point: runs the actual test body on broker node 0 via RPC.
variable_queue_batch_publish_delivered(Config) ->
    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
      ?MODULE, variable_queue_batch_publish_delivered1, [Config]).

%% Runs on the broker node with a fresh variable queue.
variable_queue_batch_publish_delivered1(Config) ->
    with_fresh_variable_queue(
      fun variable_queue_batch_publish_delivered2/2,
      ?config(variable_queue_type, Config)).

%% Batch-publishes Count pre-delivered messages; they count towards
%% depth (pending acks) rather than len, hence the depth check.
variable_queue_batch_publish_delivered2(VQ, _Config) ->
    Count = 10,
    VQ1 = variable_queue_batch_publish_delivered(true, Count, VQ),
    Count = rabbit_variable_queue:depth(VQ1),
    VQ1.
|
|
|
|
|
|
|
|
|
|
%% same as test_variable_queue_requeue_ram_beta but randomly changing
%% the queue mode after every step.
variable_queue_mode_change(Config) ->
    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
      ?MODULE, variable_queue_mode_change1, [Config]).

%% Runs on the broker node with a fresh variable queue.
variable_queue_mode_change1(Config) ->
    with_fresh_variable_queue(
      fun variable_queue_mode_change2/2,
      ?config(variable_queue_type, Config)).

%% Same fetch/requeue/ack sequence as variable_queue_requeue_ram_beta2/2
%% with a random lazy/default mode switch inserted between every step.
variable_queue_mode_change2(VQ0, _Config) ->
    IndexMod = index_mod(),
    %% Just over two segments' worth of messages.
    Count = IndexMod:next_segment_boundary(0)*2 + 2,
    VQ1 = variable_queue_publish(false, Count, VQ0),
    VQ2 = maybe_switch_queue_mode(VQ1),
    {VQ3, AcksR} = variable_queue_fetch(Count, false, false, Count, VQ2),
    VQ4 = maybe_switch_queue_mode(VQ3),
    {Back, Front} = lists:split(Count div 2, AcksR),
    {_, VQ5} = rabbit_variable_queue:requeue(erlang:tl(Back), VQ4),
    VQ6 = maybe_switch_queue_mode(VQ5),
    VQ7 = variable_queue_set_ram_duration_target(0, VQ6),
    VQ8 = maybe_switch_queue_mode(VQ7),
    {_, VQ9} = rabbit_variable_queue:requeue([erlang:hd(Back)], VQ8),
    VQ10 = maybe_switch_queue_mode(VQ9),
    VQ11 = requeue_one_by_one(Front, VQ10),
    VQ12 = maybe_switch_queue_mode(VQ11),
    {VQ13, AcksAll} = variable_queue_fetch(Count, false, true, Count, VQ12),
    VQ14 = maybe_switch_queue_mode(VQ13),
    {_, VQ15} = rabbit_variable_queue:ack(AcksAll, VQ14),
    VQ16 = maybe_switch_queue_mode(VQ15),
    VQ16.
|
|
|
|
|
|
|
|
|
|
%% Switches the queue to a randomly chosen mode (lazy or default); the
%% chosen mode may be the same as the current one.
maybe_switch_queue_mode(VQ) ->
    Mode = random_queue_mode(),
    set_queue_mode(Mode, VQ).
|
|
|
|
|
|
|
|
|
|
%% Picks one of the two supported queue modes uniformly at random.
random_queue_mode() ->
    case rand:uniform(2) of
        1 -> lazy;
        2 -> default
    end.
|
2017-03-07 00:13:57 +08:00
|
|
|
|
|
|
|
|
%% Normalises a publish helper's result: some publish variants return a
%% 2-tuple whose second element is the queue state, others return the
%% state alone. Either way, return just the state.
pub_res(Res) ->
    case Res of
        {_Ignored, VQS} -> VQS;
        VQS             -> VQS
    end.
|
|
|
|
|
|
|
|
|
|
%% Builds a {Message, Properties, IsDelivered=false} publish triple for
%% message number N; delivery_mode 2 when persistent, 1 otherwise.
make_publish(IsPersistent, PayloadFun, PropFun, N) ->
    {rabbit_basic:message(
       rabbit_misc:r(<<>>, exchange, <<>>),
       <<>>, #'P_basic'{delivery_mode = case IsPersistent of
                                            true  -> 2;
                                            false -> 1
                                        end},
       PayloadFun(N)),
     PropFun(N, #message_properties{size = 10}),
     false}.
|
|
|
|
|
|
|
|
|
|
%% Builds a {Message, Properties} pair (no delivered flag) for
%% publish_delivered; delivery_mode 2 when persistent, 1 otherwise.
make_publish_delivered(IsPersistent, PayloadFun, PropFun, N) ->
    {rabbit_basic:message(
       rabbit_misc:r(<<>>, exchange, <<>>),
       <<>>, #'P_basic'{delivery_mode = case IsPersistent of
                                            true  -> 2;
                                            false -> 1
                                        end},
       PayloadFun(N)),
     PropFun(N, #message_properties{size = 10})}.
|
|
|
|
|
|
|
|
|
|
%% Builds a queue resource name derived from the current testcase name.
queue_name(Config, Name) ->
    Name1 = iolist_to_binary(rabbit_ct_helpers:config_to_testcase_name(Config, Name)),
    queue_name(Name1).

%% Wraps a binary name as a queue resource in the default vhost.
queue_name(Name) ->
    rabbit_misc:r(<<"/">>, queue, Name).

%% Returns a unique, randomly named queue resource.
test_queue() ->
    queue_name(rabbit_guid:gen()).
|
2017-03-07 00:13:57 +08:00
|
|
|
|
2017-03-08 22:46:25 +08:00
|
|
|
%% Recovers the queue index for QName, using a throwaway persistent
%% msg_store client to answer the "does this message exist?" callback.
%% Returns the index module's recover/7 result.
init_test_queue(QName) ->
    PRef = rabbit_guid:gen(),
    PersistentClient = msg_store_client_init(?PERSISTENT_MSG_STORE, PRef),
    IndexMod = index_mod(),
    Res = IndexMod:recover(
            QName, [], false,
            fun (MsgId) ->
                    rabbit_msg_store:contains(MsgId, PersistentClient)
            end,
            fun nop/1, fun nop/1,
            main),
    ok = rabbit_msg_store:client_delete_and_terminate(PersistentClient),
    Res.
|
|
|
|
|
|
2017-03-08 22:46:25 +08:00
|
|
|
%% Terminates the queue index, restarts the vhost's variable queue
%% supervision (declaring QName as a recoverable queue) and re-inits
%% the index from disk.
restart_test_queue(Qi, QName) ->
    IndexMod = index_mod(),
    _ = IndexMod:terminate(?VHOST, [], Qi),
    ok = rabbit_variable_queue:stop(?VHOST),
    {ok, _} = rabbit_variable_queue:start(?VHOST, [QName]),
    init_test_queue(QName).
|
2017-03-07 00:13:57 +08:00
|
|
|
|
2017-03-08 22:46:25 +08:00
|
|
|
%% Restarts the vhost's variable queue machinery with no recoverable
%% queues, then initialises and immediately deletes QName's index so it
%% is guaranteed empty on disk.
empty_test_queue(QName) ->
    ok = rabbit_variable_queue:stop(?VHOST),
    {ok, _} = rabbit_variable_queue:start(?VHOST, []),
    %% {0, 0, _} asserts zero undelivered and zero unacked messages.
    {0, 0, Qi} = init_test_queue(QName),
    IndexMod = index_mod(),
    _ = IndexMod:delete_and_terminate(Qi),
    ok.

%% Like empty_test_queue/1 but without restarting the vhost machinery.
unin_empty_test_queue(QName) ->
    {0, 0, Qi} = init_test_queue(QName),
    IndexMod = index_mod(),
    _ = IndexMod:delete_and_terminate(Qi),
    ok.
|
|
|
|
|
|
|
|
|
|
%% Runs Fun(Qi, QName) against a freshly emptied, freshly initialised
%% queue index, then deletes the index Fun returns.
with_empty_test_queue(Fun) ->
    QName = test_queue(),
    ok = empty_test_queue(QName),
    {0, 0, Qi} = init_test_queue(QName),
    IndexMod = index_mod(),
    IndexMod:delete_and_terminate(Fun(Qi, QName)).
|
2017-03-07 00:13:57 +08:00
|
|
|
|
2021-04-07 20:45:43 +08:00
|
|
|
%% Seeds the calling process so direct queue-index calls work in tests.
init_queue_index() ->
    %% We must set the segment entry count in the process dictionary
    %% for tests that call the v1 queue index directly to have a correct
    %% value.
    put(segment_entry_count, 2048),
    ok.
|
|
|
|
|
|
2017-03-07 00:13:57 +08:00
|
|
|
%% Stops and restarts the rabbit application on the current node.
restart_app() ->
    rabbit:stop(),
    rabbit:start().
|
|
|
|
|
|
|
|
|
|
%% Publishes one freshly generated message per sequence id to both the
%% queue index and the appropriate (persistent/transient) message store.
%% Returns {FinalQi, [{SeqId, MsgId}]} with the pairs in reverse
%% publication order.
queue_index_publish(SeqIds, Persistent, Qi) ->
    IndexMod = index_mod(),
    Ref = rabbit_guid:gen(),
    MsgStore = case Persistent of
                   true  -> ?PERSISTENT_MSG_STORE;
                   false -> ?TRANSIENT_MSG_STORE
               end,
    MSCState = msg_store_client_init(MsgStore, Ref),
    {A, B = [{_SeqId, LastMsgIdWritten} | _]} =
        lists:foldl(
          fun (SeqId, {QiN, SeqIdsMsgIdsAcc}) ->
                  MsgId = rabbit_guid:gen(),
                  QiM = IndexMod:publish(
                          MsgId, SeqId, rabbit_msg_store,
                          #message_properties{size = 10},
                          Persistent, infinity, QiN),
                  ok = rabbit_msg_store:write(SeqId, MsgId, MsgId, MSCState),
                  {QiM, [{SeqId, MsgId} | SeqIdsMsgIdsAcc]}
          end, {Qi, []}, SeqIds),
    %% do this just to force all of the publishes through to the msg_store:
    true = rabbit_msg_store:contains(LastMsgIdWritten, MSCState),
    ok = rabbit_msg_store:client_delete_and_terminate(MSCState),
    {A, B}.
|
|
|
|
|
|
|
|
|
|
%% No-op callbacks of arity 1 and 2, used where the queue index /
%% variable queue APIs require a callback but the tests need none.
nop(_) -> ok.
nop(_, _) -> ok.
|
|
|
|
|
|
|
|
|
|
%% Opens a message store client for the given store in the default
%% vhost; 'undefined' disables the confirm callback.
msg_store_client_init(MsgStore, Ref) ->
    rabbit_vhost_msg_store:client_init(?VHOST, MsgStore, Ref, undefined).
|
2017-03-07 00:13:57 +08:00
|
|
|
|
|
|
|
|
%% Initialises a variable queue for Q. Recover may be:
%%   true  -> simulate recovery after a non-clean shutdown;
%%   false -> brand new queue;
%%   Terms -> recover from the given recovery terms.
variable_queue_init(Q, Recover) ->
    rabbit_variable_queue:init(
      Q, case Recover of
             true  -> non_clean_shutdown;
             false -> new;
             Terms -> Terms
         end, fun nop/2, fun nop/1, fun nop/1).
|
2017-03-07 00:13:57 +08:00
|
|
|
|
2021-08-10 23:10:00 +08:00
|
|
|
%% Reads a queue's recovery terms from disk. The on-disk directory name
%% is the base-36 rendering of md5("queue" ++ VHost ++ Name), mirroring
%% the broker's queue-directory naming scheme.
variable_queue_read_terms(QName) ->
    #resource { kind = queue,
                virtual_host = VHost,
                name = Name } = QName,
    <<Num:128>> = erlang:md5(<<"queue", VHost/binary, Name/binary>>),
    DirName = rabbit_misc:format("~.36B", [Num]),
    {ok, Terms} = rabbit_recovery_terms:read(VHost, DirName),
    Terms.
|
|
|
|
|
|
2017-03-07 00:13:57 +08:00
|
|
|
%% Publish Count persistent messages carrying Payload to queue Q via the
%% queue-type API, then block until publisher confirms arrive for all
%% sequence numbers 1..Count. Returns the final queue-type state.
publish_and_confirm(Q, Payload, Count) ->
    Seqs = lists:seq(1, Count),
    QTState0 = rabbit_queue_type:new(Q, rabbit_queue_type:init()),
    QTState =
        lists:foldl(
          fun (Seq, Acc0) ->
                  %% delivery_mode = 2 marks the message persistent.
                  Msg = rabbit_basic:message(rabbit_misc:r(<<>>, exchange, <<>>),
                                             <<>>, #'P_basic'{delivery_mode = 2},
                                             Payload),
                  Delivery = #delivery{mandatory = false, sender = self(),
                                       confirm = true, message = Msg, msg_seq_no = Seq,
                                       flow = noflow},
                  {ok, Acc, _Actions} = rabbit_queue_type:deliver([Q], Delivery, Acc0),
                  Acc
          end, QTState0, Seqs),
    %% Confirms are tracked as a version-2 sets set of sequence numbers.
    wait_for_confirms(sets:from_list(Seqs, [{version, 2}])),
    QTState.
|
2017-03-07 00:13:57 +08:00
|
|
|
|
|
|
|
|
%% Block until every sequence number in Unconfirmed (a version-2 sets
%% set) has been confirmed via queue_event confirm casts delivered to
%% this process's mailbox. If no confirm arrives within ?TIMEOUT ms,
%% drain and log the mailbox, then exit with timeout_waiting_for_confirm.
wait_for_confirms(Unconfirmed) ->
    case sets:is_empty(Unconfirmed) of
        true -> ok;
        false ->
            receive
                {'$gen_cast', {queue_event, _QName, {confirm, Confirmed, _}}} ->
                    wait_for_confirms(
                      sets:subtract(
                        Unconfirmed, sets:from_list(Confirmed, [{version, 2}])))
            after ?TIMEOUT ->
                    flush(),
                    exit(timeout_waiting_for_confirm)
            end
    end.
|
|
|
|
|
|
|
|
|
|
%% Run test fun Fun(VQ, QName) against a freshly initialised variable
%% queue set to Mode, asserting the fresh queue starts empty. The work
%% happens in a linked child process; failures are forwarded to the
%% caller and re-raised via exit/1. Returns 'passed' on success.
with_fresh_variable_queue(Fun, Mode) ->
    Ref = make_ref(),
    Me = self(),
    %% Run in a separate process since rabbit_msg_store will send
    %% bump_credit messages and we want to ignore them
    spawn_link(fun() ->
                       QName = test_queue(),
                       %% NOTE(review): 'unin_empty_test_queue' looks like a
                       %% garbled helper name - confirm it is defined in this
                       %% suite (possibly 'empty_test_queue'?).
                       ok = unin_empty_test_queue(QName),
                       VQ = variable_queue_init(test_amqqueue(QName, true), false),
                       %% A brand-new queue must report empty q1..q4, an
                       %% empty delta and zero length.
                       S0 = variable_queue_status(VQ),
                       assert_props(S0, [{q1, 0}, {q2, 0},
                                         {delta,
                                          {delta, undefined, 0, 0, undefined}},
                                         {q3, 0}, {q4, 0},
                                         {len, 0}]),
                       VQ1 = set_queue_mode(Mode, VQ),
                       try
                           _ = rabbit_variable_queue:delete_and_terminate(
                                 shutdown, Fun(VQ1, QName)),
                           Me ! Ref
                       catch
                           Type:Error:Stacktrace ->
                               Me ! {Ref, Type, Error, Stacktrace}
                       end
               end),
    %% Propagate any failure from the child into the test process.
    receive
        Ref                    -> ok;
        {Ref, Type, Error, ST} -> exit({Type, Error, ST})
    end,
    passed.
|
|
|
|
|
|
|
|
|
|
%% Thin wrapper: apply queue mode Mode to the variable queue state VQ.
set_queue_mode(Mode, VQ) ->
    rabbit_variable_queue:set_queue_mode(Mode, VQ).
|
2017-03-07 00:13:57 +08:00
|
|
|
|
|
|
|
|
%% Publish Count messages into VQ, keeping default message properties.
variable_queue_publish(IsPersistent, Count, VQ) ->
    PassThroughProps = fun (_Seq, Props) -> Props end,
    variable_queue_publish(IsPersistent, Count, PassThroughProps, VQ).
|
|
|
|
|
|
|
|
|
|
%% Publish Count messages starting at sequence 1 with empty payloads,
%% letting PropFun adjust each message's properties.
variable_queue_publish(IsPersistent, Count, PropFun, VQ) ->
    EmptyPayload = fun (_Seq) -> <<>> end,
    variable_queue_publish(IsPersistent, 1, Count, PropFun, EmptyPayload, VQ).
|
|
|
|
|
|
|
|
|
|
%% Publish Count messages with sequence numbers Start..Start+Count-1
%% into VQ. PropFun(N, Props) may adjust per-message properties and
%% PayloadFun(N) supplies each payload. Waits for credit-flow induced
%% shuffling to finish before returning the new VQ state.
variable_queue_publish(IsPersistent, Start, Count, PropFun, PayloadFun, VQ) ->
    variable_queue_wait_for_shuffling_end(
      lists:foldl(
        fun (N, VQN) ->
                rabbit_variable_queue:publish(
                  rabbit_basic:message(
                    rabbit_misc:r(<<>>, exchange, <<>>),
                    %% delivery_mode 2 = persistent, 1 = transient.
                    <<>>, #'P_basic'{delivery_mode = case IsPersistent of
                                                         true -> 2;
                                                         false -> 1
                                                     end},
                    PayloadFun(N)),
                  PropFun(N, #message_properties{size = 10}),
                  false, self(), noflow, VQN)
        end, VQ, lists:seq(Start, Start + Count - 1))).
|
|
|
|
|
|
|
|
|
|
%% Batch-publish Count messages with untouched default properties.
variable_queue_batch_publish(IsPersistent, Count, VQ) ->
    KeepProps = fun (_Seq, Props) -> Props end,
    variable_queue_batch_publish(IsPersistent, Count, KeepProps, VQ).
|
|
|
|
|
|
|
|
|
|
%% Batch-publish Count messages from sequence 1 with empty payloads,
%% letting PropFun adjust per-message properties.
variable_queue_batch_publish(IsPersistent, Count, PropFun, VQ) ->
    variable_queue_batch_publish(IsPersistent, 1, Count, PropFun,
                                 fun (_Seq) -> <<>> end, VQ).
|
|
|
|
|
|
|
|
|
|
%% Batch-publish via rabbit_variable_queue:batch_publish/4, building
%% each element with the plain (not pre-delivered) publish constructor.
variable_queue_batch_publish(IsPersistent, Start, Count, PropFun, PayloadFun, VQ) ->
    variable_queue_batch_publish0(IsPersistent, Start, Count,
                                  PropFun, PayloadFun,
                                  fun make_publish/4,
                                  fun rabbit_variable_queue:batch_publish/4,
                                  VQ).
|
|
|
|
|
|
|
|
|
|
%% Batch-publish Count pre-delivered messages with default properties.
variable_queue_batch_publish_delivered(IsPersistent, Count, VQ) ->
    KeepProps = fun (_Seq, Props) -> Props end,
    variable_queue_batch_publish_delivered(IsPersistent, Count, KeepProps, VQ).
|
|
|
|
|
|
|
|
|
|
%% Batch-publish Count pre-delivered messages from sequence 1 with
%% empty payloads, letting PropFun adjust per-message properties.
variable_queue_batch_publish_delivered(IsPersistent, Count, PropFun, VQ) ->
    variable_queue_batch_publish_delivered(IsPersistent, 1, Count, PropFun,
                                           fun (_Seq) -> <<>> end, VQ).
|
|
|
|
|
|
|
|
|
|
%% Batch-publish via rabbit_variable_queue:batch_publish_delivered/4,
%% building each element with the pre-delivered publish constructor.
variable_queue_batch_publish_delivered(IsPersistent, Start, Count, PropFun, PayloadFun, VQ) ->
    variable_queue_batch_publish0(IsPersistent, Start, Count,
                                  PropFun, PayloadFun,
                                  fun make_publish_delivered/4,
                                  fun rabbit_variable_queue:batch_publish_delivered/4,
                                  VQ).
|
|
|
|
|
|
|
|
|
|
%% Common batch-publish driver: build one publish element per sequence
%% number with MakePubFun, hand the whole batch to PubFun, then wait
%% for any credit-flow shuffling before returning the new VQ state.
variable_queue_batch_publish0(IsPersistent, Start, Count, PropFun, PayloadFun,
                              MakePubFun, PubFun, VQ) ->
    Seqs = lists:seq(Start, Start + Count - 1),
    Publishes = [MakePubFun(IsPersistent, PayloadFun, PropFun, Seq) || Seq <- Seqs],
    VQ1 = pub_res(PubFun(Publishes, self(), noflow, VQ)),
    variable_queue_wait_for_shuffling_end(VQ1).
|
|
|
|
|
|
|
|
|
|
%% Fetch Count messages from VQ, asserting each one's persistence and
%% delivered flags match the arguments and that the queue length drops
%% from Len by exactly one per fetch. Returns {VQ, AckTags} with the
%% ack tags accumulated newest-first.
variable_queue_fetch(Count, IsPersistent, IsDelivered, Len, VQ) ->
    lists:foldl(fun (N, {VQN, AckTagsAcc}) ->
                        Rem = Len - N,
                        %% Assertive match: crash if flags don't match.
                        {{#basic_message { is_persistent = IsPersistent },
                          IsDelivered, AckTagN}, VQM} =
                            rabbit_variable_queue:fetch(true, VQN),
                        Rem = rabbit_variable_queue:len(VQM),
                        {VQM, [AckTagN | AckTagsAcc]}
                end, {VQ, []}, lists:seq(1, Count)).
|
|
|
|
|
|
2017-03-08 22:46:25 +08:00
|
|
|
%% Build an in-memory amqqueue record named QName, owned by the calling
%% process, durable or not as requested.
test_amqqueue(QName, Durable) ->
    rabbit_amqqueue:pseudo_queue(QName, self(), Durable).
|
2017-03-07 00:13:57 +08:00
|
|
|
|
|
|
|
|
%% Check that Prop in proplist List has exactly Value. Returns ok on a
%% match, otherwise a descriptive mismatch tuple which assert_props/2
%% collects and raises. A missing key yields 'undefined' and thus a
%% mismatch unless Value is itself 'undefined'.
assert_prop(List, Prop, Value) ->
    case proplists:get_value(Prop, List) of
        Value  -> ok;
        _Other -> {exit, Prop, exp, Value, List}
    end.
|
|
|
|
|
|
|
|
|
|
%% Assert every {Prop, Value} pair in PropVals against proplist List.
%% Returns ok when all checks pass; otherwise raises error/1 carrying
%% the distinct mismatch descriptors (usort removes duplicates).
assert_props(List, PropVals) ->
    Outcomes = [assert_prop(List, Prop, Expected)
                || {Prop, Expected} <- PropVals],
    case lists:usort(Outcomes) of
        [ok]   -> ok;
        Failed -> error(Failed -- [ok])
    end.
|
2017-03-07 00:13:57 +08:00
|
|
|
|
|
|
|
|
%% Apply a new ram duration target to VQ and drain any credit-flow
%% shuffling that results before returning the new state.
variable_queue_set_ram_duration_target(Duration, VQ) ->
    variable_queue_wait_for_shuffling_end(
      rabbit_variable_queue:set_ram_duration_target(Duration, VQ)).
|
|
|
|
|
|
|
|
|
|
%% Publish, fetch and ack one transient message N times, asserting the
%% queue length reads Len after every fetch. Returns the final VQ state.
publish_fetch_and_ack(0, _Len, VQ) ->
    VQ;
publish_fetch_and_ack(N, Len, VQ) ->
    VQPublished = variable_queue_publish(false, 1, VQ),
    {{_Msg, false, AckTag}, VQFetched} =
        rabbit_variable_queue:fetch(true, VQPublished),
    Len = rabbit_variable_queue:len(VQFetched),
    {_MsgIds, VQAcked} = rabbit_variable_queue:ack([AckTag], VQFetched),
    publish_fetch_and_ack(N - 1, Len, VQAcked).
|
|
|
|
|
|
|
|
|
|
%% Gather a full status proplist for VQ: every regular info key paired
%% with its value, followed by the expanded backing_queue_status
%% proplist appended at the end.
variable_queue_status(VQ) ->
    InfoKeys = rabbit_backing_queue:info_keys() -- [backing_queue_status],
    Infos = [{Key, rabbit_variable_queue:info(Key, VQ)} || Key <- InfoKeys],
    Infos ++ rabbit_variable_queue:info(backing_queue_status, VQ).
|
|
|
|
|
|
|
|
|
|
%% Drain bump_credit messages until credit_flow reports unblocked,
%% resuming the variable queue after each bump so deferred work runs.
%% Returns the (possibly updated) VQ state. Note: blocks indefinitely
%% if no bump_credit message ever arrives while blocked.
variable_queue_wait_for_shuffling_end(VQ) ->
    case credit_flow:blocked() of
        false -> VQ;
        true ->
            receive
                {bump_credit, Msg} ->
                    credit_flow:handle_bump_msg(Msg),
                    variable_queue_wait_for_shuffling_end(
                      rabbit_variable_queue:resume(VQ))
            end
    end.
|
|
|
|
|
|
|
|
|
|
%% Decode the term payload of a basic_message back into the original
%% Erlang term; payload fragments are stored in reverse order, so they
%% are reversed before concatenation and binary_to_term/1.
msg2int(#basic_message{content = #content{payload_fragments_rev = FragsRev}}) ->
    Payload = list_to_binary(lists:reverse(FragsRev)),
    binary_to_term(Payload).
|
|
|
|
|
|
|
|
|
|
%% Select every Interval-th element of AckSeqs (a list of
%% {AckTag, SeqNo} pairs); Rem offsets which residue class is chosen.
ack_subset(AckSeqs, Interval, Rem) ->
    [Pair || {_AckTag, SeqNo} = Pair <- AckSeqs,
             (SeqNo + Rem) rem Interval =:= 0].
|
|
|
|
|
|
|
|
|
|
%% Requeue each ack tag with an individual requeue call (rather than
%% one bulk call) so the requeue path runs once per message.
requeue_one_by_one(Acks, VQ) ->
    lists:foldl(fun (AckTag, VQAcc) ->
                        {_MsgIds, VQNext} =
                            rabbit_variable_queue:requeue([AckTag], VQAcc),
                        VQNext
                end, VQ, Acks).
|
|
|
|
|
|
|
|
|
|
%% Create a vq with messages in q1, delta, and q3, and holes (in the
%% form of pending acks) in the latter two.
%% Returns {RemainingPendingSeqs, RequeuedSeqs, TailSeqs, VQ}.
variable_queue_with_holes(VQ0) ->
    Interval = 2048, %% should match vq:IO_BATCH_SIZE
    IndexMod = index_mod(),
    %% Enough messages to span two index segments plus two intervals.
    Count = IndexMod:next_segment_boundary(0)*2 + 2 * Interval,
    Seq = lists:seq(1, Count),
    VQ1 = variable_queue_publish(
            false, 1, Count,
            fun (_, P) -> P end, fun erlang:term_to_binary/1, VQ0),
    %% Target of 0 is used here to push messages out of RAM.
    VQ2 = variable_queue_set_ram_duration_target(0, VQ1),
    {VQ3, AcksR} = variable_queue_fetch(Count, false, false, Count, VQ2),
    Acks = lists:reverse(AcksR),
    AckSeqs = lists:zip(Acks, Seq),
    %% Three disjoint subsets, each taking every Interval-th message at
    %% a different offset (0, 1, 2).
    [{Subset1, _Seq1}, {Subset2, _Seq2}, {Subset3, Seq3}] =
        [lists:unzip(ack_subset(AckSeqs, Interval, I)) || I <- [0, 1, 2]],
    %% we requeue in three phases in order to exercise requeuing logic
    %% in various vq states
    {_MsgIds, VQ4} = rabbit_variable_queue:requeue(
                       Acks -- (Subset1 ++ Subset2 ++ Subset3), VQ3),
    VQ5 = requeue_one_by_one(Subset1, VQ4),
    %% by now we have some messages (and holes) in delta
    VQ6 = requeue_one_by_one(Subset2, VQ5),
    VQ7 = variable_queue_set_ram_duration_target(infinity, VQ6),
    %% add the q1 tail
    VQ8 = variable_queue_publish(
            true, Count + 1, Interval,
            fun (_, P) -> P end, fun erlang:term_to_binary/1, VQ7),
    %% assertions
    vq_with_holes_assertions(VQ8),
    Depth = Count + Interval,
    Depth = rabbit_variable_queue:depth(VQ8),
    %% Subset3 was never requeued, so its messages stay pending acks
    %% (the "holes") and are excluded from len but counted in depth.
    Len = Depth - length(Subset3),
    Len = rabbit_variable_queue:len(VQ8),
    {Seq3, Seq -- Seq3, lists:seq(Count + 1, Count + Interval), VQ8}.
|
|
|
|
|
|
2022-09-06 17:51:34 +08:00
|
|
|
%% Assert that neither 'delta' nor 'q3' in the VQ status is empty: a
%% zero-count delta tuple or a plain 0 would mean the scenario built by
%% variable_queue_with_holes/1 did not leave messages where expected.
vq_with_holes_assertions(VQ) ->
    Status = variable_queue_status(VQ),
    [false = case Value of
                 {delta, _, 0, _, _} -> true;
                 0                   -> true;
                 _                   -> false
             end || {Key, Value} <- Status,
                    lists:member(Key, [delta, q3])].
|
2017-03-07 00:13:57 +08:00
|
|
|
|
|
|
|
|
%% Wait for any credit-flow shuffling to finish, then assert the given
%% property list against the queue's status. Returns the settled VQ.
check_variable_queue_status(VQ0, Props) ->
    VQ = variable_queue_wait_for_shuffling_end(VQ0),
    assert_props(variable_queue_status(VQ), Props),
    VQ.
|
2020-09-29 18:43:24 +08:00
|
|
|
|
|
|
|
|
%% Drain the test process's mailbox, logging every message via ct:pal
%% for diagnostics; returns ok once the mailbox is empty (after 0).
flush() ->
    receive
        Any ->
            ct:pal("flush ~tp", [Any]),
            flush()
    after 0 ->
            ok
    end.
|