Merge pull request #14236 from rabbitmq/mergify/bp/v4.1.x/pr-14235

Fix flake in stream plugin test suite (backport #14235)
Arnaud Cogoluègnes 2025-07-15 16:06:58 +00:00 committed by GitHub
commit d9a0f078cd
1 changed file with 45 additions and 5 deletions


@@ -177,14 +177,20 @@ simple_sac_consumer_should_get_disconnected_on_network_partition(Config) ->
     delete_stream(stream_port(Config, 0), S),
     %% online consumers should receive a metadata update frame (stream deleted)
-    %% we unqueue the this frame before closing the connection
+    %% we unqueue this frame before closing the connection
     %% directly closing the connection of the cancelled consumer
+    %% Edge case:
+    %% the waiting consumer can get 2 frames: consumer_update then metadata_update.
+    %% This is because the active consumer is removed from the group and this triggers
+    %% a rebalancing. The 2 remaining consumers are most of the time cancelled when the
+    %% stream is deleted, so the rebalancing does not take place.
+    %% We just tolerate an extra frame when closing their respective connections.
     maps:foreach(fun(K, {S0, C0}) when K /= DiscSubId ->
                          log("Expecting frame in consumer ~p", [K]),
                          {Cmd1, C1} = receive_commands(S0, C0),
                          log("Received ~p", [Cmd1]),
                          log("Closing"),
-                         {ok, _} = stream_test_utils:close(S0, C1);
+                         {ok, _} = close_connection(S0, C1);
                     (K, {S0, C0}) ->
                          log("Closing ~p", [K]),
                          {ok, _} = stream_test_utils:close(S0, C0)
@@ -290,12 +296,18 @@ simple_sac_consumer_should_get_disconnected_on_coord_leader_network_partition(Co
     %% online consumers should receive a metadata update frame (stream deleted)
     %% we unqueue this frame before closing the connection
     %% directly closing the connection of the cancelled consumer
+    %% Edge case:
+    %% the waiting consumer can get 2 frames: consumer_update then metadata_update.
+    %% This is because the active consumer is removed from the group and this triggers
+    %% a rebalancing. The 2 remaining consumers are most of the time cancelled when the
+    %% stream is deleted, so the rebalancing does not take place.
+    %% We just tolerate an extra frame when closing their respective connections.
     maps:foreach(fun(K, {S0, C0}) when K /= DiscSubId ->
                          log("Expecting frame in consumer ~p", [K]),
                          {Cmd1, C1} = receive_commands(S0, C0),
                          log("Received ~p", [Cmd1]),
                          log("Closing"),
-                         {ok, _} = stream_test_utils:close(S0, C1);
+                         {ok, _} = close_connection(S0, C1);
                     (K, {S0, C0}) ->
                          log("Closing ~p", [K]),
                          {ok, _} = stream_test_utils:close(S0, C0)
@@ -395,12 +407,18 @@ super_stream_sac_consumer_should_get_disconnected_on_network_partition(Config) -
     %% online consumers should receive a metadata update frame (stream deleted)
     %% we unqueue this frame before closing the connection
     %% directly closing the connection of the cancelled consumer
+    %% Edge case:
+    %% the waiting consumer can get 2 frames: consumer_update then metadata_update.
+    %% This is because the active consumer is removed from the group and this triggers
+    %% a rebalancing. The 2 remaining consumers are most of the time cancelled when the
+    %% stream is deleted, so the rebalancing does not take place.
+    %% We just tolerate an extra frame when closing their respective connections.
     maps:foreach(fun(K, {S0, C0}) when K /= DiscSubId ->
                          log("Expecting frame in consumer ~p", [K]),
                          {Cmd1, C1} = receive_commands(S0, C0),
                          log("Received ~p", [Cmd1]),
                          log("Closing"),
-                         {ok, _} = stream_test_utils:close(S0, C1);
+                         {ok, _} = close_connection(S0, C1);
                     (K, {S0, C0}) ->
                          log("Closing ~p", [K]),
                          {ok, _} = stream_test_utils:close(S0, C0)
@@ -516,12 +534,18 @@ super_stream_sac_consumer_should_get_disconnected_on_coord_leader_network_partit
     %% online consumers should receive a metadata update frame (stream deleted)
     %% we unqueue this frame before closing the connection
     %% directly closing the connection of the cancelled consumer
+    %% Edge case:
+    %% the waiting consumer can get 2 frames: consumer_update then metadata_update.
+    %% This is because the active consumer is removed from the group and this triggers
+    %% a rebalancing. The 2 remaining consumers are most of the time cancelled when the
+    %% stream is deleted, so the rebalancing does not take place.
+    %% We just tolerate an extra frame when closing their respective connections.
     maps:foreach(fun(K, {S0, C0}) when K /= DiscSubId ->
                          log("Expecting frame in consumer ~p", [K]),
                          {Cmd1, C1} = receive_commands(S0, C0),
                          log("Received ~p", [Cmd1]),
                          log("Closing"),
-                         {ok, _} = stream_test_utils:close(S0, C1);
+                         {ok, _} = close_connection(S0, C1);
                     (K, {S0, C0}) ->
                          log("Closing ~p", [K]),
                          {ok, _} = stream_test_utils:close(S0, C0)
@@ -858,3 +882,19 @@ log(Format) ->
 log(Format, Args) ->
     ct:pal(Format, Args).
+
+close_connection(Sock, C) ->
+    CloseReason = <<"OK">>,
+    CloseFrame = rabbit_stream_core:frame({request, 1, {close, ?RESPONSE_CODE_OK, CloseReason}}),
+    ok = gen_tcp:send(Sock, CloseFrame),
+    pump_until_close(Sock, C, 10).
+
+pump_until_close(_, _, 0) ->
+    ct:fail("did not get close response");
+pump_until_close(Sock, C0, N) ->
+    case stream_test_utils:receive_stream_commands(Sock, C0) of
+        {{response, 1, {close, ?RESPONSE_CODE_OK}}, C1} ->
+            {ok, C1};
+        {_Cmd, C1} ->
+            pump_until_close(Sock, C1, N - 1)
+    end.
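
For context, here is a minimal usage sketch (not part of the commit) of how the new helper fits the "Edge case" described in the comments above, assuming the suite's existing receive_commands/2 and log/2 helpers; the function name drain_and_close/2 is hypothetical. The first frame read from a waiting consumer's socket may be either consumer_update or metadata_update, so the test only logs whatever arrives and then lets close_connection/2 pump remaining frames until the close response shows up.

%% Hypothetical sketch; drain_and_close/2 does not exist in the suite.
drain_and_close(Sock, C0) ->
    %% the first frame may be consumer_update or metadata_update
    %% (see the "Edge case" comments above), so don't assert on its type
    {Cmd, C1} = receive_commands(Sock, C0),
    log("Received ~p before closing", [Cmd]),
    %% close_connection/2 (added by this commit) sends the close request and
    %% reads up to 10 frames, tolerating any extra frame, until the
    %% close response arrives
    {ok, _} = close_connection(Sock, C1),
    ok.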