%% rabbitmq-server/deps/rabbitmq_ct_helpers/test/terraform_SUITE.erl
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2007-2025 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
%%
-module(terraform_SUITE).
-include_lib("common_test/include/ct.hrl").
-include_lib("eunit/include/eunit.hrl").
-export([all/0,
groups/0,
init_per_suite/1, end_per_suite/1,
init_per_group/2, end_per_group/2,
init_per_testcase/2, end_per_testcase/2,
run_code_on_one_vm/1, do_run_code_on_one_vm/1,
run_code_on_three_vms/1, do_run_code_on_three_vms/1,
run_one_rabbitmq_node/1,
run_four_rabbitmq_nodes/1
]).
%% Top-level groups run by this suite; both provisioning flavours
%% (direct VMs and an AWS autoscaling group) exercise the same tests.
all() ->
    [{group, direct_vms}, {group, autoscaling_group}].
%% Group tree: each provisioning flavour nests the same two leaf groups,
%% one running arbitrary code on the VMs, one running RabbitMQ nodes.
groups() ->
    LeafGroups = [{group, run_code}, {group, run_rabbitmq}],
    [
     {direct_vms, [parallel], LeafGroups},
     {autoscaling_group, [parallel], LeafGroups},
     {run_code, [parallel], [run_code_on_one_vm,
                             run_code_on_three_vms]},
     {run_rabbitmq, [parallel], [run_one_rabbitmq_node,
                                 run_four_rabbitmq_nodes]}
    ].
%% Suite setup: dump the environment to the CT log, then run the
%% common setup steps and return the resulting config.
init_per_suite(Config0) ->
    rabbit_ct_helpers:log_environment(),
    rabbit_ct_helpers:run_setup_steps(Config0).
%% Suite teardown: undo whatever init_per_suite/1 set up.
end_per_suite(Config0) ->
    Config1 = rabbit_ct_helpers:run_teardown_steps(Config0),
    Config1.
%% For the autoscaling flavour, point Terraform at the AWS
%% autoscaling-group module; for the leaf groups, record whether the
%% testcases should start RabbitMQ on the provisioned VMs.
init_per_group(autoscaling_group, Config) ->
    ConfigDir = rabbit_ct_vm_helpers:aws_autoscaling_group_module(Config),
    rabbit_ct_helpers:set_config(Config, {terraform_config_dir, ConfigDir});
init_per_group(Group, Config) ->
    WantRabbitMQ = Group =:= run_rabbitmq,
    rabbit_ct_helpers:set_config(Config, {run_rabbitmq, WantRabbitMQ}).
%% Nothing group-specific to undo; hand the config back untouched.
end_per_group(_, Config) ->
    Config.
%% Per-testcase setup: size the Terraform deployment and the RabbitMQ
%% cluster for the testcase, then run the VM setup steps (plus the
%% broker steps when the group asked for RabbitMQ on the VMs).
init_per_testcase(Testcase, Config) ->
    rabbit_ct_helpers:testcase_started(Config, Testcase),
    WithRabbitMQ = ?config(run_rabbitmq, Config),
    InstanceName = rabbit_ct_helpers:testcase_absname(Config, Testcase),
    Config1 = rabbit_ct_helpers:set_config(
                Config,
                [{terraform_instance_count, instance_count(Testcase)},
                 {terraform_instance_name, InstanceName},
                 {rmq_nodename_suffix, Testcase},
                 {rmq_nodes_count, cluster_size(Testcase)}]),
    SetupSteps = case WithRabbitMQ of
                     false ->
                         rabbit_ct_vm_helpers:setup_steps();
                     true ->
                         [fun rabbit_ct_broker_helpers:run_make_dist/1] ++
                         rabbit_ct_vm_helpers:setup_steps() ++
                         rabbit_ct_broker_helpers:setup_steps_for_vms()
                 end,
    rabbit_ct_helpers:run_steps(Config1, SetupSteps).

%% Number of VMs Terraform should provision for a given testcase.
instance_count(run_code_on_three_vms) -> 3;
instance_count(run_three_rabbitmq_nodes) -> 3;
%% We want more RabbitMQs than VMs.
instance_count(run_four_rabbitmq_nodes) -> 3;
instance_count(_) -> 1.

%% Size of the RabbitMQ cluster to start (0 = no RabbitMQ at all).
cluster_size(run_one_rabbitmq_node) -> 1;
cluster_size(run_three_rabbitmq_nodes) -> 3;
%% We want more RabbitMQs than VMs.
cluster_size(run_four_rabbitmq_nodes) -> 4;
cluster_size(_) -> 0.
%% Per-testcase teardown: mirror init_per_testcase/2 — tear the broker
%% down first (when one was started), then destroy the VMs.
end_per_testcase(Testcase, Config) ->
    TeardownSteps = case ?config(run_rabbitmq, Config) of
                        false ->
                            rabbit_ct_vm_helpers:teardown_steps();
                        true ->
                            rabbit_ct_broker_helpers:teardown_steps_for_vms() ++
                            rabbit_ct_vm_helpers:teardown_steps()
                    end,
    Config1 = rabbit_ct_helpers:run_steps(Config, TeardownSteps),
    rabbit_ct_helpers:testcase_finished(Config1, Testcase).
%% -------------------------------------------------------------------
%% Run arbitrary code.
%% -------------------------------------------------------------------
%% Execute do_run_code_on_one_vm/1 on every provisioned VM, passing the
%% CT master node so the remote side can ping back.
run_code_on_one_vm(Config) ->
    CTMaster = node(),
    rabbit_ct_vm_helpers:rpc_all(
      Config, ?MODULE, do_run_code_on_one_vm, [CTMaster]).
%% Runs on the remote VM: verify we are NOT on the CT master and that
%% the CT master is reachable over Erlang distribution.
do_run_code_on_one_vm(CTMaster) ->
    Self = node(),
    ct:pal("Testcase running on ~ts", [Self]),
    ?assertNotEqual(CTMaster, Self),
    ?assertEqual(pong, net_adm:ping(CTMaster)).
%% Same as run_code_on_one_vm/1, but the testcase is provisioned with
%% three VMs (see init_per_testcase/2), so the RPC fans out to all three.
run_code_on_three_vms(Config) ->
    CTMaster = node(),
    rabbit_ct_vm_helpers:rpc_all(
      Config, ?MODULE, do_run_code_on_three_vms, [CTMaster]).
%% Runs on each remote VM: verify we are NOT on the CT master and that
%% the CT master is reachable over Erlang distribution.
do_run_code_on_three_vms(CTMaster) ->
    Self = node(),
    ct:pal("Testcase running on ~ts", [Self]),
    ?assertNotEqual(CTMaster, Self),
    ?assertEqual(pong, net_adm:ping(CTMaster)).
%% -------------------------------------------------------------------
%% Run RabbitMQ node.
%% -------------------------------------------------------------------
%% One VM, one RabbitMQ node: RabbitMQ must be running inside the
%% broker node, not inside the CT peer controlling the VM.
run_one_rabbitmq_node(Config) ->
    Peers = rabbit_ct_vm_helpers:get_ct_peers(Config),
    PeerStates = [rabbit:is_running(Peer) || Peer <- Peers],
    ?assertEqual([false], PeerStates),
    Brokers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
    BrokerStates = [rabbit:is_running(Broker) || Broker <- Brokers],
    ?assertEqual([true], BrokerStates).
%% Three VMs, four RabbitMQ nodes (more brokers than VMs; see
%% init_per_testcase/2): the CT peers themselves must not run RabbitMQ,
%% all four broker nodes must be running and clustered, and the cluster
%% membership reported by node 0 must match the configured node list.
%%
%% NOTE(review): the original pasted text had a multi-line commit
%% message embedded inside the rpc/5 call, which made the function
%% syntactically invalid; the call is reconstructed here from the
%% surviving code lines.
run_four_rabbitmq_nodes(Config) ->
    CTPeers = rabbit_ct_vm_helpers:get_ct_peers(Config),
    ?assertEqual([false, false, false],
                 [rabbit:is_running(CTPeer) || CTPeer <- CTPeers]),
    RabbitMQNodes = lists:sort(
                      rabbit_ct_broker_helpers:get_node_configs(
                        Config, nodename)),
    ?assertEqual([true, true, true, true],
                 [rabbit:is_running(Node) || Node <- RabbitMQNodes]),
    ?assertEqual([true, true, true, true],
                 rabbit_ct_broker_helpers:rpc_all(
                   Config, rabbit_db_cluster, is_clustered, [])),
    %% Ask node 0 which cluster members currently run RabbitMQ and
    %% compare against the full configured node list.
    ClusteredNodes = lists:sort(
                       rabbit_ct_broker_helpers:rpc(
                         Config, 0, rabbit_nodes, list_running, [])),
    ?assertEqual(ClusteredNodes, RabbitMQNodes).