%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2007-2025 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
%% This test suite contains test cases that are shared between (i.e. executed across):
%% 1. plugins rabbitmq_mqtt and rabbitmq_web_mqtt
%% 2. MQTT versions v3, v4 and v5
%%
%% In other words, this test suite should not contain any test case that is executed
%% only with a particular plugin or particular MQTT version.
%%
%% When adding a test case here, the same function must be defined in web_mqtt_shared_SUITE.
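%%
%% For illustration, a hedged sketch of such a mirror (the exact wiring in
%% web_mqtt_shared_SUITE may differ; delegation back to this module is an
%% assumption): each shared case, e.g. events/1, is defined under the same
%% name:
%%
%%   events(Config) ->
%%       mqtt_shared_SUITE:events(Config).
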
-module(mqtt_shared_SUITE).
-compile([export_all,
          nowarn_export_all]).
-include_lib("common_test/include/ct.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("amqp_client/include/amqp_client.hrl").
-include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl").
-include_lib("rabbitmq_ct_helpers/include/rabbit_mgmt_test.hrl").
-import(rabbit_ct_broker_helpers,
        [rabbitmqctl_list/3,
         rabbitmqctl/3,
         rpc/4,
         rpc/5,
         rpc_all/4,
         get_node_config/3,
         drain_node/2,
         revive_node/2,
         await_metadata_store_consistent/2
        ]).
-import(rabbit_ct_helpers,
        [eventually/3,
         eventually/1]).
-import(util,
        [all_connection_pids/1,
         get_global_counters/1, get_global_counters/2,
         get_global_counters/3, get_global_counters/4,
         expect_publishes/3,
         connect/2, connect/3, connect/4,
         get_events/1, assert_event_type/2, assert_event_prop/2,
         await_exit/1, await_exit/2,
         publish_qos1_timeout/4,
         non_clean_sess_opts/0]).
-import(rabbit_mgmt_test_util,
        [http_get/2,
         http_delete/3]).
%% defined in MQTT v5 (not in v4 or v3)
-define(RC_SERVER_SHUTTING_DOWN, 16#8B).
-define(RC_KEEP_ALIVE_TIMEOUT, 16#8D).
-define(RC_SESSION_TAKEN_OVER, 16#8E).
-define(TIMEOUT, 30_000).
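
%% These reason codes let v5 test cases assert why the server closed the
%% connection. A minimal sketch of such an assertion, assuming the emqtt
%% test client forwards a server-initiated DISCONNECT to the test process
%% as a {disconnected, ReasonCode, Properties} message (the helper name
%% assert_v5_disconnect is illustrative, not an existing helper):
%%
%%   assert_v5_disconnect(ExpectedRc) ->
%%       receive
%%           {disconnected, ExpectedRc, _Props} -> ok
%%       after ?TIMEOUT ->
%%                 ct:fail("expected DISCONNECT with reason code ~p", [ExpectedRc])
%%       end.
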
all() ->
    [{group, mqtt}].
%% The code being tested under v3 and v4 is almost identical.
%% To save time in CI, we therefore run only a very small subset of tests in v3.
groups() ->
    [
     {mqtt, [],
      [{cluster_size_1, [],
        [{v3, [], cluster_size_1_tests_v3()},
         {v4, [], cluster_size_1_tests()},
         {v5, [], cluster_size_1_tests()}]},
       {cluster_size_3, [],
        [{v4, [], cluster_size_3_tests()},
         {v5, [], cluster_size_3_tests()}]}
      ]}
    ].
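
%% To run a single combination locally, a make invocation along these
%% lines should work, e.g. for v5 on a single node (the ct-shared target
%% and FULL flag follow the commands used during development of this
%% suite; adjust to your build setup):
%%
%%   make -C deps/rabbitmq_mqtt ct-shared t=[mqtt,v5,cluster_size_1] FULL=1
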
cluster_size_1_tests_v3() ->
    [global_counters,
     events
    ].
cluster_size_1_tests() ->
    [
     global_counters %% must be the 1st test case
     ,message_size_metrics
     ,block_only_publisher
     ,many_qos1_messages
     ,session_expiry
     ,cli_close_all_connections
     ,cli_close_all_user_connections
     ,management_plugin_connection
     ,management_plugin_enable
     ,disconnect
     ,pubsub_shared_connection
     ,pubsub_separate_connections
     ,will_with_disconnect
     ,will_without_disconnect
     ,decode_basic_properties
     ,quorum_queue_rejects
     ,events
     ,internal_event_handler
     ,non_clean_sess_reconnect_qos1
     ,non_clean_sess_reconnect_qos0
     ,non_clean_sess_reconnect_qos0_and_qos1
     ,non_clean_sess_empty_client_id
     ,subscribe_same_topic_same_qos
     ,subscribe_same_topic_different_qos
     ,subscribe_multiple
     ,large_message_mqtt_to_mqtt
     ,large_message_amqp_to_mqtt
     ,keepalive
     ,keepalive_turned_off
     ,block
     ,amqp_to_mqtt_qos0
     ,clean_session_disconnect_client
     ,clean_session_node_restart
     ,clean_session_node_kill
     ,rabbit_status_connection_count
     ,trace
     ,trace_large_message
     ,max_packet_size_unauthenticated
     ,max_packet_size_authenticated
     ,default_queue_type
     ,message_interceptors
     ,utf8
     ,retained_message_conversion
     ,bind_exchange_to_exchange
     ,bind_exchange_to_exchange_single_message
     ,notify_consumer_classic_queue_deleted
     ,notify_consumer_quorum_queue_deleted
     ,notify_consumer_qos0_queue_deleted
    ].
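
%% Most cases above follow the same shape: connect via a util helper,
%% exercise the broker, assert, disconnect. A hedged sketch using the
%% imports above (client ID, topic and payload are made up;
%% pubsub_sketch is not a real case in this suite):
%%
%%   pubsub_sketch(Config) ->
%%       C = connect(<<"sketch-client">>, Config),
%%       {ok, _, [1]} = emqtt:subscribe(C, <<"a/b">>, qos1),
%%       {ok, _} = emqtt:publish(C, <<"a/b">>, <<"m1">>, [{qos, 1}]),
%%       ok = expect_publishes(C, <<"a/b">>, [<<"m1">>]),
%%       ok = emqtt:disconnect(C).
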
cluster_size_3_tests() ->
    [
     pubsub,
     queue_down_qos1,
     consuming_classic_queue_down,
     flow_classic_queue,
     flow_quorum_queue,
     flow_stream,
     rabbit_mqtt_qos0_queue,
     rabbit_mqtt_qos0_queue_kill_node,
     cli_list_queues,
     delete_create_queue,
     session_reconnect,
     session_takeover,
     duplicate_client_id,
|
Allow to use Khepri database to store metadata instead of Mnesia
[Why]
Mnesia is a very powerful and convenient tool for Erlang applications:
it is a persistent disc-based database, it handles replication accross
multiple Erlang nodes and it is available out-of-the-box from the
Erlang/OTP distribution. RabbitMQ relies on Mnesia to manage all its
metadata:
* virtual hosts' properties
* intenal users
* queue, exchange and binding declarations (not queues data)
* runtime parameters and policies
* ...
Unfortunately Mnesia makes it difficult to handle network partition and,
as a consequence, the merge conflicts between Erlang nodes once the
network partition is resolved. RabbitMQ provides several partition
handling strategies but they are not bullet-proof. Users still hit
situations where it is a pain to repair a cluster following a network
partition.
[How]
@kjnilsson created Ra [1], a Raft consensus library that RabbitMQ
already uses successfully to implement quorum queues and streams for
instance. Those queues do not suffer from network partitions.
We created Khepri [2], a new persistent and replicated database engine
based on Ra and we want to use it in place of Mnesia in RabbitMQ to
solve the problems with network partitions.
This patch integrates Khepri as an experimental feature. When enabled,
RabbitMQ will store all its metadata in Khepri instead of Mnesia.
This change comes with behavior changes. While Khepri remains disabled,
you should see no changes to the behavior of RabbitMQ. If there are
changes, it is a bug. After Khepri is enabled, there are significant
changes of behavior that you should be aware of.
Because it is based on the Raft consensus algorithm, when there is a
network partition, only the cluster members that are in the partition
with at least `(Number of nodes in the cluster ÷ 2) + 1` nodes
can "make progress". In other words, only those nodes may write to the
Khepri database and read from the database and expect a consistent
result.
For instance in a cluster of 5 RabbitMQ nodes:
* If there are two partitions, one with 3 nodes, one with 2 nodes, only
the group of 3 nodes will be able to write to the database.
* If there are three partitions, two with 2 nodes, one with 1 node, none
of the groups can write to the database.
Because the Khepri database will be used for all kinds of metadata, it
means that RabbitMQ nodes that can't write to the database will be
unable to perform some operations. A list of operations and what to
expect is documented in the associated pull request and the RabbitMQ
website.
This requirement from Raft also affects the startup of RabbitMQ nodes in
a cluster. Indeed, at least a quorum number of nodes must be started at
once to allow nodes to become ready.
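As a rough illustration of the majority rule above (a sketch only;
`quorum_size/1` is a made-up helper, not part of this patch):
```
quorum_size(NumNodes) when is_integer(NumNodes), NumNodes > 0 ->
    NumNodes div 2 + 1.
%% quorum_size(5) =:= 3: a 3-node partition can make progress,
%% while partitions of 2 or 1 nodes cannot.
```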
To enable Khepri, you need to enable the `khepri_db` feature flag:
rabbitmqctl enable_feature_flag khepri_db
When the `khepri_db` feature flag is enabled, the migration code
performs the following two tasks:
1. It synchronizes the Khepri cluster membership from the Mnesia
cluster. It uses `mnesia_to_khepri:sync_cluster_membership/1` from
the `khepri_mnesia_migration` application [3].
2. It copies data from relevant Mnesia tables to Khepri, doing some
conversion if necessary on the way. Again, it uses
`mnesia_to_khepri:copy_tables/4` from `khepri_mnesia_migration` to do
it.
This can be performed on a running standalone RabbitMQ node or cluster.
Data will be migrated from Mnesia to Khepri without any service
interruption. Note that during the migration, the performance may
decrease and the memory footprint may go up.
Because this feature flag is considered experimental, it is not enabled
by default even on a brand new RabbitMQ deployment.
More about the implementation details below:
In the past months, all accesses to Mnesia were isolated in a collection
of `rabbit_db*` modules. This is where the integration of Khepri mostly
takes place: we use a function called `rabbit_khepri:handle_fallback/1`
which selects the database and performs the query or the transaction.
Here is an example from `rabbit_db_vhost`:
* Up until RabbitMQ 3.12.x:
get(VHostName) when is_binary(VHostName) ->
get_in_mnesia(VHostName).
* Starting with RabbitMQ 3.13.0:
get(VHostName) when is_binary(VHostName) ->
rabbit_khepri:handle_fallback(
#{mnesia => fun() -> get_in_mnesia(VHostName) end,
khepri => fun() -> get_in_khepri(VHostName) end}).
This `rabbit_khepri:handle_fallback/1` function relies on two things:
1. the fact that the `khepri_db` feature flag is enabled, in which case
it always executes the Khepri-based variant.
2. otherwise, the ability (or not) to read from and write to Mnesia tables.
Before the feature flag is enabled, or during the migration, the
function will try to execute the Mnesia-based variant. If it succeeds,
then it returns the result. If it fails because one or more Mnesia
tables can't be used, it restarts from scratch: it means the feature
flag is being enabled and depending on the outcome, either the
Mnesia-based variant will succeed (the feature flag couldn't be enabled)
or the feature flag will be marked as enabled and it will call the
Khepri-based variant. The meat of this function really lives in the
`khepri_mnesia_migration` application [3] and
`rabbit_khepri:handle_fallback/1` is a wrapper on top of it that knows
about the feature flag.
However, some calls to the database do not depend on the existence of
Mnesia tables, such as functions where we need to learn about the
members of a cluster. For those, we can't rely on exceptions from
Mnesia. Therefore, we just look at the state of the feature flag to
determine which database to use. There are two situations though:
* Sometimes, we need the feature flag state query to block because the
function interested in it can't return a valid answer during the
migration. Here is an example:
case rabbit_khepri:is_enabled(RemoteNode) of
true -> can_join_using_khepri(RemoteNode);
false -> can_join_using_mnesia(RemoteNode)
end
* Sometimes, we need the feature flag state query to NOT block (for
instance because it would cause a deadlock). Here is an example:
case rabbit_khepri:get_feature_state() of
enabled -> members_using_khepri();
_ -> members_using_mnesia()
end
Direct accesses to Mnesia still exist. They are limited to code that is
specific to Mnesia such as classic queue mirroring or network partitions
handling strategies.
Now, to discover the Mnesia tables to migrate and how to migrate them,
we use an Erlang module attribute called
`rabbit_mnesia_tables_to_khepri_db` which indicates a list of Mnesia
tables and an associated converter module. Here is an example in the
`rabbitmq_recent_history_exchange` plugin:
-rabbit_mnesia_tables_to_khepri_db(
[{?RH_TABLE, rabbit_db_rh_exchange_m2k_converter}]).
The converter module — `rabbit_db_rh_exchange_m2k_converter` in this
example — is in fact a "sub" converter module called by
`rabbit_db_m2k_converter`. See the documentation of a `mnesia_to_khepri`
converter module to learn more about these modules.
[1] https://github.com/rabbitmq/ra
[2] https://github.com/rabbitmq/khepri
[3] https://github.com/rabbitmq/khepri_mnesia_migration
See #7206.
Co-authored-by: Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
Co-authored-by: Diana Parra Corbacho <dparracorbac@vmware.com>
Co-authored-by: Michael Davis <mcarsondavis@gmail.com>
2023-01-05 20:57:50 +08:00
|
|
|
publish_to_all_queue_types_qos0,
|
2024-06-20 02:02:33 +08:00
|
|
|
publish_to_all_queue_types_qos1,
|
|
|
|
maintenance
|
2023-01-05 20:57:50 +08:00
|
|
|
].
|
|
|
|
|
2022-10-28 01:07:43 +08:00
|
|
|
suite() ->
|
Bump test cluster creation timeouts
in MQTT tests.
For example in the ff_SUITE we see sporadic failures in Buildbuddy
where the cluster cannot be created within 2 minutes:
```
*** CT Error Notification 2023-04-24 10:58:55.628 ***
rabbit_ct_helpers:port_receive_loop failed on line 945
Reason: {timetrap_timeout,120000}
...
=== Ended at 2023-04-24 10:58:55
=== Location: [{rabbit_ct_helpers,port_receive_loop,945},
{rabbit_ct_helpers,exec,920},
{rabbit_ct_broker_helpers,cluster_nodes1,858},
{rabbit_ct_broker_helpers,cluster_nodes1,840},
{rabbit_ct_helpers,run_steps,141},
{ff_SUITE,init_per_group,last_expr},
{test_server,ts_tc,1782},
{test_server,run_test_case_eval1,1379},
{test_server,run_test_case_eval,1223}]
=== Reason: timetrap timeout
===
*** init_per_group failed.
Skipping all cases.
```
The default time limit for a test case is 30 minutes.
2023-04-24 20:38:29 +08:00
|
|
|
[{timetrap, {minutes, 10}}].
|
2022-10-28 01:07:43 +08:00
|
|
|
|
|
|
|
%% -------------------------------------------------------------------
|
|
|
|
%% Testsuite setup/teardown.
|
|
|
|
%% -------------------------------------------------------------------
|
|
|
|
|
|
|
|
init_per_suite(Config) ->
|
|
|
|
rabbit_ct_helpers:log_environment(),
|
2024-06-20 02:02:33 +08:00
|
|
|
Config1 = rabbit_ct_helpers:merge_app_env(
|
|
|
|
Config, {rabbit, [
|
|
|
|
{quorum_tick_interval, 1000},
|
rabbitmq_ct_broker_helpers: Use node 2 as the cluster seed node
[Why]
When running mixed-version tests, nodes 1/3/5/... are using the primary
umbrella, so usually the newest version. Nodes 2/4/6/... are using the
secondary umbrella, thus the old version.
When clustering, we used to use node 1 (running a new version) as the
seed node, meaning other nodes would join it.
This complicates things with feature flags because we have to make sure
that we start node 1 with new stable feature flags disabled to allow old
nodes to join.
This is also a problem with Khepri machine versions because the cluster
would start with the latest version, which old nodes might not have.
[How]
This patch changes the logic to use a node running the secondary
umbrella as the seed node instead. If there is no node running it, we
pick the first node as before.
V2: Revert part of "rabbitmq_ct_helpers: Fix how we set
`$RABBITMQ_FEATURE_FLAGS` in tests" (commit
57ed962ef69669cb72c5dd7be93cb7fbf3ca4c0c). These changes are no
longer needed with the new logic.
V3: The check that verifies that the correct metadata store is used has
a special case for nodes that use the secondary umbrella: if Khepri
is supposed to be used but it's not, the feature flag is enabled.
The reason is that the `v4.0.x` branch doesn't know about the `rel`
configuration of `forced_feature_flags_on_init`. The nodes will
have ignored this parameter and booted with the stable feature
flags only.
Many testsuites are adapted to the new clustering order. If they
manage which node joins which node, either the order is changed in
the testcases, or nodes are started with only required feature
flags. For testsuites that rely on peer discovery where the order is
unknown, nodes are started with only required feature flags.
2025-01-20 18:24:11 +08:00
|
|
|
{stream_tick_interval, 1000},
|
2025-07-02 17:35:41 +08:00
|
|
|
{forced_feature_flags_on_init, [
|
|
|
|
delete_ra_cluster_mqtt_node,
|
|
|
|
mqtt_v5,
|
|
|
|
rabbit_mqtt_qos0_queue,
|
|
|
|
restart_streams,
|
|
|
|
stream_sac_coordinator_unblock_group,
|
|
|
|
stream_update_config_command,
|
|
|
|
stream_filtering,
|
|
|
|
message_containers,
|
|
|
|
quorum_queue_non_voters
|
|
|
|
]},
|
2025-06-05 21:02:58 +08:00
|
|
|
{start_rmq_with_plugins_disabled, true}
|
2024-06-20 02:02:33 +08:00
|
|
|
]}),
|
|
|
|
rabbit_ct_helpers:run_setup_steps(Config1).
|
2022-10-28 01:07:43 +08:00
|
|
|
|
|
|
|
end_per_suite(Config) ->
|
|
|
|
rabbit_ct_helpers:run_teardown_steps(Config).
|
|
|
|
|
2025-06-01 22:43:39 +08:00
|
|
|
init_per_group(mqtt, Config0) ->
|
|
|
|
rabbit_ct_helpers:set_config(Config0, {websocket, false});
|
2024-07-09 18:30:47 +08:00
|
|
|
init_per_group(Group, Config)
|
2023-02-22 02:24:15 +08:00
|
|
|
when Group =:= v3;
|
|
|
|
Group =:= v4;
|
|
|
|
Group =:= v5 ->
|
2024-07-09 18:30:47 +08:00
|
|
|
rabbit_ct_helpers:set_config(Config, {mqtt_version, Group});
|
2023-02-22 02:24:15 +08:00
|
|
|
|
2023-10-27 15:15:14 +08:00
|
|
|
init_per_group(Group, Config0) ->
|
2023-02-22 02:24:15 +08:00
|
|
|
Nodes = case Group of
|
|
|
|
cluster_size_1 -> 1;
|
2024-06-20 02:02:33 +08:00
|
|
|
cluster_size_3 -> 3
|
2023-02-22 02:24:15 +08:00
|
|
|
end,
|
2023-10-27 15:15:14 +08:00
|
|
|
Suffix = rabbit_ct_helpers:testcase_absname(Config0, "", "-"),
|
2024-06-20 02:02:33 +08:00
|
|
|
Config = rabbit_ct_helpers:set_config(
|
|
|
|
Config0,
|
|
|
|
[{rmq_nodes_count, Nodes},
|
|
|
|
{rmq_nodename_suffix, Suffix}]),
|
2025-06-05 21:02:58 +08:00
|
|
|
Config1 = rabbit_ct_helpers:run_steps(
|
|
|
|
Config,
|
|
|
|
rabbit_ct_broker_helpers:setup_steps() ++
|
|
|
|
rabbit_ct_client_helpers:setup_steps()),
|
|
|
|
util:enable_plugin(Config1, rabbitmq_mqtt),
|
|
|
|
Config1.
|
2023-01-28 02:25:57 +08:00
|
|
|
|
2023-01-03 01:02:43 +08:00
|
|
|
end_per_group(G, Config)
|
2023-02-22 02:24:15 +08:00
|
|
|
when G =:= cluster_size_1;
|
2024-06-20 02:02:33 +08:00
|
|
|
G =:= cluster_size_3 ->
|
2023-06-21 23:19:38 +08:00
|
|
|
rabbit_ct_helpers:run_steps(
|
2022-10-28 01:07:43 +08:00
|
|
|
Config,
|
|
|
|
rabbit_ct_client_helpers:teardown_steps() ++
|
2023-02-22 02:24:15 +08:00
|
|
|
rabbit_ct_broker_helpers:teardown_steps());
|
|
|
|
end_per_group(_, Config) ->
|
|
|
|
Config.
|
2022-10-28 01:07:43 +08:00
|
|
|
|
2023-01-03 01:02:43 +08:00
|
|
|
init_per_testcase(T, Config)
|
|
|
|
when T =:= management_plugin_connection;
|
2024-08-28 18:27:46 +08:00
|
|
|
T =:= management_plugin_enable ->
|
2024-07-09 18:30:47 +08:00
|
|
|
inets:start(),
|
2023-01-03 01:02:43 +08:00
|
|
|
init_per_testcase0(T, Config);
|
2025-02-03 22:59:34 +08:00
|
|
|
init_per_testcase(T, Config)
|
|
|
|
when T =:= clean_session_disconnect_client;
|
2025-06-04 16:49:19 +08:00
|
|
|
T =:= zero_session_expiry_interval_disconnect_client;
|
2025-02-03 22:59:34 +08:00
|
|
|
T =:= clean_session_node_restart;
|
2025-06-01 22:43:39 +08:00
|
|
|
T =:= clean_session_node_kill;
|
|
|
|
T =:= notify_consumer_qos0_queue_deleted ->
|
2025-02-03 22:59:34 +08:00
|
|
|
ok = rpc(Config, rabbit_registry, register, [queue, <<"qos0">>, rabbit_mqtt_qos0_queue]),
|
|
|
|
init_per_testcase0(T, Config);
|
2022-10-28 01:07:43 +08:00
|
|
|
init_per_testcase(Testcase, Config) ->
|
2023-01-03 01:02:43 +08:00
|
|
|
init_per_testcase0(Testcase, Config).
|
|
|
|
|
|
|
|
init_per_testcase0(Testcase, Config) ->
|
2022-10-28 01:07:43 +08:00
|
|
|
rabbit_ct_helpers:testcase_started(Config, Testcase).
|
|
|
|
|
2023-01-03 01:02:43 +08:00
|
|
|
end_per_testcase(T, Config)
|
|
|
|
when T =:= management_plugin_connection;
|
2024-08-28 18:27:46 +08:00
|
|
|
T =:= management_plugin_enable ->
|
2023-01-03 01:02:43 +08:00
|
|
|
ok = inets:stop(),
|
|
|
|
end_per_testcase0(T, Config);
|
2025-02-03 22:59:34 +08:00
|
|
|
end_per_testcase(T, Config)
|
|
|
|
when T =:= clean_session_disconnect_client;
|
2025-06-04 16:49:19 +08:00
|
|
|
T =:= zero_session_expiry_interval_disconnect_client;
|
2025-02-03 22:59:34 +08:00
|
|
|
T =:= clean_session_node_restart;
|
2025-06-01 22:43:39 +08:00
|
|
|
T =:= clean_session_node_kill;
|
|
|
|
T =:= notify_consumer_qos0_queue_deleted ->
|
2025-02-03 22:59:34 +08:00
|
|
|
ok = rpc(Config, rabbit_registry, unregister, [queue, <<"qos0">>]),
|
|
|
|
end_per_testcase0(T, Config);
|
2022-10-28 01:07:43 +08:00
|
|
|
end_per_testcase(Testcase, Config) ->
|
2023-01-03 01:02:43 +08:00
|
|
|
end_per_testcase0(Testcase, Config).
|
|
|
|
|
|
|
|
end_per_testcase0(Testcase, Config) ->
|
2022-12-20 02:03:18 +08:00
|
|
|
rabbit_ct_client_helpers:close_channels_and_connection(Config, 0),
|
2023-06-02 22:37:34 +08:00
|
|
|
%% Assert that every testcase cleaned up its MQTT sessions.
|
2025-03-15 00:20:36 +08:00
|
|
|
_ = rpc(Config, ?MODULE, delete_queues, []),
|
2023-06-02 22:37:34 +08:00
|
|
|
eventually(?_assertEqual([], rpc(Config, rabbit_amqqueue, list, []))),
|
2022-10-28 01:07:43 +08:00
|
|
|
rabbit_ct_helpers:testcase_finished(Config, Testcase).
|
|
|
|
|
2025-03-15 00:20:36 +08:00
|
|
|
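%% Best-effort cleanup: delete every remaining queue; the catch swallows
%% errors from queues that are already gone or cannot be deleted.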
delete_queues() ->
|
|
|
|
[catch rabbit_amqqueue:delete(Q, false, false, <<"dummy">>)
|
|
|
|
|| Q <- rabbit_amqqueue:list()].
|
|
|
|
|
2022-10-28 01:07:43 +08:00
|
|
|
%% -------------------------------------------------------------------
|
|
|
|
%% Testsuite cases
|
|
|
|
%% -------------------------------------------------------------------
|
|
|
|
|
2023-01-03 01:02:43 +08:00
|
|
|
disconnect(Config) ->
|
|
|
|
C = connect(?FUNCTION_NAME, Config),
|
|
|
|
eventually(?_assertEqual(1, length(all_connection_pids(Config)))),
|
|
|
|
process_flag(trap_exit, true),
|
|
|
|
ok = emqtt:disconnect(C),
|
|
|
|
await_exit(C, normal),
|
|
|
|
eventually(?_assertEqual([], all_connection_pids(Config))),
|
|
|
|
ok.
|
|
|
|
|
|
|
|
pubsub_shared_connection(Config) ->
|
|
|
|
C = connect(?FUNCTION_NAME, Config),
|
|
|
|
|
|
|
|
Topic = <<"/topic/test-mqtt">>,
|
|
|
|
{ok, _, [1]} = emqtt:subscribe(C, Topic, qos1),
|
|
|
|
|
|
|
|
Payload = <<"a\x00a">>,
|
|
|
|
?assertMatch({ok, #{packet_id := _,
|
|
|
|
reason_code := 0,
|
|
|
|
reason_code_name := success
|
|
|
|
}},
|
|
|
|
emqtt:publish(C, Topic, Payload, [{qos, 1}])),
|
|
|
|
ok = expect_publishes(C, Topic, [Payload]),
|
|
|
|
ok = emqtt:disconnect(C).
|
|
|
|
|
|
|
|
pubsub_separate_connections(Config) ->
|
|
|
|
Pub = connect(<<(atom_to_binary(?FUNCTION_NAME))/binary, "_publisher">>, Config),
|
|
|
|
Sub = connect(<<(atom_to_binary(?FUNCTION_NAME))/binary, "_subscriber">>, Config),
|
|
|
|
|
|
|
|
Topic = <<"/topic/test-mqtt">>,
|
|
|
|
{ok, _, [1]} = emqtt:subscribe(Sub, Topic, qos1),
|
|
|
|
|
|
|
|
Payload = <<"a\x00a">>,
|
|
|
|
?assertMatch({ok, #{packet_id := _,
|
|
|
|
reason_code := 0,
|
|
|
|
reason_code_name := success
|
|
|
|
}},
|
|
|
|
emqtt:publish(Pub, Topic, Payload, [{qos, 1}])),
|
|
|
|
ok = expect_publishes(Sub, Topic, [Payload]),
|
|
|
|
ok = emqtt:disconnect(Pub),
|
|
|
|
ok = emqtt:disconnect(Sub).
|
|
|
|
|
|
|
|
will_with_disconnect(Config) ->
|
|
|
|
LastWillTopic = <<"/topic/last-will">>,
|
|
|
|
LastWillMsg = <<"last will message">>,
|
2023-03-02 16:52:11 +08:00
|
|
|
Opts = [{will_topic, LastWillTopic},
|
|
|
|
{will_payload, LastWillMsg},
|
|
|
|
{will_qos, 1}],
|
|
|
|
Pub = connect(<<(atom_to_binary(?FUNCTION_NAME))/binary, "_publisher">>, Config, Opts),
|
2023-01-03 01:02:43 +08:00
|
|
|
Sub = connect(<<(atom_to_binary(?FUNCTION_NAME))/binary, "_subscriber">>, Config),
|
|
|
|
{ok, _, [1]} = emqtt:subscribe(Sub, LastWillTopic, qos1),
|
|
|
|
|
|
|
|
%% Client sends DISCONNECT packet. Therefore, will message should not be sent.
|
|
|
|
ok = emqtt:disconnect(Pub),
|
|
|
|
?assertEqual({publish_not_received, LastWillMsg},
|
|
|
|
expect_publishes(Sub, LastWillTopic, [LastWillMsg])),
|
|
|
|
|
|
|
|
ok = emqtt:disconnect(Sub).
|
|
|
|
|
|
|
|
will_without_disconnect(Config) ->
|
|
|
|
LastWillTopic = <<"/topic/last-will">>,
|
|
|
|
LastWillMsg = <<"last will message">>,
|
2023-03-02 16:52:11 +08:00
|
|
|
Opts = [{will_topic, LastWillTopic},
|
|
|
|
{will_payload, LastWillMsg},
|
|
|
|
{will_qos, 1}],
|
|
|
|
Pub = connect(<<(atom_to_binary(?FUNCTION_NAME))/binary, "_publisher">>, Config, Opts),
|
2023-01-03 01:02:43 +08:00
|
|
|
Sub = connect(<<(atom_to_binary(?FUNCTION_NAME))/binary, "_subscriber">>, Config),
|
|
|
|
{ok, _, [1]} = emqtt:subscribe(Sub, LastWillTopic, qos1),
|
|
|
|
|
|
|
|
%% Client does not send DISCONNECT packet. Therefore, will message should be sent.
|
|
|
|
unlink(Pub),
|
Support Will Delay Interval
Previously, the Will Message could be kept in memory in the MQTT
connection process state. Upon termination, the Will Message is sent.
The new MQTT 5.0 feature Will Delay Interval requires storing the Will
Message outside of the MQTT connection process state.
The Will Message should not be stored node local because the client
could reconnect to a different node.
Storing the Will Message in Mnesia is not an option because we want to
get rid of Mnesia. Storing the Will Message in a Ra cluster or in Khepri
is only an option if the Will Payload is small as there is currently no
way in Ra to **efficiently** snapshot large binary data (Note that these
Will Messages are not consumed in a FIFO style workload like messages in
quorum queues. A Will Message needs to be stored for as long as the
Session lasts - up to 1 day by default, but could also be much longer if
RabbitMQ is configured with a higher maximum session expiry interval.)
Usually, Will Payloads are small: they are just a notification that an
MQTT session ended abnormally. However, we don't know how users leverage
the Will Message feature, and the MQTT protocol allows for large Will Payloads.
Therefore, the solution implemented in this commit - which should work
well enough - is storing the Will Message in a queue.
Each MQTT session with a Session Expiry Interval and a Will Delay
Interval of > 0 seconds creates a queue, when the current Network
Connection ends, in which it stores its Will Message. The Will Message has a
message TTL set (corresponds to the Will Delay Interval) and the queue
has a queue TTL set (corresponds to the Session Expiry Interval).
If the client does not reconnect within the Will Delay Interval, the
message is dead lettered to the configured MQTT topic exchange
(amq.topic by default).
The Will Delay Interval can be set by both publishers and subscribers.
Therefore, the Will Message is the 1st session state that RabbitMQ keeps
for publish-only MQTT clients.
One current limitation of this commit is that a Will Message that is
delayed (i.e. Will Delay Interval is set) and retained (i.e. Will Retain
flag set) will not be retained.
One solution to retain delayed Will Messages is that the retainer
process consumes from a queue and the queue binds to the topic exchange
with a topic starting with `$`, for example `$retain/#`.
The AMQP 0.9.1 Will Message that is dead lettered could then get a
CC header added such that it is published not only with the Will Topic,
but also with the `$retain` topic. For example, if the Will Topic is `a/b`,
it will be published with routing key `a/b` and CC header `$retain/a/b`.
The reason this is not implemented in this commit is that to keep the
currently broken retained message store behaviour, we would require
creating at least one queue per node and publishing only to that local
queue. In future, once we have a replicated retained message store based
on a Stream for example, we could just publish all retained messages to
the `$retain` topic and therefore into the Stream.
So, for now, we list "retained and delayed Will Messages" as a limitation:
they won't actually be retained.
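A minimal sketch of this queueing scheme in plain AMQP 0.9.1 terms (a
sketch only; the queue name and the `Ch`, `SessionExpiryMs`, `WillDelayMs`,
`WillRoutingKey` and `WillPayload` variables are assumptions, and the real
implementation in the MQTT plugin differs):
```
%% The queue lives as long as the Session Expiry Interval and dead letters
%% expired messages to the topic exchange with the Will Topic's routing key.
#'queue.declare_ok'{} =
    amqp_channel:call(
      Ch, #'queue.declare'{
             queue     = <<"mqtt.will.my-client-id">>,
             arguments = [{<<"x-expires">>, long, SessionExpiryMs},
                          {<<"x-dead-letter-exchange">>, longstr, <<"amq.topic">>},
                          {<<"x-dead-letter-routing-key">>, longstr, WillRoutingKey}]}),
%% The Will Message carries the Will Delay Interval as its per-message TTL.
amqp_channel:call(Ch,
                  #'basic.publish'{routing_key = <<"mqtt.will.my-client-id">>},
                  #amqp_msg{props   = #'P_basic'{expiration = integer_to_binary(WillDelayMs)},
                            payload = WillPayload}).
```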
2023-05-18 23:36:25 +08:00
|
|
|
erlang:exit(Pub, test_will),
|
2023-01-03 01:02:43 +08:00
|
|
|
?assertEqual(ok, expect_publishes(Sub, LastWillTopic, [LastWillMsg])),
|
|
|
|
|
|
|
|
ok = emqtt:disconnect(Sub).
|
|
|
|
|
Fix Native MQTT crash if properties encoded
Fixes https://github.com/rabbitmq/rabbitmq-server/discussions/8252
The MQTT connection must decode AMQP 0.9.1 properties because they may
have been encoded, for example in:
https://github.com/rabbitmq/rabbitmq-server/blob/712c2b9ec96dca4f07f14dfed9c6028ef0e748fd/deps/rabbit/src/rabbit_variable_queue.erl#L2219
and
https://github.com/rabbitmq/rabbitmq-server/blob/712c2b9ec96dca4f07f14dfed9c6028ef0e748fd/deps/rabbit/src/rabbit_quorum_queue.erl#L1680
Prior to this commit, the MQTT connection process could crash with:
```
[{rabbit_mqtt_processor,deliver_one_to_client,
[{{resource,<<"/">>,queue,
<<"mqtt-subscription-mqtt-explorer-c5351d21qos0">>},
<0.4546.0>,undefined,false,
{basic_message,
{resource,<<"/">>,exchange,<<"amq.topic">>},
[<<"plant.v1.M3.BCD423.rev.fillStateChangedEvent">>],
{content,60,none,
<<80,64,16,97,112,112,108,105,99,97,116,105,111,110,47,
106,115,111,110,2,0,0,0,0,100,103,141,186>>,
rabbit_framing_amqp_0_9_1,
[<<"{\"plantIdentificationCode\":\"M3/BCD423/rev\",\"isFullSensorTriggered\":false,\"numberOfCarriers\":20,\"maxNumberOfCarriers\":1174,\"numberOfEmpties\":0,\"numberOfCarriersWithPayload\":20,\"numberOfCarriersWithOrder\":0,\"trackLength\":30000,\"trackLengthOccupied\":1130}">>]},
<<31,230,178,158,209,240,53,221,100,60,64,5,227,237,58,21>>,
true}},
false,
{state,
{cfg,#Port<0.282>,mqtt311,true,undefined,
{resource,<<"/">>,exchange,<<"amq.topic">>},
undefined,false,none,<0.680.0>,flow,none,10,<<"/">>,
<<"mqtt-explorer-c5351d21">>,undefined,
{192,168,10,131},
1883,
{192,168,10,130},
53244,1684508087392,#Fun<rabbit_mqtt_reader.0.106886>},
{rabbit_queue_type,
#{{resource,<<"/">>,queue,
<<"mqtt-subscription-mqtt-explorer-c5351d21qos0">>} =>
{ctx,rabbit_classic_queue,
{rabbit_classic_queue,<0.4546.0>,#{},
#{<0.4546.0> => ok}}}}},
#{},#{},1,
#{<<"#">> => 0,<<"$SYS/#">> => 0},
{auth_state,
{user,<<"rabbit">>,[],
[{rabbit_auth_backend_internal,
#Fun<rabbit_auth_backend_internal.3.114557357>}]},
#{<<"client_id">> => <<"mqtt-explorer-c5351d21">>}},
registered,#{},0}],
[{file,"rabbit_mqtt_processor.erl"},{line,1414}]},
{lists,foldl,3,[{file,"lists.erl"},{line,1350}]},
{rabbit_mqtt_processor,handle_queue_event,2,
[{file,"rabbit_mqtt_processor.erl"},{line,1345}]},
{rabbit_mqtt_reader,handle_cast,2,
[{file,"rabbit_mqtt_reader.erl"},{line,134}]},
{gen_server,try_dispatch,4,[{file,"gen_server.erl"},{line,1123}]},
{gen_server,handle_msg,6,[{file,"gen_server.erl"},{line,1200}]},
{proc_lib,init_p_do_apply,3,[{file,"proc_lib.erl"},{line,240}]}]}
```
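The gist of the fix, as a hedged sketch (not the actual patch; `Content0`
and `Headers` are assumed names): decode the 0.9.1 content before reading
its properties, e.g. with `rabbit_binary_parser:ensure_content_decoded/1`
from rabbit_common:
```
%% Content0 may arrive with properties still encoded (properties = none,
%% properties_bin set). Decode before matching on #'P_basic'{}:
Content = rabbit_binary_parser:ensure_content_decoded(Content0),
#content{properties = #'P_basic'{headers = Headers}} = Content,
```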
2023-05-20 01:02:57 +08:00
|
|
|
%% Test that an MQTT connection decodes the AMQP 0.9.1 'P_basic' properties.
|
|
|
|
%% see https://github.com/rabbitmq/rabbitmq-server/discussions/8252
|
|
|
|
decode_basic_properties(Config) ->
|
2025-06-01 22:43:39 +08:00
|
|
|
set_durable_queue_type(Config),
|
2023-05-20 01:02:57 +08:00
|
|
|
ClientId = Topic = Payload = atom_to_binary(?FUNCTION_NAME),
|
2023-05-21 18:32:14 +08:00
|
|
|
C1 = connect(ClientId, Config, non_clean_sess_opts()),
|
2023-05-20 01:02:57 +08:00
|
|
|
{ok, _, [1]} = emqtt:subscribe(C1, Topic, qos1),
|
|
|
|
QuorumQueues = rpc(Config, rabbit_amqqueue, list_by_type, [rabbit_quorum_queue]),
|
|
|
|
?assertEqual(1, length(QuorumQueues)),
|
2025-03-15 00:20:36 +08:00
|
|
|
{Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config),
|
2023-05-20 01:02:57 +08:00
|
|
|
amqp_channel:call(Ch, #'basic.publish'{exchange = <<"amq.topic">>,
|
|
|
|
routing_key = Topic},
|
|
|
|
#amqp_msg{payload = Payload}),
|
|
|
|
ok = expect_publishes(C1, Topic, [Payload]),
|
|
|
|
ok = emqtt:disconnect(C1),
|
|
|
|
C2 = connect(ClientId, Config, [{clean_start, true}]),
|
|
|
|
ok = emqtt:disconnect(C2),
|
2025-06-01 22:43:39 +08:00
|
|
|
unset_durable_queue_type(Config),
|
2025-03-15 00:20:36 +08:00
|
|
|
ok = rabbit_ct_client_helpers:close_connection_and_channel(Conn, Ch).
|
2023-05-20 01:02:57 +08:00
|
|
|
|
2022-11-08 23:46:37 +08:00
|
|
|
quorum_queue_rejects(Config) ->
|
2023-01-05 23:29:42 +08:00
|
|
|
{_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config),
|
2022-11-08 23:46:37 +08:00
|
|
|
Name = atom_to_binary(?FUNCTION_NAME),
|
|
|
|
|
|
|
|
ok = rabbit_ct_broker_helpers:set_policy(
|
|
|
|
Config, 0, <<"qq-policy">>, Name, <<"queues">>, [{<<"max-length">>, 1},
|
|
|
|
{<<"overflow">>, <<"reject-publish">>}]),
|
|
|
|
declare_queue(Ch, Name, [{<<"x-queue-type">>, longstr, <<"quorum">>}]),
|
|
|
|
bind(Ch, Name, Name),
|
|
|
|
|
2022-11-29 19:41:50 +08:00
|
|
|
C = connect(Name, Config, [{retry_interval, 1}]),
|
2023-08-09 19:13:19 +08:00
|
|
|
{ok, #{reason_code_name := success}} = emqtt:publish(C, Name, <<"m1">>, qos1),
|
|
|
|
{ok, #{reason_code_name := success}} = emqtt:publish(C, Name, <<"m2">>, qos1),
|
|
|
|
|
|
|
|
%% The queue will reject m3.
|
|
|
|
V = ?config(mqtt_version, Config),
|
|
|
|
if V =:= v3 orelse V =:= v4 ->
|
|
|
|
%% v3 and v4 do not support NACKs. Therefore, the server should drop the message.
|
|
|
|
?assertEqual(puback_timeout, util:publish_qos1_timeout(C, Name, <<"m3">>, 700));
|
|
|
|
V =:= v5 ->
|
|
|
|
%% v5 supports NACKs. Therefore, the server should send us a NACK.
|
|
|
|
?assertMatch({ok, #{reason_code_name := implementation_specific_error}},
|
|
|
|
emqtt:publish(C, Name, <<"m3">>, qos1))
|
|
|
|
end,
|
2022-11-08 23:46:37 +08:00
|
|
|
|
|
|
|
?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = <<"m1">>}},
|
2023-08-09 19:13:19 +08:00
|
|
|
amqp_channel:call(Ch, #'basic.get'{queue = Name})),
|
2022-11-08 23:46:37 +08:00
|
|
|
?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = <<"m2">>}},
|
2023-08-09 19:13:19 +08:00
|
|
|
amqp_channel:call(Ch, #'basic.get'{queue = Name})),
|
|
|
|
if V =:= v3 orelse V =:= v4 ->
|
|
|
|
%% m3 is re-sent by emqtt since we didn't receive a PUBACK.
|
|
|
|
?awaitMatch({#'basic.get_ok'{}, #amqp_msg{payload = <<"m3">>}},
|
|
|
|
amqp_channel:call(Ch, #'basic.get'{queue = Name}),
|
|
|
|
2000, 200);
|
|
|
|
V =:= v5 ->
|
|
|
|
%% m3 should not be re-sent by emqtt since we received a PUBACK.
|
|
|
|
?assertMatch(#'basic.get_empty'{},
|
|
|
|
amqp_channel:call(Ch, #'basic.get'{queue = Name}))
|
|
|
|
end,
|
2022-11-08 23:46:37 +08:00
|
|
|
|
|
|
|
ok = emqtt:disconnect(C),
|
|
|
|
delete_queue(Ch, Name),
|
|
|
|
ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"qq-policy">>).
|
|
|
|
|
2022-10-29 00:28:47 +08:00
|
|
|
publish_to_all_queue_types_qos0(Config) ->
|
|
|
|
publish_to_all_queue_types(Config, qos0).
|
|
|
|
|
|
|
|
publish_to_all_queue_types_qos1(Config) ->
|
|
|
|
publish_to_all_queue_types(Config, qos1).
|
|
|
|
|
|
|
|
publish_to_all_queue_types(Config, QoS) ->
|
2025-03-15 00:20:36 +08:00
|
|
|
{Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config),
|
2022-10-29 00:28:47 +08:00
|
|
|
|
|
|
|
CQ = <<"classic-queue">>,
|
|
|
|
QQ = <<"quorum-queue">>,
|
|
|
|
SQ = <<"stream-queue">>,
|
|
|
|
Topic = <<"mytopic">>,
|
|
|
|
|
|
|
|
declare_queue(Ch, CQ, []),
|
|
|
|
bind(Ch, CQ, Topic),
|
|
|
|
|
|
|
|
declare_queue(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}]),
|
|
|
|
bind(Ch, QQ, Topic),
|
|
|
|
|
|
|
|
declare_queue(Ch, SQ, [{<<"x-queue-type">>, longstr, <<"stream">>}]),
|
|
|
|
bind(Ch, SQ, Topic),
|
|
|
|
|
2024-06-20 02:02:33 +08:00
|
|
|
NumMsgs = 1000,
|
|
|
|
C = connect(?FUNCTION_NAME, Config, [{max_inflight, 200},
|
|
|
|
{retry_interval, 2}]),
|
|
|
|
Self = self(),
|
|
|
|
lists:foreach(
|
|
|
|
fun(N) ->
|
|
|
|
%% Publish async all messages at once to trigger flow control
|
|
|
|
ok = emqtt:publish_async(C, Topic, integer_to_binary(N), QoS,
|
|
|
|
{fun(N0, {ok, #{reason_code_name := success}}) ->
|
|
|
|
Self ! {self(), N0};
|
|
|
|
(N0, ok) ->
|
|
|
|
Self ! {self(), N0}
|
|
|
|
end, [N]})
|
|
|
|
end, lists:seq(1, NumMsgs)),
|
|
|
|
ok = await_confirms_ordered(C, 1, NumMsgs),
|
2022-11-08 19:59:16 +08:00
|
|
|
eventually(?_assert(
|
|
|
|
begin
|
|
|
|
L = rabbitmqctl_list(Config, 0, ["list_queues", "messages", "--no-table-headers"]),
|
2023-10-04 18:11:54 +08:00
|
|
|
length(L) =:= 3 andalso
|
2022-11-08 19:59:16 +08:00
|
|
|
lists:all(fun([Bin]) ->
|
|
|
|
N = binary_to_integer(Bin),
|
|
|
|
case QoS of
|
|
|
|
qos0 ->
|
|
|
|
N =:= NumMsgs;
|
|
|
|
qos1 ->
|
|
|
|
%% Allow for some duplicates when client resends
|
|
|
|
%% a message that gets acked at roughly the same time.
|
|
|
|
N >= NumMsgs andalso
|
|
|
|
N < NumMsgs * 2
|
|
|
|
end
|
|
|
|
end, L)
|
2024-06-20 02:02:33 +08:00
|
|
|
end), 1000, 20),
|
2022-10-29 00:28:47 +08:00
|
|
|
|
2023-10-04 18:11:54 +08:00
|
|
|
delete_queue(Ch, [CQ, QQ, SQ]),
|
2022-10-29 00:28:47 +08:00
|
|
|
ok = emqtt:disconnect(C),
|
2022-11-14 23:57:39 +08:00
|
|
|
?awaitMatch([],
|
2025-03-15 00:20:36 +08:00
|
|
|
all_connection_pids(Config), 10_000, 1000),
|
|
|
|
ok = rabbit_ct_client_helpers:close_connection_and_channel(Conn, Ch).
|
2022-11-14 23:57:39 +08:00
|
|
|
|
2023-01-05 20:57:50 +08:00
|
|
|
publish_to_all_non_deprecated_queue_types_qos0(Config) ->
|
|
|
|
publish_to_all_non_deprecated_queue_types(Config, qos0).
|
|
|
|
|
|
|
|
publish_to_all_non_deprecated_queue_types_qos1(Config) ->
|
|
|
|
publish_to_all_non_deprecated_queue_types(Config, qos1).
|
|
|
|
|
|
|
|
publish_to_all_non_deprecated_queue_types(Config, QoS) ->
|
2025-03-15 00:20:36 +08:00
|
|
|
{Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config),
|
2023-01-05 20:57:50 +08:00
|
|
|
|
|
|
|
CQ = <<"classic-queue">>,
|
|
|
|
QQ = <<"quorum-queue">>,
|
|
|
|
SQ = <<"stream-queue">>,
|
|
|
|
Topic = <<"mytopic">>,
|
|
|
|
|
|
|
|
declare_queue(Ch, CQ, []),
|
|
|
|
bind(Ch, CQ, Topic),
|
|
|
|
|
|
|
|
declare_queue(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}]),
|
|
|
|
bind(Ch, QQ, Topic),
|
|
|
|
|
|
|
|
declare_queue(Ch, SQ, [{<<"x-queue-type">>, longstr, <<"stream">>}]),
|
|
|
|
bind(Ch, SQ, Topic),
|
|
|
|
|
|
|
|
NumMsgs = 2000,
|
|
|
|
C = connect(?FUNCTION_NAME, Config, [{retry_interval, 2}]),
|
|
|
|
lists:foreach(fun(N) ->
|
|
|
|
case emqtt:publish(C, Topic, integer_to_binary(N), QoS) of
|
|
|
|
ok ->
|
|
|
|
ok;
|
|
|
|
{ok, _} ->
|
|
|
|
ok;
|
|
|
|
Other ->
|
|
|
|
ct:fail("Failed to publish: ~p", [Other])
|
|
|
|
end
|
|
|
|
end, lists:seq(1, NumMsgs)),
|
|
|
|
|
|
|
|
eventually(?_assert(
|
|
|
|
begin
|
|
|
|
L = rabbitmqctl_list(Config, 0, ["list_queues", "messages", "--no-table-headers"]),
|
|
|
|
length(L) =:= 3 andalso
|
|
|
|
lists:all(fun([Bin]) ->
|
|
|
|
N = binary_to_integer(Bin),
|
|
|
|
case QoS of
|
|
|
|
qos0 ->
|
|
|
|
N =:= NumMsgs;
|
|
|
|
qos1 ->
|
|
|
|
%% Allow for some duplicates when client resends
|
|
|
|
%% a message that gets acked at roughly the same time.
|
|
|
|
N >= NumMsgs andalso
|
|
|
|
N < NumMsgs * 2
|
|
|
|
end
|
|
|
|
end, L)
|
|
|
|
end), 2000, 10),
|
|
|
|
|
|
|
|
delete_queue(Ch, [CQ, QQ, SQ]),
|
|
|
|
ok = emqtt:disconnect(C),
|
|
|
|
?awaitMatch([],
|
2025-03-15 00:20:36 +08:00
|
|
|
all_connection_pids(Config), 10_000, 1000),
|
|
|
|
ok = rabbit_ct_client_helpers:close_connection_and_channel(Conn, Ch).
|
2023-01-05 20:57:50 +08:00
|
|
|
|
2024-12-04 00:34:32 +08:00
|
|
|
%% This test case does not require multiple nodes
|
|
|
|
%% but it is grouped together with flow test cases for other queue types
|
|
|
|
%% (and historically used to use a mirrored classic queue on multiple nodes)
|
|
|
|
flow_classic_queue(Config) ->
|
|
|
|
%% New nodes look up the value via persistent_term:get/1 (since 4.0.0).
|
|
|
|
%% Old nodes look it up via application:get_env/2 (that is taken care of by flow/3).
|
|
|
|
%% Therefore, we set both the persistent term and the application env.
|
|
|
|
Key = credit_flow_default_credit,
|
|
|
|
Val = {2, 1},
|
|
|
|
DefaultVal = rabbit_ct_broker_helpers:rpc(Config, persistent_term, get, [Key]),
|
|
|
|
Result = rpc_all(Config, persistent_term, put, [Key, Val]),
|
|
|
|
?assert(lists:all(fun(R) -> R =:= ok end, Result)),
|
|
|
|
|
|
|
|
flow(Config, {rabbit, Key, Val}, <<"classic">>),
|
|
|
|
|
|
|
|
?assertEqual(Result, rpc_all(Config, persistent_term, put, [Key, DefaultVal])),
|
|
|
|
ok.
|
|
|
|
|
2022-11-14 23:57:39 +08:00
|
|
|
flow_quorum_queue(Config) ->
|
2022-11-17 19:48:56 +08:00
|
|
|
flow(Config, {rabbit, quorum_commands_soft_limit, 1}, <<"quorum">>).
|
2022-11-14 23:57:39 +08:00
|
|
|
|
|
|
|
flow_stream(Config) ->
|
2022-11-17 19:48:56 +08:00
|
|
|
flow(Config, {rabbit, stream_messages_soft_limit, 1}, <<"stream">>).
|
2022-11-14 23:57:39 +08:00
|
|
|
|
2022-11-17 19:48:56 +08:00
|
|
|
flow(Config, {App, Par, Val}, QueueType)
|
|
|
|
when is_binary(QueueType) ->
|
2023-01-03 01:02:43 +08:00
|
|
|
{ok, DefaultVal} = rpc(Config, application, get_env, [App, Par]),
|
2022-11-17 19:48:56 +08:00
|
|
|
Result = rpc_all(Config, application, set_env, [App, Par, Val]),
|
2022-11-14 23:57:39 +08:00
|
|
|
?assert(lists:all(fun(R) -> R =:= ok end, Result)),
|
|
|
|
|
2025-03-15 00:20:36 +08:00
|
|
|
{Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config),
|
2022-11-14 23:57:39 +08:00
|
|
|
QueueName = Topic = atom_to_binary(?FUNCTION_NAME),
|
|
|
|
declare_queue(Ch, QueueName, [{<<"x-queue-type">>, longstr, QueueType}]),
|
|
|
|
bind(Ch, QueueName, Topic),
|
|
|
|
|
|
|
|
NumMsgs = 1000,
|
2022-11-29 19:41:50 +08:00
|
|
|
C = connect(?FUNCTION_NAME, Config, [{retry_interval, 600},
|
2023-01-03 01:02:43 +08:00
|
|
|
{max_inflight, NumMsgs}]),
|
2022-11-14 23:57:39 +08:00
|
|
|
TestPid = self(),
|
|
|
|
lists:foreach(
|
|
|
|
fun(N) ->
|
|
|
|
%% Publish all messages at once asynchronously to trigger flow control
|
|
|
|
ok = emqtt:publish_async(C, Topic, integer_to_binary(N), qos1,
|
|
|
|
{fun(N0, {ok, #{reason_code_name := success}}) ->
|
|
|
|
TestPid ! {self(), N0}
|
|
|
|
end, [N]})
|
|
|
|
end, lists:seq(1, NumMsgs)),
|
2022-11-18 04:13:27 +08:00
|
|
|
ok = await_confirms_ordered(C, 1, NumMsgs),
|
2022-11-14 23:57:39 +08:00
|
|
|
eventually(?_assertEqual(
|
|
|
|
[[integer_to_binary(NumMsgs)]],
|
|
|
|
rabbitmqctl_list(Config, 0, ["list_queues", "messages", "--no-table-headers"])
|
|
|
|
), 1000, 10),
|
|
|
|
|
|
|
|
delete_queue(Ch, QueueName),
|
|
|
|
ok = emqtt:disconnect(C),
|
|
|
|
?awaitMatch([],
|
|
|
|
all_connection_pids(Config), 10_000, 1000),
|
2024-05-31 20:15:41 +08:00
|
|
|
?assertEqual(Result,
|
2025-03-15 00:20:36 +08:00
|
|
|
rpc_all(Config, application, set_env, [App, Par, DefaultVal])),
|
|
|
|
ok = rabbit_ct_client_helpers:close_connection_and_channel(Conn, Ch).
|
2022-10-28 01:07:43 +08:00
|
|
|
|
2022-10-29 00:28:47 +08:00
|
|
|
events(Config) ->
|
2022-10-28 01:07:43 +08:00
|
|
|
ok = rabbit_ct_broker_helpers:add_code_path_to_all_nodes(Config, event_recorder),
|
2022-12-20 02:03:18 +08:00
|
|
|
Server = get_node_config(Config, 0, nodename),
|
2022-10-28 01:07:43 +08:00
|
|
|
ok = gen_event:add_handler({rabbit_event, Server}, event_recorder, []),
|
|
|
|
|
2022-10-29 00:28:47 +08:00
|
|
|
ClientId = atom_to_binary(?FUNCTION_NAME),
|
2022-11-29 19:41:50 +08:00
|
|
|
C = connect(ClientId, Config),
|
2022-10-28 01:07:43 +08:00
|
|
|
|
|
|
|
[E0, E1] = get_events(Server),
|
|
|
|
assert_event_type(user_authentication_success, E0),
|
|
|
|
assert_event_prop([{name, <<"guest">>},
|
|
|
|
{connection_type, network}],
|
|
|
|
E0),
|
|
|
|
assert_event_type(connection_created, E1),
|
2022-11-15 23:25:59 +08:00
|
|
|
[ConnectionPid] = all_connection_pids(Config),
|
2023-02-22 02:24:15 +08:00
|
|
|
ProtoName = case ?config(websocket, Config) of
|
|
|
|
true -> 'Web MQTT';
|
|
|
|
false -> 'MQTT'
|
|
|
|
end,
|
|
|
|
ProtoVer = case ?config(mqtt_version, Config) of
|
|
|
|
v3 -> {3,1,0};
|
|
|
|
v4 -> {3,1,1};
|
|
|
|
v5 -> {5,0}
|
|
|
|
end,
|
|
|
|
ExpectedConnectionProps = [{protocol, {ProtoName, ProtoVer}},
|
|
|
|
{node, Server},
|
|
|
|
{vhost, <<"/">>},
|
|
|
|
{user, <<"guest">>},
|
|
|
|
{client_properties, [{client_id, longstr, ClientId}]},
|
|
|
|
{pid, ConnectionPid}],
|
|
|
|
assert_event_prop(ExpectedConnectionProps, E1),
|
2022-10-28 01:07:43 +08:00
|
|
|
|
Support MQTT 5.0 features No Local, RAP, Subscription IDs
Support subscription options "No Local" and "Retain As Published"
as well as Subscription Identifiers.
All three MQTT 5.0 features can be set on a per subscription basis.
Due to wildcards in topic filters, multiple subscriptions
can match a given topic. Therefore, to implement Retain As Published and
Subscription Identifiers, the destination MQTT connection process needs
to know what subscription(s) caused it to receive the message.
There are a few ways this could be implemented:
1. The destination MQTT connection process is aware of all its
subscriptions. Whenever it receives a message, it can match the
message's routing key / topic against all its known topic filters.
However, iteratively matching the routing key against all topic
filters for every received message can become very expensive in the
worst case when the MQTT client creates many subscriptions containing
wildcards. This could be the case for an MQTT client that acts as a
bridge or proxy or dispatcher: it could subscribe via a wildcard for
each of its own clients.
2. Instead of iteratively matching the topic of the received message
against all topic filters that contain wildcards, a better approach
would be for every MQTT subscriber connection process to maintain a
local trie data structure (similar to how topic exchanges are
implemented) and therefore perform matching more efficiently.
However, this does not sound optimal either because routing is
effectively performed twice: in the topic exchange and again against
a much smaller trie in each destination connection process.
3. Given that the topic exchange already performs routing, a much more
sensible way would be to send the matched binding key(s) to the
destination MQTT connection process. A subscription (topic filter)
maps to a binding key in AMQP 0.9.1 routing. Therefore, for the first
time in RabbitMQ, the routing function should not only output a list
of unique destination queues, but also the binding keys (subscriptions)
that caused the message to be routed to the destination queue.
This commit therefore implements the 3rd approach.
The downside of the 3rd approach is that it requires API changes to the
routing function and topic exchange.
Specifically, this commit adds a new function rabbit_exchange:route/3
that accepts a list of routing options. If that list contains version 2,
the caller of the routing function knows how to handle the return value
that could also contain binding keys.
This commit allows an MQTT connection process, the channel process, and
at-most-once dead lettering to handle binding keys. Binding keys are
included as AMQP 0.9.1 headers into the basic message.
Therefore, whenever a message is sent from an MQTT client or AMQP 0.9.1
client or AMQP 1.0 client or STOMP client, the MQTT receiver will know
the subscription identifier that caused the message to be received.
Note that due to the low number of allowed wildcard characters (# and
+), the cardinality of matched binding keys shouldn't be high even if
the topic contains for example 3 levels and the message is sent to for
example 5 million destination queues. In other words, sending multiple
distinct basic messages to the destination shouldn't hurt the delegate
optimisation too much. The delegate optimisation implemented for classic
queues and rabbit_mqtt_qos0_queue(s) still takes place for all basic
messages that contain the same set of matched binding keys.
The topic exchange returns all matched binding keys by remembering the
edges walked down to the leaves. As an optimisation, only for MQTT
queues are binding keys being returned. This does add a small dependency
from app rabbit to app rabbitmq_mqtt which is not optimal. However, this
dependency should be simple to remove when omitting this optimisation.
Another important feature of this commit is persisting subscription
options and subscription identifiers because they are part of the
MQTT 5.0 session state.
In MQTT v3 and v4, the only subscription information that was part of
the session state was the topic filter and the QoS level.
Both pieces of information were implicitly stored in the form of bindings:
the topic filter as the binding key and the QoS level encoded in the
destination queue name of the binding. For example, a v4 subscription by
client "abc" to topic filter "my/topic" at QoS 0 becomes a binding with
binding key <<"my.topic">> and destination queue
<<"mqtt-subscription-abcqos0">>, with empty binding arguments.
For MQTT v5 we need to persist more subscription information.
From a domain perspective, it makes sense to store subscription options
as part of subscriptions, i.e. bindings, even though they are currently
not used in routing.
Therefore, this commit stores subscription options as binding arguments.
Storing subscription options as binding arguments comes with
new challenges: How to handle mixed version clusters and upgrading an
MQTT session from v3 or v4 to v5?
Imagine an MQTT client connects via v5 with Session Expiry Interval > 0
to a new node in a mixed version cluster, creates a subscription,
disconnects, and subsequently connects via v3 to an old node. The
client should continue to receive messages.
To simplify such edge cases, this commit introduces a new feature flag
called mqtt_v5. If mqtt_v5 is disabled, clients cannot connect to
RabbitMQ via MQTT 5.0.
This still doesn't entirely solve the problem of MQTT session upgrades
(v4 to v5 client) or session downgrades (v5 to v4 client).
Ideally, once mqtt_v5 is enabled, all MQTT bindings contain non-empty binding
arguments. However, this will require a feature flag migration function
to modify all MQTT bindings. To be more precise, all MQTT bindings need
to be deleted and added because the binding argument is part of the
Mnesia table key.
Since feature flag migration functions are non-trivial to implement in
RabbitMQ (they can run on every node multiple times and concurrently),
this commit takes a simpler approach:
All v3 / v4 sessions keep the empty binding argument [].
All v5 sessions use the new binding argument [#mqtt_subscription_opts{}].
This requires only handling a session upgrade / downgrade by
creating a binding (with the new binding arg) and deleting the old
binding (with the old binding arg) when processing the CONNECT packet.
Note that such session upgrades or downgrades should be rather rare in
practice. Therefore these binding transactions shouldn't hurt performance.
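For illustration, a hedged sketch of that swap; maps stand in for
RabbitMQ's real binding records and the funs stand in for the real
binding operations, so none of these names come from the source:
```
%% Same topic filter and destination queue before and after the
%% upgrade; only the binding arguments change ([] vs [SubOpts]).
swap_session_binding(TopicFilter, Queue, SubOpts, AddFun, RemoveFun) ->
    OldBinding = #{key => TopicFilter, destination => Queue, args => []},
    NewBinding = OldBinding#{args := [SubOpts]},
    ok = AddFun(NewBinding),
    ok = RemoveFun(OldBinding).
```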
The No Local option is implemented within the MQTT publishing connection
process: The message is not sent to the MQTT destination if the
destination queue name matches the current MQTT client ID and the
message was routed due to a subscription that has the No Local flag set.
This avoids unnecessary traffic on the MQTT queue.
The alternative would have been that the "receiving side" (same process)
filters the message out - which would have been more consistent in how
Retain As Published and Subscription Identifiers are implemented, but
would have caused unnecessary load on the MQTT queue.
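For illustration, a toy sketch of that publisher-side filter. It
assumes each matched destination is represented as a map carrying the
owning client id and the subscription's No Local flag (a made-up
shape, not RabbitMQ's actual internal representation):
```
%% Drop a matched destination when it belongs to the publishing
%% client itself and the matching subscription set No Local.
filter_no_local(Matches, PublisherClientId) ->
    [M || #{client_id := ClientId, no_local := NoLocal} = M <- Matches,
          not (NoLocal andalso ClientId =:= PublisherClientId)].
```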
2023-04-19 21:32:34 +08:00
|
|
|
Qos = 0,
|
Return matched binding keys faster
For MQTT 5.0 destination queues, the topic exchange has to return not
only the destination queue names, but also the matched binding
keys.
This is needed to implement MQTT 5.0 subscription options No Local,
Retain As Published and Subscription Identifiers.
Prior to this commit, as the trie was walked down, we remembered the
edges being walked and assembled the final binding key with
list_to_binary/1.
list_to_binary/1 is very expensive with long lists (long topic names),
even in OTP 26.
The CPU flame graph showed ~3% of CPU usage was spent only in
list_to_binary/1.
Unfortunately and unnecessarily, the current topic exchange
implementation stores topic levels as lists.
It would be better to store topic levels as binaries:
split_topic_key/1 should ideally use binary:split/3, similar to the following:
```
1> P = binary:compile_pattern(<<".">>).
{bm,#Ref<0.1273071188.1488322568.63736>}
2> Bin = <<"aaa.bbb..ccc">>.
<<"aaa.bbb..ccc">>
3> binary:split(Bin, P, [global]).
[<<"aaa">>,<<"bbb">>,<<>>,<<"ccc">>]
```
The compiled pattern could be placed into persistent term.
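For illustration, a small hedged sketch of that caching idea (the
persistent_term key name is made up):
```
%% Compile the split pattern once, cache it in persistent_term and
%% reuse it for every topic split.
split_topic(Topic) ->
    Pattern = case persistent_term:get(mqtt_topic_split_pattern, undefined) of
                  undefined ->
                      P = binary:compile_pattern(<<".">>),
                      ok = persistent_term:put(mqtt_topic_split_pattern, P),
                      P;
                  P0 ->
                      P0
              end,
    binary:split(Topic, Pattern, [global]).
```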
This commit decided to avoid migrating Mnesia tables to use binaries
instead of lists. Mnesia migrations are non-trivial, especially with the
current feature flag subsystem.
Furthermore the Mnesia topic tables are already getting migrated to
their Khepri counterparts in 3.13.
Adding additional migration only for Mnesia does not make sense.
So, instead of assembling the binding key as we walk down the trie and
then calling list_to_binary/1 in the leaf, it
would be better to just fetch the binding key from the database in the leaf.
As we reach the leaf of the trie, we know both source and destination.
Unfortunately, we cannot fetch the binding key efficiently with the
current rabbit_route (sorted by source exchange) and
rabbit_reverse_route (sorted by destination) tables as the key is in
the middle between source and destination.
If there are a huge number of bindings for a given source exchange (very
realistic in MQTT use cases) or a large number of bindings for a given
destination (also realistic), it would require scanning these large
number of bindings.
Therefore this commit takes the simplest possible solution:
The solution leverages the fact that binding arguments are already part of
table rabbit_topic_trie_binding.
So, if we simply include the binding key into the binding arguments, we
can fetch and return it efficiently in the topic exchange
implementation.
The following patch, which omits fetching the empty-list binding
argument (the default), makes routing slower because the function
`analyze_pattern.constprop.0` requires significantly more (~2.5%) CPU time:
```
@@ -273,7 +273,11 @@ trie_bindings(X, Node) ->
node_id = Node,
destination = '$1',
arguments = '$2'}},
- mnesia:select(?MNESIA_BINDING_TABLE, [{MatchHead, [], [{{'$1', '$2'}}]}]).
+ mnesia:select(
+ ?MNESIA_BINDING_TABLE,
+ [{MatchHead, [{'andalso', {'is_list', '$2'}, {'=/=', '$2', []}}], [{{'$1', '$2'}}]},
+ {MatchHead, [], ['$1']}
+ ]).
```
Hence, this commit always fetches the binding arguments.
All MQTT 5.0 destination queues will create a binding that
contains the binding key in the binding arguments.
Not only does this solution avoid expensive list_to_binary/1 calls, but
it also means that Erlang app rabbit (specifically the topic exchange
implementation) does not need to be aware of MQTT anymore:
It just returns the binding key when the binding args tell to do so.
In future, once the Khepri migration is completed, we should be able to
relatively simply remove the binding key from the binding arguments
again to free up some storage space.
Note that one of the advantages of a trie data structure is its space
efficiency: you don't have to store the same prefixes multiple
times.
However, for RabbitMQ the binding key is already stored at least N times
in various routing tables, so storing it a few times more via the
binding arguments should be acceptable.
The speed improvements are favoured over a few more MBs ETS usage.
2023-06-09 18:52:34 +08:00
|
|
|
MqttTopic = <<"my/topic">>,
|
|
|
|
AmqpTopic = <<"my.topic">>,
|
Attempt to fix flake
Attempt to fix the following flake:
```
=== Ended at 2023-06-26 07:13:34
=== Location: [{shared_SUITE,events,570},
{test_server,ts_tc,1782},
{test_server,run_test_case_eval1,1291},
{test_server,run_test_case_eval,1223}]
=== === Reason: no match of right hand side value []
in function shared_SUITE:events/1 (shared_SUITE.erl, line 570)
in call from test_server:ts_tc/3 (test_server.erl, line 1782)
in call from test_server:run_test_case_eval1/6 (test_server.erl, line 1291)
in call from test_server:run_test_case_eval/9 (test_server.erl, line 1223)
```
of test
```
-group [web_mqtt,v3,cluster_size_1] -case events
```
The logs showed that deletion of the exclusive queue took place in the
same millisecond as the server received the DISCONNECT:
```
2023-06-26 07:13:32.838282+00:00 [debug] <0.2494.0> Received a CONNECT, client ID: events, username: undefined, clean start: true, protocol version: 3, keepalive: 60, property names: []
2023-06-26 07:13:32.838436+00:00 [debug] <0.2494.0> MQTT connection 127.0.0.1:38808 -> 127.0.0.1:21007 picked vhost using plugin_configuration_or_default_vhost
2023-06-26 07:13:32.838523+00:00 [debug] <0.2494.0> User 'guest' authenticated successfully by backend rabbit_auth_backend_internal
2023-06-26 07:13:32.838672+00:00 [info] <0.2494.0> Accepted Web MQTT connection 127.0.0.1:38808 -> 127.0.0.1:21007 for client ID events
2023-06-26 07:13:33.147196+00:00 [debug] <0.2494.0> Received a SUBSCRIBE with subscription(s) [{mqtt_subscription,<<"my/topic">>,
2023-06-26 07:13:33.147196+00:00 [debug] <0.2494.0> {mqtt_subscription_opts,0,false,
2023-06-26 07:13:33.147196+00:00 [debug] <0.2494.0> false,0,undefined}}]
2023-06-26 07:13:33.457541+00:00 [debug] <0.2494.0> Received an UNSUBSCRIBE for topic filter(s) [<<"my/topic">>]
2023-06-26 07:13:33.762171+00:00 [debug] <0.2494.0> Received a DISCONNECT with reason code 0 and properties #{}
2023-06-26 07:13:33.762350+00:00 [info] <0.2494.0> Web MQTT closing connection 127.0.0.1:38808 -> 127.0.0.1:21007
2023-06-26 07:13:33.762780+00:00 [debug] <0.2504.0> Deleting exclusive queue 'mqtt-subscription-eventsqos0' in vhost '/' because its declaring connection <0.2494.0> was closed
```
However, there could be some delay between disconnecting on the client
WebMQTT side and the processor processing the DISCONNECT packet.
2023-06-26 15:33:02 +08:00
|
|
|
{ok, _, [Qos]} = emqtt:subscribe(C, MqttTopic, Qos),
|
2022-10-28 01:07:43 +08:00
|
|
|
|
|
|
|
QueueNameBin = <<"mqtt-subscription-", ClientId/binary, "qos0">>,
|
|
|
|
QueueName = {resource, <<"/">>, queue, QueueNameBin},
|
2024-07-09 18:30:47 +08:00
|
|
|
[E2, E3] = get_events(Server),
|
2022-11-30 02:13:50 +08:00
|
|
|
assert_event_type(queue_created, E2),
|
2022-10-28 01:07:43 +08:00
|
|
|
assert_event_prop([{name, QueueName},
|
|
|
|
{durable, true},
|
|
|
|
{auto_delete, false},
|
|
|
|
{exclusive, true},
|
2024-07-09 18:30:47 +08:00
|
|
|
{type, rabbit_mqtt_qos0_queue},
|
2022-10-28 01:07:43 +08:00
|
|
|
{arguments, []}],
|
|
|
|
E2),
|
|
|
|
assert_event_type(binding_created, E3),
|
2023-04-19 21:32:34 +08:00
|
|
|
ExpectedBindingArgs = case ?config(mqtt_version, Config) of
|
2023-06-09 18:52:34 +08:00
|
|
|
v5 -> [{mqtt_subscription_opts, Qos, false, false, 0, undefined},
|
|
|
|
{<<"x-binding-key">>, longstr, AmqpTopic}];
|
2023-04-19 21:32:34 +08:00
|
|
|
_ -> []
|
|
|
|
end,
|
2022-10-28 01:07:43 +08:00
|
|
|
assert_event_prop([{source_name, <<"amq.topic">>},
|
|
|
|
{source_kind, exchange},
|
|
|
|
{destination_name, QueueNameBin},
|
|
|
|
{destination_kind, queue},
|
2023-06-09 18:52:34 +08:00
|
|
|
{routing_key, AmqpTopic},
|
Support MQTT 5.0 features No Local, RAP, Subscription IDs
Support subscription options "No Local" and "Retain As Published"
as well as Subscription Identifiers.
All three MQTT 5.0 features can be set on a per subscription basis.
Due to wildcards in topic filters, multiple subscriptions
can match a given topic. Therefore, to implement Retain As Published and
Subscription Identifiers, the destination MQTT connection process needs
to know what subscription(s) caused it to receive the message.
There are a few ways how this could be implemented:
1. The destination MQTT connection process is aware of all its
subscriptions. Whenever, it receives a message, it can match the
message's routing key / topic against all its known topic filters.
However, to iteratively match the routing key against all topic
filters for every received message can become very expensive in the
worst case when the MQTT client creates many subscriptions containing
wildcards. This could be the case for an MQTT client that acts as a
bridge or proxy or dispatcher: It could subscribe via a wildcard for
each of its own clients.
2. Instead of iteratively matching the topic of the received message
against all topic filters that contain wildcards, a better approach
would be for every MQTT subscriber connection process to maintain a
local trie data structure (similar to how topic exchanges are
implemented) and thereby perform matching more efficiently.
However, this does not sound optimal either because routing is
effectively performed twice: in the topic exchange and again against
a much smaller trie in each destination connection process.
3. Given that the topic exchange already performs routing, a much more
sensible way would be to send the matched binding key(s) to the
destination MQTT connection process. A subscription (topic filter)
maps to a binding key in AMQP 0.9.1 routing. Therefore, for the first
time in RabbitMQ, the routing function should not only output a list
of unique destination queues, but also the binding keys (subscriptions)
that caused the message to be routed to the destination queue.
This commit therefore implements the 3rd approach.
The downside of the 3rd approach is that it requires API changes to the
routing function and topic exchange.
Specifically, this commit adds a new function rabbit_exchange:route/3
that accepts a list of routing options. If that list contains version 2,
the caller of the routing function knows how to handle the return value
that could also contain binding keys.
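For illustration, a hedged sketch of a v2-aware caller (the option term `v2` and the return shapes are assumptions based on the "version 2" wording above, not the verified API):
```
%% Hedged sketch, not the actual implementation: a caller of the new
%% routing API that understands the richer "version 2" return format.
route_with_binding_keys(Exchange, Message) ->
    Routes = rabbit_exchange:route(Exchange, Message, [v2]),
    %% Normalise: a destination may come back bare, or tagged with the
    %% binding keys that caused the message to be routed to it.
    [case Route of
         {QName, BindingKeys} -> {QName, BindingKeys};
         QName -> {QName, []}
     end || Route <- Routes].
```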
This commit allows an MQTT connection process, the channel process, and
at-most-once dead lettering to handle binding keys. Binding keys are
included as AMQP 0.9.1 headers in the basic message.
Therefore, whenever a message is sent from an MQTT client or AMQP 0.9.1
client or AMQP 1.0 client or STOMP client, the MQTT receiver will know
the subscription identifier that caused the message to be received.
Note that due to the low number of allowed wildcard characters (# and
+), the cardinality of matched binding keys shouldn't be high even if
the topic contains, say, 3 levels and the message is sent to
5 million destination queues. In other words, sending multiple
distinct basic messages to the destination shouldn't hurt the delegate
optimisation too much. The delegate optimisation implemented for classic
queues and rabbit_mqtt_qos0_queue(s) still takes place for all basic
messages that contain the same set of matched binding keys.
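A rough sketch of that grouping (maps:groups_from_list/2 is standard OTP 25+; the data shapes here are assumptions):
```
%% Deliveries that matched the same set of binding keys can share one
%% basic message, preserving the delegate optimisation per group.
group_by_binding_keys(RoutedDestinations) ->
    maps:groups_from_list(
      fun({_QName, BindingKeys}) -> lists:usort(BindingKeys) end,
      RoutedDestinations).
```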
The topic exchange returns all matched binding keys by remembering the
edges walked down to the leaves. As an optimisation, binding keys
are returned only for MQTT queues. This does add a small dependency
from app rabbit to app rabbitmq_mqtt which is not optimal. However, this
dependency should be simple to remove when omitting this optimisation.
Another important feature of this commit is persisting subscription
options and subscription identifiers because they are part of the
MQTT 5.0 session state.
In MQTT v3 and v4, the only subscription information that was part of
the session state was the topic filter and the QoS level.
Both were implicitly stored in the form of bindings:
The topic filter as the binding key and the QoS level as the destination
queue name of the binding.
For MQTT v5 we need to persist more subscription information.
From a domain perspective, it makes sense to store subscription options
as part of subscriptions, i.e. bindings, even though they are currently
not used in routing.
Therefore, this commit stores subscription options as binding arguments.
Storing subscription options as binding arguments in turn comes with
new challenges: how to handle mixed-version clusters and upgrading an
MQTT session from v3 or v4 to v5?
Imagine an MQTT client connects via v5 with Session Expiry Interval > 0
to a new node in a mixed version cluster, creates a subscription,
disconnects, and subsequently connects via v3 to an old node. The
client should continue to receive messages.
To simplify such edge cases, this commit introduces a new feature flag
called mqtt_v5. If mqtt_v5 is disabled, clients cannot connect to
RabbitMQ via MQTT 5.0.
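A minimal sketch of that gate, assuming the standard feature-flag check (the surrounding CONNECT handling and error term are illustrative only):
```
%% Reject MQTT 5.0 connections while the mqtt_v5 feature flag is disabled.
check_protocol_version(5 = _ProtoVer) ->
    case rabbit_feature_flags:is_enabled(mqtt_v5) of
        true  -> ok;
        false -> {error, unsupported_protocol_version}
    end;
check_protocol_version(_ProtoVer) ->
    ok.
```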
This still doesn't entirely solve the problem of MQTT session upgrades
(v4 to v5 client) or session downgrades (v5 to v4 client).
Ideally, once mqtt_v5 is enabled, all MQTT bindings contain non-empty binding
arguments. However, this will require a feature flag migration function
to modify all MQTT bindings. To be more precise, all MQTT bindings need
to be deleted and re-created because the binding argument is part of the
Mnesia table key.
Since feature flag migration functions are non-trivial to implement in
RabbitMQ (they can run on every node multiple times and concurrently),
this commit takes a simpler approach:
All v3 / v4 sessions keep the empty binding argument [].
All v5 sessions use the new binding argument [#mqtt_subscription_opts{}].
This requires only handling a session upgrade / downgrade by
creating a binding (with the new binding arg) and deleting the old
binding (with the old binding arg) when processing the CONNECT packet.
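The two binding-argument shapes could be compared roughly as follows (only the record name #mqtt_subscription_opts{} appears above; its fields here are assumptions):
```
%% Sketch only: field names are assumed for illustration.
-record(mqtt_subscription_opts, {qos,
                                 no_local = false,
                                 retain_as_published = false,
                                 subscription_id}).

%% v3 / v4 sessions keep the empty binding argument:
binding_args(ProtoVer, _Opts) when ProtoVer =:= 3; ProtoVer =:= 4 ->
    [];
%% v5 sessions store the subscription options in the binding arguments:
binding_args(5, Opts = #mqtt_subscription_opts{}) ->
    [Opts].
```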
Note that such session upgrades or downgrades should be rather rare in
practice. Therefore these binding transactions shouldn't hurt performance.
The No Local option is implemented within the MQTT publishing connection
process: The message is not sent to the MQTT destination if the
destination queue name matches the current MQTT client ID and the
message was routed due to a subscription that has the No Local flag set.
This avoids unnecessary traffic on the MQTT queue.
The alternative would have been that the "receiving side" (same process)
filters the message out - which would have been more consistent with how
Retain As Published and Subscription Identifiers are implemented, but
would have caused unnecessary load on the MQTT queue.
2023-04-19 21:32:34 +08:00
|
|
|
{arguments, ExpectedBindingArgs}],
|
2022-10-28 01:07:43 +08:00
|
|
|
E3),
|
|
|
|
|
Return matched binding keys faster
For MQTT 5.0 destination queues, the topic exchange has to return
not only the destination queue names but also the matched binding
keys.
This is needed to implement MQTT 5.0 subscription options No Local,
Retain As Published and Subscription Identifiers.
Prior to this commit, as the trie was walked down, we remembered the
edges being walked and assembled the final binding key with
list_to_binary/1.
list_to_binary/1 is very expensive with long lists (long topic names),
even in OTP 26.
The CPU flame graph showed ~3% of CPU usage was spent only in
list_to_binary/1.
Unfortunately and unnecessarily, the current topic exchange
implementation stores topic levels as lists.
It would be better to store topic levels as binaries:
split_topic_key/1 should ideally use binary:split/3, similar to the following:
```
1> P = binary:compile_pattern(<<".">>).
{bm,#Ref<0.1273071188.1488322568.63736>}
2> Bin = <<"aaa.bbb..ccc">>.
<<"aaa.bbb..ccc">>
3> binary:split(Bin, P, [global]).
[<<"aaa">>,<<"bbb">>,<<>>,<<"ccc">>]
```
The compiled pattern could be placed into a persistent term.
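For instance (a sketch assuming a module-local key; persistent_term:put/2 and get/1 are standard OTP):
```
%% Compile the split pattern once and cache it; persistent_term reads
%% are cheap and copy-free on the hot path.
init_split_pattern() ->
    persistent_term:put({?MODULE, topic_split_pattern},
                        binary:compile_pattern(<<".">>)).

split_topic(Topic) when is_binary(Topic) ->
    Pattern = persistent_term:get({?MODULE, topic_split_pattern}),
    binary:split(Topic, Pattern, [global]).
```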
This commit decided to avoid migrating Mnesia tables to use binaries
instead of lists. Mnesia migrations are non-trivial, especially with the
current feature flag subsystem.
Furthermore the Mnesia topic tables are already getting migrated to
their Khepri counterparts in 3.13.
Adding an additional migration only for Mnesia does not make sense.
So, instead of assembling the binding key as we walk down the trie and
then calling list_to_binary/1 in the leaf, it
would be better to just fetch the binding key from the database in the leaf.
As we reach the leaf of the trie, we know both source and destination.
Unfortunately, we cannot fetch the binding key efficiently with the
current rabbit_route (sorted by source exchange) and
rabbit_reverse_route (sorted by destination) tables as the key is in
the middle between source and destination.
If there are a huge number of bindings for a given source exchange (very
realistic in MQTT use cases) or a large number of bindings for a given
destination (also realistic), it would require scanning this large
number of bindings.
Therefore, this commit takes the simplest possible solution:
it leverages the fact that binding arguments are already part of
table rabbit_topic_trie_binding.
So, if we simply include the binding key in the binding arguments, we
can fetch and return it efficiently in the topic exchange
implementation.
The following patch, which omits fetching the empty-list binding argument
(the default), makes routing slower because function
`analyze_pattern.constprop.0` requires significantly more (~2.5%) CPU time:
```
@@ -273,7 +273,11 @@ trie_bindings(X, Node) ->
node_id = Node,
destination = '$1',
arguments = '$2'}},
- mnesia:select(?MNESIA_BINDING_TABLE, [{MatchHead, [], [{{'$1', '$2'}}]}]).
+ mnesia:select(
+ ?MNESIA_BINDING_TABLE,
+ [{MatchHead, [{'andalso', {'is_list', '$2'}, {'=/=', '$2', []}}], [{{'$1', '$2'}}]},
+ {MatchHead, [], ['$1']}
+ ]).
```
Hence, this commit always fetches the binding arguments.
All MQTT 5.0 destination queues will create a binding that
contains the binding key in the binding arguments.
Not only does this solution avoid expensive list_to_binary/1 calls, but
it also means that Erlang app rabbit (specifically the topic exchange
implementation) does not need to be aware of MQTT anymore:
It just returns the binding key when the binding args tell it to do so.
In the future, once the Khepri migration has completed, we should be able
to remove the binding key from the binding arguments again relatively
simply, freeing up some storage space.
Note that one of the advantages of a trie data structure is its space
efficiency: the same prefixes do not have to be stored multiple
times.
However, for RabbitMQ the binding key is already stored at least N times
in various routing tables, so storing it a few times more via the
binding arguments should be acceptable.
The speed improvements are favoured over a few more MBs of ETS usage.
2023-06-09 18:52:34 +08:00
|
|
|
{ok, _, _} = emqtt:unsubscribe(C, MqttTopic),
|
2022-10-28 01:07:43 +08:00
|
|
|
|
2024-07-09 18:30:47 +08:00
|
|
|
[E4] = get_events(Server),
|
|
|
|
assert_event_type(binding_deleted, E4),
|
2022-10-28 01:07:43 +08:00
|
|
|
|
|
|
|
ok = emqtt:disconnect(C),
|
|
|
|
|
2024-07-09 18:30:47 +08:00
|
|
|
[E5, E6] = get_events(Server),
|
|
|
|
assert_event_type(connection_closed, E5),
|
|
|
|
?assertEqual(E1#event.props, E5#event.props,
|
2023-05-04 16:51:30 +08:00
|
|
|
"connection_closed event props should match connection_created event props. "
|
|
|
|
"See https://github.com/rabbitmq/rabbitmq-server/discussions/6331"),
|
2024-07-09 18:30:47 +08:00
|
|
|
assert_event_type(queue_deleted, E6),
|
|
|
|
assert_event_prop({name, QueueName}, E6),
|
2022-10-28 01:07:43 +08:00
|
|
|
|
|
|
|
ok = gen_event:delete_handler({rabbit_event, Server}, event_recorder, []).
|
|
|
|
|
2022-11-14 00:23:03 +08:00
|
|
|
internal_event_handler(Config) ->
|
2022-12-20 02:03:18 +08:00
|
|
|
Server = get_node_config(Config, 0, nodename),
|
2022-11-14 00:23:03 +08:00
|
|
|
ok = gen_event:call({rabbit_event, Server}, rabbit_mqtt_internal_event_handler, ignored_request, 1000).
|
|
|
|
|
2023-02-22 02:24:15 +08:00
|
|
|
global_counters(Config) ->
|
|
|
|
C = connect(?FUNCTION_NAME, Config),
|
2022-11-10 23:52:46 +08:00
|
|
|
|
2022-11-11 20:26:55 +08:00
|
|
|
Topic0 = <<"test-topic0">>,
|
|
|
|
Topic1 = <<"test-topic1">>,
|
2022-11-21 23:25:34 +08:00
|
|
|
Topic2 = <<"test-topic2">>,
|
|
|
|
{ok, _, [0]} = emqtt:subscribe(C, Topic0, qos0),
|
2022-11-11 20:26:55 +08:00
|
|
|
{ok, _, [1]} = emqtt:subscribe(C, Topic1, qos1),
|
2022-11-21 23:25:34 +08:00
|
|
|
{ok, _, [1]} = emqtt:subscribe(C, Topic2, qos1),
|
2022-11-24 21:42:45 +08:00
|
|
|
|
2022-11-21 23:25:34 +08:00
|
|
|
ok = emqtt:publish(C, Topic0, <<"testm0">>, qos0),
|
|
|
|
ok = emqtt:publish(C, Topic1, <<"testm1">>, qos0),
|
|
|
|
{ok, _} = emqtt:publish(C, Topic2, <<"testm2">>, qos1),
|
2022-11-24 21:42:45 +08:00
|
|
|
ok = emqtt:publish(C, <<"no/queue/bound">>, <<"msg-dropped">>, qos0),
|
2023-06-06 01:23:46 +08:00
|
|
|
{ok, Pub} = emqtt:publish(C, <<"no/queue/bound">>, <<"msg-returned">>, qos1),
|
|
|
|
case ?config(mqtt_version, Config) of
|
|
|
|
v3 -> ok;
|
|
|
|
v4 -> ok;
|
|
|
|
v5 -> ?assertMatch(#{reason_code_name := no_matching_subscribers}, Pub)
|
|
|
|
end,
|
2022-11-24 21:42:45 +08:00
|
|
|
|
2023-01-03 01:02:43 +08:00
|
|
|
ok = expect_publishes(C, Topic0, [<<"testm0">>]),
|
|
|
|
ok = expect_publishes(C, Topic1, [<<"testm1">>]),
|
|
|
|
ok = expect_publishes(C, Topic2, [<<"testm2">>]),
|
2022-11-10 23:52:46 +08:00
|
|
|
|
2023-02-22 02:24:15 +08:00
|
|
|
ProtoVer = ?config(mqtt_version, Config),
|
2022-11-10 23:52:46 +08:00
|
|
|
?assertEqual(#{publishers => 1,
|
|
|
|
consumers => 1,
|
2022-11-24 21:42:45 +08:00
|
|
|
messages_confirmed_total => 2,
|
|
|
|
messages_received_confirm_total => 2,
|
|
|
|
messages_received_total => 5,
|
2022-11-21 23:25:34 +08:00
|
|
|
messages_routed_total => 3,
|
2022-11-24 21:42:45 +08:00
|
|
|
messages_unroutable_dropped_total => 1,
|
|
|
|
messages_unroutable_returned_total => 1},
|
2022-11-10 23:52:46 +08:00
|
|
|
get_global_counters(Config, ProtoVer)),
|
2024-07-09 18:30:47 +08:00
|
|
|
?assertEqual(#{messages_delivered_total => 2,
|
|
|
|
messages_acknowledged_total => 1,
|
|
|
|
messages_delivered_consume_auto_ack_total => 1,
|
|
|
|
messages_delivered_consume_manual_ack_total => 1,
|
|
|
|
messages_delivered_get_auto_ack_total => 0,
|
|
|
|
messages_delivered_get_manual_ack_total => 0,
|
|
|
|
messages_get_empty_total => 0,
|
|
|
|
messages_redelivered_total => 0},
|
|
|
|
get_global_counters(Config, ProtoVer, 0, [{queue_type, rabbit_classic_queue}])),
|
|
|
|
?assertEqual(#{messages_delivered_total => 1,
|
|
|
|
messages_acknowledged_total => 0,
|
|
|
|
messages_delivered_consume_auto_ack_total => 1,
|
|
|
|
messages_delivered_consume_manual_ack_total => 0,
|
|
|
|
messages_delivered_get_auto_ack_total => 0,
|
|
|
|
messages_delivered_get_manual_ack_total => 0,
|
|
|
|
messages_get_empty_total => 0,
|
|
|
|
messages_redelivered_total => 0},
|
|
|
|
get_global_counters(Config, ProtoVer, 0, [{queue_type, rabbit_mqtt_qos0_queue}])),
|
2022-11-21 23:25:34 +08:00
|
|
|
|
2022-11-11 20:26:55 +08:00
|
|
|
{ok, _, _} = emqtt:unsubscribe(C, Topic1),
|
|
|
|
?assertEqual(1, maps:get(consumers, get_global_counters(Config, ProtoVer))),
|
2022-11-10 23:52:46 +08:00
|
|
|
|
2022-11-11 20:26:55 +08:00
|
|
|
ok = emqtt:disconnect(C),
|
Fix flaky test
Very rarely, the assertion failed with
```
=== Ended at 2023-02-13 13:25:52
=== Location: [{shared_SUITE,global_counters,590},
{test_server,ts_tc,1782},
{test_server,run_test_case_eval1,1291},
{test_server,run_test_case_eval,1223}]
=== === Reason: {assertEqual,
[{module,shared_SUITE},
{line,590},
{expression,"get_global_counters ( Config , ProtoVer )"},
{expected,
#{consumers => 0,messages_confirmed_total => 2,
messages_received_confirm_total => 2,
messages_received_total => 5,
messages_routed_total => 3,
messages_unroutable_dropped_total => 1,
messages_unroutable_returned_total => 1,
publishers => 0}},
{value,
#{consumers => 1,messages_confirmed_total => 2,
messages_received_confirm_total => 2,
messages_received_total => 5,
messages_routed_total => 3,
messages_unroutable_dropped_total => 1,
messages_unroutable_returned_total => 1,
publishers => 1}}]}
in function shared_SUITE:global_counters/2 (shared_SUITE.erl, line 590)
in call from test_server:ts_tc/3 (test_server.erl, line 1782)
in call from test_server:run_test_case_eval1/6 (test_server.erl, line 1291)
in call from test_server:run_test_case_eval/9 (test_server.erl, line 1223)
```
The DISCONNECT packet is sent one-way from client to server.
2023-02-13 22:14:14 +08:00
|
|
|
eventually(?_assertEqual(#{publishers => 0,
|
|
|
|
consumers => 0,
|
|
|
|
messages_confirmed_total => 2,
|
|
|
|
messages_received_confirm_total => 2,
|
|
|
|
messages_received_total => 5,
|
|
|
|
messages_routed_total => 3,
|
|
|
|
messages_unroutable_dropped_total => 1,
|
|
|
|
messages_unroutable_returned_total => 1},
|
|
|
|
get_global_counters(Config, ProtoVer))).
|
2022-11-10 23:52:46 +08:00
|
|
|
|
Emit histogram metric for received message sizes per protocol (#12342)
* Add global histogram metrics for received message sizes per-protocol
fixup: add new files to bazel
fixup: expose message_size_bytes as prometheus classic histogram type
`rabbit_msg_size_metrics` does not use `seshat` any more, but
`counters` directly.
fixup: add msg_size_metrics unit test
* Improve message size histogram
1.
Avoid unnecessary time series emitted for stream protocol
The stream protocol cannot observe message sizes.
This commit ensures that the following time series are omitted:
```
rabbitmq_global_message_size_bytes_bucket{protocol="stream",le="64"} 0
rabbitmq_global_message_size_bytes_bucket{protocol="stream",le="256"} 0
rabbitmq_global_message_size_bytes_bucket{protocol="stream",le="1024"} 0
rabbitmq_global_message_size_bytes_bucket{protocol="stream",le="4096"} 0
rabbitmq_global_message_size_bytes_bucket{protocol="stream",le="16384"} 0
rabbitmq_global_message_size_bytes_bucket{protocol="stream",le="65536"} 0
rabbitmq_global_message_size_bytes_bucket{protocol="stream",le="262144"} 0
rabbitmq_global_message_size_bytes_bucket{protocol="stream",le="1048576"} 0
rabbitmq_global_message_size_bytes_bucket{protocol="stream",le="4194304"} 0
rabbitmq_global_message_size_bytes_bucket{protocol="stream",le="16777216"} 0
rabbitmq_global_message_size_bytes_bucket{protocol="stream",le="67108864"} 0
rabbitmq_global_message_size_bytes_bucket{protocol="stream",le="268435456"} 0
rabbitmq_global_message_size_bytes_bucket{protocol="stream",le="+Inf"} 0
rabbitmq_global_message_size_bytes_count{protocol="stream"} 0
rabbitmq_global_message_size_bytes_sum{protocol="stream"} 0
```
This reduces the number of time series by 15.
2.
Further reduce the number of time series by reducing the number of
buckets. Instead of 13 buckets, emit only 9. Buckets are not
free: each one is an extra time series to store.
Prior to this commit:
```
curl -s -u guest:guest localhost:15692/metrics | ag message_size | wc -l
92
```
After this commit:
```
curl -s -u guest:guest localhost:15692/metrics | ag message_size | wc -l
57
```
3.
The emitted metric should be called
`rabbitmq_message_size_bytes_bucket` instead of `rabbitmq_global_message_size_bytes_bucket`.
The latter is poor naming. There is no need to use `global` in
the metric name given that this metric doesn't exist in the old flawed
aggregated metrics.
4.
This commit simplifies module `rabbit_global_counters`.
5.
Avoid garbage collecting the 10-element list of buckets for every
message received.
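As a rough illustration of the counters-based approach (module name, function names and bucket bounds below are assumptions, not the actual rabbit_msg_size_metrics code):
```
-module(msg_size_hist_sketch).
-export([new/0, observe/2]).

%% Illustrative upper bounds; the real metric emits 9 buckets.
-define(BOUNDS, [100, 1_000, 10_000, 100_000, 1_000_000,
                 10_000_000, 50_000_000, 100_000_000]).

new() ->
    %% One counter per bound plus a final +Inf bucket.
    counters:new(length(?BOUNDS) + 1, [write_concurrency]).

%% Bump exactly one bucket per message; no per-message list is built,
%% so there is nothing extra to garbage collect.
observe(Ref, Size) ->
    counters:add(Ref, bucket_ix(Size, ?BOUNDS, 1), 1).

bucket_ix(Size, [Bound | _], Ix) when Size =< Bound -> Ix;
bucket_ix(Size, [_ | Rest], Ix) -> bucket_ix(Size, Rest, Ix + 1);
bucket_ix(_Size, [], Ix) -> Ix. %% +Inf
```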
---------
Co-authored-by: Péter Gömöri <peter@84codes.com>
2024-09-25 00:08:24 +08:00
|
|
|
message_size_metrics(Config) ->
|
|
|
|
Protocol = case ?config(mqtt_version, Config) of
|
|
|
|
v4 -> mqtt311;
|
|
|
|
v5 -> mqtt50
|
|
|
|
end,
|
|
|
|
BucketsBefore = rpc(Config, rabbit_msg_size_metrics, raw_buckets, [Protocol]),
|
|
|
|
|
|
|
|
Topic = ClientId = atom_to_binary(?FUNCTION_NAME),
|
|
|
|
C = connect(ClientId, Config),
|
|
|
|
{ok, _, [0]} = emqtt:subscribe(C, Topic, qos0),
|
|
|
|
Payload1B = <<255>>,
|
|
|
|
Payload500B = binary:copy(Payload1B, 500),
|
|
|
|
Payload5KB = binary:copy(Payload1B, 5_000),
|
|
|
|
Payload2MB = binary:copy(Payload1B, 2_000_000),
|
|
|
|
Payloads = [Payload2MB, Payload5KB, Payload500B, Payload1B, Payload500B],
|
|
|
|
[ok = emqtt:publish(C, Topic, P, qos0) || P <- Payloads],
|
|
|
|
ok = expect_publishes(C, Topic, Payloads),
|
|
|
|
|
|
|
|
BucketsAfter = rpc(Config, rabbit_msg_size_metrics, raw_buckets, [Protocol]),
|
|
|
|
?assertEqual(
|
|
|
|
[{100, 1},
|
|
|
|
{1000, 2},
|
|
|
|
{10_000, 1},
|
|
|
|
{10_000_000, 1}],
|
|
|
|
rabbit_msg_size_metrics:diff_raw_buckets(BucketsAfter, BucketsBefore)),
|
|
|
|
|
|
|
|
ok = emqtt:disconnect(C).
|
|
|
|
|
Message Containers (#5077)
This PR implements an approach for a "protocol (data format) agnostic core" where the format of the message isn't converted at point of reception.
Currently all messages not originating from AMQP 0.9.1 are converted into an AMQP 0.9.1 flavoured basic_message record before being sent to a queue. If the messages are then consumed by the originating protocol they are converted back from AMQP 0.9.1. For some protocols such as MQTT 3.1 this isn't too expensive as MQTT is mostly a fairly easily mapped subset of AMQP 0.9.1, but for others such as AMQP 1.0 the conversions are awkward and in some cases lossy even if consuming from the originating protocol.
This PR instead wraps all incoming messages in their originating form into a generic, extensible message container type (mc). The container module exposes an API to get common message details such as size and various properties (ttl, priority etc) directly from the source data type. Each protocol needs to implement the mc behaviour such that when a message originating from one protocol is consumed by another protocol we convert it to the target protocol at that point.
The message container also contains annotations, dead letter records and other metadata we need to record during the lifetime of a message. The original protocol message is never modified unless it is consumed.
This includes conversion modules to and from amqp, amqpl (AMQP 0.9.1) and mqtt.
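To make the idea concrete, a hedged sketch of how a consumer-side protocol might use the container (call shapes are pieced together from function names mentioned in the history below, e.g. mc:init/3 and set_annotation/3; mc:convert/2 in particular is an assumption):
```
%% Wrap the message in its originating format; no conversion happens here.
Mc0 = mc:init(mc_mqtt, MqttMsg, #{}),
%% Common details are read straight from the source data type.
Size = mc:size(Mc0),
{utf8, _CorrId} = mc:correlation_id(Mc0),   %% type-tagged value
%% Annotations record metadata accumulated during the message's lifetime.
Mc1 = mc:set_annotation(ttl, 30_000, Mc0),
%% Only when an AMQP 0.9.1 consumer takes the message is it converted.
McAmqpl = mc:convert(mc_amqpl, Mc1).
```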
COMMIT HISTORY:
* Refactor away from using the delivery{} record
In many places including exchange types. This should make it
easier to move towards using a message container type instead of
basic_message.
Add mc module and move direct replies outside of exchange
Lots of changes incl classic queues
Implement stream support incl amqp conversions
simplify mc state record
move mc.erl
mc dlx stuff
recent history exchange
Make tracking work
But doesn't take a protocol agnostic approach as we just convert
everything into AMQP legacy and back. Might be good enough for now.
Tracing as a whole may want a bit of a re-vamp at some point.
tidy
make quorum queue peek work by legacy conversion
dead lettering fixes
dead lettering fixes
CMQ fixes
rabbit_trace type fixes
fixes
fix
Fix classic queue props
test assertion fix
feature flag and backwards compat
Enable message_container feature flag in some SUITEs
Dialyzer fixes
fixes
fix
test fixes
Various
Manually update a gazelle generated file
until a gazelle enhancement can be made
https://github.com/rabbitmq/rules_erlang/issues/185
Add message_containers_SUITE to bazel
and regen bazel files with gazelle from rules_erlang@main
Simplify essential property access
Such as durable, ttl and priority by extracting them into annotations
at message container init time.
Move type
to remove the dependency on amqp10 stuff in mc.erl
mostly because I don't know how to make bazel do the right thing
add more stuff
Refine routing header stuff
wip
Cosmetics
Do not use "maybe" as type name as "maybe" is a keyword since OTP 25
which makes Erlang LS complain.
* Dedup death queue names
* Fix function clause crashes
Fix failing tests in the MQTT shared_SUITE:
A classic queue message ID can be undefined as set in
https://github.com/rabbitmq/rabbitmq-server/blob/fbe79ff47b4edbc0fd95457e623d6593161ad198/deps/rabbit/src/rabbit_classic_queue_index_v2.erl#L1048
Fix failing tests in the MQTT shared_SUITE-mixed:
When feature flag message_containers is disabled, the
message is not an #mc{} record, but a #basic_message{} record.
* Fix is_utf8_no_null crash
Prior to this commit, the function crashed if invalid UTF-8 was
provided, e.g.:
```
1> rabbit_misc:is_valid_shortstr(<<"😇"/utf16>>).
** exception error: no function clause matching rabbit_misc:is_utf8_no_null(<<216,61,222,7>>) (rabbit_misc.erl, line 1481)
```
* Implement mqtt mc behaviour
For now via amqp translation.
This is still work in progress, but the following SUITEs pass:
```
make -C deps/rabbitmq_mqtt ct-shared t=[mqtt,v5,cluster_size_1] FULL=1
make -C deps/rabbitmq_mqtt ct-v5 t=[mqtt,cluster_size_1] FULL=1
```
* Shorten mc file names
Module name length matters because for each persistent message the #mc{}
record is persisted to disk.
```
1> iolist_size(term_to_iovec({mc, rabbit_mc_amqp_legacy})).
30
2> iolist_size(term_to_iovec({mc, mc_amqpl})).
17
```
This commit renames the mc modules:
```
ag -l rabbit_mc_amqp_legacy | xargs sed -i 's/rabbit_mc_amqp_legacy/mc_amqpl/g'
ag -l rabbit_mc_amqp | xargs sed -i 's/rabbit_mc_amqp/mc_amqp/g'
ag -l rabbit_mqtt_mc | xargs sed -i 's/rabbit_mqtt_mc/mc_mqtt/g'
```
* mc: make deaths an annotation + fixes
* Fix mc_mqtt protocol_state callback
* Fix test will_delay_node_restart
```
make -C deps/rabbitmq_mqtt ct-v5 t=[mqtt,cluster_size_3]:will_delay_node_restart FULL=1
```
* Bazel run gazelle
* mix format rabbitmqctl.ex
* Ensure ttl annotation is reflected in amqp legacy protocol state
* Fix id access in message store
* Fix rabbit_message_interceptor_SUITE
* dialyzer fixes
* Fix rabbit:rabbit_message_interceptor_SUITE-mixed
set_annotation/3 should not result in duplicate keys
* Fix MQTT shared_SUITE-mixed
Up to 3.12 non-MQTT publishes were always QoS 1 regardless of delivery_mode.
https://github.com/rabbitmq/rabbitmq-server/blob/75a953ce286a10aca910c098805a4f545989af38/deps/rabbitmq_mqtt/src/rabbit_mqtt_processor.erl#L2075-L2076
From now on, non-MQTT publishes are QoS 1 if durable.
This makes more sense.
The MQTT plugin must send a #basic_message{} to an old node that does
not understand message containers.
* Field content of 'v1_0.data' can be binary
Fix
```
bazel test //deps/rabbitmq_mqtt:shared_SUITE-mixed \
--test_env FOCUS="-group [mqtt,v4,cluster_size_1] -case trace" \
-t- --test_sharding_strategy=disabled
```
* Remove route/2 and implement route/3 for all exchange types.
This removes the route/2 callback from rabbit_exchange_type and
makes route/3 mandatory instead. This is a breaking change and
will require all implementations of exchange types to update their
code, however this is necessary anyway for them to correctly handle
the mc type.
stream filtering fixes
* Translate directly from MQTT to AMQP 0.9.1
* handle undecoded properties in mc_compat
amqpl: put clause in right order
recover death details from amqp data
* Replace callback init_amqp with convert_from
* Fix return value of lists:keyfind/3
* Translate directly from AMQP 0.9.1 to MQTT
* Fix MQTT payload size
MQTT payload can be a list when converted from AMQP 0.9.1 for example
First conversions tests
Plus some other conversion related fixes.
bazel
bazel
translate amqp 1.0 null to undefined
mc: property/2 and correlation_id/message_id return type tagged values.
To ensure we can support a variety of types better.
The type tags are AMQP 1.0 flavoured.
fix death recovery
mc_mqtt: impl new api
Add callbacks to allow protocols to compact data before storage
And make readable if needing to query things repeatedly.
bazel fix
* more decoding
* tracking mixed versions compat
* mc: flip default of `durable` annotation to save some data.
Assuming most messages are durable and that in-memory messages suffer less
from persistence overhead, it makes sense for a non-existent `durable`
annotation to mean durable=true.
* mc conversion tests and tidy up
* mc make x_header unstrict again
* amqpl: death record fixes
* bazel
* amqp -> amqpl conversion test
* Fix crash in mc_amqp:size/1
Body can be a single amqp-value section (instead of
being a list) as shown by test
```
make -C deps/rabbitmq_amqp1_0/ ct-system t=java
```
on branch native-amqp.
* Fix crash in lists:flatten/1
Data can be a single amqp-value section (instead of
being a list) as shown by test
```
make -C deps/rabbitmq_amqp1_0 ct-system t=dotnet:roundtrip_to_amqp_091
```
on branch native-amqp.
* Fix crash in rabbit_writer
Running test
```
make -C deps/rabbitmq_amqp1_0 ct-system t=dotnet:roundtrip_to_amqp_091
```
on branch native-amqp resulted in the following crash:
```
crasher:
initial call: rabbit_writer:enter_mainloop/2
pid: <0.711.0>
registered_name: []
exception error: bad argument
in function size/1
called as size([<<0>>,<<"Sw">>,[<<160,2>>,<<"hi">>]])
*** argument 1: not tuple or binary
in call from rabbit_binary_generator:build_content_frames/7 (rabbit_binary_generator.erl, line 89)
in call from rabbit_binary_generator:build_simple_content_frames/4 (rabbit_binary_generator.erl, line 61)
in call from rabbit_writer:assemble_frames/5 (rabbit_writer.erl, line 334)
in call from rabbit_writer:internal_send_command_async/3 (rabbit_writer.erl, line 365)
in call from rabbit_writer:handle_message/2 (rabbit_writer.erl, line 265)
in call from rabbit_writer:handle_message/3 (rabbit_writer.erl, line 232)
in call from rabbit_writer:mainloop1/2 (rabbit_writer.erl, line 223)
```
because #content.payload_fragments_rev is currently supposed to
be a flat list of binaries instead of being an iolist.
This commit fixes this crash inefficiently by calling
iolist_to_binary/1. A better solution would be to allow AMQP legacy's #content.payload_fragments_rev
to be an iolist.
* Add accidentally deleted line back
* mc: optimise mc_amqp internal format
By removing the outer records for message and delivery annotations
as well as application properties and footers.
* mc: optimise mc_amqp map_add by using upsert
* mc: refactoring and bug fixes
* mc_SUITE routingheader assertions
* mc remove serialize/1 callback as only used by amqp
* mc_amqp: avoid returning a nested list from protocol_state
* test and bug fix
* move infer_type to mc_util
* mc fixes and additional assertions
* Support headers exchange routing for MQTT messages
When a headers exchange is bound to the MQTT topic exchange, routing
will be performed based on both MQTT topic (by the topic exchange) and
MQTT User Property (by the headers exchange).
This combines the best of both worlds, MQTT 5.0 and AMQP 0.9.1, and
enables powerful routing topologies.
When the User Property contains the same name multiple times, only the
last name (and value) will be considered by the headers exchange.
* Fix crash when sending from stream to amqpl
When publishing a message via the stream protocol and consuming it via
AMQP 0.9.1, the following crash occurred prior to this commit:
```
crasher:
initial call: rabbit_channel:init/1
pid: <0.818.0>
registered_name: []
exception exit: {{badmatch,undefined},
[{rabbit_channel,handle_deliver0,4,
[{file,"rabbit_channel.erl"},
{line,2728}]},
{lists,foldl,3,[{file,"lists.erl"},{line,1594}]},
{rabbit_channel,handle_cast,2,
[{file,"rabbit_channel.erl"},
{line,728}]},
{gen_server2,handle_msg,2,
[{file,"gen_server2.erl"},{line,1056}]},
{proc_lib,wake_up,3,
[{file,"proc_lib.erl"},{line,251}]}]}
```
This commit first gives `mc:init/3` the chance to set exchange and
routing_keys annotations.
If not set, `rabbit_stream_queue` will set these annotations assuming
the message was originally published via the stream protocol.
* Support consistent hash exchange routing for MQTT 5.0
When a consistent hash exchange is bound to the MQTT topic exchange,
MQTT 5.0 messages can be routed to queues consistently based on the
Correlation-Data in the PUBLISH packet.
* Convert MQTT 5.0 User Property
* to AMQP 0.9.1 headers
* from AMQP 0.9.1 headers
* to AMQP 1.0 application properties and message annotations
* from AMQP 1.0 application properties and message annotations
* Make use of Annotations in mc_mqtt:protocol_state/2
mc_mqtt:protocol_state/2 includes Annotations as parameter.
It's cleaner to make use of these Annotations when computing the
protocol state instead of relying on the caller (rabbitmq_mqtt_processor)
to compute the protocol state.
* Enforce AMQP 0.9.1 field name length limit
The AMQP 0.9.1 spec prohibits field names longer than 128 characters.
Therefore, when converting AMQP 1.0 message annotations, application
properties or MQTT 5.0 User Property to AMQP 0.9.1 headers, drop any
names longer than 128 characters.
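A sketch of the described filter, assuming the usual {Name, Type, Value} shape of AMQP 0.9.1 header tables (the helper name is hypothetical):
```
%% Drop any header whose name exceeds the AMQP 0.9.1 limit.
drop_long_names(Headers) ->
    [H || {Name, _Type, _Value} = H <- Headers, byte_size(Name) =< 128].
```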
* Fix type specs
Apply feedback from Michael Davis
Co-authored-by: Michael Davis <mcarsondavis@gmail.com>
* Add mc_mqtt unit test suite
Implement mc_mqtt:x_header/2
* Translate indicator that payload is UTF-8 encoded
when converting between MQTT 5.0 and AMQP 1.0
* Translate single amqp-value section from AMQP 1.0 to MQTT
Convert to a text representation, if possible, and indicate to MQTT
client that the payload is UTF-8 encoded. This way, the MQTT client will
be able to parse the payload.
If conversion to text representation is not possible, encode the payload
using the AMQP 1.0 type system and indicate the encoding via Content-Type
message/vnd.rabbitmq.amqp.
This Content-Type is not registered.
Type "message" makes sense since it's a message.
Vendor tree "vnd.rabbitmq.amqp" makes sense since subtype "amqp" alone is not
registered.
* Fix payload conversion
* Translate Response Topic between MQTT and AMQP
Translate MQTT 5.0 Response Topic to AMQP 1.0 reply-to address and vice
versa.
The Response Topic must be a UTF-8 encoded string.
This commit re-uses the already defined RabbitMQ target addresses:
```
"/topic/" RK Publish to amq.topic with routing key RK
"/exchange/" X "/" RK Publish to exchange X with routing key RK
```
By default, the MQTT topic exchange is configured to be amq.topic using
the 1st target address.
When an operator modifies the mqtt.exchange, the 2nd target address is
used.
* Apply PR feedback
and fix formatting
Co-authored-by: Michael Davis <mcarsondavis@gmail.com>
* tidy up
* Add MQTT message_containers test
* consistent hash exchange: avoid amqp legacy conversion
When hashing on a header value.
* Avoid converting to amqp legacy when using exchange federation
* Fix test flake
* test and dialyzer fixes
* dialyzer fix
* Add MQTT protocol interoperability tests
Test receiving from and sending to MQTT 5.0 and
* AMQP 0.9.1
* AMQP 1.0
* STOMP
* Streams
* Regenerate portions of deps/rabbit/app.bzl with gazelle
I'm not exactly sure how this happened, but gazelle seems to have been
run with an older version of the rules_erlang gazelle extension at
some point. This caused generation of a structure that is no longer
used. This commit updates the structure to the current pattern.
* mc: refactoring
* mc_amqpl: handle delivery annotations
Just in case they are included.
Also use iolist_to_iovec to create flat list of binaries when
converting from amqp with amqp encoded payload.
---------
Co-authored-by: David Ansari <david.ansari@gmx.de>
Co-authored-by: Michael Davis <mcarsondavis@gmail.com>
Co-authored-by: Rin Kuryloski <kuryloskip@vmware.com>
2023-08-31 18:27:13 +08:00
|
|
|
pubsub(Config) ->
|
|
|
|
Topic0 = <<"t/0">>,
|
|
|
|
Topic1 = <<"t/1">>,
|
|
|
|
C1 = connect(<<"c1">>, Config, 1, []),
|
|
|
|
{ok, _, [1]} = emqtt:subscribe(C1, Topic1, qos1),
|
2023-11-03 16:52:53 +08:00
|
|
|
C0 = connect(<<"c0">>, Config, 0, []),
|
|
|
|
{ok, _, [1]} = emqtt:subscribe(C0, Topic0, qos1),
|
Message Containers (#5077)
This PR implements an approach for a "protocol (data format) agnostic core" where the format of the message isn't converted at point of reception.
Currently all messages not originating from AMQP 0.9.1 are converted into an AMQP 0.9.1 flavoured basic_message record before being sent to a queue. If the messages are then consumed by the originating protocol they are converted back from AMQP 0.9.1. For some protocols such as MQTT 3.1 this isn't too expensive as MQTT is mostly a fairly easily mapped subset of AMQP 0.9.1, but for others such as AMQP 1.0 the conversions are awkward and in some cases lossy even if consuming from the originating protocol.
This PR instead wraps all incoming messages in their originating form into a generic, extensible message container type (mc). The container module exposes an API to get common message details such as size and various properties (ttl, priority etc) directly from the source data type. Each protocol needs to implement the mc behaviour such that when a message originating from one protocol is consumed by another protocol we convert it to the target protocol at that point.
The message container also contains annotations, dead letter records and other metadata we need to record during the lifetime of a message. The original protocol message is never modified unless it is consumed.
This includes conversion modules to and from amqp, amqpl (AMQP 0.9.1) and mqtt.
COMMIT HISTORY:
* Refactor away from using the delivery{} record
In many places including exchange types. This should make it
easier to move towards using a message container type instead of
basic_message.
Add mc module and move direct replies outside of exchange
Lots of changes incl classic queues
Implement stream support incl amqp conversions
simplify mc state record
move mc.erl
mc dlx stuff
recent history exchange
Make tracking work
But doesn't take a protocol agnostic approach as we just convert
everything into AMQP legacy and back. Might be good enough for now.
Tracing as a whole may want a bit of a re-vamp at some point.
tidy
make quorum queue peek work by legacy conversion
dead lettering fixes
dead lettering fixes
CMQ fixes
rabbit_trace type fixes
fixes
fix
Fix classic queue props
test assertion fix
feature flag and backwards compat
Enable message_container feature flag in some SUITEs
Dialyzer fixes
fixes
fix
test fixes
Various
Manually update a gazelle generated file
until a gazelle enhancement can be made
https://github.com/rabbitmq/rules_erlang/issues/185
Add message_containers_SUITE to bazel
and regen bazel files with gazelle from rules_erlang@main
Simplify essential property access
Such as durable, ttl and priority by extracting them into annotations
at message container init time.
Move type
to remove the dependency on amqp10 stuff in mc.erl
mostly because I don't know how to make bazel do the right thing
add more stuff
Refine routing header stuff
wip
Cosmetics
Do not use "maybe" as type name as "maybe" is a keyword since OTP 25
which makes Erlang LS complain.
* Dedup death queue names
* Fix function clause crashes
Fix failing tests in the MQTT shared_SUITE:
A classic queue message ID can be undefined as set in
https://github.com/rabbitmq/rabbitmq-server/blob/fbe79ff47b4edbc0fd95457e623d6593161ad198/deps/rabbit/src/rabbit_classic_queue_index_v2.erl#L1048
Fix failing tests in the MQTT shared_SUITE-mixed:
When feature flag message_containers is disabled, the
message is not an #mc{} record, but a #basic_message{} record.
* Fix is_utf8_no_null crash
Prior to this commit, the function crashed if invalid UTF-8 was
provided, e.g.:
```
1> rabbit_misc:is_valid_shortstr(<<"😇"/utf16>>).
** exception error: no function clause matching rabbit_misc:is_utf8_no_null(<<216,61,222,7>>) (rabbit_misc.erl, line 1481)
```
* Implement mqtt mc behaviour
For now via amqp translation.
This is still work in progress, but the following SUITEs pass:
```
make -C deps/rabbitmq_mqtt ct-shared t=[mqtt,v5,cluster_size_1] FULL=1
make -C deps/rabbitmq_mqtt ct-v5 t=[mqtt,cluster_size_1] FULL=1
```
* Shorten mc file names
Module name length matters because for each persistent message the #mc{}
record is persisted to disk.
```
1> iolist_size(term_to_iovec({mc, rabbit_mc_amqp_legacy})).
30
2> iolist_size(term_to_iovec({mc, mc_amqpl})).
17
```
This commit renames the mc modules:
```
ag -l rabbit_mc_amqp_legacy | xargs sed -i 's/rabbit_mc_amqp_legacy/mc_amqpl/g'
ag -l rabbit_mc_amqp | xargs sed -i 's/rabbit_mc_amqp/mc_amqp/g'
ag -l rabbit_mqtt_mc | xargs sed -i 's/rabbit_mqtt_mc/mc_mqtt/g'
```
* mc: make deaths an annotation + fixes
* Fix mc_mqtt protocol_state callback
* Fix test will_delay_node_restart
```
make -C deps/rabbitmq_mqtt ct-v5 t=[mqtt,cluster_size_3]:will_delay_node_restart FULL=1
```
* Bazel run gazelle
* mix format rabbitmqctl.ex
* Ensure ttl annotation is reflected in amqp legacy protocol state
* Fix id access in message store
* Fix rabbit_message_interceptor_SUITE
* dialyzer fixes
* Fix rabbit:rabbit_message_interceptor_SUITE-mixed
set_annotation/3 should not result in duplicate keys
* Fix MQTT shared_SUITE-mixed
Up to 3.12 non-MQTT publishes were always QoS 1 regardless of delivery_mode.
https://github.com/rabbitmq/rabbitmq-server/blob/75a953ce286a10aca910c098805a4f545989af38/deps/rabbitmq_mqtt/src/rabbit_mqtt_processor.erl#L2075-L2076
From now on, non-MQTT publishes are QoS 1 if durable.
This makes more sense.
The MQTT plugin must send a #basic_message{} to an old node that does
not understand message containers.
* Field content of 'v1_0.data' can be binary
Fix
```
bazel test //deps/rabbitmq_mqtt:shared_SUITE-mixed \
--test_env FOCUS="-group [mqtt,v4,cluster_size_1] -case trace" \
-t- --test_sharding_strategy=disabled
```
* Remove route/2 and implement route/3 for all exchange types.
This removes the route/2 callback from rabbit_exchange_type and
makes route/3 mandatory instead. This is a breaking change and
will require all implementations of exchange types to update their
code, however this is necessary anyway for them to correctly handle
the mc type.
stream filtering fixes
* Translate directly from MQTT to AMQP 0.9.1
* handle undecoded properties in mc_compat
amqpl: put clause in right order
recover death details from amqp data
* Replace callback init_amqp with convert_from
* Fix return value of lists:keyfind/3
* Translate directly from AMQP 0.9.1 to MQTT
* Fix MQTT payload size
MQTT payload can be a list when converted from AMQP 0.9.1 for example
First conversions tests
Plus some other conversion related fixes.
bazel
bazel
translate amqp 1.0 null to undefined
mc: property/2 and correlation_id/message_id return type tagged values.
To ensure we can support a variety of types better.
The type tags are AMQP 1.0 flavoured.
fix death recovery
mc_mqtt: impl new api
Add callbacks to allow protocols to compact data before storage
And make readable if needing to query things repeatedly.
bazel fix
* more decoding
* tracking mixed versions compat
* mc: flip default of `durable` annotation to save some data.
Assuming most messages are durable and that in-memory messages suffer less
from persistence overhead, it makes sense for a non-existent `durable`
annotation to mean durable=true.
* mc conversion tests and tidy up
* mc make x_header unstrict again
* amqpl: death record fixes
* bazel
* amqp -> amqpl conversion test
* Fix crash in mc_amqp:size/1
Body can be a single amqp-value section (instead of
being a list) as shown by test
```
make -C deps/rabbitmq_amqp1_0/ ct-system t=java
```
on branch native-amqp.
* Fix crash in lists:flatten/1
Data can be a single amqp-value section (instead of
being a list) as shown by test
```
make -C deps/rabbitmq_amqp1_0 ct-system t=dotnet:roundtrip_to_amqp_091
```
on branch native-amqp.
* Fix crash in rabbit_writer
Running test
```
make -C deps/rabbitmq_amqp1_0 ct-system t=dotnet:roundtrip_to_amqp_091
```
on branch native-amqp resulted in the following crash:
```
crasher:
initial call: rabbit_writer:enter_mainloop/2
pid: <0.711.0>
registered_name: []
exception error: bad argument
in function size/1
called as size([<<0>>,<<"Sw">>,[<<160,2>>,<<"hi">>]])
*** argument 1: not tuple or binary
in call from rabbit_binary_generator:build_content_frames/7 (rabbit_binary_generator.erl, line 89)
in call from rabbit_binary_generator:build_simple_content_frames/4 (rabbit_binary_generator.erl, line 61)
in call from rabbit_writer:assemble_frames/5 (rabbit_writer.erl, line 334)
in call from rabbit_writer:internal_send_command_async/3 (rabbit_writer.erl, line 365)
in call from rabbit_writer:handle_message/2 (rabbit_writer.erl, line 265)
in call from rabbit_writer:handle_message/3 (rabbit_writer.erl, line 232)
in call from rabbit_writer:mainloop1/2 (rabbit_writer.erl, line 223)
```
because #content.payload_fragments_rev is currently supposed to
be a flat list of binaries instead of being an iolist.
This commit fixes this crash inefficiently by calling
iolist_to_binary/1. A better solution would be to allow AMQP legacy's #content.payload_fragments_rev
to be an iolist.
* Add accidentally deleted line back
* mc: optimise mc_amqp internal format
By removing the outer records for message and delivery annotations
as well as application properties and footers.
* mc: optimise mc_amqp map_add by using upsert
* mc: refactoring and bug fixes
* mc_SUITE routingheader assertions
* mc remove serialize/1 callback as only used by amqp
* mc_amqp: avoid returning a nested list from protocol_state
* test and bug fix
* move infer_type to mc_util
* mc fixes and additional assertions
* Support headers exchange routing for MQTT messages
When a headers exchange is bound to the MQTT topic exchange, routing
will be performed based on both MQTT topic (by the topic exchange) and
MQTT User Property (by the headers exchange).
This combines the best of both worlds, MQTT 5.0 and AMQP 0.9.1, and
enables powerful routing topologies.
When the User Property contains the same name multiple times, only the
last name (and value) will be considered by the headers exchange.
* Fix crash when sending from stream to amqpl
When publishing a message via the stream protocol and consuming it via
AMQP 0.9.1, the following crash occurred prior to this commit:
```
crasher:
initial call: rabbit_channel:init/1
pid: <0.818.0>
registered_name: []
exception exit: {{badmatch,undefined},
[{rabbit_channel,handle_deliver0,4,
[{file,"rabbit_channel.erl"},
{line,2728}]},
{lists,foldl,3,[{file,"lists.erl"},{line,1594}]},
{rabbit_channel,handle_cast,2,
[{file,"rabbit_channel.erl"},
{line,728}]},
{gen_server2,handle_msg,2,
[{file,"gen_server2.erl"},{line,1056}]},
{proc_lib,wake_up,3,
[{file,"proc_lib.erl"},{line,251}]}]}
```
This commit first gives `mc:init/3` the chance to set exchange and
routing_keys annotations.
If not set, `rabbit_stream_queue` will set these annotations assuming
the message was originally published via the stream protocol.
* Support consistent hash exchange routing for MQTT 5.0
When a consistent hash exchange is bound to the MQTT topic exchange,
MQTT 5.0 messages can be routed to queues consistently based on the
Correlation-Data in the PUBLISH packet.
* Convert MQTT 5.0 User Property
* to AMQP 0.9.1 headers
* from AMQP 0.9.1 headers
* to AMQP 1.0 application properties and message annotations
* from AMQP 1.0 application properties and message annotations
* Make use of Annotations in mc_mqtt:protocol_state/2
mc_mqtt:protocol_state/2 includes Annotations as parameter.
It's cleaner to make use of these Annotations when computing the
protocol state instead of relying on the caller (rabbitmq_mqtt_processor)
to compute the protocol state.
* Enforce AMQP 0.9.1 field name length limit
The AMQP 0.9.1 spec prohibits field names longer than 128 characters.
Therefore, when converting AMQP 1.0 message annotations, application
properties or MQTT 5.0 User Property to AMQP 0.9.1 headers, drop any
names longer than 128 characters.
* Fix type specs
Apply feedback from Michael Davis
Co-authored-by: Michael Davis <mcarsondavis@gmail.com>
* Add mc_mqtt unit test suite
Implement mc_mqtt:x_header/2
* Translate indicator that payload is UTF-8 encoded
when converting between MQTT 5.0 and AMQP 1.0
* Translate single amqp-value section from AMQP 1.0 to MQTT
Convert to a text representation, if possible, and indicate to MQTT
client that the payload is UTF-8 encoded. This way, the MQTT client will
be able to parse the payload.
If conversion to text representation is not possible, encode the payload
using the AMQP 1.0 type system and indicate the encoding via Content-Type
message/vnd.rabbitmq.amqp.
This Content-Type is not registered.
Type "message" makes sense since it's a message.
Vendor tree "vnd.rabbitmq.amqp" makes sense since subtype "amqp" alone is not
registered.
* Fix payload conversion
* Translate Response Topic between MQTT and AMQP
Translate MQTT 5.0 Response Topic to AMQP 1.0 reply-to address and vice
versa.
The Response Topic must be a UTF-8 encoded string.
This commit re-uses the already defined RabbitMQ target addresses:
```
"/topic/" RK Publish to amq.topic with routing key RK
"/exchange/" X "/" RK Publish to exchange X with routing key RK
```
By default, the MQTT topic exchange is configured to be amq.topic using
the 1st target address.
When an operator modifies the mqtt.exchange, the 2nd target address is
used.
* Apply PR feedback
and fix formatting
Co-authored-by: Michael Davis <mcarsondavis@gmail.com>
* tidy up
* Add MQTT message_containers test
* consistent hash exchange: avoid amqp legacy conversion
When hashing on a header value.
* Avoid converting to amqp legacy when using exchange federation
* Fix test flake
* test and dialyzer fixes
* dialyzer fix
* Add MQTT protocol interoperability tests
Test receiving from and sending to MQTT 5.0 and
* AMQP 0.9.1
* AMQP 1.0
* STOMP
* Streams
* Regenerate portions of deps/rabbit/app.bzl with gazelle
I'm not exactly sure how this happened, but gazelle seems to have been
run with an older version of the rules_erlang gazelle extension at
some point. This caused generation of a structure that is no longer
used. This commit updates the structure to the current pattern.
* mc: refactoring
* mc_amqpl: handle delivery annotations
Just in case they are included.
Also use iolist_to_iovec to create flat list of binaries when
converting from amqp with amqp encoded payload.
---------
Co-authored-by: David Ansari <david.ansari@gmx.de>
Co-authored-by: Michael Davis <mcarsondavis@gmail.com>
Co-authored-by: Rin Kuryloski <kuryloskip@vmware.com>
2023-08-31 18:27:13 +08:00
|
|
|
|
|
|
|
{ok, _} = emqtt:publish(C0, Topic1, <<"m1">>, qos1),
|
|
|
|
receive {publish, #{client_pid := C1,
|
|
|
|
qos := 1,
|
|
|
|
payload := <<"m1">>}} -> ok
|
2024-12-10 23:19:34 +08:00
|
|
|
after ?TIMEOUT -> ct:fail("missing m1")
|
Message Containers (#5077)
This PR implements an approach for a "protocol (data format) agnostic core" where the format of the message isn't converted at point of reception.
Currently all messages not originating from AMQP 0.9.1 are converted into an AMQP 0.9.1 flavoured basic_message record before being sent to a queue. If the messages are then consumed by the originating protocol they are converted back from AMQP 0.9.1. For some protocols such as MQTT 3.1 this isn't too expensive as MQTT is mostly a fairly easily mapped subset of AMQP 0.9.1, but for others such as AMQP 1.0 the conversions are awkward and in some cases lossy even if consuming from the originating protocol.
This PR instead wraps all incoming messages in their originating form into a generic, extensible message container type (mc). The container module exposes an API to get common message details such as size and various properties (ttl, priority etc) directly from the source data type. Each protocol needs to implement the mc behaviour such that when a message originating from one protocol is consumed by another protocol we convert it to the target protocol at that point.
The message container also contains annotations, dead letter records and other metadata we need to record during the lifetime of a message. The original protocol message is never modified unless it is consumed.
This includes conversion modules to and from amqp, amqpl (AMQP 0.9.1) and mqtt.
COMMIT HISTORY:
* Refactor away from using the delivery{} record
In many places including exchange types. This should make it
easier to move towards using a message container type instead of
basic_message.
Add mc module and move direct replies outside of exchange
Lots of changes incl classic queues
Implement stream support incl amqp conversions
simplify mc state record
move mc.erl
mc dlx stuff
recent history exchange
Make tracking work
But doesn't take a protocol agnostic approach as we just convert
everything into AMQP legacy and back. Might be good enough for now.
Tracing as a whole may want a bit of a re-vamp at some point.
tidy
make quorum queue peek work by legacy conversion
dead lettering fixes
dead lettering fixes
CMQ fixes
rabbit_trace type fixes
fixes
fix
Fix classic queue props
test assertion fix
feature flag and backwards compat
Enable message_container feature flag in some SUITEs
Dialyzer fixes
fixes
fix
test fixes
Various
Manually update a gazelle generated file
until a gazelle enhancement can be made
https://github.com/rabbitmq/rules_erlang/issues/185
Add message_containers_SUITE to bazel
and regen bazel files with gazelle from rules_erlang@main
Simplify essential property access
Such as durable, ttl and priority by extracting them into annotations
at message container init time.
Move type
to remove the dependency on amqp10 stuff in mc.erl
mostly because I don't know how to make bazel do the right thing
add more stuff
Refine routing header stuff
wip
Cosmetics
Do not use "maybe" as type name as "maybe" is a keyword since OTP 25
which makes Erlang LS complain.
* Dedup death queue names
* Fix function clause crashes
Fix failing tests in the MQTT shared_SUITE:
A classic queue message ID can be undefined as set in
https://github.com/rabbitmq/rabbitmq-server/blob/fbe79ff47b4edbc0fd95457e623d6593161ad198/deps/rabbit/src/rabbit_classic_queue_index_v2.erl#L1048
Fix failing tests in the MQTT shared_SUITE-mixed:
When feature flag message_containers is disabled, the
message is not an #mc{} record, but a #basic_message{} record.
* Fix is_utf8_no_null crash
Prior to this commit, the function crashed if invalid UTF-8 was
provided, e.g.:
```
1> rabbit_misc:is_valid_shortstr(<<"😇"/utf16>>).
** exception error: no function clause matching rabbit_misc:is_utf8_no_null(<<216,61,222,7>>) (rabbit_misc.erl, line 1481)
```
* Implement mqtt mc behaviour
For now via amqp translation.
This is still work in progress, but the following SUITEs pass:
```
make -C deps/rabbitmq_mqtt ct-shared t=[mqtt,v5,cluster_size_1] FULL=1
make -C deps/rabbitmq_mqtt ct-v5 t=[mqtt,cluster_size_1] FULL=1
```
* Shorten mc file names
Module name length matters because for each persistent message the #mc{}
record is persisted to disk.
```
1> iolist_size(term_to_iovec({mc, rabbit_mc_amqp_legacy})).
30
2> iolist_size(term_to_iovec({mc, mc_amqpl})).
17
```
This commit renames the mc modules:
```
ag -l rabbit_mc_amqp_legacy | xargs sed -i 's/rabbit_mc_amqp_legacy/mc_amqpl/g'
ag -l rabbit_mc_amqp | xargs sed -i 's/rabbit_mc_amqp/mc_amqp/g'
ag -l rabbit_mqtt_mc | xargs sed -i 's/rabbit_mqtt_mc/mc_mqtt/g'
```
* mc: make deaths an annotation + fixes
* Fix mc_mqtt protocol_state callback
* Fix test will_delay_node_restart
```
make -C deps/rabbitmq_mqtt ct-v5 t=[mqtt,cluster_size_3]:will_delay_node_restart FULL=1
```
* Bazel run gazelle
* mix format rabbitmqctl.ex
* Ensure ttl annotation is reflected in amqp legacy protocol state
* Fix id access in message store
* Fix rabbit_message_interceptor_SUITE
* dialyzer fixes
* Fix rabbit:rabbit_message_interceptor_SUITE-mixed
set_annotation/3 should not result in duplicate keys
* Fix MQTT shared_SUITE-mixed
Up to 3.12 non-MQTT publishes were always QoS 1 regardless of delivery_mode.
https://github.com/rabbitmq/rabbitmq-server/blob/75a953ce286a10aca910c098805a4f545989af38/deps/rabbitmq_mqtt/src/rabbit_mqtt_processor.erl#L2075-L2076
From now on, non-MQTT publishes are QoS 1 if durable.
This makes more sense.
The MQTT plugin must send a #basic_message{} to an old node that does
not understand message containers.
* Field content of 'v1_0.data' can be binary
Fix
```
bazel test //deps/rabbitmq_mqtt:shared_SUITE-mixed \
--test_env FOCUS="-group [mqtt,v4,cluster_size_1] -case trace" \
-t- --test_sharding_strategy=disabled
```
* Remove route/2 and implement route/3 for all exchange types.
This removes the route/2 callback from rabbit_exchange_type and
makes route/3 mandatory instead. This is a breaking change and
will require all implementations of exchange types to update their
code, however this is necessary anyway for them to correctly handle
the mc type.
stream filtering fixes
* Translate directly from MQTT to AMQP 0.9.1
* handle undecoded properties in mc_compat
amqpl: put clause in right order
recover death details from amqp data
* Replace callback init_amqp with convert_from
* Fix return value of lists:keyfind/3
* Translate directly from AMQP 0.9.1 to MQTT
* Fix MQTT payload size
MQTT payload can be a list when converted from AMQP 0.9.1 for example
First conversions tests
Plus some other conversion related fixes.
bazel
bazel
translate amqp 1.0 null to undefined
mc: property/2 and correlation_id/message_id return type tagged values.
To ensure we can support a variety of types better.
The type tags are AMQP 1.0 flavoured.
fix death recovery
mc_mqtt: impl new api
Add callbacks to allow protocols to compact data before storage
And make readable if needing to query things repeatedly.
bazel fix
* more decoding
* tracking mixed versions compat
* mc: flip default of `durable` annotation to save some data.
Assuming most messages are durable and that in-memory messages suffer less
from persistence overhead, it makes sense for a non-existent `durable`
annotation to mean durable=true.
* mc conversion tests and tidy up
* mc make x_header unstrict again
* amqpl: death record fixes
* bazel
* amqp -> amqpl conversion test
* Fix crash in mc_amqp:size/1
Body can be a single amqp-value section (instead of
being a list) as shown by test
```
make -C deps/rabbitmq_amqp1_0/ ct-system t=java
```
on branch native-amqp.
* Fix crash in lists:flatten/1
Data can be a single amqp-value section (instead of
being a list) as shown by test
```
make -C deps/rabbitmq_amqp1_0 ct-system t=dotnet:roundtrip_to_amqp_091
```
on branch native-amqp.
* Fix crash in rabbit_writer
Running test
```
make -C deps/rabbitmq_amqp1_0 ct-system t=dotnet:roundtrip_to_amqp_091
```
on branch native-amqp resulted in the following crash:
```
crasher:
initial call: rabbit_writer:enter_mainloop/2
pid: <0.711.0>
registered_name: []
exception error: bad argument
in function size/1
called as size([<<0>>,<<"Sw">>,[<<160,2>>,<<"hi">>]])
*** argument 1: not tuple or binary
in call from rabbit_binary_generator:build_content_frames/7 (rabbit_binary_generator.erl, line 89)
in call from rabbit_binary_generator:build_simple_content_frames/4 (rabbit_binary_generator.erl, line 61)
in call from rabbit_writer:assemble_frames/5 (rabbit_writer.erl, line 334)
in call from rabbit_writer:internal_send_command_async/3 (rabbit_writer.erl, line 365)
in call from rabbit_writer:handle_message/2 (rabbit_writer.erl, line 265)
in call from rabbit_writer:handle_message/3 (rabbit_writer.erl, line 232)
in call from rabbit_writer:mainloop1/2 (rabbit_writer.erl, line 223)
```
because #content.payload_fragments_rev is currently supposed to
be a flat list of binaries instead of being an iolist.
This commit fixes this crash inefficiently by calling
iolist_to_binary/1. A better solution would be to allow AMQP legacy's #content.payload_fragments_rev
to be an iolist.
* Add accidentally deleted line back
* mc: optimise mc_amqp internal format
By removint the outer records for message and delivery annotations
as well as application properties and footers.
* mc: optimis mc_amqp map_add by using upsert
* mc: refactoring and bug fixes
* mc_SUITE routingheader assertions
* mc remove serialize/1 callback as only used by amqp
* mc_amqp: avoid returning a nested list from protocol_state
* test and bug fix
* move infer_type to mc_util
* mc fixes and additiona assertions
* Support headers exchange routing for MQTT messages
When a headers exchange is bound to the MQTT topic exchange, routing
will be performend based on both MQTT topic (by the topic exchange) and
MQTT User Property (by the headers exchange).
This combines the best worlds of both MQTT 5.0 and AMQP 0.9.1 and
enables powerful routing topologies.
When the User Property contains the same name multiple times, only the
last name (and value) will be considered by the headers exchange.
* Fix crash when sending from stream to amqpl
When publishing a message via the stream protocol and consuming it via
AMQP 0.9.1, the following crash occurred prior to this commit:
```
crasher:
initial call: rabbit_channel:init/1
pid: <0.818.0>
registered_name: []
exception exit: {{badmatch,undefined},
[{rabbit_channel,handle_deliver0,4,
[{file,"rabbit_channel.erl"},
{line,2728}]},
{lists,foldl,3,[{file,"lists.erl"},{line,1594}]},
{rabbit_channel,handle_cast,2,
[{file,"rabbit_channel.erl"},
{line,728}]},
{gen_server2,handle_msg,2,
[{file,"gen_server2.erl"},{line,1056}]},
{proc_lib,wake_up,3,
[{file,"proc_lib.erl"},{line,251}]}]}
```
This commit first gives `mc:init/3` the chance to set exchange and
routing_keys annotations.
If not set, `rabbit_stream_queue` will set these annotations assuming
the message was originally published via the stream protocol.
* Support consistent hash exchange routing for MQTT 5.0
When a consistent hash exchange is bound to the MQTT topic exchange,
MQTT 5.0 messages can be routed to queues consistently based on the
Correlation-Data in the PUBLISH packet.
* Convert MQTT 5.0 User Property
* to AMQP 0.9.1 headers
* from AMQP 0.9.1 headers
* to AMQP 1.0 application properties and message annotations
* from AMQP 1.0 application properties and message annotations
* Make use of Annotations in mc_mqtt:protocol_state/2
mc_mqtt:protocol_state/2 includes Annotations as parameter.
It's cleaner to make use of these Annotations when computing the
protocol state instead of relying on the caller (rabbitmq_mqtt_processor)
to compute the protocol state.
* Enforce AMQP 0.9.1 field name length limit
The AMQP 0.9.1 spec prohibits field names longer than 128 characters.
Therefore, when converting AMQP 1.0 message annotations, application
properties or MQTT 5.0 User Property to AMQP 0.9.1 headers, drop any
names longer than 128 characters.
* Fix type specs
Apply feedback from Michael Davis
Co-authored-by: Michael Davis <mcarsondavis@gmail.com>
* Add mc_mqtt unit test suite
Implement mc_mqtt:x_header/2
* Translate indicator that payload is UTF-8 encoded
when converting between MQTT 5.0 and AMQP 1.0
* Translate single amqp-value section from AMQP 1.0 to MQTT
Convert to a text representation, if possible, and indicate to MQTT
client that the payload is UTF-8 encoded. This way, the MQTT client will
be able to parse the payload.
If conversion to text representation is not possible, encode the payload
using the AMQP 1.0 type system and indiate the encoding via Content-Type
message/vnd.rabbitmq.amqp.
This Content-Type is not registered.
Type "message" makes sense since it's a message.
Vendor tree "vnd.rabbitmq.amqp" makes sense since merely subtype "amqp" is not
registered.
* Fix payload conversion
* Translate Response Topic between MQTT and AMQP
Translate MQTT 5.0 Response Topic to AMQP 1.0 reply-to address and vice
versa.
The Response Topic must be a UTF-8 encoded string.
This commit re-uses the already defined RabbitMQ target addresses:
```
"/topic/" RK Publish to amq.topic with routing key RK
"/exchange/" X "/" RK Publish to exchange X with routing key RK
```
By default, the MQTT topic exchange is configure dto be amq.topic using
the 1st target address.
When an operator modifies the mqtt.exchange, the 2nd target address is
used.
* Apply PR feedback
and fix formatting
Co-authored-by: Michael Davis <mcarsondavis@gmail.com>
* tidy up
* Add MQTT message_containers test
* consistent hash exchange: avoid amqp legacy conversion
When hashing on a header value.
* Avoid converting to amqp legacy when using exchange federation
* Fix test flake
* test and dialyzer fixes
* dialyzer fix
* Add MQTT protocol interoperability tests
Test receiving from and sending to MQTT 5.0 and
* AMQP 0.9.1
* AMQP 1.0
* STOMP
* Streams
* Regenerate portions of deps/rabbit/app.bzl with gazelle
I'm not exactly sure how this happened, but gazell seems to have been
run with an older version of the rules_erlang gazelle extension at
some point. This caused generation of a structure that is no longer
used. This commit updates the structure to the current pattern.
* mc: refactoring
* mc_amqpl: handle delivery annotations
Just in case they are included.
Also use iolist_to_iovec to create flat list of binaries when
converting from amqp with amqp encoded payload.
---------
Co-authored-by: David Ansari <david.ansari@gmx.de>
Co-authored-by: Michael Davis <mcarsondavis@gmail.com>
Co-authored-by: Rin Kuryloski <kuryloskip@vmware.com>
2023-08-31 18:27:13 +08:00
|
|
|
    end,

    ok = emqtt:publish(C0, Topic1, <<"m2">>, qos0),
    receive {publish, #{client_pid := C1,
                        qos := 0,
                        payload := <<"m2">>}} -> ok
    after ?TIMEOUT -> ct:fail("missing m2")
    end,

    {ok, _} = emqtt:publish(C1, Topic0, <<"m3">>, qos1),
    receive {publish, #{client_pid := C0,
                        qos := 1,
                        payload := <<"m3">>}} -> ok
    after ?TIMEOUT -> ct:fail("missing m3")
    end,

    ok = emqtt:publish(C1, Topic0, <<"m4">>, qos0),
    receive {publish, #{client_pid := C0,
                        qos := 0,
                        payload := <<"m4">>}} -> ok
    after ?TIMEOUT -> ct:fail("missing m4")
    end,

    ok = emqtt:disconnect(C0),
    ok = emqtt:disconnect(C1).
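
%% Publish with QoS 1 while the target classic queue's host node is down:
%% v3 and v4 clients time out waiting for the PUBACK, whereas v5 clients
%% receive a PUBACK with a negative reason code.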
queue_down_qos1(Config) ->
    {Conn1, Ch1} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 1),
    CQ = Topic = atom_to_binary(?FUNCTION_NAME),
    declare_queue(Ch1, CQ, []),
    bind(Ch1, CQ, Topic),
    ok = rabbit_ct_client_helpers:close_connection_and_channel(Conn1, Ch1),
    ok = rabbit_ct_broker_helpers:stop_node(Config, 1),
    C = connect(?FUNCTION_NAME, Config, [{retry_interval, 2}]),

    %% The classic queue is down, therefore the message is rejected.
    V = ?config(mqtt_version, Config),
    if V =:= v3 orelse V =:= v4 ->
           ?assertEqual(puback_timeout, util:publish_qos1_timeout(C, Topic, <<"msg">>, 500)),
           ok = rabbit_ct_broker_helpers:start_node(Config, 1),
           %% The classic queue is up again. Therefore, the message should arrive.
           eventually(?_assertEqual([[<<"1">>]],
                                    rabbitmqctl_list(Config, 1, ["list_queues", "messages", "--no-table-headers"])),
                      500, 20);
       V =:= v5 ->
           ?assertMatch({ok, #{reason_code_name := implementation_specific_error}},
                        emqtt:publish(C, Topic, <<"msg">>, qos1)),
           ok = rabbit_ct_broker_helpers:start_node(Config, 1)
    end,

    {Conn, Ch0} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
    delete_queue(Ch0, CQ),
    ok = emqtt:disconnect(C),
    ok = rabbit_ct_client_helpers:close_connection_and_channel(Conn, Ch0).

%% The classic queue that the MQTT client consumes from, hosted on a
%% different node, goes down.
consuming_classic_queue_down(Config) ->
    [Server1, _Server2, Server3] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
    ClientId = Topic = atom_to_binary(?FUNCTION_NAME),

    %% Declare a classic queue on Server1.
    C1 = connect(ClientId, Config, non_clean_sess_opts()),
    {ok, _, _} = emqtt:subscribe(C1, Topic, qos1),
    ok = emqtt:disconnect(C1),

    %% Consume from Server3.
    C2 = connect(ClientId, Config, Server3, non_clean_sess_opts()),

    ProtoVer = ?config(mqtt_version, Config),
    ?assertMatch(#{consumers := 1},
                 get_global_counters(Config, ProtoVer, Server3)),

    %% Let's stop the queue leader node.
    process_flag(trap_exit, true),
    ok = rabbit_ct_broker_helpers:stop_node(Config, Server1),

    %% When the dedicated MQTT connection queue goes down, it is reasonable
    %% that the server closes the MQTT connection because the MQTT client cannot consume anymore.
    eventually(?_assertMatch(#{consumers := 0},
                             get_global_counters(Config, ProtoVer, Server3)),
               1000, 5),
    await_exit(C2),

    %% Cleanup
    ok = rabbit_ct_broker_helpers:start_node(Config, Server1),
    C3 = connect(ClientId, Config, Server3, [{clean_start, true}]),
    ok = emqtt:disconnect(C3),
    ?assertEqual([],
                 rpc(Config, Server1, rabbit_amqqueue, list, [])),
    ok.
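
%% Delete queues (and later re-declare them) while QoS 1 publishes are in
%% flight and confirms are still outstanding.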
delete_create_queue(Config) ->
    {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config),
    CQ1 = <<"classic-queue-1-delete-create">>,
    CQ2 = <<"classic-queue-2-delete-create">>,
    QQ = <<"quorum-queue-delete-create">>,
    Topic = atom_to_binary(?FUNCTION_NAME),

    DeclareQueues = fun() ->
                            declare_queue(Ch, CQ1, []),
                            bind(Ch, CQ1, Topic),
                            declare_queue(Ch, CQ2, []),
                            bind(Ch, CQ2, Topic),
                            declare_queue(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}]),
                            bind(Ch, QQ, Topic)
                    end,
    DeclareQueues(),

    %% Use a large retry_interval to avoid re-sending.
    C = connect(?FUNCTION_NAME, Config, [{retry_interval, 300}]),
    NumMsgs = 50,
    TestPid = self(),
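    %% Publish asynchronously from a helper process. The confirm callback is
    %% applied to the extra arguments ([N]) followed by the publish result,
    %% so every successful PUBACK reports its message number back to the
    %% test process.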
    spawn(
      fun() ->
              lists:foreach(
                fun(N) ->
                        ok = emqtt:publish_async(C, Topic, integer_to_binary(N), qos1,
                                                 {fun(N0, {ok, #{reason_code_name := success}}) ->
                                                          TestPid ! {self(), N0}
                                                  end, [N]})
                end, lists:seq(1, NumMsgs))
      end),

    %% Delete queues while sending to them.
    %% We want to test the path where a queue is deleted while confirms are outstanding.
    timer:sleep(2),
    delete_queue(Ch, [CQ1, QQ]),
    %% Give queues some time to be fully deleted.
    %% TODO: wait longer for quorum queues in mixed mode as it can take longer
    %% for deletion to complete; the delete timeout is 5s, so we need to exceed that.
    timer:sleep(6000),

    %% We expect confirms for all messages.
    %% A confirm here does not mean that the message ever made it to the deleted queues.
    %% It is valid for confirms to sporadically arrive out of order: this happens when the
    %% classic queue is being deleted while the remaining messages are routed and confirmed
    %% to the 2nd and 3rd queues before the monitor to the classic queue fires.
    ok = await_confirms_unordered(C, NumMsgs),

    %% Recreate the same queues.
    DeclareQueues(),

    %% Sending a message to each of them should work.
    {ok, _} = emqtt:publish(C, Topic, <<"m">>, qos1),
    eventually(?_assertEqual(lists:sort([[CQ1, <<"1">>],
                                         %% This queue should have all messages because we did not delete it.
                                         [CQ2, integer_to_binary(NumMsgs + 1)],
                                         [QQ, <<"1">>]]),
                             lists:sort(rabbitmqctl_list(Config, 0, ["list_queues", "name", "messages", "--no-table-headers"]))),
               1000, 10),

    delete_queue(Ch, [CQ1, CQ2, QQ]),
    ok = emqtt:disconnect(C),
    ok = rabbit_ct_client_helpers:close_connection_and_channel(Conn, Ch).

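%% Hedged sketch of how the unordered confirm collection used above can be
%% implemented: count the callback messages sent by the publish_async callbacks
%% until all expected confirms have arrived, in whatever order they come. The
%% real helper await_confirms_unordered/2 is defined elsewhere in this suite;
%% the name and timeout below are illustrative.
collect_confirms_example(0) ->
    ok;
collect_confirms_example(Left) ->
    receive
        {Pid, _N} when is_pid(Pid) ->
            collect_confirms_example(Left - 1)
    after ?TIMEOUT ->
            {confirms_missing, Left}
    end.
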
session_expiry(Config) ->
    App = rabbitmq_mqtt,
    Par = max_session_expiry_interval_seconds,
    Seconds = 1,
    {ok, DefaultVal} = rpc(Config, application, get_env, [App, Par]),
    ok = rpc(Config, application, set_env, [App, Par, Seconds]),

    C = connect(?FUNCTION_NAME, Config, non_clean_sess_opts()),
    {ok, _, [0, 1]} = emqtt:subscribe(C, [{<<"topic0">>, qos0},
                                          {<<"topic1">>, qos1}]),
    ok = emqtt:disconnect(C),

    ?assertEqual(2, rpc(Config, rabbit_amqqueue, count, [])),
    timer:sleep(timer:seconds(Seconds) + 100),
    %% On a slow machine this test might fail, so check
    %% for the expiry over a longer time window.
    ?awaitMatch(0, rpc(Config, rabbit_amqqueue, count, []), 15_000, 1000),

    ok = rpc(Config, application, set_env, [App, Par, DefaultVal]).

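%% The get_env/set_env dance above is a recurring pattern. A hedged sketch of a
%% reusable wrapper that restores the previous value even if the body fails;
%% the helper name is illustrative and not part of this suite:
with_app_env_example(Config, App, Par, TempVal, Fun) ->
    {ok, Default} = rpc(Config, application, get_env, [App, Par]),
    ok = rpc(Config, application, set_env, [App, Par, TempVal]),
    try
        Fun()
    after
        ok = rpc(Config, application, set_env, [App, Par, Default])
    end.
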
non_clean_sess_reconnect_qos1(Config) ->
    non_clean_sess_reconnect(Config, 1).

non_clean_sess_reconnect_qos0(Config) ->
    non_clean_sess_reconnect(Config, 0).

non_clean_sess_reconnect(Config, SubscriptionQoS) ->
    Pub = connect(<<"publisher">>, Config),
    Topic = ClientId = atom_to_binary(?FUNCTION_NAME),

    C1 = connect(ClientId, Config, non_clean_sess_opts()),
    {ok, _, [SubscriptionQoS]} = emqtt:subscribe(C1, Topic, SubscriptionQoS),
    ok = await_consumer_count(1, ClientId, SubscriptionQoS, Config),

    ok = emqtt:disconnect(C1),
    ok = await_consumer_count(0, ClientId, SubscriptionQoS, Config),

    ok = emqtt:publish(Pub, Topic, <<"msg-3-qos0">>, qos0),
    {ok, _} = emqtt:publish(Pub, Topic, <<"msg-4-qos1">>, qos1),

    C2 = connect(ClientId, Config, non_clean_sess_opts()),
    %% Server should reply in CONNACK that it has session state.
    ?assertEqual({session_present, 1},
                 proplists:lookup(session_present, emqtt:info(C2))),
    ok = await_consumer_count(1, ClientId, SubscriptionQoS, Config),

    ok = emqtt:publish(Pub, Topic, <<"msg-5-qos0">>, qos0),
    {ok, _} = emqtt:publish(Pub, Topic, <<"msg-6-qos1">>, qos1),

    %% shouldn't receive message after unsubscribe
    {ok, _, _} = emqtt:unsubscribe(C2, Topic),
    ?assertMatch(#{consumers := 0},
                 get_global_counters(Config)),
    {ok, _} = emqtt:publish(Pub, Topic, <<"msg-7-qos1">>, qos1),

    %% "After the disconnection of a Session that had CleanSession set to 0, the Server MUST store
    %% further QoS 1 and QoS 2 messages that match any subscriptions that the client had at the
    %% time of disconnection as part of the Session state [MQTT-3.1.2-5].
    %% It MAY also store QoS 0 messages that meet the same criteria."
    %% Starting with RabbitMQ v3.12 we store QoS 0 messages as well.
    ok = expect_publishes(C2, Topic, [<<"msg-3-qos0">>, <<"msg-4-qos1">>,
                                      <<"msg-5-qos0">>, <<"msg-6-qos1">>]),
    {publish_not_received, <<"msg-7-qos1">>} = expect_publishes(C2, Topic, [<<"msg-7-qos1">>]),

    ok = emqtt:disconnect(Pub),
    ok = emqtt:disconnect(C2),
    %% connect with clean sess true to clean up
    C3 = connect(ClientId, Config, [{clean_start, true}]),
    ok = emqtt:disconnect(C3).

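%% For context on the non_clean_sess_opts/0 import used throughout: a hedged
%% sketch of what such connect options have to express. MQTT v3/v4 request a
%% persistent session via clean_start=false; MQTT v5 additionally needs a
%% non-zero Session-Expiry-Interval property. The concrete values below are
%% assumptions for illustration, not util's actual implementation.
non_clean_sess_opts_example(V) when V =:= v3; V =:= v4 ->
    [{clean_start, false}];
non_clean_sess_opts_example(v5) ->
    [{clean_start, false},
     {properties, #{'Session-Expiry-Interval' => 16#FFFFFFFF}}].
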
non_clean_sess_reconnect_qos0_and_qos1(Config) ->
    Pub = connect(<<"publisher">>, Config),
    Topic0 = <<"t/0">>,
    Topic1 = <<"t/1">>,
    ClientId = ?FUNCTION_NAME,

    C1 = connect(ClientId, Config, non_clean_sess_opts()),
    {ok, _, [1, 0]} = emqtt:subscribe(C1, [{Topic1, qos1},
                                           {Topic0, qos0}]),
    ok = await_consumer_count(1, ClientId, 0, Config),
    ok = await_consumer_count(1, ClientId, 1, Config),

    ok = emqtt:disconnect(C1),
    ok = await_consumer_count(0, ClientId, 0, Config),
    ok = await_consumer_count(0, ClientId, 1, Config),
    {ok, _} = emqtt:publish(Pub, Topic0, <<"msg-0">>, qos1),
    {ok, _} = emqtt:publish(Pub, Topic1, <<"msg-1">>, qos1),

    C2 = connect(ClientId, Config, non_clean_sess_opts()),
    ok = await_consumer_count(1, ClientId, 0, Config),
    ok = await_consumer_count(1, ClientId, 1, Config),
    ok = expect_publishes(C2, Topic0, [<<"msg-0">>]),
    ok = expect_publishes(C2, Topic1, [<<"msg-1">>]),

    ok = emqtt:disconnect(Pub),
    ok = emqtt:disconnect(C2),
    C3 = connect(ClientId, Config, [{clean_start, true}]),
    ok = emqtt:disconnect(C3).

non_clean_sess_empty_client_id(Config) ->
    {C, Connect} = util:start_client(<<>>, Config, 0, non_clean_sess_opts()),
    case ?config(mqtt_version, Config) of
        V when V =:= v3;
               V =:= v4 ->
            %% "If the Client supplies a zero-byte ClientId with CleanSession set to 0,
            %% the Server MUST respond to the CONNECT Packet with a CONNACK return code 0x02
            %% (Identifier rejected) and then close the Network Connection" [MQTT-3.1.3-8].
            process_flag(trap_exit, true),
            ?assertMatch({error, {client_identifier_not_valid, _}}, Connect(C)),
            ok = await_exit(C);
        v5 ->
            %% "If the Client connects using a zero length Client Identifier, the Server MUST respond with
            %% a CONNACK containing an Assigned Client Identifier. The Assigned Client Identifier MUST be
            %% a new Client Identifier not used by any other Session currently in the Server [MQTT-3.2.2-16]."
            {ok, #{'Assigned-Client-Identifier' := ClientId}} = Connect(C),
            {C2, Connect2} = util:start_client(<<>>, Config, 0, [{clean_start, true}]),
            {ok, #{'Assigned-Client-Identifier' := ClientId2}} = Connect2(C2),
            ?assertNotEqual(ClientId, ClientId2),
            ok = emqtt:disconnect(C),
            ok = emqtt:disconnect(C2)
    end.

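%% Hedged aside: MQTT v3/v4 clients that want a non-clean session therefore
%% cannot rely on a server-assigned identifier and typically generate a client
%% ID themselves. A minimal sketch (illustrative helper, not used by the tests):
self_assigned_client_id_example() ->
    <<"client-", (integer_to_binary(erlang:unique_integer([positive])))/binary>>.
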
subscribe_same_topic_same_qos(Config) ->
    C = connect(?FUNCTION_NAME, Config),
    Topic = <<"a/b">>,

    {ok, _} = emqtt:publish(C, Topic, <<"retained">>, [{retain, true},
                                                       {qos, 1}]),
    %% Subscribe with QoS 0
    {ok, _, [0]} = emqtt:subscribe(C, Topic, qos0),
    {ok, _} = emqtt:publish(C, Topic, <<"msg1">>, qos1),
    %% Subscribe to same topic with same QoS
    {ok, _, [0]} = emqtt:subscribe(C, Topic, qos0),
    {ok, _} = emqtt:publish(C, Topic, <<"msg2">>, qos1),

    %% "Any existing retained messages matching the Topic Filter MUST be re-sent" [MQTT-3.8.4-3]
    ok = expect_publishes(C, Topic, [<<"retained">>, <<"msg1">>,
                                     <<"retained">>, <<"msg2">>]),
    ok = emqtt:disconnect(C).

subscribe_same_topic_different_qos(Config) ->
    C = connect(?FUNCTION_NAME, Config, non_clean_sess_opts()),
    Topic = <<"b/c">>,

    {ok, _} = emqtt:publish(C, Topic, <<"retained">>, [{retain, true},
                                                       {qos, 1}]),
    %% Subscribe with QoS 0
    {ok, _, [0]} = emqtt:subscribe(C, Topic, qos0),
    {ok, _} = emqtt:publish(C, Topic, <<"msg1">>, qos1),
    %% Subscribe to same topic with QoS 1
    {ok, _, [1]} = emqtt:subscribe(C, Topic, qos1),
    {ok, _} = emqtt:publish(C, Topic, <<"msg2">>, qos1),
    %% Subscribe to same topic with QoS 0 again
    {ok, _, [0]} = emqtt:subscribe(C, Topic, qos0),
    {ok, _} = emqtt:publish(C, Topic, <<"msg3">>, qos1),

    %% "Any existing retained messages matching the Topic Filter MUST be re-sent" [MQTT-3.8.4-3]
    ok = expect_publishes(C, Topic, [<<"retained">>, <<"msg1">>,
                                     <<"retained">>, <<"msg2">>,
                                     <<"retained">>, <<"msg3">>]),

    %% There should be exactly one consumer for each queue: qos0 and qos1
    Consumers = rpc(Config, rabbit_amqqueue, consumers_all, [<<"/">>]),
    ?assertEqual(2, length(Consumers)),

    ok = emqtt:disconnect(C),
    C1 = connect(?FUNCTION_NAME, Config, [{clean_start, true}]),
    ok = emqtt:disconnect(C1).

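%% Related background as a hedged sketch: per the MQTT spec, a retained message
%% is cleared by publishing a zero-byte payload with the retain flag set. Not
%% exercised by the tests above; purely illustrative.
clear_retained_example(C, Topic) ->
    {ok, _} = emqtt:publish(C, Topic, <<>>, [{retain, true}, {qos, 1}]),
    ok.
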
subscribe_multiple(Config) ->
    C = connect(?FUNCTION_NAME, Config),
    %% Subscribe to multiple topics at once
    ?assertMatch({ok, _, [0, 1]},
                 emqtt:subscribe(C, [{<<"topic0">>, qos0},
                                     {<<"topic1">>, qos1}])),
    ok = emqtt:disconnect(C).

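%% Hedged sketch of the property this test relies on: the granted QoS list in
%% the SUBACK is positional, i.e. the N-th granted QoS answers the N-th topic
%% filter of the SUBSCRIBE packet, so requested and granted values can simply
%% be zipped for verification. Illustrative helper only:
granted_qos_pairs_example(TopicFilters, GrantedQos) ->
    lists:zip([Topic || {Topic, _QoS} <- TopicFilters], GrantedQos).
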
large_message_mqtt_to_mqtt(Config) ->
    Topic = ClientId = atom_to_binary(?FUNCTION_NAME),
    C = connect(ClientId, Config),
    {ok, _, [1]} = emqtt:subscribe(C, {Topic, qos1}),

    Payload0 = binary:copy(<<"x">>, 8_000_000),
    Payload = <<Payload0/binary, "y">>,
    {ok, _} = emqtt:publish(C, Topic, Payload, qos1),
    ok = expect_publishes(C, Topic, [Payload]),
    ok = emqtt:disconnect(C).

large_message_amqp_to_mqtt(Config) ->
    Topic = ClientId = atom_to_binary(?FUNCTION_NAME),
    C = connect(ClientId, Config),
    {ok, _, [1]} = emqtt:subscribe(C, {Topic, qos1}),

    {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config),
    Payload0 = binary:copy(<<"x">>, 8_000_000),
    Payload = <<Payload0/binary, "y">>,
    amqp_channel:call(Ch,
                      #'basic.publish'{exchange = <<"amq.topic">>,
                                       routing_key = Topic},
                      #amqp_msg{payload = Payload}),
    ok = expect_publishes(C, Topic, [Payload]),
    ok = emqtt:disconnect(C),
    ok = rabbit_ct_client_helpers:close_connection_and_channel(Conn, Ch).

amqp_to_mqtt_qos0(Config) ->
    Topic = ClientId = Payload = atom_to_binary(?FUNCTION_NAME),
    C = connect(ClientId, Config),
    {ok, _, [0]} = emqtt:subscribe(C, {Topic, qos0}),

    {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config),
    amqp_channel:call(Ch,
                      #'basic.publish'{exchange = <<"amq.topic">>,
                                       routing_key = Topic},
                      #amqp_msg{payload = Payload}),
    ok = expect_publishes(C, Topic, [Payload]),
    ok = emqtt:disconnect(C),
    ok = rabbit_ct_client_helpers:close_connection_and_channel(Conn, Ch).

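%% Hedged sketch of the topic translation that makes the amq.topic publishes
%% above reach MQTT subscribers: RabbitMQ's MQTT plugin maps MQTT's '/' topic
%% level separator to the '.' separator of AMQP 0.9.1 routing keys. The real
%% translation lives in rabbit_mqtt_util; this helper is illustrative.
mqtt_topic_to_routing_key_example(Topic) ->
    binary:replace(Topic, <<"/">>, <<".">>, [global]).
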
%% A packet identifier is a non-zero two-byte integer.
%% Test that the server wraps around the packet identifier.
many_qos1_messages(Config) ->
    Topic = ClientId = atom_to_binary(?FUNCTION_NAME),
    NumMsgs = 16#ffff + 100,
    C = connect(ClientId, Config, 0, [{retry_interval, 600},
                                      {max_inflight, NumMsgs div 8}]),
    {ok, _, [1]} = emqtt:subscribe(C, {Topic, qos1}),
    Payloads = lists:map(fun integer_to_binary/1, lists:seq(1, NumMsgs)),
    Self = self(),
    Target = lists:last(Payloads),
    lists:foreach(fun(P) ->
                          Cb = {fun(T, _) when T == Target ->
                                        Self ! proceed;
                                   (_, _) ->
                                        ok
                                end, [P]},
                          ok = emqtt:publish_async(C, Topic, P, qos1, Cb)
                  end, Payloads),
    receive
        proceed -> ok
    after 300_000 ->
            ct:fail("message to proceed never received")
    end,
    ok = expect_publishes(C, Topic, Payloads),
    ok = emqtt:disconnect(C).

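%% Hedged sketch of the wrap-around rule described above: packet identifiers
%% are 16-bit and must not be zero, so the successor of 16#FFFF is 1.
%% Illustrative only; emqtt manages packet identifiers internally.
next_packet_id_example(16#FFFF) ->
    1;
next_packet_id_example(Id) when is_integer(Id), Id >= 1, Id < 16#FFFF ->
    Id + 1.
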
%% This test is mostly interesting in mixed version mode where feature flag
%% rabbit_mqtt_qos0_queue is disabled and therefore a classic queue gets created.
rabbit_mqtt_qos0_queue(Config) ->
    Topic = atom_to_binary(?FUNCTION_NAME),

    %% Place MQTT subscriber process on new node in mixed version.
    Sub = connect(<<"subscriber">>, Config),
    {ok, _, [0]} = emqtt:subscribe(Sub, Topic, qos0),

    %% Place MQTT publisher process on old node in mixed version.
    Pub = connect(<<"publisher">>, Config, 1, []),

    Msg = <<"msg">>,
    ok = emqtt:publish(Pub, Topic, Msg, qos0),
    ok = expect_publishes(Sub, Topic, [Msg]),

    ok = emqtt:disconnect(Sub),
    ok = emqtt:disconnect(Pub).

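%% Hedged aside: when debugging the mixed-version behaviour described above, it
%% can help to inspect which queue type actually backs the subscription
%% (rabbit_mqtt_qos0_queue when the feature flag is enabled, a classic queue
%% otherwise). One possible way, reusing this suite's imports; the "type"
%% column is an assumption for illustration:
list_queue_types_example(Config) ->
    rabbitmqctl_list(Config, 0, ["list_queues", "name", "type", "--no-table-headers"]).
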
rabbit_mqtt_qos0_queue_kill_node(Config) ->
    Topic1 = <<"t/1">>,
    Topic2 = <<"t/2">>,
    Pub = connect(<<"publisher">>, Config, 2, []),

    SubscriberId = <<"subscriber">>,
    Sub0 = connect(SubscriberId, Config, 0, []),
    {ok, _, [0]} = emqtt:subscribe(Sub0, Topic1, qos0),
    ok = await_metadata_store_consistent(Config, 2),
    ok = emqtt:publish(Pub, Topic1, <<"m0">>, qos0),
    ok = expect_publishes(Sub0, Topic1, [<<"m0">>]),

    process_flag(trap_exit, true),
    ok = rabbit_ct_broker_helpers:kill_node(Config, 0),
    ok = await_exit(Sub0),
    %% Wait to run rabbit_amqqueue:on_node_down/1 on both live nodes.
    timer:sleep(500),
    %% Re-connect to a live node with the same MQTT client ID.
    Sub1 = connect(SubscriberId, Config, 1, []),
    {ok, _, [0]} = emqtt:subscribe(Sub1, Topic2, qos0),
    ok = await_metadata_store_consistent(Config, 2),
ok = emqtt:publish(Pub, Topic2, <<"m1">>, qos0),
|
|
|
|
ok = expect_publishes(Sub1, Topic2, [<<"m1">>]),
|
|
|
|
%% Since we started a new clean session, the previous subscription should have been deleted.
|
|
|
|
ok = emqtt:publish(Pub, Topic1, <<"m2">>, qos0),
|
|
|
|
receive {publish, _} = Publish -> ct:fail({unexpected, Publish})
|
|
|
|
after 300 -> ok
|
|
|
|
end,
|
Overwrite rabbit_mqtt_qos0_queue record from crashed node
When a node is shut down cleanly, the rabbit_mqtt_qos0_queue record is
removed from Mnesia.
When a node crashes and subsequently reboots the new node incarnation
removes the old rabbit_mqtt_qos0_queue record from Mnesia (via
rabbit_mqtt_qos0_queue:recover/2)
However, when a node crashes, the rabbit_mqtt_qos0_queue will be removed
from Mnesia table rabbit_queue, but will still be present in table
rabbit_durable_queue on the other live nodes.
Prior to this commit, when the same MQTT client (i.e. same MQTT client
ID) re-connects from the crashed node to another live node and
re-subscribes, the following error occurred:
```
[info] <0.43155.0> Accepted MQTT connection 10.105.0.18:60508 -> 10.105.0.10:1883 for client ID nodered_24e214feb018a232
[debug] <0.43155.0> Received a SUBSCRIBE for topic(s) [{mqtt_topic,
[debug] <0.43155.0> <<"as923/gateway/+/command/#">>,0}]
[error] <0.43155.0> Failed to declare queue 'mqtt-subscription-nodered_24e214feb018a232qos0' in vhost '/': {absent,
[error] <0.43155.0> {amqqueue,
[error] <0.43155.0> {resource,
[error] <0.43155.0> <<"/">>,
[error] <0.43155.0> queue,
[error] <0.43155.0> <<"mqtt-subscription-nodered_24e214feb018a232qos0">>},
[error] <0.43155.0> true,
[error] <0.43155.0> false,
[error] <0.43155.0> <15486.32690.0>,
[error] <0.43155.0> [],
[error] <0.43155.0> <15486.32690.0>,
[error] <0.43155.0> [],
[error] <0.43155.0> [],
[error] <0.43155.0> [],
[error] <0.43155.0> [{vhost,
[error] <0.43155.0> <<"/">>},
[error] <0.43155.0> {name,
[error] <0.43155.0> <<"ha-all-mqtt">>},
[error] <0.43155.0> {pattern,
[error] <0.43155.0> <<"^mqtt-">>},
[error] <0.43155.0> {'apply-to',
[error] <0.43155.0> <<"all">>},
[error] <0.43155.0> {definition,
[error] <0.43155.0> [{<<"ha-mode">>,
[error] <0.43155.0> <<"all">>}]},
[error] <0.43155.0> {priority,
[error] <0.43155.0> 0}],
[error] <0.43155.0> undefined,
[error] <0.43155.0> [],
[error] <0.43155.0> undefined,
[error] <0.43155.0> live,
[error] <0.43155.0> 0,
[error] <0.43155.0> [],
[error] <0.43155.0> <<"/">>,
[error] <0.43155.0> #{user =>
[error] <0.43155.0> <<"iottester">>},
[error] <0.43155.0> rabbit_mqtt_qos0_queue,
[error] <0.43155.0> #{}},
[error] <0.43155.0> nodedown}
[error] <0.43155.0> MQTT protocol error on connection 10.105.0.18:60508 -> 10.105.0.10:1883: subscribe_error
```
This commit fixes this error, allowing an MQTT client that connects with CleanSession=true and
subscribes with QoS 0 to re-connect and re-subscribe to another live
node if the original Rabbit node crashes.
Reported in https://groups.google.com/g/rabbitmq-users/c/pxgy0QiwilM/m/LkJQ-3DyBgAJ
2023-12-22 00:15:27 +08:00
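A minimal sketch of the overwrite idea, with hypothetical names: store/1 stands in for whatever persists the queue record, and amqqueue:get_pid/1 and rabbit_nodes:list_running/0 are assumed to behave as their names suggest; the real logic lives in rabbit_mqtt_qos0_queue:declare/2:
```
%% Sketch (hypothetical): if the existing record's owner node is down,
%% treat the record as stale and overwrite it instead of returning
%% {absent, Q, nodedown} to the subscribing client.
declare_sketch(NewQ) ->
    QName = amqqueue:get_name(NewQ),
    case rabbit_amqqueue:lookup(QName) of
        {ok, OldQ} ->
            OwnerNode = node(amqqueue:get_pid(OldQ)),
            case lists:member(OwnerNode, rabbit_nodes:list_running()) of
                true  -> {existing, OldQ};
                false -> store(NewQ)  %% overwrite the stale record
            end;
        {error, not_found} ->
            store(NewQ)
    end.
```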
|
|
|
|
|
|
|
ok = rabbit_ct_broker_helpers:start_node(Config, 0),
|
|
|
|
ok = rabbit_ct_broker_helpers:kill_node(Config, 1),
|
|
|
|
%% This time, do not wait.
|
|
|
|
%% rabbit_amqqueue:on_node_down/1 may or may not have run.
|
|
|
|
Sub2 = connect(SubscriberId, Config, 2, []),
|
|
|
|
{ok, _, [0]} = emqtt:subscribe(Sub2, Topic2, qos0),
|
|
|
|
ok = emqtt:publish(Pub, Topic2, <<"m3">>, qos0),
|
|
|
|
ok = expect_publishes(Sub2, Topic2, [<<"m3">>]),
|
|
|
|
|
|
|
|
ok = emqtt:disconnect(Sub2),
|
|
|
|
ok = emqtt:disconnect(Pub),
|
|
|
|
ok = rabbit_ct_broker_helpers:start_node(Config, 1),
|
|
|
|
?assertEqual([], rpc(Config, rabbit_db_binding, get_all, [])).
|
|
|
|
|
2024-08-28 18:27:46 +08:00
|
|
|
cli_close_all_connections(Config) ->
|
|
|
|
ClientId = atom_to_binary(?FUNCTION_NAME),
|
|
|
|
C = connect(ClientId, Config),
|
|
|
|
process_flag(trap_exit, true),
|
|
|
|
{ok, String} = rabbit_ct_broker_helpers:rabbitmqctl(
|
|
|
|
Config, 0, ["close_all_connections", "bye"]),
|
|
|
|
?assertEqual(match, re:run(String, "Closing .* reason: bye", [{capture, none}])),
|
|
|
|
ok = await_exit(C).
|
|
|
|
|
|
|
|
cli_close_all_user_connections(Config) ->
|
|
|
|
ClientId = atom_to_binary(?FUNCTION_NAME),
|
|
|
|
C = connect(ClientId, Config),
|
|
|
|
process_flag(trap_exit, true),
|
|
|
|
{ok, String} = rabbit_ct_broker_helpers:rabbitmqctl(
|
|
|
|
Config, 0, ["close_all_user_connections","guest", "bye"]),
|
|
|
|
?assertEqual(match, re:run(String, "Closing .* reason: bye", [{capture, none}])),
|
|
|
|
ok = await_exit(C).
|
|
|
|
|
2022-12-20 02:03:18 +08:00
|
|
|
%% Test that MQTT connection can be listed and closed via the rabbitmq_management plugin.
|
|
|
|
management_plugin_connection(Config) ->
|
|
|
|
KeepaliveSecs = 99,
|
|
|
|
ClientId = atom_to_binary(?FUNCTION_NAME),
|
|
|
|
Node = atom_to_binary(get_node_config(Config, 0, nodename)),
|
|
|
|
|
2023-12-14 19:35:51 +08:00
|
|
|
C1 = connect(ClientId, Config, [{keepalive, KeepaliveSecs}]),
|
2024-11-13 20:25:29 +08:00
|
|
|
FilterFun =
|
|
|
|
fun(#{client_properties := #{client_id := CId}})
|
|
|
|
when CId == ClientId -> true;
|
|
|
|
(_) -> false
|
|
|
|
end,
|
|
|
|
%% Sometimes connections remain open from other testcases,
|
|
|
|
%% let's match the one we're looking for
|
|
|
|
eventually(
|
|
|
|
?_assertMatch(
|
|
|
|
[_],
|
|
|
|
lists:filter(FilterFun, http_get(Config, "/connections"))),
|
|
|
|
1000, 10),
|
2022-12-20 02:03:18 +08:00
|
|
|
[#{client_properties := #{client_id := ClientId},
|
|
|
|
timeout := KeepaliveSecs,
|
|
|
|
node := Node,
|
2024-11-13 20:25:29 +08:00
|
|
|
name := ConnectionName}] =
|
|
|
|
lists:filter(FilterFun, http_get(Config, "/connections")),
|
2022-12-20 02:03:18 +08:00
|
|
|
process_flag(trap_exit, true),
|
|
|
|
http_delete(Config,
|
Support AMQP 1.0 natively
## What
Similar to Native MQTT in #5895, this commit implements Native AMQP 1.0.
By "native", we mean do not proxy via AMQP 0.9.1 anymore.
## Why
Native AMQP 1.0 comes with the following major benefits:
1. Similar to Native MQTT, this commit provides better throughput, latency,
scalability, and resource usage for AMQP 1.0.
See https://blog.rabbitmq.com/posts/2023/03/native-mqtt for native MQTT improvements.
See further below for some benchmarks.
2. Since AMQP 1.0 is not limited anymore by the AMQP 0.9.1 protocol,
this commit allows implementing more AMQP 1.0 features in the future.
Some features are already implemented in this commit (see next section).
3. Simpler, more understandable, and more maintainable code.
Native AMQP 1.0 as implemented in this commit has the
following major benefits compared to AMQP 0.9.1:
4. Memory and disk alarms will only stop accepting incoming TRANSFER frames.
New connections can still be created to consume from RabbitMQ to empty queues.
5. Due to 4., there is no more need for separate connections for publishers and
consumers, as we currently recommend for AMQP 0.9.1, which potentially
halves the number of physical TCP connections.
6. When a single connection sends to multiple target queues, a single
slow target queue won't block the entire connection.
Publisher can still send data quickly to all other target queues.
7. A publisher can request whether it wants publisher confirmation on a per-message basis.
In AMQP 0.9.1 publisher confirms are configured per channel only.
8. Consumers can change their "prefetch count" dynamically which isn't
possible in our AMQP 0.9.1 implementation. See #10174
9. AMQP 1.0 is an extensible protocol
This commit also fixes dozens of bugs present in the AMQP 1.0 plugin in
RabbitMQ 3.x - most of which cannot be backported due to the complexity
and limitations of the old 3.x implementation.
This commit contains breaking changes and is therefore targeted for RabbitMQ 4.0.
## Implementation details
1. Breaking change: With Native AMQP, the behaviour of
```
Convert AMQP 0.9.1 message headers to application properties for an AMQP 1.0 consumer
amqp1_0.convert_amqp091_headers_to_app_props = false | true (default false)
Convert AMQP 1.0 Application Properties to AMQP 0.9.1 headers
amqp1_0.convert_app_props_to_amqp091_headers = false | true (default false)
```
will break because we always convert according to the message container conversions.
For example, AMQP 0.9.1 x-headers will go into message-annotations instead of application properties.
Also, `false` won’t be respected since we always convert the headers with message containers.
2. Remove rabbit_queue_collector
rabbit_queue_collector is responsible for synchronously deleting
exclusive queues. Since the AMQP 1.0 plugin never creates exclusive
queues, rabbit_queue_collector doesn't need to be started in the first
place. This will save 1 Erlang process per AMQP 1.0 connection.
3. 7 processes per connection + 1 process per session in this commit instead of
7 processes per connection + 15 processes per session in 3.x
Supervision hierarchy got re-designed.
4. Use 1 writer process per AMQP 1.0 connection
AMQP 0.9.1 uses a separate rabbit_writer Erlang process per AMQP 0.9.1 channel.
Prior to this commit, AMQP 1.0 used a separate rabbit_amqp1_0_writer process per AMQP 1.0 session.
Advantage of single writer proc per session (prior to this commit):
* High parallelism for serialising packets if multiple sessions within
a connection write heavily at the same time.
This commit uses a single writer process per AMQP 1.0 connection that is
shared across all AMQP 1.0 sessions.
Advantages of single writer proc per connection (this commit):
* Lower memory usage with hundreds of thousands of AMQP 1.0 sessions
* Less TCP and IP header overhead given that the single writer process
can accumulate across all sessions bytes before flushing the socket.
In other words, this commit decides that a reader / writer process pair
per AMQP 1.0 connection is good enough for bi-directional TRANSFER flows.
Having a writer per session is too heavy.
We still ensure high throughput by having separate reader, writer, and
session processes.
5. Transform rabbit_amqp1_0_writer into gen_server
Why:
Prior to this commit, when clicking on the AMQP 1.0 writer process in
observer, the process crashed.
Instead of handling all these debug messages of the sys module, it's better
to implement a gen_server.
There is no advantage of using a special OTP process over gen_server
for the AMQP 1.0 writer.
gen_server also provides cleaner format status output.
How:
Message callbacks return a timeout of 0.
After all messages in the inbox are processed, the timeout message is
handled by flushing any pending bytes.
6. Remove stats timer from writer
AMQP 1.0 connections haven't emitted any stats previously.
7. When there are contiguous queue confirmations in the session process
mailbox, batch them. When the confirmations are sent to the publisher, a
single DISPOSITION frame is sent for contiguously confirmed delivery
IDs.
This approach should be good enough (see the range-coalescing sketch
after this commit message). However, it's suboptimal in
scenarios where contiguous delivery IDs that need confirmations are rare,
for example:
* There are multiple links in the session with different sender
settlement modes and sender publishes across these links interleaved.
* sender settlement mode is mixed and sender publishes interleaved settled
and unsettled TRANSFERs.
8. Introduce credit API v2
Why:
The AMQP 0.9.1 credit extension, which is to be removed in 4.0, was poorly
designed, since basic.credit is a synchronous call into the queue process
that blocks the entire AMQP 1.0 session process.
How:
Change the interactions between queue clients and queue server
implementations:
* Clients only request a credit reply if the FLOW's `echo` field is set
* Include all link flow control state held by the queue process into a
new credit_reply queue event:
* `available` after the queue sends any deliveries
* `link-credit` after the queue sends any deliveries
* `drain` which allows us to combine the old queue events
send_credit_reply and send_drained into a single new queue event
credit_reply.
* Include the consumer tag into the credit_reply queue event such that
the AMQP 1.0 session process can process any credit replies
asynchronously.
Link flow control state `delivery-count` also moves to the queue processes.
The new interactions are hidden behind feature flag credit_api_v2 to
allow for rolling upgrades from 3.13 to 4.0.
9. Use serial number arithmetic in quorum queues and session process.
10. Completely bypass the rabbit_limiter module for AMQP 1.0
flow control. The goal is to eventually remove the rabbit_limiter module
in 4.0 since AMQP 0.9.1 global QoS will be unsupported in 4.0. This
commit lifts the AMQP 1.0 link flow control logic out of rabbit_limiter
into rabbit_queue_consumers.
11. Fix credit bug for streams:
AMQP 1.0 settlements shouldn't top up link credit,
only FLOW frames should top up link credit.
12. Allow sender settle mode unsettled for streams
since AMQP 1.0 acknowledgements to streams are no-ops (currently).
13. Fix AMQP 1.0 client bugs
Auto renewing credits should not be related to settling TRANSFERs.
Remove field link_credit_unsettled as it was wrong and confusing.
Prior to this commit auto renewal did not work when the sender uses
sender settlement mode settled.
14. Fix AMQP 1.0 client bugs
The wrong outdated Link was passed to function auto_flow/2
15. Use osiris chunk iterator
Only hold messages of uncompressed sub batches in memory if consumer
doesn't have sufficient credits.
Compressed sub batches are skipped for non-Stream-protocol consumers.
16. Fix incoming link flow control
Always use confirms between AMQP 1.0 queue clients and queue servers.
As already done internally by rabbit_fifo_client and
rabbit_stream_queue, use confirms for classic queues as well.
17. Include link handle into correlation when publishing messages to target queues
such that session process can correlate confirms from target queues to
incoming links.
18. Only grant more credits to publishers if publisher hasn't sufficient credits
anymore and there are not too many unconfirmed messages on the link.
19. Completely ignore `block` and `unblock` queue actions and RabbitMQ credit flow
between classic queue process and session process.
20. Link flow control is independent between links.
A client can refer to a queue or to an exchange with multiple
dynamically added target queues. Multiple incoming links can also fan
in to the same queue. Whatever the link topology looks like, this
commit ensures that each link is only granted more credit if that link
isn't overloaded.
21. A connection or a session can send to many different queues.
In AMQP 0.9.1, a single slow queue will lead to the entire channel, and
then entire connection being blocked.
This commit makes sure that a single slow queue from one link won't slow
down sending on other links.
For example, having link A sending to a local classic queue and
link B sending to 5 replica quorum queue, link B will naturally
grant credits slower than link A. So, despite the quorum queue being
slower in confirming messages, the same AMQP 1.0 connection and session
can still pump data very fast into the classic queue.
22. If cluster wide memory or disk alarm occurs.
Each session sends a FLOW with incoming-window to 0 to sending client.
If sending clients don’t obey, force disconnect the client.
If cluster wide memory alarm clears:
Each session resumes with a FLOW defaulting to initial incoming-window.
23. All operations apart from publishing TRANSFERs to RabbitMQ can continue during cluster-wide alarms,
specifically, attaching consumers and consuming, i.e. emptying queues.
There is no need for separate AMQP 1.0 connections for publishers and consumers as recommended in our AMQP 0.9.1 implementation.
24. Flow control summary:
* If queue becomes bottleneck, that’s solved by slowing down individual sending links (AMQP 1.0 link flow control).
* If session becomes bottleneck (more unlikely), that’s solved by AMQP 1.0 session flow control.
* If connection becomes bottleneck, it naturally won’t read fast enough from the socket causing TCP backpressure being applied.
Nowhere will RabbitMQ internal credit based flow control (i.e. module credit_flow) be used on the incoming AMQP 1.0 message path.
25. Register AMQP sessions
Prefer local-only pg over our custom pg_local implementation as
pg is a better process group implementation than pg_local.
pg_local was identified as bottleneck in tests where many MQTT clients were disconnected at once.
26. Start a local-only pg when Rabbit boots:
> A scope can be kept local-only by using a scope name that is unique cluster-wide, e.g. the node name:
> pg:start_link(node()).
Register AMQP 1.0 connections and sessions with pg (see the pg sketch after this commit message).
In future we should remove pg_local and instead use the new local-only
pg for all registered processes such as AMQP 0.9.1 connections and channels.
27. Requeue messages if link detached
Although the spec allows settling delivery IDs on detached links, RabbitMQ does not respect the 'closed'
field of the DETACH frame and therefore handles every DETACH frame as closed. Since the link is closed,
we expect every outstanding delivery to be requeued.
In addition to consumer cancellation, detaching a link therefore causes in flight deliveries to be requeued.
Note that this behaviour is different from merely consumer cancellation in AMQP 0.9.1:
"After a consumer is cancelled there will be no future deliveries dispatched to it. Note that there can
still be "in flight" deliveries dispatched previously. Cancelling a consumer will neither discard nor requeue them."
[https://www.rabbitmq.com/consumers.html#unsubscribing]
An AMQP receiver can first drain, and then detach, to prevent "in flight" deliveries.
28. Init AMQP session with BEGIN frame
Similar to how there can't be an MQTT processor without a CONNECT
frame, there can't be an AMQP session without a BEGIN frame.
This allows having strict dialyzer types for session flow control
fields (i.e. not allowing 'undefined').
29. Move serial_number to AMQP 1.0 common lib
such that it can be used by both AMQP 1.0 server and client
30. Fix AMQP client to do serial number arithmetic.
31. AMQP client: Differentiate between delivery-id and transfer-id for better
understandability.
32. Fix link flow control in classic queues
This commit fixes
```
java -jar target/perf-test.jar -ad false -f persistent -u cq -c 3000 -C 1000000 -y 0
```
followed by
```
./omq -x 0 amqp -T /queue/cq -D 1000000 --amqp-consumer-credits 2
```
Prior to this commit (and on RabbitMQ 3.x), consuming would halt after around
8 - 10,000 messages.
The bug was that in flight messages from classic queue process to
session process were not taken into account when topping up credit to
the classic queue process.
Fixes #2597
The solution to this bug (and a much cleaner design anyway independent of
this bug) is that queues should hold all link flow control state including
the delivery-count.
Hence, when credit API v2 is used the delivery-count will be held by the
classic queue process, quorum queue process, and stream queue client
instead of managing the delivery-count in the session.
33. The double level crediting between (a) session process and
rabbit_fifo_client, and (b) rabbit_fifo_client and rabbit_fifo was
removed. Therefore, instead of managing 3 separate delivery-counts (i. session,
ii. rabbit_fifo_client, iii. rabbit_fifo), only 1 delivery-count is used
in rabbit_fifo. This is a big simplification.
34. This commit fixes quorum queues without bumping the machine version
nor introducing new rabbit_fifo commands.
Whether credit API v2 is used is solely determined at link attachment time
depending on whether feature flag credit_api_v2 is enabled.
Even when that feature flag will be enabled later on, this link will
keep using credit API v1 until detached (or the node is shut down).
Eventually, after feature flag credit_api_v2 has been enabled and a
subsequent rolling upgrade, all links will use credit API v2.
This approach is safe and simple.
The 2 alternatives to move delivery-count from the session process to the
queue processes would have been:
i. Explicit feature flag credit_api_v2 migration function
* Can use a gen_server:call and only finish migration once all delivery-counts were migrated.
Cons:
* Extra new message format just for migration is required.
* Risky as migration will fail if a target queue doesn’t reply.
ii. Session always includes DeliveryCountSnd when crediting to the queue:
Cons:
* 2 delivery counts would be held simultaneously in session proc and queue proc;
could be solved by deleting the session proc’s delivery-count for credit-reply
* What happens if the receiver doesn’t provide credit for a very long time? Is that a problem?
35. Support stream filtering in AMQP 1.0 (by @acogoluegnes)
Use the x-stream-filter-value message annotation
to carry the filter value in a published message.
Use the rabbitmq:stream-filter and rabbitmq:stream-match-unfiltered
filters when creating a receiver that wants to filter
out messages from a stream.
36. Remove credit extension from AMQP 0.9.1 client
37. Support maintenance mode closing AMQP 1.0 connections.
38. Remove AMQP 0.9.1 client dependency from AMQP 1.0 implementation.
39. Move AMQP 1.0 plugin to the core. AMQP 1.0 is enabled by default.
The old rabbitmq_amqp1_0 plugin will be kept as a no-op plugin to prevent
deployment tools that execute the following from failing:
```
rabbitmq-plugins enable rabbitmq_amqp1_0
rabbitmq-plugins disable rabbitmq_amqp1_0
```
40. Breaking change: Remove CLI command `rabbitmqctl list_amqp10_connections`.
Instead, list both AMQP 0.9.1 and AMQP 1.0 connections in `list_connections`:
```
rabbitmqctl list_connections protocol
Listing connections ...
protocol
{1, 0}
{0,9,1}
```
## Benchmarks
### Throughput & Latency
Setup:
* Single node Ubuntu 22.04
* Erlang 26.1.1
Start RabbitMQ:
```
make run-broker PLUGINS="rabbitmq_management rabbitmq_amqp1_0" FULL=1 RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS="+S 3"
```
Predeclare durable classic queue cq1, durable quorum queue qq1, durable stream queue sq1.
Start client:
https://github.com/ssorj/quiver
https://hub.docker.com/r/ssorj/quiver/tags (digest 453a2aceda64)
```
docker run -it --rm --add-host host.docker.internal:host-gateway ssorj/quiver:latest
bash-5.1# quiver --version
quiver 0.4.0-SNAPSHOT
```
1. Classic queue
```
quiver //host.docker.internal//amq/queue/cq1 --durable --count 1m --duration 10m --body-size 12 --credit 1000
```
This commit:
```
Count ............................................. 1,000,000 messages
Duration ............................................... 73.8 seconds
Sender rate .......................................... 13,548 messages/s
Receiver rate ........................................ 13,547 messages/s
End-to-end rate ...................................... 13,547 messages/s
Latencies by percentile:
0% ........ 0 ms 90.00% ........ 9 ms
25% ........ 2 ms 99.00% ....... 14 ms
50% ........ 4 ms 99.90% ....... 17 ms
100% ....... 26 ms 99.99% ....... 24 ms
```
RabbitMQ 3.x (main branch as of 30 January 2024):
```
---------------------- Sender ----------------------- --------------------- Receiver ---------------------- --------
Time [s] Count [m] Rate [m/s] CPU [%] RSS [M] Time [s] Count [m] Rate [m/s] CPU [%] RSS [M] Lat [ms]
----------------------------------------------------- ----------------------------------------------------- --------
2.1 130,814 65,342 6 73.6 2.1 3,217 1,607 0 8.0 511
4.1 163,580 16,367 2 74.1 4.1 3,217 0 0 8.0 0
6.1 229,114 32,767 3 74.1 6.1 3,217 0 0 8.0 0
8.1 261,880 16,367 2 74.1 8.1 67,874 32,296 8 8.2 7,662
10.1 294,646 16,367 2 74.1 10.1 67,874 0 0 8.2 0
12.1 360,180 32,734 3 74.1 12.1 67,874 0 0 8.2 0
14.1 392,946 16,367 3 74.1 14.1 68,604 365 0 8.2 12,147
16.1 458,480 32,734 3 74.1 16.1 68,604 0 0 8.2 0
18.1 491,246 16,367 2 74.1 18.1 68,604 0 0 8.2 0
20.1 556,780 32,767 4 74.1 20.1 68,604 0 0 8.2 0
22.1 589,546 16,375 2 74.1 22.1 68,604 0 0 8.2 0
receiver timed out
24.1 622,312 16,367 2 74.1 24.1 68,604 0 0 8.2 0
quiver: error: PlanoProcessError: Command 'quiver-arrow receive //host.docker.internal//amq/queue/cq1 --impl qpid-proton-c --duration 10m --count 1m --rate 0 --body-size 12 --credit 1000 --transaction-size 0 --timeout 10 --durable --output /tmp/quiver-otujr23y' returned non-zero exit status 1.
Traceback (most recent call last):
File "/usr/local/lib/quiver/python/quiver/pair.py", line 144, in run
_plano.wait(receiver, check=True)
File "/usr/local/lib/quiver/python/plano/main.py", line 1243, in wait
raise PlanoProcessError(proc)
plano.main.PlanoProcessError: Command 'quiver-arrow receive //host.docker.internal//amq/queue/cq1 --impl qpid-proton-c --duration 10m --count 1m --rate 0 --body-size 12 --credit 1000 --transaction-size 0 --timeout 10 --durable --output /tmp/quiver-otujr23y' returned non-zero exit status 1.
```
2. Quorum queue:
```
quiver //host.docker.internal//amq/queue/qq1 --durable --count 1m --duration 10m --body-size 12 --credit 1000
```
This commit:
```
Count ............................................. 1,000,000 messages
Duration .............................................. 101.4 seconds
Sender rate ........................................... 9,867 messages/s
Receiver rate ......................................... 9,868 messages/s
End-to-end rate ....................................... 9,865 messages/s
Latencies by percentile:
0% ....... 11 ms 90.00% ....... 23 ms
25% ....... 15 ms 99.00% ....... 28 ms
50% ....... 18 ms 99.90% ....... 33 ms
100% ....... 49 ms 99.99% ....... 47 ms
```
RabbitMQ 3.x:
```
---------------------- Sender ----------------------- --------------------- Receiver ---------------------- --------
Time [s] Count [m] Rate [m/s] CPU [%] RSS [M] Time [s] Count [m] Rate [m/s] CPU [%] RSS [M] Lat [ms]
----------------------------------------------------- ----------------------------------------------------- --------
2.1 130,814 65,342 9 69.9 2.1 18,430 9,206 5 7.6 1,221
4.1 163,580 16,375 5 70.2 4.1 18,867 218 0 7.6 2,168
6.1 229,114 32,767 6 70.2 6.1 18,867 0 0 7.6 0
8.1 294,648 32,734 7 70.2 8.1 18,867 0 0 7.6 0
10.1 360,182 32,734 6 70.2 10.1 18,867 0 0 7.6 0
12.1 425,716 32,767 6 70.2 12.1 18,867 0 0 7.6 0
receiver timed out
14.1 458,482 16,367 5 70.2 14.1 18,867 0 0 7.6 0
quiver: error: PlanoProcessError: Command 'quiver-arrow receive //host.docker.internal//amq/queue/qq1 --impl qpid-proton-c --duration 10m --count 1m --rate 0 --body-size 12 --credit 1000 --transaction-size 0 --timeout 10 --durable --output /tmp/quiver-b1gcup43' returned non-zero exit status 1.
Traceback (most recent call last):
File "/usr/local/lib/quiver/python/quiver/pair.py", line 144, in run
_plano.wait(receiver, check=True)
File "/usr/local/lib/quiver/python/plano/main.py", line 1243, in wait
raise PlanoProcessError(proc)
plano.main.PlanoProcessError: Command 'quiver-arrow receive //host.docker.internal//amq/queue/qq1 --impl qpid-proton-c --duration 10m --count 1m --rate 0 --body-size 12 --credit 1000 --transaction-size 0 --timeout 10 --durable --output /tmp/quiver-b1gcup43' returned non-zero exit status 1.
```
3. Stream:
```
quiver-arrow send //host.docker.internal//amq/queue/sq1 --durable --count 1m -d 10m --summary --verbose
```
This commit:
```
Count ............................................. 1,000,000 messages
Duration ................................................ 8.7 seconds
Message rate ........................................ 115,154 messages/s
```
RabbitMQ 3.x:
```
Count ............................................. 1,000,000 messages
Duration ............................................... 21.2 seconds
Message rate ......................................... 47,232 messages/s
```
### Memory usage
Start RabbitMQ:
```
ERL_MAX_PORTS=3000000 RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS="+P 3000000 +S 6" make run-broker PLUGINS="rabbitmq_amqp1_0" FULL=1 RABBITMQ_CONFIG_FILE="rabbitmq.conf"
```
```
/bin/cat rabbitmq.conf
tcp_listen_options.sndbuf = 2048
tcp_listen_options.recbuf = 2048
vm_memory_high_watermark.relative = 0.95
vm_memory_high_watermark_paging_ratio = 0.95
loopback_users = none
```
Create 50k connections with 2 sessions per connection, i.e. 100k session in total:
```go
package main
import (
"context"
"log"
"time"
"github.com/Azure/go-amqp"
)
func main() {
for i := 0; i < 50000; i++ {
conn, err := amqp.Dial(context.TODO(), "amqp://nuc", &amqp.ConnOptions{SASLType: amqp.SASLTypeAnonymous()})
if err != nil {
log.Fatal("dialing AMQP server:", err)
}
_, err = conn.NewSession(context.TODO(), nil)
if err != nil {
log.Fatal("creating AMQP session:", err)
}
_, err = conn.NewSession(context.TODO(), nil)
if err != nil {
log.Fatal("creating AMQP session:", err)
}
}
log.Println("opened all connections")
time.Sleep(5 * time.Hour)
}
```
This commit:
```
erlang:memory().
[{total,4586376480},
{processes,4025898504},
{processes_used,4025871040},
{system,560477976},
{atom,1048841},
{atom_used,1042841},
{binary,233228608},
{code,21449982},
{ets,108560464}]
erlang:system_info(process_count).
450289
```
7 procs per connection + 1 proc per session.
(7 + 2*1) * 50,000 = 450,000 procs
RabbitMQ 3.x:
```
erlang:memory().
[{total,15168232704},
{processes,14044779256},
{processes_used,14044755120},
{system,1123453448},
{atom,1057033},
{atom_used,1052587},
{binary,236381264},
{code,21790238},
{ets,391423744}]
erlang:system_info(process_count).
1850309
```
7 procs per connection + 15 per session
(7 + 2*15) * 50,000 = 1,850,000 procs
50k connections + 100k session require
with this commit: 4.5 GB
in RabbitMQ 3.x: 15 GB
## Future work
1. More efficient parser and serializer
2. TODO in mc_amqp: Do not store the parsed message on disk.
3. Implement both AMQP HTTP extension and AMQP management extension to allow AMQP
clients to create RabbitMQ objects (queues, exchanges, ...).
2023-07-21 18:29:07 +08:00
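Two of the points above lend themselves to short sketches. Point 7: coalescing contiguous delivery ids into {First, Last} ranges, one DISPOSITION per range. This sketch assumes a sorted input and ignores the 32-bit serial-number wraparound that the real code must handle (point 9):
```
%% Coalesce a sorted list of delivery ids into contiguous ranges,
%% one DISPOSITION frame per {First, Last} range.
ranges([]) -> [];
ranges([Id | Ids]) -> ranges(Ids, Id, Id).

ranges([Id | Ids], First, Last) when Id =:= Last + 1 ->
    ranges(Ids, First, Id);
ranges([Id | Ids], First, Last) ->
    [{First, Last} | ranges(Ids, Id, Id)];
ranges([], First, Last) ->
    [{First, Last}].
```
For example, ranges([1,2,3,7,8]) returns [{1,3},{7,8}], i.e. two DISPOSITION frames instead of five. Points 25/26: a pg scope named after the node is effectively local-only because node names are unique cluster-wide; the group key below is a made-up example, not the real registration scheme:
```
%% The node name is unique cluster-wide, so this pg scope never merges
%% with a scope on another node.
{ok, _Pg} = pg:start_link(node()),
ok = pg:join(node(), {amqp_session, ConnectionPid}, SessionPid),
Sessions = pg:get_local_members(node(), {amqp_session, ConnectionPid}).
```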
|
|
|
"/connections/" ++ binary_to_list(uri_string:quote(ConnectionName)),
|
2022-12-20 02:03:18 +08:00
|
|
|
?NO_CONTENT),
|
2023-12-14 19:35:51 +08:00
|
|
|
await_exit(C1),
|
2024-11-13 20:25:29 +08:00
|
|
|
eventually(
|
|
|
|
?_assertMatch(
|
|
|
|
[],
|
|
|
|
lists:filter(FilterFun, http_get(Config, "/connections"))),
|
|
|
|
1000, 10),
|
2023-12-14 19:35:51 +08:00
|
|
|
eventually(?_assertEqual([], all_connection_pids(Config)), 500, 3),
|
2025-01-02 06:54:10 +08:00
|
|
|
|
2023-12-14 19:35:51 +08:00
|
|
|
C2 = connect(ClientId, Config, [{keepalive, KeepaliveSecs}]),
|
2024-11-13 20:25:29 +08:00
|
|
|
eventually(
|
|
|
|
?_assertMatch(
|
|
|
|
[_],
|
|
|
|
lists:filter(FilterFun, http_get(Config, "/connections"))),
|
|
|
|
1000, 10),
|
2023-12-14 19:35:51 +08:00
|
|
|
http_delete(Config,
|
|
|
|
"/connections/username/guest",
|
|
|
|
?NO_CONTENT),
|
|
|
|
await_exit(C2),
|
2024-11-13 20:25:29 +08:00
|
|
|
eventually(
|
|
|
|
?_assertMatch(
|
|
|
|
[],
|
|
|
|
lists:filter(FilterFun, http_get(Config, "/connections"))),
|
|
|
|
1000, 10),
|
2022-12-27 18:00:33 +08:00
|
|
|
eventually(?_assertEqual([], all_connection_pids(Config)), 500, 3).
|
2022-12-20 02:03:18 +08:00
|
|
|
|
|
|
|
management_plugin_enable(Config) ->
|
|
|
|
ok = rabbit_ct_broker_helpers:disable_plugin(Config, 0, rabbitmq_management),
|
|
|
|
ok = rabbit_ct_broker_helpers:disable_plugin(Config, 0, rabbitmq_management_agent),
|
|
|
|
|
2023-01-03 01:02:43 +08:00
|
|
|
%% If the (web) MQTT connection is established **before** the management plugin is enabled,
|
|
|
|
%% the management plugin should still list the (web) MQTT connection.
|
2024-11-13 20:25:29 +08:00
|
|
|
ClientId = atom_to_binary(?FUNCTION_NAME),
|
|
|
|
C = connect(ClientId, Config),
|
2022-12-20 02:03:18 +08:00
|
|
|
ok = rabbit_ct_broker_helpers:enable_plugin(Config, 0, rabbitmq_management_agent),
|
|
|
|
ok = rabbit_ct_broker_helpers:enable_plugin(Config, 0, rabbitmq_management),
|
2024-11-13 20:25:29 +08:00
|
|
|
FilterFun =
|
|
|
|
fun(#{client_properties := #{client_id := CId}})
|
|
|
|
when ClientId == CId -> true;
|
|
|
|
(_) -> false
|
|
|
|
end,
|
|
|
|
%% Sometimes connections remain open from other testcases,
|
|
|
|
%% let's match the one we're looking for
|
|
|
|
eventually(
|
|
|
|
?_assertMatch(
|
|
|
|
[_],
|
|
|
|
lists:filter(FilterFun, http_get(Config, "/connections"))),
|
|
|
|
1000, 10),
|
2022-12-20 02:03:18 +08:00
|
|
|
|
|
|
|
ok = emqtt:disconnect(C).
|
|
|
|
|
2022-12-23 07:17:01 +08:00
|
|
|
%% Test that queues of type rabbit_mqtt_qos0_queue can be listed via rabbitmqctl.
|
|
|
|
cli_list_queues(Config) ->
|
|
|
|
C = connect(?FUNCTION_NAME, Config),
|
|
|
|
{ok, _, _} = emqtt:subscribe(C, <<"a/b/c">>, qos0),
|
|
|
|
|
|
|
|
Qs = rabbit_ct_broker_helpers:rabbitmqctl_list(
|
|
|
|
Config, 1,
|
|
|
|
["list_queues", "--no-table-headers",
|
|
|
|
"type", "name", "state", "durable", "auto_delete",
|
|
|
|
"arguments", "pid", "owner_pid", "messages", "exclusive_consumer_tag"
|
|
|
|
]),
|
2024-07-09 18:30:47 +08:00
|
|
|
?assertMatch([[<<"MQTT QoS 0">>, <<"mqtt-subscription-cli_list_queuesqos0">>,
|
2022-12-23 07:17:01 +08:00
|
|
|
<<"running">>, <<"true">>, <<"false">>, <<"[]">>, _, _, <<"0">>, <<"">>]],
|
|
|
|
Qs),
|
2023-01-28 02:25:57 +08:00
|
|
|
|
2022-12-23 07:17:01 +08:00
|
|
|
?assertEqual([],
|
|
|
|
rabbit_ct_broker_helpers:rabbitmqctl_list(
|
|
|
|
Config, 1, ["list_queues", "--local", "--no-table-headers"])
|
|
|
|
),
|
|
|
|
|
|
|
|
ok = emqtt:disconnect(C).
|
|
|
|
|
2023-01-03 01:02:43 +08:00
|
|
|
maintenance(Config) ->
|
2023-10-27 21:49:05 +08:00
|
|
|
C0 = connect(<<"client-0">>, Config, 0, []),
|
|
|
|
C1a = connect(<<"client-1a">>, Config, 1, []),
|
|
|
|
C1b = connect(<<"client-1b">>, Config, 1, []),
|
|
|
|
ClientsNode1 = [C1a, C1b],
|
2023-01-03 01:02:43 +08:00
|
|
|
|
2023-10-27 21:49:05 +08:00
|
|
|
timer:sleep(500),
|
2023-01-03 01:02:43 +08:00
|
|
|
|
2023-10-27 21:49:05 +08:00
|
|
|
ok = drain_node(Config, 2),
|
|
|
|
ok = revive_node(Config, 2),
|
|
|
|
timer:sleep(500),
|
|
|
|
[?assert(erlang:is_process_alive(C)) || C <- [C0, C1a, C1b]],
|
|
|
|
|
|
|
|
process_flag(trap_exit, true),
|
|
|
|
ok = drain_node(Config, 1),
|
|
|
|
[await_exit(Pid) || Pid <- ClientsNode1],
|
|
|
|
[assert_v5_disconnect_reason_code(Config, ?RC_SERVER_SHUTTING_DOWN) || _ <- ClientsNode1],
|
|
|
|
ok = revive_node(Config, 1),
|
|
|
|
?assert(erlang:is_process_alive(C0)),
|
|
|
|
|
|
|
|
ok = drain_node(Config, 0),
|
|
|
|
await_exit(C0),
|
|
|
|
assert_v5_disconnect_reason_code(Config, ?RC_SERVER_SHUTTING_DOWN),
|
|
|
|
ok = revive_node(Config, 0).
|
2023-01-03 01:02:43 +08:00
|
|
|
|
|
|
|
keepalive(Config) ->
|
|
|
|
KeepaliveSecs = 1,
|
|
|
|
KeepaliveMs = timer:seconds(KeepaliveSecs),
|
|
|
|
WillTopic = <<"will/topic">>,
|
|
|
|
WillPayload = <<"will-payload">>,
|
|
|
|
C1 = connect(?FUNCTION_NAME, Config, [{keepalive, KeepaliveSecs},
|
|
|
|
{will_topic, WillTopic},
|
|
|
|
{will_payload, WillPayload},
|
|
|
|
{will_retain, true},
|
|
|
|
{will_qos, 0}]),
|
|
|
|
ok = emqtt:publish(C1, <<"ignored">>, <<"msg">>),
|
|
|
|
|
|
|
|
%% Connection should stay up when client sends PING requests.
|
|
|
|
timer:sleep(KeepaliveMs),
|
|
|
|
?assertMatch(#{publishers := 1},
|
2023-02-22 02:24:15 +08:00
|
|
|
util:get_global_counters(Config)),
|
2023-01-03 01:02:43 +08:00
|
|
|
|
|
|
|
%% Mock the server socket so it reports that no new bytes have been received.
|
|
|
|
rabbit_ct_broker_helpers:setup_meck(Config),
|
|
|
|
Mod = rabbit_net,
|
|
|
|
ok = rpc(Config, meck, new, [Mod, [no_link, passthrough]]),
|
|
|
|
ok = rpc(Config, meck, expect, [Mod, getstat, 2, {ok, [{recv_oct, 999}]} ]),
|
|
|
|
process_flag(trap_exit, true),
|
|
|
|
|
|
|
|
%% We expect the server to enforce the keepalive by closing the connection.
|
|
|
|
eventually(?_assertMatch(#{publishers := 0},
|
2023-02-22 02:24:15 +08:00
|
|
|
util:get_global_counters(Config)),
|
Support Will Delay Interval
Previously, the Will Message could be kept in memory in the MQTT
connection process state. Upon termination, the Will Message is sent.
The new MQTT 5.0 feature Will Delay Interval requires storing the Will
Message outside of the MQTT connection process state.
The Will Message should not be stored node local because the client
could reconnect to a different node.
Storing the Will Message in Mnesia is not an option because we want to
get rid of Mnesia. Storing the Will Message in a Ra cluster or in Khepri
is only an option if the Will Payload is small as there is currently no
way in Ra to **efficiently** snapshot large binary data (Note that these
Will Messages are not consumed in a FIFO style workload like messages in
quorum queues. A Will Message needs to be stored for as long as the
Session lasts - up to 1 day by default, but could also be much longer if
RabbitMQ is configured with a higher maximum session expiry interval.)
Usually Will Payloads are small: They are just a notification that its
MQTT session ended abnormally. However, we don't know how users leverage
the Will Message feature. The MQTT protocol allows for large Will Payloads.
Therefore, the solution implemented in this commit - which should work
well enough - is storing the Will Message in a queue.
Each MQTT session which has a Session Expiry Interval and Will Delay
Interval of > 0 seconds will create a queue if the current Network
Connection ends where it stores its Will Message. The Will Message has a
message TTL set (corresponds to the Will Delay Interval) and the queue
has a queue TTL set (corresponds to the Session Expiry Interval).
If the client does not reconnect within the Will Delay Interval, the
message is dead lettered to the configured MQTT topic exchange
(amq.topic by default).
The Will Delay Interval can be set by both publishers and subscribers.
Therefore, the Will Message is the 1st session state that RabbitMQ keeps
for publish-only MQTT clients.
One current limitation of this commit is that a Will Message that is
delayed (i.e. Will Delay Interval is set) and retained (i.e. Will Retain
flag set) will not be retained.
One solution to retain delayed Will Messages is that the retainer
process consumes from a queue and the queue binds to the topic exchange
with a topic starting with `$`, for example `$retain/#`.
The dead-lettered AMQP 0.9.1 Will Message could then have a CC header
added such that it is published not only with the Will Topic,
but also with the `$retain` topic. For example, if the Will Topic is `a/b`,
it will publish with routing key `a/b` and CC header `$retain/a/b`.
The reason this is not implemented in this commit is that, to keep the
currently broken retained message store behaviour, we would need to
create at least one queue per node and publish only to that local
queue. In future, once we have a replicated retained message store based
on a Stream for example, we could just publish all retained messages to
the `$retain` topic and therefore into the Stream.
So, for now, we list "retained and delayed Will Messages" as a limitation:
they won't actually be retained.
2023-05-18 23:36:25 +08:00
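A minimal sketch of the queue described above, using the AMQP 0.9.1 client this suite already includes and the standard x-message-ttl, x-expires, and x-dead-letter-exchange queue arguments (the function and queue names are illustrative, not the real implementation):
```
%% Per-session will queue: message TTL = Will Delay Interval, queue
%% TTL = Session Expiry Interval; an expired Will Message is
%% dead-lettered to the MQTT topic exchange (amq.topic by default).
declare_will_queue(Ch, QName, WillDelayMs, SessionExpiryMs) ->
    #'queue.declare_ok'{} =
        amqp_channel:call(
          Ch,
          #'queue.declare'{
             queue     = QName,
             durable   = true,
             arguments = [{<<"x-message-ttl">>, long, WillDelayMs},
                          {<<"x-expires">>, long, SessionExpiryMs},
                          {<<"x-dead-letter-exchange">>, longstr, <<"amq.topic">>}]}),
    ok.
```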
|
|
|
KeepaliveMs, 4 * KeepaliveSecs),
|
2023-01-03 01:02:43 +08:00
|
|
|
|
2023-05-02 22:08:15 +08:00
|
|
|
await_exit(C1),
|
|
|
|
assert_v5_disconnect_reason_code(Config, ?RC_KEEP_ALIVE_TIMEOUT),
|
Support AMQP 1.0 natively
## What
Similar to Native MQTT in #5895, this commits implements Native AMQP 1.0.
By "native", we mean do not proxy via AMQP 0.9.1 anymore.
## Why
Native AMQP 1.0 comes with the following major benefits:
1. Similar to Native MQTT, this commit provides better throughput, latency,
scalability, and resource usage for AMQP 1.0.
See https://blog.rabbitmq.com/posts/2023/03/native-mqtt for native MQTT improvements.
See further below for some benchmarks.
2. Since AMQP 1.0 is not limited anymore by the AMQP 0.9.1 protocol,
this commit allows implementing more AMQP 1.0 features in the future.
Some features are already implemented in this commit (see next section).
3. Simpler, better understandable, and more maintainable code.
Native AMQP 1.0 as implemented in this commit has the
following major benefits compared to AMQP 0.9.1:
4. Memory and disk alarms will only stop accepting incoming TRANSFER frames.
New connections can still be created to consume from RabbitMQ to empty queues.
5. Due to 4. no need anymore for separate connections for publishers and
consumers as we currently recommended for AMQP 0.9.1. which potentially
halves the number of physical TCP connections.
6. When a single connection sends to multiple target queues, a single
slow target queue won't block the entire connection.
Publisher can still send data quickly to all other target queues.
7. A publisher can request whether it wants publisher confirmation on a per-message basis.
In AMQP 0.9.1 publisher confirms are configured per channel only.
8. Consumers can change their "prefetch count" dynamically which isn't
possible in our AMQP 0.9.1 implementation. See #10174
9. AMQP 1.0 is an extensible protocol
This commit also fixes dozens of bugs present in the AMQP 1.0 plugin in
RabbitMQ 3.x - most of which cannot be backported due to the complexity
and limitations of the old 3.x implementation.
This commit contains breaking changes and is therefore targeted for RabbitMQ 4.0.
## Implementation details
1. Breaking change: With Native AMQP, the behaviour of
```
Convert AMQP 0.9.1 message headers to application properties for an AMQP 1.0 consumer
amqp1_0.convert_amqp091_headers_to_app_props = false | true (default false)
Convert AMQP 1.0 Application Properties to AMQP 0.9.1 headers
amqp1_0.convert_app_props_to_amqp091_headers = false | true (default false)
```
will break because we always convert according to the message container conversions.
For example, AMQP 0.9.1 x-headers will go into message-annotations instead of application properties.
Also, `false` won’t be respected since we always convert the headers with message containers.
2. Remove rabbit_queue_collector
rabbit_queue_collector is responsible for synchronously deleting
exclusive queues. Since the AMQP 1.0 plugin never creates exclusive
queues, rabbit_queue_collector doesn't need to be started in the first
place. This will save 1 Erlang process per AMQP 1.0 connection.
3. 7 processes per connection + 1 process per session in this commit instead of
7 processes per connection + 15 processes per session in 3.x
Supervision hierarchy got re-designed.
4. Use 1 writer process per AMQP 1.0 connection
AMQP 0.9.1 uses a separate rabbit_writer Erlang process per AMQP 0.9.1 channel.
Prior to this commit, AMQP 1.0 used a separate rabbit_amqp1_0_writer process per AMQP 1.0 session.
Advantage of single writer proc per session (prior to this commit):
* High parallelism for serialising packets if multiple sessions within
a connection write heavily at the same time.
This commit uses a single writer process per AMQP 1.0 connection that is
shared across all AMQP 1.0 sessions.
Advantages of single writer proc per connection (this commit):
* Lower memory usage with hundreds of thousands of AMQP 1.0 sessions
* Less TCP and IP header overhead given that the single writer process
can accumulate across all sessions bytes before flushing the socket.
In other words, this commit decides that a reader / writer process pair
per AMQP 1.0 connection is good enough for bi-directional TRANSFER flows.
Having a writer per session is too heavy.
We still ensure high throughput by having separate reader, writer, and
session processes.
5. Transform rabbit_amqp1_0_writer into gen_server
Why:
Prior to this commit, when clicking on the AMQP 1.0 writer process in
observer, the process crashed.
Instead of handling all these debug messages of the sys module ourselves, it's better
to implement a gen_server.
There is no advantage to using a special OTP process over a gen_server
for the AMQP 1.0 writer.
gen_server also provides cleaner format_status output.
How:
Message callbacks return a timeout of 0.
After all messages in the inbox are processed, the timeout message is
handled by flushing any pending bytes.
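A minimal sketch of this timeout-0 pattern (module, state, and API names are hypothetical, not the real rabbit_amqp1_0_writer):
```erlang
-module(writer_sketch).
-behaviour(gen_server).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2]).

init(Sock) -> {ok, #{sock => Sock, pending => []}}.

%% Every callback returns a 0 timeout, so the 'timeout' message only
%% arrives once the inbox has been drained.
handle_cast({send, Bytes}, #{pending := P} = State) ->
    {noreply, State#{pending := [Bytes | P]}, 0}.

handle_call(flush, _From, State) ->
    {reply, ok, flush(State), 0}.

%% Inbox empty: flush all accumulated bytes in a single socket write.
handle_info(timeout, State) ->
    {noreply, flush(State)}.

flush(#{pending := []} = State) ->
    State;
flush(#{sock := Sock, pending := P} = State) ->
    ok = gen_tcp:send(Sock, lists:reverse(P)),
    State#{pending := []}.
```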
6. Remove stats timer from writer
AMQP 1.0 connections haven't emitted any stats previously.
7. When there are contiguous queue confirmations in the session process
mailbox, batch them. When the confirmations are sent to the publisher, a
single DISPOSITION frame is sent for contiguously confirmed delivery
IDs.
This approach should be good enough. However, it's suboptimal in
scenarios where contiguous delivery IDs that need confirmations are rare,
for example:
* There are multiple links in the session with different sender
settlement modes and the sender publishes across these links interleaved.
* The sender settlement mode is mixed and the sender publishes settled
and unsettled TRANSFERs interleaved.
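A hypothetical helper illustrating the batching (not the session's actual code):
```erlang
-module(disposition_ranges).
-export([ranges/1]).

%% Fold confirmed delivery IDs into {First, Last} ranges so that a single
%% DISPOSITION frame covers each contiguous run. Serial-number wraparound
%% is ignored in this sketch.
-spec ranges([non_neg_integer()]) -> [{non_neg_integer(), non_neg_integer()}].
ranges(DeliveryIds) ->
    lists:foldr(
      fun(Id, [{First, Last} | Acc]) when Id + 1 =:= First ->
              [{Id, Last} | Acc];   %% extend the current run downwards
         (Id, Acc) ->
              [{Id, Id} | Acc]      %% start a new run
      end, [], lists:usort(DeliveryIds)).

%% Example: ranges([1,2,3,7,8,10]) returns [{1,3},{7,8},{10,10}],
%% i.e. three DISPOSITION frames instead of six.
```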
8. Introduce credit API v2
Why:
The AMQP 0.9.1 credit extension, which is to be removed in 4.0, was poorly
designed: basic.credit is a synchronous call into the queue process that
blocks the entire AMQP 1.0 session process.
How:
Change the interactions between queue clients and queue server
implementations:
* Clients only request a credit reply if the FLOW's `echo` field is set
* Include all link flow control state held by the queue process into a
new credit_reply queue event:
* `available` after the queue sends any deliveries
* `link-credit` after the queue sends any deliveries
* `drain` which allows us to combine the old queue events
send_credit_reply and send_drained into a single new queue event
credit_reply.
* Include the consumer tag into the credit_reply queue event such that
the AMQP 1.0 session process can process any credit replies
asynchronously.
Link flow control state `delivery-count` also moves to the queue processes.
The new interactions are hidden behind feature flag credit_api_v2 to
allow for rolling upgrades from 3.13 to 4.0.
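A sketch of the credit_reply event described above (the exact tuple layout in the queue type interface is an assumption here):
```erlang
-module(credit_reply_sketch).
-export([decode/1]).

%% All link flow control state travels in one asynchronous queue event.
%% The consumer tag lets the session route the reply to the right link
%% without any blocking call into the queue process.
decode({credit_reply, CTag, DeliveryCount, LinkCredit, Available, Drain}) ->
    #{consumer_tag   => CTag,
      delivery_count => DeliveryCount,  %% now owned by the queue process
      link_credit    => LinkCredit,
      available      => Available,
      drain          => Drain}.
```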
9. Use serial number arithmetic in quorum queues and the session process.
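For reference, a minimal RFC 1982 sketch of 32-bit serial number arithmetic (the real implementation is the serial_number module moved to the common lib in item 29):
```erlang
-module(serial_sketch).
-export([add/2, compare/2]).

-define(MASK, 16#FFFFFFFF).

%% Addition wraps at 2^32.
add(S, N) -> (S + N) band ?MASK.

%% Interpret the wrapped difference as a signed 32-bit quantity.
compare(A, B) ->
    case (A - B) band ?MASK of
        0 -> equal;
        D when D < 16#80000000 -> greater;
        _ -> less
    end.

%% compare(0, 16#FFFFFFFF) returns greater: 0 follows the maximum
%% serial number after wraparound.
```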
10. Completely bypass the rabbit_limiter module for AMQP 1.0
flow control. The goal is to eventually remove the rabbit_limiter module
in 4.0 since AMQP 0.9.1 global QoS will be unsupported in 4.0. This
commit lifts the AMQP 1.0 link flow control logic out of rabbit_limiter
into rabbit_queue_consumers.
11. Fix credit bug for streams:
AMQP 1.0 settlements shouldn't top up link credit,
only FLOW frames should top up link credit.
12. Allow sender settle mode unsettled for streams
since AMQP 1.0 acknowledgements to streams are no-ops (currently).
13. Fix AMQP 1.0 client bugs
Auto renewing credits should not be related to settling TRANSFERs.
Remove field link_credit_unsettled as it was wrong and confusing.
Prior to this commit auto renewal did not work when the sender uses
sender settlement mode settled.
14. Fix AMQP 1.0 client bugs
The wrong, outdated Link was passed to function auto_flow/2.
15. Use osiris chunk iterator
Only hold messages of uncompressed sub-batches in memory if the consumer
doesn't have sufficient credits.
Compressed sub-batches are skipped for non-Stream-protocol consumers.
16. Fix incoming link flow control
Always use confirms between AMQP 1.0 queue clients and queue servers.
As already done internally by rabbit_fifo_client and
rabbit_stream_queue, use confirms for classic queues as well.
17. Include link handle into correlation when publishing messages to target queues
such that session process can correlate confirms from target queues to
incoming links.
18. Only grant more credits to a publisher if the publisher is running low on
credits and there are not too many unconfirmed messages on the link.
19. Completely ignore `block` and `unblock` queue actions and RabbitMQ credit flow
between classic queue process and session process.
20. Link flow control is independent between links.
A client can refer to a queue or to an exchange with multiple
dynamically added target queues. Multiple incoming links can also fan
in to the same queue. Whatever the link topology looks like, this
commit ensures that each link is only granted more credits if that link
isn't overloaded.
21. A connection or a session can send to many different queues.
In AMQP 0.9.1, a single slow queue will lead to the entire channel, and
then the entire connection, being blocked.
This commit makes sure that a single slow queue from one link won't slow
down sending on other links.
For example, with link A sending to a local classic queue and
link B sending to a 5-replica quorum queue, credits will naturally
be granted more slowly on link B than on link A. So, despite the quorum queue being
slower in confirming messages, the same AMQP 1.0 connection and session
can still pump data very fast into the classic queue.
22. If a cluster-wide memory or disk alarm occurs:
Each session sends a FLOW with incoming-window set to 0 to the sending client
(see the sketch after this item).
If a sending client doesn't obey, it is forcibly disconnected.
If the cluster-wide alarm clears:
Each session resumes with a FLOW defaulting to the initial incoming-window.
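A hedged sketch of that blocking FLOW, using the frame records from amqp10_common's amqp10_framing.hrl (only the mandatory fields are shown; values are tagged per the AMQP type system):
```erlang
-include_lib("amqp10_common/include/amqp10_framing.hrl").

block_flow(NextIncomingId, NextOutgoingId, OutgoingWindow) ->
    #'v1_0.flow'{next_incoming_id = {uint, NextIncomingId},
                 incoming_window  = {uint, 0},  %% stop incoming TRANSFERs
                 next_outgoing_id = {uint, NextOutgoingId},
                 outgoing_window  = {uint, OutgoingWindow}}.
```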
23. All operations apart from publishing TRANSFERs to RabbitMQ can continue during cluster-wide alarms;
specifically, attaching consumers and consuming, i.e. emptying queues.
There is no need for separate AMQP 1.0 connections for publishers and consumers, as recommended for our AMQP 0.9.1 implementation.
24. Flow control summary:
* If a queue becomes the bottleneck, that's solved by slowing down individual sending links (AMQP 1.0 link flow control).
* If a session becomes the bottleneck (less likely), that's solved by AMQP 1.0 session flow control.
* If the connection becomes the bottleneck, it naturally won't read fast enough from the socket, causing TCP backpressure to be applied.
Nowhere will RabbitMQ internal credit based flow control (i.e. module credit_flow) be used on the incoming AMQP 1.0 message path.
25. Register AMQP sessions
Prefer local-only pg over our custom pg_local implementation as
pg is a better process group implementation than pg_local.
pg_local was identified as a bottleneck in tests where many MQTT clients were disconnected at once.
26. Start a local-only pg when Rabbit boots:
> A scope can be kept local-only by using a scope name that is unique cluster-wide, e.g. the node name:
> pg:start_link(node()).
Register AMQP 1.0 connections and sessions with pg.
In the future, we should remove pg_local and instead use the new local-only
pg for all registered processes, such as AMQP 0.9.1 connections and channels.
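A sketch of the pg usage from items 25 and 26 (the group name is hypothetical):
```erlang
%% The node name makes the scope unique cluster-wide, so membership
%% never replicates to other nodes.
start_local_pg() ->
    {ok, _Pid} = pg:start_link(node()).

register_session(SessionPid) ->
    ok = pg:join(node(), amqp_session, SessionPid).

local_sessions() ->
    pg:get_local_members(node(), amqp_session).
```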
27. Requeue messages if link detached
Although the spec allows settling delivery IDs on detached links, RabbitMQ does not respect the 'closed'
field of the DETACH frame and therefore handles every DETACH frame as closed. Since the link is closed,
we expect every outstanding delivery to be requeued.
In addition to consumer cancellation, detaching a link therefore causes in-flight deliveries to be requeued.
Note that this behaviour is different from merely consumer cancellation in AMQP 0.9.1:
"After a consumer is cancelled there will be no future deliveries dispatched to it. Note that there can
still be "in flight" deliveries dispatched previously. Cancelling a consumer will neither discard nor requeue them."
[https://www.rabbitmq.com/consumers.html#unsubscribing]
An AMQP receiver can first drain, and then detach, to prevent "in flight" deliveries.
28. Init AMQP session with BEGIN frame
Similar to how there can't be an MQTT processor without a CONNECT
frame, there can't be an AMQP session without a BEGIN frame.
This allows having strict dialyzer types for session flow control
fields (i.e. not allowing 'undefined').
29. Move serial_number to AMQP 1.0 common lib
such that it can be used by both AMQP 1.0 server and client
30. Fix AMQP client to do serial number arithmetic.
31. AMQP client: Differentiate between delivery-id and transfer-id for better
understandability.
32. Fix link flow control in classic queues
This commit fixes
```
java -jar target/perf-test.jar -ad false -f persistent -u cq -c 3000 -C 1000000 -y 0
```
followed by
```
./omq -x 0 amqp -T /queue/cq -D 1000000 --amqp-consumer-credits 2
```
Prior to this commit (and on RabbitMQ 3.x), consuming would halt after around
8,000 - 10,000 messages.
The bug was that in flight messages from classic queue process to
session process were not taken into account when topping up credit to
the classic queue process.
Fixes #2597
The solution to this bug (and a much cleaner design anyway independent of
this bug) is that queues should hold all link flow control state including
the delivery-count.
Hence, when credit API v2 is used, the delivery-count is held by the
classic queue process, quorum queue process, or stream queue client
instead of being managed in the session.
33. The double level crediting between (a) session process and
rabbit_fifo_client, and (b) rabbit_fifo_client and rabbit_fifo was
removed. Therefore, instead of managing 3 separate delivery-counts (i. session,
ii. rabbit_fifo_client, iii. rabbit_fifo), only 1 delivery-count is used
in rabbit_fifo. This is a big simplification.
34. This commit fixes quorum queues without bumping the machine version
nor introducing new rabbit_fifo commands.
Whether credit API v2 is used is solely determined at link attachment time
depending on whether feature flag credit_api_v2 is enabled.
Even if that feature flag is enabled later on, an existing link will
keep using credit API v1 until detached (or the node is shut down).
Eventually, after feature flag credit_api_v2 has been enabled and a
subsequent rolling upgrade, all links will use credit API v2.
This approach is safe and simple.
The two alternative ways to move the delivery-count from the session process to the
queue processes would have been:
i. Explicit feature flag credit_api_v2 migration function
* Can use a gen_server:call and only finish migration once all delivery-counts were migrated.
Cons:
* Extra new message format just for migration is required.
* Risky as migration will fail if a target queue doesn’t reply.
ii. Session always includes DeliveryCountSnd when crediting to the queue:
Cons:
* Two delivery-counts would be held simultaneously in the session proc and the queue proc;
this could be solved by deleting the session proc's delivery-count upon credit-reply.
* What happens if the receiver doesn’t provide credit for a very long time? Is that a problem?
35. Support stream filtering in AMQP 1.0 (by @acogoluegnes)
Use the x-stream-filter-value message annotation
to carry the filter value in a published message.
Use the rabbitmq:stream-filter and rabbitmq:stream-match-unfiltered
filters when creating a receiver that wants to filter
out messages from a stream.
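A hedged publisher-side sketch (Sender is an attached amqp10_client sender link; the filter value is illustrative):
```erlang
Msg0 = amqp10_msg:new(<<"dtag-1">>, <<"payload">>, false),
Msg  = amqp10_msg:set_message_annotations(
         #{<<"x-stream-filter-value">> => <<"eu-west">>}, Msg0),
ok = amqp10_client:send_msg(Sender, Msg).
```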
36. Remove credit extension from AMQP 0.9.1 client
37. Support maintenance mode closing AMQP 1.0 connections.
38. Remove AMQP 0.9.1 client dependency from AMQP 1.0 implementation.
39. Move AMQP 1.0 plugin to the core. AMQP 1.0 is enabled by default.
The old rabbitmq_amqp1_0 plugin will be kept as a no-op plugin so that deployment
tools which execute the following do not fail:
```
rabbitmq-plugins enable rabbitmq_amqp1_0
rabbitmq-plugins disable rabbitmq_amqp1_0
```
40. Breaking change: Remove CLI command `rabbitmqctl list_amqp10_connections`.
Instead, list both AMQP 0.9.1 and AMQP 1.0 connections in `list_connections`:
```
rabbitmqctl list_connections protocol
Listing connections ...
protocol
{1, 0}
{0,9,1}
```
## Benchmarks
### Throughput & Latency
Setup:
* Single node Ubuntu 22.04
* Erlang 26.1.1
Start RabbitMQ:
```
make run-broker PLUGINS="rabbitmq_management rabbitmq_amqp1_0" FULL=1 RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS="+S 3"
```
Predeclare durable classic queue cq1, durable quorum queue qq1, durable stream queue sq1.
Start client:
https://github.com/ssorj/quiver
https://hub.docker.com/r/ssorj/quiver/tags (digest 453a2aceda64)
```
docker run -it --rm --add-host host.docker.internal:host-gateway ssorj/quiver:latest
bash-5.1# quiver --version
quiver 0.4.0-SNAPSHOT
```
1. Classic queue
```
quiver //host.docker.internal//amq/queue/cq1 --durable --count 1m --duration 10m --body-size 12 --credit 1000
```
This commit:
```
Count ............................................. 1,000,000 messages
Duration ............................................... 73.8 seconds
Sender rate .......................................... 13,548 messages/s
Receiver rate ........................................ 13,547 messages/s
End-to-end rate ...................................... 13,547 messages/s
Latencies by percentile:
0% ........ 0 ms 90.00% ........ 9 ms
25% ........ 2 ms 99.00% ....... 14 ms
50% ........ 4 ms 99.90% ....... 17 ms
100% ....... 26 ms 99.99% ....... 24 ms
```
RabbitMQ 3.x (main branch as of 30 January 2024):
```
---------------------- Sender ----------------------- --------------------- Receiver ---------------------- --------
Time [s] Count [m] Rate [m/s] CPU [%] RSS [M] Time [s] Count [m] Rate [m/s] CPU [%] RSS [M] Lat [ms]
----------------------------------------------------- ----------------------------------------------------- --------
2.1 130,814 65,342 6 73.6 2.1 3,217 1,607 0 8.0 511
4.1 163,580 16,367 2 74.1 4.1 3,217 0 0 8.0 0
6.1 229,114 32,767 3 74.1 6.1 3,217 0 0 8.0 0
8.1 261,880 16,367 2 74.1 8.1 67,874 32,296 8 8.2 7,662
10.1 294,646 16,367 2 74.1 10.1 67,874 0 0 8.2 0
12.1 360,180 32,734 3 74.1 12.1 67,874 0 0 8.2 0
14.1 392,946 16,367 3 74.1 14.1 68,604 365 0 8.2 12,147
16.1 458,480 32,734 3 74.1 16.1 68,604 0 0 8.2 0
18.1 491,246 16,367 2 74.1 18.1 68,604 0 0 8.2 0
20.1 556,780 32,767 4 74.1 20.1 68,604 0 0 8.2 0
22.1 589,546 16,375 2 74.1 22.1 68,604 0 0 8.2 0
receiver timed out
24.1 622,312 16,367 2 74.1 24.1 68,604 0 0 8.2 0
quiver: error: PlanoProcessError: Command 'quiver-arrow receive //host.docker.internal//amq/queue/cq1 --impl qpid-proton-c --duration 10m --count 1m --rate 0 --body-size 12 --credit 1000 --transaction-size 0 --timeout 10 --durable --output /tmp/quiver-otujr23y' returned non-zero exit status 1.
Traceback (most recent call last):
File "/usr/local/lib/quiver/python/quiver/pair.py", line 144, in run
_plano.wait(receiver, check=True)
File "/usr/local/lib/quiver/python/plano/main.py", line 1243, in wait
raise PlanoProcessError(proc)
plano.main.PlanoProcessError: Command 'quiver-arrow receive //host.docker.internal//amq/queue/cq1 --impl qpid-proton-c --duration 10m --count 1m --rate 0 --body-size 12 --credit 1000 --transaction-size 0 --timeout 10 --durable --output /tmp/quiver-otujr23y' returned non-zero exit status 1.
```
2. Quorum queue:
```
quiver //host.docker.internal//amq/queue/qq1 --durable --count 1m --duration 10m --body-size 12 --credit 1000
```
This commit:
```
Count ............................................. 1,000,000 messages
Duration .............................................. 101.4 seconds
Sender rate ........................................... 9,867 messages/s
Receiver rate ......................................... 9,868 messages/s
End-to-end rate ....................................... 9,865 messages/s
Latencies by percentile:
0% ....... 11 ms 90.00% ....... 23 ms
25% ....... 15 ms 99.00% ....... 28 ms
50% ....... 18 ms 99.90% ....... 33 ms
100% ....... 49 ms 99.99% ....... 47 ms
```
RabbitMQ 3.x:
```
---------------------- Sender ----------------------- --------------------- Receiver ---------------------- --------
Time [s] Count [m] Rate [m/s] CPU [%] RSS [M] Time [s] Count [m] Rate [m/s] CPU [%] RSS [M] Lat [ms]
----------------------------------------------------- ----------------------------------------------------- --------
2.1 130,814 65,342 9 69.9 2.1 18,430 9,206 5 7.6 1,221
4.1 163,580 16,375 5 70.2 4.1 18,867 218 0 7.6 2,168
6.1 229,114 32,767 6 70.2 6.1 18,867 0 0 7.6 0
8.1 294,648 32,734 7 70.2 8.1 18,867 0 0 7.6 0
10.1 360,182 32,734 6 70.2 10.1 18,867 0 0 7.6 0
12.1 425,716 32,767 6 70.2 12.1 18,867 0 0 7.6 0
receiver timed out
14.1 458,482 16,367 5 70.2 14.1 18,867 0 0 7.6 0
quiver: error: PlanoProcessError: Command 'quiver-arrow receive //host.docker.internal//amq/queue/qq1 --impl qpid-proton-c --duration 10m --count 1m --rate 0 --body-size 12 --credit 1000 --transaction-size 0 --timeout 10 --durable --output /tmp/quiver-b1gcup43' returned non-zero exit status 1.
Traceback (most recent call last):
File "/usr/local/lib/quiver/python/quiver/pair.py", line 144, in run
_plano.wait(receiver, check=True)
File "/usr/local/lib/quiver/python/plano/main.py", line 1243, in wait
raise PlanoProcessError(proc)
plano.main.PlanoProcessError: Command 'quiver-arrow receive //host.docker.internal//amq/queue/qq1 --impl qpid-proton-c --duration 10m --count 1m --rate 0 --body-size 12 --credit 1000 --transaction-size 0 --timeout 10 --durable --output /tmp/quiver-b1gcup43' returned non-zero exit status 1.
```
3. Stream:
```
quiver-arrow send //host.docker.internal//amq/queue/sq1 --durable --count 1m -d 10m --summary --verbose
```
This commit:
```
Count ............................................. 1,000,000 messages
Duration ................................................ 8.7 seconds
Message rate ........................................ 115,154 messages/s
```
RabbitMQ 3.x:
```
Count ............................................. 1,000,000 messages
Duration ............................................... 21.2 seconds
Message rate ......................................... 47,232 messages/s
```
### Memory usage
Start RabbitMQ:
```
ERL_MAX_PORTS=3000000 RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS="+P 3000000 +S 6" make run-broker PLUGINS="rabbitmq_amqp1_0" FULL=1 RABBITMQ_CONFIG_FILE="rabbitmq.conf"
```
```
/bin/cat rabbitmq.conf
tcp_listen_options.sndbuf = 2048
tcp_listen_options.recbuf = 2048
vm_memory_high_watermark.relative = 0.95
vm_memory_high_watermark_paging_ratio = 0.95
loopback_users = none
```
Create 50k connections with 2 sessions per connection, i.e. 100k sessions in total:
```go
package main
import (
"context"
"log"
"time"
"github.com/Azure/go-amqp"
)
func main() {
for i := 0; i < 50000; i++ {
conn, err := amqp.Dial(context.TODO(), "amqp://nuc", &amqp.ConnOptions{SASLType: amqp.SASLTypeAnonymous()})
if err != nil {
log.Fatal("dialing AMQP server:", err)
}
_, err = conn.NewSession(context.TODO(), nil)
if err != nil {
log.Fatal("creating AMQP session:", err)
}
_, err = conn.NewSession(context.TODO(), nil)
if err != nil {
log.Fatal("creating AMQP session:", err)
}
}
log.Println("opened all connections")
time.Sleep(5 * time.Hour)
}
```
This commit:
```
erlang:memory().
[{total,4586376480},
{processes,4025898504},
{processes_used,4025871040},
{system,560477976},
{atom,1048841},
{atom_used,1042841},
{binary,233228608},
{code,21449982},
{ets,108560464}]
erlang:system_info(process_count).
450289
```
7 procs per connection + 1 proc per session.
(7 + 2*1) * 50,000 = 450,000 procs
RabbitMQ 3.x:
```
erlang:memory().
[{total,15168232704},
{processes,14044779256},
{processes_used,14044755120},
{system,1123453448},
{atom,1057033},
{atom_used,1052587},
{binary,236381264},
{code,21790238},
{ets,391423744}]
erlang:system_info(process_count).
1850309
```
7 procs per connection + 15 per session
(7 + 2*15) * 50,000 = 1,850,000 procs
50k connections + 100k sessions require
with this commit: 4.5 GB
in RabbitMQ 3.x: 15 GB
## Future work
1. More efficient parser and serializer
2. TODO in mc_amqp: Do not store the parsed message on disk.
3. Implement both AMQP HTTP extension and AMQP management extension to allow AMQP
clients to create RabbitMQ objects (queues, exchanges, ...).
2023-07-21 18:29:07 +08:00
|
|
|
?assert(rpc(Config, meck, validate, [Mod])),
|
2023-01-03 01:02:43 +08:00
|
|
|
ok = rpc(Config, meck, unload, [Mod]),
|
|
|
|
|
|
|
|
C2 = connect(<<"client2">>, Config),
|
|
|
|
{ok, _, [0]} = emqtt:subscribe(C2, WillTopic),
|
|
|
|
receive {publish, #{client_pid := C2,
|
|
|
|
dup := false,
|
|
|
|
qos := 0,
|
|
|
|
retain := true,
|
|
|
|
topic := WillTopic,
|
|
|
|
payload := WillPayload}} -> ok
|
2024-12-10 23:19:34 +08:00
|
|
|
after ?TIMEOUT -> ct:fail("missing will")
|
2023-01-03 01:02:43 +08:00
|
|
|
end,
|
|
|
|
ok = emqtt:disconnect(C2).
|
|
|
|
|
|
|
|
keepalive_turned_off(Config) ->
|
|
|
|
%% "A Keep Alive value of zero (0) has the effect of turning off the keep alive mechanism."
|
|
|
|
KeepaliveSecs = 0,
|
|
|
|
C = connect(?FUNCTION_NAME, Config, [{keepalive, KeepaliveSecs}]),
|
|
|
|
ok = emqtt:publish(C, <<"TopicB">>, <<"Payload">>),
|
|
|
|
|
|
|
|
%% Mock the server socket to not have received any bytes.
|
|
|
|
rabbit_ct_broker_helpers:setup_meck(Config),
|
|
|
|
Mod = rabbit_net,
|
|
|
|
ok = rpc(Config, meck, new, [Mod, [no_link, passthrough]]),
|
|
|
|
ok = rpc(Config, meck, expect, [Mod, getstat, 2, {ok, [{recv_oct, 999}]} ]),
|
|
|
|
|
|
|
|
rabbit_ct_helpers:consistently(?_assert(erlang:is_process_alive(C))),
|
|
|
|
|
2023-07-21 18:29:07 +08:00
|
|
|
?assert(rpc(Config, meck, validate, [Mod])),
|
2023-01-03 01:02:43 +08:00
|
|
|
ok = rpc(Config, meck, unload, [Mod]),
|
|
|
|
ok = emqtt:disconnect(C).
|
|
|
|
|
|
|
|
duplicate_client_id(Config) ->
|
2023-04-27 18:52:56 +08:00
|
|
|
[Server1, Server2, _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
|
Support Will Delay Interval
Previously, the Will Message could be kept in memory in the MQTT
connection process state. Upon termination, the Will Message is sent.
The new MQTT 5.0 feature Will Delay Interval requires storing the Will
Message outside of the MQTT connection process state.
The Will Message should not be stored node local because the client
could reconnect to a different node.
Storing the Will Message in Mnesia is not an option because we want to
get rid of Mnesia. Storing the Will Message in a Ra cluster or in Khepri
is only an option if the Will Payload is small as there is currently no
way in Ra to **efficiently** snapshot large binary data (Note that these
Will Messages are not consumed in a FIFO style workload like messages in
quorum queues. A Will Message needs to be stored for as long as the
Session lasts - up to 1 day by default, but could also be much longer if
RabbitMQ is configured with a higher maximum session expiry interval.)
Usually Will Payloads are small: they are just a notification that the client's
MQTT session ended abnormally. However, we don't know how users leverage
the Will Message feature. The MQTT protocol allows for large Will Payloads.
Therefore, the solution implemented in this commit - which should work
well enough - is storing the Will Message in a queue.
Each MQTT session that has a Session Expiry Interval and a Will Delay
Interval of > 0 seconds will, when its current Network Connection ends,
create a queue in which it stores its Will Message. The Will Message has a
message TTL set (corresponds to the Will Delay Interval) and the queue
has a queue TTL set (corresponds to the Session Expiry Interval).
If the client does not reconnect within the Will Delay Interval, the
message is dead lettered to the configured MQTT topic exchange
(amq.topic by default).
The Will Delay Interval can be set by both publishers and subscribers.
Therefore, the Will Message is the 1st session state that RabbitMQ keeps
for publish-only MQTT clients.
One current limitation of this commit is that a Will Message that is
delayed (i.e. Will Delay Interval is set) and retained (i.e. Will Retain
flag set) will not be retained.
One solution to retain delayed Will Messages is that the retainer
process consumes from a queue and the queue binds to the topic exchange
with a topic starting with `$`, for example `$retain/#`.
The dead-lettered AMQP 0.9.1 Will Message could then get a CC header added such
that it is published not only with the Will Topic,
but also with the `$retain` topic. For example, if the Will Topic is `a/b`,
it will be published with routing key `a/b` and CC header `$retain/a/b`.
The reason this is not implemented in this commit is that, to keep the
currently broken retained message store behaviour, we would need to
create at least one queue per node and publish only to that local
queue. In future, once we have a replicated retained message store based
on a Stream for example, we could just publish all retained messages to
the `$retain` topic and therefore into the Stream.
So, for now, we list it as a limitation that retained and delayed Will Messages
won't actually be retained.
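The mechanism can be modelled with the AMQP 0.9.1 Erlang client (illustrative only, not the broker's internal code): the queue TTL stands in for the Session Expiry Interval, the per-message TTL for the Will Delay Interval, and dead-lettering targets the MQTT topic exchange.
```erlang
-include_lib("amqp_client/include/amqp_client.hrl").

declare_will_queue(Ch, Q, SessionExpiryMs, WillRoutingKey) ->
    Args = [{<<"x-expires">>, long, SessionExpiryMs},
            {<<"x-dead-letter-exchange">>, longstr, <<"amq.topic">>},
            {<<"x-dead-letter-routing-key">>, longstr, WillRoutingKey}],
    #'queue.declare_ok'{} =
        amqp_channel:call(Ch, #'queue.declare'{queue = Q,
                                               durable = true,
                                               arguments = Args}).

%% If the client does not return within WillDelayMs, the message expires
%% and is dead lettered to amq.topic with the Will routing key.
store_will(Ch, Q, WillDelayMs, WillPayload) ->
    Props = #'P_basic'{expiration = integer_to_binary(WillDelayMs)},
    ok = amqp_channel:cast(Ch, #'basic.publish'{routing_key = Q},
                           #amqp_msg{props = Props, payload = WillPayload}).
```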
2023-05-18 23:36:25 +08:00
|
|
|
%% Test session takeover by both new and old node in mixed version clusters.
|
|
|
|
ClientId1 = <<"c1">>,
|
|
|
|
ClientId2 = <<"c2">>,
|
|
|
|
C1a = connect(ClientId1, Config, Server2, []),
|
|
|
|
C2a = connect(ClientId2, Config, Server1, []),
|
2023-10-27 21:49:05 +08:00
|
|
|
eventually(?_assertEqual(2, length(all_connection_pids(Config)))),
|
2023-01-03 01:02:43 +08:00
|
|
|
process_flag(trap_exit, true),
|
2023-05-18 23:36:25 +08:00
|
|
|
C1b = connect(ClientId1, Config, Server1, []),
|
|
|
|
C2b = connect(ClientId2, Config, Server2, []),
|
2023-05-02 22:08:15 +08:00
|
|
|
assert_v5_disconnect_reason_code(Config, ?RC_SESSION_TAKEN_OVER),
|
2023-05-18 23:36:25 +08:00
|
|
|
assert_v5_disconnect_reason_code(Config, ?RC_SESSION_TAKEN_OVER),
|
|
|
|
await_exit(C1a),
|
|
|
|
await_exit(C2a),
|
2023-01-03 01:02:43 +08:00
|
|
|
timer:sleep(200),
|
2023-10-27 21:49:05 +08:00
|
|
|
?assertEqual(2, length(all_connection_pids(Config))),
|
2023-05-18 23:36:25 +08:00
|
|
|
ok = emqtt:disconnect(C1b),
|
|
|
|
ok = emqtt:disconnect(C2b),
|
2023-04-27 18:52:56 +08:00
|
|
|
eventually(?_assertEqual(0, length(all_connection_pids(Config)))).
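%% session_reconnect and session_takeover exercise session_switch/2:
%% a non-clean session created by one connection is continued by a second
%% connection using the same client ID (connecting to a different node in
%% mixed version clusters), which must be able to modify and remove the
%% session's subscriptions.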
session_reconnect(Config) ->
    session_switch(Config, true).

session_takeover(Config) ->
    session_switch(Config, false).

session_switch(Config, Disconnect) ->
    Topic = ClientId = atom_to_binary(?FUNCTION_NAME),
    %% Connect to old node in mixed version cluster.
    C1 = connect(ClientId, Config, 1, non_clean_sess_opts()),
    {ok, _, [1]} = emqtt:subscribe(C1, Topic, qos1),
    case Disconnect of
        true -> ok = emqtt:disconnect(C1);
        false -> unlink(C1)
    end,
    %% Connect to new node in mixed version cluster.
    C2 = connect(ClientId, Config, 0, non_clean_sess_opts()),
    case Disconnect of
        true -> ok;
        false -> assert_v5_disconnect_reason_code(Config, ?RC_SESSION_TAKEN_OVER)
    end,
    %% New connection should be able to modify subscription.
    {ok, _, [0]} = emqtt:subscribe(C2, Topic, qos0),
    {ok, _} = emqtt:publish(C2, Topic, <<"m1">>, qos1),
    receive {publish, #{client_pid := C2,
                        payload := <<"m1">>,
                        qos := 0}} -> ok
    after ?TIMEOUT -> ct:fail("did not receive m1 with QoS 0")
    end,
    %% New connection should be able to unsubscribe.
    ?assertMatch({ok, _, _}, emqtt:unsubscribe(C2, Topic)),
    {ok, _} = emqtt:publish(C2, Topic, <<"m2">>, qos1),
    receive Unexpected -> ct:fail({unexpected, Unexpected})
    after 300 -> ok
    end,

    ok = emqtt:disconnect(C2),
    C3 = connect(ClientId, Config, 0, [{clean_start, true}]),
    ok = emqtt:disconnect(C3),
    eventually(?_assertEqual([], all_connection_pids(Config))).

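%% block verifies that a connection that published while a memory alarm is
%% in effect gets blocked: QoS 1 publishes time out, and the messages are
%% delivered only after the alarm clears.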
block(Config) ->
    Topic = ClientId = atom_to_binary(?FUNCTION_NAME),
    C = connect(ClientId, Config),

    {ok, _, _} = emqtt:subscribe(C, Topic),
    {ok, _} = emqtt:publish(C, Topic, <<"Not blocked yet">>, [{qos, 1}]),

    ok = rpc(Config, vm_memory_monitor, set_vm_memory_high_watermark, [0]),
    %% Let it block
    timer:sleep(100),

    %% Blocked now, but these publishes will still go through once unblocked.
    puback_timeout = publish_qos1_timeout(C, Topic, <<"Now blocked">>, 1000),
    puback_timeout = publish_qos1_timeout(C, Topic, <<"Still blocked">>, 1000),

    %% Unblock
    rpc(Config, vm_memory_monitor, set_vm_memory_high_watermark, [0.6]),
    ok = expect_publishes(C, Topic, [<<"Not blocked yet">>,
                                     <<"Now blocked">>,
                                     <<"Still blocked">>]),
    ok = emqtt:disconnect(C).

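%% block_only_publisher verifies that a memory alarm blocks only those
%% connections that have published; purely consuming or idle connections
%% keep being served.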
block_only_publisher(Config) ->
    Topic = atom_to_binary(?FUNCTION_NAME),

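    %% A short ack timeout lets emqtt calls such as ping/1 fail quickly with
    %% {error, ack_timeout} while their connection is blocked by the alarm.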
    Opts = [{ack_timeout, 1}],
    Con = connect(<<"background-connection">>, Config, Opts),
    Sub = connect(<<"subscriber-connection">>, Config, Opts),
    Pub = connect(<<"publisher-connection">>, Config, Opts),
    PubSub = connect(<<"publisher-and-subscriber-connection">>, Config, Opts),

    {ok, _, [1]} = emqtt:subscribe(Sub, Topic, qos1),
    {ok, _, [1]} = emqtt:subscribe(PubSub, Topic, qos1),
    {ok, _} = emqtt:publish(Pub, Topic, <<"from Pub">>, [{qos, 1}]),
    {ok, _} = emqtt:publish(PubSub, Topic, <<"from PubSub">>, [{qos, 1}]),
    ok = expect_publishes(Sub, Topic, [<<"from Pub">>, <<"from PubSub">>]),
    ok = expect_publishes(PubSub, Topic, [<<"from Pub">>, <<"from PubSub">>]),

    ok = rpc(Config, vm_memory_monitor, set_vm_memory_high_watermark, [0]),
    %% Let it block
    timer:sleep(100),

    %% We expect that the publishing connections are blocked.
    [?assertEqual({error, ack_timeout}, emqtt:ping(Pid)) || Pid <- [Pub, PubSub]],
    %% We expect that the non-publishing connections are not blocked.
    [?assertEqual(pong, emqtt:ping(Pid)) || Pid <- [Con, Sub]],

    %% While the memory alarm is on, let's turn a non-publishing connection
    %% into a publishing connection.
    {ok, _} = emqtt:publish(Con, Topic, <<"from Con 1">>, [{qos, 1}]),
    %% The very first message still goes through.
    ok = expect_publishes(Sub, Topic, [<<"from Con 1">>]),
    %% But now the new publisher should be blocked as well.
    ?assertEqual({error, ack_timeout}, emqtt:ping(Con)),
    ?assertEqual(puback_timeout, publish_qos1_timeout(Con, Topic, <<"from Con 2">>, 500)),
    ?assertEqual(pong, emqtt:ping(Sub)),

    rpc(Config, vm_memory_monitor, set_vm_memory_high_watermark, [0.6]),
    %% Let it unblock
    timer:sleep(100),

    %% All connections are unblocked.
    [?assertEqual(pong, emqtt:ping(Pid)) || Pid <- [Con, Sub, Pub, PubSub]],
    %% The publishing connections should be able to publish again.
    {ok, _} = emqtt:publish(Con, Topic, <<"from Con 3">>, [{qos, 1}]),
    ok = expect_publishes(Sub, Topic, [<<"from Con 2">>, <<"from Con 3">>]),
    ok = expect_publishes(PubSub, Topic, [<<"from Con 1">>, <<"from Con 2">>, <<"from Con 3">>]),

    [ok = emqtt:disconnect(Pid) || Pid <- [Con, Sub, Pub, PubSub]].

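%% clean_session_disconnect_client verifies that the queues backing a clean
%% session (one rabbit_mqtt_qos0_queue and one classic queue) are deleted
%% when the client disconnects.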
clean_session_disconnect_client(Config) ->
    C = connect(?FUNCTION_NAME, Config),
    {ok, _, _} = emqtt:subscribe(C, <<"topic0">>, qos0),
    {ok, _, _} = emqtt:subscribe(C, <<"topic1">>, qos1),
    QsQos0 = rpc(Config, rabbit_amqqueue, list_by_type, [rabbit_mqtt_qos0_queue]),
    QsClassic = rpc(Config, rabbit_amqqueue, list_by_type, [rabbit_classic_queue]),
    ?assertEqual(1, length(QsQos0)),
    ?assertEqual(1, length(QsClassic)),

    ok = emqtt:disconnect(C),
    %% After terminating a clean session, we expect any session state to be cleaned up on the server.
    timer:sleep(200), %% Give some time to clean up exclusive classic queue.
    L = rpc(Config, rabbit_amqqueue, list, []),
    ?assertEqual(0, length(L)).

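%% clean_session_node_restart and clean_session_node_kill verify that clean
%% session state is also removed when the session ends because its node is
%% stopped or killed instead of the client disconnecting.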
clean_session_node_restart(Config) ->
    clean_session_node_down(stop_node, Config).

clean_session_node_kill(Config) ->
    clean_session_node_down(kill_node, Config).

clean_session_node_down(NodeDown, Config) ->
    C = connect(?FUNCTION_NAME, Config),
    {ok, _, _} = emqtt:subscribe(C, <<"topic0">>, qos0),
    {ok, _, _} = emqtt:subscribe(C, <<"topic1">>, qos1),
    QsQos0 = rpc(Config, rabbit_amqqueue, list_by_type, [rabbit_mqtt_qos0_queue]),
    QsClassic = rpc(Config, rabbit_amqqueue, list_by_type, [rabbit_classic_queue]),
    ?assertEqual(1, length(QsQos0)),
    ?assertEqual(1, length(QsClassic)),
    ?assertEqual(2, rpc(Config, rabbit_amqqueue, count, [])),

    unlink(C),
    ok = rabbit_ct_broker_helpers:NodeDown(Config, 0),
    ok = rabbit_ct_broker_helpers:start_node(Config, 0),

    %% After terminating a clean session by a node crash, we expect any session
    %% state to be cleaned up on the server once the server comes back up.
    ?assertEqual(0, rpc(Config, rabbit_amqqueue, count, [])).

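%% rabbit_status_connection_count verifies that MQTT connections are
%% included in the connection count reported by "rabbitmqctl status".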
rabbit_status_connection_count(Config) ->
    _Pid = rabbit_ct_client_helpers:open_connection(Config, 0),
    C = connect(?FUNCTION_NAME, Config),

    {ok, String} = rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, ["status"]),
    ?assertNotEqual(nomatch, string:find(String, "Connection count: 2")),

    ok = emqtt:disconnect(C).

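%% trace verifies that MQTT publishes show up in the Firehose tracer when a
%% queue is bound to the amq.rabbitmq.trace exchange.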
trace(Config) ->
    Server = atom_to_binary(get_node_config(Config, 0, nodename)),
    Topic = Payload = TraceQ = atom_to_binary(?FUNCTION_NAME),
    {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config),
    declare_queue(Ch, TraceQ, []),
    #'queue.bind_ok'{} = amqp_channel:call(
                           Ch, #'queue.bind'{queue = TraceQ,
                                             exchange = <<"amq.rabbitmq.trace">>,
                                             routing_key = <<"#">>}),

    %% We expect traced messages for connections created before and connections
    %% created after tracing is enabled.
    Pub = connect(<<(atom_to_binary(?FUNCTION_NAME))/binary, "_publisher">>, Config),
    {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, ["trace_on"]),
    Sub = connect(<<(atom_to_binary(?FUNCTION_NAME))/binary, "_subscriber">>, Config),

    {ok, _, [0]} = emqtt:subscribe(Sub, Topic, qos0),
    {ok, _} = emqtt:publish(Pub, Topic, Payload, qos1),
    ok = expect_publishes(Sub, Topic, [Payload]),
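    %% Give the trace message a moment to reach the trace queue before
    %% fetching it with basic.get.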
    timer:sleep(10),

    {#'basic.get_ok'{routing_key = <<"publish.amq.topic">>},
     #amqp_msg{props = #'P_basic'{headers = PublishHeaders},
               payload = Payload}} =
        amqp_channel:call(Ch, #'basic.get'{queue = TraceQ}),
    ?assertMatch(#{<<"exchange_name">> := <<"amq.topic">>,
                   <<"routing_keys">> := [Topic],
                   <<"connection">> := <<"127.0.0.1:", _/binary>>,
                   <<"node">> := Server,
                   <<"vhost">> := <<"/">>,
                   <<"channel">> := 0,
                   <<"user">> := <<"guest">>,
It's cleaner to make use of these Annotations when computing the
protocol state instead of relying on the caller (rabbitmq_mqtt_processor)
to compute the protocol state.
* Enforce AMQP 0.9.1 field name length limit
The AMQP 0.9.1 spec prohibits field names longer than 128 characters.
Therefore, when converting AMQP 1.0 message annotations, application
properties or MQTT 5.0 User Property to AMQP 0.9.1 headers, drop any
names longer than 128 characters.
* Fix type specs
Apply feedback from Michael Davis
Co-authored-by: Michael Davis <mcarsondavis@gmail.com>
* Add mc_mqtt unit test suite
Implement mc_mqtt:x_header/2
* Translate indicator that payload is UTF-8 encoded
when converting between MQTT 5.0 and AMQP 1.0
* Translate single amqp-value section from AMQP 1.0 to MQTT
Convert to a text representation, if possible, and indicate to MQTT
client that the payload is UTF-8 encoded. This way, the MQTT client will
be able to parse the payload.
If conversion to text representation is not possible, encode the payload
using the AMQP 1.0 type system and indiate the encoding via Content-Type
message/vnd.rabbitmq.amqp.
This Content-Type is not registered.
Type "message" makes sense since it's a message.
Vendor tree "vnd.rabbitmq.amqp" makes sense since merely subtype "amqp" is not
registered.
* Fix payload conversion
* Translate Response Topic between MQTT and AMQP
Translate MQTT 5.0 Response Topic to AMQP 1.0 reply-to address and vice
versa.
The Response Topic must be a UTF-8 encoded string.
This commit re-uses the already defined RabbitMQ target addresses:
```
"/topic/" RK Publish to amq.topic with routing key RK
"/exchange/" X "/" RK Publish to exchange X with routing key RK
```
By default, the MQTT topic exchange is configure dto be amq.topic using
the 1st target address.
When an operator modifies the mqtt.exchange, the 2nd target address is
used.
* Apply PR feedback
and fix formatting
Co-authored-by: Michael Davis <mcarsondavis@gmail.com>
* tidy up
* Add MQTT message_containers test
* consistent hash exchange: avoid amqp legacy conversion
When hashing on a header value.
* Avoid converting to amqp legacy when using exchange federation
* Fix test flake
* test and dialyzer fixes
* dialyzer fix
* Add MQTT protocol interoperability tests
Test receiving from and sending to MQTT 5.0 and
* AMQP 0.9.1
* AMQP 1.0
* STOMP
* Streams
* Regenerate portions of deps/rabbit/app.bzl with gazelle
I'm not exactly sure how this happened, but gazell seems to have been
run with an older version of the rules_erlang gazelle extension at
some point. This caused generation of a structure that is no longer
used. This commit updates the structure to the current pattern.
* mc: refactoring
* mc_amqpl: handle delivery annotations
Just in case they are included.
Also use iolist_to_iovec to create flat list of binaries when
converting from amqp with amqp encoded payload.
---------
Co-authored-by: David Ansari <david.ansari@gmx.de>
Co-authored-by: Michael Davis <mcarsondavis@gmail.com>
Co-authored-by: Rin Kuryloski <kuryloskip@vmware.com>
2023-08-31 18:27:13 +08:00
|
|
|
<<"properties">> := #{<<"delivery_mode">> := 2},
|
2023-01-25 00:35:00 +08:00
|
|
|
<<"routed_queues">> := [<<"mqtt-subscription-trace_subscriberqos0">>]},
|
|
|
|
rabbit_misc:amqp_table(PublishHeaders)),
|
2023-01-28 02:25:57 +08:00
|
|
|
|
2023-01-25 00:35:00 +08:00
|
|
|
{#'basic.get_ok'{routing_key = <<"deliver.mqtt-subscription-trace_subscriberqos0">>},
|
|
|
|
#amqp_msg{props = #'P_basic'{headers = DeliverHeaders},
|
|
|
|
payload = Payload}} =
|
2023-08-09 19:13:19 +08:00
|
|
|
amqp_channel:call(Ch, #'basic.get'{queue = TraceQ}),
|
2023-01-25 00:35:00 +08:00
|
|
|
?assertMatch(#{<<"exchange_name">> := <<"amq.topic">>,
|
|
|
|
<<"routing_keys">> := [Topic],
|
|
|
|
<<"connection">> := <<"127.0.0.1:", _/binary>>,
|
|
|
|
<<"node">> := Server,
|
|
|
|
<<"vhost">> := <<"/">>,
|
|
|
|
<<"channel">> := 0,
|
|
|
|
<<"user">> := <<"guest">>,
<<"properties">> := #{<<"delivery_mode">> := 2},
|
2023-01-25 00:35:00 +08:00
|
|
|
<<"redelivered">> := 0},
|
|
|
|
rabbit_misc:amqp_table(DeliverHeaders)),
|
|
|
|
|
|
|
|
{ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, ["trace_off"]),
|
|
|
|
{ok, _} = emqtt:publish(Pub, Topic, Payload, qos1),
|
|
|
|
ok = expect_publishes(Sub, Topic, [Payload]),
|
|
|
|
?assertMatch(#'basic.get_empty'{},
|
2023-08-09 19:13:19 +08:00
|
|
|
amqp_channel:call(Ch, #'basic.get'{queue = TraceQ})),
|
2023-01-25 00:35:00 +08:00
|
|
|
|
|
|
|
delete_queue(Ch, TraceQ),
|
2025-03-15 00:20:36 +08:00
|
|
|
[ok = emqtt:disconnect(C) || C <- [Pub, Sub]],
|
|
|
|
ok = rabbit_ct_client_helpers:close_connection_and_channel(Conn, Ch).
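%% The Firehose tracer (enabled via "rabbitmqctl trace_on") publishes a copy of
%% every traced message to the amq.rabbitmq.trace topic exchange. This test
%% checks that the traced copy of a large (> 1 MB) message delivered to an
%% MQTT subscriber contains the complete payload.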
trace_large_message(Config) ->
    TraceQ = <<"trace-queue">>,
    {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config),
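    %% Capture "deliver.*" trace messages by binding a queue to the trace exchange.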
    declare_queue(Ch, TraceQ, []),
    #'queue.bind_ok'{} = amqp_channel:call(
                           Ch, #'queue.bind'{queue = TraceQ,
                                             exchange = <<"amq.rabbitmq.trace">>,
                                             routing_key = <<"deliver.*">>}),

    C = connect(<<"my-client">>, Config),
    {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, ["trace_on"]),
    {ok, _, [0]} = emqtt:subscribe(C, <<"/my/topic">>),

    %% Publish a payload slightly larger than 1 MB (1_000_000 "x" bytes plus a
    %% trailing "y") via AMQP 0.9.1 and expect the MQTT subscriber to receive it.
    Payload0 = binary:copy(<<"x">>, 1_000_000),
    Payload = <<Payload0/binary, "y">>,
    amqp_channel:call(Ch,
                      #'basic.publish'{exchange = <<"amq.topic">>,
                                       routing_key = <<".my.topic">>},
                      #amqp_msg{payload = Payload}),
    ok = expect_publishes(C, <<"/my/topic">>, [Payload]),
    %% Allow a brief moment for the trace message to be routed to TraceQ.
    timer:sleep(10),
    ?assertMatch(
       {#'basic.get_ok'{routing_key = <<"deliver.mqtt-subscription-my-clientqos0">>},
        #amqp_msg{payload = Payload}},
       amqp_channel:call(Ch, #'basic.get'{queue = TraceQ})
      ),

    {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, ["trace_off"]),
    delete_queue(Ch, TraceQ),
    ok = emqtt:disconnect(C),
    ok = rabbit_ct_client_helpers:close_connection_and_channel(Conn, Ch).
|
Message Containers (#5077)
This PR implements an approach for a "protocol (data format) agnostic core" where the format of the message isn't converted at point of reception.
Currently all non AMQP 0.9.1 originating messages are converted into a AMQP 0.9.1 flavoured basic_message record before sent to a queue. If the messages are then consumed by the originating protocol they are converted back from AMQP 0.9.1. For some protocols such as MQTT 3.1 this isn't too expensive as MQTT is mostly a fairly easily mapped subset of AMQP 0.9.1 but for others such as AMQP 1.0 the conversions are awkward and in some cases lossy even if consuming from the originating protocol.
This PR instead wraps all incoming messages in their originating form into a generic, extensible message container type (mc). The container module exposes an API to get common message details such as size and various properties (ttl, priority etc) directly from the source data type. Each protocol needs to implement the mc behaviour such that when a message originating form one protocol is consumed by another protocol we convert it to the target protocol at that point.
The message container also contains annotations, dead letter records and other meta data we need to record during the lifetime of a message. The original protocol message is never modified unless it is consumed.
This includes conversion modules to and from amqp, amqpl (AMQP 0.9.1) and mqtt.
COMMIT HISTORY:
* Refactor away from using the delivery{} record
In many places including exchange types. This should make it
easier to move towards using a message container type instead of
basic_message.
Add mc module and move direct replies outside of exchange
Lots of changes incl classic queues
Implement stream support incl amqp conversions
simplify mc state record
move mc.erl
mc dlx stuff
recent history exchange
Make tracking work
But doesn't take a protocol agnostic approach as we just convert
everything into AMQP legacy and back. Might be good enough for now.
Tracing as a whole may want a bit of a re-vamp at some point.
tidy
make quorum queue peek work by legacy conversion
dead lettering fixes
dead lettering fixes
CMQ fixes
rabbit_trace type fixes
fixes
fix
Fix classic queue props
test assertion fix
feature flag and backwards compat
Enable message_container feature flag in some SUITEs
Dialyzer fixes
fixes
fix
test fixes
Various
Manually update a gazelle generated file
until a gazelle enhancement can be made
https://github.com/rabbitmq/rules_erlang/issues/185
Add message_containers_SUITE to bazel
and regen bazel files with gazelle from rules_erlang@main
Simplify essential proprty access
Such as durable, ttl and priority by extracting them into annotations
at message container init time.
Move type
to remove dependenc on amqp10 stuff in mc.erl
mostly because I don't know how to make bazel do the right thing
add more stuff
Refine routing header stuff
wip
Cosmetics
Do not use "maybe" as type name as "maybe" is a keyword since OTP 25
which makes Erlang LS complain.
* Dedup death queue names
* Fix function clause crashes
Fix failing tests in the MQTT shared_SUITE:
A classic queue message ID can be undefined as set in
https://github.com/rabbitmq/rabbitmq-server/blob/fbe79ff47b4edbc0fd95457e623d6593161ad198/deps/rabbit/src/rabbit_classic_queue_index_v2.erl#L1048
Fix failing tests in the MQTT shared_SUITE-mixed:
When feature flag message_containers is disabled, the
message is not an #mc{} record, but a #basic_message{} record.
* Fix is_utf8_no_null crash
Prior to this commit, the function crashed if invalid UTF-8 was
provided, e.g.:
```
1> rabbit_misc:is_valid_shortstr(<<"😇"/utf16>>).
** exception error: no function clause matching rabbit_misc:is_utf8_no_null(<<216,61,222,7>>) (rabbit_misc.erl, line 1481)
```
* Implement mqtt mc behaviour
For now via amqp translation.
This is still work in progress, but the following SUITEs pass:
```
make -C deps/rabbitmq_mqtt ct-shared t=[mqtt,v5,cluster_size_1] FULL=1
make -C deps/rabbitmq_mqtt ct-v5 t=[mqtt,cluster_size_1] FULL=1
```
* Shorten mc file names
Module name length matters because for each persistent message the #mc{}
record is persisted to disk.
```
1> iolist_size(term_to_iovec({mc, rabbit_mc_amqp_legacy})).
30
2> iolist_size(term_to_iovec({mc, mc_amqpl})).
17
```
This commit renames the mc modules:
```
ag -l rabbit_mc_amqp_legacy | xargs sed -i 's/rabbit_mc_amqp_legacy/mc_amqpl/g'
ag -l rabbit_mc_amqp | xargs sed -i 's/rabbit_mc_amqp/mc_amqp/g'
ag -l rabbit_mqtt_mc | xargs sed -i 's/rabbit_mqtt_mc/mc_mqtt/g'
```
* mc: make deaths an annotation + fixes
* Fix mc_mqtt protocol_state callback
* Fix test will_delay_node_restart
```
make -C deps/rabbitmq_mqtt ct-v5 t=[mqtt,cluster_size_3]:will_delay_node_restart FULL=1
```
* Bazel run gazelle
* mix format rabbitmqctl.ex
* Ensure ttl annotation is reflected in amqp legacy protocol state
* Fix id access in message store
* Fix rabbit_message_interceptor_SUITE
* dialyzer fixes
* Fix rabbit:rabbit_message_interceptor_SUITE-mixed
set_annotation/3 should not result in duplicate keys
* Fix MQTT shared_SUITE-mixed
Up to 3.12 non-MQTT publishes were always QoS 1 regardless of delivery_mode.
https://github.com/rabbitmq/rabbitmq-server/blob/75a953ce286a10aca910c098805a4f545989af38/deps/rabbitmq_mqtt/src/rabbit_mqtt_processor.erl#L2075-L2076
From now on, non-MQTT publishes are QoS 1 if durable.
This makes more sense.
The MQTT plugin must send a #basic_message{} to an old node that does
not understand message containers.
* Field content of 'v1_0.data' can be binary
Fix
```
bazel test //deps/rabbitmq_mqtt:shared_SUITE-mixed \
--test_env FOCUS="-group [mqtt,v4,cluster_size_1] -case trace" \
-t- --test_sharding_strategy=disabled
```
* Remove route/2 and implement route/3 for all exchange types.
This removes the route/2 callback from rabbit_exchange_type and
makes route/3 mandatory instead. This is a breaking change and
will require all implementations of exchange types to update their
code; however, this is necessary anyway for them to correctly handle
the mc type.
stream filtering fixes
* Translate directly from MQTT to AMQP 0.9.1
* handle undecoded properties in mc_compat
amqpl: put clause in right order
recover death details from amqp data
* Replace callback init_amqp with convert_from
* Fix return value of lists:keyfind/3
* Translate directly from AMQP 0.9.1 to MQTT
* Fix MQTT payload size
The MQTT payload can be a list when converted from AMQP 0.9.1, for example.
First conversion tests,
plus some other conversion-related fixes.
bazel
bazel
translate amqp 1.0 null to undefined
mc: property/2 and correlation_id/message_id return type-tagged values
to ensure we can support a variety of types better.
The type tags are AMQP 1.0 flavoured.
fix death recovery
mc_mqtt: impl new api
Add callbacks to allow protocols to compact data before storage,
and to make it readable if it needs to be queried repeatedly.
bazel fix
* more decoding
* tracking mixed versions compat
* mc: flip default of `durable` annotation to save some data.
Assuming most messages are durable and that in-memory messages suffer less
from persistence overhead, it makes sense for a non-existent `durable`
annotation to mean durable=true.
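A sketch of the resulting lookup (the accessor name is made up for illustration):
```
%% Absence of the `durable` annotation means durable = true, so the common
%% (durable) case stores nothing at all.
is_durable(Annotations) when is_map(Annotations) ->
    maps:get(durable, Annotations, true).
```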
* mc conversion tests and tidy up
* mc make x_header unstrict again
* amqpl: death record fixes
* bazel
* amqp -> amqpl conversion test
* Fix crash in mc_amqp:size/1
Body can be a single amqp-value section (instead of
being a list) as shown by test
```
make -C deps/rabbitmq_amqp1_0/ ct-system t=java
```
on branch native-amqp.
* Fix crash in lists:flatten/1
Data can be a single amqp-value section (instead of
being a list) as shown by test
```
make -C deps/rabbitmq_amqp1_0 ct-system t=dotnet:roundtrip_to_amqp_091
```
on branch native-amqp.
* Fix crash in rabbit_writer
Running test
```
make -C deps/rabbitmq_amqp1_0 ct-system t=dotnet:roundtrip_to_amqp_091
```
on branch native-amqp resulted in the following crash:
```
crasher:
initial call: rabbit_writer:enter_mainloop/2
pid: <0.711.0>
registered_name: []
exception error: bad argument
in function size/1
called as size([<<0>>,<<"Sw">>,[<<160,2>>,<<"hi">>]])
*** argument 1: not tuple or binary
in call from rabbit_binary_generator:build_content_frames/7 (rabbit_binary_generator.erl, line 89)
in call from rabbit_binary_generator:build_simple_content_frames/4 (rabbit_binary_generator.erl, line 61)
in call from rabbit_writer:assemble_frames/5 (rabbit_writer.erl, line 334)
in call from rabbit_writer:internal_send_command_async/3 (rabbit_writer.erl, line 365)
in call from rabbit_writer:handle_message/2 (rabbit_writer.erl, line 265)
in call from rabbit_writer:handle_message/3 (rabbit_writer.erl, line 232)
in call from rabbit_writer:mainloop1/2 (rabbit_writer.erl, line 223)
```
because #content.payload_fragments_rev is currently supposed to
be a flat list of binaries instead of being an iolist.
This commit fixes this crash inefficiently by calling
iolist_to_binary/1. A better solution would be to allow AMQP legacy's #content.payload_fragments_rev
to be an iolist.
* Add accidentally deleted line back
* mc: optimise mc_amqp internal format
By removing the outer records for message and delivery annotations
as well as application properties and footers.
* mc: optimise mc_amqp map_add by using upsert
* mc: refactoring and bug fixes
* mc_SUITE routing header assertions
* mc remove serialize/1 callback as only used by amqp
* mc_amqp: avoid returning a nested list from protocol_state
* test and bug fix
* move infer_type to mc_util
* mc fixes and additional assertions
* Support headers exchange routing for MQTT messages
When a headers exchange is bound to the MQTT topic exchange, routing
will be performed based on both the MQTT topic (by the topic exchange) and
the MQTT User Property (by the headers exchange).
This combines the best of both MQTT 5.0 and AMQP 0.9.1 and
enables powerful routing topologies.
When the User Property contains the same name multiple times, only the
last name (and value) will be considered by the headers exchange.
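For illustration, such a topology could be set up over AMQP 0.9.1 roughly as follows, assuming a channel Ch as in the test cases below; the exchange and queue names as well as the User Property name are made up:
```
%% Chain a headers exchange behind the MQTT topic exchange: the topic
%% exchange matches on the MQTT topic, the headers exchange then matches
%% on the MQTT User Property (translated to AMQP 0.9.1 headers).
#'exchange.declare_ok'{} = amqp_channel:call(
                             Ch, #'exchange.declare'{exchange = <<"by-user-property">>,
                                                     type = <<"headers">>}),
#'exchange.bind_ok'{} = amqp_channel:call(
                          Ch, #'exchange.bind'{destination = <<"by-user-property">>,
                                               source = <<"amq.topic">>,
                                               routing_key = <<"sensors.#">>}),
#'queue.bind_ok'{} = amqp_channel:call(
                       Ch, #'queue.bind'{queue = <<"q-eu">>,
                                         exchange = <<"by-user-property">>,
                                         arguments = [{<<"x-match">>, longstr, <<"all">>},
                                                      {<<"region">>, longstr, <<"eu">>}]}),
```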
* Fix crash when sending from stream to amqpl
When publishing a message via the stream protocol and consuming it via
AMQP 0.9.1, the following crash occurred prior to this commit:
```
crasher:
initial call: rabbit_channel:init/1
pid: <0.818.0>
registered_name: []
exception exit: {{badmatch,undefined},
[{rabbit_channel,handle_deliver0,4,
[{file,"rabbit_channel.erl"},
{line,2728}]},
{lists,foldl,3,[{file,"lists.erl"},{line,1594}]},
{rabbit_channel,handle_cast,2,
[{file,"rabbit_channel.erl"},
{line,728}]},
{gen_server2,handle_msg,2,
[{file,"gen_server2.erl"},{line,1056}]},
{proc_lib,wake_up,3,
[{file,"proc_lib.erl"},{line,251}]}]}
```
This commit first gives `mc:init/3` the chance to set exchange and
routing_keys annotations.
If not set, `rabbit_stream_queue` will set these annotations assuming
the message was originally published via the stream protocol.
* Support consistent hash exchange routing for MQTT 5.0
When a consistent hash exchange is bound to the MQTT topic exchange,
MQTT 5.0 messages can be routed to queues consistently based on the
Correlation-Data in the PUBLISH packet.
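A hypothetical setup sketch; it assumes the consistent hash exchange's `hash-property` argument set to correlation_id, which carries the MQTT 5.0 Correlation-Data after conversion (exchange and queue names are made up):
```
%% Chain a consistent hash exchange behind the MQTT topic exchange so that
%% messages with equal Correlation-Data always land in the same queue.
#'exchange.declare_ok'{} = amqp_channel:call(
                             Ch, #'exchange.declare'{
                                    exchange = <<"by-correlation">>,
                                    type = <<"x-consistent-hash">>,
                                    arguments = [{<<"hash-property">>, longstr,
                                                  <<"correlation_id">>}]}),
#'exchange.bind_ok'{} = amqp_channel:call(
                          Ch, #'exchange.bind'{destination = <<"by-correlation">>,
                                               source = <<"amq.topic">>,
                                               routing_key = <<"jobs.#">>}),
%% The numeric binding key of each bound queue acts as its weight in the ring.
#'queue.bind_ok'{} = amqp_channel:call(
                       Ch, #'queue.bind'{queue = <<"q1">>,
                                         exchange = <<"by-correlation">>,
                                         routing_key = <<"1">>}),
```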
* Convert MQTT 5.0 User Property
* to AMQP 0.9.1 headers
* from AMQP 0.9.1 headers
* to AMQP 1.0 application properties and message annotations
* from AMQP 1.0 application properties and message annotations
* Make use of Annotations in mc_mqtt:protocol_state/2
mc_mqtt:protocol_state/2 takes Annotations as a parameter.
It's cleaner to make use of these Annotations when computing the
protocol state instead of relying on the caller (rabbitmq_mqtt_processor)
to compute the protocol state.
* Enforce AMQP 0.9.1 field name length limit
The AMQP 0.9.1 spec prohibits field names longer than 128 characters.
Therefore, when converting AMQP 1.0 message annotations, application
properties or MQTT 5.0 User Property to AMQP 0.9.1 headers, drop any
names longer than 128 characters.
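A sketch of such a filter (the helper name is made up):
```
%% Drop any header whose name exceeds the AMQP 0.9.1 limit of 128 characters.
drop_long_header_names(Headers) ->
    [H || H = {Name, _Type, _Value} <- Headers, byte_size(Name) =< 128].
```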
* Fix type specs
Apply feedback from Michael Davis
Co-authored-by: Michael Davis <mcarsondavis@gmail.com>
* Add mc_mqtt unit test suite
Implement mc_mqtt:x_header/2
* Translate indicator that payload is UTF-8 encoded
when converting between MQTT 5.0 and AMQP 1.0
* Translate single amqp-value section from AMQP 1.0 to MQTT
Convert to a text representation, if possible, and indicate to MQTT
client that the payload is UTF-8 encoded. This way, the MQTT client will
be able to parse the payload.
If conversion to a text representation is not possible, encode the payload
using the AMQP 1.0 type system and indicate the encoding via the Content-Type
message/vnd.rabbitmq.amqp.
This Content-Type is not registered.
Type "message" makes sense since it's a message.
Vendor tree "vnd.rabbitmq.amqp" makes sense since the bare subtype "amqp" is not
registered.
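A sketch of that conversion decision; try_to_text/1 is a hypothetical helper shown with a trivial body, while amqp10_framing:encode_bin/1 is the encoder this sketch assumes for the fallback:
```
%% Convert a single amqp-value section to an MQTT payload: prefer a text
%% representation plus the UTF-8 Payload Format Indicator, otherwise fall
%% back to the AMQP 1.0 binary encoding with the vendor Content-Type.
amqp_value_to_mqtt_payload(Val) ->
    case try_to_text(Val) of
        {ok, Text} ->
            {Text, #{'Payload-Format-Indicator' => 1}};
        error ->
            {iolist_to_binary(amqp10_framing:encode_bin(Val)),
             #{'Content-Type' => <<"message/vnd.rabbitmq.amqp">>}}
    end.

try_to_text(V) when is_binary(V) -> {ok, V};
try_to_text(V) when is_integer(V) -> {ok, integer_to_binary(V)};
try_to_text(_) -> error.
```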
* Fix payload conversion
* Translate Response Topic between MQTT and AMQP
Translate MQTT 5.0 Response Topic to AMQP 1.0 reply-to address and vice
versa.
The Response Topic must be a UTF-8 encoded string.
This commit re-uses the already defined RabbitMQ target addresses:
```
"/topic/" RK Publish to amq.topic with routing key RK
"/exchange/" X "/" RK Publish to exchange X with routing key RK
```
By default, the MQTT topic exchange is configured to be amq.topic, using
the 1st target address.
When an operator modifies the mqtt.exchange, the 2nd target address is
used.
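A sketch of the resulting mapping (helper names made up; the `/`-to-`.` separator translation follows the address formats above):
```
%% Map an MQTT 5.0 Response Topic to an AMQP 1.0 reply-to address,
%% depending on the configured MQTT topic exchange.
response_topic_to_reply_to(ResponseTopic, <<"amq.topic">>) ->
    <<"/topic/", (to_amqp_key(ResponseTopic))/binary>>;
response_topic_to_reply_to(ResponseTopic, Exchange) ->
    <<"/exchange/", Exchange/binary, "/", (to_amqp_key(ResponseTopic))/binary>>.

to_amqp_key(Topic) ->
    binary:replace(Topic, <<"/">>, <<".">>, [global]).
```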
* Apply PR feedback
and fix formatting
Co-authored-by: Michael Davis <mcarsondavis@gmail.com>
* tidy up
* Add MQTT message_containers test
* consistent hash exchange: avoid amqp legacy conversion
When hashing on a header value.
* Avoid converting to amqp legacy when using exchange federation
* Fix test flake
* test and dialyzer fixes
* dialyzer fix
* Add MQTT protocol interoperability tests
Test receiving from and sending to MQTT 5.0 and
* AMQP 0.9.1
* AMQP 1.0
* STOMP
* Streams
* Regenerate portions of deps/rabbit/app.bzl with gazelle
I'm not exactly sure how this happened, but gazelle seems to have been
run with an older version of the rules_erlang gazelle extension at
some point. This caused generation of a structure that is no longer
used. This commit updates the structure to the current pattern.
* mc: refactoring
* mc_amqpl: handle delivery annotations
Just in case they are included.
Also use iolist_to_iovec to create a flat list of binaries when
converting from amqp with an amqp-encoded payload.
---------
Co-authored-by: David Ansari <david.ansari@gmx.de>
Co-authored-by: Michael Davis <mcarsondavis@gmail.com>
Co-authored-by: Rin Kuryloski <kuryloskip@vmware.com>
max_packet_size_unauthenticated(Config) ->
    ClientId = ?FUNCTION_NAME,
    Opts = [{will_topic, <<"will/topic">>}],
    {C1, Connect} = util:start_client(
                      ClientId, Config, 0,
                      [{will_payload, binary:copy(<<"a">>, 64_000)} | Opts]),
    ?assertMatch({ok, _}, Connect(C1)),
    ok = emqtt:disconnect(C1),

    Key = mqtt_max_packet_size_unauthenticated,
    OldMaxSize = rpc(Config, persistent_term, get, [Key]),
    MaxSize = 500,
    ok = rpc(Config, persistent_term, put, [Key, MaxSize]),

    {C2, Connect} = util:start_client(
                      ClientId, Config, 0,
                      [{will_payload, binary:copy(<<"b">>, MaxSize + 1)} | Opts]),
    true = unlink(C2),
    ?assertMatch({error, _}, Connect(C2)),

    {C3, Connect} = util:start_client(
                      ClientId, Config, 0,
                      [{will_payload, binary:copy(<<"c">>, round(MaxSize / 2))} | Opts]),
    ?assertMatch({ok, _}, Connect(C3)),
    ok = emqtt:disconnect(C3),

    ok = rpc(Config, persistent_term, put, [Key, OldMaxSize]).

max_packet_size_authenticated(Config) ->
    Topic = ClientId = atom_to_binary(?FUNCTION_NAME),
    Key = mqtt_max_packet_size_authenticated,
    OldMaxSize = rpc(Config, persistent_term, get, [Key]),
    MaxSize = 500,
    ok = rpc(Config, persistent_term, put, [Key, MaxSize]),

    {C, Connect} = util:start_client(ClientId, Config, 0, []),
    {ok, ConnAckProps} = Connect(C),
    process_flag(trap_exit, true),
    ok = emqtt:publish(C, Topic, binary:copy(<<"x">>, MaxSize + 1), qos0),
    await_exit(C),
    case ?config(mqtt_version, Config) of
        v3 -> ok;
        v4 -> ok;
        v5 -> ?assertMatch(#{'Maximum-Packet-Size' := MaxSize}, ConnAckProps),
              receive {disconnected, _ReasonCodePacketTooLarge = 149, _Props} -> ok
              after ?TIMEOUT -> ct:fail("missing DISCONNECT packet from server")
              end
    end,
    ok = rpc(Config, persistent_term, put, [Key, OldMaxSize]).

%% Test that the per vhost default queue type introduced in
%% https://github.com/rabbitmq/rabbitmq-server/pull/5305
%% does not apply to queues created for MQTT connections
%% because having millions of quorum queues is too expensive.
default_queue_type(Config) ->
    Server = get_node_config(Config, 0, nodename),
    QName = Vhost = ClientId = Topic = atom_to_binary(?FUNCTION_NAME),
    ok = erpc:call(Server, rabbit_vhost, add, [Vhost,
                                               #{default_queue_type => <<"quorum">>},
                                               <<"acting-user">>]),
    ok = rabbit_ct_broker_helpers:set_full_permissions(Config, <<"guest">>, Vhost),

    ?assertEqual([], rpc(Config, rabbit_amqqueue, list, [])),
    %% Sanity check that the configured default queue type works with AMQP 0.9.1.
    Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, Server, Vhost),
    {ok, Ch} = amqp_connection:open_channel(Conn),
    declare_queue(Ch, QName, []),
    QuorumQueues = rpc(Config, rabbit_amqqueue, list_by_type, [rabbit_quorum_queue]),
    ?assertEqual(1, length(QuorumQueues)),
    delete_queue(Ch, QName),
    ok = rabbit_ct_client_helpers:close_connection_and_channel(Conn, Ch),

    %% Test that the configured default queue type does not apply to MQTT.
    Creds = [{username, <<Vhost/binary, ":guest">>},
             {password, <<"guest">>}],
    C1 = connect(ClientId, Config, Creds ++ non_clean_sess_opts()),
    {ok, _, [1]} = emqtt:subscribe(C1, Topic, qos1),
    ClassicQueues = rpc(Config, rabbit_amqqueue, list_by_type, [rabbit_classic_queue]),
    ?assertEqual(1, length(ClassicQueues)),

    ok = emqtt:disconnect(C1),
    C2 = connect(ClientId, Config, [{clean_start, true} | Creds]),
    ok = emqtt:disconnect(C2),
    ok = rabbit_ct_broker_helpers:delete_vhost(Config, Vhost).

message_interceptors(Config) ->
    ok = rpc(Config, persistent_term, put,
             [message_interceptors,
              [
               {rabbit_mqtt_msg_interceptor_client_id, #{}},
               {rabbit_msg_interceptor_timestamp, #{overwrite => false,
                                                    incoming => true,
                                                    outgoing => true}}
              ]]),
    {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config),
    Payload = Topic = atom_to_binary(?FUNCTION_NAME),
    ClientId = <<"🆔"/utf8>>,
    CQName = <<"my classic queue">>,
    Stream = <<"my stream">>,
    declare_queue(Ch, CQName, [{<<"x-queue-type">>, longstr, <<"classic">>}]),
    declare_queue(Ch, Stream, [{<<"x-queue-type">>, longstr, <<"stream">>}]),
    bind(Ch, CQName, Topic),
    bind(Ch, Stream, Topic),
    C = connect(ClientId, Config),

    NowSecs = os:system_time(second),
    NowMillis = os:system_time(millisecond),
    {ok, _} = emqtt:publish(C, Topic, Payload, qos1),

    {#'basic.get_ok'{},
     #amqp_msg{payload = Payload,
               props = #'P_basic'{
                          timestamp = Secs,
                          headers = Headers
                         }}
    } = amqp_channel:call(Ch, #'basic.get'{queue = CQName}),

    {_, long, ReceivedTs} = lists:keyfind(<<"timestamp_in_ms">>, 1, Headers),
    ?assert(Secs < NowSecs + 9),
    ?assert(Secs > NowSecs - 9),
    ?assert(ReceivedTs < NowMillis + 9000),
    ?assert(ReceivedTs > NowMillis - 9000),
    {_, long, SentTs} = lists:keyfind(<<"x-opt-rabbitmq-sent-time">>, 1, Headers),
    ?assert(SentTs < NowMillis + 9000),
    ?assert(SentTs > NowMillis - 9000),

    ?assertEqual({<<"x-opt-mqtt-client-id">>, longstr, ClientId},
                 lists:keyfind(<<"x-opt-mqtt-client-id">>, 1, Headers)),

    #'basic.qos_ok'{} = amqp_channel:call(Ch, #'basic.qos'{prefetch_count = 1}),
    CTag = <<"my ctag">>,
    #'basic.consume_ok'{} = amqp_channel:subscribe(
                              Ch,
                              #'basic.consume'{
                                 queue = Stream,
                                 consumer_tag = CTag,
                                 arguments = [{<<"x-stream-offset">>, longstr, <<"first">>}]},
                              self()),
    receive {#'basic.deliver'{consumer_tag = CTag},
             #amqp_msg{payload = Payload,
                       props = #'P_basic'{
                                  headers = [{<<"timestamp_in_ms">>, long, ReceivedTs} | XHeaders]
                                 }}} ->
                ?assertEqual({<<"x-opt-mqtt-client-id">>, longstr, ClientId},
                             lists:keyfind(<<"x-opt-mqtt-client-id">>, 1, XHeaders)),

                {_, long, SentTs1} = lists:keyfind(<<"x-opt-rabbitmq-sent-time">>, 1, XHeaders),
                ?assert(SentTs1 < NowMillis + 9000),
                ?assert(SentTs1 > NowMillis - 9000)
    after ?TIMEOUT -> ct:fail(missing_deliver)
    end,

    delete_queue(Ch, Stream),
    delete_queue(Ch, CQName),
    ok = rpc(Config, persistent_term, put, [message_interceptors, []]),
    ok = emqtt:disconnect(C),
    ok = rabbit_ct_client_helpers:close_connection_and_channel(Conn, Ch).

Move plugin rabbitmq-message-timestamp to the core
As reported in https://groups.google.com/g/rabbitmq-users/c/x8ACs4dBlkI/,
plugins that implement rabbit_channel_interceptor break with
Native MQTT in 3.12 because Native MQTT does not use rabbit_channel anymore.
Specifically, these plugins no longer work in 3.12 when sending a message
from an MQTT publisher to an AMQP 0.9.1 consumer.
Two of these plugins are
https://github.com/rabbitmq/rabbitmq-message-timestamp
and
https://github.com/rabbitmq/rabbitmq-routing-node-stamp
This commit moves both plugins into rabbitmq-server.
Therefore, these plugins are deprecated starting in 3.12.
Instead of using these plugins, the user gets the same behaviour by
configuring rabbitmq.conf as follows:
```
incoming_message_interceptors.set_header_timestamp.overwrite = false
incoming_message_interceptors.set_header_routing_node.overwrite = false
```
While the two plugins could not be used together, this commit
allows setting both headers.
We name the top-level configuration key `incoming_message_interceptors`
because only incoming messages are intercepted.
Currently, only `set_header_timestamp` and `set_header_routing_node` are
supported. (We might support more in the future.)
Both can set `overwrite` to `false` or `true`.
The meaning of `overwrite` is the same as documented in
https://github.com/rabbitmq/rabbitmq-message-timestamp#always-overwrite-timestamps,
i.e. whether headers should be overwritten if they are already present
in the message.
Both `set_header_timestamp` and `set_header_routing_node` behave exactly
like the plugins `rabbitmq-message-timestamp` and `rabbitmq-routing-node-stamp`,
respectively.
Upon node boot, the configuration is put into persistent_term so as not to
cause any performance penalty in the default case where these settings
are disabled.
The channel and MQTT connection processes will intercept incoming messages
and - if configured - add the desired AMQP 0.9.1 headers.
For now, this allows using Native MQTT in 3.12 with the old plugins'
behaviour.
In the future, once "message containers" are implemented,
we can think about more generic message interceptors where plugins can be
written to modify arbitrary headers or message contents for various protocols.
Likewise, in the future, once MQTT 5.0 is implemented, we can think
about an MQTT connection interceptor, which could function similarly to a
`rabbit_channel_interceptor`, allowing any MQTT packet to be modified.
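A sketch of how such interceptors might be applied on reception, assuming the `{Module, Config}` list shape that the message_interceptors test case above puts into persistent_term (the `intercept/2` callback name is made up):
```
%% Fold the configured interceptors over an incoming message; the disabled
%% case costs only a single persistent_term lookup returning [].
intercept_incoming(Msg) ->
    lists:foldl(fun({Mod, Config}, Acc) -> Mod:intercept(Acc, Config) end,
                Msg,
                persistent_term:get(message_interceptors, [])).
```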
%% This test makes sure that a retained message that got written in 3.12 or earlier
%% can be consumed in 3.13 or later.
retained_message_conversion(Config) ->
    Topic = <<"a/b">>,
    Payload = <<"my retained msg">>,
    OldMqttMsgFormat = {mqtt_msg, _Retain = true, _QoS = 1, Topic, _Dup = false, _PktId = 1, Payload},
    RetainerPid = rpc(Config, rabbit_mqtt_retainer_sup, start_child_for_vhost, [<<"/">>]),
    {rabbit_mqtt_retainer, StoreState, _} = sys:get_state(RetainerPid),
    ok = rpc(Config, rabbit_mqtt_retained_msg_store, insert, [Topic, OldMqttMsgFormat, StoreState]),

    C = connect(?FUNCTION_NAME, Config),
    {ok, _, [1]} = emqtt:subscribe(C, Topic, qos1),
    receive {publish, #{client_pid := C,
                        dup := false,
                        qos := 1,
                        retain := true,
                        topic := Topic,
                        payload := Payload}} -> ok
    after ?TIMEOUT -> ct:fail("missing retained message")
    end,
    ok = emqtt:publish(C, Topic, <<>>, [{retain, true}]),
    ok = emqtt:disconnect(C).

%% Test that the server can handle UTF-8 encoded strings.
utf8(Config) ->
    C = connect(?FUNCTION_NAME, Config),
    % "The Topic Name MUST be present as the first field in the PUBLISH Packet Variable header.
    % It MUST be a UTF-8 encoded string [MQTT-3.3.2-1] as defined in section 1.5.3."
    Topic = <<"うさぎ"/utf8>>, %% Rabbit in Japanese
    {ok, _, [1]} = emqtt:subscribe(C, Topic, qos1),
    {ok, _} = emqtt:publish(C, Topic, <<"msg">>, qos1),
    ok = expect_publishes(C, Topic, [<<"msg">>]),
    ok = emqtt:disconnect(C).

Support MQTT 5.0 features No Local, RAP, Subscription IDs
Support subscription options "No Local" and "Retain As Published"
as well as Subscription Identifiers.
All three MQTT 5.0 features can be set on a per-subscription basis.
Due to wildcards in topic filters, multiple subscriptions
can match a given topic. Therefore, to implement Retain As Published and
Subscription Identifiers, the destination MQTT connection process needs
to know which subscription(s) caused it to receive the message.
There are a few ways this could be implemented:
1. The destination MQTT connection process is aware of all its
subscriptions. Whenever it receives a message, it can match the
message's routing key / topic against all its known topic filters.
However, iteratively matching the routing key against all topic
filters for every received message can become very expensive in the
worst case when the MQTT client creates many subscriptions containing
wildcards. This could be the case for an MQTT client that acts as a
bridge or proxy or dispatcher: it could subscribe via a wildcard for
each of its own clients.
2. Instead of iteratively matching the topic of the received message
against all topic filters that contain wildcards, a better approach
would be for every MQTT subscriber connection process to maintain a
local trie data structure (similar to how topic exchanges are
implemented) and therefore perform matching more efficiently.
However, this does not sound optimal either, because routing is
effectively performed twice: in the topic exchange and again against
a much smaller trie in each destination connection process.
3. Given that the topic exchange already performs routing, a much more
sensible way would be to send the matched binding key(s) to the
destination MQTT connection process. A subscription (topic filter)
maps to a binding key in AMQP 0.9.1 routing. Therefore, for the first
time in RabbitMQ, the routing function should not only output a list
of unique destination queues, but also the binding keys (subscriptions)
that caused the message to be routed to the destination queue.
This commit therefore implements the 3rd approach.
The downside of the 3rd approach is that it requires API changes to the
routing function and topic exchange.
Specifically, this commit adds a new function rabbit_exchange:route/3
that accepts a list of routing options. If that list contains version 2,
the caller of the routing function knows how to handle the return value
that could also contain binding keys. (See the sketch after this description.)
This commit allows an MQTT connection process, the channel process, and
at-most-once dead lettering to handle binding keys. Binding keys are
included as AMQP 0.9.1 headers in the basic message.
Therefore, whenever a message is sent from an MQTT client or AMQP 0.9.1
client or AMQP 1.0 client or STOMP client, the MQTT receiver will know
the subscription identifier that caused the message to be received.
Note that due to the low number of allowed wildcard characters (# and
+), the cardinality of matched binding keys shouldn't be high, even if
the topic contains for example 3 levels and the message is sent to for
example 5 million destination queues. In other words, sending multiple
distinct basic messages to the destinations shouldn't hurt the delegate
optimisation too much. The delegate optimisation implemented for classic
queues and rabbit_mqtt_qos0_queue(s) still takes place for all basic
messages that contain the same set of matched binding keys.
The topic exchange returns all matched binding keys by remembering the
edges walked down to the leaves. As an optimisation, binding keys are
returned only for MQTT queues. This does add a small dependency
from app rabbit to app rabbitmq_mqtt, which is not optimal. However, this
dependency should be simple to remove when omitting this optimisation.
Another important feature of this commit is persisting subscription
options and subscription identifiers, because they are part of the
MQTT 5.0 session state.
In MQTT v3 and v4, the only subscription information that was part of
the session state was the topic filter and the QoS level.
Both were implicitly stored in the form of bindings:
the topic filter as the binding key and the QoS level as the destination
queue name of the binding.
For MQTT v5 we need to persist more subscription information.
From a domain perspective, it makes sense to store subscription options
as part of subscriptions, i.e. bindings, even though they are currently
not used in routing.
Therefore, this commit stores subscription options as binding arguments.
Storing subscription options as binding arguments comes in turn with
new challenges: how to handle mixed-version clusters and upgrading an
MQTT session from v3 or v4 to v5?
Imagine an MQTT client connects via v5 with Session Expiry Interval > 0
to a new node in a mixed-version cluster, creates a subscription,
disconnects, and subsequently connects via v3 to an old node. The
client should continue to receive messages.
To simplify such edge cases, this commit introduces a new feature flag
called mqtt_v5. If mqtt_v5 is disabled, clients cannot connect to
RabbitMQ via MQTT 5.0.
This still doesn't entirely solve the problem of MQTT session upgrades
(v4 to v5 client) or session downgrades (v5 to v4 client).
Ideally, once mqtt_v5 is enabled, all MQTT bindings contain non-empty binding
arguments. However, this would require a feature flag migration function
to modify all MQTT bindings. To be more precise, all MQTT bindings would need
to be deleted and added because the binding argument is part of the
Mnesia table key.
Since feature flag migration functions are non-trivial to implement in
RabbitMQ (they can run on every node multiple times and concurrently),
this commit takes a simpler approach:
All v3 / v4 sessions keep the empty binding argument [].
All v5 sessions use the new binding argument [#mqtt_subscription_opts{}].
This requires only handling a session upgrade / downgrade by
creating a binding (with the new binding arg) and deleting the old
binding (with the old binding arg) when processing the CONNECT packet.
Note that such session upgrades or downgrades should be rather rare in
practice, so these binding transactions shouldn't hurt performance.
The No Local option is implemented within the MQTT publishing connection
process: the message is not sent to the MQTT destination if the
destination queue name matches the current MQTT client ID and the
message was routed due to a subscription that has the No Local flag set.
This avoids unnecessary traffic on the MQTT queue.
The alternative would have been for the "receiving side" (same process)
to filter the message out - which would have been more consistent with how
Retain As Published and Subscription Identifiers are implemented, but
would have caused unnecessary load on the MQTT queue.
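A rough sketch of a caller opting into the version-2 return format; the `v2` option atom and the shape of the return value are assumptions based on the description above, not the exact API:
```
%% Normalise routing results to {QueueName, MatchedBindingKeys} pairs;
%% only MQTT destinations are expected to carry binding keys.
route_with_binding_keys(Exchange, Msg) ->
    lists:map(fun({QName, BindingKeys}) -> {QName, BindingKeys};
                 (QName)                 -> {QName, []}
              end,
              rabbit_exchange:route(Exchange, Msg, [v2])).
```
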
bind_exchange_to_exchange(Config) ->
    SourceX = <<"amq.topic">>,
    DestinationX = <<"destination">>,
    Q = <<"q">>,
    {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config),
    #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DestinationX,
                                                                         durable = true,
                                                                         auto_delete = true}),
    #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = Q,
                                                                   durable = true}),
    #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{exchange = DestinationX,
                                                             queue = Q,
                                                             routing_key = <<"a.b">>}),
    #'exchange.bind_ok'{} = amqp_channel:call(Ch, #'exchange.bind'{destination = DestinationX,
                                                                   source = SourceX,
                                                                   routing_key = <<"*.b">>}),
    C = connect(?FUNCTION_NAME, Config),
    %% Message should be routed as follows: SourceX -> DestinationX -> Q
    {ok, _} = emqtt:publish(C, <<"a/b">>, <<"msg">>, qos1),
    eventually(?_assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = <<"msg">>}},
                             amqp_channel:call(Ch, #'basic.get'{queue = Q}))),
    #'queue.delete_ok'{message_count = 0} = amqp_channel:call(Ch, #'queue.delete'{queue = Q}),
    ok = emqtt:disconnect(C),
    ok = rabbit_ct_client_helpers:close_connection_and_channel(Conn, Ch).

Return matched binding keys faster
For MQTT 5.0 destination queues, the topic exchange has to return not only
the destination queue names but also the matched binding keys.
This is needed to implement the MQTT 5.0 subscription options No Local,
Retain As Published, and Subscription Identifiers.
Prior to this commit, as the trie was walked down, we remembered the
edges walked and assembled the final binding key with
list_to_binary/1.
list_to_binary/1 is very expensive with long lists (long topic names),
even in OTP 26.
The CPU flame graph showed that ~3% of CPU usage was spent in
list_to_binary/1 alone.
Unfortunately and unnecessarily, the current topic exchange
implementation stores topic levels as lists.
It would be better to store topic levels as binaries:
split_topic_key/1 should ideally use binary:split/3, similar to the following:
```
1> P = binary:compile_pattern(<<".">>).
{bm,#Ref<0.1273071188.1488322568.63736>}
2> Bin = <<"aaa.bbb..ccc">>.
<<"aaa.bbb..ccc">>
3> binary:split(Bin, P, [global]).
[<<"aaa">>,<<"bbb">>,<<>>,<<"ccc">>]
```
The compiled pattern could be placed into a persistent term, as sketched
after this description.
This commit avoids migrating Mnesia tables to use binaries
instead of lists. Mnesia migrations are non-trivial, especially with the
current feature flag subsystem.
Furthermore, the Mnesia topic tables are already getting migrated to
their Khepri counterparts in 3.13.
Adding an additional migration only for Mnesia does not make sense.
So, instead of assembling the binding key as we walk down the trie and
then calling list_to_binary/1 in the leaf, it
would be better to just fetch the binding key from the database in the leaf.
As we reach the leaf of the trie, we know both source and destination.
Unfortunately, we cannot fetch the binding key efficiently with the
current rabbit_route (sorted by source exchange) and
rabbit_reverse_route (sorted by destination) tables, as the key is in
the middle between source and destination.
If there is a huge number of bindings for a given source exchange (very
realistic in MQTT use cases) or a large number of bindings for a given
destination (also realistic), it would require scanning this large
number of bindings.
Therefore, this commit takes the simplest possible solution:
it leverages the fact that binding arguments are already part of
table rabbit_topic_trie_binding.
So, if we simply include the binding key in the binding arguments, we
can fetch and return it efficiently in the topic exchange
implementation.
The following patch, omitting fetching the empty-list binding argument
(the default), makes routing slower because the function
`analyze_pattern.constprop.0` requires significantly more (~2.5%) CPU time:
```
@@ -273,7 +273,11 @@ trie_bindings(X, Node) ->
                                node_id = Node,
                                destination = '$1',
                                arguments = '$2'}},
-    mnesia:select(?MNESIA_BINDING_TABLE, [{MatchHead, [], [{{'$1', '$2'}}]}]).
+    mnesia:select(
+      ?MNESIA_BINDING_TABLE,
+      [{MatchHead, [{'andalso', {'is_list', '$2'}, {'=/=', '$2', []}}], [{{'$1', '$2'}}]},
+       {MatchHead, [], ['$1']}
+      ]).
```
Hence, this commit always fetches the binding arguments.
All MQTT 5.0 destination queues will create a binding that
contains the binding key in the binding arguments.
Not only does this solution avoid expensive list_to_binary/1 calls, but
it also means that the Erlang app rabbit (specifically the topic exchange
implementation) does not need to be aware of MQTT anymore:
it just returns the binding key when the binding args say to do so.
In the future, once the Khepri migration has completed, we should be able to
relatively simply remove the binding key from the binding arguments
again to free up some storage space.
Note that one of the advantages of a trie data structure is its space
efficiency: the same prefixes don't have to be stored multiple
times.
However, for RabbitMQ the binding key is already stored at least N times
in various routing tables, so storing it a few more times via the
binding arguments should be acceptable.
The speed improvements are favoured over a few more MBs of ETS usage.
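A minimal sketch of the suggested binary-based split, assuming the compiled pattern is cached in a persistent term (the term key is made up):
```
%% Split a topic key on <<".">> with a cached compiled pattern, avoiding
%% both list-based topic levels and recompiling the pattern per call.
split_topic_key(Key) when is_binary(Key) ->
    Pattern = try persistent_term:get(topic_split_pattern)
              catch error:badarg ->
                        P = binary:compile_pattern(<<".">>),
                        ok = persistent_term:put(topic_split_pattern, P),
                        P
              end,
    binary:split(Key, Pattern, [global]).
```
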
bind_exchange_to_exchange_single_message(Config) ->
|
|
|
|
SourceX = <<"amq.topic">>,
|
|
|
|
DestinationX = <<"destination">>,
|
|
|
|
Q = <<"q">>,
|
2025-03-15 00:20:36 +08:00
|
|
|
{Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config),
|
Return matched binding keys faster
For MQTT 5.0 destination queues, the topic exchange does not only have
to return the destination queue names, but also the matched binding
keys.
This is needed to implement MQTT 5.0 subscription options No Local,
Retain As Published and Subscription Identifiers.
Prior to this commit, as the trie was walked down, we remembered the
edges being walked and assembled the final binding key with
list_to_binary/1.
list_to_binary/1 is very expensive with long lists (long topic names),
even in OTP 26.
The CPU flame graph showed ~3% of CPU usage was spent only in
list_to_binary/1.
Unfortunately and unnecessarily, the current topic exchange
implementation stores topic levels as lists.
It would be better to store topic levels as binaries:
split_topic_key/1 should ideally use binary:split/3 similar as follows:
```
1> P = binary:compile_pattern(<<".">>).
{bm,#Ref<0.1273071188.1488322568.63736>}
2> Bin = <<"aaa.bbb..ccc">>.
<<"aaa.bbb..ccc">>
3> binary:split(Bin, P, [global]).
[<<"aaa">>,<<"bbb">>,<<>>,<<"ccc">>]
```
The compiled pattern could be placed into persistent term.
This commit decided to avoid migrating Mnesia tables to use binaries
instead of lists. Mnesia migrations are non-trivial, especially with the
current feature flag subsystem.
Furthermore the Mnesia topic tables are already getting migrated to
their Khepri counterparts in 3.13.
Adding additional migration only for Mnesia does not make sense.
So, instead of assembling the binding key as we walk down the trie and
then calling list_to_binary/1 in the leaf, it
would be better to just fetch the binding key from the database in the leaf.
As we reach the leaf of the trie, we know both source and destination.
Unfortunately, we cannot fetch the binding key efficiently with the
current rabbit_route (sorted by source exchange) and
rabbit_reverse_route (sorted by destination) tables as the key is in
the middle between source and destination.
If there are a huge number of bindings for a given sourc exchange (very
realistic in MQTT use cases) or a large number of bindings for a given
destination (also realistic), it would require scanning these large
number of bindings.
Therefore this commit takes the simplest possible solution:
The solution leverages the fact that binding arguments are already part of
table rabbit_topic_trie_binding.
So, if we simply include the binding key into the binding arguments, we
can fetch and return it efficiently in the topic exchange
implementation.
The following patch, which omits fetching the empty-list binding argument
(the default), makes routing slower because function
`analyze_pattern.constprop.0` then requires significantly more (~2.5%) CPU time:
```
@@ -273,7 +273,11 @@ trie_bindings(X, Node) ->
node_id = Node,
destination = '$1',
arguments = '$2'}},
- mnesia:select(?MNESIA_BINDING_TABLE, [{MatchHead, [], [{{'$1', '$2'}}]}]).
+ mnesia:select(
+ ?MNESIA_BINDING_TABLE,
+ [{MatchHead, [{'andalso', {'is_list', '$2'}, {'=/=', '$2', []}}], [{{'$1', '$2'}}]},
+ {MatchHead, [], ['$1']}
+ ]).
```
Hence, this commit always fetches the binding arguments.
All MQTT 5.0 destination queues will create a binding that
contains the binding key in the binding arguments.
Not only does this solution avoid expensive list_to_binary/1 calls, but
it also means that Erlang app rabbit (specifically the topic exchange
implementation) does not need to be aware of MQTT anymore:
It just returns the binding key when the binding arguments tell it to do so.
In the future, once the Khepri migration is completed, we should be able to
remove the binding key from the binding arguments again relatively
simply to free up some storage space.
Note that one of the advantages of a trie data structure is its space
efficiency: the same prefixes do not have to be stored multiple
times.
However, for RabbitMQ the binding key is already stored at least N times
in various routing tables, so storing it a few more times via the
binding arguments should be acceptable.
The speed improvements are favoured over a few more MBs of ETS usage.
2023-06-09 18:52:34 +08:00
|
|
|
#'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DestinationX,
|
|
|
|
durable = true,
|
|
|
|
auto_delete = true}),
|
|
|
|
#'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = Q,
|
|
|
|
durable = true}),
|
|
|
|
#'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{queue = Q,
|
|
|
|
exchange = DestinationX,
|
|
|
|
routing_key = <<"a.b">>}),
|
|
|
|
#'exchange.bind_ok'{} = amqp_channel:call(Ch, #'exchange.bind'{destination = DestinationX,
|
|
|
|
source = SourceX,
|
|
|
|
routing_key = <<"*.b">>}),
|
|
|
|
#'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{queue = Q,
|
|
|
|
exchange = SourceX,
|
|
|
|
routing_key = <<"a.b">>}),
|
|
|
|
C = connect(?FUNCTION_NAME, Config),
|
|
|
|
%% Message should be routed as follows:
|
|
|
|
%% SourceX -> DestinationX -> Q and
|
|
|
|
%% SourceX -> Q
|
|
|
|
{ok, _} = emqtt:publish(C, <<"a/b">>, <<"msg">>, qos1),
|
|
|
|
%% However, since we publish a single message only once and have a single destination queue,
|
|
|
|
%% we expect only one copy of the message to end up in the destination queue.
|
|
|
|
eventually(?_assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = <<"msg">>}},
|
|
|
|
amqp_channel:call(Ch, #'basic.get'{queue = Q}))),
|
|
|
|
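%% Give a potential (unexpected) duplicate time to arrive before asserting the queue is empty.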
timer:sleep(10),
|
|
|
|
?assertEqual(#'queue.delete_ok'{message_count = 0},
|
|
|
|
amqp_channel:call(Ch, #'queue.delete'{queue = Q})),
|
2025-03-15 00:20:36 +08:00
|
|
|
ok = emqtt:disconnect(C),
|
|
|
|
ok = rabbit_ct_client_helpers:close_connection_and_channel(Conn, Ch).
|
Support MQTT 5.0 features No Local, RAP, Subscription IDs
Support subscription options "No Local" and "Retain As Published"
as well as Subscription Identifiers.
All three MQTT 5.0 features can be set on a per subscription basis.
Due to wildcards in topic filters, multiple subscriptions
can match a given topic. Therefore, to implement Retain As Published and
Subscription Identifiers, the destination MQTT connection process needs
to know what subscription(s) caused it to receive the message.
There are a few ways this could be implemented:
1. The destination MQTT connection process is aware of all its
subscriptions. Whenever it receives a message, it can match the
message's routing key / topic against all its known topic filters.
However, to iteratively match the routing key against all topic
filters for every received message can become very expensive in the
worst case when the MQTT client creates many subscriptions containing
wildcards. This could be the case for an MQTT client that acts as a
bridge or proxy or dispatcher: It could subscribe via a wildcard for
each of its own clients.
2. Instead of iteratively matching the topic of the received message
against all topic filters that contain wildcards, a better approach
would be for every MQTT subscriber connection process to maintain a
local trie data structure (similar to how topic exchanges are
implemented) and thereby perform matching more efficiently.
However, this does not sound optimal either because routing is
effectively performed twice: in the topic exchange and again against
a much smaller trie in each destination connection process.
3. Given that the topic exchange already performs routing, a much more
sensible way would be to send the matched binding key(s) to the
destination MQTT connection process. A subscription (topic filter)
maps to a binding key in AMQP 0.9.1 routing. Therefore, for the first
time in RabbitMQ, the routing function should not only output a list
of unique destination queues, but also the binding keys (subscriptions)
that caused the message to be routed to the destination queue.
This commit therefore implements the 3rd approach.
The downside of the 3rd approach is that it requires API changes to the
routing function and topic exchange.
Specifically, this commit adds a new function rabbit_exchange:route/3
that accepts a list of routing options. If that list contains version 2,
the caller of the routing function knows how to handle the return value
that could also contain binding keys.
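A rough sketch of a caller (the option value and return shapes below
are assumptions derived from this description, not the exact API, and
deliver_with_keys/2 is a hypothetical helper):
```
%% Sketch only: with the v2 option, a result may carry the matched
%% binding keys in addition to the destination queue name.
route_v2(Exchange, Delivery) ->
    Routes = rabbit_exchange:route(Exchange, Delivery, [v2]),
    lists:foreach(
      fun({QName, BindingKeys}) ->
              %% v2 result: destination plus the binding keys that matched
              deliver_with_keys(QName, BindingKeys);
         (QName) ->
              %% plain destination, as returned without the v2 option
              deliver_with_keys(QName, [])
      end, Routes).
```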
This commit allows an MQTT connection process, the channel process, and
at-most-once dead lettering to handle binding keys. Binding keys are
included as AMQP 0.9.1 headers into the basic message.
Therefore, whenever a message is sent from an MQTT client or AMQP 0.9.1
client or AMQP 1.0 client or STOMP client, the MQTT receiver will know
the subscription identifier that caused the message to be received.
Note that due to the low number of allowed wildcard characters (# and
+), the cardinality of matched binding keys shouldn't be high even if
the topic contains, for example, 3 levels and the message is sent to,
say, 5 million destination queues. In other words, sending multiple
distinct basic messages to the destination shouldn't hurt the delegate
optimisation too much. The delegate optimisation implemented for classic
queues and rabbit_mqtt_qos0_queue(s) still takes place for all basic
messages that contain the same set of matched binding keys.
The topic exchange returns all matched binding keys by remembering the
edges walked down to the leaves. As an optimisation, binding keys are
returned only for MQTT queues. This does add a small dependency from
app rabbit to app rabbitmq_mqtt, which is not optimal. However, this
dependency should be simple to remove by omitting this optimisation.
Another important feature of this commit is persisting subscription
options and subscription identifiers because they are part of the
MQTT 5.0 session state.
In MQTT v3 and v4, the only subscription information that was part of
the session state was the topic filter and the QoS level.
Both were implicitly stored in the form of bindings:
The topic filter as the binding key and the QoS level as the destination
queue name of the binding.
For MQTT v5 we need to persist more subscription information.
From a domain perspective, it makes sense to store subscription options
as part of subscriptions, i.e. bindings, even though they are currently
not used in routing.
Therefore, this commit stores subscription options as binding arguments.
Storing subscription options as binding arguments in turn comes with
new challenges: how to handle mixed-version clusters and upgrading an
MQTT session from v3 or v4 to v5?
Imagine an MQTT client connects via v5 with Session Expiry Interval > 0
to a new node in a mixed version cluster, creates a subscription,
disconnects, and subsequently connects via v3 to an old node. The
client should continue to receive messages.
To simplify such edge cases, this commit introduces a new feature flag
called mqtt_v5. If mqtt_v5 is disabled, clients cannot connect to
RabbitMQ via MQTT 5.0.
This still doesn't entirely solve the problem of MQTT session upgrades
(v4 to v5 client) or session downgrades (v5 to v4 client).
Ideally, once mqtt_v5 is enabled, all MQTT bindings contain non-empty binding
arguments. However, this will require a feature flag migration function
to modify all MQTT bindings. To be more precise, all MQTT bindings need
to be deleted and added because the binding argument is part of the
Mnesia table key.
Since feature flag migration functions are non-trivial to implement in
RabbitMQ (they can run on every node multiple times and concurrently),
this commit takes a simpler approach:
All v3 / v4 sessions keep the empty binding argument [].
All v5 sessions use the new binding argument [#mqtt_subscription_opts{}].
This requires only handling a session upgrade / downgrade by
creating a binding (with the new binding arg) and deleting the old
binding (with the old binding arg) when processing the CONNECT packet.
Note that such session upgrades or downgrades should be rather rare in
practice. Therefore, these binding transactions shouldn't hurt performance.
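Sketched conceptually at the AMQP 0.9.1 level (the real code
manipulates its internal binding records directly): because the
arguments are part of a binding's identity, "changing" them means
removing the old binding and adding a new one:
```
%% Sketch only: swap a binding's arguments by deleting the old binding
%% and creating a new one with the new arguments.
swap_binding_args(Ch, Q, TopicFilter, OldArgs, NewArgs) ->
    #'queue.unbind_ok'{} = amqp_channel:call(
                             Ch, #'queue.unbind'{
                                    queue       = Q,
                                    exchange    = <<"amq.topic">>,
                                    routing_key = TopicFilter,
                                    arguments   = OldArgs}),
    #'queue.bind_ok'{} = amqp_channel:call(
                           Ch, #'queue.bind'{
                                  queue       = Q,
                                  exchange    = <<"amq.topic">>,
                                  routing_key = TopicFilter,
                                  arguments   = NewArgs}).
```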
The No Local option is implemented within the MQTT publishing connection
process: The message is not sent to the MQTT destination if the
destination queue name matches the current MQTT client ID and the
message was routed due to a subscription that has the No Local flag set.
This avoids unnecessary traffic on the MQTT queue.
The alternative would have been for the "receiving side" (the same process)
to filter the message out, which would have been more consistent with how
Retain As Published and Subscription Identifiers are implemented, but
would have caused unnecessary load on the MQTT queue.
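A minimal sketch of that check, assuming the publishing process sees,
per destination, the owning client ID and the subscription options as a
map (illustrative names, not the actual code):
```
%% Sketch only: drop destinations owned by the publishing client whose
%% matching subscription has the No Local flag set.
filter_no_local(Destinations, PublisherClientId) ->
    [D || {OwnerClientId, SubOpts} = D <- Destinations,
          not (OwnerClientId =:= PublisherClientId andalso
               maps:get(no_local, SubOpts, false))].
```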
2023-04-19 21:32:34 +08:00
|
|
|
|
2025-06-01 22:43:39 +08:00
|
|
|
notify_consumer_qos0_queue_deleted(Config) ->
|
|
|
|
Topic = atom_to_binary(?FUNCTION_NAME),
|
|
|
|
notify_consumer_queue_deleted(Config, Topic, <<"MQTT QoS 0">>, [{retry_interval, 1}], qos0).
|
|
|
|
|
|
|
|
notify_consumer_classic_queue_deleted(Config) ->
|
|
|
|
Topic = atom_to_binary(?FUNCTION_NAME),
|
|
|
|
notify_consumer_queue_deleted(Config, Topic, <<"classic">>, non_clean_sess_opts(), qos0).
|
|
|
|
|
|
|
|
notify_consumer_quorum_queue_deleted(Config) ->
|
|
|
|
set_durable_queue_type(Config),
|
|
|
|
Topic = atom_to_binary(?FUNCTION_NAME),
|
|
|
|
notify_consumer_queue_deleted(Config, Topic, <<"quorum">>, non_clean_sess_opts(), qos1),
|
|
|
|
unset_durable_queue_type(Config).
|
|
|
|
|
|
|
|
notify_consumer_queue_deleted(Config, Name = Topic, ExpectedType, ConnOpts, Qos) ->
|
|
|
|
C = connect(Name, Config, ConnOpts),
|
|
|
|
{ok, _, _} = emqtt:subscribe(C, Topic, Qos),
|
|
|
|
{ok, #{reason_code_name := success}} = emqtt:publish(C, Name, <<"m1">>, qos1),
|
|
|
|
{ok, #{reason_code_name := success}} = emqtt:publish(C, Name, <<"m2">>, qos1),
|
|
|
|
ok = expect_publishes(C, Topic, [<<"m1">>, <<"m2">>]),
|
|
|
|
|
|
|
|
[[QName, Type]] = rabbitmqctl_list(Config, 0, ["list_queues", "name", "type", "--no-table-headers"]),
|
|
|
|
?assertMatch(ExpectedType, Type),
|
|
|
|
|
|
|
|
process_flag(trap_exit, true),
|
|
|
|
{ok, _} = rabbitmqctl(Config, 0, ["delete_queue", QName]),
|
|
|
|
|
|
|
|
await_exit(C).
|
|
|
|
|
2022-11-17 18:22:06 +08:00
|
|
|
%% -------------------------------------------------------------------
|
|
|
|
%% Internal helpers
|
|
|
|
%% -------------------------------------------------------------------
|
|
|
|
|
2022-11-18 04:13:27 +08:00
|
|
|
await_confirms_ordered(_, To, To) ->
|
2022-11-12 01:56:32 +08:00
|
|
|
ok;
|
2022-11-18 04:13:27 +08:00
|
|
|
await_confirms_ordered(From, N, To) ->
|
2022-11-12 01:56:32 +08:00
|
|
|
Expected = {From, N},
|
|
|
|
receive
|
|
|
|
Expected ->
|
2022-11-18 04:13:27 +08:00
|
|
|
await_confirms_ordered(From, N + 1, To);
|
2022-11-12 01:56:32 +08:00
|
|
|
Got ->
|
|
|
|
ct:fail("Received unexpected message. Expected: ~p Got: ~p", [Expected, Got])
|
2024-12-10 23:19:34 +08:00
|
|
|
after ?TIMEOUT ->
|
2022-11-12 01:56:32 +08:00
|
|
|
ct:fail("Did not receive expected message: ~p", [Expected])
|
|
|
|
end.
|
|
|
|
|
2022-11-18 04:13:27 +08:00
|
|
|
await_confirms_unordered(_, 0) ->
|
|
|
|
ok;
|
|
|
|
await_confirms_unordered(From, Left) ->
|
|
|
|
receive
|
|
|
|
{From, _N} ->
|
|
|
|
await_confirms_unordered(From, Left - 1);
|
|
|
|
Other ->
|
|
|
|
ct:fail("Received unexpected message: ~p", [Other])
|
2024-12-10 23:19:34 +08:00
|
|
|
after ?TIMEOUT ->
|
2022-11-18 04:13:27 +08:00
|
|
|
ct:fail("~b confirms are missing", [Left])
|
|
|
|
end.
|
|
|
|
|
2024-11-08 17:34:49 +08:00
|
|
|
await_consumer_count(ConsumerCount, ClientId, QoS, Config) ->
|
2025-06-01 22:43:39 +08:00
|
|
|
{_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config),
|
2024-11-08 17:34:49 +08:00
|
|
|
QueueName = rabbit_mqtt_util:queue_name_bin(
|
|
|
|
rabbit_data_coercion:to_binary(ClientId), QoS),
|
|
|
|
eventually(
|
|
|
|
?_assertMatch(
|
|
|
|
#'queue.declare_ok'{consumer_count = ConsumerCount},
|
|
|
|
amqp_channel:call(Ch, #'queue.declare'{queue = QueueName,
|
|
|
|
passive = true})), 500, 10),
|
|
|
|
ok = rabbit_ct_client_helpers:close_channel(Ch).
|
|
|
|
|
2022-10-29 00:28:47 +08:00
|
|
|
declare_queue(Ch, QueueName, Args)
|
|
|
|
when is_pid(Ch), is_binary(QueueName), is_list(Args) ->
|
|
|
|
#'queue.declare_ok'{} = amqp_channel:call(
|
|
|
|
Ch, #'queue.declare'{
|
|
|
|
queue = QueueName,
|
|
|
|
durable = true,
|
|
|
|
arguments = Args}).
|
2023-01-28 02:25:57 +08:00
|
|
|
|
2022-10-29 00:28:47 +08:00
|
|
|
delete_queue(Ch, QueueNames)
|
|
|
|
when is_pid(Ch), is_list(QueueNames) ->
|
|
|
|
lists:foreach(
|
|
|
|
fun(Q) ->
|
|
|
|
delete_queue(Ch, Q)
|
|
|
|
end, QueueNames);
|
|
|
|
delete_queue(Ch, QueueName)
|
|
|
|
when is_pid(Ch), is_binary(QueueName) ->
|
|
|
|
#'queue.delete_ok'{} = amqp_channel:call(
|
|
|
|
Ch, #'queue.delete'{
|
|
|
|
queue = QueueName}).
|
2023-01-28 02:25:57 +08:00
|
|
|
|
2022-10-29 00:28:47 +08:00
|
|
|
bind(Ch, QueueName, Topic)
|
|
|
|
when is_pid(Ch), is_binary(QueueName), is_binary(Topic) ->
|
|
|
|
#'queue.bind_ok'{} = amqp_channel:call(
|
|
|
|
Ch, #'queue.bind'{queue = QueueName,
|
|
|
|
exchange = <<"amq.topic">>,
|
|
|
|
routing_key = Topic}).
|
2023-05-02 22:08:15 +08:00
|
|
|
|
|
|
|
assert_v5_disconnect_reason_code(Config, ReasonCode) ->
|
|
|
|
case ?config(mqtt_version, Config) of
|
2023-06-06 01:23:46 +08:00
|
|
|
v3 -> ok;
|
|
|
|
v4 -> ok;
|
|
|
|
v5 -> receive {disconnected, ReasonCode, _Props} -> ok
|
2024-12-10 23:19:34 +08:00
|
|
|
after ?TIMEOUT -> ct:fail("missing DISCONNECT packet from server")
|
2023-06-06 01:23:46 +08:00
|
|
|
end
|
2023-05-21 18:32:14 +08:00
|
|
|
end.
|
2025-06-01 22:43:39 +08:00
|
|
|
|
|
|
|
set_durable_queue_type(Config) ->
|
|
|
|
ok = rpc(Config, application, set_env, [rabbitmq_mqtt, durable_queue_type, quorum]).
|
|
|
|
|
|
|
|
unset_durable_queue_type(Config) ->
|
|
|
|
ok = rpc(Config, application, unset_env, [rabbitmq_mqtt, durable_queue_type]).
|