%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
%%

-module(rabbit_variable_queue).

-export([init/3, terminate/2, delete_and_terminate/2, delete_crashed/1,
purge/1, purge_acks/1,
publish/5, publish_delivered/4,
discard/3, drain_confirmed/1,
dropwhile/2, fetchwhile/4, fetch/2, drop/2, ack/2, requeue/2,
ackfold/4, fold/3, len/1, is_empty/1, depth/1,
update_rates/1, needs_timeout/1, timeout/1,
handle_pre_hibernate/1, resume/1, msg_rates/1,
info/2, invoke/3, is_duplicate/2, set_queue_mode/2,
set_queue_version/2, zip_msgs_and_acks/4]).

-export([start/2, stop/1]).

%% This function is used by rabbit_classic_queue_index_v2
%% to convert v1 queues to v2 after an upgrade to 4.0.
-export([convert_from_v1_to_v2_loop/8]).

%% exported for testing only
-export([start_msg_store/3, stop_msg_store/1, init/5]).

-include("mc.hrl").
-include_lib("stdlib/include/qlc.hrl").

-define(QUEUE_MIGRATION_BATCH_SIZE, 100).
-define(EMPTY_START_FUN_STATE, {fun (ok) -> finished end, ok}).

%%----------------------------------------------------------------------------
%% Messages, their metadata and their position in the queue (SeqId),
%% can be in memory or on disk, or both. Persistent messages in
%% durable queues are always written to disk when they arrive.
%% Transient messages as well as persistent messages in non-durable
%% queues may be kept only in memory.
%%
%% The number of messages kept in memory is dependent on the consume
%% rate of the queue. At a minimum 1 message is kept (necessary because
%% we often need to check the expiration at the head of the queue) and
%% at a maximum the semi-arbitrary number 2048.
%%
%% Messages are never written back to disk after they have been read
%% into memory. Instead the queue is designed to avoid keeping too
%% much to begin with.
%%
%% Messages are persisted using a queue index and a message store.
%% A few different scenarios may play out depending on the message
%% size:
%%
%% - size < qi_msgs_embed_below: the metadata
%% is stored in rabbit_classic_queue_index_v2, while the content
%% is stored in the per-queue rabbit_classic_queue_store_v2
%%
%% - size >= qi_msgs_embed_below: the metadata
%% is stored in rabbit_classic_queue_index_v2, while the content
%% is stored in the per-vhost shared rabbit_msg_store
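%%
%% A rough sketch of that decision, as a hypothetical helper (not a
%% function of this module; the real logic lives in determine_persist_to/2
%% and also accounts for an estimated header size):
%%
%%   persist_to(MsgSize, QiEmbedMsgsBelow) when MsgSize < QiEmbedMsgsBelow ->
%%       rabbit_classic_queue_store_v2;
%%   persist_to(_MsgSize, _QiEmbedMsgsBelow) ->
%%       rabbit_msg_store.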
%%
%% When messages must be read from disk, message bodies will
%% also be read from disk except if the message is stored
%% in the per-vhost shared rabbit_msg_store. In that case
%% the message gets read before it needs to be sent to the
%% consumer. Messages are read from rabbit_msg_store one
%% at a time currently.
%%
%% The queue also keeps track of messages that were delivered
%% but for which the ack has not been received. Pending acks
%% are currently kept in memory although the message may be
%% on disk.
%%
%% Messages being requeued are returned to their position in
%% the queue using their SeqId value.
%%
%% In order to try to achieve as fast a start-up as possible, if a
%% clean shutdown occurs, we try to save out state to disk to reduce
%% work on startup. In rabbit_msg_store this takes the form of the
%% index_module's state, plus the file_summary ets table, and client
%% refs. In the VQ, this takes the form of the count of persistent
%% messages in the queue and references into the msg_stores. The
%% queue index adds to these terms the details of its segments and
%% stores the terms in the queue directory.
%%
%% Two rabbit_msg_store(s) are used. One is created for persistent messages
%% to durable queues that must survive restarts, and the other is used
%% for all other messages that just happen to need to be written to
%% disk. On start up we can therefore nuke the transient message
%% store, and be sure that the messages in the persistent store are
%% all that we need.
%%
%% The references to the msg_stores are there so that the msg_store
%% knows to only trust its saved state if all of the queues it was
%% previously talking to come up cleanly. Likewise, the queues
%% themselves (especially indexes) skip work in init if all the queues
%% and msg_store were shutdown cleanly. This gives both good speed
%% improvements and also robustness so that if anything possibly went
%% wrong in shutdown (or there was subsequent manual tampering), all
%% messages and queues that can be recovered are recovered, safely.
%%
%% To delete transient messages lazily, the variable_queue, on
%% startup, stores the next_seq_id reported by the queue_index as the
%% transient_threshold. From that point on, whenever it's reading a
%% message off disk via the queue_index, if the seq_id is below this
%% threshold and the message is transient then it drops the message
%% (the message itself won't exist on disk because it would have been
%% stored in the transient msg_store which would have had its saved
%% state nuked on startup). This avoids the expensive operation of
%% scanning the entire queue on startup in order to delete transient
%% messages that were written to disk.
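%%
%% In other words (illustrative pseudo-clause only, not code from this
%% module):
%%
%%   maybe_drop(SeqId, IsPersistent, TransientThreshold)
%%     when SeqId < TransientThreshold, not IsPersistent -> drop;
%%   maybe_drop(_SeqId, _IsPersistent, _TransientThreshold) -> keep.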
%%
%% The queue keeps track of deliveries via the
%% next_deliver_seq_id variable. This variable gets increased
%% with every (first-time) delivery. When delivering messages
%% the seq_id of the message is checked against this variable
%% to determine whether the message is a redelivery. The variable
%% is stored in the queue terms on graceful shutdown. On dirty
%% recovery the variable becomes the seq_id of the most recent
%% message in the queue (effectively marking all messages as
%% delivered, like the v1 index was doing).
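%%
%% Roughly (illustrative only): a message read back with sequence id
%% SeqId is treated as a redelivery when
%%
%%   IsRedelivered = SeqId < NextDeliverSeqId.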
%%
%% Previous versions of classic queues had a much more complex
%% way of working. Messages were categorized into four groups,
%% and remnants of these terms remain in the code at the time
%% of writing:
%%
%% alpha: this is a message where both the message itself, and its
%% position within the queue are held in RAM
%%
%% beta: this is a message where the message itself is only held on
%% disk (if persisted to the message store) but its position
%% within the queue is held in RAM.
%%
%% gamma: this is a message where the message itself is only held on
%% disk, but its position is both in RAM and on disk.
%%
%% delta: this is a collection of messages, represented by a single
%% term, where the messages and their position are only held on
%% disk.
%%
%% Messages may have been stored in q1, q2, delta, q3 or q4 depending
%% on their location in the queue. The current version of classic
%% queues only uses delta (on-disk, for the tail of the queue) or
%% q3 (in-memory, head of the queue). Messages used to move from
%% q1 -> q2 -> delta -> q3 -> q4 (and sometimes q3 -> delta or
%% q4 -> delta to reduce memory use). Now messages only move
%% from delta to q3. Full details on the old mechanisms can be
%% found in previous versions of this file (such as the 3.11 version).
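%%
%% In short, the only flow left today is roughly (sketch):
%%
%%   publish --> q3 (in memory) or delta (on disk, tail of the queue)
%%   delta   --> q3 (paged in as the in-memory head drains)
%%   q3      --> consumer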
%%
%% In the current version of classic queues, there is no distinction
%% between default and lazy queues. The current behavior is close to
%% lazy queues, except we avoid some writes to disk when queues are
%% empty.
%%----------------------------------------------------------------------------

-behaviour(rabbit_backing_queue).

-record(vqstate,
{ q1, %% Unused.
q2, %% Unused.
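%% Tail of the queue: messages held only on disk, represented by
%% the delta record below.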
delta,
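%% Head of the queue: messages held in memory.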
q3,
q4, %% Unused.
next_seq_id,
%% seq_id() of first undelivered message
%% everything before this seq_id() was delivered at least once
next_deliver_seq_id,
ram_pending_ack, %% msgs still in RAM
disk_pending_ack, %% msgs in store, paged out
qi_pending_ack, %% Unused.
index_mod, %% Unused.
index_state,
store_state,
msg_store_clients,
durable,
transient_threshold,
qi_embed_msgs_below,

len, %% w/o unacked @todo No longer needed, is delta+q3.
bytes, %% w/o unacked
unacked_bytes,
persistent_count, %% w unacked
persistent_bytes, %% w unacked
delta_transient_bytes, %%

target_ram_count,
ram_msg_count, %% w/o unacked
ram_msg_count_prev,
ram_ack_count_prev,
ram_bytes, %% w unacked
out_counter,
in_counter,
rates,
%% There are two confirms paths: either store/index produce confirms
%% separately (v1 and v2 with per-vhost message store) or the confirms
%% are produced all at once while syncing/flushing (v2 with per-queue
%% message store). The latter is more efficient as it avoids many
%% sets operations.
msgs_on_disk,
msg_indices_on_disk,
unconfirmed,
confirmed,
ack_out_counter,
ack_in_counter,
%% Unlike the other counters these two do not feed into
%% #rates{} and get reset
disk_read_count,
disk_write_count,

io_batch_size,

%% default queue or lazy queue
mode, %% Unused.
version = 2, %% Unused.
%% Fast path for confirms handling. Instead of having
%% index/store keep track of confirms separately and
%% doing intersect/subtract/union we just put the messages
%% here and on sync move them to 'confirmed'.
%%
%% Note: This field used to be 'memory_reduction_run_count'.
unconfirmed_simple,
%% Queue data is grouped by VHost. We need to store it
%% to work with queue index.
virtual_host,
waiting_bump = false
}).

-record(rates, { in, out, ack_in, ack_out, timestamp }).

-type msg_location() :: memory
| rabbit_msg_store
| rabbit_queue_index
| rabbit_classic_queue_store_v2:msg_location().
-export_type([msg_location/0]).

-record(msg_status,
{ seq_id,
msg_id,
msg,
is_persistent,
is_delivered,
msg_location, %% ?IN_SHARED_STORE | ?IN_QUEUE_STORE | ?IN_QUEUE_INDEX | ?IN_MEMORY
index_on_disk,
persist_to,
msg_props
}).

-record(delta,
{ start_seq_id, %% start_seq_id is inclusive
count,
transient,
end_seq_id %% end_seq_id is exclusive
}).

-define(HEADER_GUESS_SIZE, 100). %% see determine_persist_to/2
-define(PERSISTENT_MSG_STORE, msg_store_persistent).
-define(TRANSIENT_MSG_STORE, msg_store_transient).

-define(QUEUE, lqueue).

-define(IN_SHARED_STORE, rabbit_msg_store).
-define(IN_QUEUE_STORE, {rabbit_classic_queue_store_v2, _, _}).
-define(IN_QUEUE_INDEX, rabbit_queue_index).
-define(IN_MEMORY, memory).

-include_lib("rabbit_common/include/rabbit.hrl").
-include("amqqueue.hrl").

%%----------------------------------------------------------------------------

-type seq_id() :: non_neg_integer().
-export_type([seq_id/0]).

-type rates() :: #rates { in :: float(),
out :: float(),
ack_in :: float(),
ack_out :: float(),
timestamp :: rabbit_types:timestamp()}.

-type delta() :: #delta { start_seq_id :: non_neg_integer(),
count :: non_neg_integer(),
end_seq_id :: non_neg_integer() }.

%% The compiler (rightfully) complains that ack() and state() are
%% unused. For this reason we duplicate a -spec from
%% rabbit_backing_queue with the only intent being to remove
%% warnings. The problem here is that we can't parameterise the BQ
%% behaviour by these two types as we would like to. We still leave
%% these here for documentation purposes.
-type ack() :: seq_id().
-type state() :: #vqstate {
q1 :: ?QUEUE:?QUEUE(),
q2 :: ?QUEUE:?QUEUE(),
delta :: delta(),
q3 :: ?QUEUE:?QUEUE(),
q4 :: ?QUEUE:?QUEUE(),
next_seq_id :: seq_id(),
next_deliver_seq_id :: seq_id(),
ram_pending_ack :: map(),
disk_pending_ack :: map(),
qi_pending_ack :: undefined,
index_state :: any(),
store_state :: any(),
msg_store_clients :: 'undefined' | {{any(), binary()},
{any(), binary()}},
durable :: boolean(),
transient_threshold :: non_neg_integer(),
qi_embed_msgs_below :: non_neg_integer(),

len :: non_neg_integer(),
bytes :: non_neg_integer(),
unacked_bytes :: non_neg_integer(),

persistent_count :: non_neg_integer(),
persistent_bytes :: non_neg_integer(),

target_ram_count :: non_neg_integer() | 'infinity',
ram_msg_count :: non_neg_integer(),
ram_msg_count_prev :: non_neg_integer(),
ram_ack_count_prev :: non_neg_integer(),
ram_bytes :: non_neg_integer(),
out_counter :: non_neg_integer(),
in_counter :: non_neg_integer(),
rates :: rates(),
msgs_on_disk :: sets:set(),
msg_indices_on_disk :: sets:set(),
unconfirmed :: sets:set(),
confirmed :: sets:set(),
ack_out_counter :: non_neg_integer(),
ack_in_counter :: non_neg_integer(),
disk_read_count :: non_neg_integer(),
disk_write_count :: non_neg_integer(),

io_batch_size :: pos_integer(),
mode :: 'default' | 'lazy',
version :: 2,
unconfirmed_simple :: sets:set()}.

-define(BLANK_DELTA, #delta { start_seq_id = undefined,
count = 0,
transient = 0,
end_seq_id = undefined }).
-define(BLANK_DELTA_PATTERN(Z), #delta { start_seq_id = Z,
count = 0,
transient = 0,
end_seq_id = Z }).

-define(MICROS_PER_SECOND, 1000000.0).

%% We're updating rates every 5s at most; a half life that is of
%% the same order of magnitude is probably about right.
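%%
%% One way to apply such a half life is an exponentially weighted
%% average; an illustrative sketch (see update_rates/1 for the actual
%% arithmetic):
%%
%%   Weight = math:exp(Elapsed * math:log(0.5) / ?RATE_AVG_HALF_LIFE),
%%   AvgRate = InstantRate * (1 - Weight) + PrevAvgRate * Weight.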
-define(RATE_AVG_HALF_LIFE, 5.0).

%% We will recalculate the #rates{} every 5 seconds,
%% or every N messages published, whichever is
%% sooner. We do this since the priority calculations in
%% rabbit_amqqueue_process need fairly fresh rates.
-define(MSGS_PER_RATE_CALC, 100).

%%----------------------------------------------------------------------------
%% Public API
%%----------------------------------------------------------------------------

start(VHost, DurableQueues) ->
%% The v2 index walker function covers both v1 and v2 index files.
{AllTerms, StartFunState} = rabbit_classic_queue_index_v2:start(VHost, DurableQueues),
%% Group recovery terms by vhost.
ClientRefs = [Ref || Terms <- AllTerms,
Terms /= non_clean_shutdown,
begin
Ref = proplists:get_value(persistent_ref, Terms),
Ref =/= undefined
end],
start_msg_store(VHost, ClientRefs, StartFunState),
{ok, AllTerms}.

stop(VHost) ->
ok = stop_msg_store(VHost),
ok = rabbit_classic_queue_index_v2:stop(VHost).

start_msg_store(VHost, Refs, StartFunState) when is_list(Refs); Refs == undefined ->
rabbit_log:info("Starting message stores for vhost '~ts'", [VHost]),
do_start_msg_store(VHost, ?TRANSIENT_MSG_STORE, undefined, ?EMPTY_START_FUN_STATE),
do_start_msg_store(VHost, ?PERSISTENT_MSG_STORE, Refs, StartFunState),
ok.

do_start_msg_store(VHost, Type, Refs, StartFunState) ->
case rabbit_vhost_msg_store:start(VHost, Type, Refs, StartFunState) of
{ok, _} ->
rabbit_log:info("Started message store of type ~ts for vhost '~ts'", [abbreviated_type(Type), VHost]);
{error, {no_such_vhost, VHost}} = Err ->
rabbit_log:error("Failed to start message store of type ~ts for vhost '~ts': the vhost no longer exists!",
[Type, VHost]),
exit(Err);
{error, Error} ->
rabbit_log:error("Failed to start message store of type ~ts for vhost '~ts': ~tp",
[Type, VHost, Error]),
exit({error, Error})
end.

abbreviated_type(?TRANSIENT_MSG_STORE) -> transient;
abbreviated_type(?PERSISTENT_MSG_STORE) -> persistent.

stop_msg_store(VHost) ->
rabbit_vhost_msg_store:stop(VHost, ?TRANSIENT_MSG_STORE),
rabbit_vhost_msg_store:stop(VHost, ?PERSISTENT_MSG_STORE),
ok.

init(Queue, Recover, Callback) ->
init(
Queue, Recover,
fun (MsgIds, ActionTaken) ->
msgs_written_to_disk(Callback, MsgIds, ActionTaken)
end,
fun (MsgIds) -> msg_indices_written_to_disk(Callback, MsgIds) end,
fun (MsgIds) -> msgs_and_indices_written_to_disk(Callback, MsgIds) end).

init(Q, new, MsgOnDiskFun, MsgIdxOnDiskFun, MsgAndIdxOnDiskFun) when ?is_amqqueue(Q) ->
QueueName = amqqueue:get_name(Q),
IsDurable = amqqueue:is_durable(Q),
IndexState = rabbit_classic_queue_index_v2:init(QueueName,
MsgIdxOnDiskFun, MsgAndIdxOnDiskFun),
StoreState = rabbit_classic_queue_store_v2:init(QueueName),
VHost = QueueName#resource.virtual_host,
init(IsDurable, IndexState, StoreState, 0, 0, [],
case IsDurable of
true -> msg_store_client_init(?PERSISTENT_MSG_STORE,
MsgOnDiskFun, VHost);
false -> undefined
end,
msg_store_client_init(?TRANSIENT_MSG_STORE, undefined,
VHost), VHost);

%% We can be recovering a transient queue if it crashed
init(Q, Terms, MsgOnDiskFun, MsgIdxOnDiskFun, MsgAndIdxOnDiskFun) when ?is_amqqueue(Q) ->
QueueName = amqqueue:get_name(Q),
IsDurable = amqqueue:is_durable(Q),
{PRef, RecoveryTerms} = process_recovery_terms(Terms),
VHost = QueueName#resource.virtual_host,
{PersistentClient, ContainsCheckFun} =
case IsDurable of
true -> C = msg_store_client_init(?PERSISTENT_MSG_STORE, PRef,
MsgOnDiskFun, VHost),
{C, fun (MsgId) when is_binary(MsgId) ->
rabbit_msg_store:contains(MsgId, C);
|
Message Containers (#5077)
This PR implements an approach for a "protocol (data format) agnostic core" where the format of the message isn't converted at point of reception.
Currently all non AMQP 0.9.1 originating messages are converted into a AMQP 0.9.1 flavoured basic_message record before sent to a queue. If the messages are then consumed by the originating protocol they are converted back from AMQP 0.9.1. For some protocols such as MQTT 3.1 this isn't too expensive as MQTT is mostly a fairly easily mapped subset of AMQP 0.9.1 but for others such as AMQP 1.0 the conversions are awkward and in some cases lossy even if consuming from the originating protocol.
This PR instead wraps all incoming messages in their originating form into a generic, extensible message container type (mc). The container module exposes an API to get common message details such as size and various properties (ttl, priority etc) directly from the source data type. Each protocol needs to implement the mc behaviour such that when a message originating form one protocol is consumed by another protocol we convert it to the target protocol at that point.
The message container also contains annotations, dead letter records and other meta data we need to record during the lifetime of a message. The original protocol message is never modified unless it is consumed.
This includes conversion modules to and from amqp, amqpl (AMQP 0.9.1) and mqtt.
COMMIT HISTORY:
* Refactor away from using the delivery{} record
In many places including exchange types. This should make it
easier to move towards using a message container type instead of
basic_message.
Add mc module and move direct replies outside of exchange
Lots of changes incl classic queues
Implement stream support incl amqp conversions
simplify mc state record
move mc.erl
mc dlx stuff
recent history exchange
Make tracking work
But doesn't take a protocol agnostic approach as we just convert
everything into AMQP legacy and back. Might be good enough for now.
Tracing as a whole may want a bit of a re-vamp at some point.
tidy
make quorum queue peek work by legacy conversion
dead lettering fixes
dead lettering fixes
CMQ fixes
rabbit_trace type fixes
fixes
fix
Fix classic queue props
test assertion fix
feature flag and backwards compat
Enable message_container feature flag in some SUITEs
Dialyzer fixes
fixes
fix
test fixes
Various
Manually update a gazelle generated file
until a gazelle enhancement can be made
https://github.com/rabbitmq/rules_erlang/issues/185
Add message_containers_SUITE to bazel
and regen bazel files with gazelle from rules_erlang@main
Simplify essential proprty access
Such as durable, ttl and priority by extracting them into annotations
at message container init time.
Move type
to remove dependenc on amqp10 stuff in mc.erl
mostly because I don't know how to make bazel do the right thing
add more stuff
Refine routing header stuff
wip
Cosmetics
Do not use "maybe" as type name as "maybe" is a keyword since OTP 25
which makes Erlang LS complain.
* Dedup death queue names
* Fix function clause crashes
Fix failing tests in the MQTT shared_SUITE:
A classic queue message ID can be undefined as set in
https://github.com/rabbitmq/rabbitmq-server/blob/fbe79ff47b4edbc0fd95457e623d6593161ad198/deps/rabbit/src/rabbit_classic_queue_index_v2.erl#L1048
Fix failing tests in the MQTT shared_SUITE-mixed:
When feature flag message_containers is disabled, the
message is not an #mc{} record, but a #basic_message{} record.
* Fix is_utf8_no_null crash
Prior to this commit, the function crashed if invalid UTF-8 was
provided, e.g.:
```
1> rabbit_misc:is_valid_shortstr(<<"😇"/utf16>>).
** exception error: no function clause matching rabbit_misc:is_utf8_no_null(<<216,61,222,7>>) (rabbit_misc.erl, line 1481)
```
* Implement mqtt mc behaviour
For now via amqp translation.
This is still work in progress, but the following SUITEs pass:
```
make -C deps/rabbitmq_mqtt ct-shared t=[mqtt,v5,cluster_size_1] FULL=1
make -C deps/rabbitmq_mqtt ct-v5 t=[mqtt,cluster_size_1] FULL=1
```
* Shorten mc file names
Module name length matters because for each persistent message the #mc{}
record is persisted to disk.
```
1> iolist_size(term_to_iovec({mc, rabbit_mc_amqp_legacy})).
30
2> iolist_size(term_to_iovec({mc, mc_amqpl})).
17
```
This commit renames the mc modules:
```
ag -l rabbit_mc_amqp_legacy | xargs sed -i 's/rabbit_mc_amqp_legacy/mc_amqpl/g'
ag -l rabbit_mc_amqp | xargs sed -i 's/rabbit_mc_amqp/mc_amqp/g'
ag -l rabbit_mqtt_mc | xargs sed -i 's/rabbit_mqtt_mc/mc_mqtt/g'
```
* mc: make deaths an annotation + fixes
* Fix mc_mqtt protocol_state callback
* Fix test will_delay_node_restart
```
make -C deps/rabbitmq_mqtt ct-v5 t=[mqtt,cluster_size_3]:will_delay_node_restart FULL=1
```
* Bazel run gazelle
* mix format rabbitmqctl.ex
* Ensure ttl annotation is refelected in amqp legacy protocol state
* Fix id access in message store
* Fix rabbit_message_interceptor_SUITE
* dializer fixes
* Fix rabbit:rabbit_message_interceptor_SUITE-mixed
set_annotation/3 should not result in duplicate keys
* Fix MQTT shared_SUITE-mixed
Up to 3.12 non-MQTT publishes were always QoS 1 regardless of delivery_mode.
https://github.com/rabbitmq/rabbitmq-server/blob/75a953ce286a10aca910c098805a4f545989af38/deps/rabbitmq_mqtt/src/rabbit_mqtt_processor.erl#L2075-L2076
From now on, non-MQTT publishes are QoS 1 if durable.
This makes more sense.
The MQTT plugin must send a #basic_message{} to an old node that does
not understand message containers.
* Field content of 'v1_0.data' can be binary
Fix
```
bazel test //deps/rabbitmq_mqtt:shared_SUITE-mixed \
--test_env FOCUS="-group [mqtt,v4,cluster_size_1] -case trace" \
-t- --test_sharding_strategy=disabled
```
* Remove route/2 and implement route/3 for all exchange types.
This removes the route/2 callback from rabbit_exchange_type and
makes route/3 mandatory instead. This is a breaking change and
will require all implementations of exchange types to update their
code, however this is necessary anyway for them to correctly handle
the mc type.
stream filtering fixes
* Translate directly from MQTT to AMQP 0.9.1
* handle undecoded properties in mc_compat
amqpl: put clause in right order
recover death deatails from amqp data
* Replace callback init_amqp with convert_from
* Fix return value of lists:keyfind/3
* Translate directly from AMQP 0.9.1 to MQTT
* Fix MQTT payload size
MQTT payload can be a list when converted from AMQP 0.9.1 for example
First conversions tests
Plus some other conversion related fixes.
bazel
bazel
translate amqp 1.0 null to undefined
mc: property/2 and correlation_id/message_id return type tagged values.
To ensure we can support a variety of types better.
The type type tags are AMQP 1.0 flavoured.
fix death recovery
mc_mqtt: impl new api
Add callbacks to allow protocols to compact data before storage
And make readable if needing to query things repeatedly.
bazel fix
* more decoding
* tracking mixed versions compat
* mc: flip default of `durable` annotation to save some data.
Assuming most messages are durable and that in memory messages suffer less
from persistence overhead it makes sense for a non existent `durable`
annotation to mean durable=true.
* mc conversion tests and tidy up
* mc make x_header unstrict again
* amqpl: death record fixes
* bazel
* amqp -> amqpl conversion test
* Fix crash in mc_amqp:size/1
Body can be a single amqp-value section (instead of
being a list) as shown by test
```
make -C deps/rabbitmq_amqp1_0/ ct-system t=java
```
on branch native-amqp.
* Fix crash in lists:flatten/1
Data can be a single amqp-value section (instead of
being a list) as shown by test
```
make -C deps/rabbitmq_amqp1_0 ct-system t=dotnet:roundtrip_to_amqp_091
```
on branch native-amqp.
* Fix crash in rabbit_writer
Running test
```
make -C deps/rabbitmq_amqp1_0 ct-system t=dotnet:roundtrip_to_amqp_091
```
on branch native-amqp resulted in the following crash:
```
crasher:
initial call: rabbit_writer:enter_mainloop/2
pid: <0.711.0>
registered_name: []
exception error: bad argument
in function size/1
called as size([<<0>>,<<"Sw">>,[<<160,2>>,<<"hi">>]])
*** argument 1: not tuple or binary
in call from rabbit_binary_generator:build_content_frames/7 (rabbit_binary_generator.erl, line 89)
in call from rabbit_binary_generator:build_simple_content_frames/4 (rabbit_binary_generator.erl, line 61)
in call from rabbit_writer:assemble_frames/5 (rabbit_writer.erl, line 334)
in call from rabbit_writer:internal_send_command_async/3 (rabbit_writer.erl, line 365)
in call from rabbit_writer:handle_message/2 (rabbit_writer.erl, line 265)
in call from rabbit_writer:handle_message/3 (rabbit_writer.erl, line 232)
in call from rabbit_writer:mainloop1/2 (rabbit_writer.erl, line 223)
```
because #content.payload_fragments_rev is currently supposed to
be a flat list of binaries instead of being an iolist.
This commit fixes this crash inefficiently by calling
iolist_to_binary/1. A better solution would be to allow AMQP legacy's #content.payload_fragments_rev
to be an iolist.
* Add accidentally deleted line back
* mc: optimise mc_amqp internal format
By removint the outer records for message and delivery annotations
as well as application properties and footers.
* mc: optimis mc_amqp map_add by using upsert
* mc: refactoring and bug fixes
* mc_SUITE routingheader assertions
* mc remove serialize/1 callback as only used by amqp
* mc_amqp: avoid returning a nested list from protocol_state
* test and bug fix
* move infer_type to mc_util
* mc fixes and additiona assertions
* Support headers exchange routing for MQTT messages
When a headers exchange is bound to the MQTT topic exchange, routing
will be performend based on both MQTT topic (by the topic exchange) and
                             (Msg) ->
                                     mc:is_persistent(Msg)
                             end};
        false -> {undefined, fun(_MsgId) -> false end}
    end,
    TransientClient = msg_store_client_init(?TRANSIENT_MSG_STORE,
                                            undefined, VHost),
    {DeltaCount, DeltaBytes, IndexState} =
        rabbit_classic_queue_index_v2:recover(
          QueueName, RecoveryTerms,
          rabbit_vhost_msg_store:successfully_recovered_state(
            VHost,
            ?PERSISTENT_MSG_STORE),
          ContainsCheckFun, MsgIdxOnDiskFun, MsgAndIdxOnDiskFun,
          main),
    StoreState = rabbit_classic_queue_store_v2:init(QueueName),
    init(IsDurable, IndexState, StoreState,
         DeltaCount, DeltaBytes, RecoveryTerms,
         PersistentClient, TransientClient, VHost).
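
%% process_recovery_terms/1 extracts the persistent message store client
%% ref from the recovery terms; a fresh ref is generated when the previous
%% shutdown was not clean or when no persistent_ref was recorded.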
process_recovery_terms(Terms=non_clean_shutdown) ->
    {rabbit_guid:gen(), Terms};
process_recovery_terms(Terms) ->
    case proplists:get_value(persistent_ref, Terms) of
        undefined -> {rabbit_guid:gen(), []};
        PRef      -> {PRef, Terms}
    end.
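
%% terminate/2 flushes pending acks, closes the message store clients and
%% hands the recovery terms (next seq ids, persistent ref and counts) to
%% the index so they can be read back on the next recovery.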
terminate(_Reason, State) ->
    State1 = #vqstate { virtual_host        = VHost,
                        next_seq_id         = NextSeqId,
                        next_deliver_seq_id = NextDeliverSeqId,
                        persistent_count    = PCount,
                        persistent_bytes    = PBytes,
                        index_state         = IndexState,
                        store_state         = StoreState,
                        msg_store_clients   = {MSCStateP, MSCStateT} } =
        purge_pending_ack(true, State),
    PRef = case MSCStateP of
               undefined -> undefined;
               _         -> ok = maybe_client_terminate(MSCStateP),
                            rabbit_msg_store:client_ref(MSCStateP)
           end,
    ok = rabbit_msg_store:client_delete_and_terminate(MSCStateT),
    Terms = [{next_seq_id,         NextSeqId},
             {next_deliver_seq_id, NextDeliverSeqId},
             {persistent_ref,      PRef},
             {persistent_count,    PCount},
             {persistent_bytes,    PBytes}],
    a(State1#vqstate {
        index_state = rabbit_classic_queue_index_v2:terminate(VHost, Terms, IndexState),
        store_state = rabbit_classic_queue_store_v2:terminate(StoreState),
        msg_store_clients = undefined }).

%% the only difference between purge and delete is that delete also
%% needs to delete everything that's been delivered and not ack'd.
delete_and_terminate(_Reason, State) ->
    %% Normally when we purge messages we interact with the qi by
    %% issuing delivers and acks for every purged message. In this case
    %% we don't need to do that, so we just delete the qi.
    State1 = purge_and_index_reset(State),
    State2 = #vqstate { msg_store_clients = {MSCStateP, MSCStateT} } =
        purge_pending_ack_delete_and_terminate(State1),
    case MSCStateP of
        undefined -> ok;
        _         -> rabbit_msg_store:client_delete_and_terminate(MSCStateP)
    end,
    rabbit_msg_store:client_delete_and_terminate(MSCStateT),
    a(State2 #vqstate { msg_store_clients = undefined }).

delete_crashed(Q) when ?is_amqqueue(Q) ->
    QName = amqqueue:get_name(Q),
    ok = rabbit_classic_queue_index_v2:erase(QName).
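
%% purge/1 drops every ready message. When there are neither pending acks
%% nor unconfirmed messages the index can simply be reset; otherwise the
%% slower path that preserves pending-ack state is taken.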
purge(State = #vqstate { len = Len }) ->
    case is_pending_ack_empty(State) and is_unconfirmed_empty(State) of
        true ->
            {Len, purge_and_index_reset(State)};
        false ->
            {Len, purge_when_pending_acks(State)}
    end.

purge_acks(State) -> a(purge_pending_ack(false, State)).
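
%% publish_delivered/4 is used for messages handed straight to a consumer
%% at publish time, so no ready entry is created; the SeqId it returns
%% serves as the ack tag.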
publish(Msg, MsgProps, IsDelivered, ChPid, State) ->
    State1 =
        publish1(Msg, MsgProps, IsDelivered, ChPid,
                 fun maybe_write_to_disk/4,
                 State),
    a(maybe_update_rates(State1)).

publish_delivered(Msg, MsgProps, ChPid, State) ->
    {SeqId, State1} =
        publish_delivered1(Msg, MsgProps, ChPid,
                           fun maybe_write_to_disk/4,
                           State),
    {SeqId, a(maybe_update_rates(State1))}.

discard(_MsgId, _ChPid, State) -> State.

drain_confirmed(State = #vqstate { confirmed = C }) ->
    case sets:is_empty(C) of
        true  -> {[], State}; %% common case
        false -> {sets:to_list(C), State #vqstate {
                                     confirmed = sets:new([{version, 2}]) }}
    end.
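
%% dropwhile/2 removes messages from the head of the queue for as long as
%% Pred returns true; fetchwhile/4 does the same but additionally folds
%% Fun over each removed message, accumulating into Acc.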
dropwhile(Pred, State) ->
    {MsgProps, State1} =
        remove_by_predicate(Pred, State),
    {MsgProps, a(State1)}.

fetchwhile(Pred, Fun, Acc, State) ->
    {MsgProps, Acc1, State1} =
        fetch_by_predicate(Pred, Fun, Acc, State),
    {MsgProps, Acc1, a(State1)}.

fetch(AckRequired, State) ->
    case queue_out(State) of
        {empty, State1} ->
            {empty, a(State1)};
        {{value, MsgStatus}, State1} ->
            %% it is possible that the message wasn't read from disk
            %% at this point, so read it in.
            {Msg, State2} = read_msg(MsgStatus, State1),
            {AckTag, State3} = remove(AckRequired, MsgStatus, State2),
            {{Msg, MsgStatus#msg_status.is_delivered, AckTag}, a(State3)}
    end.

%% @todo It may seem like we would benefit from avoiding reading the
%% message content from disk. But benchmarks tell a different
%% story. So we don't, until a better understanding is gained.
drop(AckRequired, State) ->
    case queue_out(State) of
        {empty, State1} ->
            {empty, a(State1)};
        {{value, MsgStatus}, State1} ->
            {AckTag, State2} = remove(AckRequired, MsgStatus, State1),
            {{MsgStatus#msg_status.msg_id, AckTag}, a(State2)}
    end.

%% Duplicated from rabbit_backing_queue
-spec ack([ack()], state()) -> {[rabbit_guid:guid()], state()}.

ack([], State) ->
    {[], State};
%% optimisation: this head is essentially a partial evaluation of the
%% general case below, for the single-ack case.
ack([SeqId], State) ->
    case remove_pending_ack(true, SeqId, State) of
        {none, _} ->
            {[], State};
        {MsgStatus = #msg_status{ msg_id = MsgId },
         State1 = #vqstate{ ack_out_counter = AckOutCount }} ->
            State2 = remove_from_disk(MsgStatus, State1),
            {[MsgId],
             a(State2 #vqstate { ack_out_counter = AckOutCount + 1 })}
    end;
ack(AckTags, State) ->
    {{IndexOnDiskSeqIds, MsgIdsByStore, SeqIdsInStore, AllMsgIds},
     State1 = #vqstate { index_state     = IndexState,
                         store_state     = StoreState0,
                         ack_out_counter = AckOutCount }} =
        lists:foldl(
          fun (SeqId, {Acc, State2}) ->
                  %% @todo When acking many messages we should update stats once, not per message.
                  %% Also remove the pending acks all at once instead of one at a time.
                  case remove_pending_ack(true, SeqId, State2) of
                      {none, _} ->
                          {Acc, State2};
                      {MsgStatus, State3} ->
                          {accumulate_ack(MsgStatus, Acc), State3}
                  end
          end, {accumulate_ack_init(), State}, AckTags),
    {DeletedSegments, IndexState1} = rabbit_classic_queue_index_v2:ack(IndexOnDiskSeqIds, IndexState),
    StoreState1 = rabbit_classic_queue_store_v2:delete_segments(DeletedSegments, StoreState0),
    StoreState = lists:foldl(fun rabbit_classic_queue_store_v2:remove/2, StoreState1, SeqIdsInStore),
    State2 = remove_vhost_msgs_by_id(MsgIdsByStore, State1),
    {lists:reverse(AllMsgIds),
     a(State2 #vqstate { index_state     = IndexState1,
                         store_state     = StoreState,
                         ack_out_counter = AckOutCount + length(AckTags) })}.
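
%% requeue/2 puts acked messages back onto the queue: messages whose SeqId
%% falls within the current delta are merged back into delta, the rest are
%% merged into q3 at their original positions.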
requeue(AckTags, #vqstate { delta      = Delta,
                            q3         = Q3,
                            in_counter = InCounter,
                            len        = Len } = State) ->
    %% @todo This can be heavily simplified: if the message falls into delta,
    %% add it there. Otherwise just add it to q3 in the correct position.
    {SeqIds, Q3a, MsgIds, State1} = requeue_merge(lists:sort(AckTags), Q3, [],
                                                  delta_limit(Delta), State),
    {Delta1, MsgIds1, State2} = delta_merge(SeqIds, Delta, MsgIds,
                                            State1),
    MsgCount = length(MsgIds1),
    {MsgIds1, a(
                maybe_update_rates(ui(
                  State2 #vqstate { delta      = Delta1,
                                    q3         = Q3a,
                                    in_counter = InCounter + MsgCount,
                                    len        = Len + MsgCount })))}.

ackfold(MsgFun, Acc, State, AckTags) ->
    {AccN, StateN} =
        lists:foldl(fun(SeqId, {Acc0, State0}) ->
                            MsgStatus = lookup_pending_ack(SeqId, State0),
                            {Msg, State1} = read_msg(MsgStatus, State0),
                            {MsgFun(Msg, SeqId, Acc0), State1}
                    end, {Acc, State}, AckTags),
    {AccN, a(StateN)}.

fold(Fun, Acc, State = #vqstate{index_state = IndexState}) ->
    {Its, IndexState1} = lists:foldl(fun inext/2, {[], IndexState},
                                     [msg_iterator(State),
                                      disk_ack_iterator(State),
                                      ram_ack_iterator(State)]),
    ifold(Fun, Acc, Its, State#vqstate{index_state = IndexState1}).

len(#vqstate { len = Len }) -> Len.

is_empty(State) -> 0 == len(State).

depth(State) ->
    len(State) + count_pending_acks(State).
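
%% Ingress/egress rates are kept as exponentially-weighted moving averages
%% (see update_rate/4). They are only recomputed once more than
%% ?MSGS_PER_RATE_CALC messages have gone in or out since the last
%% calculation, to keep the bookkeeping cheap.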
maybe_update_rates(State = #vqstate{ in_counter  = InCount,
                                     out_counter = OutCount })
  when InCount + OutCount > ?MSGS_PER_RATE_CALC ->
    update_rates(State);
maybe_update_rates(State) ->
    State.

update_rates(State = #vqstate{ in_counter      = InCount,
                               out_counter     = OutCount,
                               ack_in_counter  = AckInCount,
                               ack_out_counter = AckOutCount,
                               rates = #rates{ in        = InRate,
                                               out       = OutRate,
                                               ack_in    = AckInRate,
                                               ack_out   = AckOutRate,
                                               timestamp = TS }}) ->
    Now = erlang:monotonic_time(),

    Rates = #rates { in        = update_rate(Now, TS, InCount,     InRate),
                     out       = update_rate(Now, TS, OutCount,    OutRate),
                     ack_in    = update_rate(Now, TS, AckInCount,  AckInRate),
                     ack_out   = update_rate(Now, TS, AckOutCount, AckOutRate),
                     timestamp = Now },

    State#vqstate{ in_counter      = 0,
                   out_counter     = 0,
                   ack_in_counter  = 0,
                   ack_out_counter = 0,
                   rates           = Rates }.

update_rate(Now, TS, Count, Rate) ->
    Time = erlang:convert_time_unit(Now - TS, native, micro_seconds) /
        ?MICROS_PER_SECOND,
    if
        Time == 0 -> Rate;
        true      -> rabbit_misc:moving_average(Time, ?RATE_AVG_HALF_LIFE,
                                                Count / Time, Rate)
    end.
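
%% needs_timeout/1 reports 'timed' when either the index has confirms that
%% need a sync or there are simple unconfirmed messages waiting; timeout/1
%% then syncs the index and the store and moves those unconfirmed ids into
%% the confirmed set.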
needs_timeout(#vqstate { index_state        = IndexState,
                         unconfirmed_simple = UCS }) ->
    case {rabbit_classic_queue_index_v2:needs_sync(IndexState), sets:is_empty(UCS)} of
        {false, false} -> timed;
        {confirms, _}  -> timed;
        {false, true}  -> false
    end.

timeout(State = #vqstate { index_state        = IndexState0,
                           store_state        = StoreState0,
                           unconfirmed_simple = UCS,
                           confirmed          = C }) ->
    IndexState = rabbit_classic_queue_index_v2:sync(IndexState0),
    StoreState = rabbit_classic_queue_store_v2:sync(StoreState0),
    State #vqstate { index_state        = IndexState,
                     store_state        = StoreState,
                     unconfirmed_simple = sets:new([{version,2}]),
                     confirmed          = sets:union(C, UCS) }.

handle_pre_hibernate(State = #vqstate { index_state        = IndexState0,
                                        store_state        = StoreState0,
                                        msg_store_clients  = MSCState0,
                                        unconfirmed_simple = UCS,
                                        confirmed          = C }) ->
    MSCState = msg_store_pre_hibernate(MSCState0),
    IndexState = rabbit_classic_queue_index_v2:flush(IndexState0),
    StoreState = rabbit_classic_queue_store_v2:sync(StoreState0),
    State #vqstate { index_state        = IndexState,
                     store_state        = StoreState,
                     msg_store_clients  = MSCState,
                     unconfirmed_simple = sets:new([{version,2}]),
                     confirmed          = sets:union(C, UCS) }.

resume(State) -> a(timeout(State)).

msg_rates(#vqstate { rates = #rates { in  = AvgIngressRate,
                                      out = AvgEgressRate } }) ->
    {AvgIngressRate, AvgEgressRate}.
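
%% info/2 answers queue metric queries. The q1/q2/q4 entries of
%% backing_queue_status are reported as 0 because this implementation only
%% keeps the delta and q3 stages; unknown items return ''.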
info(messages_ready_ram, #vqstate{ram_msg_count = RamMsgCount}) ->
    RamMsgCount;
info(messages_unacknowledged_ram, #vqstate{ram_pending_ack = RPA}) ->
    map_size(RPA);
info(messages_ram, State) ->
    info(messages_ready_ram, State) + info(messages_unacknowledged_ram, State);
info(messages_persistent, #vqstate{persistent_count = PersistentCount}) ->
    PersistentCount;
info(messages_paged_out, #vqstate{delta = #delta{transient = Count}}) ->
    Count;
info(message_bytes, #vqstate{bytes         = Bytes,
                             unacked_bytes = UBytes}) ->
    Bytes + UBytes;
info(message_bytes_ready, #vqstate{bytes = Bytes}) ->
    Bytes;
info(message_bytes_unacknowledged, #vqstate{unacked_bytes = UBytes}) ->
    UBytes;
info(message_bytes_ram, #vqstate{ram_bytes = RamBytes}) ->
    RamBytes;
info(message_bytes_persistent, #vqstate{persistent_bytes = PersistentBytes}) ->
    PersistentBytes;
info(message_bytes_paged_out, #vqstate{delta_transient_bytes = PagedOutBytes}) ->
    PagedOutBytes;
info(head_message_timestamp, #vqstate{
          q3              = Q3,
          ram_pending_ack = RPA}) ->
    head_message_timestamp(Q3, RPA);
info(oldest_message_received_timestamp, #vqstate{
          q3              = Q3,
          ram_pending_ack = RPA}) ->
    oldest_message_received_timestamp(Q3, RPA);
info(disk_reads, #vqstate{disk_read_count = Count}) ->
    Count;
info(disk_writes, #vqstate{disk_write_count = Count}) ->
    Count;
info(backing_queue_status, #vqstate {
          delta = Delta, q3 = Q3,
          mode                = Mode,
          len                 = Len,
          target_ram_count    = TargetRamCount,
          next_seq_id         = NextSeqId,
          next_deliver_seq_id = NextDeliverSeqId,
          ram_pending_ack     = RPA,
          disk_pending_ack    = DPA,
          unconfirmed         = UC,
          unconfirmed_simple  = UCS,
          index_state         = IndexState,
          store_state         = StoreState,
          rates = #rates { in      = AvgIngressRate,
                           out     = AvgEgressRate,
                           ack_in  = AvgAckIngressRate,
                           ack_out = AvgAckEgressRate }}) ->
    [ {mode                , Mode},
      {version             , 2},
      {q1                  , 0},
      {q2                  , 0},
      {delta               , Delta},
      {q3                  , ?QUEUE:len(Q3)},
      {q4                  , 0},
      {len                 , Len},
      {target_ram_count    , TargetRamCount},
      {next_seq_id         , NextSeqId},
      {next_deliver_seq_id , NextDeliverSeqId},
      {num_pending_acks    , map_size(RPA) + map_size(DPA)},
      {num_unconfirmed     , sets:size(UC) + sets:size(UCS)},
      {avg_ingress_rate    , AvgIngressRate},
      {avg_egress_rate     , AvgEgressRate},
      {avg_ack_ingress_rate, AvgAckIngressRate},
      {avg_ack_egress_rate , AvgAckEgressRate} ]
    ++ rabbit_classic_queue_index_v2:info(IndexState)
    ++ rabbit_classic_queue_store_v2:info(StoreState);
info(_, _) ->
    ''.

invoke(?MODULE, Fun, State) -> Fun(?MODULE, State);
invoke(      _,   _, State) -> State.

is_duplicate(_Msg, State) -> {false, State}.

%% Queue mode has been unified.
set_queue_mode(_, State) ->
    State.

zip_msgs_and_acks(Msgs, AckTags, Accumulator, _State) ->
    lists:foldl(fun ({{Msg, _Props}, AckTag}, Acc) ->
                        Id = mc:get_annotation(id, Msg),
                        [{Id, AckTag} | Acc]
                end, Accumulator, lists:zip(Msgs, AckTags)).

%% Queue version now ignored; only v2 is available.
set_queue_version(_, State) ->
    State.

%% This function is used by rabbit_classic_queue_index_v2
%% to convert v1 queues to v2 after an upgrade to 4.0.
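%% The conversion walks the v1 index one segment at a time; a
%% garbage_collect/0 call after each old-index read releases the garbage
%% that the read may have produced.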
convert_from_v1_to_v2_loop(_, _, V2Index, V2Store, _, HiSeqId, HiSeqId, _) ->
    {V2Index, V2Store};
convert_from_v1_to_v2_loop(QueueName, V1Index0, V2Index0, V2Store0,
                           Counters = {CountersRef, CountIx, BytesIx},
                           LoSeqId, HiSeqId, SkipFun) ->
    UpSeqId = lists:min([rabbit_queue_index:next_segment_boundary(LoSeqId),
                         HiSeqId]),
    {Messages, V1Index} = rabbit_queue_index:read(LoSeqId, UpSeqId, V1Index0),
    %% We do a garbage collect immediately after the old index read
    %% because that may have created a lot of garbage.
    garbage_collect(),
    {V2Index3, V2Store3} = lists:foldl(fun
        %% Move embedded messages to the per-queue store.
        ({Msg, SeqId, rabbit_queue_index, Props, IsPersistent},
         {V2Index1, V2Store1}) ->
            MsgId = mc:get_annotation(id, Msg),
            {MsgLocation, V2Store2} = rabbit_classic_queue_store_v2:write(SeqId, Msg, Props, V2Store1),
            V2Index2 = case SkipFun(SeqId, V2Index1) of
                {skip, V2Index1a} ->
                    V2Index1a;
                {write, V2Index1a} ->
                    counters:add(CountersRef, CountIx, 1),
                    counters:add(CountersRef, BytesIx, Props#message_properties.size),
                    rabbit_classic_queue_index_v2:publish(MsgId, SeqId, MsgLocation, Props, IsPersistent, infinity, V2Index1a)
            end,
            {V2Index2, V2Store2};
        %% Keep messages in the per-vhost store where they are.
        ({MsgId, SeqId, rabbit_msg_store, Props, IsPersistent},
         {V2Index1, V2Store1}) ->
            V2Index2 = case SkipFun(SeqId, V2Index1) of
                {skip, V2Index1a} ->
                    V2Index1a;
                {write, V2Index1a} ->
                    counters:add(CountersRef, CountIx, 1),
                    counters:add(CountersRef, BytesIx, Props#message_properties.size),
                    rabbit_classic_queue_index_v2:publish(MsgId, SeqId, rabbit_msg_store, Props, IsPersistent, infinity, V2Index1a)
            end,
            {V2Index2, V2Store1}
    end, {V2Index0, V2Store0}, Messages),
    %% Flush to disk to avoid keeping too much in memory between segments.
    V2Index = rabbit_classic_queue_index_v2:flush(V2Index3),
    V2Store = rabbit_classic_queue_store_v2:sync(V2Store3),
    %% We have written everything to disk. We can delete the old segment file
    %% to free up much needed space, to avoid doubling disk usage during the upgrade.
    rabbit_queue_index:delete_segment_file_for_seq_id(LoSeqId, V1Index),
    %% Log some progress to keep the user aware of what's going on, as moving
    %% embedded messages can take quite some time.
    #resource{virtual_host = VHost, name = Name} = QueueName,
    rabbit_log:info("Queue ~ts in vhost ~ts converted ~b messages from v1 to v2",
                    [Name, VHost, length(Messages)]),
    convert_from_v1_to_v2_loop(QueueName, V1Index, V2Index, V2Store, Counters, UpSeqId, HiSeqId, SkipFun).
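
%% A note on SkipFun in the loop above: it is called as SkipFun(SeqId, V2Index)
%% and is expected to return {skip, Index} when the entry must not be written
%% to the v2 index, or {write, Index} when it must. A minimal sketch of such a
%% fun, assuming a hypothetical RecoveredSet of sequence ids that already exist
%% in the v2 index, might look like:
%%
%%     SkipFun = fun(SeqId, V2Index) ->
%%                       case sets:is_element(SeqId, RecoveredSet) of
%%                           true  -> {skip, V2Index};
%%                           false -> {write, V2Index}
%%                       end
%%               end
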
%% Get the Timestamp property of the first msg, if present. This is
%% the one with the oldest timestamp among the heads of the pending
%% acks and unread queues. We can't check disk_pending_acks as these
%% are paged out - we assume some will soon be paged in rather than
%% forcing it to happen. Pending ack msgs are included as they are
%% regarded as unprocessed until acked; this also prevents the result
%% from apparently oscillating during repeated rejects.
%%
head_message_timestamp(Q3, RPA) ->
    HeadMsgs = [ HeadMsgStatus#msg_status.msg ||
                   HeadMsgStatus <-
                       [ get_q_head(Q3),
                         get_pa_head(RPA) ],
                   HeadMsgStatus /= undefined,
                   HeadMsgStatus#msg_status.msg /= undefined ],

    Timestamps =
        [Timestamp div 1000
         || HeadMsg <- HeadMsgs,
            Timestamp <- [mc:timestamp(HeadMsg)],
            Timestamp /= undefined
        ],

    case Timestamps == [] of
        true -> '';
        false -> lists:min(Timestamps)
    end.

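%% Like head_message_timestamp/2 above, but based on the timestamp recorded
%% when the message was received (the ?ANN_RECEIVED_AT_TIMESTAMP mc
%% annotation) rather than the message's own Timestamp property, and returned
%% as-is instead of being converted to seconds.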
oldest_message_received_timestamp(Q3, RPA) ->
    HeadMsgs = [ HeadMsgStatus#msg_status.msg ||
                   HeadMsgStatus <-
                       [ get_q_head(Q3),
                         get_pa_head(RPA) ],
                   HeadMsgStatus /= undefined,
                   HeadMsgStatus#msg_status.msg /= undefined ],

    Timestamps =
        [Timestamp
         || HeadMsg <- HeadMsgs,
            Timestamp <- [mc:get_annotation(?ANN_RECEIVED_AT_TIMESTAMP, HeadMsg)],
            Timestamp /= undefined
        ],

    case Timestamps == [] of
        true -> '';
        false -> lists:min(Timestamps)
    end.

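%% Head accessors used above: get_q_head/1 returns the #msg_status{} at the
%% head of q3 (or undefined when empty); get_pa_head/1 returns the pending
%% ack entry with the smallest key, i.e. the lowest sequence id (or undefined
%% when there are none).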
get_q_head(Q) ->
    ?QUEUE:get(Q, undefined).

get_pa_head(PA) ->
    case maps:keys(PA) of
        [] -> undefined;
        Keys ->
            Smallest = lists:min(Keys),
            map_get(Smallest, PA)
    end.

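%% Assertion helpers: a/1 checks the internal invariants of the #vqstate{}
%% (consistency of the length, byte and RAM counters) and returns the state
%% unchanged; d/1 and m/1 perform the same kind of sanity check for #delta{}
%% and #msg_status{} records respectively.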
a(State = #vqstate { delta = Delta, q3 = Q3,
                     len = Len,
                     bytes = Bytes,
                     unacked_bytes = UnackedBytes,
                     persistent_count = PersistentCount,
                     persistent_bytes = PersistentBytes,
                     ram_msg_count = RamMsgCount,
                     ram_bytes = RamBytes}) ->
    ED = Delta#delta.count == 0,
    E3 = ?QUEUE:is_empty(Q3),
    LZ = Len == 0,
    L3 = ?QUEUE:len(Q3),

    %% if the queue is empty, then delta is empty and q3 is empty.
    true = LZ == (ED and E3),

    %% There should be no messages in q1, q2, and q4
    true = Delta#delta.count + L3 == Len,

    true = Len >= 0,
    true = Bytes >= 0,
    true = UnackedBytes >= 0,
    true = PersistentCount >= 0,
    true = PersistentBytes >= 0,
    true = RamMsgCount >= 0,
    true = RamMsgCount =< Len,
    true = RamBytes >= 0,
    true = RamBytes =< Bytes + UnackedBytes,

    State.

d(Delta = #delta { start_seq_id = Start, count = Count, end_seq_id = End })
  when Start + Count =< End ->
    Delta.

m(MsgStatus = #msg_status { is_persistent = IsPersistent,
                            msg_location = MsgLocation,
                            index_on_disk = IndexOnDisk }) ->
    true = (not IsPersistent) or IndexOnDisk,
    true = msg_in_ram(MsgStatus) or (MsgLocation =/= memory),
    MsgStatus.

one_if(true ) -> 1;
one_if(false) -> 0.

cons_if(true, E, L) -> [E | L];
cons_if(false, _E, L) -> L.

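%% Build the initial #msg_status{} for a message entering the queue: it starts
%% out in memory and not yet written to the on-disk index; the target store is
%% chosen by determine_persist_to/3 (which takes the configured IndexMaxSize
%% into account).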
msg_status(IsPersistent, IsDelivered, SeqId,
           Msg, MsgProps, IndexMaxSize) ->
    MsgId = mc:get_annotation(id, Msg),
    #msg_status{seq_id = SeqId,
                msg_id = MsgId,
                msg = Msg,
                is_persistent = IsPersistent,
                %% This value will only be correct when the message is going out.
                %% See the set_deliver_flag/2 function.
                is_delivered = IsDelivered,
                msg_location = memory,
                index_on_disk = false,
                persist_to = determine_persist_to(Msg, MsgProps, IndexMaxSize),
                msg_props = MsgProps}.

Message Containers (#5077)
This PR implements an approach for a "protocol (data format) agnostic core" where the format of the message isn't converted at point of reception.
Currently all non AMQP 0.9.1 originating messages are converted into a AMQP 0.9.1 flavoured basic_message record before sent to a queue. If the messages are then consumed by the originating protocol they are converted back from AMQP 0.9.1. For some protocols such as MQTT 3.1 this isn't too expensive as MQTT is mostly a fairly easily mapped subset of AMQP 0.9.1 but for others such as AMQP 1.0 the conversions are awkward and in some cases lossy even if consuming from the originating protocol.
This PR instead wraps all incoming messages in their originating form into a generic, extensible message container type (mc). The container module exposes an API to get common message details such as size and various properties (ttl, priority etc) directly from the source data type. Each protocol needs to implement the mc behaviour such that when a message originating form one protocol is consumed by another protocol we convert it to the target protocol at that point.
The message container also contains annotations, dead letter records and other meta data we need to record during the lifetime of a message. The original protocol message is never modified unless it is consumed.
This includes conversion modules to and from amqp, amqpl (AMQP 0.9.1) and mqtt.
COMMIT HISTORY:
* Refactor away from using the delivery{} record
In many places including exchange types. This should make it
easier to move towards using a message container type instead of
basic_message.
Add mc module and move direct replies outside of exchange
Lots of changes incl classic queues
Implement stream support incl amqp conversions
simplify mc state record
move mc.erl
mc dlx stuff
recent history exchange
Make tracking work
But doesn't take a protocol agnostic approach as we just convert
everything into AMQP legacy and back. Might be good enough for now.
Tracing as a whole may want a bit of a re-vamp at some point.
tidy
make quorum queue peek work by legacy conversion
dead lettering fixes
dead lettering fixes
CMQ fixes
rabbit_trace type fixes
fixes
fix
Fix classic queue props
test assertion fix
feature flag and backwards compat
Enable message_container feature flag in some SUITEs
Dialyzer fixes
fixes
fix
test fixes
Various
Manually update a gazelle generated file
until a gazelle enhancement can be made
https://github.com/rabbitmq/rules_erlang/issues/185
Add message_containers_SUITE to bazel
and regen bazel files with gazelle from rules_erlang@main
Simplify essential proprty access
Such as durable, ttl and priority by extracting them into annotations
at message container init time.
Move type
to remove dependenc on amqp10 stuff in mc.erl
mostly because I don't know how to make bazel do the right thing
add more stuff
Refine routing header stuff
wip
Cosmetics
Do not use "maybe" as type name as "maybe" is a keyword since OTP 25
which makes Erlang LS complain.
* Dedup death queue names
* Fix function clause crashes
Fix failing tests in the MQTT shared_SUITE:
A classic queue message ID can be undefined as set in
https://github.com/rabbitmq/rabbitmq-server/blob/fbe79ff47b4edbc0fd95457e623d6593161ad198/deps/rabbit/src/rabbit_classic_queue_index_v2.erl#L1048
Fix failing tests in the MQTT shared_SUITE-mixed:
When feature flag message_containers is disabled, the
message is not an #mc{} record, but a #basic_message{} record.
* Fix is_utf8_no_null crash
Prior to this commit, the function crashed if invalid UTF-8 was
provided, e.g.:
```
1> rabbit_misc:is_valid_shortstr(<<"😇"/utf16>>).
** exception error: no function clause matching rabbit_misc:is_utf8_no_null(<<216,61,222,7>>) (rabbit_misc.erl, line 1481)
```
* Implement mqtt mc behaviour
For now via amqp translation.
This is still work in progress, but the following SUITEs pass:
```
make -C deps/rabbitmq_mqtt ct-shared t=[mqtt,v5,cluster_size_1] FULL=1
make -C deps/rabbitmq_mqtt ct-v5 t=[mqtt,cluster_size_1] FULL=1
```
* Shorten mc file names
Module name length matters because for each persistent message the #mc{}
record is persisted to disk.
```
1> iolist_size(term_to_iovec({mc, rabbit_mc_amqp_legacy})).
30
2> iolist_size(term_to_iovec({mc, mc_amqpl})).
17
```
This commit renames the mc modules:
```
ag -l rabbit_mc_amqp_legacy | xargs sed -i 's/rabbit_mc_amqp_legacy/mc_amqpl/g'
ag -l rabbit_mc_amqp | xargs sed -i 's/rabbit_mc_amqp/mc_amqp/g'
ag -l rabbit_mqtt_mc | xargs sed -i 's/rabbit_mqtt_mc/mc_mqtt/g'
```
* mc: make deaths an annotation + fixes
* Fix mc_mqtt protocol_state callback
* Fix test will_delay_node_restart
```
make -C deps/rabbitmq_mqtt ct-v5 t=[mqtt,cluster_size_3]:will_delay_node_restart FULL=1
```
* Bazel run gazelle
* mix format rabbitmqctl.ex
* Ensure ttl annotation is refelected in amqp legacy protocol state
* Fix id access in message store
* Fix rabbit_message_interceptor_SUITE
* dializer fixes
* Fix rabbit:rabbit_message_interceptor_SUITE-mixed
set_annotation/3 should not result in duplicate keys
* Fix MQTT shared_SUITE-mixed
Up to 3.12 non-MQTT publishes were always QoS 1 regardless of delivery_mode.
https://github.com/rabbitmq/rabbitmq-server/blob/75a953ce286a10aca910c098805a4f545989af38/deps/rabbitmq_mqtt/src/rabbit_mqtt_processor.erl#L2075-L2076
From now on, non-MQTT publishes are QoS 1 if durable.
This makes more sense.
The MQTT plugin must send a #basic_message{} to an old node that does
not understand message containers.
* Field content of 'v1_0.data' can be binary
Fix
```
bazel test //deps/rabbitmq_mqtt:shared_SUITE-mixed \
--test_env FOCUS="-group [mqtt,v4,cluster_size_1] -case trace" \
-t- --test_sharding_strategy=disabled
```
* Remove route/2 and implement route/3 for all exchange types.
This removes the route/2 callback from rabbit_exchange_type and
makes route/3 mandatory instead. This is a breaking change and
will require all implementations of exchange types to update their
code; however, this is necessary anyway for them to correctly handle
the mc type.
stream filtering fixes
* Translate directly from MQTT to AMQP 0.9.1
* handle undecoded properties in mc_compat
amqpl: put clause in right order
recover death details from amqp data
* Replace callback init_amqp with convert_from
* Fix return value of lists:keyfind/3
* Translate directly from AMQP 0.9.1 to MQTT
* Fix MQTT payload size
MQTT payload can be a list when converted from AMQP 0.9.1, for example
First conversion tests
Plus some other conversion related fixes.
bazel
bazel
translate amqp 1.0 null to undefined
mc: property/2 and correlation_id/message_id return type tagged values.
To ensure we can support a variety of types better.
The type tags are AMQP 1.0 flavoured.
fix death recovery
mc_mqtt: impl new api
Add callbacks to allow protocols to compact data before storage
and to make it readable when it needs to be queried repeatedly.
bazel fix
* more decoding
* tracking mixed versions compat
* mc: flip default of `durable` annotation to save some data.
Assuming most messages are durable and that in-memory messages suffer less
from persistence overhead, it makes sense for a non-existent `durable`
annotation to mean durable=true.
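A minimal sketch of the idea (illustrative only, not the actual mc API; the function name and the plain annotations map are assumptions):
```
%% An absent 'durable' key means durable = true, so the common (durable)
%% case needs no annotation entry at all.
is_durable(Annotations) when is_map(Annotations) ->
    maps:get(durable, Annotations, true).
```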
* mc conversion tests and tidy up
* mc make x_header unstrict again
* amqpl: death record fixes
* bazel
* amqp -> amqpl conversion test
* Fix crash in mc_amqp:size/1
Body can be a single amqp-value section (instead of
being a list) as shown by test
```
make -C deps/rabbitmq_amqp1_0/ ct-system t=java
```
on branch native-amqp.
* Fix crash in lists:flatten/1
Data can be a single amqp-value section (instead of
being a list) as shown by test
```
make -C deps/rabbitmq_amqp1_0 ct-system t=dotnet:roundtrip_to_amqp_091
```
on branch native-amqp.
* Fix crash in rabbit_writer
Running test
```
make -C deps/rabbitmq_amqp1_0 ct-system t=dotnet:roundtrip_to_amqp_091
```
on branch native-amqp resulted in the following crash:
```
crasher:
initial call: rabbit_writer:enter_mainloop/2
pid: <0.711.0>
registered_name: []
exception error: bad argument
in function size/1
called as size([<<0>>,<<"Sw">>,[<<160,2>>,<<"hi">>]])
*** argument 1: not tuple or binary
in call from rabbit_binary_generator:build_content_frames/7 (rabbit_binary_generator.erl, line 89)
in call from rabbit_binary_generator:build_simple_content_frames/4 (rabbit_binary_generator.erl, line 61)
in call from rabbit_writer:assemble_frames/5 (rabbit_writer.erl, line 334)
in call from rabbit_writer:internal_send_command_async/3 (rabbit_writer.erl, line 365)
in call from rabbit_writer:handle_message/2 (rabbit_writer.erl, line 265)
in call from rabbit_writer:handle_message/3 (rabbit_writer.erl, line 232)
in call from rabbit_writer:mainloop1/2 (rabbit_writer.erl, line 223)
```
because #content.payload_fragments_rev is currently supposed to
be a flat list of binaries instead of being an iolist.
This commit fixes this crash inefficiently by calling
iolist_to_binary/1. A better solution would be to allow AMQP legacy's #content.payload_fragments_rev
to be an iolist.
* Add accidentally deleted line back
* mc: optimise mc_amqp internal format
By removing the outer records for message and delivery annotations
as well as application properties and footers.
* mc: optimise mc_amqp map_add by using upsert
* mc: refactoring and bug fixes
* mc_SUITE routingheader assertions
* mc remove serialize/1 callback as only used by amqp
* mc_amqp: avoid returning a nested list from protocol_state
* test and bug fix
* move infer_type to mc_util
* mc fixes and additional assertions
* Support headers exchange routing for MQTT messages
When a headers exchange is bound to the MQTT topic exchange, routing
will be performed based on both MQTT topic (by the topic exchange) and
MQTT User Property (by the headers exchange).
This combines the best of both MQTT 5.0 and AMQP 0.9.1 and
enables powerful routing topologies.
When the User Property contains the same name multiple times, only the
last name (and value) will be considered by the headers exchange.
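Illustrative only (not the plugin's actual code): because maps:from_list/1 keeps the right-most value when a key repeats, the de-duplication can be sketched as:
```
%% Keep only the last value seen for each User Property name.
%% Note: the relative order of the remaining pairs is not preserved.
dedup_user_properties(UserProps) when is_list(UserProps) ->
    maps:to_list(maps:from_list(UserProps)).
```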
* Fix crash when sending from stream to amqpl
When publishing a message via the stream protocol and consuming it via
AMQP 0.9.1, the following crash occurred prior to this commit:
```
crasher:
initial call: rabbit_channel:init/1
pid: <0.818.0>
registered_name: []
exception exit: {{badmatch,undefined},
[{rabbit_channel,handle_deliver0,4,
[{file,"rabbit_channel.erl"},
{line,2728}]},
{lists,foldl,3,[{file,"lists.erl"},{line,1594}]},
{rabbit_channel,handle_cast,2,
[{file,"rabbit_channel.erl"},
{line,728}]},
{gen_server2,handle_msg,2,
[{file,"gen_server2.erl"},{line,1056}]},
{proc_lib,wake_up,3,
[{file,"proc_lib.erl"},{line,251}]}]}
```
This commit first gives `mc:init/3` the chance to set exchange and
routing_keys annotations.
If not set, `rabbit_stream_queue` will set these annotations assuming
the message was originally published via the stream protocol.
* Support consistent hash exchange routing for MQTT 5.0
When a consistent hash exchange is bound to the MQTT topic exchange,
MQTT 5.0 messages can be routed to queues consistently based on the
Correlation-Data in the PUBLISH packet.
* Convert MQTT 5.0 User Property
* to AMQP 0.9.1 headers
* from AMQP 0.9.1 headers
* to AMQP 1.0 application properties and message annotations
* from AMQP 1.0 application properties and message annotations
* Make use of Annotations in mc_mqtt:protocol_state/2
mc_mqtt:protocol_state/2 includes Annotations as parameter.
It's cleaner to make use of these Annotations when computing the
protocol state instead of relying on the caller (rabbitmq_mqtt_processor)
to compute the protocol state.
* Enforce AMQP 0.9.1 field name length limit
The AMQP 0.9.1 spec prohibits field names longer than 128 characters.
Therefore, when converting AMQP 1.0 message annotations, application
properties or MQTT 5.0 User Property to AMQP 0.9.1 headers, drop any
names longer than 128 characters.
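A hedged sketch of that rule (the real logic lives in the mc conversion modules; the function name is made up), treating each AMQP 0.9.1 header as a {Name, Type, Value} tuple with a binary name:
```
%% Drop headers whose name is longer than 128 bytes.
drop_long_header_names(Headers) ->
    [{Name, Type, Value} || {Name, Type, Value} <- Headers,
                            byte_size(Name) =< 128].
```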
* Fix type specs
Apply feedback from Michael Davis
Co-authored-by: Michael Davis <mcarsondavis@gmail.com>
* Add mc_mqtt unit test suite
Implement mc_mqtt:x_header/2
* Translate indicator that payload is UTF-8 encoded
when converting between MQTT 5.0 and AMQP 1.0
* Translate single amqp-value section from AMQP 1.0 to MQTT
Convert to a text representation, if possible, and indicate to the MQTT
client that the payload is UTF-8 encoded. This way, the MQTT client will
be able to parse the payload.
If conversion to text representation is not possible, encode the payload
using the AMQP 1.0 type system and indicate the encoding via Content-Type
message/vnd.rabbitmq.amqp.
This Content-Type is not registered.
Type "message" makes sense since it's a message.
Vendor tree "vnd.rabbitmq.amqp" makes sense since the bare subtype "amqp" is not
registered.
* Fix payload conversion
* Translate Response Topic between MQTT and AMQP
Translate MQTT 5.0 Response Topic to AMQP 1.0 reply-to address and vice
versa.
The Response Topic must be a UTF-8 encoded string.
This commit re-uses the already defined RabbitMQ target addresses:
```
"/topic/" RK Publish to amq.topic with routing key RK
"/exchange/" X "/" RK Publish to exchange X with routing key RK
```
By default, the MQTT topic exchange is configured to be amq.topic using
the 1st target address.
When an operator modifies the mqtt.exchange, the 2nd target address is
used.
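A rough sketch of that address selection (function and argument names are made up; the MQTT-to-AMQP topic separator translation is omitted):
```
%% Pick the target address form based on the configured MQTT topic exchange.
response_topic_to_address(Topic, <<"amq.topic">>) ->
    <<"/topic/", Topic/binary>>;
response_topic_to_address(Topic, Exchange) ->
    <<"/exchange/", Exchange/binary, "/", Topic/binary>>.
```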
* Apply PR feedback
and fix formatting
Co-authored-by: Michael Davis <mcarsondavis@gmail.com>
* tidy up
* Add MQTT message_containers test
* consistent hash exchange: avoid amqp legacy conversion
When hashing on a header value.
* Avoid converting to amqp legacy when using exchange federation
* Fix test flake
* test and dialyzer fixes
* dialyzer fix
* Add MQTT protocol interoperability tests
Test receiving from and sending to MQTT 5.0 and
* AMQP 0.9.1
* AMQP 1.0
* STOMP
* Streams
* Regenerate portions of deps/rabbit/app.bzl with gazelle
I'm not exactly sure how this happened, but gazelle seems to have been
run with an older version of the rules_erlang gazelle extension at
some point. This caused generation of a structure that is no longer
used. This commit updates the structure to the current pattern.
* mc: refactoring
* mc_amqpl: handle delivery annotations
Just in case they are included.
Also use iolist_to_iovec to create flat list of binaries when
converting from amqp with amqp encoded payload.
---------
Co-authored-by: David Ansari <david.ansari@gmx.de>
Co-authored-by: Michael Davis <mcarsondavis@gmail.com>
Co-authored-by: Rin Kuryloski <kuryloskip@vmware.com>
2023-08-31 18:27:13 +08:00
|
|
|
beta_msg_status({MsgId, SeqId, MsgLocation, MsgProps, IsPersistent})
|
|
|
|
|
when is_binary(MsgId) orelse
|
|
|
|
|
MsgId =:= undefined ->
|
|
|
|
|
MS0 = beta_msg_status0(SeqId, MsgProps, IsPersistent),
|
|
|
|
|
MS0#msg_status{msg_id = MsgId,
|
|
|
|
|
msg = undefined,
|
|
|
|
|
persist_to = case is_tuple(MsgLocation) of
|
|
|
|
|
true -> queue_store; %% @todo I'm not sure this clause is triggered anymore.
|
|
|
|
|
false -> msg_store
|
|
|
|
|
end,
|
|
|
|
|
msg_location = MsgLocation};
|
|
|
|
|
beta_msg_status({Msg, SeqId, MsgLocation, MsgProps, IsPersistent}) ->
|
|
|
|
|
MsgId = mc:get_annotation(id, Msg),
|
2021-08-27 21:57:09 +08:00
|
|
|
MS0 = beta_msg_status0(SeqId, MsgProps, IsPersistent),
|
2014-12-04 00:44:13 +08:00
|
|
|
MS0#msg_status{msg_id = MsgId,
|
|
|
|
|
msg = Msg,
|
2022-10-21 19:38:51 +08:00
|
|
|
persist_to = case MsgLocation of
|
|
|
|
|
rabbit_queue_index -> queue_index;
|
|
|
|
|
{rabbit_classic_queue_store_v2, _, _} -> queue_store;
|
|
|
|
|
rabbit_msg_store -> msg_store
|
|
|
|
|
end,
|
|
|
|
|
msg_location = case MsgLocation of
|
|
|
|
|
rabbit_queue_index -> memory;
|
|
|
|
|
_ -> MsgLocation
|
Message Containers (#5077)
2023-08-31 18:27:13 +08:00
|
|
|
end}.
|
2014-12-03 23:52:49 +08:00
|
|
|
|
2021-08-27 21:57:09 +08:00
|
|
|
beta_msg_status0(SeqId, MsgProps, IsPersistent) ->
|
2013-01-13 00:15:06 +08:00
|
|
|
#msg_status{seq_id = SeqId,
|
|
|
|
|
msg = undefined,
|
|
|
|
|
is_persistent = IsPersistent,
|
|
|
|
|
index_on_disk = true,
|
|
|
|
|
msg_props = MsgProps}.
|
2010-06-12 20:38:14 +08:00
|
|
|
|
2010-10-21 07:21:37 +08:00
|
|
|
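%% Run Fun against the persistent or the transient message store client,
%% depending on IsPersistent, and put the updated client back in the pair.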
with_msg_store_state({MSCStateP, MSCStateT}, true, Fun) ->
|
|
|
|
|
{Result, MSCStateP1} = Fun(MSCStateP),
|
|
|
|
|
{Result, {MSCStateP1, MSCStateT}};
|
|
|
|
|
with_msg_store_state({MSCStateP, MSCStateT}, false, Fun) ->
|
|
|
|
|
{Result, MSCStateT1} = Fun(MSCStateT),
|
|
|
|
|
{Result, {MSCStateP, MSCStateT1}}.
|
|
|
|
|
|
|
|
|
|
with_immutable_msg_store_state(MSCState, IsPersistent, Fun) ->
|
|
|
|
|
{Res, MSCState} = with_msg_store_state(MSCState, IsPersistent,
|
|
|
|
|
fun (MSCState1) ->
|
|
|
|
|
{Fun(MSCState1), MSCState1}
|
|
|
|
|
end),
|
|
|
|
|
Res.
|
2010-06-16 15:01:53 +08:00
|
|
|
|
2023-02-03 20:56:02 +08:00
|
|
|
msg_store_client_init(MsgStore, MsgOnDiskFun, VHost) ->
|
2012-02-07 23:11:53 +08:00
|
|
|
msg_store_client_init(MsgStore, rabbit_guid:gen(), MsgOnDiskFun,
|
2023-02-03 20:56:02 +08:00
|
|
|
VHost).
|
2011-01-15 03:09:50 +08:00
|
|
|
|
2023-02-03 20:56:02 +08:00
|
|
|
msg_store_client_init(MsgStore, Ref, MsgOnDiskFun, VHost) ->
|
2017-03-17 21:03:03 +08:00
|
|
|
rabbit_vhost_msg_store:client_init(VHost, MsgStore,
|
2023-02-03 20:56:02 +08:00
|
|
|
Ref, MsgOnDiskFun).
|
2010-06-16 15:01:53 +08:00
|
|
|
|
2023-05-26 20:12:52 +08:00
|
|
|
msg_store_pre_hibernate({undefined, MSCStateT}) ->
|
|
|
|
|
{undefined,
|
|
|
|
|
rabbit_msg_store:client_pre_hibernate(MSCStateT)};
|
2023-05-24 23:05:02 +08:00
|
|
|
msg_store_pre_hibernate({MSCStateP, MSCStateT}) ->
|
|
|
|
|
{rabbit_msg_store:client_pre_hibernate(MSCStateP),
|
|
|
|
|
rabbit_msg_store:client_pre_hibernate(MSCStateT)}.
|
|
|
|
|
|
2023-03-10 16:48:43 +08:00
|
|
|
msg_store_write(MSCState, IsPersistent, SeqId, MsgId, Msg) ->
|
2010-10-26 21:51:35 +08:00
|
|
|
with_immutable_msg_store_state(
|
2010-06-16 15:01:53 +08:00
|
|
|
MSCState, IsPersistent,
|
2012-01-18 03:40:55 +08:00
|
|
|
fun (MSCState1) ->
|
2023-03-10 16:48:43 +08:00
|
|
|
rabbit_msg_store:write_flow(SeqId, MsgId, Msg, MSCState1)
|
2012-01-18 03:40:55 +08:00
|
|
|
end).
|
2010-06-12 20:38:14 +08:00
|
|
|
|
2011-03-05 02:30:25 +08:00
|
|
|
msg_store_read(MSCState, IsPersistent, MsgId) ->
|
2010-10-21 07:21:37 +08:00
|
|
|
with_msg_store_state(
|
|
|
|
|
MSCState, IsPersistent,
|
2012-01-18 03:40:55 +08:00
|
|
|
fun (MSCState1) ->
|
|
|
|
|
rabbit_msg_store:read(MsgId, MSCState1)
|
|
|
|
|
end).
|
2010-06-16 15:01:53 +08:00
|
|
|
|
2011-03-05 02:30:25 +08:00
|
|
|
msg_store_remove(MSCState, IsPersistent, MsgIds) ->
|
2010-10-21 07:21:37 +08:00
|
|
|
with_immutable_msg_store_state(
|
|
|
|
|
MSCState, IsPersistent,
|
2012-01-18 03:40:55 +08:00
|
|
|
fun (MSCState1) ->
|
|
|
|
|
rabbit_msg_store:remove(MsgIds, MSCState1)
|
|
|
|
|
end).
|
2010-06-16 15:01:53 +08:00
|
|
|
|
2021-07-28 19:43:31 +08:00
|
|
|
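%% Turn index entries recovered from disk into beta message statuses,
%% acking transient messages published before the transient threshold
%% and skipping messages that are already pending acks.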
betas_from_index_entries(List, TransientThreshold, DelsAndAcksFun, State = #vqstate{ next_deliver_seq_id = NextDeliverSeqId0 }) ->
|
2021-11-08 20:30:08 +08:00
|
|
|
{Filtered, NextDeliverSeqId, Acks, RamReadyCount, RamBytes, TransientCount, TransientBytes} =
|
2010-04-06 07:18:05 +08:00
|
|
|
lists:foldr(
|
2021-08-27 21:57:09 +08:00
|
|
|
fun ({_MsgOrId, SeqId, _MsgLocation, _MsgProps, IsPersistent} = M,
|
2021-11-08 20:30:08 +08:00
|
|
|
{Filtered1, NextDeliverSeqId1, Acks1, RRC, RB, TC, TB} = Acc) ->
|
2010-04-06 07:18:05 +08:00
|
|
|
case SeqId < TransientThreshold andalso not IsPersistent of
|
2010-06-17 19:53:07 +08:00
|
|
|
true -> {Filtered1,
|
2021-11-08 20:30:08 +08:00
|
|
|
next_deliver_seq_id(SeqId, NextDeliverSeqId1),
|
2017-02-10 18:56:23 +08:00
|
|
|
[SeqId | Acks1], RRC, RB, TC, TB};
|
2014-12-11 21:58:14 +08:00
|
|
|
false -> MsgStatus = m(beta_msg_status(M)),
|
2014-12-12 18:51:17 +08:00
|
|
|
HaveMsg = msg_in_ram(MsgStatus),
|
2014-12-11 21:58:14 +08:00
|
|
|
Size = msg_size(MsgStatus),
|
2022-01-05 21:47:00 +08:00
|
|
|
case is_msg_in_pending_acks(SeqId, State) of
|
2014-12-11 21:58:14 +08:00
|
|
|
false -> {?QUEUE:in_r(MsgStatus, Filtered1),
|
2021-11-08 20:30:08 +08:00
|
|
|
NextDeliverSeqId1, Acks1,
|
2014-12-11 21:58:14 +08:00
|
|
|
RRC + one_if(HaveMsg),
|
2017-02-10 18:56:23 +08:00
|
|
|
RB + one_if(HaveMsg) * Size,
|
|
|
|
|
TC + one_if(not IsPersistent),
|
|
|
|
|
TB + one_if(not IsPersistent) * Size};
|
2014-12-11 21:58:14 +08:00
|
|
|
true -> Acc %% [0]
|
|
|
|
|
end
|
2010-04-06 07:18:05 +08:00
|
|
|
end
|
2021-07-28 19:43:31 +08:00
|
|
|
end, {?QUEUE:new(), NextDeliverSeqId0, [], 0, 0, 0, 0}, List),
|
2021-11-08 20:30:08 +08:00
|
|
|
{Filtered, RamReadyCount, RamBytes, DelsAndAcksFun(NextDeliverSeqId, Acks, State),
|
2017-02-10 18:56:23 +08:00
|
|
|
TransientCount, TransientBytes}.
|
2014-12-11 21:58:14 +08:00
|
|
|
%% [0] We don't increase RamBytes here, even though it pertains to
|
|
|
|
|
%% unacked messages too, since if HaveMsg then the message must have
|
|
|
|
|
%% been stored in the QI, thus the message must have been in
|
|
|
|
|
%% qi_pending_ack, thus it must already have been in RAM.
|
2009-11-10 02:12:00 +08:00
|
|
|
|
2021-11-08 20:30:08 +08:00
|
|
|
%% We increase the next_deliver_seq_id only when the next
|
|
|
|
|
%% message (next seq_id) was delivered.
|
|
|
|
|
next_deliver_seq_id(SeqId, NextDeliverSeqId)
|
|
|
|
|
when SeqId =:= NextDeliverSeqId ->
|
|
|
|
|
NextDeliverSeqId + 1;
|
|
|
|
|
next_deliver_seq_id(_, NextDeliverSeqId) ->
|
|
|
|
|
NextDeliverSeqId.
|
|
|
|
|
|
2015-09-10 01:55:48 +08:00
|
|
|
is_msg_in_pending_acks(SeqId, #vqstate { ram_pending_ack = RPA,
|
2022-06-08 21:39:42 +08:00
|
|
|
disk_pending_ack = DPA }) ->
|
2022-06-08 22:31:08 +08:00
|
|
|
maps:is_key(SeqId, RPA) orelse
|
|
|
|
|
maps:is_key(SeqId, DPA).
|
2015-09-10 01:55:48 +08:00
|
|
|
|
2017-02-10 18:56:23 +08:00
|
|
|
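%% Grow the delta to cover SeqId, bumping the message count and, for
%% transient messages, the transient count.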
expand_delta(SeqId, ?BLANK_DELTA_PATTERN(X), IsPersistent) ->
|
|
|
|
|
d(#delta { start_seq_id = SeqId, count = 1, end_seq_id = SeqId + 1,
|
|
|
|
|
transient = one_if(not IsPersistent)});
|
2011-10-11 23:47:12 +08:00
|
|
|
expand_delta(SeqId, #delta { start_seq_id = StartSeqId,
|
2017-02-10 18:56:23 +08:00
|
|
|
count = Count,
|
|
|
|
|
transient = Transient } = Delta,
|
|
|
|
|
IsPersistent)
|
2011-10-11 23:47:12 +08:00
|
|
|
when SeqId < StartSeqId ->
|
2017-02-10 18:56:23 +08:00
|
|
|
d(Delta #delta { start_seq_id = SeqId, count = Count + 1,
|
|
|
|
|
transient = Transient + one_if(not IsPersistent)});
|
2011-10-15 22:56:35 +08:00
|
|
|
expand_delta(SeqId, #delta { count = Count,
|
2017-02-10 18:56:23 +08:00
|
|
|
end_seq_id = EndSeqId,
|
|
|
|
|
transient = Transient } = Delta,
|
|
|
|
|
IsPersistent)
|
2011-10-11 23:47:12 +08:00
|
|
|
when SeqId >= EndSeqId ->
|
2017-02-10 18:56:23 +08:00
|
|
|
d(Delta #delta { count = Count + 1, end_seq_id = SeqId + 1,
|
|
|
|
|
transient = Transient + one_if(not IsPersistent)});
|
|
|
|
|
expand_delta(_SeqId, #delta { count = Count,
|
|
|
|
|
transient = Transient } = Delta,
|
|
|
|
|
IsPersistent) ->
|
|
|
|
|
d(Delta #delta { count = Count + 1,
|
|
|
|
|
transient = Transient + one_if(not IsPersistent) }).
|
2010-01-14 00:56:18 +08:00
|
|
|
|
2009-11-10 02:12:00 +08:00
|
|
|
%%----------------------------------------------------------------------------
|
|
|
|
|
%% Internal major helpers for Public API
|
|
|
|
|
%%----------------------------------------------------------------------------
|
|
|
|
|
|
2024-03-04 17:08:22 +08:00
|
|
|
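%% Build the initial #vqstate{} from the recovered index and store state
%% and the recovery terms, then attempt to load messages from delta into
%% memory via maybe_deltas_to_betas/1.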
init(IsDurable, IndexState, StoreState, DeltaCount, DeltaBytes, Terms,
|
2017-03-17 21:03:03 +08:00
|
|
|
PersistentClient, TransientClient, VHost) ->
|
2024-03-04 17:08:22 +08:00
|
|
|
{LowSeqId, HiSeqId, IndexState1} = rabbit_classic_queue_index_v2:bounds(IndexState),
|
2010-10-21 07:21:37 +08:00
|
|
|
|
2021-07-28 19:43:31 +08:00
|
|
|
{NextSeqId, NextDeliverSeqId, DeltaCount1, DeltaBytes1} =
|
2014-01-15 22:30:56 +08:00
|
|
|
case Terms of
|
2021-07-28 19:43:31 +08:00
|
|
|
non_clean_shutdown -> {HiSeqId, HiSeqId, DeltaCount, DeltaBytes};
|
|
|
|
|
_ -> NextSeqId0 = proplists:get_value(next_seq_id,
|
2022-03-30 20:01:53 +08:00
|
|
|
Terms, HiSeqId),
|
2021-07-28 19:43:31 +08:00
|
|
|
{NextSeqId0,
|
|
|
|
|
proplists:get_value(next_deliver_seq_id,
|
|
|
|
|
Terms, NextSeqId0),
|
2021-05-21 15:49:23 +08:00
|
|
|
proplists:get_value(persistent_count,
|
2014-07-23 23:20:17 +08:00
|
|
|
Terms, DeltaCount),
|
|
|
|
|
proplists:get_value(persistent_bytes,
|
|
|
|
|
Terms, DeltaBytes)}
|
2014-01-15 22:30:56 +08:00
|
|
|
end,
|
2010-10-21 07:21:37 +08:00
|
|
|
Delta = case DeltaCount1 == 0 andalso DeltaCount /= undefined of
|
|
|
|
|
true -> ?BLANK_DELTA;
|
2011-10-15 22:11:24 +08:00
|
|
|
false -> d(#delta { start_seq_id = LowSeqId,
|
|
|
|
|
count = DeltaCount1,
|
2017-02-10 18:56:23 +08:00
|
|
|
transient = 0,
|
2022-01-05 21:47:00 +08:00
|
|
|
end_seq_id = NextSeqId })
|
2010-10-21 07:21:37 +08:00
|
|
|
end,
|
2016-05-12 17:34:15 +08:00
|
|
|
Now = erlang:monotonic_time(),
|
2015-08-27 23:11:08 +08:00
|
|
|
IoBatchSize = rabbit_misc:get_env(rabbit, msg_store_io_batch_size,
|
|
|
|
|
?IO_BATCH_SIZE),
|
2015-09-01 18:40:04 +08:00
|
|
|
|
|
|
|
|
{ok, IndexMaxSize} = application:get_env(
|
|
|
|
|
rabbit, queue_index_embed_msgs_below),
|
2010-10-21 07:21:37 +08:00
|
|
|
State = #vqstate {
|
2011-09-30 00:09:09 +08:00
|
|
|
q1 = ?QUEUE:new(),
|
|
|
|
|
q2 = ?QUEUE:new(),
|
2010-12-06 03:31:22 +08:00
|
|
|
delta = Delta,
|
2011-09-30 00:09:09 +08:00
|
|
|
q3 = ?QUEUE:new(),
|
|
|
|
|
q4 = ?QUEUE:new(),
|
2010-12-06 03:31:22 +08:00
|
|
|
next_seq_id = NextSeqId,
|
2021-07-28 19:43:31 +08:00
|
|
|
next_deliver_seq_id = NextDeliverSeqId,
|
2022-06-08 22:31:08 +08:00
|
|
|
ram_pending_ack = #{},
|
|
|
|
|
disk_pending_ack = #{},
|
2010-12-06 03:31:22 +08:00
|
|
|
index_state = IndexState1,
|
2021-07-06 20:38:47 +08:00
|
|
|
store_state = StoreState,
|
2010-12-06 03:31:22 +08:00
|
|
|
msg_store_clients = {PersistentClient, TransientClient},
|
|
|
|
|
durable = IsDurable,
|
|
|
|
|
transient_threshold = NextSeqId,
|
2015-09-01 18:40:04 +08:00
|
|
|
qi_embed_msgs_below = IndexMaxSize,
|
2010-12-06 03:31:22 +08:00
|
|
|
|
|
|
|
|
len = DeltaCount1,
|
|
|
|
|
persistent_count = DeltaCount1,
|
2014-07-23 23:20:17 +08:00
|
|
|
bytes = DeltaBytes1,
|
|
|
|
|
persistent_bytes = DeltaBytes1,
|
2017-02-10 18:56:23 +08:00
|
|
|
delta_transient_bytes = 0,
|
2010-12-06 03:31:22 +08:00
|
|
|
|
|
|
|
|
target_ram_count = infinity,
|
|
|
|
|
ram_msg_count = 0,
|
|
|
|
|
ram_msg_count_prev = 0,
|
|
|
|
|
ram_ack_count_prev = 0,
|
2014-07-30 00:16:16 +08:00
|
|
|
ram_bytes = 0,
|
2014-08-12 21:26:43 +08:00
|
|
|
unacked_bytes = 0,
|
2010-12-06 03:31:22 +08:00
|
|
|
out_counter = 0,
|
|
|
|
|
in_counter = 0,
|
2014-01-27 21:59:16 +08:00
|
|
|
rates = blank_rates(Now),
|
2022-06-10 21:24:02 +08:00
|
|
|
msgs_on_disk = sets:new([{version,2}]),
|
|
|
|
|
msg_indices_on_disk = sets:new([{version,2}]),
|
|
|
|
|
unconfirmed = sets:new([{version,2}]),
|
2022-06-16 17:21:47 +08:00
|
|
|
unconfirmed_simple = sets:new([{version,2}]),
|
2022-06-10 21:24:02 +08:00
|
|
|
confirmed = sets:new([{version,2}]),
|
2010-12-06 03:31:22 +08:00
|
|
|
ack_out_counter = 0,
|
2015-02-03 00:35:05 +08:00
|
|
|
ack_in_counter = 0,
|
|
|
|
|
disk_read_count = 0,
|
2015-08-27 23:11:08 +08:00
|
|
|
disk_write_count = 0,
|
|
|
|
|
|
2015-10-10 23:40:23 +08:00
|
|
|
io_batch_size = IoBatchSize,
|
|
|
|
|
|
2016-09-26 21:07:27 +08:00
|
|
|
mode = default,
|
2017-03-17 21:03:03 +08:00
|
|
|
virtual_host = VHost},
|
2010-10-21 07:21:37 +08:00
|
|
|
a(maybe_deltas_to_betas(State)).
|
|
|
|
|
|
2014-01-27 21:59:16 +08:00
|
|
|
blank_rates(Now) ->
|
|
|
|
|
#rates { in = 0.0,
|
|
|
|
|
out = 0.0,
|
|
|
|
|
ack_in = 0.0,
|
|
|
|
|
ack_out = 0.0,
|
|
|
|
|
timestamp = Now}.
|
2010-11-18 21:21:26 +08:00
|
|
|
|
2022-04-04 17:08:54 +08:00
|
|
|
in_r(MsgStatus = #msg_status {}, State = #vqstate { q3 = Q3 }) ->
|
|
|
|
|
State #vqstate { q3 = ?QUEUE:in_r(MsgStatus, Q3) }.
|
2011-06-25 22:31:27 +08:00
|
|
|
|
2022-04-04 17:08:54 +08:00
|
|
|
queue_out(State) ->
|
2015-10-10 23:53:43 +08:00
|
|
|
case fetch_from_q3(State) of
|
|
|
|
|
{empty, _State1} = Result -> Result;
|
2021-07-28 19:43:31 +08:00
|
|
|
{loaded, {MsgStatus, State1}} -> {{value, set_deliver_flag(State, MsgStatus)}, State1}
|
2011-06-25 22:31:27 +08:00
|
|
|
end.
|
|
|
|
|
|
2021-07-28 19:43:31 +08:00
|
|
|
set_deliver_flag(#vqstate{ next_deliver_seq_id = NextDeliverSeqId },
|
|
|
|
|
MsgStatus = #msg_status{ seq_id = SeqId }) ->
|
|
|
|
|
MsgStatus#msg_status{ is_delivered = SeqId < NextDeliverSeqId }.
|
|
|
|
|
|
2021-07-06 20:38:47 +08:00
|
|
|
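%% Read the message body from the per-queue store (v2) or the shared
%% message store when it is not already held in memory.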
read_msg(#msg_status{seq_id = SeqId,
|
|
|
|
|
msg = undefined,
|
2013-01-12 18:18:28 +08:00
|
|
|
msg_id = MsgId,
|
2021-07-06 20:38:47 +08:00
|
|
|
is_persistent = IsPersistent,
|
|
|
|
|
msg_location = MsgLocation}, State) ->
|
|
|
|
|
read_msg(SeqId, MsgId, IsPersistent, MsgLocation, State);
|
2013-01-12 18:18:28 +08:00
|
|
|
read_msg(#msg_status{msg = Msg}, State) ->
|
2013-01-03 00:58:55 +08:00
|
|
|
{Msg, State}.
|
2012-11-22 08:20:28 +08:00
|
|
|
|
2021-07-06 20:38:47 +08:00
|
|
|
read_msg(SeqId, _, _, MsgLocation, State = #vqstate{ store_state = StoreState0 })
|
|
|
|
|
when is_tuple(MsgLocation) ->
|
2021-11-05 23:42:18 +08:00
|
|
|
{Msg, StoreState} = rabbit_classic_queue_store_v2:read(SeqId, MsgLocation, StoreState0),
|
2021-07-06 20:38:47 +08:00
|
|
|
{Msg, State#vqstate{ store_state = StoreState }};
|
2021-10-21 19:17:18 +08:00
|
|
|
read_msg(_, MsgId, IsPersistent, rabbit_msg_store, State = #vqstate{msg_store_clients = MSCState,
|
|
|
|
|
disk_read_count = Count}) ->
|
Message Containers (#5077)
2023-08-31 18:27:13 +08:00
|
|
|
{{ok, Msg}, MSCState1} =
|
2012-02-14 20:05:27 +08:00
|
|
|
msg_store_read(MSCState, IsPersistent, MsgId),
|
2015-02-03 00:35:05 +08:00
|
|
|
{Msg, State #vqstate {msg_store_clients = MSCState1,
|
|
|
|
|
disk_read_count = Count + 1}}.
|
2013-01-12 19:35:24 +08:00
|
|
|
|
2022-06-03 17:53:45 +08:00
|
|
|
%% Helper macros to make the code as obvious as possible.
|
|
|
|
|
%% It's OK to call msg_size/1 for Inc because it gets inlined.
|
|
|
|
|
-define(UP(A, Inc),
|
|
|
|
|
A = St#vqstate.A + Inc).
|
|
|
|
|
-define(UP(A, B, Inc),
|
|
|
|
|
A = St#vqstate.A + Inc,
|
|
|
|
|
B = St#vqstate.B + Inc).
|
|
|
|
|
-define(UP(A, B, C, Inc),
|
|
|
|
|
A = St#vqstate.A + Inc,
|
|
|
|
|
B = St#vqstate.B + Inc,
|
|
|
|
|
C = St#vqstate.C + Inc).
|
|
|
|
|
|
|
|
|
|
%% When publishing to memory, transient messages do not get written to disk.
|
|
|
|
|
%% On the other hand, persistent messages are kept in memory as well as disk.
|
|
|
|
|
stats_published_memory(MS = #msg_status{is_persistent = true}, St) ->
|
|
|
|
|
St#vqstate{?UP(len, ram_msg_count, persistent_count, +1),
|
|
|
|
|
?UP(bytes, ram_bytes, persistent_bytes, +msg_size(MS))};
|
|
|
|
|
stats_published_memory(MS = #msg_status{is_persistent = false}, St) ->
|
|
|
|
|
St#vqstate{?UP(len, ram_msg_count, +1),
|
|
|
|
|
?UP(bytes, ram_bytes, +msg_size(MS))}.
|
|
|
|
|
|
|
|
|
|
%% Messages published directly to disk are not kept in memory.
|
|
|
|
|
stats_published_disk(MS = #msg_status{is_persistent = true}, St) ->
|
|
|
|
|
St#vqstate{?UP(len, persistent_count, +1),
|
|
|
|
|
?UP(bytes, persistent_bytes, +msg_size(MS))};
|
|
|
|
|
stats_published_disk(MS = #msg_status{is_persistent = false}, St) ->
|
|
|
|
|
St#vqstate{?UP(len, +1),
|
|
|
|
|
?UP(bytes, delta_transient_bytes, +msg_size(MS))}.
|
|
|
|
|
|
|
|
|
|
%% Pending acks do not add to len. Messages are kept in memory.
|
|
|
|
|
stats_published_pending_acks(MS = #msg_status{is_persistent = true}, St) ->
|
|
|
|
|
St#vqstate{?UP(persistent_count, +1),
|
|
|
|
|
?UP(persistent_bytes, unacked_bytes, ram_bytes, +msg_size(MS))};
|
|
|
|
|
stats_published_pending_acks(MS = #msg_status{is_persistent = false}, St) ->
|
|
|
|
|
St#vqstate{?UP(unacked_bytes, ram_bytes, +msg_size(MS))}.
|
|
|
|
|
|
|
|
|
|
%% Messages are moved from memory to pending acks. They may have
|
|
|
|
|
%% the message body either in memory or on disk depending on how
|
|
|
|
|
%% the message got to memory in the first place (if the message
|
|
|
|
|
%% was fully on disk the content will not be read immediately).
|
|
|
|
|
%% The contents stay where they are during this operation.
|
|
|
|
|
stats_pending_acks(MS = #msg_status{msg = undefined}, St) ->
|
|
|
|
|
St#vqstate{?UP(len, -1),
|
|
|
|
|
?UP(bytes, -msg_size(MS)), ?UP(unacked_bytes, +msg_size(MS))};
|
|
|
|
|
stats_pending_acks(MS, St) ->
|
|
|
|
|
St#vqstate{?UP(len, ram_msg_count, -1),
|
|
|
|
|
?UP(bytes, -msg_size(MS)), ?UP(unacked_bytes, +msg_size(MS))}.
|
|
|
|
|
|
|
|
|
|
%% Message may or may not be persistent and the contents
|
|
|
|
|
%% may or may not be in memory.
|
|
|
|
|
%%
|
|
|
|
|
%% Removal from delta_transient_bytes is done by maybe_deltas_to_betas.
|
|
|
|
|
stats_removed(MS = #msg_status{is_persistent = true, msg = undefined}, St) ->
|
|
|
|
|
St#vqstate{?UP(len, persistent_count, -1),
|
|
|
|
|
?UP(bytes, persistent_bytes, -msg_size(MS))};
|
|
|
|
|
stats_removed(MS = #msg_status{is_persistent = true}, St) ->
|
|
|
|
|
St#vqstate{?UP(len, ram_msg_count, persistent_count, -1),
|
|
|
|
|
?UP(bytes, ram_bytes, persistent_bytes, -msg_size(MS))};
|
|
|
|
|
stats_removed(MS = #msg_status{is_persistent = false, msg = undefined}, St) ->
|
|
|
|
|
St#vqstate{?UP(len, -1), ?UP(bytes, -msg_size(MS))};
|
|
|
|
|
stats_removed(MS = #msg_status{is_persistent = false}, St) ->
|
|
|
|
|
St#vqstate{?UP(len, ram_msg_count, -1),
|
|
|
|
|
?UP(bytes, ram_bytes, -msg_size(MS))}.
|
|
|
|
|
|
|
|
|
|
%% @todo It is confusing that ram_msg_count excludes unacked messages while ram_bytes includes them.
|
|
|
|
|
%% Rename the fields to make these things obvious. Fields are internal
|
|
|
|
|
%% so that should be OK.
|
|
|
|
|
|
|
|
|
|
stats_acked_pending(MS = #msg_status{is_persistent = true, msg = undefined}, St) ->
|
|
|
|
|
St#vqstate{?UP(persistent_count, -1),
|
|
|
|
|
?UP(persistent_bytes, unacked_bytes, -msg_size(MS))};
|
|
|
|
|
stats_acked_pending(MS = #msg_status{is_persistent = true}, St) ->
|
|
|
|
|
St#vqstate{?UP(persistent_count, -1),
|
|
|
|
|
?UP(persistent_bytes, unacked_bytes, ram_bytes, -msg_size(MS))};
|
|
|
|
|
stats_acked_pending(MS = #msg_status{is_persistent = false,
|
|
|
|
|
msg = undefined}, St) ->
|
|
|
|
|
St#vqstate{?UP(unacked_bytes, -msg_size(MS))};
|
|
|
|
|
stats_acked_pending(MS = #msg_status{is_persistent = false}, St) ->
|
|
|
|
|
St#vqstate{?UP(unacked_bytes, ram_bytes, -msg_size(MS))}.
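
%% Worked example, derived from the clauses above: a persistent message that
%% is published into memory, then fetched with ack required, then acked,
%% moves the counters as follows (size = msg_size(MS)):
%%   stats_published_memory/2 : len, ram_msg_count, persistent_count +1;
%%                              bytes, ram_bytes, persistent_bytes +size
%%   stats_pending_acks/2     : len, ram_msg_count -1; bytes -size;
%%                              unacked_bytes +size
%%   stats_acked_pending/2    : persistent_count -1; persistent_bytes,
%%                              unacked_bytes, ram_bytes -size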

%% Notice that this is the reverse of stats_pending_acks.
stats_requeued_memory(MS = #msg_status{msg = undefined}, St) ->
    St#vqstate{?UP(len, +1),
               ?UP(bytes, +msg_size(MS)), ?UP(unacked_bytes, -msg_size(MS))};
stats_requeued_memory(MS, St) ->
    St#vqstate{?UP(len, ram_msg_count, +1),
               ?UP(bytes, +msg_size(MS)), ?UP(unacked_bytes, -msg_size(MS))}.

%% @todo For v2, since we do not remove from disk until we ack, we do not
%% need to write to disk again on requeue. If the message falls within delta
%% we can simply drop the MsgStatus; otherwise we put it in q3 and do no
%% disk writes.
%%
%% For v1 this is less clear. We most likely do not need to write to the
%% index, and probably do not need to rewrite the message even when it is
%% not embedded.
%%
%% So nothing needs to change except how we count stats, as well as the
%% delta stats if the message falls within delta.
stats_requeued_disk(MS = #msg_status{is_persistent = true}, St) ->
    St#vqstate{?UP(len, +1),
               ?UP(bytes, +msg_size(MS)), ?UP(unacked_bytes, -msg_size(MS))};
stats_requeued_disk(MS = #msg_status{is_persistent = false}, St) ->
    St#vqstate{?UP(len, +1),
               ?UP(bytes, delta_transient_bytes, +msg_size(MS)),
               ?UP(unacked_bytes, -msg_size(MS))}.

msg_size(#msg_status{msg_props = #message_properties{size = Size}}) -> Size.

msg_in_ram(#msg_status{msg = Msg}) -> Msg =/= undefined.

%% First parameter: AckRequired.
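%% Call shapes, summarising the two clauses below:
%%   {SeqId, State1}     = remove(true,  MsgStatus, State),  %% keeps a pending ack
%%   {undefined, State1} = remove(false, MsgStatus, State).  %% removes outright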
remove(true, MsgStatus = #msg_status {
               seq_id = SeqId },
       State = #vqstate {next_deliver_seq_id = NextDeliverSeqId,
                         out_counter         = OutCount,
                         index_state         = IndexState1 }) ->
    State1 = record_pending_ack(
               MsgStatus #msg_status {
                 is_delivered = true }, State),

    State2 = stats_pending_acks(MsgStatus, State1),

    {SeqId, maybe_update_rates(
              State2 #vqstate {next_deliver_seq_id = next_deliver_seq_id(SeqId, NextDeliverSeqId),
                               out_counter         = OutCount + 1,
                               index_state         = IndexState1})};

%% This function body has the same behaviour as remove_queue_entries/3
%% but instead of removing messages based on a ?QUEUE, this removes
%% just one message, the one referenced by the MsgStatus provided.
remove(false, MsgStatus = #msg_status{ seq_id = SeqId },
       State = #vqstate{ next_deliver_seq_id = NextDeliverSeqId,
                         out_counter         = OutCount }) ->
    State1 = remove_from_disk(MsgStatus, State),

    State2 = stats_removed(MsgStatus, State1),

    {undefined, maybe_update_rates(
                  State2 #vqstate {next_deliver_seq_id = next_deliver_seq_id(SeqId, NextDeliverSeqId),
                                   out_counter         = OutCount + 1 })}.

remove_from_disk(#msg_status {
                    seq_id        = SeqId,
                    msg_id        = MsgId,
                    is_persistent = IsPersistent,
                    msg_location  = MsgLocation,
                    index_on_disk = IndexOnDisk },
                 State = #vqstate {index_state       = IndexState1,
                                   store_state       = StoreState0,
                                   msg_store_clients = MSCState}) ->
    {DeletedSegments, IndexState2} =
        case IndexOnDisk of
            true  -> rabbit_classic_queue_index_v2:ack([SeqId], IndexState1);
            false -> {[], IndexState1}
        end,
    {StoreState1, State1} = case MsgLocation of
        ?IN_SHARED_STORE ->
            case msg_store_remove(MSCState, IsPersistent, [{SeqId, MsgId}]) of
                {ok, []} ->
                    {StoreState0, State};
                {ok, [_]} ->
                    {StoreState0, record_confirms(sets:add_element(MsgId, sets:new([{version,2}])), State)}
            end;
        ?IN_QUEUE_STORE -> {rabbit_classic_queue_store_v2:remove(SeqId, StoreState0), State};
        ?IN_QUEUE_INDEX -> {StoreState0, State};
        ?IN_MEMORY      -> {StoreState0, State}
    end,
    StoreState = rabbit_classic_queue_store_v2:delete_segments(DeletedSegments, StoreState1),
    State1#vqstate{
        index_state = IndexState2,
        store_state = StoreState
    }.

%% This function exists as a way to improve dropwhile/2
%% performance. The idea of having this function is to optimise calls
%% to rabbit_queue_index by batching delivers and acks, instead of
%% sending them one by one.
%%
%% Instead of removing every message as they are popped from the
%% queue, it first accumulates them and then removes them by calling
%% remove_queue_entries/3, since the behaviour of
%% remove_queue_entries/3 when used with
%% process_delivers_and_acks_fun(deliver_and_ack) is the same as
%% calling remove(false, MsgStatus, State).
%%
%% remove/3 also updates the out_counter in every call, but here we do
%% it just once at the end.
%%
%% @todo This function scales poorly. If there are 1 million expired
%% messages, it will first collect all 1 million messages and only then
%% process them. It should probably limit the number of messages it
%% removes at once and loop until satisfied instead. It could also let
%% the index first figure out up to which seq_id() to read (since the
%% index has expiration encoded, it could use binary search to find
%% where it should stop reading) and then, in a second step, do the
%% reading with a limit for each read and drop only that.
%%
%% @todo We cannot just read the metadata to drop the messages because
%% there are messages we are not going to remove. Those messages will
%% later on be consumed and their content read, only via a less
%% efficient operation. This results in a drop in performance for long
%% queues.
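%%
%% Usage sketch (illustrative only; the actual predicate is supplied by the
%% caller of dropwhile/2, typically to drop expired messages):
%%   Pred = fun (#message_properties { expiry = Expiry }) -> Expiry =< Now end,
%%   {MsgProps, State1} = remove_by_predicate(Pred, State)
%% where MsgProps holds the properties of the first message that did not match
%% the predicate, or undefined if the queue was emptied.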
remove_by_predicate(Pred, State = #vqstate {out_counter = OutCount}) ->
    {MsgProps, QAcc, State1} =
        collect_by_predicate(Pred, ?QUEUE:new(), State),
    State2 =
        remove_queue_entries(
          QAcc, process_delivers_and_acks_fun(deliver_and_ack), State1),
    %% maybe_update_rates/1 is called in remove/3 for every
    %% message. Since we update out_counter only once, we call it just
    %% here.
    {MsgProps, maybe_update_rates(
                 State2 #vqstate {
                   out_counter = OutCount + ?QUEUE:len(QAcc)})}.

%% This function exists as a way to improve fetchwhile/4
%% performance. The idea of having this function is to optimise calls
%% to rabbit_queue_index by batching delivers, instead of sending them
%% one by one.
%%
%% Fun is the function passed to fetchwhile/4 that is
%% applied to every fetched message and used to build the fetchwhile/4
%% result accumulator FetchAcc.
%%
%% @todo See the todo in remove_by_predicate/2.
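%%
%% Sketch of the Fun/FetchAcc contract (illustrative; the real Fun is supplied
%% by the caller of fetchwhile/4):
%%   Fun = fun (Msg, SeqId, Acc) -> [{Msg, SeqId} | Acc] end,
%%   {_MsgProps, Fetched, State1} = fetch_by_predicate(Pred, Fun, [], State)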
fetch_by_predicate(Pred, Fun, FetchAcc,
                   State = #vqstate { out_counter = OutCount}) ->
    {MsgProps, QAcc, State1} =
        collect_by_predicate(Pred, ?QUEUE:new(), State),

    {NextDeliverSeqId, FetchAcc1, State2} =
        process_queue_entries(QAcc, Fun, FetchAcc, State1),

    {MsgProps, FetchAcc1, maybe_update_rates(
                            State2 #vqstate {
                              next_deliver_seq_id = NextDeliverSeqId,
                              out_counter = OutCount + ?QUEUE:len(QAcc)})}.

%% We try to do here the same as what remove(true, MsgStatus, State) does,
%% but processing several messages at the same time. The idea is to
%% optimize rabbit_queue_index:deliver/2 calls by sending a list of
%% SeqIds instead of one by one, thus process_queue_entries1 will
%% accumulate the required deliveries, will record_pending_ack for
%% each message, and will update stats, like remove/3 does.
%%
%% For the meaning of the Fun and FetchAcc arguments see
%% fetch_by_predicate/4 above.
process_queue_entries(Q, Fun, FetchAcc, State = #vqstate{ next_deliver_seq_id = NextDeliverSeqId }) ->
    ?QUEUE:fold(fun (MsgStatus, Acc) ->
                        process_queue_entries1(MsgStatus, Fun, Acc)
                end,
                {NextDeliverSeqId, FetchAcc, State}, Q).

process_queue_entries1(
  #msg_status { seq_id = SeqId } = MsgStatus,
  Fun,
  {NextDeliverSeqId, FetchAcc, State}) ->
    {Msg, State1} = read_msg(MsgStatus, State),
    State2 = record_pending_ack(
               MsgStatus #msg_status {
                 is_delivered = true }, State1),
    {next_deliver_seq_id(SeqId, NextDeliverSeqId),
     Fun(Msg, SeqId, FetchAcc),
     stats_pending_acks(MsgStatus, State2)}.

collect_by_predicate(Pred, QAcc, State) ->
    case queue_out(State) of
        {empty, State1} ->
            {undefined, QAcc, State1};
        {{value, MsgStatus = #msg_status { msg_props = MsgProps }}, State1} ->
            case Pred(MsgProps) of
                true  -> collect_by_predicate(Pred, ?QUEUE:in(MsgStatus, QAcc),
                                              State1);
                false -> {MsgProps, QAcc, in_r(MsgStatus, State1)}
            end
    end.

%%----------------------------------------------------------------------------
%% Helpers for Public API purge/1 function
%%----------------------------------------------------------------------------

%% The difference between purge_when_pending_acks/1
%% and purge_and_index_reset/1 is that the former issues a deliver
%% and an ack to the queue index for every message that is being
%% removed, while the latter just resets the queue index state.
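%%
%% Dispatch sketch (assumption: this mirrors how the exported purge/1, defined
%% elsewhere in this module, chooses between the two helpers):
%%   case is_pending_ack_empty(State) andalso is_unconfirmed_empty(State) of
%%       true  -> purge_and_index_reset(State);    %% nothing outstanding
%%       false -> purge_when_pending_acks(State)   %% must ack each removed message
%%   end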
purge_when_pending_acks(State) ->
    State1 = purge1(process_delivers_and_acks_fun(deliver_and_ack), State),
    a(State1).

purge_and_index_reset(State) ->
    State1 = purge1(process_delivers_and_acks_fun(none), State),
    a(reset_qi_state(State1)).

%% This function removes messages from both delta and q3.
%%
%% purge_betas_and_deltas/2 loads messages from the queue index,
%% filling up q3. The messages loaded into q3 are removed by calling
%% remove_queue_entries/3 until there are no more messages to be read
%% from the queue index. Messages are read in batches from the queue
%% index.
purge1(AfterFun, State) ->
    a(purge_betas_and_deltas(AfterFun, State)).

reset_qi_state(State = #vqstate{ index_state = IndexState0,
                                 store_state = StoreState0 }) ->
    StoreState = rabbit_classic_queue_store_v2:terminate(StoreState0),
    IndexState = rabbit_classic_queue_index_v2:reset_state(IndexState0),
    State#vqstate{ index_state = IndexState,
                   store_state = StoreState }.

is_pending_ack_empty(State) ->
    count_pending_acks(State) =:= 0.

is_unconfirmed_empty(#vqstate { unconfirmed = UC, unconfirmed_simple = UCS }) ->
    sets:is_empty(UC) andalso sets:is_empty(UCS).

count_pending_acks(#vqstate { ram_pending_ack  = RPA,
                              disk_pending_ack = DPA }) ->
    map_size(RPA) + map_size(DPA).

%% @todo Stats are updated in maybe_deltas_to_betas and then updated
%% again in remove_queue_entries1. Is all of that unnecessary since
%% we are purging anyway?
purge_betas_and_deltas(DelsAndAcksFun, State) ->
    %% We use the maximum memory limit when purging to get greater performance.
    MemoryLimit = 2048,
    State0 = #vqstate { q3 = Q3 } = maybe_deltas_to_betas(DelsAndAcksFun, State, MemoryLimit, metadata_only),

    case ?QUEUE:is_empty(Q3) of
        true  -> State0;
        false -> State1 = remove_queue_entries(Q3, DelsAndAcksFun, State0),
                 purge_betas_and_deltas(DelsAndAcksFun, State1#vqstate{q3 = ?QUEUE:new()})
    end.

remove_queue_entries(Q, DelsAndAcksFun,
                     State = #vqstate{next_deliver_seq_id = NextDeliverSeqId0}) ->
    {MsgIdsByStore, NextDeliverSeqId, Acks, State1} =
        ?QUEUE:fold(fun remove_queue_entries1/2,
                    {maps:new(), NextDeliverSeqId0, [], State}, Q),
    State2 = remove_vhost_msgs_by_id(MsgIdsByStore, State1),
    DelsAndAcksFun(NextDeliverSeqId, Acks, State2).

remove_queue_entries1(
  #msg_status { msg_id = MsgId, seq_id = SeqId,
                msg_location = MsgLocation, index_on_disk = IndexOnDisk,
                is_persistent = IsPersistent} = MsgStatus,
  {MsgIdsByStore, NextDeliverSeqId, Acks, State}) ->
    {case MsgLocation of
         ?IN_SHARED_STORE -> rabbit_misc:maps_cons(IsPersistent, {SeqId, MsgId}, MsgIdsByStore);
         _                -> MsgIdsByStore
     end,
     next_deliver_seq_id(SeqId, NextDeliverSeqId),
     cons_if(IndexOnDisk, SeqId, Acks),
     %% @todo Probably don't do this on a per-message basis...
     stats_removed(MsgStatus, State)}.

process_delivers_and_acks_fun(deliver_and_ack) ->
    %% @todo Make a clause for an empty Acks list?
    fun (NextDeliverSeqId, Acks, State = #vqstate { index_state = IndexState,
                                                    store_state = StoreState0}) ->
            {DeletedSegments, IndexState1} = rabbit_classic_queue_index_v2:ack(Acks, IndexState),

            StoreState = rabbit_classic_queue_store_v2:delete_segments(DeletedSegments, StoreState0),

            State #vqstate { index_state = IndexState1,
                             store_state = StoreState,
                             %% We indiscriminately update because we already took care
                             %% of calling next_deliver_seq_id/2 in the functions that
                             %% end up calling this fun.
                             next_deliver_seq_id = NextDeliverSeqId }
    end;
process_delivers_and_acks_fun(_) ->
    fun (NextDeliverSeqId, _, State) ->
            State #vqstate { next_deliver_seq_id = NextDeliverSeqId }
    end.

%%----------------------------------------------------------------------------
%% Internal gubbins for publishing
%%----------------------------------------------------------------------------

publish1(Msg,
         MsgProps = #message_properties { needs_confirming = NeedsConfirming },
         IsDelivered, _ChPid, PersistFun,
         State = #vqstate { q3 = Q3, delta = Delta = #delta { count = DeltaCount },
                            len                 = Len,
                            qi_embed_msgs_below = IndexMaxSize,
                            next_seq_id         = SeqId,
                            next_deliver_seq_id = NextDeliverSeqId,
                            in_counter          = InCount,
                            durable             = IsDurable,
                            unconfirmed         = UC,
                            unconfirmed_simple  = UCS,
                            rates               = #rates{ out = OutRate }}) ->
    MsgId = mc:get_annotation(id, Msg),
    IsPersistent = mc:is_persistent(Msg),
    IsPersistent1 = IsDurable andalso IsPersistent,
    MsgStatus = msg_status(IsPersistent1, IsDelivered, SeqId, Msg, MsgProps, IndexMaxSize),
    %% We allow from 1 to 2048 messages in memory depending on the consume rate.
    %% The lower limit is 1 because the queue process will need to access this
    %% message to know its expiration information.
    MemoryLimit = min(1 + floor(2 * OutRate), 2048),
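    %% For example, with OutRate = 100.0 messages per second this evaluates to
    %% min(1 + floor(200.0), 2048) = 201 messages kept in memory; rates above
    %% roughly 1024 messages per second hit the 2048 cap.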
    State3 = case DeltaCount of
                 %% Len is the same as Q3Len when DeltaCount =:= 0.
                 0 when Len < MemoryLimit ->
                     {MsgStatus1, State1} = PersistFun(false, false, MsgStatus, State),
                     State2 = State1 #vqstate { q3 = ?QUEUE:in(m(MsgStatus1), Q3) },
                     stats_published_memory(MsgStatus1, State2);
                 _ ->
                     {MsgStatus1, State1} = PersistFun(true, true, MsgStatus, State),
                     Delta1 = expand_delta(SeqId, Delta, IsPersistent),
                     State2 = State1 #vqstate { delta = Delta1 },
                     stats_published_disk(MsgStatus1, State2)
             end,
    {UC1, UCS1} = maybe_needs_confirming(NeedsConfirming, persist_to(MsgStatus),
                                         MsgId, UC, UCS),
    State3#vqstate{ next_seq_id         = SeqId + 1,
                    next_deliver_seq_id = maybe_next_deliver_seq_id(SeqId, NextDeliverSeqId, IsDelivered),
                    in_counter          = InCount + 1,
                    unconfirmed         = UC1,
                    unconfirmed_simple  = UCS1 }.

%% Only attempt to increase the next_deliver_seq_id for delivered messages.
maybe_next_deliver_seq_id(SeqId, NextDeliverSeqId, true) ->
    next_deliver_seq_id(SeqId, NextDeliverSeqId);
maybe_next_deliver_seq_id(_, NextDeliverSeqId, false) ->
    NextDeliverSeqId.
Message Containers (#5077)
This PR implements an approach for a "protocol (data format) agnostic core" where the format of the message isn't converted at point of reception.
Currently all non AMQP 0.9.1 originating messages are converted into a AMQP 0.9.1 flavoured basic_message record before sent to a queue. If the messages are then consumed by the originating protocol they are converted back from AMQP 0.9.1. For some protocols such as MQTT 3.1 this isn't too expensive as MQTT is mostly a fairly easily mapped subset of AMQP 0.9.1 but for others such as AMQP 1.0 the conversions are awkward and in some cases lossy even if consuming from the originating protocol.
This PR instead wraps all incoming messages in their originating form into a generic, extensible message container type (mc). The container module exposes an API to get common message details such as size and various properties (ttl, priority etc) directly from the source data type. Each protocol needs to implement the mc behaviour such that when a message originating form one protocol is consumed by another protocol we convert it to the target protocol at that point.
The message container also contains annotations, dead letter records and other meta data we need to record during the lifetime of a message. The original protocol message is never modified unless it is consumed.
This includes conversion modules to and from amqp, amqpl (AMQP 0.9.1) and mqtt.
COMMIT HISTORY:
* Refactor away from using the delivery{} record
In many places including exchange types. This should make it
easier to move towards using a message container type instead of
basic_message.
Add mc module and move direct replies outside of exchange
Lots of changes incl classic queues
Implement stream support incl amqp conversions
simplify mc state record
move mc.erl
mc dlx stuff
recent history exchange
Make tracking work
But doesn't take a protocol agnostic approach as we just convert
everything into AMQP legacy and back. Might be good enough for now.
Tracing as a whole may want a bit of a re-vamp at some point.
tidy
make quorum queue peek work by legacy conversion
dead lettering fixes
dead lettering fixes
CMQ fixes
rabbit_trace type fixes
fixes
fix
Fix classic queue props
test assertion fix
feature flag and backwards compat
Enable message_container feature flag in some SUITEs
Dialyzer fixes
fixes
fix
test fixes
Various
Manually update a gazelle generated file
until a gazelle enhancement can be made
https://github.com/rabbitmq/rules_erlang/issues/185
Add message_containers_SUITE to bazel
and regen bazel files with gazelle from rules_erlang@main
Simplify essential proprty access
Such as durable, ttl and priority by extracting them into annotations
at message container init time.
Move type
to remove dependenc on amqp10 stuff in mc.erl
mostly because I don't know how to make bazel do the right thing
add more stuff
Refine routing header stuff
wip
Cosmetics
Do not use "maybe" as type name as "maybe" is a keyword since OTP 25
which makes Erlang LS complain.
* Dedup death queue names
* Fix function clause crashes
Fix failing tests in the MQTT shared_SUITE:
A classic queue message ID can be undefined as set in
https://github.com/rabbitmq/rabbitmq-server/blob/fbe79ff47b4edbc0fd95457e623d6593161ad198/deps/rabbit/src/rabbit_classic_queue_index_v2.erl#L1048
Fix failing tests in the MQTT shared_SUITE-mixed:
When feature flag message_containers is disabled, the
message is not an #mc{} record, but a #basic_message{} record.
* Fix is_utf8_no_null crash
Prior to this commit, the function crashed if invalid UTF-8 was
provided, e.g.:
```
1> rabbit_misc:is_valid_shortstr(<<"😇"/utf16>>).
** exception error: no function clause matching rabbit_misc:is_utf8_no_null(<<216,61,222,7>>) (rabbit_misc.erl, line 1481)
```
* Implement mqtt mc behaviour
For now via amqp translation.
This is still work in progress, but the following SUITEs pass:
```
make -C deps/rabbitmq_mqtt ct-shared t=[mqtt,v5,cluster_size_1] FULL=1
make -C deps/rabbitmq_mqtt ct-v5 t=[mqtt,cluster_size_1] FULL=1
```
* Shorten mc file names
Module name length matters because for each persistent message the #mc{}
record is persisted to disk.
```
1> iolist_size(term_to_iovec({mc, rabbit_mc_amqp_legacy})).
30
2> iolist_size(term_to_iovec({mc, mc_amqpl})).
17
```
This commit renames the mc modules:
```
ag -l rabbit_mc_amqp_legacy | xargs sed -i 's/rabbit_mc_amqp_legacy/mc_amqpl/g'
ag -l rabbit_mc_amqp | xargs sed -i 's/rabbit_mc_amqp/mc_amqp/g'
ag -l rabbit_mqtt_mc | xargs sed -i 's/rabbit_mqtt_mc/mc_mqtt/g'
```
* mc: make deaths an annotation + fixes
* Fix mc_mqtt protocol_state callback
* Fix test will_delay_node_restart
```
make -C deps/rabbitmq_mqtt ct-v5 t=[mqtt,cluster_size_3]:will_delay_node_restart FULL=1
```
* Bazel run gazelle
* mix format rabbitmqctl.ex
* Ensure ttl annotation is refelected in amqp legacy protocol state
* Fix id access in message store
* Fix rabbit_message_interceptor_SUITE
* dializer fixes
* Fix rabbit:rabbit_message_interceptor_SUITE-mixed
set_annotation/3 should not result in duplicate keys
* Fix MQTT shared_SUITE-mixed
Up to 3.12 non-MQTT publishes were always QoS 1 regardless of delivery_mode.
https://github.com/rabbitmq/rabbitmq-server/blob/75a953ce286a10aca910c098805a4f545989af38/deps/rabbitmq_mqtt/src/rabbit_mqtt_processor.erl#L2075-L2076
From now on, non-MQTT publishes are QoS 1 if durable.
This makes more sense.
The MQTT plugin must send a #basic_message{} to an old node that does
not understand message containers.
* Field content of 'v1_0.data' can be binary
Fix
```
bazel test //deps/rabbitmq_mqtt:shared_SUITE-mixed \
--test_env FOCUS="-group [mqtt,v4,cluster_size_1] -case trace" \
-t- --test_sharding_strategy=disabled
```
* Remove route/2 and implement route/3 for all exchange types.
This removes the route/2 callback from rabbit_exchange_type and
makes route/3 mandatory instead. This is a breaking change and
will require all implementations of exchange types to update their
code, however this is necessary anyway for them to correctly handle
the mc type.
stream filtering fixes
* Translate directly from MQTT to AMQP 0.9.1
* handle undecoded properties in mc_compat
amqpl: put clause in right order
recover death deatails from amqp data
* Replace callback init_amqp with convert_from
* Fix return value of lists:keyfind/3
* Translate directly from AMQP 0.9.1 to MQTT
* Fix MQTT payload size
MQTT payload can be a list when converted from AMQP 0.9.1, for example.
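For example (a minimal shell sketch, not the conversion code itself), a
payload assembled from AMQP 0.9.1 content fragments can be an iolist, so its
size must be computed with iolist_size/1 rather than byte_size/1:
```
1> Payload = [<<"hel">>, <<"lo">>].
2> iolist_size(Payload).
5
```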
First conversions tests
Plus some other conversion related fixes.
bazel
bazel
translate amqp 1.0 null to undefined
mc: property/2 and correlation_id/message_id return type-tagged values
so that we can support a variety of types better.
The type tags are AMQP 1.0 flavoured.
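For illustration only (the concrete tag names here are an assumption), a
caller now matches on a tagged tuple instead of a bare binary:
```
1> Tagged = {utf8, <<"corr-1">>}.
2> case Tagged of {utf8, Bin} -> Bin; undefined -> undefined end.
<<"corr-1">>
```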
fix death recovery
mc_mqtt: impl new api
Add callbacks to allow protocols to compact data before storage
And make readable if needing to query things repeatedly.
bazel fix
* more decoding
* tracking mixed versions compat
* mc: flip default of `durable` annotation to save some data.
Assuming most messages are durable and that in-memory messages suffer less
from persistence overhead, it makes sense for a non-existent `durable`
annotation to mean durable=true.
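A minimal sketch of the lookup this implies, assuming the annotations live in
a plain map (the real accessor lives in the mc module):
```
1> Anns = #{priority => 4}.
2> maps:get(durable, Anns, true).
true
```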
* mc conversion tests and tidy up
* mc make x_header unstrict again
* amqpl: death record fixes
* bazel
* amqp -> amqpl conversion test
* Fix crash in mc_amqp:size/1
Body can be a single amqp-value section (instead of
being a list) as shown by test
```
make -C deps/rabbitmq_amqp1_0/ ct-system t=java
```
on branch native-amqp.
* Fix crash in lists:flatten/1
Data can be a single amqp-value section (instead of
being a list) as shown by test
```
make -C deps/rabbitmq_amqp1_0 ct-system t=dotnet:roundtrip_to_amqp_091
```
on branch native-amqp.
* Fix crash in rabbit_writer
Running test
```
make -C deps/rabbitmq_amqp1_0 ct-system t=dotnet:roundtrip_to_amqp_091
```
on branch native-amqp resulted in the following crash:
```
crasher:
initial call: rabbit_writer:enter_mainloop/2
pid: <0.711.0>
registered_name: []
exception error: bad argument
in function size/1
called as size([<<0>>,<<"Sw">>,[<<160,2>>,<<"hi">>]])
*** argument 1: not tuple or binary
in call from rabbit_binary_generator:build_content_frames/7 (rabbit_binary_generator.erl, line 89)
in call from rabbit_binary_generator:build_simple_content_frames/4 (rabbit_binary_generator.erl, line 61)
in call from rabbit_writer:assemble_frames/5 (rabbit_writer.erl, line 334)
in call from rabbit_writer:internal_send_command_async/3 (rabbit_writer.erl, line 365)
in call from rabbit_writer:handle_message/2 (rabbit_writer.erl, line 265)
in call from rabbit_writer:handle_message/3 (rabbit_writer.erl, line 232)
in call from rabbit_writer:mainloop1/2 (rabbit_writer.erl, line 223)
```
because #content.payload_fragments_rev is currently supposed to
be a flat list of binaries instead of being an iolist.
This commit fixes this crash inefficiently by calling
iolist_to_binary/1. A better solution would be to allow AMQP legacy's #content.payload_fragments_rev
to be an iolist.
* Add accidentally deleted line back
* mc: optimise mc_amqp internal format
By removing the outer records for message and delivery annotations
as well as application properties and footers.
* mc: optimise mc_amqp map_add by using upsert
* mc: refactoring and bug fixes
* mc_SUITE routingheader assertions
* mc remove serialize/1 callback as only used by amqp
* mc_amqp: avoid returning a nested list from protocol_state
* test and bug fix
* move infer_type to mc_util
* mc fixes and additional assertions
* Support headers exchange routing for MQTT messages
When a headers exchange is bound to the MQTT topic exchange, routing
will be performed based on both MQTT topic (by the topic exchange) and
MQTT User Property (by the headers exchange).
This combines the best of both MQTT 5.0 and AMQP 0.9.1 and
enables powerful routing topologies.
If the User Property contains the same name multiple times, only the last
occurrence (name and value) is considered by the headers exchange.
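A quick way to see the last-value-wins behaviour (a sketch, not the exchange
code): maps:from_list/1 keeps the right-most value for a duplicated key:
```
1> maps:from_list([{<<"k">>, <<"a">>}, {<<"k">>, <<"b">>}]).
#{<<"k">> => <<"b">>}
```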
* Fix crash when sending from stream to amqpl
When publishing a message via the stream protocol and consuming it via
AMQP 0.9.1, the following crash occurred prior to this commit:
```
crasher:
initial call: rabbit_channel:init/1
pid: <0.818.0>
registered_name: []
exception exit: {{badmatch,undefined},
[{rabbit_channel,handle_deliver0,4,
[{file,"rabbit_channel.erl"},
{line,2728}]},
{lists,foldl,3,[{file,"lists.erl"},{line,1594}]},
{rabbit_channel,handle_cast,2,
[{file,"rabbit_channel.erl"},
{line,728}]},
{gen_server2,handle_msg,2,
[{file,"gen_server2.erl"},{line,1056}]},
{proc_lib,wake_up,3,
[{file,"proc_lib.erl"},{line,251}]}]}
```
This commit first gives `mc:init/3` the chance to set exchange and
routing_keys annotations.
If not set, `rabbit_stream_queue` will set these annotations assuming
the message was originally published via the stream protocol.
* Support consistent hash exchange routing for MQTT 5.0
When a consistent hash exchange is bound to the MQTT topic exchange,
MQTT 5.0 messages can be routed to queues consistently based on the
Correlation-Data in the PUBLISH packet.
* Convert MQTT 5.0 User Property
* to AMQP 0.9.1 headers
* from AMQP 0.9.1 headers
* to AMQP 1.0 application properties and message annotations
* from AMQP 1.0 application properties and message annotations
* Make use of Annotations in mc_mqtt:protocol_state/2
mc_mqtt:protocol_state/2 takes Annotations as a parameter.
It's cleaner to make use of these Annotations when computing the
protocol state instead of relying on the caller (rabbitmq_mqtt_processor)
to compute the protocol state.
* Enforce AMQP 0.9.1 field name length limit
The AMQP 0.9.1 spec prohibits field names longer than 128 characters.
Therefore, when converting AMQP 1.0 message annotations, application
properties or MQTT 5.0 User Property to AMQP 0.9.1 headers, drop any
names longer than 128 characters.
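A sketch of that filtering step, assuming headers in the usual AMQP 0.9.1
table shape of {Name, Type, Value} tuples and checking the name's byte size
for simplicity:
```
1> Headers = [{<<"ok">>, longstr, <<"kept">>},
              {binary:copy(<<"x">>, 129), longstr, <<"dropped">>}].
2> [H || {Name, _, _} = H <- Headers, byte_size(Name) =< 128].
[{<<"ok">>,longstr,<<"kept">>}]
```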
* Fix type specs
Apply feedback from Michael Davis
Co-authored-by: Michael Davis <mcarsondavis@gmail.com>
* Add mc_mqtt unit test suite
Implement mc_mqtt:x_header/2
* Translate indicator that payload is UTF-8 encoded
when converting between MQTT 5.0 and AMQP 1.0
* Translate single amqp-value section from AMQP 1.0 to MQTT
Convert to a text representation, if possible, and indicate to MQTT
client that the payload is UTF-8 encoded. This way, the MQTT client will
be able to parse the payload.
If conversion to text representation is not possible, encode the payload
using the AMQP 1.0 type system and indicate the encoding via Content-Type
message/vnd.rabbitmq.amqp.
This Content-Type is not registered.
Type "message" makes sense since it's a message.
Vendor tree "vnd.rabbitmq.amqp" makes sense since the mere subtype "amqp" is not
registered.
* Fix payload conversion
* Translate Response Topic between MQTT and AMQP
Translate MQTT 5.0 Response Topic to AMQP 1.0 reply-to address and vice
versa.
The Response Topic must be a UTF-8 encoded string.
This commit re-uses the already defined RabbitMQ target addresses:
```
"/topic/" RK Publish to amq.topic with routing key RK
"/exchange/" X "/" RK Publish to exchange X with routing key RK
```
By default, the MQTT topic exchange is configured to be amq.topic using
the 1st target address.
When an operator modifies the mqtt.exchange, the 2nd target address is
used.
* Apply PR feedback
and fix formatting
Co-authored-by: Michael Davis <mcarsondavis@gmail.com>
* tidy up
* Add MQTT message_containers test
* consistent hash exchange: avoid amqp legacy conversion
When hashing on a header value.
* Avoid converting to amqp legacy when using exchange federation
* Fix test flake
* test and dialyzer fixes
* dialyzer fix
* Add MQTT protocol interoperability tests
Test receiving from and sending to MQTT 5.0 and
* AMQP 0.9.1
* AMQP 1.0
* STOMP
* Streams
* Regenerate portions of deps/rabbit/app.bzl with gazelle
I'm not exactly sure how this happened, but gazelle seems to have been
run with an older version of the rules_erlang gazelle extension at
some point. This caused generation of a structure that is no longer
used. This commit updates the structure to the current pattern.
* mc: refactoring
* mc_amqpl: handle delivery annotations
Just in case they are included.
Also use iolist_to_iovec to create flat list of binaries when
converting from amqp with amqp encoded payload.
---------
Co-authored-by: David Ansari <david.ansari@gmx.de>
Co-authored-by: Michael Davis <mcarsondavis@gmail.com>
Co-authored-by: Rin Kuryloski <kuryloskip@vmware.com>
2023-08-31 18:27:13 +08:00
|
|
|
publish_delivered1(Msg,
|
|
|
|
|
MsgProps = #message_properties {
|
|
|
|
|
needs_confirming = NeedsConfirming },
|
2023-10-04 18:11:54 +08:00
|
|
|
_ChPid, PersistFun,
|
2024-03-04 17:08:22 +08:00
|
|
|
State = #vqstate { qi_embed_msgs_below = IndexMaxSize,
|
2022-04-12 22:41:13 +08:00
|
|
|
next_seq_id = SeqId,
|
|
|
|
|
next_deliver_seq_id = NextDeliverSeqId,
|
|
|
|
|
in_counter = InCount,
|
|
|
|
|
out_counter = OutCount,
|
|
|
|
|
durable = IsDurable,
|
2022-06-16 17:21:47 +08:00
|
|
|
unconfirmed = UC,
|
|
|
|
|
unconfirmed_simple = UCS }) ->
|
2023-08-31 18:27:13 +08:00
|
|
|
MsgId = mc:get_annotation(id, Msg),
|
|
|
|
|
IsPersistent = mc:is_persistent(Msg),
|
2022-04-12 22:41:13 +08:00
|
|
|
IsPersistent1 = IsDurable andalso IsPersistent,
|
2024-03-04 17:08:22 +08:00
|
|
|
MsgStatus = msg_status(IsPersistent1, true, SeqId, Msg, MsgProps, IndexMaxSize),
|
2022-05-10 20:49:31 +08:00
|
|
|
{MsgStatus1, State1} = PersistFun(false, false, MsgStatus, State),
|
2022-04-12 22:41:13 +08:00
|
|
|
State2 = record_pending_ack(m(MsgStatus1), State1),
|
2022-06-16 17:21:47 +08:00
|
|
|
{UC1, UCS1} = maybe_needs_confirming(NeedsConfirming, persist_to(MsgStatus),
|
2024-03-04 17:08:22 +08:00
|
|
|
MsgId, UC, UCS),
|
2022-04-12 22:41:13 +08:00
|
|
|
{SeqId,
|
2022-06-03 17:53:45 +08:00
|
|
|
stats_published_pending_acks(MsgStatus1,
|
2022-04-12 22:41:13 +08:00
|
|
|
State2#vqstate{ next_seq_id = SeqId + 1,
|
|
|
|
|
next_deliver_seq_id = next_deliver_seq_id(SeqId, NextDeliverSeqId),
|
|
|
|
|
out_counter = OutCount + 1,
|
|
|
|
|
in_counter = InCount + 1,
|
2022-06-16 17:21:47 +08:00
|
|
|
unconfirmed = UC1,
|
|
|
|
|
unconfirmed_simple = UCS1 })}.
|
|
|
|
|
|
2024-03-04 17:08:22 +08:00
|
|
|
maybe_needs_confirming(false, _, _, UC, UCS) ->
|
2022-06-16 17:21:47 +08:00
|
|
|
{UC, UCS};
|
|
|
|
|
%% When storing to the v2 queue store we take the simple confirms
|
|
|
|
|
%% path because we don't need to track index and store separately.
|
2024-03-04 17:08:22 +08:00
|
|
|
maybe_needs_confirming(true, queue_store, MsgId, UC, UCS) ->
|
2022-06-16 17:21:47 +08:00
|
|
|
{UC, sets:add_element(MsgId, UCS)};
|
|
|
|
|
%% Otherwise we keep tracking as it used to be.
|
2024-03-04 17:08:22 +08:00
|
|
|
maybe_needs_confirming(true, _, MsgId, UC, UCS) ->
|
2022-06-16 17:21:47 +08:00
|
|
|
{sets:add_element(MsgId, UC), UCS}.
|
2015-09-29 06:41:51 +08:00
|
|
|
|
2010-05-22 01:01:41 +08:00
|
|
|
maybe_write_msg_to_disk(Force, MsgStatus = #msg_status {
|
2021-07-06 20:38:47 +08:00
|
|
|
seq_id = SeqId,
|
2011-03-05 02:30:25 +08:00
|
|
|
msg = Msg, msg_id = MsgId,
|
2021-07-06 20:38:47 +08:00
|
|
|
is_persistent = IsPersistent,
|
|
|
|
|
msg_location = ?IN_MEMORY,
|
2021-09-20 18:14:55 +08:00
|
|
|
msg_props = Props },
|
2021-07-06 20:38:47 +08:00
|
|
|
State = #vqstate{ store_state = StoreState0,
|
|
|
|
|
msg_store_clients = MSCState,
|
2015-02-03 00:35:05 +08:00
|
|
|
disk_write_count = Count})
|
2010-01-13 02:33:37 +08:00
|
|
|
when Force orelse IsPersistent ->
|
2015-01-07 02:30:24 +08:00
|
|
|
case persist_to(MsgStatus) of
|
2023-03-10 16:48:43 +08:00
|
|
|
msg_store -> ok = msg_store_write(MSCState, IsPersistent, SeqId, MsgId,
|
2014-12-05 22:44:31 +08:00
|
|
|
prepare_to_store(Msg)),
|
2021-07-06 20:38:47 +08:00
|
|
|
{MsgStatus#msg_status{msg_location = ?IN_SHARED_STORE},
|
2015-02-03 00:35:05 +08:00
|
|
|
State#vqstate{disk_write_count = Count + 1}};
|
2021-11-05 23:42:18 +08:00
|
|
|
queue_store -> {MsgLocation, StoreState} = rabbit_classic_queue_store_v2:write(SeqId, prepare_to_store(Msg), Props, StoreState0),
|
2021-07-06 20:38:47 +08:00
|
|
|
{MsgStatus#msg_status{ msg_location = MsgLocation },
|
|
|
|
|
State#vqstate{ store_state = StoreState,
|
|
|
|
|
disk_write_count = Count + 1}};
|
2021-10-21 19:17:18 +08:00
|
|
|
queue_index -> {MsgStatus, State}
|
2014-12-05 22:44:31 +08:00
|
|
|
end;
|
2015-02-03 00:35:05 +08:00
|
|
|
maybe_write_msg_to_disk(_Force, MsgStatus, State) ->
|
|
|
|
|
{MsgStatus, State}.
|
2010-01-13 02:33:37 +08:00
|
|
|
|
2019-02-13 01:18:14 +08:00
|
|
|
%% Due to certain optimisations made inside
|
2015-08-28 23:03:45 +08:00
|
|
|
%% rabbit_queue_index:pre_publish/7 we need to have two separate
|
2015-08-27 06:39:33 +08:00
|
|
|
%% functions for index persistence. This one is only used when paging
|
2015-08-28 01:53:16 +08:00
|
|
|
%% during memory pressure. We didn't want to modify
|
|
|
|
|
%% maybe_write_index_to_disk/3 because that function is used in other
|
|
|
|
|
%% places.
|
2015-08-28 01:47:06 +08:00
|
|
|
maybe_batch_write_index_to_disk(_Force,
|
|
|
|
|
MsgStatus = #msg_status {
|
|
|
|
|
index_on_disk = true }, State) ->
|
2015-08-27 06:39:33 +08:00
|
|
|
{MsgStatus, State};
|
2015-08-28 01:47:06 +08:00
|
|
|
maybe_batch_write_index_to_disk(Force,
|
|
|
|
|
MsgStatus = #msg_status {
|
|
|
|
|
msg = Msg,
|
|
|
|
|
msg_id = MsgId,
|
|
|
|
|
seq_id = SeqId,
|
|
|
|
|
is_persistent = IsPersistent,
|
2021-07-06 20:38:47 +08:00
|
|
|
msg_location = MsgLocation,
|
2015-08-28 01:47:06 +08:00
|
|
|
msg_props = MsgProps},
|
|
|
|
|
State = #vqstate {
|
2015-08-28 23:03:45 +08:00
|
|
|
target_ram_count = TargetRamCount,
|
|
|
|
|
disk_write_count = DiskWriteCount,
|
|
|
|
|
index_state = IndexState})
|
2015-08-27 23:42:20 +08:00
|
|
|
when Force orelse IsPersistent ->
|
2015-08-27 06:39:33 +08:00
|
|
|
{MsgOrId, DiskWriteCount1} =
|
|
|
|
|
case persist_to(MsgStatus) of
|
|
|
|
|
msg_store -> {MsgId, DiskWriteCount};
|
2021-07-06 20:38:47 +08:00
|
|
|
queue_store -> {MsgId, DiskWriteCount};
|
2015-08-27 06:39:33 +08:00
|
|
|
queue_index -> {prepare_to_store(Msg), DiskWriteCount + 1}
|
|
|
|
|
end,
|
2024-03-04 17:08:22 +08:00
|
|
|
IndexState1 = rabbit_classic_queue_index_v2:pre_publish(
|
|
|
|
|
MsgOrId, SeqId, MsgLocation, MsgProps,
|
|
|
|
|
IsPersistent, TargetRamCount, IndexState),
|
2015-08-27 06:39:33 +08:00
|
|
|
{MsgStatus#msg_status{index_on_disk = true},
|
2021-11-03 21:37:56 +08:00
|
|
|
State#vqstate{index_state = IndexState1,
|
2015-08-27 23:42:20 +08:00
|
|
|
disk_write_count = DiskWriteCount1}};
|
2015-08-28 01:47:06 +08:00
|
|
|
maybe_batch_write_index_to_disk(_Force, MsgStatus, State) ->
|
2015-08-27 23:42:20 +08:00
|
|
|
{MsgStatus, State}.
|
2015-08-27 06:39:33 +08:00
|
|
|
|
2021-05-17 20:38:09 +08:00
|
|
|
maybe_write_index_to_disk(_Force, MsgStatus = #msg_status {
|
|
|
|
|
index_on_disk = true }, State) ->
|
|
|
|
|
{MsgStatus, State};
|
2021-05-18 19:19:31 +08:00
|
|
|
maybe_write_index_to_disk(Force, MsgStatus = #msg_status {
|
2014-12-03 23:52:49 +08:00
|
|
|
msg = Msg,
|
2011-03-05 02:30:25 +08:00
|
|
|
msg_id = MsgId,
|
2010-10-23 15:20:08 +08:00
|
|
|
seq_id = SeqId,
|
2010-01-13 02:33:37 +08:00
|
|
|
is_persistent = IsPersistent,
|
2021-07-06 20:38:47 +08:00
|
|
|
msg_location = MsgLocation,
|
2015-01-27 19:41:25 +08:00
|
|
|
msg_props = MsgProps},
|
|
|
|
|
State = #vqstate{target_ram_count = TargetRamCount,
|
2015-02-03 00:35:05 +08:00
|
|
|
disk_write_count = DiskWriteCount,
|
2015-01-27 19:41:25 +08:00
|
|
|
index_state = IndexState})
|
2021-05-18 19:19:31 +08:00
|
|
|
when Force orelse IsPersistent ->
|
2015-02-03 00:35:05 +08:00
|
|
|
{MsgOrId, DiskWriteCount1} =
|
|
|
|
|
case persist_to(MsgStatus) of
|
|
|
|
|
msg_store -> {MsgId, DiskWriteCount};
|
2021-07-06 20:38:47 +08:00
|
|
|
queue_store -> {MsgId, DiskWriteCount};
|
2015-02-03 00:35:05 +08:00
|
|
|
queue_index -> {prepare_to_store(Msg), DiskWriteCount + 1}
|
|
|
|
|
end,
|
2024-03-04 17:08:22 +08:00
|
|
|
IndexState2 = rabbit_classic_queue_index_v2:publish(
|
2022-06-16 17:21:47 +08:00
|
|
|
MsgOrId, SeqId, MsgLocation, MsgProps, IsPersistent,
|
|
|
|
|
persist_to(MsgStatus) =:= msg_store, TargetRamCount,
|
2015-01-27 19:41:25 +08:00
|
|
|
IndexState),
|
2014-12-05 22:44:31 +08:00
|
|
|
{MsgStatus#msg_status{index_on_disk = true},
|
2024-03-04 17:08:22 +08:00
|
|
|
State#vqstate{index_state = IndexState2,
|
2015-02-03 00:35:05 +08:00
|
|
|
disk_write_count = DiskWriteCount1}};
|
2015-01-27 19:41:25 +08:00
|
|
|
|
|
|
|
|
maybe_write_index_to_disk(_Force, MsgStatus, State) ->
|
|
|
|
|
{MsgStatus, State}.
|
2009-11-10 02:12:00 +08:00
|
|
|
|
2015-02-03 00:35:05 +08:00
|
|
|
maybe_write_to_disk(ForceMsg, ForceIndex, MsgStatus, State) ->
|
|
|
|
|
{MsgStatus1, State1} = maybe_write_msg_to_disk(ForceMsg, MsgStatus, State),
|
|
|
|
|
maybe_write_index_to_disk(ForceIndex, MsgStatus1, State1).
|
2010-06-03 21:07:28 +08:00
|
|
|
|
2024-03-04 17:08:22 +08:00
|
|
|
maybe_prepare_write_to_disk(ForceMsg, ForceIndex0, MsgStatus, State) ->
|
2015-08-28 00:27:04 +08:00
|
|
|
{MsgStatus1, State1} = maybe_write_msg_to_disk(ForceMsg, MsgStatus, State),
|
2021-12-02 21:26:04 +08:00
|
|
|
%% We want messages written to the v2 per-queue store to also
|
|
|
|
|
%% be written to the index for proper accounting. The situation
|
|
|
|
|
%% where a message can be in the store but not in the index can
|
|
|
|
|
%% only occur when going through this function (not via maybe_write_to_disk).
|
2024-03-04 17:08:22 +08:00
|
|
|
ForceIndex = case persist_to(MsgStatus) of
|
|
|
|
|
queue_store -> true;
|
2021-12-02 21:26:04 +08:00
|
|
|
_ -> ForceIndex0
|
|
|
|
|
end,
|
2015-08-28 01:47:06 +08:00
|
|
|
maybe_batch_write_index_to_disk(ForceIndex, MsgStatus1, State1).
|
2015-08-28 00:27:04 +08:00
|
|
|
|
2024-03-04 17:08:22 +08:00
|
|
|
determine_persist_to(Msg,
|
2021-09-16 21:34:04 +08:00
|
|
|
#message_properties{size = BodySize},
|
|
|
|
|
IndexMaxSize) ->
|
|
|
|
|
%% The >= is so that you can set the env to 0 and never persist
|
|
|
|
|
%% to the index.
|
|
|
|
|
%%
|
|
|
|
|
%% We want this to be fast, so we avoid size(term_to_binary())
|
|
|
|
|
%% here, or using the term size estimation from truncate.erl, both
|
|
|
|
|
%% of which are too slow. So instead, if the message body size
|
|
|
|
|
%% goes over the limit then we avoid any other checks.
|
|
|
|
|
%%
|
|
|
|
|
%% If it doesn't we need to decide if the properties will push
|
|
|
|
|
%% it past the limit. If we have the encoded properties (usual
|
|
|
|
|
%% case) we can just check their size. If we don't (message came
|
|
|
|
|
%% via the direct client), we make a guess based on the number of
|
|
|
|
|
%% headers.
|
2023-08-31 18:27:13 +08:00
|
|
|
|
|
|
|
|
{MetaSize, _BodySize} = mc:size(Msg),
|
|
|
|
|
case BodySize >= IndexMaxSize of
|
|
|
|
|
true -> msg_store;
|
|
|
|
|
false ->
|
|
|
|
|
Est = MetaSize + BodySize,
|
|
|
|
|
case Est >= IndexMaxSize of
|
2024-03-04 17:08:22 +08:00
|
|
|
true -> msg_store;
|
|
|
|
|
false -> queue_store
|
versa.
The Response Topic must be a UTF-8 encoded string.
This commit re-uses the already defined RabbitMQ target addresses:
```
"/topic/" RK Publish to amq.topic with routing key RK
"/exchange/" X "/" RK Publish to exchange X with routing key RK
```
By default, the MQTT topic exchange is configure dto be amq.topic using
the 1st target address.
When an operator modifies the mqtt.exchange, the 2nd target address is
used.
* Apply PR feedback
and fix formatting
Co-authored-by: Michael Davis <mcarsondavis@gmail.com>
* tidy up
* Add MQTT message_containers test
* consistent hash exchange: avoid amqp legacy conversion
When hashing on a header value.
* Avoid converting to amqp legacy when using exchange federation
* Fix test flake
* test and dialyzer fixes
* dialyzer fix
* Add MQTT protocol interoperability tests
Test receiving from and sending to MQTT 5.0 and
* AMQP 0.9.1
* AMQP 1.0
* STOMP
* Streams
* Regenerate portions of deps/rabbit/app.bzl with gazelle
I'm not exactly sure how this happened, but gazell seems to have been
run with an older version of the rules_erlang gazelle extension at
some point. This caused generation of a structure that is no longer
used. This commit updates the structure to the current pattern.
* mc: refactoring
* mc_amqpl: handle delivery annotations
Just in case they are included.
Also use iolist_to_iovec to create flat list of binaries when
converting from amqp with amqp encoded payload.
---------
Co-authored-by: David Ansari <david.ansari@gmx.de>
Co-authored-by: Michael Davis <mcarsondavis@gmail.com>
Co-authored-by: Rin Kuryloski <kuryloskip@vmware.com>
2023-08-31 18:27:13 +08:00
|
|
|
end
end.

persist_to(#msg_status{persist_to = To}) -> To.

prepare_to_store(Msg) ->
    mc:prepare(store, Msg).

%%----------------------------------------------------------------------------
%% Internal gubbins for acks
%%----------------------------------------------------------------------------

record_pending_ack(#msg_status { seq_id = SeqId } = MsgStatus,
                   State = #vqstate { ram_pending_ack  = RPA,
                                      disk_pending_ack = DPA,
                                      ack_in_counter   = AckInCount}) ->
    {RPA1, DPA1} =
        case msg_in_ram(MsgStatus) of
            false -> {RPA, maps:put(SeqId, MsgStatus, DPA)};
            _     -> {maps:put(SeqId, MsgStatus, RPA), DPA}
        end,
    State #vqstate { ram_pending_ack  = RPA1,
                     disk_pending_ack = DPA1,
                     ack_in_counter   = AckInCount + 1}.

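%% Fetch a pending ack without removing it. The in-memory map is checked
%% first, then the on-disk map; an unknown SeqId fails with a badkey error
%% from maps:get/2, so callers are expected to pass only seq ids that are
%% known to be pending.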
lookup_pending_ack(SeqId, #vqstate { ram_pending_ack  = RPA,
                                     disk_pending_ack = DPA}) ->
    case maps:get(SeqId, RPA, none) of
        none -> maps:get(SeqId, DPA);
        V    -> V
    end.

%% First parameter = UpdateStats
%% @todo Do the stats updating outside of this function.
remove_pending_ack(true, SeqId, State) ->
    case remove_pending_ack(false, SeqId, State) of
        {none, _} ->
            {none, State};
        {MsgStatus, State1} ->
            {MsgStatus, stats_acked_pending(MsgStatus, State1)}
    end;
remove_pending_ack(false, SeqId, State = #vqstate{ram_pending_ack  = RPA,
                                                  disk_pending_ack = DPA}) ->
    case maps:get(SeqId, RPA, none) of
        none -> case maps:get(SeqId, DPA, none) of
                    none ->
                        {none, State};
                    V ->
                        DPA1 = maps:remove(SeqId, DPA),
                        {V, State#vqstate{disk_pending_ack = DPA1}}
                end;
        V    -> RPA1 = maps:remove(SeqId, RPA),
                {V, State #vqstate { ram_pending_ack = RPA1 }}
    end.

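%% Drop all pending acks. When KeepPersistent is true only transient
%% messages are removed from the shared store; otherwise the index entries
%% are acked and both the per-queue (v2) store and the shared store are
%% cleaned up as well.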
purge_pending_ack(KeepPersistent,
                  State = #vqstate { index_state = IndexState,
                                     store_state = StoreState0 }) ->
    {IndexOnDiskSeqIds, MsgIdsByStore, SeqIdsInStore, State1} = purge_pending_ack1(State),
    case KeepPersistent of
        true  -> remove_transient_msgs_by_id(MsgIdsByStore, State1);
        false -> {DeletedSegments, IndexState1} =
                     rabbit_classic_queue_index_v2:ack(IndexOnDiskSeqIds, IndexState),
                 StoreState1 = lists:foldl(fun rabbit_classic_queue_store_v2:remove/2, StoreState0, SeqIdsInStore),
                 StoreState = rabbit_classic_queue_store_v2:delete_segments(DeletedSegments, StoreState1),
                 State2 = remove_vhost_msgs_by_id(MsgIdsByStore, State1),
                 State2 #vqstate { index_state = IndexState1,
                                   store_state = StoreState }
    end.

purge_pending_ack_delete_and_terminate(
        State = #vqstate { index_state = IndexState,
                           store_state = StoreState }) ->
    {_, MsgIdsByStore, _SeqIdsInStore, State1} = purge_pending_ack1(State),
    StoreState1 = rabbit_classic_queue_store_v2:terminate(StoreState),
    IndexState1 = rabbit_classic_queue_index_v2:delete_and_terminate(IndexState),
    State2 = remove_vhost_msgs_by_id(MsgIdsByStore, State1),
    State2 #vqstate { index_state = IndexState1,
                      store_state = StoreState1 }.

purge_pending_ack1(State = #vqstate { ram_pending_ack  = RPA,
                                      disk_pending_ack = DPA }) ->
    F = fun (_SeqId, MsgStatus, Acc) -> accumulate_ack(MsgStatus, Acc) end,
    {IndexOnDiskSeqIds, MsgIdsByStore, SeqIdsInStore, _AllMsgIds} =
        maps:fold(F, maps:fold(F, accumulate_ack_init(), RPA), DPA),
    State1 = State #vqstate{ram_pending_ack  = #{},
                            disk_pending_ack = #{}},
    {IndexOnDiskSeqIds, MsgIdsByStore, SeqIdsInStore, State1}.

%% MsgIdsByStore is a map with two keys:
%%
%% true: holds a list of Persistent Message Ids.
%% false: holds a list of Transient Message Ids.
%%
%% The msg_store_remove/3 function takes this boolean flag to determine
%% which store the messages should be removed from.
remove_vhost_msgs_by_id(MsgIdsByStore,
                        State = #vqstate{ msg_store_clients = MSCState }) ->
    maps:fold(fun(IsPersistent, MsgIds, StateF) ->
                  case msg_store_remove(MSCState, IsPersistent, MsgIds) of
                      {ok, []} ->
                          StateF;
                      {ok, ConfirmMsgIds} ->
                          record_confirms(sets:from_list(ConfirmMsgIds, [{version, 2}]), StateF)
                  end
              end, State, MsgIdsByStore).

remove_transient_msgs_by_id(MsgIdsByStore, State) ->
    remove_vhost_msgs_by_id(maps:with([false], MsgIdsByStore), State).

accumulate_ack_init() -> {[], maps:new(), [], []}.

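%% The ack accumulator is a 4-tuple:
%%   {IndexOnDiskSeqIds, MsgIdsByStore, SeqIdsInStore, AllMsgIds}
%% For illustration (hypothetical ids): acking a single persistent message
%% that lives in the shared store and has its index entry on disk yields
%%   {[SeqId], #{true => [{SeqId, MsgId}]}, [], [MsgId]}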
accumulate_ack(#msg_status { seq_id        = SeqId,
                             msg_id        = MsgId,
                             is_persistent = IsPersistent,
                             msg_location  = MsgLocation,
                             index_on_disk = IndexOnDisk },
               {IndexOnDiskSeqIdsAcc, MsgIdsByStore, SeqIdsInStore, AllMsgIds}) ->
    {cons_if(IndexOnDisk, SeqId, IndexOnDiskSeqIdsAcc),
     case MsgLocation of
         ?IN_SHARED_STORE -> rabbit_misc:maps_cons(IsPersistent, {SeqId, MsgId}, MsgIdsByStore);
         _                -> MsgIdsByStore
     end,
     case MsgLocation of
         ?IN_QUEUE_STORE -> [SeqId|SeqIdsInStore];
         ?IN_QUEUE_INDEX -> [SeqId|SeqIdsInStore];
         _               -> SeqIdsInStore
     end,
     [MsgId | AllMsgIds]}.

%%----------------------------------------------------------------------------
%% Internal plumbing for confirms (aka publisher acks)
%%----------------------------------------------------------------------------

record_confirms(MsgIdSet, State = #vqstate { msgs_on_disk        = MOD,
                                             msg_indices_on_disk = MIOD,
                                             unconfirmed         = UC,
                                             confirmed           = C }) ->
    State #vqstate {
      msgs_on_disk        = sets_subtract(MOD, MsgIdSet),
      msg_indices_on_disk = sets_subtract(MIOD, MsgIdSet),
      unconfirmed         = sets_subtract(UC, MsgIdSet),
      confirmed           = sets:union(C, MsgIdSet) }.

%% Function defined in both rabbit_msg_store and rabbit_variable_queue.
sets_subtract(Set1, Set2) ->
    case sets:size(Set2) of
        1 -> sets:del_element(hd(sets:to_list(Set2)), Set1);
        _ -> sets:subtract(Set1, Set2)
    end.

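%% The single-element special case deletes that one element directly and
%% avoids a full sets:subtract/2 when only a single message is confirmed.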
msgs_written_to_disk(Callback, MsgIdSet, ignored) ->
    %% The message was already acked, so it doesn't matter if it was never
    %% written to the index; we can process the confirm.
    Callback(?MODULE,
             fun (?MODULE, State) -> record_confirms(MsgIdSet, State) end);
msgs_written_to_disk(Callback, MsgIdSet, written) ->
    Callback(?MODULE,
             fun (?MODULE, State = #vqstate { msgs_on_disk        = MOD,
                                              msg_indices_on_disk = MIOD,
                                              unconfirmed         = UC }) ->
                     %% @todo Apparently the message store ALWAYS calls this function
                     %%       for all message IDs. This is a waste. We should only
                     %%       call it for messages that need confirming, and avoid
                     %%       this intersection call.
                     %%
                     %%       The same may apply to msg_indices_written_to_disk as well.
                     Confirmed = sets:intersection(UC, MsgIdSet),
                     record_confirms(sets:intersection(MsgIdSet, MIOD),
                                     State #vqstate {
                                       msgs_on_disk =
                                           sets:union(MOD, Confirmed) })
             end).

msg_indices_written_to_disk(Callback, MsgIdSet) ->
    Callback(?MODULE,
             fun (?MODULE, State = #vqstate { msgs_on_disk        = MOD,
                                              msg_indices_on_disk = MIOD,
                                              unconfirmed         = UC }) ->
                     Confirmed = sets:intersection(UC, MsgIdSet),
                     record_confirms(sets:intersection(MsgIdSet, MOD),
                                     State #vqstate {
                                       msg_indices_on_disk =
                                           sets:union(MIOD, Confirmed) })
             end).

%% @todo Having to call run_backing_queue is probably reducing performance...
msgs_and_indices_written_to_disk(Callback, MsgIdSet) ->
    Callback(?MODULE,
             fun (?MODULE, State) -> record_confirms(MsgIdSet, State) end).

%%----------------------------------------------------------------------------
%% Internal plumbing for requeue
%%----------------------------------------------------------------------------

%% Rebuild queue, inserting sequence ids to maintain ordering
requeue_merge(SeqIds, Q, MsgIds, Limit, State) ->
    requeue_merge(SeqIds, Q, ?QUEUE:new(), MsgIds,
                  Limit, State).

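%% Walk the requeued SeqIds (ascending) and the existing queue in step,
%% pulling each message back out of the pending-ack maps and splicing it
%% into the queue in seq_id order. Merging stops once a SeqId reaches
%% Limit; the remaining SeqIds are returned to the caller.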
requeue_merge([SeqId | Rest] = SeqIds, Q, Front, MsgIds,
              Limit, State)
  when Limit == undefined orelse SeqId < Limit ->
    case ?QUEUE:out(Q) of
        {{value, #msg_status { seq_id = SeqIdQ } = MsgStatus}, Q1}
          when SeqIdQ < SeqId ->
            %% enqueue from the remaining queue
            requeue_merge(SeqIds, Q1, ?QUEUE:in(MsgStatus, Front), MsgIds,
                          Limit, State);
        {_, _Q1} ->
            %% enqueue from the remaining list of sequence ids
            case msg_from_pending_ack(SeqId, State) of
                {none, _} ->
                    requeue_merge(Rest, Q, Front, MsgIds, Limit, State);
                {#msg_status { msg_id = MsgId } = MsgStatus, State1} ->
                    State2 = stats_requeued_memory(MsgStatus, State1),
                    requeue_merge(Rest, Q, ?QUEUE:in(MsgStatus, Front), [MsgId | MsgIds],
                                  Limit, State2)
            end
    end;
requeue_merge(SeqIds, Q, Front, MsgIds,
              _Limit, State) ->
    {SeqIds, ?QUEUE:join(Front, Q), MsgIds, State}.

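%% Requeued messages that fall beyond the in-memory portion of the queue:
%% each one is (re)written to disk if necessary and accounted for in delta
%% rather than being put back into q3.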
delta_merge([], Delta, MsgIds, State) ->
    {Delta, MsgIds, State};
delta_merge(SeqIds, Delta, MsgIds, State) ->
    lists:foldl(fun (SeqId, {Delta0, MsgIds0, State0} = Acc) ->
                        case msg_from_pending_ack(SeqId, State0) of
                            {none, _} ->
                                Acc;
                            {#msg_status { msg_id = MsgId,
                                           is_persistent = IsPersistent } = MsgStatus, State1} ->
                                {_MsgStatus, State2} =
                                    maybe_prepare_write_to_disk(true, true, MsgStatus, State1),
                                {expand_delta(SeqId, Delta0, IsPersistent), [MsgId | MsgIds0],
                                 stats_requeued_disk(MsgStatus, State2)}
                        end
                end, {Delta, MsgIds, State}, SeqIds).

%% Mostly opposite of record_pending_ack/2
msg_from_pending_ack(SeqId, State) ->
    case remove_pending_ack(false, SeqId, State) of
        {none, _} ->
            {none, State};
        {#msg_status { msg_props = MsgProps } = MsgStatus, State1} ->
            {MsgStatus #msg_status {
               msg_props = MsgProps #message_properties { needs_confirming = false } },
             State1}
    end.

delta_limit(?BLANK_DELTA_PATTERN(_))              -> undefined;
delta_limit(#delta { start_seq_id = StartSeqId }) -> StartSeqId.

%%----------------------------------------------------------------------------
%% Iterator
%%----------------------------------------------------------------------------

ram_ack_iterator(State) ->
    {ack, maps:iterator(State#vqstate.ram_pending_ack)}.

disk_ack_iterator(State) ->
    {ack, maps:iterator(State#vqstate.disk_pending_ack)}.

msg_iterator(State) -> istate(start, State).

istate(start, State) -> {q3,    State#vqstate.q3,    State};
istate(q3,    State) -> {delta, State#vqstate.delta, State};
istate(delta, _State) -> done.

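%% next/2 drives the iterators: it returns {empty, IndexState} when an
%% iterator is exhausted, or {value, MsgStatus, Unacked, Next, IndexState}
%% where Unacked tells the caller whether the entry came from the pending
%% ack maps, and Next is the continuation to pass back in.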
next({ack, It}, IndexState) ->
    case maps:next(It) of
        none                     -> {empty, IndexState};
        {_SeqId, MsgStatus, It1} -> Next = {ack, It1},
                                    {value, MsgStatus, true, Next, IndexState}
    end;
next(done, IndexState) -> {empty, IndexState};
next({delta, #delta{start_seq_id = SeqId,
                    end_seq_id   = SeqId}, State}, IndexState) ->
    next(istate(delta, State), IndexState);
next({delta, #delta{start_seq_id = SeqId,
                    end_seq_id   = SeqIdEnd} = Delta, State}, IndexState) ->
    SeqIdB = rabbit_classic_queue_index_v2:next_segment_boundary(SeqId),
    %% It may make sense to limit this based on rate. But this
    %% is not called outside of CMQs so I will leave it alone
    %% for the time being.
    SeqId1 = lists:min([SeqIdB,
                        %% We must limit the number of messages read at once,
                        %% otherwise the queue will attempt to read up to
                        %% segment_entry_count() messages from the index each
                        %% time. The value chosen here is arbitrary.
                        SeqId + 2048,
                        SeqIdEnd]),
    {List, IndexState1} = rabbit_classic_queue_index_v2:read(SeqId, SeqId1, IndexState),
    next({delta, Delta#delta{start_seq_id = SeqId1}, List, State}, IndexState1);
next({delta, Delta, [], State}, IndexState) ->
    next({delta, Delta, State}, IndexState);
next({delta, Delta, [{_, SeqId, _, _, _} = M | Rest], State}, IndexState) ->
    case is_msg_in_pending_acks(SeqId, State) of
        false -> Next = {delta, Delta, Rest, State},
                 {value, beta_msg_status(M), false, Next, IndexState};
        true  -> next({delta, Delta, Rest, State}, IndexState)
    end;
next({Key, Q, State}, IndexState) ->
    case ?QUEUE:out(Q) of
        {empty, _Q}              -> next(istate(Key, State), IndexState);
        {{value, MsgStatus}, QN} -> Next = {Key, QN, State},
                                    {value, MsgStatus, false, Next, IndexState}
    end.

inext(It, {Its, IndexState}) ->
    case next(It, IndexState) of
        {empty, IndexState1} ->
            {Its, IndexState1};
        {value, MsgStatus1, Unacked, It1, IndexState1} ->
            {[{MsgStatus1, Unacked, It1} | Its], IndexState1}
    end.

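%% ifold/4 folds over all iterators at once by always picking the entry
%% with the smallest seq_id, and threads the updated #vqstate{} (including
%% the msg store client state) through every step so that no modified
%% state is discarded, even when the fold stops early.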
ifold(_Fun, Acc, [], State0) ->
    {Acc, State0};
ifold(Fun, Acc, Its0, State0) ->
    [{MsgStatus, Unacked, It} | Rest] =
        lists:sort(fun ({#msg_status{seq_id = SeqId1}, _, _},
                        {#msg_status{seq_id = SeqId2}, _, _}) ->
                           SeqId1 =< SeqId2
                   end, Its0),
    {Msg, State1} = read_msg(MsgStatus, State0),
    case Fun(Msg, MsgStatus#msg_status.msg_props, Unacked, Acc) of
        {stop, Acc1} ->
            {Acc1, State1};
        {cont, Acc1} ->
            IndexState0 = State1#vqstate.index_state,
            {Its1, IndexState1} = inext(It, {Rest, IndexState0}),
            State2 = State1#vqstate{index_state = IndexState1},
            ifold(Fun, Acc1, Its1, State2)
    end.

%%----------------------------------------------------------------------------
%% Phase changes
%%----------------------------------------------------------------------------

fetch_from_q3(State = #vqstate { delta = #delta { count = DeltaCount },
                                 q3    = Q3 }) ->
    case ?QUEUE:out(Q3) of
        {empty, _Q3} when DeltaCount =:= 0 ->
            {empty, State};
        {empty, _Q3} ->
            fetch_from_q3(maybe_deltas_to_betas(State));
        {{value, MsgStatus}, Q3a} ->
            State1 = State #vqstate { q3 = Q3a },
            {loaded, {MsgStatus, State1}}
    end.

%% Thresholds for doing multi-read against the shared message
%% stores. The values have been obtained through numerous
%% experiments. Be careful when editing these values: past a
%% certain size the performance drops and it is no longer
%% worth keeping the extra data in memory.
-define(SHARED_READ_MANY_SIZE_THRESHOLD, 12000).
-define(SHARED_READ_MANY_COUNT_THRESHOLD, 10).

maybe_deltas_to_betas(State = #vqstate { rates = #rates{ out = OutRate }}) ->
    AfterFun = process_delivers_and_acks_fun(deliver_and_ack),
    %% We allow from 1 to 2048 messages in memory depending on the consume rate.
    MemoryLimit = min(1 + floor(2 * OutRate), 2048),
    maybe_deltas_to_betas(AfterFun, State, MemoryLimit, messages).

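%% For example, with an out rate of 100 messages/s the clause above reads
%% ahead min(1 + floor(2 * 100.0), 2048) = 201 messages.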
maybe_deltas_to_betas(_DelsAndAcksFun,
                      State = #vqstate {delta = ?BLANK_DELTA_PATTERN(X) },
                      _MemoryLimit, _WhatToRead) ->
    State;
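%% WhatToRead is either 'messages', in which case message contents are
%% fetched eagerly (read_many against the v2 store, and against the shared
%% stores when the size/count thresholds allow), or 'metadata_only', in
%% which case only the index entries are loaded.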
|
2015-09-06 18:45:14 +08:00
|
|
|
maybe_deltas_to_betas(DelsAndAcksFun,
|
|
|
|
|
State = #vqstate {
|
2010-06-04 05:16:23 +08:00
|
|
|
delta = Delta,
|
|
|
|
|
q3 = Q3,
|
|
|
|
|
index_state = IndexState,
|
2022-07-05 22:00:39 +08:00
|
|
|
store_state = StoreState,
|
2022-10-21 19:38:51 +08:00
|
|
|
msg_store_clients = {MCStateP, MCStateT},
|
2014-12-03 23:52:49 +08:00
|
|
|
ram_msg_count = RamMsgCount,
|
|
|
|
|
ram_bytes = RamBytes,
|
2015-02-03 00:35:05 +08:00
|
|
|
disk_read_count = DiskReadCount,
|
2017-02-10 18:56:23 +08:00
|
|
|
delta_transient_bytes = DeltaTransientBytes,
|
2024-03-04 17:08:22 +08:00
|
|
|
transient_threshold = TransientThreshold },
|
2023-06-01 19:13:12 +08:00
|
|
|
MemoryLimit, WhatToRead) ->
|
2010-10-22 19:53:46 +08:00
|
|
|
#delta { start_seq_id = DeltaSeqId,
|
|
|
|
|
count = DeltaCount,
|
2017-02-10 18:56:23 +08:00
|
|
|
transient = Transient,
|
2010-10-22 19:53:46 +08:00
|
|
|
end_seq_id = DeltaSeqIdEnd } = Delta,
|
2023-04-25 15:50:27 +08:00
|
|
|
%% For v2 we want to limit the number of messages read at once to lower
|
|
|
|
|
%% the memory footprint. We use the consume rate to determine how many
|
|
|
|
|
%% messages we read.
|
2024-03-04 17:08:22 +08:00
|
|
|
DeltaSeqLimit = DeltaSeqId + MemoryLimit,
|
2010-10-22 19:53:46 +08:00
|
|
|
DeltaSeqId1 =
|
2024-03-04 17:08:22 +08:00
|
|
|
lists:min([rabbit_classic_queue_index_v2:next_segment_boundary(DeltaSeqId),
|
2023-04-25 15:50:27 +08:00
|
|
|
DeltaSeqLimit, DeltaSeqIdEnd]),
|
2024-03-04 17:08:22 +08:00
|
|
|
{List0, IndexState1} = rabbit_classic_queue_index_v2:read(DeltaSeqId, DeltaSeqId1, IndexState),
|
2023-06-21 02:04:17 +08:00
|
|
|
{List, StoreState3, MCStateP3, MCStateT3} = case WhatToRead of
|
2023-06-01 19:13:12 +08:00
|
|
|
messages ->
|
|
|
|
|
%% We try to read messages from disk all at once instead of
|
|
|
|
|
%% 1 by 1 at fetch time. When v1 is used and messages are
|
|
|
|
|
%% embedded, then the message content is already read from
|
|
|
|
|
%% disk at this point. For v2 embedded we must do a separate
|
|
|
|
|
%% call to obtain the contents and then merge the contents
|
|
|
|
|
%% back into the #msg_status records.
|
|
|
|
|
%%
|
|
|
|
|
%% For shared message store messages we do the same but only
|
|
|
|
|
%% for messages < ?SHARED_READ_MANY_SIZE_THRESHOLD bytes and
|
|
|
|
|
%% when there are at least ?SHARED_READ_MANY_COUNT_THRESHOLD
|
|
|
|
|
%% messages to fetch from that store. Other messages will be
|
|
|
|
|
%% fetched one by one right before sending the messages.
|
|
|
|
|
%%
|
|
|
|
|
%% Since we have two different shared stores for persistent
|
|
|
|
|
%% and transient messages they are treated separately when
|
|
|
|
|
%% deciding whether to read_many from either of them.
|
|
|
|
|
%%
|
|
|
|
|
%% Because v2 and shared stores function differently we
|
|
|
|
|
%% must keep different information for performing the reads.
|
|
|
|
|
{V2Reads0, ShPersistReads, ShTransientReads} = lists:foldl(fun
|
|
|
|
|
({_, SeqId, MsgLocation, _, _}, {V2ReadsAcc, ShPReadsAcc, ShTReadsAcc}) when is_tuple(MsgLocation) ->
|
|
|
|
|
{[{SeqId, MsgLocation}|V2ReadsAcc], ShPReadsAcc, ShTReadsAcc};
|
|
|
|
|
({MsgId, _, rabbit_msg_store, #message_properties{size = Size}, true},
|
|
|
|
|
{V2ReadsAcc, ShPReadsAcc, ShTReadsAcc}) when Size =< ?SHARED_READ_MANY_SIZE_THRESHOLD ->
|
|
|
|
|
{V2ReadsAcc, [MsgId|ShPReadsAcc], ShTReadsAcc};
|
|
|
|
|
({MsgId, _, rabbit_msg_store, #message_properties{size = Size}, false},
|
|
|
|
|
{V2ReadsAcc, ShPReadsAcc, ShTReadsAcc}) when Size =< ?SHARED_READ_MANY_SIZE_THRESHOLD ->
|
|
|
|
|
{V2ReadsAcc, ShPReadsAcc, [MsgId|ShTReadsAcc]};
|
|
|
|
|
(_, Acc) ->
|
|
|
|
|
Acc
|
|
|
|
|
end, {[], [], []}, List0),
|
|
|
|
|
%% In order to properly read and merge V2 messages we want them
|
|
|
|
|
%% in the older->younger order.
|
|
|
|
|
V2Reads = lists:reverse(V2Reads0),
|
|
|
|
|
%% We do read_many for v2 store unconditionally.
|
|
|
|
|
{V2Msgs, StoreState2} = rabbit_classic_queue_store_v2:read_many(V2Reads, StoreState),
|
|
|
|
|
List1 = merge_read_msgs(List0, V2Reads, V2Msgs),
|
|
|
|
|
%% We read from the shared message store only if there are multiple messages
|
|
|
|
|
%% (10+ as we wouldn't get much benefits from smaller number of messages)
|
|
|
|
|
%% otherwise we wait and read later.
|
|
|
|
|
%%
|
|
|
|
|
%% Because read_many does not use FHC we do not get an updated MCState
|
|
|
|
|
%% like with normal reads.
|
2023-06-21 02:04:17 +08:00
|
|
|
            {List2, MCStateP2} = case length(ShPersistReads) < ?SHARED_READ_MANY_COUNT_THRESHOLD of
                true ->
                    {List1, MCStateP};
                false ->
                    {ShPersistMsgs, MCStateP1} = rabbit_msg_store:read_many(ShPersistReads, MCStateP),
                    case map_size(ShPersistMsgs) of
                        0 -> {List1, MCStateP1};
                        _ -> {merge_sh_read_msgs(List1, ShPersistMsgs), MCStateP1}
                    end
            end,
            {List3, MCStateT2} = case length(ShTransientReads) < ?SHARED_READ_MANY_COUNT_THRESHOLD of
                true ->
                    {List2, MCStateT};
                false ->
                    {ShTransientMsgs, MCStateT1} = rabbit_msg_store:read_many(ShTransientReads, MCStateT),
                    case map_size(ShTransientMsgs) of
                        0 -> {List2, MCStateT1};
                        _ -> {merge_sh_read_msgs(List2, ShTransientMsgs), MCStateT1}
                    end
            end,
            {List3, StoreState2, MCStateP2, MCStateT2};
        metadata_only ->
            {List0, StoreState, MCStateP, MCStateT}
    end,
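    %% Turn the index entries (with any message contents merged in) into
    %% betas, ignoring transient messages below TransientThreshold, and
    %% count how much RAM and how many transient messages they represent.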
    {Q3a, RamCountsInc, RamBytesInc, State1, TransientCount, TransientBytes} =
        betas_from_index_entries(List, TransientThreshold,
                                 DelsAndAcksFun,
                                 State #vqstate { index_state = IndexState1,
                                                  store_state = StoreState3,
                                                  msg_store_clients = {MCStateP3, MCStateT3}}),
    State2 = State1 #vqstate { ram_msg_count = RamMsgCount + RamCountsInc,
                               ram_bytes = RamBytes + RamBytesInc,
                               disk_read_count = DiskReadCount + RamCountsInc },
    case ?QUEUE:len(Q3a) of
        0 ->
            %% we ignored every message in the segment due to it being
            %% transient and below the threshold
            maybe_deltas_to_betas(
              DelsAndAcksFun,
              State2 #vqstate {
                delta = d(Delta #delta { start_seq_id = DeltaSeqId1 })},
              MemoryLimit, WhatToRead);
        Q3aLen ->
            Q3b = ?QUEUE:join(Q3, Q3a),
            case DeltaCount - Q3aLen of
                0 ->
                    %% delta is now empty
                    State2 #vqstate { delta = ?BLANK_DELTA,
                                      q3 = Q3b,
                                      delta_transient_bytes = 0};
                N when N > 0 ->
                    Delta1 = d(#delta { start_seq_id = DeltaSeqId1,
                                        count = N,
                                        %% @todo Probably something wrong, seen it become negative...
                                        transient = Transient - TransientCount,
                                        end_seq_id = DeltaSeqIdEnd }),
                    State2 #vqstate { delta = Delta1,
                                      q3 = Q3b,
                                      delta_transient_bytes = DeltaTransientBytes - TransientBytes }
            end
    end.

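%% Merge the messages returned by the v2 read_many back into the index
%% entries. The reads and their results are walked in the same order as
%% the entries; entries that were not part of the v2 reads are kept as-is.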
merge_read_msgs([M = {_, SeqId, _, _, _}|MTail], [{SeqId, _}|RTail], [Msg|MsgTail]) ->
    [setelement(1, M, Msg)|merge_read_msgs(MTail, RTail, MsgTail)];
merge_read_msgs([M|MTail], RTail, MsgTail) ->
    [M|merge_read_msgs(MTail, RTail, MsgTail)];
%% @todo We probably don't need to unwrap until the end.
merge_read_msgs([], [], []) ->
    [].

%% We may not get as many messages as we tried reading.
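%% Entries whose message id has no match in the read results are left
%% untouched.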
merge_sh_read_msgs([M = {MsgId, _, _, _, _}|MTail], Reads) ->
    case Reads of
        #{MsgId := Msg} ->
            [setelement(1, M, Msg)|merge_sh_read_msgs(MTail, Reads)];
        _ ->
            [M|merge_sh_read_msgs(MTail, Reads)]
    end;
merge_sh_read_msgs(MTail, _Reads) ->
    MTail.

%% Flushes queue index batch caches and updates queue index state.
ui(#vqstate{index_state = IndexState,
            target_ram_count = TargetRamCount} = State) ->
    IndexState1 = rabbit_classic_queue_index_v2:flush_pre_publish_cache(
                    TargetRamCount, IndexState),
    State#vqstate{index_state = IndexState1}.

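%% Best-effort termination of the message store client: any failure is
%% swallowed so that queue shutdown can proceed.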
maybe_client_terminate(MSCStateP) ->
    %% The queue might have been asked to stop by the supervisor; it needs a
    %% clean shutdown in order for the supervision strategy to work - if it
    %% reaches the maximum number of restarts, this might bring the vhost down.
    try
        rabbit_msg_store:client_terminate(MSCStateP)
    catch
        _:_ ->
            ok
    end.