accept:
- accept_encoding_header
- accept_header
- accept_neg
- accept_parser
amqp_client:
- amqp_auth_mechanisms
- amqp_channel
- amqp_channel_sup
- amqp_channel_sup_sup
- amqp_channels_manager
- amqp_client
- amqp_connection
- amqp_connection_sup
- amqp_connection_type_sup
- amqp_direct_connection
- amqp_direct_consumer
- amqp_gen_connection
- amqp_gen_consumer
- amqp_main_reader
- amqp_network_connection
- amqp_rpc_client
- amqp_rpc_server
- amqp_selective_consumer
- amqp_ssl
- amqp_sup
- amqp_uri
- amqp_util
- rabbit_routing_util
- uri_parser
amqp10_client:
- amqp10_client
- amqp10_client_app
- amqp10_client_connection
- amqp10_client_connection_sup
- amqp10_client_connections_sup
- amqp10_client_frame_reader
- amqp10_client_session
- amqp10_client_sessions_sup
- amqp10_client_sup
- amqp10_client_types
- amqp10_msg
amqp10_common:
- amqp10_binary_generator
- amqp10_binary_parser
- amqp10_framing
- amqp10_framing0
aten:
- aten
- aten_app
- aten_detect
- aten_detector
- aten_emitter
- aten_sink
- aten_sup
base64url:
- base64url
cowboy:
- cowboy
- cowboy_app
- cowboy_bstr
- cowboy_children
- cowboy_clear
- cowboy_clock
- cowboy_compress_h
- cowboy_constraints
- cowboy_handler
- cowboy_http
- cowboy_http2
- cowboy_loop
- cowboy_metrics_h
- cowboy_middleware
- cowboy_req
- cowboy_rest
- cowboy_router
- cowboy_static
- cowboy_stream
- cowboy_stream_h
- cowboy_sub_protocol
- cowboy_sup
- cowboy_tls
- cowboy_tracer_h
- cowboy_websocket
cowlib:
- cow_base64url
- cow_cookie
- cow_date
- cow_hpack
- cow_http
- cow_http2
- cow_http2_machine
- cow_http_hd
- cow_http_struct_hd
- cow_http_te
- cow_iolists
- cow_link
- cow_mimetypes
- cow_multipart
- cow_qs
- cow_spdy
- cow_sse
- cow_uri
- cow_uri_template
- cow_ws
credentials_obfuscation:
- credentials_obfuscation
- credentials_obfuscation_app
- credentials_obfuscation_pbe
- credentials_obfuscation_sup
- credentials_obfuscation_svc
ct_helper:
- ct_helper
- ct_helper_error_h
cuttlefish:
- conf_parse
- cuttlefish
- cuttlefish_advanced
- cuttlefish_bytesize
- cuttlefish_conf
- cuttlefish_datatypes
- cuttlefish_duration
- cuttlefish_duration_parse
- cuttlefish_effective
- cuttlefish_enum
- cuttlefish_error
- cuttlefish_escript
- cuttlefish_flag
- cuttlefish_generator
- cuttlefish_mapping
- cuttlefish_rebar_plugin
- cuttlefish_schema
- cuttlefish_translation
- cuttlefish_unit
- cuttlefish_util
- cuttlefish_validator
- cuttlefish_variable
- cuttlefish_vmargs
eetcd:
- auth_pb
- eetcd
- eetcd_app
- eetcd_auth
- eetcd_auth_gen
- eetcd_cluster
- eetcd_cluster_gen
- eetcd_compare
- eetcd_conn
- eetcd_conn_sup
- eetcd_data_coercion
- eetcd_election
- eetcd_election_gen
- eetcd_grpc
- eetcd_health_gen
- eetcd_kv
- eetcd_kv_gen
- eetcd_lease
- eetcd_lease_gen
- eetcd_lease_sup
- eetcd_lock
- eetcd_lock_gen
- eetcd_maintenance
- eetcd_maintenance_gen
- eetcd_op
- eetcd_stream
- eetcd_sup
- eetcd_watch
- eetcd_watch_gen
- gogo_pb
- health_pb
- kv_pb
- router_pb
emqtt:
- emqtt
- emqtt_cli
- emqtt_frame
- emqtt_inflight
- emqtt_props
- emqtt_quic
- emqtt_quic_connection
- emqtt_quic_stream
- emqtt_secret
- emqtt_sock
- emqtt_ws
enough:
- enough
gen_batch_server:
- gen_batch_server
getopt:
- getopt
gun:
- gun
- gun_app
- gun_content_handler
- gun_data_h
- gun_http
- gun_http2
- gun_sse_h
- gun_sup
- gun_tcp
- gun_tls
- gun_ws
- gun_ws_h
Allow using the Khepri database to store metadata instead of Mnesia

[Why]
Mnesia is a very powerful and convenient tool for Erlang applications:
it is a persistent disc-based database, it handles replication across
multiple Erlang nodes and it is available out-of-the-box from the
Erlang/OTP distribution. RabbitMQ relies on Mnesia to manage all its
metadata:
* virtual hosts' properties
* internal users
* queue, exchange and binding declarations (not queue data)
* runtime parameters and policies
* ...

Unfortunately, Mnesia makes it difficult to handle network partitions
and, as a consequence, the merge conflicts between Erlang nodes once a
partition is resolved. RabbitMQ provides several partition handling
strategies but they are not bullet-proof. Users still hit situations
where it is a pain to repair a cluster following a network partition.

[How]
@kjnilsson created Ra [1], a Raft consensus library that RabbitMQ
already uses successfully to implement, for instance, quorum queues and
streams. Those queues do not suffer from network partitions.

We created Khepri [2], a new persistent and replicated database engine
based on Ra, and we want to use it in place of Mnesia in RabbitMQ to
solve the problems with network partitions.

This patch integrates Khepri as an experimental feature. When enabled,
RabbitMQ will store all its metadata in Khepri instead of Mnesia.

This change comes with behavior changes. While Khepri remains disabled,
you should see no changes to the behavior of RabbitMQ; if there are
changes, it is a bug. After Khepri is enabled, there are significant
changes of behavior that you should be aware of.

Because it is based on the Raft consensus algorithm, when there is a
network partition, only the cluster members that are in a partition
containing at least `(Number of nodes in the cluster ÷ 2) + 1` nodes
can "make progress". In other words, only those nodes may write to the
Khepri database and read from the database and expect a consistent
result.

For instance, in a cluster of 5 RabbitMQ nodes:
* If there are two partitions, one with 3 nodes and one with 2 nodes,
only the group of 3 nodes will be able to write to the database.
* If there are three partitions, two with 2 nodes and one with 1 node,
none of the groups can write to the database.
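To make the majority rule concrete, here is a minimal sketch in Erlang
(an illustration only, not RabbitMQ code):

    %% Minimal sketch: the number of members a partition needs before it
    %% can accept writes under Raft.
    majority(NumNodes) -> NumNodes div 2 + 1.
    %% majority(5) =:= 3, majority(4) =:= 3, majority(3) =:= 2.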
Because the Khepri database will be used for all kinds of metadata, it
means that RabbitMQ nodes that can't write to the database will be
unable to perform some operations. A list of operations and what to
expect is documented in the associated pull request and on the RabbitMQ
website.

This requirement from Raft also affects the startup of RabbitMQ nodes in
a cluster: at least a quorum of nodes must be started at once to allow
nodes to become ready.

To enable Khepri, you need to enable the `khepri_db` feature flag:

    rabbitmqctl enable_feature_flag khepri_db

When the `khepri_db` feature flag is enabled, the migration code
performs the following two tasks:
1. It synchronizes the Khepri cluster membership from the Mnesia
   cluster. It uses `mnesia_to_khepri:sync_cluster_membership/1` from
   the `khepri_mnesia_migration` application [3].
2. It copies data from relevant Mnesia tables to Khepri, doing some
   conversion if necessary on the way. Again, it uses
   `mnesia_to_khepri:copy_tables/4` from `khepri_mnesia_migration` to do
   it.

This can be performed on a running standalone RabbitMQ node or cluster.
Data will be migrated from Mnesia to Khepri without any service
interruption. Note that during the migration, performance may decrease
and the memory footprint may go up.

Because this feature flag is considered experimental, it is not enabled
by default, even on a brand new RabbitMQ deployment.

More about the implementation details below:

In the past months, all accesses to Mnesia were isolated in a collection
of `rabbit_db*` modules. This is where the integration of Khepri mostly
takes place: we use a function called `rabbit_khepri:handle_fallback/1`
which selects the database and performs the query or the transaction.
Here is an example from `rabbit_db_vhost`:

* Up until RabbitMQ 3.12.x:

    get(VHostName) when is_binary(VHostName) ->
        get_in_mnesia(VHostName).

* Starting with RabbitMQ 3.13.0:

    get(VHostName) when is_binary(VHostName) ->
        rabbit_khepri:handle_fallback(
          #{mnesia => fun() -> get_in_mnesia(VHostName) end,
            khepri => fun() -> get_in_khepri(VHostName) end}).

This `rabbit_khepri:handle_fallback/1` function relies on two things:
1. whether the `khepri_db` feature flag is enabled, in which case it
   always executes the Khepri-based variant;
2. otherwise, whether the Mnesia tables can still be read from and
   written to.

Before the feature flag is enabled, or during the migration, the
function will try to execute the Mnesia-based variant. If it succeeds,
it returns the result. If it fails because one or more Mnesia tables
can't be used, it restarts from scratch: this means the feature flag is
being enabled and, depending on the outcome, either the Mnesia-based
variant will succeed (the feature flag couldn't be enabled) or the
feature flag will be marked as enabled and the Khepri-based variant will
be called. The meat of this function really lives in the
`khepri_mnesia_migration` application [3];
`rabbit_khepri:handle_fallback/1` is a wrapper on top of it that knows
about the feature flag.
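As a rough illustration of that fallback logic, here is a simplified
sketch only; the helper name and the error pattern below are assumptions,
and the real implementation in `rabbit_khepri` and
`khepri_mnesia_migration` differs:

    %% Simplified sketch of the fallback idea; not the actual implementation.
    handle_fallback(#{mnesia := MnesiaFun, khepri := KhepriFun} = Funs) ->
        case khepri_db_enabled() of   %% assumed helper, for illustration only
            true ->
                KhepriFun();
            false ->
                try
                    MnesiaFun()
                catch
                    %% Assumed error shape: a Mnesia table is unusable because
                    %% the migration is in progress; re-check the flag and retry.
                    exit:{aborted, {no_exists, _}} ->
                        handle_fallback(Funs)
                end
        end.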
However, some calls to the database do not depend on the existence of
Mnesia tables, such as functions where we need to learn about the
members of a cluster. For those, we can't rely on exceptions from
Mnesia. Therefore, we just look at the state of the feature flag to
determine which database to use. There are two situations though:

* Sometimes, we need the feature flag state query to block because the
function interested in it can't return a valid answer during the
migration. Here is an example:

    case rabbit_khepri:is_enabled(RemoteNode) of
        true  -> can_join_using_khepri(RemoteNode);
        false -> can_join_using_mnesia(RemoteNode)
    end

* Sometimes, we need the feature flag state query to NOT block (for
instance because it would cause a deadlock). Here is an example:

    case rabbit_khepri:get_feature_state() of
        enabled -> members_using_khepri();
        _       -> members_using_mnesia()
    end

Direct accesses to Mnesia still exist. They are limited to code that is
specific to Mnesia, such as classic queue mirroring or network partition
handling strategies.

Now, to discover the Mnesia tables to migrate and how to migrate them,
we use an Erlang module attribute called
`rabbit_mnesia_tables_to_khepri_db` which indicates a list of Mnesia
tables and an associated converter module. Here is an example in the
`rabbitmq_recent_history_exchange` plugin:

    -rabbit_mnesia_tables_to_khepri_db(
       [{?RH_TABLE, rabbit_db_rh_exchange_m2k_converter}]).

The converter module (`rabbit_db_rh_exchange_m2k_converter` in this
example) is in fact a "sub" converter module called by
`rabbit_db_m2k_converter`. See the documentation of a `mnesia_to_khepri`
converter module to learn more about these modules.

[1] https://github.com/rabbitmq/ra
[2] https://github.com/rabbitmq/khepri
[3] https://github.com/rabbitmq/khepri_mnesia_migration

See #7206.

Co-authored-by: Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
Co-authored-by: Diana Parra Corbacho <dparracorbac@vmware.com>
Co-authored-by: Michael Davis <mcarsondavis@gmail.com>
horus:
- horus
- horus_cover
- horus_utils
host_triple:
- host_triple
inet_tcp_proxy_dist:
- inet_tcp_proxy_dist
- inet_tcp_proxy_dist_app
- inet_tcp_proxy_dist_conn_sup
- inet_tcp_proxy_dist_controller
- inet_tcp_proxy_dist_sup
jose:
- jose
- jose_app
- jose_base
- jose_base64
- jose_base64url
- jose_block_encryptor
- jose_chacha20_poly1305
- jose_chacha20_poly1305_crypto
- jose_chacha20_poly1305_libsodium
- jose_chacha20_poly1305_unsupported
- jose_crypto_compat
- jose_curve25519
- jose_curve25519_libdecaf
- jose_curve25519_libsodium
- jose_curve25519_unsupported
- jose_curve448
- jose_curve448_libdecaf
- jose_curve448_unsupported
- jose_json
- jose_json_jason
- jose_json_jiffy
- jose_json_jsone
- jose_json_jsx
- jose_json_ojson
- jose_json_poison
- jose_json_poison_compat_encoder
- jose_json_poison_lexical_encoder
- jose_json_thoas
- jose_json_unsupported
- jose_jwa
- jose_jwa_aes
- jose_jwa_aes_kw
- jose_jwa_base64url
- jose_jwa_bench
- jose_jwa_chacha20
- jose_jwa_chacha20_poly1305
- jose_jwa_concat_kdf
- jose_jwa_curve25519
- jose_jwa_curve448
- jose_jwa_ed25519
- jose_jwa_ed448
- jose_jwa_hchacha20
- jose_jwa_math
- jose_jwa_pkcs1
- jose_jwa_pkcs5
- jose_jwa_pkcs7
- jose_jwa_poly1305
- jose_jwa_sha3
- jose_jwa_unsupported
- jose_jwa_x25519
- jose_jwa_x448
- jose_jwa_xchacha20
- jose_jwa_xchacha20_poly1305
- jose_jwe
- jose_jwe_alg
- jose_jwe_alg_aes_kw
- jose_jwe_alg_c20p_kw
- jose_jwe_alg_dir
- jose_jwe_alg_ecdh_1pu
- jose_jwe_alg_ecdh_es
- jose_jwe_alg_pbes2
- jose_jwe_alg_rsa
- jose_jwe_alg_xc20p_kw
- jose_jwe_enc
- jose_jwe_enc_aes
- jose_jwe_enc_c20p
- jose_jwe_enc_xc20p
- jose_jwe_zip
- jose_jwk
- jose_jwk_der
- jose_jwk_kty
- jose_jwk_kty_ec
- jose_jwk_kty_oct
- jose_jwk_kty_okp_ed25519
- jose_jwk_kty_okp_ed25519ph
- jose_jwk_kty_okp_ed448
- jose_jwk_kty_okp_ed448ph
- jose_jwk_kty_okp_x25519
- jose_jwk_kty_okp_x448
- jose_jwk_kty_rsa
- jose_jwk_oct
- jose_jwk_openssh_key
- jose_jwk_pem
- jose_jwk_set
- jose_jwk_use_enc
- jose_jwk_use_sig
- jose_jws
- jose_jws_alg
- jose_jws_alg_ecdsa
- jose_jws_alg_eddsa
- jose_jws_alg_hmac
- jose_jws_alg_none
- jose_jws_alg_poly1305
- jose_jws_alg_rsa_pkcs1_v1_5
- jose_jws_alg_rsa_pss
- jose_jwt
- jose_public_key
- jose_server
- jose_sha3
- jose_sha3_keccakf1600_driver
- jose_sha3_keccakf1600_nif
- jose_sha3_libdecaf
- jose_sha3_unsupported
- jose_sup
- jose_xchacha20_poly1305
- jose_xchacha20_poly1305_crypto
- jose_xchacha20_poly1305_unsupported
khepri:
- khepri
- khepri_adv
- khepri_app
- khepri_cluster
- khepri_condition
- khepri_event_handler
- khepri_evf
- khepri_export_erlang
- khepri_import_export
- khepri_machine
- khepri_path
- khepri_pattern_tree
- khepri_payload
- khepri_projection
- khepri_sproc
- khepri_sup
- khepri_tree
- khepri_tx
- khepri_tx_adv
- khepri_utils
khepri_mnesia_migration:
- khepri_mnesia_migration_app
- khepri_mnesia_migration_sup
- kmm_utils
- m2k_cluster_sync
- m2k_cluster_sync_sup
- m2k_export
- m2k_subscriber
- m2k_table_copy
- m2k_table_copy_sup
- m2k_table_copy_sup_sup
- mnesia_to_khepri
- mnesia_to_khepri_converter
- mnesia_to_khepri_example_converter
looking_glass:
- lg
- lg_callgrind
- lg_file_reader
- lg_file_tracer
- lg_flame
- lg_messages
- lg_messages_seqdiag
- lg_rabbit_hole
- lg_raw_console_tracer
- lg_socket_client
- lg_socket_tracer
- lg_term
- lg_tracer
- lg_tracer_pool
- looking_glass_app
- looking_glass_sup
lz4:
- lz4_nif
- lz4f
meck:
- meck
- meck_args_matcher
- meck_code
- meck_code_gen
- meck_cover
- meck_expect
- meck_history
- meck_matcher
- meck_proc
- meck_ret_spec
- meck_util
my_plugin:
- my_plugin
oauth2_client:
- oauth2_client
observer_cli:
- observer_cli
- observer_cli_application
- observer_cli_escriptize
- observer_cli_ets
- observer_cli_help
- observer_cli_inet
- observer_cli_lib
- observer_cli_mnesia
- observer_cli_plugin
- observer_cli_port
- observer_cli_process
- observer_cli_store
- observer_cli_system
osiris:
- osiris
- osiris_app
- osiris_bench
- osiris_bloom
- osiris_counters
- osiris_ets
- osiris_log
- osiris_log_shared
- osiris_replica
- osiris_replica_reader
- osiris_replica_reader_sup
- osiris_retention
- osiris_server_sup
- osiris_sup
- osiris_tracking
- osiris_util
- osiris_writer
prometheus:
- prometheus
- prometheus_boolean
- prometheus_buckets
- prometheus_collector
- prometheus_counter
- prometheus_format
- prometheus_gauge
- prometheus_histogram
- prometheus_http
- prometheus_instrumenter
- prometheus_metric
- prometheus_metric_spec
- prometheus_misc
- prometheus_mnesia
- prometheus_mnesia_collector
- prometheus_model
- prometheus_model_helpers
- prometheus_protobuf_format
- prometheus_quantile_summary
- prometheus_registry
- prometheus_summary
- prometheus_sup
- prometheus_test_instrumenter
- prometheus_text_format
- prometheus_time
- prometheus_vm_dist_collector
- prometheus_vm_memory_collector
- prometheus_vm_msacc_collector
- prometheus_vm_statistics_collector
- prometheus_vm_system_info_collector
proper:
- proper
- proper_arith
- proper_array
- proper_dict
- proper_erlang_abstract_code
- proper_fsm
- proper_gb_sets
- proper_gb_trees
- proper_gen
- proper_gen_next
- proper_orddict
- proper_ordsets
- proper_prop_remover
- proper_queue
- proper_sa
- proper_sets
- proper_shrink
- proper_statem
- proper_symb
- proper_target
- proper_transformer
- proper_types
- proper_typeserver
- proper_unicode
- proper_unused_imports_remover
- vararg
quantile_estimator:
- quantile
- quantile_estimator
ra:
- ra
- ra_app
- ra_bench
- ra_counters
- ra_dbg
- ra_directory
- ra_env
- ra_ets_queue
- ra_file_handle
- ra_flru
- ra_leaderboard
- ra_lib
- ra_log
- ra_log_cache
- ra_log_ets
- ra_log_meta
- ra_log_pre_init
- ra_log_reader
- ra_log_segment
- ra_log_segment_writer
- ra_log_snapshot
- ra_log_sup
- ra_log_wal
- ra_log_wal_sup
- ra_machine
- ra_machine_ets
- ra_machine_simple
- ra_metrics_ets
- ra_monitors
- ra_server
- ra_server_proc
- ra_server_sup
- ra_server_sup_sup
- ra_snapshot
- ra_sup
- ra_system
- ra_system_sup
- ra_systems_sup
rabbit:
- amqqueue
- background_gc
- code_server_cache
- gatherer
- gm
- internal_user
- lqueue
Message Containers (#5077)
This PR implements an approach for a "protocol (data format) agnostic core" where the format of the message isn't converted at the point of reception.
Currently, all messages that do not originate from AMQP 0.9.1 are converted into an AMQP 0.9.1-flavoured basic_message record before being sent to a queue. If the messages are then consumed by the originating protocol, they are converted back from AMQP 0.9.1. For some protocols such as MQTT 3.1 this isn't too expensive, as MQTT is mostly a fairly easily mapped subset of AMQP 0.9.1, but for others such as AMQP 1.0 the conversions are awkward and in some cases lossy, even if consuming from the originating protocol.
This PR instead wraps all incoming messages in their originating form into a generic, extensible message container type (mc). The container module exposes an API to get common message details such as size and various properties (ttl, priority, etc.) directly from the source data type. Each protocol needs to implement the mc behaviour such that when a message originating from one protocol is consumed by another protocol, we convert it to the target protocol at that point.
The message container also contains annotations, dead letter records and other metadata we need to record during the lifetime of a message. The original protocol message is never modified unless it is consumed.
This includes conversion modules to and from amqp, amqpl (AMQP 0.9.1) and mqtt.
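As a rough, hypothetical sketch of the container idea described above (the module, record and callback names below are illustrative assumptions, not the actual `mc` API):
```
-module(mc_sketch).
-export([payload_size/1, convert/2]).

%% Each protocol provides a module implementing callbacks such as these.
-callback payload_size(ProtoState :: term()) -> non_neg_integer().
-callback convert_to(TargetMod :: module(), ProtoState :: term()) -> term().

%% The container wraps the unmodified source-protocol message plus annotations.
-record(mc_sketch, {proto_mod   :: module(),
                    proto_state :: term(),
                    annotations = #{} :: map()}).

payload_size(#mc_sketch{proto_mod = Mod, proto_state = S}) ->
    Mod:payload_size(S).

%% Convert only when the consuming protocol differs from the originating one.
convert(TargetMod, #mc_sketch{proto_mod = TargetMod} = Mc) ->
    Mc;
convert(TargetMod, #mc_sketch{proto_mod = Mod, proto_state = S} = Mc) ->
    Mc#mc_sketch{proto_mod   = TargetMod,
                 proto_state = Mod:convert_to(TargetMod, S)}.
```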
COMMIT HISTORY:
* Refactor away from using the delivery{} record
In many places including exchange types. This should make it
easier to move towards using a message container type instead of
basic_message.
Add mc module and move direct replies outside of exchange
Lots of changes incl classic queues
Implement stream support incl amqp conversions
simplify mc state record
move mc.erl
mc dlx stuff
recent history exchange
Make tracking work
But doesn't take a protocol agnostic approach as we just convert
everything into AMQP legacy and back. Might be good enough for now.
Tracing as a whole may want a bit of a re-vamp at some point.
tidy
make quorum queue peek work by legacy conversion
dead lettering fixes
dead lettering fixes
CMQ fixes
rabbit_trace type fixes
fixes
fix
Fix classic queue props
test assertion fix
feature flag and backwards compat
Enable message_container feature flag in some SUITEs
Dialyzer fixes
fixes
fix
test fixes
Various
Manually update a gazelle generated file
until a gazelle enhancement can be made
https://github.com/rabbitmq/rules_erlang/issues/185
Add message_containers_SUITE to bazel
and regen bazel files with gazelle from rules_erlang@main
Simplify essential property access
Such as durable, ttl and priority by extracting them into annotations
at message container init time.
Move type
to remove the dependency on amqp10 stuff in mc.erl
mostly because I don't know how to make bazel do the right thing
add more stuff
Refine routing header stuff
wip
Cosmetics
Do not use "maybe" as type name as "maybe" is a keyword since OTP 25
which makes Erlang LS complain.
* Dedup death queue names
* Fix function clause crashes
Fix failing tests in the MQTT shared_SUITE:
A classic queue message ID can be undefined as set in
https://github.com/rabbitmq/rabbitmq-server/blob/fbe79ff47b4edbc0fd95457e623d6593161ad198/deps/rabbit/src/rabbit_classic_queue_index_v2.erl#L1048
Fix failing tests in the MQTT shared_SUITE-mixed:
When feature flag message_containers is disabled, the
message is not an #mc{} record, but a #basic_message{} record.
* Fix is_utf8_no_null crash
Prior to this commit, the function crashed if invalid UTF-8 was
provided, e.g.:
```
1> rabbit_misc:is_valid_shortstr(<<"😇"/utf16>>).
** exception error: no function clause matching rabbit_misc:is_utf8_no_null(<<216,61,222,7>>) (rabbit_misc.erl, line 1481)
```
* Implement mqtt mc behaviour
For now via amqp translation.
This is still work in progress, but the following SUITEs pass:
```
make -C deps/rabbitmq_mqtt ct-shared t=[mqtt,v5,cluster_size_1] FULL=1
make -C deps/rabbitmq_mqtt ct-v5 t=[mqtt,cluster_size_1] FULL=1
```
* Shorten mc file names
Module name length matters because for each persistent message the #mc{}
record is persisted to disk.
```
1> iolist_size(term_to_iovec({mc, rabbit_mc_amqp_legacy})).
30
2> iolist_size(term_to_iovec({mc, mc_amqpl})).
17
```
This commit renames the mc modules:
```
ag -l rabbit_mc_amqp_legacy | xargs sed -i 's/rabbit_mc_amqp_legacy/mc_amqpl/g'
ag -l rabbit_mc_amqp | xargs sed -i 's/rabbit_mc_amqp/mc_amqp/g'
ag -l rabbit_mqtt_mc | xargs sed -i 's/rabbit_mqtt_mc/mc_mqtt/g'
```
* mc: make deaths an annotation + fixes
* Fix mc_mqtt protocol_state callback
* Fix test will_delay_node_restart
```
make -C deps/rabbitmq_mqtt ct-v5 t=[mqtt,cluster_size_3]:will_delay_node_restart FULL=1
```
* Bazel run gazelle
* mix format rabbitmqctl.ex
* Ensure ttl annotation is reflected in amqp legacy protocol state
* Fix id access in message store
* Fix rabbit_message_interceptor_SUITE
* dialyzer fixes
* Fix rabbit:rabbit_message_interceptor_SUITE-mixed
set_annotation/3 should not result in duplicate keys
* Fix MQTT shared_SUITE-mixed
Up to 3.12 non-MQTT publishes were always QoS 1 regardless of delivery_mode.
https://github.com/rabbitmq/rabbitmq-server/blob/75a953ce286a10aca910c098805a4f545989af38/deps/rabbitmq_mqtt/src/rabbit_mqtt_processor.erl#L2075-L2076
From now on, non-MQTT publishes are QoS 1 if durable.
This makes more sense.
The MQTT plugin must send a #basic_message{} to an old node that does
not understand message containers.
* Field content of 'v1_0.data' can be binary
Fix
```
bazel test //deps/rabbitmq_mqtt:shared_SUITE-mixed \
--test_env FOCUS="-group [mqtt,v4,cluster_size_1] -case trace" \
-t- --test_sharding_strategy=disabled
```
* Remove route/2 and implement route/3 for all exchange types.
This removes the route/2 callback from rabbit_exchange_type and
makes route/3 mandatory instead. This is a breaking change and
will require all implementations of exchange types to update their
code, however this is necessary anyway for them to correctly handle
the mc type.
stream filtering fixes
* Translate directly from MQTT to AMQP 0.9.1
* handle undecoded properties in mc_compat
amqpl: put clause in right order
recover death details from amqp data
* Replace callback init_amqp with convert_from
* Fix return value of lists:keyfind/3
* Translate directly from AMQP 0.9.1 to MQTT
* Fix MQTT payload size
MQTT payload can be a list when converted from AMQP 0.9.1 for example
First conversions tests
Plus some other conversion related fixes.
bazel
bazel
translate amqp 1.0 null to undefined
mc: property/2 and correlation_id/message_id return type tagged values.
To ensure we can support a variety of types better.
The type tags are AMQP 1.0 flavoured.
fix death recovery
mc_mqtt: impl new api
Add callbacks to allow protocols to compact data before storage
And make readable if needing to query things repeatedly.
bazel fix
* more decoding
* tracking mixed versions compat
* mc: flip default of `durable` annotation to save some data.
Assuming most messages are durable and that in memory messages suffer less
from persistence overhead, it makes sense for a non-existent `durable`
annotation to mean durable=true.
* mc conversion tests and tidy up
* mc make x_header unstrict again
* amqpl: death record fixes
* bazel
* amqp -> amqpl conversion test
* Fix crash in mc_amqp:size/1
Body can be a single amqp-value section (instead of
being a list) as shown by test
```
make -C deps/rabbitmq_amqp1_0/ ct-system t=java
```
on branch native-amqp.
* Fix crash in lists:flatten/1
Data can be a single amqp-value section (instead of
being a list) as shown by test
```
make -C deps/rabbitmq_amqp1_0 ct-system t=dotnet:roundtrip_to_amqp_091
```
on branch native-amqp.
* Fix crash in rabbit_writer
Running test
```
make -C deps/rabbitmq_amqp1_0 ct-system t=dotnet:roundtrip_to_amqp_091
```
on branch native-amqp resulted in the following crash:
```
crasher:
initial call: rabbit_writer:enter_mainloop/2
pid: <0.711.0>
registered_name: []
exception error: bad argument
in function size/1
called as size([<<0>>,<<"Sw">>,[<<160,2>>,<<"hi">>]])
*** argument 1: not tuple or binary
in call from rabbit_binary_generator:build_content_frames/7 (rabbit_binary_generator.erl, line 89)
in call from rabbit_binary_generator:build_simple_content_frames/4 (rabbit_binary_generator.erl, line 61)
in call from rabbit_writer:assemble_frames/5 (rabbit_writer.erl, line 334)
in call from rabbit_writer:internal_send_command_async/3 (rabbit_writer.erl, line 365)
in call from rabbit_writer:handle_message/2 (rabbit_writer.erl, line 265)
in call from rabbit_writer:handle_message/3 (rabbit_writer.erl, line 232)
in call from rabbit_writer:mainloop1/2 (rabbit_writer.erl, line 223)
```
because #content.payload_fragments_rev is currently supposed to
be a flat list of binaries instead of being an iolist.
This commit fixes this crash inefficiently by calling
iolist_to_binary/1. A better solution would be to allow AMQP legacy's #content.payload_fragments_rev
to be an iolist.
* Add accidentally deleted line back
* mc: optimise mc_amqp internal format
By removing the outer records for message and delivery annotations
as well as application properties and footers.
* mc: optimise mc_amqp map_add by using upsert
* mc: refactoring and bug fixes
* mc_SUITE routingheader assertions
* mc remove serialize/1 callback as only used by amqp
* mc_amqp: avoid returning a nested list from protocol_state
* test and bug fix
* move infer_type to mc_util
* mc fixes and additional assertions
* Support headers exchange routing for MQTT messages
When a headers exchange is bound to the MQTT topic exchange, routing
will be performed based on both the MQTT topic (by the topic exchange) and
MQTT User Property (by the headers exchange).
This combines the best of both MQTT 5.0 and AMQP 0.9.1 and
enables powerful routing topologies.
When the User Property contains the same name multiple times, only the
last name (and value) will be considered by the headers exchange.
* Fix crash when sending from stream to amqpl
When publishing a message via the stream protocol and consuming it via
AMQP 0.9.1, the following crash occurred prior to this commit:
```
crasher:
initial call: rabbit_channel:init/1
pid: <0.818.0>
registered_name: []
exception exit: {{badmatch,undefined},
[{rabbit_channel,handle_deliver0,4,
[{file,"rabbit_channel.erl"},
{line,2728}]},
{lists,foldl,3,[{file,"lists.erl"},{line,1594}]},
{rabbit_channel,handle_cast,2,
[{file,"rabbit_channel.erl"},
{line,728}]},
{gen_server2,handle_msg,2,
[{file,"gen_server2.erl"},{line,1056}]},
{proc_lib,wake_up,3,
[{file,"proc_lib.erl"},{line,251}]}]}
```
This commit first gives `mc:init/3` the chance to set exchange and
routing_keys annotations.
If not set, `rabbit_stream_queue` will set these annotations assuming
the message was originally published via the stream protocol.
* Support consistent hash exchange routing for MQTT 5.0
When a consistent hash exchange is bound to the MQTT topic exchange,
MQTT 5.0 messages can be routed to queues consistently based on the
Correlation-Data in the PUBLISH packet.
* Convert MQTT 5.0 User Property
* to AMQP 0.9.1 headers
* from AMQP 0.9.1 headers
* to AMQP 1.0 application properties and message annotations
* from AMQP 1.0 application properties and message annotations
* Make use of Annotations in mc_mqtt:protocol_state/2
mc_mqtt:protocol_state/2 includes Annotations as parameter.
It's cleaner to make use of these Annotations when computing the
protocol state instead of relying on the caller (rabbitmq_mqtt_processor)
to compute the protocol state.
* Enforce AMQP 0.9.1 field name length limit
The AMQP 0.9.1 spec prohibits field names longer than 128 characters.
Therefore, when converting AMQP 1.0 message annotations, application
properties or MQTT 5.0 User Property to AMQP 0.9.1 headers, drop any
names longer than 128 characters (a minimal sketch follows after this commit list).
* Fix type specs
Apply feedback from Michael Davis
Co-authored-by: Michael Davis <mcarsondavis@gmail.com>
* Add mc_mqtt unit test suite
Implement mc_mqtt:x_header/2
* Translate indicator that payload is UTF-8 encoded
when converting between MQTT 5.0 and AMQP 1.0
* Translate single amqp-value section from AMQP 1.0 to MQTT
Convert to a text representation, if possible, and indicate to MQTT
client that the payload is UTF-8 encoded. This way, the MQTT client will
be able to parse the payload.
If conversion to text representation is not possible, encode the payload
using the AMQP 1.0 type system and indicate the encoding via Content-Type
message/vnd.rabbitmq.amqp.
This Content-Type is not registered.
Type "message" makes sense since it's a message.
Vendor tree "vnd.rabbitmq.amqp" makes sense since merely subtype "amqp" is not
registered.
* Fix payload conversion
* Translate Response Topic between MQTT and AMQP
Translate MQTT 5.0 Response Topic to AMQP 1.0 reply-to address and vice
versa.
The Response Topic must be a UTF-8 encoded string.
This commit re-uses the already defined RabbitMQ target addresses:
```
"/topic/" RK Publish to amq.topic with routing key RK
"/exchange/" X "/" RK Publish to exchange X with routing key RK
```
By default, the MQTT topic exchange is configured to be amq.topic, using
the 1st target address.
When an operator modifies the mqtt.exchange, the 2nd target address is
used.
* Apply PR feedback
and fix formatting
Co-authored-by: Michael Davis <mcarsondavis@gmail.com>
* tidy up
* Add MQTT message_containers test
* consistent hash exchange: avoid amqp legacy conversion
When hashing on a header value.
* Avoid converting to amqp legacy when using exchange federation
* Fix test flake
* test and dialyzer fixes
* dialyzer fix
* Add MQTT protocol interoperability tests
Test receiving from and sending to MQTT 5.0 and
* AMQP 0.9.1
* AMQP 1.0
* STOMP
* Streams
* Regenerate portions of deps/rabbit/app.bzl with gazelle
I'm not exactly sure how this happened, but gazelle seems to have been
run with an older version of the rules_erlang gazelle extension at
some point. This caused generation of a structure that is no longer
used. This commit updates the structure to the current pattern.
* mc: refactoring
* mc_amqpl: handle delivery annotations
Just in case they are included.
Also use iolist_to_iovec to create flat list of binaries when
converting from amqp with amqp encoded payload.
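As referenced above, here is a minimal sketch (an assumption, not the plugin's actual conversion code) of dropping over-long header names when producing AMQP 0.9.1 headers:
```
%% Sketch only: AMQP 0.9.1 headers are {Name, Type, Value} tuples with a
%% binary name; drop any entry whose name exceeds 128 characters.
drop_long_header_names(Headers) ->
    [Header || {Name, _Type, _Value} = Header <- Headers,
               byte_size(Name) =< 128].
```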
---------
Co-authored-by: David Ansari <david.ansari@gmx.de>
Co-authored-by: Michael Davis <mcarsondavis@gmail.com>
Co-authored-by: Rin Kuryloski <kuryloskip@vmware.com>
- mc
- mc_amqp
- mc_amqpl
- mc_compat
- mc_util
- mirrored_supervisor
- mirrored_supervisor_sups
- pg_local
- pid_recomposition
- rabbit
- rabbit_access_control
- rabbit_alarm
- rabbit_amqqueue
- rabbit_amqqueue_control
- rabbit_amqqueue_process
- rabbit_amqqueue_sup
- rabbit_amqqueue_sup_sup
- rabbit_auth_backend_internal
- rabbit_auth_mechanism_amqplain
- rabbit_auth_mechanism_cr_demo
- rabbit_auth_mechanism_plain
- rabbit_autoheal
- rabbit_backing_queue
- rabbit_basic
- rabbit_binding
- rabbit_boot_steps
- rabbit_channel
- rabbit_channel_interceptor
- rabbit_channel_sup
- rabbit_channel_sup_sup
- rabbit_channel_tracking
- rabbit_channel_tracking_handler
- rabbit_classic_queue
- rabbit_classic_queue_index_v2
- rabbit_classic_queue_store_v2
- rabbit_client_sup
- rabbit_config
- rabbit_confirms
- rabbit_connection_helper_sup
- rabbit_connection_sup
- rabbit_connection_tracking
- rabbit_connection_tracking_handler
- rabbit_control_pbe
- rabbit_core_ff
- rabbit_core_metrics_gc
- rabbit_credential_validation
- rabbit_credential_validator
- rabbit_credential_validator_accept_everything
- rabbit_credential_validator_min_password_length
- rabbit_credential_validator_password_regexp
- rabbit_cuttlefish
- rabbit_db
- rabbit_db_binding
- rabbit_db_binding_m2k_converter
- rabbit_db_cluster
- rabbit_db_exchange
Allow to use Khepri database to store metadata instead of Mnesia
[Why]
Mnesia is a very powerful and convenient tool for Erlang applications:
it is a persistent disc-based database, it handles replication accross
multiple Erlang nodes and it is available out-of-the-box from the
Erlang/OTP distribution. RabbitMQ relies on Mnesia to manage all its
metadata:
* virtual hosts' properties
* intenal users
* queue, exchange and binding declarations (not queues data)
* runtime parameters and policies
* ...
Unfortunately Mnesia makes it difficult to handle network partition and,
as a consequence, the merge conflicts between Erlang nodes once the
network partition is resolved. RabbitMQ provides several partition
handling strategies but they are not bullet-proof. Users still hit
situations where it is a pain to repair a cluster following a network
partition.
[How]
@kjnilsson created Ra [1], a Raft consensus library that RabbitMQ
already uses successfully to implement quorum queues and streams for
instance. Those queues do not suffer from network partitions.
We created Khepri [2], a new persistent and replicated database engine
based on Ra and we want to use it in place of Mnesia in RabbitMQ to
solve the problems with network partitions.
This patch integrates Khepri as an experimental feature. When enabled,
RabbitMQ will store all its metadata in Khepri instead of Mnesia.
This change comes with behavior changes. While Khepri remains disabled,
you should see no changes to the behavior of RabbitMQ. If there are
changes, it is a bug. After Khepri is enabled, there are significant
changes of behavior that you should be aware of.
Because it is based on the Raft consensus algorithm, when there is a
network partition, only the cluster members that are in the partition
with at least `(Number of nodes in the cluster ÷ 2) + 1` number of nodes
can "make progress". In other words, only those nodes may write to the
Khepri database and read from the database and expect a consistent
result.
For instance in a cluster of 5 RabbitMQ nodes:
* If there are two partitions, one with 3 nodes, one with 2 nodes, only
the group of 3 nodes will be able to write to the database.
* If there are three partitions, two with 2 nodes, one with 1 node, none
of the group can write to the database.
Because the Khepri database will be used for all kind of metadata, it
means that RabbitMQ nodes that can't write to the database will be
unable to perform some operations. A list of operations and what to
expect is documented in the associated pull request and the RabbitMQ
website.
This requirement from Raft also affects the startup of RabbitMQ nodes in
a cluster. Indeed, at least a quorum number of nodes must be started at
once to allow nodes to become ready.
To enable Khepri, you need to enable the `khepri_db` feature flag:
rabbitmqctl enable_feature_flag khepri_db
When the `khepri_db` feature flag is enabled, the migration code
performs the following two tasks:
1. It synchronizes the Khepri cluster membership from the Mnesia
cluster. It uses `mnesia_to_khepri:sync_cluster_membership/1` from
the `khepri_mnesia_migration` application [3].
2. It copies data from relevant Mnesia tables to Khepri, doing some
conversion if necessary on the way. Again, it uses
`mnesia_to_khepri:copy_tables/4` from `khepri_mnesia_migration` to do
it.
This can be performed on a running standalone RabbitMQ node or cluster.
Data will be migrated from Mnesia to Khepri without any service
interruption. Note that during the migration, the performance may
decrease and the memory footprint may go up.
Because this feature flag is considered experimental, it is not enabled
by default even on a brand new RabbitMQ deployment.
More about the implementation details below:
In the past months, all accesses to Mnesia were isolated in a collection
of `rabbit_db*` modules. This is where the integration of Khepri mostly
takes place: we use a function called `rabbit_khepri:handle_fallback/1`
which selects the database and performs the query or the transaction.
Here is an example from `rabbit_db_vhost`:
* Up until RabbitMQ 3.12.x:
get(VHostName) when is_binary(VHostName) ->
get_in_mnesia(VHostName).
* Starting with RabbitMQ 3.13.0:
get(VHostName) when is_binary(VHostName) ->
rabbit_khepri:handle_fallback(
#{mnesia => fun() -> get_in_mnesia(VHostName) end,
khepri => fun() -> get_in_khepri(VHostName) end}).
This `rabbit_khepri:handle_fallback/1` function relies on two things:
1. the fact that the `khepri_db` feature flag is enabled, in which case
it always executes the Khepri-based variant.
2. the ability (or not) to read from and write to Mnesia tables otherwise.
Before the feature flag is enabled, or during the migration, the
function will try to execute the Mnesia-based variant. If it succeeds,
then it returns the result. If it fails because one or more Mnesia
tables can't be used, it restarts from scratch: it means the feature
flag is being enabled and depending on the outcome, either the
Mnesia-based variant will succeed (the feature flag couldn't be enabled)
or the feature flag will be marked as enabled and it will call the
Khepri-based variant. The meat of this function really lives in the
`khepri_mnesia_migration` application [3] and
`rabbit_khepri:handle_fallback/1` is a wrapper on top of it that knows
about the feature flag.
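To illustrate the mechanism, here is a rough, simplified sketch of that
fallback logic. It is not the actual `rabbit_khepri` code: the two helper
functions and the exact Mnesia error pattern are assumptions made for the
example.
-module(handle_fallback_sketch).
-export([handle_fallback/1]).

%% Conceptual sketch only; the real logic lives in rabbit_khepri and
%% khepri_mnesia_migration.
handle_fallback(#{mnesia := MnesiaFun, khepri := KhepriFun}) ->
    case khepri_db_enabled() of
        true ->
            KhepriFun();
        false ->
            try
                MnesiaFun()
            catch
                %% The Mnesia tables are unusable, most likely because the
                %% migration is in progress: wait for the feature flag to
                %% settle, then run the matching variant.
                exit:{aborted, {no_exists, _}} ->
                    case wait_for_khepri_db_flag() of
                        enabled  -> KhepriFun();
                        disabled -> MnesiaFun()
                    end
            end
    end.

%% Placeholders standing in for the real feature flag checks (assumed).
khepri_db_enabled() -> false.
wait_for_khepri_db_flag() -> disabled.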
However, some calls to the database do not depend on the existence of
Mnesia tables, such as functions where we need to learn about the
members of a cluster. For those, we can't rely on exceptions from
Mnesia. Therefore, we just look at the state of the feature flag to
determine which database to use. There are two situations though:
* Sometimes, we need the feature flag state query to block because the
function interested in it can't return a valid answer during the
migration. Here is an example:
case rabbit_khepri:is_enabled(RemoteNode) of
true -> can_join_using_khepri(RemoteNode);
false -> can_join_using_mnesia(RemoteNode)
end
* Sometimes, we need the feature flag state query to NOT block (for
instance because it would cause a deadlock). Here is an example:
case rabbit_khepri:get_feature_state() of
enabled -> members_using_khepri();
_ -> members_using_mnesia()
end
Direct accesses to Mnesia still exist. They are limited to code that is
specific to Mnesia, such as classic queue mirroring or network partition
handling strategies.
Now, to discover the Mnesia tables to migrate and how to migrate them,
we use an Erlang module attribute called
`rabbit_mnesia_tables_to_khepri_db` which indicates a list of Mnesia
tables and an associated converter module. Here is an example in the
`rabbitmq_recent_history_exchange` plugin:
-rabbit_mnesia_tables_to_khepri_db(
[{?RH_TABLE, rabbit_db_rh_exchange_m2k_converter}]).
The converter module — `rabbit_db_rh_exchange_m2k_converter` in this
example — is in fact a "sub" converter module called by
`rabbit_db_m2k_converter`. See the documentation of a `mnesia_to_khepri`
converter module to learn more about these modules.
[1] https://github.com/rabbitmq/ra
[2] https://github.com/rabbitmq/khepri
[3] https://github.com/rabbitmq/khepri_mnesia_migration
See #7206.
Co-authored-by: Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
Co-authored-by: Diana Parra Corbacho <dparracorbac@vmware.com>
Co-authored-by: Michael Davis <mcarsondavis@gmail.com>
2023-01-05 20:57:50 +08:00
|
|
|
- rabbit_db_exchange_m2k_converter
|
|
|
|
- rabbit_db_m2k_converter
|
2023-02-23 21:47:41 +08:00
|
|
|
- rabbit_db_maintenance
|
Allow using the Khepri database to store metadata instead of Mnesia
2023-01-05 20:57:50 +08:00
|
|
|
- rabbit_db_maintenance_m2k_converter
|
2023-02-23 21:47:41 +08:00
|
|
|
- rabbit_db_msup
|
Allow using the Khepri database to store metadata instead of Mnesia
2023-01-05 20:57:50 +08:00
|
|
|
- rabbit_db_msup_m2k_converter
|
2023-02-23 21:47:41 +08:00
|
|
|
- rabbit_db_policy
|
|
|
|
- rabbit_db_queue
|
Allow using the Khepri database to store metadata instead of Mnesia
2023-01-05 20:57:50 +08:00
|
|
|
- rabbit_db_queue_m2k_converter
|
2023-02-23 21:47:41 +08:00
|
|
|
- rabbit_db_rtparams
|
Allow using the Khepri database to store metadata instead of Mnesia
2023-01-05 20:57:50 +08:00
|
|
|
- rabbit_db_rtparams_m2k_converter
|
2023-02-23 21:47:41 +08:00
|
|
|
- rabbit_db_topic_exchange
|
|
|
|
- rabbit_db_user
|
Allow using the Khepri database to store metadata instead of Mnesia
2023-01-05 20:57:50 +08:00
|
|
|
- rabbit_db_user_m2k_converter
|
2023-02-23 21:47:41 +08:00
|
|
|
- rabbit_db_vhost
|
|
|
|
- rabbit_db_vhost_defaults
|
Allow using the Khepri database to store metadata instead of Mnesia
2023-01-05 20:57:50 +08:00
|
|
|
- rabbit_db_vhost_m2k_converter
|
2023-02-23 21:47:41 +08:00
|
|
|
- rabbit_dead_letter
|
|
|
|
- rabbit_definitions
|
|
|
|
- rabbit_definitions_hashing
|
|
|
|
- rabbit_definitions_import_https
|
|
|
|
- rabbit_definitions_import_local_filesystem
|
2023-12-13 12:02:34 +08:00
|
|
|
- rabbit_depr_ff_extra
|
Deprecated features: New module to manage deprecated features (!)
This introduces a way to declare deprecated features in the code, not
only in our communication. The new module allows disallowing the use of
a deprecated feature and/or warning the user when they rely on such a
feature.
[Why]
Currently, we only tell people about deprecated features through blog
posts and the mailing list. This might be insufficient to make our users
aware that a feature they use will be removed in a future version:
* They may not read our blog or mailing list
* They may not understand that they use such a deprecated feature
* They might wait for the big removal before they plan testing
* They might not take it seriously enough
The idea behind this patch is to increase the chance that users notice
that they are using something which is about to be dropped from
RabbitMQ. Another benefit is that they should be able to test how
RabbitMQ will behave in the future before the actual removal. This
should allow them to test and plan changes.
[How]
When a feature is deprecated in other large projects (such as FreeBSD
where I took the idea from), it goes through a lifecycle:
1. The feature is still available, but users get a warning somehow when
they use it. They can disable it to test.
2. The feature is still available, but disabled out-of-the-box. Users
can re-enable it (and get a warning).
3. The feature is disconnected from the build. Therefore, the code
behind it is still there, but users have to recompile the thing to be
able to use it.
4. The feature is removed from the source code. Users have to adapt or
they can't upgrade anymore.
The solution in this patch offers the same lifecycle. A deprecated
feature will be in one of these deprecation phases:
1. `permitted_by_default`: The feature is available. Users get a warning
if they use it. They can disable it from the configuration.
2. `denied_by_default`: The feature is available but disabled by
default. Users get an error if they use it and RabbitMQ behaves like
the feature is removed. They can re-enable it from the configuration
and get a warning.
3. `disconnected`: The feature is present in the source code, but is
disabled and can't be re-enabled without recompiling RabbitMQ. Users
get the same behavior as if the code was removed.
4. `removed`: The feature's code is gone.
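For reference, the four phases above can be written down as a tiny Erlang
sketch (the module, type and function names are ours, not part of
RabbitMQ):
-module(deprecation_phase_sketch).
-export([phases/0]).
-export_type([phase/0]).

-type phase() :: permitted_by_default
               | denied_by_default
               | disconnected
               | removed.

%% The phases in the order a deprecated feature moves through them.
-spec phases() -> [phase()].
phases() ->
    [permitted_by_default, denied_by_default, disconnected, removed].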
The whole thing is based on the feature flags subsystem, but it has the
following differences with other feature flags:
* The semantics are reversed: the feature flag behind a deprecated feature
is disabled when the deprecated feature is permitted, or enabled when
the deprecated feature is denied.
* The feature flag behind a deprecated feature is enabled out-of-the-box
(meaning the deprecated feature is denied):
* if the deprecation phase is `permitted_by_default` and the
configuration denies the deprecated feature
* if the deprecation phase is `denied_by_default` and the
configuration doesn't permit the deprecated feature
* if the deprecation phase is `disconnected` or `removed`
* Feature flags behind deprecated features don't appear in feature flag
listings.
Otherwise, deprecated features' feature flags are managed like other
feature flags, in particular inside clusters.
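As a reading aid, the out-of-the-box rules listed above can be condensed
into a small sketch (again ours, not RabbitMQ's actual logic), where
`ConfigPermit` is `true`, `false` or `undefined` depending on what the
configuration says:
-module(deprecated_flag_default_sketch).
-export([flag_enabled_by_default/2]).

%% Returns true when the feature flag backing the deprecated feature is
%% enabled out of the box, i.e. the deprecated feature is denied.
flag_enabled_by_default(permitted_by_default, ConfigPermit) ->
    %% Enabled only if the configuration explicitly denies the feature.
    ConfigPermit =:= false;
flag_enabled_by_default(denied_by_default, ConfigPermit) ->
    %% Enabled unless the configuration explicitly permits the feature.
    ConfigPermit =/= true;
flag_enabled_by_default(disconnected, _ConfigPermit) ->
    true;
flag_enabled_by_default(removed, _ConfigPermit) ->
    true.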
To declare a deprecated feature:
-rabbit_deprecated_feature(
{my_deprecated_feature,
#{deprecation_phase => permitted_by_default,
msgs => #{when_permitted => "This feature will be removed in RabbitMQ X.0"}
}}).
Then, to check the state of a deprecated feature in the code:
case rabbit_deprecated_features:is_permitted(my_deprecated_feature) of
true ->
%% The deprecated feature is still permitted.
ok;
false ->
%% The deprecated feature is gone or should be considered
%% unavailable.
error
end.
Warnings and errors are logged automatically. A default message is
generated, but it is possible to define a custom message in the deprecated
feature declaration, as in the example above.
Here is an example of a logged warning that was generated automatically:
Feature `my_deprecated_feature` is deprecated.
By default, this feature can still be used for now.
Its use will not be permitted by default in a future minor RabbitMQ version and the feature will be removed from a future major RabbitMQ version; actual versions to be determined.
To continue using this feature when it is not permitted by default, set the following parameter in your configuration:
"deprecated_features.permit.my_deprecated_feature = true"
To test RabbitMQ as if the feature was removed, set this in your configuration:
"deprecated_features.permit.my_deprecated_feature = false"
To override the default state of `permitted_by_default` and
`denied_by_default` deprecation phases, users can set the following
configuration:
# In rabbitmq.conf:
deprecated_features.permit.my_deprecated_feature = true # or false
The actual behavior protected by a deprecated feature check is out of
scope for this subsystem. It is the responsibility of each deprecated
feature code to determine what to do when the deprecated feature is
denied.
V1: Deprecated feature states are initially computed during the
initialization of the registry, based on their deprecation phase and
possibly the configuration. They don't go through the `enable/1`
code at all.
V2: Manage deprecated feature states like any other non-required
feature flag. This allows executing an `is_feature_used()`
callback to determine if a deprecated feature can be denied. It
also allows preventing the RabbitMQ node from starting if it
continues to use a deprecated feature.
V3: Manage deprecated feature states from the registry initialization
again. This is required because we need to know very early if some
of them are denied, so that an upgrade to a version of RabbitMQ
where a deprecated feature is disconnected or removed can be
performed.
To still prevent the start of a RabbitMQ node when a denied
deprecated feature is actively used, we run the `is_feature_used()`
callback of all denied deprecated features as part of the
`sync_cluster()` task. This task is executed as part of a feature
flag refresh executed when RabbitMQ starts or when plugins are
enabled. So even though a deprecated feature is marked as denied in
the registry early in the boot process, we will still abort the
start of a RabbitMQ node if the feature is used.
V4: Support context-dependent warnings. It is now possible to set a
specific message when a deprecated feature is permitted, when it is
denied and when it is removed. Generic per-context messages are
still generated.
V5: Improve default warning messages, thanks to @pstack2021.
V6: Rename the configuration variable from `permit_deprecated_features.*`
to `deprecated_features.permit.*`. As @michaelklishin said, we tend
to use shorter top-level names.
2023-02-23 00:26:52 +08:00
|
|
|
- rabbit_deprecated_features
|
2023-02-23 21:47:41 +08:00
|
|
|
- rabbit_diagnostics
|
|
|
|
- rabbit_direct
|
|
|
|
- rabbit_direct_reply_to
|
|
|
|
- rabbit_disk_monitor
|
|
|
|
- rabbit_epmd_monitor
|
|
|
|
- rabbit_event_consumer
|
|
|
|
- rabbit_exchange
|
|
|
|
- rabbit_exchange_decorator
|
|
|
|
- rabbit_exchange_parameters
|
|
|
|
- rabbit_exchange_type
|
|
|
|
- rabbit_exchange_type_direct
|
|
|
|
- rabbit_exchange_type_fanout
|
|
|
|
- rabbit_exchange_type_headers
|
|
|
|
- rabbit_exchange_type_invalid
|
|
|
|
- rabbit_exchange_type_topic
|
|
|
|
- rabbit_feature_flags
|
|
|
|
- rabbit_ff_controller
|
|
|
|
- rabbit_ff_extra
|
|
|
|
- rabbit_ff_registry
|
|
|
|
- rabbit_ff_registry_factory
|
2023-05-23 12:02:30 +08:00
|
|
|
- rabbit_ff_registry_wrapper
|
2023-02-23 21:47:41 +08:00
|
|
|
- rabbit_fhc_helpers
|
|
|
|
- rabbit_fifo
|
|
|
|
- rabbit_fifo_client
|
|
|
|
- rabbit_fifo_dlx
|
|
|
|
- rabbit_fifo_dlx_client
|
|
|
|
- rabbit_fifo_dlx_sup
|
|
|
|
- rabbit_fifo_dlx_worker
|
|
|
|
- rabbit_fifo_index
|
|
|
|
- rabbit_fifo_v0
|
|
|
|
- rabbit_fifo_v1
|
|
|
|
- rabbit_file
|
|
|
|
- rabbit_global_counters
|
|
|
|
- rabbit_guid
|
|
|
|
- rabbit_health_check
|
Allow using the Khepri database to store metadata instead of Mnesia
[Why]
Mnesia is a very powerful and convenient tool for Erlang applications:
it is a persistent disc-based database, it handles replication across
multiple Erlang nodes and it is available out-of-the-box from the
Erlang/OTP distribution. RabbitMQ relies on Mnesia to manage all its
metadata:
* virtual hosts' properties
* internal users
* queue, exchange and binding declarations (not queues data)
* runtime parameters and policies
* ...
Unfortunately, Mnesia makes it difficult to handle network partitions and,
as a consequence, the merge conflicts between Erlang nodes once the
network partition is resolved. RabbitMQ provides several partition
handling strategies but they are not bullet-proof. Users still hit
situations where it is a pain to repair a cluster following a network
partition.
[How]
@kjnilsson created Ra [1], a Raft consensus library that RabbitMQ
already uses successfully to implement quorum queues and streams for
instance. Those queues do not suffer from network partitions.
We created Khepri [2], a new persistent and replicated database engine
based on Ra and we want to use it in place of Mnesia in RabbitMQ to
solve the problems with network partitions.
This patch integrates Khepri as an experimental feature. When enabled,
RabbitMQ will store all its metadata in Khepri instead of Mnesia.
This change comes with behavior changes. While Khepri remains disabled,
you should see no changes to the behavior of RabbitMQ. If there are
changes, it is a bug. After Khepri is enabled, there are significant
changes of behavior that you should be aware of.
Because it is based on the Raft consensus algorithm, when there is a
network partition, only the cluster members that are in the partition
with at least `(Number of nodes in the cluster ÷ 2) + 1` nodes
can "make progress". In other words, only those nodes may write to the
Khepri database and read from the database and expect a consistent
result.
For instance in a cluster of 5 RabbitMQ nodes:
* If there are two partitions, one with 3 nodes, one with 2 nodes, only
the group of 3 nodes will be able to write to the database.
* If there are three partitions, two with 2 nodes, one with 1 node, none
of the groups can write to the database.
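To make the arithmetic above concrete, here is a minimal sketch (not part of the patch) of the majority size Raft requires before a partition can make progress:
```
%% Minimal sketch, not part of the patch: the quorum ("majority") size.
majority(NumberOfNodes) ->
    NumberOfNodes div 2 + 1.

%% majority(5) =:= 3: in a 5-node cluster, the 3-node side of a partition
%% can still write, while 2-node and 1-node partitions cannot.
```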
Because the Khepri database will be used for all kinds of metadata, it
means that RabbitMQ nodes that can't write to the database will be
unable to perform some operations. A list of operations and what to
expect is documented in the associated pull request and the RabbitMQ
website.
This requirement from Raft also affects the startup of RabbitMQ nodes in
a cluster. Indeed, at least a quorum number of nodes must be started at
once to allow nodes to become ready.
To enable Khepri, you need to enable the `khepri_db` feature flag:
rabbitmqctl enable_feature_flag khepri_db
When the `khepri_db` feature flag is enabled, the migration code
performs the following two tasks:
1. It synchronizes the Khepri cluster membership from the Mnesia
cluster. It uses `mnesia_to_khepri:sync_cluster_membership/1` from
the `khepri_mnesia_migration` application [3].
2. It copies data from relevant Mnesia tables to Khepri, doing some
conversion if necessary on the way. Again, it uses
`mnesia_to_khepri:copy_tables/4` from `khepri_mnesia_migration` to do
it.
This can be performed on a running standalone RabbitMQ node or cluster.
Data will be migrated from Mnesia to Khepri without any service
interruption. Note that during the migration, the performance may
decrease and the memory footprint may go up.
Because this feature flag is considered experimental, it is not enabled
by default even on a brand new RabbitMQ deployment.
More about the implementation details below:
In the past months, all accesses to Mnesia were isolated in a collection
of `rabbit_db*` modules. This is where the integration of Khepri mostly
takes place: we use a function called `rabbit_khepri:handle_fallback/1`
which selects the database and performs the query or the transaction.
Here is an example from `rabbit_db_vhost`:
* Up until RabbitMQ 3.12.x:
get(VHostName) when is_binary(VHostName) ->
get_in_mnesia(VHostName).
* Starting with RabbitMQ 3.13.0:
get(VHostName) when is_binary(VHostName) ->
rabbit_khepri:handle_fallback(
#{mnesia => fun() -> get_in_mnesia(VHostName) end,
khepri => fun() -> get_in_khepri(VHostName) end}).
This `rabbit_khepri:handle_fallback/1` function relies on two things:
1. the fact that the `khepri_db` feature flag is enabled, in which case
it always executes the Khepri-based variant.
2. the ability, or not, to read from and write to Mnesia tables otherwise.
Before the feature flag is enabled, or during the migration, the
function will try to execute the Mnesia-based variant. If it succeeds,
then it returns the result. If it fails because one or more Mnesia
tables can't be used, it restarts from scratch: it means the feature
flag is being enabled and depending on the outcome, either the
Mnesia-based variant will succeed (the feature flag couldn't be enabled)
or the feature flag will be marked as enabled and it will call the
Khepri-based variant. The meat of this function really lives in the
`khepri_mnesia_migration` application [3] and
`rabbit_khepri:handle_fallback/1` is a wrapper on top of it that knows
about the feature flag.
However, some calls to the database do not depend on the existence of
Mnesia tables, such as functions where we need to learn about the
members of a cluster. For those, we can't rely on exceptions from
Mnesia. Therefore, we just look at the state of the feature flag to
determine which database to use. There are two situations though:
* Sometimes, we need the feature flag state query to block because the
function interested in it can't return a valid answer during the
migration. Here is an example:
case rabbit_khepri:is_enabled(RemoteNode) of
true -> can_join_using_khepri(RemoteNode);
false -> can_join_using_mnesia(RemoteNode)
end
* Sometimes, we need the feature flag state query to NOT block (for
instance because it would cause a deadlock). Here is an example:
case rabbit_khepri:get_feature_state() of
enabled -> members_using_khepri();
_ -> members_using_mnesia()
end
Direct accesses to Mnesia still exist. They are limited to code that is
specific to Mnesia such as classic queue mirroring or network partitions
handling strategies.
Now, to discover the Mnesia tables to migrate and how to migrate them,
we use an Erlang module attribute called
`rabbit_mnesia_tables_to_khepri_db` which indicates a list of Mnesia
tables and an associated converter module. Here is an example in the
`rabbitmq_recent_history_exchange` plugin:
-rabbit_mnesia_tables_to_khepri_db(
[{?RH_TABLE, rabbit_db_rh_exchange_m2k_converter}]).
The converter module — `rabbit_db_rh_exchange_m2k_converter` in this
example — is in fact a "sub" converter module called by
`rabbit_db_m2k_converter`. See the documentation of a `mnesia_to_khepri`
converter module to learn more about these modules.
[1] https://github.com/rabbitmq/ra
[2] https://github.com/rabbitmq/khepri
[3] https://github.com/rabbitmq/khepri_mnesia_migration
See #7206.
Co-authored-by: Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
Co-authored-by: Diana Parra Corbacho <dparracorbac@vmware.com>
Co-authored-by: Michael Davis <mcarsondavis@gmail.com>
2023-01-05 20:57:50 +08:00
|
|
|
- rabbit_khepri
|
2023-02-23 21:47:41 +08:00
|
|
|
- rabbit_limiter
|
|
|
|
- rabbit_log_channel
|
|
|
|
- rabbit_log_connection
|
|
|
|
- rabbit_log_mirroring
|
|
|
|
- rabbit_log_prelaunch
|
|
|
|
- rabbit_log_queue
|
|
|
|
- rabbit_log_tail
|
|
|
|
- rabbit_logger_exchange_h
|
|
|
|
- rabbit_looking_glass
|
|
|
|
- rabbit_maintenance
|
|
|
|
- rabbit_memory_monitor
|
Move plugin rabbitmq-message-timestamp to the core
As reported in https://groups.google.com/g/rabbitmq-users/c/x8ACs4dBlkI/
plugins that implement rabbit_channel_interceptor break with
Native MQTT in 3.12 because Native MQTT does not use rabbit_channel anymore.
Specifically, these plugins don't work anymore in 3.12 when sending a message
from an MQTT publisher to an AMQP 0.9.1 consumer.
Two of these plugins are
https://github.com/rabbitmq/rabbitmq-message-timestamp
and
https://github.com/rabbitmq/rabbitmq-routing-node-stamp
This commit moves both plugins into rabbitmq-server.
Therefore, these plugins are deprecated starting in 3.12.
Instead of using these plugins, the user gets the same behaviour by
configuring rabbitmq.conf as follows:
```
incoming_message_interceptors.set_header_timestamp.overwrite = false
incoming_message_interceptors.set_header_routing_node.overwrite = false
```
While the two plugins could not be used together, this commit
allows setting both headers.
We name the top level configuration key `incoming_message_interceptors`
because only incoming messages are intercepted.
Currently, only `set_header_timestamp` and `set_header_routing_node` are
supported. (We might support more in the future.)
Both can set `overwrite` to `false` or `true`.
The meaning of `overwrite` is the same as documented in
https://github.com/rabbitmq/rabbitmq-message-timestamp#always-overwrite-timestamps
i.e. whether headers should be overwritten if they are already present
in the message.
Both `set_header_timestamp` and `set_header_routing_node` behave exactly
like the `rabbitmq-message-timestamp` and `rabbitmq-routing-node-stamp` plugins,
respectively.
Upon node boot, the configuration is stored in persistent_term so that it
causes no performance penalty in the default case where these settings
are disabled.
The channel and MQTT connection processes will intercept incoming messages
and, if configured, add the desired AMQP 0.9.1 headers.
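A minimal sketch of the lookup pattern this enables; the persistent_term key and the shape of the stored value are assumptions for illustration, not the actual implementation:
```
%% Minimal sketch, assuming a hypothetical key and value shape; it only
%% illustrates why the disabled (default) case is essentially free.
configured_interceptors() ->
    %% persistent_term:get/2 is a constant-time, copy-free read, so when no
    %% interceptor is configured the per-message overhead is negligible.
    persistent_term:get(incoming_message_interceptors, []).
```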
For now, this allows using Native MQTT in 3.12 with the old plugins'
behaviour.
In the future, once "message containers" are implemented,
we can think about more generic message interceptors where plugins can be
written to modify arbitrary headers or message contents for various protocols.
Likewise, in the future, once MQTT 5.0 is implemented, we can think
about an MQTT connection interceptor which could function similarly to a
`rabbit_channel_interceptor`, allowing modification of any MQTT packet.
2023-05-12 22:12:50 +08:00
|
|
|
- rabbit_message_interceptor
|
2023-02-23 21:47:41 +08:00
|
|
|
- rabbit_metrics
|
|
|
|
- rabbit_mirror_queue_coordinator
|
|
|
|
- rabbit_mirror_queue_master
|
|
|
|
- rabbit_mirror_queue_misc
|
|
|
|
- rabbit_mirror_queue_mode
|
|
|
|
- rabbit_mirror_queue_mode_all
|
|
|
|
- rabbit_mirror_queue_mode_exactly
|
|
|
|
- rabbit_mirror_queue_mode_nodes
|
|
|
|
- rabbit_mirror_queue_slave
|
|
|
|
- rabbit_mirror_queue_sync
|
|
|
|
- rabbit_mnesia
|
|
|
|
- rabbit_msg_record
|
|
|
|
- rabbit_msg_store
|
|
|
|
- rabbit_msg_store_ets_index
|
|
|
|
- rabbit_msg_store_gc
|
|
|
|
- rabbit_networking
|
|
|
|
- rabbit_networking_store
|
|
|
|
- rabbit_node_monitor
|
|
|
|
- rabbit_nodes
|
|
|
|
- rabbit_observer_cli
|
|
|
|
- rabbit_observer_cli_classic_queues
|
2023-05-19 00:25:08 +08:00
|
|
|
- rabbit_observer_cli_quorum_queues
|
2023-02-23 21:47:41 +08:00
|
|
|
- rabbit_osiris_metrics
|
|
|
|
- rabbit_parameter_validation
|
|
|
|
- rabbit_peer_discovery
|
|
|
|
- rabbit_peer_discovery_classic_config
|
|
|
|
- rabbit_peer_discovery_dns
|
|
|
|
- rabbit_plugins
|
|
|
|
- rabbit_policies
|
|
|
|
- rabbit_policy
|
|
|
|
- rabbit_policy_merge_strategy
|
|
|
|
- rabbit_prelaunch_cluster
|
|
|
|
- rabbit_prelaunch_enabled_plugins_file
|
|
|
|
- rabbit_prelaunch_feature_flags
|
|
|
|
- rabbit_prelaunch_logging
|
|
|
|
- rabbit_prequeue
|
|
|
|
- rabbit_priority_queue
|
|
|
|
- rabbit_process
|
|
|
|
- rabbit_queue_consumers
|
|
|
|
- rabbit_queue_decorator
|
|
|
|
- rabbit_queue_index
|
|
|
|
- rabbit_queue_location
|
|
|
|
- rabbit_queue_location_client_local
|
|
|
|
- rabbit_queue_location_min_masters
|
|
|
|
- rabbit_queue_location_random
|
|
|
|
- rabbit_queue_location_validator
|
|
|
|
- rabbit_queue_master_location_misc
|
|
|
|
- rabbit_queue_master_locator
|
|
|
|
- rabbit_queue_type
|
|
|
|
- rabbit_queue_type_util
|
|
|
|
- rabbit_quorum_memory_manager
|
|
|
|
- rabbit_quorum_queue
|
2023-05-17 08:06:01 +08:00
|
|
|
- rabbit_quorum_queue_periodic_membership_reconciliation
|
2023-02-23 21:47:41 +08:00
|
|
|
- rabbit_ra_registry
|
|
|
|
- rabbit_ra_systems
|
|
|
|
- rabbit_reader
|
|
|
|
- rabbit_recovery_terms
|
|
|
|
- rabbit_release_series
|
|
|
|
- rabbit_restartable_sup
|
|
|
|
- rabbit_router
|
|
|
|
- rabbit_runtime_parameters
|
|
|
|
- rabbit_ssl
|
|
|
|
- rabbit_stream_coordinator
|
|
|
|
- rabbit_stream_queue
|
|
|
|
- rabbit_stream_sac_coordinator
|
|
|
|
- rabbit_sup
|
|
|
|
- rabbit_sysmon_handler
|
|
|
|
- rabbit_sysmon_minder
|
|
|
|
- rabbit_table
|
|
|
|
- rabbit_time_travel_dbg
|
|
|
|
- rabbit_trace
|
|
|
|
- rabbit_tracking
|
|
|
|
- rabbit_tracking_store
|
|
|
|
- rabbit_upgrade_preparation
|
|
|
|
- rabbit_variable_queue
|
|
|
|
- rabbit_version
|
|
|
|
- rabbit_vhost
|
|
|
|
- rabbit_vhost_limit
|
|
|
|
- rabbit_vhost_msg_store
|
|
|
|
- rabbit_vhost_process
|
|
|
|
- rabbit_vhost_sup
|
|
|
|
- rabbit_vhost_sup_sup
|
|
|
|
- rabbit_vhost_sup_wrapper
|
|
|
|
- rabbit_vm
|
|
|
|
- supervised_lifecycle
|
|
|
|
- tcp_listener
|
|
|
|
- tcp_listener_sup
|
|
|
|
- term_to_binary_compat
|
|
|
|
- vhost
|
|
|
|
rabbit_common:
|
|
|
|
- app_utils
|
|
|
|
- code_version
|
|
|
|
- credit_flow
|
|
|
|
- delegate
|
|
|
|
- delegate_sup
|
|
|
|
- file_handle_cache
|
|
|
|
- file_handle_cache_stats
|
|
|
|
- gen_server2
|
|
|
|
- mirrored_supervisor_locks
|
|
|
|
- mnesia_sync
|
|
|
|
- pmon
|
|
|
|
- priority_queue
|
|
|
|
- rabbit_amqp_connection
|
|
|
|
- rabbit_amqqueue_common
|
|
|
|
- rabbit_auth_backend_dummy
|
|
|
|
- rabbit_auth_mechanism
|
|
|
|
- rabbit_authn_backend
|
|
|
|
- rabbit_authz_backend
|
|
|
|
- rabbit_basic_common
|
|
|
|
- rabbit_binary_generator
|
|
|
|
- rabbit_binary_parser
|
|
|
|
- rabbit_cert_info
|
|
|
|
- rabbit_channel_common
|
|
|
|
- rabbit_command_assembler
|
|
|
|
- rabbit_control_misc
|
|
|
|
- rabbit_core_metrics
|
|
|
|
- rabbit_data_coercion
|
|
|
|
- rabbit_date_time
|
|
|
|
- rabbit_env
|
|
|
|
- rabbit_error_logger_handler
|
|
|
|
- rabbit_event
|
|
|
|
- rabbit_framing
|
|
|
|
- rabbit_framing_amqp_0_8
|
|
|
|
- rabbit_framing_amqp_0_9_1
|
|
|
|
- rabbit_heartbeat
|
|
|
|
- rabbit_http_util
|
|
|
|
- rabbit_json
|
|
|
|
- rabbit_log
|
|
|
|
- rabbit_misc
|
|
|
|
- rabbit_msg_store_index
|
|
|
|
- rabbit_net
|
|
|
|
- rabbit_nodes_common
|
|
|
|
- rabbit_numerical
|
|
|
|
- rabbit_password
|
|
|
|
- rabbit_password_hashing
|
|
|
|
- rabbit_password_hashing_md5
|
|
|
|
- rabbit_password_hashing_sha256
|
|
|
|
- rabbit_password_hashing_sha512
|
|
|
|
- rabbit_pbe
|
|
|
|
- rabbit_peer_discovery_backend
|
|
|
|
- rabbit_policy_validator
|
|
|
|
- rabbit_queue_collector
|
|
|
|
- rabbit_registry
|
|
|
|
- rabbit_registry_class
|
|
|
|
- rabbit_resource_monitor_misc
|
|
|
|
- rabbit_runtime
|
|
|
|
- rabbit_runtime_parameter
|
|
|
|
- rabbit_semver
|
|
|
|
- rabbit_semver_parser
|
|
|
|
- rabbit_ssl_options
|
|
|
|
- rabbit_types
|
|
|
|
- rabbit_writer
|
|
|
|
- supervisor2
|
|
|
|
- vm_memory_monitor
|
|
|
|
- worker_pool
|
|
|
|
- worker_pool_sup
|
|
|
|
- worker_pool_worker
|
|
|
|
rabbitmq_amqp1_0:
|
|
|
|
- Elixir.RabbitMQ.CLI.Ctl.Commands.ListAmqp10ConnectionsCommand
|
|
|
|
- rabbit_amqp1_0
|
|
|
|
- rabbit_amqp1_0_channel
|
|
|
|
- rabbit_amqp1_0_incoming_link
|
|
|
|
- rabbit_amqp1_0_link_util
|
|
|
|
- rabbit_amqp1_0_message
|
|
|
|
- rabbit_amqp1_0_outgoing_link
|
|
|
|
- rabbit_amqp1_0_reader
|
|
|
|
- rabbit_amqp1_0_session
|
|
|
|
- rabbit_amqp1_0_session_process
|
|
|
|
- rabbit_amqp1_0_session_sup
|
|
|
|
- rabbit_amqp1_0_session_sup_sup
|
|
|
|
- rabbit_amqp1_0_util
|
|
|
|
- rabbit_amqp1_0_writer
|
|
|
|
rabbitmq_auth_backend_cache:
|
|
|
|
- rabbit_auth_backend_cache
|
|
|
|
- rabbit_auth_backend_cache_app
|
|
|
|
- rabbit_auth_cache
|
|
|
|
- rabbit_auth_cache_dict
|
|
|
|
- rabbit_auth_cache_ets
|
|
|
|
- rabbit_auth_cache_ets_segmented
|
|
|
|
- rabbit_auth_cache_ets_segmented_stateless
|
|
|
|
rabbitmq_auth_backend_http:
|
|
|
|
- rabbit_auth_backend_http
|
|
|
|
- rabbit_auth_backend_http_app
|
|
|
|
rabbitmq_auth_backend_ldap:
|
|
|
|
- rabbit_auth_backend_ldap
|
|
|
|
- rabbit_auth_backend_ldap_app
|
|
|
|
- rabbit_auth_backend_ldap_util
|
|
|
|
- rabbit_log_ldap
|
|
|
|
rabbitmq_auth_backend_oauth2:
|
2024-02-15 01:55:39 +08:00
|
|
|
- Elixir.RabbitMQ.CLI.Ctl.Commands.AddSigningKeyCommand
|
2023-02-23 21:47:41 +08:00
|
|
|
- Elixir.RabbitMQ.CLI.Ctl.Commands.AddUaaKeyCommand
|
|
|
|
- rabbit_auth_backend_oauth2
|
|
|
|
- rabbit_auth_backend_oauth2_app
|
2024-01-03 16:28:36 +08:00
|
|
|
- rabbit_oauth2_config
|
2023-02-23 21:47:41 +08:00
|
|
|
- rabbit_oauth2_scope
|
|
|
|
- uaa_jwks
|
|
|
|
- uaa_jwt
|
|
|
|
- uaa_jwt_jwk
|
|
|
|
- uaa_jwt_jwt
|
|
|
|
- wildcard
|
|
|
|
rabbitmq_auth_mechanism_ssl:
|
|
|
|
- rabbit_auth_mechanism_ssl
|
|
|
|
- rabbit_auth_mechanism_ssl_app
|
|
|
|
rabbitmq_aws:
|
|
|
|
- rabbitmq_aws
|
|
|
|
- rabbitmq_aws_app
|
|
|
|
- rabbitmq_aws_config
|
|
|
|
- rabbitmq_aws_json
|
|
|
|
- rabbitmq_aws_sign
|
|
|
|
- rabbitmq_aws_sup
|
|
|
|
- rabbitmq_aws_urilib
|
|
|
|
- rabbitmq_aws_xml
|
|
|
|
rabbitmq_consistent_hash_exchange:
|
|
|
|
- Elixir.RabbitMQ.CLI.Diagnostics.Commands.ConsistentHashExchangeRingStateCommand
|
|
|
|
- rabbit_db_ch_exchange
|
Allow using the Khepri database to store metadata instead of Mnesia
[Why]
Mnesia is a very powerful and convenient tool for Erlang applications:
it is a persistent disc-based database, it handles replication across
multiple Erlang nodes and it is available out-of-the-box from the
Erlang/OTP distribution. RabbitMQ relies on Mnesia to manage all its
metadata:
* virtual hosts' properties
* internal users
* queue, exchange and binding declarations (not queues data)
* runtime parameters and policies
* ...
Unfortunately, Mnesia makes it difficult to handle network partitions and,
as a consequence, the merge conflicts between Erlang nodes once the
network partition is resolved. RabbitMQ provides several partition
handling strategies but they are not bullet-proof. Users still hit
situations where it is a pain to repair a cluster following a network
partition.
[How]
@kjnilsson created Ra [1], a Raft consensus library that RabbitMQ
already uses successfully to implement quorum queues and streams for
instance. Those queues do not suffer from network partitions.
We created Khepri [2], a new persistent and replicated database engine
based on Ra and we want to use it in place of Mnesia in RabbitMQ to
solve the problems with network partitions.
This patch integrates Khepri as an experimental feature. When enabled,
RabbitMQ will store all its metadata in Khepri instead of Mnesia.
This change comes with behavior changes. While Khepri remains disabled,
you should see no changes to the behavior of RabbitMQ. If there are
changes, it is a bug. After Khepri is enabled, there are significant
changes of behavior that you should be aware of.
Because it is based on the Raft consensus algorithm, when there is a
network partition, only the cluster members that are in the partition
with at least `(Number of nodes in the cluster ÷ 2) + 1` nodes
can "make progress". In other words, only those nodes may write to the
Khepri database and read from the database and expect a consistent
result.
For instance in a cluster of 5 RabbitMQ nodes:
* If there are two partitions, one with 3 nodes, one with 2 nodes, only
the group of 3 nodes will be able to write to the database.
* If there are three partitions, two with 2 nodes, one with 1 node, none
of the groups can write to the database.
Because the Khepri database will be used for all kinds of metadata, it
means that RabbitMQ nodes that can't write to the database will be
unable to perform some operations. A list of operations and what to
expect is documented in the associated pull request and the RabbitMQ
website.
This requirement from Raft also affects the startup of RabbitMQ nodes in
a cluster. Indeed, at least a quorum number of nodes must be started at
once to allow nodes to become ready.
To enable Khepri, you need to enable the `khepri_db` feature flag:
rabbitmqctl enable_feature_flag khepri_db
When the `khepri_db` feature flag is enabled, the migration code
performs the following two tasks:
1. It synchronizes the Khepri cluster membership from the Mnesia
cluster. It uses `mnesia_to_khepri:sync_cluster_membership/1` from
the `khepri_mnesia_migration` application [3].
2. It copies data from relevant Mnesia tables to Khepri, doing some
conversion if necessary on the way. Again, it uses
`mnesia_to_khepri:copy_tables/4` from `khepri_mnesia_migration` to do
it.
This can be performed on a running standalone RabbitMQ node or cluster.
Data will be migrated from Mnesia to Khepri without any service
interruption. Note that during the migration, the performance may
decrease and the memory footprint may go up.
Because this feature flag is considered experimental, it is not enabled
by default even on a brand new RabbitMQ deployment.
More about the implementation details below:
In the past months, all accesses to Mnesia were isolated in a collection
of `rabbit_db*` modules. This is where the integration of Khepri mostly
takes place: we use a function called `rabbit_khepri:handle_fallback/1`
which selects the database and performs the query or the transaction.
Here is an example from `rabbit_db_vhost`:
* Up until RabbitMQ 3.12.x:
get(VHostName) when is_binary(VHostName) ->
get_in_mnesia(VHostName).
* Starting with RabbitMQ 3.13.0:
get(VHostName) when is_binary(VHostName) ->
rabbit_khepri:handle_fallback(
#{mnesia => fun() -> get_in_mnesia(VHostName) end,
khepri => fun() -> get_in_khepri(VHostName) end}).
This `rabbit_khepri:handle_fallback/1` function relies on two things:
1. the fact that the `khepri_db` feature flag is enabled, in which case
it always executes the Khepri-based variant.
2. the ability, or not, to read from and write to Mnesia tables otherwise.
Before the feature flag is enabled, or during the migration, the
function will try to execute the Mnesia-based variant. If it succeeds,
then it returns the result. If it fails because one or more Mnesia
tables can't be used, it restarts from scratch: it means the feature
flag is being enabled and depending on the outcome, either the
Mnesia-based variant will succeed (the feature flag couldn't be enabled)
or the feature flag will be marked as enabled and it will call the
Khepri-based variant. The meat of this function really lives in the
`khepri_mnesia_migration` application [3] and
`rabbit_khepri:handle_fallback/1` is a wrapper on top of it that knows
about the feature flag.
However, some calls to the database do not depend on the existence of
Mnesia tables, such as functions where we need to learn about the
members of a cluster. For those, we can't rely on exceptions from
Mnesia. Therefore, we just look at the state of the feature flag to
determine which database to use. There are two situations though:
* Sometimes, we need the feature flag state query to block because the
function interested in it can't return a valid answer during the
migration. Here is an example:
case rabbit_khepri:is_enabled(RemoteNode) of
true -> can_join_using_khepri(RemoteNode);
false -> can_join_using_mnesia(RemoteNode)
end
* Sometimes, we need the feature flag state query to NOT block (for
instance because it would cause a deadlock). Here is an example:
case rabbit_khepri:get_feature_state() of
enabled -> members_using_khepri();
_ -> members_using_mnesia()
end
Direct accesses to Mnesia still exist. They are limited to code that is
specific to Mnesia such as classic queue mirroring or network partitions
handling strategies.
Now, to discover the Mnesia tables to migrate and how to migrate them,
we use an Erlang module attribute called
`rabbit_mnesia_tables_to_khepri_db` which indicates a list of Mnesia
tables and an associated converter module. Here is an example in the
`rabbitmq_recent_history_exchange` plugin:
-rabbit_mnesia_tables_to_khepri_db(
[{?RH_TABLE, rabbit_db_rh_exchange_m2k_converter}]).
The converter module — `rabbit_db_rh_exchange_m2k_converter` in this
example — is in fact a "sub" converter module called by
`rabbit_db_m2k_converter`. See the documentation of a `mnesia_to_khepri`
converter module to learn more about these modules.
[1] https://github.com/rabbitmq/ra
[2] https://github.com/rabbitmq/khepri
[3] https://github.com/rabbitmq/khepri_mnesia_migration
See #7206.
Co-authored-by: Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
Co-authored-by: Diana Parra Corbacho <dparracorbac@vmware.com>
Co-authored-by: Michael Davis <mcarsondavis@gmail.com>
2023-01-05 20:57:50 +08:00
|
|
|
- rabbit_db_ch_exchange_m2k_converter
|
2023-02-23 21:47:41 +08:00
|
|
|
- rabbit_exchange_type_consistent_hash
|
|
|
|
rabbitmq_ct_client_helpers:
|
|
|
|
- rabbit_ct_client_helpers
|
|
|
|
rabbitmq_ct_helpers:
|
|
|
|
- cth_log_redirect_any_domains
|
|
|
|
- rabbit_control_helper
|
|
|
|
- rabbit_ct_broker_helpers
|
|
|
|
- rabbit_ct_config_schema
|
|
|
|
- rabbit_ct_helpers
|
|
|
|
- rabbit_ct_proper_helpers
|
|
|
|
- rabbit_ct_vm_helpers
|
|
|
|
- rabbit_mgmt_test_util
|
|
|
|
rabbitmq_event_exchange:
|
|
|
|
- rabbit_event_exchange_decorator
|
|
|
|
- rabbit_exchange_type_event
|
|
|
|
rabbitmq_federation:
|
|
|
|
- Elixir.RabbitMQ.CLI.Ctl.Commands.FederationStatusCommand
|
|
|
|
- Elixir.RabbitMQ.CLI.Ctl.Commands.RestartFederationLinkCommand
|
|
|
|
- rabbit_federation_app
|
|
|
|
- rabbit_federation_db
|
|
|
|
- rabbit_federation_event
|
|
|
|
- rabbit_federation_exchange
|
|
|
|
- rabbit_federation_exchange_link
|
|
|
|
- rabbit_federation_exchange_link_sup_sup
|
|
|
|
- rabbit_federation_link_sup
|
|
|
|
- rabbit_federation_link_util
|
|
|
|
- rabbit_federation_parameters
|
|
|
|
- rabbit_federation_pg
|
|
|
|
- rabbit_federation_queue
|
|
|
|
- rabbit_federation_queue_link
|
|
|
|
- rabbit_federation_queue_link_sup_sup
|
|
|
|
- rabbit_federation_status
|
|
|
|
- rabbit_federation_sup
|
|
|
|
- rabbit_federation_upstream
|
|
|
|
- rabbit_federation_upstream_exchange
|
|
|
|
- rabbit_federation_util
|
|
|
|
- rabbit_log_federation
|
|
|
|
rabbitmq_federation_management:
|
|
|
|
- rabbit_federation_mgmt
|
|
|
|
rabbitmq_jms_topic_exchange:
|
|
|
|
- rabbit_db_jms_exchange
|
Allow using the Khepri database to store metadata instead of Mnesia
[Why]
Mnesia is a very powerful and convenient tool for Erlang applications:
it is a persistent disc-based database, it handles replication across
multiple Erlang nodes and it is available out-of-the-box from the
Erlang/OTP distribution. RabbitMQ relies on Mnesia to manage all its
metadata:
* virtual hosts' properties
* internal users
* queue, exchange and binding declarations (not queues data)
* runtime parameters and policies
* ...
Unfortunately, Mnesia makes it difficult to handle network partitions and,
as a consequence, the merge conflicts between Erlang nodes once the
network partition is resolved. RabbitMQ provides several partition
handling strategies but they are not bullet-proof. Users still hit
situations where it is a pain to repair a cluster following a network
partition.
[How]
@kjnilsson created Ra [1], a Raft consensus library that RabbitMQ
already uses successfully to implement quorum queues and streams for
instance. Those queues do not suffer from network partitions.
We created Khepri [2], a new persistent and replicated database engine
based on Ra and we want to use it in place of Mnesia in RabbitMQ to
solve the problems with network partitions.
This patch integrates Khepri as an experimental feature. When enabled,
RabbitMQ will store all its metadata in Khepri instead of Mnesia.
This change comes with behavior changes. While Khepri remains disabled,
you should see no changes to the behavior of RabbitMQ. If there are
changes, it is a bug. After Khepri is enabled, there are significant
changes of behavior that you should be aware of.
Because it is based on the Raft consensus algorithm, when there is a
network partition, only the cluster members that are in the partition
with at least `(Number of nodes in the cluster ÷ 2) + 1` nodes
can "make progress". In other words, only those nodes may write to the
Khepri database and read from the database and expect a consistent
result.
For instance in a cluster of 5 RabbitMQ nodes:
* If there are two partitions, one with 3 nodes, one with 2 nodes, only
the group of 3 nodes will be able to write to the database.
* If there are three partitions, two with 2 nodes, one with 1 node, none
of the groups can write to the database.
Because the Khepri database will be used for all kinds of metadata, it
means that RabbitMQ nodes that can't write to the database will be
unable to perform some operations. A list of operations and what to
expect is documented in the associated pull request and the RabbitMQ
website.
This requirement from Raft also affects the startup of RabbitMQ nodes in
a cluster. Indeed, at least a quorum number of nodes must be started at
once to allow nodes to become ready.
To enable Khepri, you need to enable the `khepri_db` feature flag:
rabbitmqctl enable_feature_flag khepri_db
When the `khepri_db` feature flag is enabled, the migration code
performs the following two tasks:
1. It synchronizes the Khepri cluster membership from the Mnesia
cluster. It uses `mnesia_to_khepri:sync_cluster_membership/1` from
the `khepri_mnesia_migration` application [3].
2. It copies data from relevant Mnesia tables to Khepri, doing some
conversion if necessary on the way. Again, it uses
`mnesia_to_khepri:copy_tables/4` from `khepri_mnesia_migration` to do
it.
This can be performed on a running standalone RabbitMQ node or cluster.
Data will be migrated from Mnesia to Khepri without any service
interruption. Note that during the migration, the performance may
decrease and the memory footprint may go up.
Because this feature flag is considered experimental, it is not enabled
by default even on a brand new RabbitMQ deployment.
More about the implementation details below:
In the past months, all accesses to Mnesia were isolated in a collection
of `rabbit_db*` modules. This is where the integration of Khepri mostly
takes place: we use a function called `rabbit_khepri:handle_fallback/1`
which selects the database and performs the query or the transaction.
Here is an example from `rabbit_db_vhost`:
* Up until RabbitMQ 3.12.x:
get(VHostName) when is_binary(VHostName) ->
get_in_mnesia(VHostName).
* Starting with RabbitMQ 3.13.0:
get(VHostName) when is_binary(VHostName) ->
rabbit_khepri:handle_fallback(
#{mnesia => fun() -> get_in_mnesia(VHostName) end,
khepri => fun() -> get_in_khepri(VHostName) end}).
This `rabbit_khepri:handle_fallback/1` function relies on two things:
1. the fact that the `khepri_db` feature flag is enabled, in which case
it always executes the Khepri-based variant.
2. the ability, or not, to read from and write to Mnesia tables otherwise.
Before the feature flag is enabled, or during the migration, the
function will try to execute the Mnesia-based variant. If it succeeds,
then it returns the result. If it fails because one or more Mnesia
tables can't be used, it restarts from scratch: it means the feature
flag is being enabled and depending on the outcome, either the
Mnesia-based variant will succeed (the feature flag couldn't be enabled)
or the feature flag will be marked as enabled and it will call the
Khepri-based variant. The meat of this function really lives in the
`khepri_mnesia_migration` application [3] and
`rabbit_khepri:handle_fallback/1` is a wrapper on top of it that knows
about the feature flag.
However, some calls to the database do not depend on the existence of
Mnesia tables, such as functions where we need to learn about the
members of a cluster. For those, we can't rely on exceptions from
Mnesia. Therefore, we just look at the state of the feature flag to
determine which database to use. There are two situations though:
* Sometimes, we need the feature flag state query to block because the
function interested in it can't return a valid answer during the
migration. Here is an example:
case rabbit_khepri:is_enabled(RemoteNode) of
true -> can_join_using_khepri(RemoteNode);
false -> can_join_using_mnesia(RemoteNode)
end
* Sometimes, we need the feature flag state query to NOT block (for
instance because it would cause a deadlock). Here is an example:
case rabbit_khepri:get_feature_state() of
enabled -> members_using_khepri();
_ -> members_using_mnesia()
end
Direct accesses to Mnesia still exist. They are limited to code that is
specific to Mnesia such as classic queue mirroring or network partitions
handling strategies.
Now, to discover the Mnesia tables to migrate and how to migrate them,
we use an Erlang module attribute called
`rabbit_mnesia_tables_to_khepri_db` which indicates a list of Mnesia
tables and an associated converter module. Here is an example in the
`rabbitmq_recent_history_exchange` plugin:
-rabbit_mnesia_tables_to_khepri_db(
[{?RH_TABLE, rabbit_db_rh_exchange_m2k_converter}]).
The converter module — `rabbit_db_rh_exchange_m2k_converter` in this
example — is in fact a "sub" converter module called by
`rabbit_db_m2k_converter`. See the documentation of a `mnesia_to_khepri`
converter module to learn more about these modules.
[1] https://github.com/rabbitmq/ra
[2] https://github.com/rabbitmq/khepri
[3] https://github.com/rabbitmq/khepri_mnesia_migration
See #7206.
Co-authored-by: Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
Co-authored-by: Diana Parra Corbacho <dparracorbac@vmware.com>
Co-authored-by: Michael Davis <mcarsondavis@gmail.com>
2023-01-05 20:57:50 +08:00
|
|
|
- rabbit_db_jms_exchange_m2k_converter
|
2023-02-23 21:47:41 +08:00
|
|
|
- rabbit_jms_topic_exchange
|
|
|
|
- sjx_evaluator
|
|
|
|
rabbitmq_management:
|
|
|
|
- rabbit_mgmt_app
|
|
|
|
- rabbit_mgmt_cors
|
|
|
|
- rabbit_mgmt_csp
|
|
|
|
- rabbit_mgmt_db
|
|
|
|
- rabbit_mgmt_db_cache
|
|
|
|
- rabbit_mgmt_db_cache_sup
|
|
|
|
- rabbit_mgmt_dispatcher
|
|
|
|
- rabbit_mgmt_extension
|
|
|
|
- rabbit_mgmt_features
|
|
|
|
- rabbit_mgmt_headers
|
|
|
|
- rabbit_mgmt_hsts
|
|
|
|
- rabbit_mgmt_load_definitions
|
|
|
|
- rabbit_mgmt_login
|
2024-01-26 07:41:56 +08:00
|
|
|
- rabbit_mgmt_nodes
|
2023-02-23 21:47:41 +08:00
|
|
|
- rabbit_mgmt_oauth_bootstrap
|
|
|
|
- rabbit_mgmt_reset_handler
|
|
|
|
- rabbit_mgmt_stats
|
|
|
|
- rabbit_mgmt_sup
|
|
|
|
- rabbit_mgmt_sup_sup
|
|
|
|
- rabbit_mgmt_util
|
|
|
|
- rabbit_mgmt_wm_aliveness_test
|
|
|
|
- rabbit_mgmt_wm_auth
|
|
|
|
- rabbit_mgmt_wm_auth_attempts
|
|
|
|
- rabbit_mgmt_wm_binding
|
|
|
|
- rabbit_mgmt_wm_bindings
|
|
|
|
- rabbit_mgmt_wm_channel
|
|
|
|
- rabbit_mgmt_wm_channels
|
|
|
|
- rabbit_mgmt_wm_channels_vhost
|
|
|
|
- rabbit_mgmt_wm_cluster_name
|
|
|
|
- rabbit_mgmt_wm_connection
|
|
|
|
- rabbit_mgmt_wm_connection_channels
|
|
|
|
- rabbit_mgmt_wm_connection_user_name
|
|
|
|
- rabbit_mgmt_wm_connections
|
|
|
|
- rabbit_mgmt_wm_connections_vhost
|
|
|
|
- rabbit_mgmt_wm_consumers
|
|
|
|
- rabbit_mgmt_wm_definitions
|
2023-12-14 12:35:35 +08:00
|
|
|
- rabbit_mgmt_wm_deprecated_features
|
2023-02-23 21:47:41 +08:00
|
|
|
- rabbit_mgmt_wm_environment
|
|
|
|
- rabbit_mgmt_wm_exchange
|
|
|
|
- rabbit_mgmt_wm_exchange_publish
|
|
|
|
- rabbit_mgmt_wm_exchanges
|
|
|
|
- rabbit_mgmt_wm_extensions
|
|
|
|
- rabbit_mgmt_wm_feature_flag_enable
|
|
|
|
- rabbit_mgmt_wm_feature_flags
|
|
|
|
- rabbit_mgmt_wm_global_parameter
|
|
|
|
- rabbit_mgmt_wm_global_parameters
|
|
|
|
- rabbit_mgmt_wm_hash_password
|
|
|
|
- rabbit_mgmt_wm_health_check_alarms
|
|
|
|
- rabbit_mgmt_wm_health_check_certificate_expiration
|
|
|
|
- rabbit_mgmt_wm_health_check_local_alarms
|
|
|
|
- rabbit_mgmt_wm_health_check_node_is_mirror_sync_critical
|
|
|
|
- rabbit_mgmt_wm_health_check_node_is_quorum_critical
|
|
|
|
- rabbit_mgmt_wm_health_check_port_listener
|
|
|
|
- rabbit_mgmt_wm_health_check_protocol_listener
|
|
|
|
- rabbit_mgmt_wm_health_check_virtual_hosts
|
|
|
|
- rabbit_mgmt_wm_healthchecks
|
|
|
|
- rabbit_mgmt_wm_limit
|
|
|
|
- rabbit_mgmt_wm_limits
|
|
|
|
- rabbit_mgmt_wm_login
|
|
|
|
- rabbit_mgmt_wm_node
|
|
|
|
- rabbit_mgmt_wm_node_memory
|
|
|
|
- rabbit_mgmt_wm_node_memory_ets
|
|
|
|
- rabbit_mgmt_wm_nodes
|
|
|
|
- rabbit_mgmt_wm_operator_policies
|
|
|
|
- rabbit_mgmt_wm_operator_policy
|
|
|
|
- rabbit_mgmt_wm_overview
|
|
|
|
- rabbit_mgmt_wm_parameter
|
|
|
|
- rabbit_mgmt_wm_parameters
|
|
|
|
- rabbit_mgmt_wm_permission
|
|
|
|
- rabbit_mgmt_wm_permissions
|
|
|
|
- rabbit_mgmt_wm_permissions_user
|
|
|
|
- rabbit_mgmt_wm_permissions_vhost
|
|
|
|
- rabbit_mgmt_wm_policies
|
|
|
|
- rabbit_mgmt_wm_policy
|
|
|
|
- rabbit_mgmt_wm_queue
|
|
|
|
- rabbit_mgmt_wm_queue_actions
|
|
|
|
- rabbit_mgmt_wm_queue_get
|
|
|
|
- rabbit_mgmt_wm_queue_purge
|
|
|
|
- rabbit_mgmt_wm_queues
|
2023-06-13 05:36:54 +08:00
|
|
|
- rabbit_mgmt_wm_quorum_queue_replicas_add_member
|
|
|
|
- rabbit_mgmt_wm_quorum_queue_replicas_delete_member
|
2023-06-14 06:01:31 +08:00
|
|
|
- rabbit_mgmt_wm_quorum_queue_replicas_grow
|
|
|
|
- rabbit_mgmt_wm_quorum_queue_replicas_shrink
|
2023-02-23 21:47:41 +08:00
|
|
|
- rabbit_mgmt_wm_rebalance_queues
|
|
|
|
- rabbit_mgmt_wm_redirect
|
|
|
|
- rabbit_mgmt_wm_reset
|
|
|
|
- rabbit_mgmt_wm_static
|
|
|
|
- rabbit_mgmt_wm_topic_permission
|
|
|
|
- rabbit_mgmt_wm_topic_permissions
|
|
|
|
- rabbit_mgmt_wm_topic_permissions_user
|
|
|
|
- rabbit_mgmt_wm_topic_permissions_vhost
|
|
|
|
- rabbit_mgmt_wm_user
|
|
|
|
- rabbit_mgmt_wm_user_limit
|
|
|
|
- rabbit_mgmt_wm_user_limits
|
|
|
|
- rabbit_mgmt_wm_users
|
|
|
|
- rabbit_mgmt_wm_users_bulk_delete
|
|
|
|
- rabbit_mgmt_wm_vhost
|
|
|
|
- rabbit_mgmt_wm_vhost_restart
|
|
|
|
- rabbit_mgmt_wm_vhosts
|
|
|
|
- rabbit_mgmt_wm_whoami
|
|
|
|
rabbitmq_management_agent:
|
|
|
|
- Elixir.RabbitMQ.CLI.Ctl.Commands.ResetStatsDbCommand
|
|
|
|
- exometer_slide
|
|
|
|
- rabbit_mgmt_agent_app
|
|
|
|
- rabbit_mgmt_agent_config
|
|
|
|
- rabbit_mgmt_agent_sup
|
|
|
|
- rabbit_mgmt_agent_sup_sup
|
|
|
|
- rabbit_mgmt_data
|
|
|
|
- rabbit_mgmt_data_compat
|
|
|
|
- rabbit_mgmt_db_handler
|
|
|
|
- rabbit_mgmt_external_stats
|
|
|
|
- rabbit_mgmt_ff
|
|
|
|
- rabbit_mgmt_format
|
|
|
|
- rabbit_mgmt_gc
|
|
|
|
- rabbit_mgmt_metrics_collector
|
|
|
|
- rabbit_mgmt_metrics_gc
|
|
|
|
- rabbit_mgmt_storage
|
|
|
|
rabbitmq_mqtt:
|
|
|
|
- Elixir.RabbitMQ.CLI.Ctl.Commands.DecommissionMqttNodeCommand
|
|
|
|
- Elixir.RabbitMQ.CLI.Ctl.Commands.ListMqttConnectionsCommand
|
Message Containers (#5077)
This PR implements an approach for a "protocol (data format) agnostic core" where the format of the message isn't converted at point of reception.
Currently all messages that do not originate from AMQP 0.9.1 are converted into an AMQP 0.9.1 flavoured basic_message record before being sent to a queue. If the messages are then consumed by the originating protocol, they are converted back from AMQP 0.9.1. For some protocols, such as MQTT 3.1, this isn't too expensive as MQTT is mostly an easily mapped subset of AMQP 0.9.1, but for others, such as AMQP 1.0, the conversions are awkward and in some cases lossy even when consuming from the originating protocol.
This PR instead wraps all incoming messages in their originating form into a generic, extensible message container type (mc). The container module exposes an API to get common message details such as size and various properties (ttl, priority etc) directly from the source data type. Each protocol needs to implement the mc behaviour so that when a message originating from one protocol is consumed by another protocol, we convert it to the target protocol at that point.
The message container also holds annotations, dead letter records and other metadata we need to record during the lifetime of a message. The original protocol message is never modified unless it is consumed.
This includes conversion modules to and from amqp, amqpl (AMQP 0.9.1) and mqtt.
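A rough, hypothetical usage sketch of the container API described above; `mc:init/3` and the `mc_amqpl` module are named in the commit notes below, while the exact arguments and the `mc:size/1` accessor used here are assumptions for illustration only, not the definitive API.
```
%% Hypothetical sketch: wrap an incoming AMQP 0.9.1 message in a message
%% container without converting its payload, then read a common detail.
%% Argument and return shapes are assumptions for illustration.
wrap_amqpl(Content) ->
    %% The source protocol module (assumed to be mc_amqpl here) implements
    %% the mc behaviour; the payload stays in its original data format.
    Mc = mc:init(mc_amqpl, Content, #{}),  % #{} = no extra annotations
    %% Common details such as size are read straight from the source format;
    %% conversion to the consuming protocol only happens when it is consumed.
    _Size = mc:size(Mc),
    Mc.
```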
COMMIT HISTORY:
* Refactor away from using the delivery{} record
In many places including exchange types. This should make it
easier to move towards using a message container type instead of
basic_message.
Add mc module and move direct replies outside of exchange
Lots of changes incl classic queues
Implement stream support incl amqp conversions
simplify mc state record
move mc.erl
mc dlx stuff
recent history exchange
Make tracking work
But doesn't take a protocol agnostic approach as we just convert
everything into AMQP legacy and back. Might be good enough for now.
Tracing as a whole may want a bit of a re-vamp at some point.
tidy
make quorum queue peek work by legacy conversion
dead lettering fixes
dead lettering fixes
CMQ fixes
rabbit_trace type fixes
fixes
fix
Fix classic queue props
test assertion fix
feature flag and backwards compat
Enable message_container feature flag in some SUITEs
Dialyzer fixes
fixes
fix
test fixes
Various
Manually update a gazelle generated file
until a gazelle enhancement can be made
https://github.com/rabbitmq/rules_erlang/issues/185
Add message_containers_SUITE to bazel
and regen bazel files with gazelle from rules_erlang@main
Simplify essential property access
Such as durable, ttl and priority by extracting them into annotations
at message container init time.
Move type
to remove dependency on amqp10 stuff in mc.erl
mostly because I don't know how to make bazel do the right thing
add more stuff
Refine routing header stuff
wip
Cosmetics
Do not use "maybe" as type name as "maybe" is a keyword since OTP 25
which makes Erlang LS complain.
* Dedup death queue names
* Fix function clause crashes
Fix failing tests in the MQTT shared_SUITE:
A classic queue message ID can be undefined as set in
https://github.com/rabbitmq/rabbitmq-server/blob/fbe79ff47b4edbc0fd95457e623d6593161ad198/deps/rabbit/src/rabbit_classic_queue_index_v2.erl#L1048
Fix failing tests in the MQTT shared_SUITE-mixed:
When feature flag message_containers is disabled, the
message is not an #mc{} record, but a #basic_message{} record.
* Fix is_utf8_no_null crash
Prior to this commit, the function crashed if invalid UTF-8 was
provided, e.g.:
```
1> rabbit_misc:is_valid_shortstr(<<"😇"/utf16>>).
** exception error: no function clause matching rabbit_misc:is_utf8_no_null(<<216,61,222,7>>) (rabbit_misc.erl, line 1481)
```
* Implement mqtt mc behaviour
For now via amqp translation.
This is still work in progress, but the following SUITEs pass:
```
make -C deps/rabbitmq_mqtt ct-shared t=[mqtt,v5,cluster_size_1] FULL=1
make -C deps/rabbitmq_mqtt ct-v5 t=[mqtt,cluster_size_1] FULL=1
```
* Shorten mc file names
Module name length matters because for each persistent message the #mc{}
record is persisted to disk.
```
1> iolist_size(term_to_iovec({mc, rabbit_mc_amqp_legacy})).
30
2> iolist_size(term_to_iovec({mc, mc_amqpl})).
17
```
This commit renames the mc modules:
```
ag -l rabbit_mc_amqp_legacy | xargs sed -i 's/rabbit_mc_amqp_legacy/mc_amqpl/g'
ag -l rabbit_mc_amqp | xargs sed -i 's/rabbit_mc_amqp/mc_amqp/g'
ag -l rabbit_mqtt_mc | xargs sed -i 's/rabbit_mqtt_mc/mc_mqtt/g'
```
* mc: make deaths an annotation + fixes
* Fix mc_mqtt protocol_state callback
* Fix test will_delay_node_restart
```
make -C deps/rabbitmq_mqtt ct-v5 t=[mqtt,cluster_size_3]:will_delay_node_restart FULL=1
```
* Bazel run gazelle
* mix format rabbitmqctl.ex
* Ensure ttl annotation is reflected in amqp legacy protocol state
* Fix id access in message store
* Fix rabbit_message_interceptor_SUITE
* dialyzer fixes
* Fix rabbit:rabbit_message_interceptor_SUITE-mixed
set_annotation/3 should not result in duplicate keys
* Fix MQTT shared_SUITE-mixed
Up to 3.12 non-MQTT publishes were always QoS 1 regardless of delivery_mode.
https://github.com/rabbitmq/rabbitmq-server/blob/75a953ce286a10aca910c098805a4f545989af38/deps/rabbitmq_mqtt/src/rabbit_mqtt_processor.erl#L2075-L2076
From now on, non-MQTT publishes are QoS 1 if durable.
This makes more sense.
The MQTT plugin must send a #basic_message{} to an old node that does
not understand message containers.
* Field content of 'v1_0.data' can be binary
Fix
```
bazel test //deps/rabbitmq_mqtt:shared_SUITE-mixed \
--test_env FOCUS="-group [mqtt,v4,cluster_size_1] -case trace" \
-t- --test_sharding_strategy=disabled
```
* Remove route/2 and implement route/3 for all exchange types.
This removes the route/2 callback from rabbit_exchange_type and
makes route/3 mandatory instead. This is a breaking change and
will require all implementations of exchange types to update their
code, however this is necessary anyway for them to correctly handle
the mc type.
stream filtering fixes
* Translate directly from MQTT to AMQP 0.9.1
* handle undecoded properties in mc_compat
amqpl: put clause in right order
recover death details from amqp data
* Replace callback init_amqp with convert_from
* Fix return value of lists:keyfind/3
* Translate directly from AMQP 0.9.1 to MQTT
* Fix MQTT payload size
MQTT payload can be a list when converted from AMQP 0.9.1 for example
First conversions tests
Plus some other conversion related fixes.
bazel
bazel
translate amqp 1.0 null to undefined
mc: property/2 and correlation_id/message_id return type tagged values.
To ensure we can support a variety of types better.
The type tags are AMQP 1.0 flavoured.
fix death recovery
mc_mqtt: impl new api
Add callbacks to allow protocols to compact data before storage
And make readable if needing to query things repeatedly.
bazel fix
* more decoding
* tracking mixed versions compat
* mc: flip default of `durable` annotation to save some data.
Assuming most messages are durable and that in memory messages suffer less
from persistence overhead, it makes sense for a non-existent `durable`
annotation to mean durable=true.
* mc conversion tests and tidy up
* mc make x_header unstrict again
* amqpl: death record fixes
* bazel
* amqp -> amqpl conversion test
* Fix crash in mc_amqp:size/1
Body can be a single amqp-value section (instead of
being a list) as shown by test
```
make -C deps/rabbitmq_amqp1_0/ ct-system t=java
```
on branch native-amqp.
* Fix crash in lists:flatten/1
Data can be a single amqp-value section (instead of
being a list) as shown by test
```
make -C deps/rabbitmq_amqp1_0 ct-system t=dotnet:roundtrip_to_amqp_091
```
on branch native-amqp.
* Fix crash in rabbit_writer
Running test
```
make -C deps/rabbitmq_amqp1_0 ct-system t=dotnet:roundtrip_to_amqp_091
```
on branch native-amqp resulted in the following crash:
```
crasher:
initial call: rabbit_writer:enter_mainloop/2
pid: <0.711.0>
registered_name: []
exception error: bad argument
in function size/1
called as size([<<0>>,<<"Sw">>,[<<160,2>>,<<"hi">>]])
*** argument 1: not tuple or binary
in call from rabbit_binary_generator:build_content_frames/7 (rabbit_binary_generator.erl, line 89)
in call from rabbit_binary_generator:build_simple_content_frames/4 (rabbit_binary_generator.erl, line 61)
in call from rabbit_writer:assemble_frames/5 (rabbit_writer.erl, line 334)
in call from rabbit_writer:internal_send_command_async/3 (rabbit_writer.erl, line 365)
in call from rabbit_writer:handle_message/2 (rabbit_writer.erl, line 265)
in call from rabbit_writer:handle_message/3 (rabbit_writer.erl, line 232)
in call from rabbit_writer:mainloop1/2 (rabbit_writer.erl, line 223)
```
because #content.payload_fragments_rev is currently supposed to
be a flat list of binaries instead of being an iolist.
This commit fixes this crash inefficiently by calling
iolist_to_binary/1. A better solution would be to allow AMQP legacy's #content.payload_fragments_rev
to be an iolist.
* Add accidentally deleted line back
* mc: optimise mc_amqp internal format
By removing the outer records for message and delivery annotations
as well as application properties and footers.
* mc: optimise mc_amqp map_add by using upsert
* mc: refactoring and bug fixes
* mc_SUITE routingheader assertions
* mc remove serialize/1 callback as only used by amqp
* mc_amqp: avoid returning a nested list from protocol_state
* test and bug fix
* move infer_type to mc_util
* mc fixes and additional assertions
* Support headers exchange routing for MQTT messages
When a headers exchange is bound to the MQTT topic exchange, routing
will be performed based on both MQTT topic (by the topic exchange) and
MQTT User Property (by the headers exchange).
This combines the best worlds of both MQTT 5.0 and AMQP 0.9.1 and
enables powerful routing topologies.
When the User Property contains the same name multiple times, only the
last name (and value) will be considered by the headers exchange.
* Fix crash when sending from stream to amqpl
When publishing a message via the stream protocol and consuming it via
AMQP 0.9.1, the following crash occurred prior to this commit:
```
crasher:
initial call: rabbit_channel:init/1
pid: <0.818.0>
registered_name: []
exception exit: {{badmatch,undefined},
[{rabbit_channel,handle_deliver0,4,
[{file,"rabbit_channel.erl"},
{line,2728}]},
{lists,foldl,3,[{file,"lists.erl"},{line,1594}]},
{rabbit_channel,handle_cast,2,
[{file,"rabbit_channel.erl"},
{line,728}]},
{gen_server2,handle_msg,2,
[{file,"gen_server2.erl"},{line,1056}]},
{proc_lib,wake_up,3,
[{file,"proc_lib.erl"},{line,251}]}]}
```
This commit first gives `mc:init/3` the chance to set exchange and
routing_keys annotations.
If not set, `rabbit_stream_queue` will set these annotations assuming
the message was originally published via the stream protocol.
* Support consistent hash exchange routing for MQTT 5.0
When a consistent hash exchange is bound to the MQTT topic exchange,
MQTT 5.0 messages can be routed to queues consistently based on the
Correlation-Data in the PUBLISH packet.
* Convert MQTT 5.0 User Property
* to AMQP 0.9.1 headers
* from AMQP 0.9.1 headers
* to AMQP 1.0 application properties and message annotations
* from AMQP 1.0 application properties and message annotations
* Make use of Annotations in mc_mqtt:protocol_state/2
mc_mqtt:protocol_state/2 includes Annotations as parameter.
It's cleaner to make use of these Annotations when computing the
protocol state instead of relying on the caller (rabbitmq_mqtt_processor)
to compute the protocol state.
* Enforce AMQP 0.9.1 field name length limit
The AMQP 0.9.1 spec prohibits field names longer than 128 characters.
Therefore, when converting AMQP 1.0 message annotations, application
properties or MQTT 5.0 User Property to AMQP 0.9.1 headers, drop any
names longer than 128 characters.
* Fix type specs
Apply feedback from Michael Davis
Co-authored-by: Michael Davis <mcarsondavis@gmail.com>
* Add mc_mqtt unit test suite
Implement mc_mqtt:x_header/2
* Translate indicator that payload is UTF-8 encoded
when converting between MQTT 5.0 and AMQP 1.0
* Translate single amqp-value section from AMQP 1.0 to MQTT
Convert to a text representation, if possible, and indicate to MQTT
client that the payload is UTF-8 encoded. This way, the MQTT client will
be able to parse the payload.
If conversion to text representation is not possible, encode the payload
using the AMQP 1.0 type system and indicate the encoding via Content-Type
message/vnd.rabbitmq.amqp.
This Content-Type is not registered.
Type "message" makes sense since it's a message.
Vendor tree "vnd.rabbitmq.amqp" makes sense since the bare subtype "amqp" is not
registered.
* Fix payload conversion
* Translate Response Topic between MQTT and AMQP
Translate MQTT 5.0 Response Topic to AMQP 1.0 reply-to address and vice
versa.
The Response Topic must be a UTF-8 encoded string.
This commit re-uses the already defined RabbitMQ target addresses:
```
"/topic/" RK Publish to amq.topic with routing key RK
"/exchange/" X "/" RK Publish to exchange X with routing key RK
```
By default, the MQTT topic exchange is configured to be amq.topic using
the 1st target address.
When an operator modifies the mqtt.exchange, the 2nd target address is
used.
* Apply PR feedback
and fix formatting
Co-authored-by: Michael Davis <mcarsondavis@gmail.com>
* tidy up
* Add MQTT message_containers test
* consistent hash exchange: avoid amqp legacy conversion
When hashing on a header value.
* Avoid converting to amqp legacy when using exchange federation
* Fix test flake
* test and dialyzer fixes
* dialyzer fix
* Add MQTT protocol interoperability tests
Test receiving from and sending to MQTT 5.0 and
* AMQP 0.9.1
* AMQP 1.0
* STOMP
* Streams
* Regenerate portions of deps/rabbit/app.bzl with gazelle
I'm not exactly sure how this happened, but gazelle seems to have been
run with an older version of the rules_erlang gazelle extension at
some point. This caused generation of a structure that is no longer
used. This commit updates the structure to the current pattern.
* mc: refactoring
* mc_amqpl: handle delivery annotations
Just in case they are included.
Also use iolist_to_iovec to create flat list of binaries when
converting from amqp with amqp encoded payload.
---------
Co-authored-by: David Ansari <david.ansari@gmx.de>
Co-authored-by: Michael Davis <mcarsondavis@gmail.com>
Co-authored-by: Rin Kuryloski <kuryloskip@vmware.com>
2023-08-31 18:27:13 +08:00
|
|
|
- mc_mqtt
|
2023-02-23 21:47:41 +08:00
|
|
|
- mqtt_machine
|
|
|
|
- mqtt_machine_v0
|
|
|
|
- mqtt_node
|
|
|
|
- rabbit_mqtt
|
|
|
|
- rabbit_mqtt_collector
|
|
|
|
- rabbit_mqtt_confirms
|
|
|
|
- rabbit_mqtt_ff
|
|
|
|
- rabbit_mqtt_internal_event_handler
|
|
|
|
- rabbit_mqtt_keepalive
|
|
|
|
- rabbit_mqtt_packet
|
|
|
|
- rabbit_mqtt_processor
|
|
|
|
- rabbit_mqtt_qos0_queue
|
|
|
|
- rabbit_mqtt_reader
|
|
|
|
- rabbit_mqtt_retained_msg_store
|
|
|
|
- rabbit_mqtt_retained_msg_store_dets
|
|
|
|
- rabbit_mqtt_retained_msg_store_ets
|
|
|
|
- rabbit_mqtt_retained_msg_store_noop
|
|
|
|
- rabbit_mqtt_retainer
|
|
|
|
- rabbit_mqtt_retainer_sup
|
|
|
|
- rabbit_mqtt_sup
|
|
|
|
- rabbit_mqtt_util
|
|
|
|
rabbitmq_peer_discovery_aws:
|
|
|
|
- rabbit_peer_discovery_aws
|
|
|
|
- rabbitmq_peer_discovery_aws
|
|
|
|
rabbitmq_peer_discovery_common:
|
|
|
|
- rabbit_peer_discovery_cleanup
|
|
|
|
- rabbit_peer_discovery_common_app
|
|
|
|
- rabbit_peer_discovery_common_sup
|
|
|
|
- rabbit_peer_discovery_config
|
|
|
|
- rabbit_peer_discovery_httpc
|
|
|
|
- rabbit_peer_discovery_util
|
|
|
|
rabbitmq_peer_discovery_consul:
|
|
|
|
- rabbit_peer_discovery_consul
|
|
|
|
- rabbitmq_peer_discovery_consul
|
|
|
|
- rabbitmq_peer_discovery_consul_app
|
|
|
|
- rabbitmq_peer_discovery_consul_health_check_helper
|
|
|
|
- rabbitmq_peer_discovery_consul_sup
|
|
|
|
rabbitmq_peer_discovery_etcd:
|
|
|
|
- rabbit_peer_discovery_etcd
|
|
|
|
- rabbitmq_peer_discovery_etcd
|
|
|
|
- rabbitmq_peer_discovery_etcd_app
|
|
|
|
- rabbitmq_peer_discovery_etcd_sup
|
|
|
|
- rabbitmq_peer_discovery_etcd_v3_client
|
|
|
|
rabbitmq_peer_discovery_k8s:
|
|
|
|
- rabbit_peer_discovery_k8s
|
|
|
|
- rabbitmq_peer_discovery_k8s
|
|
|
|
- rabbitmq_peer_discovery_k8s_app
|
|
|
|
- rabbitmq_peer_discovery_k8s_node_monitor
|
|
|
|
- rabbitmq_peer_discovery_k8s_sup
|
|
|
|
rabbitmq_prelaunch:
|
|
|
|
- rabbit_boot_state
|
|
|
|
- rabbit_boot_state_sup
|
|
|
|
- rabbit_boot_state_systemd
|
|
|
|
- rabbit_boot_state_xterm_titlebar
|
|
|
|
- rabbit_logger_fmt_helpers
|
|
|
|
- rabbit_logger_json_fmt
|
|
|
|
- rabbit_logger_std_h
|
|
|
|
- rabbit_logger_text_fmt
|
|
|
|
- rabbit_prelaunch
|
|
|
|
- rabbit_prelaunch_app
|
|
|
|
- rabbit_prelaunch_conf
|
|
|
|
- rabbit_prelaunch_dist
|
|
|
|
- rabbit_prelaunch_early_logging
|
|
|
|
- rabbit_prelaunch_erlang_compat
|
|
|
|
- rabbit_prelaunch_errors
|
|
|
|
- rabbit_prelaunch_file
|
|
|
|
- rabbit_prelaunch_sighandler
|
|
|
|
- rabbit_prelaunch_sup
|
|
|
|
rabbitmq_prometheus:
|
|
|
|
- prometheus_process_collector
|
|
|
|
- prometheus_rabbitmq_alarm_metrics_collector
|
|
|
|
- prometheus_rabbitmq_core_metrics_collector
|
2023-09-20 12:03:13 +08:00
|
|
|
- prometheus_rabbitmq_dynamic_collector
|
2024-03-12 21:20:01 +08:00
|
|
|
- prometheus_rabbitmq_federation_collector
|
2023-02-23 21:47:41 +08:00
|
|
|
- prometheus_rabbitmq_global_metrics_collector
|
|
|
|
- rabbit_prometheus_app
|
|
|
|
- rabbit_prometheus_dispatcher
|
|
|
|
- rabbit_prometheus_handler
|
|
|
|
rabbitmq_random_exchange:
|
|
|
|
- rabbit_exchange_type_random
|
|
|
|
rabbitmq_recent_history_exchange:
|
|
|
|
- rabbit_db_rh_exchange
|
Allow using the Khepri database to store metadata instead of Mnesia
[Why]
Mnesia is a very powerful and convenient tool for Erlang applications:
it is a persistent disc-based database, it handles replication across
multiple Erlang nodes and it is available out-of-the-box from the
Erlang/OTP distribution. RabbitMQ relies on Mnesia to manage all its
metadata:
* virtual hosts' properties
* intenal users
* queue, exchange and binding declarations (not queues data)
* runtime parameters and policies
* ...
Unfortunately, Mnesia makes it difficult to handle network partitions
and, as a consequence, the merge conflicts between Erlang nodes once the
network partition is resolved. RabbitMQ provides several partition
handling strategies but they are not bullet-proof. Users still hit
situations where it is a pain to repair a cluster following a network
partition.
[How]
@kjnilsson created Ra [1], a Raft consensus library that RabbitMQ
already uses successfully to implement, for instance, quorum queues and
streams. Those queues do not suffer from network partitions.
We created Khepri [2], a new persistent and replicated database engine
based on Ra, and we want to use it in place of Mnesia in RabbitMQ to
solve the problems with network partitions.
This patch integrates Khepri as an experimental feature. When enabled,
RabbitMQ will store all its metadata in Khepri instead of Mnesia.
This patch also comes with behavior changes. While Khepri remains
disabled, you should see no changes to the behavior of RabbitMQ; if
there are any, it is a bug. Once Khepri is enabled, there are
significant changes in behavior that you should be aware of.
Because it is based on the Raft consensus algorithm, when there is a
network partition, only the cluster members in a partition that contains
at least `(number of nodes in the cluster ÷ 2) + 1` nodes can "make
progress". In other words, only those nodes may write to the Khepri
database, or read from it and expect a consistent result.
For instance, in a cluster of 5 RabbitMQ nodes:
* If there are two partitions, one with 3 nodes and one with 2 nodes,
only the group of 3 nodes will be able to write to the database.
* If there are three partitions, two with 2 nodes and one with 1 node,
none of the groups can write to the database.
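To make the arithmetic explicit, here is a minimal sketch of the
majority rule described above (illustration only, not RabbitMQ code):

    %% A partition can make progress only if it holds a strict majority
    %% of the cluster members, i.e. at least (ClusterSize div 2) + 1.
    can_make_progress(ClusterSize, PartitionSize) ->
        PartitionSize >= (ClusterSize div 2) + 1.

    %% can_make_progress(5, 3) -> true
    %% can_make_progress(5, 2) -> false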
Because the Khepri database will be used for all kinds of metadata, it
means that RabbitMQ nodes that can't write to the database will be
unable to perform some operations. A list of operations and what to
expect is documented in the associated pull request and the RabbitMQ
website.
This requirement from Raft also affects the startup of RabbitMQ nodes in
a cluster. Indeed, at least a quorum number of nodes must be started at
once to allow nodes to become ready.
To enable Khepri, you need to enable the `khepri_db` feature flag:
rabbitmqctl enable_feature_flag khepri_db
When the `khepri_db` feature flag is enabled, the migration code
performs the following two tasks:
1. It synchronizes the Khepri cluster membership from the Mnesia
cluster. It uses `mnesia_to_khepri:sync_cluster_membership/1` from
the `khepri_mnesia_migration` application [3].
2. It copies data from relevant Mnesia tables to Khepri, doing some
conversion if necessary on the way. Again, it uses
`mnesia_to_khepri:copy_tables/4` from `khepri_mnesia_migration` to do
it.
This can be performed on a running standalone RabbitMQ node or cluster.
Data will be migrated from Mnesia to Khepri without any service
interruption. Note that during the migration, the performance may
decrease and the memory footprint may go up.
Because this feature flag is considered experimental, it is not enabled
by default even on a brand new RabbitMQ deployment.
More about the implementation details below:
Over the past months, all accesses to Mnesia were isolated in a
collection of `rabbit_db*` modules. This is where the integration of
Khepri mostly takes place: we use a function called
`rabbit_khepri:handle_fallback/1`, which selects the database and
performs the query or the transaction.
Here is an example from `rabbit_db_vhost`:
* Up until RabbitMQ 3.12.x:

    get(VHostName) when is_binary(VHostName) ->
        get_in_mnesia(VHostName).

* Starting with RabbitMQ 3.13.0:

    get(VHostName) when is_binary(VHostName) ->
        rabbit_khepri:handle_fallback(
          #{mnesia => fun() -> get_in_mnesia(VHostName) end,
            khepri => fun() -> get_in_khepri(VHostName) end}).
This `rabbit_khepri:handle_fallback/1` function relies on two things:
1. whether the `khepri_db` feature flag is enabled, in which case it
always executes the Khepri-based variant;
2. otherwise, whether the Mnesia tables can still be read from and
written to.
Before the feature flag is enabled, or during the migration, the
function tries to execute the Mnesia-based variant. If it succeeds, it
returns the result. If it fails because one or more Mnesia tables can't
be used, it starts over: this means the feature flag is being enabled,
and depending on the outcome, either the Mnesia-based variant will
succeed (the feature flag couldn't be enabled) or the feature flag will
be marked as enabled and the Khepri-based variant will be called.
The meat of this function really lives in the
`khepri_mnesia_migration` application [3] and
`rabbit_khepri:handle_fallback/1` is a wrapper on top of it that knows
about the feature flag.
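For illustration only, a much simplified wrapper in the spirit of
`rabbit_khepri:handle_fallback/1` could look like the sketch below. This
is not the actual implementation; `khepri_db_is_enabled/0` is a made-up
helper standing in for the feature flag check:

    handle_fallback(#{mnesia := MnesiaFun, khepri := KhepriFun} = Funs) ->
        case khepri_db_is_enabled() of
            true ->
                KhepriFun();
            false ->
                try
                    MnesiaFun()
                catch
                    %% An unusable Mnesia table suggests the migration
                    %% is in progress: start over and re-evaluate the
                    %% feature flag state.
                    exit:{aborted, {no_exists, _}} ->
                        handle_fallback(Funs)
                end
        end.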
However, some calls to the database do not depend on the existence of
Mnesia tables, such as functions where we need to learn about the
members of a cluster. For those, we can't rely on exceptions from
Mnesia. Therefore, we just look at the state of the feature flag to
determine which database to use. There are two situations though:
* Sometimes, we need the feature flag state query to block because the
function interested in it can't return a valid answer during the
migration. Here is an example:
    case rabbit_khepri:is_enabled(RemoteNode) of
        true  -> can_join_using_khepri(RemoteNode);
        false -> can_join_using_mnesia(RemoteNode)
    end
* Sometimes, we need the feature flag state query to NOT block (for
instance because it would cause a deadlock). Here is an example:
    case rabbit_khepri:get_feature_state() of
        enabled -> members_using_khepri();
        _       -> members_using_mnesia()
    end
Direct accesses to Mnesia still exist. They are limited to code that is
specific to Mnesia, such as classic queue mirroring or the network
partition handling strategies.
Now, to discover the Mnesia tables to migrate and how to migrate them,
we use an Erlang module attribute called
`rabbit_mnesia_tables_to_khepri_db` which indicates a list of Mnesia
tables and an associated converter module. Here is an example in the
`rabbitmq_recent_history_exchange` plugin:
    -rabbit_mnesia_tables_to_khepri_db(
       [{?RH_TABLE, rabbit_db_rh_exchange_m2k_converter}]).
The converter module (`rabbit_db_rh_exchange_m2k_converter` in this
example) is in fact a "sub" converter module called by
`rabbit_db_m2k_converter`. See the documentation of `mnesia_to_khepri`
converter modules to learn more about them.
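As a rough, hypothetical skeleton (the callback names and arities below
are assumptions for illustration, not the verified `mnesia_to_khepri`
behaviour; refer to the `khepri_mnesia_migration` documentation for the
real interface), such a converter module boils down to callbacks that
turn each Mnesia record into a write to Khepri:

    %% Hypothetical skeleton, not taken from the actual code base.
    init_copy_to_khepri(_StoreId, _MigrationId, State) ->
        %% Prepare whatever the conversion needs (paths, caches, ...).
        {ok, State}.

    copy_to_khepri(_Table, _Record, State) ->
        %% Convert the Mnesia record and write it to Khepri here.
        {ok, State}.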
[1] https://github.com/rabbitmq/ra
[2] https://github.com/rabbitmq/khepri
[3] https://github.com/rabbitmq/khepri_mnesia_migration
See #7206.
Co-authored-by: Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
Co-authored-by: Diana Parra Corbacho <dparracorbac@vmware.com>
Co-authored-by: Michael Davis <mcarsondavis@gmail.com>
- rabbit_db_rh_exchange_m2k_converter
- rabbit_exchange_type_recent_history
rabbitmq_sharding:
- rabbit_sharding_exchange_decorator
- rabbit_sharding_exchange_type_modulus_hash
- rabbit_sharding_interceptor
- rabbit_sharding_policy_validator
- rabbit_sharding_shard
- rabbit_sharding_util
rabbitmq_shovel:
- Elixir.RabbitMQ.CLI.Ctl.Commands.DeleteShovelCommand
- Elixir.RabbitMQ.CLI.Ctl.Commands.RestartShovelCommand
- Elixir.RabbitMQ.CLI.Ctl.Commands.ShovelStatusCommand
- rabbit_amqp091_shovel
- rabbit_amqp10_shovel
- rabbit_log_shovel
- rabbit_shovel
- rabbit_shovel_behaviour
- rabbit_shovel_config
- rabbit_shovel_dyn_worker_sup
- rabbit_shovel_dyn_worker_sup_sup
- rabbit_shovel_locks
- rabbit_shovel_parameters
- rabbit_shovel_status
- rabbit_shovel_sup
- rabbit_shovel_util
- rabbit_shovel_worker
- rabbit_shovel_worker_sup
rabbitmq_shovel_management:
- rabbit_shovel_mgmt
- rabbit_shovel_mgmt_util
rabbitmq_stomp:
- Elixir.RabbitMQ.CLI.Ctl.Commands.ListStompConnectionsCommand
- rabbit_stomp
- rabbit_stomp_client_sup
- rabbit_stomp_connection_info
- rabbit_stomp_frame
- rabbit_stomp_internal_event_handler
- rabbit_stomp_processor
- rabbit_stomp_reader
- rabbit_stomp_sup
- rabbit_stomp_util
rabbitmq_stream:
- Elixir.RabbitMQ.CLI.Ctl.Commands.AddSuperStreamCommand
- Elixir.RabbitMQ.CLI.Ctl.Commands.DeleteSuperStreamCommand
- Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamConnectionsCommand
- Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamConsumerGroupsCommand
- Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamConsumersCommand
- Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamGroupConsumersCommand
- Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamPublishersCommand
- Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamTrackingCommand
- rabbit_stream
- rabbit_stream_connection_sup
- rabbit_stream_manager
- rabbit_stream_metrics
- rabbit_stream_metrics_gc
- rabbit_stream_reader
- rabbit_stream_sup
- rabbit_stream_utils
rabbitmq_stream_common:
- rabbit_stream_core
rabbitmq_stream_management:
- rabbit_stream_connection_consumers_mgmt
- rabbit_stream_connection_mgmt
- rabbit_stream_connection_publishers_mgmt
- rabbit_stream_connections_mgmt
- rabbit_stream_connections_vhost_mgmt
- rabbit_stream_consumers_mgmt
- rabbit_stream_management_utils
- rabbit_stream_mgmt_db
- rabbit_stream_publishers_mgmt
- rabbit_stream_tracking_mgmt
rabbitmq_top:
- rabbit_top_app
- rabbit_top_extension
- rabbit_top_sup
- rabbit_top_util
- rabbit_top_wm_ets_tables
- rabbit_top_wm_process
- rabbit_top_wm_processes
- rabbit_top_worker
rabbitmq_tracing:
- rabbit_tracing_app
- rabbit_tracing_consumer
- rabbit_tracing_consumer_sup
- rabbit_tracing_files
- rabbit_tracing_mgmt
- rabbit_tracing_sup
- rabbit_tracing_traces
- rabbit_tracing_util
- rabbit_tracing_wm_file
- rabbit_tracing_wm_files
- rabbit_tracing_wm_trace
- rabbit_tracing_wm_traces
rabbitmq_trust_store:
- rabbit_trust_store
- rabbit_trust_store_app
- rabbit_trust_store_certificate_provider
- rabbit_trust_store_file_provider
- rabbit_trust_store_http_provider
- rabbit_trust_store_sup
rabbitmq_web_dispatch:
- rabbit_cowboy_middleware
- rabbit_cowboy_redirect
- rabbit_cowboy_stream_h
- rabbit_web_dispatch
- rabbit_web_dispatch_access_control
- rabbit_web_dispatch_app
- rabbit_web_dispatch_listing_handler
- rabbit_web_dispatch_registry
- rabbit_web_dispatch_sup
- rabbit_web_dispatch_util
- webmachine_log
- webmachine_log_handler
rabbitmq_web_mqtt:
- rabbit_web_mqtt_app
- rabbit_web_mqtt_handler
- rabbit_web_mqtt_stream_handler
rabbitmq_web_mqtt_examples:
- rabbit_web_mqtt_examples_app
rabbitmq_web_stomp:
- rabbit_web_stomp_app
- rabbit_web_stomp_connection_sup
- rabbit_web_stomp_handler
- rabbit_web_stomp_internal_event_handler
- rabbit_web_stomp_listener
- rabbit_web_stomp_middleware
- rabbit_web_stomp_stream_handler
- rabbit_web_stomp_sup
rabbitmq_web_stomp_examples:
- rabbit_web_stomp_examples_app
ranch:
- ranch
- ranch_acceptor
- ranch_acceptors_sup
- ranch_app
- ranch_conns_sup
- ranch_conns_sup_sup
- ranch_crc32c
- ranch_embedded_sup
- ranch_listener_sup
- ranch_protocol
- ranch_proxy_header
- ranch_server
- ranch_server_proxy
- ranch_ssl
- ranch_sup
- ranch_tcp
- ranch_transport
recon:
- recon
- recon_alloc
- recon_lib
- recon_map
- recon_rec
- recon_trace
redbug:
- redbug
- redbug_compiler
- redbug_dtop
- redbug_lexer
- redbug_parser
- redbug_targ
seshat:
- seshat
- seshat_app
- seshat_counters_server
- seshat_sup
stdout_formatter:
- stdout_formatter
- stdout_formatter_paragraph
- stdout_formatter_table
- stdout_formatter_utils
syslog:
- syslog
- syslog_error_h
- syslog_lager_backend
- syslog_lib
- syslog_logger
- syslog_logger_h
- syslog_monitor
- syslog_rfc3164
- syslog_rfc5424
sysmon_handler:
- sysmon_handler_app
- sysmon_handler_example_handler
- sysmon_handler_filter
- sysmon_handler_sup
- sysmon_handler_testhandler
systemd:
- systemd
- systemd_app
- systemd_journal_h
- systemd_kmsg_formatter
- systemd_protocol
- systemd_socket
- systemd_sup
- systemd_watchdog
thoas:
- thoas
- thoas_decode
- thoas_encode
trust_store_http:
- trust_store_http
- trust_store_http_app
- trust_store_http_sup
- trust_store_invalid_handler
- trust_store_list_handler