PROJECT = rabbit
PROJECT_DESCRIPTION = RabbitMQ
PROJECT_MOD = rabbit
PROJECT_REGISTERED = rabbit_amqqueue_sup \
                     rabbit_direct_client_sup \
                     rabbit_log \
                     rabbit_node_monitor \
                     rabbit_router

define PROJECT_ENV
[
    {tcp_listeners, [5672]},
    {num_tcp_acceptors, 10},
    {ssl_listeners, []},
    {num_ssl_acceptors, 10},
    {ssl_options, []},
    {vm_memory_high_watermark, 0.4},
    {vm_memory_high_watermark_paging_ratio, 0.5},
    {vm_memory_calculation_strategy, rss},
    {memory_monitor_interval, 2500},
    {disk_free_limit, 50000000}, %% 50MB
    {msg_store_index_module, rabbit_msg_store_ets_index},
    {backing_queue_module, rabbit_variable_queue},
    %% 0 ("no limit") would make a better default, but that
    %% breaks the QPid Java client
    {frame_max, 131072},
    %% see rabbitmq-server#1593
    {channel_max, 2047},
    {ranch_connection_max, infinity},
    {heartbeat, 60},
    {msg_store_file_size_limit, 16777216},
    {msg_store_shutdown_timeout, 600000},
    {fhc_write_buffering, true},
    {fhc_read_buffering, false},
    {queue_index_max_journal_entries, 32768},
    {queue_index_embed_msgs_below, 4096},
    {default_user, <<"guest">>},
    {default_pass, <<"guest">>},
    {default_user_tags, [administrator]},
    {default_vhost, <<"/">>},
    {default_permissions, [<<".*">>, <<".*">>, <<".*">>]},
Support AMQP 1.0 natively
## What
Similar to Native MQTT in #5895, this commit implements Native AMQP 1.0.
By "native", we mean that we no longer proxy via AMQP 0.9.1.
## Why
Native AMQP 1.0 comes with the following major benefits:
1. Similar to Native MQTT, this commit provides better throughput, latency,
scalability, and resource usage for AMQP 1.0.
See https://blog.rabbitmq.com/posts/2023/03/native-mqtt for native MQTT improvements.
See further below for some benchmarks.
2. Since AMQP 1.0 is no longer limited by the AMQP 0.9.1 protocol,
this commit allows implementing more AMQP 1.0 features in the future.
Some features are already implemented in this commit (see next section).
3. Simpler, easier to understand, and more maintainable code.
Native AMQP 1.0 as implemented in this commit has the
following major benefits compared to AMQP 0.9.1:
4. Memory and disk alarms will only stop accepting incoming TRANSFER frames.
New connections can still be created to consume from RabbitMQ, i.e. to empty queues.
5. Due to 4., there is no longer a need for separate connections for publishers and
consumers, as we currently recommend for AMQP 0.9.1, which potentially
halves the number of physical TCP connections.
6. When a single connection sends to multiple target queues, a single
slow target queue won't block the entire connection.
The publisher can still send data quickly to all other target queues.
7. A publisher can request whether it wants publisher confirmation on a per-message basis.
In AMQP 0.9.1 publisher confirms are configured per channel only.
8. Consumers can change their "prefetch count" dynamically, which isn't
possible in our AMQP 0.9.1 implementation. See #10174.
9. AMQP 1.0 is an extensible protocol.
This commit also fixes dozens of bugs present in the AMQP 1.0 plugin in
RabbitMQ 3.x - most of which cannot be backported due to the complexity
and limitations of the old 3.x implementation.
This commit contains breaking changes and is therefore targeted for RabbitMQ 4.0.
## Implementation details
1. Breaking change: With Native AMQP, the behaviour of
```
Convert AMQP 0.9.1 message headers to application properties for an AMQP 1.0 consumer
amqp1_0.convert_amqp091_headers_to_app_props = false | true (default false)
Convert AMQP 1.0 Application Properties to AMQP 0.9.1 headers
amqp1_0.convert_app_props_to_amqp091_headers = false | true (default false)
```
will break because we always convert according to the message container conversions.
For example, AMQP 0.9.1 x-headers will go into message-annotations instead of application properties.
Also, `false` won’t be respected since we always convert the headers with message containers.
2. Remove rabbit_queue_collector
rabbit_queue_collector is responsible for synchronously deleting
exclusive queues. Since the AMQP 1.0 plugin never creates exclusive
queues, rabbit_queue_collector doesn't need to be started in the first
place. This will save 1 Erlang process per AMQP 1.0 connection.
3. 7 processes per connection + 1 process per session in this commit, instead of
7 processes per connection + 15 processes per session in 3.x.
The supervision hierarchy was re-designed.
4. Use 1 writer process per AMQP 1.0 connection
AMQP 0.9.1 uses a separate rabbit_writer Erlang process per AMQP 0.9.1 channel.
Prior to this commit, AMQP 1.0 used a separate rabbit_amqp1_0_writer process per AMQP 1.0 session.
Advantage of single writer proc per session (prior to this commit):
* High parallelism for serialising packets if multiple sessions within
a connection write heavily at the same time.
This commit uses a single writer process per AMQP 1.0 connection that is
shared across all AMQP 1.0 sessions.
Advantages of single writer proc per connection (this commit):
* Lower memory usage with hundreds of thousands of AMQP 1.0 sessions
* Less TCP and IP header overhead, given that the single writer process
can accumulate bytes across all sessions before flushing the socket.
In other words, this commit decides that a reader / writer process pair
per AMQP 1.0 connection is good enough for bi-directional TRANSFER flows.
Having a writer per session is too heavy.
We still ensure high throughput by having separate reader, writer, and
session processes.
5. Transform rabbit_amqp1_0_writer into gen_server
Why:
Prior to this commit, when clicking on the AMQP 1.0 writer process in
observer, the process crashed.
Instead of handling all these debug messages of the sys module, it's better
to implement a gen_server.
There is no advantage in using a special OTP process over gen_server
for the AMQP 1.0 writer.
gen_server also provides cleaner format_status output.
How:
Message callbacks return a timeout of 0.
After all messages in the inbox are processed, the timeout message is
handled by flushing any pending bytes.
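For illustration, a minimal sketch of this zero-timeout flush pattern (not the actual rabbit_amqp1_0_writer; the module and function names are made up, and a raw gen_tcp socket is assumed):
```erlang
-module(flush_writer).
-behaviour(gen_server).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2]).

init(Socket) ->
    {ok, #{socket => Socket, pending => []}}.

%% Buffer outgoing bytes instead of writing immediately; returning a
%% timeout of 0 means 'timeout' fires as soon as the inbox is empty.
handle_cast({send, Bytes}, #{pending := Pending} = State) ->
    {noreply, State#{pending := [Bytes | Pending]}, 0}.

handle_call(flush, _From, State) ->
    {reply, ok, flush(State)}.

%% Inbox drained: flush everything accumulated so far in one write.
handle_info(timeout, State) ->
    {noreply, flush(State)}.

flush(#{pending := []} = State) ->
    State;
flush(#{socket := Socket, pending := Pending} = State) ->
    ok = gen_tcp:send(Socket, lists:reverse(Pending)),
    State#{pending := []}.
```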
6. Remove stats timer from writer
AMQP 1.0 connections haven't emitted any stats previously.
7. When there are contiguous queue confirmations in the session process
mailbox, batch them. When the confirmations are sent to the publisher, a
single DISPOSITION frame is sent for contiguously confirmed delivery
IDs.
This approach should be good enough. However, it's suboptimal in
scenarios where contiguous delivery IDs that need confirmations are rare,
for example:
* There are multiple links in the session with different sender
settlement modes and the sender publishes across these links interleaved.
* The sender settlement mode is mixed and the sender publishes settled
and unsettled TRANSFERs interleaved.
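A sketch of the batching idea (illustrative only, ignoring serial-number wrap-around): collapse a sorted list of confirmed delivery IDs into contiguous ranges, each of which maps onto one DISPOSITION with matching first and last fields.
```erlang
%% ranges([1,2,3,7,8,10]) -> [{1,3},{7,8},{10,10}]
%% i.e. three DISPOSITION frames instead of six.
ranges([]) ->
    [];
ranges([Id | Ids]) ->
    ranges(Ids, Id, Id, []).

ranges([Id | Ids], First, Last, Acc) when Id =:= Last + 1 ->
    ranges(Ids, First, Id, Acc);                 %% contiguous: extend range
ranges([Id | Ids], First, Last, Acc) ->
    ranges(Ids, Id, Id, [{First, Last} | Acc]);  %% gap: close range
ranges([], First, Last, Acc) ->
    lists:reverse([{First, Last} | Acc]).
```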
8. Introduce credit API v2
Why:
The AMQP 0.9.1 credit extension, which is to be removed in 4.0, was poorly
designed, since basic.credit is a synchronous call into the queue process,
blocking the entire AMQP 1.0 session process.
How:
Change the interactions between queue clients and queue server
implementations:
* Clients only request a credit reply if the FLOW's `echo` field is set
* Include all link flow control state held by the queue process in a
new credit_reply queue event:
* `available` after the queue sends any deliveries
* `link-credit` after the queue sends any deliveries
* `drain` which allows us to combine the old queue events
send_credit_reply and send_drained into a single new queue event
credit_reply.
* Include the consumer tag into the credit_reply queue event such that
the AMQP 1.0 session process can process any credit replies
asynchronously.
Link flow control state `delivery-count` also moves to the queue processes.
The new interactions are hidden behind feature flag credit_api_v2 to
allow for rolling upgrades from 3.13 to 4.0.
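For illustration only, the consolidated event can be pictured as a single tuple carrying all of the queue-held link flow control state (the field layout here is hypothetical, not the actual rabbit_queue_type event):
```erlang
%% Hypothetical shape of the consolidated credit_reply queue event.
{queue_event, QueueName,
 {credit_reply,
  ConsumerTag,   %% lets the session correlate the reply asynchronously
  DeliveryCount, %% queue-held delivery-count (serial number)
  LinkCredit,    %% remaining link-credit after any deliveries
  Available,     %% messages available after any deliveries
  Drain}}        %% whether this reply completes a drain
```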
9. Use serial number arithmetic in quorum queues and session process.
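Serial number arithmetic (RFC 1982) lets 32-bit counters such as delivery-count wrap around safely. A sketch of the 32-bit comparison, in the spirit of the serial_number module mentioned in item 29:
```erlang
-define(HALF, 16#80000000).  %% 2^31

%% RFC 1982 comparison for 32-bit serial numbers.
compare(A, A) -> equal;
compare(A, B) when (A < B andalso B - A < ?HALF) orelse
                   (A > B andalso A - B > ?HALF) -> less;
compare(_, _) -> greater.

%% Addition wraps modulo 2^32.
add(A, N) -> (A + N) band 16#FFFFFFFF.
```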
10. Completely bypass the rabbit_limiter module for AMQP 1.0
flow control. The goal is to eventually remove the rabbit_limiter module
in 4.0 since AMQP 0.9.1 global QoS will be unsupported in 4.0. This
commit lifts the AMQP 1.0 link flow control logic out of rabbit_limiter
into rabbit_queue_consumers.
11. Fix credit bug for streams:
AMQP 1.0 settlements shouldn't top up link credit,
only FLOW frames should top up link credit.
12. Allow sender settle mode unsettled for streams
since AMQP 1.0 acknowledgements to streams are no-ops (currently).
13. Fix AMQP 1.0 client bugs
Auto renewing credits should not be related to settling TRANSFERs.
Remove field link_credit_unsettled as it was wrong and confusing.
Prior to this commit, auto renewal did not work when the sender used
sender settlement mode settled.
14. Fix AMQP 1.0 client bugs
The wrong, outdated Link was passed to the function auto_flow/2.
15. Use osiris chunk iterator
Only hold messages of uncompressed sub batches in memory if the consumer
doesn't have sufficient credits.
Compressed sub batches are skipped for non-Stream-protocol consumers.
16. Fix incoming link flow control
Always use confirms between AMQP 1.0 queue clients and queue servers.
As already done internally by rabbit_fifo_client and
rabbit_stream_queue, use confirms for classic queues as well.
17. Include the link handle in the correlation when publishing messages to target queues
such that the session process can correlate confirms from target queues to
incoming links.
18. Only grant more credits to publishers if the publisher no longer has sufficient
credits and there are not too many unconfirmed messages on the link.
19. Completely ignore `block` and `unblock` queue actions and RabbitMQ credit flow
between classic queue process and session process.
20. Link flow control is independent between links.
A client can refer to a queue or to an exchange with multiple
dynamically added target queues. Multiple incoming links can also fan
in to the same queue. However the link topology looks, this
commit ensures that each link is only granted more credits if that link
isn't overloaded.
21. A connection or a session can send to many different queues.
In AMQP 0.9.1, a single slow queue will lead to the entire channel, and
then the entire connection, being blocked.
This commit makes sure that a single slow queue from one link won't slow
down sending on other links.
For example, with link A sending to a local classic queue and
link B sending to a 5-replica quorum queue, link B will naturally
grant credits slower than link A. So, despite the quorum queue being
slower in confirming messages, the same AMQP 1.0 connection and session
can still pump data very fast into the classic queue.
22. If a cluster-wide memory or disk alarm occurs:
Each session sends a FLOW with incoming-window set to 0 to the sending client.
If sending clients don’t obey, the client is force disconnected.
When the cluster-wide memory alarm clears:
Each session resumes with a FLOW defaulting to the initial incoming-window.
23. All operations apart from publishing TRANSFERs to RabbitMQ can continue during cluster-wide alarms,
specifically attaching consumers and consuming, i.e. emptying queues.
There is no need for separate AMQP 1.0 connections for publishers and consumers as recommended for our AMQP 0.9.1 implementation.
24. Flow control summary:
* If a queue becomes the bottleneck, that’s solved by slowing down individual sending links (AMQP 1.0 link flow control).
* If a session becomes the bottleneck (more unlikely), that’s solved by AMQP 1.0 session flow control.
* If the connection becomes the bottleneck, it naturally won’t read fast enough from the socket, causing TCP backpressure to be applied.
Nowhere will RabbitMQ internal credit based flow control (i.e. module credit_flow) be used on the incoming AMQP 1.0 message path.
25. Register AMQP sessions
Prefer local-only pg over our custom pg_local implementation as
pg is a better process group implementation than pg_local.
pg_local was identified as a bottleneck in tests where many MQTT clients were disconnected at once.
26. Start a local-only pg when Rabbit boots:
> A scope can be kept local-only by using a scope name that is unique cluster-wide, e.g. the node name:
> pg:start_link(node()).
Register AMQP 1.0 connections and sessions with pg.
In future we should remove pg_local and instead use the new local-only
pg for all registered processes such as AMQP 0.9.1 connections and channels.
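A sketch of the intended pg usage (the group names here are illustrative):
```erlang
%% At boot: start a local-only pg scope. The node name is unique
%% cluster-wide, so this scope never syncs with other nodes.
{ok, _} = pg:start_link(node()).

%% In a connection or session process: join an illustrative group.
ok = pg:join(node(), amqp_connections, self()).

%% Elsewhere: enumerate all locally registered AMQP connections.
Connections = pg:get_local_members(node(), amqp_connections).
```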
27. Requeue messages if link detached
Although the spec allows settling delivery IDs on detached links, RabbitMQ does not respect the 'closed'
field of the DETACH frame and therefore handles every DETACH frame as closed. Since the link is closed,
we expect every outstanding delivery to be requeued.
In addition to consumer cancellation, detaching a link therefore causes in-flight deliveries to be requeued.
Note that this behaviour is different from mere consumer cancellation in AMQP 0.9.1:
"After a consumer is cancelled there will be no future deliveries dispatched to it. Note that there can
still be "in flight" deliveries dispatched previously. Cancelling a consumer will neither discard nor requeue them."
[https://www.rabbitmq.com/consumers.html#unsubscribing]
An AMQP receiver can first drain, and then detach, to prevent "in flight" deliveries.
28. Init AMQP session with BEGIN frame
Similar to how there can't be an MQTT processor without a CONNECT
frame, there can't be an AMQP session without a BEGIN frame.
This allows having strict dialyzer types for session flow control
fields (i.e. not allowing 'undefined').
29. Move serial_number to the AMQP 1.0 common lib
such that it can be used by both the AMQP 1.0 server and client.
30. Fix AMQP client to do serial number arithmetic.
31. AMQP client: Differentiate between delivery-id and transfer-id for better
understandability.
32. Fix link flow control in classic queues
This commit fixes
```
java -jar target/perf-test.jar -ad false -f persistent -u cq -c 3000 -C 1000000 -y 0
```
followed by
```
./omq -x 0 amqp -T /queue/cq -D 1000000 --amqp-consumer-credits 2
```
Prior to this commit (and on RabbitMQ 3.x), consuming would halt after around
8,000 - 10,000 messages.
The bug was that in-flight messages from the classic queue process to the
session process were not taken into account when topping up credit to
the classic queue process.
Fixes #2597
The solution to this bug (and a much cleaner design anyway, independent of
this bug) is that queues should hold all link flow control state, including
the delivery-count.
Hence, when credit API v2 is used, the delivery-count will be held by the
classic queue process, quorum queue process, and stream queue client
instead of being managed in the session.
33. The double-level crediting between (a) session process and
rabbit_fifo_client, and (b) rabbit_fifo_client and rabbit_fifo was
removed. Therefore, instead of managing 3 separate delivery-counts (i. session,
ii. rabbit_fifo_client, iii. rabbit_fifo), only 1 delivery-count is used
in rabbit_fifo. This is a big simplification.
34. This commit fixes quorum queues without bumping the machine version
nor introducing new rabbit_fifo commands.
Whether credit API v2 is used is solely determined at link attachment time
depending on whether feature flag credit_api_v2 is enabled.
Even when that feature flag is enabled later on, this link will
keep using credit API v1 until detached (or the node is shut down).
Eventually, after feature flag credit_api_v2 has been enabled and a
subsequent rolling upgrade, all links will use credit API v2.
This approach is safe and simple.
The 2 alternatives to move delivery-count from the session process to the
queue processes would have been:
i. Explicit feature flag credit_api_v2 migration function
* Can use a gen_server:call and only finish the migration once all delivery-counts have been migrated.
Cons:
* Extra new message format just for migration is required.
* Risky as migration will fail if a target queue doesn’t reply.
ii. Session always includes DeliveryCountSnd when crediting to the queue:
Cons:
* 2 delivery-counts will be held simultaneously in session proc and queue proc;
could be solved by deleting the session proc’s delivery-count for credit-reply
* What happens if the receiver doesn’t provide credit for a very long time? Is that a problem?
35. Support stream filtering in AMQP 1.0 (by @acogoluegnes)
Use the x-stream-filter-value message annotation
to carry the filter value in a published message.
Use the rabbitmq:stream-filter and rabbitmq:stream-match-unfiltered
filters when creating a receiver that wants to filter
out messages from a stream.
36. Remove credit extension from AMQP 0.9.1 client
37. Support maintenance mode closing AMQP 1.0 connections.
38. Remove AMQP 0.9.1 client dependency from AMQP 1.0 implementation.
39. Move AMQP 1.0 plugin to the core. AMQP 1.0 is enabled by default.
The old rabbitmq_amqp1_0 plugin will be kept as a no-op plugin to prevent
deployment tools that execute the following commands from failing:
```
rabbitmq-plugins enable rabbitmq_amqp1_0
rabbitmq-plugins disable rabbitmq_amqp1_0
```
40. Breaking change: Remove CLI command `rabbitmqctl list_amqp10_connections`.
Instead, list both AMQP 0.9.1 and AMQP 1.0 connections in `list_connections`:
```
rabbitmqctl list_connections protocol
Listing connections ...
protocol
{1, 0}
{0,9,1}
```
## Benchmarks
### Throughput & Latency
Setup:
* Single node Ubuntu 22.04
* Erlang 26.1.1
Start RabbitMQ:
```
make run-broker PLUGINS="rabbitmq_management rabbitmq_amqp1_0" FULL=1 RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS="+S 3"
```
Predeclare durable classic queue cq1, durable quorum queue qq1, durable stream queue sq1.
Start client:
https://github.com/ssorj/quiver
https://hub.docker.com/r/ssorj/quiver/tags (digest 453a2aceda64)
```
docker run -it --rm --add-host host.docker.internal:host-gateway ssorj/quiver:latest
bash-5.1# quiver --version
quiver 0.4.0-SNAPSHOT
```
1. Classic queue
```
quiver //host.docker.internal//amq/queue/cq1 --durable --count 1m --duration 10m --body-size 12 --credit 1000
```
This commit:
```
Count ............................................. 1,000,000 messages
Duration ............................................... 73.8 seconds
Sender rate .......................................... 13,548 messages/s
Receiver rate ........................................ 13,547 messages/s
End-to-end rate ...................................... 13,547 messages/s
Latencies by percentile:
0% ........ 0 ms 90.00% ........ 9 ms
25% ........ 2 ms 99.00% ....... 14 ms
50% ........ 4 ms 99.90% ....... 17 ms
100% ....... 26 ms 99.99% ....... 24 ms
```
RabbitMQ 3.x (main branch as of 30 January 2024):
```
---------------------- Sender ----------------------- --------------------- Receiver ---------------------- --------
Time [s] Count [m] Rate [m/s] CPU [%] RSS [M] Time [s] Count [m] Rate [m/s] CPU [%] RSS [M] Lat [ms]
----------------------------------------------------- ----------------------------------------------------- --------
2.1 130,814 65,342 6 73.6 2.1 3,217 1,607 0 8.0 511
4.1 163,580 16,367 2 74.1 4.1 3,217 0 0 8.0 0
6.1 229,114 32,767 3 74.1 6.1 3,217 0 0 8.0 0
8.1 261,880 16,367 2 74.1 8.1 67,874 32,296 8 8.2 7,662
10.1 294,646 16,367 2 74.1 10.1 67,874 0 0 8.2 0
12.1 360,180 32,734 3 74.1 12.1 67,874 0 0 8.2 0
14.1 392,946 16,367 3 74.1 14.1 68,604 365 0 8.2 12,147
16.1 458,480 32,734 3 74.1 16.1 68,604 0 0 8.2 0
18.1 491,246 16,367 2 74.1 18.1 68,604 0 0 8.2 0
20.1 556,780 32,767 4 74.1 20.1 68,604 0 0 8.2 0
22.1 589,546 16,375 2 74.1 22.1 68,604 0 0 8.2 0
receiver timed out
24.1 622,312 16,367 2 74.1 24.1 68,604 0 0 8.2 0
quiver: error: PlanoProcessError: Command 'quiver-arrow receive //host.docker.internal//amq/queue/cq1 --impl qpid-proton-c --duration 10m --count 1m --rate 0 --body-size 12 --credit 1000 --transaction-size 0 --timeout 10 --durable --output /tmp/quiver-otujr23y' returned non-zero exit status 1.
Traceback (most recent call last):
File "/usr/local/lib/quiver/python/quiver/pair.py", line 144, in run
_plano.wait(receiver, check=True)
File "/usr/local/lib/quiver/python/plano/main.py", line 1243, in wait
raise PlanoProcessError(proc)
plano.main.PlanoProcessError: Command 'quiver-arrow receive //host.docker.internal//amq/queue/cq1 --impl qpid-proton-c --duration 10m --count 1m --rate 0 --body-size 12 --credit 1000 --transaction-size 0 --timeout 10 --durable --output /tmp/quiver-otujr23y' returned non-zero exit status 1.
```
2. Quorum queue:
```
quiver //host.docker.internal//amq/queue/qq1 --durable --count 1m --duration 10m --body-size 12 --credit 1000
```
This commit:
```
Count ............................................. 1,000,000 messages
Duration .............................................. 101.4 seconds
Sender rate ........................................... 9,867 messages/s
Receiver rate ......................................... 9,868 messages/s
End-to-end rate ....................................... 9,865 messages/s
Latencies by percentile:
0% ....... 11 ms 90.00% ....... 23 ms
25% ....... 15 ms 99.00% ....... 28 ms
50% ....... 18 ms 99.90% ....... 33 ms
100% ....... 49 ms 99.99% ....... 47 ms
```
RabbitMQ 3.x:
```
---------------------- Sender ----------------------- --------------------- Receiver ---------------------- --------
Time [s] Count [m] Rate [m/s] CPU [%] RSS [M] Time [s] Count [m] Rate [m/s] CPU [%] RSS [M] Lat [ms]
----------------------------------------------------- ----------------------------------------------------- --------
2.1 130,814 65,342 9 69.9 2.1 18,430 9,206 5 7.6 1,221
4.1 163,580 16,375 5 70.2 4.1 18,867 218 0 7.6 2,168
6.1 229,114 32,767 6 70.2 6.1 18,867 0 0 7.6 0
8.1 294,648 32,734 7 70.2 8.1 18,867 0 0 7.6 0
10.1 360,182 32,734 6 70.2 10.1 18,867 0 0 7.6 0
12.1 425,716 32,767 6 70.2 12.1 18,867 0 0 7.6 0
receiver timed out
14.1 458,482 16,367 5 70.2 14.1 18,867 0 0 7.6 0
quiver: error: PlanoProcessError: Command 'quiver-arrow receive //host.docker.internal//amq/queue/qq1 --impl qpid-proton-c --duration 10m --count 1m --rate 0 --body-size 12 --credit 1000 --transaction-size 0 --timeout 10 --durable --output /tmp/quiver-b1gcup43' returned non-zero exit status 1.
Traceback (most recent call last):
File "/usr/local/lib/quiver/python/quiver/pair.py", line 144, in run
_plano.wait(receiver, check=True)
File "/usr/local/lib/quiver/python/plano/main.py", line 1243, in wait
raise PlanoProcessError(proc)
plano.main.PlanoProcessError: Command 'quiver-arrow receive //host.docker.internal//amq/queue/qq1 --impl qpid-proton-c --duration 10m --count 1m --rate 0 --body-size 12 --credit 1000 --transaction-size 0 --timeout 10 --durable --output /tmp/quiver-b1gcup43' returned non-zero exit status 1.
```
3. Stream:
```
quiver-arrow send //host.docker.internal//amq/queue/sq1 --durable --count 1m -d 10m --summary --verbose
```
This commit:
```
Count ............................................. 1,000,000 messages
Duration ................................................ 8.7 seconds
Message rate ........................................ 115,154 messages/s
```
RabbitMQ 3.x:
```
Count ............................................. 1,000,000 messages
Duration ............................................... 21.2 seconds
Message rate ......................................... 47,232 messages/s
```
### Memory usage
Start RabbitMQ:
```
ERL_MAX_PORTS=3000000 RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS="+P 3000000 +S 6" make run-broker PLUGINS="rabbitmq_amqp1_0" FULL=1 RABBITMQ_CONFIG_FILE="rabbitmq.conf"
```
```
/bin/cat rabbitmq.conf
tcp_listen_options.sndbuf = 2048
tcp_listen_options.recbuf = 2048
vm_memory_high_watermark.relative = 0.95
vm_memory_high_watermark_paging_ratio = 0.95
loopback_users = none
```
Create 50k connections with 2 sessions per connection, i.e. 100k sessions in total:
```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/Azure/go-amqp"
)

func main() {
	// Open 50k connections with 2 sessions each, i.e. 100k sessions in total.
	for i := 0; i < 50000; i++ {
		conn, err := amqp.Dial(context.TODO(), "amqp://nuc", &amqp.ConnOptions{SASLType: amqp.SASLTypeAnonymous()})
		if err != nil {
			log.Fatal("dialing AMQP server:", err)
		}
		_, err = conn.NewSession(context.TODO(), nil)
		if err != nil {
			log.Fatal("creating AMQP session:", err)
		}
		_, err = conn.NewSession(context.TODO(), nil)
		if err != nil {
			log.Fatal("creating AMQP session:", err)
		}
	}
	log.Println("opened all connections")
	// Keep connections open so broker memory usage can be inspected.
	time.Sleep(5 * time.Hour)
}
```
This commit:
```
erlang:memory().
[{total,4586376480},
{processes,4025898504},
{processes_used,4025871040},
{system,560477976},
{atom,1048841},
{atom_used,1042841},
{binary,233228608},
{code,21449982},
{ets,108560464}]
erlang:system_info(process_count).
450289
```
7 procs per connection + 1 proc per session.
(7 + 2*1) * 50,000 = 450,000 procs
RabbitMQ 3.x:
```
erlang:memory().
[{total,15168232704},
{processes,14044779256},
{processes_used,14044755120},
{system,1123453448},
{atom,1057033},
{atom_used,1052587},
{binary,236381264},
{code,21790238},
{ets,391423744}]
erlang:system_info(process_count).
1850309
```
7 procs per connection + 15 per session
(7 + 2*15) * 50,000 = 1,850,000 procs
50k connections + 100k sessions require
with this commit: 4.5 GB
in RabbitMQ 3.x: 15 GB
## Future work
1. More efficient parser and serializer
2. TODO in mc_amqp: Do not store the parsed message on disk.
3. Implement both AMQP HTTP extension and AMQP management extension to allow AMQP
clients to create RabbitMQ objects (queues, exchanges, ...).
    {amqp1_0_default_user, <<"guest">>},
    {amqp1_0_default_vhost, <<"/">>},
    {loopback_users, [<<"guest">>]},
    {password_hashing_module, rabbit_password_hashing_sha256},
    {server_properties, []},
    {collect_statistics, none},
    {collect_statistics_interval, 5000},
    {mnesia_table_loading_retry_timeout, 30000},
    {mnesia_table_loading_retry_limit, 10},
    {auth_mechanisms, ['PLAIN', 'AMQPLAIN']},
    {auth_backends, [rabbit_auth_backend_internal]},
    {delegate_count, 16},
    {trace_vhosts, []},
    {ssl_cert_login_from, distinguished_name},
    {ssl_handshake_timeout, 5000},
    {ssl_allow_poodle_attack, false},
    {handshake_timeout, 10000},
    {reverse_dns_lookups, false},
    {cluster_partition_handling, ignore},
    {cluster_keepalive_interval, 10000},
    {autoheal_state_transition_timeout, 60000},
    {tcp_listen_options, [{backlog, 128},
                          {nodelay, true},
                          {linger, {true, 0}},
                          {exit_on_close, false}
                         ]},
    {halt_on_upgrade_failure, true},
    {ssl_apps, [asn1, crypto, public_key, ssl]},
    %% classic queue storage implementation version
    {classic_queue_default_version, 2},
    %% see rabbitmq-server#227 and related tickets.
    %% msg_store_credit_disc_bound only takes effect when
    %% messages are persisted to the message store. If messages
    %% are embedded on the queue index, then modifying this
    %% setting has no effect because credit_flow is not used when
    %% writing to the queue index. See the setting
    %% queue_index_embed_msgs_below above.
    {msg_store_credit_disc_bound, {4000, 800}},
    {msg_store_io_batch_size, 4096},
    %% see rabbitmq-server#143,
    %% rabbitmq-server#949, rabbitmq-server#1098
    {credit_flow_default_credit, {400, 200}},
    {quorum_commands_soft_limit, 32},
    {quorum_cluster_size, 3},
    %% see rabbitmq-server#248
    %% and rabbitmq-server#667
    {channel_operation_timeout, 15000},
    %% See https://www.rabbitmq.com/consumers.html#acknowledgement-timeout
    %% 30 minutes
    {consumer_timeout, 1800000},
    %% see rabbitmq-server#486
    {autocluster,
     [{peer_discovery_backend, rabbit_peer_discovery_classic_config}]
    },
    %% used by rabbit_peer_discovery_classic_config
    {cluster_nodes, {[], disc}},
    {config_entry_decoder, [{passphrase, undefined}]},
    {background_gc_enabled, false},
    {background_gc_target_interval, 60000},
    %% rabbitmq-server#589
    {proxy_protocol, false},
    {disk_monitor_failure_retries, 10},
    {disk_monitor_failure_retry_interval, 120000},
    %% either "stop_node" or "continue".
    %% by default we choose to not terminate the entire node if one
    %% vhost had to shut down, see server#1158 and server#1280
    {vhost_restart_strategy, continue},
    %% {global, prefetch count}
Quorum queues (#1706)
* Test queue.declare method with quorum type
[#154472130]
* Cosmetics
[#154472130]
* Start quorum queue
Includes ra as a rabbit dependency
[#154472152]
* Update info and list operations to use quorum queues
Basic implementation. Might need an update when more functionality
is added to the quorum queues.
[#154472152]
* Stop quorum queue
[#154472158]
* Restart quorum queue
[#154472164]
* Introduce UId in ra config to support newer version of ra
Improved ra stop
[#154472158]
* Put data inside VHost specific subdirs
[#154472164]
* Include ra in rabbit deps to support stop_app/start_app command
[#154472164]
* Stop quorum queues in `rabbit_amqqueue:stop/1`
[#154472158]
* Revert creation of fifo ets table inside rabbit
Now supported by ra
[#154472158]
* Filter quorum queues
[#154472158]
* Test restart node with quorum queues
[#154472164]
* Publish to quorum queues
[#154472174]
* Use `ra:restart_node/1`
[#154472164]
* Wait for stats to be published when querying quorum queues
[#154472174]
* Test publish and queue length after restart
[#154472174]
* Consume messages from quorum queues with basic.get
[#154472211]
* Autoack messages from quorum queues on basic.get
[#154472211]
* Fix no_ack meaning
no_ack = true is equivalent to autoack
[#154472211]
* Use data_dir as provided in the config
If we modify the data_dir, ra is not able to delete the data
when a queue is deleted
[#154472158]
* Remove unused code/variables
[#154472158]
* Subscribe to a quorum queue
Supports auto-ack
[#154472215]
* Ack messages consumed from quorum queues
[#154472221]
* Nack messages consumed from quorum queues
[#154804608]
* Use delivery tag as consumer tag for basic.get in quorum queues
[#154472221]
* Support for publisher confirms in quorum queues
[#154472198]
* Integrate with ra_fifo_client
* Clear queue state on queue.delete
[#154472158]
* Fix quorum nack
[#154804608]
* Test redelivery after nack
[#154804608]
* Nack without requeueing
[#154472225]
* Test multiple acks
[#154804208]
* Test multiple nacks
[#154804314]
* Configure dead letter exchange with queue declare
[#155076661]
* Use a per-vhost process to handle dead-lettering
Needs to hold state for quorum queues
[#155401802]
* Implement dead-lettering on nack'ed messages
[#154804620]
* Use queue name as a resource on message delivery
Fixes a previously introduced bug
[#154804608]
* Handle ra events on dead letter process
[#155401802]
* Pass empty queue states to queue delete
Queue deletion on vhost deletion calls directly to rabbit_amqqueue.
Queue states are not available, but we can provide an empty map, as
during deletion the states are only needed for cleanup.
* Generate quorum queue stats and events
Consumer delete events are still pending, as they depend on basic.cancel
(not implemented yet), ra terminating, or ra detecting channel down
[#154472241]
* Ensure quorum mapping entries are available before metric emission
[#154472241]
* Configure data_dir, uses new RABBITMQ_QUORUM_BASE env var
[#154472152]
* Use untracked enqueues when sending without a channel
Updated several other calls missed during the quorum implementation
* Revert "Configure data_dir, uses new RABBITMQ_QUORUM_BASE env var"
This reverts commit f2261212410affecb238fcbd1fb451381aee4036.
* Configure data_dir, uses new RABBITMQ_QUORUM_DIR based on mnesia dir
[#154472152]
* Fix get_quorum_state
* Fix calculation of quorum pids
* Move all quorum queues code to its own module
[#154472241]
* Return an error when declaring a quorum queue with an incompatible argument
[#154521696]
* Cleanup of quorum queue state after queue delete
Also fixes some existing problems where the state wasn't properly
stored
[#155458625]
* Revert Revert "Declare a quorum queue using the queue.declare method"
* Remove duplicated state info
[#154472241]
* Start/stop multi-node quorum queue
[#154472231]
[#154472236]
* Restart nodes in a multi-node quorum cluster
[#154472238]
* Test restart and leadership takeover on multiple nodes
[#154472238]
* Wait for leader down after deleting a quorum cluster
It ensures a smooth delete-declare sequence without race
conditions. The test included here detected the situation before
the fix.
[#154472236]
* Populate quorum_mapping from mnesia when not available
Ensures that leader nodes that don't have direct requests can get
the mapping ra name -> queue name
* Cosmetics
* Do not emit core metrics if queue has just been deleted
* Use rabbit_mnesia:is_process_alive
Fixes bug introduced by cac9583e1bb2705be7f06c2ab7f416a75d11c875
[#154472231]
* Only try to report stats if quorum process is alive
* Implement cancel consumer callback
Deletes metrics and sends consumer deleted event
* Remove unnecessary trigger election call
ra:restart_node has already been called during the recovery
* Apply cancellation callback on node hosting the channel
* Cosmetics
* Read new fifo metrics which store directly total, ready and unack
* Implement basic.cancel for quorum queues
* Store leader in amqqueue record, report all in stats
[#154472407]
* Declare quorum queue in mnesia before starting the ra cluster
Record needs to be stored first to update the leader on ra effects
* Revert
* Purge quorum queues
[#154472182]
* Improve use of untracked_enqueue
Choose the persisted leader id instead of just using the id of the
leader at point of creation.
* Store quorum leader in the pid field of amqqueue record
Same as mirrored queues, no real need for an additional field
* Improve recovery
When a ra node has never been started on a rabbit node, ensure it doesn't
fail but instead rebuilds the config and starts the node as a new node.
Also fix an issue when a quorum queue is declared while one of its rabbit
nodes is unavailable.
[#157054606]
* Cleanup core metrics after leader change
[#157054473]
* Return an error on sync_queue on quorum queues
[#154472334]
* Return an error on cancel_sync_queue on quorum queues
[#154472337]
* Fix basic_cancel and basic_consume return values
Ensure the quorum queue state is always returned by these functions.
* Restore arity of amqqueue delete and purge functions.
This avoids some breaking changes in the cli.
* Fix bug returning consumers.
* remove rogue debug log
* Integrate ingress flow control with quorum queues
[#157000583]
* Configure commands soft limit
[#157000583]
* Support quorum pids on rabbit_mnesia:is_process_alive
* Publish consumers metric for quorum queues
* Whitelist quorum directory in is_virgin_node
Allow the quorum directory to exist without affecting the status of the
Rabbit node.
* Delete queue_metrics on leader change.
Also run the become_leader handler in a separate process to avoid
blocking.
[#157424225]
* Report cluster status in quorum queue infos. New per node status command.
Related to
[#157146500]
* Remove quorum_mapping table
As we can store the full queue name resource as the cluster id of the
ra_fifo_client state, we can avoid needing the quorum_mapping table.
* Fix xref issue
* Provide quorum members information in stats
[#157146500]
* fix unused variable
* quorum queue multiple declare handling
Extend rabbit_amqqueue:internal_declare/2 to indicate if the queue
record was created or existing. From this we can then provide a code
path that should handle concurrent queue declares of the same quorum
queue.
* Return an error when declaring exclusive/auto-delete quorum queue
[#157472160]
* Restore lost changes
from 79c9bd201e1eac006a42bd162e7c86df96496629
* recover another part of commit
* fixup cherry pick
* Ra io/file metrics handler and stats publishing
[#157193081]
* Revert "Ra io/file metrics handler and stats publishing"
This reverts commit 05d15c786540322583fc655709825db215b70952.
* Do not issue confirms on node down for quorum queues.
Only a ra_event should be used to issue positive confirms for a quorum
queue.
* Ra stats publishing
[#157193081]
* Pick consumer utilisation from ra data
[#155402726]
* Handle error when deleting a quorum queue and all nodes are already down
This is in fact a successful deletion as all raft nodes are already 'stopped'
[#158656366]
* Return an error when declaring non-durable quorum queues
[#158656454]
* Rename dirty_query to committed_query
* Delete stats on leader node
[#158661152]
* Give full list of nodes to fifo client
* Handle timeout in quorum basic_get
* Fix unused variable error
* Handle timeout in basic get
[#158656366]
* Force GC after purge
[#158789389]
* Increase `ra:delete_cluster` timeout to 120s
* Revert "Force GC after purge"
This reverts commit 5c98bf22994eb39004760799d3a2c5041d16e9d4.
* Add quorum member command
[#157481599]
* Delete quorum member command
[#157481599]
* Implement basic.recover for quorum queues
[#157597411]
* Change consumer utilisation
to use the new ra_fifo table and api.
* Set max quorum queue size limit
Defaults to 7, can be configured per queue on queue.declare
Nodes are selected randomly from the list of nodes, except the one
that is executing the queue.declare command
[#159338081]
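For illustration, declaring a quorum queue with a non-default initial group size might look like this with the Erlang AMQP 0.9.1 client (argument names follow the quorum-initial-group-size rename below; the queue name and size are made up):
```erlang
%% Assumes the Erlang AMQP 0.9.1 client (amqp_client) and an open Channel.
-include_lib("amqp_client/include/amqp_client.hrl").

declare_qq(Channel) ->
    %% Declare a durable quorum queue with an initial group size of 5.
    Declare = #'queue.declare'{
                 queue     = <<"qq1">>,
                 durable   = true,
                 arguments = [{<<"x-queue-type">>, longstr, <<"quorum">>},
                              {<<"x-quorum-initial-group-size">>, long, 5}]},
    #'queue.declare_ok'{} = amqp_channel:call(Channel, Declare).
```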
* remove potentially unrelated changes to rabbit_networking
* Move ra_fifo to rabbit
Copied ra_fifo to rabbit and renamed it rabbit_fifo.
[#159338031]
* rabbit_fifo tidy up
* rabbit_fifo tidy up
* rabbit_fifo: customer -> consumer rename
* Move ra_fifo tests
[#159338031]
* Tweak quorum_queue defaults
* quorum_queue test reliability
* Optimise quorum_queue test suite.
By only starting a rabbit cluster per group rather than per test.
[#160612638]
* Renamings in line with ra API changes
* rabbit_fifo fixes
* Update with ra API changes
Ra has consolidated and simplified its API. These changes update to
conform to that.
* Update rabbit_fifo with latest ra changes
* Clean up out of date comment
* Return map of states
* Add test case for basic.get on an empty queue
Before the previous patch, any subsequent basic.get would crash as
the map of states had been replaced by a single state.
* Clarify use of delivery tags on record_sent
* Clean up queues after testcase
* Remove erlang monitor of quorum queues in rabbit_channel
The eol event can be used instead
* Use macros to make clearer distinctions between quorum/classic queues
Cosmetic only
* Erase queue stats on 'eol' event
* Update to follow Ra's cluster_id -> cluster_name rename.
* Rename quorum-cluster-size
To quorum-initial-group-size
* Issue confirms on quorum queue eol
Also avoid creating quorum queue session state on queue operation
methods.
* Only classic queues should be notified on channel down
* Quorum queues do not support global qos
Exit with a protocol error if a basic.consume for a quorum queue is issued
on a channel with global qos enabled.
* unused variable name
* Refactoring
Strictly enforce that channels do not monitor quorum queues.
* Refactor foreach_per_queue in the channel.
To make it call classic and quorum queues the same way.
[#161314899]
* rename function
* Query classic and quorum queues separately
during recovery as they should not be marked as stopped during failed
vhost recovery.
* Remove force_event_refresh function
As the only user of this function, the management API no longer requires
it.
* fix errors
* Remove created_at from amqqueue record
[#161343680]
* rabbit_fifo: support AMQP 1.0 consumer credit
This change implements an alternative consumer credit mechanism similar
to AMQP 1.0 link credit where the credit (prefetch) isn't automatically
topped up as deliveries are settled and instead needs to be manually
increased using a credit command. This is to be integrated with the AMQP
1.0 plugin.
[#161256187]
* Add basic.credit support for quorum queues.
Added support for AMQP 1.0 transfer flow control.
[#161256187]
* Make quorum queue recover idempotent
So that if a vhost crashes and runs the recover steps it doesn't fail
because ra servers are still running.
[#161343651]
* Add tests for vhost deletion
To ensure quorum queues are cleaned up on vhost removal.
Also fix xref issue.
[#161343673]
* remove unused clause
* always return latest value of queue
* Add rabbitmq-queues scripts. Remove ra config from .bat scripts.
* Return error if trying to get quorum status of a classic queue.
    {default_consumer_prefetch, {false, 0}},
    %% interval at which the channel can perform periodic actions
    {channel_tick_interval, 60000},
    %% Default max message size is 128 MB
    {max_message_size, 134217728},
    %% Socket writer will run GC every 1 GB of outgoing data
    {writer_gc_threshold, 1000000000},
    %% interval at which connection/channel tracking executes post operations
    {tracking_execution_timeout, 15000},
    {stream_messages_soft_limit, 256},
    {track_auth_attempt_source, false},
    {credentials_obfuscation_fallback_secret, <<"nocookie">>},
    {dead_letter_worker_consumer_prefetch, 32},
    {dead_letter_worker_publisher_confirm_timeout, 180000},
    %% EOL date for the current release series, if known/announced
    {release_series_eol_date, none}
]
endef
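These PROJECT_ENV entries are the defaults baked into the `rabbit` application; operators can override any of them at runtime. A sketch using the standard advanced.config format (the keys are real entries from the list above, the values are illustrative):
```erlang
%% advanced.config - applied on top of the defaults defined above.
[
 {rabbit, [
   %% raise the memory watermark from the default 0.4
   {vm_memory_high_watermark, 0.6},
   %% lower the per-connection channel limit from the default 2047
   {channel_max, 1024}
 ]}
].
```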
LOCAL_DEPS = sasl os_mon inets compiler public_key crypto ssl syntax_tools xmerl
BUILD_DEPS = rabbitmq_cli looking_glass
DEPS = ranch rabbit_common amqp10_common rabbitmq_prelaunch ra sysmon_handler stdout_formatter recon redbug observer_cli osiris syslog systemd seshat khepri khepri_mnesia_migration cuttlefish gen_batch_server
Enable AMQP 1.0 clients to manage topologies
## What?
* Allow AMQP 1.0 clients to dynamically create and delete RabbitMQ
topologies (exchanges, queues, bindings).
* Provide an Erlang AMQP 1.0 client that manages topologies.
## Why?
Today, RabbitMQ topologies can be created via:
* [Management HTTP API](https://www.rabbitmq.com/docs/management#http-api)
(including Management UI and
[messaging-topology-operator](https://github.com/rabbitmq/messaging-topology-operator))
* [Definition Import](https://www.rabbitmq.com/docs/definitions#import)
* AMQP 0.9.1 clients
Up to RabbitMQ 3.13, the RabbitMQ AMQP 1.0 plugin auto-creates queues
and bindings depending on the terminus [address
format](https://github.com/rabbitmq/rabbitmq-server/tree/v3.13.x/deps/rabbitmq_amqp1_0#routing-and-addressing).
Such implicit creation of topologies is limiting and obscure.
For some address formats, queues will be created, but not deleted.
Some of RabbitMQ's success is due to its flexible routing topologies
that AMQP 0.9.1 clients can create and delete dynamically.
This commit allows dynamic management of topologies for AMQP 1.0 clients.
This commit builds on top of Native AMQP 1.0 (PR #9022) and will be
available in RabbitMQ 4.0.
## How?
This commits adds the following management operations for AMQP 1.0 clients:
* declare queue
* delete queue
* purge queue
* bind queue to exchange
* unbind queue from exchange
* declare exchange
* delete exchange
* bind exchange to exchange
* unbind exchange from exchange
Hence, at least the AMQP 0.9.1 management operations are supported for
AMQP 1.0 clients.
In addition the operation
* get queue
is provided which - similar to `declare queue` - returns queue
information including the current leader and replicas.
This allows clients to publish or consume locally on the node that hosts
the queue.
Compared to AMQP 0.9.1 whose commands and command fields are fixed, the
new AMQP Management API is extensible: New operations and new fields can
easily be added in the future.
There are different design options how management operations could be
supported for AMQP 1.0 clients:
1. Use a special exchange type as done in https://github.com/rabbitmq/rabbitmq-management-exchange
This has the advantage that any protocol client (e.g. also STOMP clients) could
dynamically manage topologies. However, a special exchange type is the wrong abstraction.
2. Clients could send "special" messages with special headers that the broker interprets.
This commit opts for a variation of the 2nd option, using a more
standardized way by re-using a subset of the following latest AMQP 1.0 extension
specifications:
* [AMQP Request-Response Messaging with Link Pairing Version 1.0 - Committee Specification 01](https://docs.oasis-open.org/amqp/linkpair/v1.0/cs01/linkpair-v1.0-cs01.html) (February 2021)
* [HTTP Semantics and Content over AMQP Version 1.0 - Working Draft 06](https://groups.oasis-open.org/higherlogic/ws/public/document?document_id=65571) (July 2019)
* [AMQP Management Version 1.0 - Working Draft 16](https://groups.oasis-open.org/higherlogic/ws/public/document?document_id=65575) (July 2019)
An important goal is to keep the interaction between AMQP 1.0 client and RabbitMQ
simple, to increase usage, development, and adoption of future RabbitMQ AMQP 1.0
client library wrappers.
The AMQP 1.0 client has to create a link pair to the special `/management` node.
This allows the client to send and receive from the management node.
Similar to AMQP 0.9.1, there is no need for a reply queue since the reply
will be sent directly to the client.
Requests and responses are modelled via HTTP, but sent via AMQP using
the `HTTP Semantics and Content over AMQP` extension (henceforth `HTTP
over AMQP` extension).
This commit tries to follow the `HTTP over AMQP` extension as much as
possible but deviates where this draft spec doesn't make sense.
The projected mode §4.1 is used as opposed to tunneled mode §4.2.
A named relay `/management` is used (§6.3) where the message field `to` is the URL.
Deviations are:
* §3.1 mandates that URIs are not encoded in an AMQP message.
However, we percent-encode URIs in the AMQP message. Otherwise there
is, for example, no way to distinguish a `/` in a queue name from the
URI path separator `/`.
* §4.1.4 mandates a data section. This commit uses an amqp-value section
as it's a better fit given that the content is AMQP encoded data.
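For illustration, a request/response pair in projected mode might look as follows (the field values are hypothetical and not taken from the actual implementation; per the extension, the HTTP method travels in the `subject` property and the request target in `to`):
```
Request message (sketch):
  properties:
    message-id: 1
    to:         "/queues/my%2Fqueue"   ; "/" in the queue name is percent-encoded
    subject:    "PUT"                  ; HTTP method
    reply-to:   "$me"                  ; response flows back over the paired link
  body: amqp-value section with the queue definition

Response message (sketch):
  properties:
    correlation-id: 1
    subject:        "201"              ; HTTP status code
  body: amqp-value section with queue information
```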
Using an HTTP API allows for a common well understood interface and future extensibility.
Instead of re-using the current RabbitMQ HTTP API, this commit uses a
new HTTP API (let's call it v2) which could be used as a future API for
plain HTTP clients.
### HTTP API v1
The current HTTP API (let's call it v1) is **not** used since v1
comes with a couple of weaknesses:
1. Deep level of nesting becomes confusing and difficult to manage.
Examples of deep nesting in v1:
```
/api/bindings/vhost/e/source/e/destination/props
/api/bindings/vhost/e/exchange/q/queue/props
```
2. Redundant endpoints returning the same resources
v1 has 9 endpoints to list binding(s):
```
/api/exchanges/vhost/name/bindings/source
/api/exchanges/vhost/name/bindings/destination
/api/queues/vhost/name/bindings
/api/bindings
/api/bindings/vhost
/api/bindings/vhost/e/exchange/q/queue
/api/bindings/vhost/e/exchange/q/queue/props
/api/bindings/vhost/e/source/e/destination
/api/bindings/vhost/e/source/e/destination/props
```
3. Verbs in path names
Path names should be nouns instead.
v1 contains verbs:
```
/api/queues/vhost/name/get
/api/exchanges/vhost/name/publish
```
### AMQP Management extension
Only few aspects of the AMQP Management extension are used.
The central idea of the AMQP management spec is **dynamic discovery** such that broker independent AMQP 1.0
clients can discover objects, types, operations, and HTTP endpoints of specific brokers.
In fact, clients are only conformant if:
> All request addresses are dynamically discovered starting from the discovery document.
> A requesting container MUST NOT use fixed assumptions about the addressing structure of the management API.
While this is a nice and powerful idea, no AMQP 1.0 client and no AMQP 1.0 server implement the
latest AMQP 1.0 management spec from 2019, partly presumably due to its complexity.
Therefore, the idea of such dynamic discovery has failed to be implemented in practice.
The AMQP management spec mandates that the management endpoint returns a discovery document containing
broker specific collections, types, configuration, and operations including their endpoints.
The API endpoints of the AMQP management spec are therefore all designed around dynamic discovery.
For example, to create either a queue or an exchange, the client has to
```
POST /$management/entities
```
which shows that the entities collection acts as a generic factory, see section 2.2.
The server will then create the resource and reply with a location header containing a URI pointing to the resource.
For RabbitMQ, we don’t need such a generic factory to create queues or exchanges.
To list bindings for a queue Q1, the spec suggests
```
GET /$management/Queues/Q1/$management/entities
```
which again shows the generic entities endpoint as well as a `$management` endpoint under Q1 to
allow a queue to return a discovery document.
For RabbitMQ, we don’t need such generic endpoints and discovery documents.
Given we aim for our own thin RabbitMQ AMQP 1.0 client wrapper libraries which expose
the RabbitMQ model to the developer, we can directly use fixed HTTP endpoint assumptions
in our RabbitMQ specific libraries.
This is by far simpler than using the dynamic endpoints of the management spec.
Simplicity leads to higher adoption and enables more developers to write RabbitMQ AMQP 1.0 client
library wrappers.
The AMQP Management extension also suffers from a deep level of nesting in paths.
Examples:
```
/$management/Queues/Q1/$management/entities
/$management/Queues/Q1/Bindings/Binding1
```
as well as verbs in path names: Section 7.1.4 suggests using verbs in path names,
for example “purge”, due to the dynamic operations discovery document.
### HTTP API v2
This commit introduces a new HTTP API v2 following best practices.
It could serve as a future API for plain HTTP clients.
This commit and RabbitMQ 4.0 will only implement a minimal set of
HTTP API v2 endpoints and only for HTTP over AMQP.
In other words, the existing HTTP API v1 Cowboy handlers will continue to be
used for all plain HTTP requests in RabbitMQ 4.0 and will remain untouched for RabbitMQ 4.0.
Over time, after 4.0 shipped, we could ship a pure HTTP API implementation for HTTP API v2.
Hence, the new HTTP API v2 endpoints for HTTP over AMQP should be designed such that they
can be re-used in the future for a pure HTTP implementation.
The minimal set of endpoints for RabbitMQ 4.0 are:
```
GET / PUT / DELETE
/vhosts/:vhost/queues/:queue
```
read, create, delete a queue
```
DELETE
/vhosts/:vhost/queues/:queue/messages
```
purges a queue
```
GET / DELETE
/vhosts/:vhost/bindings/:binding
```
read, delete bindings
where `:binding` is a binding ID with the following path segment form:
```
src=e1;dstq=q2;key=my-key;args=
```
The binding arguments component `args` has an empty value by default, i.e. there are no binding arguments.
If the binding includes binding arguments, `args` will be an Erlang portable term hash
provided by the server, similar to what’s provided in HTTP API v1 today.
Alternatively, we could use an arguments scheme of:
```
args=k1,utf8,v1&k2,uint,3
```
However, such a scheme leads to long URIs when there are many binding arguments.
Note that it’s perfectly fine for URI producing applications to include URI
reserved characters `=` / `;` / `,` / `$` in a path segment.
To create a binding, the client therefore needs to POST to a bindings factory URI:
```
POST
/vhosts/:vhost/bindings
```
To list all bindings between a source exchange e1 and destination exchange e2 with binding key k1:
```
GET
/vhosts/:vhost/bindings?src=e1&dste=e2&key=k1
```
This endpoint will be called by the RabbitMQ AMQP 1.0 client library to unbind a
binding with non-empty binding arguments to get the binding ID before invoking a
```
DELETE
/vhosts/:vhost/bindings/:binding
```
In future, after RabbitMQ 4.0 shipped, new API endpoints could be added.
The following is up for discussion and is only meant to show the clean and simple design of HTTP API v2.
Bindings endpoint can be queried as follows:
to list all bindings for a given source exchange e1:
```
GET
/vhosts/:vhost/bindings?src=e1
```
to list all bindings for a given destination queue q1:
```
GET
/vhosts/:vhost/bindings?dstq=q1
```
to list all bindings between a source exchange e1 and destination queue q1:
```
GET
/vhosts/:vhost/bindings?src=e1&dstq=q1
```
multiple bindings between source exchange e1 and destination queue q1 could be deleted at once as follows:
```
DELETE /vhosts/:vhost/bindings?src=e1&dstq=q1
```
GET could be supported globally across all vhosts:
```
/exchanges
/queues
/bindings
```
Publish a message:
```
POST
/vhosts/:vhost/queues/:queue/messages
```
Consume or peek a message (depending on query parameters):
```
GET
/vhosts/:vhost/queues/:queue/messages
```
Note that the AMQP 1.0 client omits the `/vhost/:vhost` path prefix.
Since an AMQP connection belongs to a single vhost, there is no need to
additionally include the vhost in every HTTP request.
Pros of HTTP API v2:
1. Low level of nesting
Queues, exchanges, bindings are top level entities directly under vhosts.
Although the HTTP API doesn’t have to reflect how resources are stored in the database,
v2 does nicely reflect the Khepri tree structure.
2. Nouns instead of verbs
HTTP API v2 is very simple to read and understand as shown by
```
POST /vhosts/:vhost/queues/:queue/messages to post messages, i.e. publish to a queue.
GET /vhosts/:vhost/queues/:queue/messages to get messages, i.e. consume or peek from a queue.
DELETE /vhosts/:vhost/queues/:queue/messages to delete messages, i.e. purge a queue.
```
A separate new HTTP API v2 allows us to ship only handlers for HTTP over AMQP for RabbitMQ 4.0
and therefore move faster while still keeping the option on the table to re-use the new v2 API
for pure HTTP in the future.
In contrast, re-using the HTTP API v1 for HTTP over AMQP is possible, but dirty because separate handlers
(HTTP over AMQP and pure HTTP) replying differently will be needed for the same v1 endpoints.
TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers meck proper amqp_client rabbitmq_amqp_client rabbitmq_amqp1_0
Use Lager to log RabbitMQ messages
By default, RabbitMQ now logs messages to a single file
($RABBITMQ_LOGS). The $RABBITMQ_SASL_LOGS variable is unused. To
configure how and which messages are logged, it's recommended to do it
from rabbitmq.config, not from the environment variable.
The old `log_levels` parameter is unsupported, and categories are
replaced by Lager extra sinks. If you had this in your rabbitmq.config:
{rabbit, [
{log_levels, [{connection, info}]}
]}
You can now configure Lager like this:
{lager, [
{extra_sinks, [
{rabbit_connection_lager_event, [
{handlers, [{lager_forwarder_backend, [lager_event, info]}]}
]}
]}
]}
rabbitmq-build.mk from rabbitmq-common is included in the top-level
Makefile. It sets the appropriate compiler options to enable Lager's
lager_transform parse_transform module.
rabbit_log calls are now converted by this parse_transform to direct
calls to lager:log(). To keep backward compatibility with other plugins,
the rabbit_log module still implements all the <level>() functions.
Compared to the parse_transformed calls, the main difference is that the
logged message does not carry the file:line metadata.
Fixes #94.
2015-12-05 01:07:00 +08:00
|
|
|
|
2024-04-16 20:07:13 +08:00
|
|
|
PLT_APPS += mnesia runtime_tools looking_glass
|
2020-11-02 18:40:24 +08:00
|
|
|
|
Switch from Lager to the new Erlang Logger API for logging
The configuration remains the same for the end-user. The only exception
is the log root directory: it is now set through the `log_root`
application env. variable in `rabbit`. People using the Cuttlefish-based
configuration file are not affected by this exception.
The main change is how the logging facility is configured. It now
happens in `rabbit_prelaunch_logging`. The `rabbit_lager` module is
removed.
The supported outputs remain the same: the console, text files, the
`amq.rabbitmq.log` exchange and syslog.
The message text format changed slightly: the timestamp is more precise
(now to the microsecond) and the level is abbreviated to a fixed
4-character width to align all messages and improve readability. Here is
an example:
2021-03-03 10:22:30.377392+01:00 [dbug] <0.229.0> == Prelaunch DONE ==
2021-03-03 10:22:30.377860+01:00 [info] <0.229.0>
2021-03-03 10:22:30.377860+01:00 [info] <0.229.0> Starting RabbitMQ 3.8.10+115.g071f3fb on Erlang 23.2.5
2021-03-03 10:22:30.377860+01:00 [info] <0.229.0> Licensed under the MPL 2.0. Website: https://rabbitmq.com
The example above also shows that multiline messages are supported and
each line is prepended with the same prefix (the timestamp, the level
and the Erlang process PID).
JSON is also supported as a message format, and now for any output.
Indeed, it is possible to use it with e.g. syslog or the exchange. Here
is an example of a JSON-formatted message sent to syslog:
Mar 3 11:23:06 localhost rabbitmq-server[27908] <0.229.0> - {"time":"2021-03-03T11:23:06.998466+01:00","level":"notice","msg":"Logging: configured log handlers are now ACTIVE","meta":{"domain":"rabbitmq.prelaunch","file":"src/rabbit_prelaunch_logging.erl","gl":"<0.228.0>","line":311,"mfa":["rabbit_prelaunch_logging","configure_logger",1],"pid":"<0.229.0>"}}
For quick testing, the values accepted by the `$RABBITMQ_LOGS`
environment variable were extended:
* `-` still means stdout
* `-stderr` means stderr
* `syslog:` means syslog on localhost
* `exchange:` means logging to `amq.rabbitmq.log`
`$RABBITMQ_LOG` was also extended. It now accepts a `+json` modifier (in
addition to the existing `+color` one). With that modifier, messages are
formatted as JSON instead of plain text.
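For instance, to log JSON to stderr for a quick test (a sketch that
simply combines the values documented above):
RABBITMQ_LOGS='-stderr' RABBITMQ_LOG='+json' rabbitmq-server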
The `rabbitmqctl rotate_logs` command is deprecated. The reason is that
Logger does not expose a function to force log rotation. However, it
will detect when a file was rotated by an external tool.
From a developer point of view, the old `rabbit_log*` API remains
supported, though it is now deprecated. It is implemented as regular
modules: there is no `parse_transform` involved anymore.
In the code, it is recommended to use the new Logger macros. For
instance, `?LOG_INFO(Format, Args)`. If possible, messages should be
augmented with some metadata. For instance (note the map after the
message):
?LOG_NOTICE("Logging: switching to configured handler(s); following "
"messages may not be visible in this log output",
#{domain => ?RMQLOG_DOMAIN_PRELAUNCH}),
Domains in Erlang Logger parlance are the way to categorize messages.
Some predefined domains, matching previous categories, are currently
defined in `rabbit_common/include/logging.hrl` or headers in the
relevant plugins for plugin-specific categories.
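Putting this together, a converted call site could look like the
following minimal sketch (the function and message are made up; the
include paths and the domain macro are the ones named above):
-include_lib("kernel/include/logger.hrl").
-include_lib("rabbit_common/include/logging.hrl").
handle_example() ->
    ?LOG_INFO("Example: processed ~b messages", [42],
              #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}).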
At this point, very few messages have been converted from the old
`rabbit_log*` API to the new macros. It can be done gradually when
working on a particular module or logging.
The Erlang builtin console/file handler, `logger_std_h`, has been forked
because it lacks date-based file rotation. The configuration of
date-based rotation is identical to Lager. Once the dust has settled for
this feature, the goal is to submit it upstream for inclusion in Erlang.
The forked module is called `rabbit_logger_std_h` and is based on
`logger_std_h` in Erlang 23.0.
2021-01-13 00:55:27 +08:00
|
|
|
dep_syslog = git https://github.com/schlagert/syslog 4.0.0
|
2024-03-21 21:48:41 +08:00
|
|
|
dep_osiris = git https://github.com/rabbitmq/osiris v1.8.1
|
2021-05-21 17:49:37 +08:00
|
|
|
dep_systemd = hex 0.6.1
|
2023-09-07 00:09:12 +08:00
|
|
|
|
2015-08-25 22:53:21 +08:00
|
|
|
define usage_xml_to_erl
|
|
|
|
$(subst __,_,$(patsubst $(DOCS_DIR)/rabbitmq%.1.xml, src/rabbit_%_usage.erl, $(subst -,_,$(1))))
|
|
|
|
endef
|
|
|
|
|
|
|
|
DOCS_DIR = docs
|
2017-04-25 16:50:00 +08:00
|
|
|
MANPAGES = $(wildcard $(DOCS_DIR)/*.[0-9])
|
|
|
|
WEB_MANPAGES = $(patsubst %,%.html,$(MANPAGES))
|
2023-12-18 19:16:04 +08:00
|
|
|
MD_MANPAGES = $(patsubst %,%.md,$(MANPAGES))
|
2016-11-07 20:03:18 +08:00
|
|
|
|
2017-05-16 22:58:03 +08:00
|
|
|
DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-test.mk
|
2015-12-05 01:07:00 +08:00
|
|
|
DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk \
|
2015-12-01 23:17:49 +08:00
|
|
|
rabbit_common/mk/rabbitmq-dist.mk \
|
2017-01-31 18:24:05 +08:00
|
|
|
rabbit_common/mk/rabbitmq-run.mk \
|
2017-03-07 19:41:32 +08:00
|
|
|
rabbit_common/mk/rabbitmq-test.mk \
|
2020-11-17 22:29:05 +08:00
|
|
|
rabbit_common/mk/rabbitmq-tools.mk
|
2015-08-26 00:34:52 +08:00
|
|
|
|
2021-03-22 17:38:17 +08:00
|
|
|
include ../../rabbitmq-components.mk
|
|
|
|
include ../../erlang.mk
|
2010-06-09 19:41:46 +08:00
|
|
|
|
2017-08-23 01:18:05 +08:00
|
|
|
ifeq ($(strip $(BATS)),)
|
|
|
|
BATS := $(ERLANG_MK_TMP)/bats/bin/bats
|
|
|
|
endif
|
|
|
|
|
|
|
|
BATS_GIT ?= https://github.com/sstephenson/bats
|
|
|
|
BATS_COMMIT ?= v0.4.0
|
|
|
|
|
|
|
|
$(BATS):
|
|
|
|
$(verbose) mkdir -p $(ERLANG_MK_TMP)
|
|
|
|
$(gen_verbose) git clone --depth 1 --branch=$(BATS_COMMIT) $(BATS_GIT) $(ERLANG_MK_TMP)/bats
|
|
|
|
|
|
|
|
.PHONY: bats
|
|
|
|
|
|
|
|
bats: $(BATS)
|
|
|
|
$(verbose) $(BATS) $(TEST_DIR)
|
|
|
|
|
|
|
|
tests:: bats
|
|
|
|
|
2017-03-09 16:55:15 +08:00
|
|
|
SLOW_CT_SUITES := backing_queue \
|
2020-01-22 22:50:24 +08:00
|
|
|
channel_interceptor \
|
|
|
|
cluster \
|
2017-03-09 16:55:15 +08:00
|
|
|
cluster_rename \
|
|
|
|
clustering_management \
|
2019-01-08 21:19:18 +08:00
|
|
|
config_schema \
|
2020-01-22 22:50:24 +08:00
|
|
|
confirms_rejects \
|
|
|
|
consumer_timeout \
|
|
|
|
crashing_queues \
|
2017-03-09 16:55:15 +08:00
|
|
|
dynamic_ha \
|
2020-01-22 22:50:24 +08:00
|
|
|
dynamic_qq \
|
2017-03-09 16:55:15 +08:00
|
|
|
eager_sync \
|
2019-03-01 22:59:02 +08:00
|
|
|
feature_flags \
|
2017-03-09 16:55:15 +08:00
|
|
|
health_check \
|
2020-01-22 22:50:24 +08:00
|
|
|
many_node_ha \
|
2019-01-08 21:19:18 +08:00
|
|
|
metrics \
|
|
|
|
msg_store \
|
2017-03-09 16:55:15 +08:00
|
|
|
partitions \
|
2019-01-08 21:19:18 +08:00
|
|
|
per_user_connection_tracking \
|
|
|
|
per_vhost_connection_limit \
|
2020-01-22 22:50:24 +08:00
|
|
|
per_vhost_connection_limit_partitions \
|
2019-01-08 22:01:18 +08:00
|
|
|
per_vhost_msg_store \
|
2019-01-08 21:19:18 +08:00
|
|
|
per_vhost_queue_limit \
|
|
|
|
policy \
|
2017-03-09 16:55:15 +08:00
|
|
|
priority_queue \
|
2020-01-22 22:50:24 +08:00
|
|
|
priority_queue_recovery \
|
|
|
|
publisher_confirms_parallel \
|
2017-03-09 16:55:15 +08:00
|
|
|
queue_master_location \
|
2020-01-22 22:50:24 +08:00
|
|
|
queue_parallel \
|
2019-01-08 21:19:18 +08:00
|
|
|
quorum_queue \
|
|
|
|
rabbit_core_metrics_gc \
|
2019-01-08 22:01:18 +08:00
|
|
|
rabbit_fifo_prop \
|
2020-01-22 22:50:24 +08:00
|
|
|
rabbitmq_queues_cli_integration \
|
|
|
|
rabbitmqctl_integration \
|
2019-01-08 21:19:18 +08:00
|
|
|
simple_ha \
|
|
|
|
sync_detection \
|
2020-01-22 22:50:24 +08:00
|
|
|
unit_inbroker_non_parallel \
|
2019-01-08 21:19:18 +08:00
|
|
|
unit_inbroker_parallel \
|
|
|
|
vhost
|
2017-03-09 16:55:15 +08:00
|
|
|
FAST_CT_SUITES := $(filter-out $(sort $(SLOW_CT_SUITES)),$(CT_SUITES))
|
2017-03-07 19:41:32 +08:00
|
|
|
|
|
|
|
ct-fast: CT_SUITES = $(FAST_CT_SUITES)
|
|
|
|
ct-slow: CT_SUITES = $(SLOW_CT_SUITES)
|
|
|
|
|
2015-08-27 23:12:32 +08:00
|
|
|
# --------------------------------------------------------------------
|
|
|
|
# Compilation.
|
|
|
|
# --------------------------------------------------------------------
|
|
|
|
|
2015-08-25 23:46:35 +08:00
|
|
|
RMQ_ERLC_OPTS += -I $(DEPS_DIR)/rabbit_common/include
|
Deprecated features: New module to manage deprecated features (!)
This introduces a way to declare deprecated features in the code, not
only in our communication. The new module allows disallowing the use of
a deprecated feature and/or warning users when they rely on such a
feature.
[Why]
Currently, we only tell people about deprecated features through blog
posts and the mailing-list. This might be insufficient to make our users
aware that a feature they use will be removed in a future version:
* They may not read our blog or mailing-list
* They may not understand that they use such a deprecated feature
* They might wait for the big removal before they plan testing
* They might not take it seriously enough
The idea behind this patch is to increase the chance that users notice
that they are using something which is about to be dropped from
RabbitMQ. Another benefit is that they should be able to test how
RabbitMQ will behave in the future before the actual removal. This
should allow them to test and plan changes.
[How]
When a feature is deprecated in other large projects (such as FreeBSD
where I took the idea from), it goes through a lifecycle:
1. The feature is still available, but users get a warning somehow when
they use it. They can disable it to test.
2. The feature is still available, but disabled out-of-the-box. Users
can re-enable it (and get a warning).
3. The feature is disconnected from the build. Therefore, the code
behind it is still there, but users have to recompile the thing to be
able to use it.
4. The feature is removed from the source code. Users have to adapt or
they can't upgrade anymore.
The solution in this patch offers the same lifecycle. A deprecated
feature will be in one of these deprecation phases:
1. `permitted_by_default`: The feature is available. Users get a warning
if they use it. They can disable it from the configuration.
2. `denied_by_default`: The feature is available but disabled by
default. Users get an error if they use it and RabbitMQ behaves like
the feature is removed. They can re-enable it from the configuration
and get a warning.
3. `disconnected`: The feature is present in the source code, but is
disabled and can't be re-enabled without recompiling RabbitMQ. Users
get the same behavior as if the code was removed.
4. `removed`: The feature's code is gone.
The whole thing is based on the feature flags subsystem, but it has the
following differences with other feature flags:
* The semantic is reversed: the feature flag behind a deprecated feature
is disabled when the deprecated feature is permitted, or enabled when
the deprecated feature is denied.
* The feature flag behind a deprecated feature is enabled out-of-the-box
(meaning the deprecated feature is denied):
* if the deprecation phase is `permitted_by_default` and the
configuration denies the deprecated feature
* if the deprecation phase is `denied_by_default` and the
configuration doesn't permit the deprecated feature
* if the deprecation phase is `disconnected` or `removed`
* Feature flags behind deprecated features don't appear in feature flags
listings.
Otherwise, deprecated features' feature flags are managed like other
feature flags, in particular inside clusters.
To declare a deprecated feature:
-rabbit_deprecated_feature(
{my_deprecated_feature,
#{deprecation_phase => permitted_by_default,
msgs => #{when_permitted => "This feature will be removed in RabbitMQ X.0"}
}}).
Then, to check the state of a deprecated feature in the code:
case rabbit_deprecated_features:is_permitted(my_deprecated_feature) of
true ->
%% The deprecated feature is still permitted.
ok;
false ->
%% The deprecated feature is gone or should be considered
%% unavailable.
error
end.
Warnings and errors are logged automatically. A message is generated
automatically, but it is possible to define a message in the deprecated
feature flag declaration like in the example above.
Here is an example of a logged warning that was generated automatically:
Feature `my_deprecated_feature` is deprecated.
By default, this feature can still be used for now.
Its use will not be permitted by default in a future minor RabbitMQ version and the feature will be removed from a future major RabbitMQ version; actual versions to be determined.
To continue using this feature when it is not permitted by default, set the following parameter in your configuration:
"deprecated_features.permit.my_deprecated_feature = true"
To test RabbitMQ as if the feature was removed, set this in your configuration:
"deprecated_features.permit.my_deprecated_feature = false"
To override the default state of `permitted_by_default` and
`denied_by_default` deprecation phases, users can set the following
configuration:
# In rabbitmq.conf:
deprecated_features.permit.my_deprecated_feature = true # or false
The actual behavior protected by a deprecated feature check is out of
scope for this subsystem. It is the responsibility of each deprecated
feature's code to determine what to do when the deprecated feature is
denied.
V1: Deprecated feature states are initially computed during the
initialization of the registry, based on their deprecation phase and
possibly the configuration. They don't go through the `enable/1`
code at all.
V2: Manage deprecated feature states as any other non-required
feature flags. This allows executing an `is_feature_used()`
callback to determine if a deprecated feature can be denied; a
sketch follows these notes. This also allows preventing the
RabbitMQ node from starting if it continues to use a deprecated
feature.
V3: Manage deprecated feature states from the registry initialization
again. This is required because we need to know very early if some
of them are denied, so that an upgrade to a version of RabbitMQ
where a deprecated feature is disconnected or removed can be
performed.
To still prevent the start of a RabbitMQ node when a denied
deprecated feature is actively used, we run the `is_feature_used()`
callback of all denied deprecated features as part of the
`sync_cluster()` task. This task is executed as part of a feature
flag refresh executed when RabbitMQ starts or when plugins are
enabled. So even though a deprecated feature is marked as denied in
the registry early in the boot process, we will still abort the
start of a RabbitMQ node if the feature is used.
V4: Support context-dependent warnings. It is now possible to set a
specific message when a deprecated feature is permitted, when it is
denied and when it is removed. Generic per-context messages are
still generated.
V5: Improve default warning messages, thanks to @pstack2021.
V6: Rename the configuration variable from `permit_deprecated_features.*`
to `deprecated_features.permit.*`. As @michaelklishin said, we tend
to use shorter top-level names.
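As a hypothetical sketch of the `is_feature_used()` callback mentioned in
V2 and V3 (the callback declaration syntax is assumed to follow the
feature flags subsystem; the function name is made up):
-rabbit_deprecated_feature(
   {my_deprecated_feature,
    #{deprecation_phase => permitted_by_default,
      callbacks => #{is_feature_used => {?MODULE, is_my_feature_used}}
     }}).
is_my_feature_used(_Args) ->
    %% Return true if this node still relies on the deprecated feature,
    %% in which case denying it aborts the node start as described in V3.
    false.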
2023-02-23 00:26:52 +08:00
|
|
|
EDOC_OPTS += {preprocess,true},{includes,["."]}
|
2010-03-23 19:23:55 +08:00
|
|
|
|
2015-08-10 17:50:41 +08:00
|
|
|
ifdef INSTRUMENT_FOR_QC
|
2015-08-14 18:21:42 +08:00
|
|
|
RMQ_ERLC_OPTS += -DINSTR_MOD=gm_qc
|
2021-04-09 19:13:47 +08:00
|
|
|
EDOC_OPTS += ,{macros,[{'INSTR_MOD',gm_qc}]}
|
2010-03-23 19:23:55 +08:00
|
|
|
else
|
2015-08-14 18:21:42 +08:00
|
|
|
RMQ_ERLC_OPTS += -DINSTR_MOD=gm
|
2021-04-09 19:13:47 +08:00
|
|
|
EDOC_OPTS += ,{macros,[{'INSTR_MOD',gm}]}
|
2010-03-23 19:23:55 +08:00
|
|
|
endif
|
|
|
|
|
2015-08-10 17:50:41 +08:00
|
|
|
ifdef CREDIT_FLOW_TRACING
|
2015-08-14 18:21:42 +08:00
|
|
|
RMQ_ERLC_OPTS += -DCREDIT_FLOW_TRACING=true
|
2015-08-10 17:50:41 +08:00
|
|
|
endif
|
|
|
|
|
2022-08-29 05:41:54 +08:00
|
|
|
ifdef TRACE_SUPERVISOR2
|
|
|
|
RMQ_ERLC_OPTS += -DTRACE_SUPERVISOR2=true
|
|
|
|
endif
|
|
|
|
|
2015-08-10 17:50:41 +08:00
|
|
|
ifndef USE_PROPER_QC
|
|
|
|
# PropEr needs to be installed for property checking
|
|
|
|
# http://proper.softlab.ntua.gr/
|
2015-10-27 21:29:08 +08:00
|
|
|
USE_PROPER_QC := $(shell $(ERL) -eval 'io:format({module, proper} =:= code:ensure_loaded(proper)), halt().')
|
2015-08-14 18:21:42 +08:00
|
|
|
RMQ_ERLC_OPTS += $(if $(filter true,$(USE_PROPER_QC)),-Duse_proper_qc)
|
2015-08-10 17:50:41 +08:00
|
|
|
endif
|
2015-08-14 00:48:53 +08:00
|
|
|
|
|
|
|
# --------------------------------------------------------------------
|
2015-08-14 01:01:27 +08:00
|
|
|
# Documentation.
|
2015-08-14 00:48:53 +08:00
|
|
|
# --------------------------------------------------------------------
|
|
|
|
|
2015-10-07 00:48:45 +08:00
|
|
|
.PHONY: manpages web-manpages distclean-manpages
|
|
|
|
|
|
|
|
docs:: manpages web-manpages
|
|
|
|
|
|
|
|
manpages: $(MANPAGES)
|
|
|
|
@:
|
|
|
|
|
2023-12-18 19:16:04 +08:00
|
|
|
web-manpages: $(WEB_MANPAGES) $(MD_MANPAGES)
|
2015-10-07 00:48:45 +08:00
|
|
|
@:
|
2015-08-14 00:48:53 +08:00
|
|
|
|
2017-04-25 16:50:00 +08:00
|
|
|
# We use mandoc(1) to convert manpages to HTML plus an awk script which
|
|
|
|
# does:
|
|
|
|
# 1. remove tables at the top and the bottom (they recall the
|
|
|
|
# manpage name, section and date)
|
|
|
|
# 2. "downgrade" headers by one level (eg. h1 -> h2)
|
|
|
|
# 3. annotate .Dl lines with more CSS classes
|
|
|
|
%.html: %
|
|
|
|
$(gen_verbose) mandoc -T html -O 'fragment,man=%N.%S.html' "$<" | \
|
|
|
|
awk '\
|
|
|
|
/^<table class="head">$$/ { remove_table=1; next; } \
|
|
|
|
/^<table class="foot">$$/ { remove_table=1; next; } \
|
|
|
|
/^<\/table>$$/ { if (remove_table) { remove_table=0; next; } } \
|
|
|
|
{ if (!remove_table) { \
|
|
|
|
line=$$0; \
|
|
|
|
gsub(/<h2/, "<h3", line); \
|
|
|
|
gsub(/<\/h2>/, "</h3>", line); \
|
|
|
|
gsub(/<h1/, "<h2", line); \
|
|
|
|
gsub(/<\/h1>/, "</h2>", line); \
|
2019-01-31 10:08:57 +08:00
|
|
|
gsub(/class="D1"/, "class=\"D1 lang-bash\"", line); \
|
|
|
|
gsub(/class="Bd Bd-indent"/, "class=\"Bd Bd-indent lang-bash\"", line); \
|
2019-09-05 06:53:01 +08:00
|
|
|
gsub(/&#[xX]201[cCdD];/, "\\&quot;", line); \
|
2017-04-25 16:50:00 +08:00
|
|
|
print line; \
|
|
|
|
} } \
|
|
|
|
' > "$@"
|
|
|
|
|
2023-12-18 19:16:04 +08:00
|
|
|
%.md: %
|
|
|
|
$(gen_verbose) mandoc -T markdown -O 'fragment,man=%N.%S.md' "$<" | \
|
|
|
|
sed -E -e 's/\{/\&lcub;/g' \
|
|
|
|
> "$@"
|
|
|
|
|
2015-08-14 00:48:53 +08:00
|
|
|
distclean:: distclean-manpages
|
|
|
|
|
|
|
|
distclean-manpages::
|
2023-12-18 19:16:04 +08:00
|
|
|
$(gen_verbose) rm -f $(WEB_MANPAGES) $(MD_MANPAGES)
|