rabbitmq-server/deps/rabbit/Makefile

PROJECT = rabbit
PROJECT_DESCRIPTION = RabbitMQ
PROJECT_MOD = rabbit
PROJECT_REGISTERED = rabbit_amqqueue_sup \
rabbit_direct_client_sup \
rabbit_log \
rabbit_node_monitor \
rabbit_router
define PROJECT_ENV
[
{tcp_listeners, [5672]},
{num_tcp_acceptors, 10},
{ssl_listeners, []},
{num_ssl_acceptors, 10},
{ssl_options, []},
{vm_memory_high_watermark, 0.4},
{vm_memory_high_watermark_paging_ratio, 0.5},
{vm_memory_calculation_strategy, rss},
{memory_monitor_interval, 2500},
{disk_free_limit, 50000000}, %% 50MB
{msg_store_index_module, rabbit_msg_store_ets_index},
{backing_queue_module, rabbit_variable_queue},
%% 0 ("no limit") would make a better default, but that
%% breaks the QPid Java client
{frame_max, 131072},
%% see rabbitmq-server#1593
{channel_max, 2047},
{ranch_connection_max, infinity},
{heartbeat, 60},
{msg_store_file_size_limit, 16777216},
{msg_store_shutdown_timeout, 600000},
{fhc_write_buffering, true},
{fhc_read_buffering, false},
{queue_index_max_journal_entries, 32768},
{queue_index_embed_msgs_below, 4096},
{default_user, <<"guest">>},
{default_pass, <<"guest">>},
{default_user_tags, [administrator]},
{default_vhost, <<"/">>},
{default_permissions, [<<".*">>, <<".*">>, <<".*">>]},
{loopback_users, [<<"guest">>]},
{password_hashing_module, rabbit_password_hashing_sha256},
{server_properties, []},
{collect_statistics, none},
{collect_statistics_interval, 5000},
{mnesia_table_loading_retry_timeout, 30000},
{mnesia_table_loading_retry_limit, 10},
{auth_mechanisms, ['PLAIN', 'AMQPLAIN']},
{auth_backends, [rabbit_auth_backend_internal]},
{delegate_count, 16},
{trace_vhosts, []},
{ssl_cert_login_from, distinguished_name},
{ssl_handshake_timeout, 5000},
{ssl_allow_poodle_attack, false},
{handshake_timeout, 10000},
{reverse_dns_lookups, false},
{cluster_partition_handling, ignore},
{cluster_keepalive_interval, 10000},
{autoheal_state_transition_timeout, 60000},
{tcp_listen_options, [{backlog, 128},
{nodelay, true},
{linger, {true, 0}},
{exit_on_close, false}
]},
{halt_on_upgrade_failure, true},
{ssl_apps, [asn1, crypto, public_key, ssl]},
%% classic queue storage implementation version
{classic_queue_default_version, 2},
%% see rabbitmq-server#114
{mirroring_flow_control, true},
{mirroring_sync_batch_size, 4096},
%% see rabbitmq-server#227 and related tickets.
%% msg_store_credit_disc_bound only takes effect when
%% messages are persisted to the message store. If messages
%% are embedded on the queue index, then modifying this
%% setting has no effect because credit_flow is not used when
%% writing to the queue index. See the setting
%% queue_index_embed_msgs_below above.
{msg_store_credit_disc_bound, {4000, 800}},
{msg_store_io_batch_size, 4096},
%% see rabbitmq-server#143,
%% rabbitmq-server#949, rabbitmq-server#1098
{credit_flow_default_credit, {400, 200}},
{quorum_commands_soft_limit, 32},
{quorum_cluster_size, 3},
%% see rabbitmq-server#248
%% and rabbitmq-server#667
{channel_operation_timeout, 15000},
%% See https://www.rabbitmq.com/consumers.html#acknowledgement-timeout
%% 30 minutes
{consumer_timeout, 1800000},
%% see rabbitmq-server#486
{autocluster,
[{peer_discovery_backend, rabbit_peer_discovery_classic_config}]
},
%% used by rabbit_peer_discovery_classic_config
{cluster_nodes, {[], disc}},
{config_entry_decoder, [{passphrase, undefined}]},
%% rabbitmq-server#973
{queue_explicit_gc_run_operation_threshold, 1000},
{background_gc_enabled, false},
{background_gc_target_interval, 60000},
%% rabbitmq-server#589
{proxy_protocol, false},
{disk_monitor_failure_retries, 10},
{disk_monitor_failure_retry_interval, 120000},
%% either "stop_node" or "continue".
%% by default we choose not to terminate the entire node if one
%% vhost had to shut down, see rabbitmq-server#1158 and rabbitmq-server#1280
{vhost_restart_strategy, continue},
%% {global, prefetch count}
{default_consumer_prefetch, {false, 0}},
%% interval at which the channel can perform periodic actions
{channel_tick_interval, 60000},
%% Default max message size is 128 MB
{max_message_size, 134217728},
%% Socket writer will run GC every 1 GB of outgoing data
{writer_gc_threshold, 1000000000},
%% interval at which connection/channel tracking executes post operations
{tracking_execution_timeout, 15000},
{stream_messages_soft_limit, 256},
{track_auth_attempt_source, false},
{credentials_obfuscation_fallback_secret, <<"nocookie">>},
{dead_letter_worker_consumer_prefetch, 32},
{dead_letter_worker_publisher_confirm_timeout, 180000},
%% EOL date for the current release series, if known/announced
{release_series_eol_date, none}
]
endef
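
# With erlang.mk, PROJECT_ENV above becomes the default `rabbit` application
# environment in the generated .app file. Operators normally override
# individual keys at runtime rather than editing this Makefile; a minimal,
# illustrative advanced.config snippet (values picked for the example only):
#
#   [{rabbit, [{vm_memory_high_watermark, 0.6},
#              {disk_free_limit, 100000000}]}].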
# With Erlang.mk default behavior, the value of `$(APPS_DIR)` is always
# relative to the top-level executed Makefile. In our case, it could be
# a plugin for instance. However, the rabbitmq_prelaunch application is
# in this repository, not the plugin's. That's why we need to override
# this value here.
APPS_DIR := $(CURDIR)/apps
LOCAL_DEPS = sasl rabbitmq_prelaunch os_mon inets compiler public_key crypto ssl syntax_tools xmerl
BUILD_DEPS = rabbitmq_cli
DEPS = ranch rabbit_common ra sysmon_handler stdout_formatter recon redbug observer_cli osiris amqp10_common syslog systemd seshat khepri khepri_mnesia_migration
TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers amqp_client meck proper
PLT_APPS += mnesia
dep_syslog = git https://github.com/schlagert/syslog 4.0.0
dep_osiris = git https://github.com/rabbitmq/osiris v1.7.1
dep_systemd = hex 0.6.1
dep_seshat = git https://github.com/rabbitmq/seshat v0.6.1
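
# The dep_* lines above pin how erlang.mk fetches each dependency. Being
# plain make variables, they can be overridden on the command line when
# testing against a different version, e.g. (illustrative only, the branch
# name is hypothetical):
#
#   make dep_osiris="git https://github.com/rabbitmq/osiris some-branch"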
define usage_xml_to_erl
$(subst __,_,$(patsubst $(DOCS_DIR)/rabbitmq%.1.xml, src/rabbit_%_usage.erl, $(subst -,_,$(1))))
endef
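# For example (illustrative; the file name is hypothetical), the macro maps
#   $(DOCS_DIR)/rabbitmq-plugins.1.xml -> src/rabbit_plugins_usage.erl
# The inner subst turns "-" into "_", the patsubst swaps the prefix and
# suffix, and the outer subst collapses the resulting "__" into a single "_".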
DOCS_DIR = docs
MANPAGES = $(wildcard $(DOCS_DIR)/*.[0-9])
WEB_MANPAGES = $(patsubst %,%.html,$(MANPAGES))
DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-test.mk
DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk \
rabbit_common/mk/rabbitmq-dist.mk \
rabbit_common/mk/rabbitmq-run.mk \
rabbit_common/mk/rabbitmq-test.mk \
rabbit_common/mk/rabbitmq-tools.mk
include ../../rabbitmq-components.mk
include ../../erlang.mk
# See above why we mess with `$(APPS_DIR)`.
unexport APPS_DIR
ifeq ($(strip $(BATS)),)
BATS := $(ERLANG_MK_TMP)/bats/bin/bats
endif
BATS_GIT ?= https://github.com/sstephenson/bats
BATS_COMMIT ?= v0.4.0
$(BATS):
	$(verbose) mkdir -p $(ERLANG_MK_TMP)
	$(gen_verbose) git clone --depth 1 --branch=$(BATS_COMMIT) $(BATS_GIT) $(ERLANG_MK_TMP)/bats
.PHONY: bats
bats: $(BATS)
	$(verbose) $(BATS) $(TEST_DIR)
tests:: bats
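
# Illustrative usage: `make bats` clones the pinned bats release into
# $(ERLANG_MK_TMP)/bats (if it is not already there) and runs it against
# $(TEST_DIR). BATS_GIT and BATS_COMMIT are `?=` assignments, so both can be
# overridden on the command line, e.g.:
#
#   make bats BATS_COMMIT=some-tag   # tag name is hypothetical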
SLOW_CT_SUITES := backing_queue \
channel_interceptor \
cluster \
cluster_rename \
clustering_management \
config_schema \
confirms_rejects \
consumer_timeout \
crashing_queues \
dynamic_ha \
dynamic_qq \
eager_sync \
feature_flags \
health_check \
many_node_ha \
metrics \
msg_store \
partitions \
per_user_connection_tracking \
per_vhost_connection_limit \
per_vhost_connection_limit_partitions \
per_vhost_msg_store \
per_vhost_queue_limit \
policy \
priority_queue \
priority_queue_recovery \
publisher_confirms_parallel \
queue_master_location \
queue_parallel \
quorum_queue \
rabbit_core_metrics_gc \
rabbit_fifo_prop \
rabbitmq_queues_cli_integration \
rabbitmqctl_integration \
simple_ha \
sync_detection \
unit_inbroker_non_parallel \
unit_inbroker_parallel \
vhost
FAST_CT_SUITES := $(filter-out $(sort $(SLOW_CT_SUITES)),$(CT_SUITES))
ct-fast: CT_SUITES = $(FAST_CT_SUITES)
ct-slow: CT_SUITES = $(SLOW_CT_SUITES)
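# Sketch of the intended split, assuming the ct-fast/ct-slow targets are
# wired up by the included rabbitmq-run.mk/erlang.mk machinery:
#
#   make ct-fast   # every common_test suite not listed in SLOW_CT_SUITES
#   make ct-slow   # only the long-running suites listed above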
# --------------------------------------------------------------------
# Compilation.
# --------------------------------------------------------------------
RMQ_ERLC_OPTS += -I $(DEPS_DIR)/rabbit_common/include
EDOC_OPTS += {preprocess,true},{includes,["."]}
ifdef INSTRUMENT_FOR_QC
RMQ_ERLC_OPTS += -DINSTR_MOD=gm_qc
EDOC_OPTS += ,{macros,[{'INSTR_MOD',gm_qc}]}
else
RMQ_ERLC_OPTS += -DINSTR_MOD=gm
EDOC_OPTS += ,{macros,[{'INSTR_MOD',gm}]}
endif
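
# Illustrative: `make INSTRUMENT_FOR_QC=1` builds with -DINSTR_MOD=gm_qc, so
# code referring to the ?INSTR_MOD Erlang macro picks up gm_qc instead of gm.
# A hedged sketch of how such a macro could be consumed (not a copy of the
# actual source):
#
#   -ifdef(INSTR_MOD).
#   -define(GM_MODULE, ?INSTR_MOD).
#   -else.
#   -define(GM_MODULE, gm).
#   -endif.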
ifdef CREDIT_FLOW_TRACING
RMQ_ERLC_OPTS += -DCREDIT_FLOW_TRACING=true
endif
ifdef DEBUG_FF
RMQ_ERLC_OPTS += -DDEBUG_QUORUM_QUEUE_FF=true
endif
ifdef TRACE_SUPERVISOR2
RMQ_ERLC_OPTS += -DTRACE_SUPERVISOR2=true
endif
ifndef USE_PROPER_QC
# PropEr needs to be installed for property checking
# http://proper.softlab.ntua.gr/
USE_PROPER_QC := $(shell $(ERL) -eval 'io:format({module, proper} =:= code:ensure_loaded(proper)), halt().')
RMQ_ERLC_OPTS += $(if $(filter true,$(USE_PROPER_QC)),-Duse_proper_qc)
endif
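
# Illustrative: when PropEr is found on the code path, -Duse_proper_qc
# defines the `use_proper_qc` macro, which property-based test code can check
# with conditional compilation. A minimal sketch (hypothetical module, not
# taken from this repository):
#
#   -ifdef(use_proper_qc).
#   -include_lib("proper/include/proper.hrl").
#   -endif.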
# --------------------------------------------------------------------
# Documentation.
# --------------------------------------------------------------------
.PHONY: manpages web-manpages distclean-manpages
docs:: manpages web-manpages
manpages: $(MANPAGES)
	@:
web-manpages: $(WEB_MANPAGES)
	@:
# We use mandoc(1) to convert manpages to HTML, plus an awk script which:
# 1. removes the tables at the top and the bottom (they repeat the
#    manpage name, section and date)
# 2. "downgrades" headers by one level (e.g. h1 -> h2)
# 3. annotates .Dl lines with more CSS classes
%.html: %
	$(gen_verbose) mandoc -T html -O 'fragment,man=%N.%S.html' "$<" | \
	awk '\
	/^<table class="head">$$/ { remove_table=1; next; } \
	/^<table class="foot">$$/ { remove_table=1; next; } \
	/^<\/table>$$/ { if (remove_table) { remove_table=0; next; } } \
	{ if (!remove_table) { \
	line=$$0; \
	gsub(/<h2/, "<h3", line); \
	gsub(/<\/h2>/, "</h3>", line); \
	gsub(/<h1/, "<h2", line); \
	gsub(/<\/h1>/, "</h2>", line); \
	gsub(/class="D1"/, "class=\"D1 lang-bash\"", line); \
	gsub(/class="Bd Bd-indent"/, "class=\"Bd Bd-indent lang-bash\"", line); \
	gsub(/&#[xX]201[cCdD];/, "\\&quot;", line); \
	print line; \
	} } \
	' > "$@"
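
# Illustrative usage: `make web-manpages` requires mandoc(1) on the PATH and
# regenerates docs/<name>.<section>.html for every manpage found under
# $(DOCS_DIR).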
distclean:: distclean-manpages
distclean-manpages::
	$(gen_verbose) rm -f $(WEB_MANPAGES)