Merge branch 'stable'

Conflicts:
	src/rabbit_mgmt_external_stats.erl
	src/rabbitmq_management_agent.app.src

commit 369a4a1a76

@@ -1,7 +1,8 @@
PROJECT = rabbitmq_management_agent

DEPS = rabbit_common rabbit

TEST_DEPS = rabbitmq_ct_helpers
LOCAL_DEPS += xmerl mnesia ranch ssl crypto public_key
DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk

# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be

@@ -0,0 +1,186 @@
%% The contents of this file are subject to the Mozilla Public License
%% Version 1.1 (the "License"); you may not use this file except in
%% compliance with the License. You may obtain a copy of the License at
%% http://www.mozilla.org/MPL/
%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
%% License for the specific language governing rights and limitations
%% under the License.
%%
%% The Original Code is RabbitMQ.
%%
%% The Initial Developer of the Original Code is Pivotal Software, Inc.
%% Copyright (c) 2010-2015 Pivotal Software, Inc. All rights reserved.
%%

-type(event_type() :: queue_stats | queue_exchange_stats | vhost_stats
                    | channel_queue_stats | channel_stats
                    | channel_exchange_stats | exchange_stats
                    | node_stats | node_node_stats | connection_stats).
-type(type() :: deliver_get | fine_stats | queue_msg_rates | queue_msg_counts
              | coarse_node_stats | coarse_node_node_stats | coarse_conn_stats
              | process_stats).

-type(table_name() :: atom()).

-define(TABLES, [{connection_stats_coarse_conn_stats, set},
                 {vhost_stats_coarse_conn_stats, set},
                 {connection_created_stats, set},
                 {connection_stats, set},
                 {channel_created_stats, set},
                 {channel_stats, set},
                 {channel_stats_fine_stats, set},
                 {channel_exchange_stats_fine_stats, set},
                 {channel_queue_stats_deliver_stats, set},
                 {vhost_stats_fine_stats, set},
                 {queue_stats_deliver_stats, set},
                 {vhost_stats_deliver_stats, set},
                 {channel_stats_deliver_stats, set},
                 {channel_process_stats, set},
                 {queue_stats_publish, set},
                 {queue_exchange_stats_publish, set},
                 {exchange_stats_publish_out, set},
                 {exchange_stats_publish_in, set},
                 {consumer_stats, set},
                 {queue_stats, set},
                 {queue_msg_stats, set},
                 {vhost_msg_stats, set},
                 {queue_process_stats, set},
                 {node_stats, set},
                 {node_coarse_stats, set},
                 {node_persister_stats, set},
                 {node_node_stats, set},
                 {node_node_coarse_stats, set},
                 {queue_msg_rates, set},
                 {vhost_msg_rates, set}]).

-define(INDEX_TABLES, [consumer_stats_queue_index,
                       consumer_stats_channel_index,
                       channel_exchange_stats_fine_stats_exchange_index,
                       channel_exchange_stats_fine_stats_channel_index,
                       channel_queue_stats_deliver_stats_queue_index,
                       channel_queue_stats_deliver_stats_channel_index,
                       queue_exchange_stats_publish_queue_index,
                       queue_exchange_stats_publish_exchange_index,
                       node_node_coarse_stats_node_index]).

-define(GC_EVENTS, [connection_closed, channel_closed, consumer_deleted,
                    exchange_deleted, queue_deleted, vhost_deleted,
                    node_node_deleted, channel_consumer_deleted]).

-define(DELEGATE_PREFIX, "delegate_management_").

%%------------------------------------------------------------------------------
%% Only for documentation and testing purposes, so we keep track of the number and
%% order of the metrics
-define(connection_stats_coarse_conn_stats(Recv_oct, Send_oct, Reductions),
        {Recv_oct, Send_oct, Reductions}).
-define(vhost_stats_coarse_conn_stats(Recv_oct, Send_oct), {Recv_oct, Send_oct}).
-define(connection_created_stats(Id, Name, Props), {Id, Name, Props}).
-define(connection_stats(Id, Props), {Id, Props}).
-define(channel_created_stats(Id, Name, Props), {Id, Name, Props}).
-define(channel_consumer_created_stats(Queue, ChPid, ConsumerTag),
        {Queue, {ChPid, ConsumerTag}}).
-define(channel_stats(Id, Props), {Id, Props}).
-define(channel_stats_fine_stats(Publish, Confirm, Return_unroutable),
        {Publish, Confirm, Return_unroutable}).
-define(channel_exchange_stats_fine_stats(Publish, Confirm, Return_unroutable),
        {Publish, Confirm, Return_unroutable}).
-define(channel_queue_stats_deliver_stats(Get, Get_no_ack, Deliver, Deliver_no_ack,
                                          Redeliver, Ack, Deliver_get),
        {Get, Get_no_ack, Deliver, Deliver_no_ack, Redeliver, Ack, Deliver_get}).
-define(vhost_stats_fine_stats(Publish, Confirm, Return_unroutable),
        {Publish, Confirm, Return_unroutable}).
-define(queue_stats_deliver_stats(Get, Get_no_ack, Deliver, Deliver_no_ack,
                                  Redeliver, Ack, Deliver_get),
        {Get, Get_no_ack, Deliver, Deliver_no_ack, Redeliver, Ack, Deliver_get}).
-define(vhost_stats_deliver_stats(Get, Get_no_ack, Deliver, Deliver_no_ack,
                                  Redeliver, Ack, Deliver_get),
        {Get, Get_no_ack, Deliver, Deliver_no_ack, Redeliver, Ack, Deliver_get}).
-define(channel_stats_deliver_stats(Get, Get_no_ack, Deliver, Deliver_no_ack,
                                    Redeliver, Ack, Deliver_get),
        {Get, Get_no_ack, Deliver, Deliver_no_ack, Redeliver, Ack, Deliver_get}).
-define(channel_process_stats(Reductions), {Reductions}).
-define(queue_stats_publish(Publish), {Publish}).
-define(queue_exchange_stats_publish(Publish), {Publish}).
-define(exchange_stats_publish_out(Publish_out), {Publish_out}).
-define(exchange_stats_publish_in(Publish_in), {Publish_in}).
-define(consumer_stats(Id, Props), {Id, Props}).
-define(queue_stats(Id, Props), {Id, Props}).
-define(queue_msg_stats(Messages_ready, Messages_unacknowledged, Messages),
        {Messages_ready, Messages_unacknowledged, Messages}).
-define(vhost_msg_stats(Messages_ready, Messages_unacknowledged, Messages),
        {Messages_ready, Messages_unacknowledged, Messages}).
-define(queue_process_stats(Reductions), {Reductions}).
-define(node_stats(Id, Props), {Id, Props}).
-define(node_coarse_stats(Fd_used, Sockets_used, Mem_used, Disk_free, Proc_used,
                          Gc_num, Gc_bytes_reclaimed, Context_switches),
        {Fd_used, Sockets_used, Mem_used, Disk_free, Proc_used, Gc_num,
         Gc_bytes_reclaimed, Context_switches}).
-define(node_persister_stats(Io_read_count, Io_read_bytes, Io_read_avg_time, Io_write_count,
                             Io_write_bytes, Io_write_avg_time, Io_sync_count, Io_sync_avg_time,
                             Io_seek_count, Io_seek_avg_time, Io_reopen_count, Mnesia_ram_tx_count,
                             Mnesia_disk_tx_count, Msg_store_read_count, Msg_store_write_count,
                             Queue_index_journal_write_count, Queue_index_write_count,
                             Queue_index_read_count, Io_file_handle_open_attempt_count,
                             Io_file_handle_open_attempt_avg_time),
        {Io_read_count, Io_read_bytes, Io_read_avg_time, Io_write_count, Io_write_bytes,
         Io_write_avg_time, Io_sync_count, Io_sync_avg_time, Io_seek_count, Io_seek_avg_time,
         Io_reopen_count, Mnesia_ram_tx_count, Mnesia_disk_tx_count, Msg_store_read_count,
         Msg_store_write_count, Queue_index_journal_write_count, Queue_index_write_count,
         Queue_index_read_count, Io_file_handle_open_attempt_count,
         Io_file_handle_open_attempt_avg_time}).
-define(node_node_stats(Send_bytes, Recv_bytes), {Send_bytes, Recv_bytes}).
-define(node_node_coarse_stats(Send_bytes, Recv_bytes), {Send_bytes, Recv_bytes}).
-define(queue_msg_rates(Disk_reads, Disk_writes), {Disk_reads, Disk_writes}).
-define(vhost_msg_rates(Disk_reads, Disk_writes), {Disk_reads, Disk_writes}).
-define(old_aggr_stats(Id, Stats), {Id, Stats}).


-define(stats_per_table(Table),
        case Table of
            connection_stats_coarse_conn_stats ->
                [recv_oct, send_oct, reductions];
            vhost_stats_coarse_conn_stats ->
                [recv_oct, send_oct];
            T when T =:= channel_stats_fine_stats;
                   T =:= channel_exchange_stats_fine_stats;
                   T =:= vhost_stats_fine_stats ->
                [publish, confirm, return_unroutable];
            T when T =:= channel_queue_stats_deliver_stats;
                   T =:= queue_stats_deliver_stats;
                   T =:= vhost_stats_deliver_stats;
                   T =:= channel_stats_deliver_stats ->
                [get, get_no_ack, deliver, deliver_no_ack, redeliver, ack, deliver_get];
            T when T =:= channel_process_stats;
                   T =:= queue_process_stats ->
                [reductions];
            T when T =:= queue_stats_publish;
                   T =:= queue_exchange_stats_publish ->
                [publish];
            exchange_stats_publish_out ->
                [publish_out];
            exchange_stats_publish_in ->
                [publish_in];
            T when T =:= queue_msg_stats;
                   T =:= vhost_msg_stats ->
                [messages_ready, messages_unacknowledged, messages];
            node_coarse_stats ->
                [fd_used, sockets_used, mem_used, disk_free, proc_used, gc_num,
                 gc_bytes_reclaimed, context_switches];
            node_persister_stats ->
                [io_read_count, io_read_bytes, io_read_avg_time, io_write_count,
                 io_write_bytes, io_write_avg_time, io_sync_count, io_sync_avg_time,
                 io_seek_count, io_seek_avg_time, io_reopen_count, mnesia_ram_tx_count,
                 mnesia_disk_tx_count, msg_store_read_count, msg_store_write_count,
                 queue_index_journal_write_count, queue_index_write_count,
                 queue_index_read_count, io_file_handle_open_attempt_count,
                 io_file_handle_open_attempt_avg_time];
            node_node_coarse_stats ->
                [send_bytes, recv_bytes];
            T when T =:= queue_msg_rates;
                   T =:= vhost_msg_rates ->
                [disk_reads, disk_writes]
        end).
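
%% Illustrative sketch (hypothetical values, not in the original file): the
%% per-table macros above build bare tuples of numbers, and ?stats_per_table/1
%% yields the metric names in the same order, so the two can be zipped back
%% together:
%%
%%   Stats = ?vhost_stats_coarse_conn_stats(1024, 2048),
%%   lists:zip(?stats_per_table(vhost_stats_coarse_conn_stats),
%%             tuple_to_list(Stats)).
%%   %% => [{recv_oct, 1024}, {send_oct, 2048}]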
%%------------------------------------------------------------------------------

@@ -0,0 +1,25 @@
%% The contents of this file are subject to the Mozilla Public License
%% Version 1.1 (the "License"); you may not use this file except in
%% compliance with the License. You may obtain a copy of the License at
%% http://www.mozilla.org/MPL/
%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
%% License for the specific language governing rights and limitations
%% under the License.
%%
%% The Original Code is RabbitMQ Management Console.
%%
%% The Initial Developer of the Original Code is GoPivotal, Inc.
%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved.
%%

-record(context, {user,
                  password = none,
                  impl}). % storage for a context of the resource handler

-record(range, {first :: integer(),
                last :: integer(),
                incr :: integer()}).

@@ -0,0 +1,504 @@
%% This file is a copy of exometer_slide.erl from https://github.com/Feuerlabs/exometer_core,
%% with the following modifications:
%%
%% 1) The elements are tuples of numbers
%%
%% 2) Only one element for each expected interval point is added, intermediate values
%%    are discarded. Thus, if we have a window of 60s and interval of 5s, at max 12 elements
%%    are stored.
%%
%% 3) Additions can be provided as increments to the last value stored
%%
%% 4) sum/1 implements the sum of several slides, generating a new timestamp sequence based
%%    on the given intervals. Elements on each window are added to the closest interval point.
%%
%% Original commit: https://github.com/Feuerlabs/exometer_core/commit/2759edc804211b5245867b32c9a20c8fe1d93441
%%
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2014 Basho Technologies, Inc. All Rights Reserved.
%%
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at http://mozilla.org/MPL/2.0/.
%%
%% -------------------------------------------------------------------
%%
%% @author Tony Rogvall <tony@rogvall.se>
%% @author Ulf Wiger <ulf@feuerlabs.com>
%% @author Magnus Feuer <magnus@feuerlabs.com>
%%
%% @doc Efficient sliding-window buffer
%%
%% Initial implementation: 29 Sep 2009 by Tony Rogvall
%%
%% This module implements an efficient sliding window, maintaining
%% two lists - a primary and a secondary. Values are paired with a
%% timestamp (millisecond resolution, see `timestamp/0')
%% and prepended to the primary list. When the time span between the oldest
%% and the newest entry in the primary list exceeds the given window size,
%% the primary list is shifted into the secondary list position, and the
%% new entry is added to a new (empty) primary list.
%%
%% The window can be converted to a list using `to_list/1'.
%% @end
%%
%%
%% All modifications are (C) 2007-2016 Pivotal Software, Inc. All rights reserved.
%% The Initial Developer of the Original Code is Basho Technologies, Inc.

-module(exometer_slide).

-export([new/2, new/3,
         reset/1,
         add_element/3,
         to_list/2,
         to_list/3,
         foldl/5,
         to_normalized_list/5]).

-export([timestamp/0,
         last_two/1,
         last/1]).

-export([sum/1,
         sum/2,
         sum/5,
         optimize/1]).

-compile(inline).
-compile(inline_list_funcs).


-type value() :: tuple().
-type internal_value() :: tuple() | drop.
-type timestamp() :: non_neg_integer().

-type fold_acc() :: any().
-type fold_fun() :: fun(({timestamp(), internal_value()}, fold_acc()) -> fold_acc()).

%% Fixed size event buffer
-record(slide, {size = 0 :: integer(), % ms window
                n = 0 :: integer(), % number of elements in buf1
                max_n :: infinity | integer(), % max no of elements
                incremental = false :: boolean(),
                interval :: integer(),
                last = 0 :: integer(), % millisecond timestamp
                first = undefined :: undefined | integer(), % millisecond timestamp
                buf1 = [] :: [internal_value()],
                buf2 = [] :: [internal_value()],
                total :: undefined | value()}).

-opaque slide() :: #slide{}.

-export_type([slide/0, timestamp/0]).

-spec timestamp() -> timestamp().
%% @doc Generate a millisecond-resolution timestamp.
%%
%% This timestamp format is used e.g. by the `exometer_slide' and
%% `exometer_histogram' implementations.
%% @end
timestamp() ->
    time_compat:os_system_time(milli_seconds).

-spec new(_Size::integer(), _Options::list()) -> slide().
%% @doc Create a new sliding-window buffer.
%%
%% `Size' determines the size in milliseconds of the sliding window.
%% The implementation prepends values into a primary list until the oldest
%% element in the list is `Size' ms older than the current value. It then
%% swaps the primary list into a secondary list, and starts prepending to
%% a new primary list. This means that more data than fits inside the window
%% will be kept - upwards of twice as much. On the other hand, updating the
%% buffer is very cheap.
%% @end
new(Size, Opts) -> new(timestamp(), Size, Opts).

-spec new(Timestamp :: timestamp(), Size::integer(), Options::list()) -> slide().
new(TS, Size, Opts) ->
    #slide{size = Size,
           max_n = proplists:get_value(max_n, Opts, infinity),
           interval = proplists:get_value(interval, Opts, infinity),
           last = TS,
           first = undefined,
           incremental = proplists:get_value(incremental, Opts, false),
           buf1 = [],
           buf2 = []}.
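
%% Usage sketch (hypothetical sizes, not in the original module): a 60s
%% window that realises at most one sample per 5s interval, with additions
%% applied as increments to the running total:
%%
%%   S0 = exometer_slide:new(60000, [{interval, 5000}, {incremental, true}]),
%%   S1 = exometer_slide:add_element(exometer_slide:timestamp(), {10, 20}, S0).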

-spec reset(slide()) -> slide().

%% @doc Empty the buffer
%%
reset(Slide) ->
    Slide#slide{n = 0, buf1 = [], buf2 = [], last = 0}.

%% @doc Add an element to the buffer, tagged with the given timestamp.
%%
%% Apart from the specified timestamp, this function works just like
%% {@link add_element/2}.
%% @end
-spec add_element(timestamp(), value(), slide()) -> slide().
add_element(_TS, _Evt, Slide) when Slide#slide.size == 0 ->
    Slide;
add_element(TS, Evt, #slide{last = Last, interval = Interval, total = Total0,
                            incremental = true} = Slide)
  when (TS - Last) < Interval ->
    Total = add_to_total(Evt, Total0),
    Slide#slide{total = Total};
add_element(TS, Evt, #slide{last = Last, interval = Interval} = Slide)
  when (TS - Last) < Interval ->
    Slide#slide{total = Evt};
add_element(TS, Evt, #slide{last = Last, size = Sz, incremental = true,
                            n = N, max_n = MaxN, total = Total0,
                            buf1 = Buf1} = Slide) ->
    N1 = N+1,
    Total = add_to_total(Evt, Total0),
    %% Total could be the same as the last sample, by adding and subtracting
    %% the same amount to the totals. That is not strictly a drop, but should
    %% generate new samples.
    %% I.e. 0, 0, -14, 14 (total = 0, samples = 14, -14, 0, drop)
    case {is_zeros(Evt), Buf1} of
        {_, []} ->
            Slide#slide{n = N1, first = TS, buf1 = [{TS, Total} | Buf1],
                        last = TS, total = Total};
        _ when TS - Last > Sz; N1 > MaxN ->
            %% swap
            Slide#slide{last = TS, n = 1, buf1 = [{TS, Total}],
                        buf2 = Buf1, total = Total};
        {true, [{_, Total}, {_, drop} = Drop | Tail]} ->
            %% Memory optimisation
            Slide#slide{buf1 = [{TS, Total}, Drop | Tail],
                        n = N1, last = TS};
        {true, [{DropTS, Total} | Tail]} ->
            %% Memory optimisation
            Slide#slide{buf1 = [{TS, Total}, {DropTS, drop} | Tail],
                        n = N1, last = TS};
        _ ->
            Slide#slide{n = N1, buf1 = [{TS, Total} | Buf1],
                        last = TS, total = Total}
    end;
add_element(TS, Evt, #slide{last = Last, size = Sz, n = N, max_n = MaxN,
                            buf1 = Buf1} = Slide)
  when TS - Last > Sz; N + 1 > MaxN ->
    Slide#slide{last = TS, n = 1, buf1 = [{TS, Evt}],
                buf2 = Buf1, total = Evt};
add_element(TS, Evt, #slide{buf1 = [{_, Evt}, {_, drop} = Drop | Tail],
                            n = N} = Slide) ->
    %% Memory optimisation
    Slide#slide{buf1 = [{TS, Evt}, Drop | Tail], n = N + 1, last = TS};
add_element(TS, Evt, #slide{buf1 = [{DropTS, Evt} | Tail], n = N} = Slide) ->
    %% Memory optimisation
    Slide#slide{buf1 = [{TS, Evt}, {DropTS, drop} | Tail],
                n = N + 1, last = TS};
add_element(TS, Evt, #slide{n = N, buf1 = Buf1} = Slide) ->
    N1 = N+1,
    case Buf1 of
        [] ->
            Slide#slide{n = N1, buf1 = [{TS, Evt} | Buf1],
                        last = TS, first = TS, total = Evt};
        _ ->
            Slide#slide{n = N1, buf1 = [{TS, Evt} | Buf1],
                        last = TS, total = Evt}
    end.
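
%% Behaviour sketch for the incremental mode above (hypothetical values):
%% each event tuple is added to the running total, and at most one sample
%% per interval is realised in the buffer:
%%
%%   S0 = exometer_slide:new(0, 60000, [{interval, 5000}, {incremental, true}]),
%%   S1 = exometer_slide:add_element(5000, {1, 0}, S0),
%%   S2 = exometer_slide:add_element(10000, {1, 2}, S1),
%%   exometer_slide:last(S2).
%%   %% => {2, 2}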

add_to_total(Evt, undefined) ->
    Evt;
add_to_total({A0}, {B0}) ->
    {B0 + A0};
add_to_total({A0, A1}, {B0, B1}) ->
    {B0 + A0, B1 + A1};
add_to_total({A0, A1, A2}, {B0, B1, B2}) ->
    {B0 + A0, B1 + A1, B2 + A2};
add_to_total({A0, A1, A2, A3, A4, A5, A6}, {B0, B1, B2, B3, B4, B5, B6}) ->
    {B0 + A0, B1 + A1, B2 + A2, B3 + A3, B4 + A4, B5 + A5, B6 + A6};
add_to_total({A0, A1, A2, A3, A4, A5, A6, A7}, {B0, B1, B2, B3, B4, B5, B6, B7}) ->
    {B0 + A0, B1 + A1, B2 + A2, B3 + A3, B4 + A4, B5 + A5, B6 + A6, B7 + A7};
add_to_total({A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14,
              A15, A16, A17, A18, A19},
             {B0, B1, B2, B3, B4, B5, B6, B7, B8, B9, B10, B11, B12, B13, B14,
              B15, B16, B17, B18, B19}) ->
    {B0 + A0, B1 + A1, B2 + A2, B3 + A3, B4 + A4, B5 + A5, B6 + A6, B7 + A7, B8 + A8,
     B9 + A9, B10 + A10, B11 + A11, B12 + A12, B13 + A13, B14 + A14, B15 + A15, B16 + A16,
     B17 + A17, B18 + A18, B19 + A19}.

is_zeros({0}) ->
    true;
is_zeros({0, 0}) ->
    true;
is_zeros({0, 0, 0}) ->
    true;
is_zeros({0, 0, 0, 0, 0, 0, 0}) ->
    true;
is_zeros({0, 0, 0, 0, 0, 0, 0, 0}) ->
    true;
is_zeros({0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}) ->
    true;
is_zeros(_) ->
    false.

-spec optimize(#slide{}) -> #slide{}.
optimize(#slide{buf2 = []} = Slide) ->
    Slide;
optimize(#slide{buf1 = Buf1, buf2 = Buf2, max_n = MaxN, n = N} = Slide)
  when is_integer(MaxN) andalso length(Buf1) < MaxN ->
    Slide#slide{buf1 = Buf1,
                buf2 = lists:sublist(Buf2, n_diff(MaxN, N) + 1)};
optimize(Slide) -> Slide.

snd(T) when is_tuple(T) ->
    element(2, T).


-spec to_list(timestamp(), #slide{}) -> [{timestamp(), value()}].
%% @doc Convert the sliding window into a list of timestamped values.
%% @end
to_list(_Now, #slide{size = Sz}) when Sz == 0 ->
    [];
to_list(Now, #slide{size = Sz} = Slide) ->
    snd(to_list_from(Now, Now - Sz, Slide)).

to_list(Now, Start, Slide) ->
    snd(to_list_from(Now, Start, Slide)).

to_list_from(Now, Start0, #slide{max_n = MaxN, buf2 = Buf2, first = FirstTS,
                                 interval = Interval} = Slide) ->

    {NewN, Buf1} = maybe_add_last_sample(Now, Slide),
    Start = first_max(FirstTS, Start0),
    {Prev0, Buf1_1} = take_since(Buf1, Now, Start, first_max(MaxN, NewN), [], Interval),
    case take_since(Buf2, Now, Start, first_max(MaxN, NewN), Buf1_1, Interval) of
        {undefined, Buf1_1} ->
            {Prev0, Buf1_1};
        Res ->
            Res
    end.

first_max(F, X) when is_integer(F) -> max(F, X);
first_max(_, X) -> X.

-spec last_two(slide()) -> [{timestamp(), value()}].
%% @doc Returns the newest two elements in the sample
last_two(#slide{buf1 = [{TS, Evt} = H1, {_, drop} | _], interval = Interval}) ->
    [H1, {TS - Interval, Evt}];
last_two(#slide{buf1 = [H1, H2 | _]}) ->
    [H1, H2];
last_two(#slide{buf1 = [H1], buf2 = [H2 | _]}) ->
    [H1, H2];
last_two(#slide{buf1 = [H1], buf2 = []}) ->
    [H1];
last_two(#slide{buf1 = [], buf2 = [{TS, Evt} = H1, {_, drop} | _], interval = Interval}) ->
    [H1, {TS - Interval, Evt}];
last_two(#slide{buf1 = [], buf2 = [H1, H2 | _]}) ->
    [H1, H2];
last_two(#slide{buf1 = [], buf2 = [H1]}) ->
    [H1];
last_two(_) ->
    [].
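
%% Sketch of the drop handling above (hypothetical buffer): a {TS, drop}
%% marker stands for "same value as the newer sample", so last_two/1
%% re-materialises it one interval back:
%%
%%   last_two(#slide{buf1 = [{10000, {5}}, {4000, drop}], interval = 5000}).
%%   %% => [{10000, {5}}, {5000, {5}}]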

-spec last(slide()) -> value() | undefined.
last(#slide{total = T}) when T =/= undefined ->
    T;
last(#slide{buf1 = [{_TS, T} | _]}) ->
    T;
last(#slide{buf2 = [{_TS, T} | _]}) ->
    T;
last(_) ->
    undefined.

-spec foldl(timestamp(), timestamp(), fold_fun(), fold_acc(), slide()) -> fold_acc().
%% @doc Fold over the sliding window, starting from `Timestamp'.
%% Now provides a reference point to evaluate whether to include
%% partial, unrealised sample values in the sequence. Unrealised values will be
%% appended to the sequence when Now >= LastTS + Interval
%%
%% The fun should be of the form `fun({Timestamp, Value}, Acc) -> NewAcc'.
%% The values are processed in order from oldest to newest.
%% @end
foldl(_Now, _Timestamp, _Fun, _Acc, #slide{size = Sz}) when Sz == 0 ->
    [];
foldl(Now, Start0, Fun, Acc, #slide{max_n = _MaxN, buf2 = _Buf2, first = _FirstTS,
                                    interval = _Interval} = Slide) ->
    lists:foldl(Fun, Acc, element(2, to_list_from(Now, Start0, Slide)) ++ [last]).

maybe_add_last_sample(_Now, #slide{total = T, n = N,
                                   buf1 = [{_, T} | _] = Buf1}) ->
    {N, Buf1};
maybe_add_last_sample(Now, #slide{total = T,
                                  n = N,
                                  last = Last,
                                  interval = I,
                                  buf1 = Buf1})
  when T =/= undefined andalso Now >= Last + I ->
    {N + 1, [{Last + I, T} | Buf1]};
maybe_add_last_sample(_Now, #slide{buf1 = Buf1, n = N}) ->
    {N, Buf1}.
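
%% Sketch (hypothetical values): when a full interval has passed since the
%% last realised sample and the running total differs from it, an extra
%% sample is synthesised at Last + Interval:
%%
%%   maybe_add_last_sample(11000, #slide{total = {3}, n = 1, last = 5000,
%%                                       interval = 5000,
%%                                       buf1 = [{5000, {1}}]}).
%%   %% => {2, [{10000, {3}}, {5000, {1}}]}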


create_normalized_lookup(Start, Interval, RoundFun, Samples) ->
    lists:foldl(fun({TS, Value}, Dict) when TS - Start >= 0 ->
                        NewTS = map_timestamp(TS, Start, Interval, RoundFun),
                        orddict:update(NewTS, fun({T, V}) when T > TS ->
                                                      {T, V};
                                                 (_) -> {TS, Value}
                                              end, {TS, Value}, Dict);
                   (_, Dict) -> Dict end, orddict:new(),
                Samples).
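
%% Sketch (hypothetical values): samples are re-keyed onto the requested
%% interval grid; if two samples round to the same grid point, the one with
%% the greater original timestamp is kept:
%%
%%   create_normalized_lookup(0, 5000, fun ceil/1, [{6000, a}, {7000, b}]).
%%   %% => [{10000, {7000, b}}]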

-spec to_normalized_list(timestamp(), timestamp(), integer(), slide(), no_pad | tuple()) ->
    [tuple()].
to_normalized_list(Now, Start, Interval, Slide, Empty) ->
    to_normalized_list(Now, Start, Interval, Slide, Empty, fun ceil/1).

to_normalized_list(Now, Start, Interval, #slide{first = FirstTS0,
                                                total = Total} = Slide,
                   Empty, RoundFun) ->

    RoundTSFun = fun (TS) -> map_timestamp(TS, Start, Interval, RoundFun) end,

    % add interval as we don't want to miss a sample due to rounding
    {Prev, Samples} = to_list_from(Now + Interval, Start, Slide),
    Lookup = create_normalized_lookup(Start, Interval, RoundFun, Samples),

    NowRound = RoundTSFun(Now),

    Pad = case Samples of
              _ when Empty =:= no_pad ->
                  [];
              [{TS, _} | _] when Prev =/= undefined, Start =< TS ->
                  [{T, snd(Prev)}
                   || T <- lists:seq(RoundTSFun(TS) - Interval, Start,
                                     -Interval)];
              [{TS, _} | _] when Start < FirstTS0 ->
                  % only if we know there is nothing in the past can we
                  % generate a 0 pad
                  [{T, Empty} || T <- lists:seq(RoundTSFun(TS) - Interval, Start,
                                                -Interval)];
              _ when FirstTS0 =:= undefined andalso Total =:= undefined ->
                  [{T, Empty} || T <- lists:seq(NowRound, Start, -Interval)];
              [] -> % samples have been seen, use the total to pad
                  [{T, Total} || T <- lists:seq(NowRound, Start, -Interval)];
              _ -> []
          end,

    {_, Res1} = lists:foldl(
                  fun(T, {Last, Acc}) ->
                          case orddict:find(T, Lookup) of
                              {ok, {_, V}} ->
                                  {V, [{T, V} | Acc]};
                              error when Last =:= undefined ->
                                  {Last, Acc};
                              error -> % this pads the last value into the future
                                  {Last, [{T, Last} | Acc]}
                          end
                  end, {undefined, []}, lists:seq(Start, NowRound, Interval)),
    Res1 ++ Pad.


%% @doc Sums a list of slides
%%
%% Takes the last known timestamp and creates a template version of the
%% sliding window. Timestamps are then truncated and summed with the value
%% in the template slide.
%% @end
-spec sum([slide()]) -> slide().
sum(Slides) -> sum(Slides, no_pad).

sum([#slide{size = Size, interval = Interval} | _] = Slides, Pad) ->
    % take the freshest timestamp as reference point for summing operation
    Now = lists:max([Last || #slide{last = Last} <- Slides]),
    Start = Now - Size,
    sum(Now, Start, Interval, Slides, Pad).


sum(Now, Start, Interval, [Slide | _ ] = All, Pad) ->
    Fun = fun({TS, Value}, Dict) ->
                  orddict:update(TS, fun(V) -> add_to_total(V, Value) end,
                                 Value, Dict)
          end,
    {Total, Dict} =
        lists:foldl(fun(#slide{total = T} = S, {Tot, Acc}) ->
                            Samples = to_normalized_list(Now, Start, Interval, S,
                                                         Pad, fun ceil/1),
                            Total = add_to_total(T, Tot),
                            Folded = lists:foldl(Fun, Acc, Samples),
                            {Total, Folded}
                    end, {undefined, orddict:new()}, All),

    {First, Buffer} = case orddict:to_list(Dict) of
                          [] ->
                              F = case [TS || #slide{first = TS} <- All,
                                              is_integer(TS)] of
                                      [] -> undefined;
                                      FS -> lists:min(FS)
                                  end,
                              {F, []};
                          [{F, _} | _ ] = B ->
                              {F, lists:reverse(B)}
                      end,
    Slide#slide{buf1 = Buffer, buf2 = [], total = Total, n = length(Buffer),
                first = First, last = Now}.
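
%% Summing sketch (hypothetical slides): samples falling on the same
%% normalised timestamp are combined with add_to_total/2, as are the totals:
%%
%%   A = exometer_slide:new(0, 60000, [{interval, 5000}, {incremental, true}]),
%%   B = exometer_slide:new(0, 60000, [{interval, 5000}, {incremental, true}]),
%%   A1 = exometer_slide:add_element(5000, {10}, A),
%%   B1 = exometer_slide:add_element(5000, {32}, B),
%%   exometer_slide:last(exometer_slide:sum([A1, B1])).
%%   %% => {42}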


truncated_seq(_First, _Last, _Incr, 0) ->
    [];
truncated_seq(TS, TS, _Incr, MaxN) when MaxN > 0 ->
    [TS];
truncated_seq(First, Last, Incr, MaxN) when First =< Last andalso MaxN > 0 ->
    End = min(Last, First + (MaxN * Incr) - Incr),
    lists:seq(First, End, Incr);
truncated_seq(First, Last, Incr, MaxN) ->
    End = max(Last, First + (MaxN * Incr) - Incr),
    lists:seq(First, End, Incr).
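
%% Sketch (hypothetical values): like lists:seq/3, but capped at MaxN
%% elements; used below to expand drop markers back into samples:
%%
%%   truncated_seq(0, 25000, 5000, 3).
%%   %% => [0, 5000, 10000]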


take_since([{DropTS, drop} | T], Now, Start, N, [{TS, Evt} | _] = Acc,
           Interval) ->
    case T of
        [] ->
            Fill = [{TS0, Evt} || TS0 <- truncated_seq(TS - Interval,
                                                       max(DropTS, Start),
                                                       -Interval, N)],
            {undefined, lists:reverse(Fill) ++ Acc};
        [{TS0, _} = E | Rest] when TS0 >= Start, N > 0 ->
            Fill = [{TS1, Evt} || TS1 <- truncated_seq(TS0 + Interval, TS - Interval,
                                                       Interval, N)],
            take_since(Rest, Now, Start, decr(N), [E | Fill ++ Acc], Interval);
        [Prev | _] -> % next sample is out of range so needs to be filled from Start
            Fill = [{TS1, Evt} || TS1 <- truncated_seq(Start, TS - Interval,
                                                       Interval, N)],
            {Prev, Fill ++ Acc}
    end;
take_since([{TS, V} = H | T], Now, Start, N, Acc, Interval) when TS >= Start,
                                                                 N > 0,
                                                                 TS =< Now,
                                                                 is_tuple(V) ->
    take_since(T, Now, Start, decr(N), [H|Acc], Interval);
take_since([{TS,_} | T], Now, Start, N, Acc, Interval) when TS >= Start, N > 0 ->
    take_since(T, Now, Start, decr(N), Acc, Interval);
take_since([Prev | _], _, _, _, Acc, _) ->
    {Prev, Acc};
take_since(_, _, _, _, Acc, _) ->
    %% Don't reverse; already the wanted order.
    {undefined, Acc}.
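
%% Sketch (hypothetical buffer, newest first): a drop marker is expanded
%% back into one sample per interval, all carrying the newer value:
%%
%%   take_since([{20000, {7}}, {5000, drop}], 20000, 0, 10, [], 5000).
%%   %% => {undefined, [{5000, {7}}, {10000, {7}}, {15000, {7}}, {20000, {7}}]}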

decr(N) when is_integer(N) ->
    N-1;
decr(N) -> N.

n_diff(A, B) when is_integer(A) ->
    A - B.

ceil(X) when X < 0 ->
    trunc(X);
ceil(X) ->
    T = trunc(X),
    case X - T == 0 of
        true -> T;
        false -> T + 1
    end.

map_timestamp(TS, Start, Interval, Round) ->
    Factor = Round((TS - Start) / Interval),
    Start + Interval * Factor.
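
%% Worked example (hypothetical values): snap a raw timestamp onto the
%% interval grid anchored at Start; with ceil/1, 12300 rounds up to the
%% next grid point:
%%
%%   map_timestamp(12300, 10000, 5000, fun ceil/1).
%%   %% => 15000    (Factor = ceil(2300 / 5000) = 1)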

@@ -0,0 +1,993 @@
%% @author Bob Ippolito <bob@mochimedia.com>
%% @copyright 2007 Mochi Media, Inc.

%% @doc Utilities for parsing and quoting.

-module(mochiweb_util).
-author('bob@mochimedia.com').
-export([join/2, quote_plus/1, urlencode/1, parse_qs/1, unquote/1]).
-export([path_split/1]).
-export([urlsplit/1, urlsplit_path/1, urlunsplit/1, urlunsplit_path/1]).
-export([parse_header/1]).
-export([shell_quote/1, cmd/1, cmd_string/1, cmd_port/2, cmd_status/1, cmd_status/2]).
-export([record_to_proplist/2, record_to_proplist/3]).
-export([safe_relative_path/1, partition/2]).
-export([parse_qvalues/1, pick_accepted_encodings/3]).
-export([make_io/1]).

-define(PERCENT, 37).  % $\%
-define(FULLSTOP, 46). % $\.
-define(IS_HEX(C), ((C >= $0 andalso C =< $9) orelse
                    (C >= $a andalso C =< $f) orelse
                    (C >= $A andalso C =< $F))).
-define(QS_SAFE(C), ((C >= $a andalso C =< $z) orelse
                     (C >= $A andalso C =< $Z) orelse
                     (C >= $0 andalso C =< $9) orelse
                     (C =:= ?FULLSTOP orelse C =:= $- orelse C =:= $~ orelse
                      C =:= $_))).

hexdigit(C) when C < 10 -> $0 + C;
hexdigit(C) when C < 16 -> $A + (C - 10).

unhexdigit(C) when C >= $0, C =< $9 -> C - $0;
unhexdigit(C) when C >= $a, C =< $f -> C - $a + 10;
unhexdigit(C) when C >= $A, C =< $F -> C - $A + 10.

%% @spec partition(String, Sep) -> {String, [], []} | {Prefix, Sep, Postfix}
%% @doc Inspired by Python 2.5's str.partition:
%%      partition("foo/bar", "/") = {"foo", "/", "bar"},
%%      partition("foo", "/") = {"foo", "", ""}.
partition(String, Sep) ->
    case partition(String, Sep, []) of
        undefined ->
            {String, "", ""};
        Result ->
            Result
    end.

partition("", _Sep, _Acc) ->
    undefined;
partition(S, Sep, Acc) ->
    case partition2(S, Sep) of
        undefined ->
            [C | Rest] = S,
            partition(Rest, Sep, [C | Acc]);
        Rest ->
            {lists:reverse(Acc), Sep, Rest}
    end.

partition2(Rest, "") ->
    Rest;
partition2([C | R1], [C | R2]) ->
    partition2(R1, R2);
partition2(_S, _Sep) ->
    undefined.


%% @spec safe_relative_path(string()) -> string() | undefined
%% @doc Return the reduced version of a relative path or undefined if it
%%      is not safe. Safe relative paths can be joined with an absolute path
%%      and will result in a subdirectory of the absolute path. Safe paths
%%      never contain a backslash character.
safe_relative_path("/" ++ _) ->
    undefined;
safe_relative_path(P) ->
    case string:chr(P, $\\) of
        0 ->
            safe_relative_path(P, []);
        _ ->
            undefined
    end.

safe_relative_path("", Acc) ->
    case Acc of
        [] ->
            "";
        _ ->
            string:join(lists:reverse(Acc), "/")
    end;
safe_relative_path(P, Acc) ->
    case partition(P, "/") of
        {"", "/", _} ->
            %% /foo or foo//bar
            undefined;
        {"..", _, _} when Acc =:= [] ->
            undefined;
        {"..", _, Rest} ->
            safe_relative_path(Rest, tl(Acc));
        {Part, "/", ""} ->
            safe_relative_path("", ["", Part | Acc]);
        {Part, _, Rest} ->
            safe_relative_path(Rest, [Part | Acc])
    end.

%% @spec shell_quote(string()) -> string()
%% @doc Quote a string according to UNIX shell quoting rules, returns a string
%%      surrounded by double quotes.
shell_quote(L) ->
    shell_quote(L, [$\"]).

%% @spec cmd_port([string()], Options) -> port()
%% @doc open_port({spawn, mochiweb_util:cmd_string(Argv)}, Options).
cmd_port(Argv, Options) ->
    open_port({spawn, cmd_string(Argv)}, Options).

%% @spec cmd([string()]) -> string()
%% @doc os:cmd(cmd_string(Argv)).
cmd(Argv) ->
    os:cmd(cmd_string(Argv)).

%% @spec cmd_string([string()]) -> string()
%% @doc Create a shell quoted command string from a list of arguments.
cmd_string(Argv) ->
    string:join([shell_quote(X) || X <- Argv], " ").

%% @spec cmd_status([string()]) -> {ExitStatus::integer(), Stdout::binary()}
%% @doc Accumulate the output and exit status from the given application,
%%      which will be spawned with cmd_port/2.
cmd_status(Argv) ->
    cmd_status(Argv, []).

%% @spec cmd_status([string()], [atom()]) -> {ExitStatus::integer(), Stdout::binary()}
%% @doc Accumulate the output and exit status from the given application,
%%      which will be spawned with cmd_port/2.
cmd_status(Argv, Options) ->
    Port = cmd_port(Argv, [exit_status, stderr_to_stdout,
                           use_stdio, binary | Options]),
    try cmd_loop(Port, [])
    after catch port_close(Port)
    end.

%% @spec cmd_loop(port(), list()) -> {ExitStatus::integer(), Stdout::binary()}
%% @doc Accumulate the output and exit status from a port.
cmd_loop(Port, Acc) ->
    receive
        {Port, {exit_status, Status}} ->
            {Status, iolist_to_binary(lists:reverse(Acc))};
        {Port, {data, Data}} ->
            cmd_loop(Port, [Data | Acc])
    end.

%% @spec join([iolist()], iolist()) -> iolist()
%% @doc Join a list of strings or binaries together with the given separator
%%      string or char or binary. The output is flattened, but may be an
%%      iolist() instead of a string() if any of the inputs are binary().
join([], _Separator) ->
    [];
join([S], _Separator) ->
    lists:flatten(S);
join(Strings, Separator) ->
    lists:flatten(revjoin(lists:reverse(Strings), Separator, [])).

revjoin([], _Separator, Acc) ->
    Acc;
revjoin([S | Rest], Separator, []) ->
    revjoin(Rest, Separator, [S]);
revjoin([S | Rest], Separator, Acc) ->
    revjoin(Rest, Separator, [S, Separator | Acc]).

%% @spec quote_plus(atom() | integer() | float() | string() | binary()) -> string()
%% @doc URL safe encoding of the given term.
quote_plus(Atom) when is_atom(Atom) ->
    quote_plus(atom_to_list(Atom));
quote_plus(Int) when is_integer(Int) ->
    quote_plus(integer_to_list(Int));
quote_plus(Binary) when is_binary(Binary) ->
    quote_plus(binary_to_list(Binary));
quote_plus(Float) when is_float(Float) ->
    quote_plus(mochinum:digits(Float));
quote_plus(String) ->
    quote_plus(String, []).

quote_plus([], Acc) ->
    lists:reverse(Acc);
quote_plus([C | Rest], Acc) when ?QS_SAFE(C) ->
    quote_plus(Rest, [C | Acc]);
quote_plus([$\s | Rest], Acc) ->
    quote_plus(Rest, [$+ | Acc]);
quote_plus([C | Rest], Acc) ->
    <<Hi:4, Lo:4>> = <<C>>,
    quote_plus(Rest, [hexdigit(Lo), hexdigit(Hi), ?PERCENT | Acc]).
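
%% Worked example (illustrative): every byte outside ?QS_SAFE is split into
%% its high and low nibbles and emitted as %HH, while spaces become pluses:
%%
%%   quote_plus("foo bar;") = "foo+bar%3B"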

%% @spec urlencode([{Key, Value}]) -> string()
%% @doc URL encode the property list.
urlencode(Props) ->
    Pairs = lists:foldr(
              fun ({K, V}, Acc) ->
                      [quote_plus(K) ++ "=" ++ quote_plus(V) | Acc]
              end, [], Props),
    string:join(Pairs, "&").

%% @spec parse_qs(string() | binary()) -> [{Key, Value}]
%% @doc Parse a query string or application/x-www-form-urlencoded.
parse_qs(Binary) when is_binary(Binary) ->
    parse_qs(binary_to_list(Binary));
parse_qs(String) ->
    parse_qs(String, []).

parse_qs([], Acc) ->
    lists:reverse(Acc);
parse_qs(String, Acc) ->
    {Key, Rest} = parse_qs_key(String),
    {Value, Rest1} = parse_qs_value(Rest),
    parse_qs(Rest1, [{Key, Value} | Acc]).

parse_qs_key(String) ->
    parse_qs_key(String, []).

parse_qs_key([], Acc) ->
    {qs_revdecode(Acc), ""};
parse_qs_key([$= | Rest], Acc) ->
    {qs_revdecode(Acc), Rest};
parse_qs_key(Rest=[$; | _], Acc) ->
    {qs_revdecode(Acc), Rest};
parse_qs_key(Rest=[$& | _], Acc) ->
    {qs_revdecode(Acc), Rest};
parse_qs_key([C | Rest], Acc) ->
    parse_qs_key(Rest, [C | Acc]).

parse_qs_value(String) ->
    parse_qs_value(String, []).

parse_qs_value([], Acc) ->
    {qs_revdecode(Acc), ""};
parse_qs_value([$; | Rest], Acc) ->
    {qs_revdecode(Acc), Rest};
parse_qs_value([$& | Rest], Acc) ->
    {qs_revdecode(Acc), Rest};
parse_qs_value([C | Rest], Acc) ->
    parse_qs_value(Rest, [C | Acc]).

%% @spec unquote(string() | binary()) -> string()
%% @doc Unquote a URL encoded string.
unquote(Binary) when is_binary(Binary) ->
    unquote(binary_to_list(Binary));
unquote(String) ->
    qs_revdecode(lists:reverse(String)).

qs_revdecode(S) ->
    qs_revdecode(S, []).

qs_revdecode([], Acc) ->
    Acc;
qs_revdecode([$+ | Rest], Acc) ->
    qs_revdecode(Rest, [$\s | Acc]);
qs_revdecode([Lo, Hi, ?PERCENT | Rest], Acc) when ?IS_HEX(Lo), ?IS_HEX(Hi) ->
    qs_revdecode(Rest, [(unhexdigit(Lo) bor (unhexdigit(Hi) bsl 4)) | Acc]);
qs_revdecode([C | Rest], Acc) ->
    qs_revdecode(Rest, [C | Acc]).

%% @spec urlsplit(Url) -> {Scheme, Netloc, Path, Query, Fragment}
%% @doc Return a 5-tuple, does not expand % escapes. Only supports HTTP style
%%      URLs.
urlsplit(Url) ->
    {Scheme, Url1} = urlsplit_scheme(Url),
    {Netloc, Url2} = urlsplit_netloc(Url1),
    {Path, Query, Fragment} = urlsplit_path(Url2),
    {Scheme, Netloc, Path, Query, Fragment}.

urlsplit_scheme(Url) ->
    case urlsplit_scheme(Url, []) of
        no_scheme ->
            {"", Url};
        Res ->
            Res
    end.

urlsplit_scheme([C | Rest], Acc) when ((C >= $a andalso C =< $z) orelse
                                       (C >= $A andalso C =< $Z) orelse
                                       (C >= $0 andalso C =< $9) orelse
                                       C =:= $+ orelse C =:= $- orelse
                                       C =:= $.) ->
    urlsplit_scheme(Rest, [C | Acc]);
urlsplit_scheme([$: | Rest], Acc=[_ | _]) ->
    {string:to_lower(lists:reverse(Acc)), Rest};
urlsplit_scheme(_Rest, _Acc) ->
    no_scheme.

urlsplit_netloc("//" ++ Rest) ->
    urlsplit_netloc(Rest, []);
urlsplit_netloc(Path) ->
    {"", Path}.

urlsplit_netloc("", Acc) ->
    {lists:reverse(Acc), ""};
urlsplit_netloc(Rest=[C | _], Acc) when C =:= $/; C =:= $?; C =:= $# ->
    {lists:reverse(Acc), Rest};
urlsplit_netloc([C | Rest], Acc) ->
    urlsplit_netloc(Rest, [C | Acc]).


%% @spec path_split(string()) -> {Part, Rest}
%% @doc Split a path starting from the left, as in URL traversal.
%%      path_split("foo/bar") = {"foo", "bar"},
%%      path_split("/foo/bar") = {"", "foo/bar"}.
path_split(S) ->
    path_split(S, []).

path_split("", Acc) ->
    {lists:reverse(Acc), ""};
path_split("/" ++ Rest, Acc) ->
    {lists:reverse(Acc), Rest};
path_split([C | Rest], Acc) ->
    path_split(Rest, [C | Acc]).


%% @spec urlunsplit({Scheme, Netloc, Path, Query, Fragment}) -> string()
%% @doc Assemble a URL from the 5-tuple. Path must be absolute.
urlunsplit({Scheme, Netloc, Path, Query, Fragment}) ->
    lists:flatten([case Scheme of "" -> ""; _ -> [Scheme, "://"] end,
                   Netloc,
                   urlunsplit_path({Path, Query, Fragment})]).

%% @spec urlunsplit_path({Path, Query, Fragment}) -> string()
%% @doc Assemble a URL path from the 3-tuple.
urlunsplit_path({Path, Query, Fragment}) ->
    lists:flatten([Path,
                   case Query of "" -> ""; _ -> [$? | Query] end,
                   case Fragment of "" -> ""; _ -> [$# | Fragment] end]).

%% @spec urlsplit_path(Url) -> {Path, Query, Fragment}
%% @doc Return a 3-tuple, does not expand % escapes. Only supports HTTP style
%%      paths.
urlsplit_path(Path) ->
    urlsplit_path(Path, []).

urlsplit_path("", Acc) ->
    {lists:reverse(Acc), "", ""};
urlsplit_path("?" ++ Rest, Acc) ->
    {Query, Fragment} = urlsplit_query(Rest),
    {lists:reverse(Acc), Query, Fragment};
urlsplit_path("#" ++ Rest, Acc) ->
    {lists:reverse(Acc), "", Rest};
urlsplit_path([C | Rest], Acc) ->
    urlsplit_path(Rest, [C | Acc]).

urlsplit_query(Query) ->
    urlsplit_query(Query, []).

urlsplit_query("", Acc) ->
    {lists:reverse(Acc), ""};
urlsplit_query("#" ++ Rest, Acc) ->
    {lists:reverse(Acc), Rest};
urlsplit_query([C | Rest], Acc) ->
    urlsplit_query(Rest, [C | Acc]).

% %% @spec guess_mime(string()) -> string()
% %% @doc Guess the mime type of a file by the extension of its filename.
% guess_mime(File) ->
%     case filename:basename(File) of
%         "crossdomain.xml" ->
%             "text/x-cross-domain-policy";
%         Name ->
%             case mochiweb_mime:from_extension(filename:extension(Name)) of
%                 undefined ->
%                     "text/plain";
%                 Mime ->
%                     Mime
%             end
%     end.

%% @spec parse_header(string()) -> {Type, [{K, V}]}
%% @doc Parse a Content-Type like header, return the main Content-Type
%%      and a property list of options.
parse_header(String) ->
    %% TODO: This is exactly as broken as Python's cgi module.
    %% Should parse properly like mochiweb_cookies.
    [Type | Parts] = [string:strip(S) || S <- string:tokens(String, ";")],
    F = fun (S, Acc) ->
                case lists:splitwith(fun (C) -> C =/= $= end, S) of
                    {"", _} ->
                        %% Skip anything with no name
                        Acc;
                    {_, ""} ->
                        %% Skip anything with no value
                        Acc;
                    {Name, [$\= | Value]} ->
                        [{string:to_lower(string:strip(Name)),
                          unquote_header(string:strip(Value))} | Acc]
                end
        end,
    {string:to_lower(Type),
     lists:foldr(F, [], Parts)}.

unquote_header("\"" ++ Rest) ->
    unquote_header(Rest, []);
unquote_header(S) ->
    S.

unquote_header("", Acc) ->
    lists:reverse(Acc);
unquote_header("\"", Acc) ->
    lists:reverse(Acc);
unquote_header([$\\, C | Rest], Acc) ->
    unquote_header(Rest, [C | Acc]);
unquote_header([C | Rest], Acc) ->
    unquote_header(Rest, [C | Acc]).

%% @spec record_to_proplist(Record, Fields) -> proplist()
%% @doc Calls record_to_proplist/3 with a default TypeKey of '__record'
record_to_proplist(Record, Fields) ->
    record_to_proplist(Record, Fields, '__record').

%% @spec record_to_proplist(Record, Fields, TypeKey) -> proplist()
%% @doc Return a proplist of the given Record with each field in the
%%      Fields list set as a key with the corresponding value in the Record.
%%      TypeKey is the key that is used to store the record type
%%      Fields should be obtained by calling record_info(fields, record_type)
%%      where record_type is the record type of Record
record_to_proplist(Record, Fields, TypeKey)
  when tuple_size(Record) - 1 =:= length(Fields) ->
    lists:zip([TypeKey | Fields], tuple_to_list(Record)).


shell_quote([], Acc) ->
    lists:reverse([$\" | Acc]);
shell_quote([C | Rest], Acc) when C =:= $\" orelse C =:= $\` orelse
                                  C =:= $\\ orelse C =:= $\$ ->
    shell_quote(Rest, [C, $\\ | Acc]);
shell_quote([C | Rest], Acc) ->
    shell_quote(Rest, [C | Acc]).

%% @spec parse_qvalues(string()) -> [qvalue()] | invalid_qvalue_string
%% @type qvalue() = {media_type() | encoding() , float()}.
%% @type media_type() = string().
%% @type encoding() = string().
%%
%% @doc Parses a list (given as a string) of elements with Q values associated
%%      to them. Elements are separated by commas and each element is separated
%%      from its Q value by a semicolon. Q values are optional but when missing
%%      the value of an element is considered as 1.0. A Q value is always in the
%%      range [0.0, 1.0]. A Q value list is used for example as the value of the
%%      HTTP "Accept" and "Accept-Encoding" headers.
%%
%%      Q values are described in section 2.9 of the RFC 2616 (HTTP 1.1).
%%
%%      Example:
%%
%%      parse_qvalues("gzip; q=0.5, deflate, identity;q=0.0") ->
%%          [{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 0.0}]
%%
parse_qvalues(QValuesStr) ->
    try
        lists:map(
          fun(Pair) ->
                  [Type | Params] = string:tokens(Pair, ";"),
                  NormParams = normalize_media_params(Params),
                  {Q, NonQParams} = extract_q(NormParams),
                  {string:join([string:strip(Type) | NonQParams], ";"), Q}
          end,
          string:tokens(string:to_lower(QValuesStr), ",")
         )
    catch
        _Type:_Error ->
            invalid_qvalue_string
    end.

normalize_media_params(Params) ->
    {ok, Re} = re:compile("\\s"),
    normalize_media_params(Re, Params, []).

normalize_media_params(_Re, [], Acc) ->
    lists:reverse(Acc);
normalize_media_params(Re, [Param | Rest], Acc) ->
    NormParam = re:replace(Param, Re, "", [global, {return, list}]),
    normalize_media_params(Re, Rest, [NormParam | Acc]).

extract_q(NormParams) ->
    {ok, KVRe} = re:compile("^([^=]+)=([^=]+)$"),
    {ok, QRe} = re:compile("^((?:0|1)(?:\\.\\d{1,3})?)$"),
    extract_q(KVRe, QRe, NormParams, []).

extract_q(_KVRe, _QRe, [], Acc) ->
    {1.0, lists:reverse(Acc)};
extract_q(KVRe, QRe, [Param | Rest], Acc) ->
    case re:run(Param, KVRe, [{capture, [1, 2], list}]) of
        {match, [Name, Value]} ->
            case Name of
                "q" ->
                    {match, [Q]} = re:run(Value, QRe, [{capture, [1], list}]),
                    QVal = case Q of
                               "0" ->
                                   0.0;
                               "1" ->
                                   1.0;
                               Else ->
                                   list_to_float(Else)
                           end,
                    case QVal < 0.0 orelse QVal > 1.0 of
                        false ->
                            {QVal, lists:reverse(Acc) ++ Rest}
                    end;
                _ ->
                    extract_q(KVRe, QRe, Rest, [Param | Acc])
            end
    end.

%% @spec pick_accepted_encodings([qvalue()], [encoding()], encoding()) ->
%%          [encoding()]
%%
%% @doc Determines which encodings specified in the given Q values list are
%%      valid according to a list of supported encodings and a default encoding.
%%
%%      The returned list of encodings is sorted, in descending order, according
%%      to the Q values of the given list. The last element of this list is the
%%      given default encoding unless this encoding is explicitly or implicitly
%%      marked with a Q value of 0.0 in the given Q values list.
%%      Note: encodings with the same Q value are kept in the same order as
%%      found in the input Q values list.
%%
%%      This encoding picking process is described in section 14.3 of the
%%      RFC 2616 (HTTP 1.1).
%%
%%      Example:
%%
%%      pick_accepted_encodings(
%%          [{"gzip", 0.5}, {"deflate", 1.0}],
%%          ["gzip", "identity"],
%%          "identity"
%%      ) ->
%%          ["gzip", "identity"]
%%
pick_accepted_encodings(AcceptedEncs, SupportedEncs, DefaultEnc) ->
    SortedQList = lists:reverse(
                    lists:sort(fun({_, Q1}, {_, Q2}) -> Q1 < Q2 end, AcceptedEncs)
                   ),
    {Accepted, Refused} = lists:foldr(
                            fun({E, Q}, {A, R}) ->
                                    case Q > 0.0 of
                                        true ->
                                            {[E | A], R};
                                        false ->
                                            {A, [E | R]}
                                    end
                            end,
                            {[], []},
                            SortedQList
                           ),
    Refused1 = lists:foldr(
                 fun(Enc, Acc) ->
                         case Enc of
                             "*" ->
                                 lists:subtract(SupportedEncs, Accepted) ++ Acc;
                             _ ->
                                 [Enc | Acc]
                         end
                 end,
                 [],
                 Refused
                ),
    Accepted1 = lists:foldr(
                  fun(Enc, Acc) ->
                          case Enc of
                              "*" ->
                                  lists:subtract(SupportedEncs, Accepted ++ Refused1) ++ Acc;
                              _ ->
                                  [Enc | Acc]
                          end
                  end,
                  [],
                  Accepted
                 ),
    Accepted2 = case lists:member(DefaultEnc, Accepted1) of
                    true ->
                        Accepted1;
                    false ->
                        Accepted1 ++ [DefaultEnc]
                end,
    [E || E <- Accepted2, lists:member(E, SupportedEncs),
          not lists:member(E, Refused1)].

make_io(Atom) when is_atom(Atom) ->
    atom_to_list(Atom);
make_io(Integer) when is_integer(Integer) ->
    integer_to_list(Integer);
make_io(Io) when is_list(Io); is_binary(Io) ->
    Io.

%%
%% Tests
%%
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").

make_io_test() ->
    ?assertEqual(
       <<"atom">>,
       iolist_to_binary(make_io(atom))),
    ?assertEqual(
       <<"20">>,
       iolist_to_binary(make_io(20))),
    ?assertEqual(
       <<"list">>,
       iolist_to_binary(make_io("list"))),
    ?assertEqual(
       <<"binary">>,
       iolist_to_binary(make_io(<<"binary">>))),
    ok.

-record(test_record, {field1=f1, field2=f2}).
record_to_proplist_test() ->
    ?assertEqual(
       [{'__record', test_record},
        {field1, f1},
        {field2, f2}],
       record_to_proplist(#test_record{}, record_info(fields, test_record))),
    ?assertEqual(
       [{'typekey', test_record},
        {field1, f1},
        {field2, f2}],
       record_to_proplist(#test_record{},
                          record_info(fields, test_record),
                          typekey)),
    ok.

shell_quote_test() ->
    ?assertEqual(
       "\"foo \\$bar\\\"\\`' baz\"",
       shell_quote("foo $bar\"`' baz")),
    ok.

cmd_port_test_spool(Port, Acc) ->
    receive
        {Port, eof} ->
            Acc;
        {Port, {data, {eol, Data}}} ->
            cmd_port_test_spool(Port, ["\n", Data | Acc]);
        {Port, Unknown} ->
            throw({unknown, Unknown})
    after 1000 ->
            throw(timeout)
    end.

cmd_port_test() ->
    Port = cmd_port(["echo", "$bling$ `word`!"],
                    [eof, stream, {line, 4096}]),
    Res = try lists:append(lists:reverse(cmd_port_test_spool(Port, [])))
          after catch port_close(Port)
          end,
    self() ! {Port, wtf},
    try cmd_port_test_spool(Port, [])
    catch throw:{unknown, wtf} -> ok
    end,
    try cmd_port_test_spool(Port, [])
    catch throw:timeout -> ok
    end,
    ?assertEqual(
       "$bling$ `word`!\n",
       Res).

cmd_test() ->
    ?assertEqual(
       "$bling$ `word`!\n",
       cmd(["echo", "$bling$ `word`!"])),
    ok.

cmd_string_test() ->
    ?assertEqual(
       "\"echo\" \"\\$bling\\$ \\`word\\`!\"",
       cmd_string(["echo", "$bling$ `word`!"])),
    ok.

cmd_status_test() ->
    ?assertEqual(
       {0, <<"$bling$ `word`!\n">>},
       cmd_status(["echo", "$bling$ `word`!"])),
    ok.


parse_header_test() ->
    ?assertEqual(
       {"multipart/form-data", [{"boundary", "AaB03x"}]},
       parse_header("multipart/form-data; boundary=AaB03x")),
    %% This tests (currently) intentionally broken behavior
    ?assertEqual(
       {"multipart/form-data",
        [{"b", ""},
         {"cgi", "is"},
         {"broken", "true\"e"}]},
       parse_header("multipart/form-data;b=;cgi=\"i\\s;broken=true\"e;=z;z")),
    ok.

% guess_mime_test() ->
%     ?assertEqual("text/plain", guess_mime("")),
%     ?assertEqual("text/plain", guess_mime(".text")),
%     ?assertEqual("application/zip", guess_mime(".zip")),
%     ?assertEqual("application/zip", guess_mime("x.zip")),
%     ?assertEqual("text/html", guess_mime("x.html")),
%     ?assertEqual("application/xhtml+xml", guess_mime("x.xhtml")),
%     ?assertEqual("text/x-cross-domain-policy", guess_mime("crossdomain.xml")),
%     ?assertEqual("text/x-cross-domain-policy", guess_mime("www/crossdomain.xml")),
%     ok.

path_split_test() ->
    {"", "foo/bar"} = path_split("/foo/bar"),
    {"foo", "bar"} = path_split("foo/bar"),
    {"bar", ""} = path_split("bar"),
    ok.

urlsplit_test() ->
    {"", "", "/foo", "", "bar?baz"} = urlsplit("/foo#bar?baz"),
    {"http", "host:port", "/foo", "", "bar?baz"} =
        urlsplit("http://host:port/foo#bar?baz"),
    {"http", "host", "", "", ""} = urlsplit("http://host"),
    {"", "", "/wiki/Category:Fruit", "", ""} =
        urlsplit("/wiki/Category:Fruit"),
    ok.

urlsplit_path_test() ->
    {"/foo/bar", "", ""} = urlsplit_path("/foo/bar"),
    {"/foo", "baz", ""} = urlsplit_path("/foo?baz"),
    {"/foo", "", "bar?baz"} = urlsplit_path("/foo#bar?baz"),
    {"/foo", "", "bar?baz#wibble"} = urlsplit_path("/foo#bar?baz#wibble"),
    {"/foo", "bar", "baz"} = urlsplit_path("/foo?bar#baz"),
    {"/foo", "bar?baz", "baz"} = urlsplit_path("/foo?bar?baz#baz"),
    ok.

urlunsplit_test() ->
    "/foo#bar?baz" = urlunsplit({"", "", "/foo", "", "bar?baz"}),
    "http://host:port/foo#bar?baz" =
        urlunsplit({"http", "host:port", "/foo", "", "bar?baz"}),
    ok.

urlunsplit_path_test() ->
    "/foo/bar" = urlunsplit_path({"/foo/bar", "", ""}),
    "/foo?baz" = urlunsplit_path({"/foo", "baz", ""}),
    "/foo#bar?baz" = urlunsplit_path({"/foo", "", "bar?baz"}),
    "/foo#bar?baz#wibble" = urlunsplit_path({"/foo", "", "bar?baz#wibble"}),
    "/foo?bar#baz" = urlunsplit_path({"/foo", "bar", "baz"}),
    "/foo?bar?baz#baz" = urlunsplit_path({"/foo", "bar?baz", "baz"}),
    ok.

join_test() ->
    ?assertEqual("foo,bar,baz",
                 join(["foo", "bar", "baz"], $,)),
    ?assertEqual("foo,bar,baz",
                 join(["foo", "bar", "baz"], ",")),
    ?assertEqual("foo bar",
                 join([["foo", " bar"]], ",")),
    ?assertEqual("foo bar,baz",
                 join([["foo", " bar"], "baz"], ",")),
    ?assertEqual("foo",
                 join(["foo"], ",")),
    ?assertEqual("foobarbaz",
                 join(["foo", "bar", "baz"], "")),
    ?assertEqual("foo" ++ [<<>>] ++ "bar" ++ [<<>>] ++ "baz",
                 join(["foo", "bar", "baz"], <<>>)),
    ?assertEqual("foobar" ++ [<<"baz">>],
                 join(["foo", "bar", <<"baz">>], "")),
    ?assertEqual("",
                 join([], "any")),
    ok.

quote_plus_test() ->
    "foo" = quote_plus(foo),
    "1" = quote_plus(1),
    "1.1" = quote_plus(1.1),
    "foo" = quote_plus("foo"),
    "foo+bar" = quote_plus("foo bar"),
    "foo%0A" = quote_plus("foo\n"),
    "foo%0A" = quote_plus("foo\n"),
    "foo%3B%26%3D" = quote_plus("foo;&="),
    "foo%3B%26%3D" = quote_plus(<<"foo;&=">>),
    ok.

unquote_test() ->
    ?assertEqual("foo bar",
                 unquote("foo+bar")),
    ?assertEqual("foo bar",
                 unquote("foo%20bar")),
    ?assertEqual("foo\r\n",
                 unquote("foo%0D%0A")),
    ?assertEqual("foo\r\n",
                 unquote(<<"foo%0D%0A">>)),
    ok.

urlencode_test() ->
    "foo=bar&baz=wibble+%0D%0A&z=1" = urlencode([{foo, "bar"},
                                                 {"baz", "wibble \r\n"},
                                                 {z, 1}]),
    ok.

parse_qs_test() ->
    ?assertEqual(
       [{"foo", "bar"}, {"baz", "wibble \r\n"}, {"z", "1"}],
       parse_qs("foo=bar&baz=wibble+%0D%0a&z=1")),
    ?assertEqual(
       [{"", "bar"}, {"baz", "wibble \r\n"}, {"z", ""}],
       parse_qs("=bar&baz=wibble+%0D%0a&z=")),
    ?assertEqual(
       [{"foo", "bar"}, {"baz", "wibble \r\n"}, {"z", "1"}],
       parse_qs(<<"foo=bar&baz=wibble+%0D%0a&z=1">>)),
    ?assertEqual(
       [],
       parse_qs("")),
    ?assertEqual(
       [{"foo", ""}, {"bar", ""}, {"baz", ""}],
       parse_qs("foo;bar&baz")),
    ok.

partition_test() ->
    {"foo", "", ""} = partition("foo", "/"),
    {"foo", "/", "bar"} = partition("foo/bar", "/"),
    {"foo", "/", ""} = partition("foo/", "/"),
    {"", "/", "bar"} = partition("/bar", "/"),
    {"f", "oo/ba", "r"} = partition("foo/bar", "oo/ba"),
    ok.

safe_relative_path_test() ->
    "foo" = safe_relative_path("foo"),
    "foo/" = safe_relative_path("foo/"),
    "foo" = safe_relative_path("foo/bar/.."),
    "bar" = safe_relative_path("foo/../bar"),
    "bar/" = safe_relative_path("foo/../bar/"),
    "" = safe_relative_path("foo/.."),
    "" = safe_relative_path("foo/../"),
    undefined = safe_relative_path("/foo"),
    undefined = safe_relative_path("../foo"),
    undefined = safe_relative_path("foo/../.."),
    undefined = safe_relative_path("foo//"),
    undefined = safe_relative_path("foo\\bar"),
    ok.

parse_qvalues_test() ->
    [] = parse_qvalues(""),
    [{"identity", 0.0}] = parse_qvalues("identity;q=0"),
    [{"identity", 0.0}] = parse_qvalues("identity ;q=0"),
    [{"identity", 0.0}] = parse_qvalues(" identity; q =0 "),
|
||||
[{"identity", 0.0}] = parse_qvalues("identity ; q = 0"),
|
||||
[{"identity", 0.0}] = parse_qvalues("identity ; q= 0.0"),
|
||||
[{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues(
|
||||
"gzip,deflate,identity;q=0.0"
|
||||
),
|
||||
[{"deflate", 1.0}, {"gzip", 1.0}, {"identity", 0.0}] = parse_qvalues(
|
||||
"deflate,gzip,identity;q=0.0"
|
||||
),
|
||||
[{"gzip", 1.0}, {"deflate", 1.0}, {"gzip", 1.0}, {"identity", 0.0}] =
|
||||
parse_qvalues("gzip,deflate,gzip,identity;q=0"),
|
||||
[{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues(
|
||||
"gzip, deflate , identity; q=0.0"
|
||||
),
|
||||
[{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues(
|
||||
"gzip; q=1, deflate;q=1.0, identity;q=0.0"
|
||||
),
|
||||
[{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues(
|
||||
"gzip; q=0.5, deflate;q=1.0, identity;q=0"
|
||||
),
|
||||
[{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues(
|
||||
"gzip; q=0.5, deflate , identity;q=0.0"
|
||||
),
|
||||
[{"gzip", 0.5}, {"deflate", 0.8}, {"identity", 0.0}] = parse_qvalues(
|
||||
"gzip; q=0.5, deflate;q=0.8, identity;q=0.0"
|
||||
),
|
||||
[{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 1.0}] = parse_qvalues(
|
||||
"gzip; q=0.5,deflate,identity"
|
||||
),
|
||||
[{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 1.0}, {"identity", 1.0}] =
|
||||
parse_qvalues("gzip; q=0.5,deflate,identity, identity "),
|
||||
[{"text/html;level=1", 1.0}, {"text/plain", 0.5}] =
|
||||
parse_qvalues("text/html;level=1, text/plain;q=0.5"),
|
||||
[{"text/html;level=1", 0.3}, {"text/plain", 1.0}] =
|
||||
parse_qvalues("text/html;level=1;q=0.3, text/plain"),
|
||||
[{"text/html;level=1", 0.3}, {"text/plain", 1.0}] =
|
||||
parse_qvalues("text/html; level = 1; q = 0.3, text/plain"),
|
||||
[{"text/html;level=1", 0.3}, {"text/plain", 1.0}] =
|
||||
parse_qvalues("text/html;q=0.3;level=1, text/plain"),
|
||||
invalid_qvalue_string = parse_qvalues("gzip; q=1.1, deflate"),
|
||||
invalid_qvalue_string = parse_qvalues("gzip; q=0.5, deflate;q=2"),
|
||||
invalid_qvalue_string = parse_qvalues("gzip, deflate;q=AB"),
|
||||
invalid_qvalue_string = parse_qvalues("gzip; q=2.1, deflate"),
|
||||
invalid_qvalue_string = parse_qvalues("gzip; q=0.1234, deflate"),
|
||||
invalid_qvalue_string = parse_qvalues("text/html;level=1;q=0.3, text/html;level"),
|
||||
ok.
|
||||
|
||||
pick_accepted_encodings_test() ->
|
||||
["identity"] = pick_accepted_encodings(
|
||||
[],
|
||||
["gzip", "identity"],
|
||||
"identity"
|
||||
),
|
||||
["gzip", "identity"] = pick_accepted_encodings(
|
||||
[{"gzip", 1.0}],
|
||||
["gzip", "identity"],
|
||||
"identity"
|
||||
),
|
||||
["identity"] = pick_accepted_encodings(
|
||||
[{"gzip", 0.0}],
|
||||
["gzip", "identity"],
|
||||
"identity"
|
||||
),
|
||||
["gzip", "identity"] = pick_accepted_encodings(
|
||||
[{"gzip", 1.0}, {"deflate", 1.0}],
|
||||
["gzip", "identity"],
|
||||
"identity"
|
||||
),
|
||||
["gzip", "identity"] = pick_accepted_encodings(
|
||||
[{"gzip", 0.5}, {"deflate", 1.0}],
|
||||
["gzip", "identity"],
|
||||
"identity"
|
||||
),
|
||||
["identity"] = pick_accepted_encodings(
|
||||
[{"gzip", 0.0}, {"deflate", 0.0}],
|
||||
["gzip", "identity"],
|
||||
"identity"
|
||||
),
|
||||
["gzip"] = pick_accepted_encodings(
|
||||
[{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}],
|
||||
["gzip", "identity"],
|
||||
"identity"
|
||||
),
|
||||
["gzip", "deflate", "identity"] = pick_accepted_encodings(
|
||||
[{"gzip", 1.0}, {"deflate", 1.0}],
|
||||
["gzip", "deflate", "identity"],
|
||||
"identity"
|
||||
),
|
||||
["gzip", "deflate"] = pick_accepted_encodings(
|
||||
[{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}],
|
||||
["gzip", "deflate", "identity"],
|
||||
"identity"
|
||||
),
|
||||
["deflate", "gzip", "identity"] = pick_accepted_encodings(
|
||||
[{"gzip", 0.2}, {"deflate", 1.0}],
|
||||
["gzip", "deflate", "identity"],
|
||||
"identity"
|
||||
),
|
||||
["deflate", "deflate", "gzip", "identity"] = pick_accepted_encodings(
|
||||
[{"gzip", 0.2}, {"deflate", 1.0}, {"deflate", 1.0}],
|
||||
["gzip", "deflate", "identity"],
|
||||
"identity"
|
||||
),
|
||||
["deflate", "gzip", "gzip", "identity"] = pick_accepted_encodings(
|
||||
[{"gzip", 0.2}, {"deflate", 1.0}, {"gzip", 1.0}],
|
||||
["gzip", "deflate", "identity"],
|
||||
"identity"
|
||||
),
|
||||
["gzip", "deflate", "gzip", "identity"] = pick_accepted_encodings(
|
||||
[{"gzip", 0.2}, {"deflate", 0.9}, {"gzip", 1.0}],
|
||||
["gzip", "deflate", "identity"],
|
||||
"identity"
|
||||
),
|
||||
[] = pick_accepted_encodings(
|
||||
[{"*", 0.0}],
|
||||
["gzip", "deflate", "identity"],
|
||||
"identity"
|
||||
),
|
||||
["gzip", "deflate", "identity"] = pick_accepted_encodings(
|
||||
[{"*", 1.0}],
|
||||
["gzip", "deflate", "identity"],
|
||||
"identity"
|
||||
),
|
||||
["gzip", "deflate", "identity"] = pick_accepted_encodings(
|
||||
[{"*", 0.6}],
|
||||
["gzip", "deflate", "identity"],
|
||||
"identity"
|
||||
),
|
||||
["gzip"] = pick_accepted_encodings(
|
||||
[{"gzip", 1.0}, {"*", 0.0}],
|
||||
["gzip", "deflate", "identity"],
|
||||
"identity"
|
||||
),
|
||||
["gzip", "deflate"] = pick_accepted_encodings(
|
||||
[{"gzip", 1.0}, {"deflate", 0.6}, {"*", 0.0}],
|
||||
["gzip", "deflate", "identity"],
|
||||
"identity"
|
||||
),
|
||||
["deflate", "gzip"] = pick_accepted_encodings(
|
||||
[{"gzip", 0.5}, {"deflate", 1.0}, {"*", 0.0}],
|
||||
["gzip", "deflate", "identity"],
|
||||
"identity"
|
||||
),
|
||||
["gzip", "identity"] = pick_accepted_encodings(
|
||||
[{"deflate", 0.0}, {"*", 1.0}],
|
||||
["gzip", "deflate", "identity"],
|
||||
"identity"
|
||||
),
|
||||
["gzip", "identity"] = pick_accepted_encodings(
|
||||
[{"*", 1.0}, {"deflate", 0.0}],
|
||||
["gzip", "deflate", "identity"],
|
||||
"identity"
|
||||
),
|
||||
ok.
|
||||
|
||||
-endif.
|
||||
|
|

@@ -20,7 +20,7 @@
 -export([start/2, stop/1]).

 start(_Type, _StartArgs) ->
-    rabbit_mgmt_agent_sup:start_link().
+    rabbit_mgmt_agent_sup_sup:start_link().

 stop(_State) ->
     ok.
@@ -0,0 +1,11 @@
-module(rabbit_mgmt_agent_config).

-export([get_env/1]).

%% some people have reasons to only run with the agent enabled:
%% make it possible for them to configure key management app
%% settings such as rates_mode.
get_env(Key) ->
    rabbit_misc:get_env(rabbitmq_management, Key,
                        rabbit_misc:get_env(rabbitmq_management_agent, Key,
                                            undefined)).
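
%% Illustrative usage sketch (hypothetical values, not part of the commit's
%% test suite): a setting defined only for the agent application is picked
%% up as a fallback, while the same key under the full management plugin
%% takes precedence:
%%
%%   ok = application:set_env(rabbitmq_management_agent, rates_mode, basic),
%%   basic = rabbit_mgmt_agent_config:get_env(rates_mode),
%%   ok = application:set_env(rabbitmq_management, rates_mode, detailed),
%%   detailed = rabbit_mgmt_agent_config:get_env(rates_mode).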

@@ -18,14 +18,32 @@

 -behaviour(supervisor).

 -include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit_common/include/rabbit_core_metrics.hrl").
+-include("rabbit_mgmt_metrics.hrl").

 -export([init/1]).
 -export([start_link/0]).

 init([]) ->
+    pg2:create(management_db),
+    ok = pg2:join(management_db, self()),
+    ST = {rabbit_mgmt_storage, {rabbit_mgmt_storage, start_link, []},
+          permanent, ?WORKER_WAIT, worker, [rabbit_mgmt_storage]},
+    MD = {delegate_management_sup, {delegate_sup, start_link, [5, ?DELEGATE_PREFIX]},
+          permanent, ?SUPERVISOR_WAIT, supervisor, [delegate_sup]},
+    MC = [{rabbit_mgmt_metrics_collector:name(Table),
+           {rabbit_mgmt_metrics_collector, start_link, [Table]},
+           permanent, ?WORKER_WAIT, worker, [rabbit_mgmt_metrics_collector]}
+          || {Table, _} <- ?CORE_TABLES],
+    MGC = [{rabbit_mgmt_metrics_gc:name(Event),
+            {rabbit_mgmt_metrics_gc, start_link, [Event]},
+            permanent, ?WORKER_WAIT, worker, [rabbit_mgmt_metrics_gc]}
+           || Event <- ?GC_EVENTS],
     ExternalStats = {rabbit_mgmt_external_stats,
                      {rabbit_mgmt_external_stats, start_link, []},
                      permanent, 5000, worker, [rabbit_mgmt_external_stats]},
-    {ok, {{one_for_one, 10, 10}, [ExternalStats]}}.
+    {ok, {{one_for_one, 100, 10}, [ST, MD, ExternalStats | MC ++ MGC]}}.

 start_link() ->
     supervisor:start_link({local, ?MODULE}, ?MODULE, []).

@@ -0,0 +1,36 @@
%% The contents of this file are subject to the Mozilla Public License
%% Version 1.1 (the "License"); you may not use this file except in
%% compliance with the License. You may obtain a copy of the License at
%% http://www.mozilla.org/MPL/
%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
%% License for the specific language governing rights and limitations
%% under the License.
%%
%% The Original Code is RabbitMQ.
%%
%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved.
%%

-module(rabbit_mgmt_agent_sup_sup).

-behaviour(supervisor2).

-export([init/1]).
-export([start_link/0, start_child/0]).

-include_lib("rabbit_common/include/rabbit.hrl").

start_child() ->
    supervisor2:start_child(?MODULE, sup()).

sup() ->
    {rabbit_mgmt_agent_sup, {rabbit_mgmt_agent_sup, start_link, []},
     temporary, ?SUPERVISOR_WAIT, supervisor, [rabbit_mgmt_agent_sup]}.

init([]) ->
    {ok, {{one_for_one, 0, 1}, [sup()]}}.

start_link() ->
    supervisor2:start_link({local, ?MODULE}, ?MODULE, []).

@@ -0,0 +1,531 @@
%% The contents of this file are subject to the Mozilla Public License
%% Version 1.1 (the "License"); you may not use this file except in
%% compliance with the License. You may obtain a copy of the License at
%% http://www.mozilla.org/MPL/
%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
%% License for the specific language governing rights and limitations
%% under the License.
%%
%% The Original Code is RabbitMQ.
%%
%% The Initial Developer of the Original Code is GoPivotal, Inc.
%% Copyright (c) 2016 Pivotal Software, Inc. All rights reserved.
%%

-module(rabbit_mgmt_data).

-include("rabbit_mgmt_records.hrl").
-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_metrics.hrl").
-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("rabbit_common/include/rabbit_core_metrics.hrl").

-export([empty/2, pick_range/2]).

% delegate api
-export([overview_data/4,
         consumer_data/2,
         all_list_queue_data/3,
         all_detail_queue_data/3,
         all_exchange_data/3,
         all_connection_data/3,
         all_list_channel_data/3,
         all_detail_channel_data/3,
         all_vhost_data/3,
         all_node_data/3,
         augmented_created_stats/2,
         augmented_created_stats/3,
         augment_channel_pids/2,
         augment_details/2,
         lookup_element/2,
         lookup_element/3
        ]).

-import(rabbit_misc, [pget/2]).

-type maybe_slide() :: exometer_slide:slide() | not_found.
-type ranges() :: {maybe_range(), maybe_range(), maybe_range(), maybe_range()}.
-type maybe_range() :: no_range | #range{}.

%%----------------------------------------------------------------------------
%% Internal, query-time - node-local operations
%%----------------------------------------------------------------------------

created_stats(Name, Type) ->
    case ets:select(Type, [{{'_', '$2', '$3'}, [{'==', Name, '$2'}], ['$3']}]) of
        [] -> not_found;
        [Elem] -> Elem
    end.

created_stats(Type) ->
    %% TODO better tab2list?
    ets:select(Type, [{{'_', '_', '$3'}, [], ['$3']}]).

-spec all_detail_queue_data(pid(), [any()], ranges()) -> dict:dict(atom(), any()).
all_detail_queue_data(_Pid, Ids, Ranges) ->
    lists:foldl(fun (Id, Acc) ->
                        Data = detail_queue_data(Ranges, Id),
                        dict:store(Id, Data, Acc)
                end, dict:new(), Ids).

all_list_queue_data(_Pid, Ids, Ranges) ->
    lists:foldl(fun (Id, Acc) ->
                        Data = list_queue_data(Ranges, Id),
                        dict:store(Id, Data, Acc)
                end, dict:new(), Ids).

all_detail_channel_data(_Pid, Ids, Ranges) ->
    lists:foldl(fun (Id, Acc) ->
                        Data = detail_channel_data(Ranges, Id),
                        dict:store(Id, Data, Acc)
                end, dict:new(), Ids).

all_list_channel_data(_Pid, Ids, Ranges) ->
    lists:foldl(fun (Id, Acc) ->
                        Data = list_channel_data(Ranges, Id),
                        dict:store(Id, Data, Acc)
                end, dict:new(), Ids).

connection_data(Ranges, Id) ->
    dict:from_list([raw_message_data(connection_stats_coarse_conn_stats,
                                     pick_range(coarse_conn_stats, Ranges), Id),
                    {connection_stats, lookup_element(connection_stats, Id)}]).

exchange_data(Ranges, Id) ->
    dict:from_list(
      exchange_raw_detail_stats_data(Ranges, Id) ++
      [raw_message_data(exchange_stats_publish_out,
                        pick_range(fine_stats, Ranges), Id),
       raw_message_data(exchange_stats_publish_in,
                        pick_range(fine_stats, Ranges), Id)]).

vhost_data(Ranges, Id) ->
    dict:from_list([raw_message_data(vhost_stats_coarse_conn_stats,
                                     pick_range(coarse_conn_stats, Ranges), Id),
                    raw_message_data(vhost_msg_stats,
                                     pick_range(queue_msg_rates, Ranges), Id),
                    raw_message_data(vhost_stats_fine_stats,
                                     pick_range(fine_stats, Ranges), Id),
                    raw_message_data(vhost_stats_deliver_stats,
                                     pick_range(deliver_get, Ranges), Id)]).

node_data(Ranges, Id) ->
    dict:from_list(
      [{mgmt_stats, mgmt_queue_length_stats()}] ++
      node_raw_detail_stats_data(Ranges, Id) ++
      [raw_message_data(node_coarse_stats,
                        pick_range(coarse_node_stats, Ranges), Id),
       raw_message_data(node_persister_stats,
                        pick_range(coarse_node_stats, Ranges), Id),
       {node_stats, lookup_element(node_stats, Id)}]).

overview_data(_Pid, User, Ranges, VHosts) ->
    Raw = [raw_all_message_data(vhost_msg_stats, pick_range(queue_msg_counts, Ranges), VHosts),
           raw_all_message_data(vhost_stats_fine_stats, pick_range(fine_stats, Ranges), VHosts),
           raw_all_message_data(vhost_msg_rates, pick_range(queue_msg_rates, Ranges), VHosts),
           raw_all_message_data(vhost_stats_deliver_stats, pick_range(deliver_get, Ranges), VHosts)],

    dict:from_list(Raw ++
                   [{connections_count, count_created_stats(connection_created_stats, User)},
                    {channels_count, count_created_stats(channel_created_stats, User)},
                    {consumers_count, ets:info(consumer_stats, size)}]).

consumer_data(_Pid, VHost) ->
    dict:from_list(
      [{C, augment_msg_stats(augment_consumer(C))}
       || C <- consumers_by_vhost(VHost)]).

all_connection_data(_Pid, Ids, Ranges) ->
    dict:from_list([{Id, connection_data(Ranges, Id)} || Id <- Ids]).

all_exchange_data(_Pid, Ids, Ranges) ->
    dict:from_list([{Id, exchange_data(Ranges, Id)} || Id <- Ids]).

all_vhost_data(_Pid, Ids, Ranges) ->
    dict:from_list([{Id, vhost_data(Ranges, Id)} || Id <- Ids]).

all_node_data(_Pid, Ids, Ranges) ->
    dict:from_list([{Id, node_data(Ranges, Id)} || Id <- Ids]).

channel_raw_message_data(Ranges, Id) ->
    [raw_message_data(channel_stats_fine_stats, pick_range(fine_stats, Ranges), Id),
     raw_message_data(channel_stats_deliver_stats, pick_range(deliver_get, Ranges), Id),
     raw_message_data(channel_process_stats, pick_range(process_stats, Ranges), Id)].

queue_raw_message_data(Ranges, Id) ->
    [raw_message_data(queue_stats_publish, pick_range(fine_stats, Ranges), Id),
     raw_message_data(queue_stats_deliver_stats, pick_range(deliver_get, Ranges), Id),
     raw_message_data(queue_process_stats, pick_range(process_stats, Ranges), Id),
     raw_message_data(queue_msg_stats, pick_range(queue_msg_counts, Ranges), Id)].

queue_raw_deliver_stats_data(Ranges, Id) ->
    [raw_message_data2(channel_queue_stats_deliver_stats,
                       pick_range(deliver_get, Ranges), Key)
     || Key <- get_table_keys(channel_queue_stats_deliver_stats, second(Id))] ++
    [raw_message_data2(queue_exchange_stats_publish,
                       pick_range(fine_stats, Ranges), Key)
     || Key <- get_table_keys(queue_exchange_stats_publish, first(Id))].

node_raw_detail_stats_data(Ranges, Id) ->
    [raw_message_data2(node_node_coarse_stats,
                       pick_range(coarse_node_node_stats, Ranges), Key)
     || Key <- get_table_keys(node_node_coarse_stats, first(Id))].

exchange_raw_detail_stats_data(Ranges, Id) ->
    [raw_message_data2(channel_exchange_stats_fine_stats,
                       pick_range(fine_stats, Ranges), Key)
     || Key <- get_table_keys(channel_exchange_stats_fine_stats, second(Id))] ++
    [raw_message_data2(queue_exchange_stats_publish,
                       pick_range(fine_stats, Ranges), Key)
     || Key <- get_table_keys(queue_exchange_stats_publish, second(Id))].

channel_raw_detail_stats_data(Ranges, Id) ->
    [raw_message_data2(channel_exchange_stats_fine_stats,
                       pick_range(fine_stats, Ranges), Key)
     || Key <- get_table_keys(channel_exchange_stats_fine_stats, first(Id))] ++
    [raw_message_data2(channel_queue_stats_deliver_stats,
                       pick_range(fine_stats, Ranges), Key)
     || Key <- get_table_keys(channel_queue_stats_deliver_stats, first(Id))].

raw_message_data2(Table, no_range, Id) ->
    SmallSample = lookup_smaller_sample(Table, Id),
    {{Table, Id}, {SmallSample, not_found}};
raw_message_data2(Table, Range, Id) ->
    SmallSample = lookup_smaller_sample(Table, Id),
    Samples = lookup_samples(Table, Id, Range),
    {{Table, Id}, {SmallSample, Samples}}.

detail_queue_data(Ranges, Id) ->
    dict:from_list(queue_raw_message_data(Ranges, Id) ++
                   queue_raw_deliver_stats_data(Ranges, Id) ++
                   [{queue_stats, lookup_element(queue_stats, Id)},
                    {consumer_stats, get_queue_consumer_stats(Id)}]).

list_queue_data(Ranges, Id) ->
    dict:from_list(queue_raw_message_data(Ranges, Id) ++
                   queue_raw_deliver_stats_data(Ranges, Id) ++
                   [{queue_stats, lookup_element(queue_stats, Id)}]).

detail_channel_data(Ranges, Id) ->
    dict:from_list(channel_raw_message_data(Ranges, Id) ++
                   channel_raw_detail_stats_data(Ranges, Id) ++
                   [{channel_stats, lookup_element(channel_stats, Id)},
                    {consumer_stats, get_consumer_stats(Id)}]).

list_channel_data(Ranges, Id) ->
    dict:from_list(channel_raw_message_data(Ranges, Id) ++
                   channel_raw_detail_stats_data(Ranges, Id) ++
                   [{channel_stats, lookup_element(channel_stats, Id)}]).

-spec raw_message_data(atom(), maybe_range(), any()) ->
    {atom(), {maybe_slide(), maybe_slide()}}.
raw_message_data(Table, no_range, Id) ->
    SmallSample = lookup_smaller_sample(Table, Id),
    {Table, {SmallSample, not_found}};
raw_message_data(Table, Range, Id) ->
    SmallSample = lookup_smaller_sample(Table, Id),
    Samples = lookup_samples(Table, Id, Range),
    {Table, {SmallSample, Samples}}.

raw_all_message_data(Table, Range, VHosts) ->
    SmallSample = lookup_all(Table, VHosts, select_smaller_sample(Table)),
    RangeSample = case Range of
                      no_range -> not_found;
                      _ ->
                          lookup_all(Table, VHosts, select_range_sample(Table,
                                                                        Range))
                  end,
    {Table, {SmallSample, RangeSample}}.

get_queue_consumer_stats(Id) ->
    Consumers = ets:select(consumer_stats, match_queue_consumer_spec(Id)),
    [augment_consumer(C) || C <- Consumers].

get_consumer_stats(Id) ->
    Consumers = ets:select(consumer_stats, match_consumer_spec(Id)),
    [augment_consumer(C) || C <- Consumers].

count_created_stats(Type, all) ->
    ets:info(Type, size);
count_created_stats(Type, User) ->
    length(filter_user(created_stats(Type), User)).

augment_consumer({{Q, Ch, CTag}, Props}) ->
    [{queue, format_resource(Q)},
     {channel_details, augment_channel_pid(Ch)},
     {channel_pid, Ch},
     {consumer_tag, CTag} | Props].

consumers_by_vhost(VHost) ->
    ets:select(consumer_stats,
               [{{{#resource{virtual_host = '$1', _ = '_'}, '_', '_'}, '_'},
                 [{'orelse', {'==', 'all', VHost}, {'==', VHost, '$1'}}],
                 ['$_']}]).

augment_msg_stats(Props) ->
    augment_details(Props, []) ++ Props.

augment_details([{_, none} | T], Acc) ->
    augment_details(T, Acc);
augment_details([{_, unknown} | T], Acc) ->
    augment_details(T, Acc);
augment_details([{connection, Value} | T], Acc) ->
    augment_details(T, [{connection_details, augment_connection_pid(Value)} | Acc]);
augment_details([{channel, Value} | T], Acc) ->
    augment_details(T, [{channel_details, augment_channel_pid(Value)} | Acc]);
augment_details([{owner_pid, Value} | T], Acc) ->
    augment_details(T, [{owner_pid_details, augment_connection_pid(Value)} | Acc]);
augment_details([_ | T], Acc) ->
    augment_details(T, Acc);
augment_details([], Acc) ->
    Acc.

augment_channel_pids(_Pid, ChPids) ->
    lists:map(fun (ChPid) -> augment_channel_pid(ChPid) end, ChPids).

augment_channel_pid(Pid) ->
    Ch = lookup_element(channel_created_stats, Pid, 3),
    Conn = lookup_element(connection_created_stats, pget(connection, Ch), 3),
    case Conn of
        [] -> %% If the connection has just been opened, we might not yet have the data
            [];
        _ ->
            [{name, pget(name, Ch)},
             {pid, pget(pid, Ch)},
             {number, pget(number, Ch)},
             {user, pget(user, Ch)},
             {connection_name, pget(name, Conn)},
             {peer_port, pget(peer_port, Conn)},
             {peer_host, pget(peer_host, Conn)}]
    end.

augment_connection_pid(Pid) ->
    Conn = lookup_element(connection_created_stats, Pid, 3),
    case Conn of
        [] -> %% If the connection has just been opened, we might not yet have the data
            [];
        _ ->
            [{name, pget(name, Conn)},
             {peer_port, pget(peer_port, Conn)},
             {peer_host, pget(peer_host, Conn)}]
    end.

augmented_created_stats(_Pid, Key, Type) ->
    case created_stats(Key, Type) of
        not_found -> not_found;
        S -> augment_msg_stats(S)
    end.

augmented_created_stats(_Pid, Type) ->
    [ augment_msg_stats(S) || S <- created_stats(Type) ].

match_consumer_spec(Id) ->
    [{{{'_', '$1', '_'}, '_'}, [{'==', Id, '$1'}], ['$_']}].

match_queue_consumer_spec(Id) ->
    [{{{'$1', '_', '_'}, '_'}, [{'==', {Id}, '$1'}], ['$_']}].

lookup_element(Table, Key) -> lookup_element(Table, Key, 2).

lookup_element(Table, Key, Pos) ->
    try ets:lookup_element(Table, Key, Pos)
    catch error:badarg -> []
    end.
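
%% Illustrative sketch (hypothetical table contents): lookup_element/3
%% returns the requested tuple position, or [] when either the table or the
%% key is missing, so callers never have to guard against badarg:
%%
%%   true = ets:insert(queue_stats, {my_queue_id, [{messages, 3}]}),
%%   [{messages, 3}] = lookup_element(queue_stats, my_queue_id),
%%   [] = lookup_element(queue_stats, missing_id).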

-spec lookup_smaller_sample(atom(), any()) -> maybe_slide().
lookup_smaller_sample(Table, Id) ->
    case ets:lookup(Table, {Id, select_smaller_sample(Table)}) of
        [] ->
            not_found;
        [{_, Slide}] ->
            exometer_slide:optimize(Slide)
    end.

-spec lookup_samples(atom(), any(), #range{}) -> maybe_slide().
lookup_samples(Table, Id, Range) ->
    case ets:lookup(Table, {Id, select_range_sample(Table, Range)}) of
        [] ->
            not_found;
        [{_, Slide}] ->
            exometer_slide:optimize(Slide)
    end.

lookup_all(Table, Ids, SecondKey) ->
    Slides = lists:foldl(fun(Id, Acc) ->
                                 case ets:lookup(Table, {Id, SecondKey}) of
                                     [] ->
                                         Acc;
                                     [{_, Slide}] ->
                                         [Slide | Acc]
                                 end
                         end, [], Ids),
    case Slides of
        [] ->
            not_found;
        _ ->
            exometer_slide:sum(Slides, empty(Table, 0))
    end.

get_table_keys(Table, Id0) ->
    ets:select(Table, match_spec_keys(Id0)).

match_spec_keys(Id) ->
    MatchCondition = to_match_condition(Id),
    MatchHead = {{{'$1', '$2'}, '_'}, '_'},
    [{MatchHead, [MatchCondition], [{{'$1', '$2'}}]}].

to_match_condition({'_', Id1}) when is_tuple(Id1) ->
    {'==', {Id1}, '$2'};
to_match_condition({'_', Id1}) ->
    {'==', Id1, '$2'};
to_match_condition({Id0, '_'}) when is_tuple(Id0) ->
    {'==', {Id0}, '$1'};
to_match_condition({Id0, '_'}) ->
    {'==', Id0, '$1'}.
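
%% Illustrative sketch (hypothetical id value): match_spec_keys/1 turns a
%% partially-bound key pair, as built by first/1 or second/1 below, into an
%% ets match spec that yields the full {First, Second} keys:
%%
%%   Spec = match_spec_keys({ChPid, '_'}),
%%   Keys = ets:select(channel_queue_stats_deliver_stats, Spec),
%%   %% Keys is the list of {ChPid, QueueId} pairs present in the table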

mgmt_queue_length_stats() ->
    GCsQueueLengths = lists:map(fun (T) ->
                                        case whereis(rabbit_mgmt_metrics_gc:name(T)) of
                                            P when is_pid(P) ->
                                                {message_queue_len, Len} =
                                                    erlang:process_info(P, message_queue_len),
                                                {T, Len};
                                            _ -> {T, 0}
                                        end
                                end,
                                ?GC_EVENTS),
    [{metrics_gc_queue_length, GCsQueueLengths}].

select_range_sample(Table, #range{first = First, last = Last}) ->
    Range = Last - First,
    Policies = rabbit_mgmt_agent_config:get_env(sample_retention_policies),
    Policy = retention_policy(Table),
    [T | _] = TablePolicies = lists:sort(proplists:get_value(Policy, Policies)),
    {_, Sample} = select_smallest_above(T, TablePolicies, Range),
    Sample.

select_smaller_sample(Table) ->
    Policies = rabbit_mgmt_agent_config:get_env(sample_retention_policies),
    Policy = retention_policy(Table),
    TablePolicies = proplists:get_value(Policy, Policies),
    [V | _] = lists:sort([I || {_, I} <- TablePolicies]),
    V.

select_smallest_above(V, [], _) ->
    V;
select_smallest_above(_, [{H, _} = S | _T], Interval) when (H * 1000) > Interval ->
    S;
select_smallest_above(_, [H | T], Interval) ->
    select_smallest_above(H, T, Interval).
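
%% Illustrative sketch (hypothetical policy list): retention policies are
%% {MaxAgeInSeconds, SampleInterval} pairs; select_smallest_above/3 picks
%% the first policy whose max age, in milliseconds, exceeds the requested
%% range. A 120000 ms range does not fit in a 60 s policy, so the 600 s
%% policy with its coarser interval is selected:
%%
%%   {600, 5} = select_smallest_above({60, 1}, [{60, 1}, {600, 5}], 120000).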

pick_range(queue_msg_counts, {RangeL, _RangeM, _RangeD, _RangeN}) ->
    RangeL;
pick_range(K, {_RangeL, RangeM, _RangeD, _RangeN}) when K == fine_stats;
                                                        K == deliver_get;
                                                        K == queue_msg_rates ->
    RangeM;
pick_range(K, {_RangeL, _RangeM, RangeD, _RangeN}) when K == coarse_conn_stats;
                                                        K == process_stats ->
    RangeD;
pick_range(K, {_RangeL, _RangeM, _RangeD, RangeN})
  when K == coarse_node_stats;
       K == coarse_node_node_stats ->
    RangeN.
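
%% Illustrative sketch (hypothetical range values): the ranges() tuple
%% holds one slot per stat family, and pick_range/2 routes each family to
%% its slot, so a query can request samples for some families only:
%%
%%   RangeL = #range{first = 0, last = 60000},
%%   Ranges = {RangeL, no_range, no_range, no_range},
%%   RangeL = pick_range(queue_msg_counts, Ranges),
%%   no_range = pick_range(deliver_get, Ranges).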

first(Id) ->
    {Id, '_'}.

second(Id) ->
    {'_', Id}.

empty(Type, V) when Type =:= connection_stats_coarse_conn_stats;
                    Type =:= channel_stats_fine_stats;
                    Type =:= channel_exchange_stats_fine_stats;
                    Type =:= vhost_stats_fine_stats;
                    Type =:= queue_msg_stats;
                    Type =:= vhost_msg_stats ->
    {V, V, V};
empty(Type, V) when Type =:= channel_queue_stats_deliver_stats;
                    Type =:= queue_stats_deliver_stats;
                    Type =:= vhost_stats_deliver_stats;
                    Type =:= channel_stats_deliver_stats ->
    {V, V, V, V, V, V, V};
empty(Type, V) when Type =:= channel_process_stats;
                    Type =:= queue_process_stats;
                    Type =:= queue_stats_publish;
                    Type =:= queue_exchange_stats_publish;
                    Type =:= exchange_stats_publish_out;
                    Type =:= exchange_stats_publish_in ->
    {V};
empty(node_coarse_stats, V) ->
    {V, V, V, V, V, V, V, V};
empty(node_persister_stats, V) ->
    {V, V, V, V, V, V, V, V, V, V, V, V, V, V, V, V, V, V, V, V};
empty(Type, V) when Type =:= node_node_coarse_stats;
                    Type =:= vhost_stats_coarse_conn_stats;
                    Type =:= queue_msg_rates;
                    Type =:= vhost_msg_rates ->
    {V, V}.
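
%% Illustrative sketch: empty/2 builds the identity element that
%% exometer_slide:sum/2 folds over in lookup_all/3, sized to each table's
%% counter tuple:
%%
%%   {0, 0, 0} = empty(queue_msg_stats, 0),
%%   {0, 0} = empty(vhost_msg_rates, 0),
%%   {0} = empty(queue_stats_publish, 0).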

retention_policy(connection_stats_coarse_conn_stats) ->
    basic;
retention_policy(channel_stats_fine_stats) ->
    basic;
retention_policy(channel_queue_stats_deliver_stats) ->
    detailed;
retention_policy(channel_exchange_stats_fine_stats) ->
    detailed;
retention_policy(channel_process_stats) ->
    basic;
retention_policy(vhost_stats_fine_stats) ->
    global;
retention_policy(vhost_stats_deliver_stats) ->
    global;
retention_policy(vhost_stats_coarse_conn_stats) ->
    global;
retention_policy(vhost_msg_rates) ->
    global;
retention_policy(channel_stats_deliver_stats) ->
    basic;
retention_policy(queue_stats_deliver_stats) ->
    basic;
retention_policy(queue_stats_publish) ->
    basic;
retention_policy(queue_exchange_stats_publish) ->
    basic;
retention_policy(exchange_stats_publish_out) ->
    basic;
retention_policy(exchange_stats_publish_in) ->
    basic;
retention_policy(queue_process_stats) ->
    basic;
retention_policy(queue_msg_stats) ->
    basic;
retention_policy(queue_msg_rates) ->
    basic;
retention_policy(vhost_msg_stats) ->
    global;
retention_policy(node_coarse_stats) ->
    global;
retention_policy(node_persister_stats) ->
    global;
retention_policy(node_node_coarse_stats) ->
    global.

format_resource(unknown) -> unknown;
format_resource(Res) -> format_resource(name, Res).

format_resource(_, unknown) ->
    unknown;
format_resource(NameAs, #resource{name = Name, virtual_host = VHost}) ->
    [{NameAs, Name}, {vhost, VHost}].

filter_user(List, #user{username = Username, tags = Tags}) ->
    case is_monitor(Tags) of
        true  -> List;
        false -> [I || I <- List, pget(user, I) == Username]
    end.

is_monitor(T) -> intersects(T, [administrator, monitoring]).
intersects(A, B) -> lists:any(fun(I) -> lists:member(I, B) end, A).

@@ -44,22 +44,14 @@ add_handler() ->
 gc() ->
     erlang:garbage_collect(whereis(rabbit_event)).

-%% some people have reasons to only run with the agent enabled:
-%% make it possible for them to configure key management app
-%% settings such as rates_mode.
-get_management_env(Key) ->
-    rabbit_misc:get_env(
-      rabbitmq_management, Key,
-      rabbit_misc:get_env(rabbitmq_management_agent, Key, undefined)).
-
 rates_mode() ->
-    case get_management_env(rates_mode) of
+    case rabbit_mgmt_agent_config:get_env(rates_mode) of
         undefined -> basic;
         Mode      -> Mode
     end.

 handle_force_fine_statistics() ->
-    case get_management_env(force_fine_statistics) of
+    case rabbit_mgmt_agent_config:get_env(force_fine_statistics) of
         undefined ->
             ok;
         X ->

@@ -95,17 +87,14 @@ init([]) ->
 handle_call(_Request, State) ->
     {ok, not_understood, State}.

-handle_event(#event{type = Type} = Event, State) when Type == channel_stats;
-                                                      Type == channel_created;
-                                                      Type == channel_closed ->
-    gen_server:cast({global, rabbit_mgmt_channel_stats_collector}, {event, Event}),
+handle_event(#event{type = Type} = Event, State)
+  when Type == connection_closed; Type == channel_closed; Type == queue_deleted;
+       Type == exchange_deleted; Type == vhost_deleted;
+       Type == consumer_deleted; Type == node_node_deleted;
+       Type == channel_consumer_deleted ->
+    gen_server:cast(rabbit_mgmt_metrics_gc:name(Type), {event, Event}),
     {ok, State};
-handle_event(#event{type = Type} = Event, State) when Type == queue_stats;
-                                                      Type == queue_deleted ->
-    gen_server:cast({global, rabbit_mgmt_queue_stats_collector}, {event, Event}),
-    {ok, State};
-handle_event(Event, State) ->
-    gen_server:cast({global, rabbit_mgmt_event_collector}, {event, Event}),
+handle_event(_, State) ->
     {ok, State}.

 handle_info(_Info, State) ->

@@ -29,15 +29,16 @@
 -include_lib("rabbit_common/include/rabbit.hrl").

 -define(REFRESH_RATIO, 5000).
--define(KEYS, [name, partitions, os_pid, fd_used, fd_total,
-               sockets_used, sockets_total, mem_used, mem_limit, mem_alarm,
-               disk_free_limit, disk_free, disk_free_alarm,
-               proc_used, proc_total, rates_mode,
-               uptime, run_queue, processors, exchange_types,
-               auth_mechanisms, applications, contexts,
-               log_files, db_dir, config_files, net_ticktime,
-               enabled_plugins, persister_stats, gc_num, gc_bytes_reclaimed,
-               context_switches]).
+-define(METRICS_KEYS, [fd_used, sockets_used, mem_used, disk_free, proc_used, gc_num,
+                       gc_bytes_reclaimed, context_switches]).
+
+-define(PERSISTER_KEYS, [persister_stats]).
+
+-define(OTHER_KEYS, [name, partitions, os_pid, fd_total, sockets_total, mem_limit,
+                     mem_alarm, disk_free_limit, disk_free_alarm, proc_total,
+                     rates_mode, uptime, run_queue, processors, exchange_types,
+                     auth_mechanisms, applications, contexts, log_file,
+                     sasl_log_file, db_dir, config_files, net_ticktime, enabled_plugins]).

 %%--------------------------------------------------------------------

@@ -56,7 +57,13 @@ start_link() ->
 %%--------------------------------------------------------------------

 get_used_fd() ->
-    get_used_fd(os:type()).
+    case get_used_fd(os:type()) of
+        Fd when is_number(Fd) ->
+            Fd;
+        _Other ->
+            %% Defaults to 0 if data is not available
+            0
+    end.

 get_used_fd({unix, linux}) ->
     case file:list_dir("/proc/" ++ os:getpid() ++ "/fd") of

@@ -75,14 +82,8 @@ get_used_fd({unix, BSD})
                lists:all(Digit, (lists:nth(4, string:tokens(Line, " "))))
        end, string:tokens(Output, "\n")))
     catch _:Error ->
-        case get(logged_used_fd_error) of
-            undefined -> rabbit_log:warning(
-                           "Could not parse fstat output:~n~s~n~p~n",
-                           [Output, {Error, erlang:get_stacktrace()}]),
-                         put(logged_used_fd_error, true);
-            _          -> ok
-        end,
-        unknown
+        log_fd_error("Could not parse fstat output:~n~s~n~p~n",
+                     [Output, {Error, erlang:get_stacktrace()}])
    end;

 get_used_fd({unix, _}) ->

@@ -90,7 +91,7 @@ get_used_fd({unix, _}) ->
                 "lsof -d \"0-9999999\" -lna -p ~s || echo failed", [os:getpid()]),
     Res = os:cmd(Cmd),
     case string:right(Res, 7) of
-        "failed\n" -> unknown;
+        "failed\n" -> log_fd_error("Could not obtain lsof output~n", []);
         _          -> string:words(Res, $\n) - 1
     end;

@@ -131,12 +132,16 @@ get_used_fd({win32, _}) ->
     Handle = rabbit_misc:os_cmd(
                "handle.exe /accepteula -s -p " ++ os:getpid() ++ " 2> nul"),
     case Handle of
-        [] -> install_handle_from_sysinternals;
-        _  -> find_files_line(string:tokens(Handle, "\r\n"))
-    end;
-
-get_used_fd(_) ->
-    unknown.
+        [] -> log_fd_error("Could not find handle.exe, please install from "
+                           "sysinternals~n", []);
+        _  -> case find_files_line(string:tokens(Handle, "\r\n")) of
+                  unknown ->
+                      log_fd_error("Could not parse handle.exe output: ~p~n",
+                                   [Handle]);
+                  Any ->
+                      Any
+              end
+    end.

 find_files_line([]) ->
     unknown;

@@ -158,6 +163,12 @@ get_disk_free_limit() -> ?SAFE_CALL(rabbit_disk_monitor:get_disk_free_limit(),
 get_disk_free() -> ?SAFE_CALL(rabbit_disk_monitor:get_disk_free(),
                               disk_free_monitoring_disabled).

+log_fd_error(Fmt, Args) ->
+    case get(logged_used_fd_error) of
+        undefined -> rabbit_log:warning(Fmt, Args),
+                     put(logged_used_fd_error, true);
+        _         -> ok
+    end.
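
%% Illustrative behaviour sketch (hypothetical arguments): log_fd_error/2
%% warns at most once per process, tracked in the process dictionary, and
%% returns a non-numeric value that get_used_fd/0 maps to the 0 default:
%%
%%   log_fd_error("fd probe failed: ~p~n", [enoent]),  %% logs a warning
%%   log_fd_error("fd probe failed: ~p~n", [enoent]).  %% silent this time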
 %%--------------------------------------------------------------------

 infos(Items, State) -> [{Item, i(Item, State)} || Item <- Items].

@@ -321,14 +332,9 @@ init([]) ->
     State = #state{fd_total = file_handle_cache:ulimit(),
                    fhc_stats = file_handle_cache_stats:get(),
                    node_owners = sets:new()},
-    %% If we emit an update straight away we will do so just before
-    %% the mgmt db starts up - and then have to wait ?REFRESH_RATIO
-    %% until we send another. So let's have a shorter wait in the hope
-    %% that the db will have started by the time we emit an update,
-    %% and thus shorten that little gap at startup where mgmt knows
-    %% nothing about any nodes.
-    erlang:send_after(1000, self(), emit_update),
-    {ok, State}.
+    %% We can update stats straight away as they need to be available
+    %% when the mgmt plugin starts a collector
+    {ok, emit_update(State)}.

 handle_call(_Req, _From, State) ->
     {reply, unknown_request, State}.

@@ -350,8 +356,13 @@ code_change(_, State, _) -> {ok, State}.

 emit_update(State0) ->
     State = update_state(State0),
-    Stats = infos(?KEYS, State),
-    rabbit_event:notify(node_stats, Stats),
+    MStats = infos(?METRICS_KEYS, State),
+    [{persister_stats, PStats0}] = PStats = infos(?PERSISTER_KEYS, State),
+    [{name, _Name} | OStats0] = OStats = infos(?OTHER_KEYS, State),
+    rabbit_core_metrics:node_stats(persister_metrics, PStats0),
+    rabbit_core_metrics:node_stats(coarse_metrics, MStats),
+    rabbit_core_metrics:node_stats(node_metrics, OStats0),
+    rabbit_event:notify(node_stats, PStats ++ MStats ++ OStats),
     erlang:send_after(?REFRESH_RATIO, self(), emit_update),
     emit_node_node_stats(State).

@@ -361,11 +372,13 @@ emit_node_node_stats(State = #state{node_owners = Owners}) ->
     Dead = sets:to_list(sets:subtract(Owners, NewOwners)),
     [rabbit_event:notify(
        node_node_deleted, [{route, Route}]) || {Node, _Owner} <- Dead,
                                                Route <- [{node(), Node},
                                                          {Node, node()}]],
-    [rabbit_event:notify(
-       node_node_stats, [{route, {node(), Node}} | Stats]) ||
-        {Node, _Owner, Stats} <- Links],
+    [begin
+         rabbit_core_metrics:node_node_stats({node(), Node}, Stats),
+         rabbit_event:notify(
+           node_node_stats, [{route, {node(), Node}} | Stats])
+     end || {Node, _Owner, Stats} <- Links],
     State#state{node_owners = NewOwners}.

 update_state(State0) ->

@@ -0,0 +1,517 @@
%% The contents of this file are subject to the Mozilla Public License
%% Version 1.1 (the "License"); you may not use this file except in
%% compliance with the License. You may obtain a copy of the License at
%% http://www.mozilla.org/MPL/
%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
%% License for the specific language governing rights and limitations
%% under the License.
%%
%% The Original Code is RabbitMQ Management Plugin.
%%
%% The Initial Developer of the Original Code is GoPivotal, Inc.
%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved.
%%

-module(rabbit_mgmt_format).

-export([format/2, ip/1, ipb/1, amqp_table/1, tuple/1]).
-export([parameter/1, now_to_str/1, now_to_str_ms/1, strip_pids/1]).
-export([protocol/1, resource/1, queue/1, queue_state/1]).
-export([exchange/1, user/1, internal_user/1, binding/1, url/2]).
-export([pack_binding_props/2, tokenise/1]).
-export([to_amqp_table/1, listener/1, properties/1, basic_properties/1]).
-export([record/2, to_basic_properties/1]).
-export([addr/1, port/1]).
-export([format_nulls/1]).
-export([print/2, print/1]).

-export([format_queue_stats/1, format_channel_stats/1,
         format_arguments/1, format_connection_created/1,
         format_accept_content/1, format_args/1]).

-export([strip_queue_pids/1]).

-export([clean_consumer_details/1, clean_channel_details/1]).

-import(rabbit_misc, [pget/2, pset/3]).

-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("rabbit_common/include/rabbit_framing.hrl").

%%--------------------------------------------------------------------

format(Stats, {[], _}) ->
    [Stat || {_Name, Value} = Stat <- Stats, Value =/= unknown];
format(Stats, {Fs, true}) ->
    [Fs(Stat) || {_Name, Value} = Stat <- Stats, Value =/= unknown];
format(Stats, {Fs, false}) ->
    lists:concat([Fs(Stat) || {_Name, Value} = Stat <- Stats,
                              Value =/= unknown]).

format_queue_stats({reductions, _}) ->
    [];
format_queue_stats({exclusive_consumer_pid, _}) ->
    [];
format_queue_stats({slave_pids, ''}) ->
    [];
format_queue_stats({slave_pids, Pids}) ->
    [{slave_nodes, [node(Pid) || Pid <- Pids]}];
format_queue_stats({synchronised_slave_pids, ''}) ->
    [];
format_queue_stats({synchronised_slave_pids, Pids}) ->
    [{synchronised_slave_nodes, [node(Pid) || Pid <- Pids]}];
format_queue_stats({backing_queue_status, Value}) ->
    [{backing_queue_status, properties(Value)}];
format_queue_stats({idle_since, Value}) ->
    [{idle_since, now_to_str(Value)}];
format_queue_stats({state, Value}) ->
    queue_state(Value);
format_queue_stats({disk_reads, _}) ->
    [];
format_queue_stats({disk_writes, _}) ->
    [];
format_queue_stats(Stat) ->
    [Stat].

format_channel_stats({idle_since, Value}) ->
    {idle_since, now_to_str(Value)};
format_channel_stats(Stat) ->
    Stat.

format_arguments({arguments, Value}) ->
    {arguments, amqp_table(Value)};
format_arguments(Stat) ->
    Stat.

format_args({arguments, Value}) ->
    {arguments, args(Value)};
format_args(Stat) ->
    Stat.

format_connection_created({host, Value}) ->
    {host, addr(Value)};
format_connection_created({peer_host, Value}) ->
    {peer_host, addr(Value)};
format_connection_created({port, Value}) ->
    {port, port(Value)};
format_connection_created({peer_port, Value}) ->
    {peer_port, port(Value)};
format_connection_created({protocol, Value}) ->
    {protocol, protocol(Value)};
format_connection_created({client_properties, Value}) ->
    {client_properties, amqp_table(Value)};
format_connection_created(Stat) ->
    Stat.

format_exchange_and_queue({policy, Value}) ->
    policy(Value);
format_exchange_and_queue({arguments, Value}) ->
    [{arguments, amqp_table(Value)}];
format_exchange_and_queue({name, Value}) ->
    resource(Value);
format_exchange_and_queue(Stat) ->
    [Stat].

format_binding({source, Value}) ->
    resource(source, Value);
format_binding({arguments, Value}) ->
    [{arguments, amqp_table(Value)}];
format_binding(Stat) ->
    [Stat].

format_basic_properties({headers, Value}) ->
    {headers, amqp_table(Value)};
format_basic_properties(Stat) ->
    Stat.

format_accept_content({durable, Value}) ->
    {durable, parse_bool(Value)};
format_accept_content({auto_delete, Value}) ->
    {auto_delete, parse_bool(Value)};
format_accept_content({internal, Value}) ->
    {internal, parse_bool(Value)};
format_accept_content(Stat) ->
    Stat.

print(Fmt, Val) when is_list(Val) ->
    list_to_binary(lists:flatten(io_lib:format(Fmt, Val)));
print(Fmt, Val) ->
    print(Fmt, [Val]).

print(Val) when is_list(Val) ->
    list_to_binary(lists:flatten(Val));
print(Val) ->
    Val.

%% TODO - can we remove all these "unknown" cases? Coverage never hits them.

ip(unknown) -> unknown;
ip(IP) -> list_to_binary(rabbit_misc:ntoa(IP)).

ipb(unknown) -> unknown;
ipb(IP) -> list_to_binary(rabbit_misc:ntoab(IP)).

addr(S) when is_list(S); is_atom(S); is_binary(S) -> print("~s", S);
addr(Addr) when is_tuple(Addr) -> ip(Addr).

port(Port) when is_number(Port) -> Port;
port(Port) -> print("~w", Port).

properties(unknown) -> unknown;
properties(Table) -> {struct, [{Name, tuple(Value)} ||
                                  {Name, Value} <- Table]}.

amqp_table(unknown) -> unknown;
amqp_table(undefined) -> amqp_table([]);
amqp_table(Table) -> {struct, [{Name, amqp_value(Type, Value)} ||
                                  {Name, Type, Value} <- Table]}.

amqp_value(array, Vs) -> [amqp_value(T, V) || {T, V} <- Vs];
amqp_value(table, V) -> amqp_table(V);
amqp_value(_Type, V) when is_binary(V) -> utf8_safe(V);
amqp_value(_Type, V) -> V.

utf8_safe(V) ->
    try
        _ = xmerl_ucs:from_utf8(V),
        V
    catch exit:{ucs, _} ->
            Enc = split_lines(base64:encode(V)),
            <<"Not UTF-8, base64 is: ", Enc/binary>>
    end.

% MIME enforces a limit on line length of base 64-encoded data to 76 characters.
split_lines(<<Text:76/binary, Rest/binary>>) ->
    <<Text/binary, $\n, (split_lines(Rest))/binary>>;
split_lines(Text) ->
    Text.
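
%% Illustrative sketch (hypothetical input): split_lines/1 re-chunks the
%% base64 payload into 76-character lines, as the MIME limit above
%% requires. A 100-byte binary comes out as one 76-byte line, a newline,
%% and the 24-byte remainder:
%%
%%   B = binary:copy(<<"a">>, 100),
%%   <<Line:76/binary, $\n, Rest:24/binary>> = split_lines(B).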

parameter(P) -> pset(value, rabbit_misc:term_to_json(pget(value, P)), P).

tuple(unknown) -> unknown;
tuple(Tuple) when is_tuple(Tuple) -> [tuple(E) || E <- tuple_to_list(Tuple)];
tuple(Term) -> Term.

protocol(unknown) ->
    unknown;
protocol(Version = {_Major, _Minor, _Revision}) ->
    protocol({'AMQP', Version});
protocol({Family, Version}) ->
    print("~s ~s", [Family, protocol_version(Version)]).

protocol_version(Arbitrary)
  when is_list(Arbitrary) -> Arbitrary;
protocol_version({Major, Minor}) -> io_lib:format("~B-~B", [Major, Minor]);
protocol_version({Major, Minor, 0}) -> protocol_version({Major, Minor});
protocol_version({Major, Minor, Revision}) -> io_lib:format("~B-~B-~B",
                                                            [Major, Minor, Revision]).

now_to_str(unknown) ->
    unknown;
now_to_str(MilliSeconds) ->
    BaseDate = calendar:datetime_to_gregorian_seconds({{1970, 1, 1},
                                                       {0, 0, 0}}),
    Seconds = BaseDate + (MilliSeconds div 1000),
    {{Y, M, D}, {H, Min, S}} = calendar:gregorian_seconds_to_datetime(Seconds),
    print("~w-~2.2.0w-~2.2.0w ~w:~2.2.0w:~2.2.0w", [Y, M, D, H, Min, S]).

now_to_str_ms(unknown) ->
    unknown;
now_to_str_ms(MilliSeconds) ->
    print("~s:~3.3.0w", [now_to_str(MilliSeconds), MilliSeconds rem 1000]).

resource(unknown) -> unknown;
resource(Res) -> resource(name, Res).

resource(_, unknown) ->
    unknown;
resource(NameAs, #resource{name = Name, virtual_host = VHost}) ->
    [{NameAs, Name}, {vhost, VHost}].

policy('') -> [];
policy(Policy) -> [{policy, Policy}].

internal_user(User) ->
    [{name, User#internal_user.username},
     {password_hash, base64:encode(User#internal_user.password_hash)},
     {hashing_algorithm, rabbit_auth_backend_internal:hashing_module_for_user(
                             User)},
     {tags, tags(User#internal_user.tags)}].

user(User) ->
    [{name, User#user.username},
     {tags, tags(User#user.tags)}].

tags(Tags) ->
    list_to_binary(string:join([atom_to_list(T) || T <- Tags], ",")).

listener(#listener{node = Node, protocol = Protocol,
                   ip_address = IPAddress, port = Port, opts=Opts}) ->
    [{node, Node},
     {protocol, Protocol},
     {ip_address, ip(IPAddress)},
     {port, Port},
     {socket_opts, opts(Opts)}].

opts(Opts) ->
    opts(Opts, []).

opts([], Acc) ->
    lists:reverse(Acc);
opts([Head={Name, Value}|Tail], Acc) when is_list(Value) ->
    case io_lib:printable_unicode_list(Value) of
        true -> opts(Tail, [{Name, unicode:characters_to_binary(Value)}|Acc]);
        false -> opts(Tail, [Head|Acc])
    end;
opts([{Name, Value}|Tail], Acc) when is_tuple(Value) ->
    opts(Tail, [{Name, tuple_to_list(Value)}|Acc]);
opts([Head|Tail], Acc) ->
    opts(Tail, [Head|Acc]).

pack_binding_props(<<"">>, []) ->
    <<"~">>;
pack_binding_props(Key, []) ->
    list_to_binary(quote_binding(Key));
pack_binding_props(Key, Args) ->
    ArgsEnc = args_hash(Args),
    list_to_binary(quote_binding(Key) ++ "~" ++ quote_binding(ArgsEnc)).

quote_binding(Name) ->
    re:replace(mochiweb_util:quote_plus(Name), "~", "%7E", [global]).

%% Unfortunately string:tokens("foo~~bar", "~"). -> ["foo","bar"], we lose
%% the fact that there's a double ~.
tokenise("") ->
    [];
tokenise(Str) ->
    Count = string:cspan(Str, "~"),
    case length(Str) of
        Count -> [Str];
        _ -> [string:sub_string(Str, 1, Count) |
              tokenise(string:sub_string(Str, Count + 2))]
    end.
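
%% Illustrative sketch: quote_binding/1 escapes "~" above precisely so that
%% tokenise/1 can later split on the bare "~" separator without losing
%% empty segments, unlike string:tokens/2:
%%
%%   ["foo", "", "bar"] = tokenise("foo~~bar"),
%%   ["foo"] = tokenise("foo").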

to_amqp_table({struct, T}) ->
    to_amqp_table(T);
to_amqp_table(T) ->
    [to_amqp_table_row(K, V) || {K, V} <- T].

to_amqp_table_row(K, V) ->
    {T, V2} = type_val(V),
    {K, T, V2}.

to_amqp_array(L) ->
    [type_val(I) || I <- L].

type_val({struct, M}) -> {table, to_amqp_table(M)};
type_val(L) when is_list(L) -> {array, to_amqp_array(L)};
type_val(X) when is_binary(X) -> {longstr, X};
type_val(X) when is_integer(X) -> {long, X};
type_val(X) when is_number(X) -> {double, X};
type_val(true) -> {bool, true};
type_val(false) -> {bool, false};
type_val(null) -> throw({error, null_not_allowed});
type_val(X) -> throw({error, {unhandled_type, X}}).

url(Fmt, Vals) ->
    print(Fmt, [mochiweb_util:quote_plus(V) || V <- Vals]).

exchange(X) ->
    format(X, {fun format_exchange_and_queue/1, false}).

%% We get queues using rabbit_amqqueue:list/1 rather than :info_all/1 since
%% the latter wakes up each queue. Therefore we have a record rather than a
%% proplist to deal with.
queue(#amqqueue{name = Name,
                durable = Durable,
                auto_delete = AutoDelete,
                exclusive_owner = ExclusiveOwner,
                arguments = Arguments,
                pid = Pid,
                state = State}) ->
    format(
      [{name, Name},
       {durable, Durable},
       {auto_delete, AutoDelete},
       {exclusive, is_pid(ExclusiveOwner)},
       {owner_pid, ExclusiveOwner},
       {arguments, Arguments},
       {pid, Pid},
       {state, State}],
      {fun format_exchange_and_queue/1, false}).

queue_state({syncing, Msgs}) -> [{state, syncing},
                                 {sync_messages, Msgs}];
queue_state(Status) -> [{state, Status}].

%% We get bindings using rabbit_binding:list_*/1 rather than :info_all/1 since
%% there are no per-exchange / queue / etc variants for the latter. Therefore
%% we have a record rather than a proplist to deal with.
binding(#binding{source = S,
                 key = Key,
                 destination = D,
                 args = Args}) ->
    format(
      [{source, S},
       {destination, D#resource.name},
       {destination_type, D#resource.kind},
       {routing_key, Key},
       {arguments, Args},
       {properties_key, pack_binding_props(Key, Args)}],
      {fun format_binding/1, false}).

basic_properties(Props = #'P_basic'{}) ->
    Res = record(Props, record_info(fields, 'P_basic')),
    format(Res, {fun format_basic_properties/1, true}).

record(Record, Fields) ->
    {Res, _Ix} = lists:foldl(fun (K, {L, Ix}) ->
                                     {case element(Ix, Record) of
                                          undefined -> L;
                                          V -> [{K, V}|L]
                                      end, Ix + 1}
                             end, {[], 2}, Fields),
    Res.

to_basic_properties({struct, P}) ->
    to_basic_properties(P);
to_basic_properties(Props) ->
    Fmt = fun (headers, H) -> to_amqp_table(H);
              (delivery_mode, V) when is_integer(V) -> V;
              (delivery_mode, _V) -> err(not_int, delivery_mode);
              (priority, V) when is_integer(V) -> V;
              (priority, _V) -> err(not_int, priority);
              (timestamp, V) when is_integer(V) -> V;
              (timestamp, _V) -> err(not_int, timestamp);
              (_, V) when is_binary(V) -> V;
              (K, _V) -> err(not_string, K)
          end,
    {Res, _Ix} = lists:foldl(
                   fun (K, {P, Ix}) ->
                           {case proplists:get_value(a2b(K), Props) of
                                undefined -> P;
                                V -> setelement(Ix, P, Fmt(K, V))
                            end, Ix + 1}
                   end, {#'P_basic'{}, 2},
                   record_info(fields, 'P_basic')),
    Res.

-spec err(any(), any()) -> no_return().
err(A, B) ->
    throw({error, {A, B}}).

a2b(A) ->
    list_to_binary(atom_to_list(A)).

strip_queue_pids(Item) ->
    strip_queue_pids(Item, []).

strip_queue_pids([{_, unknown} | T], Acc) ->
    strip_queue_pids(T, Acc);
strip_queue_pids([{pid, Pid} | T], Acc) when is_pid(Pid) ->
    strip_queue_pids(T, [{node, node(Pid)} | Acc]);
strip_queue_pids([{pid, _} | T], Acc) ->
    strip_queue_pids(T, Acc);
strip_queue_pids([{owner_pid, _} | T], Acc) ->
    strip_queue_pids(T, Acc);
strip_queue_pids([Any | T], Acc) ->
    strip_queue_pids(T, [Any | Acc]);
strip_queue_pids([], Acc) ->
    Acc.

%% Items can be connections, channels, consumers or queues, hence remove takes
%% various items.
strip_pids(Item = [T | _]) when is_tuple(T) ->
    strip_pids(Item, []);
strip_pids(Items) -> [strip_pids(I) || I <- Items].

strip_pids([{_, unknown} | T], Acc) ->
    strip_pids(T, Acc);
strip_pids([{pid, Pid} | T], Acc) when is_pid(Pid) ->
    strip_pids(T, [{node, node(Pid)} | Acc]);
strip_pids([{pid, _} | T], Acc) ->
    strip_pids(T, Acc);
strip_pids([{connection, _} | T], Acc) ->
    strip_pids(T, Acc);
strip_pids([{owner_pid, _} | T], Acc) ->
    strip_pids(T, Acc);
strip_pids([{channel, _} | T], Acc) ->
    strip_pids(T, Acc);
strip_pids([{channel_pid, _} | T], Acc) ->
    strip_pids(T, Acc);
strip_pids([{exclusive_consumer_pid, _} | T], Acc) ->
    strip_pids(T, Acc);
strip_pids([{slave_pids, ''} | T], Acc) ->
    strip_pids(T, Acc);
strip_pids([{slave_pids, Pids} | T], Acc) ->
    strip_pids(T, [{slave_nodes, [node(Pid) || Pid <- Pids]} | Acc]);
strip_pids([{synchronised_slave_pids, ''} | T], Acc) ->
    strip_pids(T, Acc);
strip_pids([{synchronised_slave_pids, Pids} | T], Acc) ->
    strip_pids(T, [{synchronised_slave_nodes, [node(Pid) || Pid <- Pids]} | Acc]);
strip_pids([{K, [P|_] = Nested} | T], Acc) when is_tuple(P) -> % recurse
    strip_pids(T, [{K, strip_pids(Nested)} | Acc]);
strip_pids([{K, [L|_] = Nested} | T], Acc) when is_list(L) -> % recurse
    strip_pids(T, [{K, strip_pids(Nested)} | Acc]);
strip_pids([Any | T], Acc) ->
    strip_pids(T, [Any | Acc]);
strip_pids([], Acc) ->
    Acc.
||||
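
%% Sketch of the intended effect, assuming QPid is a queue process living on
%% a hypothetical node 'rabbit@host1' (values illustrative only):
%%
%%   1> strip_pids([{pid, QPid}, {connection, ConnPid}, {name, <<"q1">>}]).
%%   [{name,<<"q1">>},{node,'rabbit@host1'}]
%%
%% i.e. raw pids never reach the API; at most the node a pid lives on survives.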

%% Format for JSON replies. Transforms '' into null
format_nulls(Items) when is_list(Items) ->
    [format_null_item(Pair) || Pair <- Items];
format_nulls(Item) ->
    format_null_item(Item).

format_null_item({Key, ''}) ->
    {Key, null};
format_null_item({Key, Value}) when is_list(Value) ->
    {Key, format_nulls(Value)};
format_null_item({Key, {struct, Struct}}) ->
    {Key, {struct, format_nulls(Struct)}};
format_null_item({Key, {array, Struct}}) ->
    {Key, {array, format_nulls(Struct)}};
format_null_item({Key, Value}) ->
    {Key, Value};
format_null_item([{_K, _V} | _T] = L) ->
    format_nulls(L);
format_null_item(Value) ->
    Value.
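
%% A minimal sketch of the '' -> null rewrite (illustrative values):
%%
%%   1> format_nulls([{idle_since, ''}, {name, <<"ch1">>}]).
%%   [{idle_since,null},{name,<<"ch1">>}]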

-spec clean_consumer_details(proplists:proplist()) -> proplists:proplist().
clean_consumer_details(Obj) ->
    case pget(consumer_details, Obj) of
        undefined -> Obj;
        Cds ->
            Cons = [clean_channel_details(Con) || Con <- Cds],
            pset(consumer_details, Cons, Obj)
    end.

-spec clean_channel_details(proplists:proplist()) -> proplists:proplist().
clean_channel_details(Obj) ->
    Obj0 = lists:keydelete(channel_pid, 1, Obj),
    case pget(channel_details, Obj0) of
        undefined -> Obj0;
        Chd ->
            pset(channel_details,
                 lists:keydelete(pid, 1, Chd),
                 Obj0)
    end.

args({struct, L}) -> args(L);
args(L)           -> to_amqp_table(L).

parse_bool(<<"true">>)  -> true;
parse_bool(<<"false">>) -> false;
parse_bool(true)        -> true;
parse_bool(false)       -> false;
parse_bool(undefined)   -> undefined;
parse_bool(V)           -> throw({error, {not_boolean, V}}).

args_hash(Args) ->
    list_to_binary(rabbit_misc:base64url(erlang:md5(term_to_binary(Args)))).
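
%% Sketch: the hash is a URL-safe digest of the term-encoded arguments, so
%% equal argument lists always map to the same properties_key component.
%% The result shown below is illustrative only, not a real digest:
%%
%%   1> args_hash([{<<"x-match">>, longstr, <<"all">>}]).
%%   <<"5ALnLEAJyrf4hnNglLKCwA">>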

@@ -0,0 +1,502 @@

%% The contents of this file are subject to the Mozilla Public License
%% Version 1.1 (the "License"); you may not use this file except in
%% compliance with the License. You may obtain a copy of the License
%% at http://www.mozilla.org/MPL/
%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and
%% limitations under the License.
%%
%% The Original Code is RabbitMQ.
%%
%% The Initial Developer of the Original Code is GoPivotal, Inc.
%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved.
%%
-module(rabbit_mgmt_metrics_collector).

-include_lib("rabbit_common/include/rabbit.hrl").
-include_lib("rabbit_common/include/rabbit_core_metrics.hrl").
-include("rabbit_mgmt_metrics.hrl").

-behaviour(gen_server).

-spec start_link(atom()) -> rabbit_types:ok_pid_or_error().

-export([name/1]).
-export([start_link/1]).
-export([override_lookups/2, reset_lookups/1]).
-export([delete_queue/2]).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
         code_change/3]).
-export([index_table/2]).

-import(rabbit_misc, [pget/3]).
-import(rabbit_mgmt_data, [lookup_element/3]).

-record(state, {table, interval, policies, rates_mode, lookup_queue,
                lookup_exchange, old_aggr_stats}).

%% Data is stored in ETS tables:
%% * One ETS table per metric (queue_stats, channel_stats_deliver_stats...)
%%   (see ?TABLES in rabbit_mgmt_metrics.hrl)
%% * Stats are stored as key-value pairs where the key is a tuple of
%%   some value (such as a channel pid) and the retention interval.
%%   The value is an instance of an exometer_slide providing a sliding window
%%   of samples for some {Object, Interval}.
%% * Each slide can store multiple stats. See stats_per_table in
%%   rabbit_mgmt_metrics.hrl for a map of which stats are recorded in which
%%   table.
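
%% As a sketch of that layout, a lookup against one of these tables (for a
%% hypothetical queue resource QName and a 5-second retention interval)
%% would return the slide for that pair:
%%
%%   1> ets:lookup(queue_msg_stats, {QName, 5}).
%%   [{{QName, 5}, Slide}]   %% Slide being an exometer_slide instance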

name(Table) ->
    list_to_atom((atom_to_list(Table) ++ "_metrics_collector")).

start_link(Table) ->
    gen_server:start_link({local, name(Table)}, ?MODULE, [Table], []).

override_lookups(Table, Lookups) ->
    gen_server:call(name(Table), {override_lookups, Lookups}, infinity).

reset_lookups(Table) ->
    gen_server:call(name(Table), reset_lookups, infinity).

delete_queue(Table, Queue) ->
    gen_server:cast(name(Table), {delete_queue, Queue}).

init([Table]) ->
    {RatesMode, Policies} = load_config(),
    Policy = retention_policy(Table),
    Interval = take_smaller(proplists:get_value(Policy, Policies)) * 1000,
    erlang:send_after(Interval, self(), collect_metrics),
    {ok, #state{table = Table, interval = Interval,
                policies = {proplists:get_value(basic, Policies),
                            proplists:get_value(detailed, Policies),
                            proplists:get_value(global, Policies)},
                rates_mode = RatesMode,
                old_aggr_stats = dict:new(),
                lookup_queue = fun queue_exists/1,
                lookup_exchange = fun exchange_exists/1}}.

handle_call(reset_lookups, _From, State) ->
    {reply, ok, State#state{lookup_queue = fun queue_exists/1,
                            lookup_exchange = fun exchange_exists/1}};
handle_call({override_lookups, Lookups}, _From, State) ->
    {reply, ok, State#state{lookup_queue = pget(queue, Lookups),
                            lookup_exchange = pget(exchange, Lookups)}};
handle_call({submit, Fun}, _From, State) ->
    {reply, Fun(), State};
handle_call(_Request, _From, State) ->
    {noreply, State}.

handle_cast({delete_queue, Queue}, State = #state{table = queue_coarse_metrics,
                                                  old_aggr_stats = Old,
                                                  policies = {_, _, GPolicies}}) ->
    TS = exometer_slide:timestamp(),
    case dict:find(Queue, Old) of
        error ->
            {noreply, State};
        {ok, {R, U, M}} ->
            NegStats = ?vhost_msg_stats(-R, -U, -M),
            [insert_entry(vhost_msg_stats, vhost(Queue), TS, NegStats, Size, Interval, true)
             || {Size, Interval} <- GPolicies],
            {noreply, State}
    end;
handle_cast(_Msg, State) ->
    {noreply, State}.

handle_info(collect_metrics, #state{interval = Interval} = State0) ->
    Timestamp = exometer_slide:timestamp(),
    State = aggregate_metrics(Timestamp, State0),
    erlang:send_after(Interval, self(), collect_metrics),
    {noreply, State};
handle_info(_Msg, State) ->
    {noreply, State}.

terminate(_Reason, _State) ->
    ok.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.

retention_policy(connection_created) -> basic; %% really nothing
retention_policy(connection_metrics) -> basic;
retention_policy(connection_coarse_metrics) -> basic;
retention_policy(channel_created) -> basic;
retention_policy(channel_metrics) -> basic;
retention_policy(channel_queue_exchange_metrics) -> detailed;
retention_policy(channel_exchange_metrics) -> detailed;
retention_policy(channel_queue_metrics) -> detailed;
retention_policy(channel_process_metrics) -> basic;
retention_policy(consumer_created) -> basic;
retention_policy(queue_metrics) -> basic;
retention_policy(queue_coarse_metrics) -> basic;
retention_policy(node_persister_metrics) -> global;
retention_policy(node_coarse_metrics) -> global;
retention_policy(node_metrics) -> basic;
retention_policy(node_node_metrics) -> global.

take_smaller(Policies) ->
    lists:min([I || {_, I} <- Policies]).
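
%% E.g. with the default app env further down in this commit, a 'basic'
%% table has the policies [{605, 5}, {3600, 60}], so (a quick sketch):
%%
%%   1> take_smaller([{605, 5}, {3600, 60}]).
%%   5
%%
%% and init/1 therefore schedules collect_metrics every 5 * 1000 = 5000 ms.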

insert_old_aggr_stats(NextStats, Id, Stat) ->
    dict:store(Id, Stat, NextStats).

handle_deleted_queues(queue_coarse_metrics, Remainders, GPolicies) ->
    TS = exometer_slide:timestamp(),
    lists:foreach(fun ({Queue, {R, U, M}}) ->
                          NegStats = ?vhost_msg_stats(-R, -U, -M),
                          [insert_entry(vhost_msg_stats, vhost(Queue), TS,
                                        NegStats, Size, Interval, true)
                           || {Size, Interval} <- GPolicies]
                  end,
                  dict:to_list(Remainders));
handle_deleted_queues(_T, _R, _P) -> ok.

aggregate_metrics(Timestamp, #state{table = Table,
                                    policies = {_, _, GPolicies}} = State0) ->
    {Next, #state{old_aggr_stats = Remainders}} = ets:foldl(
        fun(R, {Dict, State}) ->
                aggregate_entry(Timestamp, R, Dict, State)
        end, {dict:new(), State0}, Table),
    handle_deleted_queues(Table, Remainders, GPolicies),
    State0#state{old_aggr_stats = Next}.

aggregate_entry(_TS, {Id, Metrics}, NextStats, #state{table = connection_created} = State) ->
    Ftd = rabbit_mgmt_format:format(
            Metrics,
            {fun rabbit_mgmt_format:format_connection_created/1, true}),
    ets:insert(connection_created_stats,
               ?connection_created_stats(Id, pget(name, Ftd, unknown), Ftd)),
    {NextStats, State};
aggregate_entry(_TS, {Id, Metrics}, NextStats, #state{table = connection_metrics} = State) ->
    ets:insert(connection_stats, ?connection_stats(Id, Metrics)),
    {NextStats, State};
aggregate_entry(TS, {Id, RecvOct, SendOct, Reductions}, NextStats,
                #state{table = connection_coarse_metrics,
                       policies = {BPolicies, _, GPolicies}} = State) ->
    Stats = ?vhost_stats_coarse_conn_stats(RecvOct, SendOct),
    Diff = get_difference(Id, Stats, State),
    [insert_entry(vhost_stats_coarse_conn_stats, vhost({connection_created_stats, Id}),
                  TS, Diff, Size, Interval, true) || {Size, Interval} <- GPolicies],
    [begin
         insert_entry(connection_stats_coarse_conn_stats, Id, TS,
                      ?connection_stats_coarse_conn_stats(RecvOct, SendOct, Reductions),
                      Size, Interval, false)
     end || {Size, Interval} <- BPolicies],
    {insert_old_aggr_stats(NextStats, Id, Stats), State};
aggregate_entry(_TS, {Id, Metrics}, NextStats, #state{table = channel_created} = State) ->
    Ftd = rabbit_mgmt_format:format(Metrics, {[], false}),
    ets:insert(channel_created_stats,
               ?channel_created_stats(Id, pget(name, Ftd, unknown), Ftd)),
    {NextStats, State};
aggregate_entry(_TS, {Id, Metrics}, NextStats, #state{table = channel_metrics} = State) ->
    Ftd = rabbit_mgmt_format:format(Metrics,
                                    {fun rabbit_mgmt_format:format_channel_stats/1, true}),
    ets:insert(channel_stats, ?channel_stats(Id, Ftd)),
    {NextStats, State};
aggregate_entry(TS, {{Ch, X} = Id, Publish0, Confirm, ReturnUnroutable}, NextStats,
                #state{table = channel_exchange_metrics,
                       policies = {BPolicies, DPolicies, GPolicies},
                       rates_mode = RatesMode,
                       lookup_exchange = ExchangeFun} = State) ->
    Stats = ?channel_stats_fine_stats(Publish0, Confirm, ReturnUnroutable),
    {Publish, _, _} = Diff = get_difference(Id, Stats, State),
    [begin
         insert_entry(channel_stats_fine_stats, Ch, TS, Diff, Size, Interval,
                      true)
     end || {Size, Interval} <- BPolicies],
    [begin
         insert_entry(vhost_stats_fine_stats, vhost(X), TS, Diff, Size,
                      Interval, true)
     end || {Size, Interval} <- GPolicies],
    _ = case {ExchangeFun(X), RatesMode} of
            {true, basic} ->
                [insert_entry(exchange_stats_publish_in, X, TS,
                              ?exchange_stats_publish_in(Publish), Size, Interval, true)
                 || {Size, Interval} <- DPolicies];
            {true, _} ->
                [begin
                     insert_entry(exchange_stats_publish_in, X, TS,
                                  ?exchange_stats_publish_in(Publish), Size, Interval, true),
                     insert_entry(channel_exchange_stats_fine_stats, Id, TS, Stats,
                                  Size, Interval, false)
                 end || {Size, Interval} <- DPolicies];
            _ ->
                ok
        end,
    {insert_old_aggr_stats(NextStats, Id, Stats), State};
aggregate_entry(TS, {{Ch, Q} = Id, Get, GetNoAck, Deliver, DeliverNoAck, Redeliver, Ack},
                NextStats,
                #state{table = channel_queue_metrics,
                       policies = {BPolicies, DPolicies, GPolicies},
                       rates_mode = RatesMode,
                       lookup_queue = QueueFun} = State) ->
    Stats = ?vhost_stats_deliver_stats(Get, GetNoAck, Deliver, DeliverNoAck,
                                       Redeliver, Ack,
                                       Deliver + DeliverNoAck + Get + GetNoAck),
    Diff = get_difference(Id, Stats, State),
    [begin
         insert_entry(vhost_stats_deliver_stats, vhost(Q), TS, Diff, Size,
                      Interval, true)
     end || {Size, Interval} <- GPolicies],
    [begin
         insert_entry(channel_stats_deliver_stats, Ch, TS, Diff, Size, Interval,
                      true)
     end || {Size, Interval} <- BPolicies],
    _ = case {QueueFun(Q), RatesMode} of
            {true, basic} ->
                [insert_entry(queue_stats_deliver_stats, Q, TS, Diff, Size, Interval,
                              true) || {Size, Interval} <- BPolicies];
            {true, _} ->
                [insert_entry(queue_stats_deliver_stats, Q, TS, Diff, Size, Interval,
                              true) || {Size, Interval} <- BPolicies],
                [insert_entry(channel_queue_stats_deliver_stats, Id, TS, Stats, Size,
                              Interval, false)
                 || {Size, Interval} <- DPolicies];
            _ ->
                ok
        end,
    {insert_old_aggr_stats(NextStats, Id, Stats), State};
aggregate_entry(TS, {{_Ch, {Q, X}} = Id, Publish}, NextStats,
                #state{table = channel_queue_exchange_metrics,
                       policies = {BPolicies, _, _},
                       rates_mode = RatesMode,
                       lookup_queue = QueueFun,
                       lookup_exchange = ExchangeFun} = State) ->
    Stats = ?queue_stats_publish(Publish),
    Diff = get_difference(Id, Stats, State),
    %% NB: the exchange lookup must be keyed by the exchange resource X,
    %% not the queue Q
    _ = case {QueueFun(Q), ExchangeFun(X), RatesMode} of
            {true, false, _} ->
                [insert_entry(queue_stats_publish, Q, TS, Diff, Size, Interval, true)
                 || {Size, Interval} <- BPolicies];
            {false, true, _} ->
                [insert_entry(exchange_stats_publish_out, X, TS, Diff, Size, Interval, true)
                 || {Size, Interval} <- BPolicies];
            {true, true, basic} ->
                [begin
                     insert_entry(queue_stats_publish, Q, TS, Diff, Size, Interval, true),
                     insert_entry(exchange_stats_publish_out, X, TS, Diff, Size, Interval, true)
                 end || {Size, Interval} <- BPolicies];
            {true, true, _} ->
                [begin
                     insert_entry(queue_stats_publish, Q, TS, Diff, Size, Interval, true),
                     insert_entry(exchange_stats_publish_out, X, TS, Diff, Size, Interval, true),
                     insert_entry(queue_exchange_stats_publish, {Q, X}, TS, Diff, Size, Interval, true)
                 end || {Size, Interval} <- BPolicies];
            _ ->
                ok
        end,
    {insert_old_aggr_stats(NextStats, Id, Stats), State};
aggregate_entry(TS, {Id, Reductions}, NextStats,
                #state{table = channel_process_metrics,
                       policies = {BPolicies, _, _}} = State) ->
    [begin
         insert_entry(channel_process_stats, Id, TS, ?channel_process_stats(Reductions),
                      Size, Interval, false)
     end || {Size, Interval} <- BPolicies],
    {NextStats, State};
aggregate_entry(_TS, {Id, Exclusive, AckRequired, PrefetchCount, Args}, NextStats,
                #state{table = consumer_created} = State) ->
    Fmt = rabbit_mgmt_format:format([{exclusive, Exclusive},
                                     {ack_required, AckRequired},
                                     {prefetch_count, PrefetchCount},
                                     {arguments, Args}], {[], false}),
    insert_with_index(consumer_stats, Id, ?consumer_stats(Id, Fmt)),
    {NextStats, State};
aggregate_entry(TS, {Id, Metrics}, NextStats, #state{table = queue_metrics,
                                                     policies = {BPolicies, _, GPolicies},
                                                     lookup_queue = QueueFun} = State) ->
    Stats = ?queue_msg_rates(pget(disk_reads, Metrics, 0), pget(disk_writes, Metrics, 0)),
    Diff = get_difference(Id, Stats, State),
    [insert_entry(vhost_msg_rates, vhost(Id), TS, Diff, Size, Interval, true)
     || {Size, Interval} <- GPolicies],
    case QueueFun(Id) of
        true ->
            [insert_entry(queue_msg_rates, Id, TS, Stats, Size, Interval, false)
             || {Size, Interval} <- BPolicies],
            Fmt = rabbit_mgmt_format:format(
                    Metrics,
                    {fun rabbit_mgmt_format:format_queue_stats/1, false}),
            ets:insert(queue_stats, ?queue_stats(Id, Fmt));
        false ->
            ok
    end,
    {insert_old_aggr_stats(NextStats, Id, Stats), State};
aggregate_entry(TS, {Name, Ready, Unack, Msgs, Red}, NextStats,
                #state{table = queue_coarse_metrics,
                       old_aggr_stats = Old,
                       policies = {BPolicies, _, GPolicies},
                       lookup_queue = QueueFun} = State) ->
    Stats = ?vhost_msg_stats(Ready, Unack, Msgs),
    Diff = get_difference(Name, Stats, State),
    [insert_entry(vhost_msg_stats, vhost(Name), TS, Diff, Size, Interval, true)
     || {Size, Interval} <- GPolicies],
    _ = case QueueFun(Name) of
            true ->
                [begin
                     insert_entry(queue_process_stats, Name, TS, ?queue_process_stats(Red),
                                  Size, Interval, false),
                     insert_entry(queue_msg_stats, Name, TS, ?queue_msg_stats(Ready, Unack, Msgs),
                                  Size, Interval, false)
                 end || {Size, Interval} <- BPolicies];
            _ ->
                ok
        end,
    State1 = State#state{old_aggr_stats = dict:erase(Name, Old)},
    {insert_old_aggr_stats(NextStats, Name, Stats), State1};
aggregate_entry(_TS, {Id, Metrics}, NextStats, #state{table = node_metrics} = State) ->
    ets:insert(node_stats, {Id, Metrics}),
    {NextStats, State};
aggregate_entry(TS, {Id, Metrics}, NextStats,
                #state{table = node_coarse_metrics,
                       policies = {_, _, GPolicies}} = State) ->
    Stats = ?node_coarse_stats(
               pget(fd_used, Metrics, 0), pget(sockets_used, Metrics, 0),
               pget(mem_used, Metrics, 0), pget(disk_free, Metrics, 0),
               pget(proc_used, Metrics, 0), pget(gc_num, Metrics, 0),
               pget(gc_bytes_reclaimed, Metrics, 0), pget(context_switches, Metrics, 0)),
    [insert_entry(node_coarse_stats, Id, TS, Stats, Size, Interval, false)
     || {Size, Interval} <- GPolicies],
    {NextStats, State};
aggregate_entry(TS, {Id, Metrics}, NextStats,
                #state{table = node_persister_metrics,
                       policies = {_, _, GPolicies}} = State) ->
    Stats = ?node_persister_stats(
               pget(io_read_count, Metrics, 0), pget(io_read_bytes, Metrics, 0),
               pget(io_read_time, Metrics, 0), pget(io_write_count, Metrics, 0),
               pget(io_write_bytes, Metrics, 0), pget(io_write_time, Metrics, 0),
               pget(io_sync_count, Metrics, 0), pget(io_sync_time, Metrics, 0),
               pget(io_seek_count, Metrics, 0), pget(io_seek_time, Metrics, 0),
               pget(io_reopen_count, Metrics, 0), pget(mnesia_ram_tx_count, Metrics, 0),
               pget(mnesia_disk_tx_count, Metrics, 0), pget(msg_store_read_count, Metrics, 0),
               pget(msg_store_write_count, Metrics, 0),
               pget(queue_index_journal_write_count, Metrics, 0),
               pget(queue_index_write_count, Metrics, 0), pget(queue_index_read_count, Metrics, 0),
               pget(io_file_handle_open_attempt_count, Metrics, 0),
               pget(io_file_handle_open_attempt_time, Metrics, 0)),
    [insert_entry(node_persister_stats, Id, TS, Stats, Size, Interval, false)
     || {Size, Interval} <- GPolicies],
    {NextStats, State};
aggregate_entry(TS, {Id, Metrics}, NextStats,
                #state{table = node_node_metrics,
                       policies = {_, _, GPolicies}} = State) ->
    Stats = ?node_node_coarse_stats(pget(send_bytes, Metrics, 0), pget(recv_bytes, Metrics, 0)),
    CleanMetrics = lists:keydelete(recv_bytes, 1, lists:keydelete(send_bytes, 1, Metrics)),
    ets:insert(node_node_stats, ?node_node_stats(Id, CleanMetrics)),
    [insert_entry(node_node_coarse_stats, Id, TS, Stats, Size, Interval, false)
     || {Size, Interval} <- GPolicies],
    {NextStats, State}.

insert_entry(Table, Id, TS, Entry, Size, Interval, Incremental) ->
    Key = {Id, Interval},
    Slide =
        case ets:lookup(Table, Key) of
            [{Key, S}] ->
                S;
            [] ->
                %% add some margin to Size and max_n to reduce chances of
                %% off-by-one errors
                exometer_slide:new((Size + Interval) * 1000,
                                   [{interval, Interval * 1000},
                                    {max_n, ceil(Size / Interval) + 1},
                                    {incremental, Incremental}])
        end,
    insert_with_index(Table, Key, {Key, exometer_slide:add_element(TS, Entry, Slide)}).
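
%% Worked example for the default basic policy entry {Size = 605, Interval = 5}:
%% the slide spans (605 + 5) * 1000 = 610000 ms, samples every 5000 ms and
%% keeps at most ceil(605 / 5) + 1 = 122 samples; the extra interval and the
%% extra sample are the safety margin mentioned above.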

get_difference(Id, Stats, #state{old_aggr_stats = OldStats}) ->
    case dict:find(Id, OldStats) of
        error ->
            Stats;
        {ok, OldStat} ->
            difference(OldStat, Stats)
    end.

difference({A0}, {B0}) ->
    {B0 - A0};
difference({A0, A1}, {B0, B1}) ->
    {B0 - A0, B1 - A1};
difference({A0, A1, A2}, {B0, B1, B2}) ->
    {B0 - A0, B1 - A1, B2 - A2};
difference({A0, A1, A2, A3, A4, A5, A6}, {B0, B1, B2, B3, B4, B5, B6}) ->
    {B0 - A0, B1 - A1, B2 - A2, B3 - A3, B4 - A4, B5 - A5, B6 - A6}.
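
%% Sketch: for a queue whose running totals moved from {10, 4, 14} to
%% {12, 5, 17} between two collection runs,
%%
%%   1> difference({10, 4, 14}, {12, 5, 17}).
%%   {2,1,3}
%%
%% which is the per-interval delta fed into the incremental slides; on the
%% very first run get_difference/3 just returns the raw totals.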

vhost(#resource{virtual_host = VHost}) ->
    VHost;
vhost({queue_stats, #resource{virtual_host = VHost}}) ->
    VHost;
vhost({TName, Pid}) ->
    pget(vhost, lookup_element(TName, Pid, 3)).

exchange_exists(Name) ->
    case rabbit_exchange:lookup(Name) of
        {ok, _} ->
            true;
        _ ->
            false
    end.

queue_exists(Name) ->
    case rabbit_amqqueue:lookup(Name) of
        {ok, _} ->
            true;
        _ ->
            false
    end.

insert_with_index(Table, Key, Tuple) ->
    Insert = ets:insert(Table, Tuple),
    insert_index(Table, Key),
    Insert.

insert_index(consumer_stats, {Q, Ch, _} = Key) ->
    ets:insert(index_table(consumer_stats, queue), {Q, Key}),
    ets:insert(index_table(consumer_stats, channel), {Ch, Key});
insert_index(old_aggr_stats, {Ch, {Q, _X}} = Key) ->
    ets:insert(index_table(old_aggr_stats, queue), {Q, Key}),
    ets:insert(index_table(old_aggr_stats, channel), {Ch, Key});
insert_index(old_aggr_stats, {Ch, Q} = Key) ->
    ets:insert(index_table(old_aggr_stats, queue), {Q, Key}),
    ets:insert(index_table(old_aggr_stats, channel), {Ch, Key});
insert_index(channel_exchange_stats_fine_stats, {{Ch, Ex}, _} = Key) ->
    ets:insert(index_table(channel_exchange_stats_fine_stats, exchange), {Ex, Key}),
    ets:insert(index_table(channel_exchange_stats_fine_stats, channel), {Ch, Key});
insert_index(channel_queue_stats_deliver_stats, {{Ch, Q}, _} = Key) ->
    ets:insert(index_table(channel_queue_stats_deliver_stats, queue), {Q, Key}),
    ets:insert(index_table(channel_queue_stats_deliver_stats, channel), {Ch, Key});
insert_index(queue_exchange_stats_publish, {{Q, Ex}, _} = Key) ->
    ets:insert(index_table(queue_exchange_stats_publish, queue), {Q, Key}),
    ets:insert(index_table(queue_exchange_stats_publish, exchange), {Ex, Key});
insert_index(node_node_coarse_stats, {{_, Node}, _} = Key) ->
    ets:insert(index_table(node_node_coarse_stats, node), {Node, Key});
insert_index(_, _) -> ok.

index_table(consumer_stats, queue) -> consumer_stats_queue_index;
index_table(consumer_stats, channel) -> consumer_stats_channel_index;
index_table(old_aggr_stats, queue) -> old_aggr_stats_queue_index;
index_table(old_aggr_stats, channel) -> old_aggr_stats_channel_index;
index_table(channel_exchange_stats_fine_stats, exchange) -> channel_exchange_stats_fine_stats_exchange_index;
index_table(channel_exchange_stats_fine_stats, channel) -> channel_exchange_stats_fine_stats_channel_index;
index_table(channel_queue_stats_deliver_stats, queue) -> channel_queue_stats_deliver_stats_queue_index;
index_table(channel_queue_stats_deliver_stats, channel) -> channel_queue_stats_deliver_stats_channel_index;
index_table(queue_exchange_stats_publish, queue) -> queue_exchange_stats_publish_queue_index;
index_table(queue_exchange_stats_publish, exchange) -> queue_exchange_stats_publish_exchange_index;
index_table(node_node_coarse_stats, node) -> node_node_coarse_stats_node_index.

load_config() ->
    RatesMode = rabbit_mgmt_agent_config:get_env(rates_mode),
    Policies = rabbit_mgmt_agent_config:get_env(sample_retention_policies),
    {RatesMode, Policies}.

ceil(X) when X < 0 ->
    trunc(X);
ceil(X) ->
    T = trunc(X),
    case X - T == 0 of
        true  -> T;
        false -> T + 1
    end.
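
%% A few illustrative cases: ceil(2.3) =:= 3, ceil(121.0) =:= 121 and,
%% because negative values are simply truncated, ceil(-2.3) =:= -2.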

pget(Key, List) -> pget(Key, List, unknown).

@@ -0,0 +1,185 @@

%% The contents of this file are subject to the Mozilla Public License
%% Version 1.1 (the "License"); you may not use this file except in
%% compliance with the License. You may obtain a copy of the License
%% at http://www.mozilla.org/MPL/
%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and
%% limitations under the License.
%%
%% The Original Code is RabbitMQ.
%%
%% The Initial Developer of the Original Code is GoPivotal, Inc.
%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved.
%%
-module(rabbit_mgmt_metrics_gc).

-record(state, {basic_i,
                detailed_i,
                global_i}).

-include_lib("rabbit_common/include/rabbit.hrl").

-spec start_link(atom()) -> rabbit_types:ok_pid_or_error().

-export([name/1]).
-export([start_link/1]).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
         code_change/3]).

name(EventType) ->
    list_to_atom((atom_to_list(EventType) ++ "_metrics_gc")).

start_link(EventType) ->
    gen_server:start_link({local, name(EventType)}, ?MODULE, [], []).

init(_) ->
    Policies = rabbit_mgmt_agent_config:get_env(sample_retention_policies),
    {ok, #state{basic_i    = intervals(basic, Policies),
                global_i   = intervals(global, Policies),
                detailed_i = intervals(detailed, Policies)}}.

handle_call(_Request, _From, State) ->
    {noreply, State}.

handle_cast({event, #event{type = connection_closed, props = Props}},
            State = #state{basic_i = BIntervals, global_i = GIntervals}) ->
    Pid = pget(pid, Props),
    remove_connection(Pid, BIntervals, GIntervals),
    {noreply, State};
handle_cast({event, #event{type = channel_closed, props = Props}},
            State = #state{basic_i = BIntervals}) ->
    Pid = pget(pid, Props),
    remove_channel(Pid, BIntervals),
    {noreply, State};
handle_cast({event, #event{type = consumer_deleted, props = Props}}, State) ->
    remove_consumer(Props),
    {noreply, State};
handle_cast({event, #event{type = exchange_deleted, props = Props}},
            State = #state{basic_i = BIntervals}) ->
    Name = pget(name, Props),
    remove_exchange(Name, BIntervals),
    {noreply, State};
handle_cast({event, #event{type = queue_deleted, props = Props}},
            State = #state{basic_i = BIntervals}) ->
    Name = pget(name, Props),
    remove_queue(Name, BIntervals),
    {noreply, State};
handle_cast({event, #event{type = vhost_deleted, props = Props}},
            State = #state{global_i = GIntervals}) ->
    Name = pget(name, Props),
    remove_vhost(Name, GIntervals),
    {noreply, State};
handle_cast({event, #event{type = node_node_deleted, props = Props}}, State) ->
    Name = pget(route, Props),
    remove_node_node(Name),
    {noreply, State}.

handle_info(_Msg, State) ->
    {noreply, State}.

terminate(_Reason, _State) ->
    ok.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.

remove_connection(Id, BIntervals, GIntervals) ->
    ets:delete(connection_created_stats, Id),
    ets:delete(connection_stats, Id),
    delete_samples(connection_stats_coarse_conn_stats, Id, BIntervals),
    delete_samples(vhost_stats_coarse_conn_stats, Id, GIntervals),
    ok.

remove_channel(Id, BIntervals) ->
    ets:delete(channel_created_stats, Id),
    ets:delete(channel_stats, Id),
    delete_samples(channel_process_stats, Id, BIntervals),
    delete_samples(channel_stats_fine_stats, Id, BIntervals),
    delete_samples(channel_stats_deliver_stats, Id, BIntervals),
    index_delete(consumer_stats, channel, Id),
    index_delete(channel_exchange_stats_fine_stats, channel, Id),
    index_delete(channel_queue_stats_deliver_stats, channel, Id),
    ok.

remove_consumer(Props) ->
    Id = {pget(queue, Props), pget(channel, Props), pget(consumer_tag, Props)},
    ets:delete(consumer_stats, Id),
    cleanup_index(consumer_stats, Id),
    ok.

remove_exchange(Name, BIntervals) ->
    delete_samples(exchange_stats_publish_out, Name, BIntervals),
    delete_samples(exchange_stats_publish_in, Name, BIntervals),
    index_delete(queue_exchange_stats_publish, exchange, Name),
    index_delete(channel_exchange_stats_fine_stats, exchange, Name),
    ok.

remove_queue(Name, BIntervals) ->
    ets:delete(queue_stats, Name),
    delete_samples(queue_stats_publish, Name, BIntervals),
    delete_samples(queue_stats_deliver_stats, Name, BIntervals),
    delete_samples(queue_process_stats, Name, BIntervals),
    delete_samples(queue_msg_stats, Name, BIntervals),
    delete_samples(queue_msg_rates, Name, BIntervals),
    index_delete(channel_queue_stats_deliver_stats, queue, Name),
    index_delete(queue_exchange_stats_publish, queue, Name),
    index_delete(consumer_stats, queue, Name),
    ok.

remove_vhost(Name, GIntervals) ->
    delete_samples(vhost_stats_coarse_conn_stats, Name, GIntervals),
    delete_samples(vhost_stats_fine_stats, Name, GIntervals),
    delete_samples(vhost_stats_deliver_stats, Name, GIntervals),
    ok.

remove_node_node(Name) ->
    index_delete(node_node_coarse_stats, node, Name),
    ok.

intervals(Type, Policies) ->
    [I || {_, I} <- proplists:get_value(Type, Policies)].

delete_samples(Table, Id, Intervals) ->
    [ets:delete(Table, {Id, I}) || I <- Intervals],
    ok.

index_delete(Table, Type, Id) ->
    IndexTable = rabbit_mgmt_metrics_collector:index_table(Table, Type),
    Keys = ets:lookup(IndexTable, Id),
    [begin
         ets:delete(Table, Key),
         cleanup_index(Table, Key)
     end
     || {_Index, Key} <- Keys],
    ets:delete(IndexTable, Id),
    ok.
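
%% Sketch of the cleanup path above: when a queue QName is deleted,
%% index_delete(consumer_stats, queue, QName) looks up every {QName, Key}
%% pair in the consumer_stats_queue_index bag, deletes each Key from
%% consumer_stats, drops the matching reverse index entries via
%% cleanup_index/2 below, and finally clears the QName bucket from the
%% index table itself.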

cleanup_index(consumer_stats, {Q, Ch, _} = Key) ->
    delete_index(consumer_stats, queue, {Q, Key}),
    delete_index(consumer_stats, channel, {Ch, Key}),
    ok;
cleanup_index(channel_exchange_stats_fine_stats, {{Ch, Ex}, _} = Key) ->
    delete_index(channel_exchange_stats_fine_stats, exchange, {Ex, Key}),
    delete_index(channel_exchange_stats_fine_stats, channel, {Ch, Key}),
    ok;
cleanup_index(channel_queue_stats_deliver_stats, {{Ch, Q}, _} = Key) ->
    delete_index(channel_queue_stats_deliver_stats, queue, {Q, Key}),
    delete_index(channel_queue_stats_deliver_stats, channel, {Ch, Key}),
    ok;
cleanup_index(queue_exchange_stats_publish, {{Q, Ex}, _} = Key) ->
    delete_index(queue_exchange_stats_publish, queue, {Q, Key}),
    delete_index(queue_exchange_stats_publish, exchange, {Ex, Key}),
    ok;
cleanup_index(node_node_coarse_stats, {{_, Node}, _} = Key) ->
    delete_index(node_node_coarse_stats, node, {Node, Key}),
    ok;
cleanup_index(_, _) -> ok.

delete_index(Table, Index, Obj) ->
    ets:delete_object(rabbit_mgmt_metrics_collector:index_table(Table, Index),
                      Obj).

pget(Key, List) -> rabbit_misc:pget(Key, List, unknown).

@@ -0,0 +1,64 @@

%% The contents of this file are subject to the Mozilla Public License
%% Version 1.1 (the "License"); you may not use this file except in
%% compliance with the License. You may obtain a copy of the License
%% at http://www.mozilla.org/MPL/
%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and
%% limitations under the License.
%%
%% The Original Code is RabbitMQ.
%%
%% The Initial Developer of the Original Code is GoPivotal, Inc.
%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved.
%%
-module(rabbit_mgmt_storage).
-behaviour(gen_server2).
-record(state, {}).

-spec start_link() -> rabbit_types:ok_pid_or_error().

-export([start_link/0]).
-export([reset/0, reset_all/0]).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
         code_change/3]).

-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_metrics.hrl").

%% ETS owner
start_link() ->
    gen_server2:start_link({local, ?MODULE}, ?MODULE, [], []).

reset() ->
    rabbit_log:warning("Resetting RabbitMQ management storage~n"),
    [ets:delete_all_objects(IndexTable) || IndexTable <- ?INDEX_TABLES],
    [ets:delete_all_objects(Table) || {Table, _} <- ?TABLES],
    ok.

reset_all() ->
    [rpc:call(Node, rabbit_mgmt_storage, reset, [])
     || Node <- rabbit_nodes:all_running()].

init(_) ->
    _ = [ets:new(IndexTable, [public, bag, named_table])
         || IndexTable <- ?INDEX_TABLES],
    _ = [ets:new(Table, [public, Type, named_table])
         || {Table, Type} <- ?TABLES],
    _ = ets:new(rabbit_mgmt_db_cache, [public, set, named_table]),
    {ok, #state{}}.

handle_call(_Request, _From, State) ->
    {noreply, State}.

handle_cast(_Msg, State) ->
    {noreply, State}.

handle_info(_Msg, State) ->
    {noreply, State}.

terminate(_Reason, _State) ->
    ok.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.

@@ -4,6 +4,12 @@

  {modules, []},
  {registered, []},
  {mod, {rabbit_mgmt_agent_app, []}},
  {env, []},
  {env, [{rates_mode, basic},
         {sample_retention_policies,
          %% List of {MaxAgeInSeconds, SampleEveryNSeconds}
          [{global,   [{605, 5}, {3660, 60}, {29400, 600}, {86400, 1800}]},
           {basic,    [{605, 5}, {3600, 60}]},
           {detailed, [{605, 5}]}]}
  ]},
  {broker_version_requirements, []},
  {applications, [kernel, stdlib, rabbit_common, rabbit]}]}.
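
%% Sketch of how these defaults are consumed by the modules above: for
%% 'basic' stats, intervals(basic, Policies) yields [5, 60], i.e. one slide
%% per object sampled every 5s and retained for ~10 minutes (605s), plus one
%% sampled every 60s and retained for an hour (3600s).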

@@ -0,0 +1,505 @@

%% The contents of this file are subject to the Mozilla Public License
%% Version 1.1 (the "License"); you may not use this file except in
%% compliance with the License. You may obtain a copy of the License at
%% http://www.mozilla.org/MPL/
%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
%% License for the specific language governing rights and limitations
%% under the License.
%%
%% Copyright (c) 2016 Pivotal Software, Inc. All rights reserved.
%%

-module(exometer_slide_SUITE).

-include_lib("proper/include/proper.hrl").
-include_lib("eunit/include/eunit.hrl").

-compile(export_all).

all() ->
    [
     {group, parallel_tests}
    ].

groups() ->
    [
     {parallel_tests, [parallel], [
                                   incremental_add_element_basics,
                                   incremental_last_two_returns_last_two_completed_samples,
                                   incremental_sum,
                                   incremental_sum_stale,
                                   incremental_sum_stale2,
                                   incremental_sum_with_drop,
                                   incremental_sum_with_total,
                                   foldl_realises_partial_sample,
                                   foldl_and_to_list,
                                   foldl_and_to_list_incremental,
                                   optimize,
                                   stale_to_list,
                                   to_list_single_after_drop,
                                   to_list_drop_and_roll,
                                   to_list_with_drop,
                                   to_list_simple,
                                   foldl_with_drop,
                                   sum_single,
                                   to_normalized_list,
                                   to_normalized_list_no_padding,
                                   to_list_in_the_past
                                  ]}
    ].

%% -------------------------------------------------------------------
%% Testsuite setup/teardown.
%% -------------------------------------------------------------------
init_per_suite(Config) ->
    Config.

end_per_suite(_Config) ->
    ok.

init_per_group(_, Config) ->
    Config.

end_per_group(_, _Config) ->
    ok.

init_per_testcase(_, Config) ->
    Config.

end_per_testcase(_, _Config) ->
    ok.

%% -------------------------------------------------------------------
%% Generators.
%% -------------------------------------------------------------------
elements_gen() ->
    ?LET(Length, oneof([1, 2, 3, 7, 8, 20]),
         ?LET(Elements, list(vector(Length, int())),
              [erlang:list_to_tuple(E) || E <- Elements])).

%% -------------------------------------------------------------------
%% Testcases.
%% -------------------------------------------------------------------

%% TODO: turn tests into properties

incremental_add_element_basics(_Config) ->
    Now = exometer_slide:timestamp(),
    S0 = exometer_slide:new(Now, 10, [{incremental, true},
                                      {interval, 100}]),

    [] = exometer_slide:to_list(Now, S0),
    %% add an element before the next interval
    S1 = exometer_slide:add_element(Now + 10, {1}, S0),

    [] = exometer_slide:to_list(Now + 20, S1),

    %% to_list is not empty, as we take the 'real' total if a full interval
    %% has passed
    Now100 = Now + 100,
    [{Now100, {1}}] = exometer_slide:to_list(Now100, S1),

    Then = Now + 101,
    %% add an element after the interval
    S2 = exometer_slide:add_element(Then, {1}, S1),

    %% contains a single element with the incremented value
    [{Then, {2}}] = exometer_slide:to_list(Then, S2).

incremental_last_two_returns_last_two_completed_samples(_Config) ->
    Now = exometer_slide:timestamp(),
    S0 = exometer_slide:new(Now, 10, [{incremental, true},
                                      {interval, 100}]),

    %% add two full elements then a partial
    Now100 = Now + 100,
    Now200 = Now + 200,
    S1 = exometer_slide:add_element(Now100, {1}, S0),
    S2 = exometer_slide:add_element(Now200, {1}, S1),
    S3 = exometer_slide:add_element(Now + 210, {1}, S2),
    %% the partial sample at Now + 210 is not included
    [{Now200, {2}}, {Now100, {1}}] = exometer_slide:last_two(S3).

incremental_sum(_Config) ->
    Now = exometer_slide:timestamp(),
    S1 = lists:foldl(fun (Next, S) ->
                             exometer_slide:add_element(Now + Next, {1}, S)
                     end,
                     exometer_slide:new(Now, 1000, [{incremental, true}, {interval, 100}]),
                     lists:seq(100, 1000, 100)),
    Now50 = Now - 50,
    S2 = lists:foldl(fun (Next, S) ->
                             exometer_slide:add_element(Now50 + Next, {1}, S)
                     end,
                     exometer_slide:new(Now50, 1000, [{incremental, true}, {interval, 100}]),
                     lists:seq(100, 1000, 100)),
    S3 = exometer_slide:sum([S1, S2]),

    10 = length(exometer_slide:to_list(Now + 1000, S1)),
    10 = length(exometer_slide:to_list(Now + 1000, S2)),
    10 = length(exometer_slide:to_list(Now + 1000, S3)).

incremental_sum_stale(_Config) ->
    Now = 0,
    Slide = exometer_slide:new(Now, 25, [{incremental, true}, {interval, 5}]),

    S1 = lists:foldl(fun (Next, S) ->
                             exometer_slide:add_element(Now + Next, {1}, S)
                     end, Slide, [1, 8, 15, 21, 27]),

    S2 = lists:foldl(fun (Next, S) ->
                             exometer_slide:add_element(Now + Next, {1}, S)
                     end, Slide, [2, 7, 14, 20, 25]),
    S3 = exometer_slide:sum([S1, S2]),
    [27,22,17,12,7] = lists:reverse([T || {T, _} <- exometer_slide:to_list(27, S3)]),
    [10,8,6,4,2] = lists:reverse([V || {_, {V}} <- exometer_slide:to_list(27, S3)]).

incremental_sum_stale2(_Config) ->
    Now = 0,
    Slide = exometer_slide:new(Now, 25, [{incremental, true},
                                         {max_n, 5},
                                         {interval, 5}]),

    S1 = lists:foldl(fun (Next, S) ->
                             exometer_slide:add_element(Now + Next, {1}, S)
                     end, Slide, [5]),

    S2 = lists:foldl(fun (Next, S) ->
                             exometer_slide:add_element(Now + Next, {1}, S)
                     end, Slide, [500, 505, 510, 515, 520, 525, 527]),
    S3 = exometer_slide:sum([S1, S2], {0}),
    [500, 505, 510, 515, 520, 525] = [T || {T, _} <- exometer_slide:to_list(525, S3)],
    [7,6,5,4,3,2] = lists:reverse([V || {_, {V}} <- exometer_slide:to_list(525, S3)]).

incremental_sum_with_drop(_Config) ->
    Now = 0,
    Slide = exometer_slide:new(Now, 25, [{incremental, true},
                                         {max_n, 5},
                                         {interval, 5}]),

    S1 = lists:foldl(fun ({Next, Incr}, S) ->
                             exometer_slide:add_element(Now + Next, {Incr}, S)
                     end, Slide, [{1, 1}, {8, 0}, {15, 0}, {21, 1}, {27, 0}]),

    S2 = lists:foldl(fun (Next, S) ->
                             exometer_slide:add_element(Now + Next, {1}, S)
                     end, Slide, [2, 7, 14, 20, 25]),
    S3 = exometer_slide:sum([S1, S2]),
    [27,22,17,12,7] = lists:reverse([T || {T, _} <- exometer_slide:to_list(27, S3)]),
    [7,6,4,3,2] = lists:reverse([V || {_, {V}} <- exometer_slide:to_list(27, S3)]).

incremental_sum_with_total(_Config) ->
    Now = 0,
    Slide = exometer_slide:new(Now, 50, [{incremental, true}, {interval, 5}]),

    S1 = lists:foldl(fun (Next, S) ->
                             exometer_slide:add_element(Now + Next, {1}, S)
                     end, Slide, [5, 10, 15, 20, 25]),

    S2 = lists:foldl(fun (Next, S) ->
                             exometer_slide:add_element(Now + Next, {1}, S)
                     end, Slide, [7, 12, 17, 22, 23]),
    S3 = exometer_slide:sum([S1, S2]),
    {10} = exometer_slide:last(S3),
    [25,20,15,10,5] = lists:reverse([T || {T, _} <- exometer_slide:to_list(26, S3)]),
    [ 9, 7, 5, 3,1] = lists:reverse([V || {_, {V}} <- exometer_slide:to_list(26, S3)]).

foldl_realises_partial_sample(_Config) ->
    Now = 0,
    Slide = exometer_slide:new(Now, 25, [{incremental, true}, {interval, 5}]),
    S = lists:foldl(fun (Next, S) ->
                            exometer_slide:add_element(Now + Next, {1}, S)
                    end, Slide, [5, 10, 15, 20, 23]),
    Fun = fun(last, Acc) -> Acc;
             ({TS, {X}}, Acc) -> [{TS, X} | Acc]
          end,

    [{25, 5}, {20, 4}, {15, 3}, {10, 2}, {5, 1}] =
        exometer_slide:foldl(25, 5, Fun, [], S),
    [{20, 4}, {15, 3}, {10, 2}, {5, 1}] =
        exometer_slide:foldl(20, 5, Fun, [], S),
    %% do not realise the sample unless Now is at least an interval beyond
    %% the last full sample
    [{20, 4}, {15, 3}, {10, 2}, {5, 1}] =
        exometer_slide:foldl(23, 5, Fun, [], S).

optimize(_Config) ->
    Now = 0,
    Slide = exometer_slide:new(Now, 25, [{interval, 5}, {max_n, 5}]),
    S = lists:foldl(fun (Next, S) ->
                            exometer_slide:add_element(Now + Next, {Next}, S)
                    end, Slide, [5, 10, 15, 20, 25, 30, 35]),
    OS = exometer_slide:optimize(S),
    SRes = exometer_slide:to_list(35, S),
    OSRes = exometer_slide:to_list(35, OS),
    SRes = OSRes,
    ?assert(S =/= OS).

to_list_with_drop(_Config) ->
    Now = 0,
    Slide = exometer_slide:new(Now, 25, [{interval, 5},
                                         {incremental, true},
                                         {max_n, 5}]),
    S = exometer_slide:add_element(30, {1}, Slide),
    S2 = exometer_slide:add_element(35, {1}, S),
    S3 = exometer_slide:add_element(40, {0}, S2),
    S4 = exometer_slide:add_element(45, {0}, S3),
    [{30, {1}}, {35, {2}}, {40, {2}}, {45, {2}}] = exometer_slide:to_list(45, S4).

to_list_simple(_Config) ->
    Now = 0,
    Slide = exometer_slide:new(Now, 25, [{interval, 5},
                                         {incremental, true},
                                         {max_n, 5}]),
    S = exometer_slide:add_element(30, {0}, Slide),
    S2 = exometer_slide:add_element(35, {0}, S),
    [{30, {0}}, {35, {0}}] = exometer_slide:to_list(38, S2).

foldl_with_drop(_Config) ->
    Now = 0,
    Slide = exometer_slide:new(Now, 25, [{interval, 5},
                                         {incremental, true},
                                         {max_n, 5}]),
    S = exometer_slide:add_element(30, {1}, Slide),
    S2 = exometer_slide:add_element(35, {1}, S),
    S3 = exometer_slide:add_element(40, {0}, S2),
    S4 = exometer_slide:add_element(45, {0}, S3),
    Fun = fun(last, Acc) -> Acc;
             ({TS, {X}}, Acc) -> [{TS, X} | Acc]
          end,
    [{45, 2}, {40, 2}, {35, 2}, {30, 1}] =
        exometer_slide:foldl(45, 30, Fun, [], S4).

foldl_and_to_list(_Config) ->
    Now = 0,
    Tests = [ % {input, expected, query range}
              {[],
               [],
               {0, 10}},
              {[{5, 1}],
               [{5, {1}}],
               {0, 5}},
              {[{10, 1}],
               [{10, {1}}],
               {0, 10}},
              {[{5, 1}, {10, 2}],
               [{10, {2}}, {5, {1}}],
               {0, 10}},
              {[{5, 0}, {10, 0}], % drop 1
               [{10, {0}}, {5, {0}}],
               {0, 10}},
              {[{5, 2}, {10, 1}, {15, 1}], % drop 2
               [{15, {1}}, {10, {1}}, {5, {2}}],
               {0, 15}},
              {[{10, 0}, {15, 0}, {20, 0}], % drop
               [{20, {0}}, {15, {0}}, {10, {0}}],
               {0, 20}},
              {[{5, 1}, {10, 5}, {15, 5}, {20, 0}], % drop middle
               [{20, {0}}, {15, {5}}, {10, {5}}, {5, {1}}],
               {0, 20}},
              {[{5, 1}, {10, 5}, {15, 5}, {20, 1}], % drop middle filtered
               [{20, {1}}, {15, {5}}, {10, {5}}],
               {10, 20}},
              {[{5, 1}, {10, 2}, {15, 3}, {20, 4}, {25, 4}, {30, 5}], % buffer roll over
               [{30, {5}}, {25, {4}}, {20, {4}}, {15, {3}}, {10, {2}}, {5, {1}}],
               {5, 30}}
            ],
    Slide = exometer_slide:new(Now, 25, [{interval, 5},
                                         {max_n, 5}]),
    Fun = fun(last, Acc) -> Acc;
             (V, Acc) -> [V | Acc]
          end,
    [begin
         S = lists:foldl(fun ({T, V}, Acc) ->
                                 exometer_slide:add_element(T, {V}, Acc)
                         end, Slide, Inputs),
         Expected = exometer_slide:foldl(To, From, Fun, [], S),
         ExpRev = lists:reverse(Expected),
         ExpRev = exometer_slide:to_list(To, From, S)
     end || {Inputs, Expected, {From, To}} <- Tests].

foldl_and_to_list_incremental(_Config) ->
    Now = 0,
    Tests = [ % {input, expected, query range}
              {[],
               [],
               {0, 10}},
              {[{5, 1}],
               [{5, {1}}],
               {0, 5}},
              {[{10, 1}],
               [{10, {1}}],
               {0, 10}},
              {[{5, 1}, {10, 1}],
               [{10, {2}}, {5, {1}}],
               {0, 10}},
              {[{5, 0}, {10, 0}], % drop 1
               [{10, {0}}, {5, {0}}],
               {0, 10}},
              {[{5, 1}, {10, 0}, {15, 0}], % drop 2
               [{15, {1}}, {10, {1}}, {5, {1}}],
               {0, 15}},
              {[{10, 0}, {15, 0}, {20, 0}], % drop
               [{20, {0}}, {15, {0}}, {10, {0}}],
               {0, 20}},
              {[{5, 1}, {10, 0}, {15, 0}, {20, 1}], % drop middle
               [{20, {2}}, {15, {1}}, {10, {1}}, {5, {1}}],
               {0, 20}},
              {[{5, 1}, {10, 0}, {15, 0}, {20, 1}], % drop middle filtered
               [{20, {2}}, {15, {1}}, {10, {1}}],
               {10, 20}},
              {[{5, 1}, {10, 1}, {15, 1}, {20, 1}, {25, 0}, {30, 1}], % buffer roll over
               [{30, {5}}, {25, {4}}, {20, {4}}, {15, {3}}, {10, {2}}, {5, {1}}],
               {5, 30}}
            ],
    Slide = exometer_slide:new(Now, 25, [{interval, 5},
                                         {incremental, true},
                                         {max_n, 5}]),
    Fun = fun(last, Acc) -> Acc;
             (V, Acc) -> [V | Acc]
          end,
    [begin
         S = lists:foldl(fun ({T, V}, Acc) ->
                                 exometer_slide:add_element(T, {V}, Acc)
                         end, Slide, Inputs),
         Expected = exometer_slide:foldl(To, From, Fun, [], S),
         ExpRev = lists:reverse(Expected),
         ExpRev = exometer_slide:to_list(To, From, S)
     end || {Inputs, Expected, {From, To}} <- Tests].

stale_to_list(_Config) ->
    Now = 0,
    Slide = exometer_slide:new(Now, 25, [{interval, 5}, {max_n, 5}]),
    S = exometer_slide:add_element(50, {1}, Slide),
    S2 = exometer_slide:add_element(55, {1}, S),
    [] = exometer_slide:to_list(100, S2).

to_list_single_after_drop(_Config) ->
    Now = 0,
    Slide = exometer_slide:new(Now, 25, [{interval, 5},
                                         {incremental, true},
                                         {max_n, 5}]),
    S = exometer_slide:add_element(5, {0}, Slide),
    S2 = exometer_slide:add_element(10, {0}, S),
    S3 = exometer_slide:add_element(15, {1}, S2),
    Res = exometer_slide:to_list(17, S3),
    [{5,{0}},{10,{0}},{15,{1}}] = Res.

to_list_drop_and_roll(_Config) ->
    Now = 0,
    Slide = exometer_slide:new(Now, 10, [{interval, 5},
                                         {incremental, true},
                                         {max_n, 5}]),
    S = exometer_slide:add_element(5, {0}, Slide),
    S2 = exometer_slide:add_element(10, {0}, S),
    S3 = exometer_slide:add_element(15, {0}, S2),
    [{10, {0}}, {15, {0}}] = exometer_slide:to_list(17, S3).

sum_single(_Config) ->
    Now = 0,
    Slide = exometer_slide:new(Now, 25, [{interval, 5},
                                         {incremental, true},
                                         {max_n, 5}]),
    S = exometer_slide:add_element(Now + 5, {0}, Slide),
    S2 = exometer_slide:add_element(Now + 10, {0}, S),
    Summed = exometer_slide:sum([S2]),
    [_,_] = exometer_slide:to_list(15, Summed).
|
||||
|
||||
to_normalized_list(_Config) ->
|
||||
Interval = 5,
|
||||
Tests = [ % {input, expected, query range}
|
||||
{[], % zero pad when slide has never seen any samples
|
||||
[{10, {0}}, {5, {0}}, {0, {0}}],
|
||||
{0, 10}},
|
||||
{[{5, 1}], % zero pad before first known sample
|
||||
[{5, {1}}, {0, {0}}],
|
||||
{0, 5}},
|
||||
{[{10, 1}, {15, 1}], % zero pad before last know sample
|
||||
[{15, {2}}, {10, {1}}, {5, {0}}],
|
||||
{5, 15}},
|
||||
{[{5, 1}, {15, 1}], % insert missing sample using previous total
|
||||
[{15, {2}}, {10, {1}}, {5, {1}}],
|
||||
{5, 15}},
|
||||
% {[{6, 1}, {11, 1}, {16, 1}], % align timestamps with query
|
||||
% [{15, {3}}, {10, {2}}, {5, {1}}, {0, {0}}],
|
||||
% {0, 15}},
|
||||
{[{5, 1}, {10, 1}, {15, 1}, {20, 1}, {25, 1}, {30, 1}], % outside of max_n
|
||||
[{30, {6}}, {25, {5}}, {20, {4}}, {15, {3}}, {10, {2}}], % we cannot possibly be expected deduce what 10 should be
|
||||
{10, 30}},
|
||||
{[{5, 1}, {20, 1}, {25, 1}], % as long as the past TS 5 sample still exists we should use to for padding
|
||||
[{25, {3}}, {20, {2}}, {15, {1}}, {10, {1}}],
|
||||
{10, 25}},
|
||||
{[{5, 1}, {10, 1}], % pad based on total
|
||||
[{35, {2}}, {30, {2}}],
|
||||
{30, 35}},
|
||||
{[{5, 1}], % make up future values to fill the window
|
||||
[{10, {1}}, {5, {1}}],
|
||||
{5, 10}},
|
||||
{[{5, 1}, {7, 1}], % realise last sample
|
||||
[{10, {2}}, {5, {1}}],
|
||||
{5, 10}}
|
||||
],
|
||||
|
||||
Slide = exometer_slide:new(0, 20, [{interval, 5},
|
||||
{incremental, true},
|
||||
{max_n, 4}]),
|
||||
[begin
|
||||
S0 = lists:foldl(fun ({T, V}, Acc) ->
|
||||
exometer_slide:add_element(T, {V}, Acc)
|
||||
end, Slide, Inputs),
|
||||
Expected = exometer_slide:to_normalized_list(To, From, Interval, S0, {0}),
|
||||
S = exometer_slide:sum([exometer_slide:optimize(S0)], {0}), % also test it post sum
|
||||
Expected = exometer_slide:to_normalized_list(To, From, Interval, S, {0})
|
||||
end || {Inputs, Expected, {From, To}} <- Tests].
|
||||
|
||||
to_normalized_list_no_padding(_Config) ->
|
||||
Interval = 5,
|
||||
Tests = [ % {input, expected, query range}
|
||||
{[],
|
||||
[],
|
||||
{0, 10}},
|
||||
{[{5, 1}],
|
||||
[{5, {1}}],
|
||||
{0, 5}},
|
||||
{[{5, 1}, {15, 1}],
|
||||
[{15, {2}}, {10, {1}}, {5, {1}}],
|
||||
{5, 15}},
|
||||
{[{10, 1}, {15, 1}],
|
||||
[{15, {2}}, {10, {1}}],
|
||||
{5, 15}},
|
||||
{[{5, 1}, {20, 1}], % NB as 5 is outside of the query we can't pick the value up
|
||||
[{20, {2}}],
|
||||
{10, 20}}
|
||||
],
|
||||
|
||||
Slide = exometer_slide:new(0, 20, [{interval, 5},
|
||||
{incremental, true},
|
||||
{max_n, 4}]),
|
||||
[begin
|
||||
S = lists:foldl(fun ({T, V}, Acc) ->
|
||||
exometer_slide:add_element(T, {V}, Acc)
|
||||
end, Slide, Inputs),
|
||||
Expected = exometer_slide:to_normalized_list(To, From, Interval, S, no_pad)
|
||||
end || {Inputs, Expected, {From, To}} <- Tests].
|
||||
|
||||
to_list_in_the_past(_Config) ->
|
||||
Slide = exometer_slide:new(0, 20, [{interval, 5},
|
||||
{incremental, true},
|
||||
{max_n, 4}]),
|
||||
% ensure firstTS is way in the past
|
||||
S0 = exometer_slide:add_element(5, {1}, Slide),
|
||||
S1 = exometer_slide:add_element(105, {0}, S0),
|
||||
S = exometer_slide:add_element(110, {0}, S1), % create drop
|
||||
% query into the past
|
||||
% this could happen if a node with and incorrect clock joins the cluster
|
||||
[] = exometer_slide:to_list(50, 10, S).
|
||||
|
||||
%% -------------------------------------------------------------------
|
||||
%% Util
|
||||
%% -------------------------------------------------------------------
|
||||
|
||||
ele(TS, V) -> {TS, {V}}.
|
||||
|
|
@ -0,0 +1,94 @@
|
|||
%% The contents of this file are subject to the Mozilla Public License
%% Version 1.1 (the "License"); you may not use this file except in
%% compliance with the License. You may obtain a copy of the License
%% at http://www.mozilla.org/MPL/
%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and
%% limitations under the License.
%%
%% The Original Code is RabbitMQ.
%%
%% The Initial Developer of the Original Code is GoPivotal, Inc.
%% Copyright (c) 2016 Pivotal Software, Inc. All rights reserved.
%%
-module(metrics_SUITE).
-compile(export_all).

-include_lib("common_test/include/ct.hrl").

all() ->
    [
     {group, non_parallel_tests}
    ].

groups() ->
    [
     {non_parallel_tests, [], [
                               node
                              ]}
    ].

%% -------------------------------------------------------------------
%% Testsuite setup/teardown.
%% -------------------------------------------------------------------

merge_app_env(Config) ->
    rabbit_ct_helpers:merge_app_env(Config,
                                    {rabbit, [
                                              {collect_statistics, fine},
                                              {collect_statistics_interval, 500}
                                             ]}).

init_per_suite(Config) ->
    rabbit_ct_helpers:log_environment(),
    Config1 = rabbit_ct_helpers:set_config(Config, [
                                                    {rmq_nodename_suffix, ?MODULE},
                                                    {rmq_nodes_count, 2}
                                                   ]),
    rabbit_ct_helpers:run_setup_steps(Config1,
                                      [ fun merge_app_env/1 ] ++
                                      rabbit_ct_broker_helpers:setup_steps()).

end_per_suite(Config) ->
    rabbit_ct_helpers:run_teardown_steps(Config,
                                         rabbit_ct_broker_helpers:teardown_steps()).

init_per_group(_, Config) ->
    Config.

end_per_group(_, Config) ->
    Config.

init_per_testcase(Testcase, Config) ->
    rabbit_ct_helpers:testcase_started(Config, Testcase).

end_per_testcase(Testcase, Config) ->
    rabbit_ct_helpers:testcase_finished(Config, Testcase).

%% -------------------------------------------------------------------
%% Testcases.
%% -------------------------------------------------------------------

read_table_rpc(Config, Table) ->
    rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, read_table, [Table]).

read_table(Table) ->
    ets:tab2list(Table).

force_stats() ->
    rabbit_mgmt_external_stats ! emit_update.
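
%% Note (added for clarity; an assumption based on how force_stats/0 is
%% used below): emit_update appears to be the timer message that
%% rabbit_mgmt_external_stats handles to publish node stats, so sending it
%% directly forces an immediate refresh instead of waiting out the
%% collect_statistics_interval.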

node(Config) ->
    % force multiple stats refreshes
    [ rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, force_stats, [])
      || _ <- lists:seq(0, 10)],
    [_] = read_table_rpc(Config, node_persister_metrics),
    [_] = read_table_rpc(Config, node_coarse_metrics),
    [_] = read_table_rpc(Config, node_metrics),
    timer:sleep(100),
    [_, _, _] = read_table_rpc(Config, node_node_metrics). % 3 nodes as ct-helpers adds one

@ -0,0 +1,180 @@
%% The contents of this file are subject to the Mozilla Public License
%% Version 1.1 (the "License"); you may not use this file except in
%% compliance with the License. You may obtain a copy of the License at
%% http://www.mozilla.org/MPL/
%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
%% License for the specific language governing rights and limitations
%% under the License.
%%
%% Copyright (c) 2016 Pivotal Software, Inc. All rights reserved.
%%

-module(rabbit_mgmt_slide_SUITE).

-include_lib("proper/include/proper.hrl").

-compile(export_all).

all() ->
    [
     {group, parallel_tests}
    ].

groups() ->
    [
     {parallel_tests, [parallel], [
                                   last_two_test,
                                   last_two_incremental_test,
                                   sum_test,
                                   sum_incremental_test
                                  ]}
    ].

%% -------------------------------------------------------------------
%% Testsuite setup/teardown.
%% -------------------------------------------------------------------
init_per_suite(Config) ->
    rabbit_ct_helpers:log_environment(),
    Config.

end_per_suite(_Config) ->
    ok.

init_per_group(_, Config) ->
    Config.

end_per_group(_, _Config) ->
    ok.

init_per_testcase(_, Config) ->
    Config.

end_per_testcase(_, _Config) ->
    ok.

%% -------------------------------------------------------------------
%% Generators.
%% -------------------------------------------------------------------
elements_gen() ->
    ?LET(Length, oneof([1, 2, 3, 7, 8, 20]),
         ?LET(Elements, list(vector(Length, int())),
              [erlang:list_to_tuple(E) || E <- Elements])).
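
%% Illustration (added; assumes a shell with PropEr loaded): the generator
%% yields lists of equal-arity integer tuples, the shape a slide sample
%% takes in these properties:
%%     proper_gen:pick(elements_gen()).
%%     %% e.g. {ok, [{0, -1}, {2, 3}]}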

%% -------------------------------------------------------------------
%% Testcases.
%% -------------------------------------------------------------------
last_two_test(_Config) ->
    rabbit_ct_proper_helpers:run_proper(fun prop_last_two/0, [], 100).

prop_last_two() ->
    ?FORALL(
       Elements, elements_gen(),
       begin
           Interval = 1,
           Incremental = false,
           {_LastTS, Slide} = new_slide(Interval, Incremental, Elements),
           Expected = last_two(Elements),
           ValuesOnly = [V || {_Timestamp, V} <- exometer_slide:last_two(Slide)],
           ?WHENFAIL(io:format("Last two values obtained: ~p~nExpected: ~p~n"
                               "Slide: ~p~n", [ValuesOnly, Expected, Slide]),
                     Expected == ValuesOnly)
       end).

last_two_incremental_test(_Config) ->
    rabbit_ct_proper_helpers:run_proper(fun prop_last_two_incremental/0, [], 100).

prop_last_two_incremental() ->
    ?FORALL(
       Elements, non_empty(elements_gen()),
       begin
           Interval = 1,
           Incremental = true,
           {_LastTS, Slide} = new_slide(Interval, Incremental, Elements),
           [{_Timestamp, Values} | _] = exometer_slide:last_two(Slide),
           Expected = add_elements(Elements),
           ?WHENFAIL(io:format("Expected a total of: ~p~nGot: ~p~n"
                               "Slide: ~p~n", [Expected, Values, Slide]),
                     Values == Expected)
       end).

sum_incremental_test(_Config) ->
    rabbit_ct_proper_helpers:run_proper(fun prop_sum/1, [true], 100).

sum_test(_Config) ->
    rabbit_ct_proper_helpers:run_proper(fun prop_sum/1, [false], 100).

prop_sum(Inc) ->
    ?FORALL(
       {Elements, Number}, {non_empty(elements_gen()), ?SUCHTHAT(I, int(), I > 0)},
       begin
           Interval = 1,
           {LastTS, Slide} = new_slide(Interval, Inc, Elements),
           %% Sum the same slide with itself so the timestamps match. As the
           %% timestamps are handled internally, we cannot otherwise guarantee
           %% which interval they land on (unless we manually manipulate the
           %% slide content).
           Sum = exometer_slide:sum([Slide || _ <- lists:seq(1, Number)]),
           Values = [V || {_TS, V} <- exometer_slide:to_list(LastTS + 1, Sum)],
           Expected = expected_sum(Slide, LastTS + 1, Number, Interval, Inc),
           ?WHENFAIL(io:format("Expected: ~p~nGot: ~p~nSlide:~p~n",
                               [Expected, Values, Slide]),
                     Values == Expected)
       end).

expected_sum(Slide, Now, Number, _Int, false) ->
    [sum_n_times(V, Number) || {_TS, V} <- exometer_slide:to_list(Now, Slide)];
expected_sum(Slide, Now, Number, Int, true) ->
    [{TSfirst, First} = F | Rest] = All = exometer_slide:to_list(Now, Slide),
    {TSlast, _Last} = case Rest of
                          [] ->
                              F;
                          _ ->
                              lists:last(Rest)
                      end,
    Seq = lists:seq(TSfirst, TSlast, Int),
    {Expected, _} = lists:foldl(fun(TS0, {Acc, Previous}) ->
                                        Actual = proplists:get_value(TS0, All, Previous),
                                        {[sum_n_times(Actual, Number) | Acc], Actual}
                                end, {[], First}, Seq),
    lists:reverse(Expected).
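
%% Worked example (added for clarity): if to_list/2 yields
%% [{1, {1}}, {3, {2}}] with Int = 1 and Number = 2, the fold visits
%% timestamps 1, 2 and 3; 2 is absent, so the previous value {1} is carried
%% over, giving [{2}, {2}, {4}] once each sample is summed Number times.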

%% -------------------------------------------------------------------
%% Helpers
%% -------------------------------------------------------------------
new_slide(Interval, Incremental, Elements) ->
    new_slide(Interval, Interval, Incremental, Elements).

new_slide(PublishInterval, Interval, Incremental, Elements) ->
    Now = 0,
    Slide = exometer_slide:new(Now, 60 * 1000, [{interval, Interval},
                                                {incremental, Incremental}]),
    lists:foldl(
      fun(E, {TS0, Acc}) ->
              TS1 = TS0 + PublishInterval,
              {TS1, exometer_slide:add_element(TS1, E, Acc)}
      end, {Now, Slide}, Elements).
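
%% For instance (added illustration): new_slide(1, true, [{1}, {2}]) adds
%% {1} at TS 1 and {2} at TS 2 to an incremental slide and returns
%% {2, Slide}, i.e. the last timestamp used alongside the slide itself.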

last_two(Elements) when length(Elements) >= 2 ->
    [F, S | _] = lists:reverse(Elements),
    [F, S];
last_two(Elements) ->
    Elements.
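
%% e.g. (added illustration) last_two([{1}, {2}, {3}]) returns [{3}, {2}]:
%% the two most recent elements, newest first, matching the order
%% exometer_slide:last_two/1 is checked against in the properties above.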

add_elements([H | T]) ->
    add_elements(T, H).

add_elements([], Acc) ->
    Acc;
add_elements([Tuple | T], Acc) ->
    add_elements(T, sum(Tuple, Acc)).

sum(T1, T2) ->
    list_to_tuple(lists:zipwith(fun(A, B) -> A + B end, tuple_to_list(T1), tuple_to_list(T2))).
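
%% e.g. (added illustration) sum({1, 2}, {3, 4}) -> {4, 6}, and
%% add_elements([{1, 2}, {3, 4}, {5, 6}]) -> {9, 12}: an element-wise
%% running total over equal-arity tuples.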

sum_n_times(V, N) ->
    sum_n_times(V, V, N - 1).

sum_n_times(_V, Acc, 0) ->
    Acc;
sum_n_times(V, Acc, N) ->
    sum_n_times(V, sum(V, Acc), N - 1).
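
%% e.g. (added illustration) sum_n_times({2, 1}, 3) -> {6, 3}: the tuple
%% added to itself for a total of N occurrences, mirroring a slide summed
%% with N copies of itself in prop_sum/1.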