mirror of https://github.com/apache/kafka.git
KAFKA-8941: Add RocksDB Metrics that Could not be Added due to RocksDB Version (#11441)
This PR adds RocksDB metrics that could not be added in KIP-471 because of the RocksDB version in use at the time. The new metrics are extracted from histogram data provided by the RocksDB API, whereas the existing ones are extracted from tickers. The metrics added are memtable-flush-time-(avg|min|max) and compaction-time-(avg|min|max).

Reviewer: Bruno Cadonna <cadonna@apache.org>
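For readers unfamiliar with the RocksDB Java API, here is a minimal, self-contained sketch of the histogram-based extraction pattern this commit relies on. It is illustrative only: the class name and database path are made up, and the avg/min/max derivation mirrors what the recorder in the diff below computes per store.

import org.rocksdb.HistogramData;
import org.rocksdb.HistogramType;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.Statistics;

public class FlushTimeHistogramSketch {
    public static void main(final String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (final Statistics statistics = new Statistics();
             final Options options = new Options()
                 .setCreateIfMissing(true)
                 .setStatistics(statistics)) {
            try (final RocksDB db = RocksDB.open(options, "/tmp/histogram-sketch-db")) {
                // Writes to db would eventually trigger memtable flushes,
                // which RocksDB records in the FLUSH_TIME histogram.
                final HistogramData flushTime =
                    statistics.getHistogramData(HistogramType.FLUSH_TIME);
                // The average is derived from sum/count, guarding against an empty histogram.
                final double avg = flushTime.getCount() == 0
                    ? 0.0
                    : (double) flushTime.getSum() / flushTime.getCount();
                System.out.printf("memtable flush time ms: avg=%.2f min=%.2f max=%.2f%n",
                    avg, flushTime.getMin(), flushTime.getMax());
            }
        }
    }
}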
This commit is contained in:
parent 3aef0a5ceb
commit 252a40ea1f
docs/ops.html
@@ -2891,6 +2891,21 @@ for built-in state stores, currently we have:
        <td>The ratio of memtable hits relative to all lookups to the memtable.</td>
        <td>kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+)</td>
      </tr>
      <tr>
        <td>memtable-flush-time-avg</td>
        <td>The average duration of memtable flushes to disc in ms.</td>
        <td>kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+)</td>
      </tr>
      <tr>
        <td>memtable-flush-time-min</td>
        <td>The minimum duration of memtable flushes to disc in ms.</td>
        <td>kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+)</td>
      </tr>
      <tr>
        <td>memtable-flush-time-max</td>
        <td>The maximum duration of memtable flushes to disc in ms.</td>
        <td>kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+)</td>
      </tr>
      <tr>
        <td>block-cache-data-hit-ratio</td>
        <td>The ratio of block cache hits for data blocks relative to all lookups for data blocks to the block cache.</td>
@@ -2926,6 +2941,21 @@ for built-in state stores, currently we have:
        <td>The average number of bytes written per second during compaction.</td>
        <td>kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+)</td>
      </tr>
      <tr>
        <td>compaction-time-avg</td>
        <td>The average duration of disc compactions in ms.</td>
        <td>kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+)</td>
      </tr>
      <tr>
        <td>compaction-time-min</td>
        <td>The minimum duration of disc compactions in ms.</td>
        <td>kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+)</td>
      </tr>
      <tr>
        <td>compaction-time-max</td>
        <td>The maximum duration of disc compactions in ms.</td>
        <td>kafka.streams:type=stream-state-metrics,thread-id=([-.\w]+),task-id=([-.\w]+),[store-scope]-id=([-.\w]+)</td>
      </tr>
      <tr>
        <td>number-open-files</td>
        <td>The number of current open files.</td>
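As a usage note, the new attributes are exposed through the MBeans named above. Below is a minimal sketch of reading one of them over JMX from inside the same JVM; the thread id, task id, and [store-scope]-id property values are illustrative placeholders that must match what your Kafka Streams application actually reports.

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class ReadFlushTimeMetricSketch {
    public static void main(final String[] args) throws Exception {
        final MBeanServer server = ManagementFactory.getPlatformMBeanServer();
        // Placeholder object name; substitute the real thread id, task id,
        // and [store-scope]-id property (e.g. rocksdb-state-id for key-value stores).
        final ObjectName name = new ObjectName(
            "kafka.streams:type=stream-state-metrics,"
                + "thread-id=example-thread-1,task-id=0_0,rocksdb-state-id=example-store");
        final Object avgFlushTimeMs = server.getAttribute(name, "memtable-flush-time-avg");
        System.out.println("memtable-flush-time-avg = " + avgFlushTimeMs);
    }
}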
streams/src/main/java/org/apache/kafka/streams/state/internals/metrics/RocksDBMetricsRecorder.java
@@ -24,6 +24,8 @@ import org.apache.kafka.streams.processor.TaskId;
import org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl;
import org.apache.kafka.streams.state.internals.metrics.RocksDBMetrics.RocksDBMetricContext;
import org.rocksdb.Cache;
import org.rocksdb.HistogramData;
import org.rocksdb.HistogramType;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.Statistics;
@@ -94,12 +96,18 @@ public class RocksDBMetricsRecorder {
    private Sensor bytesReadFromDatabaseSensor;
    private Sensor memtableBytesFlushedSensor;
    private Sensor memtableHitRatioSensor;
    private Sensor memtableAvgFlushTimeSensor;
    private Sensor memtableMinFlushTimeSensor;
    private Sensor memtableMaxFlushTimeSensor;
    private Sensor writeStallDurationSensor;
    private Sensor blockCacheDataHitRatioSensor;
    private Sensor blockCacheIndexHitRatioSensor;
    private Sensor blockCacheFilterHitRatioSensor;
    private Sensor bytesReadDuringCompactionSensor;
    private Sensor bytesWrittenDuringCompactionSensor;
    private Sensor compactionTimeAvgSensor;
    private Sensor compactionTimeMinSensor;
    private Sensor compactionTimeMaxSensor;
    private Sensor numberOfOpenFilesSensor;
    private Sensor numberOfFileErrorsSensor;
@@ -213,6 +221,9 @@ public class RocksDBMetricsRecorder {
        bytesReadFromDatabaseSensor = RocksDBMetrics.bytesReadFromDatabaseSensor(streamsMetrics, metricContext);
        memtableBytesFlushedSensor = RocksDBMetrics.memtableBytesFlushedSensor(streamsMetrics, metricContext);
        memtableHitRatioSensor = RocksDBMetrics.memtableHitRatioSensor(streamsMetrics, metricContext);
        memtableAvgFlushTimeSensor = RocksDBMetrics.memtableAvgFlushTimeSensor(streamsMetrics, metricContext);
        memtableMinFlushTimeSensor = RocksDBMetrics.memtableMinFlushTimeSensor(streamsMetrics, metricContext);
        memtableMaxFlushTimeSensor = RocksDBMetrics.memtableMaxFlushTimeSensor(streamsMetrics, metricContext);
        writeStallDurationSensor = RocksDBMetrics.writeStallDurationSensor(streamsMetrics, metricContext);
        blockCacheDataHitRatioSensor = RocksDBMetrics.blockCacheDataHitRatioSensor(streamsMetrics, metricContext);
        blockCacheIndexHitRatioSensor = RocksDBMetrics.blockCacheIndexHitRatioSensor(streamsMetrics, metricContext);
@@ -220,6 +231,9 @@ public class RocksDBMetricsRecorder {
        bytesWrittenDuringCompactionSensor =
            RocksDBMetrics.bytesWrittenDuringCompactionSensor(streamsMetrics, metricContext);
        bytesReadDuringCompactionSensor = RocksDBMetrics.bytesReadDuringCompactionSensor(streamsMetrics, metricContext);
        compactionTimeAvgSensor = RocksDBMetrics.compactionTimeAvgSensor(streamsMetrics, metricContext);
        compactionTimeMinSensor = RocksDBMetrics.compactionTimeMinSensor(streamsMetrics, metricContext);
        compactionTimeMaxSensor = RocksDBMetrics.compactionTimeMaxSensor(streamsMetrics, metricContext);
        numberOfOpenFilesSensor = RocksDBMetrics.numberOfOpenFilesSensor(streamsMetrics, metricContext);
        numberOfFileErrorsSensor = RocksDBMetrics.numberOfFileErrorsSensor(streamsMetrics, metricContext);
    }
@@ -426,6 +440,14 @@ public class RocksDBMetricsRecorder {
        long bytesReadDuringCompaction = 0;
        long numberOfOpenFiles = 0;
        long numberOfFileErrors = 0;
        long memtableFlushTimeSum = 0;
        long memtableFlushTimeCount = 0;
        double memtableFlushTimeMin = Double.MAX_VALUE;
        double memtableFlushTimeMax = 0.0;
        long compactionTimeSum = 0;
        long compactionTimeCount = 0;
        double compactionTimeMin = Double.MAX_VALUE;
        double compactionTimeMax = 0.0;
        boolean shouldRecord = true;
        for (final DbAndCacheAndStatistics valueProviders : storeToValueProviders.values()) {
            if (valueProviders.statistics == null) {
@@ -449,18 +471,34 @@ public class RocksDBMetricsRecorder {
            numberOfOpenFiles += valueProviders.statistics.getAndResetTickerCount(TickerType.NO_FILE_OPENS)
                - valueProviders.statistics.getAndResetTickerCount(TickerType.NO_FILE_CLOSES);
            numberOfFileErrors += valueProviders.statistics.getAndResetTickerCount(TickerType.NO_FILE_ERRORS);
            final HistogramData memtableFlushTimeData = valueProviders.statistics.getHistogramData(HistogramType.FLUSH_TIME);
            memtableFlushTimeSum += memtableFlushTimeData.getSum();
            memtableFlushTimeCount += memtableFlushTimeData.getCount();
            memtableFlushTimeMin = Double.min(memtableFlushTimeMin, memtableFlushTimeData.getMin());
            memtableFlushTimeMax = Double.max(memtableFlushTimeMax, memtableFlushTimeData.getMax());
            final HistogramData compactionTimeData = valueProviders.statistics.getHistogramData(HistogramType.COMPACTION_TIME);
            compactionTimeSum += compactionTimeData.getSum();
            compactionTimeCount += compactionTimeData.getCount();
            compactionTimeMin = Double.min(compactionTimeMin, compactionTimeData.getMin());
            compactionTimeMax = Double.max(compactionTimeMax, compactionTimeData.getMax());
        }
        if (shouldRecord) {
            bytesWrittenToDatabaseSensor.record(bytesWrittenToDatabase, now);
            bytesReadFromDatabaseSensor.record(bytesReadFromDatabase, now);
            memtableBytesFlushedSensor.record(memtableBytesFlushed, now);
            memtableHitRatioSensor.record(computeHitRatio(memtableHits, memtableMisses), now);
            memtableAvgFlushTimeSensor.record(computeAvg(memtableFlushTimeSum, memtableFlushTimeCount), now);
            memtableMinFlushTimeSensor.record(memtableFlushTimeMin, now);
            memtableMaxFlushTimeSensor.record(memtableFlushTimeMax, now);
            blockCacheDataHitRatioSensor.record(computeHitRatio(blockCacheDataHits, blockCacheDataMisses), now);
            blockCacheIndexHitRatioSensor.record(computeHitRatio(blockCacheIndexHits, blockCacheIndexMisses), now);
            blockCacheFilterHitRatioSensor.record(computeHitRatio(blockCacheFilterHits, blockCacheFilterMisses), now);
            writeStallDurationSensor.record(writeStallDuration, now);
            bytesWrittenDuringCompactionSensor.record(bytesWrittenDuringCompaction, now);
            bytesReadDuringCompactionSensor.record(bytesReadDuringCompaction, now);
            compactionTimeAvgSensor.record(computeAvg(compactionTimeSum, compactionTimeCount), now);
            compactionTimeMinSensor.record(compactionTimeMin, now);
            compactionTimeMaxSensor.record(compactionTimeMax, now);
            numberOfOpenFilesSensor.record(numberOfOpenFiles, now);
            numberOfFileErrorsSensor.record(numberOfFileErrors, now);
        }
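Worth noting about the aggregation above: across all stores of a task, the average is count-weighted (total sum over total count via computeAvg), not an average of per-store averages, while min and max fold with Double.min/Double.max from the Double.MAX_VALUE and 0.0 seeds. A tiny standalone sketch, reusing the flush-time figures from the test below, shows why the distinction matters:

public class WeightedAvgSketch {
    public static void main(final String[] args) {
        // Two stores' flush-time histograms: (sum of flush durations in ms, flush count).
        final long[] sums = {10L, 8L};
        final long[] counts = {2L, 4L};

        long totalSum = 0;
        long totalCount = 0;
        for (int i = 0; i < sums.length; i++) {
            totalSum += sums[i];
            totalCount += counts[i];
        }
        // Count-weighted average, as the recorder computes: (10 + 8) / (2 + 4) = 3.0 ms.
        final double weighted = totalCount == 0 ? 0.0 : (double) totalSum / totalCount;
        // A naive average of per-store averages would give (5.0 + 2.0) / 2 = 3.5 ms.
        final double naive = ((double) sums[0] / counts[0] + (double) sums[1] / counts[1]) / 2;
        System.out.printf("weighted=%.1f naive=%.1f%n", weighted, naive);
    }
}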
@@ -472,4 +510,11 @@ public class RocksDBMetricsRecorder {
        }
        return (double) hits / (hits + misses);
    }

    private double computeAvg(final long sum, final long count) {
        if (count == 0) {
            return 0;
        }
        return (double) sum / count;
    }
}
streams/src/test/java/org/apache/kafka/streams/state/internals/metrics/RocksDBMetricsRecorderTest.java
@@ -29,6 +29,8 @@ import org.junit.runner.RunWith;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
import org.rocksdb.Cache;
import org.rocksdb.HistogramData;
import org.rocksdb.HistogramType;
import org.rocksdb.RocksDB;
import org.rocksdb.Statistics;
import org.rocksdb.StatsLevel;
@@ -74,12 +76,18 @@ public class RocksDBMetricsRecorderTest {
    private final Sensor bytesReadFromDatabaseSensor = createMock(Sensor.class);
    private final Sensor memtableBytesFlushedSensor = createMock(Sensor.class);
    private final Sensor memtableHitRatioSensor = createMock(Sensor.class);
    private final Sensor memtableAvgFlushTimeSensor = createMock(Sensor.class);
    private final Sensor memtableMinFlushTimeSensor = createMock(Sensor.class);
    private final Sensor memtableMaxFlushTimeSensor = createMock(Sensor.class);
    private final Sensor writeStallDurationSensor = createMock(Sensor.class);
    private final Sensor blockCacheDataHitRatioSensor = createMock(Sensor.class);
    private final Sensor blockCacheIndexHitRatioSensor = createMock(Sensor.class);
    private final Sensor blockCacheFilterHitRatioSensor = createMock(Sensor.class);
    private final Sensor bytesReadDuringCompactionSensor = createMock(Sensor.class);
    private final Sensor bytesWrittenDuringCompactionSensor = createMock(Sensor.class);
    private final Sensor compactionTimeAvgSensor = createMock(Sensor.class);
    private final Sensor compactionTimeMinSensor = createMock(Sensor.class);
    private final Sensor compactionTimeMaxSensor = createMock(Sensor.class);
    private final Sensor numberOfOpenFilesSensor = createMock(Sensor.class);
    private final Sensor numberOfFileErrorsSensor = createMock(Sensor.class);
@@ -400,6 +408,17 @@ public class RocksDBMetricsRecorderTest {
        memtableHitRatioSensor.record((double) 4 / (4 + 6), 0L);
        replay(memtableHitRatioSensor);

        final HistogramData memtableFlushTimeData1 = new HistogramData(0.0, 0.0, 0.0, 0.0, 0.0, 16.0, 2L, 10L, 3.0);
        final HistogramData memtableFlushTimeData2 = new HistogramData(0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 4L, 8L, 10.0);
        expect(statisticsToAdd1.getHistogramData(HistogramType.FLUSH_TIME)).andReturn(memtableFlushTimeData1);
        expect(statisticsToAdd2.getHistogramData(HistogramType.FLUSH_TIME)).andReturn(memtableFlushTimeData2);
        memtableAvgFlushTimeSensor.record((double) (10 + 8) / (2 + 4), 0L);
        replay(memtableAvgFlushTimeSensor);
        memtableMinFlushTimeSensor.record(3.0, 0L);
        replay(memtableMinFlushTimeSensor);
        memtableMaxFlushTimeSensor.record(20.0, 0L);
        replay(memtableMaxFlushTimeSensor);

        expect(statisticsToAdd1.getAndResetTickerCount(TickerType.STALL_MICROS)).andReturn(4L);
        expect(statisticsToAdd2.getAndResetTickerCount(TickerType.STALL_MICROS)).andReturn(5L);
        writeStallDurationSensor.record(4 + 5, 0L);
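The positional HistogramData constructor in this test is easy to misread. Working backwards from the expected recordings, the argument order must be median, p95, p99, average, standard deviation, max, count, sum, min; this is an inference from the arithmetic here rather than from the RocksDB javadoc, so double-check it against the RocksDB version on your classpath. A sketch reproducing the expected values:

import org.rocksdb.HistogramData;

public class HistogramDataOrderSketch {
    public static void main(final String[] args) {
        // Inferred argument order: median, p95, p99, average, stdDev, max, count, sum, min.
        final HistogramData d1 = new HistogramData(0.0, 0.0, 0.0, 0.0, 0.0, 16.0, 2L, 10L, 3.0);
        final HistogramData d2 = new HistogramData(0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 4L, 8L, 10.0);
        // Reproduce the sensor expectations from the test above:
        final double avg = (double) (d1.getSum() + d2.getSum()) / (d1.getCount() + d2.getCount());
        final double min = Double.min(d1.getMin(), d2.getMin());
        final double max = Double.max(d1.getMax(), d2.getMax());
        System.out.printf("avg=%.1f min=%.1f max=%.1f%n", avg, min, max); // avg=3.0 min=3.0 max=20.0
    }
}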
@@ -436,6 +455,17 @@ public class RocksDBMetricsRecorderTest {
        bytesReadDuringCompactionSensor.record(5 + 6, 0L);
        replay(bytesReadDuringCompactionSensor);

        final HistogramData compactionTimeData1 = new HistogramData(0.0, 0.0, 0.0, 0.0, 0.0, 16.0, 2L, 8L, 6.0);
        final HistogramData compactionTimeData2 = new HistogramData(0.0, 0.0, 0.0, 0.0, 0.0, 24.0, 2L, 8L, 4.0);
        expect(statisticsToAdd1.getHistogramData(HistogramType.COMPACTION_TIME)).andReturn(compactionTimeData1);
        expect(statisticsToAdd2.getHistogramData(HistogramType.COMPACTION_TIME)).andReturn(compactionTimeData2);
        compactionTimeAvgSensor.record((double) (8 + 8) / (2 + 2), 0L);
        replay(compactionTimeAvgSensor);
        compactionTimeMinSensor.record(4.0, 0L);
        replay(compactionTimeMinSensor);
        compactionTimeMaxSensor.record(24.0, 0L);
        replay(compactionTimeMaxSensor);

        expect(statisticsToAdd1.getAndResetTickerCount(TickerType.NO_FILE_OPENS)).andReturn(5L);
        expect(statisticsToAdd1.getAndResetTickerCount(TickerType.NO_FILE_CLOSES)).andReturn(3L);
        expect(statisticsToAdd2.getAndResetTickerCount(TickerType.NO_FILE_OPENS)).andReturn(7L);
@@ -460,12 +490,18 @@ public class RocksDBMetricsRecorderTest {
            bytesReadFromDatabaseSensor,
            memtableBytesFlushedSensor,
            memtableHitRatioSensor,
            memtableAvgFlushTimeSensor,
            memtableMinFlushTimeSensor,
            memtableMaxFlushTimeSensor,
            writeStallDurationSensor,
            blockCacheDataHitRatioSensor,
            blockCacheIndexHitRatioSensor,
            blockCacheFilterHitRatioSensor,
            bytesWrittenDuringCompactionSensor,
            bytesReadDuringCompactionSensor,
            compactionTimeAvgSensor,
            compactionTimeMinSensor,
            compactionTimeMaxSensor,
            numberOfOpenFilesSensor,
            numberOfFileErrorsSensor
        );
@@ -479,12 +515,18 @@ public class RocksDBMetricsRecorderTest {
            bytesReadFromDatabaseSensor,
            memtableBytesFlushedSensor,
            memtableHitRatioSensor,
            memtableAvgFlushTimeSensor,
            memtableMinFlushTimeSensor,
            memtableMaxFlushTimeSensor,
            writeStallDurationSensor,
            blockCacheDataHitRatioSensor,
            blockCacheIndexHitRatioSensor,
            blockCacheFilterHitRatioSensor,
            bytesWrittenDuringCompactionSensor,
            bytesReadDuringCompactionSensor,
            compactionTimeAvgSensor,
            compactionTimeMinSensor,
            compactionTimeMaxSensor,
            numberOfOpenFilesSensor,
            numberOfFileErrorsSensor
        );
@@ -496,12 +538,18 @@ public class RocksDBMetricsRecorderTest {
            bytesReadFromDatabaseSensor,
            memtableBytesFlushedSensor,
            memtableHitRatioSensor,
            memtableAvgFlushTimeSensor,
            memtableMinFlushTimeSensor,
            memtableMaxFlushTimeSensor,
            writeStallDurationSensor,
            blockCacheDataHitRatioSensor,
            blockCacheIndexHitRatioSensor,
            blockCacheFilterHitRatioSensor,
            bytesWrittenDuringCompactionSensor,
            bytesReadDuringCompactionSensor,
            compactionTimeAvgSensor,
            compactionTimeMinSensor,
            compactionTimeMaxSensor,
            numberOfOpenFilesSensor,
            numberOfFileErrorsSensor
        );
@@ -511,6 +559,7 @@ public class RocksDBMetricsRecorderTest {
    public void shouldCorrectlyHandleHitRatioRecordingsWithZeroHitsAndMisses() {
        resetToNice(statisticsToAdd1);
        recorder.addValueProviders(SEGMENT_STORE_NAME_1, dbToAdd1, cacheToAdd1, statisticsToAdd1);
        expect(statisticsToAdd1.getHistogramData(anyObject())).andStubReturn(new HistogramData(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0L, 0L, 0.0));
        expect(statisticsToAdd1.getTickerCount(anyObject())).andStubReturn(0L);
        replay(statisticsToAdd1);
        memtableHitRatioSensor.record(0, 0L);
@@ -530,6 +579,24 @@ public class RocksDBMetricsRecorderTest {
        verify(blockCacheFilterHitRatioSensor);
    }

    @Test
    public void shouldCorrectlyHandleAvgRecordingsWithZeroSumAndCount() {
        resetToNice(statisticsToAdd1);
        recorder.addValueProviders(SEGMENT_STORE_NAME_1, dbToAdd1, cacheToAdd1, statisticsToAdd1);
        expect(statisticsToAdd1.getHistogramData(anyObject())).andStubReturn(new HistogramData(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0L, 0L, 0.0));
        expect(statisticsToAdd1.getTickerCount(anyObject())).andStubReturn(0L);
        replay(statisticsToAdd1);
        memtableAvgFlushTimeSensor.record(0, 0L);
        compactionTimeAvgSensor.record(0, 0L);
        replay(memtableAvgFlushTimeSensor);
        replay(compactionTimeAvgSensor);

        recorder.record(0L);

        verify(memtableAvgFlushTimeSensor);
        verify(compactionTimeAvgSensor);
    }

    private void setUpMetricsMock() {
        mockStatic(RocksDBMetrics.class);
        final RocksDBMetricContext metricsContext =
@@ -542,6 +609,12 @@ public class RocksDBMetricsRecorderTest {
            .andReturn(memtableBytesFlushedSensor);
        expect(RocksDBMetrics.memtableHitRatioSensor(eq(streamsMetrics), eq(metricsContext)))
            .andReturn(memtableHitRatioSensor);
        expect(RocksDBMetrics.memtableAvgFlushTimeSensor(eq(streamsMetrics), eq(metricsContext)))
            .andReturn(memtableAvgFlushTimeSensor);
        expect(RocksDBMetrics.memtableMinFlushTimeSensor(eq(streamsMetrics), eq(metricsContext)))
            .andReturn(memtableMinFlushTimeSensor);
        expect(RocksDBMetrics.memtableMaxFlushTimeSensor(eq(streamsMetrics), eq(metricsContext)))
            .andReturn(memtableMaxFlushTimeSensor);
        expect(RocksDBMetrics.writeStallDurationSensor(eq(streamsMetrics), eq(metricsContext)))
            .andReturn(writeStallDurationSensor);
        expect(RocksDBMetrics.blockCacheDataHitRatioSensor(eq(streamsMetrics), eq(metricsContext)))
@@ -554,6 +627,12 @@ public class RocksDBMetricsRecorderTest {
            .andReturn(bytesWrittenDuringCompactionSensor);
        expect(RocksDBMetrics.bytesReadDuringCompactionSensor(eq(streamsMetrics), eq(metricsContext)))
            .andReturn(bytesReadDuringCompactionSensor);
        expect(RocksDBMetrics.compactionTimeAvgSensor(eq(streamsMetrics), eq(metricsContext)))
            .andReturn(compactionTimeAvgSensor);
        expect(RocksDBMetrics.compactionTimeMinSensor(eq(streamsMetrics), eq(metricsContext)))
            .andReturn(compactionTimeMinSensor);
        expect(RocksDBMetrics.compactionTimeMaxSensor(eq(streamsMetrics), eq(metricsContext)))
            .andReturn(compactionTimeMaxSensor);
        expect(RocksDBMetrics.numberOfOpenFilesSensor(eq(streamsMetrics), eq(metricsContext)))
            .andReturn(numberOfOpenFilesSensor);
        expect(RocksDBMetrics.numberOfFileErrorsSensor(eq(streamsMetrics), eq(metricsContext)))
@@ -595,6 +674,12 @@ public class RocksDBMetricsRecorderTest {
            .andStubReturn(memtableBytesFlushedSensor);
        expect(RocksDBMetrics.memtableHitRatioSensor(streamsMetrics, metricsContext))
            .andStubReturn(memtableHitRatioSensor);
        expect(RocksDBMetrics.memtableAvgFlushTimeSensor(streamsMetrics, metricsContext))
            .andStubReturn(memtableAvgFlushTimeSensor);
        expect(RocksDBMetrics.memtableMinFlushTimeSensor(streamsMetrics, metricsContext))
            .andStubReturn(memtableMinFlushTimeSensor);
        expect(RocksDBMetrics.memtableMaxFlushTimeSensor(streamsMetrics, metricsContext))
            .andStubReturn(memtableMaxFlushTimeSensor);
        expect(RocksDBMetrics.writeStallDurationSensor(streamsMetrics, metricsContext))
            .andStubReturn(writeStallDurationSensor);
        expect(RocksDBMetrics.blockCacheDataHitRatioSensor(streamsMetrics, metricsContext))
@@ -607,6 +692,12 @@ public class RocksDBMetricsRecorderTest {
            .andStubReturn(bytesWrittenDuringCompactionSensor);
        expect(RocksDBMetrics.bytesReadDuringCompactionSensor(streamsMetrics, metricsContext))
            .andStubReturn(bytesReadDuringCompactionSensor);
        expect(RocksDBMetrics.compactionTimeAvgSensor(streamsMetrics, metricsContext))
            .andStubReturn(compactionTimeAvgSensor);
        expect(RocksDBMetrics.compactionTimeMinSensor(streamsMetrics, metricsContext))
            .andStubReturn(compactionTimeMinSensor);
        expect(RocksDBMetrics.compactionTimeMaxSensor(streamsMetrics, metricsContext))
            .andStubReturn(compactionTimeMaxSensor);
        expect(RocksDBMetrics.numberOfOpenFilesSensor(streamsMetrics, metricsContext))
            .andStubReturn(numberOfOpenFilesSensor);
        expect(RocksDBMetrics.numberOfFileErrorsSensor(streamsMetrics, metricsContext))