mirror of https://github.com/grafana/grafana.git
Transformers: Add smoothing transformer
Added a smoothing transformer to help clean up noisy time series data. It uses the ASAP (Automatic Smoothing for Attention Prioritization) algorithm to pick the most important data points while keeping the overall shape and trends intact. The transformer always keeps the first and last points so the complete time range is preserved. Unit tests are included.
This commit is contained in:
parent 6c35bb2c6e
commit e089343db0
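For reference, a minimal sketch of how the new transformer can be exercised directly, mirroring the pattern used in the test file added in this commit (the sample frame, context, and import paths are illustrative):

import { DataTransformContext, FieldType, toDataFrame } from '@grafana/data';
import { getSmoothingTransformer } from './smoothing';

// Illustrative noisy series; any frame with a time field and numeric fields works.
const frames = [
  toDataFrame({
    name: 'sensor',
    fields: [
      { name: 'time', type: FieldType.time, values: [1000, 2000, 3000, 4000, 5000] },
      { name: 'value', type: FieldType.number, values: [10, 20, 15, 25, 18] },
    ],
  }),
];

const ctx: DataTransformContext = { interpolate: (v: string) => v };

// Apply the transformer synchronously; numeric fields come back renamed "<name> (smoothed)".
const smoothed = getSmoothingTransformer().transformer({ resolution: 100 }, ctx)(frames);
console.log(smoothed[0].fields[1].name); // "value (smoothed)"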
@ -334,6 +334,7 @@
    "date-fns": "4.1.0",
    "debounce-promise": "3.1.2",
    "diff": "^8.0.0",
    "downsample": "1.4.0",
    "fast-deep-equal": "^3.1.3",
    "fast-json-patch": "3.1.1",
    "file-saver": "2.0.5",
@ -42,5 +42,6 @@ export enum DataTransformerID {
  formatTime = 'formatTime',
  formatString = 'formatString',
  regression = 'regression',
  smoothing = 'smoothing',
  groupToNestedTable = 'groupToNestedTable',
}
@ -1608,6 +1608,52 @@ ${buildImageContent(
`;
    },
  },
  smoothing: {
    name: 'Smoothing',
    getHelperDocs: function (imageRenderType: ImageRenderType = ImageRenderType.ShortcodeFigure) {
      return `
Use this transformation to reduce noise in time series data by applying downsampling. This transformation creates smoother, cleaner visualizations while preserving important trends and patterns in your data.

The smoothing transformation uses the ASAP (Automatic Smoothing for Attention Prioritization) algorithm, which automatically determines the optimal points to preserve based on your data's characteristics. Unlike simple downsampling methods, ASAP selects which data points to keep, ensuring that important features like peaks, valleys, and trend changes are maintained.

#### Available options

- **Resolution** - Set the target number of output points (10-1000). Lower values create more aggressive smoothing with fewer points, while higher values preserve more detail. The ASAP algorithm may output fewer points than requested if the data doesn't require the full resolution for accurate representation.

#### When to use smoothing

This transformation is useful for:

- Noisy time series data that obscures underlying trends
- Clearer trend analysis and pattern recognition

#### Example

Consider noisy sensor data with thousands of points:

**Before smoothing:**

| Time | Temperature |
| ------------------- | ----------- |
| 2020-07-07 10:00:00 | 23.1 |
| 2020-07-07 10:00:01 | 23.3 |
| 2020-07-07 10:00:02 | 22.9 |
| 2020-07-07 10:00:03 | 23.2 |
| ... (thousands more) | ... |

**After smoothing (Resolution: 100):**

| Time | Temperature (smoothed) |
| ------------------- | ---------------------- |
| 2020-07-07 10:00:00 | 23.1 |
| 2020-07-07 10:05:30 | 23.0 |
| 2020-07-07 10:15:45 | 22.8 |
| ... (≤100 points) | ... |

The transformation preserves the complete time range and important variations while significantly reducing the number of data points for cleaner visualization.
`;
    },
  },
};

export function getLinkToDocs(): string {
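The Resolution option documented above maps onto the transformer's `resolution` option. As a sketch of how a saved panel might reference it (the surrounding dashboard JSON shape is assumed; only the id and option name come from this commit):

// Hypothetical panel transformations entry.
const panelTransformations = [
  {
    id: 'smoothing',              // DataTransformerID.smoothing
    options: { resolution: 100 }, // SmoothingTransformerOptions; the editor clamps this to 10-1000
  },
];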
@ -0,0 +1,42 @@
[Image: smoothing transformer card icon, dark theme (new SVG asset, 42 lines)]
After Width: | Height: | Size: 4.1 KiB |
@ -0,0 +1,43 @@
[Image: smoothing transformer card icon, light theme (new SVG asset, 43 lines)]
After Width: | Height: | Size: 4.5 KiB |
@ -0,0 +1,83 @@
import { ASAP } from 'downsample';

export interface DataPoint {
  x: number;
  y: number;
}

export interface ASAPOptions {
  resolution?: number;
}

const DEFAULT_RESOLUTION = 100;

export function asapSmooth(data: Array<DataPoint | [number, number]>, options: ASAPOptions = {}): DataPoint[] {
  const { resolution = DEFAULT_RESOLUTION } = options;

  if (!data || !Array.isArray(data) || data.length === 0) {
    return [];
  }

  // normalize input to [x, y] tuples, skipping malformed or non-numeric entries
  const inputData: Array<[number, number]> = [];

  for (const point of data) {
    if (!point || typeof point !== 'object') {
      continue;
    }

    let x: number, y: number;

    if ('x' in point && 'y' in point) {
      x = typeof point.x === 'number' ? point.x : Number(point.x);
      y = typeof point.y === 'number' ? point.y : Number(point.y);
    } else if (Array.isArray(point) && point.length >= 2) {
      x = Number(point[0]);
      y = Number(point[1]);
    } else {
      continue;
    }

    if (isNaN(x) || isNaN(y)) {
      continue;
    }

    inputData.push([x, y]);
  }

  if (inputData.length === 0) {
    return [];
  }

  const smoothedData = ASAP(inputData, resolution);

  // convert the ASAP output (tuples or {x, y} objects) back to DataPoint[]
  const result: DataPoint[] = [];
  for (let i = 0; i < smoothedData.length; i++) {
    const item = smoothedData[i];

    if (Array.isArray(item) && item.length >= 2) {
      result.push({ x: Number(item[0]), y: Number(item[1]) });
    } else if (item && typeof item === 'object' && 'x' in item && 'y' in item) {
      result.push({ x: Number(item.x), y: Number(item.y) });
    }
  }

  // always preserve first and last points to maintain complete time range
  if (result.length > 0 && inputData.length > 0) {
    const firstInput = inputData[0];
    const lastInput = inputData[inputData.length - 1];
    const firstResult = result[0];
    const lastResult = result[result.length - 1];

    // always add first point if it's not already there
    if (firstResult && firstResult.x !== firstInput[0]) {
      result.unshift({ x: firstInput[0], y: firstInput[1] });
    }

    // always add last point if it's not already there
    if (lastResult && lastResult.x !== lastInput[0]) {
      result.push({ x: lastInput[0], y: lastInput[1] });
    }
  }

  return result;
}
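A small sketch of calling the helper above on its own (the sample values are made up; both `{x, y}` objects and `[x, y]` tuples are accepted):

import { asapSmooth, DataPoint } from './asap';

// 1,000 noisy samples at 1s spacing (illustrative data).
const noisy: Array<[number, number]> = Array.from({ length: 1000 }, (_, i) => [
  i * 1000,
  Math.sin(i / 50) * 10 + (Math.random() - 0.5) * 4,
]);

const smoothedPoints: DataPoint[] = asapSmooth(noisy, { resolution: 100 });

// The first and last input points are always present in the output.
console.log(smoothedPoints.length, smoothedPoints[0].x === 0, smoothedPoints[smoothedPoints.length - 1].x === 999000);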
@ -0,0 +1,450 @@
|
|||
import { DataFrame, DataTransformContext, FieldType, toDataFrame } from '@grafana/data';
|
||||
|
||||
import { getSmoothingTransformer, SmoothingTransformerOptions, DEFAULTS } from './smoothing';
|
||||
|
||||
describe('Smoothing transformer', () => {
|
||||
const smoothingTransformer = getSmoothingTransformer();
|
||||
const ctx: DataTransformContext = {
|
||||
interpolate: (v: string) => v,
|
||||
};
|
||||
|
||||
describe('Basic functionality', () => {
|
||||
it('should smooth time series data with default settings', () => {
|
||||
const source = [
|
||||
toDataFrame({
|
||||
name: 'test data',
|
||||
refId: 'A',
|
||||
fields: [
|
||||
{ name: 'time', type: FieldType.time, values: [1000, 2000, 3000, 4000, 5000] },
|
||||
{ name: 'value', type: FieldType.number, values: [10, 20, 15, 25, 18] },
|
||||
],
|
||||
}),
|
||||
];
|
||||
|
||||
const config: SmoothingTransformerOptions = {};
|
||||
|
||||
const result = smoothingTransformer.transformer(config, ctx)(source);
|
||||
|
||||
expect(result).toHaveLength(1);
|
||||
expect(result[0].name).toBe('test data (smoothed)');
|
||||
expect(result[0].fields).toHaveLength(2);
|
||||
expect(result[0].fields[0].name).toBe('time');
|
||||
expect(result[0].fields[1].name).toBe('value (smoothed)');
|
||||
|
||||
// should have time field values
|
||||
expect(result[0].fields[0].values.length).toBeGreaterThan(0);
|
||||
// should have corresponding smoothed values
|
||||
expect(result[0].fields[1].values.length).toBe(result[0].fields[0].values.length);
|
||||
});
|
||||
|
||||
it('should handle multiple numeric fields', () => {
|
||||
const source = [
|
||||
toDataFrame({
|
||||
name: 'multi field data',
|
||||
refId: 'B',
|
||||
fields: [
|
||||
{ name: 'timestamp', type: FieldType.time, values: [1000, 2000, 3000, 4000] },
|
||||
{ name: 'cpu', type: FieldType.number, values: [50, 75, 60, 80] },
|
||||
{ name: 'memory', type: FieldType.number, values: [40, 55, 45, 65] },
|
||||
{ name: 'label', type: FieldType.string, values: ['a', 'b', 'c', 'd'] },
|
||||
],
|
||||
}),
|
||||
];
|
||||
|
||||
const config: SmoothingTransformerOptions = { resolution: 3 };
|
||||
|
||||
const result = smoothingTransformer.transformer(config, ctx)(source);
|
||||
|
||||
expect(result).toHaveLength(1);
|
||||
expect(result[0].fields).toHaveLength(4);
|
||||
expect(result[0].fields[0].name).toBe('timestamp');
|
||||
expect(result[0].fields[1].name).toBe('cpu (smoothed)');
|
||||
expect(result[0].fields[2].name).toBe('memory (smoothed)');
|
||||
expect(result[0].fields[3].name).toBe('label');
|
||||
|
||||
// all numeric fields should be smoothed
|
||||
expect(result[0].fields[1].values.length).toBeGreaterThan(0);
|
||||
expect(result[0].fields[2].values.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
it('should preserve non-numeric and non-time fields', () => {
|
||||
const source = [
|
||||
toDataFrame({
|
||||
name: 'mixed data',
|
||||
refId: 'C',
|
||||
fields: [
|
||||
{ name: 'time', type: FieldType.time, values: [1000, 2000, 3000] },
|
||||
{ name: 'value', type: FieldType.number, values: [10, 20, 15] },
|
||||
{ name: 'category', type: FieldType.string, values: ['A', 'B', 'C'] },
|
||||
{ name: 'active', type: FieldType.boolean, values: [true, false, true] },
|
||||
],
|
||||
}),
|
||||
];
|
||||
|
||||
const config: SmoothingTransformerOptions = { resolution: 2 };
|
||||
|
||||
const result = smoothingTransformer.transformer(config, ctx)(source);
|
||||
|
||||
expect(result[0].fields[2].name).toBe('category');
|
||||
expect(result[0].fields[2].type).toBe(FieldType.string);
|
||||
expect(result[0].fields[3].name).toBe('active');
|
||||
expect(result[0].fields[3].type).toBe(FieldType.boolean);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Configuration options', () => {
|
||||
it('should use default resolution when not specified', () => {
|
||||
const source = [
|
||||
toDataFrame({
|
||||
name: 'default test',
|
||||
refId: 'A',
|
||||
fields: [
|
||||
{ name: 'time', type: FieldType.time, values: Array.from({ length: 200 }, (_, i) => i * 1000) },
|
||||
{ name: 'value', type: FieldType.number, values: Array.from({ length: 200 }, () => Math.random() * 100) },
|
||||
],
|
||||
}),
|
||||
];
|
||||
|
||||
const config: SmoothingTransformerOptions = {};
|
||||
|
||||
const result = smoothingTransformer.transformer(config, ctx)(source);
|
||||
|
||||
// With default resolution of 100, output should be around 100 points
|
||||
expect(result[0].fields[0].values.length).toBeLessThanOrEqual(DEFAULTS.resolution + 10);
|
||||
});
|
||||
|
||||
it('should respect custom resolution settings', () => {
|
||||
const source = [
|
||||
toDataFrame({
|
||||
name: 'resolution test',
|
||||
refId: 'A',
|
||||
fields: [
|
||||
{ name: 'time', type: FieldType.time, values: Array.from({ length: 100 }, (_, i) => i * 1000) },
|
||||
{ name: 'value', type: FieldType.number, values: Array.from({ length: 100 }, () => Math.random() * 100) },
|
||||
],
|
||||
}),
|
||||
];
|
||||
|
||||
const config: SmoothingTransformerOptions = { resolution: 25 };
|
||||
|
||||
const result = smoothingTransformer.transformer(config, ctx)(source);
|
||||
|
||||
// With resolution of 25, output should be around 25 points
|
||||
expect(result[0].fields[0].values.length).toBeLessThanOrEqual(30);
|
||||
});
|
||||
|
||||
it('should handle very small resolution values', () => {
|
||||
const source = [
|
||||
toDataFrame({
|
||||
name: 'small resolution test',
|
||||
refId: 'A',
|
||||
fields: [
|
||||
{ name: 'time', type: FieldType.time, values: [1000, 2000, 3000, 4000, 5000] },
|
||||
{ name: 'value', type: FieldType.number, values: [10, 20, 15, 25, 18] },
|
||||
],
|
||||
}),
|
||||
];
|
||||
|
||||
const config: SmoothingTransformerOptions = { resolution: 2 };
|
||||
|
||||
const result = smoothingTransformer.transformer(config, ctx)(source);
|
||||
|
||||
expect(result[0].fields[0].values.length).toBeLessThanOrEqual(5);
|
||||
expect(result[0].fields[1].values.length).toBe(result[0].fields[0].values.length);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Edge cases', () => {
|
||||
it('should handle empty data frames', () => {
|
||||
const source: DataFrame[] = [];
|
||||
|
||||
const config: SmoothingTransformerOptions = {};
|
||||
|
||||
const result = smoothingTransformer.transformer(config, ctx)(source);
|
||||
|
||||
expect(result).toEqual([]);
|
||||
});
|
||||
|
||||
it('should handle frames without time fields', () => {
|
||||
const source = [
|
||||
toDataFrame({
|
||||
name: 'no time field',
|
||||
refId: 'A',
|
||||
fields: [
|
||||
{ name: 'category', type: FieldType.string, values: ['A', 'B', 'C'] },
|
||||
{ name: 'value', type: FieldType.number, values: [10, 20, 15] },
|
||||
],
|
||||
}),
|
||||
];
|
||||
|
||||
const config: SmoothingTransformerOptions = {};
|
||||
|
||||
const result = smoothingTransformer.transformer(config, ctx)(source);
|
||||
|
||||
// should return original frame unchanged
|
||||
expect(result).toHaveLength(1);
|
||||
expect(result[0]).toEqual(source[0]);
|
||||
});
|
||||
|
||||
it('should handle frames without numeric fields', () => {
|
||||
const source = [
|
||||
toDataFrame({
|
||||
name: 'no numeric fields',
|
||||
refId: 'A',
|
||||
fields: [
|
||||
{ name: 'time', type: FieldType.time, values: [1000, 2000, 3000] },
|
||||
{ name: 'category', type: FieldType.string, values: ['A', 'B', 'C'] },
|
||||
],
|
||||
}),
|
||||
];
|
||||
|
||||
const config: SmoothingTransformerOptions = {};
|
||||
|
||||
const result = smoothingTransformer.transformer(config, ctx)(source);
|
||||
|
||||
// should return original frame unchanged
|
||||
expect(result).toHaveLength(1);
|
||||
expect(result[0]).toEqual(source[0]);
|
||||
});
|
||||
|
||||
it('should filter out NaN values', () => {
|
||||
const source = [
|
||||
toDataFrame({
|
||||
name: 'data with NaN',
|
||||
refId: 'A',
|
||||
fields: [
|
||||
{ name: 'time', type: FieldType.time, values: [1000, 2000, 3000, 4000, 5000] },
|
||||
{ name: 'value', type: FieldType.number, values: [10, NaN, 15, 25, NaN] },
|
||||
],
|
||||
}),
|
||||
];
|
||||
|
||||
const config: SmoothingTransformerOptions = { resolution: 3 };
|
||||
|
||||
const result = smoothingTransformer.transformer(config, ctx)(source);
|
||||
|
||||
expect(result[0].fields[1].values.length).toBeGreaterThan(0);
|
||||
// all values should be valid numbers
|
||||
result[0].fields[1].values.forEach((value) => {
|
||||
expect(typeof value).toBe('number');
|
||||
expect(isNaN(value)).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
it('should handle data with all NaN values', () => {
|
||||
const source = [
|
||||
toDataFrame({
|
||||
name: 'all NaN data',
|
||||
refId: 'A',
|
||||
fields: [
|
||||
{ name: 'time', type: FieldType.time, values: [1000, 2000, 3000] },
|
||||
{ name: 'value', type: FieldType.number, values: [NaN, NaN, NaN] },
|
||||
],
|
||||
}),
|
||||
];
|
||||
|
||||
const config: SmoothingTransformerOptions = {};
|
||||
|
||||
const result = smoothingTransformer.transformer(config, ctx)(source);
|
||||
|
||||
// When all values are NaN, frame should be returned unchanged
|
||||
expect(result[0].fields[1].name).toBe('value'); // No "(smoothed)" suffix
|
||||
expect(result[0].fields[1].values).toEqual([NaN, NaN, NaN]);
|
||||
expect(result[0].name).toBe('all NaN data'); // Original name preserved
|
||||
});
|
||||
|
||||
it('should handle data with null values', () => {
|
||||
const source = [
|
||||
toDataFrame({
|
||||
name: 'data with nulls',
|
||||
refId: 'A',
|
||||
fields: [
|
||||
{ name: 'time', type: FieldType.time, values: [1000, 2000, 3000, 4000] },
|
||||
{ name: 'value', type: FieldType.number, values: [10, null, 15, 25] },
|
||||
],
|
||||
}),
|
||||
];
|
||||
|
||||
const config: SmoothingTransformerOptions = { resolution: 3 };
|
||||
|
||||
const result = smoothingTransformer.transformer(config, ctx)(source);
|
||||
|
||||
expect(result[0].fields[1].values.length).toBeGreaterThan(0);
|
||||
// should filter out null values and process valid ones
|
||||
result[0].fields[1].values.forEach((value) => {
|
||||
expect(value).not.toBeNull();
|
||||
expect(typeof value).toBe('number');
|
||||
expect(isNaN(value)).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
it('should handle single data point', () => {
|
||||
const source = [
|
||||
toDataFrame({
|
||||
name: 'single point',
|
||||
refId: 'A',
|
||||
fields: [
|
||||
{ name: 'time', type: FieldType.time, values: [1000] },
|
||||
{ name: 'value', type: FieldType.number, values: [42] },
|
||||
],
|
||||
}),
|
||||
];
|
||||
|
||||
const config: SmoothingTransformerOptions = {};
|
||||
|
||||
const result = smoothingTransformer.transformer(config, ctx)(source);
|
||||
|
||||
expect(result[0].fields[0].values).toHaveLength(1);
|
||||
expect(result[0].fields[1].values).toHaveLength(1);
|
||||
expect(result[0].fields[1].values[0]).toBe(42);
|
||||
});
|
||||
|
||||
it('should handle empty numeric field values', () => {
|
||||
const source = [
|
||||
toDataFrame({
|
||||
name: 'empty values',
|
||||
refId: 'A',
|
||||
fields: [
|
||||
{ name: 'time', type: FieldType.time, values: [1000, 2000, 3000] },
|
||||
{ name: 'value', type: FieldType.number, values: [] },
|
||||
],
|
||||
}),
|
||||
];
|
||||
|
||||
const config: SmoothingTransformerOptions = {};
|
||||
|
||||
const result = smoothingTransformer.transformer(config, ctx)(source);
|
||||
|
||||
// should return original frame since no numeric data to smooth
|
||||
expect(result[0]).toEqual(source[0]);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Data integrity', () => {
|
||||
it('should maintain time ordering in smoothed data', () => {
|
||||
const source = [
|
||||
toDataFrame({
|
||||
name: 'ordered data',
|
||||
refId: 'A',
|
||||
fields: [
|
||||
{ name: 'time', type: FieldType.time, values: [1000, 2000, 3000, 4000, 5000] },
|
||||
{ name: 'value', type: FieldType.number, values: [10, 20, 15, 25, 18] },
|
||||
],
|
||||
}),
|
||||
];
|
||||
|
||||
const config: SmoothingTransformerOptions = { resolution: 4 };
|
||||
|
||||
const result = smoothingTransformer.transformer(config, ctx)(source);
|
||||
|
||||
const timeValues = result[0].fields[0].values as number[];
|
||||
|
||||
// check that time values are in ascending order
|
||||
for (let i = 1; i < timeValues.length; i++) {
|
||||
expect(timeValues[i]).toBeGreaterThanOrEqual(timeValues[i - 1]);
|
||||
}
|
||||
});
|
||||
|
||||
it('should preserve original frame metadata', () => {
|
||||
const source = [
|
||||
toDataFrame({
|
||||
name: 'original name',
|
||||
refId: 'TEST',
|
||||
meta: { custom: { test: 'value' } },
|
||||
fields: [
|
||||
{ name: 'time', type: FieldType.time, values: [1000, 2000, 3000] },
|
||||
{ name: 'value', type: FieldType.number, values: [10, 20, 15] },
|
||||
],
|
||||
}),
|
||||
];
|
||||
|
||||
const config: SmoothingTransformerOptions = {};
|
||||
|
||||
const result = smoothingTransformer.transformer(config, ctx)(source);
|
||||
|
||||
expect(result[0].refId).toBe('TEST');
|
||||
expect(result[0].meta).toEqual(source[0].meta);
|
||||
expect(result[0].name).toBe('original name (smoothed)');
|
||||
});
|
||||
|
||||
it('should handle frames with no name', () => {
|
||||
const source = [
|
||||
toDataFrame({
|
||||
refId: 'A',
|
||||
fields: [
|
||||
{ name: 'time', type: FieldType.time, values: [1000, 2000, 3000] },
|
||||
{ name: 'value', type: FieldType.number, values: [10, 20, 15] },
|
||||
],
|
||||
}),
|
||||
];
|
||||
|
||||
const config: SmoothingTransformerOptions = {};
|
||||
|
||||
const result = smoothingTransformer.transformer(config, ctx)(source);
|
||||
|
||||
expect(result[0].name).toBe('Data (smoothed)');
|
||||
});
|
||||
});
|
||||
|
||||
describe('Multiple frames', () => {
|
||||
it('should process multiple frames independently', () => {
|
||||
const source = [
|
||||
toDataFrame({
|
||||
name: 'frame1',
|
||||
refId: 'A',
|
||||
fields: [
|
||||
{ name: 'time', type: FieldType.time, values: [1000, 2000, 3000] },
|
||||
{ name: 'value', type: FieldType.number, values: [10, 20, 15] },
|
||||
],
|
||||
}),
|
||||
toDataFrame({
|
||||
name: 'frame2',
|
||||
refId: 'B',
|
||||
fields: [
|
||||
{ name: 'timestamp', type: FieldType.time, values: [4000, 5000, 6000] },
|
||||
{ name: 'metric', type: FieldType.number, values: [30, 40, 35] },
|
||||
],
|
||||
}),
|
||||
];
|
||||
|
||||
const config: SmoothingTransformerOptions = { resolution: 2 };
|
||||
|
||||
const result = smoothingTransformer.transformer(config, ctx)(source);
|
||||
|
||||
expect(result).toHaveLength(2);
|
||||
expect(result[0].name).toBe('frame1 (smoothed)');
|
||||
expect(result[1].name).toBe('frame2 (smoothed)');
|
||||
expect(result[0].refId).toBe('A');
|
||||
expect(result[1].refId).toBe('B');
|
||||
});
|
||||
|
||||
it('should handle mixed frame types', () => {
|
||||
const source = [
|
||||
toDataFrame({
|
||||
name: 'valid frame',
|
||||
refId: 'A',
|
||||
fields: [
|
||||
{ name: 'time', type: FieldType.time, values: [1000, 2000, 3000] },
|
||||
{ name: 'value', type: FieldType.number, values: [10, 20, 15] },
|
||||
],
|
||||
}),
|
||||
toDataFrame({
|
||||
name: 'invalid frame',
|
||||
refId: 'B',
|
||||
fields: [
|
||||
{ name: 'category', type: FieldType.string, values: ['A', 'B', 'C'] },
|
||||
{ name: 'label', type: FieldType.string, values: ['X', 'Y', 'Z'] },
|
||||
],
|
||||
}),
|
||||
];
|
||||
|
||||
const config: SmoothingTransformerOptions = {};
|
||||
|
||||
const result = smoothingTransformer.transformer(config, ctx)(source);
|
||||
|
||||
expect(result).toHaveLength(2);
|
||||
expect(result[0].name).toBe('valid frame (smoothed)');
|
||||
expect(result[1]).toEqual(source[1]);
|
||||
});
|
||||
});
|
||||
});
|
|
@ -0,0 +1,124 @@
import { map } from 'rxjs';

import { DataFrame, DataTransformerID, FieldType, SynchronousDataTransformerInfo } from '@grafana/data';
import { t } from '@grafana/i18n';

import { asapSmooth, DataPoint } from './asap';

export interface SmoothingTransformerOptions {
  resolution?: number;
}

export const DEFAULTS = {
  resolution: 100,
};

export const getSmoothingTransformer: () => SynchronousDataTransformerInfo<SmoothingTransformerOptions> = () => ({
  id: DataTransformerID.smoothing,
  name: t('transformers.smoothing.name', 'Smoothing'),
  description: t(
    'transformers.smoothing.description',
    'Reduce noise in time series data through adaptive downsampling.'
  ),
  operator: (options, ctx) => (source) =>
    source.pipe(map((data) => getSmoothingTransformer().transformer(options, ctx)(data))),
  transformer: (options, ctx) => {
    return (frames: DataFrame[]) => {
      const { resolution = DEFAULTS.resolution } = options;

      if (frames.length === 0) {
        return frames;
      }

      return frames.map((frame) => {
        const timeField = frame.fields.find((f) => f.type === FieldType.time);
        if (!timeField) {
          return frame;
        }

        const firstNumericField = frame.fields.find((f) => f.type === FieldType.number && f.values.length > 0);
        if (!firstNumericField) {
          return frame;
        }

        const referencePoints: DataPoint[] = timeField.values
          .map((time, index) => ({
            x: time,
            y: firstNumericField.values[index],
          }))
          .filter((point) => point.y != null && !isNaN(point.y));

        if (referencePoints.length === 0) {
          return frame;
        }

        // run ASAP only once to determine optimal time points
        const smoothedReference = asapSmooth(referencePoints, { resolution });
        const smoothedTimes = smoothedReference.map((point) => point.x);

        // linearly interpolate a numeric field onto the ASAP-selected time points
        const interpolateToTargetTimes = (sourceField: Array<number | null | undefined>): number[] => {
          const sourcePoints: DataPoint[] = timeField.values
            .map((time, index) => ({
              x: time,
              y: sourceField[index],
            }))
            .filter((point): point is DataPoint => point.y != null && !isNaN(point.y));

          if (sourcePoints.length === 0) {
            return new Array(smoothedTimes.length).fill(NaN);
          }

          return smoothedTimes.map((targetTime) => {
            let leftPoint = sourcePoints[0];
            let rightPoint = sourcePoints[sourcePoints.length - 1];

            for (let i = 0; i < sourcePoints.length - 1; i++) {
              if (sourcePoints[i].x <= targetTime && sourcePoints[i + 1].x >= targetTime) {
                leftPoint = sourcePoints[i];
                rightPoint = sourcePoints[i + 1];
                break;
              }
            }

            if (leftPoint.x === targetTime) {
              return leftPoint.y;
            }
            if (rightPoint.x === targetTime) {
              return rightPoint.y;
            }
            if (leftPoint.x === rightPoint.x) {
              return leftPoint.y;
            }

            const ratio = (targetTime - leftPoint.x) / (rightPoint.x - leftPoint.x);
            return leftPoint.y + ratio * (rightPoint.y - leftPoint.y);
          });
        };

        const newFields = frame.fields.map((field) => {
          if (field.type === FieldType.time) {
            return {
              ...field,
              values: smoothedTimes,
            };
          } else if (field.type === FieldType.number) {
            const smoothedValues = interpolateToTargetTimes(field.values);

            return {
              ...field,
              name: `${field.name} (smoothed)`,
              values: smoothedValues,
            };
          }
          return field;
        });

        return {
          ...frame,
          fields: newFields,
          name: `${frame.name || 'Data'} (smoothed)`,
        };
      });
    };
  },
});
@ -0,0 +1,52 @@
import { DataTransformerID, TransformerRegistryItem, TransformerUIProps, TransformerCategory } from '@grafana/data';
import { t } from '@grafana/i18n';
import { InlineField, InlineFieldRow } from '@grafana/ui';
import { NumberInput } from 'app/core/components/OptionsUI/NumberInput';

import { getTransformationContent } from '../docs/getTransformationContent';
import darkImage from '../images/dark/smoothing.svg';
import lightImage from '../images/light/smoothing.svg';

import { DEFAULTS, SmoothingTransformerOptions, getSmoothingTransformer } from './smoothing';

export const SmoothingTransformerEditor = ({
  input,
  options,
  onChange,
}: TransformerUIProps<SmoothingTransformerOptions>) => {
  return (
    <InlineFieldRow>
      <InlineField
        label={t('transformers.smoothing.resolution.label', 'Resolution')}
        labelWidth={12}
        tooltip={t(
          'transformers.smoothing.resolution.tooltip',
          'Number of points in the smoothed output. Lower values create more aggressive smoothing with fewer points.'
        )}
      >
        <NumberInput
          value={options.resolution ?? DEFAULTS.resolution}
          onChange={(v) => onChange({ ...options, resolution: v })}
          min={10}
          max={1000}
          width={20}
        />
      </InlineField>
    </InlineFieldRow>
  );
};

export const getSmoothingTransformerRegistryItem: () => TransformerRegistryItem<SmoothingTransformerOptions> = () => {
  const smoothingTransformer = getSmoothingTransformer();
  return {
    id: DataTransformerID.smoothing,
    editor: SmoothingTransformerEditor,
    transformation: smoothingTransformer,
    name: smoothingTransformer.name,
    description: smoothingTransformer.description,
    categories: new Set([TransformerCategory.Reformat]),
    imageDark: darkImage,
    imageLight: lightImage,
    help: getTransformationContent(DataTransformerID.smoothing).helperDocs,
  };
};
@ -32,6 +32,7 @@ import { getPartitionByValuesTransformRegistryItem } from './partitionByValues/P
import { getPrepareTimeseriesTransformerRegistryItem } from './prepareTimeSeries/PrepareTimeSeriesEditor';
import { getRegressionTransformerRegistryItem } from './regression/regressionEditor';
import { getRowsToFieldsTransformRegistryItem } from './rowsToFields/RowsToFieldsTransformerEditor';
import { getSmoothingTransformerRegistryItem } from './smoothing/smoothingEditor';
import { getSpatialTransformRegistryItem } from './spatial/SpatialTransformerEditor';
import { getTimeSeriesTableTransformRegistryItem } from './timeSeriesTable/TimeSeriesTableTransformEditor';

@ -64,6 +65,7 @@ export const getStandardTransformers = (): TransformerRegistryItem[] => {
    getLimitTransformRegistryItem(),
    getJoinByLabelsTransformRegistryItem(),
    getRegressionTransformerRegistryItem(),
    getSmoothingTransformerRegistryItem(),
    getPartitionByValuesTransformRegistryItem(),
    ...(config.featureToggles.formatString ? [getFormatStringTransformerRegistryItem()] : []),
    ...(config.featureToggles.groupToNestedTableTransformation ? [getGroupToNestedTableTransformRegistryItem()] : []),
@ -15292,6 +15292,13 @@ __metadata:
  languageName: node
  linkType: hard

"downsample@npm:1.4.0":
  version: 1.4.0
  resolution: "downsample@npm:1.4.0"
  checksum: 10/ad0ab937e368546b577b564b13d7f39cd85a92bf29d56562aaa6ed10bac19e91ee75ab58f38050a9e8bf601c1abcfda942541880a84c89ba78d1775a229636d1
  languageName: node
  linkType: hard

"downshift@npm:^9.0.6":
  version: 9.0.10
  resolution: "downshift@npm:9.0.10"

@ -18306,6 +18313,7 @@ __metadata:
    date-fns: "npm:4.1.0"
    debounce-promise: "npm:3.1.2"
    diff: "npm:^8.0.0"
    downsample: "npm:1.4.0"
    esbuild: "npm:0.25.8"
    esbuild-loader: "npm:4.3.0"
    esbuild-plugin-browserslist: "npm:^1.0.0"