// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tsdb

import (
	"context"
	"errors"
	"fmt"
	"math"
	"math/rand"
	"os"
	"path"
	"path/filepath"
	"runtime"
	"strconv"
	"sync"
	"testing"
	"time"

	"github.com/oklog/ulid/v2"
	prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
	"github.com/prometheus/common/promslog"
	"github.com/stretchr/testify/require"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
	"github.com/prometheus/prometheus/tsdb/chunks"
	"github.com/prometheus/prometheus/tsdb/fileutil"
	"github.com/prometheus/prometheus/tsdb/index"
	"github.com/prometheus/prometheus/tsdb/tombstones"
	"github.com/prometheus/prometheus/tsdb/tsdbutil"
	"github.com/prometheus/prometheus/util/compression"
)

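// TestSplitByRange checks that splitByRange groups blocks into buckets
// aligned to the given time range, preserving input order within each bucket.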
func TestSplitByRange(t *testing.T) {
	cases := []struct {
		trange int64
		ranges [][2]int64
		output [][][2]int64
	}{
		{
			trange: 60,
			ranges: [][2]int64{{0, 10}},
			output: [][][2]int64{
				{{0, 10}},
			},
		},
		{
			trange: 60,
			ranges: [][2]int64{{0, 60}},
			output: [][][2]int64{
				{{0, 60}},
			},
		},
		{
			trange: 60,
			ranges: [][2]int64{{0, 10}, {9, 15}, {30, 60}},
			output: [][][2]int64{
				{{0, 10}, {9, 15}, {30, 60}},
			},
		},
		{
			trange: 60,
			ranges: [][2]int64{{70, 90}, {125, 130}, {130, 180}, {1000, 1001}},
			output: [][][2]int64{
				{{70, 90}},
				{{125, 130}, {130, 180}},
				{{1000, 1001}},
			},
		},
		// Mis-aligned or too-large blocks are ignored.
		{
			trange: 60,
			ranges: [][2]int64{{50, 70}, {70, 80}},
			output: [][][2]int64{
				{{70, 80}},
			},
		},
		{
			trange: 72,
			ranges: [][2]int64{{0, 144}, {144, 216}, {216, 288}},
			output: [][][2]int64{
				{{144, 216}},
				{{216, 288}},
			},
		},
		// Various awkward edge cases easy to hit with negative numbers.
		{
			trange: 60,
			ranges: [][2]int64{{-10, -5}},
			output: [][][2]int64{
				{{-10, -5}},
			},
		},
		{
			trange: 60,
			ranges: [][2]int64{{-60, -50}, {-10, -5}},
			output: [][][2]int64{
				{{-60, -50}, {-10, -5}},
			},
		},
		{
			trange: 60,
			ranges: [][2]int64{{-60, -50}, {-10, -5}, {0, 15}},
			output: [][][2]int64{
				{{-60, -50}, {-10, -5}},
				{{0, 15}},
			},
		},
	}

	for _, c := range cases {
		// Transform input range tuples into dirMetas.
		blocks := make([]dirMeta, 0, len(c.ranges))
		for _, r := range c.ranges {
			blocks = append(blocks, dirMeta{
				meta: &BlockMeta{
					MinTime: r[0],
					MaxTime: r[1],
				},
			})
		}

		// Transform output range tuples into dirMetas.
		exp := make([][]dirMeta, len(c.output))
		for i, group := range c.output {
			for _, r := range group {
				exp[i] = append(exp[i], dirMeta{
					meta: &BlockMeta{MinTime: r[0], MaxTime: r[1]},
				})
			}
		}

		require.Equal(t, exp, splitByRange(blocks, c.trange))
	}
}

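// TestNoPanicFor0Tombstones ensures that planning does not panic for blocks
// carrying zero tombstones.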
// See https://github.com/prometheus/prometheus/issues/3064
func TestNoPanicFor0Tombstones(t *testing.T) {
	metas := []dirMeta{
		{
			dir: "1",
			meta: &BlockMeta{
				MinTime: 0,
				MaxTime: 100,
			},
		},
		{
			dir: "2",
			meta: &BlockMeta{
				MinTime: 101,
				MaxTime: 200,
			},
		},
	}

	c, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{50}, nil, nil)
	require.NoError(t, err)

	c.plan(metas)
}

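// TestLeveledCompactor_plan exercises the planner's block selection across
// table-driven cases: waiting for enough sibling blocks, gaps between ranges,
// tombstone-driven compaction, and overlapping blocks.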
func TestLeveledCompactor_plan(t *testing.T) {
	// This mimics our default ExponentialBlockRanges with a minimum block size of 20.
	compactor, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{
		20,
		60,
		180,
		540,
		1620,
	}, nil, nil)
	require.NoError(t, err)

	cases := map[string]struct {
		metas    []dirMeta
		expected []string
	}{
		"Outside Range": {
			metas: []dirMeta{
				metaRange("1", 0, 20, nil),
			},
			expected: nil,
		},
		"We should wait for four blocks of size 20 to appear before compacting.": {
			metas: []dirMeta{
				metaRange("1", 0, 20, nil),
				metaRange("2", 20, 40, nil),
			},
			expected: nil,
		},
		`We should wait for the next block of size 20 to appear before compacting
		the existing ones. We have three, but we ignore the fresh one from the WAL`: {
			metas: []dirMeta{
				metaRange("1", 0, 20, nil),
				metaRange("2", 20, 40, nil),
				metaRange("3", 40, 60, nil),
			},
			expected: nil,
		},
		"Block to fill the entire parent range appeared – should be compacted": {
			metas: []dirMeta{
				metaRange("1", 0, 20, nil),
				metaRange("2", 20, 40, nil),
				metaRange("3", 40, 60, nil),
				metaRange("4", 60, 80, nil),
			},
			expected: []string{"1", "2", "3"},
		},
		`Block for the next parent range appeared with a gap of size 20. Nothing will happen in the first one
		anymore, but we still ignore the fresh one, so no compaction`: {
			metas: []dirMeta{
				metaRange("1", 0, 20, nil),
				metaRange("2", 20, 40, nil),
				metaRange("3", 60, 80, nil),
			},
			expected: nil,
		},
		`Block for the next parent range appeared, and we have a gap of size 20 between the second and third block.
		We will not get this missed gap anymore and we should compact just these two.`: {
			metas: []dirMeta{
				metaRange("1", 0, 20, nil),
				metaRange("2", 20, 40, nil),
				metaRange("3", 60, 80, nil),
				metaRange("4", 80, 100, nil),
			},
			expected: []string{"1", "2"},
		},
		"We have 20, 20, 20, 60, 60 range blocks. '5' is marked as the fresh one": {
			metas: []dirMeta{
				metaRange("1", 0, 20, nil),
				metaRange("2", 20, 40, nil),
				metaRange("3", 40, 60, nil),
				metaRange("4", 60, 120, nil),
				metaRange("5", 120, 180, nil),
			},
			expected: []string{"1", "2", "3"},
		},
		"We have 20, 60, 20, 60, 240 range blocks. We can compact 20 + 60 + 60": {
			metas: []dirMeta{
				metaRange("2", 20, 40, nil),
				metaRange("4", 60, 120, nil),
				metaRange("5", 960, 980, nil), // Fresh one.
				metaRange("6", 120, 180, nil),
				metaRange("7", 720, 960, nil),
			},
			expected: []string{"2", "4", "6"},
		},
		"Do not select large blocks that have many tombstones when there is no fresh block": {
			metas: []dirMeta{
				metaRange("1", 0, 540, &BlockStats{
					NumSeries:     10,
					NumTombstones: 3,
				}),
			},
			expected: nil,
		},
		"Select large blocks that have many tombstones when a fresh one appears": {
			metas: []dirMeta{
				metaRange("1", 0, 540, &BlockStats{
					NumSeries:     10,
					NumTombstones: 3,
				}),
				metaRange("2", 540, 560, nil),
			},
			expected: []string{"1"},
		},
		"For small blocks, do not compact tombstones, even when a fresh one appears.": {
			metas: []dirMeta{
				metaRange("1", 0, 60, &BlockStats{
					NumSeries:     10,
					NumTombstones: 3,
				}),
				metaRange("2", 60, 80, nil),
			},
			expected: nil,
		},
		`Regression test: we were stuck in a compact loop where we always recompacted
		the same block when tombstones and series counts were zero`: {
			metas: []dirMeta{
				metaRange("1", 0, 540, &BlockStats{
					NumSeries:     0,
					NumTombstones: 0,
				}),
				metaRange("2", 540, 560, nil),
			},
			expected: nil,
		},
		`Regression test: we were wrongly assuming that a new block is fresh from the WAL when its ULID is newest.
		We need to actually look at the max time instead.

		With the previous, wrong approach, block "8" was ignored, so we were wrongly compacting 5 and 7 and introducing
		block overlaps`: {
			metas: []dirMeta{
				metaRange("5", 0, 360, nil),
				metaRange("6", 540, 560, nil), // Fresh one.
				metaRange("7", 360, 420, nil),
				metaRange("8", 420, 540, nil),
			},
			expected: []string{"7", "8"},
		},
		// |--------------|
		//               |----------------|
		//                                |--------------|
		"Overlapping blocks 1": {
			metas: []dirMeta{
				metaRange("1", 0, 20, nil),
				metaRange("2", 19, 40, nil),
				metaRange("3", 40, 60, nil),
			},
			expected: []string{"1", "2"},
		},
		// |--------------|
		//                |--------------|
		//                        |--------------|
		"Overlapping blocks 2": {
			metas: []dirMeta{
				metaRange("1", 0, 20, nil),
				metaRange("2", 20, 40, nil),
				metaRange("3", 30, 50, nil),
			},
			expected: []string{"2", "3"},
		},
		// |--------------|
		//         |---------------------|
		//                       |--------------|
		"Overlapping blocks 3": {
			metas: []dirMeta{
				metaRange("1", 0, 20, nil),
				metaRange("2", 10, 40, nil),
				metaRange("3", 30, 50, nil),
			},
			expected: []string{"1", "2", "3"},
		},
		// |--------------|
		//               |--------------------------------|
		//                |--------------|
		//                               |--------------|
		"Overlapping blocks 4": {
			metas: []dirMeta{
				metaRange("5", 0, 360, nil),
				metaRange("6", 340, 560, nil),
				metaRange("7", 360, 420, nil),
				metaRange("8", 420, 540, nil),
			},
			expected: []string{"5", "6", "7", "8"},
		},
		// |--------------|
		//               |--------------|
		//                                            |--------------|
		//                                                          |--------------|
		"Overlapping blocks 5": {
			metas: []dirMeta{
				metaRange("1", 0, 10, nil),
				metaRange("2", 9, 20, nil),
				metaRange("3", 30, 40, nil),
				metaRange("4", 39, 50, nil),
			},
			expected: []string{"1", "2"},
		},
	}

	for title, c := range cases {
		if !t.Run(title, func(t *testing.T) {
			res, err := compactor.plan(c.metas)
			require.NoError(t, err)
			require.Equal(t, c.expected, res)
		}) {
			return
		}
	}
}

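// TestRangeWithFailedCompactionWontGetSelected verifies that a range
// containing a block whose previous compaction failed is not selected again.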
func TestRangeWithFailedCompactionWontGetSelected(t *testing.T) {
	compactor, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{
		20,
		60,
		240,
		720,
		2160,
	}, nil, nil)
	require.NoError(t, err)

	cases := []struct {
		metas []dirMeta
	}{
		{
			metas: []dirMeta{
				metaRange("1", 0, 20, nil),
				metaRange("2", 20, 40, nil),
				metaRange("3", 40, 60, nil),
				metaRange("4", 60, 80, nil),
			},
		},
		{
			metas: []dirMeta{
				metaRange("1", 0, 20, nil),
				metaRange("2", 20, 40, nil),
				metaRange("3", 60, 80, nil),
				metaRange("4", 80, 100, nil),
			},
		},
		{
			metas: []dirMeta{
				metaRange("1", 0, 20, nil),
				metaRange("2", 20, 40, nil),
				metaRange("3", 40, 60, nil),
				metaRange("4", 60, 120, nil),
				metaRange("5", 120, 180, nil),
				metaRange("6", 180, 200, nil),
			},
		},
	}

	for _, c := range cases {
		c.metas[1].meta.Compaction.Failed = true
		res, err := compactor.plan(c.metas)
		require.NoError(t, err)

		require.Equal(t, []string(nil), res)
	}
}

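// TestCompactionFailWillCleanUpTempDir checks that a failed block write
// removes the temporary block directory it was writing into.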
func TestCompactionFailWillCleanUpTempDir(t *testing.T) {
	compactor, err := NewLeveledCompactor(context.Background(), nil, promslog.NewNopLogger(), []int64{
		20,
		60,
		240,
		720,
		2160,
	}, nil, nil)
	require.NoError(t, err)

	tmpdir := t.TempDir()

	require.Error(t, compactor.write(tmpdir, &BlockMeta{}, DefaultBlockPopulator{}, erringBReader{}))
	_, err = os.Stat(filepath.Join(tmpdir, BlockMeta{}.ULID.String()) + tmpForCreationBlockDirSuffix)
	require.True(t, os.IsNotExist(err), "directory is not cleaned up")
}

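// metaRange builds a dirMeta with the given directory name, time range, and
// optional stats for use in the planner tests above.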
func metaRange(name string, mint, maxt int64, stats *BlockStats) dirMeta {
	meta := &BlockMeta{MinTime: mint, MaxTime: maxt}
	if stats != nil {
		meta.Stats = *stats
	}
	return dirMeta{
		dir:  name,
		meta: meta,
	}
}

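// erringBReader is a block reader whose accessors always fail, used to
// exercise compaction error paths.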
type erringBReader struct{}

func (erringBReader) Index() (IndexReader, error)            { return nil, errors.New("index") }
func (erringBReader) Chunks() (ChunkReader, error)           { return nil, errors.New("chunks") }
func (erringBReader) Tombstones() (tombstones.Reader, error) { return nil, errors.New("tombstones") }
func (erringBReader) Meta() BlockMeta                        { return BlockMeta{} }
func (erringBReader) Size() int64                            { return 0 }

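// nopChunkWriter is a chunk writer that discards all writes, so population
// logic can run without touching disk.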
type nopChunkWriter struct{}

func (nopChunkWriter) WriteChunks(...chunks.Meta) error { return nil }
func (nopChunkWriter) Close() error                     { return nil }

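// samplesForRange generates one sample per timestamp in [minTime, maxTime],
// split into chunks of at most maxSamplesPerChunk samples. For example,
// samplesForRange(0, 5, 3) returns [[{t:0} {t:1} {t:2}] [{t:3} {t:4} {t:5}]].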
func samplesForRange(minTime, maxTime int64, maxSamplesPerChunk int) (ret [][]sample) {
	var curr []sample
	for i := minTime; i <= maxTime; i++ {
		curr = append(curr, sample{t: i})
		if len(curr) >= maxSamplesPerChunk {
			ret = append(ret, curr)
			curr = []sample{}
		}
	}
	if len(curr) > 0 {
		ret = append(ret, curr)
	}
	return ret
}

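// TestCompaction_populateBlock runs table-driven cases through block
// population, checking the merged output series and error behavior.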
 | 
					
						
							| 
									
										
										
										
											2018-10-12 17:45:19 +08:00
										 |  |  |  | func TestCompaction_populateBlock(t *testing.T) { | 
					
						
							| 
									
										
										
										
											2020-07-31 23:03:02 +08:00
										 |  |  |  | 	for _, tc := range []struct { | 
					
						
							| 
									
										
										
										
											2018-10-12 17:45:19 +08:00
										 |  |  |  | 		title              string | 
					
						
							|  |  |  |  | 		inputSeriesSamples [][]seriesSamples | 
					
						
							|  |  |  |  | 		compactMinTime     int64 | 
					
						
							|  |  |  |  | 		compactMaxTime     int64 // When not defined the test runner sets a default of math.MaxInt64.
 | 
					
						
							| 
									
										
										
										
											2024-06-25 16:21:48 +08:00
										 |  |  |  | 		irPostingsFunc     IndexReaderPostingsFunc | 
					
						
							| 
									
										
										
										
											2019-02-14 21:29:41 +08:00
										 |  |  |  | 		expSeriesSamples   []seriesSamples | 
					
						
							|  |  |  |  | 		expErr             error | 
					
						
							| 
									
										
										
										
											2018-10-12 17:45:19 +08:00
										 |  |  |  | 	}{ | 
					
						
							|  |  |  |  | 		{ | 
					
						
							|  |  |  |  | 			title:              "Populate block from empty input should return error.", | 
					
						
							|  |  |  |  | 			inputSeriesSamples: [][]seriesSamples{}, | 
					
						
							|  |  |  |  | 			expErr:             errors.New("cannot populate block from no readers"), | 
					
						
							|  |  |  |  | 		}, | 
					
						
							|  |  |  |  | 		{ | 
					
						
							|  |  |  |  | 			// Populate from single block without chunks. We expect these kind of series being ignored.
 | 
					
						
							|  |  |  |  | 			inputSeriesSamples: [][]seriesSamples{ | 
					
						
							| 
									
										
										
										
											2020-07-31 23:03:02 +08:00
										 |  |  |  | 				{{lset: map[string]string{"a": "b"}}}, | 
					
						
							| 
									
										
										
										
											2018-10-12 17:45:19 +08:00
										 |  |  |  | 			}, | 
					
						
							|  |  |  |  | 		}, | 
					
						
							|  |  |  |  | 		{ | 
					
						
							|  |  |  |  | 			title: "Populate from single block. We expect the same samples at the output.", | 
					
						
							|  |  |  |  | 			inputSeriesSamples: [][]seriesSamples{ | 
					
						
							|  |  |  |  | 				{ | 
					
						
							|  |  |  |  | 					{ | 
					
						
							|  |  |  |  | 						lset:   map[string]string{"a": "b"}, | 
					
						
							|  |  |  |  | 						chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}}, | 
					
						
							|  |  |  |  | 					}, | 
					
						
							|  |  |  |  | 				}, | 
					
						
							|  |  |  |  | 			}, | 
					
						
							|  |  |  |  | 			expSeriesSamples: []seriesSamples{ | 
					
						
							|  |  |  |  | 				{ | 
					
						
							|  |  |  |  | 					lset:   map[string]string{"a": "b"}, | 
					
						
							|  |  |  |  | 					chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}}, | 
					
						
							|  |  |  |  | 				}, | 
					
						
							|  |  |  |  | 			}, | 
					
						
							|  |  |  |  | 		}, | 
					
						
							|  |  |  |  | 		{ | 
					
						
							|  |  |  |  | 			title: "Populate from two blocks.", | 
					
						
							|  |  |  |  | 			inputSeriesSamples: [][]seriesSamples{ | 
					
						
							|  |  |  |  | 				{ | 
					
						
							|  |  |  |  | 					{ | 
					
						
							|  |  |  |  | 						lset:   map[string]string{"a": "b"}, | 
					
						
							|  |  |  |  | 						chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}}, | 
					
						
							|  |  |  |  | 					}, | 
					
						
							|  |  |  |  | 					{ | 
					
						
							|  |  |  |  | 						lset:   map[string]string{"a": "c"}, | 
					
						
							|  |  |  |  | 						chunks: [][]sample{{{t: 1}, {t: 9}}, {{t: 10}, {t: 19}}}, | 
					
						
							|  |  |  |  | 					}, | 
					
						
							|  |  |  |  | 					{ | 
					
						
							|  |  |  |  | 						// no-chunk series should be dropped.
 | 
					
						
							|  |  |  |  | 						lset: map[string]string{"a": "empty"}, | 
					
						
							|  |  |  |  | 					}, | 
					
						
							|  |  |  |  | 				}, | 
					
						
							|  |  |  |  | 				{ | 
					
						
							|  |  |  |  | 					{ | 
					
						
							|  |  |  |  | 						lset:   map[string]string{"a": "b"}, | 
					
						
							|  |  |  |  | 						chunks: [][]sample{{{t: 21}, {t: 30}}}, | 
					
						
							|  |  |  |  | 					}, | 
					
						
							|  |  |  |  | 					{ | 
					
						
							|  |  |  |  | 						lset:   map[string]string{"a": "c"}, | 
					
						
							|  |  |  |  | 						chunks: [][]sample{{{t: 40}, {t: 45}}}, | 
					
						
							|  |  |  |  | 					}, | 
					
						
							|  |  |  |  | 				}, | 
					
						
							|  |  |  |  | 			}, | 
					
						
							|  |  |  |  | 			expSeriesSamples: []seriesSamples{ | 
					
						
							|  |  |  |  | 				{ | 
					
						
							|  |  |  |  | 					lset:   map[string]string{"a": "b"}, | 
					
						
							|  |  |  |  | 					chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}, {{t: 21}, {t: 30}}}, | 
					
						
							|  |  |  |  | 				}, | 
					
						
							|  |  |  |  | 				{ | 
					
						
							|  |  |  |  | 					lset:   map[string]string{"a": "c"}, | 
					
						
							|  |  |  |  | 					chunks: [][]sample{{{t: 1}, {t: 9}}, {{t: 10}, {t: 19}}, {{t: 40}, {t: 45}}}, | 
					
						
							|  |  |  |  | 				}, | 
					
						
							|  |  |  |  | 			}, | 
					
						
							|  |  |  |  | 		}, | 
					
						
							| 
									
										
										
										
											2020-07-31 23:03:02 +08:00
		{
			title: "Populate from two blocks; chunks with negative time.",
			inputSeriesSamples: [][]seriesSamples{
				{
					{
						lset:   map[string]string{"a": "b"},
						chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}},
					},
					{
						lset:   map[string]string{"a": "c"},
						chunks: [][]sample{{{t: -11}, {t: -9}}, {{t: 10}, {t: 19}}},
					},
					{
						// No-chunk series should be dropped.
						lset: map[string]string{"a": "empty"},
					},
				},
				{
					{
						lset:   map[string]string{"a": "b"},
						chunks: [][]sample{{{t: 21}, {t: 30}}},
					},
					{
						lset:   map[string]string{"a": "c"},
						chunks: [][]sample{{{t: 40}, {t: 45}}},
					},
				},
			},
			// Extend the block's range below zero so the negative-timestamp chunk is kept rather than trimmed.
			compactMinTime: -11,
			expSeriesSamples: []seriesSamples{
				{
					lset:   map[string]string{"a": "b"},
					chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}, {{t: 21}, {t: 30}}},
				},
				{
					lset:   map[string]string{"a": "c"},
					chunks: [][]sample{{{t: -11}, {t: -9}}, {{t: 10}, {t: 19}}, {{t: 40}, {t: 45}}},
				},
			},
		},
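		// Within a series, chunk order from the input blocks must be preserved in the output.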
		{
			title: "Populate from two blocks showing that order is maintained.",
			inputSeriesSamples: [][]seriesSamples{
				{
					{
						lset:   map[string]string{"a": "b"},
						chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}},
					},
					{
						lset:   map[string]string{"a": "c"},
						chunks: [][]sample{{{t: 1}, {t: 9}}, {{t: 10}, {t: 19}}},
					},
				},
				{
					{
						lset:   map[string]string{"a": "b"},
						chunks: [][]sample{{{t: 21}, {t: 30}}},
					},
					{
						lset:   map[string]string{"a": "c"},
						chunks: [][]sample{{{t: 40}, {t: 45}}},
					},
				},
			},
			expSeriesSamples: []seriesSamples{
				{
					lset:   map[string]string{"a": "b"},
					chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}, {{t: 21}, {t: 30}}},
				},
				{
					lset:   map[string]string{"a": "c"},
					chunks: [][]sample{{{t: 1}, {t: 9}}, {{t: 10}, {t: 19}}, {{t: 40}, {t: 45}}},
				},
			},
		},
		{
			title: "Populate from two blocks showing that order of series is sorted.",
			inputSeriesSamples: [][]seriesSamples{
				{
					{
						lset:   map[string]string{"a": "4"},
						chunks: [][]sample{{{t: 5}, {t: 7}}},
					},
					{
						lset:   map[string]string{"a": "3"},
						chunks: [][]sample{{{t: 5}, {t: 6}}},
					},
					{
						lset:   map[string]string{"a": "same"},
						chunks: [][]sample{{{t: 1}, {t: 4}}},
					},
				},
				{
					{
						lset:   map[string]string{"a": "2"},
						chunks: [][]sample{{{t: 1}, {t: 3}}},
					},
					{
						lset:   map[string]string{"a": "1"},
						chunks: [][]sample{{{t: 1}, {t: 2}}},
					},
					{
						lset:   map[string]string{"a": "same"},
						chunks: [][]sample{{{t: 5}, {t: 8}}},
					},
				},
			},
			expSeriesSamples: []seriesSamples{
				{
					lset:   map[string]string{"a": "1"},
					chunks: [][]sample{{{t: 1}, {t: 2}}},
				},
				{
					lset:   map[string]string{"a": "2"},
					chunks: [][]sample{{{t: 1}, {t: 3}}},
				},
				{
					lset:   map[string]string{"a": "3"},
					chunks: [][]sample{{{t: 5}, {t: 6}}},
				},
				{
					lset:   map[string]string{"a": "4"},
					chunks: [][]sample{{{t: 5}, {t: 7}}},
				},
				{
					lset:   map[string]string{"a": "same"},
					chunks: [][]sample{{{t: 1}, {t: 4}}, {{t: 5}, {t: 8}}},
				},
			},
		},
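		// Chunks that appear identically in both blocks must be written exactly once.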
		{
			title: "Populate from two blocks 1:1 duplicated chunks; with negative timestamps.",
			inputSeriesSamples: [][]seriesSamples{
				{
					{
						lset:   map[string]string{"a": "1"},
						chunks: [][]sample{{{t: 1}, {t: 2}}, {{t: 3}, {t: 4}}},
					},
					{
						lset:   map[string]string{"a": "2"},
						chunks: [][]sample{{{t: -3}, {t: -2}}, {{t: 1}, {t: 3}, {t: 4}}, {{t: 5}, {t: 6}}},
					},
				},
				{
					{
						lset:   map[string]string{"a": "1"},
						chunks: [][]sample{{{t: 3}, {t: 4}}},
					},
					{
						lset:   map[string]string{"a": "2"},
						chunks: [][]sample{{{t: 1}, {t: 3}, {t: 4}}, {{t: 7}, {t: 8}}},
					},
				},
			},
			compactMinTime: -3,
			expSeriesSamples: []seriesSamples{
				{
					lset:   map[string]string{"a": "1"},
					chunks: [][]sample{{{t: 1}, {t: 2}}, {{t: 3}, {t: 4}}},
				},
				{
					lset:   map[string]string{"a": "2"},
					chunks: [][]sample{{{t: -3}, {t: -2}}, {{t: 1}, {t: 3}, {t: 4}}, {{t: 5}, {t: 6}}, {{t: 7}, {t: 8}}},
				},
			},
		},
		{
			// This should not happen because the head block ensures that chunks do not cross block boundaries.
			// We used to return an error, but now the chunk is trimmed.
			title: "Populate from single block containing chunk outside of compact meta time range.",
			inputSeriesSamples: [][]seriesSamples{
				{
					{
						lset:   map[string]string{"a": "b"},
						chunks: [][]sample{{{t: 1}, {t: 2}}, {{t: 10}, {t: 30}}},
					},
				},
			},
			compactMinTime: 0,
			compactMaxTime: 20,
			expSeriesSamples: []seriesSamples{
				{
					lset:   map[string]string{"a": "b"},
					chunks: [][]sample{{{t: 1}, {t: 2}}, {{t: 10}}},
				},
			},
		},
		{
			// Introduced by https://github.com/prometheus/tsdb/issues/347. We used to return an error, but now the chunk is trimmed.
			title: "Populate from single block containing extra chunk",
			inputSeriesSamples: [][]seriesSamples{
				{
					{
						lset:   map[string]string{"a": "issue347"},
						chunks: [][]sample{{{t: 1}, {t: 2}}, {{t: 10}, {t: 20}}},
					},
				},
			},
			compactMinTime: 0,
			compactMaxTime: 10,
			expSeriesSamples: []seriesSamples{
				{
					lset:   map[string]string{"a": "issue347"},
					chunks: [][]sample{{{t: 1}, {t: 2}}},
				},
			},
		},
		{
			// Deduplication expected.
			// Introduced by pull/370 and pull/539.
			title: "Populate from two blocks containing duplicated chunk.",
			inputSeriesSamples: [][]seriesSamples{
				{
					{
						lset:   map[string]string{"a": "b"},
						chunks: [][]sample{{{t: 1}, {t: 2}}, {{t: 10}, {t: 20}}},
					},
				},
				{
					{
						lset:   map[string]string{"a": "b"},
						chunks: [][]sample{{{t: 10}, {t: 20}}},
					},
				},
			},
			expSeriesSamples: []seriesSamples{
				{
					lset:   map[string]string{"a": "b"},
					chunks: [][]sample{{{t: 1}, {t: 2}}, {{t: 10}, {t: 20}}},
				},
			},
		},
		{
			// Vertical compaction: overlapping chunks of the same series are merged
			// sample by sample rather than concatenated, so the expected outputs
			// below contain re-sorted, deduplicated samples.
			// Introduced by https://github.com/prometheus/tsdb/pull/539.
			title: "Populate from three overlapping blocks.",
			inputSeriesSamples: [][]seriesSamples{
				{
					{
						lset:   map[string]string{"a": "overlap-all"},
						chunks: [][]sample{{{t: 19}, {t: 30}}},
					},
					{
						lset:   map[string]string{"a": "overlap-beginning"},
						chunks: [][]sample{{{t: 0}, {t: 5}}},
					},
					{
						lset:   map[string]string{"a": "overlap-ending"},
						chunks: [][]sample{{{t: 21}, {t: 30}}},
					},
				},
				{
					{
						lset:   map[string]string{"a": "overlap-all"},
						chunks: [][]sample{{{t: 0}, {t: 10}, {t: 11}, {t: 20}}},
					},
					{
						lset:   map[string]string{"a": "overlap-beginning"},
						chunks: [][]sample{{{t: 0}, {t: 10}, {t: 12}, {t: 20}}},
					},
					{
						lset:   map[string]string{"a": "overlap-ending"},
						chunks: [][]sample{{{t: 0}, {t: 10}, {t: 13}, {t: 20}}},
					},
				},
				{
					{
						lset:   map[string]string{"a": "overlap-all"},
						chunks: [][]sample{{{t: 27}, {t: 35}}},
					},
					{
						lset:   map[string]string{"a": "overlap-ending"},
						chunks: [][]sample{{{t: 27}, {t: 35}}},
					},
				},
			},
			expSeriesSamples: []seriesSamples{
				{
					lset:   map[string]string{"a": "overlap-all"},
					chunks: [][]sample{{{t: 0}, {t: 10}, {t: 11}, {t: 19}, {t: 20}, {t: 27}, {t: 30}, {t: 35}}},
				},
				{
					lset:   map[string]string{"a": "overlap-beginning"},
					chunks: [][]sample{{{t: 0}, {t: 5}, {t: 10}, {t: 12}, {t: 20}}},
				},
				{
					lset:   map[string]string{"a": "overlap-ending"},
					chunks: [][]sample{{{t: 0}, {t: 10}, {t: 13}, {t: 20}}, {{t: 21}, {t: 27}, {t: 30}, {t: 35}}},
				},
			},
		},
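		// samplesForRange emits one sample per timestamp in [minTime, maxTime],
		// split into chunks of at most 120 samples, hence the chunk counts noted below.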
		{
			title: "Populate from three partially overlapping blocks with few full chunks.",
			inputSeriesSamples: [][]seriesSamples{
				{
					{
						lset:   map[string]string{"a": "1", "b": "1"},
						chunks: samplesForRange(0, 659, 120), // 5 and a half chunks.
					},
					{
						lset:   map[string]string{"a": "1", "b": "2"},
						chunks: samplesForRange(0, 659, 120),
					},
				},
				{
					{
						lset:   map[string]string{"a": "1", "b": "2"},
						chunks: samplesForRange(480, 1199, 120), // Two chunks overlapping with the previous block, two non-overlapping, and two overlapping with the next block.
					},
					{
						lset:   map[string]string{"a": "1", "b": "3"},
						chunks: samplesForRange(480, 1199, 120),
					},
				},
				{
					{
						lset:   map[string]string{"a": "1", "b": "2"},
						chunks: samplesForRange(960, 1499, 120), // 4 and a half chunks.
					},
					{
						lset:   map[string]string{"a": "1", "b": "4"},
						chunks: samplesForRange(960, 1499, 120),
					},
				},
			},
			expSeriesSamples: []seriesSamples{
				{
					lset:   map[string]string{"a": "1", "b": "1"},
					chunks: samplesForRange(0, 659, 120),
				},
				{
					lset:   map[string]string{"a": "1", "b": "2"},
					chunks: samplesForRange(0, 1499, 120),
				},
				{
					lset:   map[string]string{"a": "1", "b": "3"},
					chunks: samplesForRange(480, 1199, 120),
				},
				{
					lset:   map[string]string{"a": "1", "b": "4"},
					chunks: samplesForRange(960, 1499, 120),
				},
			},
		},
		{
			// All chunks that transitively overlap across the three blocks are
			// expected to merge into one big chunk; the final chunk overlaps
			// nothing and stays separate.
			title: "Populate from three partially overlapping blocks with chunks that are expected to merge into single big chunks.",
			inputSeriesSamples: [][]seriesSamples{
				{
					{
						lset:   map[string]string{"a": "1", "b": "2"},
						chunks: [][]sample{{{t: 0}, {t: 6902464}}, {{t: 6961968}, {t: 7080976}}},
					},
				},
				{
					{
						lset:   map[string]string{"a": "1", "b": "2"},
						chunks: [][]sample{{{t: 3600000}, {t: 13953696}}, {{t: 14042952}, {t: 14221464}}},
					},
				},
				{
					{
						lset:   map[string]string{"a": "1", "b": "2"},
						chunks: [][]sample{{{t: 10800000}, {t: 14251232}}, {{t: 14280984}, {t: 14340488}}},
					},
				},
			},
			expSeriesSamples: []seriesSamples{
				{
					lset:   map[string]string{"a": "1", "b": "2"},
					chunks: [][]sample{{{t: 0}, {t: 3600000}, {t: 6902464}, {t: 6961968}, {t: 7080976}, {t: 10800000}, {t: 13953696}, {t: 14042952}, {t: 14221464}, {t: 14251232}}, {{t: 14280984}, {t: 14340488}}},
				},
			},
		},
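		// Block time ranges are half-open [mint, maxt), so with compactMinTime=1
		// and compactMaxTime=11 the histogram sample at t=0 and the float sample
		// at t=11 are dropped.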
		{
			// Regression test for populateWithDelChunkSeriesIterator failing to set minTime on chunks.
			title:          "Populate from mixed type series and expect sample inside the interval only.",
			compactMinTime: 1,
			compactMaxTime: 11,
			inputSeriesSamples: [][]seriesSamples{
				{
					{
						lset: map[string]string{"a": "1"},
						chunks: [][]sample{
							{{t: 0, h: tsdbutil.GenerateTestHistogram(0)}, {t: 1, h: tsdbutil.GenerateTestHistogram(1)}},
							{{t: 10, f: 1}, {t: 11, f: 2}},
						},
					},
				},
			},
			expSeriesSamples: []seriesSamples{
				{
					lset: map[string]string{"a": "1"},
					chunks: [][]sample{
						{{t: 1, h: tsdbutil.GenerateTestHistogram(1)}},
						{{t: 10, f: 1}},
					},
				},
			},
		},
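		// irPostingsFunc lets a case override which postings the populator
		// iterates, simulating callers that select only a subset of the index.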
		{
			title: "Populate from single block with index reader postings function selecting different series. Expect empty block.",
			inputSeriesSamples: [][]seriesSamples{
				{
					{
						lset:   map[string]string{"a": "b"},
						chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}},
					},
				},
			},
			irPostingsFunc: func(ctx context.Context, reader IndexReader) index.Postings {
				p, err := reader.Postings(ctx, "a", "c")
				if err != nil {
					return index.EmptyPostings()
				}
				return reader.SortedPostings(p)
			},
		},
		{
			title: "Populate from single block with index reader postings function selecting two series. Expect partial block.",
			inputSeriesSamples: [][]seriesSamples{
				{
					{
						lset:   map[string]string{"a": "b"},
						chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}},
					},
					{
						lset:   map[string]string{"a": "c"},
						chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}},
					},
					{
						lset:   map[string]string{"a": "d"},
						chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}},
					},
				},
			},
			irPostingsFunc: func(ctx context.Context, reader IndexReader) index.Postings {
				p, err := reader.Postings(ctx, "a", "c", "d")
				if err != nil {
					return index.EmptyPostings()
				}
				return reader.SortedPostings(p)
			},
			expSeriesSamples: []seriesSamples{
				{
					lset:   map[string]string{"a": "c"},
					chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}},
				},
				{
					lset:   map[string]string{"a": "d"},
					chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}},
				},
			},
		},
	} {
		t.Run(tc.title, func(t *testing.T) {
			blocks := make([]BlockReader, 0, len(tc.inputSeriesSamples))
			for _, b := range tc.inputSeriesSamples {
				ir, cr, mint, maxt := createIdxChkReaders(t, b)
				blocks = append(blocks, &mockBReader{ir: ir, cr: cr, mint: mint, maxt: maxt})
			}

			c, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{0}, nil, nil)
			require.NoError(t, err)

			meta := &BlockMeta{
				MinTime: tc.compactMinTime,
				MaxTime: tc.compactMaxTime,
			}
			// Test cases that leave compactMaxTime at zero mean "no upper bound".
			if meta.MaxTime == 0 {
				meta.MaxTime = math.MaxInt64
			}

			iw := &mockIndexWriter{}
			blockPopulator := DefaultBlockPopulator{}
			irPostingsFunc := AllSortedPostings
			if tc.irPostingsFunc != nil {
				irPostingsFunc = tc.irPostingsFunc
			}
			err = blockPopulator.PopulateBlock(c.ctx, c.metrics, c.logger, c.chunkPool, c.mergeFunc, blocks, meta, iw, nopChunkWriter{}, irPostingsFunc)
			if tc.expErr != nil {
				require.EqualError(t, err, tc.expErr.Error())
				return
			}
			require.NoError(t, err)

			// Check that the response is expected and the chunks are valid.
			var raw []seriesSamples
			for _, s := range iw.seriesChunks {
				ss := seriesSamples{lset: s.l.Map()}
				var iter chunkenc.Iterator
				for _, chk := range s.chunks {
					var (
						samples       = make([]sample, 0, chk.Chunk.NumSamples())
						iter          = chk.Chunk.Iterator(iter)
						firstTs int64 = math.MaxInt64
						s       sample
					)
					for vt := iter.Next(); vt != chunkenc.ValNone; vt = iter.Next() {
						switch vt {
						case chunkenc.ValFloat:
							s.t, s.f = iter.At()
							samples = append(samples, s)
						case chunkenc.ValHistogram:
							s.t, s.h = iter.AtHistogram(nil)
							samples = append(samples, s)
						case chunkenc.ValFloatHistogram:
							s.t, s.fh = iter.AtFloatHistogram(nil)
							samples = append(samples, s)
						default:
							require.Fail(t, "unexpected value type")
						}
						if firstTs == math.MaxInt64 {
							firstTs = s.t
						}
					}

					// Check that the chunk has the correct min and max times.
					require.Equal(t, firstTs, chk.MinTime, "chunk Meta %v does not match the first encoded sample timestamp: %v", chk, firstTs)
					require.Equal(t, s.t, chk.MaxTime, "chunk Meta %v does not match the last encoded sample timestamp %v", chk, s.t)

					require.NoError(t, iter.Err())
					ss.chunks = append(ss.chunks, samples)
				}
				raw = append(raw, ss)
			}
			require.Equal(t, tc.expSeriesSamples, raw)

			// Check if stats are calculated properly.
			s := BlockStats{NumSeries: uint64(len(tc.expSeriesSamples))}
			for _, series := range tc.expSeriesSamples {
				s.NumChunks += uint64(len(series.chunks))
				for _, chk := range series.chunks {
					s.NumSamples += uint64(len(chk))
				}
			}
			require.Equal(t, s, meta.Stats)
		})
	}
}
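
// The vertical-compaction cases above all reduce to one primitive: merging
// sorted, possibly duplicated sample runs from overlapping chunks. The real
// work happens behind the compactor's mergeFunc; purely as an illustrative
// sketch, mergeSamplesSketch is a hypothetical helper (not part of the
// package API) showing the two-pointer merge that the expectations encode,
// keeping a single copy of duplicate timestamps.
func mergeSamplesSketch(a, b []sample) []sample {
	out := make([]sample, 0, len(a)+len(b))
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		switch {
		case a[i].t < b[j].t:
			out = append(out, a[i])
			i++
		case a[i].t > b[j].t:
			out = append(out, b[j])
			j++
		default:
			// Equal timestamps: keep one copy, mirroring chunk deduplication.
			out = append(out, a[i])
			i, j = i+1, j+1
		}
	}
	out = append(out, a[i:]...)
	return append(out, b[j:]...)
}
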
func BenchmarkCompaction(b *testing.B) {
	cases := []struct {
		ranges         [][2]int64
		compactionType string
	}{
		{
			ranges:         [][2]int64{{0, 100}, {200, 300}, {400, 500}, {600, 700}},
			compactionType: "normal",
		},
		{
			ranges:         [][2]int64{{0, 1000}, {2000, 3000}, {4000, 5000}, {6000, 7000}},
			compactionType: "normal",
		},
		{
			ranges:         [][2]int64{{0, 2000}, {3000, 5000}, {6000, 8000}, {9000, 11000}},
			compactionType: "normal",
		},
		{
			ranges:         [][2]int64{{0, 5000}, {6000, 11000}, {12000, 17000}, {18000, 23000}},
			compactionType: "normal",
		},
		// 40% overlaps.
		{
			ranges:         [][2]int64{{0, 100}, {60, 160}, {120, 220}, {180, 280}},
			compactionType: "vertical",
		},
		{
			ranges:         [][2]int64{{0, 1000}, {600, 1600}, {1200, 2200}, {1800, 2800}},
			compactionType: "vertical",
		},
		{
			ranges:         [][2]int64{{0, 2000}, {1200, 3200}, {2400, 4400}, {3600, 5600}},
			compactionType: "vertical",
		},
		{
			ranges:         [][2]int64{{0, 5000}, {3000, 8000}, {6000, 11000}, {9000, 14000}},
			compactionType: "vertical",
		},
	}

	nSeries := 10000
	for _, c := range cases {
		nBlocks := len(c.ranges)
		b.Run(fmt.Sprintf("type=%s,blocks=%d,series=%d,samplesPerSeriesPerBlock=%d", c.compactionType, nBlocks, nSeries, c.ranges[0][1]-c.ranges[0][0]+1), func(b *testing.B) {
			dir := b.TempDir()
			blockDirs := make([]string, 0, len(c.ranges))
			var blocks []*Block
			for _, r := range c.ranges {
				block, err := OpenBlock(nil, createBlock(b, dir, genSeries(nSeries, 10, r[0], r[1])), nil, nil)
				require.NoError(b, err)
				blocks = append(blocks, block)
				defer func() {
					require.NoError(b, block.Close())
				}()
				blockDirs = append(blockDirs, block.Dir())
			}

			c, err := NewLeveledCompactor(context.Background(), nil, promslog.NewNopLogger(), []int64{0}, nil, nil)
			require.NoError(b, err)

			b.ResetTimer()
			b.ReportAllocs()
			for i := 0; i < b.N; i++ {
				_, err = c.Compact(dir, blockDirs, blocks)
				require.NoError(b, err)
			}
		})
	}
}
					
						
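// BenchmarkCompactionFromHead sweeps how a fixed total of 100k series is
// split between label names and label values. According to the commit that
// coalesced series reads during compaction, runtime previously grew steeply
// with the number of label names (roughly 71s at 10000 names) and is roughly
// flat (about 1.2s) after that change.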
										 |  |  |  | func BenchmarkCompactionFromHead(b *testing.B) { | 
					
						
							| 
									
										
										
										
											2022-01-22 17:55:01 +08:00
										 |  |  |  | 	dir := b.TempDir() | 
					
						
							| 
									
										
											  
											
												Coalesce series reads where we can.
When compacting rather than doing a read of all
series in the index per label name, do many at once
but only when it won't use (much) more ram than writing the
special all index does.
original in-memory postings:
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4                  1        1202383447 ns/op        158936496 B/op   1031511 allocs/op
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4                  1        1141792706 ns/op        154453408 B/op   1093453 allocs/op
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4                  1        1169288829 ns/op        161072336 B/op   1110021 allocs/op
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4                  1        1115700103 ns/op        149480472 B/op   1129180 allocs/op
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4                  1        1283813141 ns/op        162937800 B/op   1202771 allocs/op
before:
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4                  1        1145195941 ns/op        131749984 B/op    834400 allocs/op
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4                  1        1233526345 ns/op        127889416 B/op    897033 allocs/op
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4                  1        1821942296 ns/op        131665648 B/op    914836 allocs/op
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4                  1        8035568665 ns/op        123811832 B/op    934312 allocs/op
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4                  1       71325926267 ns/op        140722648 B/op   1016824 allocs/op
after:
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4                  1        1101429174 ns/op        129063496 B/op    832571 allocs/op
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4                  1        1074466374 ns/op        124154888 B/op    894875 allocs/op
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4                  1        1166510282 ns/op        128790648 B/op    912931 allocs/op
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4                  1        1075013071 ns/op        120570696 B/op    933511 allocs/op
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4                  1        1231673790 ns/op        138754288 B/op   1022791 allocs/op
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
											
										 
											2019-12-12 05:24:03 +08:00
										 |  |  |  | 	totalSeries := 100000 | 
					
						
							|  |  |  |  | 	for labelNames := 1; labelNames < totalSeries; labelNames *= 10 { | 
					
						
							|  |  |  |  | 		labelValues := totalSeries / labelNames | 
					
						
							|  |  |  |  | 		b.Run(fmt.Sprintf("labelnames=%d,labelvalues=%d", labelNames, labelValues), func(b *testing.B) { | 
					
						
							| 
									
										
										
										
											2022-01-22 17:55:01 +08:00
										 |  |  |  | 			chunkDir := b.TempDir() | 
					
						
							| 
									
										
										
										
											2021-02-09 22:12:48 +08:00
										 |  |  |  | 			opts := DefaultHeadOptions() | 
					
						
							|  |  |  |  | 			opts.ChunkRange = 1000 | 
					
						
							|  |  |  |  | 			opts.ChunkDirRoot = chunkDir | 
					
						
							| 
									
										
										
										
											2022-09-21 01:05:50 +08:00
										 |  |  |  | 			h, err := NewHead(nil, nil, nil, nil, opts, nil) | 
					
						
							| 
									
										
										
										
											2020-10-29 17:43:23 +08:00
										 |  |  |  | 			require.NoError(b, err) | 
					
						
							| 
									
										
											  
											
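			// Populate the head: labelNames distinct label names, each carrying
			// labelValues values, so every sub-benchmark ingests the same total
			// number of series (labelNames * labelValues == totalSeries).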
			for ln := 0; ln < labelNames; ln++ {
				app := h.Appender(context.Background())
				for lv := 0; lv < labelValues; lv++ {
					app.Append(0, labels.FromStrings(strconv.Itoa(ln), fmt.Sprintf("%d%s%d", lv, postingsBenchSuffix, ln)), 0, 0)
				}
				require.NoError(b, app.Commit())
			}

			b.ResetTimer()
			b.ReportAllocs()
			for i := 0; i < b.N; i++ {
				createBlockFromHead(b, filepath.Join(dir, fmt.Sprintf("%d-%d", i, labelNames)), h)
			}
			h.Close()
		})
	}
}

func BenchmarkCompactionFromOOOHead(b *testing.B) {
	dir := b.TempDir()
	totalSeries := 100000
	totalSamples := 100
	for labelNames := 1; labelNames < totalSeries; labelNames *= 10 {
		labelValues := totalSeries / labelNames
		b.Run(fmt.Sprintf("labelnames=%d,labelvalues=%d", labelNames, labelValues), func(b *testing.B) {
			chunkDir := b.TempDir()
			opts := DefaultHeadOptions()
			opts.ChunkRange = 1000
			opts.ChunkDirRoot = chunkDir
			opts.OutOfOrderTimeWindow.Store(int64(totalSamples))
			h, err := NewHead(nil, nil, nil, nil, opts, nil)
			require.NoError(b, err)
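			// The first sample per series is appended at t=totalSamples; every
			// later append (t=0..totalSamples-1) is then older than that and
			// goes through the out-of-order path into the OOO head.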
					
						
			for ln := 0; ln < labelNames; ln++ {
				app := h.Appender(context.Background())
				for lv := 0; lv < labelValues; lv++ {
					lbls := labels.FromStrings(strconv.Itoa(ln), fmt.Sprintf("%d%s%d", lv, postingsBenchSuffix, ln))
					_, err = app.Append(0, lbls, int64(totalSamples), 0)
					require.NoError(b, err)
					for ts := 0; ts < totalSamples; ts++ {
						_, err = app.Append(0, lbls, int64(ts), float64(ts))
						require.NoError(b, err)
					}
				}
				require.NoError(b, app.Commit())
			}

			b.ResetTimer()
			b.ReportAllocs()
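			// Each iteration snapshots the out-of-order data into a compaction
			// head and writes it out as a new block directory.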
					
						
			for i := 0; i < b.N; i++ {
				oooHead, err := NewOOOCompactionHead(context.TODO(), h)
				require.NoError(b, err)
				createBlockFromOOOHead(b, filepath.Join(dir, fmt.Sprintf("%d-%d", i, labelNames)), oooHead)
			}
			h.Close()
		})
	}
}

// TestDisableAutoCompactions checks that we can
// disable and enable the auto compaction.
// This is needed for unit tests that rely on
// checking state before and after a compaction.
func TestDisableAutoCompactions(t *testing.T) {
	db := openTestDB(t, nil, nil)
	defer func() {
		require.NoError(t, db.Close())
	}()

	blockRange := db.compactor.(*LeveledCompactor).ranges[0]
	label := labels.FromStrings("foo", "bar")

	// Trigger a compaction to check that it was skipped and
	// no new blocks were created when compaction is disabled.
	db.DisableCompactions()
	app := db.Appender(context.Background())
	for i := int64(0); i < 3; i++ {
		_, err := app.Append(0, label, i*blockRange, 0)
		require.NoError(t, err)
		_, err = app.Append(0, label, i*blockRange+1000, 0)
		require.NoError(t, err)
	}
	require.NoError(t, app.Commit())

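	// Non-blocking send: trigger a compaction cycle only if one is not
	// already queued.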
					
						
	select {
	case db.compactc <- struct{}{}:
	default:
	}

	for x := 0; x < 10; x++ {
		if prom_testutil.ToFloat64(db.metrics.compactionsSkipped) > 0.0 {
			break
		}
		time.Sleep(10 * time.Millisecond)
	}

	require.Greater(t, prom_testutil.ToFloat64(db.metrics.compactionsSkipped), 0.0, "No compaction was skipped after the set timeout.")
	require.Empty(t, db.blocks)

	// Enable the compaction, trigger it and check that the block is persisted.
	db.EnableCompactions()
	select {
	case db.compactc <- struct{}{}:
	default:
	}
	for x := 0; x < 100; x++ {
		if len(db.Blocks()) > 0 {
			break
		}
		time.Sleep(100 * time.Millisecond)
	}
	require.NotEmpty(t, db.Blocks(), "No block was persisted after the set timeout.")
}

// TestCancelCompactions ensures that when the db is closed
// any running compaction is cancelled to unblock closing the db.
func TestCancelCompactions(t *testing.T) {
	tmpdir := t.TempDir()

	// Create some blocks to fall within the compaction range.
	createBlock(t, tmpdir, genSeries(1, 10000, 0, 1000))
	createBlock(t, tmpdir, genSeries(1, 10000, 1000, 2000))
	createBlock(t, tmpdir, genSeries(1, 1, 2000, 2001)) // The most recent block is ignored so it can be a small one.

	// Copy the db so we have an exact copy to compare compaction times.
	tmpdirCopy := t.TempDir()
	err := fileutil.CopyDirs(tmpdir, tmpdirCopy)
	require.NoError(t, err)

	// Measure the compaction time without interrupting it.
	var timeCompactionUninterrupted time.Duration
	{
		db, err := open(tmpdir, promslog.NewNopLogger(), nil, DefaultOptions(), []int64{1, 2000}, nil)
		require.NoError(t, err)
		require.Len(t, db.Blocks(), 3, "initial block count mismatch")
		require.Equal(t, 0.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran), "initial compaction counter mismatch")
		db.compactc <- struct{}{} // Trigger a compaction.
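		// Wait until the compaction has actually started populating blocks
		// before starting the clock.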
		for prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.PopulatingBlocks) <= 0 {
			time.Sleep(3 * time.Millisecond)
		}

		start := time.Now()
		for prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran) != 1 {
			time.Sleep(3 * time.Millisecond)
		}
		timeCompactionUninterrupted = time.Since(start)

		require.NoError(t, db.Close())
	}
	// Measure the compaction time when closing the db in the middle of compaction.
	{
		db, err := open(tmpdirCopy, promslog.NewNopLogger(), nil, DefaultOptions(), []int64{1, 2000}, nil)
		require.NoError(t, err)
		require.Len(t, db.Blocks(), 3, "initial block count mismatch")
		require.Equal(t, 0.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran), "initial compaction counter mismatch")
		db.compactc <- struct{}{} // Trigger a compaction.

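		// Wait until the compaction is populating blocks, then close the db
		// mid-compaction.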
		for prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.PopulatingBlocks) <= 0 {
			time.Sleep(3 * time.Millisecond)
		}

		start := time.Now()
		require.NoError(t, db.Close())
		actT := time.Since(start)

		expT := timeCompactionUninterrupted / 2 // Closing the db in the middle of compaction should take less than half of the uninterrupted compaction time.
		require.Less(t, actT, expT, "closing the db took more than expected. exp: <%v, act: %v", expT, actT)

		// Make sure that no blocks were marked as compaction failed.
		// This checks that the `context.Canceled` error is properly checked at all levels:
		// - tsdb_errors.NewMulti() should have the Is() method implemented for correct checks.
		// - callers should check with errors.Is() instead of ==.
		readOnlyDB, err := OpenDBReadOnly(tmpdirCopy, "", promslog.NewNopLogger())
		require.NoError(t, err)
		blocks, err := readOnlyDB.Blocks()
		require.NoError(t, err)
		for i, b := range blocks {
			require.Falsef(t, b.Meta().Compaction.Failed, "block %d (%s) should not be marked as compaction failed", i, b.Meta().ULID)
		}
		require.NoError(t, readOnlyDB.Close())
	}
}

// TestDeleteCompactionBlockAfterFailedReload ensures that a failed reloadBlocks immediately after a compaction
// deletes the resulting block to avoid creating blocks with the same time range.
func TestDeleteCompactionBlockAfterFailedReload(t *testing.T) {
	tests := map[string]func(*DB) int{
		"Test Head Compaction": func(db *DB) int {
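			// The head becomes compactable once it spans 1.5x the smallest
			// block range; the samples below are spaced to just cross that
			// threshold.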
			rangeToTriggerCompaction := db.compactor.(*LeveledCompactor).ranges[0]/2*3 - 1
			defaultLabel := labels.FromStrings("foo", "bar")

			// Add some data to the head that is enough to trigger a compaction.
			app := db.Appender(context.Background())
			_, err := app.Append(0, defaultLabel, 1, 0)
			require.NoError(t, err)
			_, err = app.Append(0, defaultLabel, 2, 0)
			require.NoError(t, err)
			_, err = app.Append(0, defaultLabel, 3+rangeToTriggerCompaction, 0)
			require.NoError(t, err)
			require.NoError(t, app.Commit())

			// No blocks exist on disk yet; the compaction source is the head.
			return 0
		},
		"Test Block Compaction": func(db *DB) int {
			blocks := []*BlockMeta{
				{MinTime: 0, MaxTime: 100},
				{MinTime: 100, MaxTime: 150},
				{MinTime: 150, MaxTime: 200},
			}
			for _, m := range blocks {
				createBlock(t, db.Dir(), genSeries(1, 1, m.MinTime, m.MaxTime))
			}
			require.NoError(t, db.reload())
			require.Len(t, db.Blocks(), len(blocks), "unexpected block count after a reloadBlocks")

			return len(blocks)
		},
	}

	for title, bootStrap := range tests {
		t.Run(title, func(t *testing.T) {
			ctx := context.Background()

			db := openTestDB(t, nil, []int64{1, 100})
			defer func() {
				require.NoError(t, db.Close())
			}()
			db.DisableCompactions()

			expBlocks := bootStrap(db)

			// Create a block that will trigger the reloadBlocks to fail.
			blockPath := createBlock(t, db.Dir(), genSeries(1, 1, 200, 300))
			lastBlockIndex := path.Join(blockPath, indexFilename)
			actBlocks, err := blockDirs(db.Dir())
			require.NoError(t, err)
			require.Equal(t, expBlocks, len(actBlocks)-1)    // -1 to exclude the corrupted block.
			require.NoError(t, os.RemoveAll(lastBlockIndex)) // Corrupt the block by removing the index file.

			require.Equal(t, 0.0, prom_testutil.ToFloat64(db.metrics.reloadsFailed), "initial 'failed db reloadBlocks' count metrics mismatch")
			require.Equal(t, 0.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran), "initial `compactions` count metric mismatch")
			require.Equal(t, 0.0, prom_testutil.ToFloat64(db.metrics.compactionsFailed), "initial `compactions failed` count metric mismatch")

			// Do the compaction and check the metrics.
			// Compaction should succeed, but the reloadBlocks should fail and
			// the new block created from the compaction should be deleted.
			require.Error(t, db.Compact(ctx))
			require.Equal(t, 1.0, prom_testutil.ToFloat64(db.metrics.reloadsFailed), "'failed db reloadBlocks' count metrics mismatch")
			require.Equal(t, 1.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran), "`compaction` count metric mismatch")
			require.Equal(t, 1.0, prom_testutil.ToFloat64(db.metrics.compactionsFailed), "`compactions failed` count metric mismatch")

			actBlocks, err = blockDirs(db.Dir())
			require.NoError(t, err)
			require.Equal(t, expBlocks, len(actBlocks)-1, "block count should be the same as before the compaction") // -1 to exclude the corrupted block.
		})
	}
}

func TestHeadCompactionWithHistograms(t *testing.T) {
	for _, floatTest := range []bool{true, false} {
		t.Run(fmt.Sprintf("float=%t", floatTest), func(t *testing.T) {
			head, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
			require.NoError(t, head.Init(0))
			t.Cleanup(func() {
				require.NoError(t, head.Close())
			})

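			// Helpers: append samples minute by minute and record what the
			// block querier should return. Within one appended run, only the
			// first sample keeps an unknown counter-reset hint.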
					
						
			minute := func(m int) int64 { return int64(m) * time.Minute.Milliseconds() }
			ctx := context.Background()
			appendHistogram := func(
				lbls labels.Labels, from, to int, h *histogram.Histogram, exp *[]chunks.Sample,
			) {
				t.Helper()
				app := head.Appender(ctx)
				for tsMinute := from; tsMinute <= to; tsMinute++ {
					var err error
					if floatTest {
						_, err = app.AppendHistogram(0, lbls, minute(tsMinute), nil, h.ToFloat(nil))
						efh := h.ToFloat(nil)
						if tsMinute == from {
							efh.CounterResetHint = histogram.UnknownCounterReset
						} else {
							efh.CounterResetHint = histogram.NotCounterReset
						}
						*exp = append(*exp, sample{t: minute(tsMinute), fh: efh})
					} else {
						_, err = app.AppendHistogram(0, lbls, minute(tsMinute), h, nil)
						eh := h.Copy()
						if tsMinute == from {
							eh.CounterResetHint = histogram.UnknownCounterReset
						} else {
							eh.CounterResetHint = histogram.NotCounterReset
						}
						*exp = append(*exp, sample{t: minute(tsMinute), h: eh})
					}
					require.NoError(t, err)
				}
				require.NoError(t, app.Commit())
			}
			appendFloat := func(lbls labels.Labels, from, to int, exp *[]chunks.Sample) {
				t.Helper()
				app := head.Appender(ctx)
				for tsMinute := from; tsMinute <= to; tsMinute++ {
					_, err := app.Append(0, lbls, minute(tsMinute), float64(tsMinute))
					require.NoError(t, err)
					*exp = append(*exp, sample{t: minute(tsMinute), f: float64(tsMinute)})
				}
				require.NoError(t, app.Commit())
			}

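			// Four series covering each float/histogram interleaving, all fed
			// from the same fixed sparse histogram below.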
					
						
			var (
				series1                = labels.FromStrings("foo", "bar1")
				series2                = labels.FromStrings("foo", "bar2")
				series3                = labels.FromStrings("foo", "bar3")
				series4                = labels.FromStrings("foo", "bar4")
				exp1, exp2, exp3, exp4 []chunks.Sample
			)
			h := &histogram.Histogram{
				Count:         15,
				ZeroCount:     4,
				ZeroThreshold: 0.001,
				Sum:           35.5,
				Schema:        1,
				PositiveSpans: []histogram.Span{
					{Offset: 0, Length: 2},
					{Offset: 2, Length: 2},
				},
				PositiveBuckets: []int64{1, 1, -1, 0},
				NegativeSpans: []histogram.Span{
					{Offset: 0, Length: 1},
					{Offset: 1, Length: 2},
				},
				NegativeBuckets: []int64{1, 2, -1},
			}

			// Series with only histograms.
			appendHistogram(series1, 100, 105, h, &exp1)

			// Series starting with float and then getting histograms.
			appendFloat(series2, 100, 102, &exp2)
			appendHistogram(series2, 103, 105, h.Copy(), &exp2)
			appendFloat(series2, 106, 107, &exp2)
			appendHistogram(series2, 108, 109, h.Copy(), &exp2)

			// Series starting with histogram and then getting float.
			appendHistogram(series3, 101, 103, h.Copy(), &exp3)
			appendFloat(series3, 104, 106, &exp3)
			appendHistogram(series3, 107, 108, h.Copy(), &exp3)
			appendFloat(series3, 109, 110, &exp3)

			// A float only series.
			appendFloat(series4, 100, 102, &exp4)

			// Compaction.
			mint := head.MinTime()
			maxt := head.MaxTime() + 1 // Block intervals are half-open: [b.MinTime, b.MaxTime).
			compactor, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{DefaultBlockDuration}, chunkenc.NewPool(), nil)
			require.NoError(t, err)
			ids, err := compactor.Write(head.opts.ChunkDirRoot, head, mint, maxt, nil)
			require.NoError(t, err)
			require.Len(t, ids, 1)

			// Open the block, query it, and check the histograms.
			block, err := OpenBlock(nil, path.Join(head.opts.ChunkDirRoot, ids[0].String()), nil, nil)
			require.NoError(t, err)
			t.Cleanup(func() {
				require.NoError(t, block.Close())
			})

			q, err := NewBlockQuerier(block, block.MinTime(), block.MaxTime())
			require.NoError(t, err)

			actHists := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
			require.Equal(t, map[string][]chunks.Sample{
				series1.String(): exp1,
				series2.String(): exp2,
				series3.String(): exp3,
				series4.String(): exp4,
			}, actHists)
		})
	}
}

// Depending on numSeriesPerSchema, it can take a few gigs of memory;
// the test adds all samples to the appender before committing instead of
// buffering the writes, to make it run faster.
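// It is skipped by default; remove the t.Skip() below to run it manually.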
										 |  |  |  | func TestSparseHistogramSpaceSavings(t *testing.T) { | 
					
						
							| 
									
										
										
										
											2021-07-08 13:31:53 +08:00
										 |  |  |  | 	t.Skip() | 
					
						
							|  |  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2023-11-27 23:13:01 +08:00
	type testcase struct {
		numSeriesPerSchema int
		numBuckets         int
		numSpans           int
		gapBetweenSpans    int
	}
	cases := []testcase{
		{1, 15, 1, 0},
		{1, 50, 1, 0},
		{1, 100, 1, 0},
		{1, 15, 3, 5},
		{1, 50, 3, 3},
		{1, 100, 3, 2},
		{100, 15, 1, 0},
		{100, 50, 1, 0},
		{100, 100, 1, 0},
		{100, 15, 3, 5},
		{100, 50, 3, 3},
		{100, 100, 3, 2},
	}

	type testSummary struct {
		oldBlockTotalSeries int
		oldBlockIndexSize   int64
		oldBlockChunksSize  int64
		oldBlockTotalSize   int64

		sparseBlockTotalSeries int
		sparseBlockIndexSize   int64
		sparseBlockChunksSize  int64
		sparseBlockTotalSize   int64

		numBuckets      int
		numSpans        int
		gapBetweenSpans int
	}

	var summaries []testSummary

	allSchemas := []int{-4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8}
	schemaDescription := []string{"minus_4", "minus_3", "minus_2", "minus_1", "0", "1", "2", "3", "4", "5", "6", "7", "8"}
	numHistograms := 120 * 4 // 15s scrape interval.
	timeStep := DefaultBlockDuration / int64(numHistograms)
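	// 480 samples spaced timeStep apart span exactly one DefaultBlockDuration
	// (2h) block, matching a 15s scrape interval, so each head below compacts
	// into a single block.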
	for _, c := range cases {
		t.Run(
			fmt.Sprintf("series=%d,span=%d,gap=%d,buckets=%d",
				len(allSchemas)*c.numSeriesPerSchema,
				c.numSpans,
				c.gapBetweenSpans,
				c.numBuckets,
			),
			func(t *testing.T) {
				oldHead, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
				t.Cleanup(func() {
					require.NoError(t, oldHead.Close())
				})
				sparseHead, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
				t.Cleanup(func() {
					require.NoError(t, sparseHead.Close())
				})

				var allSparseSeries []struct {
					baseLabels labels.Labels
					hists      []*histogram.Histogram
				}

				for sid, schema := range allSchemas {
					for i := 0; i < c.numSeriesPerSchema; i++ {
						lbls := labels.FromStrings(
							"__name__", fmt.Sprintf("rpc_durations_%d_histogram_seconds", i),
							"instance", "localhost:8080",
							"job", fmt.Sprintf("sparse_histogram_schema_%s", schemaDescription[sid]),
						)
						allSparseSeries = append(allSparseSeries, struct {
							baseLabels labels.Labels
							hists      []*histogram.Histogram
						}{baseLabels: lbls, hists: generateCustomHistograms(numHistograms, c.numBuckets, c.numSpans, c.gapBetweenSpans, schema)})
					}
				}

				oldApp := oldHead.Appender(context.Background())
				sparseApp := sparseHead.Appender(context.Background())
				numOldSeriesPerHistogram := 0

				var oldULIDs []ulid.ULID
				var sparseULIDs []ulid.ULID

				var wg sync.WaitGroup

				wg.Add(1)
				go func() {
					defer wg.Done()

					// Ingest sparse histograms.
					for _, ah := range allSparseSeries {
						var (
							ref storage.SeriesRef
							err error
						)
						for i := 0; i < numHistograms; i++ {
							ts := int64(i) * timeStep
							ref, err = sparseApp.AppendHistogram(ref, ah.baseLabels, ts, ah.hists[i], nil)
							require.NoError(t, err)
						}
					}
					require.NoError(t, sparseApp.Commit())

					// Sparse head compaction.
					mint := sparseHead.MinTime()
					maxt := sparseHead.MaxTime() + 1 // Block intervals are half-open: [b.MinTime, b.MaxTime).
					compactor, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{DefaultBlockDuration}, chunkenc.NewPool(), nil)
					require.NoError(t, err)
					sparseULIDs, err = compactor.Write(sparseHead.opts.ChunkDirRoot, sparseHead, mint, maxt, nil)
					require.NoError(t, err)
					require.Len(t, sparseULIDs, 1)
				}()

				wg.Add(1)
				go func(c testcase) {
					defer wg.Done()

					// Ingest histograms the old way.
					for _, ah := range allSparseSeries {
						refs := make([]storage.SeriesRef, c.numBuckets+((c.numSpans-1)*c.gapBetweenSpans))
						for i := 0; i < numHistograms; i++ {
							ts := int64(i) * timeStep

							h := ah.hists[i]

							numOldSeriesPerHistogram = 0
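							// Emulate a classic histogram: one "le" series per
							// cumulative bucket, plus the _count and _sum series.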
							it := h.CumulativeBucketIterator()
							itIdx := 0
							var err error
							for it.Next() {
								numOldSeriesPerHistogram++
								b := it.At()
								lbls := labels.NewBuilder(ah.baseLabels).Set("le", fmt.Sprintf("%.16f", b.Upper)).Labels()
								refs[itIdx], err = oldApp.Append(refs[itIdx], lbls, ts, float64(b.Count))
								require.NoError(t, err)
								itIdx++
							}
							baseName := ah.baseLabels.Get(labels.MetricName)
							// _count metric.
							countLbls := labels.NewBuilder(ah.baseLabels).Set(labels.MetricName, baseName+"_count").Labels()
							_, err = oldApp.Append(0, countLbls, ts, float64(h.Count))
							require.NoError(t, err)
							numOldSeriesPerHistogram++

							// _sum metric.
							sumLbls := labels.NewBuilder(ah.baseLabels).Set(labels.MetricName, baseName+"_sum").Labels()
							_, err = oldApp.Append(0, sumLbls, ts, h.Sum)
							require.NoError(t, err)
							numOldSeriesPerHistogram++
						}
					}

					require.NoError(t, oldApp.Commit())

					// Old head compaction.
					mint := oldHead.MinTime()
					maxt := oldHead.MaxTime() + 1 // Block intervals are half-open: [b.MinTime, b.MaxTime).
					compactor, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{DefaultBlockDuration}, chunkenc.NewPool(), nil)
					require.NoError(t, err)
					oldULIDs, err = compactor.Write(oldHead.opts.ChunkDirRoot, oldHead, mint, maxt, nil)
					require.NoError(t, err)
					require.Len(t, oldULIDs, 1)
				}(c)

				wg.Wait()

				oldBlockDir := filepath.Join(oldHead.opts.ChunkDirRoot, oldULIDs[0].String())
				sparseBlockDir := filepath.Join(sparseHead.opts.ChunkDirRoot, sparseULIDs[0].String())

				oldSize, err := fileutil.DirSize(oldBlockDir)
				require.NoError(t, err)
				oldIndexSize, err := fileutil.DirSize(filepath.Join(oldBlockDir, "index"))
				require.NoError(t, err)
				oldChunksSize, err := fileutil.DirSize(filepath.Join(oldBlockDir, "chunks"))
				require.NoError(t, err)

				sparseSize, err := fileutil.DirSize(sparseBlockDir)
				require.NoError(t, err)
				sparseIndexSize, err := fileutil.DirSize(filepath.Join(sparseBlockDir, "index"))
				require.NoError(t, err)
				sparseChunksSize, err := fileutil.DirSize(filepath.Join(sparseBlockDir, "chunks"))
				require.NoError(t, err)

				summaries = append(summaries, testSummary{
					oldBlockTotalSeries:    len(allSchemas) * c.numSeriesPerSchema * numOldSeriesPerHistogram,
					oldBlockIndexSize:      oldIndexSize,
					oldBlockChunksSize:     oldChunksSize,
					oldBlockTotalSize:      oldSize,
					sparseBlockTotalSeries: len(allSchemas) * c.numSeriesPerSchema,
					sparseBlockIndexSize:   sparseIndexSize,
					sparseBlockChunksSize:  sparseChunksSize,
					sparseBlockTotalSize:   sparseSize,
					numBuckets:             c.numBuckets,
					numSpans:               c.numSpans,
					gapBetweenSpans:        c.gapBetweenSpans,
				})
			})
	}

	for _, s := range summaries {
		fmt.Printf(`
Meta: NumBuckets=%d, NumSpans=%d, GapBetweenSpans=%d
Old Block: NumSeries=%d, IndexSize=%d, ChunksSize=%d, TotalSize=%d
Sparse Block: NumSeries=%d, IndexSize=%d, ChunksSize=%d, TotalSize=%d
Savings: Index=%.2f%%, Chunks=%.2f%%, Total=%.2f%%
`,
			s.numBuckets, s.numSpans, s.gapBetweenSpans,
			s.oldBlockTotalSeries, s.oldBlockIndexSize, s.oldBlockChunksSize, s.oldBlockTotalSize,
			s.sparseBlockTotalSeries, s.sparseBlockIndexSize, s.sparseBlockChunksSize, s.sparseBlockTotalSize,
			100*(1-float64(s.sparseBlockIndexSize)/float64(s.oldBlockIndexSize)),
			100*(1-float64(s.sparseBlockChunksSize)/float64(s.oldBlockChunksSize)),
			100*(1-float64(s.sparseBlockTotalSize)/float64(s.oldBlockTotalSize)),
		)
	}
}

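// generateCustomHistograms returns numHists histograms that all share the same
// span layout: numSpans spans of roughly numBuckets/numSpans buckets each,
// separated by gaps of up to gapBetweenSpans empty buckets. Bucket counts
// evolve randomly from one histogram to the next, without counter resets.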
func generateCustomHistograms(numHists, numBuckets, numSpans, gapBetweenSpans, schema int) (r []*histogram.Histogram) {
	// First histogram with all the settings.
	h := &histogram.Histogram{
		Sum:    1000 * rand.Float64(),
		Schema: int32(schema),
	}

	// Generate spans.
	h.PositiveSpans = []histogram.Span{
		{Offset: int32(rand.Intn(10)), Length: uint32(numBuckets)},
	}
	if numSpans > 1 {
		spanWidth := numBuckets / numSpans
		// The first span gets the buckets left over by the integer division.
		h.PositiveSpans[0].Length = uint32(spanWidth + (numBuckets - spanWidth*numSpans))
		for i := 0; i < numSpans-1; i++ {
			h.PositiveSpans = append(h.PositiveSpans, histogram.Span{Offset: int32(rand.Intn(gapBetweenSpans) + 1), Length: uint32(spanWidth)})
		}
	}

	// Generate buckets.
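	// PositiveBuckets is delta-encoded: the first element is an absolute bucket
	// count, and each subsequent element is the difference to the previous
	// bucket's count.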
	v := int64(rand.Intn(30) + 1)
	h.PositiveBuckets = []int64{v}
	count := v
	firstHistValues := []int64{v}
	for i := 0; i < numBuckets-1; i++ {
		delta := int64(rand.Intn(20))
		if rand.Int()%2 == 0 && firstHistValues[len(firstHistValues)-1] > delta {
			// Randomly make the delta negative such that the current value stays >0.
			delta = -delta
		}

		currVal := firstHistValues[len(firstHistValues)-1] + delta
		count += currVal
		firstHistValues = append(firstHistValues, currVal)

		h.PositiveBuckets = append(h.PositiveBuckets, delta)
	}

	h.Count = uint64(count)

	r = append(r, h)

	// Remaining histograms with the same spans but changed bucket values.
	for j := 0; j < numHists-1; j++ {
		newH := h.Copy()
		newH.Sum = float64(j+1) * 1000 * rand.Float64()

		// Generate buckets.
		count := int64(0)
		currVal := int64(0)
		for i := range newH.PositiveBuckets {
			delta := int64(rand.Intn(10))
			if i == 0 {
				newH.PositiveBuckets[i] += delta
				currVal = newH.PositiveBuckets[i]
				continue
			}
			currVal += newH.PositiveBuckets[i]
			if rand.Int()%2 == 0 && (currVal-delta) > firstHistValues[i] {
				// Randomly make the delta negative such that the current value stays >0
				// and above the previous count, since we are not doing resets here.
				delta = -delta
			}
			newH.PositiveBuckets[i] += delta
			currVal += delta
			count += currVal
		}

		newH.Count = uint64(count)

		r = append(r, newH)
		h = newH
	}

	return r
}

func TestCompactBlockMetas(t *testing.T) {
	parent1 := ulid.MustNew(100, nil)
	parent2 := ulid.MustNew(200, nil)
	parent3 := ulid.MustNew(300, nil)
	parent4 := ulid.MustNew(400, nil)

	input := []*BlockMeta{
		{ULID: parent1, MinTime: 1000, MaxTime: 2000, Compaction: BlockMetaCompaction{Level: 2, Sources: []ulid.ULID{ulid.MustNew(1, nil), ulid.MustNew(10, nil)}}},
		{ULID: parent2, MinTime: 200, MaxTime: 500, Compaction: BlockMetaCompaction{Level: 1}},
		{ULID: parent3, MinTime: 500, MaxTime: 2500, Compaction: BlockMetaCompaction{Level: 3, Sources: []ulid.ULID{ulid.MustNew(5, nil), ulid.MustNew(6, nil)}}},
		{ULID: parent4, MinTime: 100, MaxTime: 900, Compaction: BlockMetaCompaction{Level: 1}},
	}

	outUlid := ulid.MustNew(1000, nil)
	output := CompactBlockMetas(outUlid, input...)

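	// The merged meta must cover the union of the parents' time ranges, carry a
	// level one above the highest parent level, and list all parent sources in
	// one sorted slice.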
	expected := &BlockMeta{
		ULID:    outUlid,
		MinTime: 100,
		MaxTime: 2500,
		Stats:   BlockStats{},
		Compaction: BlockMetaCompaction{
			Level:   4,
			Sources: []ulid.ULID{ulid.MustNew(1, nil), ulid.MustNew(5, nil), ulid.MustNew(6, nil), ulid.MustNew(10, nil)},
			Parents: []BlockDesc{
				{ULID: parent1, MinTime: 1000, MaxTime: 2000},
				{ULID: parent2, MinTime: 200, MaxTime: 500},
				{ULID: parent3, MinTime: 500, MaxTime: 2500},
				{ULID: parent4, MinTime: 100, MaxTime: 900},
			},
		},
	}
	require.Equal(t, expected, output)
}

func TestCompactEmptyResultBlockWithTombstone(t *testing.T) {
	ctx := context.Background()
	tmpdir := t.TempDir()
	blockDir := createBlock(t, tmpdir, genSeries(1, 1, 0, 10))
	block, err := OpenBlock(nil, blockDir, nil, nil)
	require.NoError(t, err)
	// Write tombstone covering the whole block.
	err = block.Delete(ctx, 0, 10, labels.MustNewMatcher(labels.MatchEqual, defaultLabelName, "0"))
	require.NoError(t, err)

	c, err := NewLeveledCompactor(ctx, nil, promslog.NewNopLogger(), []int64{0}, nil, nil)
	require.NoError(t, err)

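	// All data in the only block is deleted, so compaction should produce no
	// output block at all; the nil ULID slice signals the empty result.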
	ulids, err := c.Compact(tmpdir, []string{blockDir}, []*Block{block})
	require.NoError(t, err)
	require.Nil(t, ulids)
	require.NoError(t, block.Close())
}

func TestDelayedCompaction(t *testing.T) {
	// The delay is chosen in such a way as to not slow down the tests, but also to make
	// the effective compaction duration negligible compared to it, so that the duration comparisons make sense.
	delay := 1000 * time.Millisecond

	waitUntilCompactedAndCheck := func(db *DB) {
		t.Helper()
		start := time.Now()
		for db.head.compactable() {
			// This simulates what happens at the end of commits; for a less busy DB,
			// a compaction is otherwise only triggered every minute. This is to speed up the test.
			select {
			case db.compactc <- struct{}{}:
			default:
			}
			time.Sleep(time.Millisecond)
		}
		duration := time.Since(start)
		// Only waited for one delay period: delay < duration < 2*delay.
		require.Greater(t, duration, db.opts.CompactionDelay)
		require.Less(t, duration, 2*db.opts.CompactionDelay)
	}

	compactAndCheck := func(db *DB) {
		t.Helper()
		start := time.Now()
		db.Compact(context.Background())
		for db.head.compactable() {
			time.Sleep(time.Millisecond)
		}
		if runtime.GOOS == "windows" {
			// TODO: enable on windows once ms resolution timers are better supported.
			return
		}
		duration := time.Since(start)
		require.Less(t, duration, delay)
	}

	cases := []struct {
		name string
		// The delays are chosen in such a way as to not slow down the tests, but also in a way to make the
		// effective compaction duration negligible compared to them, so that the duration comparisons make sense.
		compactionDelay time.Duration
	}{
		{
			"delayed compaction not enabled",
			0,
		},
		{
			"delayed compaction enabled",
			delay,
		},
	}

	for _, c := range cases {
		c := c
		t.Run(c.name, func(t *testing.T) {
			t.Parallel()

			var options *Options
			if c.compactionDelay > 0 {
				options = &Options{CompactionDelay: c.compactionDelay}
			}
			db := openTestDB(t, options, []int64{10})
			defer func() {
				require.NoError(t, db.Close())
			}()

			label := labels.FromStrings("foo", "bar")

			// The first compaction is expected to result in 1 block.
			db.DisableCompactions()
			app := db.Appender(context.Background())
			_, err := app.Append(0, label, 0, 0)
			require.NoError(t, err)
			_, err = app.Append(0, label, 11, 0)
			require.NoError(t, err)
			_, err = app.Append(0, label, 21, 0)
			require.NoError(t, err)
			require.NoError(t, app.Commit())

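			// With a block range of 10, samples at t=0, 11 and 21 make the head span
			// more than 1.5 block ranges, which is what makes it compactable.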
			if c.compactionDelay == 0 {
				// When delay is not enabled, compaction should run on the first trigger.
				compactAndCheck(db)
			} else {
				db.EnableCompactions()
				waitUntilCompactedAndCheck(db)
				// The db.compactc signals have been processed multiple times since a compaction is triggered every 1ms by waitUntilCompactedAndCheck.
				// This implies that the compaction delay doesn't block or wait on the initial trigger.
				// 3 is an arbitrary value because it's difficult to determine the precise value.
				require.GreaterOrEqual(t, prom_testutil.ToFloat64(db.metrics.compactionsTriggered)-prom_testutil.ToFloat64(db.metrics.compactionsSkipped), 3.0)
				// The delay doesn't change the head blocks' alignment.
				require.Eventually(t, func() bool {
					return db.head.MinTime() == db.compactor.(*LeveledCompactor).ranges[0]+1
				}, 500*time.Millisecond, 10*time.Millisecond)
				// One compaction was run and one block was produced.
				require.Equal(t, 1.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran))
			}

			// The second compaction is expected to result in 2 blocks.
			// This ensures that the logic for compaction delay doesn't only work for the first compaction, but also takes future compactions into account.
			// This also ensures that no delay happens between consecutive compactions.
			db.DisableCompactions()
			app = db.Appender(context.Background())
			_, err = app.Append(0, label, 31, 0)
			require.NoError(t, err)
			_, err = app.Append(0, label, 41, 0)
			require.NoError(t, err)
			require.NoError(t, app.Commit())

			if c.compactionDelay == 0 {
				// Compaction should still run on the first trigger.
				compactAndCheck(db)
			} else {
				db.EnableCompactions()
				waitUntilCompactedAndCheck(db)
			}

			// Two other compactions were run.
			require.Eventually(t, func() bool {
				return prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran) == 3.0
			}, 500*time.Millisecond, 10*time.Millisecond)

			if c.compactionDelay == 0 {
				return
			}

			// This covers a special case: if auto compaction is in a delay period and a manual compaction is triggered,
			// auto compaction should stop waiting for the delay if the head is no longer compactable.
			// Of course, if the head is still compactable after the manual compaction, auto compaction will continue waiting for the same delay.
			getTimeWhenCompactionDelayStarted := func() time.Time {
				t.Helper()
				db.cmtx.Lock()
				defer db.cmtx.Unlock()
				return db.timeWhenCompactionDelayStarted
			}

			db.DisableCompactions()
			app = db.Appender(context.Background())
			_, err = app.Append(0, label, 51, 0)
			require.NoError(t, err)
			require.NoError(t, app.Commit())

			require.True(t, db.head.compactable())
			db.EnableCompactions()
			// Trigger an auto compaction.
			db.compactc <- struct{}{}
			// That made auto compaction start waiting for the delay.
			require.Eventually(t, func() bool {
				return !getTimeWhenCompactionDelayStarted().IsZero()
			}, 100*time.Millisecond, 10*time.Millisecond)
			// Trigger a manual compaction.
			require.NoError(t, db.CompactHead(NewRangeHead(db.Head(), 0, 50.0)))
			require.Equal(t, 4.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran))
			// Re-trigger an auto compaction.
			db.compactc <- struct{}{}
			// That made auto compaction stop waiting for the delay.
			require.Eventually(t, func() bool {
				return getTimeWhenCompactionDelayStarted().IsZero()
			}, 100*time.Millisecond, 10*time.Millisecond)
		})
	}
}

// TestDelayedCompactionDoesNotBlockUnrelatedOps makes sure that when delayed compaction is enabled,
// operations that don't directly derive from the Head compaction are not delayed; here we consider disk blocks compaction.
func TestDelayedCompactionDoesNotBlockUnrelatedOps(t *testing.T) {
	cases := []struct {
		name            string
		whenCompactable bool
	}{
		{
			"Head is compactable",
			true,
		},
		{
			"Head is not compactable",
			false,
		},
	}

	for _, c := range cases {
		c := c
		t.Run(c.name, func(t *testing.T) {
			t.Parallel()

			tmpdir := t.TempDir()
			// Some blocks that need compaction are present.
			createBlock(t, tmpdir, genSeries(1, 1, 0, 100))
			createBlock(t, tmpdir, genSeries(1, 1, 100, 200))
			createBlock(t, tmpdir, genSeries(1, 1, 200, 300))

			options := DefaultOptions()
			// This will make the test timeout if compaction really waits for it.
			options.CompactionDelay = time.Hour
			db, err := open(tmpdir, promslog.NewNopLogger(), nil, options, []int64{10, 200}, nil)
			require.NoError(t, err)
			defer func() {
				require.NoError(t, db.Close())
			}()

			db.DisableCompactions()
			require.Len(t, db.Blocks(), 3)

			if c.whenCompactable {
				label := labels.FromStrings("foo", "bar")
				app := db.Appender(context.Background())
				_, err := app.Append(0, label, 301, 0)
				require.NoError(t, err)
				_, err = app.Append(0, label, 317, 0)
				require.NoError(t, err)
				require.NoError(t, app.Commit())
				// The Head is compactable and will still be so at the end.
				require.True(t, db.head.compactable())
				defer func() {
					require.True(t, db.head.compactable())
				}()
			}

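			// With block ranges {10, 200}, the three 100-unit-wide blocks qualify
			// for compaction into the larger 200 range, so two of them get merged
			// despite the 1h CompactionDelay configured on the Head.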
			// The blocks were compacted.
			db.Compact(context.Background())
			require.Len(t, db.Blocks(), 2)
		})
	}
}