// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tsdb

import (
	"context"
	"errors"
	"fmt"
	"log/slog"
	"math"
	"os"

	"github.com/oklog/ulid/v2"

	"github.com/prometheus/prometheus/model/timestamp"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

// BlockWriter is a block writer that allows appending and flushing series to disk.
type BlockWriter struct {
	logger         *slog.Logger
	destinationDir string

	head      *Head
	blockSize int64 // in ms
	chunkDir  string
}

// ErrNoSeriesAppended is returned if the series count is zero while flushing blocks.
var ErrNoSeriesAppended = errors.New("no series appended, aborting")

// NewBlockWriter creates a new block writer.
//
// The returned writer accumulates all the series in the Head block until `Flush` is called.
//
// Note that the writer will not check if the target directory exists or
// contains anything at all. It is the caller's responsibility to
// ensure that the resulting blocks do not overlap etc.
// Writer ensures the block flush is atomic (via rename).
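//
// A minimal usage sketch (illustrative only; error handling is elided, and the
// directory, label, and timestamp values are placeholders — `labels` refers to
// github.com/prometheus/prometheus/model/labels):
//
//	w, _ := NewBlockWriter(slog.Default(), "/path/to/blocks", DefaultBlockDuration)
//	app := w.Appender(context.Background())
//	_, _ = app.Append(0, labels.FromStrings("__name__", "up"), timestamp.FromTime(time.Now()), 1)
//	_ = app.Commit()
//	id, _ := w.Flush(context.Background())
//	_ = w.Close()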
func NewBlockWriter(logger *slog.Logger, dir string, blockSize int64) (*BlockWriter, error) {
	w := &BlockWriter{
		logger:         logger,
		destinationDir: dir,
		blockSize:      blockSize,
	}
	if err := w.initHead(); err != nil {
		return nil, err
	}
	return w, nil
}

// initHead creates and initialises a new TSDB head.
func (w *BlockWriter) initHead() error {
	chunkDir, err := os.MkdirTemp(os.TempDir(), "head")
	if err != nil {
		return fmt.Errorf("create temp dir: %w", err)
	}
	w.chunkDir = chunkDir
	opts := DefaultHeadOptions()
	opts.ChunkRange = w.blockSize
	opts.ChunkDirRoot = w.chunkDir
	opts.EnableNativeHistograms.Store(true)
	h, err := NewHead(nil, w.logger, nil, nil, opts, NewHeadStats())
	if err != nil {
		return fmt.Errorf("tsdb.NewHead: %w", err)
	}

	w.head = h
	return w.head.Init(math.MinInt64)
}

// Appender returns a new appender on the database.
// Appender can't be called concurrently. However, the returned Appender can safely be used concurrently.
func (w *BlockWriter) Appender(ctx context.Context) storage.Appender {
	return w.head.Appender(ctx)
}

// Flush implements the Writer interface. This is where actual block writing
// happens. After flush completes, no writes can be done.
func (w *BlockWriter) Flush(ctx context.Context) (ulid.ULID, error) {
	mint := w.head.MinTime()
	// Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime).
	// Because of this, a block's maxt is always one millisecond greater than the timestamp of the newest sample it contains.
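	// For example, if the newest sample in the head has timestamp 1000 (ms), maxt
	// becomes 1001 and the block interval [mint, 1001) still covers that sample.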
	maxt := w.head.MaxTime() + 1
	w.logger.Info("flushing", "series_count", w.head.NumSeries(), "mint", timestamp.Time(mint), "maxt", timestamp.Time(maxt))

	compactor, err := NewLeveledCompactor(ctx,
		nil,
		w.logger,
		[]int64{w.blockSize},
		chunkenc.NewPool(), nil)
	if err != nil {
		return ulid.ULID{}, fmt.Errorf("create leveled compactor: %w", err)
	}
	ids, err := compactor.Write(w.destinationDir, w.head, mint, maxt, nil)
	if err != nil {
		return ulid.ULID{}, fmt.Errorf("compactor write: %w", err)
	}

	// No block was produced. The caller is responsible for checking for an
	// empty (zero) ulid.ULID as its use case requires.
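	// (Illustrative caller-side check, not part of this API: ulid.ULID is a
	// comparable [16]byte array, so `id == (ulid.ULID{})` detects this case.)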
	if len(ids) == 0 {
		return ulid.ULID{}, nil
	}
	return ids[0], nil
}

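// Close removes the temporary on-disk chunk directory and closes the underlying
// head; it does not touch any blocks already flushed to the destination directory.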
func (w *BlockWriter) Close() error {
	defer func() {
		if err := os.RemoveAll(w.chunkDir); err != nil {
			w.logger.Error("error in deleting BlockWriter files", "err", err)
		}
	}()
	return w.head.Close()
}