remove `put` function and use RLock in `Iter` function
Signed-off-by: codwu <wuhan9087@163.com>
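In effect, the old `put` (which overwrote the whole `Intervals` slice for a ref) and the single-interval `addInterval` are folded into one variadic `addInterval` that merges each interval into the existing set and still returns the receiver for chaining, while `Iter` now takes only a read lock. A rough before/after sketch of a call site, mirroring the diff below (illustrative fragments only, not part of the commit):

    // before: replace the ref's interval list wholesale
    stones.put(ref, Intervals{{1, 3}, {6, 10}})

    // after: merge one or more intervals into the existing list;
    // the receiver is returned, so calls can still be chained
    stones.addInterval(ref, Interval{1, 3}, Interval{6, 10})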
This commit is contained in:

parent 84a45cb79a
commit cd145c90d5

block.go: 2 changed lines
@@ -440,7 +440,7 @@ Outer:
 			if intervalOverlap(mint, maxt, chk.MinTime, chk.MaxTime) {
 				// Delete only until the current values and not beyond.
 				tmin, tmax := clampInterval(mint, maxt, chks[0].MinTime, chks[len(chks)-1].MaxTime)
-				stones.put(p.At(), Intervals{{tmin, tmax}})
+				stones.addInterval(p.At(), Interval{tmin, tmax})
 				continue Outer
 			}
 		}
@@ -808,7 +808,7 @@ func TestTombstoneCleanFail(t *testing.T) {
 
 		// Add some some fake tombstones to trigger the compaction.
 		tomb := NewMemTombstones()
-		tomb.put(0,Intervals{{0, 1}})
+		tomb.addInterval(0, Interval{0, 1})
 		block.tombstones = tomb
 
 		db.blocks = append(db.blocks, block)
@@ -557,7 +557,7 @@ func TestBlockQuerierDelete(t *testing.T) {
 				},
 			},
 		},
-		tombstones: NewMemTombstones().put(1, Intervals{{1, 3}}).put(2, Intervals{{1, 3}, {6, 10}}).put(3, Intervals{{6, 10}}),
+		tombstones: NewMemTombstones().addInterval(1, Interval{1, 3}).addInterval(2, Interval{1, 3}, Interval{6, 10}).addInterval(3, Interval{6, 10}),
 		queries: []query{
 			{
 				mint: 2,
@@ -16,14 +16,12 @@ package tsdb
 import (
 	"encoding/binary"
 	"fmt"
-	"github.com/pkg/errors"
 	"io"
 	"io/ioutil"
 	"os"
 	"path/filepath"
-
 	"sync"
-
+	"github.com/pkg/errors"
 )
 
 const tombstoneFilename = "tombstones"
@@ -174,8 +172,8 @@ func (t *memTombstones) Get(ref uint64) (Intervals, error) {
 }
 
 func (t *memTombstones) Iter(f func(uint64, Intervals) error) error {
-	t.mtx.Lock()
-	defer t.mtx.Unlock()
+	t.mtx.RLock()
+	defer t.mtx.RUnlock()
 	for ref, ivs := range t.mts {
 		if err := f(ref, ivs); err != nil {
 			return err
@@ -185,16 +183,12 @@ func (t *memTombstones) Iter(f func(uint64, Intervals) error) error {
 }
 
 // addInterval to an existing memTombstones
-func (t *memTombstones) addInterval(ref uint64, itv Interval) {
-	t.mtx.Lock()
-	t.mts[ref] = t.mts[ref].add(itv)
-	t.mtx.Unlock()
-}
-
-func (t *memTombstones) put(ref uint64, itvs Intervals) *memTombstones {
+func (t *memTombstones) addInterval(ref uint64, itvs ...Interval) *memTombstones {
 	t.mtx.Lock()
 	defer t.mtx.Unlock()
-	t.mts[ref] = itvs
+	for _, itv := range itvs {
+		t.mts[ref] = t.mts[ref].add(itv)
+	}
 	return t
 }
 
@@ -41,7 +41,7 @@ func TestWriteAndReadbackTombStones(t *testing.T) {
 			dranges = dranges.add(Interval{mint, mint + rand.Int63n(1000)})
 			mint += rand.Int63n(1000) + 1
 		}
-		stones.put(ref, dranges)
+		stones.addInterval(ref, dranges...)
 	}
 
 	testutil.Ok(t, writeTombstoneFile(tmpdir, stones))
@@ -132,7 +132,6 @@ func TestMemTombstonesConcurrency(t *testing.T) {
 
 	go func() {
 		for x := 0; x < totalRuns; x++ {
-			tomb.put(uint64(x), Intervals{{int64(x), int64(x)}})
 			tomb.addInterval(uint64(x), Interval{int64(x), int64(x)})
 		}
 		wg.Done()
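A note on the `Iter` locking change: `Iter` only reads the underlying map, so taking the read side of the `sync.RWMutex` lets several goroutines iterate tombstones concurrently, while `addInterval` keeps the exclusive write lock for mutation. A minimal, self-contained sketch of that pattern (hypothetical stand-in names, not the tsdb types):

    package main

    import (
    	"fmt"
    	"sync"
    )

    // store is a stand-in for memTombstones: a map guarded by an RWMutex.
    type store struct {
    	mtx sync.RWMutex
    	m   map[uint64][]int64
    }

    // iter takes the read lock, so any number of readers may run at once.
    func (s *store) iter(f func(uint64, []int64) error) error {
    	s.mtx.RLock()
    	defer s.mtx.RUnlock()
    	for k, v := range s.m {
    		if err := f(k, v); err != nil {
    			return err
    		}
    	}
    	return nil
    }

    // add takes the write lock, excluding readers while the map is mutated.
    func (s *store) add(k uint64, v int64) {
    	s.mtx.Lock()
    	defer s.mtx.Unlock()
    	s.m[k] = append(s.m[k], v)
    }

    func main() {
    	s := &store{m: map[uint64][]int64{}}
    	s.add(1, 42)
    	_ = s.iter(func(k uint64, v []int64) error { fmt.Println(k, v); return nil })
    }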