Fix deprecated methods & types for new arrow-go version

This commit is contained in:
Nathan Verzemnieks 2025-10-07 12:52:35 +02:00
parent 900cbf7d49
commit 6bcacd7675
5 changed files with 13 additions and 13 deletions

View File

@ -114,7 +114,7 @@ func (w *parquetWriter) Close() error {
// writes the current buffer to parquet and re-inits the arrow buffer
func (w *parquetWriter) flush() error {
w.logger.Info("flush", "count", w.rv.Len())
-rec := array.NewRecord(w.schema, []arrow.Array{
+rec := array.NewRecordBatch(w.schema, []arrow.Array{
w.rv.NewArray(),
w.namespace.NewArray(),
w.group.NewArray(),

View File

@ -26,7 +26,7 @@ const rowLimit = 1_000_000
type recordReader interface {
Next() bool
Schema() *arrow.Schema
-Record() arrow.Record
+Record() arrow.RecordBatch
Err() error
}

View File

@ -77,8 +77,8 @@ func TestNewQueryDataResponse(t *testing.T) {
arr = append(arr, tarr)
}
-record := array.NewRecord(schema, arr, -1)
-records := []arrow.Record{record}
+record := array.NewRecordBatch(schema, arr, -1)
+records := []arrow.RecordBatch{record}
reader, err := array.NewRecordReader(schema, records)
assert.NoError(t, err)
@ -202,8 +202,8 @@ func TestNewQueryDataResponse_Error(t *testing.T) {
)
assert.NoError(t, err)
-record := array.NewRecord(schema, []arrow.Array{i64s, f64s}, -1)
-records := []arrow.Record{record}
+record := array.NewRecordBatch(schema, []arrow.Array{i64s, f64s}, -1)
+records := []arrow.RecordBatch{record}
reader, err := array.NewRecordReader(schema, records)
assert.NoError(t, err)
@ -247,8 +247,8 @@ func TestNewQueryDataResponse_WideTable(t *testing.T) {
)
assert.NoError(t, err)
-record := array.NewRecord(schema, []arrow.Array{times, strs, i64s}, -1)
-records := []arrow.Record{record}
+record := array.NewRecordBatch(schema, []arrow.Array{times, strs, i64s}, -1)
+records := []arrow.RecordBatch{record}
reader, err := array.NewRecordReader(schema, records)
assert.NoError(t, err)
@ -522,8 +522,8 @@ func TestCustomMetadata(t *testing.T) {
)
assert.NoError(t, err)
-record := array.NewRecord(schema, []arrow.Array{i64s}, -1)
-records := []arrow.Record{record}
+record := array.NewRecordBatch(schema, []arrow.Array{i64s}, -1)
+records := []arrow.RecordBatch{record}
reader, err := array.NewRecordReader(schema, records)
assert.NoError(t, err)

View File

@ -196,7 +196,7 @@ func arrowToNestedSetDataFrame(flamegraph *v1alpha1.FlamegraphArrow) (*data.Fram
defer arrowReader.Release()
arrowReader.Next()
-rec := arrowReader.Record()
+rec := arrowReader.RecordBatch()
fi, err := newFlamegraphIterator(rec)
if err != nil {
@ -236,7 +236,7 @@ type flamegraphIterator struct {
addressBuilder *bytes.Buffer
}
-func newFlamegraphIterator(rec arrow.Record) (*flamegraphIterator, error) {
+func newFlamegraphIterator(rec arrow.RecordBatch) (*flamegraphIterator, error) {
schema := rec.Schema()
columnChildren := rec.Column(schema.FieldIndices(FlamegraphFieldChildren)[0]).(*array.List)

View File

@ -278,7 +278,7 @@ func flamegraphResponse() *connect.Response[v1alpha1.QueryResponse] {
builderFlat.Append(columns.flat[i])
}
-record := array.NewRecord(
+record := array.NewRecordBatch(
arrow.NewSchema(fields, nil),
[]arrow.Array{
builderLocationAddress.NewArray(),