mirror of https://github.com/grafana/grafana.git
Merge pull request #13951 from marefr/11977_6x_terms

Fix terms agg order deprecation warning on es 6+
commit dcb5015022
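Background on the warning this fixes: Elasticsearch 6.0 deprecated ordering a terms aggregation by `_term` in favour of the equivalent `_key`, so queries Grafana built with the old key triggered a deprecation warning against 6.x clusters. A minimal, self-contained Go sketch of the version-gated rename (the helper name and standalone setup are illustrative, not the repo's API):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// normalizeTermsOrder rewrites the deprecated "_term" order key to "_key"
// when targeting ES 6.0+, encoded here as 60 to match Grafana's esVersion.
func normalizeTermsOrder(order map[string]interface{}, esVersion int) map[string]interface{} {
	if esVersion >= 60 {
		if dir, ok := order["_term"]; ok {
			order["_key"] = dir
			delete(order, "_term")
		}
	}
	return order
}

func main() {
	for _, v := range []int{56, 60} {
		order := normalizeTermsOrder(map[string]interface{}{"_term": "asc"}, v)
		body, _ := json.Marshal(map[string]interface{}{
			"terms": map[string]interface{}{"field": "@host", "order": order},
		})
		fmt.Printf("esVersion=%d: %s\n", v, body)
	}
	// esVersion=56: {"terms":{"field":"@host","order":{"_term":"asc"}}}
	// esVersion=60: {"terms":{"field":"@host","order":{"_key":"asc"}}}
}
```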
@@ -156,7 +156,7 @@ Since not all datasources have the same configuration settings we only have the
 | tlsSkipVerify | boolean | *All* | Controls whether a client verifies the server's certificate chain and host name. |
 | graphiteVersion | string | Graphite | Graphite version |
 | timeInterval | string | Prometheus, Elasticsearch, InfluxDB, MySQL, PostgreSQL & MSSQL | Lowest interval/step value that should be used for this data source |
-| esVersion | number | Elasticsearch | Elasticsearch version as a number (2/5/56) |
+| esVersion | number | Elasticsearch | Elasticsearch version as a number (2/5/56/60) |
 | timeField | string | Elasticsearch | Which field should be used as the timestamp |
 | interval | string | Elasticsearch | Index date time format. nil(No Pattern), 'Hourly', 'Daily', 'Weekly', 'Monthly' or 'Yearly' |
 | authType | string | Cloudwatch | Auth provider. keys/credentials/arn |
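For reference, these settings are supplied under a data source's `jsonData`. A hedged sketch of what an Elasticsearch entry might carry once the new 6.0+ option exists (field names come from the table above; values and the surrounding provisioning structure are illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// jsonData fields from the table above; esVersion 60 selects the
	// "6.0+" option this PR introduces. Values are illustrative.
	jsonData := map[string]interface{}{
		"esVersion":    60,
		"timeField":    "@timestamp",
		"interval":     "Daily",
		"timeInterval": "1m",
	}
	out, _ := json.MarshalIndent(jsonData, "", "  ")
	fmt.Println(string(out))
}
```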
@@ -59,7 +59,7 @@ a time pattern for the index name or a wildcard.
 ### Elasticsearch version

 Be sure to specify your Elasticsearch version in the version selection dropdown. This is very important as there are differences in how queries are composed.
-Currently the versions available is 2.x, 5.x and 5.6+ where 5.6+ means a version of 5.6 or higher, 6.3.2 for example.
+Currently the available versions are 2.x, 5.x, 5.6+ and 6.0+, where 5.6+ means a version of 5.6 or higher but lower than 6.0, and 6.0+ means a version of 6.0 or higher, for example 6.3.2.

 ### Min time interval
 A lower limit for the auto group by time interval. Recommended to be set to the write frequency, for example `1m` if your data is written every minute.
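The dropdown labels map onto the numeric `esVersion` encoding used throughout the code (2, 5, 56, 60), which is why a plain integer comparison is enough to gate 6.x-only behaviour. A small sketch of that convention (the helper is made up for illustration):

```go
package main

import "fmt"

// Dropdown labels paired with their numeric esVersion encoding.
var esVersions = map[string]int{"2.x": 2, "5.x": 5, "5.6+": 56, "6.0+": 60}

// usesKeyOrdering reports whether a cluster at this encoded version expects
// "_key" instead of the deprecated "_term" when ordering terms aggregations.
func usesKeyOrdering(esVersion int) bool {
	return esVersion >= 60
}

func main() {
	for label, v := range esVersions {
		fmt.Printf("%-4s -> %2d, orders by _key: %v\n", label, v, usesKeyOrdering(v))
	}
}
```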
@@ -112,7 +112,7 @@ func (b *SearchRequestBuilder) Query() *QueryBuilder {

 // Agg initiates and returns a new aggregation builder
 func (b *SearchRequestBuilder) Agg() AggBuilder {
-	aggBuilder := newAggBuilder()
+	aggBuilder := newAggBuilder(b.version)
 	b.aggBuilders = append(b.aggBuilders, aggBuilder)
 	return aggBuilder
 }
@@ -275,11 +275,13 @@ type AggBuilder interface {
 type aggBuilderImpl struct {
 	AggBuilder
 	aggDefs []*aggDef
+	version int
 }

-func newAggBuilder() *aggBuilderImpl {
+func newAggBuilder(version int) *aggBuilderImpl {
 	return &aggBuilderImpl{
 		aggDefs: make([]*aggDef, 0),
+		version: version,
 	}
 }
@@ -317,7 +319,7 @@ func (b *aggBuilderImpl) Histogram(key, field string, fn func(a *HistogramAgg, b
 	})

 	if fn != nil {
-		builder := newAggBuilder()
+		builder := newAggBuilder(b.version)
 		aggDef.builders = append(aggDef.builders, builder)
 		fn(innerAgg, builder)
 	}
@@ -337,7 +339,7 @@ func (b *aggBuilderImpl) DateHistogram(key, field string, fn func(a *DateHistogr
 	})

 	if fn != nil {
-		builder := newAggBuilder()
+		builder := newAggBuilder(b.version)
 		aggDef.builders = append(aggDef.builders, builder)
 		fn(innerAgg, builder)
 	}
@@ -347,6 +349,8 @@ func (b *aggBuilderImpl) DateHistogram(key, field string, fn func(a *DateHistogr
 	return b
 }

+const termsOrderTerm = "_term"
+
 func (b *aggBuilderImpl) Terms(key, field string, fn func(a *TermsAggregation, b AggBuilder)) AggBuilder {
 	innerAgg := &TermsAggregation{
 		Field: field,
@@ -358,11 +362,18 @@ func (b *aggBuilderImpl) Terms(key, field string, fn func(a *TermsAggregation, b
 	})

 	if fn != nil {
-		builder := newAggBuilder()
+		builder := newAggBuilder(b.version)
 		aggDef.builders = append(aggDef.builders, builder)
 		fn(innerAgg, builder)
 	}

+	if b.version >= 60 && len(innerAgg.Order) > 0 {
+		if orderBy, exists := innerAgg.Order[termsOrderTerm]; exists {
+			innerAgg.Order["_key"] = orderBy
+			delete(innerAgg.Order, termsOrderTerm)
+		}
+	}
+
 	b.aggDefs = append(b.aggDefs, aggDef)

 	return b
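Note that the rename runs after `fn`, so an order set while the caller builds nested aggregations is still normalized. A hypothetical fragment of how this behaves, assuming the package types shown in the diff above (in particular that `Order` is a string-keyed map):

```go
// Hypothetical usage inside the same package as the builder code above.
b := newAggBuilder(60) // builder targeting ES 6.0+
b.Terms("2", "@host", func(a *TermsAggregation, _ AggBuilder) {
	a.Order = map[string]interface{}{termsOrderTerm: "asc"} // i.e. "_term": "asc"
})
// After Terms returns, the aggregation's Order map reads {"_key": "asc"}:
// the deprecated key was rewritten because the builder's version is >= 60.
```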
@@ -377,7 +388,7 @@ func (b *aggBuilderImpl) Filters(key string, fn func(a *FiltersAggregation, b Ag
 		Aggregation: innerAgg,
 	})
 	if fn != nil {
-		builder := newAggBuilder()
+		builder := newAggBuilder(b.version)
 		aggDef.builders = append(aggDef.builders, builder)
 		fn(innerAgg, builder)
 	}
@@ -398,7 +409,7 @@ func (b *aggBuilderImpl) GeoHashGrid(key, field string, fn func(a *GeoHashGridAg
 	})

 	if fn != nil {
-		builder := newAggBuilder()
+		builder := newAggBuilder(b.version)
 		aggDef.builders = append(aggDef.builders, builder)
 		fn(innerAgg, builder)
 	}
@@ -127,6 +127,60 @@ func TestExecuteTimeSeriesQuery(t *testing.T) {
 			So(avgAgg.Aggregation.Type, ShouldEqual, "avg")
 		})

+		Convey("With term agg and order by term", func() {
+			c := newFakeClient(5)
+			_, err := executeTsdbQuery(c, `{
+				"timeField": "@timestamp",
+				"bucketAggs": [
+					{
+						"type": "terms",
+						"field": "@host",
+						"id": "2",
+						"settings": { "size": "5", "order": "asc", "orderBy": "_term" }
+					},
+					{ "type": "date_histogram", "field": "@timestamp", "id": "3" }
+				],
+				"metrics": [
+					{"type": "count", "id": "1" },
+					{"type": "avg", "field": "@value", "id": "5" }
+				]
+			}`, from, to, 15*time.Second)
+			So(err, ShouldBeNil)
+			sr := c.multisearchRequests[0].Requests[0]
+
+			firstLevel := sr.Aggs[0]
+			So(firstLevel.Key, ShouldEqual, "2")
+			termsAgg := firstLevel.Aggregation.Aggregation.(*es.TermsAggregation)
+			So(termsAgg.Order["_term"], ShouldEqual, "asc")
+		})
+
+		Convey("With term agg and order by term with es6.x", func() {
+			c := newFakeClient(60)
+			_, err := executeTsdbQuery(c, `{
+				"timeField": "@timestamp",
+				"bucketAggs": [
+					{
+						"type": "terms",
+						"field": "@host",
+						"id": "2",
+						"settings": { "size": "5", "order": "asc", "orderBy": "_term" }
+					},
+					{ "type": "date_histogram", "field": "@timestamp", "id": "3" }
+				],
+				"metrics": [
+					{"type": "count", "id": "1" },
+					{"type": "avg", "field": "@value", "id": "5" }
+				]
+			}`, from, to, 15*time.Second)
+			So(err, ShouldBeNil)
+			sr := c.multisearchRequests[0].Requests[0]
+
+			firstLevel := sr.Aggs[0]
+			So(firstLevel.Key, ShouldEqual, "2")
+			termsAgg := firstLevel.Aggregation.Aggregation.(*es.TermsAggregation)
+			So(termsAgg.Order["_key"], ShouldEqual, "asc")
+		})
+
 		Convey("With metric percentiles", func() {
 			c := newFakeClient(5)
 			_, err := executeTsdbQuery(c, `{
@@ -20,7 +20,12 @@ export class ElasticConfigCtrl {
     { name: 'Yearly', value: 'Yearly', example: '[logstash-]YYYY' },
   ];

-  esVersions = [{ name: '2.x', value: 2 }, { name: '5.x', value: 5 }, { name: '5.6+', value: 56 }];
+  esVersions = [
+    { name: '2.x', value: 2 },
+    { name: '5.x', value: 5 },
+    { name: '5.6+', value: 56 },
+    { name: '6.0+', value: 60 },
+  ];

   indexPatternTypeChanged() {
     const def = _.find(this.indexPatternTypes, {
@@ -31,7 +31,11 @@ export class ElasticQueryBuilder {
     queryNode.terms.size = parseInt(aggDef.settings.size, 10) === 0 ? 500 : parseInt(aggDef.settings.size, 10);
     if (aggDef.settings.orderBy !== void 0) {
       queryNode.terms.order = {};
-      queryNode.terms.order[aggDef.settings.orderBy] = aggDef.settings.order;
+      if (aggDef.settings.orderBy === '_term' && this.esVersion >= 60) {
+        queryNode.terms.order['_key'] = aggDef.settings.order;
+      } else {
+        queryNode.terms.order[aggDef.settings.orderBy] = aggDef.settings.order;
+      }

       // if metric ref, look it up and add it to this agg level
       metricRef = parseInt(aggDef.settings.orderBy, 10);
@@ -318,6 +322,13 @@ export class ElasticQueryBuilder {
         },
       },
     };
+
+    if (this.esVersion >= 60) {
+      query.aggs['1'].terms.order = {
+        _key: 'asc',
+      };
+    }
+
     return query;
   }
}
@@ -62,6 +62,54 @@ describe('ElasticQueryBuilder', () => {
     expect(aggs['1'].avg.field).toBe('@value');
   });

+  it('with term agg and order by term', () => {
+    const query = builder.build(
+      {
+        metrics: [{ type: 'count', id: '1' }, { type: 'avg', field: '@value', id: '5' }],
+        bucketAggs: [
+          {
+            type: 'terms',
+            field: '@host',
+            settings: { size: 5, order: 'asc', orderBy: '_term' },
+            id: '2',
+          },
+          { type: 'date_histogram', field: '@timestamp', id: '3' },
+        ],
+      },
+      100,
+      1000
+    );
+
+    const firstLevel = query.aggs['2'];
+    expect(firstLevel.terms.order._term).toBe('asc');
+  });
+
+  it('with term agg and order by term on es6.x', () => {
+    const builder6x = new ElasticQueryBuilder({
+      timeField: '@timestamp',
+      esVersion: 60,
+    });
+    const query = builder6x.build(
+      {
+        metrics: [{ type: 'count', id: '1' }, { type: 'avg', field: '@value', id: '5' }],
+        bucketAggs: [
+          {
+            type: 'terms',
+            field: '@host',
+            settings: { size: 5, order: 'asc', orderBy: '_term' },
+            id: '2',
+          },
+          { type: 'date_histogram', field: '@timestamp', id: '3' },
+        ],
+      },
+      100,
+      1000
+    );
+
+    const firstLevel = query.aggs['2'];
+    expect(firstLevel.terms.order._key).toBe('asc');
+  });
+
   it('with term agg and order by metric agg', () => {
     const query = builder.build(
       {
@@ -302,4 +350,18 @@ describe('ElasticQueryBuilder', () => {
     expect(query.query.bool.filter[4].regexp['key5']).toBe('value5');
     expect(query.query.bool.filter[5].bool.must_not.regexp['key6']).toBe('value6');
   });
+
+  it('getTermsQuery should set correct sorting', () => {
+    const query = builder.getTermsQuery({});
+    expect(query.aggs['1'].terms.order._term).toBe('asc');
+  });
+
+  it('getTermsQuery es6.x should set correct sorting', () => {
+    const builder6x = new ElasticQueryBuilder({
+      timeField: '@timestamp',
+      esVersion: 60,
+    });
+    const query = builder6x.getTermsQuery({});
+    expect(query.aggs['1'].terms.order._key).toBe('asc');
+  });
 });