Upgrade to lucene-9.0.0-snapshot-32a0a16aff0 (#73324)

This commit upgrades Elasticsearch 8.0 to use a snapshot of
Lucene 9.0.0.
Alan Woodward 2021-09-21 10:48:26 +01:00 committed by GitHub
parent d381ffafa3
commit 524d1ea757
283 changed files with 1221 additions and 1044 deletions

View File

@ -1,5 +1,5 @@
elasticsearch = 8.0.0
lucene = 8.10.0-snapshot-bf2fcb53079
lucene = 9.0.0-snapshot-32a0a16aff0
bundled_jdk_vendor = adoptium
bundled_jdk = 16.0.2+7
@ -11,7 +11,7 @@ spatial4j = 0.7
jts = 1.15.0
jackson = 2.10.4
snakeyaml = 1.26
icu4j = 62.1
icu4j = 68.2
supercsv = 2.4.0
# when updating log4j, please update also docs/java-api/index.asciidoc
log4j = 2.11.1

View File

@ -8,7 +8,9 @@
package org.elasticsearch.common.settings;
import org.apache.lucene.backward_codecs.store.EndiannessReverserUtil;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.store.DataOutput;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;
@ -345,13 +347,14 @@ public class KeyStoreWrapperTests extends ESTestCase {
byte[] encryptedBytes,
int truncEncryptedDataLength
) throws Exception {
indexOutput.writeInt(4 + salt.length + 4 + iv.length + 4 + encryptedBytes.length);
indexOutput.writeInt(salt.length);
indexOutput.writeBytes(salt, salt.length);
indexOutput.writeInt(iv.length);
indexOutput.writeBytes(iv, iv.length);
indexOutput.writeInt(encryptedBytes.length - truncEncryptedDataLength);
indexOutput.writeBytes(encryptedBytes, encryptedBytes.length);
DataOutput out = EndiannessReverserUtil.wrapDataOutput(indexOutput);
out.writeInt(4 + salt.length + 4 + iv.length + 4 + encryptedBytes.length);
out.writeInt(salt.length);
out.writeBytes(salt, salt.length);
out.writeInt(iv.length);
out.writeBytes(iv, iv.length);
out.writeInt(encryptedBytes.length - truncEncryptedDataLength);
out.writeBytes(encryptedBytes, encryptedBytes.length);
}
public void testUpgradeAddsSeed() throws Exception {
@ -382,7 +385,7 @@ public class KeyStoreWrapperTests extends ESTestCase {
Path configDir = env.configFile();
try (
Directory directory = newFSDirectory(configDir);
IndexOutput output = directory.createOutput("elasticsearch.keystore", IOContext.DEFAULT)
IndexOutput output = EndiannessReverserUtil.createOutput(directory, "elasticsearch.keystore", IOContext.DEFAULT);
) {
CodecUtil.writeHeader(output, "elasticsearch.keystore", 1);
output.writeByte((byte) 0); // hasPassword = false
@ -417,9 +420,8 @@ public class KeyStoreWrapperTests extends ESTestCase {
random().nextBytes(fileBytes);
try (
Directory directory = newFSDirectory(configDir);
IndexOutput output = directory.createOutput("elasticsearch.keystore", IOContext.DEFAULT)
IndexOutput output = EndiannessReverserUtil.createOutput(directory, "elasticsearch.keystore", IOContext.DEFAULT);
) {
CodecUtil.writeHeader(output, "elasticsearch.keystore", 2);
output.writeByte((byte) 0); // hasPassword = false
output.writeString("PKCS12");
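
Note on the EndiannessReverserUtil changes above: Lucene 9 switched DataOutput/DataInput to little-endian byte order, whereas everything written by Lucene 8 and earlier is big-endian. The backward_codecs wrapper restores the legacy order, keeping the version-1/2 keystore fixtures these tests write byte-identical to what older nodes produced. A minimal sketch of the pattern, assuming an enclosing method that throws IOException (the file name and value are illustrative):

    import org.apache.lucene.backward_codecs.store.EndiannessReverserUtil;
    import org.apache.lucene.store.DataOutput;
    import org.apache.lucene.store.IOContext;
    import org.apache.lucene.store.IndexOutput;

    try (IndexOutput raw = directory.createOutput("legacy.bin", IOContext.DEFAULT)) {
        // Writes in the pre-9.0 big-endian order: 00 00 00 2A rather than 2A 00 00 00
        DataOutput legacy = EndiannessReverserUtil.wrapDataOutput(raw);
        legacy.writeInt(42);
    }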

View File

@ -1,8 +1,8 @@
include::{docs-root}/shared/versions/stack/{source_branch}.asciidoc[]
:lucene_version: 8.10.0
:lucene_version_path: 8_10_0
:lucene_version: 9.0.0
:lucene_version_path: 9_0_0
:jdk: 11.0.2
:jdk_major: 11
:build_flavor: default

View File

@ -8,7 +8,7 @@
package org.elasticsearch.analysis.common;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.ClassicFilter;
import org.apache.lucene.analysis.classic.ClassicFilter;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;

View File

@ -9,7 +9,7 @@
package org.elasticsearch.analysis.common;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.standard.ClassicTokenizer;
import org.apache.lucene.analysis.classic.ClassicTokenizer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;

View File

@ -27,6 +27,8 @@ import org.apache.lucene.analysis.cjk.CJKBigramFilter;
import org.apache.lucene.analysis.cjk.CJKWidthFilter;
import org.apache.lucene.analysis.ckb.SoraniAnalyzer;
import org.apache.lucene.analysis.ckb.SoraniNormalizationFilter;
import org.apache.lucene.analysis.classic.ClassicFilter;
import org.apache.lucene.analysis.classic.ClassicTokenizer;
import org.apache.lucene.analysis.commongrams.CommonGramsFilter;
import org.apache.lucene.analysis.core.DecimalDigitFilter;
import org.apache.lucene.analysis.core.KeywordTokenizer;
@ -40,6 +42,7 @@ import org.apache.lucene.analysis.de.GermanAnalyzer;
import org.apache.lucene.analysis.de.GermanNormalizationFilter;
import org.apache.lucene.analysis.de.GermanStemFilter;
import org.apache.lucene.analysis.el.GreekAnalyzer;
import org.apache.lucene.analysis.email.UAX29URLEmailTokenizer;
import org.apache.lucene.analysis.en.EnglishAnalyzer;
import org.apache.lucene.analysis.en.KStemFilter;
import org.apache.lucene.analysis.en.PorterStemFilter;
@ -89,10 +92,7 @@ import org.apache.lucene.analysis.ro.RomanianAnalyzer;
import org.apache.lucene.analysis.ru.RussianAnalyzer;
import org.apache.lucene.analysis.shingle.ShingleFilter;
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.ClassicFilter;
import org.apache.lucene.analysis.standard.ClassicTokenizer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer;
import org.apache.lucene.analysis.sv.SwedishAnalyzer;
import org.apache.lucene.analysis.th.ThaiAnalyzer;
import org.apache.lucene.analysis.th.ThaiTokenizer;
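
The import churn here and in the two test files above comes from analysis classes that moved to new packages in Lucene 9. As a quick reference, taken from the imports in this commit:

    // org.apache.lucene.analysis.standard.ClassicFilter          -> org.apache.lucene.analysis.classic.ClassicFilter
    // org.apache.lucene.analysis.standard.ClassicTokenizer       -> org.apache.lucene.analysis.classic.ClassicTokenizer
    // org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer -> org.apache.lucene.analysis.email.UAX29URLEmailTokenizer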

View File

@ -38,10 +38,18 @@ public class MinHashTokenFilterFactory extends AbstractTokenFilterFactory {
private Map<String, String> convertSettings(Settings settings) {
Map<String, String> settingMap = new HashMap<>();
settingMap.put("hashCount", settings.get("hash_count"));
settingMap.put("bucketCount", settings.get("bucket_count"));
settingMap.put("hashSetSize", settings.get("hash_set_size"));
settingMap.put("withRotation", settings.get("with_rotation"));
if (settings.hasValue("hash_count")) {
settingMap.put("hashCount", settings.get("hash_count"));
}
if (settings.hasValue("bucketCount")) {
settingMap.put("bucketCount", settings.get("bucket_count"));
}
if (settings.hasValue("hashSetSize")) {
settingMap.put("hashSetSize", settings.get("hash_set_size"));
}
if (settings.hasValue("with_rotation")) {
settingMap.put("withRotation", settings.get("with_rotation"));
}
return settingMap;
}
}
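
The guards matter because HashMap stores explicit nulls: under the old code an unset Elasticsearch setting reached Lucene's MinHash factory as a present-but-null parameter rather than an absent one, which Lucene 9's argument parsing evidently no longer tolerates (that reading of the motivation is inferred; the map behaviour below is not). Note that the keys passed to hasValue must be the snake_case setting names that settings.get reads:

    Map<String, String> args = new HashMap<>();
    args.put("hashCount", null);                             // legal: key present, value null
    boolean looksConfigured = args.containsKey("hashCount"); // true, despite there being no value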

View File

@ -9,8 +9,8 @@
package org.elasticsearch.analysis.common;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.email.UAX29URLEmailTokenizer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;

View File

@ -86,6 +86,7 @@ public class CommonAnalysisFactoryTests extends AnalysisFactoryTestCase {
filters.put("latvianstem", StemmerTokenFilterFactory.class);
filters.put("norwegianlightstem", StemmerTokenFilterFactory.class);
filters.put("norwegianminimalstem", StemmerTokenFilterFactory.class);
filters.put("norwegiannormalization", Void.class);
filters.put("portuguesestem", StemmerTokenFilterFactory.class);
filters.put("portugueselightstem", StemmerTokenFilterFactory.class);
filters.put("portugueseminimalstem", StemmerTokenFilterFactory.class);
@ -93,7 +94,10 @@ public class CommonAnalysisFactoryTests extends AnalysisFactoryTestCase {
filters.put("soranistem", StemmerTokenFilterFactory.class);
filters.put("spanishlightstem", StemmerTokenFilterFactory.class);
filters.put("swedishlightstem", StemmerTokenFilterFactory.class);
filters.put("swedishminimalstem", Void.class);
filters.put("stemmeroverride", StemmerOverrideTokenFilterFactory.class);
filters.put("telugunormalization", TeluguNormalizationFilterFactory.class);
filters.put("telugustem", TeluguStemFilterFactory.class);
filters.put("kstem", KStemTokenFilterFactory.class);
filters.put("synonym", SynonymTokenFilterFactory.class);
filters.put("synonymgraph", SynonymGraphTokenFilterFactory.class);
@ -131,8 +135,6 @@ public class CommonAnalysisFactoryTests extends AnalysisFactoryTestCase {
filters.put("brazilianstem", BrazilianStemTokenFilterFactory.class);
filters.put("czechstem", CzechStemTokenFilterFactory.class);
filters.put("germanstem", GermanStemTokenFilterFactory.class);
filters.put("telugunormalization", TeluguNormalizationFilterFactory.class);
filters.put("telugustem", TeluguStemFilterFactory.class);
// this filter is not exposed and should only be used internally
filters.put("fixedshingle", Void.class);
return filters;
@ -216,7 +218,7 @@ public class CommonAnalysisFactoryTests extends AnalysisFactoryTestCase {
tokenizers.put("keyword", null);
tokenizers.put("lowercase", Void.class);
tokenizers.put("classic", null);
tokenizers.put("uax_url_email", org.apache.lucene.analysis.standard.UAX29URLEmailTokenizerFactory.class);
tokenizers.put("uax_url_email", org.apache.lucene.analysis.email.UAX29URLEmailTokenizerFactory.class);
tokenizers.put("path_hierarchy", null);
tokenizers.put("letter", null);
tokenizers.put("whitespace", null);

View File

@ -80,15 +80,17 @@ public class DisableGraphQueryTests extends ESSingleNodeTestCase {
// that ignores position length attribute
expectedQueryWithUnigram= new BooleanQuery.Builder()
.add(
new SynonymQuery(
new Term("text_shingle_unigram", "foo"),
new Term("text_shingle_unigram", "foo bar")
), BooleanClause.Occur.SHOULD)
new SynonymQuery.Builder("text_shingle_unigram")
.addTerm(new Term("text_shingle_unigram", "foo"))
.addTerm(new Term("text_shingle_unigram", "foo bar"))
.build(),
BooleanClause.Occur.SHOULD)
.add(
new SynonymQuery(
new Term("text_shingle_unigram", "bar"),
new Term("text_shingle_unigram", "bar baz")
), BooleanClause.Occur.SHOULD)
new SynonymQuery.Builder("text_shingle_unigram")
.addTerm(new Term("text_shingle_unigram", "bar"))
.addTerm(new Term("text_shingle_unigram", "bar baz"))
.build(),
BooleanClause.Occur.SHOULD)
.add(
new TermQuery(
new Term("text_shingle_unigram", "baz")

View File

@ -1 +0,0 @@
97f27306f1817475b30e52f48b1407bc5d696d59

View File

@ -0,0 +1 @@
561301335ba14e07ec89adff0e2321cb79146a48

View File

@ -13,7 +13,6 @@ import org.apache.lucene.expressions.SimpleBindings;
import org.apache.lucene.expressions.js.JavascriptCompiler;
import org.apache.lucene.expressions.js.VariableContext;
import org.apache.lucene.search.DoubleValuesSource;
import org.apache.lucene.search.SortField;
import org.elasticsearch.SpecialPermission;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.index.fielddata.IndexFieldData;
@ -232,7 +231,7 @@ public class ExpressionScriptEngine implements ScriptEngine {
for (String variable : expr.variables) {
try {
if (variable.equals("_score")) {
bindings.add(new SortField("_score", SortField.Type.SCORE));
bindings.add("_score", DoubleValuesSource.SCORES);
needsScores = true;
} else if (vars != null && vars.containsKey(variable)) {
bindFromParams(vars, bindings, variable);
@ -283,7 +282,7 @@ public class ExpressionScriptEngine implements ScriptEngine {
for (String variable : expr.variables) {
try {
if (variable.equals("_score")) {
bindings.add(new SortField("_score", SortField.Type.SCORE));
bindings.add("_score", DoubleValuesSource.SCORES);
needsScores = true;
} else if (variable.equals("_value")) {
specialValue = new ReplaceableConstDoubleValueSource();
@ -355,7 +354,7 @@ public class ExpressionScriptEngine implements ScriptEngine {
for (String variable : expr.variables) {
try {
if (variable.equals("_score")) {
bindings.add(new SortField("_score", SortField.Type.SCORE));
bindings.add("_score", DoubleValuesSource.SCORES);
needsScores = true;
} else if (variable.equals("_value")) {
specialValue = new ReplaceableConstDoubleValueSource();
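
SimpleBindings#add(SortField) is gone in Lucene 9; bindings are declared directly as a DoubleValuesSource, so the score binding becomes DoubleValuesSource.SCORES in all three hunks above. A self-contained sketch (the expression is illustrative; compile throws ParseException):

    import org.apache.lucene.expressions.Expression;
    import org.apache.lucene.expressions.SimpleBindings;
    import org.apache.lucene.expressions.js.JavascriptCompiler;
    import org.apache.lucene.search.DoubleValuesSource;

    SimpleBindings bindings = new SimpleBindings();
    bindings.add("_score", DoubleValuesSource.SCORES); // was: bindings.add(new SortField("_score", SortField.Type.SCORE))
    Expression expr = JavascriptCompiler.compile("_score * 2");
    DoubleValuesSource source = expr.getDoubleValuesSource(bindings);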

View File

@ -18,4 +18,5 @@ grant {
permission org.elasticsearch.script.ClassPermission "java.lang.Math";
permission org.elasticsearch.script.ClassPermission "org.apache.lucene.util.MathUtil";
permission org.elasticsearch.script.ClassPermission "org.apache.lucene.util.SloppyMath";
permission org.elasticsearch.script.ClassPermission "org.apache.lucene.expressions.js.ExpressionMath";
};

View File

@ -20,6 +20,10 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.spans.FieldMaskingSpanQuery;
import org.apache.lucene.queries.spans.SpanMultiTermQueryWrapper;
import org.apache.lucene.queries.spans.SpanQuery;
import org.apache.lucene.queries.spans.SpanTermQuery;
import org.apache.lucene.search.AutomatonQuery;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
@ -28,10 +32,6 @@ import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spans.FieldMaskingSpanQuery;
import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.util.automaton.Automata;
import org.apache.lucene.util.automaton.Automaton;
import org.apache.lucene.util.automaton.Operations;

View File

@ -183,6 +183,11 @@ public final class SourceConfirmedTextQuery extends Query {
return 31 * Objects.hash(in, valueFetcherProvider, indexAnalyzer) + classHash();
}
@Override
public void visit(QueryVisitor visitor) {
in.visit(visitor.getSubVisitor(Occur.MUST, this));
}
@Override
public Query rewrite(IndexReader reader) throws IOException {
Query inRewritten = in.rewrite(reader);
@ -244,11 +249,6 @@ public final class SourceConfirmedTextQuery extends Query {
return false;
}
@Override
public void extractTerms(Set<Term> termSet) {
termSet.addAll(terms);
}
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
RuntimePhraseScorer scorer = scorer(context);
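
This pair of hunks is the general Lucene 9 migration for custom queries: Weight#extractTerms is removed and term extraction flows through Query#visit instead. Code that used to collect terms from a Weight can use the visitor API; a sketch, where query stands for any Query instance:

    import java.util.HashSet;
    import java.util.Set;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.QueryVisitor;

    Set<Term> terms = new HashSet<>();
    query.visit(QueryVisitor.termCollector(terms)); // replaces weight.extractTerms(terms)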

View File

@ -15,6 +15,9 @@ import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.IndexableFieldType;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.spans.FieldMaskingSpanQuery;
import org.apache.lucene.queries.spans.SpanNearQuery;
import org.apache.lucene.queries.spans.SpanTermQuery;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
@ -26,9 +29,6 @@ import org.apache.lucene.search.NormsFieldExistsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SynonymQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spans.FieldMaskingSpanQuery;
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.IndexSettings;

View File

@ -16,6 +16,9 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.spans.SpanNearQuery;
import org.apache.lucene.queries.spans.SpanQuery;
import org.apache.lucene.queries.spans.SpanTermQuery;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.CheckHits;
@ -27,9 +30,6 @@ import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.Directory;
import org.elasticsearch.common.CheckedIntFunction;
import org.elasticsearch.common.lucene.Lucene;

View File

@ -13,6 +13,7 @@ import org.apache.lucene.index.OrdinalMap;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryVisitor;
import org.apache.lucene.search.join.JoinUtil;
import org.apache.lucene.search.join.ScoreMode;
import org.apache.lucene.search.similarities.Similarity;
@ -411,6 +412,11 @@ public class HasChildQueryBuilder extends AbstractQueryBuilder<HasChildQueryBuil
return "LateParsingQuery {joinField=" + joinField + "}";
}
@Override
public void visit(QueryVisitor visitor) {
visitor.visitLeaf(this);
}
public int getMinChildren() {
return minChildren;
}

View File

@ -10,19 +10,19 @@ package org.elasticsearch.percolator;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryVisitor;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.ScorerSupplier;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.Bits;
import org.elasticsearch.core.CheckedFunction;
@ -32,7 +32,6 @@ import org.elasticsearch.common.lucene.Lucene;
import java.io.IOException;
import java.util.List;
import java.util.Objects;
import java.util.Set;
final class PercolateQuery extends Query implements Accountable {
@ -75,10 +74,6 @@ final class PercolateQuery extends Query implements Accountable {
final Weight verifiedMatchesWeight = verifiedMatchesQuery.createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, boost);
final Weight candidateMatchesWeight = candidateMatchesQuery.createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, boost);
return new Weight(this) {
@Override
public void extractTerms(Set<Term> set) {
}
@Override
public Explanation explain(LeafReaderContext leafReaderContext, int docId) throws IOException {
Scorer scorer = scorer(leafReaderContext);
@ -239,6 +234,12 @@ final class PercolateQuery extends Query implements Accountable {
candidateMatchesQuery.toString(s) + "}}";
}
@Override
public void visit(QueryVisitor visitor) {
visitor.visitLeaf(this);
}
@Override
public long ramBytesUsed() {
long ramUsed = 0L;

View File

@ -19,9 +19,9 @@ import org.apache.lucene.index.PointValues;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.sandbox.search.CoveringQuery;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.CoveringQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.LongValuesSource;
import org.apache.lucene.search.MatchNoDocsQuery;
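
CoveringQuery is one of several classes this commit follows into Lucene's sandbox module; the same relocation appears for HalfFloatPoint and CombinedFieldQuery in later files:

    // org.apache.lucene.search.CoveringQuery      -> org.apache.lucene.sandbox.search.CoveringQuery
    // org.apache.lucene.document.HalfFloatPoint   -> org.apache.lucene.sandbox.document.HalfFloatPoint
    // org.apache.lucene.search.CombinedFieldQuery -> org.apache.lucene.sandbox.search.CombinedFieldQuery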

View File

@ -11,6 +11,8 @@ import org.apache.lucene.document.BinaryRange;
import org.apache.lucene.index.PrefixCodedTerms;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.BlendedTermQuery;
import org.apache.lucene.queries.spans.SpanOrQuery;
import org.apache.lucene.queries.spans.SpanTermQuery;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BoostQuery;
@ -24,8 +26,6 @@ import org.apache.lucene.search.QueryVisitor;
import org.apache.lucene.search.SynonymQuery;
import org.apache.lucene.search.TermInSetQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spans.SpanOrQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.automaton.ByteRunAutomaton;

View File

@ -13,7 +13,6 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.DoublePoint;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FloatPoint;
import org.apache.lucene.document.HalfFloatPoint;
import org.apache.lucene.document.InetAddressPoint;
import org.apache.lucene.document.IntPoint;
import org.apache.lucene.document.LongPoint;
@ -35,10 +34,15 @@ import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.queries.BlendedTermQuery;
import org.apache.lucene.queries.spans.SpanNearQuery;
import org.apache.lucene.queries.spans.SpanNotQuery;
import org.apache.lucene.queries.spans.SpanOrQuery;
import org.apache.lucene.queries.spans.SpanTermQuery;
import org.apache.lucene.sandbox.document.HalfFloatPoint;
import org.apache.lucene.sandbox.search.CoveringQuery;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.CoveringQuery;
import org.apache.lucene.search.DisjunctionMaxQuery;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Explanation;
@ -49,6 +53,7 @@ import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryVisitor;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Sort;
@ -58,10 +63,6 @@ import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.WildcardQuery;
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanNotQuery;
import org.apache.lucene.search.spans.SpanOrQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
@ -97,7 +98,6 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collectors;
@ -1152,6 +1152,11 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
public int hashCode() {
return classHash();
}
@Override
public void visit(QueryVisitor visitor) {
visitor.visitLeaf(this);
}
}
private static final class ControlQuery extends Query {
@ -1169,9 +1174,6 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
final IndexSearcher percolatorIndexSearcher = memoryIndex.createSearcher();
return new Weight(this) {
@Override
public void extractTerms(Set<Term> terms) {}
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
Scorer scorer = scorer(context);
@ -1260,6 +1262,11 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
return classHash();
}
@Override
public void visit(QueryVisitor visitor) {
visitor.visitLeaf(this);
}
}
}

View File

@ -29,8 +29,8 @@ import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.queries.spans.SpanNearQuery;
import org.apache.lucene.queries.spans.SpanTermQuery;
import org.apache.lucene.store.Directory;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.test.ESTestCase;

View File

@ -11,7 +11,6 @@ package org.elasticsearch.percolator;
import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
import org.apache.lucene.document.DoublePoint;
import org.apache.lucene.document.FloatPoint;
import org.apache.lucene.document.HalfFloatPoint;
import org.apache.lucene.document.InetAddressPoint;
import org.apache.lucene.document.IntPoint;
import org.apache.lucene.document.LongPoint;
@ -19,9 +18,10 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.sandbox.document.HalfFloatPoint;
import org.apache.lucene.sandbox.search.CoveringQuery;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.CoveringQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermInSetQuery;

View File

@ -9,7 +9,6 @@ package org.elasticsearch.percolator;
import org.apache.lucene.document.DoublePoint;
import org.apache.lucene.document.FloatPoint;
import org.apache.lucene.document.HalfFloatPoint;
import org.apache.lucene.document.InetAddressPoint;
import org.apache.lucene.document.IntPoint;
import org.apache.lucene.document.LatLonPoint;
@ -20,6 +19,12 @@ import org.apache.lucene.queries.BlendedTermQuery;
import org.apache.lucene.queries.intervals.IntervalQuery;
import org.apache.lucene.queries.intervals.Intervals;
import org.apache.lucene.queries.intervals.IntervalsSource;
import org.apache.lucene.queries.spans.SpanFirstQuery;
import org.apache.lucene.queries.spans.SpanNearQuery;
import org.apache.lucene.queries.spans.SpanNotQuery;
import org.apache.lucene.queries.spans.SpanOrQuery;
import org.apache.lucene.queries.spans.SpanTermQuery;
import org.apache.lucene.sandbox.document.HalfFloatPoint;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
@ -38,11 +43,6 @@ import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.search.join.QueryBitSetProducer;
import org.apache.lucene.search.join.ScoreMode;
import org.apache.lucene.search.spans.SpanFirstQuery;
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanNotQuery;
import org.apache.lucene.search.spans.SpanOrQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.lucene.search.function.CombineFunction;
import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery;
@ -799,13 +799,16 @@ public class QueryAnalyzerTests extends ESTestCase {
}
public void testSynonymQuery() {
SynonymQuery query = new SynonymQuery();
SynonymQuery query = new SynonymQuery.Builder("field").build();
Result result = analyze(query);
assertThat(result.verified, is(true));
assertThat(result.minimumShouldMatch, equalTo(0));
assertThat(result.extractions.isEmpty(), is(true));
query = new SynonymQuery(new Term("_field", "_value1"), new Term("_field", "_value2"));
query = new SynonymQuery.Builder("_field")
.addTerm(new Term("_field", "_value1"))
.addTerm(new Term("_field", "_value2"))
.build();
result = analyze(query);
assertThat(result.verified, is(true));
assertThat(result.minimumShouldMatch, equalTo(1));

View File

@ -1,5 +1,3 @@
import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
@ -23,7 +21,7 @@ tasks.named("forbiddenApisMain").configure {
}
dependencies {
api "org.apache.lucene:lucene-analyzers-icu:${versions.lucene}"
api "org.apache.lucene:lucene-analysis-icu:${versions.lucene}"
api "com.ibm.icu:icu4j:${versions.icu4j}"
}
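
Lucene 9 also renamed its analysis artifacts from lucene-analyzers-* to lucene-analysis-*, which is why this and each analysis plugin build file below changes exactly one coordinate (and why so many jar sha1 files churn). The pattern, using the ICU module shown here:

    dependencies {
      // was: api "org.apache.lucene:lucene-analyzers-icu:${versions.lucene}"
      api "org.apache.lucene:lucene-analysis-icu:${versions.lucene}"
      api "com.ibm.icu:icu4j:${versions.icu4j}"
    }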

View File

@ -1 +0,0 @@
7a4d00d5ec5febd252a6182e8b6e87a0a9821f81

View File

@ -0,0 +1 @@
76893e6000401ace133a65262254be0ebe556d46

View File

@ -0,0 +1 @@
5b64983cf184ddd4018d56114b086f3cebd734ad

View File

@ -1 +0,0 @@
97560884369643ba928b7e683e092699330c4f5b

View File

@ -22,7 +22,7 @@ import com.ibm.icu.text.RawCollationKey;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.collation.ICUCollationDocValuesField;
import org.apache.lucene.analysis.icu.ICUCollationDocValuesField;
import java.io.IOException;

View File

@ -14,7 +14,7 @@ esplugin {
}
dependencies {
api "org.apache.lucene:lucene-analyzers-kuromoji:${versions.lucene}"
api "org.apache.lucene:lucene-analysis-kuromoji:${versions.lucene}"
}
restResources {

View File

@ -0,0 +1 @@
3e27dd79ed9fa24bfb998446526baf4f16cc7a05

View File

@ -1 +0,0 @@
3bc69fe29001399cfc98ddbd9f1d27864fa6a4a1

View File

@ -14,7 +14,7 @@ esplugin {
}
dependencies {
api "org.apache.lucene:lucene-analyzers-nori:${versions.lucene}"
api "org.apache.lucene:lucene-analysis-nori:${versions.lucene}"
}
restResources {

View File

@ -0,0 +1 @@
48f2c1dec0aea566c3ac7c62322b52887a054bf6

View File

@ -1 +0,0 @@
497cdb3e2b5dd63d735cd60140f7c18533f3c401

View File

@ -14,7 +14,7 @@ esplugin {
}
dependencies {
api "org.apache.lucene:lucene-analyzers-phonetic:${versions.lucene}"
api "org.apache.lucene:lucene-analysis-phonetic:${versions.lucene}"
api "commons-codec:commons-codec:${versions.commonscodec}"
}

View File

@ -0,0 +1 @@
ee44e4909d2c2b8686606fd4d11b2b817cbd5b3f

View File

@ -1 +0,0 @@
4fa14c42bf5864d28a7abfe197e79cf89b195c1b

View File

@ -14,7 +14,7 @@ esplugin {
}
dependencies {
api "org.apache.lucene:lucene-analyzers-smartcn:${versions.lucene}"
api "org.apache.lucene:lucene-analysis-smartcn:${versions.lucene}"
}
restResources {

View File

@ -0,0 +1 @@
bf0d9a8984578032378342891c4d1e6d3147b780

View File

@ -1 +0,0 @@
18971c579487a7c55a65b055fec07d9286ed49ce

View File

@ -14,7 +14,7 @@ esplugin {
}
dependencies {
api "org.apache.lucene:lucene-analyzers-stempel:${versions.lucene}"
api "org.apache.lucene:lucene-analysis-stempel:${versions.lucene}"
}
restResources {

View File

@ -0,0 +1 @@
4a88bb19fa4035d9841f76de0572826ce6c43915

View File

@ -1 +0,0 @@
6fcf11017c3bce6e33d5cda04e3ef49560baa2a2

View File

@ -14,7 +14,7 @@ esplugin {
}
dependencies {
api "org.apache.lucene:lucene-analyzers-morfologik:${versions.lucene}"
api "org.apache.lucene:lucene-analysis-morfologik:${versions.lucene}"
api "org.carrot2:morfologik-stemming:2.1.1"
api "org.carrot2:morfologik-fsa:2.1.1"
api "ua.net.nlp:morfologik-ukrainian-search:3.7.5"

View File

@ -0,0 +1 @@
88837c81d62b322d83669cf16f85b7bc66cfe3e8

View File

@ -1 +0,0 @@
af5750b21d1d38352b49561c63b039723911a0d4

View File

@ -8,12 +8,13 @@
package org.elasticsearch.index.store.smb;
import java.io.IOException;
import java.nio.file.Path;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.NIOFSDirectory;
import org.elasticsearch.index.store.EsBaseDirectoryTestCase;
import java.io.IOException;
import java.nio.file.Path;
public class SmbNIOFSDirectoryTests extends EsBaseDirectoryTestCase {
@Override

View File

@ -1,5 +1,4 @@
import org.elasticsearch.gradle.internal.info.BuildParams
import org.elasticsearch.gradle.OS
apply plugin: 'elasticsearch.build'
apply plugin: 'elasticsearch.publish'
@ -85,6 +84,9 @@ tasks.named("yamlRestTestV7CompatTest").configure {
'indices.stats/20_translog/Translog retention without soft_deletes',
'indices.stats/20_translog/Translog stats on closed indices without soft-deletes',
// field usage results will be different between lucene versions
'indices.stats/60_field_usage/*',
// upgrade api will only get a dummy endpoint returning an exception suggesting to use _reindex
'indices.upgrade/*/*',
@ -220,6 +222,8 @@ tasks.named("yamlRestTestV7CompatTransform").configure({ task ->
"_all.primaries.indexing.types._doc.index_total"
)
task.replaceValueInMatch("_all.primaries.indexing.types._doc.index_total", 2)
// points get touched by sorting in ES 8
task.replaceValueInMatch("testindex.shards.0.stats.fields.price.points", 1)
//override for "indices.open/10_basic/?wait_for_active_shards default is deprecated" and "indices.open/10_basic/?wait_for_active_shards=index-setting"
task.addAllowedWarningRegexForTest("\\?wait_for_active_shards=index-setting is now the default behaviour.*", "?wait_for_active_shards=index-setting")

View File

@ -1,9 +1,3 @@
---
setup:
- skip:
version: " - 7.14.99"
reason: field usage stats API is introduced in 7.15
---
"Field usage stats":
- do:
@ -115,7 +109,8 @@ setup:
- match: { testindex.shards.0.stats.fields.price.inverted_index.proximity: 0 }
- match: { testindex.shards.0.stats.fields.price.stored_fields: 0 }
- gt: { testindex.shards.0.stats.fields.price.doc_values: 0 }
- match: { testindex.shards.0.stats.fields.price.points: 0 }
# can be 0 on pre-lucene 9 nodes or 1 on post-upgrade nodes
- gte: { testindex.shards.0.stats.fields.price.points: 0 }
- match: { testindex.shards.0.stats.fields.price.norms: 0 }
- match: { testindex.shards.0.stats.fields.price.term_vectors: 0 }
- match: { testindex.shards.0.stats.fields.price.inverted_index.term_frequencies: 0 }

View File

@ -33,7 +33,7 @@ dependencies {
// lucene
api "org.apache.lucene:lucene-core:${versions.lucene}"
api "org.apache.lucene:lucene-analyzers-common:${versions.lucene}"
api "org.apache.lucene:lucene-analysis-common:${versions.lucene}"
api "org.apache.lucene:lucene-backward-codecs:${versions.lucene}"
api "org.apache.lucene:lucene-grouping:${versions.lucene}"
api "org.apache.lucene:lucene-highlighter:${versions.lucene}"
@ -279,8 +279,6 @@ tasks.named('splitPackagesAudit').configure {
'org.apache.lucene.queryparser.classic.XQueryParser',
'org.apache.lucene.queries.BinaryDocValuesRangeQuery',
'org.apache.lucene.queries.BlendedTermQuery',
'org.apache.lucene.queries.MinDocQuery',
'org.apache.lucene.queries.SearchAfterSortedDocQuery',
'org.apache.lucene.queries.SpanMatchNoDocsQuery',
'org.apache.lucene.search.grouping.CollapseTopFieldDocs',
'org.apache.lucene.search.grouping.CollapsingDocValuesSource',

View File

@ -0,0 +1 @@
bce23b7816187b44700ee646ce1afbdac4423dcc

View File

@ -1 +0,0 @@
eb63f6ecd58a7e27a02b533b9c1e6cdb68f506fc

View File

@ -1 +0,0 @@
2482a84e5e26a3eaf0bd7c5a77efc60435c7f688

View File

@ -0,0 +1 @@
63ef3d7a09cc122a77d4d84af9b5c03efa480d01

View File

@ -1 +0,0 @@
f33b45dbbce59e727e5a82dddab48c8c7681e25b

View File

@ -0,0 +1 @@
49c6fdf8fdca15e4bc97973056e883e34de82b7a

View File

@ -1 +0,0 @@
4020eb4e53d759fa11819b8d6b6a49422f51abe8

View File

@ -0,0 +1 @@
0d2bf347c51f37e2f659bb8a015189e8743ec8e2

View File

@ -1 +0,0 @@
b2c9682be68699860aeeb9b048665ab6cf259c7c

View File

@ -0,0 +1 @@
d0159455b0ca8930d9caf95a7275ba192f4bd9ab

View File

@ -1 +0,0 @@
be4f561159763de6a28d47b463438331a96c31c9

View File

@ -0,0 +1 @@
342f80a704dd37012253a6b19bfad88e08287791

View File

@ -1 +0,0 @@
8fc0745c4a589cdfbd56d156236fd91dbab0dacb

View File

@ -0,0 +1 @@
5944a54b0d46ee4554f53c40c0b92ac7f1d70c1b

View File

@ -1 +0,0 @@
7089f903268271acf6eb668918ac51f0cba15213

View File

@ -0,0 +1 @@
1553866522ffdd4aa2123fa14b1a3ed2d1d64713

View File

@ -1 +0,0 @@
35a4945ac05c2aeb0c9e244098827fd7aeea1858

View File

@ -0,0 +1 @@
0e695c67e7de44914d6b9a8f95d06c2948ef8b7e

View File

@ -1 +0,0 @@
3d0929b7a5a2ba7f83d0357553f240f6d8362446

View File

@ -0,0 +1 @@
21687ed2a47b1b78eee80c2bd6de3f11eb63e473

View File

@ -1 +0,0 @@
d1d6696a4857bb580f6fc4a93bd3307effddd736

View File

@ -0,0 +1 @@
52acae7ec3773b136019613a4dd897cb915da856

View File

@ -1 +0,0 @@
bc52ac3d5fed41fde8b7ad95c7d5ce703b90377f

View File

@ -0,0 +1 @@
26b1aebb92871f79b6f2c8dab8b3f47371ec30da

View File

@ -1 +0,0 @@
57d8cc36815cf45eb16d43648c8d2a5b251b4e62

View File

@ -0,0 +1 @@
ca9b60241861bfa17401bd28287f09af61fa1c5c

View File

@ -1 +0,0 @@
17e5a74d57ecb861e93c3cfbf4feb356a0571bbf

View File

@ -0,0 +1 @@
e2449a6766c2e1c78628ce1dfbd00b172eb45b37

View File

@ -312,8 +312,6 @@ public class QueryStringIT extends ESIntegTestCase {
doAssertOneHitForQueryString("field_A0:foo");
// expanding to the limit should work
doAssertOneHitForQueryString("field_A\\*:foo");
// expanding two blocks to the limit still works
doAssertOneHitForQueryString("field_A\\*:foo field_B\\*:bar");
// adding a non-existing field on top shouldn't overshoot the limit
doAssertOneHitForQueryString("field_A\\*:foo unmapped:something");

View File

@ -111,8 +111,11 @@ public class SimpleValidateQueryIT extends ESIntegTestCase {
assertThat(response.isValid(), equalTo(true));
assertThat(response.getQueryExplanation().size(), equalTo(1));
assertThat(response.getQueryExplanation().get(0).getExplanation(),
equalTo("(MatchNoDocsQuery(\"failed [bar] query, caused by number_format_exception:[For input string: \"foo\"]\") " +
"| foo:foo | baz:foo)"));
containsString("MatchNoDocsQuery(\"failed [bar] query, caused by number_format_exception:[For input string: \"foo\"]\")"));
assertThat(response.getQueryExplanation().get(0).getExplanation(),
containsString("foo:foo"));
assertThat(response.getQueryExplanation().get(0).getExplanation(),
containsString("baz:foo"));
assertThat(response.getQueryExplanation().get(0).getError(), nullValue());
}
}

View File

@ -15,6 +15,7 @@ import org.apache.lucene.search.ConstantScoreScorer;
import org.apache.lucene.search.ConstantScoreWeight;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryVisitor;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TwoPhaseIterator;
@ -108,6 +109,13 @@ public final class BinaryDocValuesRangeQuery extends Query {
};
}
@Override
public void visit(QueryVisitor visitor) {
if (visitor.acceptField(fieldName)) {
visitor.visitLeaf(this);
}
}
@Override
public String toString(String field) {
return "BinaryDocValuesRangeQuery(fieldName=" + field + ",from=" + originalFrom + ",to=" + originalTo + ")";

View File

@ -10,16 +10,16 @@ package org.apache.lucene.queries;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermStates;
import org.apache.lucene.queries.spans.SpanQuery;
import org.apache.lucene.queries.spans.SpanWeight;
import org.apache.lucene.queries.spans.Spans;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.QueryVisitor;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanWeight;
import org.apache.lucene.search.spans.Spans;
import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import java.util.Set;
/**
* A {@link SpanQuery} that matches no documents.
@ -53,6 +53,11 @@ public class SpanMatchNoDocsQuery extends SpanQuery {
return classHash();
}
@Override
public void visit(QueryVisitor visitor) {
visitor.visitLeaf(this);
}
@Override
public SpanWeight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
return new SpanWeight(this, searcher, Collections.emptyMap(), boost) {
@ -64,9 +69,6 @@ public class SpanMatchNoDocsQuery extends SpanQuery {
return null;
}
@Override
public void extractTerms(Set<Term> terms) {}
@Override
public boolean isCacheable(LeafReaderContext ctx) {
return true;

View File

@ -11,18 +11,18 @@ package org.apache.lucene.search.uhighlight;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.spans.SpanMultiTermQueryWrapper;
import org.apache.lucene.queries.spans.SpanNearQuery;
import org.apache.lucene.queries.spans.SpanOrQuery;
import org.apache.lucene.queries.spans.SpanQuery;
import org.apache.lucene.queries.spans.SpanTermQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper;
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanOrQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.CheckedSupplier;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.search.ESToParentBlockJoinQuery;

View File

@ -11,15 +11,15 @@ package org.apache.lucene.search.vectorhighlight;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.BlendedTermQuery;
import org.apache.lucene.queries.spans.SpanTermQuery;
import org.apache.lucene.sandbox.search.CombinedFieldQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.CombinedFieldQuery;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SynonymQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery;
import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery;
import org.elasticsearch.index.search.ESToParentBlockJoinQuery;

View File

@ -90,7 +90,7 @@ public class Version implements Comparable<Version>, ToXContentFragment {
public static final Version V_7_14_2 = new Version(7140299, org.apache.lucene.util.Version.LUCENE_8_9_0);
public static final Version V_7_15_0 = new Version(7150099, org.apache.lucene.util.Version.LUCENE_8_9_0);
public static final Version V_7_16_0 = new Version(7160099, org.apache.lucene.util.Version.LUCENE_8_10_0);
public static final Version V_8_0_0 = new Version(8000099, org.apache.lucene.util.Version.LUCENE_8_10_0);
public static final Version V_8_0_0 = new Version(8000099, org.apache.lucene.util.Version.LUCENE_9_0_0);
public static final Version CURRENT = V_8_0_0;
private static final ImmutableOpenIntMap<Version> idToVersion;

View File

@ -9,14 +9,15 @@
package org.elasticsearch.action.admin.indices.diskusage;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.backward_codecs.lucene50.Lucene50PostingsFormat;
import org.apache.lucene.backward_codecs.lucene84.Lucene84PostingsFormat;
import org.apache.lucene.codecs.DocValuesProducer;
import org.apache.lucene.codecs.FieldsProducer;
import org.apache.lucene.codecs.NormsProducer;
import org.apache.lucene.codecs.PointsReader;
import org.apache.lucene.codecs.StoredFieldsReader;
import org.apache.lucene.codecs.TermVectorsReader;
import org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat;
import org.apache.lucene.codecs.lucene84.Lucene84PostingsFormat;
import org.apache.lucene.codecs.lucene90.Lucene90PostingsFormat;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.DocValuesType;
@ -42,7 +43,6 @@ import org.apache.lucene.store.FilterDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FutureArrays;
import org.elasticsearch.common.CheckedSupplier;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.lucene.FilterIndexCommit;
@ -53,6 +53,8 @@ import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.store.LuceneFilesExtensions;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
@ -169,8 +171,8 @@ import java.util.Objects;
}
@Override
public void stringField(FieldInfo fieldInfo, byte[] value) throws IOException {
trackField(fieldInfo, Integer.BYTES + value.length);
public void stringField(FieldInfo fieldInfo, String value) throws IOException {
trackField(fieldInfo, Integer.BYTES + value.getBytes(StandardCharsets.UTF_8).length);
}
@Override
@ -290,6 +292,10 @@ import java.util.Objects;
private BlockTermState getBlockTermState(TermsEnum termsEnum, BytesRef term) throws IOException {
if (term != null && termsEnum.seekExact(term)) {
final TermState termState = termsEnum.termState();
if (termState instanceof Lucene90PostingsFormat.IntBlockTermState) {
final Lucene90PostingsFormat.IntBlockTermState blockTermState = (Lucene90PostingsFormat.IntBlockTermState) termState;
return new BlockTermState(blockTermState.docStartFP, blockTermState.posStartFP, blockTermState.payStartFP);
}
if (termState instanceof Lucene84PostingsFormat.IntBlockTermState) {
final Lucene84PostingsFormat.IntBlockTermState blockTermState = (Lucene84PostingsFormat.IntBlockTermState) termState;
return new BlockTermState(blockTermState.docStartFP, blockTermState.posStartFP, blockTermState.payStartFP);
@ -425,8 +431,8 @@ import java.util.Objects;
public PointValues.Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
for (int dim = 0; dim < numDims; dim++) {
int offset = dim * bytesPerDim;
if (FutureArrays.compareUnsigned(minPackedValue, offset, offset + bytesPerDim, point, offset, offset + bytesPerDim) > 0 ||
FutureArrays.compareUnsigned(maxPackedValue, offset, offset + bytesPerDim, point, offset, offset + bytesPerDim) < 0) {
if (Arrays.compareUnsigned(minPackedValue, offset, offset + bytesPerDim, point, offset, offset + bytesPerDim) > 0 ||
Arrays.compareUnsigned(maxPackedValue, offset, offset + bytesPerDim, point, offset, offset + bytesPerDim) < 0) {
return PointValues.Relation.CELL_OUTSIDE_QUERY;
}
}
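
FutureArrays was Lucene's backport of the JDK 9 Arrays range methods; Lucene 9 requires a newer JDK, so java.util.Arrays is a drop-in replacement. The unsigned semantics matter when comparing index bytes:

    import java.util.Arrays;

    byte[] a = { (byte) 0xFF }; // -1 signed, 255 unsigned
    byte[] b = { 0x01 };
    int cmp = Arrays.compareUnsigned(a, 0, 1, b, 0, 1); // > 0, since 255 > 1
    // A signed comparison would order them the other way around.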

View File

@ -128,13 +128,6 @@ public class IndicesSegmentResponse extends BroadcastResponse {
if (segment.getSegmentSort() != null) {
toXContent(builder, segment.getSegmentSort());
}
if (segment.ramTree != null) {
builder.startArray(Fields.RAM_TREE);
for (Accountable child : segment.ramTree.getChildResources()) {
toXContent(builder, child);
}
builder.endArray();
}
if (segment.attributes != null && segment.attributes.isEmpty() == false) {
builder.field("attributes", segment.attributes);
}

View File

@ -184,7 +184,6 @@ public final class SearchPhaseController {
if (results.isEmpty()) {
return null;
}
final boolean setShardIndex = false;
final TopDocs topDocs = results.stream().findFirst().get();
final TopDocs mergedTopDocs;
final int numShards = results.size();
@ -194,15 +193,15 @@ public final class SearchPhaseController {
CollapseTopFieldDocs firstTopDocs = (CollapseTopFieldDocs) topDocs;
final Sort sort = new Sort(firstTopDocs.fields);
final CollapseTopFieldDocs[] shardTopDocs = results.toArray(new CollapseTopFieldDocs[numShards]);
mergedTopDocs = CollapseTopFieldDocs.merge(sort, from, topN, shardTopDocs, setShardIndex);
mergedTopDocs = CollapseTopFieldDocs.merge(sort, from, topN, shardTopDocs, false);
} else if (topDocs instanceof TopFieldDocs) {
TopFieldDocs firstTopDocs = (TopFieldDocs) topDocs;
final Sort sort = new Sort(firstTopDocs.fields);
final TopFieldDocs[] shardTopDocs = results.toArray(new TopFieldDocs[numShards]);
mergedTopDocs = TopDocs.merge(sort, from, topN, shardTopDocs, setShardIndex);
mergedTopDocs = TopDocs.merge(sort, from, topN, shardTopDocs);
} else {
final TopDocs[] shardTopDocs = results.toArray(new TopDocs[numShards]);
mergedTopDocs = TopDocs.merge(from, topN, shardTopDocs, setShardIndex);
mergedTopDocs = TopDocs.merge(from, topN, shardTopDocs);
}
return mergedTopDocs;
}

View File

@ -8,8 +8,8 @@
package org.elasticsearch.action.search;
import org.apache.lucene.store.RAMOutputStream;
import org.elasticsearch.common.io.stream.ByteArrayStreamInput;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.SearchShardTarget;
@ -30,7 +30,8 @@ final class TransportSearchHelper {
}
static String buildScrollId(AtomicArray<? extends SearchPhaseResult> searchPhaseResults) {
try (RAMOutputStream out = new RAMOutputStream()) {
try {
BytesStreamOutput out = new BytesStreamOutput();
out.writeString(INCLUDE_CONTEXT_UUID);
out.writeString(searchPhaseResults.length() == 1 ? ParsedScrollId.QUERY_AND_FETCH_TYPE : ParsedScrollId.QUERY_THEN_FETCH_TYPE);
out.writeVInt(searchPhaseResults.asList().size());
@ -45,9 +46,7 @@ final class TransportSearchHelper {
out.writeString(searchShardTarget.getNodeId());
}
}
byte[] bytes = new byte[(int) out.getFilePointer()];
out.writeTo(bytes, 0);
return Base64.getUrlEncoder().encodeToString(bytes);
return Base64.getUrlEncoder().encodeToString(out.copyBytes().array());
} catch (IOException e) {
throw new UncheckedIOException(e);
}
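
RAMOutputStream is removed in Lucene 9 along with the rest of the RAMDirectory family, so the scroll-id encoder switches to Elasticsearch's own BytesStreamOutput. Code without access to ES helpers could use Lucene's ByteBuffersDataOutput the same way; a sketch, assuming an enclosing method that throws IOException (the payload is illustrative):

    import java.util.Base64;
    import org.apache.lucene.store.ByteBuffersDataOutput;

    ByteBuffersDataOutput out = new ByteBuffersDataOutput();
    out.writeString("scroll-context"); // same DataOutput primitives as before
    String id = Base64.getUrlEncoder().encodeToString(out.toArrayCopy());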

View File

@ -599,8 +599,8 @@ public class GeoUtils {
* 4 decimal degrees
*/
public static double planeDistance(double lat1, double lon1, double lat2, double lon2) {
double x = (lon2 - lon1) * SloppyMath.TO_RADIANS * Math.cos((lat2 + lat1) / 2.0 * SloppyMath.TO_RADIANS);
double y = (lat2 - lat1) * SloppyMath.TO_RADIANS;
double x = Math.toRadians(lon2 - lon1) * Math.cos(Math.toRadians((lat2 + lat1) / 2.0));
double y = Math.toRadians(lat2 - lat1);
return Math.sqrt(x * x + y * y) * EARTH_MEAN_RADIUS;
}
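
This hunk swaps Lucene's SloppyMath.TO_RADIANS constant for the JDK's Math.toRadians, which performs the same degrees-to-radians conversion (x * pi / 180, up to floating-point rounding):

    double rad = Math.toRadians(90.0); // 1.5707963267948966 == pi / 2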

View File

@ -49,6 +49,7 @@ import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.index.StoredFieldVisitor;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.VectorValues;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.FieldDoc;
@ -74,17 +75,16 @@ import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.Version;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.core.SuppressForbidden;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lucene.index.SequentialStoredFieldsLeafReader;
import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore;
import org.elasticsearch.common.util.iterable.Iterables;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.core.SuppressForbidden;
import org.elasticsearch.index.analysis.AnalyzerScope;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.fielddata.IndexFieldData;
@ -101,7 +101,7 @@ import java.util.List;
import java.util.Map;
public class Lucene {
public static final String LATEST_CODEC = "Lucene87";
public static final String LATEST_CODEC = "Lucene90";
public static final String SOFT_DELETES_FIELD = "__soft_deletes";
@ -197,7 +197,7 @@ public class Lucene {
* since checksums don't match anymore. that's why we prune the name here directly.
* We also want the caller to know if we were not able to remove a segments_N file.
*/
if (file.startsWith(IndexFileNames.SEGMENTS) || file.equals(IndexFileNames.OLD_SEGMENTS_GEN)) {
if (file.startsWith(IndexFileNames.SEGMENTS)) {
foundSegmentFiles++;
if (file.equals(si.getSegmentsFileName()) == false) {
directory.deleteFile(file); // remove all segment_N files except the one we want to keep
@ -235,7 +235,7 @@ public class Lucene {
public static void cleanLuceneIndex(Directory directory) throws IOException {
try (Lock writeLock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
for (final String file : directory.listAll()) {
if (file.startsWith(IndexFileNames.SEGMENTS) || file.equals(IndexFileNames.OLD_SEGMENTS_GEN)) {
if (file.startsWith(IndexFileNames.SEGMENTS)) {
directory.deleteFile(file); // remove all segment_N files
}
}
@ -1024,6 +1024,16 @@ public class Lucene {
return null;
}
@Override
public VectorValues getVectorValues(String field) throws IOException {
return null;
}
@Override
public TopDocs searchNearestVectors(String field, float[] target, int k, Bits acceptDocs) throws IOException {
return null;
}
public FieldInfos getFieldInfos() {
return new FieldInfos(new FieldInfo[0]);
}
@ -1039,7 +1049,8 @@ public class Lucene {
public void checkIntegrity() {
}
public Fields getTermVectors(int docID) {
@Override
public Fields getTermVectors(int docID) throws IOException {
return null;
}
@ -1074,7 +1085,7 @@ public class Lucene {
/**
* Prepares a new {@link IndexWriterConfig} that does not do any merges, by setting both the merge policy and the merge scheduler.
* Setting just the merge policy means that constructing the index writer will create a {@link ConcurrentMergeScheduler} by default,
* which is quite heavyweight and in particular it can unnecessarily block on {@link IOUtils#spins}.
* which is quite heavyweight.
*/
@SuppressForbidden(reason = "NoMergePolicy#INSTANCE is safe to use since we also set NoMergeScheduler#INSTANCE")
public static IndexWriterConfig indexWriterConfigWithNoMerging(Analyzer analyzer) {

View File

@ -34,7 +34,7 @@ public class MinimumScoreCollector extends SimpleCollector {
@Override
public void setScorer(Scorable scorer) throws IOException {
if ((scorer instanceof ScoreCachingWrappingScorer) == false) {
scorer = new ScoreCachingWrappingScorer(scorer);
scorer = ScoreCachingWrappingScorer.wrap(scorer);
}
this.scorer = scorer;
leafCollector.setScorer(scorer);
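
Lucene 9 replaces the public ScoreCachingWrappingScorer constructor with a static wrap factory. The factory appears to return its argument unchanged when handed an already-caching scorer (an assumption based on its contract), which would make the surrounding instanceof check redundant:

    import org.apache.lucene.search.Scorable;
    import org.apache.lucene.search.ScoreCachingWrappingScorer;

    // Safe to call unconditionally; re-wrapping an already-caching scorer is a no-op.
    Scorable cached = ScoreCachingWrappingScorer.wrap(scorer);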

View File

@ -19,6 +19,7 @@ import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryVisitor;
import org.apache.lucene.search.similarities.ClassicSimilarity;
import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.search.similarities.TFIDFSimilarity;
@ -148,6 +149,11 @@ public class MoreLikeThisQuery extends Query {
return createQuery(mlt);
}
@Override
public void visit(QueryVisitor visitor) {
visitor.visitLeaf(this);
}
private Query createQuery(XMoreLikeThis mlt) throws IOException {
BooleanQuery.Builder bqBuilder = new BooleanQuery.Builder();
if (this.likeFields != null) {

View File

@ -19,6 +19,7 @@ import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryVisitor;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.StringHelper;
@ -303,4 +304,11 @@ public class MultiPhrasePrefixQuery extends Query {
public String getField() {
return field;
}
@Override
public void visit(QueryVisitor visitor) {
if (visitor.acceptField(field)) {
visitor.visitLeaf(this); // TODO implement term visiting
}
}
}

View File

@ -15,13 +15,13 @@ import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.queries.SpanMatchNoDocsQuery;
import org.apache.lucene.queries.spans.SpanMultiTermQueryWrapper;
import org.apache.lucene.queries.spans.SpanOrQuery;
import org.apache.lucene.queries.spans.SpanQuery;
import org.apache.lucene.queries.spans.SpanTermQuery;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper;
import org.apache.lucene.search.spans.SpanOrQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.util.BytesRef;

View File

@ -10,7 +10,6 @@ package org.elasticsearch.common.lucene.search.function;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.FilterScorer;
@ -34,7 +33,6 @@ import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.Objects;
import java.util.Set;
/**
* A query that allows for a pluggable boost function / filter. If it matches
@ -248,11 +246,6 @@ public class FunctionScoreQuery extends Query {
this.needsScores = needsScores;
}
@Override
public void extractTerms(Set<Term> terms) {
subQueryWeight.extractTerms(terms);
}
private FunctionFactorScorer functionScorer(LeafReaderContext context) throws IOException {
Scorer subQueryScorer = subQueryWeight.scorer(context);
if (subQueryScorer == null) {

Some files were not shown because too many files have changed in this diff.