Adapting to ES 2.0.0 #21
base: master
pom.xml
@@ -6,7 +6,7 @@
 <modelVersion>4.0.0</modelVersion>
 <groupId>com.yakaz.elasticsearch.plugins</groupId>
 <artifactId>elasticsearch-analysis-combo</artifactId>
-<version>1.5.2-SNAPSHOT</version>
+<version>2.1.1-SNAPSHOT</version>
 <packaging>jar</packaging>
 <inceptionYear>2011</inceptionYear>
 <licenses>
@@ -38,8 +38,9 @@
 </parent>

 <properties>
-<elasticsearch.version>1.0.0.RC1</elasticsearch.version>
-<lucene.version>4.6.0</lucene.version>
+<elasticsearch.version>2.1.1</elasticsearch.version>
+<lucene.version>5.3.1</lucene.version>
+<mvn.java.version>1.7</mvn.java.version>
 <!-- Activate the following when running tests against ES 1.4+ -->
 <!--randomizedtesting-runner.version>2.1.10</randomizedtesting-runner.version-->
 </properties>
@@ -75,6 +76,19 @@
 <scope>test</scope>
 </dependency>

+<dependency>
+<groupId>junit</groupId>
Review comment: This was needed because ES checks for 'jar hell' when booting, even in tests. JUnit brings in hamcrest-core, which conflicts with the use of hamcrest-all in the project's tests.
+<artifactId>junit</artifactId>
+<version>4.11</version>
+<exclusions>
+<exclusion>
+<artifactId>hamcrest-core</artifactId>
+<groupId>org.hamcrest</groupId>
+</exclusion>
+</exclusions>
+<scope>test</scope>
+</dependency>
+
 <dependency>
 <groupId>log4j</groupId>
 <artifactId>log4j</artifactId>
@@ -117,8 +131,8 @@
 <artifactId>maven-compiler-plugin</artifactId>
 <version>2.3.2</version>
 <configuration>
-<source>1.6</source>
-<target>1.6</target>
+<source>${mvn.java.version}</source>
+<target>${mvn.java.version}</target>
 </configuration>
 </plugin>
 <plugin>
Plugin assembly descriptor
@@ -5,6 +5,13 @@
 <format>zip</format>
 </formats>
 <includeBaseDirectory>false</includeBaseDirectory>
+<files>
Review comment: The plugin architecture has changed; plugin metadata is now specified in an external properties file.
+<file>
+<source>src/main/resources/plugin-descriptor.properties</source>
+<outputDirectory></outputDirectory>
+<filtered>true</filtered>
+</file>
+</files>
 <dependencySets>
 <dependencySet>
 <outputDirectory>/</outputDirectory>
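For reference, the descriptor being packaged and filtered here follows the standard ES 2.x plugin-descriptor format. A sketch of what it plausibly contains (the actual file is not shown in this diff; values are illustrative and the classname is hypothetical). The filtered=true setting above lets Maven substitute the ${...} placeholders at build time:

# plugin-descriptor.properties (sketch; Maven filtering fills in the ${...} placeholders)
description=Combo analyzer plugin for Elasticsearch
version=${project.version}
name=analysis-combo
jvm=true
# Hypothetical: must name the plugin's org.elasticsearch.plugins.Plugin subclass
classname=org.elasticsearch.plugins.analysis.combo.AnalysisComboPlugin
java.version=${mvn.java.version}
elasticsearch.version=${elasticsearch.version}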
ComboAnalyzer.java (org.apache.lucene.analysis)
@@ -18,18 +18,14 @@
 package org.apache.lucene.analysis;

+import org.apache.lucene.analysis.miscellaneous.UniqueTokenFilter;
-import org.apache.lucene.util.CloseableThreadLocal;
 import org.apache.lucene.util.ReaderCloneFactory;
-import org.apache.lucene.util.Version;
-import org.elasticsearch.common.logging.ESLogger;
-import org.elasticsearch.common.logging.ESLoggerFactory;

 import java.io.IOException;
 import java.io.Reader;
-import java.util.Arrays;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
-import java.util.concurrent.atomic.AtomicReference;

 /**
 * An analyzer that combines multiple sub-analyzers into one.
@@ -50,8 +46,6 @@
 */
 public class ComboAnalyzer extends Analyzer {

-protected static final ESLogger logger = ESLoggerFactory.getLogger(ComboAnalyzer.class.getSimpleName());
-
 /**
 * Default value for the enabled state of {@link TokenStream} caching.
 */
@@ -71,13 +65,8 @@ public class ComboAnalyzer extends Analyzer {

 private boolean deduplication = DEDUPLICATION_ENABLED_DEFAULT;

-private CloseableThreadLocal<TokenStream[]> lastTokenStreams = new CloseableThreadLocal<TokenStream[]>();
-private CloseableThreadLocal<TokenStream[]> tempTokenStreams = new CloseableThreadLocal<TokenStream[]>();
-private CloseableThreadLocal<ReusableTokenStreamComponents> lastComboTokenStream = new CloseableThreadLocal<ReusableTokenStreamComponents>();
-
-public ComboAnalyzer(Version version, Analyzer... subAnalyzers) {
-super(new GlobalReuseStrategy());
+public ComboAnalyzer(Analyzer... subAnalyzers) {
+super();
 this.subAnalyzers = subAnalyzers;

 // Detect duplicates in analyzers
@@ -168,113 +157,109 @@ public ComboAnalyzer disableDeduplication() {
 return this;
 }

-protected ReaderCloneFactory.ReaderCloner<? extends Reader> cloneReader(Reader originalReader) {
-ReaderCloneFactory.ReaderCloner<? extends Reader> rtn;
+private static Tokenizer DUMMY_TOKENIZER = new Tokenizer(){
+@Override
+public boolean incrementToken() throws IOException {
+return false;
+}
+};

-// Duplication of the original reader, to feed all sub-analyzers
-if (subAnalyzers.length <= 1) {
+@Override
+protected TokenStreamComponents createComponents(String fieldName) {
+return new CombiningTokenStreamComponents(fieldName);
Review comment: This is the largest change in the code. In Lucene 5, components are more or less forced to be reusable. Instead of an external TokenStreamComponents class, there is now an internal one, which generates its token streams only when getTokenStream() is called, not before. In my opinion this makes the file a bit more readable, since all the state is embedded in the inner class. The basic reuse/deduplication behavior is still the same, but the methods are smaller. Would love feedback.
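For context, the Lucene 5 contract this comment refers to looks roughly like the following minimal sketch (hypothetical example, not code from this PR): createComponents no longer receives a Reader; Lucene builds the components once per thread and pushes each new input through TokenStreamComponents.setReader(Reader), so the components must be built for reuse.

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.analysis.core.LowerCaseFilter;

// Minimal sketch of a reusable Lucene 5 Analyzer (hypothetical example).
public final class LowercasingAnalyzer extends Analyzer {
    @Override
    protected TokenStreamComponents createComponents(String fieldName) {
        // Built once per thread; Lucene reuses these components and
        // feeds new input via TokenStreamComponents.setReader(Reader).
        Tokenizer source = new KeywordTokenizer();
        TokenStream result = new LowerCaseFilter(source);
        return new TokenStreamComponents(source, result);
    }
}

The PR follows the same pattern, except that its components build the combined stream lazily in getTokenStream(), because the sub-analyzers' streams cannot be wired up until the input Reader is known.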
+}

-// Can reuse the only reader we have, there will be no need of duplication
-// Usage of the AtomicReference ensures that the same reader won't be duplicated.
-ReaderCloneFactory.ReaderCloner<Reader> useOnceReaderCloner = new ReaderCloneFactory.ReaderCloner<Reader>() {
-private AtomicReference<Reader> singleUsageReference = null;
-public void init(Reader originalReader) throws IOException {
-singleUsageReference = new AtomicReference<Reader>(originalReader);
-}
-public Reader giveAClone() {
-return singleUsageReference.getAndSet(null);
-}
-};
-try {
-useOnceReaderCloner.init(originalReader);
-} catch (Throwable fail) {
-useOnceReaderCloner = null;
-}
-rtn = useOnceReaderCloner;
+@Override public void close() {
+super.close();
+}

-} else {
+private class CombiningTokenStreamComponents extends TokenStreamComponents {

-rtn = ReaderCloneFactory.getCloner(originalReader); // internally uses the default "should always work" implementation
+private final Map<Analyzer, CachingTokenStream> duplicateAnalyzers = new HashMap<Analyzer, CachingTokenStream>();
+private final String field;
+private Reader reader;

+public CombiningTokenStreamComponents(String field) {
+super(DUMMY_TOKENIZER);
+this.field = field;
+}

-if (rtn == null) {
-throw new IllegalArgumentException("Could not duplicate the original reader to feed multiple sub-readers");
-}
-return rtn;
-}
+@Override
+public void setReader(Reader reader) throws IOException {
+duplicateAnalyzers.clear();
+this.reader = reader;
+}

-@Override
-protected TokenStreamComponents createComponents(String fieldName, Reader originalReader) {
-// Duplication of the original reader, to feed all sub-analyzers
-ReaderCloneFactory.ReaderCloner readerCloner = cloneReader(originalReader);

-// We remember last used TokenStreams because many times Analyzers can provide a reusable TokenStream
-// Detecting that all sub-TokenStreams are reusable permits to reuse our ComboTokenStream as well.
-if (tempTokenStreams.get() == null) tempTokenStreams.set(new TokenStream[subAnalyzers.length]); // each time non reusability has been detected
-if (lastTokenStreams.get() == null) lastTokenStreams.set(new TokenStream[subAnalyzers.length]); // only at first run
-TokenStream[] tempTokenStreams_local = tempTokenStreams.get();
-TokenStream[] lastTokenStreams_local = lastTokenStreams.get();
-ReusableTokenStreamComponents lastComboTokenStream_local = lastComboTokenStream.get();
-if (lastComboTokenStream_local == null)
-lastComboTokenStream_local = new ReusableTokenStreamComponents(fieldName, this);
+@Override
+public TokenStream getTokenStream() {
+TokenStream ret = createTokenStreams();
+return deduplication ? new UniqueTokenFilter(ret) : ret;
+}

-// Get sub-TokenStreams from sub-analyzers
-for (int i = subAnalyzers.length-1 ; i >= 0 ; --i) {
+private TokenStream createTokenStreams() {
+if (subAnalyzers.length == 1) {
+return createTokenStream(subAnalyzers[0], field, reader);
+}
+else {
+ReaderCloneFactory.ReaderCloner<Reader> cloner = ReaderCloneFactory.getCloner(reader);
+TokenStream[] streams = new TokenStream[subAnalyzers.length];
+for (int i = 0; i < subAnalyzers.length; i++) {
+streams[i] = createTokenStream(subAnalyzers[i], field, cloner.giveAClone());
+}
+return new ComboTokenStream(streams);
+}
+}

-// Feed the troll
-Reader reader = readerCloner.giveAClone();
-tempTokenStreams_local[i] = null;
+private TokenStream createTokenStream(Analyzer analyzer, String field, Reader reader) {
 try {
-tempTokenStreams_local[i] = subAnalyzers[i].tokenStream(fieldName, reader);
-} catch (IOException ignored) {
-logger.debug("Ignoring {}th analyzer [{}]. Could not get a TokenStream.", ignored, i, subAnalyzers[i]);
-}
-// Use caching if asked or if required in case of duplicated analyzers
-if (cacheTokenStreams || hasDuplicatedAnalyzers && duplicatedAnalyzers.contains(subAnalyzers[i])) {
-CachingTokenStream cache = new CachingTokenStream(tempTokenStreams_local[i]);
-try {
-tempTokenStreams_local[i].reset();
-cache.fillCache();
-} catch (IOException ignored) {
-logger.debug("Got an error when caching TokenStream from the {}th analyzer [{}]", i, subAnalyzers[i]);
+if (hasDuplicatedAnalyzers && duplicatedAnalyzers.contains(analyzer)) {
+return createCachedCopies(analyzer, field, reader);
 }
-try {
-// Close original stream, all tokens are buffered
-tempTokenStreams_local[i].close();
-} catch (IOException ignored) {
-logger.debug("Got an error when closing TokenStream from the {}th analyzer [{}]", i, subAnalyzers[i]);
+else if (cacheTokenStreams) {
+return loadAndClose(analyzer.tokenStream(field, reader));
 }
-tempTokenStreams_local[i] = cache;
+else {
+return analyzer.tokenStream(field, reader);
 }
-// Detect non reusability
-if (tempTokenStreams_local[i] != lastTokenStreams_local[i]) {
-lastComboTokenStream_local.setTokenStream(null);
-}
+} catch (IOException e) {
+throw new RuntimeException(e);
+}
 }

+private TokenStream createCachedCopies(Analyzer analyzer, String field, Reader reader) throws IOException {
+// First time we see this analyzer, means that we have to cache the content
+if (!duplicateAnalyzers.containsKey(analyzer)) {
+CachingTokenStream caching = loadAndClose(analyzer.tokenStream(field, reader));
+duplicateAnalyzers.put(analyzer, caching);
+return caching;
+}
+else {
+// Already seen, can just create a new copy of the cached content
+return loadAsCaching(duplicateAnalyzers.get(analyzer));
+}
+}
Review comment: Rather than producing debug output, I opted to throw a runtime exception if something fails. It is quite severe if one loses tokens without getting an error.
-// If last ComboTokenStream is not available create a new one
-// This happens in the first call and in case of non reusability
-if (lastComboTokenStream_local.getTokenStream() == null) {
-// Clear old invalid references (preferred over allocating a new array)
-Arrays.fill(lastTokenStreams_local, null);
-// Swap temporary and last (non reusable) TokenStream references
-lastTokenStreams.set(tempTokenStreams_local);
-tempTokenStreams.set(lastTokenStreams_local);
-// New ComboTokenStream to use
-lastComboTokenStream_local.setTokenStream(new ComboTokenStream(tempTokenStreams_local));
-if (deduplication)
-lastComboTokenStream_local.setTokenStream(new UniqueTokenFilter(lastComboTokenStream_local.getTokenStream(), true));
-lastComboTokenStream.set(lastComboTokenStream_local);
-}
-return lastComboTokenStream_local;
-}
+private CachingTokenStream loadAndClose(TokenStream tokenStream) {
+CachingTokenStream cache = loadAsCaching(tokenStream);
+try {
+tokenStream.close();
+}
+catch (IOException e) {
+throw new RuntimeException(e);
+}
+return cache;
+}

-@Override public void close() {
-super.close();
-lastTokenStreams.close();
-tempTokenStreams.close();
-lastComboTokenStream.close();
+private CachingTokenStream loadAsCaching(TokenStream tokenStream) {
+try {
+CachingTokenStream cachingTokenStream = new CachingTokenStream(tokenStream);
+tokenStream.reset();
+cachingTokenStream.fillCache();
+return cachingTokenStream;
+}
+catch (Exception e) {
+throw new RuntimeException(e);
+}
+}
+}

 }
ComboAnalyzer wrapper (org.apache.lucene.analysis)
@@ -20,7 +20,6 @@
 package org.apache.lucene.analysis;

-import org.apache.lucene.util.Version;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.inject.Injector;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.logging.ESLoggerFactory;
@@ -72,7 +71,7 @@ protected void init() {
 String[] sub = settings.getAsArray("sub_analyzers");
 ArrayList<Analyzer> subAnalyzers = new ArrayList<Analyzer>();
 if (sub == null) {
-throw new ElasticsearchIllegalArgumentException("Analyzer ["+name+"] analyzer of type ["+NAME+"], must have a \"sub_analyzers\" list property");
+throw new IllegalArgumentException("Analyzer ["+name+"] analyzer of type ["+NAME+"], must have a \"sub_analyzers\" list property");
Review comment: The ES version of this exception class is no more.
 }

 for (String subname : sub) {
@@ -84,7 +83,7 @@ protected void init() {
 }
 }

-this.analyzer = new org.apache.lucene.analysis.ComboAnalyzer(version, subAnalyzers.toArray(new Analyzer[subAnalyzers.size()]));
+this.analyzer = new org.apache.lucene.analysis.ComboAnalyzer(subAnalyzers.toArray(new Analyzer[subAnalyzers.size()]));

 Boolean tokenstreamCaching = settings.getAsBoolean("tokenstream_caching", null);
 if (tokenstreamCaching != null)
@@ -96,9 +95,9 @@ protected void init() {
 }

 @Override
-protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+protected TokenStreamComponents createComponents(String fieldName) {
 if (analyzer == null) init();
-return this.analyzer.createComponents(fieldName, reader);
+return this.analyzer.createComponents(fieldName);
 }

 @Override public void close() {
Review comment: Took the liberty to upgrade to Java 7; it has been out for a while, at least. As a side note, the project does not build with Java 8, due to its much stricter javadoc requirements. Opted not to change the javadocs in this PR.
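The Java 8 failure mode described here is typically the doclint check that JDK 8 added to the javadoc tool, which turns javadoc warnings into errors. A sketch of one common workaround, should it ever be wanted (an assumed pom.xml addition, not part of this PR):

<!-- Hypothetical pom.xml snippet: relax JDK 8 doclint so javadoc issues do not fail the build -->
<plugin>
  <groupId>org.apache.maven.plugins</groupId>
  <artifactId>maven-javadoc-plugin</artifactId>
  <configuration>
    <additionalparam>-Xdoclint:none</additionalparam>
  </configuration>
</plugin>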