Example source code for the Java class org.apache.lucene.analysis.ngram.Lucene43EdgeNGramTokenizer

Project: Elasticsearch    File: EdgeNGramTokenizerFactory.java
public EdgeNGramTokenizerFactory(Index index, Settings indexSettings, String name, Settings settings) {
    super(index, indexSettings, name, settings);
    // Gram bounds fall back to NGramTokenizer's defaults (min 1, max 2).
    this.minGram = settings.getAsInt("min_gram", NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE);
    this.maxGram = settings.getAsInt("max_gram", NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE);
    // The "side" setting is parsed into the deprecated FRONT/BACK enum; DEFAULT_SIDE supplies the default label.
    this.side = Lucene43EdgeNGramTokenizer.Side.getSide(settings.get("side", Lucene43EdgeNGramTokenizer.DEFAULT_SIDE.getLabel()));
    this.matcher = parseTokenChars(settings.getAsArray("token_chars"));
    // The index-creation version is kept so the factory can later decide whether to use the legacy tokenizer.
    this.esVersion = org.elasticsearch.Version.indexCreated(indexSettings);
}
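For context, here is a minimal sketch (not taken from the Elasticsearch sources) of where these parsed settings end up: the Side value and the gram bounds become constructor arguments of the legacy tokenizer. The class name EdgeNGramSettingsDemo, the sample input, the gram bounds 1..3, and the use of Version.LUCENE_44 with the deprecated (Version, Reader, Side, minGram, maxGram) constructor are assumptions for illustration on a Lucene 4.x classpath. The factory itself also stores esVersion, presumably because newer index versions switch to the rewritten 4.4+ EdgeNGramTokenizer, which no longer supports back-side grams.

import java.io.StringReader;

import org.apache.lucene.analysis.ngram.Lucene43EdgeNGramTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.util.Version;

public class EdgeNGramSettingsDemo {
    public static void main(String[] args) throws Exception {
        // Same parsing the factory performs for the "side" setting; "front" is DEFAULT_SIDE's label.
        Lucene43EdgeNGramTokenizer.Side side = Lucene43EdgeNGramTokenizer.Side.getSide("front");
        // Assumed gram bounds 1..3 (the factory's defaults would be 1 and 2).
        Lucene43EdgeNGramTokenizer tokenizer = new Lucene43EdgeNGramTokenizer(
                Version.LUCENE_44, new StringReader("elastic"), side, 1, 3);
        CharTermAttribute term = tokenizer.addAttribute(CharTermAttribute.class);
        tokenizer.reset();
        while (tokenizer.incrementToken()) {
            System.out.println(term.toString());   // prints "e", "el", "ela"
        }
        tokenizer.end();
        tokenizer.close();
    }
}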
Project: search    File: TestRandomChains.java
// Argument producer for TestRandomChains: supplies a randomly chosen Side whenever one is needed.
@Override public Object create(Random random) {
  return random.nextBoolean() 
      ? Lucene43EdgeNGramTokenizer.Side.FRONT 
      : Lucene43EdgeNGramTokenizer.Side.BACK;
}
Project: Maskana-Gestor-de-Conocimiento    File: TestRandomChains.java
// Same argument producer as above: picks FRONT or BACK at random for the deprecated Side parameter.
@Override public Object create(Random random) {
  return random.nextBoolean() 
      ? Lucene43EdgeNGramTokenizer.Side.FRONT 
      : Lucene43EdgeNGramTokenizer.Side.BACK;
}
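Since both producers just flip a coin between the two enum constants, a short sketch of what that choice means for the emitted grams may help. FrontVsBackDemo and the sample input are made-up names, and it again assumes the deprecated Lucene 4.x (Version, Reader, Side, minGram, maxGram) constructor is available on the classpath.

import java.io.StringReader;

import org.apache.lucene.analysis.ngram.Lucene43EdgeNGramTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.util.Version;

public class FrontVsBackDemo {
    public static void main(String[] args) throws Exception {
        for (Lucene43EdgeNGramTokenizer.Side side : Lucene43EdgeNGramTokenizer.Side.values()) {
            Lucene43EdgeNGramTokenizer tokenizer = new Lucene43EdgeNGramTokenizer(
                    Version.LUCENE_44, new StringReader("elastic"), side, 1, 3);
            CharTermAttribute term = tokenizer.addAttribute(CharTermAttribute.class);
            StringBuilder grams = new StringBuilder(side.getLabel()).append(':');
            tokenizer.reset();
            while (tokenizer.incrementToken()) {
                grams.append(' ').append(term.toString());
            }
            tokenizer.end();
            tokenizer.close();
            System.out.println(grams);   // expected: "front: e el ela" then "back: c ic tic"
        }
    }
}

The BACK side is the part that was dropped when EdgeNGramTokenizer was rewritten in Lucene 4.4, which appears to be why these test producers target the Lucene43 class specifically.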