Java class org.apache.lucene.index.IndexReader code examples

Project: elasticsearch_my    File: ScriptedMetricAggregatorTests.java
/**
 * Test that the combine script sums the list produced by the "mapScript".
 */
public void testScriptedMetricWithCombine() throws IOException {
    try (Directory directory = newDirectory()) {
        Integer numDocs = randomInt(100);
        try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) {
            for (int i = 0; i < numDocs; i++) {
                indexWriter.addDocument(singleton(new SortedNumericDocValuesField("number", i)));
            }
        }
        try (IndexReader indexReader = DirectoryReader.open(directory)) {
            ScriptedMetricAggregationBuilder aggregationBuilder = new ScriptedMetricAggregationBuilder(AGG_NAME);
            aggregationBuilder.initScript(INIT_SCRIPT).mapScript(MAP_SCRIPT).combineScript(COMBINE_SCRIPT);
            ScriptedMetric scriptedMetric = search(newSearcher(indexReader, true, true), new MatchAllDocsQuery(), aggregationBuilder);
            assertEquals(AGG_NAME, scriptedMetric.getName());
            assertNotNull(scriptedMetric.aggregation());
            assertEquals(numDocs, scriptedMetric.aggregation());
        }
    }
}
Project: elasticsearch_my    File: MinDocQueryTests.java
public void testRandom() throws IOException {
    final int numDocs = randomIntBetween(10, 200);
    final Document doc = new Document();
    final Directory dir = newDirectory();
    final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
    for (int i = 0; i < numDocs; ++i) {
        w.addDocument(doc);
    }
    final IndexReader reader = w.getReader();
    final IndexSearcher searcher = newSearcher(reader);
    for (int i = 0; i <= numDocs; ++i) {
        assertEquals(numDocs - i, searcher.count(new MinDocQuery(i)));
    }
    w.close();
    reader.close();
    dir.close();
}
Project: elasticsearch_my    File: QueryPhaseTests.java
private void countTestCase(Query query, IndexReader reader, boolean shouldCollect) throws Exception {
    TestSearchContext context = new TestSearchContext(null);
    context.parsedQuery(new ParsedQuery(query));
    context.setSize(0);
    context.setTask(new SearchTask(123L, "", "", "", null));

    IndexSearcher searcher = new IndexSearcher(reader);
    final AtomicBoolean collected = new AtomicBoolean();
    IndexSearcher contextSearcher = new IndexSearcher(reader) {
        protected void search(List<LeafReaderContext> leaves, Weight weight, Collector collector) throws IOException {
            collected.set(true);
            super.search(leaves, weight, collector);
        }
    };

    final boolean rescore = QueryPhase.execute(context, contextSearcher);
    assertFalse(rescore);
    assertEquals(searcher.count(query), context.queryResult().topDocs().totalHits);
    assertEquals(shouldCollect, collected.get());
}
Project: Elasticsearch    File: TermSuggester.java
@Override
public TermSuggestion innerExecute(String name, TermSuggestionContext suggestion, IndexSearcher searcher, CharsRefBuilder spare) throws IOException {
    DirectSpellChecker directSpellChecker = SuggestUtils.getDirectSpellChecker(suggestion.getDirectSpellCheckerSettings());
    final IndexReader indexReader = searcher.getIndexReader();
    TermSuggestion response = new TermSuggestion(
            name, suggestion.getSize(), suggestion.getDirectSpellCheckerSettings().sort()
    );
    List<Token> tokens = queryTerms(suggestion, spare);
    for (Token token : tokens) {
        // TODO: Extend DirectSpellChecker in 4.1, to get the raw suggested words as BytesRef
        SuggestWord[] suggestedWords = directSpellChecker.suggestSimilar(
                token.term, suggestion.getShardSize(), indexReader, suggestion.getDirectSpellCheckerSettings().suggestMode()
        );
        Text key = new Text(new BytesArray(token.term.bytes()));
        TermSuggestion.Entry resultEntry = new TermSuggestion.Entry(key, token.startOffset, token.endOffset - token.startOffset);
        for (SuggestWord suggestWord : suggestedWords) {
            Text word = new Text(suggestWord.string);
            resultEntry.addOption(new TermSuggestion.Entry.Option(word, suggestWord.freq, suggestWord.score));
        }
        response.addTerm(resultEntry);
    }
    return response;
}
Project: alfresco-repository    File: IndexInfo.java
private IndexReader buildReferenceCountingIndexReader(String id, long size) throws IOException
{
    IndexReader reader;
    File location = new File(indexDirectory, id).getCanonicalFile();
    double folderSize = getSizeInMb(location);
    if (IndexReader.indexExists(location))
    {
        if ((size < maxDocsForInMemoryIndex) && (folderSize < maxRamInMbForInMemoryIndex))
        {
            RAMDirectory rd = new RAMDirectory(location);
            reader = IndexReader.open(rd);
        }
        else
        {
            reader = IndexReader.open(location);
        }
    }
    else
    {
        reader = IndexReader.open(emptyIndex);
    }
    reader = ReferenceCountingReadOnlyIndexReaderFactory.createReader(id, reader, true, config);
    return reader;
}
Project: sjk    File: SpellCheckerServiceImpl.java
@PostConstruct
public void reset() {
    String indexDir = appConfig.getAllSpellCheckerDir();
    try {
        Directory spellcheckDir = FSDirectory.open(new File(indexDir));
        if (!IndexReader.indexExists(spellcheckDir)) {
            logger.info("Please reset index firstly!");
            return;
        }
        SpellChecker newSpellChecker = new SpellChecker(spellcheckDir);
        newSpellChecker.setStringDistance(new JaroWinklerDistance());
        newSpellChecker.setAccuracy(0.7f);
        if (spellChecker == null) {
            spellChecker = newSpellChecker;
        } else {
            final Closeable preSpellChecker = spellChecker;
            spellChecker = newSpellChecker;
            IOUtils.closeQuietly(preSpellChecker);
        }
    } catch (Exception e) {
        logger.error("Exception", e);
    }
}
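
For context, a minimal sketch of how such a SpellChecker is typically queried once the spell index exists. It assumes the same Lucene 3.x-style File-based API used above; the index path and the misspelled word are hypothetical.

import java.io.File;

import org.apache.lucene.search.spell.JaroWinklerDistance;
import org.apache.lucene.search.spell.SpellChecker;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class SpellCheckerQueryExample {
    public static void main(String[] args) throws Exception {
        // Hypothetical path to an already built spell-checker index.
        Directory spellDir = FSDirectory.open(new File("/tmp/spellcheck-index"));
        SpellChecker spellChecker = new SpellChecker(spellDir);
        spellChecker.setStringDistance(new JaroWinklerDistance());
        spellChecker.setAccuracy(0.7f);
        // Ask for up to five corrections for a misspelled word.
        for (String suggestion : spellChecker.suggestSimilar("lucne", 5)) {
            System.out.println(suggestion);
        }
        spellChecker.close();
    }
}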
Project: elasticsearch_my    File: SimpleLuceneTests.java
public void testSortValues() throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
    for (int i = 0; i < 10; i++) {
        Document document = new Document();
        String text = new String(new char[]{(char) (97 + i), (char) (97 + i)});
        document.add(new TextField("str", text, Field.Store.YES));
        document.add(new SortedDocValuesField("str", new BytesRef(text)));
        indexWriter.addDocument(document);
    }
    IndexReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(indexWriter));
    IndexSearcher searcher = new IndexSearcher(reader);
    TopFieldDocs docs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(new SortField("str", SortField.Type.STRING)));
    for (int i = 0; i < 10; i++) {
        FieldDoc fieldDoc = (FieldDoc) docs.scoreDocs[i];
        assertThat((BytesRef) fieldDoc.fields[0], equalTo(new BytesRef(new String(new char[]{(char) (97 + i), (char) (97 + i)}))));
    }
}
Project: Elasticsearch    File: HasChildQueryParser.java
@Override
public Query rewrite(IndexReader reader) throws IOException {
    if (getBoost() != 1.0F) {
        return super.rewrite(reader);
    }
    if (reader instanceof DirectoryReader) {
        String joinField = ParentFieldMapper.joinField(parentType);
        IndexSearcher indexSearcher = new IndexSearcher(reader);
        indexSearcher.setQueryCache(null);
        indexSearcher.setSimilarity(similarity);
        IndexParentChildFieldData indexParentChildFieldData = parentChildIndexFieldData.loadGlobal((DirectoryReader) reader);
        MultiDocValues.OrdinalMap ordinalMap = ParentChildIndexFieldData.getOrdinalMap(indexParentChildFieldData, parentType);
        return JoinUtil.createJoinQuery(joinField, innerQuery, toQuery, indexSearcher, scoreMode, ordinalMap, minChildren, maxChildren);
    } else {
        if (reader.leaves().isEmpty() && reader.numDocs() == 0) {
            // The asserting reader passes down a MultiReader during rewrite, which makes this
            // blow up: for this query to work we need a DirectoryReader, otherwise we can't
            // load global ordinals. To handle that, we simply check whether the reader has no
            // leaves and rewrite to match nothing.
            return new MatchNoDocsQuery();
        }
        throw new IllegalStateException("can't load global ordinals for reader of type: " + reader.getClass() + " must be a DirectoryReader");
    }
}
Project: elasticsearch_my    File: TermSuggester.java
@Override
public TermSuggestion innerExecute(String name, TermSuggestionContext suggestion, IndexSearcher searcher, CharsRefBuilder spare)
        throws IOException {
    DirectSpellChecker directSpellChecker = suggestion.getDirectSpellCheckerSettings().createDirectSpellChecker();
    final IndexReader indexReader = searcher.getIndexReader();
    TermSuggestion response = new TermSuggestion(
            name, suggestion.getSize(), suggestion.getDirectSpellCheckerSettings().sort()
    );
    List<Token> tokens = queryTerms(suggestion, spare);
    for (Token token : tokens) {
        // TODO: Extend DirectSpellChecker in 4.1, to get the raw suggested words as BytesRef
        SuggestWord[] suggestedWords = directSpellChecker.suggestSimilar(
                token.term, suggestion.getShardSize(), indexReader, suggestion.getDirectSpellCheckerSettings().suggestMode()
        );
        Text key = new Text(new BytesArray(token.term.bytes()));
        TermSuggestion.Entry resultEntry = new TermSuggestion.Entry(key, token.startOffset, token.endOffset - token.startOffset);
        for (SuggestWord suggestWord : suggestedWords) {
            Text word = new Text(suggestWord.string);
            resultEntry.addOption(new TermSuggestion.Entry.Option(word, suggestWord.freq, suggestWord.score));
        }
        response.addTerm(resultEntry);
    }
    return response;
}
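
Both TermSuggester variants boil down to calling DirectSpellChecker.suggestSimilar against the searcher's IndexReader. Below is a standalone sketch of that call, assuming a Lucene 6.x-style Path-based FSDirectory; the index path and field name are hypothetical.

import java.nio.file.Paths;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.spell.DirectSpellChecker;
import org.apache.lucene.search.spell.SuggestMode;
import org.apache.lucene.search.spell.SuggestWord;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class DirectSpellCheckerExample {
    public static void main(String[] args) throws Exception {
        // Hypothetical on-disk index with a "body" text field.
        Directory dir = FSDirectory.open(Paths.get("/tmp/index"));
        try (IndexReader reader = DirectoryReader.open(dir)) {
            DirectSpellChecker spellChecker = new DirectSpellChecker();
            // Suggest up to five corrections for a (probably misspelled) term.
            SuggestWord[] suggestions = spellChecker.suggestSimilar(
                    new Term("body", "lucne"), 5, reader, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX);
            for (SuggestWord word : suggestions) {
                System.out.println(word.string + " freq=" + word.freq + " score=" + word.score);
            }
        }
    }
}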
Project: lams    File: FieldMaskingSpanQuery.java
@Override
public Query rewrite(IndexReader reader) throws IOException {
  FieldMaskingSpanQuery clone = null;

  SpanQuery rewritten = (SpanQuery) maskedQuery.rewrite(reader);
  if (rewritten != maskedQuery) {
    clone = (FieldMaskingSpanQuery) this.clone();
    clone.maskedQuery = rewritten;
  }

  if (clone != null) {
    return clone;
  } else {
    return this;
  }
}
Project: TextHIN    File: FbEntitySearcher.java
public FbEntitySearcher(String indexDir, int numOfDocs, String searchingStrategy) throws IOException {

    LogInfo.begin_track("Constructing Searcher");
    if (!searchingStrategy.equals("exact") && !searchingStrategy.equals("inexact"))
      throw new RuntimeException("Bad searching strategy: " + searchingStrategy);
    this.searchStrategy = searchingStrategy;

    queryParser = new QueryParser(
        Version.LUCENE_44,
        FbIndexField.TEXT.fieldName(),
        searchingStrategy.equals("exact") ? new KeywordAnalyzer() : new StandardAnalyzer(Version.LUCENE_44));
    LogInfo.log("Opening index dir: " + indexDir);
    IndexReader indexReader = DirectoryReader.open(SimpleFSDirectory.open(new File(indexDir)));
    indexSearcher = new IndexSearcher(indexReader);
    LogInfo.log("Opened index with " + indexReader.numDocs() + " documents.");

    this.numOfDocs = numOfDocs;
    LogInfo.end_track();
}
Project: elasticsearch_my    File: RangeQueryRewriteTests.java
public void testRewriteEmptyReader() throws Exception {
    IndexService indexService = createIndex("test");
    String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
            .startObject("properties")
                .startObject("foo")
                    .field("type", "date")
                .endObject()
            .endObject()
        .endObject().endObject().string();
    indexService.mapperService().merge("type",
            new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false);
    IndexReader reader = new MultiReader();
    QueryRewriteContext context = new QueryShardContext(0, indexService.getIndexSettings(), null, null, indexService.mapperService(),
            null, null, xContentRegistry(), null, reader, null);
    RangeQueryBuilder range = new RangeQueryBuilder("foo");
    // no values -> DISJOINT
    assertEquals(Relation.DISJOINT, range.getRelation(context));
}
Project: Equella    File: ItemIndex.java
private OpenBitSet searchRequestToBitSet(@Nullable final Search searchreq, IndexSearcher searcher,
    IndexReader reader) throws IOException
{
    if( searchreq != null )
    {
        Filter filters = getFilter(searchreq);
        Query query = getQuery(searchreq, null, false);

        BitSetCollector collector = new BitSetCollector();
        searcher.search(query, filters, collector);
        return collector.getBitSet();
    }
    else
    {
        return (OpenBitSet) new InstitutionFilter().getDocIdSet(reader);
    }
}
Project: elasticsearch_my    File: ParentToChildrenAggregatorTests.java
public void testNoDocs() throws IOException {
    Directory directory = newDirectory();

    RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
    // intentionally not writing any docs
    indexWriter.close();
    IndexReader indexReader = DirectoryReader.open(directory);

    testCase(new MatchAllDocsQuery(), newSearcher(indexReader, false, true), parentToChild -> {
        assertEquals(0, parentToChild.getDocCount());
        assertEquals(Double.POSITIVE_INFINITY, ((InternalMin) parentToChild.getAggregations().get("in_child")).getValue(),
                Double.MIN_VALUE);
    });
    indexReader.close();
    directory.close();
}
Project: elasticsearch_my    File: ShardCoreKeyMapTests.java
public void testMissingShard() throws IOException {
    try (Directory dir = newDirectory();
            RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {
        w.addDocument(new Document());
        try (IndexReader reader = w.getReader()) {
            ShardCoreKeyMap map = new ShardCoreKeyMap();
            for (LeafReaderContext ctx : reader.leaves()) {
                try {
                    map.add(ctx.reader());
                    fail();
                } catch (IllegalArgumentException expected) {
                    // ok
                }
            }
        }
    }
}
Project: elasticsearch_my    File: NumberFieldMapper.java
@Override
FieldStats.Double stats(IndexReader reader, String fieldName,
                        boolean isSearchable, boolean isAggregatable) throws IOException {
    FieldInfo fi = org.apache.lucene.index.MultiFields.getMergedFieldInfos(reader).fieldInfo(fieldName);
    if (fi == null) {
        return null;
    }
    long size = PointValues.size(reader, fieldName);
    if (size == 0) {
        return new FieldStats.Double(reader.maxDoc(), 0, -1, -1, isSearchable, isAggregatable);
    }
    int docCount = PointValues.getDocCount(reader, fieldName);
    byte[] min = PointValues.getMinPackedValue(reader, fieldName);
    byte[] max = PointValues.getMaxPackedValue(reader, fieldName);
    return new FieldStats.Double(reader.maxDoc(), docCount, -1L, size,
        isSearchable, isAggregatable,
        FloatPoint.decodeDimension(min, 0), FloatPoint.decodeDimension(max, 0));
}
Project: alfresco-repository    File: IndexInfo.java
/**
 * Manage closing and unregistering an index reader.
 * 
 * @param id String
 * @throws IOException
 */
public void closeDeltaIndexReader(String id) throws IOException
{
    if (id == null)
    {
        throw new IndexerException("\"null\" is not a valid identifier for a transaction");
    }

    // No lock required, as the delta is applied by a single thread. The delta is
    // still active.
    IndexReader reader = indexReaders.remove(id);
    if (reader != null)
    {
        reader.close();
    }
}
Project: elasticsearch_my    File: DateFieldMapper.java
@Override
public FieldStats.Date stats(IndexReader reader) throws IOException {
    String field = name();
    FieldInfo fi = org.apache.lucene.index.MultiFields.getMergedFieldInfos(reader).fieldInfo(name());
    if (fi == null) {
        return null;
    }
    long size = PointValues.size(reader, field);
    if (size == 0) {
        return new FieldStats.Date(reader.maxDoc(), 0, -1, -1, isSearchable(), isAggregatable());
    }
    int docCount = PointValues.getDocCount(reader, field);
    byte[] min = PointValues.getMinPackedValue(reader, field);
    byte[] max = PointValues.getMaxPackedValue(reader, field);
    return new FieldStats.Date(reader.maxDoc(), docCount, -1L, size,
        isSearchable(), isAggregatable(),
        dateTimeFormatter(), LongPoint.decodeDimension(min, 0), LongPoint.decodeDimension(max, 0));
}
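
The Number/Date/Ip stats implementations above share one pattern: look up the field in the merged FieldInfos, then read aggregate point statistics straight from the IndexReader. Here is a standalone sketch of that pattern for a LongPoint field, assuming the Lucene 6.x static PointValues helpers used above; the index path and field name are hypothetical.

import java.nio.file.Paths;

import org.apache.lucene.document.LongPoint;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.PointValues;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class PointStatsExample {
    public static void main(String[] args) throws Exception {
        // Hypothetical index containing a LongPoint field named "timestamp".
        Directory dir = FSDirectory.open(Paths.get("/tmp/index"));
        try (IndexReader reader = DirectoryReader.open(dir)) {
            String field = "timestamp";
            long size = PointValues.size(reader, field);           // number of indexed points
            if (size == 0) {
                System.out.println("no values for " + field);
                return;
            }
            int docCount = PointValues.getDocCount(reader, field); // docs with at least one value
            byte[] min = PointValues.getMinPackedValue(reader, field);
            byte[] max = PointValues.getMaxPackedValue(reader, field);
            System.out.println("docCount=" + docCount
                    + " min=" + LongPoint.decodeDimension(min, 0)
                    + " max=" + LongPoint.decodeDimension(max, 0));
        }
    }
}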
Project: elasticsearch_my    File: GeoPointFieldMapper.java
@Override
public FieldStats.GeoPoint stats(IndexReader reader) throws IOException {
    String field = name();
    FieldInfo fi = org.apache.lucene.index.MultiFields.getMergedFieldInfos(reader).fieldInfo(name());
    if (fi == null) {
        return null;
    }
    final long size = PointValues.size(reader, field);
    if (size == 0) {
        return new FieldStats.GeoPoint(reader.maxDoc(), -1L, -1L, -1L, isSearchable(), isAggregatable());
    }
    final int docCount = PointValues.getDocCount(reader, field);
    byte[] min = PointValues.getMinPackedValue(reader, field);
    byte[] max = PointValues.getMaxPackedValue(reader, field);
    GeoPoint minPt = new GeoPoint(GeoEncodingUtils.decodeLatitude(min, 0), GeoEncodingUtils.decodeLongitude(min, Integer.BYTES));
    GeoPoint maxPt = new GeoPoint(GeoEncodingUtils.decodeLatitude(max, 0), GeoEncodingUtils.decodeLongitude(max, Integer.BYTES));
    return new FieldStats.GeoPoint(reader.maxDoc(), docCount, -1L, size, isSearchable(), isAggregatable(),
        minPt, maxPt);
}
Project: lams    File: SearcherManager.java
/** Expert: creates a searcher from the provided {@link
 *  IndexReader} using the provided {@link
 *  SearcherFactory}.  NOTE: this decRefs the incoming reader
 *  if an exception is thrown. */
public static IndexSearcher getSearcher(SearcherFactory searcherFactory, IndexReader reader) throws IOException {
  boolean success = false;
  final IndexSearcher searcher;
  try {
    searcher = searcherFactory.newSearcher(reader);
    if (searcher.getIndexReader() != reader) {
      throw new IllegalStateException("SearcherFactory must wrap exactly the provided reader (got " + searcher.getIndexReader() + " but expected " + reader + ")");
    }
    success = true;
  } finally {
    if (!success) {
      reader.decRef();
    }
  }
  return searcher;
}
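
A minimal sketch of calling the helper above from application code, assuming the Lucene 4.x API this snippet comes from (File-based FSDirectory, the two-argument getSearcher); the index path is hypothetical.

import java.io.File;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.SearcherFactory;
import org.apache.lucene.search.SearcherManager;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class GetSearcherExample {
    public static void main(String[] args) throws Exception {
        // Hypothetical index directory.
        Directory dir = FSDirectory.open(new File("/tmp/index"));
        IndexReader reader = DirectoryReader.open(dir);
        // getSearcher decRefs the reader if the factory fails, so on the success path
        // the caller still owns the reader and closes it when done.
        IndexSearcher searcher = SearcherManager.getSearcher(new SearcherFactory(), reader);
        try {
            System.out.println("maxDoc=" + searcher.getIndexReader().maxDoc());
        } finally {
            reader.close();
        }
    }
}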
Project: elasticsearch_my    File: VectorHighlighterTests.java
public void testVectorHighlighter() throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));

    Document document = new Document();
    document.add(new TextField("_id", "1", Field.Store.YES));
    FieldType vectorsType = new FieldType(TextField.TYPE_STORED);
    vectorsType.setStoreTermVectors(true);
    vectorsType.setStoreTermVectorPositions(true);
    vectorsType.setStoreTermVectorOffsets(true);
    document.add(new Field("content", "the big bad dog", vectorsType));
    indexWriter.addDocument(document);

    IndexReader reader = DirectoryReader.open(indexWriter);
    IndexSearcher searcher = new IndexSearcher(reader);
    TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);

    assertThat(topDocs.totalHits, equalTo(1));

    FastVectorHighlighter highlighter = new FastVectorHighlighter();
    String fragment = highlighter.getBestFragment(highlighter.getFieldQuery(new TermQuery(new Term("content", "bad"))),
            reader, topDocs.scoreDocs[0].doc, "content", 30);
    assertThat(fragment, notNullValue());
    assertThat(fragment, equalTo("the big <b>bad</b> dog"));
}
Project: elasticsearch_my    File: IpFieldMapper.java
@Override
public FieldStats.Ip stats(IndexReader reader) throws IOException {
    String field = name();
    FieldInfo fi = org.apache.lucene.index.MultiFields.getMergedFieldInfos(reader).fieldInfo(name());
    if (fi == null) {
        return null;
    }
    long size = PointValues.size(reader, field);
    if (size == 0) {
        return new FieldStats.Ip(reader.maxDoc(), 0, -1, -1, isSearchable(), isAggregatable());
    }
    int docCount = PointValues.getDocCount(reader, field);
    byte[] min = PointValues.getMinPackedValue(reader, field);
    byte[] max = PointValues.getMaxPackedValue(reader, field);
    return new FieldStats.Ip(reader.maxDoc(), docCount, -1L, size,
        isSearchable(), isAggregatable(),
        InetAddressPoint.decode(min), InetAddressPoint.decode(max));
}
Project: elasticsearch_my    File: DiversifiedSamplerTests.java
public void testDiversifiedSampler_noDocs() throws Exception {
    Directory directory = newDirectory();
    RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
    indexWriter.close();
    IndexReader indexReader = DirectoryReader.open(directory);
    IndexSearcher indexSearcher = new IndexSearcher(indexReader);

    MappedFieldType idFieldType = new KeywordFieldMapper.KeywordFieldType();
    idFieldType.setName("id");
    idFieldType.setHasDocValues(true);

    MappedFieldType genreFieldType = new KeywordFieldMapper.KeywordFieldType();
    genreFieldType.setName("genre");
    genreFieldType.setHasDocValues(true);

    DiversifiedAggregationBuilder builder = new DiversifiedAggregationBuilder("_name")
            .field(genreFieldType.name())
            .subAggregation(new TermsAggregationBuilder("terms", null).field("id"));

    InternalSampler result = search(indexSearcher, new MatchAllDocsQuery(), builder, genreFieldType, idFieldType);
    Terms terms = result.getAggregations().get("terms");
    assertEquals(0, terms.getBuckets().size());
    indexReader.close();
    directory.close();
}
Project: elasticsearch_my    File: FreqTermsEnum.java
public FreqTermsEnum(IndexReader reader, String field, boolean needDocFreq, boolean needTotalTermFreq, @Nullable Query filter, BigArrays bigArrays) throws IOException {
    super(reader, field, needTotalTermFreq ? PostingsEnum.FREQS : PostingsEnum.NONE, filter);
    this.bigArrays = bigArrays;
    this.needDocFreqs = needDocFreq;
    this.needTotalTermFreqs = needTotalTermFreq;
    if (needDocFreq) {
        termDocFreqs = bigArrays.newIntArray(INITIAL_NUM_TERM_FREQS_CACHED, false);
    } else {
        termDocFreqs = null;
    }
    if (needTotalTermFreq) {
        termsTotalFreqs = bigArrays.newLongArray(INITIAL_NUM_TERM_FREQS_CACHED, false);
    } else {
        termsTotalFreqs = null;
    }
    cachedTermOrds = new BytesRefHash(INITIAL_NUM_TERM_FREQS_CACHED, bigArrays);
}
Project: elasticsearch_my    File: AvgAggregatorTests.java
private void testCase(Query query, CheckedConsumer<RandomIndexWriter, IOException> buildIndex, Consumer<InternalAvg> verify)
        throws IOException {
    Directory directory = newDirectory();
    RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
    buildIndex.accept(indexWriter);
    indexWriter.close();

    IndexReader indexReader = DirectoryReader.open(directory);
    IndexSearcher indexSearcher = newSearcher(indexReader, true, true);

    AvgAggregationBuilder aggregationBuilder = new AvgAggregationBuilder("_name").field("number");
    MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
    fieldType.setName("number");
    try (AvgAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType)) {
        aggregator.preCollection();
        indexSearcher.search(query, aggregator);
        aggregator.postCollection();
        verify.accept((InternalAvg) aggregator.buildAggregation(0L));
    }
    indexReader.close();
    directory.close();
}
Project: elasticsearch_my    File: CustomUnifiedHighlighterTests.java
public void testAllTermQuery() throws IOException {
    Directory dir = newDirectory();
    String value = "The quick brown fox.";
    Analyzer analyzer = new StandardAnalyzer();
    IndexReader ir = indexOneDoc(dir, "all", value, analyzer);
    AllTermQuery query = new AllTermQuery(new Term("all", "fox"));
    IndexSearcher searcher = newSearcher(ir);
    TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
    assertThat(topDocs.totalHits, equalTo(1));
    int docId = topDocs.scoreDocs[0].doc;
    CustomPassageFormatter passageFormatter = new CustomPassageFormatter("<b>", "</b>", new DefaultEncoder());
    CustomUnifiedHighlighter highlighter = new CustomUnifiedHighlighter(searcher, analyzer,
        passageFormatter, null, value, false);
    Snippet[] snippets = highlighter.highlightField("all", query, docId, 5);
    assertThat(snippets.length, equalTo(1));
    assertThat(snippets[0].getText(), equalTo("The quick brown <b>fox</b>."));
    ir.close();
    dir.close();
}
Project: wiseowl    File: PassageRankingComponent.java
protected float calculateWeight(Term term, IndexReader reader) throws IOException {
  // If a term is not in the index, then its weight is 0.
  int docFrequency = reader.docFreq(term);
  if (docFrequency != 0) {
    log.warn("Term {} doc freq. {}", term.toString(), docFrequency);
    return 1.0f / docFrequency;
  } else {
    log.warn("Couldn't find doc freq for term {}", term);
    return 0f;
  }
}
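
Below is a standalone sketch of the same inverse-document-frequency weighting, runnable against a plain Lucene index; the index path, field name, and sample terms are hypothetical, and a Path-based FSDirectory is assumed.

import java.io.IOException;
import java.nio.file.Paths;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class TermWeightExample {

    // Same idea as calculateWeight above: weight a term by the inverse of its document frequency.
    static float weight(Term term, IndexReader reader) throws IOException {
        int docFrequency = reader.docFreq(term);
        return docFrequency == 0 ? 0f : 1.0f / docFrequency;
    }

    public static void main(String[] args) throws Exception {
        Directory dir = FSDirectory.open(Paths.get("/tmp/index"));
        try (IndexReader reader = DirectoryReader.open(dir)) {
            for (String text : new String[] {"lucene", "reader", "nonexistent"}) {
                Term term = new Term("content", text);
                System.out.println(term + " -> " + weight(term, reader));
            }
        }
    }
}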
Project: Equella    File: ItemIndex.java
/**
 * A simplified implementation of matrixSearch() that only works on a single
 * field, and currently only returns the count per term. It could easily be
 * extended to return a list of ItemIds per term; it simply wasn't necessary
 * when I was writing it!
 * <p>
 * This simplified implementation was written to overcome the memory
 * pressures that matrixSearch() creates when you have over half a million
 * terms for a field. matrixSearch() creates *many* BitSets that it holds
 * on to for reuse as it recurses through a list of fields. Since we only care
 * about a single field in this implementation, we can avoid generating and
 * holding onto BitSets.
 */
public Multimap<String, Pair<String, Integer>> facetCount(@Nullable final Search searchreq,
    final Collection<String> fields)
{
    return search(new Searcher<Multimap<String, Pair<String, Integer>>>()
    {
        @Override
        public Multimap<String, Pair<String, Integer>> search(IndexSearcher searcher) throws IOException
        {
            final IndexReader reader = searcher.getIndexReader();
            final OpenBitSet filteredBits = searchRequestToBitSet(searchreq, searcher, reader);

            final Multimap<String, Pair<String, Integer>> rv = ArrayListMultimap.create();
            for( String field : fields )
            {
                for( Term term : new XPathFieldIterator(reader, field, "") )
                {
                    int count = 0;

                    TermDocs docs = reader.termDocs(term);
                    while( docs.next() )
                    {
                        if( filteredBits.get(docs.doc()) )
                        {
                            count++;
                        }
                    }
                    docs.close();

                    if( count > 0 )
                    {
                        rv.put(field, new Pair<String, Integer>(term.text(), count));
                    }
                }
            }
            return rv;
        }
    });
}
Project: incubator-netbeans    File: Queries.java
@Override
public Query rewrite(IndexReader reader) throws IOException {
    final Query result =  super.rewrite(reader);
    if (this.collector != null) {
        attach(this,this.collector);
    }
    return result;
}
Project: elasticsearch_my    File: RangeQueryRewriteTests.java
public void testRewriteMissingField() throws Exception {
    IndexService indexService = createIndex("test");
    IndexReader reader = new MultiReader();
    QueryRewriteContext context = new QueryShardContext(0, indexService.getIndexSettings(), null, null, indexService.mapperService(),
            null, null, xContentRegistry(), null, reader, null);
    RangeQueryBuilder range = new RangeQueryBuilder("foo");
    assertEquals(Relation.DISJOINT, range.getRelation(context));
}
Project: NGB-master    File: FeatureIndexDao.java
private MultiReader openMultiReader(SimpleFSDirectory[] indexes) throws IOException {
    IndexReader[] readers = new IndexReader[indexes.length];
    for (int i = 0; i < indexes.length; i++) {
        readers[i] = DirectoryReader.open(indexes[i]);
    }

    return new MultiReader(readers, true);
}
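
A runnable sketch of the same composition: open one DirectoryReader per index and wrap them in a MultiReader that closes its sub-readers. A Lucene 5+ Path-based SimpleFSDirectory is assumed, and the index paths are hypothetical.

import java.nio.file.Paths;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.store.SimpleFSDirectory;

public class MultiReaderExample {
    public static void main(String[] args) throws Exception {
        // Hypothetical paths to two independent indexes.
        SimpleFSDirectory[] indexes = new SimpleFSDirectory[] {
                new SimpleFSDirectory(Paths.get("/tmp/index-a")),
                new SimpleFSDirectory(Paths.get("/tmp/index-b"))
        };
        IndexReader[] readers = new IndexReader[indexes.length];
        for (int i = 0; i < indexes.length; i++) {
            readers[i] = DirectoryReader.open(indexes[i]);
        }
        // closeSubReaders = true: closing the MultiReader also closes every sub-reader.
        try (MultiReader multiReader = new MultiReader(readers, true)) {
            IndexSearcher searcher = new IndexSearcher(multiReader);
            System.out.println("total docs: " + searcher.count(new MatchAllDocsQuery()));
        }
    }
}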
Project: lams    File: FilteredQuery.java
/** Rewrites the query. If the wrapped is an instance of
 * {@link MatchAllDocsQuery} it returns a {@link ConstantScoreQuery}. Otherwise
 * it returns a new {@code FilteredQuery} wrapping the rewritten query. */
@Override
public Query rewrite(IndexReader reader) throws IOException {
  final Query queryRewritten = query.rewrite(reader);

  if (queryRewritten != query) {
    // rewrite to a new FilteredQuery wrapping the rewritten query
    final Query rewritten = new FilteredQuery(queryRewritten, filter, strategy);
    rewritten.setBoost(this.getBoost());
    return rewritten;
  } else {
    // nothing to rewrite, we are done!
    return this;
  }
}
Project: Elasticsearch    File: SignificantLongTermsAggregator.java
@Override
public SignificantLongTerms buildEmptyAggregation() {
    // We need to account for the significance of a miss in our global stats - provide corpus size as context
    ContextIndexSearcher searcher = context.searchContext().searcher();
    IndexReader topReader = searcher.getIndexReader();
    int supersetSize = topReader.numDocs();
    return new SignificantLongTerms(0, supersetSize, name, formatter, bucketCountThresholds.getRequiredSize(),
            bucketCountThresholds.getMinDocCount(), termsAggFactory.getSignificanceHeuristic(),
            Collections.<InternalSignificantTerms.Bucket> emptyList(), pipelineAggregators(), metaData());
}
Project: elasticsearch_my    File: PercolatorFieldMapper.java
Query createCandidateQuery(IndexReader indexReader) throws IOException {
    List<BytesRef> extractedTerms = new ArrayList<>();
    LeafReader reader = indexReader.leaves().get(0).reader();
    Fields fields = reader.fields();
    for (String field : fields) {
        Terms terms = fields.terms(field);
        if (terms == null) {
            continue;
        }

        BytesRef fieldBr = new BytesRef(field);
        TermsEnum tenum = terms.iterator();
        for (BytesRef term = tenum.next(); term != null; term = tenum.next()) {
            BytesRefBuilder builder = new BytesRefBuilder();
            builder.append(fieldBr);
            builder.append(FIELD_VALUE_SEPARATOR);
            builder.append(term);
            extractedTerms.add(builder.toBytesRef());
        }
    }
    Query extractionSuccess = new TermInSetQuery(queryTermsField.name(), extractedTerms);
    // include extractionResultField:failed, because docs with this term have no extractedTermsField
    // and otherwise we would fail to return these docs. Docs that failed query term extraction
    // always need to be verified by MemoryIndex:
    Query extractionFailure = new TermQuery(new Term(extractionResultField.name(), EXTRACTION_FAILED));

    return new BooleanQuery.Builder()
            .add(extractionSuccess, Occur.SHOULD)
            .add(extractionFailure, Occur.SHOULD)
            .build();
}
Project: Elasticsearch    File: IndicesRequestCache.java
@Override
public void onClose(IndexReader reader) {
    Boolean remove = registeredClosedListeners.remove(this);
    if (remove != null) {
        keysToClean.add(this);
    }
}
Project: Elasticsearch    File: GlobalOrdinalsSignificantTermsAggregator.java
@Override
public SignificantStringTerms buildEmptyAggregation() {
    // We need to account for the significance of a miss in our global stats - provide corpus size as context
    ContextIndexSearcher searcher = context.searchContext().searcher();
    IndexReader topReader = searcher.getIndexReader();
    int supersetSize = topReader.numDocs();
    return new SignificantStringTerms(0, supersetSize, name, bucketCountThresholds.getRequiredSize(),
            bucketCountThresholds.getMinDocCount(), termsAggFactory.getSignificanceHeuristic(),
            Collections.<InternalSignificantTerms.Bucket> emptyList(), pipelineAggregators(), metaData());
}
Project: lams    File: PhraseQuery.java
@Override
public Query rewrite(IndexReader reader) throws IOException {
  if (terms.isEmpty()) {
    BooleanQuery bq = new BooleanQuery();
    bq.setBoost(getBoost());
    return bq;
  } else if (terms.size() == 1) {
    TermQuery tq = new TermQuery(terms.get(0));
    tq.setBoost(getBoost());
    return tq;
  } else
    return super.rewrite(reader);
}
Project: elasticsearch_my    File: CompletionFieldStats.java
/**
 * Returns total in-heap bytes used by all suggesters.  This method has CPU cost <code>O(numIndexedFields)</code>.
 *
 * @param fieldNamePatterns if non-null, any completion field name matching any of these patterns will break out its in-heap bytes
 * separately in the returned {@link CompletionStats}
 */
public static CompletionStats completionStats(IndexReader indexReader, String ... fieldNamePatterns) {
    long sizeInBytes = 0;
    ObjectLongHashMap<String> completionFields = null;
    if (fieldNamePatterns != null  && fieldNamePatterns.length > 0) {
        completionFields = new ObjectLongHashMap<>(fieldNamePatterns.length);
    }
    for (LeafReaderContext atomicReaderContext : indexReader.leaves()) {
        LeafReader atomicReader = atomicReaderContext.reader();
        try {
            Fields fields = atomicReader.fields();
            for (String fieldName : fields) {
                Terms terms = fields.terms(fieldName);
                if (terms instanceof CompletionTerms) {
                    // TODO: currently we load up the suggester for reporting its size
                    long fstSize = ((CompletionTerms) terms).suggester().ramBytesUsed();
                    if (fieldNamePatterns != null && fieldNamePatterns.length > 0 && Regex.simpleMatch(fieldNamePatterns, fieldName)) {
                        completionFields.addTo(fieldName, fstSize);
                    }
                    sizeInBytes += fstSize;
                }
            }
        } catch (IOException ioe) {
            throw new ElasticsearchException(ioe);
        }
    }
    return new CompletionStats(sizeInBytes, completionFields == null ? null : new FieldMemoryStats(completionFields));
}
Project: sjk    File: QuickTipsServiceImpl.java
private void resetQuickTips() throws IOException {
    String indexDir = appConfig.getOldAllQuickTipsIndex();
    Directory directory = FSDirectory.open(new File(indexDir));
    if (!IndexReader.indexExists(directory)) {
        logger.info("Please reset index firstly!");
        return;
    }
    Directory ram = new RAMDirectory(directory);
    this.quickTipsIndexReader = IndexReader.open(ram);
    logger.info("IndexReader has numDos: {}", this.quickTipsIndexReader.numDocs());

    IndexSearcher preIndexSearcher = this.quickTipsSearcher;
    this.quickTipsSearcher = new IndexSearcher(quickTipsIndexReader);
    IOUtils.closeQuietly(preIndexSearcher);
}
Project: NGB-master    File: FeatureIndexDao.java
/**
 * Queries a feature index of a project, specified by ID
 *
 * @param projectId ID of a project, which index to work with
 * @param query a query to search in index
 * @return a {@code List} of {@code FeatureIndexEntry} objects that satisfy the index query
 * @deprecated
 * @throws IOException
 */
@Deprecated
private IndexSearchResult searchLuceneIndexForProject(final long projectId, Query query,
                                         List<String> vcfInfoFields, Integer maxResultsCount, Sort sort) throws
        IOException {
    Map<Integer, FeatureIndexEntry> entryMap = new LinkedHashMap<>();

    int totalHits = 0;
    try (
        Directory index = fileManager.getIndexForProject(projectId);
        IndexReader reader = DirectoryReader.open(index)
    ) {
        if (reader.numDocs() == 0) {
            return new IndexSearchResult(Collections.emptyList(), false, 0);
        }

        IndexSearcher searcher = new IndexSearcher(reader);
        final TopDocs docs;
        int resultsCount = maxResultsCount == null ? reader.numDocs() : maxResultsCount;
        if (sort == null) {
            docs = searcher.search(query, resultsCount);
        } else {
            docs = searcher.search(query, resultsCount, sort);
        }

        totalHits = docs.totalHits;
        final ScoreDoc[] hits = docs.scoreDocs;

        Map<Long, BookmarkIndexEntry> foundBookmarkEntries = new HashMap<>(); // for batch bookmarks loading
        createIndexEntries(hits, entryMap, foundBookmarkEntries, searcher, vcfInfoFields);
        setBookmarks(foundBookmarkEntries);
    } catch (IOException e) {
        LOGGER.error(MessageHelper.getMessage(MessagesConstants.ERROR_FEATURE_INDEX_SEARCH_FAILED), e);
        return new IndexSearchResult(Collections.emptyList(), false, 0);
    }

    return new IndexSearchResult(new ArrayList<>(entryMap.values()), maxResultsCount != null &&
                                                                     totalHits > maxResultsCount, totalHits);
}