Java class org.apache.lucene.search.IndexSearcher code examples
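
Most of the snippets below follow the same basic pattern: open a Directory, wrap it in a DirectoryReader, build an IndexSearcher on top of the reader, run a Query, and read documents back from the resulting TopDocs. The minimal sketch below shows that pattern in isolation, assuming the Lucene 5.x/6.x-era API used by these projects; the index path and the field names ("title", "id") are placeholders, not taken from any of the projects.

import java.nio.file.Paths;

import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class IndexSearcherUsageSketch {
    public static void main(String[] args) throws Exception {
        // Open an existing index directory and create a searcher over it.
        try (Directory dir = FSDirectory.open(Paths.get("/tmp/example-index"));
             DirectoryReader reader = DirectoryReader.open(dir)) {
            IndexSearcher searcher = new IndexSearcher(reader);

            // Run a simple term query and fetch the top 10 hits.
            TopDocs topDocs = searcher.search(new TermQuery(new Term("title", "lucene")), 10);
            System.out.println("total hits: " + topDocs.totalHits);

            // Load the stored fields of each hit; closing the reader is enough,
            // the IndexSearcher itself does not need to be closed in this API generation.
            for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
                Document doc = searcher.doc(scoreDoc.doc);
                System.out.println(doc.get("id") + " -> " + doc.get("title"));
            }
        }
    }
}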

Project: RedisDirectory    File: TestLucene.java
public void testMMapDirectory() throws IOException {
    long start = System.currentTimeMillis();
    IndexWriterConfig indexWriterConfig = new IndexWriterConfig(new WhitespaceAnalyzer())
            .setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    FSDirectory open = FSDirectory.open(Paths.get("E:/testlucene"));
    IndexWriter indexWriter = new IndexWriter(open, indexWriterConfig);
    for (int i = 0; i < 10000000; i++) {
        indexWriter.addDocument(addDocument(i));
    }
    indexWriter.commit();
    indexWriter.close();
    long end = System.currentTimeMillis();
    log.error("MMapDirectory consumes {}s!", (end - start) / 1000);
    start = System.currentTimeMillis();
    IndexSearcher indexSearcher = new IndexSearcher(DirectoryReader.open(open));
    int total = 0;
    for (int i = 0; i < 10000000; i++) {
        TermQuery key1 = new TermQuery(new Term("key1", "key" + i));
        TopDocs search = indexSearcher.search(key1, 10);
        total += search.totalHits;
    }
    System.out.println(total);
    end = System.currentTimeMillis();
    log.error("MMapDirectory search consumes {}ms!", (end - start));
}
Project: elasticsearch_my    File: CandidateQueryTests.java
private void duelRun(PercolateQuery.QueryStore queryStore, MemoryIndex memoryIndex, IndexSearcher shardSearcher) throws IOException {
    boolean requireScore = randomBoolean();
    IndexSearcher percolateSearcher = memoryIndex.createSearcher();
    Query percolateQuery = fieldType.percolateQuery("type", queryStore, new BytesArray("{}"), percolateSearcher);
    Query query = requireScore ? percolateQuery : new ConstantScoreQuery(percolateQuery);
    TopDocs topDocs = shardSearcher.search(query, 10);

    Query controlQuery = new ControlQuery(memoryIndex, queryStore);
    controlQuery = requireScore ? controlQuery : new ConstantScoreQuery(controlQuery);
    TopDocs controlTopDocs = shardSearcher.search(controlQuery, 10);
    assertThat(topDocs.totalHits, equalTo(controlTopDocs.totalHits));
    assertThat(topDocs.scoreDocs.length, equalTo(controlTopDocs.scoreDocs.length));
    for (int j = 0; j < topDocs.scoreDocs.length; j++) {
        assertThat(topDocs.scoreDocs[j].doc, equalTo(controlTopDocs.scoreDocs[j].doc));
        assertThat(topDocs.scoreDocs[j].score, equalTo(controlTopDocs.scoreDocs[j].score));
        if (requireScore) {
            Explanation explain1 = shardSearcher.explain(query, topDocs.scoreDocs[j].doc);
            Explanation explain2 = shardSearcher.explain(controlQuery, controlTopDocs.scoreDocs[j].doc);
            assertThat(explain1.isMatch(), equalTo(explain2.isMatch()));
            assertThat(explain1.getValue(), equalTo(explain2.getValue()));
        }
    }
}
Project: elasticsearch_my    File: VectorHighlighterTests.java
public void testVectorHighlighterNoStore() throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));

    Document document = new Document();
    document.add(new TextField("_id", "1", Field.Store.YES));
    FieldType vectorsType = new FieldType(TextField.TYPE_NOT_STORED);
    vectorsType.setStoreTermVectors(true);
    vectorsType.setStoreTermVectorPositions(true);
    vectorsType.setStoreTermVectorOffsets(true);
    document.add(new Field("content", "the big bad dog", vectorsType));
    indexWriter.addDocument(document);

    IndexReader reader = DirectoryReader.open(indexWriter);
    IndexSearcher searcher = new IndexSearcher(reader);
    TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);

    assertThat(topDocs.totalHits, equalTo(1));

    FastVectorHighlighter highlighter = new FastVectorHighlighter();
    String fragment = highlighter.getBestFragment(highlighter.getFieldQuery(new TermQuery(new Term("content", "bad"))),
            reader, topDocs.scoreDocs[0].doc, "content", 30);
    assertThat(fragment, nullValue());
}
Project: as-full-text-search-server    File: LuceneBasicFlowExampleTestCase.java
/**
 * Search all books of a given author. 
 * 
 * @throws Exception never, otherwise the test fails.
 */
@Test
public void findByAuthorSurname() throws Exception {
    IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(directory));

    Query query = new QueryParser("author", new StandardAnalyzer()).parse("Gazzarini");
    TopDocs matches = searcher.search(query, 10);

    assertEquals(1, matches.totalHits);

    final String id = Arrays.stream(matches.scoreDocs)
        .map(scoreDoc -> luceneDoc(scoreDoc.doc, searcher))
        .map(doc -> doc.get("id"))
        .findFirst()
        .get();

    assertEquals("1", id);
}
Project: InComb    File: IndexingThreadTest.java
private void checkIndexContent(final String elementId,
        final String fieldContent, final int expectedAmount) throws IOException {
    final IndexReader reader = IndexManager.getInstance().getIndex().getIndexReader();
    final IndexSearcher searcher = new IndexSearcher(reader);
    final TopDocs topDocs = searcher.search(new TermQuery(new Term(FIELDNAME, fieldContent)), expectedAmount + 10);

    assertNotNull(topDocs);
    assertTrue(topDocs.totalHits == expectedAmount);

    if(expectedAmount > 0) {
        final ScoreDoc scoreDoc = topDocs.scoreDocs[0];
        assertNotNull(scoreDoc);

        final Document doc = reader.document(scoreDoc.doc);
        assertNotNull(doc);
        assertEquals(fieldContent, doc.get(FIELDNAME));
        assertEquals(elementId, doc.get(IIndexElement.FIELD_ID));
        assertEquals(INDEX_TYPE, doc.get(IIndexElement.FIELD_INDEX_TYPE));
    }
}
Project: elasticsearch_my    File: SimpleLuceneTests.java
public void testSimpleNumericOps() throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));

    Document document = new Document();
    document.add(new TextField("_id", "1", Field.Store.YES));
    document.add(new LegacyIntField("test", 2, LegacyIntField.TYPE_STORED));
    indexWriter.addDocument(document);

    IndexReader reader = DirectoryReader.open(indexWriter);
    IndexSearcher searcher = new IndexSearcher(reader);
    TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
    Document doc = searcher.doc(topDocs.scoreDocs[0].doc);
    IndexableField f = doc.getField("test");
    assertThat(f.stringValue(), equalTo("2"));

    BytesRefBuilder bytes = new BytesRefBuilder();
    LegacyNumericUtils.intToPrefixCoded(2, 0, bytes);
    topDocs = searcher.search(new TermQuery(new Term("test", bytes.get())), 1);
    doc = searcher.doc(topDocs.scoreDocs[0].doc);
    f = doc.getField("test");
    assertThat(f.stringValue(), equalTo("2"));

    indexWriter.close();
}
Project: elasticsearch_my    File: PercolateQueryBuilder.java
static IndexSearcher createMultiDocumentSearcher(Analyzer analyzer, ParsedDocument doc) {
    RAMDirectory ramDirectory = new RAMDirectory();
    try (IndexWriter indexWriter = new IndexWriter(ramDirectory, new IndexWriterConfig(analyzer))) {
        indexWriter.addDocuments(doc.docs());
        indexWriter.commit();
        DirectoryReader directoryReader = DirectoryReader.open(ramDirectory);
        assert directoryReader.leaves().size() == 1 : "Expected single leaf, but got [" + directoryReader.leaves().size() + "]";
        final IndexSearcher slowSearcher = new IndexSearcher(directoryReader) {

            @Override
            public Weight createNormalizedWeight(Query query, boolean needsScores) throws IOException {
                BooleanQuery.Builder bq = new BooleanQuery.Builder();
                bq.add(query, BooleanClause.Occur.MUST);
                bq.add(Queries.newNestedFilter(), BooleanClause.Occur.MUST_NOT);
                return super.createNormalizedWeight(bq.build(), needsScores);
            }

        };
        slowSearcher.setQueryCache(null);
        return slowSearcher;
    } catch (IOException e) {
        throw new ElasticsearchException("Failed to create index for percolator with nested document ", e);
    }
}
Project: sdudoc    File: LuceneIndexSearch.java
/**
 * Initializes the indexSearcher instance.
 */
public void createIndexSearch(){
    try{
        IndexReader indexReader = DirectoryReader.open(this.indexSettings.directory);
        this.indexSearcher = new IndexSearcher(indexReader);
        // Dump the current index contents (debugging only)
//      for(int i = 0; i < indexReader.numDocs(); i++){
//          System.out.println(indexReader.document(i));
//          System.out.println("fileName:" + indexReader.document(i).get("fileName") + "\tfileDesc:" + indexReader.document(i).get("fileDesc") + "\tfileId:" + indexReader.document(i).get("fileId") + "\tfileCreator:" + indexReader.document(i).get("fileCreator"));
//      }
//      System.out.println("index version: " + indexReader.getCoreCacheKey());
//      System.out.println("documents in index: " + indexReader.numDocs());
    }catch(Exception e){
        e.printStackTrace();
    }
}
Project: sjk    File: SearchService1Impl.java
@PostConstruct
public void reset() {
    final String dirPath = appConfig.getAllIndexDir();
    try {
        Directory directory = FSDirectory.open(new File(dirPath));
        if (!IndexReader.indexExists(directory)) {
            logger.error("Please reset index firstly! The path: {}", dirPath);
            return;
        }
        Directory ram = new RAMDirectory(directory);
        if (this.indexReader == null) {
            reopenIndexSearcher(ram);
        } else {
            // changed
            logger.info("Search's new indexReader!");
            final IndexReader preIndexReader = this.indexReader;
            final IndexSearcher preIndexSearcher = this.indexSearcher;
            reopenIndexSearcher(ram);
            IOUtils.closeQuietly(preIndexSearcher, preIndexReader);
            logger.info("Reload search index!");
        }
        logger.info("Search's IndexReader has numDos: {}", this.indexReader.numDocs());
    } catch (Exception e) {
        logger.error("Exception", e);
    }
}
Project: JLink    File: LuceneRetrieval.java
private void synTokenQuery(String search, final int numbOfResults, final double minLuceneScore,
        Map<String, Float> result, IndexSearcher searcher) throws ParseException, IOException {

    QueryParser parser = new QueryParser(Version.LUCENE_46, "surfaceFormTokens",
            new StandardAnalyzer(Version.LUCENE_46));

    search = QueryParser.escape(search);

    Query q = parser.parse(search);
    /*
     * Works only in String field!!
     */
    // Query q = new FuzzyQuery(new Term("surfaceFormTokens",
    // QueryParser.escape(search)), 2);

    TopDocs top = searcher.search(q, numbOfResults);

    for (ScoreDoc doc : top.scoreDocs) {
        if (doc.score >= minLuceneScore) {
            final String key = searcher.doc(doc.doc).get("conceptID");
            if (result.getOrDefault(key, 0f) < doc.score) {
                result.put(key, doc.score);
            }
        }
    }
}
Project: elasticsearch_my    File: FloatNestedSortingTests.java
protected void assertAvgScoreMode(Query parentFilter, IndexSearcher searcher, IndexFieldData.XFieldComparatorSource innerFieldComparator) throws IOException {
    MultiValueMode sortMode = MultiValueMode.AVG;
    Query childFilter = Queries.not(parentFilter);
    XFieldComparatorSource nestedComparatorSource = createFieldComparator("field2", sortMode, -127, createNested(searcher, parentFilter, childFilter));
    Query query = new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None);
    Sort sort = new Sort(new SortField("field2", nestedComparatorSource));
    TopDocs topDocs = searcher.search(query, 5, sort);
    assertThat(topDocs.totalHits, equalTo(7));
    assertThat(topDocs.scoreDocs.length, equalTo(5));
    assertThat(topDocs.scoreDocs[0].doc, equalTo(11));
    assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(2));
    assertThat(topDocs.scoreDocs[1].doc, equalTo(7));
    assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(2));
    assertThat(topDocs.scoreDocs[2].doc, equalTo(3));
    assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(3));
    assertThat(topDocs.scoreDocs[3].doc, equalTo(15));
    assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(3));
    assertThat(topDocs.scoreDocs[4].doc, equalTo(19));
    assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(3));
}
Project: elasticsearch_my    File: VectorHighlighterTests.java
public void testVectorHighlighterNoTermVector() throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));

    Document document = new Document();
    document.add(new TextField("_id", "1", Field.Store.YES));
    document.add(new TextField("content", "the big bad dog", Field.Store.YES));
    indexWriter.addDocument(document);

    IndexReader reader = DirectoryReader.open(indexWriter);
    IndexSearcher searcher = new IndexSearcher(reader);
    TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);

    assertThat(topDocs.totalHits, equalTo(1));

    FastVectorHighlighter highlighter = new FastVectorHighlighter();
    String fragment = highlighter.getBestFragment(highlighter.getFieldQuery(new TermQuery(new Term("content", "bad"))),
            reader, topDocs.scoreDocs[0].doc, "content", 30);
    assertThat(fragment, nullValue());
}
Project: elasticsearch_my    File: TermSuggester.java
@Override
public TermSuggestion innerExecute(String name, TermSuggestionContext suggestion, IndexSearcher searcher, CharsRefBuilder spare)
        throws IOException {
    DirectSpellChecker directSpellChecker = suggestion.getDirectSpellCheckerSettings().createDirectSpellChecker();
    final IndexReader indexReader = searcher.getIndexReader();
    TermSuggestion response = new TermSuggestion(
            name, suggestion.getSize(), suggestion.getDirectSpellCheckerSettings().sort()
    );
    List<Token> tokens = queryTerms(suggestion, spare);
    for (Token token : tokens) {
        // TODO: Extend DirectSpellChecker in 4.1, to get the raw suggested words as BytesRef
        SuggestWord[] suggestedWords = directSpellChecker.suggestSimilar(
                token.term, suggestion.getShardSize(), indexReader, suggestion.getDirectSpellCheckerSettings().suggestMode()
        );
        Text key = new Text(new BytesArray(token.term.bytes()));
        TermSuggestion.Entry resultEntry = new TermSuggestion.Entry(key, token.startOffset, token.endOffset - token.startOffset);
        for (SuggestWord suggestWord : suggestedWords) {
            Text word = new Text(suggestWord.string);
            resultEntry.addOption(new TermSuggestion.Entry.Option(word, suggestWord.freq, suggestWord.score));
        }
        response.addTerm(resultEntry);
    }
    return response;
}
Project: elasticsearch_my    File: DocValuesSliceQuery.java
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
    return new RandomAccessWeight(this) {
        @Override
        protected Bits getMatchingDocs(final LeafReaderContext context) throws IOException {
            final SortedNumericDocValues values = DocValues.getSortedNumeric(context.reader(), getField());
            return new Bits() {
                @Override
                public boolean get(int doc) {
                    values.setDocument(doc);
                    for (int i = 0; i < values.count(); i++) {
                        return contains(BitMixer.mix(values.valueAt(i)));
                    }
                    return contains(0);
                }

                @Override
                public int length() {
                    return context.reader().maxDoc();
                }
            };
        }
    };
}
Project: elasticsearch_my    File: SimpleAllTests.java
public void testNoTokens() throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.KEYWORD_ANALYZER));

    FieldType allFt = getAllFieldType();
    Document doc = new Document();
    doc.add(new Field("_id", "1", StoredField.TYPE));
    doc.add(new AllField("_all", "", 2.0f, allFt));
    indexWriter.addDocument(doc);

    IndexReader reader = DirectoryReader.open(indexWriter);
    IndexSearcher searcher = new IndexSearcher(reader);

    TopDocs docs = searcher.search(new MatchAllDocsQuery(), 10);
    assertThat(docs.totalHits, equalTo(1));
    assertThat(docs.scoreDocs[0].doc, equalTo(0));
}
Project: elasticsearch_my    File: Engine.java
/**
 * Read the last segments info from the commit pointed to by the searcher manager
 */
protected static SegmentInfos readLastCommittedSegmentInfos(final SearcherManager sm, final Store store) throws IOException {
    IndexSearcher searcher = sm.acquire();
    try {
        IndexCommit latestCommit = ((DirectoryReader) searcher.getIndexReader()).getIndexCommit();
        return Lucene.readSegmentInfos(latestCommit);
    } catch (IOException e) {
        // Fall back to reading from the store if reading from the commit fails
        try {
            return store.readLastCommittedSegmentsInfo();
        } catch (IOException e2) {
            e2.addSuppressed(e);
            throw e2;
        }
    } finally {
        sm.release(searcher);
    }
}
Project: elasticsearch_my    File: DoubleNestedSortingTests.java
@Override
protected void assertAvgScoreMode(Query parentFilter, IndexSearcher searcher) throws IOException {
    MultiValueMode sortMode = MultiValueMode.AVG;
    Query childFilter = Queries.not(parentFilter);
    XFieldComparatorSource nestedComparatorSource = createFieldComparator("field2", sortMode, -127, createNested(searcher, parentFilter, childFilter));
    Query query = new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None);
    Sort sort = new Sort(new SortField("field2", nestedComparatorSource));
    TopDocs topDocs = searcher.search(query, 5, sort);
    assertThat(topDocs.totalHits, equalTo(7));
    assertThat(topDocs.scoreDocs.length, equalTo(5));
    assertThat(topDocs.scoreDocs[0].doc, equalTo(11));
    assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(2));
    assertThat(topDocs.scoreDocs[1].doc, equalTo(7));
    assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(2));
    assertThat(topDocs.scoreDocs[2].doc, equalTo(3));
    assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(3));
    assertThat(topDocs.scoreDocs[3].doc, equalTo(15));
    assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(3));
    assertThat(topDocs.scoreDocs[4].doc, equalTo(19));
    assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(3));
}
Project: sjk    File: SearchServiceImpl.java
@Override
public void resetClassFields() throws IOException {
    Directory directory = FSDirectory.open(new File(appConfig.getOldAllIndexDir()));
    if (!IndexReader.indexExists(directory)) {
        logger.error("Please reset index firstly!");
        return;
    }
    Directory ram = new RAMDirectory(directory);
    IndexReader preIndexReader = this.indexReader;
    this.indexReader = IndexReader.open(ram);
    logger.info("IndexReader has numDos: {}", this.indexReader.numDocs());

    IndexSearcher preIndexSearcher = this.indexSearcher;
    this.indexSearcher = new IndexSearcher(indexReader);
    IOUtils.closeQuietly(preIndexSearcher, preIndexReader);
}
Project: elasticsearch_my    File: QueryPhaseTests.java
private void countTestCase(Query query, IndexReader reader, boolean shouldCollect) throws Exception {
    TestSearchContext context = new TestSearchContext(null);
    context.parsedQuery(new ParsedQuery(query));
    context.setSize(0);
    context.setTask(new SearchTask(123L, "", "", "", null));

    IndexSearcher searcher = new IndexSearcher(reader);
    final AtomicBoolean collected = new AtomicBoolean();
    IndexSearcher contextSearcher = new IndexSearcher(reader) {
        protected void search(List<LeafReaderContext> leaves, Weight weight, Collector collector) throws IOException {
            collected.set(true);
            super.search(leaves, weight, collector);
        }
    };

    final boolean rescore = QueryPhase.execute(context, contextSearcher);
    assertFalse(rescore);
    assertEquals(searcher.count(query), context.queryResult().topDocs().totalHits);
    assertEquals(shouldCollect, collected.get());
}
Project: elasticsearch_my    File: MinDocQueryTests.java
public void testRandom() throws IOException {
    final int numDocs = randomIntBetween(10, 200);
    final Document doc = new Document();
    final Directory dir = newDirectory();
    final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
    for (int i = 0; i < numDocs; ++i) {
        w.addDocument(doc);
    }
    final IndexReader reader = w.getReader();
    final IndexSearcher searcher = newSearcher(reader);
    for (int i = 0; i <= numDocs; ++i) {
        assertEquals(numDocs - i, searcher.count(new MinDocQuery(i)));
    }
    w.close();
    reader.close();
    dir.close();
}
Project: dswork    File: SimpleSortedSetFacetsExample.java
/** User runs a query and counts facets. */
private List<FacetResult> search() throws IOException {
  DirectoryReader indexReader = DirectoryReader.open(indexDir);
  IndexSearcher searcher = new IndexSearcher(indexReader);
  SortedSetDocValuesReaderState state = new DefaultSortedSetDocValuesReaderState(indexReader);

  // Aggregates the facet counts
  FacetsCollector fc = new FacetsCollector();

  // MatchAllDocsQuery is for "browsing" (counts facets
  // for all non-deleted docs in the index); normally
  // you'd use a "normal" query:
  FacetsCollector.search(searcher, new MatchAllDocsQuery(), 10, fc);

  // Retrieve results
  Facets facets = new SortedSetDocValuesFacetCounts(state, fc);

  List<FacetResult> results = new ArrayList<FacetResult>();
  results.add(facets.getTopChildren(10, "Author"));
  results.add(facets.getTopChildren(10, "Publish Year"));
  indexReader.close();

  return results;
}
Project: elasticsearch_my    File: CustomUnifiedHighlighterTests.java
public void testAllTermQuery() throws IOException {
    Directory dir = newDirectory();
    String value = "The quick brown fox.";
    Analyzer analyzer = new StandardAnalyzer();
    IndexReader ir = indexOneDoc(dir, "all", value, analyzer);
    AllTermQuery query = new AllTermQuery(new Term("all", "fox"));
    IndexSearcher searcher = newSearcher(ir);
    TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
    assertThat(topDocs.totalHits, equalTo(1));
    int docId = topDocs.scoreDocs[0].doc;
    CustomPassageFormatter passageFormatter = new CustomPassageFormatter("<b>", "</b>", new DefaultEncoder());
    CustomUnifiedHighlighter highlighter = new CustomUnifiedHighlighter(searcher, analyzer,
        passageFormatter, null, value, false);
    Snippet[] snippets = highlighter.highlightField("all", query, docId, 5);
    assertThat(snippets.length, equalTo(1));
    assertThat(snippets[0].getText(), equalTo("The quick brown <b>fox</b>."));
    ir.close();
    dir.close();
}
Project: elasticsearch_my    File: CustomUnifiedHighlighterTests.java
public void testCommonTermsQuery() throws IOException {
    Directory dir = newDirectory();
    String value = "The quick brown fox.";
    Analyzer analyzer = new StandardAnalyzer();
    IndexReader ir = indexOneDoc(dir, "text", value, analyzer);
    CommonTermsQuery query = new CommonTermsQuery(BooleanClause.Occur.SHOULD, BooleanClause.Occur.SHOULD, 128);
    query.add(new Term("text", "quick"));
    query.add(new Term("text", "brown"));
    query.add(new Term("text", "fox"));
    IndexSearcher searcher = newSearcher(ir);
    TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
    assertThat(topDocs.totalHits, equalTo(1));
    int docId = topDocs.scoreDocs[0].doc;
    CustomPassageFormatter passageFormatter = new CustomPassageFormatter("<b>", "</b>", new DefaultEncoder());
    CustomUnifiedHighlighter highlighter = new CustomUnifiedHighlighter(searcher, analyzer,
        passageFormatter, null, value, false);
    Snippet[] snippets = highlighter.highlightField("text", query, docId, 5);
    assertThat(snippets.length, equalTo(1));
    assertThat(snippets[0].getText(), equalTo("The <b>quick</b> <b>brown</b> <b>fox</b>."));
    ir.close();
    dir.close();
}
Project: elasticsearch_my    File: ExtendedStatsAggregatorTests.java
public void testCase(MappedFieldType ft,
                     CheckedConsumer<RandomIndexWriter, IOException> buildIndex,
                     Consumer<InternalExtendedStats> verify) throws IOException {
    try (Directory directory = newDirectory();
         RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) {
        buildIndex.accept(indexWriter);
        try (IndexReader reader = indexWriter.getReader()) {
            IndexSearcher searcher = new IndexSearcher(reader);
            ExtendedStatsAggregationBuilder aggBuilder = new ExtendedStatsAggregationBuilder("my_agg")
                .field("field")
                .sigma(randomDoubleBetween(0, 10, true));
            InternalExtendedStats stats = search(searcher, new MatchAllDocsQuery(), aggBuilder, ft);
            verify.accept(stats);
        }
    }
}
Project: elasticsearch_my    File: AggregatorTestCase.java
protected <A extends Aggregator, B extends AggregationBuilder> A createAggregator(B aggregationBuilder,
                                                                                  IndexSearcher indexSearcher,
                                                                                  MappedFieldType... fieldTypes) throws IOException {
    IndexSettings indexSettings = createIndexSettings();
    SearchContext searchContext = createSearchContext(indexSearcher, indexSettings);
    CircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService();
    when(searchContext.bigArrays()).thenReturn(new MockBigArrays(Settings.EMPTY, circuitBreakerService));
    // TODO: now just needed for top_hits, this will need to be revised for other agg unit tests:
    MapperService mapperService = mapperServiceMock();
    when(mapperService.hasNested()).thenReturn(false);
    when(searchContext.mapperService()).thenReturn(mapperService);
    IndexFieldDataService ifds = new IndexFieldDataService(indexSettings,
            new IndicesFieldDataCache(Settings.EMPTY, new IndexFieldDataCache.Listener() {
            }), circuitBreakerService, mapperService);
    when(searchContext.fieldData()).thenReturn(ifds);

    SearchLookup searchLookup = new SearchLookup(mapperService, ifds, new String[]{"type"});
    when(searchContext.lookup()).thenReturn(searchLookup);

    QueryShardContext queryShardContext = queryShardContextMock(fieldTypes, indexSettings, circuitBreakerService);
    when(searchContext.getQueryShardContext()).thenReturn(queryShardContext);

    @SuppressWarnings("unchecked")
    A aggregator = (A) aggregationBuilder.build(searchContext, null).create(null, true);
    return aggregator;
}
Project: elasticsearch_my    File: MaxAggregatorTests.java
private void testCase(Query query, CheckedConsumer<RandomIndexWriter, IOException> buildIndex, Consumer<InternalMax> verify)
        throws IOException {
    Directory directory = newDirectory();
    RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
    buildIndex.accept(indexWriter);
    indexWriter.close();

    IndexReader indexReader = DirectoryReader.open(directory);
    IndexSearcher indexSearcher = newSearcher(indexReader, true, true);

    MaxAggregationBuilder aggregationBuilder = new MaxAggregationBuilder("_name").field("number");
    MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
    fieldType.setName("number");
    try (MaxAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType)) {
        aggregator.preCollection();
        indexSearcher.search(query, aggregator);
        aggregator.postCollection();
        verify.accept((InternalMax) aggregator.buildAggregation(0L));
    }
    indexReader.close();
    directory.close();
}
Project: freemoz    File: Searcher.java
public SearchResult search(String index, String queryString, int page) {
    SearchResult searchResult = null;

    try {
        IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get(Properties.getProperties().getProperty(Values.INDEX_LOCATION, Values.DEFAULT_INDEX_LOCATION))));
        IndexSearcher searcher = new IndexSearcher(reader);
        Analyzer analyzer = new StandardAnalyzer();

        // Search over the titles only for the moment
        QueryParser parser = new QueryParser(index, analyzer);
        Query query = parser.parse(queryString);

        searchResult = this.doPagingSearch(reader, searcher, query, queryString, page);
        reader.close();
    }
    catch(Exception ex) {}

    return searchResult;
}
Project: elasticsearch_my    File: MinAggregatorTests.java
public void testMinAggregator_noDocs() throws Exception {
    Directory directory = newDirectory();
    RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
    indexWriter.close();

    IndexReader indexReader = DirectoryReader.open(directory);
    IndexSearcher indexSearcher = newSearcher(indexReader, true, true);

    MinAggregationBuilder aggregationBuilder = new MinAggregationBuilder("_name").field("number");
    MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
    fieldType.setName("number");
    try (MinAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType)) {
        aggregator.preCollection();
        indexSearcher.search(new MatchAllDocsQuery(), aggregator);
        aggregator.postCollection();
        InternalMin result = (InternalMin) aggregator.buildAggregation(0L);
        assertEquals(Double.POSITIVE_INFINITY, result.getValue(), 0);
    }
    indexReader.close();
    directory.close();
}
Project: Elasticsearch    File: TermSuggester.java
@Override
public TermSuggestion innerExecute(String name, TermSuggestionContext suggestion, IndexSearcher searcher, CharsRefBuilder spare) throws IOException {
    DirectSpellChecker directSpellChecker = SuggestUtils.getDirectSpellChecker(suggestion.getDirectSpellCheckerSettings());
    final IndexReader indexReader = searcher.getIndexReader();
    TermSuggestion response = new TermSuggestion(
            name, suggestion.getSize(), suggestion.getDirectSpellCheckerSettings().sort()
    );
    List<Token> tokens = queryTerms(suggestion, spare);
    for (Token token : tokens) {
        // TODO: Extend DirectSpellChecker in 4.1, to get the raw suggested words as BytesRef
        SuggestWord[] suggestedWords = directSpellChecker.suggestSimilar(
                token.term, suggestion.getShardSize(), indexReader, suggestion.getDirectSpellCheckerSettings().suggestMode()
        );
        Text key = new Text(new BytesArray(token.term.bytes()));
        TermSuggestion.Entry resultEntry = new TermSuggestion.Entry(key, token.startOffset, token.endOffset - token.startOffset);
        for (SuggestWord suggestWord : suggestedWords) {
            Text word = new Text(suggestWord.string);
            resultEntry.addOption(new TermSuggestion.Entry.Option(word, suggestWord.freq, suggestWord.score));
        }
        response.addTerm(resultEntry);
    }
    return response;
}
Project: gitplex-mit    File: DefaultIndexManager.java
@Override
public boolean isIndexed(Project project, ObjectId commit) {
    File indexDir = storageManager.getProjectIndexDir(project.getForkRoot().getId());
    try (Directory directory = FSDirectory.open(indexDir)) {
        if (DirectoryReader.indexExists(directory)) {
            try (IndexReader reader = DirectoryReader.open(directory)) {
                IndexSearcher searcher = new IndexSearcher(reader);
                return getCurrentCommitIndexVersion().equals(getCommitIndexVersion(searcher, commit));
            }
        } else {
            return false;
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
Project: elasticsearch_my    File: DiversifiedSamplerTests.java
public void testDiversifiedSampler_noDocs() throws Exception {
    Directory directory = newDirectory();
    RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
    indexWriter.close();
    IndexReader indexReader = DirectoryReader.open(directory);
    IndexSearcher indexSearcher = new IndexSearcher(indexReader);

    MappedFieldType idFieldType = new KeywordFieldMapper.KeywordFieldType();
    idFieldType.setName("id");
    idFieldType.setHasDocValues(true);

    MappedFieldType genreFieldType = new KeywordFieldMapper.KeywordFieldType();
    genreFieldType.setName("genre");
    genreFieldType.setHasDocValues(true);

    DiversifiedAggregationBuilder builder = new DiversifiedAggregationBuilder("_name")
            .field(genreFieldType.name())
            .subAggregation(new TermsAggregationBuilder("terms", null).field("id"));

    InternalSampler result = search(indexSearcher, new MatchAllDocsQuery(), builder, genreFieldType, idFieldType);
    Terms terms = result.getAggregations().get("terms");
    assertEquals(0, terms.getBuckets().size());
    indexReader.close();
    directory.close();
}
Project: Equella    File: ItemIndex.java
/**
 * A simplified implementation of matrixSearch() that only works on a single
 * field, and currently only returns the count per term. It could easily be
 * extended to return a list of ItemIds per term; it simply wasn't necessary
 * when I was writing it!
 * <p>
 * This simplified implementation was written to overcome the memory
 * pressures that matrixSearch() creates when you have over half a million
 * terms for a field. MatrixSearch() creates *many* BitSets that it holds on
 * to for reuse as it recurses through a list of fields. Since we only care
 * about a single field in this implementation, we can avoid generating and
 * holding onto BitSets.
 */
public Multimap<String, Pair<String, Integer>> facetCount(@Nullable final Search searchreq,
    final Collection<String> fields)
{
    return search(new Searcher<Multimap<String, Pair<String, Integer>>>()
    {
        @Override
        public Multimap<String, Pair<String, Integer>> search(IndexSearcher searcher) throws IOException
        {
            final IndexReader reader = searcher.getIndexReader();
            final OpenBitSet filteredBits = searchRequestToBitSet(searchreq, searcher, reader);

            final Multimap<String, Pair<String, Integer>> rv = ArrayListMultimap.create();
            for( String field : fields )
            {
                for( Term term : new XPathFieldIterator(reader, field, "") )
                {
                    int count = 0;

                    TermDocs docs = reader.termDocs(term);
                    while( docs.next() )
                    {
                        if( filteredBits.get(docs.doc()) )
                        {
                            count++;
                        }
                    }
                    docs.close();

                    if( count > 0 )
                    {
                        rv.put(field, new Pair<String, Integer>(term.text(), count));
                    }
                }
            }
            return rv;
        }
    });
}
Project: elasticsearch_my    File: HistogramAggregatorTests.java
public void testLongs() throws Exception {
    try (Directory dir = newDirectory();
            RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {
        for (long value : new long[] {7, 3, -10, -6, 5, 50}) {
            Document doc = new Document();
            doc.add(new SortedNumericDocValuesField("field", value));
            w.addDocument(doc);
        }

        HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg")
                .field("field")
                .interval(5);
        MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
        fieldType.setName("field");
        try (IndexReader reader = w.getReader()) {
            IndexSearcher searcher = new IndexSearcher(reader);
            Histogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
            assertEquals(4, histogram.getBuckets().size());
            assertEquals(-10d, histogram.getBuckets().get(0).getKey());
            assertEquals(2, histogram.getBuckets().get(0).getDocCount());
            assertEquals(0d, histogram.getBuckets().get(1).getKey());
            assertEquals(1, histogram.getBuckets().get(1).getDocCount());
            assertEquals(5d, histogram.getBuckets().get(2).getKey());
            assertEquals(2, histogram.getBuckets().get(2).getDocCount());
            assertEquals(50d, histogram.getBuckets().get(3).getKey());
            assertEquals(1, histogram.getBuckets().get(3).getDocCount());
        }
    }
}
Project: RedisDirectory    File: TestLucene.java
public void testRedisDirectoryWithRemoteJedisPool() throws IOException {
    long start = System.currentTimeMillis();
    IndexWriterConfig indexWriterConfig = new IndexWriterConfig(new WhitespaceAnalyzer())
            .setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    JedisPool jedisPool = new JedisPool(new JedisPoolConfig(), "10.97.19.55", 6379, Constants.TIME_OUT);
    RedisDirectory redisDirectory = new RedisDirectory(new JedisPoolStream(jedisPool));
    IndexWriter indexWriter = new IndexWriter(redisDirectory, indexWriterConfig);
    for (int i = 0; i < 5000000; i++) {
        indexWriter.addDocument(addDocument(i));
    }
    indexWriter.commit();
    indexWriter.close();
    redisDirectory.close();
    long end = System.currentTimeMillis();
    log.error("RedisDirectoryWithJedisPool consumes {}s!", (end - start) / 1000);
    start = System.currentTimeMillis();
    // Read back from the same remote Redis instance that was just written to
    IndexSearcher indexSearcher = new IndexSearcher(DirectoryReader.open(
            new RedisDirectory(new JedisPoolStream(jedisPool))));
    int total = 0;
    for (int i = 0; i < 1000000; i++) {
        TermQuery key1 = new TermQuery(new Term("key1", "key" + i));
        TopDocs search = indexSearcher.search(key1, 10);
        total += search.totalHits;
    }
    System.out.println(total);
    end = System.currentTimeMillis();
    log.error("RedisDirectoryWithJedisPool search consumes {}ms!", (end - start));
}
Project: RedisDirectory    File: TestLucene.java
public void testRedisDirectoryWithJedis() throws IOException {
    long start = System.currentTimeMillis();
    IndexWriterConfig indexWriterConfig = new IndexWriterConfig(new WhitespaceAnalyzer())
            .setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    //indexWriterConfig.setInfoStream(System.out);
    //indexWriterConfig.setRAMBufferSizeMB(2048);
    //LogByteSizeMergePolicy logByteSizeMergePolicy = new LogByteSizeMergePolicy();
    //logByteSizeMergePolicy.setMinMergeMB(1);
    //logByteSizeMergePolicy.setMaxMergeMB(64);
    //logByteSizeMergePolicy.setMaxCFSSegmentSizeMB(64);
    //indexWriterConfig.setRAMBufferSizeMB(1024).setMergePolicy(logByteSizeMergePolicy).setUseCompoundFile(false);
    //GenericObjectPoolConfig genericObjectPoolConfig = new GenericObjectPoolConfig();
    //max wait time when borrowing a connection from the pool
    //genericObjectPoolConfig.setMaxWaitMillis(3000);
    //10s connection timeout
    RedisDirectory redisDirectory = new RedisDirectory(new JedisStream("localhost", 6379));
    IndexWriter indexWriter = new IndexWriter(redisDirectory, indexWriterConfig);
    for (int i = 0; i < 10000000; i++) {
        indexWriter.addDocument(addDocument(i));
    }
    indexWriter.commit();
    indexWriter.close();
    redisDirectory.close();
    long end = System.currentTimeMillis();
    log.error("RedisDirectoryWithJedis consumes {}s!", (end - start) / 1000);
    start = System.currentTimeMillis();
    IndexSearcher indexSearcher = new IndexSearcher(DirectoryReader.open(new RedisDirectory(new JedisStream("localhost",
            6379))));
    int total = 0;
    for (int i = 0; i < 10000000; i++) {
        TermQuery key1 = new TermQuery(new Term("key1", "key" + i));
        TopDocs search = indexSearcher.search(key1, 10);
        total += search.totalHits;
    }
    System.out.println(total);
    end = System.currentTimeMillis();
    log.error("RedisDirectoryWithJedis search consumes {}ms!", (end - start));
}
Project: elasticsearch_my    File: PercolateQuery.java
PercolateQuery(String documentType, QueryStore queryStore, BytesReference documentSource,
                      Query candidateMatchesQuery, IndexSearcher percolatorIndexSearcher, Query verifiedMatchesQuery) {
    this.documentType = Objects.requireNonNull(documentType);
    this.documentSource = Objects.requireNonNull(documentSource);
    this.candidateMatchesQuery = Objects.requireNonNull(candidateMatchesQuery);
    this.queryStore = Objects.requireNonNull(queryStore);
    this.percolatorIndexSearcher = Objects.requireNonNull(percolatorIndexSearcher);
    this.verifiedMatchesQuery = Objects.requireNonNull(verifiedMatchesQuery);
}
Project: elasticsearch_my    File: PercolateQuery.java
BaseScorer(Weight weight, Scorer approximation, CheckedFunction<Integer, Query, IOException> percolatorQueries,
           IndexSearcher percolatorIndexSearcher) {
    super(weight);
    this.approximation = approximation;
    this.percolatorQueries = percolatorQueries;
    this.percolatorIndexSearcher = percolatorIndexSearcher;
}
Project: elasticsearch_my    File: HistogramAggregatorTests.java
public void testDoubles() throws Exception {
    try (Directory dir = newDirectory();
            RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {
        for (double value : new double[] {9.3, 3.2, -10, -6.5, 5.3, 50.1}) {
            Document doc = new Document();
            doc.add(new SortedNumericDocValuesField("field", NumericUtils.doubleToSortableLong(value)));
            w.addDocument(doc);
        }

        HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg")
                .field("field")
                .interval(5);
        MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.DOUBLE);
        fieldType.setName("field");
        try (IndexReader reader = w.getReader()) {
            IndexSearcher searcher = new IndexSearcher(reader);
            Histogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
            assertEquals(4, histogram.getBuckets().size());
            assertEquals(-10d, histogram.getBuckets().get(0).getKey());
            assertEquals(2, histogram.getBuckets().get(0).getDocCount());
            assertEquals(0d, histogram.getBuckets().get(1).getKey());
            assertEquals(1, histogram.getBuckets().get(1).getDocCount());
            assertEquals(5d, histogram.getBuckets().get(2).getKey());
            assertEquals(2, histogram.getBuckets().get(2).getDocCount());
            assertEquals(50d, histogram.getBuckets().get(3).getKey());
            assertEquals(1, histogram.getBuckets().get(3).getDocCount());
        }
    }
}
Project: dswork    File: MultiCategoryListsFacetsExample.java
/** User runs a query and counts facets. */
private List<FacetResult> search() throws IOException {
  DirectoryReader indexReader = DirectoryReader.open(indexDir);
  IndexSearcher searcher = new IndexSearcher(indexReader);
  TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);

  FacetsCollector fc = new FacetsCollector();

  // MatchAllDocsQuery is for "browsing" (counts facets
  // for all non-deleted docs in the index); normally
  // you'd use a "normal" query:
  FacetsCollector.search(searcher, new MatchAllDocsQuery(), 10, fc);

  // Retrieve results
  List<FacetResult> results = new ArrayList<FacetResult>();

  // Count both "Publish Date" and "Author" dimensions
  Facets author = new FastTaxonomyFacetCounts("author", taxoReader, config, fc);
  results.add(author.getTopChildren(10, "Author"));

  Facets pubDate = new FastTaxonomyFacetCounts("pubdate", taxoReader, config, fc);
  results.add(pubDate.getTopChildren(10, "Publish Date"));

  indexReader.close();
  taxoReader.close();

  return results;
}
Project: gitplex-mit    File: DefaultIndexManager.java
private String getCommitIndexVersion(final IndexSearcher searcher, AnyObjectId commitId) throws IOException {
    final AtomicReference<String> indexVersion = new AtomicReference<>(null);

    searcher.search(COMMIT_HASH.query(commitId.getName()), new Collector() {

        private int docBase;

        @Override
        public void setScorer(Scorer scorer) throws IOException {
        }

        @Override
        public void collect(int doc) throws IOException {
            indexVersion.set(searcher.doc(docBase+doc).get(COMMIT_INDEX_VERSION.name()));
        }

        @Override
        public void setNextReader(AtomicReaderContext context) throws IOException {
            docBase = context.docBase;
        }

        @Override
        public boolean acceptsDocsOutOfOrder() {
            return true;
        }

    });
    return indexVersion.get();
}