Java 类org.apache.lucene.search.TotalHitCountCollector 实例源码

项目:chronix.server    文件:ChronixRetentionHandler.java   
/**
 * Searches the index to determine whether documents older than the retention window exist,
 * and records the result in the Solr query response.
 *
 * @param queryString the (technical) query string used to find the older documents
 * @param req - the solr query request information
 * @param rsp - the solr query response information
 * @return true if the hit count is greater zero, otherwise false
 * @throws SyntaxError if the query string cannot be parsed
 * @throws IOException if the index search fails
 */
private boolean olderDocumentsExists(String queryString, SolrQueryRequest req, SolrQueryResponse rsp) throws SyntaxError, IOException {
    String defType = req.getParams().get(QueryParsing.DEFTYPE, QParserPlugin.DEFAULT_QTYPE);

    QParser queryParser = QParser.getParser(queryString, defType, req);
    Query query = queryParser.getQuery();

    // TotalHitCountCollector only counts matches; no documents are scored or fetched.
    TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector();
    req.getSearcher().search(query, totalHitCountCollector);

    // Expose both the human-readable retention window and the technical query that was run.
    rsp.add("query", String.format("%s:[* TO NOW-%s]", queryField, timeSeriesAge));
    rsp.add("queryTechnical", queryString);
    rsp.add("removedDocuments", totalHitCountCollector.getTotalHits());

    return totalHitCountCollector.getTotalHits() != 0;
}
项目:search    文件:SolrIndexSearcher.java   
/**
 * Returns the number of documents that match both <code>a</code> and <code>b</code>.
 * <p>
 * This method is cache-aware and may check as well as modify the cache.
 *
 * @return the number of documents in the intersection between <code>a</code> and <code>b</code>.
 * @throws IOException If there is a low-level I/O error.
 */
public int numDocs(Query a, DocSet b) throws IOException {
  if (filterCache == null) {
    // No filter cache: run a single query that intersects a with b's filter.
    // NOTE: we cannot use FilteredQuery, because BitDocSet assumes it will never
    // have deleted documents, but UninvertedField's doNegative has sets with deleted docs
    TotalHitCountCollector counter = new TotalHitCountCollector();
    BooleanQuery intersection = new BooleanQuery();
    intersection.add(QueryUtils.makeQueryable(a), BooleanClause.Occur.MUST);
    intersection.add(new ConstantScoreQuery(b.getTopFilter()), BooleanClause.Occur.MUST);
    super.search(intersection, null, counter);
    return counter.getTotalHits();
  }
  // Cache-aware path: resolve the positive form of a, then use set arithmetic.
  // If a was a negative query, the answer is |b| minus |b ∩ positive(a)|.
  Query absQ = QueryUtils.getAbs(a);
  DocSet positiveA = getPositiveDocSet(absQ);
  return (a == absQ) ? b.intersectionSize(positiveA) : b.andNotSize(positiveA);
}
项目:neo4j-lucene5-index    文件:LuceneIndexAccessorReader.java   
@Override
public int countIndexedNodes( long nodeId, Object propertyValue )
{
    // Match documents that carry both this node id and this property value.
    BooleanQuery.Builder builder = new BooleanQuery.Builder().setDisableCoord( true );
    builder.add( new TermQuery( documentLogic.newTermForChangeOrRemove( nodeId ) ), BooleanClause.Occur.MUST );
    builder.add( documentLogic.newSeekQuery( propertyValue ), BooleanClause.Occur.MUST );
    try
    {
        TotalHitCountCollector hitCounter = new TotalHitCountCollector();
        searcher.search( builder.build(), hitCounter );
        // A <label,propertyKeyId,nodeId> tuple should only match at most a single propertyValue
        return hitCounter.getTotalHits();
    }
    catch ( IOException e )
    {
        throw new RuntimeException( e );
    }
}
项目:NYBC    文件:TestFacetsPayloadMigrationReader.java   
private void verifyNotFacetsData(DirectoryReader indexReader, IndexSearcher searcher) throws IOException {
  // verify that non facets data was not damaged
  TotalHitCountCollector total = new TotalHitCountCollector();
  searcher.search(new PrefixQuery(new Term("foo", "content")), total);
  // assertEquals is (message, expected, actual): every document is expected to match
  // the content query, so maxDoc() is the expected value. The original had the
  // arguments swapped, which produces misleading failure messages.
  assertEquals("invalid number of results for content query", indexReader.maxDoc(), total.getTotalHits());

  // Count distinct "docid" terms across all segments; each document indexes exactly
  // one unique docid term, so the total must equal maxDoc().
  int numDocIDs = 0;
  for (AtomicReaderContext context : indexReader.leaves()) {
    Terms docIDs = context.reader().terms("docid");
    assertNotNull(docIDs);
    TermsEnum te = docIDs.iterator(null);
    while (te.next() != null) {
      ++numDocIDs;
    }
  }
  assertEquals("invalid number of docid terms", indexReader.maxDoc(), numDocIDs);
}
项目:NYBC    文件:TestFacetsPayloadMigrationReader.java   
private void verifyDrillDown(Map<String,Integer> expectedCounts, FacetIndexingParams fip, DirectoryReader indexReader, 
    TaxonomyReader taxoReader, IndexSearcher searcher) throws IOException {
  // verify drill-down: each dimension must produce hits and the expected root count.
  // Iterate entries directly instead of keySet() + get() to avoid a second map lookup.
  for (Map.Entry<String,Integer> expected : expectedCounts.entrySet()) {
    String dim = expected.getKey();
    CategoryPath drillDownCP = new CategoryPath(dim);
    FacetSearchParams fsp = new FacetSearchParams(fip, new CountFacetRequest(drillDownCP, 10));
    DrillDownQuery drillDown = new DrillDownQuery(fip, new MatchAllDocsQuery());
    drillDown.add(drillDownCP);
    // Collect the raw hit count and the facet counts in a single search pass.
    TotalHitCountCollector total = new TotalHitCountCollector();
    FacetsCollector fc = FacetsCollector.create(fsp, indexReader, taxoReader);
    searcher.search(drillDown, MultiCollector.wrap(fc, total));
    assertTrue("no results for drill-down query " + drillDown, total.getTotalHits() > 0);
    List<FacetResult> facetResults = fc.getFacetResults();
    assertEquals(1, facetResults.size());
    FacetResultNode rootNode = facetResults.get(0).getFacetResultNode();
    assertEquals("wrong count for " + dim, expected.getValue().intValue(), (int) rootNode.value);
  }
}
项目:read-open-source-code    文件:SolrIndexSearcher.java   
/**
 * Returns the number of documents that match both <code>a</code> and <code>b</code>.
 * <p>
 * This method is cache-aware and may check as well as modify the cache.
 *
 * @return the number of documents in the intersection between <code>a</code> and <code>b</code>.
 * @throws IOException If there is a low-level I/O error.
 */
public int numDocs(Query a, DocSet b) throws IOException {
  if (filterCache == null) {
    // No filter cache: run a single query that intersects a with b's filter.
    // NOTE: we cannot use FilteredQuery, because BitDocSet assumes it will never
    // have deleted documents, but UninvertedField's doNegative has sets with deleted docs
    TotalHitCountCollector counter = new TotalHitCountCollector();
    BooleanQuery intersection = new BooleanQuery();
    intersection.add(QueryUtils.makeQueryable(a), BooleanClause.Occur.MUST);
    intersection.add(new ConstantScoreQuery(b.getTopFilter()), BooleanClause.Occur.MUST);
    super.search(intersection, null, counter);
    return counter.getTotalHits();
  }
  // Cache-aware path: resolve the positive form of a, then use set arithmetic.
  // If a was a negative query, the answer is |b| minus |b ∩ positive(a)|.
  Query absQ = QueryUtils.getAbs(a);
  DocSet positiveA = getPositiveDocSet(absQ);
  return (a == absQ) ? b.intersectionSize(positiveA) : b.andNotSize(positiveA);
}
项目:read-open-source-code    文件:SolrIndexSearcher.java   
/**
 * Returns the number of documents that match both <code>a</code> and <code>b</code>.
 * <p>
 * This method is cache-aware and may check as well as modify the cache.
 *
 * @return the number of documents in the intersection between <code>a</code> and <code>b</code>.
 * @throws IOException If there is a low-level I/O error.
 */
public int numDocs(Query a, DocSet b) throws IOException {
  if (filterCache == null) {
    // No filter cache: run a single query that intersects a with b's filter.
    // NOTE: we cannot use FilteredQuery, because BitDocSet assumes it will never
    // have deleted documents, but UninvertedField's doNegative has sets with deleted docs
    TotalHitCountCollector counter = new TotalHitCountCollector();
    BooleanQuery intersection = new BooleanQuery();
    intersection.add(QueryUtils.makeQueryable(a), BooleanClause.Occur.MUST);
    intersection.add(new ConstantScoreQuery(b.getTopFilter()), BooleanClause.Occur.MUST);
    super.search(intersection, null, counter);
    return counter.getTotalHits();
  }
  // Cache-aware path: resolve the positive form of a, then use set arithmetic.
  // If a was a negative query, the answer is |b| minus |b ∩ positive(a)|.
  Query absQ = QueryUtils.getAbs(a);
  DocSet positiveA = getPositiveDocSet(absQ);
  return (a == absQ) ? b.intersectionSize(positiveA) : b.andNotSize(positiveA);
}
项目:Maskana-Gestor-de-Conocimiento    文件:TestFacetsPayloadMigrationReader.java   
private void verifyNotFacetsData(DirectoryReader indexReader, IndexSearcher searcher) throws IOException {
  // verify that non facets data was not damaged
  TotalHitCountCollector total = new TotalHitCountCollector();
  searcher.search(new PrefixQuery(new Term("foo", "content")), total);
  // assertEquals is (message, expected, actual): every document is expected to match
  // the content query, so maxDoc() is the expected value. The original had the
  // arguments swapped, which produces misleading failure messages.
  assertEquals("invalid number of results for content query", indexReader.maxDoc(), total.getTotalHits());

  // Count distinct "docid" terms across all segments; each document indexes exactly
  // one unique docid term, so the total must equal maxDoc().
  int numDocIDs = 0;
  for (AtomicReaderContext context : indexReader.leaves()) {
    Terms docIDs = context.reader().terms("docid");
    assertNotNull(docIDs);
    TermsEnum te = docIDs.iterator(null);
    while (te.next() != null) {
      ++numDocIDs;
    }
  }
  assertEquals("invalid number of docid terms", indexReader.maxDoc(), numDocIDs);
}
项目:Maskana-Gestor-de-Conocimiento    文件:TestFacetsPayloadMigrationReader.java   
private void verifyDrillDown(Map<String,Integer> expectedCounts, FacetIndexingParams fip, DirectoryReader indexReader, 
    TaxonomyReader taxoReader, IndexSearcher searcher) throws IOException {
  // verify drill-down: each dimension must produce hits and the expected root count.
  // Iterate entries directly instead of keySet() + get() to avoid a second map lookup.
  for (Map.Entry<String,Integer> expected : expectedCounts.entrySet()) {
    String dim = expected.getKey();
    CategoryPath drillDownCP = new CategoryPath(dim);
    FacetSearchParams fsp = new FacetSearchParams(fip, new CountFacetRequest(drillDownCP, 10));
    DrillDownQuery drillDown = new DrillDownQuery(fip, new MatchAllDocsQuery());
    drillDown.add(drillDownCP);
    // Collect the raw hit count and the facet counts in a single search pass.
    TotalHitCountCollector total = new TotalHitCountCollector();
    FacetsCollector fc = FacetsCollector.create(fsp, indexReader, taxoReader);
    searcher.search(drillDown, MultiCollector.wrap(fc, total));
    assertTrue("no results for drill-down query " + drillDown, total.getTotalHits() > 0);
    List<FacetResult> facetResults = fc.getFacetResults();
    assertEquals(1, facetResults.size());
    FacetResultNode rootNode = facetResults.get(0).getFacetResultNode();
    assertEquals("wrong count for " + dim, expected.getValue().intValue(), (int) rootNode.value);
  }
}
项目:elasticsearch_my    文件:SearchCancellationTests.java   
public void testLowLevelCancellableCollector() throws IOException {
    // Wrap a plain hit counter in a cancellable collector with per-doc checking enabled.
    AtomicBoolean cancelled = new AtomicBoolean();
    TotalHitCountCollector delegate = new TotalHitCountCollector();
    CancellableCollector cancellableCollector = new CancellableCollector(cancelled::get, true, delegate);
    final LeafCollector leafCollector = cancellableCollector.getLeafCollector(reader.leaves().get(0));
    // Collecting succeeds while the cancel flag is unset...
    leafCollector.collect(0);
    cancelled.set(true);
    // ...and the very next collect() fails once the flag flips (low-level cancellation).
    expectThrows(TaskCancelledException.class, () -> leafCollector.collect(1));
}
项目:elasticsearch_my    文件:SearchCancellationTests.java   
public void testCancellableCollector() throws IOException {
    // Wrap a plain hit counter in a cancellable collector WITHOUT per-doc checking.
    AtomicBoolean cancelled = new AtomicBoolean();
    TotalHitCountCollector delegate = new TotalHitCountCollector();
    CancellableCollector cancellableCollector = new CancellableCollector(cancelled::get, false, delegate);
    final LeafCollector leafCollector = cancellableCollector.getLeafCollector(reader.leaves().get(0));
    leafCollector.collect(0);
    cancelled.set(true);
    // With per-doc checking off, collecting on the current leaf still succeeds...
    leafCollector.collect(1);
    // ...but requesting the next leaf observes the cancellation.
    expectThrows(TaskCancelledException.class, () -> cancellableCollector.getLeafCollector(reader.leaves().get(1)));
}
项目:elasticsearch_my    文件:QueryProfilerTests.java   
public void testCollector() throws IOException {
    // ProfileCollector must accumulate time across every delegated call.
    TotalHitCountCollector delegate = new TotalHitCountCollector();
    ProfileCollector profileCollector = new ProfileCollector(delegate);
    assertEquals(0, profileCollector.getTime());

    // Obtaining a leaf collector is the first timed operation.
    final LeafCollector leafCollector = profileCollector.getLeafCollector(reader.leaves().get(0));
    assertThat(profileCollector.getTime(), greaterThan(0L));

    // Each subsequent delegated call must strictly increase the recorded time.
    long elapsed = profileCollector.getTime();
    leafCollector.setScorer(null);
    assertThat(profileCollector.getTime(), greaterThan(elapsed));

    elapsed = profileCollector.getTime();
    leafCollector.collect(0);
    assertThat(profileCollector.getTime(), greaterThan(elapsed));
}
项目:naisc    文件:Search.java   
private int count(Query query) {
    // Count matches without scoring or materializing any hits.
    TotalHitCountCollector hitCounter = new TotalHitCountCollector();
    try {
        searcher.search(query, hitCounter);
        return hitCounter.getTotalHits();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
项目:OpenCyclos    文件:AdDAOImpl.java   
private AdCategoryWithCounterVO createCounter(final IndexSearcher searcher, final Query query, final Filters baseFilters, final AdCategory adCategory, final AdCategoryWithCounterVO parent, final int level) throws IOException {
    // Narrow the base filters to the current category and count matching ads.
    final Filters filters = (Filters) baseFilters.clone();
    filters.addTerms("category", adCategory.getId());
    final TotalHitCountCollector hitCounter = new TotalHitCountCollector();
    searcher.search(query, filters, hitCounter);
    final AdCategoryWithCounterVO counter = new AdCategoryWithCounterVO(adCategory.getId(), adCategory.getName(), level, hitCounter.getTotalHits(), parent);
    // Recurse into child categories, attaching each child counter to this node.
    // Note: recursion always starts from baseFilters, not the narrowed filters.
    for (final AdCategory childCategory : adCategory.getChildren()) {
        counter.addChild(createCounter(searcher, query, baseFilters, childCategory, counter, level + 1));
    }
    return counter;
}
项目:search    文件:SimpleNaiveBayesClassifier.java   
private int countDocsWithClass() throws IOException {
  // Fast path: ask the codec for the per-field document count directly.
  int docCount = MultiFields.getTerms(this.atomicReader, this.classFieldName).getDocCount();
  if (docCount != -1) {
    return docCount;
  }
  // Fallback for codecs without getDocCount(): count documents that have any value
  // in the class field, optionally restricted by the user-supplied query.
  BooleanQuery classQuery = new BooleanQuery();
  Term anyClass = new Term(classFieldName, String.valueOf(WildcardQuery.WILDCARD_STRING));
  classQuery.add(new BooleanClause(new WildcardQuery(anyClass), BooleanClause.Occur.MUST));
  if (query != null) {
    classQuery.add(query, BooleanClause.Occur.MUST);
  }
  TotalHitCountCollector hitCounter = new TotalHitCountCollector();
  indexSearcher.search(classQuery, hitCounter);
  return hitCounter.getTotalHits();
}
项目:search    文件:SimpleNaiveBayesClassifier.java   
private int getWordFreqForClass(String word, BytesRef c) throws IOException {
  // Disjunction over all text fields: the word may occur in any of them.
  BooleanQuery wordQuery = new BooleanQuery();
  for (String textFieldName : textFieldNames) {
    wordQuery.add(new BooleanClause(new TermQuery(new Term(textFieldName, word)), BooleanClause.Occur.SHOULD));
  }
  // Conjunction: word present AND document belongs to class c (AND user query, if any).
  BooleanQuery freqQuery = new BooleanQuery();
  freqQuery.add(new BooleanClause(wordQuery, BooleanClause.Occur.MUST));
  freqQuery.add(new BooleanClause(new TermQuery(new Term(classFieldName, c)), BooleanClause.Occur.MUST));
  if (query != null) {
    freqQuery.add(query, BooleanClause.Occur.MUST);
  }
  TotalHitCountCollector hitCounter = new TotalHitCountCollector();
  indexSearcher.search(freqQuery, hitCounter);
  return hitCounter.getTotalHits();
}
项目:search    文件:Grouping.java   
/**
 * {@inheritDoc}
 */
@Override
protected Collector createFirstPassCollector() throws IOException {
  // No groups requested: fall back to a plain hit counter so a total is still available.
  if (actualGroupsToFind <= 0) {
    fallBackCollector = new TotalHitCountCollector();
    return fallBackCollector;
  }
  // Default to relevance ordering when no explicit sort was supplied.
  if (sort == null) {
    sort = Sort.RELEVANCE;
  }
  firstPass = new TermFirstPassGroupingCollector(groupBy, sort, actualGroupsToFind);
  return firstPass;
}
项目:search    文件:Grouping.java   
/**
 * {@inheritDoc}
 */
@Override
protected Collector createFirstPassCollector() throws IOException {
  // No groups requested: fall back to a plain hit counter so a total is still available.
  if (actualGroupsToFind <= 0) {
    fallBackCollector = new TotalHitCountCollector();
    return fallBackCollector;
  }
  // Default to relevance ordering when no explicit sort was supplied.
  if (sort == null) {
    sort = Sort.RELEVANCE;
  }
  firstPass = new FunctionFirstPassGroupingCollector(groupBy, context, searcher.weightSort(sort), actualGroupsToFind);
  return firstPass;
}
项目:appformer    文件:LuceneSearchIndex.java   
private int searchHits(final Query query,
                       final ClusterSegment... clusterSegments) {
    // Borrow a searcher for the requested cluster segments; it must always be released.
    final IndexSearcher searcher = indexManager.getIndexSearcher(clusterSegments);
    try {
        final TotalHitCountCollector hitCounter = new TotalHitCountCollector();
        searcher.search(query, hitCounter);
        return hitCounter.getTotalHits();
    } catch (final Exception ex) {
        throw new RuntimeException("Error during Query!",
                                   ex);
    } finally {
        indexManager.release(searcher);
    }
}
项目:NYBC    文件:SimpleNaiveBayesClassifier.java   
private int countDocsWithClass() throws IOException {
  // Prefer the codec-provided per-field document count.
  int docCount = MultiFields.getTerms(this.atomicReader, this.classFieldName).getDocCount();
  if (docCount != -1) {
    return docCount;
  }
  // Codec cannot report it: count every document carrying any value in the class field.
  Term anyClass = new Term(classFieldName, String.valueOf(WildcardQuery.WILDCARD_STRING));
  TotalHitCountCollector hitCounter = new TotalHitCountCollector();
  indexSearcher.search(new WildcardQuery(anyClass), hitCounter);
  return hitCounter.getTotalHits();
}
项目:NYBC    文件:SimpleNaiveBayesClassifier.java   
private int getWordFreqForClass(String word, BytesRef c) throws IOException {
  // Documents must contain the word in the text field AND belong to class c.
  BooleanQuery freqQuery = new BooleanQuery();
  freqQuery.add(new BooleanClause(new TermQuery(new Term(textFieldName, word)), BooleanClause.Occur.MUST));
  freqQuery.add(new BooleanClause(new TermQuery(new Term(classFieldName, c)), BooleanClause.Occur.MUST));
  TotalHitCountCollector hitCounter = new TotalHitCountCollector();
  indexSearcher.search(freqQuery, hitCounter);
  return hitCounter.getTotalHits();
}
项目:lumongo    文件:FacetStorageTest.java   
/** User runs a query and counts facets. */
private List<FacetResult> search() throws IOException {
    DirectoryReader indexReader = DirectoryReader.open(directory);
    try {
        IndexSearcher searcher = new IndexSearcher(indexReader);
        SortedSetDocValuesReaderState state = new DefaultSortedSetDocValuesReaderState(indexReader);

        // Aggregates the facet counts
        FacetsCollector fc = new FacetsCollector();

        // MatchAllDocsQuery is for "browsing" (counts facets
        // for all non-deleted docs in the index); normally
        // you'd use a "normal" query:
        //FacetsCollector.search(searcher, new MatchAllDocsQuery(), 10, fc);

        TotalHitCountCollector collector = new TotalHitCountCollector();
        searcher.search(new MatchAllDocsQuery(), MultiCollector.wrap(collector, fc));

        // Retrieve results
        Facets facets = new SortedSetDocValuesFacetCounts(state, fc);

        List<FacetResult> results = new ArrayList<>();
        results.add(facets.getTopChildren(10, "Author"));
        results.add(facets.getTopChildren(10, "Publish Year"));
        return results;
    } finally {
        // Close the reader even when facet retrieval throws; the original
        // closed it unconditionally only on the success path, leaking it on error.
        indexReader.close();
    }
}
项目:read-open-source-code    文件:SimpleNaiveBayesClassifier.java   
private int countDocsWithClass() throws IOException {
  // Fast path: ask the codec for the per-field document count directly.
  int docCount = MultiFields.getTerms(this.atomicReader, this.classFieldName).getDocCount();
  if (docCount != -1) {
    return docCount;
  }
  // Fallback for codecs without getDocCount(): count documents that have any value
  // in the class field, optionally restricted by the user-supplied query.
  BooleanQuery classQuery = new BooleanQuery();
  Term anyClass = new Term(classFieldName, String.valueOf(WildcardQuery.WILDCARD_STRING));
  classQuery.add(new BooleanClause(new WildcardQuery(anyClass), BooleanClause.Occur.MUST));
  if (query != null) {
    classQuery.add(query, BooleanClause.Occur.MUST);
  }
  TotalHitCountCollector hitCounter = new TotalHitCountCollector();
  indexSearcher.search(classQuery, hitCounter);
  return hitCounter.getTotalHits();
}
项目:read-open-source-code    文件:SimpleNaiveBayesClassifier.java   
private int getWordFreqForClass(String word, BytesRef c) throws IOException {
  // Disjunction over all text fields: the word may occur in any of them.
  BooleanQuery wordQuery = new BooleanQuery();
  for (String textFieldName : textFieldNames) {
    wordQuery.add(new BooleanClause(new TermQuery(new Term(textFieldName, word)), BooleanClause.Occur.SHOULD));
  }
  // Conjunction: word present AND document belongs to class c (AND user query, if any).
  BooleanQuery freqQuery = new BooleanQuery();
  freqQuery.add(new BooleanClause(wordQuery, BooleanClause.Occur.MUST));
  freqQuery.add(new BooleanClause(new TermQuery(new Term(classFieldName, c)), BooleanClause.Occur.MUST));
  if (query != null) {
    freqQuery.add(query, BooleanClause.Occur.MUST);
  }
  TotalHitCountCollector hitCounter = new TotalHitCountCollector();
  indexSearcher.search(freqQuery, hitCounter);
  return hitCounter.getTotalHits();
}
项目:read-open-source-code    文件:Grouping.java   
/**
 * {@inheritDoc}
 */
@Override
protected Collector createFirstPassCollector() throws IOException {
  // No groups requested: fall back to a plain hit counter so a total is still available.
  if (actualGroupsToFind <= 0) {
    fallBackCollector = new TotalHitCountCollector();
    return fallBackCollector;
  }
  // Default to relevance ordering when no explicit sort was supplied.
  if (sort == null) {
    sort = Sort.RELEVANCE;
  }
  firstPass = new TermFirstPassGroupingCollector(groupBy, sort, actualGroupsToFind);
  return firstPass;
}
项目:read-open-source-code    文件:Grouping.java   
/**
 * {@inheritDoc}
 */
@Override
protected Collector createFirstPassCollector() throws IOException {
  // No groups requested: fall back to a plain hit counter so a total is still available.
  if (actualGroupsToFind <= 0) {
    fallBackCollector = new TotalHitCountCollector();
    return fallBackCollector;
  }
  // Default to relevance ordering when no explicit sort was supplied.
  if (sort == null) {
    sort = Sort.RELEVANCE;
  }
  firstPass = new FunctionFirstPassGroupingCollector(groupBy, context, searcher.weightSort(sort), actualGroupsToFind);
  return firstPass;
}
项目:read-open-source-code    文件:SimpleNaiveBayesClassifier.java   
private int countDocsWithClass() throws IOException {
  // Fast path: ask the codec for the per-field document count directly.
  int docCount = MultiFields.getTerms(this.atomicReader, this.classFieldName).getDocCount();
  if (docCount != -1) {
    return docCount;
  }
  // Fallback for codecs without getDocCount(): count documents that have any value
  // in the class field, optionally restricted by the user-supplied query.
  BooleanQuery classQuery = new BooleanQuery();
  Term anyClass = new Term(classFieldName, String.valueOf(WildcardQuery.WILDCARD_STRING));
  classQuery.add(new BooleanClause(new WildcardQuery(anyClass), BooleanClause.Occur.MUST));
  if (query != null) {
    classQuery.add(query, BooleanClause.Occur.MUST);
  }
  TotalHitCountCollector hitCounter = new TotalHitCountCollector();
  indexSearcher.search(classQuery, hitCounter);
  return hitCounter.getTotalHits();
}
项目:read-open-source-code    文件:SimpleNaiveBayesClassifier.java   
private int getWordFreqForClass(String word, BytesRef c) throws IOException {
  // Disjunction over all text fields: the word may occur in any of them.
  BooleanQuery wordQuery = new BooleanQuery();
  for (String textFieldName : textFieldNames) {
    wordQuery.add(new BooleanClause(new TermQuery(new Term(textFieldName, word)), BooleanClause.Occur.SHOULD));
  }
  // Conjunction: word present AND document belongs to class c (AND user query, if any).
  BooleanQuery freqQuery = new BooleanQuery();
  freqQuery.add(new BooleanClause(wordQuery, BooleanClause.Occur.MUST));
  freqQuery.add(new BooleanClause(new TermQuery(new Term(classFieldName, c)), BooleanClause.Occur.MUST));
  if (query != null) {
    freqQuery.add(query, BooleanClause.Occur.MUST);
  }
  TotalHitCountCollector hitCounter = new TotalHitCountCollector();
  indexSearcher.search(freqQuery, hitCounter);
  return hitCounter.getTotalHits();
}
项目:read-open-source-code    文件:Grouping.java   
/**
 * {@inheritDoc}
 */
@Override
protected Collector createFirstPassCollector() throws IOException {
  // No groups requested: fall back to a plain hit counter so a total is still available.
  if (actualGroupsToFind <= 0) {
    fallBackCollector = new TotalHitCountCollector();
    return fallBackCollector;
  }
  // Default to relevance ordering when no explicit sort was supplied.
  if (sort == null) {
    sort = Sort.RELEVANCE;
  }
  firstPass = new TermFirstPassGroupingCollector(groupBy, sort, actualGroupsToFind);
  return firstPass;
}
项目:read-open-source-code    文件:Grouping.java   
/**
 * {@inheritDoc}
 */
@Override
protected Collector createFirstPassCollector() throws IOException {
  // No groups requested: fall back to a plain hit counter so a total is still available.
  if (actualGroupsToFind <= 0) {
    fallBackCollector = new TotalHitCountCollector();
    return fallBackCollector;
  }
  // Default to relevance ordering when no explicit sort was supplied.
  if (sort == null) {
    sort = Sort.RELEVANCE;
  }
  firstPass = new FunctionFirstPassGroupingCollector(groupBy, context, searcher.weightSort(sort), actualGroupsToFind);
  return firstPass;
}
项目:open-cyclos    文件:AdDAOImpl.java   
private AdCategoryWithCounterVO createCounter(final IndexSearcher searcher, final Query query, final Filters baseFilters, final AdCategory adCategory, final AdCategoryWithCounterVO parent, final int level) throws IOException {
    // Narrow the base filters to the current category and count matching ads.
    final Filters filters = (Filters) baseFilters.clone();
    filters.addTerms("category", adCategory.getId());
    final TotalHitCountCollector hitCounter = new TotalHitCountCollector();
    searcher.search(query, filters, hitCounter);
    final AdCategoryWithCounterVO counter = new AdCategoryWithCounterVO(adCategory.getId(), adCategory.getName(), level, hitCounter.getTotalHits(), parent);
    // Recurse into child categories, attaching each child counter to this node.
    // Note: recursion always starts from baseFilters, not the narrowed filters.
    for (final AdCategory childCategory : adCategory.getChildren()) {
        counter.addChild(createCounter(searcher, query, baseFilters, childCategory, counter, level + 1));
    }
    return counter;
}
项目:Maskana-Gestor-de-Conocimiento    文件:SimpleNaiveBayesClassifier.java   
private int countDocsWithClass() throws IOException {
  // Fast path: ask the codec for the per-field document count directly.
  int docCount = MultiFields.getTerms(this.atomicReader, this.classFieldName).getDocCount();
  if (docCount != -1) {
    return docCount;
  }
  // Fallback for codecs without getDocCount(): count documents that have any value
  // in the class field, optionally restricted by the user-supplied query.
  BooleanQuery classQuery = new BooleanQuery();
  Term anyClass = new Term(classFieldName, String.valueOf(WildcardQuery.WILDCARD_STRING));
  classQuery.add(new BooleanClause(new WildcardQuery(anyClass), BooleanClause.Occur.MUST));
  if (query != null) {
    classQuery.add(query, BooleanClause.Occur.MUST);
  }
  TotalHitCountCollector hitCounter = new TotalHitCountCollector();
  indexSearcher.search(classQuery, hitCounter);
  return hitCounter.getTotalHits();
}
项目:Maskana-Gestor-de-Conocimiento    文件:SimpleNaiveBayesClassifier.java   
private int getWordFreqForClass(String word, BytesRef c) throws IOException {
  // Disjunction over all text fields: the word may occur in any of them.
  BooleanQuery wordQuery = new BooleanQuery();
  for (String textFieldName : textFieldNames) {
    wordQuery.add(new BooleanClause(new TermQuery(new Term(textFieldName, word)), BooleanClause.Occur.SHOULD));
  }
  // Conjunction: word present AND document belongs to class c (AND user query, if any).
  BooleanQuery freqQuery = new BooleanQuery();
  freqQuery.add(new BooleanClause(wordQuery, BooleanClause.Occur.MUST));
  freqQuery.add(new BooleanClause(new TermQuery(new Term(classFieldName, c)), BooleanClause.Occur.MUST));
  if (query != null) {
    freqQuery.add(query, BooleanClause.Occur.MUST);
  }
  TotalHitCountCollector hitCounter = new TotalHitCountCollector();
  indexSearcher.search(freqQuery, hitCounter);
  return hitCounter.getTotalHits();
}
项目:elasticsearch_my    文件:NestedChildrenFilterTests.java   
// Verifies that NestedChildrenQuery matches exactly the child docs of each parent.
public void testNestedChildrenFilter() throws Exception {
    // Randomized sizes: some parents may legitimately have zero children.
    int numParentDocs = scaledRandomIntBetween(0, 32);
    int maxChildDocsPerParent = scaledRandomIntBetween(8, 16);

    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    for (int i = 0; i < numParentDocs; i++) {
        int numChildDocs = scaledRandomIntBetween(0, maxChildDocsPerParent);
        List<Document> docs = new ArrayList<>(numChildDocs + 1);
        for (int j = 0; j < numChildDocs; j++) {
            Document childDoc = new Document();
            childDoc.add(new StringField("type", "child", Field.Store.NO));
            docs.add(childDoc);
        }

        // The parent doc is added last in its block and stores its own child count,
        // which is read back below as the expected hit count per parent.
        Document parenDoc = new Document();
        parenDoc.add(new StringField("type", "parent", Field.Store.NO));
        parenDoc.add(new LegacyIntField("num_child_docs", numChildDocs, Field.Store.YES));
        docs.add(parenDoc);
        writer.addDocuments(docs);
    }

    IndexReader reader = writer.getReader();
    writer.close();

    IndexSearcher searcher = new IndexSearcher(reader);
    FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext();
    BitSetProducer parentFilter = new QueryBitSetProducer(new TermQuery(new Term("type", "parent")));
    Query childFilter = new TermQuery(new Term("type", "child"));
    int checkedParents = 0;
    final Weight parentsWeight = searcher.createNormalizedWeight(new TermQuery(new Term("type", "parent")), false);
    // Walk every parent doc in every segment and count its children via the filter.
    for (LeafReaderContext leaf : reader.leaves()) {
        DocIdSetIterator parents = parentsWeight.scorer(leaf).iterator();
        for (int parentDoc = parents.nextDoc(); parentDoc != DocIdSetIterator.NO_MORE_DOCS ; parentDoc = parents.nextDoc()) {
            // Expected child count was stored on the parent document at index time.
            int expectedChildDocs = leaf.reader().document(parentDoc).getField("num_child_docs").numericValue().intValue();
            hitContext.reset(null, leaf, parentDoc, searcher);
            NestedChildrenQuery nestedChildrenFilter = new NestedChildrenQuery(parentFilter, childFilter, hitContext);
            TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector();
            searcher.search(new ConstantScoreQuery(nestedChildrenFilter), totalHitCountCollector);
            assertThat(totalHitCountCollector.getTotalHits(), equalTo(expectedChildDocs));
            checkedParents++;
        }
    }
    // Every parent in the index must have been visited exactly once.
    assertThat(checkedParents, equalTo(numParentDocs));
    reader.close();
    dir.close();
}
项目:BabelMorph    文件:BabelMorph.java   
/**
 * Looks up morphological dictionary entries matching the given constraints.
 * All non-null arguments are ANDed together as term filters; language is always required.
 *
 * @param language the dictionary language (mandatory filter)
 * @param lemma    optional lemma to match, or null to ignore
 * @param word     optional surface form to match, or null to ignore
 * @param pos      optional part of speech to match, or null to ignore
 * @return one BabelMorphWord per matching dictionary document (possibly empty)
 * @throws IOException if the index search fails
 */
private  List<BabelMorphWord> getBabelMorphWord(Language language, String lemma, String word, POS pos)throws IOException
{
    BooleanQuery.Builder mainQuery = new BooleanQuery.Builder();

    Term termLanguage = new Term(BabelMorphIndexField.LANGUAGE.toString(), language.name());
    TermQuery termQueryLanguage = new TermQuery(termLanguage);
    mainQuery.add(termQueryLanguage, BooleanClause.Occur.MUST);

    if( word != null)
    {
        Term termForm =new Term(BabelMorphIndexField.FORM.toString(), word);
        TermQuery termQueryForm = new TermQuery(termForm);
        mainQuery.add(termQueryForm, BooleanClause.Occur.MUST);
    }

    if( lemma != null)
    {
        Term termLemma =new Term(BabelMorphIndexField.LEMMA.toString(), lemma);
        TermQuery termQueryLemma = new TermQuery(termLemma);
        mainQuery.add(termQueryLemma, BooleanClause.Occur.MUST);
    }

    if( pos != null )
    {
        Term termPos =new Term(BabelMorphIndexField.POS.toString(), pos.name());
        TermQuery temQueryPos = new TermQuery(termPos);
        mainQuery.add(temQueryPos, BooleanClause.Occur.MUST);
    }

    List<BabelMorphWord> listBabelMorphWord = new ArrayList<BabelMorphWord>();
    BooleanQuery bq = mainQuery.build();
    // First pass counts hits so the second pass can fetch exactly that many
    // (Math.max guards against search(query, 0), which Lucene rejects).
    TotalHitCountCollector collector = new TotalHitCountCollector();
    morphologicalDictionary.search(bq, collector);
    TopDocs topDocs = morphologicalDictionary.search(bq, Math.max(1, collector.getTotalHits()));
    ScoreDoc[] topScoreDocs = topDocs.scoreDocs;
    for(ScoreDoc sc : topScoreDocs)
    {
        Document doc = morphologicalDictionary.doc(sc.doc);

        String queryLemma = doc.get(BabelMorphIndexField.LEMMA.toString());
        String queryPos = doc.get(BabelMorphIndexField.POS.toString());
        POS p = POS.valueOf(queryPos);
        String[] queryForms = doc.getValues(BabelMorphIndexField.FORM.toString());
        String[] queryInfos = doc.getValues(BabelMorphIndexField.INFO.toString());
        // FORM and INFO are parallel multi-valued fields; pair them positionally.
        // NOTE(review): assumes both arrays always have equal length — verify at indexing time.
        SetMultimap<String, String> multimap = HashMultimap.create();
        for(int i=0;i<queryForms.length;i++)
            multimap.put(queryForms[i], queryInfos[i]); 

        BabelMorphWord bmw = new BabelMorphWord(queryLemma, language, p, multimap);
        listBabelMorphWord.add(bmw);
    }

    return listBabelMorphWord;
}
项目:semantic-knowledge-graph    文件:ScoreNormalizer.java   
private static int getTotalDocs(NodeContext context) throws IOException {
    // Count every live document by matching all docs with a plain hit counter.
    final TotalHitCountCollector hitCounter = new TotalHitCountCollector();
    context.req.getSearcher().search(new MatchAllDocsQuery(), hitCounter);
    return hitCounter.getTotalHits();
}
项目:lucene-addons    文件:SimpleTargetCounter.java   
/**
 * Simple utility method to get document counts for a given query.
 * This uses TotalHitCountCollector, so no hits are scored or materialized.
 *
 * @param query  query
 * @param reader reader
 * @return number of docs with a hit
 * @throws java.io.IOException if there is an exception from the searcher
 */
public int simpleDocCount(Query query, IndexReader reader) throws IOException {
  TotalHitCountCollector hitCounter = new TotalHitCountCollector();
  new IndexSearcher(reader).search(query, hitCounter);
  return hitCounter.getTotalHits();
}