Java Class org.apache.lucene.search.TopFieldCollector Code Examples
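This page collects real-world usages of org.apache.lucene.search.TopFieldCollector from open-source projects. They all share one core pattern: create a collector for a given Sort and hit count, run a search into it, then read the sorted TopDocs back out. The sketch below is a minimal, hedged illustration of that pattern, not code from any of the projects; the index path and the "title" field are placeholders, and the create() overload shown is the Lucene 4.x one that most of the snippets use (the exact boolean flags differ between Lucene versions).

import java.io.File;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.search.*;
import org.apache.lucene.store.FSDirectory;

public class TopFieldCollectorSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder index location and field name; point this at a real index.
    DirectoryReader reader = DirectoryReader.open(FSDirectory.open(new File("/path/to/index")));
    IndexSearcher searcher = new IndexSearcher(reader);

    Sort sort = new Sort(new SortField("title", SortField.Type.STRING));
    // (sort, numHits, fillFields, trackDocScores, trackMaxScore, docsScoredInOrder)
    TopFieldCollector collector = TopFieldCollector.create(sort, 10, true, false, false, true);

    searcher.search(new MatchAllDocsQuery(), collector);

    TopDocs topDocs = collector.topDocs();
    System.out.println("total hits: " + collector.getTotalHits());
    for (ScoreDoc sd : topDocs.scoreDocs) {
      System.out.println(searcher.doc(sd.doc).get("title"));
    }
    reader.close();
  }
}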

Project: search    File: DrillSideways.java
/**
 * Search, sorting by {@link Sort}, and computing
 * drill down and sideways counts.
 */
public DrillSidewaysResult search(DrillDownQuery query,
                                  Filter filter, FieldDoc after, int topN, Sort sort, boolean doDocScores,
                                  boolean doMaxScore) throws IOException {
  if (filter != null) {
    query = new DrillDownQuery(config, filter, query);
  }
  if (sort != null) {
    int limit = searcher.getIndexReader().maxDoc();
    if (limit == 0) {
      limit = 1; // the collector does not allow numHits = 0
    }
    topN = Math.min(topN, limit);
    final TopFieldCollector hitCollector = TopFieldCollector.create(sort,
                                                                    topN,
                                                                    after,
                                                                    true,
                                                                    doDocScores,
                                                                    doMaxScore,
                                                                    true);
    DrillSidewaysResult r = search(query, hitCollector);
    return new DrillSidewaysResult(r.facets, hitCollector.topDocs());
  } else {
    return search(after, query, topN);
  }
}
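For orientation, a hedged sketch of how a sorted DrillSideways search like the one above might be invoked; the DrillSideways instance, the DrillDownQuery, the Sort, and the usual org.apache.lucene imports are assumed to exist and are not taken from the project:

// Hypothetical caller of the sorted DrillSideways.search(...) above.
// `ds`, `ddq` and `sort` are placeholders; null filter, no "after" doc.
DrillSidewaysResult result = ds.search(ddq, null, null, 20, sort,
                                       /*doDocScores*/ false, /*doMaxScore*/ false);
TopDocs sortedHits = result.hits;   // top hits, ordered by `sort`
Facets facetCounts = result.facets; // drill-down / drill-sideways counts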
Project: search    File: ReRankQParserPlugin.java
public ReRankCollector(int reRankDocs,
                       int length,
                       Query reRankQuery,
                       double reRankWeight,
                       SolrIndexSearcher.QueryCommand cmd,
                       IndexSearcher searcher,
                       Map<BytesRef, Integer> boostedPriority) throws IOException {
  super(null);
  this.reRankQuery = reRankQuery;
  this.reRankDocs = reRankDocs;
  this.length = length;
  this.boostedPriority = boostedPriority;
  Sort sort = cmd.getSort();
  if(sort == null) {
    this.mainCollector = TopScoreDocCollector.create(Math.max(this.reRankDocs, length),true);
  } else {
    sort = sort.rewrite(searcher);
    this.mainCollector = TopFieldCollector.create(sort, Math.max(this.reRankDocs, length), false, true, true, true);
  }
  this.searcher = searcher;
  this.reRankWeight = reRankWeight;
}
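The constructor above follows a pattern that recurs throughout the Solr-derived snippets on this page: no Sort means relevance ranking with a TopScoreDocCollector, while an explicit Sort is first rewritten against the searcher and then handed to TopFieldCollector. A hedged, stand-alone version of that choice (helper name and flag values are illustrative, Lucene 4.x signatures, usual org.apache.lucene.search imports assumed):

static TopDocsCollector<?> newTopCollector(IndexSearcher searcher, Sort sort, int numHits)
    throws IOException {
  if (sort == null) {
    // relevance order: (numHits, docsScoredInOrder)
    return TopScoreDocCollector.create(numHits, true);
  }
  // resolve any per-searcher sort state before collecting
  sort = sort.rewrite(searcher);
  // (sort, numHits, fillFields, trackDocScores, trackMaxScore, docsScoredInOrder)
  return TopFieldCollector.create(sort, numHits, false, true, true, true);
}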
Project: search    File: ExpandComponent.java
public GroupExpandCollector(SortedDocValues docValues, FixedBitSet groupBits, IntOpenHashSet collapsedSet, int limit, Sort sort) throws IOException {
  int numGroups = collapsedSet.size();
  groups = new IntObjectOpenHashMap<>(numGroups * 2);
  collectors = new ArrayList<>();
  DocIdSetIterator iterator = groupBits.iterator();
  int group;
  while ((group = iterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
    Collector collector = (sort == null) ? TopScoreDocCollector.create(limit, true) : TopFieldCollector.create(sort, limit, false, false, false, true);
    groups.put(group, collector);
    collectors.add(collector);
  }

  this.collapsedSet = collapsedSet;
  this.groupBits = groupBits;
  this.docValues = docValues;
}
Project: neo4j-lucene5-index    File: TopDocsIterator.java
private TopDocs toTopDocs( Query query, QueryContext context, IndexSearcher searcher ) throws IOException
{
    Sort sorting = context != null ? context.getSorting() : null;
    TopDocs topDocs;
    if ( sorting == null && context != null )
    {
        topDocs = searcher.search( query, context.getTop() );
    }
    else
    {
        if ( context == null || !context.getTradeCorrectnessForSpeed() )
        {
            TopFieldCollector collector = LuceneDataSource.scoringCollector( sorting, context.getTop() );
            searcher.search( query, collector );
            topDocs = collector.topDocs();
        }
        else
        {
            topDocs = searcher.search( query, null, context.getTop(), sorting );
        }
    }
    return topDocs;
}
Project: community-edition-old    File: AlfrescoReRankQParserPlugin.java
public ReRankCollector(int reRankDocs,
                       int length,
                       Query reRankQuery,
                       double reRankWeight,
                       SolrIndexSearcher.QueryCommand cmd,
                       IndexSearcher searcher,
                       Map<BytesRef, Integer> boostedPriority,
                       boolean scale) throws IOException {
    super(null);
    this.reRankQuery = reRankQuery;
    this.reRankDocs = reRankDocs;
    this.length = length;
    this.boostedPriority = boostedPriority;
    this.scale = scale;
    Sort sort = cmd.getSort();
    if(sort == null) {
        this.mainCollector = TopScoreDocCollector.create(Math.max(this.reRankDocs, length),true);
    } else {
        sort = sort.rewrite(searcher);
        this.mainCollector = TopFieldCollector.create(sort, Math.max(this.reRankDocs, length), false, true, true, true);
    }
    this.searcher = searcher;
    this.reRankWeight = reRankWeight;
}
Project: NYBC    File: DrillSideways.java
/**
 * Search, sorting by {@link Sort}, and computing
 * drill down and sideways counts.
 */
public DrillSidewaysResult search(DrillDownQuery query,
                                  Filter filter, FieldDoc after, int topN, Sort sort, boolean doDocScores,
                                  boolean doMaxScore, FacetSearchParams fsp) throws IOException {
  if (filter != null) {
    query = new DrillDownQuery(filter, query);
  }
  if (sort != null) {
    final TopFieldCollector hitCollector = TopFieldCollector.create(sort,
                                                                    Math.min(topN, searcher.getIndexReader().maxDoc()),
                                                                    after,
                                                                    true,
                                                                    doDocScores,
                                                                    doMaxScore,
                                                                    true);
    DrillSidewaysResult r = new DrillSideways(searcher, taxoReader).search(query, hitCollector, fsp);
    r.hits = hitCollector.topDocs();
    return r;
  } else {
    return search(after, query, topN, fsp);
  }
}
Project: read-open-source-code    File: DrillSideways.java
/**
 * Search, sorting by {@link Sort}, and computing
 * drill down and sideways counts.
 */
public DrillSidewaysResult search(DrillDownQuery query,
                                  Filter filter, FieldDoc after, int topN, Sort sort, boolean doDocScores,
                                  boolean doMaxScore) throws IOException {
  if (filter != null) {
    query = new DrillDownQuery(config, filter, query);
  }
  if (sort != null) {
    int limit = searcher.getIndexReader().maxDoc();
    if (limit == 0) {
      limit = 1; // the collector does not allow numHits = 0
    }
    topN = Math.min(topN, limit);
    final TopFieldCollector hitCollector = TopFieldCollector.create(sort,
                                                                    topN,
                                                                    after,
                                                                    true,
                                                                    doDocScores,
                                                                    doMaxScore,
                                                                    true);
    DrillSidewaysResult r = search(query, hitCollector);
    return new DrillSidewaysResult(r.facets, hitCollector.topDocs());
  } else {
    return search(after, query, topN);
  }
}
Project: read-open-source-code    File: ReRankQParserPlugin.java
public ReRankCollector(int reRankDocs,
                       int length,
                       Query reRankQuery,
                       double reRankWeight,
                       SolrIndexSearcher.QueryCommand cmd,
                       IndexSearcher searcher,
                       Map<BytesRef, Integer> boostedPriority) throws IOException {
  super(null);
  this.reRankQuery = reRankQuery;
  this.reRankDocs = reRankDocs;
  this.length = length;
  this.boostedPriority = boostedPriority;
  Sort sort = cmd.getSort();
  if(sort == null) {
    this.mainCollector = TopScoreDocCollector.create(Math.max(this.reRankDocs, length),true);
  } else {
    sort = sort.rewrite(searcher);
    this.mainCollector = TopFieldCollector.create(sort, Math.max(this.reRankDocs, length), false, true, true, true);
  }
  this.searcher = searcher;
  this.reRankWeight = reRankWeight;
}
Project: read-open-source-code    File: ExpandComponent.java
public GroupExpandCollector(SortedDocValues docValues, FixedBitSet groupBits, IntOpenHashSet collapsedSet, int limit, Sort sort) throws IOException {
  int numGroups = collapsedSet.size();
  groups = new IntObjectOpenHashMap<>(numGroups * 2);
  collectors = new ArrayList<>();
  DocIdSetIterator iterator = groupBits.iterator();
  int group;
  while ((group = iterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
    Collector collector = (sort == null) ? TopScoreDocCollector.create(limit, true) : TopFieldCollector.create(sort, limit, false, false, false, true);
    groups.put(group, collector);
    collectors.add(collector);
  }

  this.collapsedSet = collapsedSet;
  this.groupBits = groupBits;
  this.docValues = docValues;
}
Project: neo4j-mobile-android    File: TopDocsIterator.java
private TopDocs toTopDocs( Query query, QueryContext context, IndexSearcher searcher ) throws IOException
{
    Sort sorting = context != null ? context.getSorting() : null;
    TopDocs topDocs = null;
    if ( sorting == null )
    {
        topDocs = searcher.search( query, context.getTop() );
    }
    else
    {
        boolean forceScore = context == null || !context.getTradeCorrectnessForSpeed();
        if ( forceScore )
        {
            TopFieldCollector collector = LuceneDataSource.scoringCollector( sorting, context.getTop() );
            searcher.search( query, collector );
            topDocs = collector.topDocs();
        }
        else
        {
            topDocs = searcher.search( query, null, context.getTop(), sorting );
        }
    }
    return topDocs;
}
Project: Maskana-Gestor-de-Conocimiento    File: TestEarlyTermination.java
public void testEarlyTermination() throws IOException {
  createRandomIndexes(5);
  final int numHits = _TestUtil.nextInt(random(), 1, numDocs / 10);
  final Sort sort = new Sort(new SortField("ndv1", SortField.Type.LONG, false));
  final boolean fillFields = random().nextBoolean();
  final boolean trackDocScores = random().nextBoolean();
  final boolean trackMaxScore = random().nextBoolean();
  final boolean inOrder = random().nextBoolean();
  final TopFieldCollector collector1 = TopFieldCollector.create(sort, numHits, fillFields, trackDocScores, trackMaxScore, inOrder);
  final TopFieldCollector collector2 = TopFieldCollector.create(sort, numHits, fillFields, trackDocScores, trackMaxScore, inOrder);

  final IndexSearcher searcher = newSearcher(reader);
  final int iters = atLeast(5);
  for (int i = 0; i < iters; ++i) {
    final TermQuery query = new TermQuery(new Term("s", RandomPicks.randomFrom(random(), terms)));
    searcher.search(query, collector1);
    searcher.search(query, new EarlyTerminatingSortingCollector(collector2, sorter, numHits));
  }
  assertTrue(collector1.getTotalHits() >= collector2.getTotalHits());
  assertTopDocsEquals(collector1.topDocs().scoreDocs, collector2.topDocs().scoreDocs);
}
Project: Maskana-Gestor-de-Conocimiento    File: DrillSideways.java
/**
 * Search, sorting by {@link Sort}, and computing
 * drill down and sideways counts.
 */
public DrillSidewaysResult search(DrillDownQuery query,
                                  Filter filter, FieldDoc after, int topN, Sort sort, boolean doDocScores,
                                  boolean doMaxScore, FacetSearchParams fsp) throws IOException {
  if (filter != null) {
    query = new DrillDownQuery(filter, query);
  }
  if (sort != null) {
    int limit = searcher.getIndexReader().maxDoc();
    if (limit == 0) {
      limit = 1; // the collector does not allow numHits = 0
    }
    topN = Math.min(topN, limit);
    final TopFieldCollector hitCollector = TopFieldCollector.create(sort,
                                                                    topN,
                                                                    after,
                                                                    true,
                                                                    doDocScores,
                                                                    doMaxScore,
                                                                    true);
    DrillSidewaysResult r = search(query, hitCollector, fsp);
    return new DrillSidewaysResult(r.facetResults, hitCollector.topDocs());
  } else {
    return search(after, query, topN, fsp);
  }
}
Project: elasticsearch_my    File: InnerHitsContext.java
@Override
public TopDocs topDocs(SearchContext context, FetchSubPhase.HitContext hitContext) throws IOException {
    Query rawParentFilter;
    if (parentObjectMapper == null) {
        rawParentFilter = Queries.newNonNestedFilter();
    } else {
        rawParentFilter = parentObjectMapper.nestedTypeFilter();
    }
    BitSetProducer parentFilter = context.bitsetFilterCache().getBitSetProducer(rawParentFilter);
    Query childFilter = childObjectMapper.nestedTypeFilter();
    Query q = Queries.filtered(query(), new NestedChildrenQuery(parentFilter, childFilter, hitContext));

    if (size() == 0) {
        return new TopDocs(context.searcher().count(q), Lucene.EMPTY_SCORE_DOCS, 0);
    } else {
        int topN = Math.min(from() + size(), context.searcher().getIndexReader().maxDoc());
        TopDocsCollector topDocsCollector;
        if (sort() != null) {
            try {
                topDocsCollector = TopFieldCollector.create(sort().sort, topN, true, trackScores(), trackScores());
            } catch (IOException e) {
                throw ExceptionsHelper.convertToElastic(e);
            }
        } else {
            topDocsCollector = TopScoreDocCollector.create(topN);
        }
        try {
            context.searcher().search(q, topDocsCollector);
        } finally {
            clearReleasables(Lifetime.COLLECTION);
        }
        return topDocsCollector.topDocs(from(), size());
    }
}
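As in the DrillSideways snippets, topN is clamped to maxDoc() before the collector is built: TopFieldCollector sizes its internal hit queue up front and does not accept a hit count of zero, so the size() == 0 case above is answered with a bare count rather than by building a collector at all.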
Project: flea-db    File: PaginatorImpl.java
public int getTotalHits() {
    if (totalHits == null) {
        try {
            TopDocsCollector documentCollector = TopFieldCollector.create(
                    this.sort, 1, null, false, false, false, false);
            searcher.search(this.q, documentCollector);
            this.totalHits = documentCollector.getTotalHits();
        } catch (IOException ex) {
            throw new RuntimeException(ex);
        }
    }
    return this.totalHits;
}
Project: flea-db    File: PaginatorImpl.java
private TopDocs queryDocuments(int pageSize, FieldDoc memento) throws IOException {
    TopDocsCollector documentCollector = TopFieldCollector.create(
            this.sort, pageSize, memento, true, false, false, false);
    searcher.search(this.q, documentCollector);
    this.totalHits = documentCollector.getTotalHits();
    return documentCollector.topDocs(0, pageSize);
}
Project: search    File: TestEarlyTermination.java
public void testEarlyTermination() throws IOException {
  final int iters = atLeast(8);
  for (int i = 0; i < iters; ++i) {
    createRandomIndex();
    for (int j = 0; j < iters; ++j) {
      final IndexSearcher searcher = newSearcher(reader);
      final int numHits = TestUtil.nextInt(random(), 1, numDocs);
      final Sort sort = new Sort(new SortField("ndv1", SortField.Type.LONG, false));
      final boolean fillFields = random().nextBoolean();
      final boolean trackDocScores = random().nextBoolean();
      final boolean trackMaxScore = random().nextBoolean();
      final boolean inOrder = random().nextBoolean();
      final TopFieldCollector collector1 = TopFieldCollector.create(sort, numHits, fillFields, trackDocScores, trackMaxScore, inOrder);
      final TopFieldCollector collector2 = TopFieldCollector.create(sort, numHits, fillFields, trackDocScores, trackMaxScore, inOrder);

      final Query query;
      if (random().nextBoolean()) {
        query = new TermQuery(new Term("s", RandomPicks.randomFrom(random(), terms)));
      } else {
        query = new MatchAllDocsQuery();
      }
      searcher.search(query, collector1);
      searcher.search(query, new EarlyTerminatingSortingCollector(collector2, sort, numHits));
      assertTrue(collector1.getTotalHits() >= collector2.getTotalHits());
      assertTopDocsEquals(collector1.topDocs().scoreDocs, collector2.topDocs().scoreDocs);
    }
    closeIndex();
  }
}
Project: search    File: TestEarlyTermination.java
public void testEarlyTerminationDifferentSorter() throws IOException {
  createRandomIndex();
  final int iters = atLeast(3);
  for (int i = 0; i < iters; ++i) {
    final IndexSearcher searcher = newSearcher(reader);
    // test that the collector works correctly when the index was sorted by a
    // different sorter than the one specified in the ctor.
    final int numHits = TestUtil.nextInt(random(), 1, numDocs);
    final Sort sort = new Sort(new SortField("ndv2", SortField.Type.LONG, false));
    final boolean fillFields = random().nextBoolean();
    final boolean trackDocScores = random().nextBoolean();
    final boolean trackMaxScore = random().nextBoolean();
    final boolean inOrder = random().nextBoolean();
    final TopFieldCollector collector1 = TopFieldCollector.create(sort, numHits, fillFields, trackDocScores, trackMaxScore, inOrder);
    final TopFieldCollector collector2 = TopFieldCollector.create(sort, numHits, fillFields, trackDocScores, trackMaxScore, inOrder);

    final Query query;
    if (random().nextBoolean()) {
      query = new TermQuery(new Term("s", RandomPicks.randomFrom(random(), terms)));
    } else {
      query = new MatchAllDocsQuery();
    }
    searcher.search(query, collector1);
    Sort different = new Sort(new SortField("ndv2", SortField.Type.LONG));
    searcher.search(query, new EarlyTerminatingSortingCollector(collector2, different, numHits) {
      @Override
      public void setNextReader(AtomicReaderContext context) throws IOException {
        super.setNextReader(context);
        assertFalse("segment should not be recognized as sorted as different sorter was used", segmentSorted);
      }
    });
    assertTrue(collector1.getTotalHits() >= collector2.getTotalHits());
    assertTopDocsEquals(collector1.topDocs().scoreDocs, collector2.topDocs().scoreDocs);
  }
  closeIndex();
}
Project: search    File: Grouping.java
TopDocsCollector newCollector(Sort sort, boolean needScores) throws IOException {
  int groupDocsToCollect = getMax(groupOffset, docsPerGroup, maxDoc);
  if (sort == null || sort == Sort.RELEVANCE) {
    return TopScoreDocCollector.create(groupDocsToCollect, true);
  } else {
    return TopFieldCollector.create(searcher.weightSort(sort), groupDocsToCollect, false, needScores, needScores, true);
  }
}
Project: NYBC    File: SolrIndexSearcher.java
protected DocList sortDocSet(DocSet set, Sort sort, int nDocs) throws IOException {
  if (nDocs == 0) {
    // SOLR-2923
    return new DocSlice(0, 0, new int[0], null, 0, 0f);
  }

  // bit of a hack to tell if a set is sorted - do it better in the future.
  boolean inOrder = set instanceof BitDocSet || set instanceof SortedIntDocSet;

  TopDocsCollector topCollector = TopFieldCollector.create(weightSort(sort), nDocs, false, false, false, inOrder);

  DocIterator iter = set.iterator();
  int base=0;
  int end=0;
  int readerIndex = 0;

  while (iter.hasNext()) {
    int doc = iter.nextDoc();
    while (doc>=end) {
      AtomicReaderContext leaf = leafContexts.get(readerIndex++);
      base = leaf.docBase;
      end = base + leaf.reader().maxDoc();
      topCollector.setNextReader(leaf);
      // we should never need to set the scorer given the settings for the collector
    }
    topCollector.collect(doc-base);
  }

  TopDocs topDocs = topCollector.topDocs(0, nDocs);

  int nDocsReturned = topDocs.scoreDocs.length;
  int[] ids = new int[nDocsReturned];

  for (int i=0; i<nDocsReturned; i++) {
    ScoreDoc scoreDoc = topDocs.scoreDocs[i];
    ids[i] = scoreDoc.doc;
  }

  return new DocSlice(0,nDocsReturned,ids,null,topDocs.totalHits,0.0f);
}
Project: lumongo    File: BasicStorageTest.java
private static int runQuery(IndexReader indexReader, int count, Query q) throws IOException {
    long start = System.currentTimeMillis();
    IndexSearcher searcher = new IndexSearcher(indexReader);

    Sort sort = new Sort();

    sort.setSort(new SortedSetSortField("category", false));

    TopFieldCollector collector = TopFieldCollector.create(sort, count, null, true, true, true);

    searcher.search(q, collector);

    ScoreDoc[] hits = collector.topDocs().scoreDocs;
    int totalHits = collector.getTotalHits();
    @SuppressWarnings("unused") long searchTime = System.currentTimeMillis() - start;

    start = System.currentTimeMillis();

    List<String> ids = new ArrayList<>();
    for (ScoreDoc hit : hits) {
        int docId = hit.doc;
        Document d = searcher.doc(docId);
        ids.add(d.get("uid"));

    }
    @SuppressWarnings("unused") long fetchTime = System.currentTimeMillis() - start;

    return totalHits;
}
Project: incubator-blur    File: BlurFieldCollector.java
@Override
public Collector newCollector() throws IOException {
  TopFieldCollector collector = TopFieldCollector.create(_sort, _numHitsToCollect, _after, true, true, false, true);
  Collector col = new StopExecutionCollector(collector, _running);
  if (_runSlow) {
    return new SlowCollector(col);
  }
  return col;
}
Project: incubator-blur    File: BlurFieldCollector.java
private TopFieldCollector getTopFieldCollector(Collector collector) {
  if (collector instanceof SlowCollector) {
    SlowCollector slowCollector = (SlowCollector) collector;
    return getTopFieldCollector(slowCollector.getCollector());
  } else if (collector instanceof StopExecutionCollector) {
    StopExecutionCollector stopExecutionCollector = (StopExecutionCollector) collector;
    return getTopFieldCollector(stopExecutionCollector.getCollector());
  } else if (collector instanceof TopFieldCollector) {
    TopFieldCollector topFieldCollector = (TopFieldCollector) collector;
    return topFieldCollector;
  } else {
    throw new RuntimeException("Collector type [" + collector + "] not supported.");
  }
}
Project: search-core    File: SolrIndexSearcher.java
protected DocList sortDocSet(DocSet set, Sort sort, int nDocs) throws IOException {
    if(nDocs == 0) {
        // SOLR-2923
        return new DocSlice(0, 0, new int[0], null, 0, 0f);
    }

    // bit of a hack to tell if a set is sorted - do it better in the future.
    boolean inOrder = set instanceof BitDocSet || set instanceof SortedIntDocSet;

    TopDocsCollector topCollector = TopFieldCollector.create(weightSort(sort), nDocs, false, false, false, inOrder);

    DocIterator iter = set.iterator();
    int base = 0;
    int end = 0;
    int readerIndex = 0;

    while(iter.hasNext()) {
        int doc = iter.nextDoc();
        while(doc >= end) {
            AtomicReaderContext leaf = leafContexts.get(readerIndex++);
            base = leaf.docBase;
            end = base + leaf.reader().maxDoc();
            topCollector.setNextReader(leaf);
            // we should never need to set the scorer given the settings for the collector
        }
        topCollector.collect(doc - base);
    }

    TopDocs topDocs = topCollector.topDocs(0, nDocs);

    int nDocsReturned = topDocs.scoreDocs.length;
    int[] ids = new int[nDocsReturned];

    for(int i = 0; i < nDocsReturned; i++) {
        ScoreDoc scoreDoc = topDocs.scoreDocs[i];
        ids[i] = scoreDoc.doc;
    }

    return new DocSlice(0, nDocsReturned, ids, null, topDocs.totalHits, 0.0f);
}
Project: read-open-source-code    File: Grouping.java
TopDocsCollector newCollector(Sort sort, boolean needScores) throws IOException {
  int groupDocsToCollect = getMax(groupOffset, docsPerGroup, maxDoc);
  if (sort == null || sort == Sort.RELEVANCE) {
    return TopScoreDocCollector.create(groupDocsToCollect, true);
  } else {
    return TopFieldCollector.create(searcher.weightSort(sort), groupDocsToCollect, false, needScores, needScores, true);
  }
}
Project: VillageElder    File: Search.java
/**
 * Returns a Collector instance for this search that can be used with an
 * IndexSearcher to return results from the index.  If one has not already
 * been created (by calling {@link Search#getCollector(int)}), one will
 * be created with a default max hit count of 100 documents.
 * @return A Collector suitable for use with an IndexSearcher.
 * @throws IOException A fatal exception occurred while interacting
 * with the index.
 */
public TopFieldCollector getCollector() throws IOException {
   if (collector == null) {
      collector =
            TopFieldCollector.create(
                  sort,
                  DEFAULT_HITS,
                  true,
                  false,
                  false,
                  false);
   }
   return collector;
}
Project: VillageElder    File: Search.java
/**
 * Given a maximum hit count, returns a Collector instance for this search
 * that can be used with an IndexSearcher to return results from the index.
 * @param count The maximum hit count, or number of documents, to return
 * from the search.  If this method has already been called, calling it
 * again will cause the argument to be ignored and the preexisting Collector
 * will be returned.  In that case, you might want to use the overload
 * of this method that has no argument.
 * @return A Collector suitable for use with an IndexSearcher.
 * @throws IOException A fatal exception occurred while interacting with
 * the index.
 */
public TopFieldCollector getCollector(final int count) throws IOException {
   if (collector == null) {
      collector =
            TopFieldCollector.create(
                  sort,
                  count,
                  true,
                  false,
                  false,
                  false);
   }
   return collector;
}
Project: VillageElder    File: SearcherTest.java
private TopFieldCollector getDummyCollector() throws Exception {
   TopFieldCollector collector =
         TopFieldCollector.create(
               Sort.RELEVANCE,
               100,
               true,
               false,
               false,
               false);

   return collector;
}
Project: VillageElder    File: SearchResultTest.java
/**
 * Test method for {@link com.fuerve.villageelder.actions.results.SearchResult#aggregate(com.fuerve.villageelder.actions.results.SearchResultItem)}.
 */
@Test
public final void testAggregateSearchResultItem() throws Exception {
   Directory indexDirectoryExpected = new RAMDirectory();
   Directory taxonomyDirectoryExpected = new RAMDirectory();

   buildDummyIndex(indexDirectoryExpected, taxonomyDirectoryExpected);

   IndexReader reader = DirectoryReader.open(indexDirectoryExpected);
   IndexSearcher searcher = new IndexSearcher(reader);
   TaxonomyReader taxo = new DirectoryTaxonomyReader(taxonomyDirectoryExpected);

   QueryParser parser =
         new SearchQueryParser(
               Lucene.LUCENE_VERSION,
               Lucene.DEFAULT_QUERY_FIELD,
               Lucene.getPerFieldAnalyzer()
         );

   TopFieldCollector indexCollector = getDummyCollector();
   FacetsCollector facetsCollector = getDummyFacetsCollector((DirectoryReader) reader, taxo);
   Collector collector = MultiCollector.wrap(indexCollector, facetsCollector);
   searcher.search(parser.parse("Revision:5*"), collector);
   facetsCollector.getFacetResults();

   SearchResult target = new SearchResult();
   target.aggregate(new SearchResultItem(indexCollector.topDocs(), facetsCollector.getFacetResults()));

   assertEquals(2, target.getTopDocs().totalHits);
   assertEquals(1, target.getFacetResults().size());
}
Project: VillageElder    File: SearchResultTest.java
private TopFieldCollector getDummyCollector() throws Exception {
   TopFieldCollector collector =
         TopFieldCollector.create(
               Sort.RELEVANCE,
               100,
               true,
               false,
               false,
               false);

   return collector;
}
Project: VillageElder    File: SearchResultItemTest.java
/**
 * Test method for {@link com.fuerve.villageelder.actions.results.SearchResultItem#SearchResultItem(org.apache.lucene.search.TopDocs, java.util.List)}.
 */
@Test
public final void testSearchResultItem() throws Exception {
   Directory indexDirectoryExpected = new RAMDirectory();
   Directory taxonomyDirectoryExpected = new RAMDirectory();

   buildDummyIndex(indexDirectoryExpected, taxonomyDirectoryExpected);

   IndexReader reader = DirectoryReader.open(indexDirectoryExpected);
   IndexSearcher searcher = new IndexSearcher(reader);
   TaxonomyReader taxo = new DirectoryTaxonomyReader(taxonomyDirectoryExpected);

   QueryParser parser =
         new SearchQueryParser(
               Lucene.LUCENE_VERSION,
               Lucene.DEFAULT_QUERY_FIELD,
               Lucene.getPerFieldAnalyzer()
         );

   TopFieldCollector indexCollector = getDummyCollector();
   FacetsCollector facetsCollector = getDummyFacetsCollector((DirectoryReader) reader, taxo);
   Collector collector = MultiCollector.wrap(indexCollector, facetsCollector);
   searcher.search(parser.parse("Revision:5*"), collector);
   facetsCollector.getFacetResults();
   SearchResultItem target = new SearchResultItem(indexCollector.topDocs(), facetsCollector.getFacetResults());

   assertEquals(2, target.getTopDocs().totalHits);
   assertEquals(1, target.getFacetResults().size());
}
Project: VillageElder    File: SearchResultItemTest.java
private TopFieldCollector getDummyCollector() throws Exception {
   TopFieldCollector collector =
         TopFieldCollector.create(
               Sort.RELEVANCE,
               100,
               true,
               false,
               false,
               false);

   return collector;
}
Project: Maskana-Gestor-de-Conocimiento    File: TestEarlyTermination.java
public void testEarlyTerminationDifferentSorter() throws IOException {
  // test that the collector works correctly when the index was sorted by a
  // different sorter than the one specified in the ctor.
  createRandomIndexes(5);
  final int numHits = _TestUtil.nextInt(random(), 1, numDocs / 10);
  final Sort sort = new Sort(new SortField("ndv2", SortField.Type.LONG, false));
  final boolean fillFields = random().nextBoolean();
  final boolean trackDocScores = random().nextBoolean();
  final boolean trackMaxScore = random().nextBoolean();
  final boolean inOrder = random().nextBoolean();
  final TopFieldCollector collector1 = TopFieldCollector.create(sort, numHits, fillFields, trackDocScores, trackMaxScore, inOrder);
  final TopFieldCollector collector2 = TopFieldCollector.create(sort, numHits, fillFields, trackDocScores, trackMaxScore, inOrder);

  final IndexSearcher searcher = newSearcher(reader);
  final int iters = atLeast(5);
  for (int i = 0; i < iters; ++i) {
    final TermQuery query = new TermQuery(new Term("s", RandomPicks.randomFrom(random(), terms)));
    searcher.search(query, collector1);
    searcher.search(query, new EarlyTerminatingSortingCollector(collector2, new NumericDocValuesSorter("ndv2"), numHits) {
      @Override
      public void setNextReader(AtomicReaderContext context) throws IOException {
        super.setNextReader(context);
        assertFalse("segment should not be recognized as sorted as different sorter was used", segmentSorted);
      }
    });
  }
  assertTrue(collector1.getTotalHits() >= collector2.getTotalHits());
  assertTopDocsEquals(collector1.topDocs().scoreDocs, collector2.topDocs().scoreDocs);
}
Project: neo4j-lucene5-index    File: LuceneDataSource.java
static TopFieldCollector scoringCollector( Sort sorting, int n ) throws IOException
{
    return TopFieldCollector.create( sorting, n, false, true, false );
}
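The five-argument create(...) here is the Lucene 5.x form of the API: the trailing docsScoredInOrder flag carried by the six- and seven-argument overloads in the 4.x-based snippets above was dropped when the collector API was reworked in Lucene 5.0. This note is a reading aid for comparing the snippets, not part of any project; check the Javadocs of the exact version in use.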
Project: neo4j-mobile-android    File: Hits.java
/**
   * Tries to add new documents to hitDocs.
   * Ensures that the hit numbered <code>min</code> has been retrieved.
   */
  private final void getMoreDocs(int min) throws IOException {
    if (hitDocs.size() > min) {
      min = hitDocs.size();
    }

    int n = min * 2;    // double # retrieved
//  TopDocs topDocs = (sort == null) ? searcher.search(weight, filter, n) : searcher.search(weight, filter, n, sort);
    TopDocs topDocs = null;
    if ( sort == null )
    {
        topDocs = searcher.search( weight, filter, n );
    }
    else
    {
        if ( this.score )
        {
            TopFieldCollector collector = LuceneDataSource.scoringCollector( sort, n );
            searcher.search( weight, null, collector );
            topDocs = collector.topDocs();
        }
        else
        {
            topDocs = searcher.search( weight, filter, n, sort );
        }
    }


    length = topDocs.totalHits;
    ScoreDoc[] scoreDocs = topDocs.scoreDocs;

    float scoreNorm = 1.0f;

    if (length > 0 && topDocs.getMaxScore() > 1.0f) {
      scoreNorm = 1.0f / topDocs.getMaxScore();
    }

    int start = hitDocs.size() - nDeletedHits;

    // any new deletions?
    int nDels2 = countDeletions(searcher);
    debugCheckedForDeletions = false;
    if (nDeletions < 0 || nDels2 > nDeletions) {
      // either we cannot count deletions, or some "previously valid hits" might have been deleted, so find exact start point
      nDeletedHits = 0;
      debugCheckedForDeletions = true;
      int i2 = 0;
      for (int i1=0; i1<hitDocs.size() && i2<scoreDocs.length; i1++) {
        int id1 = ((HitDoc)hitDocs.get(i1)).id;
        int id2 = scoreDocs[i2].doc;
        if (id1 == id2) {
          i2++;
        } else {
          nDeletedHits ++;
        }
      }
      start = i2;
    }

    int end = scoreDocs.length < length ? scoreDocs.length : length;
    length += nDeletedHits;
    for (int i = start; i < end; i++) {
      hitDocs.addElement(new HitDoc(scoreDocs[i].score * scoreNorm,
                                    scoreDocs[i].doc));
    }

    nDeletions = nDels2;
  }
Project: neo4j-mobile-android    File: LuceneDataSource.java
static TopFieldCollector scoringCollector( Sort sorting, int n ) throws IOException
{
    return TopFieldCollector.create( sorting, n, false, true, false, true );
}
Project: meresco-lucene    File: TopFieldSuperCollector.java
@Override
protected TopDocSubCollector<TopFieldSuperCollector> createSubCollector() throws IOException {
    return new TopDocSubCollector<TopFieldSuperCollector>(TopFieldCollector.create(this.sort,
            this.numHits, /*fillFields*/ true, this.trackDocScores, this.trackMaxScore), this);
}
Project: VillageElder    File: SearcherTest.java
/**
 * Test method for {@link com.fuerve.villageelder.search.Searcher#search(org.apache.lucene.search.Collector)}.
 */
@SuppressWarnings("unused")
@Test
public final void testSearchCollector() throws Exception {
   // Gather declared fields.
   Field indexDirectoryField = Searcher.class.getDeclaredField("indexDirectory");
   Field taxonomyDirectoryField = Searcher.class.getDeclaredField("taxonomyDirectory");
   Field indexDirectoryNameField = Searcher.class.getDeclaredField("indexDirectoryName");
   Field taxonomyDirectoryNameField = Searcher.class.getDeclaredField("taxonomyDirectoryName");
   Field stringDirectoriesField = Searcher.class.getDeclaredField("stringDirectories");
   Field initializedField = Searcher.class.getDeclaredField("initialized");
   Field searchField = Searcher.class.getDeclaredField("search");
   Field indexReaderField = Searcher.class.getDeclaredField("indexReader");
   Field indexSearcherField = Searcher.class.getDeclaredField("indexSearcher");
   Field taxonomyReaderField = Searcher.class.getDeclaredField("taxonomyReader");

   indexDirectoryField.setAccessible(true);
   taxonomyDirectoryField.setAccessible(true);
   indexDirectoryNameField.setAccessible(true);
   taxonomyDirectoryNameField.setAccessible(true);
   stringDirectoriesField.setAccessible(true);
   initializedField.setAccessible(true);
   searchField.setAccessible(true);
   indexReaderField.setAccessible(true);
   indexSearcherField.setAccessible(true);
   taxonomyReaderField.setAccessible(true);

   // Setup
   Directory indexDirectoryExpected = new RAMDirectory();
   Directory taxonomyDirectoryExpected = new RAMDirectory();

   buildDummyIndex(indexDirectoryExpected, taxonomyDirectoryExpected);

   Searcher target = new Searcher(indexDirectoryExpected, taxonomyDirectoryExpected);
   target.initializeSearch();

   // Gather field values.
   Directory indexDirectoryActual = (Directory) indexDirectoryField.get(target);
   Directory taxonomyDirectoryActual = (Directory) taxonomyDirectoryField.get(target);
   String indexDirectoryNameActual = (String) indexDirectoryNameField.get(target);
   String taxonomyDirectoryNameActual = (String) taxonomyDirectoryNameField.get(target);
   boolean stringDirectoriesActual = stringDirectoriesField.getBoolean(target);
   boolean initializedActual = initializedField.getBoolean(target);
   Search searchFieldActual = (Search) searchField.get(target);
   IndexReader indexReaderActual = (IndexReader) indexReaderField.get(target);
   IndexSearcher indexSearcherActual = (IndexSearcher) indexSearcherField.get(target);
   TaxonomyReader taxonomyReaderActual = (TaxonomyReader) taxonomyReaderField.get(target);

   // Create the Collector to be passed in and execute a search to populate it.
   final TopFieldCollector collector = getDummyCollector();
   final FacetsCollector facetsCollector =
         getDummyFacetsCollector((DirectoryReader) indexReaderActual, taxonomyReaderActual);
   final Collector testCollector = MultiCollector.wrap(collector, facetsCollector);

   target.createSearch("Revision:5*");
   target.search(testCollector);

   // Test
   assertEquals(true, initializedActual);
   assertEquals(2, collector.topDocs().totalHits);
}