Java class org.apache.lucene.search.TopScoreDocCollector example source code
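TopScoreDocCollector collects the top-N hits of a query, ordered by descending score. The project snippets below span several Lucene versions: in 4.x the usual form is create(int numHits, boolean docsScoredInOrder), while 5.x dropped the boolean. As a point of reference, here is a minimal, self-contained sketch against the Lucene 5.x API; the index contents and field names are illustrative:

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class TopScoreDocCollectorDemo {
    public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory(); // in-memory index, enough for a demo
        StandardAnalyzer analyzer = new StandardAnalyzer();
        try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(analyzer))) {
            Document doc = new Document();
            doc.add(new TextField("body", "lucene collects the top scoring documents", Field.Store.YES));
            writer.addDocument(doc);
        }
        try (DirectoryReader reader = DirectoryReader.open(dir)) {
            IndexSearcher searcher = new IndexSearcher(reader);
            Query query = new QueryParser("body", analyzer).parse("lucene");
            TopScoreDocCollector collector = TopScoreDocCollector.create(10); // keep the best 10
            searcher.search(query, collector);
            for (ScoreDoc hit : collector.topDocs().scoreDocs) {
                System.out.println(searcher.doc(hit.doc).get("body") + "  score=" + hit.score);
            }
        }
    }
}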

Project: searsiaserver    File: SearchResultIndex.java
public SearchResult search (String queryString, int hitsPerPage) throws IOException  {
    SearchResult result = new SearchResult();
    result.setQuery(queryString);
    TopScoreDocCollector collector;
    ScoreDoc[] docs;
    Query query;
    try { // create a new QueryParser each time, because it is not thread-safe
        query = new QueryParser("terms", new StandardAnalyzer()).parse(QueryParser.escape(queryString));
    } catch (ParseException e) {
        throw new IOException(e);
    }
    collector = TopScoreDocCollector.create(hitsPerPage, true);
    if (hitsSearcher == null) openReader(); // reopen index to see updates.
    hitsSearcher.search(query, collector);
    docs = collector.topDocs().scoreDocs;
    for(ScoreDoc doc: docs) {
        int docId = doc.doc;
        Document d = hitsSearcher.doc(docId);
        Hit hit = new Hit(d.get("result"));
        hit.put("score", doc.score);
        hit.remove("query"); // remove for privacy reasons
        result.addHit(hit);
    }
    return result;
}
Project: searsiaserver    File: SearchResultIndex.java
/**
 * Get Hit by Lucene id. Used for tests only
 * @param hitId
 * @return hit
 * @throws IOException
 */
protected Hit getHit(String hitId) throws IOException {
    Term term = new Term("id", hitId);
    Query query = new TermQuery(term);
    TopScoreDocCollector collector = TopScoreDocCollector.create(1, true);
    if (hitsSearcher == null) openReader();
    hitsSearcher.search(query, collector);
    if (collector.getTotalHits() > 0) {
        ScoreDoc[] docs = collector.topDocs().scoreDocs;
        Document doc = hitsSearcher.doc(docs[0].doc);
        Hit hit = new Hit(doc.get("result"));
        return hit;
    } else {
        return null;
    }
}
Project: naisc    File: Corpus.java
public Object2DoubleMap<String> getVector(Language lang, String value) {
    Object2DoubleMap<String> uriWeightMap = new Object2DoubleOpenHashMap<>();

    try {
        Search luceneSearch = search();
        final String field = getContentFieldName(lang);

        TopScoreDocCollector docsCollector = luceneSearch.search(value, field);

        ScoreDoc[] scoreDocs = docsCollector.topDocs().scoreDocs;

        double score = 0.0;
        for(int i=0;i<scoreDocs.length;++i) {
            int docID = scoreDocs[i].doc;
            score = scoreDocs[i].score;
            Document document = luceneSearch.getDocumentWithDocID(docID);
            String uri = document.get(fieldNameURI);
            uriWeightMap.put(uri, score);
        }
        return uriWeightMap;
    } catch(IOException x) {
        throw new RuntimeException(x);
    }
}
Project: search    File: ReRankQParserPlugin.java
public ReRankCollector(int reRankDocs,
                       int length,
                       Query reRankQuery,
                       double reRankWeight,
                       SolrIndexSearcher.QueryCommand cmd,
                       IndexSearcher searcher,
                       Map<BytesRef, Integer> boostedPriority) throws IOException {
  super(null);
  this.reRankQuery = reRankQuery;
  this.reRankDocs = reRankDocs;
  this.length = length;
  this.boostedPriority = boostedPriority;
  Sort sort = cmd.getSort();
  if(sort == null) {
    this.mainCollector = TopScoreDocCollector.create(Math.max(this.reRankDocs, length),true);
  } else {
    sort = sort.rewrite(searcher);
    this.mainCollector = TopFieldCollector.create(sort, Math.max(this.reRankDocs, length), false, true, true, true);
  }
  this.searcher = searcher;
  this.reRankWeight = reRankWeight;
}
Project: search    File: ExpandComponent.java
public GroupExpandCollector(SortedDocValues docValues, FixedBitSet groupBits, IntOpenHashSet collapsedSet, int limit, Sort sort) throws IOException {
  int numGroups = collapsedSet.size();
  groups = new IntObjectOpenHashMap<>(numGroups * 2);
  collectors = new ArrayList<>();
  DocIdSetIterator iterator = groupBits.iterator();
  int group;
  while ((group = iterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
    Collector collector = (sort == null) ? TopScoreDocCollector.create(limit, true) : TopFieldCollector.create(sort, limit, false, false, false, true);
    groups.put(group, collector);
    collectors.add(collector);
  }

  this.collapsedSet = collapsedSet;
  this.groupBits = groupBits;
  this.docValues = docValues;
}
Project: appformer    File: LuceneSearchIndex.java
private List<KObject> search(final Query query,
                             final int totalNumHitsEstimate,
                             final IOSearchService.Filter filter,
                             final ClusterSegment... clusterSegments) {
    final TopScoreDocCollector collector = TopScoreDocCollector.create(totalNumHitsEstimate);
    final IndexSearcher index = indexManager.getIndexSearcher(clusterSegments);
    final List<KObject> result = new ArrayList<KObject>();
    try {
        index.search(query,
                     collector);
        final ScoreDoc[] hits = collector.topDocs(0).scoreDocs;
        for (int i = 0; i < hits.length; i++) {
            final KObject kObject = toKObject(index.doc(hits[i].doc));
            if (filter.accept(kObject)) {
                result.add(kObject);
            }
        }
    } catch (final Exception ex) {
        throw new RuntimeException("Error during Query!",
                                   ex);
    } finally {
        indexManager.release(index);
    }

    return result;
}
Project: openimaj    File: QuickSearcher.java
/**
 * Given a field to search, the names of the fields to return results in,
 * and a query string, return search results up to the limit.
 * 
 * @param searchfieldName
 * @param returnFieldName
 * @param queryStr
 * @param limit
 * @return search results (with confidences)
 * @throws ParseException
 * @throws IOException
 */
public HashMap<String[], Float> search(String searchfieldName,
        String[] returnFieldName, String queryStr, int limit)
        throws ParseException, IOException {
    if (queryStr == null || queryStr.length() == 0)
        return new HashMap<String[], Float>();
    final String clean = QueryParser.escape(queryStr);
    final Query q = new QueryParser(Version.LUCENE_40, searchfieldName,
            analyser).parse(clean);
    final TopScoreDocCollector collector = TopScoreDocCollector.create(
            limit, true);

    searcher.search(q, collector);
    final ScoreDoc[] hits = collector.topDocs().scoreDocs;
    final HashMap<String[], Float> results = new HashMap<String[], Float>();
    for (int i = 0; i < hits.length; ++i) {
        final int docId = hits[i].doc;
        final Document d = searcher.doc(docId);
        String[] rvalues = new String[returnFieldName.length];
        for(int j=0;j<rvalues.length;j++){
            rvalues[j]=d.get(returnFieldName[j]);
        }
        results.put(rvalues, hits[i].score);
    }
    return results;
}
Project: community-edition-old    File: AlfrescoReRankQParserPlugin.java
public ReRankCollector(int reRankDocs,
                       int length,
                       Query reRankQuery,
                       double reRankWeight,
                       SolrIndexSearcher.QueryCommand cmd,
                       IndexSearcher searcher,
                       Map<BytesRef, Integer> boostedPriority,
                       boolean scale) throws IOException {
    super(null);
    this.reRankQuery = reRankQuery;
    this.reRankDocs = reRankDocs;
    this.length = length;
    this.boostedPriority = boostedPriority;
    this.scale = scale;
    Sort sort = cmd.getSort();
    if(sort == null) {
        this.mainCollector = TopScoreDocCollector.create(Math.max(this.reRankDocs, length),true);
    } else {
        sort = sort.rewrite(searcher);
        this.mainCollector = TopFieldCollector.create(sort, Math.max(this.reRankDocs, length), false, true, true, true);
    }
    this.searcher = searcher;
    this.reRankWeight = reRankWeight;
}
Project: NYBC    File: TestMultipleCategoryLists.java
private FacetsCollector performSearch(FacetIndexingParams iParams, TaxonomyReader tr, IndexReader ir, 
    IndexSearcher searcher) throws IOException {
  // step 1: collect matching documents into a collector
  Query q = new MatchAllDocsQuery();
  TopScoreDocCollector topDocsCollector = TopScoreDocCollector.create(10, true);

  List<FacetRequest> facetRequests = new ArrayList<FacetRequest>();
  facetRequests.add(new CountFacetRequest(new CategoryPath("Band"), 10));
  CountFacetRequest bandDepth = new CountFacetRequest(new CategoryPath("Band"), 10);
  bandDepth.setDepth(2);
  // makes it easier to check the results in the test.
  bandDepth.setResultMode(ResultMode.GLOBAL_FLAT);
  facetRequests.add(bandDepth);
  facetRequests.add(new CountFacetRequest(new CategoryPath("Author"), 10));
  facetRequests.add(new CountFacetRequest(new CategoryPath("Band", "Rock & Pop"), 10));

  // Faceted search parameters indicate which facets we are interested in
  FacetSearchParams facetSearchParams = new FacetSearchParams(iParams, facetRequests);

  // perform document search and facet accumulation
  FacetsCollector facetsCollector = FacetsCollector.create(facetSearchParams, ir, tr);
  searcher.search(q, MultiCollector.wrap(topDocsCollector, facetsCollector));
  return facetsCollector;
}
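The MultiCollector.wrap call above is the general mechanism for feeding a single search pass into several collectors at once. Below is a minimal sketch of the same idea, using the Lucene 4.x create(int, boolean) overload this snippet uses; the method name is hypothetical, and TotalHitCountCollector stands in for any second collector:

static TopDocs searchWithCount(IndexSearcher searcher, Query query) throws IOException {
  TopScoreDocCollector topDocs = TopScoreDocCollector.create(10, true);
  TotalHitCountCollector counter = new TotalHitCountCollector();
  // one pass over the index fills both collectors
  searcher.search(query, MultiCollector.wrap(topDocs, counter));
  System.out.println("total matches: " + counter.getTotalHits()); // every match, not just the top 10
  return topDocs.topDocs(); // the 10 highest-scoring hits
}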
Project: FuzzyClassifier    File: MovieIndex.java
public Map<Movie, Float> getNeighbours(final Movie movie,
        final int numberOfHits) throws ParseException, IOException {
    final StandardAnalyzer analyzer = new StandardAnalyzer(
            Version.LUCENE_46);
    final QueryParser parser = new QueryParser(Version.LUCENE_46,
            FIELD_SUMMARY, analyzer);
    final Query query = parser.parse(movie.toString());
    final TopScoreDocCollector collector = TopScoreDocCollector.create(
            numberOfHits, true);

    final IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(
            writer, false));
    searcher.search(query, collector);

    final Map<Movie, Float> result = new LinkedHashMap<Movie, Float>();
    final ScoreDoc[] scoreDocs = collector.topDocs().scoreDocs;
    for (final ScoreDoc doc : scoreDocs) {
        result.put(indexedMovies.get(doc.doc), 1 - doc.score);
    }
    return result;
}
Project: read-open-source-code    File: ReRankQParserPlugin.java
public ReRankCollector(int reRankDocs,
                       int length,
                       Query reRankQuery,
                       double reRankWeight,
                       SolrIndexSearcher.QueryCommand cmd,
                       IndexSearcher searcher,
                       Map<BytesRef, Integer> boostedPriority) throws IOException {
  super(null);
  this.reRankQuery = reRankQuery;
  this.reRankDocs = reRankDocs;
  this.length = length;
  this.boostedPriority = boostedPriority;
  Sort sort = cmd.getSort();
  if(sort == null) {
    this.mainCollector = TopScoreDocCollector.create(Math.max(this.reRankDocs, length),true);
  } else {
    sort = sort.rewrite(searcher);
    this.mainCollector = TopFieldCollector.create(sort, Math.max(this.reRankDocs, length), false, true, true, true);
  }
  this.searcher = searcher;
  this.reRankWeight = reRankWeight;
}
Project: read-open-source-code    File: ExpandComponent.java
public GroupExpandCollector(SortedDocValues docValues, FixedBitSet groupBits, IntOpenHashSet collapsedSet, int limit, Sort sort) throws IOException {
  int numGroups = collapsedSet.size();
  groups = new IntObjectOpenHashMap<>(numGroups * 2);
  collectors = new ArrayList<>();
  DocIdSetIterator iterator = groupBits.iterator();
  int group;
  while ((group = iterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
    Collector collector = (sort == null) ? TopScoreDocCollector.create(limit, true) : TopFieldCollector.create(sort, limit, false, false, false, true);
    groups.put(group, collector);
    collectors.add(collector);
  }

  this.collapsedSet = collapsedSet;
  this.groupBits = groupBits;
  this.docValues = docValues;
}
Project: lucene-addons    File: TestOverallSpanQueryParser.java
private void compareHits(SpanQueryParser p, String s, IndexSearcher searcher, int ... docids) throws Exception{
  Query q = p.parse(s);
  TopScoreDocCollector results = TopScoreDocCollector.create(1000);
  searcher.search(q, results);
  ScoreDoc[] scoreDocs = results.topDocs().scoreDocs;
  Set<Integer> hits = new HashSet<>();

  for (int i = 0; i < scoreDocs.length; i++) {
    hits.add(scoreDocs[i].doc);
  }
  assertEquals(docids.length, hits.size());

  for (int i = 0; i < docids.length; i++) {
    assertTrue("couldn't find " + Integer.toString(docids[i]) + " among the hits", hits.contains(docids[i]));
  }
}
Project: Easy-Cassandra-samples    File: MusicSearch.java
private List<String> returnMusics(Query query) throws IOException {
    int hitsPerPage = 10;
    IndexReader reader = DirectoryReader.open(LuceneUtil.INSTANCE.getDirectory());
    IndexSearcher searcher = new IndexSearcher(reader);
    TopScoreDocCollector collector = TopScoreDocCollector.create(
            hitsPerPage, true);
    searcher.search(query, collector);
    ScoreDoc[] hits = collector.topDocs().scoreDocs;

    List<String> musics = new LinkedList<>();
    for (int i = 0; i < hits.length; ++i) {
        int docId = hits[i].doc;
        Document d = searcher.doc(docId);
        musics.add(d.get(COLUMN_NAME));
    }
    return musics;
}
Project: Easy-Cassandra-samples    File: ResumeSearch.java
private List<String> returnResume(Query query) throws IOException {
    int hitsPerPage = 10;
    IndexReader reader = DirectoryReader.open(LuceneUtil.INSTANCE.getDirectory());
    IndexSearcher searcher = new IndexSearcher(reader);
    TopScoreDocCollector collector = TopScoreDocCollector.create(
            hitsPerPage, true);
    searcher.search(query, collector);
    ScoreDoc[] hits = collector.topDocs().scoreDocs;

    List<String> resumeIDs = new LinkedList<>();
    for (int i = 0; i < hits.length; ++i) {
        int docId = hits[i].doc;
        Document d = searcher.doc(docId);
        resumeIDs.add(d.get(COLUMN_NICk_NAME));
    }
    return resumeIDs;
}
Project: cl-esa    File: ArticlesOTDFProcessor.java
private String searchUri(String title, Language language) {
    BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST,
            BooleanClause.Occur.MUST};  
    String uri = null;
    String[] queryStrings = new String[2];
    String[] fields = new String[2];
    queryStrings[0] = title;
    queryStrings[1] = language.getIso639_1();
    fields[0] = TitleURILucDocCreator.Fields.Title.toString();
    fields[1] = TitleURILucDocCreator.Fields.LanguageISOCode.toString();        
    TopScoreDocCollector docCollector = searcher.multiFieldTermSearch(queryStrings, fields, flags, 10);
    ScoreDoc[] scoreDocs = docCollector.topDocs().scoreDocs;
    if(scoreDocs.length>0) {
        ScoreDoc scoreDoc = scoreDocs[0];
        Document document = searcher.getDocumentWithDocID(scoreDoc.doc);
        uri = document.get(TitleURILucDocCreator.Fields.URI_EN.toString());
    }
    return uri;
}
Project: cl-esa    File: CLESA.java
public void makeVector() {
    TIntDoubleHashMap vecMapT = new TIntDoubleHashMap();    
    String fieldName = pair.getSecond().getIso639_1() + "TopicContent";
    TopScoreDocCollector docsCollector = searcher.search(pair.getFirst(), lucHits, fieldName, AnalyzerFactory.getAnalyzer(pair.getSecond()));           
    if(docsCollector == null) {
        vectorMapT.put(threadName, null); 
        return;             
    }   
    ScoreDoc[] scoreDocs = docsCollector.topDocs().scoreDocs;
    double score = 0.0;
    for(int i=0;i<scoreDocs.length;++i) {
        int docID = scoreDocs[i].doc;
        score = scoreDocs[i].score;
        vecMapT.put(docID, score);
    }       
    vectorMapT.put(threadName, vecMapT);
}
Project: AGDISTIS    File: TripleIndex.java
private List<Triple> getFromIndex(int maxNumberOfResults, BooleanQuery bq) throws IOException {
    log.debug("\t start asking index...");
    TopScoreDocCollector collector = TopScoreDocCollector.create(maxNumberOfResults, true);
    // Similarity BM25Similarity = new BM25Similarity();
    // isearcher.setSimilarity(BM25Similarity);
    isearcher.search(bq, collector);
    ScoreDoc[] hits = collector.topDocs().scoreDocs;

    List<Triple> triples = new ArrayList<Triple>();
    String s, p, o;
    for (int i = 0; i < hits.length; i++) {
        Document hitDoc = isearcher.doc(hits[i].doc);
        s = hitDoc.get(FIELD_NAME_SUBJECT);
        p = hitDoc.get(FIELD_NAME_PREDICATE);
        o = hitDoc.get(FIELD_NAME_OBJECT_URI);
        if (o == null) {
            o = hitDoc.get(FIELD_NAME_OBJECT_LITERAL);
        }
        Triple triple = new Triple(s, p, o);
        triples.add(triple);
    }
    log.debug("\t finished asking index...");
    return triples;
}
Project: FOX    File: TripleIndex.java
private List<Triple> getFromIndex(final int maxNumberOfResults, final BooleanQuery bq)
    throws IOException {
  log.debug("\t start asking index...");
  final TopScoreDocCollector collector = TopScoreDocCollector.create(maxNumberOfResults, true);
  // Similarity BM25Similarity = new BM25Similarity();
  // isearcher.setSimilarity(BM25Similarity);
  isearcher.search(bq, collector);
  final ScoreDoc[] hits = collector.topDocs().scoreDocs;

  final List<Triple> triples = new ArrayList<Triple>();
  String s, p, o;
  for (int i = 0; i < hits.length; i++) {
    final Document hitDoc = isearcher.doc(hits[i].doc);
    s = hitDoc.get(FIELD_NAME_SUBJECT);
    p = hitDoc.get(FIELD_NAME_PREDICATE);
    o = hitDoc.get(FIELD_NAME_OBJECT_URI);
    if (o == null) {
      o = hitDoc.get(FIELD_NAME_OBJECT_LITERAL);
    }
    final Triple triple = new Triple(s, p, o);
    triples.add(triple);
  }
  log.debug("\t finished asking index...");
  return triples;
}
Project: Maskana-Gestor-de-Conocimiento    File: TestMultipleCategoryLists.java
private FacetsCollector performSearch(FacetIndexingParams iParams, TaxonomyReader tr, IndexReader ir, 
    IndexSearcher searcher) throws IOException {
  // step 1: collect matching documents into a collector
  Query q = new MatchAllDocsQuery();
  TopScoreDocCollector topDocsCollector = TopScoreDocCollector.create(10, true);

  List<FacetRequest> facetRequests = new ArrayList<FacetRequest>();
  facetRequests.add(new CountFacetRequest(new CategoryPath("Band"), 10));
  CountFacetRequest bandDepth = new CountFacetRequest(new CategoryPath("Band"), 10);
  bandDepth.setDepth(2);
  // makes it easier to check the results in the test.
  bandDepth.setResultMode(ResultMode.GLOBAL_FLAT);
  facetRequests.add(bandDepth);
  facetRequests.add(new CountFacetRequest(new CategoryPath("Author"), 10));
  facetRequests.add(new CountFacetRequest(new CategoryPath("Band", "Rock & Pop"), 10));

  // Faceted search parameters indicate which facets we are interested in
  FacetSearchParams facetSearchParams = new FacetSearchParams(iParams, facetRequests);

  // perform document search and facet accumulation
  FacetsCollector facetsCollector = FacetsCollector.create(facetSearchParams, ir, tr);
  searcher.search(q, MultiCollector.wrap(topDocsCollector, facetsCollector));
  return facetsCollector;
}
Project: anycook-api    File: FulltextIndex.java
public Set<String> search(String q) throws IOException {
    Set<String> recipes = new LinkedHashSet<>();
    String[] fields = new String[]{"description", "steps"};
    logger.debug(String.format("searching for %s", q));

    try (IndexReader reader = DirectoryReader.open(index)) {
        int hitsPerPage = 1000;
        IndexSearcher searcher = new IndexSearcher(reader);
        Query query = new MultiFieldQueryParser(fields, analyzer).parse(q);
        TopScoreDocCollector collector = TopScoreDocCollector.create(hitsPerPage, null);
        searcher.search(query, collector);

        ScoreDoc[] hits = collector.topDocs().scoreDocs;
        for (ScoreDoc hit : hits) {
            Document d = searcher.doc(hit.doc);
            recipes.add(d.get("title"));
        }

    } catch (CorruptIndexException | ParseException e) {
        logger.error(e);
    }

    logger.debug(String.format("found %d results", recipes.size()));
    return recipes;
}
Project: t4f-data    File: TimeLimitingCollectorTest.java
public void testTimeLimitingCollector() throws Exception {
  Directory dir = TestUtil.getBookIndexDirectory();
  IndexSearcher searcher = new IndexSearcher(dir);
  Query q = new MatchAllDocsQuery();
  int numAllBooks = TestUtil.hitCount(searcher, q);

  TopScoreDocCollector topDocs = TopScoreDocCollector.create(10, false);
  Collector collector = new TimeLimitingCollector(topDocs,  // #A
                                                  1000);    // #A
  try {
    searcher.search(q, collector);
    assertEquals(numAllBooks, topDocs.getTotalHits());  // #B
  } catch (TimeExceededException tee) {                 // #C
    LOGGER.info("Too much time taken.");         // #C
  }                                                     // #C
  searcher.close();
  dir.close();
}
Project: t4f-data    File: IndexCreationQueryTest.java
private static void query(String indexDir, Query q) throws IOException, ParseException {

        int hitsPerPage = 10;
        IndexReader reader = DirectoryReader.open(FSDirectory.open(new File(indexDir)));
        IndexSearcher indexSearcher = new IndexSearcher(reader);
        TopDocsCollector collector = TopScoreDocCollector.create(hitsPerPage, false);
        indexSearcher.search(q, collector);
        ScoreDoc[] hits = collector.topDocs().scoreDocs;

        LOGGER.info("Found " + hits.length + " hits.");
        for (int i = 0; i < hits.length; ++i) {
            int docId = hits[i].doc;
            Document d = indexSearcher.doc(docId);
            // LOGGER.info((i + 1) + ". " + d.get("title"));
        }

        // searcher can only be closed when there
        // is no need to access the documents any more.
        // indexSearcher.close();

    }
Project: t4f-data    File: IndexCreationQueryOptimizeTest.java
private static void query(IndexSearcher indexSearcher, Query q) throws IOException, ParseException {

        int hitsPerPage = 10;
        TopDocsCollector collector = TopScoreDocCollector.create(hitsPerPage, false);
        indexSearcher.search(q, collector);

        ScoreDoc[] hits = collector.topDocs().scoreDocs;
        LOGGER.info("Found " + hits.length + " hits.");

        for (int i = 0; i < hits.length; ++i) {
            int docId = hits[i].doc;
            Document d = indexSearcher.doc(docId);
            LOGGER.info((i + 1) + ". " + d.get("title"));
        }

    }
Project: incubator-netbeans    File: ClassDependencyIndexCreator.java
static void search(String className, Indexer indexer, Collection<IndexingContext> contexts, List<? super ClassUsage> results) throws IOException {
    String searchString = crc32base64(className.replace('.', '/'));
    Query refClassQuery = indexer.constructQuery(ClassDependencyIndexCreator.FLD_NB_DEPENDENCY_CLASS.getOntology(), new StringSearchExpression(searchString));
    TopScoreDocCollector collector = TopScoreDocCollector.create(NexusRepositoryIndexerImpl.MAX_RESULT_COUNT, null);
    for (IndexingContext context : contexts) {
        IndexSearcher searcher = context.acquireIndexSearcher();
        try {
            searcher.search(refClassQuery, collector);
            ScoreDoc[] hits = collector.topDocs().scoreDocs;
            LOG.log(Level.FINER, "for {0} ~ {1} found {2} hits", new Object[] {className, searchString, hits.length});
            for (ScoreDoc hit : hits) {
                int docId = hit.doc;
                Document d = searcher.doc(docId);
                String fldValue = d.get(ClassDependencyIndexCreator.NB_DEPENDENCY_CLASSES);
                LOG.log(Level.FINER, "{0} uses: {1}", new Object[] {className, fldValue});
                Set<String> refClasses = parseField(searchString, fldValue, d.get(ArtifactInfo.NAMES));
                if (!refClasses.isEmpty()) {
                    ArtifactInfo ai = IndexUtils.constructArtifactInfo(d, context);
                    if (ai != null) {
                        ai.setRepository(context.getRepositoryId());
                        List<NBVersionInfo> version = NexusRepositoryIndexerImpl.convertToNBVersionInfo(Collections.singleton(ai));
                        if (!version.isEmpty()) {
                            results.add(new ClassUsage(version.get(0), refClasses));
                        }
                    }
                }
            }
        } finally {
            context.releaseIndexSearcher(searcher);
        }
    }
}
Project: elasticsearch_my    File: InnerHitsContext.java
@Override
public TopDocs topDocs(SearchContext context, FetchSubPhase.HitContext hitContext) throws IOException {
    Query rawParentFilter;
    if (parentObjectMapper == null) {
        rawParentFilter = Queries.newNonNestedFilter();
    } else {
        rawParentFilter = parentObjectMapper.nestedTypeFilter();
    }
    BitSetProducer parentFilter = context.bitsetFilterCache().getBitSetProducer(rawParentFilter);
    Query childFilter = childObjectMapper.nestedTypeFilter();
    Query q = Queries.filtered(query(), new NestedChildrenQuery(parentFilter, childFilter, hitContext));

    if (size() == 0) {
        return new TopDocs(context.searcher().count(q), Lucene.EMPTY_SCORE_DOCS, 0);
    } else {
        int topN = Math.min(from() + size(), context.searcher().getIndexReader().maxDoc());
        TopDocsCollector topDocsCollector;
        if (sort() != null) {
            try {
                topDocsCollector = TopFieldCollector.create(sort().sort, topN, true, trackScores(), trackScores());
            } catch (IOException e) {
                throw ExceptionsHelper.convertToElastic(e);
            }
        } else {
            topDocsCollector = TopScoreDocCollector.create(topN);
        }
        try {
            context.searcher().search(q, topDocsCollector);
        } finally {
            clearReleasables(Lifetime.COLLECTION);
        }
        return topDocsCollector.topDocs(from(), size());
    }
}
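Note how the snippet above sizes its collector to from() + size() and then slices with topDocsCollector.topDocs(from(), size()): TopDocsCollector.topDocs(int start, int howMany) returns only hits the collector actually kept, so the collector must be created large enough to cover the requested window. A minimal sketch with a hypothetical helper method, using the Lucene 5.x create(int) overload seen in this snippet:

static TopDocs page(IndexSearcher searcher, Query query, int from, int size) throws IOException {
    // collect enough hits to cover the window [from, from + size)
    TopScoreDocCollector collector = TopScoreDocCollector.create(from + size);
    searcher.search(query, collector);
    return collector.topDocs(from, size); // e.g. from=20, size=10 gives hits ranked 21-30
}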
Project: searsiaserver    File: SearchResultIndex.java
/**
 * Dump the index to standard out
 * @throws IOException
 */
public void dump() throws IOException {
    TopScoreDocCollector collector;
    ScoreDoc[] docs;
    collector = TopScoreDocCollector.create(999999, true);
    if (hitsSearcher == null) openReader();
    hitsSearcher.search(new MatchAllDocsQuery(), collector);
    docs = collector.topDocs().scoreDocs;
    for(ScoreDoc doc: docs) {
        Document d = hitsSearcher.doc(doc.doc);
        System.out.println(d.get("result"));
    }

}
Project: naisc    File: Search.java
public TopScoreDocCollector search(String queryString, String field) {
    Query query = parseQuery(queryString, field, analyzer);
//  queryString = QueryParser.escape(queryString);
//  QueryParser queryParser = new QueryParser(Version.LUCENE_35, field, analyzer);
//  Query query = null;
//  try {
//      query = queryParser.parse(queryString);
//  } catch (ParseException e) {
//      throw new RuntimeException(e);
//  }
    return search(query);
}
Project: naisc    File: Search.java
private TopScoreDocCollector search(Query query) {
    TopScoreDocCollector collector = TopScoreDocCollector.create(hits, true);
    try {
        searcher.search(query, collector);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }       
    return collector;               
}
Project: naisc    File: SearchTest.java
/**
 * Test of search method, of class Search.
 */
@Test
public void testSearch() throws IOException {
    System.out.println("search");
    Map<String, String> map = new HashMap<>();
    map.put("foo", "bar");
    try(Index index = new Index("src/test/resources/tmp", Corpus.analyzer())) {
        index.addDoc(map);
    }
    String queryString = "test";
    try(Search instance = new Search("src/test/resources/tmp", true, Corpus.analyzer(), 100)) {
        TopScoreDocCollector result = instance.search(queryString, "foo");
        assertEquals(0, result.getTotalHits());
    }
}
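The assertion above leans on TopDocsCollector.getTotalHits(), which counts every document that matched the query, not just the hits the collector kept. A minimal sketch of the distinction, in a hypothetical helper and with the Lucene 4.x create(int, boolean) overload this project uses:

static void countVersusKept(IndexSearcher searcher, Query query) throws IOException {
    TopScoreDocCollector collector = TopScoreDocCollector.create(10, true);
    searcher.search(query, collector);
    int kept = collector.topDocs().scoreDocs.length; // at most 10
    int matched = collector.getTotalHits();          // may be far larger than 10
    System.out.println(kept + " of " + matched + " matches kept");
}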
Project: word-root-finder    File: Indexer.java
/**
 * Searcher.
 *
 * @throws IOException
 *             Signals that an I/O exception has occurred.
 * @throws QueryNodeException
 *             the query node exception
 * @throws ParseException
 *             the parse exception
 */
private static void searcher() throws IOException, QueryNodeException,
        ParseException {
    Path indexDirectoryPath = new File(INDEX_PATH)
            .toPath();
    FSDirectory indexDirectory = new SimpleFSDirectory(indexDirectoryPath);
    DirectoryReader ireader = DirectoryReader.open(indexDirectory);
    IndexSearcher isearcher = new IndexSearcher(ireader);
    QueryParser parser = new QueryParser("title", new StandardAnalyzer());
    Query query = parser.parse("\"Lucene in Action\"");

    TopScoreDocCollector collector = TopScoreDocCollector.create(10);
    isearcher.search(query, new PositiveScoresOnlyCollector(collector));
    TopDocs topDocs = collector.topDocs();
    Set<String> fields = new HashSet<String>();
    fields.add("title");
    fields.add("isbn");
    for (ScoreDoc result : topDocs.scoreDocs) {
        Document doc = isearcher.doc(result.doc, fields);

        if (LOGGER.isInfoEnabled()) {

            LOGGER.info("--- Title :  "
                    + doc.getField("title").stringValue() + " ---");
            LOGGER.info("--- ISBN : " + doc.getField("isbn").stringValue()
                    + " ---");
            LOGGER.info(isearcher.explain(query, result.doc));
        }

    }

}
Project: Camel    File: LuceneSearcher.java
private int doSearch(String searchPhrase, int maxNumberOfHits, Version luceneVersion) throws NullPointerException, ParseException, IOException {
    LOG.trace("*** Search Phrase: {} ***", searchPhrase);

    QueryParser parser = new QueryParser("contents", analyzer);
    Query query = parser.parse(searchPhrase);
    TopScoreDocCollector collector = TopScoreDocCollector.create(maxNumberOfHits);
    indexSearcher.search(query, collector);
    hits = collector.topDocs().scoreDocs;

    LOG.trace("*** Search generated {} hits ***", hits.length);
    return hits.length;
}
Project: aerolush    File: FullTextSearchTest.java
private void find(String search) throws IOException {

        StandardAnalyzer analyzer = new StandardAnalyzer();
        Query query = null;
        try {
            query = new QueryParser("text", analyzer).parse(search);
        } catch (org.apache.lucene.queryparser.classic.ParseException e) {
            e.printStackTrace();
        }

        int hitsPerPage = 100;

        Directory index = new AeroDirectory(getSfy());
        IndexReader reader = DirectoryReader.open(index);
        IndexSearcher searcher = new IndexSearcher(reader);

        TopScoreDocCollector collector = TopScoreDocCollector.create(hitsPerPage);
        searcher.search(query, collector);
        ScoreDoc[] hits = collector.topDocs().scoreDocs;

        // display results
        System.out.println("*************************");
        System.out.println("FOUND: " + hits.length);
        System.out.println("*************************");

        for (int i = 0; i < hits.length; ++i) {

            int docId = hits[i].doc;
            Document d = searcher.doc(docId);
            System.out.println((i + 1) + ". " + d.get("line") + "\t" + d.get("text"));
        }
        System.out.println("*************************");

        // reader can only be closed when there
        // is no need to access the documents any more.
        reader.close();
    }
Project: ephesoft    File: SearchFiles.java
/**
 * This demonstrates a typical paging search scenario, where the search engine presents pages of size n to the user. The user can
 * then go to the next page if interested in the next hits.
 * 
 * When the query is executed for the first time, then only enough results are collected to fill 5 result pages. If the user wants
 * to page beyond this limit, then the query is executed another time and all hits are collected.
 * 
 */
public static ScoreDoc[] doPagingSearch(Searcher searcher, Query query, int noOfPages) throws IOException {

    // Collect enough docs to show 5 pages
    TopScoreDocCollector collector = TopScoreDocCollector.create(noOfPages, true);
    searcher.search(query, collector);
    ScoreDoc[] hits = collector.topDocs().scoreDocs;
    int numTotalHits = collector.getTotalHits();
    // System.out.println("Confidence Score : : "+hits.length);
    System.out.println(numTotalHits + " total matching documents");
    return hits;
}
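The method above collects one block of results up front. To page deeper without re-collecting everything already shown, the after-style overload (the same create(topN, after, true) form the DrillSideways snippet below uses) resumes collection below a previous hit. A minimal sketch in a hypothetical helper, assuming a Lucene 4.x searcher; the "title" field is illustrative:

static void printAllPages(IndexSearcher searcher, Query query) throws IOException {
    ScoreDoc after = null;
    int pageSize = 10;
    while (true) {
        // passing the last hit of the previous page skips everything ranked above it
        TopScoreDocCollector collector = TopScoreDocCollector.create(pageSize, after, true);
        searcher.search(query, collector);
        ScoreDoc[] hits = collector.topDocs().scoreDocs;
        if (hits.length == 0) {
            break; // no more results
        }
        for (ScoreDoc hit : hits) {
            System.out.println(searcher.doc(hit.doc).get("title"));
        }
        after = hits[hits.length - 1];
    }
}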
Project: Review-It    File: SearchEngine.java
public List<ParsedComment> searchComments(String userQuery) {
    if (searcher == null) {
        initializeSearcher();
    }
    Query query = parseUserQuery(userQuery);
    TopScoreDocCollector docCollector = TopScoreDocCollector.create(
            Configuration.getInstance().getResultSize(), true);
    try {
        searcher.search(query, docCollector);
    } catch (IOException e) {
        e.printStackTrace();
    }

    return  parseScoreDocsToList(docCollector.topDocs().scoreDocs);
}
Project: search    File: SearchWithCollectorTask.java
@Override
protected Collector createCollector() throws Exception {
  Collector collector = null;
  if (clnName.equalsIgnoreCase("topScoreDocOrdered")) {
    collector = TopScoreDocCollector.create(numHits(), true);
  } else if (clnName.equalsIgnoreCase("topScoreDocUnOrdered")) {
    collector = TopScoreDocCollector.create(numHits(), false);
  } else if (clnName.length() > 0){
    collector = Class.forName(clnName).asSubclass(Collector.class).newInstance();

  } else {
    collector = super.createCollector();
  }
  return collector;
}
Project: search    File: DrillSideways.java
/**
 * Search, sorting by score, and computing
 * drill down and sideways counts.
 */
public DrillSidewaysResult search(ScoreDoc after,
                                  DrillDownQuery query, int topN) throws IOException {
  int limit = searcher.getIndexReader().maxDoc();
  if (limit == 0) {
    limit = 1; // the collector does not allow numHits = 0
  }
  topN = Math.min(topN, limit);
  TopScoreDocCollector hitCollector = TopScoreDocCollector.create(topN, after, true);
  DrillSidewaysResult r = search(query, hitCollector);
  return new DrillSidewaysResult(r.facets, hitCollector.topDocs());
}
Project: search    File: Grouping.java
TopDocsCollector newCollector(Sort sort, boolean needScores) throws IOException {
  int groupDocsToCollect = getMax(groupOffset, docsPerGroup, maxDoc);
  if (sort == null || sort == Sort.RELEVANCE) {
    return TopScoreDocCollector.create(groupDocsToCollect, true);
  } else {
    return TopFieldCollector.create(searcher.weightSort(sort), groupDocsToCollect, false, needScores, needScores, true);
  }
}
Project: biospectra    File: Classifier.java
public ClassificationResult classify(String header, String sequence) throws Exception {
    if(sequence == null || sequence.isEmpty()) {
        throw new IllegalArgumentException("sequence is null or empty");
    }

    ClassificationResult classificationResult = null;

    BooleanQuery q = createQuery(this.queryAnalyzer, IndexConstants.FIELD_SEQUENCE, sequence, this.minShouldMatch, this.queryGenerationAlgorithm);

    int hitsPerPage = 10;
    TopScoreDocCollector collector = TopScoreDocCollector.create(hitsPerPage);
    this.indexSearcher.search(q, collector);
    TopDocs topdocs = collector.topDocs();
    ScoreDoc[] hits = topdocs.scoreDocs;

    if(hits.length > 0) {
        List<SearchResultEntry> resultArr = new ArrayList<SearchResultEntry>();
        double topscore = topdocs.getMaxScore();
        for(int i=0;i<hits.length;++i) {
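            // keep only the hits whose score ties the maximum (difference of exactly 0)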
            if(topscore - hits[i].score == 0) {
                int docId = hits[i].doc;
                Document d = this.indexSearcher.doc(docId);
                SearchResultEntry result = new SearchResultEntry(docId, d, i, hits[i].score);
                resultArr.add(result);
            }
        }

        classificationResult = makeClassificationResult(header, sequence, resultArr);
    } else {
        classificationResult = makeClassificationResult(header, sequence, null);
    }

    return classificationResult;
}