Java 类org.apache.lucene.search.Searcher 实例源码

项目:bisis-v4    文件:BulkIndexer.java   
/**
 * Adds a stored, untokenized field (prefix=value) to the indexed record
 * identified by recID, rewriting the whole document in the index.
 *
 * @return true on success; false when the record is not found exactly once
 *         or an I/O error occurs (logged as fatal).
 */
public boolean addField(String recID, String prefix, String value){
   try {
       Searcher searcher = new IndexSearcher(indexPath);
       try {
           Query q = new TermQuery(new Term("ID", recID));
           Hits hits = searcher.search(q);
           // Exactly one matching document is required; anything else fails.
           if ((hits == null) || (hits.length() != 1)) {
               return false;
           }
           Document doc = hits.doc(0);
           IndexWriter iw = getIndexWriter();
           Field f = new Field(prefix, value, Field.Store.YES, Field.Index.UN_TOKENIZED, Field.TermVector.NO);
           doc.add(f);
           iw.updateDocument(new Term("ID", recID), doc);
           // NOTE: the writer is intentionally left open here (bulk mode);
           // the non-bulk Indexer variant closes it after each update.
       } finally {
           // Fix: the searcher was never closed before (resource leak).
           searcher.close();
       }
    } catch (IOException ex) {
       log.fatal(ex);
       return false;
     }
     return true;
}
项目:bisis-v4    文件:Indexer.java   
/**
 * Adds a stored, untokenized field (prefix=value) to the indexed record
 * identified by recID and commits the change by closing the writer.
 *
 * @return true on success; false when the record is not found exactly once
 *         or an I/O error occurs (logged as fatal).
 */
public boolean addField(String recID, String prefix, String value){
  try {
   Searcher searcher = new IndexSearcher(indexPath);
   try {
       Query q = new TermQuery(new Term("ID", recID));
       Hits hits = searcher.search(q);
       // Exactly one matching document is required; anything else fails.
       if ((hits == null) || (hits.length() != 1)) {
           return false;
       }
       Document doc = hits.doc(0);
       IndexWriter iw = getIndexWriter();
       Field f = new Field(prefix, value, Field.Store.YES, Field.Index.UN_TOKENIZED, Field.TermVector.NO);
       doc.add(f);
       iw.updateDocument(new Term("ID", recID), doc);
       iw.close();
   } finally {
       // Fix: the searcher was never closed before (resource leak).
       searcher.close();
   }
 } catch (IOException ex) {
    log.fatal(ex);
    return false;
  }
  return true;
}
项目:bisis-v4    文件:Indexer.java   
/**
 * Removes the single occurrence of field prefix=value from the indexed
 * record identified by recID; all other values of the same prefix are kept.
 * Failures are logged as fatal; the method returns silently.
 */
public void deleteField(String recID, String prefix, String value){
   try {
       Searcher searcher = new IndexSearcher(indexPath);
       try {
           Query q = new TermQuery(new Term("ID", recID));
           Hits hits = searcher.search(q);
           // Exactly one matching document is required.
           if ((hits == null) || (hits.length() != 1)) {
               log.fatal("greska pri brisanju polja. Zapis: " + recID);
               return;
           }
           Document doc = hits.doc(0);
           Field[] fields = doc.getFields(prefix);
           IndexWriter iw = getIndexWriter();
           doc.removeFields(prefix);
           // Re-add every value of the prefix except the one being deleted.
           for (int i = 0; i < fields.length; i++) {
               if (!fields[i].stringValue().equals(value)) {
                   doc.add(fields[i]);
               }
           }
           iw.updateDocument(new Term("ID", recID), doc);
           iw.close();
       } finally {
           // Fix: the searcher was never closed before (resource leak).
           searcher.close();
       }
    } catch (IOException ex) {
       log.fatal(ex);
     }
}
项目:jeecms6    文件:LuceneContent.java   
/**
 * Collects the ID field of hits [first, first+max) as Integers.
 * Negative first/max are treated as 0; the window is clamped to the
 * number of available hits.
 */
public static List<Integer> getResultList(Searcher searcher, TopDocs docs,
        int first, int max) throws CorruptIndexException, IOException {
    ScoreDoc[] hits = docs.scoreDocs;
    if (first < 0) {
        first = 0;
    }
    if (max < 0) {
        max = 0;
    }
    // Fix: allocate AFTER clamping - new ArrayList<Integer>(max) threw
    // IllegalArgumentException when max was negative.
    List<Integer> list = new ArrayList<Integer>(max);
    int last = first + max;
    int len = hits.length;
    if (last > len) {
        last = len;
    }
    for (int i = first; i < last; i++) {
        Document d = searcher.doc(hits[i].doc);
        list.add(Integer.valueOf(d.getField(ID).stringValue()));
    }
    return list;
}
项目:jeecms6    文件:LuceneContentSvcImpl.java   
/**
 * Runs a paged full-text search over the given directory and resolves the
 * matching content IDs to Content entities via contentMng.
 */
@Transactional(readOnly = true)
public Pagination searchPage(Directory dir, String queryString,String category,String workplace,
        Integer siteId, Integer channelId, Date startDate, Date endDate,
        int pageNo, int pageSize) throws CorruptIndexException,
        IOException, ParseException {
    Searcher searcher = new IndexSearcher(dir);
    try {
        Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_30);
        Query query = LuceneContent.createQuery(queryString, category, workplace, siteId,
                channelId, startDate, endDate, analyzer);
        // Fetch enough hits to cover every page up to the requested one.
        int hitsNeeded = pageNo * pageSize;
        TopDocs topDocs = searcher.search(query, hitsNeeded);
        Pagination page = LuceneContent.getResultPage(searcher, topDocs, pageNo, pageSize);
        List<?> idList = page.getList();
        List<Content> resolved = new ArrayList<Content>(idList.size());
        for (Object contentId : idList) {
            resolved.add(contentMng.findById((Integer) contentId));
        }
        page.setList(resolved);
        return page;
    } finally {
        searcher.close();
    }
}
项目:reviki    文件:LuceneSearcher.java   
/**
 * Returns the names of all pages whose outgoing-links field points at the
 * given page; the page itself is removed from the result. Returns an empty
 * set when no index directory is configured.
 */
public Set<String> incomingLinks(final String page) throws IOException, PageStoreException {
  if (_dir == null) {
    // No index directory configured - nothing to search.
    return Collections.emptySet();
  }
  try {
    return doReadOperation(new ReadOperation<Set<String>>() {
      public Set<String> execute(final IndexReader reader, final Searcher searcher, final Analyzer analyzer) throws IOException, ParseException {
        final String pageEscaped = escape(URIUtil.encodeWithinPath(page));
        // NOTE(review): this builds a fresh analyzer via createAnalyzer()
        // instead of the 'analyzer' parameter that the sibling search()
        // method uses - confirm whether that difference is intentional.
        Set<String> results = Sets.newLinkedHashSet(Iterables.transform(query(reader, createAnalyzer(), searcher, FIELD_OUTGOING_LINKS, pageEscaped, false), SearchMatch.TO_PAGE_NAME));
        results.remove(page);
        return results;
      }
    }, false);
  }
  catch (QuerySyntaxException ex) {
    // The query here is machine-built, so a syntax error is unexpected.
    throw new NoQueryPerformedException(ex);
  }
}
项目:reviki    文件:LuceneSearcher.java   
/**
 * Full-text search across all indexed fields. The lower-cased path field is
 * matched with a wildcard query; all other fields use the raw query string.
 * Fields are visited in priority order (path, then title, then content) and
 * the combined results are ordered before returning.
 */
public Set<SearchMatch> search(final String queryString, final boolean provideExtracts, final boolean singleWiki) throws IOException, QuerySyntaxException {
  final boolean blankQuery = queryString == null || queryString.trim().length() == 0;
  if (_dir == null || blankQuery) {
    return Collections.emptySet();
  }
  return doReadOperation(new ReadOperation<Set<SearchMatch>>() {
    public Set<SearchMatch> execute(final IndexReader reader, final Searcher searcher, final Analyzer analyzer) throws IOException, ParseException {
      LinkedHashSet<SearchMatch> matches = new LinkedHashSet<SearchMatch>();
      // Match equality is on page name, so earlier (higher-priority) fields win.
      for (String field : ALL_SEARCH_FIELDS) {
        if (!field.equals(FIELD_PATH_LOWER)) {
          matches.addAll(query(reader, analyzer, searcher, field, queryString, provideExtracts));
        }
        else {
          final Term pathTerm = new Term(FIELD_PATH_LOWER, "*" + queryString.toLowerCase() + "*");
          matches.addAll(doQuery(reader, analyzer, searcher, field, provideExtracts, new WildcardQuery(pathTerm)));
        }
      }
      return orderResults(matches);
    }
  }, !singleWiki);
}
项目:reviki    文件:LuceneSearcher.java   
/**
 * Looks up the value stored under the given property key in the index.
 *
 * @return the first matching property value, or null when the index is
 *         absent or the key has no entry.
 */
private String getProperty(final String propertyName) throws IOException {
  if (_dir == null) {
    // No index directory configured.
    return null;
  }
  try {
    return doReadOperation(new ReadOperation<String>() {
      public String execute(final IndexReader reader, final Searcher searcher, final Analyzer analyzer) throws IOException, ParseException {
        Hits hits = searcher.search(new TermQuery(new Term(FIELD_PROPERTY_KEY, propertyName)));
        Iterator<?> iterator = hits.iterator();
        // Only the first hit is consulted; any duplicates are ignored.
        if (iterator.hasNext()) {
          return ((Hit) iterator.next()).get(FIELD_PROPERTY_VALUE);
        }
        return null;
      }
    }, false);
  }
  catch (QuerySyntaxException ex) {
    // NOTE(review): a programmatic TermQuery should not raise a syntax
    // error - confirm doReadOperation's contract; wrapped defensively.
    throw new NoQueryPerformedException(ex);
  }
}
项目:Openfire    文件:ChatSearchManager.java   
/**
 * Returns a Lucene Searcher that can be used to execute queries. Lucene
 * can handle index reading even while updates occur. However, in order
 * for index changes to be reflected in search results, the reader must
 * be re-opened whenever the modificationDate changes.<p>
 * <p/>
 * The location of the index is the "index" subdirectory in [jiveHome].
 *
 * @return a Searcher that can be used to execute queries, or {@code null}
 *         when no valid index directory is available.
 */
public Searcher getSearcher() throws IOException {
    synchronized (indexerAnalyzer) {
        if (searcherReader == null) {
            if (searchDirectory != null && IndexReader.indexExists(searchDirectory)) {
                searcherReader = IndexReader.open(searchDirectory);
                searcher = new IndexSearcher(searcherReader);
            }
            else {
                // Log warnings.
                if (searchDirectory == null) {
                    Log.warn("Search " +
                            "directory not set, you must rebuild the index.");
                }
                else if (!IndexReader.indexExists(searchDirectory)) {
                    Log.warn("Search " +
                            "directory " + searchDirectory + " does not appear to " +
                            "be a valid search index. You must rebuild the index.");
                }
                return null;
            }
        }
        // Fix: return while still holding the lock so that a searcher
        // published by another thread is guaranteed visible here (the field
        // was previously read outside the synchronized block; it does not
        // appear to be volatile - confirm in the class declaration).
        return searcher;
    }
}
项目:g3server    文件:ChatSearchManager.java   
/**
 * Returns a Lucene Searcher that can be used to execute queries. Lucene
 * can handle index reading even while updates occur. However, in order
 * for index changes to be reflected in search results, the reader must
 * be re-opened whenever the modificationDate changes.<p>
 * <p/>
 * The location of the index is the "index" subdirectory in [jiveHome].
 *
 * @return a Searcher that can be used to execute queries, or {@code null}
 *         when no valid index directory is available.
 */
public Searcher getSearcher() throws IOException {
    synchronized (indexerAnalyzer) {
        if (searcherReader == null) {
            if (searchDirectory != null && IndexReader.indexExists(searchDirectory)) {
                searcherReader = IndexReader.open(searchDirectory);
                searcher = new IndexSearcher(searcherReader);
            }
            else {
                // Log warnings.
                if (searchDirectory == null) {
                    Log.warn("Search " +
                            "directory not set, you must rebuild the index.");
                }
                else if (!IndexReader.indexExists(searchDirectory)) {
                    Log.warn("Search " +
                            "directory " + searchDirectory + " does not appear to " +
                            "be a valid search index. You must rebuild the index.");
                }
                return null;
            }
        }
        // Fix: return while still holding the lock so that a searcher
        // published by another thread is guaranteed visible here (the field
        // was previously read outside the synchronized block; it does not
        // appear to be volatile - confirm in the class declaration).
        return searcher;
    }
}
项目:dash-xtf    文件:NearSpans.java   
/**
 * Builds the span structure for a SpanNearQuery: one SpansCell per clause,
 * appended to 'ordered' in clause order, plus the searcher's Similarity
 * kept for later scoring.
 */
public NearSpans(SpanNearQuery query, IndexReader reader, Searcher searcher)
  throws IOException 
{
  this.query = query;
  this.slop = query.getSlop();       // max allowed distance between clause matches
  this.inOrder = query.isInOrder();  // whether clause order must be preserved

  SpanQuery[] clauses = query.getClauses(); // initialize spans & list
  for (int i = 0; i < clauses.length; i++) {
    SpansCell cell =  // construct clause spans
                     new SpansCell(clauses[i].getSpans(reader, searcher), i);
    ordered.add(cell); // add to ordered
  }

  similarity = searcher.getSimilarity();
}
项目:dash-xtf    文件:OrNearSpans.java   
/**
 * Builds the span structure for a SpanOrNearQuery: one SpansCell per
 * clause collected into 'cells', plus the searcher's Similarity kept for
 * later scoring.
 */
public OrNearSpans(SpanOrNearQuery query, IndexReader reader,
                   Searcher searcher)
  throws IOException 
{
  this.query = query;
  this.slop = query.getSlop();  // max allowed distance between clause matches
  this.penalizeOutOfOrder = query.penalizeOutOfOrder();

  SpanQuery[] clauses = query.getClauses();
  nClauses = clauses.length;
  cells = new ArrayList(nClauses);  // NOTE: raw type matches the field's declaration
  for (int i = 0; i < nClauses; i++)
    cells.add(new SpansCell(clauses[i].getSpans(reader, searcher), i));

  similarity = searcher.getSimilarity();
}
项目:openfire    文件:ChatSearchManager.java   
/**
 * Returns a Lucene Searcher that can be used to execute queries. Lucene
 * can handle index reading even while updates occur. However, in order
 * for index changes to be reflected in search results, the reader must
 * be re-opened whenever the modificationDate changes.<p>
 * <p/>
 * The location of the index is the "index" subdirectory in [jiveHome].
 *
 * @return a Searcher that can be used to execute queries, or {@code null}
 *         when no valid index directory is available.
 */
public Searcher getSearcher() throws IOException {
    synchronized (indexerAnalyzer) {
        if (searcherReader == null) {
            if (searchDirectory != null && IndexReader.indexExists(searchDirectory)) {
                searcherReader = IndexReader.open(searchDirectory);
                searcher = new IndexSearcher(searcherReader);
            }
            else {
                // Log warnings.
                if (searchDirectory == null) {
                    Log.warn("Search " +
                            "directory not set, you must rebuild the index.");
                }
                else if (!IndexReader.indexExists(searchDirectory)) {
                    Log.warn("Search " +
                            "directory " + searchDirectory + " does not appear to " +
                            "be a valid search index. You must rebuild the index.");
                }
                return null;
            }
        }
        // Fix: return while still holding the lock so that a searcher
        // published by another thread is guaranteed visible here (the field
        // was previously read outside the synchronized block; it does not
        // appear to be volatile - confirm in the class declaration).
        return searcher;
    }
}
项目:Lottery    文件:LuceneContent.java   
/**
 * Collects the ID field of hits [first, first+max) as Integers.
 * Negative first/max are treated as 0; the window is clamped to the
 * number of available hits.
 */
public static List<Integer> getResultList(Searcher searcher, TopDocs docs,
        int first, int max) throws CorruptIndexException, IOException {
    ScoreDoc[] hits = docs.scoreDocs;
    if (first < 0) {
        first = 0;
    }
    if (max < 0) {
        max = 0;
    }
    // Fix: allocate AFTER clamping - new ArrayList<Integer>(max) threw
    // IllegalArgumentException when max was negative.
    List<Integer> list = new ArrayList<Integer>(max);
    int last = first + max;
    int len = hits.length;
    if (last > len) {
        last = len;
    }
    for (int i = first; i < last; i++) {
        Document d = searcher.doc(hits[i].doc);
        list.add(Integer.valueOf(d.getField(ID).stringValue()));
    }
    return list;
}
项目:Lottery    文件:LuceneContentSvcImpl.java   
/**
 * Runs a paged full-text search over the given directory and resolves the
 * matching content IDs to Content entities via contentMng.
 */
@Transactional(readOnly = true)
public Pagination searchPage(Directory dir, String queryString,String category,String workplace,
        Integer siteId, Integer channelId, Date startDate, Date endDate,
        int pageNo, int pageSize) throws CorruptIndexException,
        IOException, ParseException {
    Searcher searcher = new IndexSearcher(dir);
    try {
        Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_30);
        Query query = LuceneContent.createQuery(queryString, category, workplace, siteId,
                channelId, startDate, endDate, analyzer);
        // Fetch enough hits to cover every page up to the requested one.
        int hitsNeeded = pageNo * pageSize;
        TopDocs topDocs = searcher.search(query, hitsNeeded);
        Pagination page = LuceneContent.getResultPage(searcher, topDocs, pageNo, pageSize);
        List<?> idList = page.getList();
        List<Content> resolved = new ArrayList<Content>(idList.size());
        for (Object contentId : idList) {
            resolved.add(contentMng.findById((Integer) contentId));
        }
        page.setList(resolved);
        return page;
    } finally {
        searcher.close();
    }
}
项目:openfire-bespoke    文件:ChatSearchManager.java   
/**
 * Returns a Lucene Searcher that can be used to execute queries. Lucene
 * can handle index reading even while updates occur. However, in order
 * for index changes to be reflected in search results, the reader must
 * be re-opened whenever the modificationDate changes.<p>
 * <p/>
 * The location of the index is the "index" subdirectory in [jiveHome].
 *
 * @return a Searcher that can be used to execute queries, or {@code null}
 *         when no valid index directory is available.
 */
public Searcher getSearcher() throws IOException {
    synchronized (indexerAnalyzer) {
        if (searcherReader == null) {
            if (searchDirectory != null && IndexReader.indexExists(searchDirectory)) {
                searcherReader = IndexReader.open(searchDirectory);
                searcher = new IndexSearcher(searcherReader);
            }
            else {
                // Log warnings.
                if (searchDirectory == null) {
                    Log.warn("Search " +
                            "directory not set, you must rebuild the index.");
                }
                else if (!IndexReader.indexExists(searchDirectory)) {
                    Log.warn("Search " +
                            "directory " + searchDirectory + " does not appear to " +
                            "be a valid search index. You must rebuild the index.");
                }
                return null;
            }
        }
        // Fix: return while still holding the lock so that a searcher
        // published by another thread is guaranteed visible here (the field
        // was previously read outside the synchronized block; it does not
        // appear to be volatile - confirm in the class declaration).
        return searcher;
    }
}
项目:alfresco-repository    文件:LuceneResultSet.java   
/**
 * Wrap a lucene seach result with node support.
 * All collaborators are simply stored for later use by the result set.
 * 
 * @param hits the raw Lucene Hits to expose
 * @param searcher the Searcher that produced the hits
 * @param nodeService nodeService
 * @param tenantService tenant service
 * @param searchParameters SearchParameters the search was executed with
 * @param config - lucene config
 */
public LuceneResultSet(Hits hits, Searcher searcher, NodeService nodeService, TenantService tenantService, SearchParameters searchParameters,
        LuceneConfig config)
{
    super();
    this.hits = hits;
    this.searcher = searcher;
    this.nodeService = nodeService;
    this.tenantService = tenantService;
    this.searchParameters = searchParameters;
    this.config = config;
    // One bit per hit; presumably marks hits already prefetched - confirm
    // against the prefetch logic elsewhere in the class.
    prefetch = new BitSet(hits.length());
}
项目:bisis-v4    文件:Retriever.java   
/**
 * Runs the query (optionally sorted by sortPrefix) and returns every hit's
 * record ID together with all inventory ("IN") field values.
 *
 * @return the populated Result, or null on any error (logged as fatal).
 */
public Result selectAll(Query query, String sortPrefix){
  try {
    BooleanQuery.setMaxClauseCount(20000); // because of heap usage
    Searcher searcher = new IndexSearcher(indexPath);
    try {
      Hits hits;
      if (sortPrefix == null || "".equals(sortPrefix))
        hits = searcher.search(query);
      else {
        // "RN_sort" holds numeric record numbers; everything else sorts as string.
        int sortType = SortField.STRING;
        if ("RN_sort".equals(sortPrefix))
          sortType = SortField.INT;
        hits = searcher.search(query, new Sort(
            new SortField(sortPrefix, sortType)));
      }

      int n = hits.length();
      int[] retVal = new int[n];
      List<String> invs = new ArrayList<String>();
      Field[] tmp = null;

      for (int i = 0; i < n; i++) {
        String recordID = hits.doc(i).get("ID");
        retVal[i] = Integer.parseInt(recordID);
        tmp = hits.doc(i).getFields("IN");
        if (tmp != null){
          for (int j = 0; j < tmp.length; j++){
            invs.add(tmp[j].stringValue());
          }
        }
      }
      Result result = new Result();
      result.setRecords(retVal);
      result.setInvs(invs);
      return result;
    } finally {
      // Fix: close even when an exception is thrown mid-search (the
      // searcher previously leaked on every error path).
      searcher.close();
    }
  } catch (Exception ex) {
    log.fatal(ex);
    return null;
  }
}
项目:bisis-v4    文件:Retriever.java   
/**
 * Runs the filtered query (optionally sorted by sortPrefix) and returns the
 * record IDs of all hits.
 *
 * @return the ID array, or null on any error (logged as fatal).
 */
public int[] select(Query query, Filter filter, String sortPrefix){
  try {
    BooleanQuery.setMaxClauseCount(Integer.MAX_VALUE);
    Searcher searcher = new IndexSearcher(indexPath);
    try {
      Hits hits;
      if (sortPrefix == null || "".equals(sortPrefix)){
        hits = searcher.search(query, filter);
      } else {
        // "RN_sort" holds numeric record numbers; everything else sorts as string.
        int sortType = SortField.STRING;
        if ("RN_sort".equals(sortPrefix))
          sortType = SortField.INT;
        hits = searcher.search(query, filter, new Sort(
            new SortField(sortPrefix, sortType)));
      }
      int n = hits.length();
      int[] retVal = new int[n];

      for (int i = 0; i < n; i++) {
        String recordID = hits.doc(i).get("ID");
        retVal[i] = Integer.parseInt(recordID);
      }
      return retVal;
    } finally {
      // Fix: close even when an exception is thrown mid-search (the
      // searcher previously leaked on every error path).
      searcher.close();
    }
  } catch (Exception ex) {
    log.fatal(ex);
    return null;
  }
}
项目:bisis-v4    文件:Retriever.java   
/**
 * Runs the filtered query (optionally sorted by sortPrefix) and returns
 * every hit's record ID together with all inventory ("IN") field values.
 *
 * @return the populated Result, or null on any error (logged as fatal).
 */
public Result selectAll(Query query, Filter filter, String sortPrefix) {
  try {
    BooleanQuery.setMaxClauseCount(Integer.MAX_VALUE);
    Searcher searcher = new IndexSearcher(indexPath);
    try {
      Hits hits;
      if (sortPrefix == null || "".equals(sortPrefix)){
        hits = searcher.search(query, filter);
      } else {
        // "RN_sort" holds numeric record numbers; everything else sorts as string.
        int sortType = SortField.STRING;
        if ("RN_sort".equals(sortPrefix))
          sortType = SortField.INT;
        hits = searcher.search(query, filter, new Sort(
            new SortField(sortPrefix, sortType)));
      }
      int n = hits.length();
      int[] retVal = new int[n];
      List<String> invs = new ArrayList<String>();
      Field[] tmp = null;

      for (int i = 0; i < n; i++) {
        String recordID = hits.doc(i).get("ID");
        retVal[i] = Integer.parseInt(recordID);
        tmp = hits.doc(i).getFields("IN");
        if (tmp != null){
          for (int j = 0; j < tmp.length; j++){
            invs.add(tmp[j].stringValue());
          }
        }
      }
      Result result = new Result();
      result.setRecords(retVal);
      result.setInvs(invs);
      return result;
    } finally {
      // Fix: close even when an exception is thrown mid-search (the
      // searcher previously leaked on every error path).
      searcher.close();
    }
  } catch (Exception ex) {
    log.fatal(ex);
    return null;
  }
}
项目:bisis-v4    文件:Retriever.java   
/**
 * Expands 'text' against the values of 'prefix' fields in documents
 * matching the parsed query: returns every distinct field value starting
 * with 'text', stripped of the "0start0"/"0end0" boundary markers.
 *
 * @return the expansion list, or null on any error (logged as fatal).
 */
public List<String> selectExpand(String query, String prefix, String text){
  try {
    WhitespaceAnalyzer sa = new WhitespaceAnalyzer();
    BooleanQuery.setMaxClauseCount(Integer.MAX_VALUE);
    QueryParser p = new QueryParser("contents", sa);
    Query q = p.parse(query);
    Searcher searcher = new IndexSearcher(indexPath);
    try {
      Hits hits = searcher.search(q);
      int n = hits.length();
      List<String> expandList = new ArrayList<String>();
      Field[] tmp = null;
      String pom = "";
      for (int i = 0; i < n; i++) {
        tmp = hits.doc(i).getFields(prefix);
        if (tmp != null){
          for (int j = 0; j < tmp.length; j++){
            // Strip the artificial word-boundary markers stored in the index.
            pom = tmp[j].stringValue().replace("0start0 ", "");
            pom = pom.replace(" 0end0", "");
            if (pom.startsWith(text) && (!expandList.contains(pom))){
              expandList.add(pom);
            }
          }
        }
      }
      return expandList;
    } finally {
      // Fix: close even when an exception is thrown mid-search (previously
      // leaked on error paths). Also removed an unused StopWatch that was
      // started and stopped but never read.
      searcher.close();
    }
  } catch (Exception ex) {
    log.fatal(ex);
    return null;
  }
}
项目:ephesoft    文件:SearchFiles.java   
/**
 * Searches the index at indexFolder with the parsed query and accumulates,
 * per document page type, the hit scores into a bounded value-sorted map.
 *
 * @return the reverse-sorted page-type/score map, or null for a null query.
 */
public static Map<String, Float> generateConfidence(String indexFolder, String query, String field, int noOfPages, int maxValue)
        throws Exception {

    // Validate the query before opening any index resources.
    if (query == null) {
        System.out.println("Wrong Query");
        return null;
    }
    query = query.trim();

    CustomValueSortedMap docNameScore = new CustomValueSortedMap(maxValue);
    IndexReader reader = IndexReader.open(FSDirectory.open(new File(indexFolder)), true); // only searching, so read-only=true
    try {
        Searcher searcher = new IndexSearcher(reader);
        Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT);
        QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, field, analyzer);
        Query searchQuery = parser.parse(query);
        ScoreDoc[] scoreDocs = doPagingSearch(searcher, searchQuery, noOfPages);
        if (scoreDocs != null && scoreDocs.length > 0) {
            for (int i = 0; i < scoreDocs.length; i++) {
                Document document = searcher.doc(scoreDocs[i].doc);
                // (Removed a searcher.explain() call whose result was discarded.)
                String docPageType = fetchDocPageType(document.get("path"));
                docNameScore.add(docPageType, Double.valueOf(scoreDocs[i].score));
            }
        }
    } finally {
        // Fix: the reader was previously leaked whenever parsing/searching
        // threw, and on the early null-query return.
        reader.close();
    }
    return docNameScore.getReverseSortedMapValueInFloat();
}
项目:ephesoft    文件:SearchFiles.java   
/**
 * This demonstrates a typical paging search scenario, where the search engine presents pages of size n to the user. The user can
 * then go to the next page if interested in the next hits.
 * 
 * When the query is executed for the first time, then only enough results are collected to fill 5 result pages. If the user wants
 * to page beyond this limit, then the query is executed another time and all hits are collected.
 * 
 */
/**
 * Executes the query through a TopScoreDocCollector sized to 'noOfPages'
 * results, prints the total number of matches, and returns the collected
 * score docs.
 */
public static ScoreDoc[] doPagingSearch(Searcher searcher, Query query, int noOfPages) throws IOException {

    // Collect enough docs to show 5 pages
    TopScoreDocCollector collector = TopScoreDocCollector.create(noOfPages, true);
    searcher.search(query, collector);
    int totalMatches = collector.getTotalHits();
    ScoreDoc[] topHits = collector.topDocs().scoreDocs;
    System.out.println(totalMatches + " total matching documents");
    return topHits;
}
项目:ephesoft    文件:SearchFiles.java   
/**
 * This demonstrates a typical paging search scenario, where the search engine presents pages of size n to the user. The user can
 * then go to the next page if interested in the next hits.
 * 
 * When the query is executed for the first time, then only enough results are collected to fill 5 result pages. If the user wants
 * to page beyond this limit, then the query is executed another time and all hits are collected.
 * 
 */
/**
 * Executes the query through a TopScoreDocCollector sized to 'noOfPages'
 * results, prints the total number of matches, and returns the collected
 * score docs.
 */
public static ScoreDoc[] doPagingSearch(Searcher searcher, Query query, int noOfPages) throws IOException {

    // Collect enough docs to show 5 pages
    TopScoreDocCollector collector = TopScoreDocCollector.create(noOfPages, true);
    searcher.search(query, collector);
    int totalMatches = collector.getTotalHits();
    ScoreDoc[] topHits = collector.topDocs().scoreDocs;
    System.out.println(totalMatches + " total matching documents");
    return topHits;
}
项目:jeecms6    文件:LuceneContent.java   
/**
 * Builds a Pagination for page 'pageNo' (1-based) of the given TopDocs,
 * resolving each hit's ID field to an Integer.
 */
public static Pagination getResultPage(Searcher searcher, TopDocs docs,
        int pageNo, int pageSize) throws CorruptIndexException, IOException {
    List<Integer> list = new ArrayList<Integer>(pageSize);
    ScoreDoc[] hits = docs.scoreDocs;
    // Fix: clamp the start index to 0 - a pageNo < 1 previously produced a
    // negative start and hits[i] threw ArrayIndexOutOfBoundsException.
    int startIndex = Math.max((pageNo - 1) * pageSize, 0);
    int endIndex = pageNo * pageSize;
    int len = hits.length;
    if (endIndex > len) {
        endIndex = len;
    }
    for (int i = startIndex; i < endIndex; i++) {
        Document d = searcher.doc(hits[i].doc);
        list.add(Integer.valueOf(d.getField(ID).stringValue()));
    }
    return new Pagination(pageNo, pageSize, docs.totalHits, list);
}
项目:jeecms6    文件:LuceneContentSvcImpl.java   
/**
 * Runs a full-text search over the directory and returns the Content
 * entities for hits in the window [first, first+max).
 */
@Transactional(readOnly = true)
public List<Content> searchList(Directory dir, String queryString,String category,String workplace,
        Integer siteId, Integer channelId, Date startDate, Date endDate,
        int first, int max) throws CorruptIndexException, IOException,
        ParseException {
    Searcher searcher = new IndexSearcher(dir);
    try {
        // Normalize the requested window before sizing the search.
        if (first < 0) {
            first = 0;
        }
        if (max < 0) {
            max = 0;
        }
        Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_30);
        Query query = LuceneContent.createQuery(queryString,category,workplace, siteId,
                channelId, startDate, endDate, analyzer);
        TopDocs topDocs = searcher.search(query, first + max);
        List<Integer> idList = LuceneContent.getResultList(searcher, topDocs, first, max);
        List<Content> found = new ArrayList<Content>(idList.size());
        for (Object contentId : idList) {
            found.add(contentMng.findById((Integer) contentId));
        }
        return found;
    } finally {
        searcher.close();
    }
}
项目:Lucee4    文件:LuceneSearchCollection.java   
/**
 * Closes the searcher if non-null, converting any IOException into a
 * SearchException. A null searcher is accepted silently.
 */
private static void close(Searcher searcher) throws SearchException {
    if (searcher == null) {
        return;
    }
    try {
        searcher.close();
    }
    catch (IOException e) {
        throw new SearchException(e);
    }
}
项目:olat    文件:SearchResultsImpl.java   
/**
 * Converts the Lucene hits in {@code docs} into ResultDocuments.
 * Iteration starts at {@code firstResult} and stops once {@code maxReturns}
 * documents have been accepted or the configured maxHits cap is reached.
 * Documents whose RESERVED_TO field names other identities are skipped.
 * Side effect: updates the maxHits/totalHits/totalDocs fields.
 */
private List<ResultDocument> initResultList(final Identity identity, final Roles roles, final Query query, final Analyzer analyzer, final Searcher searcher,
        final TopDocs docs, final int firstResult, final int maxReturns, final boolean doHighlight) throws IOException {
    // Load every stored field except CONTENT unless extracts are needed.
    final FieldSelector selector = new FieldSelector() {
        @Override
        public FieldSelectorResult accept(final String fieldName) {
            return (doHighlight || !AbstractOlatDocument.CONTENT_FIELD_NAME.equals(fieldName)) ? FieldSelectorResult.LOAD : FieldSelectorResult.NO_LOAD;
        }
    };

    maxHits = SearchServiceFactory.getService().getSearchModuleConfig().getMaxHits();
    totalHits = docs.totalHits;
    totalDocs = (docs.scoreDocs == null ? 0 : docs.scoreDocs.length);
    final int numOfDocs = Math.min(maxHits, docs.totalHits);
    final List<ResultDocument> res = new ArrayList<ResultDocument>(maxReturns + 1);
    for (int i = firstResult; i < numOfDocs && res.size() < maxReturns; i++) {
        final Document doc = searcher.doc(docs.scoreDocs[i].doc, selector);
        final String reservedTo = doc.get(AbstractOlatDocument.RESERVED_TO);
        // Skip documents reserved to someone else (not public, not this identity).
        if (StringHelper.containsNonWhitespace(reservedTo) && !"public".equals(reservedTo) && !reservedTo.contains(identity.getKey().toString())) {
            continue;// admin cannot see private documents
        }

        final ResultDocument rDoc = createResultDocument(doc, i, query, analyzer, doHighlight, identity, roles);
        if (rDoc != null) {
            res.add(rDoc);
        }

        if (!roles.isOLATAdmin() && i % 10 == 0) {
            // Do commit after certain number of documents because the transaction should not be too big
            DBFactory.getInstance().intermediateCommit();
        }
    }
    return res;
}
项目:olat    文件:SearchResultsImpl.java   
/**
 * Converts the Lucene hits in {@code docs} into ResultDocuments.
 * Iteration starts at {@code firstResult} and stops once {@code maxReturns}
 * documents have been accepted or the configured maxHits cap is reached.
 * Documents whose RESERVED_TO field names other identities are skipped.
 * Side effect: updates the maxHits/totalHits/totalDocs fields.
 */
private List<ResultDocument> initResultList(final Identity identity, final Roles roles, final Query query, final Analyzer analyzer, final Searcher searcher,
        final TopDocs docs, final int firstResult, final int maxReturns, final boolean doHighlight) throws IOException {
    // Load every stored field except CONTENT unless extracts are needed.
    final FieldSelector selector = new FieldSelector() {
        @Override
        public FieldSelectorResult accept(final String fieldName) {
            return (doHighlight || !AbstractOlatDocument.CONTENT_FIELD_NAME.equals(fieldName)) ? FieldSelectorResult.LOAD : FieldSelectorResult.NO_LOAD;
        }
    };

    maxHits = SearchServiceFactory.getService().getSearchModuleConfig().getMaxHits();
    totalHits = docs.totalHits;
    totalDocs = (docs.scoreDocs == null ? 0 : docs.scoreDocs.length);
    final int numOfDocs = Math.min(maxHits, docs.totalHits);
    final List<ResultDocument> res = new ArrayList<ResultDocument>(maxReturns + 1);
    for (int i = firstResult; i < numOfDocs && res.size() < maxReturns; i++) {
        final Document doc = searcher.doc(docs.scoreDocs[i].doc, selector);
        final String reservedTo = doc.get(AbstractOlatDocument.RESERVED_TO);
        // Skip documents reserved to someone else (not public, not this identity).
        if (StringHelper.containsNonWhitespace(reservedTo) && !"public".equals(reservedTo) && !reservedTo.contains(identity.getKey().toString())) {
            continue;// admin cannot see private documents
        }

        final ResultDocument rDoc = createResultDocument(doc, i, query, analyzer, doHighlight, identity, roles);
        if (rDoc != null) {
            res.add(rDoc);
        }

        if (!roles.isOLATAdmin() && i % 10 == 0) {
            // Do commit after certain number of documents because the transaction should not be too big
            DBFactory.getInstance().intermediateCommit();
        }
    }
    return res;
}
项目:community-edition-old    文件:LuceneResultSet.java   
/**
 * Wrap a lucene seach result with node support.
 * All collaborators are simply stored for later use by the result set.
 * 
 * @param hits the raw Lucene Hits to expose
 * @param searcher the Searcher that produced the hits
 * @param nodeService nodeService
 * @param tenantService tenant service
 * @param searchParameters SearchParameters the search was executed with
 * @param config - lucene config
 */
public LuceneResultSet(Hits hits, Searcher searcher, NodeService nodeService, TenantService tenantService, SearchParameters searchParameters,
        LuceneConfig config)
{
    super();
    this.hits = hits;
    this.searcher = searcher;
    this.nodeService = nodeService;
    this.tenantService = tenantService;
    this.searchParameters = searchParameters;
    this.config = config;
    // One bit per hit; presumably marks hits already prefetched - confirm
    // against the prefetch logic elsewhere in the class.
    prefetch = new BitSet(hits.length());
}
项目:community-edition-old    文件:SolrAuthoritySetQuery.java   
/**
 * Builds this query's Weight; only a SolrIndexSearcher is supported.
 */
public Weight createWeight(Searcher searcher) throws IOException
{
    if (searcher instanceof SolrIndexSearcher)
    {
        return new SolrAuthoritySetQueryWeight((SolrIndexSearcher) searcher);
    }
    throw new IllegalStateException("Must have a SolrIndexSearcher");
}
项目:community-edition-old    文件:SolrCachingAuxDocQuery.java   
/**
 * Builds this query's Weight; only a SolrIndexSearcher is supported.
 */
public Weight createWeight(Searcher searcher) throws IOException
{
    if (searcher instanceof SolrIndexSearcher)
    {
        return new SolrCachingAuxDocQueryWeight((SolrIndexSearcher) searcher);
    }
    throw new IllegalStateException("Must have a SolrIndexSearcher");
}
项目:community-edition-old    文件:SolrCachingAuthorityQuery.java   
/**
 * Builds this query's Weight; only a SolrIndexSearcher is supported.
 */
public Weight createWeight(Searcher searcher) throws IOException
{
    if (searcher instanceof SolrIndexSearcher)
    {
        return new SolrCachingAuthorityQueryWeight((SolrIndexSearcher) searcher);
    }
    throw new IllegalStateException("Must have a SolrIndexSearcher");
}
项目:community-edition-old    文件:SolrCachingReaderQuery.java   
/**
 * Builds this query's Weight; only a SolrIndexSearcher is supported.
 */
public Weight createWeight(Searcher searcher) throws IOException
{
    if(!(searcher instanceof SolrIndexSearcher))
    {
        throw new IllegalStateException("Must have a SolrIndexSearcher");
    }
    // NOTE(review): every sibling query class returns its own weight type,
    // but SolrCachingReaderQuery returns SolrCachingAuthorityQueryWeight -
    // this looks like a copy-paste slip (a reader-oriented weight would be
    // expected). Confirm against the full class before changing.
    return new SolrCachingAuthorityQueryWeight((SolrIndexSearcher)searcher);
}
项目:community-edition-old    文件:SolrReaderSetQuery.java   
/**
 * Builds this query's Weight; only a SolrIndexSearcher is supported.
 */
public Weight createWeight(Searcher searcher) throws IOException
{
    if (searcher instanceof SolrIndexSearcher)
    {
        return new SolrReaderSetQueryWeight((SolrIndexSearcher) searcher);
    }
    throw new IllegalStateException("Must have a SolrIndexSearcher");
}
项目:community-edition-old    文件:SolrCachingOwnerQuery.java   
/**
 * Builds this query's Weight; only a SolrIndexSearcher is supported.
 */
public Weight createWeight(Searcher searcher) throws IOException
{
    if (searcher instanceof SolrIndexSearcher)
    {
        return new SolrCachingOwnerQueryWeight((SolrIndexSearcher) searcher);
    }
    throw new IllegalStateException("Must have a SolrIndexSearcher");
}
项目:community-edition-old    文件:SolrOwnerSetQuery.java   
/**
 * Builds this query's Weight; only a SolrIndexSearcher is supported.
 */
public Weight createWeight(Searcher searcher) throws IOException
{
    if (searcher instanceof SolrIndexSearcher)
    {
        return new SolrOwnerSetQueryWeight((SolrIndexSearcher) searcher);
    }
    throw new IllegalStateException("Must have a SolrIndexSearcher");
}
项目:community-edition-old    文件:SolrCachingPathQuery.java   
/**
 * Builds this query's Weight; only a SolrIndexSearcher is supported.
 */
public Weight createWeight(Searcher searcher) throws IOException
{
    if (searcher instanceof SolrIndexSearcher)
    {
        return new SolrCachingPathQueryWeight((SolrIndexSearcher) searcher);
    }
    throw new IllegalStateException("Must have a SolrIndexSearcher");
}
项目:dash-xtf    文件:SpanRecordingScorer.java   
/**
 * Construct a recording scorer.
 *
 * @param spans set of spans to process
 * @param weight weight of this query
 * @param similarity used to calculate scores, and compare queries
 * @param maxSpans max # of spans to collect
 * @throws IOException
 */
SpanRecordingScorer(Spans spans, SpanWeight weight, Similarity similarity,
                    int maxSpans)
  throws IOException 
{
  super(spans, weight, similarity);

  this.spans = spans;
  this.maxSpans = maxSpans;

  value = weight.getValue();
  field = ((SpanQuery)weight.getQuery()).getField();

  // Register ourselves with the searcher, so it will know how to call us
  // to get the matching spans.
  //
  Searcher searcher = weight.getSearcher();
  if (searcher instanceof RecordingSearcher)
    ((RecordingSearcher)searcher).registerRecordingScorer(this);

  // Make a map of all the terms.
  Collection termColl = ((SpanQuery)weight.getQuery()).getTerms();
  terms = new HashSet(termColl.size() * 2);
  for (Iterator iter = termColl.iterator(); iter.hasNext();) 
  {
    String term = ((Term)iter.next()).text();
    terms.add(term);

    // If this is a probable bi-gram, add both the component terms to the
    // map as well.
    //
    int sepPos = term.indexOf('~');
    if (sepPos > 0) {
      terms.add(term.substring(0, sepPos));
      terms.add(term.substring(sepPos + 1));
    }
  }
}
项目:dash-xtf    文件:SpanOrNearQuery.java   
/**
 * Returns the spans for this query. A single-clause query is delegated
 * directly to that clause; otherwise an OrNearSpans merges all clauses.
 */
public Spans getSpans(final IndexReader reader, final Searcher searcher)
  throws IOException 
{
  if (clauses.length != 1) {
    return new OrNearSpans(this, reader, searcher);
  }
  // optimize 1-clause case
  return clauses[0].getSpans(reader, searcher);
}