Java 类org.apache.lucene.search.Hits 实例源码

项目:sjk    文件:SearchServiceImpl.java   
/**
 * Searches the index for the given keywords.
 *
 * @param typeId   category id; selects the game index when it equals the
 *                 configured game type id, otherwise the soft index
 * @param keywords query string, parsed across the configured query fields
 * @return matching documents, in relevance order
 * @throws Exception on query-parse or index I/O failure
 */
public List<Document> searchIndex(Integer typeId, String keywords) throws Exception {
    // 1. init searcher
    Analyzer analyzer = new PaodingAnalyzer();
    // Compare by value: typeId is an Integer, so a bare == would compare
    // references if getGameTypeId() also returns Integer (boxing pitfall).
    boolean isGame = typeId.intValue() == appConfig.getGameTypeId();
    IndexReader reader = IndexReader.open(isGame ? appConfig.getGameIndexDir()
            : appConfig.getSoftIndexDir());
    try {
        BooleanClause.Occur[] flags = new BooleanClause.Occur[] { BooleanClause.Occur.SHOULD,
                BooleanClause.Occur.SHOULD };
        Query query = MultiFieldQueryParser.parse(keywords, appConfig.getQueryFields(), flags, analyzer);
        query = query.rewrite(reader);

        // 2. search the selected index, sorted by relevance
        List<Document> docs = new ArrayList<Document>();
        Hits hits = (isGame ? gameSearcher.search(query, Sort.RELEVANCE)
                : softSearcher.search(query, Sort.RELEVANCE));
        for (int i = 0; i < hits.length(); i++) {
            docs.add(hits.doc(i));
        }

        // 3. return
        return docs;
    } finally {
        // Close the reader even when parsing/searching throws (was leaked before).
        reader.close();
    }
}
项目:bisis-v4    文件:BulkIndexer.java   
/**
 * Adds a stored, untokenized field to the record with the given id and
 * rewrites that document in the index.
 *
 * @param recID  record id (the "ID" term) of the document to update
 * @param prefix name of the field to add
 * @param value  value of the field
 * @return true if exactly one matching document was found and updated
 */
public boolean addField(String recID, String prefix, String value){
   try {
       Searcher searcher = new IndexSearcher(indexPath);
       try {
           Query q = new TermQuery(new Term("ID", recID));
           Hits hits = searcher.search(q);
           // Require exactly one match; anything else is ambiguous.
           if ((hits == null) || (hits.length() != 1)){
               return false;
           }
           Document doc = hits.doc(0);
           IndexWriter iw = getIndexWriter();
           Field f = new Field(prefix, value, Field.Store.YES, Field.Index.UN_TOKENIZED, Field.TermVector.NO);
           doc.add(f);
           iw.updateDocument(new Term("ID", recID), doc);
       } finally {
           searcher.close(); // was leaked on every call before
       }
    } catch (IOException ex) {
       log.fatal(ex);
       return false;
     }
     return true;
}
项目:bisis-v4    文件:Indexer.java   
/**
 * Adds a stored, untokenized field to the record with the given id,
 * rewrites the document in the index, and closes the writer.
 *
 * @param recID  record id (the "ID" term) of the document to update
 * @param prefix name of the field to add
 * @param value  value of the field
 * @return true if exactly one matching document was found and updated
 */
public boolean addField(String recID, String prefix, String value){
  try {
   Searcher searcher = new IndexSearcher(indexPath);
   try {
       Query q = new TermQuery(new Term("ID", recID));
       Hits hits = searcher.search(q);
       // Require exactly one match; anything else is ambiguous.
       if ((hits == null) || (hits.length() != 1)){
           return false;
       }
       Document doc = hits.doc(0);
       IndexWriter iw = getIndexWriter();
       Field f = new Field(prefix, value, Field.Store.YES, Field.Index.UN_TOKENIZED, Field.TermVector.NO);
       doc.add(f);
       iw.updateDocument(new Term("ID", recID), doc);
       iw.close();
   } finally {
       searcher.close(); // was leaked on every call before
   }
 } catch (IOException ex) {
    log.fatal(ex);
    return false;
  }
  return true;
}
项目:bisis-v4    文件:Indexer.java   
/**
 * Removes one value of a repeatable field from the record with the given
 * id: all values of the field are re-added except the one equal to value.
 *
 * @param recID  record id (the "ID" term) of the document to update
 * @param prefix name of the field to remove the value from
 * @param value  the exact field value to drop
 */
public void deleteField(String recID, String prefix, String value){
   try {
       Searcher searcher = new IndexSearcher(indexPath);
       try {
           Query q = new TermQuery(new Term("ID", recID));
           Hits hits = searcher.search(q);
           // Require exactly one match; anything else is ambiguous.
           if ((hits == null) || (hits.length() != 1)){
               log.fatal("greska pri brisanju polja. Zapis: "+recID);
               return;
           }

           Document doc = hits.doc(0);
           Field[] fields = doc.getFields(prefix);
           IndexWriter iw = getIndexWriter();
           // Drop all values of the field, then re-add every value except
           // the one being deleted.
           doc.removeFields(prefix);
           for (int i = 0; i < fields.length; i++){
               if (!fields[i].stringValue().equals(value)){
                   doc.add(fields[i]);
               }
           }
           iw.updateDocument(new Term("ID", recID), doc);
           iw.close();
       } finally {
           searcher.close(); // was leaked on every call before
       }
    } catch (IOException ex) {
       log.fatal(ex);
     }

}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestMixedDirectory.java   
/**
 * Verifies the index in dir: exactly expectedHits documents match
 * content:apache, and their "id" fields cover 0..expectedHits-1 with each
 * id occurring exactly once.
 */
private void verify(Directory dir, int expectedHits) throws IOException {
  IndexSearcher searcher = new IndexSearcher(dir);
  try {
    Hits hits = searcher.search(new TermQuery(new Term("content", "apache")));
    int numHits = hits.length();

    assertEquals(expectedHits, numHits);

    // Tally each hit's "id"; every id must appear exactly once.
    int[] docs = new int[numHits];
    for (int i = 0; i < numHits; i++) {
      docs[Integer.parseInt(hits.doc(i).get("id"))]++;
    }
    for (int i = 0; i < numHits; i++) {
      assertEquals(1, docs[i]);
    }
  } finally {
    // Close even when an assertion fails (searcher was leaked before).
    searcher.close();
  }
}
项目:hadoop-EAR    文件:TestMixedDirectory.java   
/**
 * Verifies the index in dir: exactly expectedHits documents match
 * content:apache, and their "id" fields cover 0..expectedHits-1 with each
 * id occurring exactly once.
 */
private void verify(Directory dir, int expectedHits) throws IOException {
  IndexSearcher searcher = new IndexSearcher(dir);
  try {
    Hits hits = searcher.search(new TermQuery(new Term("content", "apache")));
    int numHits = hits.length();

    assertEquals(expectedHits, numHits);

    // Tally each hit's "id"; every id must appear exactly once.
    int[] docs = new int[numHits];
    for (int i = 0; i < numHits; i++) {
      docs[Integer.parseInt(hits.doc(i).get("id"))]++;
    }
    for (int i = 0; i < numHits; i++) {
      assertEquals(1, docs[i]);
    }
  } finally {
    // Close even when an assertion fails (searcher was leaked before).
    searcher.close();
  }
}
项目:reviki    文件:LuceneSearcher.java   
/**
 * Looks up the stored value of an index property by key.
 *
 * @param propertyName key stored in the FIELD_PROPERTY_KEY field
 * @return the matching FIELD_PROPERTY_VALUE, or null when the index is
 *         absent or no property with that key exists
 */
private String getProperty(final String propertyName) throws IOException {
  if (_dir == null) {
    return null;
  }
  try {
    ReadOperation<String> lookup = new ReadOperation<String>() {
      public String execute(final IndexReader reader, final Searcher searcher, final Analyzer analyzer) throws IOException, ParseException {
        Hits hits = searcher.search(new TermQuery(new Term(FIELD_PROPERTY_KEY, propertyName)));
        // At most one property per key is expected; take the first hit.
        Iterator<?> it = hits.iterator();
        return it.hasNext() ? ((Hit) it.next()).get(FIELD_PROPERTY_VALUE) : null;
      }
    };
    return doReadOperation(lookup, false);
  }
  catch (QuerySyntaxException ex) {
    throw new NoQueryPerformedException(ex);
  }
}
项目:hadoop-on-lustre    文件:TestMixedDirectory.java   
/**
 * Verifies the index in dir: exactly expectedHits documents match
 * content:apache, and their "id" fields cover 0..expectedHits-1 with each
 * id occurring exactly once.
 */
private void verify(Directory dir, int expectedHits) throws IOException {
  IndexSearcher searcher = new IndexSearcher(dir);
  try {
    Hits hits = searcher.search(new TermQuery(new Term("content", "apache")));
    int numHits = hits.length();

    assertEquals(expectedHits, numHits);

    // Tally each hit's "id"; every id must appear exactly once.
    int[] docs = new int[numHits];
    for (int i = 0; i < numHits; i++) {
      docs[Integer.parseInt(hits.doc(i).get("id"))]++;
    }
    for (int i = 0; i < numHits; i++) {
      assertEquals(1, docs[i]);
    }
  } finally {
    // Close even when an assertion fails (searcher was leaked before).
    searcher.close();
  }
}
项目:dash-xtf    文件:XMLTextProcessor.java   
/** Checks if a given document exists in the index.<br><br>
 *
 *  @param key        The key associated with the document in the index.
 *
 *  @return           true if a document was found, false if not.
 *
 */
public boolean docExists(String key)
  throws ParserConfigurationException, SAXException, IOException 
{
  // Find the docInfo chunk for the specified file: a chunk that has a
  // "docInfo" field AND a "key" field matching the given source file key.
  BooleanQuery query = new BooleanQuery();
  query.add(new TermQuery(new Term("docInfo", "1")), BooleanClause.Occur.MUST);
  query.add(new TermQuery(new Term("key", key)), BooleanClause.Occur.MUST);

  // Any hit at all means the document is present.
  return indexSearcher.search(query).length() > 0;
}
项目:spacewalk    文件:NGramQueryParserTest.java   
/**
 * Parses the query with the n-gram query parser and runs it against the
 * given index, logging the parsed form and every hit.
 * NOTE: the searcher is left open on purpose — the returned Hits fetches
 * documents through it.
 *
 * @return the Hits for the parsed query
 */
public Hits performSearch(Directory dir, String query, boolean useMust)
    throws Exception {

    NGramQueryParser parser = new NGramQueryParser("name",
            new NGramAnalyzer(min_ngram, max_ngram), useMust);
    Query parsed = parser.parse(query);
    IndexSearcher searcher = new IndexSearcher(dir);
    Hits hits = searcher.search(parsed);
    log.info("Original Query = " + query);
    log.info("Parsed Query = " + parsed.toString());
    log.info("Hits.length() = " + hits.length());
    for (int idx = 0; idx < hits.length(); idx++) {
        log.debug("Document<" + hits.id(idx) + "> = " + hits.doc(idx));
        //Explanation explain = searcher.explain(q, hits.id(i));
        //log.debug("explain = " + explain.toString());
    }
    return hits;
}
项目:spacewalk    文件:NGramTestSetup.java   
/**
 * Counts how many leading hits score at or above score_threshold.
 * Hits are iterated in order and counting stops at the first miss.
 * (A relative threshold against the top score would be an alternative
 * approach, per the original note.)
 */
protected int thresholdHits(Hits hits) throws IOException {
    int kept = 0;
    int total = hits.length();
    for (int idx = 0; idx < total; idx++) {
        if (hits.score(idx) < score_threshold) {
            break;
        }
        kept++;
    }
    return kept;
}
项目:spacewalk    文件:IndexManager.java   
/**
 * Logs scoring explanations for at most the first 10 hits, to help debug
 * relevance for the given index.
 */
private void debugExplainResults(String indexName, Hits hits, IndexSearcher searcher,
        Query q, Set<Term> queryTerms)
    throws IOException {
    log.debug("Parsed Query is " + q.toString());
    log.debug("Looking at index:  " + indexName);
    // Only the first 10 hits are explained; stop there instead of looping
    // (and no-op'ing) over the entire result set as before.
    int limit = Math.min(10, hits.length());
    for (int i = 0; i < limit; i++) {
        Document doc = hits.doc(i);
        Float score = hits.score(i);
        Explanation ex = searcher.explain(q, hits.id(i));
        log.debug("Looking at hit<" + i + ", " + hits.id(i) + ", " + score +
                ">: " + doc);
        log.debug("Explanation: " + ex);
        // Best guess at which field matched, derived from the query terms.
        MatchingField match = new MatchingField(q.toString(), doc, queryTerms);
        String fieldName = match.getFieldName();
        String fieldValue = match.getFieldValue();
        log.debug("Guessing that matched fieldName is " + fieldName + " = " +
                fieldValue);
    }
}
项目:snipsnap    文件:SnipSearchServlet.java   
/**
 * Handles a search form POST: when a non-empty query is supplied, runs the
 * search, stores the results in the session and forwards to the results
 * page; otherwise redirects back to the configured start URL.
 */
public void doPost(HttpServletRequest request, HttpServletResponse response)
    throws IOException, ServletException {

  String query = request.getParameter("query");
  if (query != null && query.length() > 0) {
    HttpSession session = request.getSession();
    SearchService searchService = (SearchService) snipsnap.api.container.Components.getComponent(SearchService.class);
    Hits hits = searchService.search(query);
    session.setAttribute("query", query);
    session.setAttribute("hits", hits);
    // Integer.valueOf reuses the cached instance instead of allocating a
    // new Integer (the old constructor form is deprecated).
    session.setAttribute("startIndex", Integer.valueOf(0));
    RequestDispatcher dispatcher = request.getRequestDispatcher("/exec/search.jsp");
    dispatcher.forward(request, response);
    return;
  }

  // No query supplied: bounce back to the configured home URL.
  Configuration config = Application.get().getConfiguration();
  response.sendRedirect(config.getUrl());
}
项目:RDFS    文件:TestMixedDirectory.java   
/**
 * Verifies the index in dir: exactly expectedHits documents match
 * content:apache, and their "id" fields cover 0..expectedHits-1 with each
 * id occurring exactly once.
 */
private void verify(Directory dir, int expectedHits) throws IOException {
  IndexSearcher searcher = new IndexSearcher(dir);
  try {
    Hits hits = searcher.search(new TermQuery(new Term("content", "apache")));
    int numHits = hits.length();

    assertEquals(expectedHits, numHits);

    // Tally each hit's "id"; every id must appear exactly once.
    int[] docs = new int[numHits];
    for (int i = 0; i < numHits; i++) {
      docs[Integer.parseInt(hits.doc(i).get("id"))]++;
    }
    for (int i = 0; i < numHits; i++) {
      assertEquals(1, docs[i]);
    }
  } finally {
    // Close even when an assertion fails (searcher was leaked before).
    searcher.close();
  }
}
项目:hadoop-0.20    文件:TestMixedDirectory.java   
/**
 * Verifies the index in dir: exactly expectedHits documents match
 * content:apache, and their "id" fields cover 0..expectedHits-1 with each
 * id occurring exactly once.
 */
private void verify(Directory dir, int expectedHits) throws IOException {
  IndexSearcher searcher = new IndexSearcher(dir);
  try {
    Hits hits = searcher.search(new TermQuery(new Term("content", "apache")));
    int numHits = hits.length();

    assertEquals(expectedHits, numHits);

    // Tally each hit's "id"; every id must appear exactly once.
    int[] docs = new int[numHits];
    for (int i = 0; i < numHits; i++) {
      docs[Integer.parseInt(hits.doc(i).get("id"))]++;
    }
    for (int i = 0; i < numHits; i++) {
      assertEquals(1, docs[i]);
    }
  } finally {
    // Close even when an assertion fails (searcher was leaked before).
    searcher.close();
  }
}
项目:Deskera-HRMS    文件:SearchBean.java   
/**
 * Runs a keyword query against the index at indexPath over a single field.
 *
 * @param query     raw user query
 * @param Field     the single field to search
 * @param indexPath filesystem path of the Lucene index
 * @return the Hits for the query, or null when searching fails
 */
public Hits skynetsearch(String query, String Field, String indexPath) {
    Hits result = null;

    try {
        // Removed dead locals (indexfield/querytext) that were built but
        // never used.
        String[] search_fields = {Field};
        IndexSearcher searcher = new IndexSearcher(indexPath);
        KeywordAnalyzer analyzer = new KeywordAnalyzer();
        Query lucenequery = MultiFieldQueryParser.parse(query,
                search_fields, analyzer);
        // NOTE(review): the searcher stays open on purpose — the returned
        // Hits lazily fetches documents through it.
        result = searcher.search(lucenequery);

    } catch (IOException e) {
        e.printStackTrace();
    } catch (Exception ex) {
        System.out.println(ex + "");
    }

    return result;
}
项目:Deskera-HRMS    文件:SearchBean.java   
/**
 * Runs a keyword query over a single field against the document index
 * configured by StorageHandler.
 *
 * @param query raw user query
 * @param Field the single field to search
 * @return the Hits for the query, or null when searching fails
 */
public Hits skynetsearch(String query, String Field) {
    Hits result = null;

    try {
        // Removed dead locals (indexfield/querytext) that were built but
        // never used.
        String[] search_fields = {Field};
        String indexPath = StorageHandler.GetDocIndexPath();
        IndexSearcher searcher = new IndexSearcher(indexPath);
        KeywordAnalyzer analyzer = new KeywordAnalyzer();
        Query lucenequery = MultiFieldQueryParser.parse(query,
                search_fields, analyzer);
        // NOTE(review): the searcher stays open on purpose — the returned
        // Hits lazily fetches documents through it.
        result = searcher.search(lucenequery);

    } catch (IOException e) {
        e.printStackTrace();
    } catch (Exception ex) {
        System.out.println(ex + "");
    }

    return result;
}
项目:Deskera-HRMS    文件:SearchBean.java   
/**
 * Searches the index at indexPath across several fields, OR-combining the
 * per-field parses of the query.
 *
 * @return the Hits for the query, or null when searching fails
 */
public Hits skynetsearchMulti(String query, String[] Field, String indexPath) {
    Hits result = null;
    try {
        KeywordAnalyzer analyzer = new KeywordAnalyzer();
        MultiFieldQueryParser multiparser = new MultiFieldQueryParser(Field, analyzer);
        multiparser.setDefaultOperator(QueryParser.Operator.OR);
        Query parsed = multiparser.parse(query);
        IndexSearcher searcher = new IndexSearcher(indexPath);
        result = searcher.search(parsed);
    } catch (IOException e) {
        e.printStackTrace();
    } catch (Exception ex) {
        System.out.println(ex + "");
    }

    return result;
}
项目:Deskera-HRMS    文件:KrawlerIndexCreator.java   
/**
 * Re-indexes the document with the given id, replacing fieldName's value
 * with fieldValue and copying every other configured field unchanged.
 */
public void updateDocument(String documentId, String fieldName,
        String fieldValue) {
    try {
        SearchBean sbean = new SearchBean();
        Hits hresult = sbean.skynetsearch(documentId, "DocumentId");
        // Guard against a missing document: doc(0) on an empty result
        // would throw (previously surfaced only as a printed exception).
        if (hresult == null || hresult.length() == 0) {
            return;
        }
        Document doc = hresult.doc(0);
        ArrayList<DocumentFields> docfields = new ArrayList<DocumentFields>();
        for (int i = 0; i < fields.length; i++) {
            DocumentFields docFields = new DocumentFields();
            docFields.SetFieldName(fields[i]);
            // Substitute the new value for the updated field; copy the rest.
            if (fields[i].equalsIgnoreCase(fieldName)) {
                docFields.SetFieldValue(fieldValue);
            } else {
                docFields.SetFieldValue(doc.get(fields[i]));
            }
            docfields.add(docFields);
        }
        // Replace the old document wholesale: delete then recreate.
        DeleteIndex(documentId);
        CreateIndex(docfields);

    } catch (Exception ex) {
        System.out.print(ex.toString());
    }

}
项目:Deskera-HRMS    文件:SearchBean.java   
/**
 * Runs a keyword query against the index at indexPath over a single field.
 *
 * @param query     raw user query
 * @param Field     the single field to search
 * @param indexPath filesystem path of the Lucene index
 * @return the Hits for the query, or null when searching fails
 */
public Hits skynetsearch(String query, String Field, String indexPath) {
    Hits result = null;

    try {
        // Removed dead locals (indexfield/querytext) that were built but
        // never used.
        String[] search_fields = {Field};
        IndexSearcher searcher = new IndexSearcher(indexPath);
        KeywordAnalyzer analyzer = new KeywordAnalyzer();
        Query lucenequery = MultiFieldQueryParser.parse(query,
                search_fields, analyzer);
        // NOTE(review): the searcher stays open on purpose — the returned
        // Hits lazily fetches documents through it.
        result = searcher.search(lucenequery);

    } catch (IOException e) {
        e.printStackTrace();
    } catch (Exception ex) {
        System.out.println(ex + "");
    }

    return result;
}
项目:Deskera-HRMS    文件:SearchBean.java   
/**
 * Runs a keyword query over a single field against the document index
 * configured by storageHandlerImpl.
 *
 * @param query raw user query
 * @param Field the single field to search
 * @return the Hits for the query, or null when searching fails
 */
public Hits skynetsearch(String query, String Field) {
    Hits result = null;

    try {
        // Removed dead locals (indexfield/querytext) that were built but
        // never used.
        String[] search_fields = {Field};
        String indexPath = storageHandlerImpl.GetDocIndexPath();
        IndexSearcher searcher = new IndexSearcher(indexPath);
        KeywordAnalyzer analyzer = new KeywordAnalyzer();
        Query lucenequery = MultiFieldQueryParser.parse(query,
                search_fields, analyzer);
        // NOTE(review): the searcher stays open on purpose — the returned
        // Hits lazily fetches documents through it.
        result = searcher.search(lucenequery);

    } catch (IOException e) {
        e.printStackTrace();
    } catch (Exception ex) {
        System.out.println(ex + "");
    }

    return result;
}
项目:Deskera-HRMS    文件:SearchBean.java   
/**
 * Searches the index at indexPath across several fields, OR-combining the
 * per-field parses of the query.
 *
 * @return the Hits for the query, or null when searching fails
 */
public Hits skynetsearchMulti(String query, String[] Field, String indexPath) {
    Hits result = null;
    try {
        KeywordAnalyzer analyzer = new KeywordAnalyzer();
        MultiFieldQueryParser multiparser = new MultiFieldQueryParser(Field, analyzer);
        multiparser.setDefaultOperator(QueryParser.Operator.OR);
        Query parsed = multiparser.parse(query);
        IndexSearcher searcher = new IndexSearcher(indexPath);
        result = searcher.search(parsed);
    } catch (IOException e) {
        e.printStackTrace();
    } catch (Exception ex) {
        System.out.println(ex + "");
    }

    return result;
}
项目:Deskera-HRMS    文件:KrawlerIndexCreator.java   
/**
 * Re-indexes the document with the given id, replacing fieldName's value
 * with fieldValue and copying every other configured field unchanged.
 */
public void updateDocument(String documentId, String fieldName,
        String fieldValue) {
    try {
        SearchBean sbean = new SearchBean();
        Hits hresult = sbean.skynetsearch(documentId, "DocumentId");
        // Guard against a missing document: doc(0) on an empty result
        // would throw (previously surfaced only as a printed exception).
        if (hresult == null || hresult.length() == 0) {
            return;
        }
        Document doc = hresult.doc(0);
        ArrayList<DocumentFields> docfields = new ArrayList<DocumentFields>();
        for (int i = 0; i < fields.length; i++) {
            DocumentFields docFields = new DocumentFields();
            docFields.SetFieldName(fields[i]);
            // Substitute the new value for the updated field; copy the rest.
            if (fields[i].equalsIgnoreCase(fieldName)) {
                docFields.SetFieldValue(fieldValue);
            } else {
                docFields.SetFieldValue(doc.get(fields[i]));
            }
            docfields.add(docFields);
        }
        // Replace the old document wholesale: delete then recreate.
        DeleteIndex(documentId);
        CreateIndex(docfields);

    } catch (Exception ex) {
        System.out.print(ex.toString());
    }

}
项目:hanoi-hadoop-2.0.0-cdh    文件:TestMixedDirectory.java   
/**
 * Verifies the index in dir: exactly expectedHits documents match
 * content:apache, and their "id" fields cover 0..expectedHits-1 with each
 * id occurring exactly once.
 */
private void verify(Directory dir, int expectedHits) throws IOException {
  IndexSearcher searcher = new IndexSearcher(dir);
  try {
    Hits hits = searcher.search(new TermQuery(new Term("content", "apache")));
    int numHits = hits.length();

    assertEquals(expectedHits, numHits);

    // Tally each hit's "id"; every id must appear exactly once.
    int[] docs = new int[numHits];
    for (int i = 0; i < numHits; i++) {
      docs[Integer.parseInt(hits.doc(i).get("id"))]++;
    }
    for (int i = 0; i < numHits; i++) {
      assertEquals(1, docs[i]);
    }
  } finally {
    // Close even when an assertion fails (searcher was leaked before).
    searcher.close();
  }
}
项目:mapreduce-fork    文件:TestMixedDirectory.java   
/**
 * Verifies the index in dir: exactly expectedHits documents match
 * content:apache, and their "id" fields cover 0..expectedHits-1 with each
 * id occurring exactly once.
 */
private void verify(Directory dir, int expectedHits) throws IOException {
  IndexSearcher searcher = new IndexSearcher(dir);
  try {
    Hits hits = searcher.search(new TermQuery(new Term("content", "apache")));
    int numHits = hits.length();

    assertEquals(expectedHits, numHits);

    // Tally each hit's "id"; every id must appear exactly once.
    int[] docs = new int[numHits];
    for (int i = 0; i < numHits; i++) {
      docs[Integer.parseInt(hits.doc(i).get("id"))]++;
    }
    for (int i = 0; i < numHits; i++) {
      assertEquals(1, docs[i]);
    }
  } finally {
    // Close even when an assertion fails (searcher was leaked before).
    searcher.close();
  }
}
项目:CadalWorkspace    文件:ConsoleSearch.java   
/**
 * Runs a quick search of the index for the given type, matching either the
 * original query or its STtransform'd variant.
 *
 * @param type index subdirectory to search
 * @param qstr raw query string
 * @return the Hits, or null when the search fails
 */
public Hits search(String type, String qstr)
{
    // Match the raw query OR its transformed form.
    String expanded = "(" + qstr + ") OR ("+TStransformer.STtransform(qstr)+")";

    Hits found = null;
    try{
        FSDirectory dir = FSDirectory.getDirectory(QuickSearchConfig.getIndexDir() + "/" + type);
        Query parsed = MultiFieldQueryParser.parse(expanded, fields, flags, analyzer);
        found = new IndexSearcher(dir).search(parsed);
    }catch(Exception ex){
        ex.printStackTrace();
    }
    return found;
}
项目:l2-writing-assistant    文件:KnowledgeBasePathIndex.java   
/**
 * Collects all paths between two concepts, optionally restricted to a
 * given depth.
 *
 * @param sourceConcept start concept
 * @param targetConcept end concept
 * @param depth         maximum path depth, or null for no restriction
 * @return all paths between the input concepts up to the given depth
 * @throws IOException on index access failure
 */
public Set<List<String>> getPathsBetween(String sourceConcept,
                                         String targetConcept,
                                         Integer depth) throws IOException
{
    // Both endpoints are required clauses.
    BooleanQuery q = new BooleanQuery();
    q.add(new BooleanClause(
            new TermQuery(new Term(KnowledgeBasePathIndexField.START.toString(), sourceConcept)),
            Occur.MUST));
    q.add(new BooleanClause(
            new TermQuery(new Term(KnowledgeBasePathIndexField.END.toString(), targetConcept)),
            Occur.MUST));
    // Depth is an optional constraint; omit the clause when unspecified.
    if (depth != null) {
        q.add(new BooleanClause(
                new TermQuery(new Term(KnowledgeBasePathIndexField.DEPTH.toString(), depth.toString())),
                Occur.MUST));
    }

    return getPaths(index.search(q));
}
项目:hortonworks-extension    文件:TestMixedDirectory.java   
/**
 * Verifies the index in dir: exactly expectedHits documents match
 * content:apache, and their "id" fields cover 0..expectedHits-1 with each
 * id occurring exactly once.
 */
private void verify(Directory dir, int expectedHits) throws IOException {
  IndexSearcher searcher = new IndexSearcher(dir);
  try {
    Hits hits = searcher.search(new TermQuery(new Term("content", "apache")));
    int numHits = hits.length();

    assertEquals(expectedHits, numHits);

    // Tally each hit's "id"; every id must appear exactly once.
    int[] docs = new int[numHits];
    for (int i = 0; i < numHits; i++) {
      docs[Integer.parseInt(hits.doc(i).get("id"))]++;
    }
    for (int i = 0; i < numHits; i++) {
      assertEquals(1, docs[i]);
    }
  } finally {
    // Close even when an assertion fails (searcher was leaked before).
    searcher.close();
  }
}
项目:hortonworks-extension    文件:TestMixedDirectory.java   
/**
 * Verifies the index in dir: exactly expectedHits documents match
 * content:apache, and their "id" fields cover 0..expectedHits-1 with each
 * id occurring exactly once.
 */
private void verify(Directory dir, int expectedHits) throws IOException {
  IndexSearcher searcher = new IndexSearcher(dir);
  try {
    Hits hits = searcher.search(new TermQuery(new Term("content", "apache")));
    int numHits = hits.length();

    assertEquals(expectedHits, numHits);

    // Tally each hit's "id"; every id must appear exactly once.
    int[] docs = new int[numHits];
    for (int i = 0; i < numHits; i++) {
      docs[Integer.parseInt(hits.doc(i).get("id"))]++;
    }
    for (int i = 0; i < numHits; i++) {
      assertEquals(1, docs[i]);
    }
  } finally {
    // Close even when an assertion fails (searcher was leaked before).
    searcher.close();
  }
}
项目:hadoop-gpu    文件:TestMixedDirectory.java   
/**
 * Verifies the index in dir: exactly expectedHits documents match
 * content:apache, and their "id" fields cover 0..expectedHits-1 with each
 * id occurring exactly once.
 */
private void verify(Directory dir, int expectedHits) throws IOException {
  IndexSearcher searcher = new IndexSearcher(dir);
  try {
    Hits hits = searcher.search(new TermQuery(new Term("content", "apache")));
    int numHits = hits.length();

    assertEquals(expectedHits, numHits);

    // Tally each hit's "id"; every id must appear exactly once.
    int[] docs = new int[numHits];
    for (int i = 0; i < numHits; i++) {
      docs[Integer.parseInt(hits.doc(i).get("id"))]++;
    }
    for (int i = 0; i < numHits; i++) {
      assertEquals(1, docs[i]);
    }
  } finally {
    // Close even when an assertion fails (searcher was leaked before).
    searcher.close();
  }
}
项目:alfresco-repository    文件:LuceneResultSet.java   
/**
 * Wraps a raw Lucene search result with node-aware access.
 *
 * @param hits             the Lucene hits to wrap
 * @param searcher         the searcher that produced the hits
 * @param nodeService      node service used to resolve nodes
 * @param tenantService    tenant service
 * @param searchParameters the parameters the search was run with
 * @param config           lucene configuration
 */
public LuceneResultSet(Hits hits, Searcher searcher, NodeService nodeService, TenantService tenantService, SearchParameters searchParameters,
        LuceneConfig config)
{
    super();
    this.config = config;
    this.searchParameters = searchParameters;
    this.tenantService = tenantService;
    this.nodeService = nodeService;
    this.searcher = searcher;
    this.hits = hits;
    // One prefetch flag per hit, all initially clear.
    prefetch = new BitSet(hits.length());
}
项目:bisis-v4    文件:Retriever.java   
/**
 * Runs the query and returns every matching record id together with the
 * inventory ("IN") field values collected across all hits.
 *
 * @param query      the Lucene query to run
 * @param sortPrefix field to sort by ("RN_sort" sorts numerically), or
 *                   null/empty for relevance order
 * @return the populated Result, or null when the search fails
 */
public Result selectAll(Query query, String sortPrefix){
  try {
    BooleanQuery.setMaxClauseCount(20000);//zbog heap-a
    Searcher searcher = new IndexSearcher(indexPath);
    try {
      Hits hits;
      if (sortPrefix == null || "".equals(sortPrefix))
        hits = searcher.search(query);
      else {
        // "RN_sort" is sorted as an integer field, all others as strings.
        int sortType = SortField.STRING;
        if ("RN_sort".equals(sortPrefix))
          sortType = SortField.INT;
        hits = searcher.search(query, new Sort(
            new SortField(sortPrefix, sortType)));
      }

      int n = hits.length();
      int[] retVal = new int[n];
      List<String> invs = new ArrayList<String>();

      for (int i = 0; i < n; i++) {
        retVal[i] = Integer.parseInt(hits.doc(i).get("ID"));
        // Collect every inventory number stored on the hit.
        Field[] tmp = hits.doc(i).getFields("IN");
        if (tmp != null){
          for (int j = 0; j < tmp.length; j++){
            invs.add(tmp[j].stringValue());
          }
        }
      }
      Result result = new Result();
      result.setRecords(retVal);
      result.setInvs(invs);
      return result;
    } finally {
      searcher.close(); // close even when the search throws (was leaked)
    }
  } catch (Exception ex) {
    log.fatal(ex);
    return null;
  }
}
项目:bisis-v4    文件:Retriever.java   
/**
 * Runs a filtered query and returns the matching record ids.
 *
 * @param query      the Lucene query to run
 * @param filter     result filter
 * @param sortPrefix field to sort by ("RN_sort" sorts numerically), or
 *                   null/empty for relevance order
 * @return record ids of the hits, or null when the search fails
 */
public int[] select(Query query, Filter filter, String sortPrefix){
  try {
    BooleanQuery.setMaxClauseCount(Integer.MAX_VALUE);
    Searcher searcher = new IndexSearcher(indexPath);
    try {
      Hits hits;
      if (sortPrefix == null || "".equals(sortPrefix)){
        hits = searcher.search(query,filter);
      } else {
        // "RN_sort" is sorted as an integer field, all others as strings.
        int sortType = SortField.STRING;
        if ("RN_sort".equals(sortPrefix))
          sortType = SortField.INT;
        hits = searcher.search(query,filter, new Sort(
            new SortField(sortPrefix, sortType)));
      }
      int n = hits.length();
      int[] retVal = new int[n];
      for (int i = 0; i < n; i++) {
        retVal[i] = Integer.parseInt(hits.doc(i).get("ID"));
      }
      return retVal;
    } finally {
      searcher.close(); // close even when the search throws (was leaked)
    }
  } catch (Exception ex) {
    log.fatal(ex);
    return null;
  }
}
项目:bisis-v4    文件:Retriever.java   
/**
 * Runs a filtered query and returns every matching record id together with
 * the inventory ("IN") field values collected across all hits.
 *
 * @param query      the Lucene query to run
 * @param filter     result filter
 * @param sortPrefix field to sort by ("RN_sort" sorts numerically), or
 *                   null/empty for relevance order
 * @return the populated Result, or null when the search fails
 */
public Result selectAll(Query query, Filter filter, String sortPrefix) {
  try {
    BooleanQuery.setMaxClauseCount(Integer.MAX_VALUE);
    Searcher searcher = new IndexSearcher(indexPath);
    try {
      Hits hits;
      if (sortPrefix == null || "".equals(sortPrefix)){
        hits = searcher.search(query,filter);
      } else {
        // "RN_sort" is sorted as an integer field, all others as strings.
        int sortType = SortField.STRING;
        if ("RN_sort".equals(sortPrefix))
          sortType = SortField.INT;
        hits = searcher.search(query,filter, new Sort(
            new SortField(sortPrefix, sortType)));
      }
      int n = hits.length();
      int[] retVal = new int[n];
      List<String> invs = new ArrayList<String>();

      for (int i = 0; i < n; i++) {
        retVal[i] = Integer.parseInt(hits.doc(i).get("ID"));
        // Collect every inventory number stored on the hit.
        Field[] tmp = hits.doc(i).getFields("IN");
        if (tmp != null){
          for (int j = 0; j < tmp.length; j++){
            invs.add(tmp[j].stringValue());
          }
        }
      }
      Result result = new Result();
      result.setRecords(retVal);
      result.setInvs(invs);
      return result;
    } finally {
      searcher.close(); // close even when the search throws (was leaked)
    }
  } catch (Exception ex) {
    log.fatal(ex);
    return null;
  }
}
项目:bisis-v4    文件:Retriever.java   
/**
 * Runs the query and collects the distinct values of the given field that
 * start with the supplied text (used for query expansion).
 *
 * @param query  query string, parsed over the "contents" field
 * @param prefix name of the stored field whose values are collected
 * @param text   prefix the returned values must start with
 * @return distinct matching values, or null when the search fails
 */
public List<String> selectExpand(String query, String prefix, String text){
  try {
   WhitespaceAnalyzer sa = new WhitespaceAnalyzer();
   BooleanQuery.setMaxClauseCount(Integer.MAX_VALUE);
   QueryParser p = new QueryParser("contents", sa);
   Query q = p.parse(query);
   Searcher searcher = new IndexSearcher(indexPath);
   try {
     // Removed the dead StopWatch: it was started and stopped but its
     // reading was never used.
     Hits hits = searcher.search(q);
     int n = hits.length();
     List<String> expandList = new ArrayList<String>();
     for (int i = 0; i < n; i++) {
       Field[] tmp = hits.doc(i).getFields(prefix);
       if (tmp == null) {
         continue;
       }
       for (int j = 0; j < tmp.length; j++){
         // Stored values carry 0start0/0end0 markers; strip them first.
         String pom = tmp[j].stringValue().replace("0start0 ", "");
         pom = pom.replace(" 0end0", "");
         if (pom.startsWith(text) && !expandList.contains(pom)){
           expandList.add(pom);
         }
       }
     }
     return expandList;
   } finally {
     searcher.close(); // close even when the search throws (was leaked)
   }
  } catch (Exception ex) {
    log.fatal(ex);
    return null;
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestDistributionPolicy.java   
/**
 * Verifies the combined shard indexes: no "apache" documents remain
 * (presumably removed by the operation under test — confirm against the
 * test setup), while exactly numDocsPerRun/2 "hadoop" documents — those
 * with odd ids — are present, one copy each.
 */
private void verify(Shard[] shards) throws IOException {
  // verify the index: open one reader per shard and search them as one.
  IndexReader[] readers = new IndexReader[shards.length];
  for (int i = 0; i < shards.length; i++) {
    Directory dir =
        new FileSystemDirectory(fs, new Path(shards[i].getDirectory()),
            false, conf);
    readers[i] = IndexReader.open(dir);
  }

  IndexReader reader = new MultiReader(readers);
  IndexSearcher searcher = new IndexSearcher(reader);
  // No "apache" documents are expected to remain.
  Hits hits = searcher.search(new TermQuery(new Term("content", "apache")));
  assertEquals(0, hits.length());

  // Half of the "hadoop" documents are expected.
  hits = searcher.search(new TermQuery(new Term("content", "hadoop")));
  assertEquals(numDocsPerRun / 2, hits.length());

  // Tally the "id" field of each remaining hit.
  int[] counts = new int[numDocsPerRun];
  for (int i = 0; i < hits.length(); i++) {
    Document doc = hits.doc(i);
    counts[Integer.parseInt(doc.get("id"))]++;
  }

  // Even ids must be gone; each odd id must appear exactly once.
  for (int i = 0; i < numDocsPerRun; i++) {
    if (i % 2 == 0) {
      assertEquals(0, counts[i]);
    } else {
      assertEquals(1, counts[i]);
    }
  }

  searcher.close();
  reader.close();
}
项目:hadoop-EAR    文件:TestDistributionPolicy.java   
/**
 * Verifies the combined shard indexes: no "apache" documents remain
 * (presumably removed by the operation under test — confirm against the
 * test setup), while exactly numDocsPerRun/2 "hadoop" documents — those
 * with odd ids — are present, one copy each.
 */
private void verify(Shard[] shards) throws IOException {
  // verify the index: open one reader per shard and search them as one.
  IndexReader[] readers = new IndexReader[shards.length];
  for (int i = 0; i < shards.length; i++) {
    Directory dir =
        new FileSystemDirectory(fs, new Path(shards[i].getDirectory()),
            false, conf);
    readers[i] = IndexReader.open(dir);
  }

  IndexReader reader = new MultiReader(readers);
  IndexSearcher searcher = new IndexSearcher(reader);
  // No "apache" documents are expected to remain.
  Hits hits = searcher.search(new TermQuery(new Term("content", "apache")));
  assertEquals(0, hits.length());

  // Half of the "hadoop" documents are expected.
  hits = searcher.search(new TermQuery(new Term("content", "hadoop")));
  assertEquals(numDocsPerRun / 2, hits.length());

  // Tally the "id" field of each remaining hit.
  int[] counts = new int[numDocsPerRun];
  for (int i = 0; i < hits.length(); i++) {
    Document doc = hits.doc(i);
    counts[Integer.parseInt(doc.get("id"))]++;
  }

  // Even ids must be gone; each odd id must appear exactly once.
  for (int i = 0; i < numDocsPerRun; i++) {
    if (i % 2 == 0) {
      assertEquals(0, counts[i]);
    } else {
      assertEquals(1, counts[i]);
    }
  }

  searcher.close();
  reader.close();
}
项目:Lucee4    文件:SearchResulItemHits.java   
/**
 * Creates a search-result item backed by one entry of a Hits list.
 *
 * @param hits            the hit list this item belongs to
 * @param index           position of this item within hits
 * @param highlighter     highlighter used for fragment rendering
 * @param analyzer        analyzer paired with the highlighter
 * @param id              result id
 * @param categoryTree    category tree of the result
 * @param category        category of the result
 * @param maxNumFragments maximum number of highlight fragments
 * @param maxLength       maximum fragment length
 */
public SearchResulItemHits(Hits hits, int index, Object highlighter, Analyzer analyzer,
        String id, String categoryTree, String category, int maxNumFragments, int maxLength) {
    this.maxLength = maxLength;
    this.maxNumFragments = maxNumFragments;
    this.category = category;
    this.categoryTree = categoryTree;
    this.id = id;
    this.analyzer = analyzer;
    this.highlighter = highlighter;
    this.index = index;
    this.hits = hits;
}
项目:whatswrong    文件:CorpusNavigator.java   
/**
 * Searches the current corpus using the search terms in the search field.
 * Matches are highlighted and listed in the results panel; failures are
 * only printed, leaving the previous results untouched.
 */
private void searchCorpus() {
    // Nothing to do for a blank query.
    if (search.getText().trim().equals("")) return;
    try {
        // When a "guess" corpus is selected, search the gold/guess diff
        // corpus; otherwise search the gold corpus directly.
        indexSearcher = guess.getSelected() != null ?
            getIndex(getDiffCorpus(gold.getSelected(), guess.getSelected())) :
            getIndex(gold.getSelected());
        //System.out.println("Searching...");
        QueryParser parser = new QueryParser("Word", analyzer);
        Query query = parser.parse(search.getText());
        Hits hits = indexSearcher.search(query);
        Highlighter highlighter = new Highlighter(new QueryScorer(query));
        DefaultListModel model = new DefaultListModel();
        for (int i = 0; i < hits.length(); i++) {
            Document hitDoc = hits.doc(i);
            // "<nr>" stores the instance number of the hit.
            int nr = Integer.parseInt(hitDoc.get("<nr>"));
            //System.out.println(hitDoc.get("<nr>"));
            // Use the first field that yields a highlighted fragment.
            String best = null;
            for (Object field : hitDoc.getFields()) {
                Field f = (Field) field;
                best = highlighter.getBestFragment(analyzer, f.name(), hitDoc.get(f.name()));
                if (best != null) break;
            }
            if (best != null)
                model.addElement(new Result(nr, "<html>" + nr + ":" + best + "</html>"));
            //System.out.println(highlighter.getBestFragment(analyzer, "Word", hitDoc.get("Word")));
            //assertEquals("This is the text to be indexed.", hitDoc.get("fieldname"));
        }
        results.setModel(model);
        repaint();
    } catch (Exception ex) {
        ex.printStackTrace();
    }
}
项目:community-edition-old    文件:LuceneResultSet.java   
/**
 * Wraps a raw Lucene search result with node-aware access.
 *
 * @param hits             the Lucene hits to wrap
 * @param searcher         the searcher that produced the hits
 * @param nodeService      node service used to resolve nodes
 * @param tenantService    tenant service
 * @param searchParameters the parameters the search was run with
 * @param config           lucene configuration
 */
public LuceneResultSet(Hits hits, Searcher searcher, NodeService nodeService, TenantService tenantService, SearchParameters searchParameters,
        LuceneConfig config)
{
    super();
    this.config = config;
    this.searchParameters = searchParameters;
    this.tenantService = tenantService;
    this.nodeService = nodeService;
    this.searcher = searcher;
    this.hits = hits;
    // One prefetch flag per hit, all initially clear.
    prefetch = new BitSet(hits.length());
}