Java class org.apache.lucene.search.Weight: example source code

Project: elasticsearch_my    File: FiltersAggregatorFactory.java
public FiltersAggregatorFactory(String name, List<KeyedFilter> filters, boolean keyed, boolean otherBucket,
        String otherBucketKey, SearchContext context, AggregatorFactory<?> parent, AggregatorFactories.Builder subFactories,
        Map<String, Object> metaData) throws IOException {
    super(name, context, parent, subFactories, metaData);
    this.keyed = keyed;
    this.otherBucket = otherBucket;
    this.otherBucketKey = otherBucketKey;
    IndexSearcher contextSearcher = context.searcher();
    weights = new Weight[filters.size()];
    keys = new String[filters.size()];
    for (int i = 0; i < filters.size(); ++i) {
        KeyedFilter keyedFilter = filters.get(i);
        this.keys[i] = keyedFilter.key();
        Query filter = keyedFilter.filter().toFilter(context.getQueryShardContext());
        this.weights[i] = contextSearcher.createNormalizedWeight(filter, false);
    }
}
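
A minimal standalone sketch of the pattern above, assuming the same Lucene 6.x-era API (IndexSearcher.createNormalizedWeight, Weight.scorer(LeafReaderContext)); the class name, helper name, and term are hypothetical:

import java.io.IOException;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.Weight;

public final class WeightCountSketch {
    // Hypothetical helper: count matches of a single term filter,
    // reusing one pre-built Weight across all segments, the way the
    // factory above pre-builds its weights[] array.
    static long countMatches(IndexSearcher searcher, Term term) throws IOException {
        Weight weight = searcher.createNormalizedWeight(new TermQuery(term), false); // false: scores not needed
        long count = 0;
        for (LeafReaderContext ctx : searcher.getIndexReader().leaves()) {
            Scorer scorer = weight.scorer(ctx);
            if (scorer == null) {
                continue; // no matching docs in this segment
            }
            DocIdSetIterator it = scorer.iterator();
            while (it.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
                count++;
            }
        }
        return count;
    }
}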
Project: elasticsearch_my    File: FiltersAggregator.java
public FiltersAggregator(String name, AggregatorFactories factories, String[] keys, Weight[] filters, boolean keyed, String otherBucketKey,
        SearchContext context,
        Aggregator parent, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData)
        throws IOException {
    super(name, factories, context, parent, pipelineAggregators, metaData);
    this.keyed = keyed;
    this.keys = keys;
    this.filters = filters;
    this.showOtherBucket = otherBucketKey != null;
    this.otherBucketKey = otherBucketKey;
    if (showOtherBucket) {
        this.totalNumKeys = keys.length + 1;
    } else {
        this.totalNumKeys = keys.length;
    }
}
Project: elasticsearch_my    File: DocValuesSliceQuery.java
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
    return new RandomAccessWeight(this) {
        @Override
        protected Bits getMatchingDocs(final LeafReaderContext context) throws IOException {
            final SortedNumericDocValues values = DocValues.getSortedNumeric(context.reader(), getField());
            return new Bits() {
                @Override
                public boolean get(int doc) {
                    values.setDocument(doc);
                    if (values.count() > 0) {
                        // only the first value of a multi-valued field decides slice membership
                        return contains(BitMixer.mix(values.valueAt(0)));
                    }
                    return contains(0);
                }

                @Override
                public int length() {
                    return context.reader().maxDoc();
                }
            };
        }
    };
}
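
For comparison, a hypothetical Query built on the same RandomAccessWeight pattern (RandomAccessWeight exists in the Lucene 5/6 line used here and was removed in Lucene 7), matching only even doc IDs:

import java.io.IOException;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.RandomAccessWeight;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits;

// Hypothetical query matching only even doc IDs, to show the
// RandomAccessWeight pattern used above in isolation.
public final class EvenDocsQuery extends Query {
    @Override
    public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
        return new RandomAccessWeight(this) {
            @Override
            protected Bits getMatchingDocs(final LeafReaderContext context) throws IOException {
                return new Bits() {
                    @Override
                    public boolean get(int doc) {
                        return (doc & 1) == 0; // even doc IDs match
                    }

                    @Override
                    public int length() {
                        return context.reader().maxDoc();
                    }
                };
            }
        };
    }

    @Override
    public String toString(String field) {
        return "EvenDocsQuery";
    }

    @Override
    public boolean equals(Object other) {
        return other instanceof EvenDocsQuery;
    }

    @Override
    public int hashCode() {
        return getClass().hashCode();
    }
}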
Project: elasticsearch_my    File: ContextIndexSearcher.java
@Override
public Weight createWeight(Query query, boolean needsScores) throws IOException {
    if (profiler != null) {
        // createWeight() is called for each query node in the tree, so we notify
        // the profiler on each invocation so that it can build an internal
        // representation of the query tree
        QueryProfileBreakdown profile = profiler.getQueryBreakdown(query);
        profile.startTime(QueryTimingType.CREATE_WEIGHT);
        final Weight weight;
        try {
            weight = super.createWeight(query, needsScores);
        } finally {
            profile.stopAndRecordTime();
            profiler.pollLastElement();
        }
        return new ProfileWeight(query, weight, profile);
    } else {
        // needs to be 'super', not 'in' in order to use aggregated DFS
        return super.createWeight(query, needsScores);
    }
}
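
A hypothetical stripped-down variant of the same idea: time each createWeight() call instead of feeding a QueryProfileBreakdown (assumes the Lucene 5/6 signature createWeight(Query, boolean), which is overridable):

import java.io.IOException;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Weight;

// Hypothetical: logs how long weight creation takes for every query node.
class TimingIndexSearcher extends IndexSearcher {

    TimingIndexSearcher(IndexReader reader) {
        super(reader);
    }

    @Override
    public Weight createWeight(Query query, boolean needsScores) throws IOException {
        long start = System.nanoTime();
        try {
            return super.createWeight(query, needsScores);
        } finally {
            // createWeight() runs once per node of the rewritten query tree
            System.out.printf("createWeight(%s) took %d ns%n", query, System.nanoTime() - start);
        }
    }
}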
Project: elasticsearch_my    File: Lucene.java
/**
 * Check whether one or more documents match the provided query.
 */
public static boolean exists(IndexSearcher searcher, Query query) throws IOException {
    final Weight weight = searcher.createNormalizedWeight(query, false);
    // the scorer API should be more efficient at stopping after the first
    // match than the bulk scorer API
    for (LeafReaderContext context : searcher.getIndexReader().leaves()) {
        final Scorer scorer = weight.scorer(context);
        if (scorer == null) {
            continue;
        }
        final Bits liveDocs = context.reader().getLiveDocs();
        final DocIdSetIterator iterator = scorer.iterator();
        for (int doc = iterator.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = iterator.nextDoc()) {
            if (liveDocs == null || liveDocs.get(doc)) {
                return true;
            }
        }
    }
    return false;
}
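
A hypothetical driver for a helper like exists(...) above, using a small in-memory index (Lucene 5/6-era classes such as RAMDirectory assumed; field and values are made up):

import java.io.IOException;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.RAMDirectory;

// Hypothetical driver: index a single document, then probe with exists(...).
static void existsDemo() throws IOException {
    try (RAMDirectory dir = new RAMDirectory()) {
        try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
            Document doc = new Document();
            doc.add(new StringField("color", "red", Field.Store.NO));
            writer.addDocument(doc);
        }
        try (DirectoryReader reader = DirectoryReader.open(dir)) {
            IndexSearcher searcher = new IndexSearcher(reader);
            boolean found = exists(searcher, new TermQuery(new Term("color", "red")));    // true
            boolean missing = exists(searcher, new TermQuery(new Term("color", "blue"))); // false
        }
    }
}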
Project: elasticsearch_my    File: QueryPhaseTests.java
private void countTestCase(Query query, IndexReader reader, boolean shouldCollect) throws Exception {
    TestSearchContext context = new TestSearchContext(null);
    context.parsedQuery(new ParsedQuery(query));
    context.setSize(0);
    context.setTask(new SearchTask(123L, "", "", "", null));

    IndexSearcher searcher = new IndexSearcher(reader);
    final AtomicBoolean collected = new AtomicBoolean();
    IndexSearcher contextSearcher = new IndexSearcher(reader) {
        protected void search(List<LeafReaderContext> leaves, Weight weight, Collector collector) throws IOException {
            collected.set(true);
            super.search(leaves, weight, collector);
        }
    };

    final boolean rescore = QueryPhase.execute(context, contextSearcher);
    assertFalse(rescore);
    assertEquals(searcher.count(query), context.queryResult().topDocs().totalHits);
    assertEquals(shouldCollect, collected.get());
}
Project: elasticsearch_my    File: QueryPhaseTests.java
public void testPostFilterDisablesCountOptimization() throws Exception {
    TestSearchContext context = new TestSearchContext(null);
    context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery()));
    context.setSize(0);
    context.setTask(new SearchTask(123L, "", "", "", null));

    final AtomicBoolean collected = new AtomicBoolean();
    IndexSearcher contextSearcher = new IndexSearcher(new MultiReader()) {
        protected void search(List<LeafReaderContext> leaves, Weight weight, Collector collector) throws IOException {
            collected.set(true);
            super.search(leaves, weight, collector);
        }
    };

    QueryPhase.execute(context, contextSearcher);
    assertEquals(0, context.queryResult().topDocs().totalHits);
    assertFalse(collected.get());

    context.parsedPostFilter(new ParsedQuery(new MatchNoDocsQuery()));
    QueryPhase.execute(context, contextSearcher);
    assertEquals(0, context.queryResult().topDocs().totalHits);
    assertTrue(collected.get());
}
Project: elasticsearch_my    File: QueryPhaseTests.java
public void testMinScoreDisablesCountOptimization() throws Exception {
    TestSearchContext context = new TestSearchContext(null);
    context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery()));
    context.setSize(0);
    context.setTask(new SearchTask(123L, "", "", "", null));

    final AtomicBoolean collected = new AtomicBoolean();
    IndexSearcher contextSearcher = new IndexSearcher(new MultiReader()) {
        protected void search(List<LeafReaderContext> leaves, Weight weight, Collector collector) throws IOException {
            collected.set(true);
            super.search(leaves, weight, collector);
        }
    };

    QueryPhase.execute(context, contextSearcher);
    assertEquals(0, context.queryResult().topDocs().totalHits);
    assertFalse(collected.get());

    context.minimumScore(1);
    QueryPhase.execute(context, contextSearcher);
    assertEquals(0, context.queryResult().topDocs().totalHits);
    assertTrue(collected.get());
}
Project: Elasticsearch    File: FiltersAggregator.java
public FiltersAggregator(String name, AggregatorFactories factories, String[] keys, Weight[] filters, boolean keyed, String otherBucketKey,
        AggregationContext aggregationContext,
        Aggregator parent, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData)
        throws IOException {
    super(name, factories, aggregationContext, parent, pipelineAggregators, metaData);
    this.keyed = keyed;
    this.keys = keys;
    this.filters = filters;
    this.showOtherBucket = otherBucketKey != null;
    this.otherBucketKey = otherBucketKey;
    if (showOtherBucket) {
        this.totalNumKeys = keys.length + 1;
    } else {
        this.totalNumKeys = keys.length;
    }
}
Project: Elasticsearch    File: MatchedQueriesFetchSubPhase.java
private void addMatchedQueries(HitContext hitContext, ImmutableMap<String, Query> namedQueries, List<String> matchedQueries) throws IOException {
    for (Map.Entry<String, Query> entry : namedQueries.entrySet()) {
        String name = entry.getKey();
        Query filter = entry.getValue();

        final Weight weight = hitContext.topLevelSearcher().createNormalizedWeight(filter, false);
        final Scorer scorer = weight.scorer(hitContext.readerContext());
        if (scorer == null) {
            continue;
        }
        final TwoPhaseIterator twoPhase = scorer.twoPhaseIterator();
        if (twoPhase == null) {
            if (scorer.iterator().advance(hitContext.docId()) == hitContext.docId()) {
                matchedQueries.add(name);
            }
        } else {
            if (twoPhase.approximation().advance(hitContext.docId()) == hitContext.docId() && twoPhase.matches()) {
                matchedQueries.add(name);
            }
        }
    }
}
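
The advance-then-confirm pattern above, factored into a hypothetical standalone helper (same Lucene 5.x-era API assumed; helper name is made up):

import java.io.IOException;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.search.Weight;

// Hypothetical helper: does `query` match the single document `docId`
// within segment `ctx`? Mirrors the advance-then-confirm pattern above.
static boolean matchesDoc(IndexSearcher searcher, Query query, LeafReaderContext ctx, int docId) throws IOException {
    Weight weight = searcher.createNormalizedWeight(query, false);
    Scorer scorer = weight.scorer(ctx);
    if (scorer == null) {
        return false; // query matches nothing in this segment
    }
    TwoPhaseIterator twoPhase = scorer.twoPhaseIterator();
    if (twoPhase == null) {
        return scorer.iterator().advance(docId) == docId;
    }
    // advance the cheap approximation first, then run the exact per-doc check
    return twoPhase.approximation().advance(docId) == docId && twoPhase.matches();
}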
Project: Elasticsearch    File: InMemoryGeoBoundingBoxQuery.java
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
    return new RandomAccessWeight(this) {
        @Override
        protected Bits getMatchingDocs(LeafReaderContext context) throws IOException {
            final int maxDoc = context.reader().maxDoc();
            final MultiGeoPointValues values = indexFieldData.load(context).getGeoPointValues();
            // check whether the bounding box crosses the 180 degree meridian
            if (topLeft.lon() > bottomRight.lon()) {
                return new Meridian180GeoBoundingBoxBits(maxDoc, values, topLeft, bottomRight);
            } else {
                return new GeoBoundingBoxBits(maxDoc, values, topLeft, bottomRight);
            }
        }
    };
}
Project: Elasticsearch    File: IncludeNestedDocsQuery.java
IncludeNestedDocsScorer(Weight weight, Scorer parentScorer, BitSet parentBits, int currentParentPointer) {
    super(weight);
    this.parentScorer = parentScorer;
    this.parentBits = parentBits;
    this.currentParentPointer = currentParentPointer;
    if (currentParentPointer == 0) {
        currentChildPointer = 0;
    } else {
        this.currentChildPointer = this.parentBits.prevSetBit(currentParentPointer - 1);
        if (currentChildPointer == -1) {
            // no earlier parent bit is set, so the children start at doc 0
            currentChildPointer = 0;
        } else {
            currentChildPointer++; // we only care about children
        }
    }

    currentDoc = currentChildPointer;
}
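
The child-pointer arithmetic relies on prevSetBit; a tiny hypothetical illustration with a FixedBitSet (the doc IDs are made up):

import org.apache.lucene.util.FixedBitSet;

// Parents at docs 4 and 9; a parent's children occupy the doc IDs
// between the previous parent and itself.
FixedBitSet parentBits = new FixedBitSet(10);
parentBits.set(4);
parentBits.set(9);

int prevParent = parentBits.prevSetBit(8); // 4: last parent bit at or before doc 8
int firstChild = prevParent + 1;           // 5: first child of the parent at doc 9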
Project: elasticsearch-learning-to-rank    File: RankerQuery.java
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
    if (!needsScores) {
        // If scores are not needed, simply return a constant score for all docs
        return new ConstantScoreWeight(this) {
            @Override
            public Scorer scorer(LeafReaderContext context) throws IOException {
                return new ConstantScoreScorer(this, score(), DocIdSetIterator.all(context.reader().maxDoc()));
            }
        };
    }
    List<Weight> weights = new ArrayList<>(queries.size());
    for (Query q : queries) {
        weights.add(searcher.createWeight(q, needsScores));
    }
    return new RankerWeight(weights);
}
Project: elasticsearch-learning-to-rank    File: RankerQuery.java
@Override
public void normalize(float norm, float boost) {
    // Ignore the top-level boost and norm.
    // We must make sure that the scores from the sub scorers
    // are not affected by parent queries, because rankers using thresholds
    // may produce inconsistent results.
    // This breaks the Lucene contract, but in general this query is meant
    // to be used as the top-level query of a rescore query, where the
    // resulting score can still be controlled with the rescore_weight param.
    // One possibility would be to store the boost value and apply it
    // to the resulting score.
    // Logging feature scores may be impossible when feature queries
    // are run and logged individually (the _msearch approach) and the similarity
    // used is affected by queryNorm (ClassicSimilarity).
    for (Weight w : weights) {
        w.normalize(1F, 1F);
    }
}
Project: DoSeR-Disambiguation    File: LearnToRankQuery.java
public LearnToRankWeight(final IndexSearcher searcher)
        throws IOException {
    // Check for required and optional weights
    final List<LearnToRankClause> requiredClauses = new LinkedList<LearnToRankClause>();
    final List<LearnToRankClause> optionalClauses = new LinkedList<LearnToRankClause>();

    // boolean termConjunction = true;
    for (int i = 0; i < clausesList.size(); i++) {
        final LearnToRankClause clause = clausesList.get(i);
        final Weight cweight = clause.getQuery().createWeight(searcher);
        if (clause.isMustOccur()) {
            requiredClauses.add(clause);
        } else {
            optionalClauses.add(clause);
        }
        // if (!(clause.isMustOccur() && w instanceof TermWeight)) {
        // termConjunction = false;
        // }
        clause.setW(cweight);
    }
    this.requiredClauses = requiredClauses;
    this.optionalClauses = optionalClauses;
    // this.termConjunction = termConjunction;
}
Project: DoSeR-Disambiguation    File: TermQuery.java
@Override
public Weight createWeight(final IndexSearcher searcher) throws IOException {
    final IndexReaderContext context = searcher.getTopReaderContext();
    final TermContext termState;
    if ((perReaderTermS == null)
            || (perReaderTermS.topReaderContext != context)) {
        // make TermQuery single-pass if we don't have a PRTS or if the
        // context differs!
        termState = TermContext.build(context, term);
    } else {
        // PRTS was pre-built for this IndexSearcher
        termState = perReaderTermS;
    }

    // we must not ignore the given docFreq; if one was set, use it (a deliberate lie)
    if (docFreq != -1) {
        termState.setDocFreq(docFreq);
    }

    return new TermWeight(searcher, termState);
}
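
A hypothetical sketch of the TermContext caching idea in isolation (Lucene 5/6-era org.apache.lucene.index.TermContext API assumed; helper name is made up):

import java.io.IOException;

import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
import org.apache.lucene.search.IndexSearcher;

// Hypothetical: build the per-reader term state once, as createWeight above
// does, then read the aggregated statistics back out.
static void printTermStats(IndexSearcher searcher, Term term) throws IOException {
    IndexReaderContext top = searcher.getTopReaderContext();
    TermContext termState = TermContext.build(top, term); // one pass over all segments
    System.out.println("docFreq=" + termState.docFreq()
            + ", totalTermFreq=" + termState.totalTermFreq());
}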
Project: DoSeR-Disambiguation    File: LearnToRankTermQuery.java
@Override
public Weight createWeight(final IndexSearcher searcher) throws IOException {
    final IndexReaderContext context = searcher.getTopReaderContext();
    final TermContext termState;
    if ((perReaderTermS == null)
            || (perReaderTermS.topReaderContext != context)) {
        // make TermQuery single-pass if we don't have a PRTS or if the
        // context differs!
        termState = TermContext.build(context, term); // cache term lookups!
    } else {
        // PRTS was pre-built for this IndexSearcher
        termState = perReaderTermS;
    }

    // we must not ignore the given docFreq; if one was set, use it (a deliberate lie)
    if (docFreq != -1) {
        termState.setDocFreq(docFreq);
    }
    return new TermWeight(searcher, termState);
}
Project: DoSeR-Disambiguation    File: DisjunctionSumScorer.java
/**
 * Construct a <code>DisjunctionSumScorer</code>.
 * 
 * @param weight
 *            The weight to be used.
 * @param subScorers
 *            A collection of at least two subscorers.
 * @param minimumNrMatchers
 *            The positive minimum number of subscorers that should match to
 *            match this query. <br>
 *            When <code>minimumNrMatchers</code> is bigger than the number
 *            of <code>subScorers</code>, no matches will be produced. <br>
 *            When <code>minimumNrMatchers</code> equals the number of
 *            <code>subScorers</code>, it is more efficient to use
 *            <code>ConjunctionScorer</code>.
 */
public DisjunctionSumScorer(final Weight weight,
        final List<Scorer> subScorers, final int minimumNrMatchers,
        final LearnToRankClause[] ltrWeights, final int docBase)
        throws IOException {
    super(weight, subScorers.toArray(new Scorer[subScorers.size()]),
            ltrWeights, subScorers.size());

    if (minimumNrMatchers <= 0) {
        throw new IllegalArgumentException(
                "Minimum nr of matchers must be positive");
    }
    if (numScorers <= 1) {
        throw new IllegalArgumentException(
                "There must be at least 2 subScorers");
    }
    clauses = ltrWeights;
    this.minimumNrMatchers = minimumNrMatchers;
    this.docBase = docBase;
}
Project: DoSeR-Disambiguation    File: ConjunctionScorer.java
ConjunctionScorer(final Weight weight, final Scorer[] scorers,
        final float coord, final LearnToRankClause[] ltrclauses,
        final int docBase) {
    super(weight);
    this.coord = coord;
    this.docBase = docBase;
    clauses = ltrclauses;
    docsAndFreqs = new DocsAndFreqs[scorers.length];
    for (int i = 0; i < scorers.length; i++) {
        docsAndFreqs[i] = new DocsAndFreqs(scorers[i]);
    }
    // Sort the array the first time to allow the least frequent DocsEnum to
    // lead the matching.
    ArrayUtil.timSort(docsAndFreqs, new Comparator<DocsAndFreqs>() {
        @Override
        public int compare(final DocsAndFreqs obj1, final DocsAndFreqs obj2) {
            return Long.signum(obj1.cost - obj2.cost);
        }
    });

    lead = docsAndFreqs[0]; // the least frequent DocsEnum leads the intersection
}
Project: search    File: CustomScoreQuery.java
@Override
public void normalize(float norm, float topLevelBoost) {
  // note we DON'T incorporate our boost, nor pass down any topLevelBoost
  // (e.g. from an outer BQ), as there is no guarantee that the CustomScoreProvider's
  // function obeys the distributive law: it might call sqrt() on the subQuery score
  // or apply some arbitrary function other than multiplication.
  // So, instead, boosts are applied directly in score()
  subQueryWeight.normalize(norm, 1f);
  for (Weight valSrcWeight : valSrcWeights) {
    if (qStrict) {
      valSrcWeight.normalize(1, 1); // do not normalize the ValueSource part
    } else {
      valSrcWeight.normalize(norm, 1f);
    }
  }
  queryWeight = topLevelBoost * getBoost();
}
Project: search    File: SolrIndexSearcher.java
@Override
public DocIdSetIterator iterator() throws IOException {
  List<DocIdSetIterator> iterators = new ArrayList<>(weights.size()+1);
  if (docIdSet != null) {
    DocIdSetIterator iter = docIdSet.iterator();
    if (iter == null) return null;
    iterators.add(iter);
  }
  for (Weight w : weights) {
    Scorer scorer = w.scorer(context, context.reader().getLiveDocs());
    if (scorer == null) return null;
    iterators.add(scorer);
  }
  if (iterators.size()==0) return null;
  if (iterators.size()==1) return iterators.get(0);
  if (iterators.size()==2) return new DualFilterIterator(iterators.get(0), iterators.get(1));
  return new FilterIterator(iterators.toArray(new DocIdSetIterator[iterators.size()]));
}
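
For intuition, a hypothetical leapfrog intersection of two DocIdSetIterators, which is the job DualFilterIterator performs above:

import java.io.IOException;

import org.apache.lucene.search.DocIdSetIterator;

// Hypothetical: advance two iterators in lockstep, emitting docs present in both.
static void intersect(DocIdSetIterator a, DocIdSetIterator b) throws IOException {
    int doc = a.nextDoc();
    while (doc != DocIdSetIterator.NO_MORE_DOCS) {
        int other = b.advance(doc); // first doc in b that is >= doc
        if (other == doc) {
            System.out.println("match: " + doc); // doc is in both iterators
            doc = a.nextDoc();
        } else {
            doc = a.advance(other); // catch a up to b's position
        }
    }
}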
Project: DoSeR    File: LearnToRankQuery.java
public LearnToRankWeight(IndexSearcher searcher) throws IOException {
    // Check for required and optional weights
    List<LearnToRankClause> requiredClauses = new LinkedList<LearnToRankClause>();
    List<LearnToRankClause> optionalClauses = new LinkedList<LearnToRankClause>();
    weights = new Weight[clauses.size()];

    boolean termConjunction = true;
    for (int i = 0; i < clauses.size(); i++) {
        LearnToRankClause clause = clauses.get(i);
        Weight w = clause.getQuery().createWeight(searcher);
        if (clause.isMustOccur()) {
            requiredClauses.add(clause);
        } else {
            optionalClauses.add(clause);
        }
        if (!(clause.isMustOccur() && w instanceof TermWeight)) {
            termConjunction = false;
        }
        weights[i] = w;
        clause.setW(w);
    }
    this.requiredClauses = requiredClauses;
    this.optionalClauses = optionalClauses;
    this.termConjunction = termConjunction;
}
Project: DoSeR    File: LearnToRankTermQuery.java
@Override
public Weight createWeight(IndexSearcher searcher) throws IOException {
    final IndexReaderContext context = searcher.getTopReaderContext();
    final TermContext termState;
    if (perReaderTermState == null
            || perReaderTermState.topReaderContext != context) {
        // make TermQuery single-pass if we don't have a PRTS or if the
        // context differs!
        termState = TermContext.build(context, term, true); // cache term lookups!
    } else {
        // PRTS was pre-built for this IndexSearcher
        termState = this.perReaderTermState;
    }

    // we must not ignore the given docFreq; if one was set, use it (a deliberate lie)
    if (docFreq != -1)
        termState.setDocFreq(docFreq);
    return new TermWeight(searcher, termState);
}
Project: DoSeR    File: DisjunctionSumScorer.java
/**
 * Construct a <code>DisjunctionSumScorer</code>.
 * 
 * @param weight
 *            The weight to be used.
 * @param subScorers
 *            A collection of at least two subscorers.
 * @param minimumNrMatchers
 *            The positive minimum number of subscorers that should match to
 *            match this query. <br>
 *            When <code>minimumNrMatchers</code> is bigger than the number
 *            of <code>subScorers</code>, no matches will be produced. <br>
 *            When <code>minimumNrMatchers</code> equals the number of
 *            <code>subScorers</code>, it is more efficient to use
 *            <code>ConjunctionScorer</code>.
 */
public DisjunctionSumScorer(Weight weight, List<Scorer> subScorers,
        int minimumNrMatchers, LearnToRankClause[] learnToRankWeights,
        int docBase) throws IOException {
    super(weight, subScorers.toArray(new Scorer[subScorers.size()]), learnToRankWeights,
            subScorers.size());

    if (minimumNrMatchers <= 0) {
        throw new IllegalArgumentException(
                "Minimum nr of matchers must be positive");
    }
    if (numScorers <= 1) {
        throw new IllegalArgumentException(
                "There must be at least 2 subScorers");
    }
    this.clauses = learnToRankWeights;
    this.minimumNrMatchers = minimumNrMatchers;
    this.docBase = docBase;
}
Project: DoSeR    File: LearnToRankQuery.java
public LearnToRankWeight(final IndexSearcher searcher)
        throws IOException {
    // Check for required and optional weights
    final List<LearnToRankClause> requiredClauses = new LinkedList<LearnToRankClause>();
    final List<LearnToRankClause> optionalClauses = new LinkedList<LearnToRankClause>();

    // boolean termConjunction = true;
    for (int i = 0; i < clausesList.size(); i++) {
        final LearnToRankClause clause = clausesList.get(i);
        final Weight cweight = clause.getQuery().createWeight(searcher);
        if (clause.isMustOccur()) {
            requiredClauses.add(clause);
        } else {
            optionalClauses.add(clause);
        }
        // if (!(clause.isMustOccur() && w instanceof TermWeight)) {
        // termConjunction = false;
        // }
        clause.setW(cweight);
    }
    this.requiredClauses = requiredClauses;
    this.optionalClauses = optionalClauses;
    // this.termConjunction = termConjunction;
}
Project: DoSeR    File: TermQuery.java
@Override
public Weight createWeight(final IndexSearcher searcher) throws IOException {
    final IndexReaderContext context = searcher.getTopReaderContext();
    final TermContext termState;
    if ((perReaderTermS == null)
            || (perReaderTermS.topReaderContext != context)) {
        // make TermQuery single-pass if we don't have a PRTS or if the
        // context differs!
        termState = TermContext.build(context, term);
    } else {
        // PRTS was pre-built for this IndexSearcher
        termState = perReaderTermS;
    }

    // we must not ignore the given docFreq; if one was set, use it (a deliberate lie)
    if (docFreq != -1) {
        termState.setDocFreq(docFreq);
    }

    return new TermWeight(searcher, termState);
}
Project: DoSeR    File: LearnToRankTermQuery.java
@Override
public Weight createWeight(final IndexSearcher searcher) throws IOException {
    final IndexReaderContext context = searcher.getTopReaderContext();
    final TermContext termState;
    if ((perReaderTermS == null)
            || (perReaderTermS.topReaderContext != context)) {
        // make TermQuery single-pass if we don't have a PRTS or if the
        // context differs!
        termState = TermContext.build(context, term); // cache term lookups!
    } else {
        // PRTS was pre-built for this IndexSearcher
        termState = perReaderTermS;
    }

    // we must not ignore the given docFreq; if one was set, use it (a deliberate lie)
    if (docFreq != -1) {
        termState.setDocFreq(docFreq);
    }
    return new TermWeight(searcher, termState);
}
Project: DoSeR    File: DisjunctionSumScorer.java
/**
 * Construct a <code>DisjunctionSumScorer</code>.
 * 
 * @param weight
 *            The weight to be used.
 * @param subScorers
 *            A collection of at least two subscorers.
 * @param minimumNrMatchers
 *            The positive minimum number of subscorers that should match to
 *            match this query. <br>
 *            When <code>minimumNrMatchers</code> is bigger than the number
 *            of <code>subScorers</code>, no matches will be produced. <br>
 *            When <code>minimumNrMatchers</code> equals the number of
 *            <code>subScorers</code>, it is more efficient to use
 *            <code>ConjunctionScorer</code>.
 */
public DisjunctionSumScorer(final Weight weight,
        final List<Scorer> subScorers, final int minimumNrMatchers,
        final LearnToRankClause[] ltrWeights, final int docBase)
        throws IOException {
    super(weight, subScorers.toArray(new Scorer[subScorers.size()]),
            ltrWeights, subScorers.size());

    if (minimumNrMatchers <= 0) {
        throw new IllegalArgumentException(
                "Minimum nr of matchers must be positive");
    }
    if (numScorers <= 1) {
        throw new IllegalArgumentException(
                "There must be at least 2 subScorers");
    }
    clauses = ltrWeights;
    this.minimumNrMatchers = minimumNrMatchers;
    this.docBase = docBase;
}
Project: DoSeR    File: ConjunctionScorer.java
ConjunctionScorer(final Weight weight, final Scorer[] scorers,
        final float coord, final LearnToRankClause[] ltrclauses,
        final int docBase) {
    super(weight);
    this.coord = coord;
    this.docBase = docBase;
    clauses = ltrclauses;
    docsAndFreqs = new DocsAndFreqs[scorers.length];
    for (int i = 0; i < scorers.length; i++) {
        docsAndFreqs[i] = new DocsAndFreqs(scorers[i]);
    }
    // Sort the array the first time to allow the least frequent DocsEnum to
    // lead the matching.
    ArrayUtil.timSort(docsAndFreqs, new Comparator<DocsAndFreqs>() {
        @Override
        public int compare(final DocsAndFreqs obj1, final DocsAndFreqs obj2) {
            return Long.signum(obj1.cost - obj2.cost);
        }
    });

    lead = docsAndFreqs[0]; // the least frequent DocsEnum leads the intersection
}
Project: community-edition-old    File: AbstractSolrCachingScorer.java
AbstractSolrCachingScorer(Weight weight, DocSet in, AtomicReaderContext context, Bits acceptDocs, SolrIndexSearcher searcher)
{
    super(weight);
    this.context = context;
    this.acceptDocs = acceptDocs;

    if (in instanceof BitDocSet)
    {
        matches = (BitDocSet) in;
    }
    else
    {
        this.matches = new BitDocSet(new FixedBitSet(searcher.maxDoc()));
        for (DocIterator it = in.iterator(); it.hasNext(); /* */)
        {
            matches.addUnique(it.nextDoc());
        }
    }
    bitSet = matches.getBits();

    doc = getBase() - 1;
}
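
A hypothetical counterpart to the DocSet-to-bitset conversion above: materializing any DocIdSetIterator into a FixedBitSet for random access (helper name is made up):

import java.io.IOException;

import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.FixedBitSet;

// Hypothetical: collect an iterator's matches into a random-access bitset.
static FixedBitSet toBitSet(DocIdSetIterator it, int maxDoc) throws IOException {
    FixedBitSet bits = new FixedBitSet(maxDoc);
    for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
        bits.set(doc);
    }
    return bits;
}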
Project: search-core    File: SolrIndexSearcher.java
@Override
public DocIdSetIterator iterator() throws IOException {
    List<DocIdSetIterator> iterators = new ArrayList<DocIdSetIterator>(weights.size() + 1);
    if(docIdSet != null) {
        DocIdSetIterator iter = docIdSet.iterator();
        if(iter == null)
            return null;
        iterators.add(iter);
    }
    for(Weight w : weights) {
        Scorer scorer = w.scorer(context, true, false, context.reader().getLiveDocs());
        if(scorer == null)
            return null;
        iterators.add(scorer);
    }
    if(iterators.size() == 0)
        return null;
    if(iterators.size() == 1)
        return iterators.get(0);
    if(iterators.size() == 2)
        return new DualFilterIterator(iterators.get(0), iterators.get(1));
    return new FilterIterator(iterators.toArray(new DocIdSetIterator[iterators.size()]));
}
Project: read-open-source-code    File: CustomScoreQuery.java
@Override
public void normalize(float norm, float topLevelBoost) {
  // note we DON'T incorporate our boost, nor pass down any topLevelBoost
  // (e.g. from an outer BQ), as there is no guarantee that the CustomScoreProvider's
  // function obeys the distributive law: it might call sqrt() on the subQuery score
  // or apply some arbitrary function other than multiplication.
  // So, instead, boosts are applied directly in score()
  subQueryWeight.normalize(norm, 1f);
  for (Weight valSrcWeight : valSrcWeights) {
    if (qStrict) {
      valSrcWeight.normalize(1, 1); // do not normalize the ValueSource part
    } else {
      valSrcWeight.normalize(norm, 1f);
    }
  }
  queryWeight = topLevelBoost * getBoost();
}
Project: read-open-source-code    File: SolrIndexSearcher.java
@Override
public DocIdSetIterator iterator() throws IOException {
  List<DocIdSetIterator> iterators = new ArrayList<>(weights.size()+1);
  if (docIdSet != null) {
    DocIdSetIterator iter = docIdSet.iterator();
    if (iter == null) return null;
    iterators.add(iter);
  }
  for (Weight w : weights) {
    Scorer scorer = w.scorer(context, context.reader().getLiveDocs());
    if (scorer == null) return null;
    iterators.add(scorer);
  }
  if (iterators.size()==0) return null;
  if (iterators.size()==1) return iterators.get(0);
  if (iterators.size()==2) return new DualFilterIterator(iterators.get(0), iterators.get(1));
  return new FilterIterator(iterators.toArray(new DocIdSetIterator[iterators.size()]));
}
Project: read-open-source-code    File: SolrIndexSearcher.java
@Override
public DocIdSetIterator iterator() throws IOException {
  List<DocIdSetIterator> iterators = new ArrayList<DocIdSetIterator>(weights.size()+1);
  if (docIdSet != null) {
    DocIdSetIterator iter = docIdSet.iterator();
    if (iter == null) return null;
    iterators.add(iter);
  }
  for (Weight w : weights) {
    Scorer scorer = w.scorer(context, true, false, context.reader().getLiveDocs());
    if (scorer == null) return null;
    iterators.add(scorer);
  }
  if (iterators.size()==0) return null;
  if (iterators.size()==1) return iterators.get(0);
  if (iterators.size()==2) return new DualFilterIterator(iterators.get(0), iterators.get(1));
  return new FilterIterator(iterators.toArray(new DocIdSetIterator[iterators.size()]));
}
Project: NYBC    File: SolrIndexSearcher.java
@Override
public DocIdSetIterator iterator() throws IOException {
  List<DocIdSetIterator> iterators = new ArrayList<DocIdSetIterator>(weights.size()+1);
  if (docIdSet != null) {
    DocIdSetIterator iter = docIdSet.iterator();
    if (iter == null) return null;
    iterators.add(iter);
  }
  for (Weight w : weights) {
    Scorer scorer = w.scorer(context, true, false, context.reader().getLiveDocs());
    if (scorer == null) return null;
    iterators.add(scorer);
  }
  if (iterators.size()==0) return null;
  if (iterators.size()==1) return iterators.get(0);
  if (iterators.size()==2) return new DualFilterIterator(iterators.get(0), iterators.get(1));
  return new FilterIterator(iterators.toArray(new DocIdSetIterator[iterators.size()]));
}
Project: lucene-addons    File: SimpleTargetCounter.java
/**
 * Simple utility class to perform basic term frequency/document frequency
 * counts on the individual terms within a query.  This relies on
 * IndexReader and does not perform any concordance search/retrieval;
 * it is, therefore, very fast.
 * <p>
 * If you want to visit more than basic terms (e.g. SpanNear),
 * see {@link org.apache.lucene.search.concordance.windowvisitor.TargetVisitor}
 *
 * @param query query
 * @param searcher searcher
 * @return target term results
 * @throws java.io.IOException if there is an IOException from the searcher
 */
public SimpleTargetTermResults searchSingleTerms(Query query, IndexSearcher searcher)
    throws IOException {
  Query tmpQ = query.rewrite(searcher.getIndexReader());
  Set<Term> terms = new HashSet<>();
  Weight weight = tmpQ.createWeight(searcher, false, 1.0f); // default boost of 1.0f
  weight.extractTerms(terms);

  Map<String, Integer> dfs = new HashMap<>();
  Map<String, Integer> tfs = new HashMap<>();

  for (Term t : terms) {
    String targ = t.text();
    int docFreq = searcher.getIndexReader().docFreq(t);
    if (docFreq == 0) {
      continue;
    }
    Integer i = Integer.valueOf(docFreq);
    dfs.put(targ, i);

    long tf = searcher.getIndexReader().totalTermFreq(t);
    tfs.put(targ, (int) tf);
  }

  return new SimpleTargetTermResults(dfs, tfs);
}
Project: lucene-addons    File: SpanQueryConverter.java
@Override
protected SpanQuery convertUnknownQuery(String field, Query query) {
  if (query instanceof CommonTermsQuery) {

    // specialized handling, since rewriting would change the resulting query;
    // this query is TermContext-sensitive.
    CommonTermsQuery ctq = (CommonTermsQuery) query;

    Set<Term> terms = new HashSet<>();
    try {
      Weight w = ctq.createWeight(searcher, false, 1.0f);
      w.extractTerms(terms);
    } catch (IOException e) {
      throw new RuntimeException("IOException on searcher!!!", e);
    }
    List<SpanQuery> spanQs = new LinkedList<>();

    for (Term term : terms) {
      if (term.field().equals(field)) {
        spanQs.add(new SpanTermQuery(term));
      }
    }
    if (spanQs.size() == 0) {
      return getEmptySpanQuery();
    } else if (spanQs.size() == 1) {
      return spanQs.get(0);
    } else {
      return new SpanOrQuery(spanQs.toArray(new SpanQuery[spanQs.size()]));
    }
  }
  super.convertUnknownQuery(field, query);
  return null;
}
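
The fallback above ORs one SpanTermQuery per extracted term; a hypothetical standalone equivalent for two fixed terms in a made-up "body" field:

import org.apache.lucene.index.Term;
import org.apache.lucene.search.spans.SpanOrQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;

// Hypothetical: the same OR-combination the fallback above builds.
static SpanQuery redOrBlue() {
    SpanQuery red = new SpanTermQuery(new Term("body", "red"));
    SpanQuery blue = new SpanTermQuery(new Term("body", "blue"));
    return new SpanOrQuery(red, blue); // matches spans of either term
}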
Project: elasticsearch_my    File: PercolateQuery.java
BaseScorer(Weight weight, Scorer approximation, CheckedFunction<Integer, Query, IOException> percolatorQueries,
           IndexSearcher percolatorIndexSearcher) {
    super(weight);
    this.approximation = approximation;
    this.percolatorQueries = percolatorQueries;
    this.percolatorIndexSearcher = percolatorIndexSearcher;
}
Project: elasticsearch_my    File: AdjacencyMatrixAggregatorFactory.java
public AdjacencyMatrixAggregatorFactory(String name, List<KeyedFilter> filters, String separator, 
        SearchContext context, AggregatorFactory<?> parent, AggregatorFactories.Builder subFactories, 
        Map<String, Object> metaData) throws IOException {
    super(name, context, parent, subFactories, metaData);
    IndexSearcher contextSearcher = context.searcher();
    this.separator = separator;
    weights = new Weight[filters.size()];
    keys = new String[filters.size()];
    for (int i = 0; i < filters.size(); ++i) {
        KeyedFilter keyedFilter = filters.get(i);
        this.keys[i] = keyedFilter.key();
        Query filter = keyedFilter.filter().toFilter(context.getQueryShardContext());
        this.weights[i] = contextSearcher.createNormalizedWeight(filter, false);
    }
}