Java class org.apache.lucene.index.SegmentReader — example source code

项目:elasticsearch_my    文件:Engine.java   
/**
 * Global stats on segments.
 */
public final SegmentsStats segmentsStats(boolean includeSegmentFileSizes) {
    ensureOpen();
    try (Searcher searcher = acquireSearcher("segments_stats")) {
        final SegmentsStats stats = new SegmentsStats();
        for (LeafReaderContext ctx : searcher.reader().leaves()) {
            // Unwrap down to the SegmentReader so per-component memory can be reported.
            final SegmentReader segment = segmentReader(ctx.reader());
            stats.add(1, segment.ramBytesUsed());
            stats.addTermsMemoryInBytes(guardedRamBytesUsed(segment.getPostingsReader()));
            stats.addStoredFieldsMemoryInBytes(guardedRamBytesUsed(segment.getFieldsReader()));
            stats.addTermVectorsMemoryInBytes(guardedRamBytesUsed(segment.getTermVectorsReader()));
            stats.addNormsMemoryInBytes(guardedRamBytesUsed(segment.getNormsReader()));
            stats.addPointsMemoryInBytes(guardedRamBytesUsed(segment.getPointsReader()));
            stats.addDocValuesMemoryInBytes(guardedRamBytesUsed(segment.getDocValuesReader()));
            if (includeSegmentFileSizes) {
                // TODO: consider moving this to StoreStats
                stats.addFileSizes(getSegmentFileSizes(segment));
            }
        }
        writerSegmentStats(stats);
        return stats;
    }
}
项目:elasticsearch_my    文件:CodecTests.java   
/**
 * Indexes a single empty document with the given codec and asserts that the
 * segment was written with the expected stored-fields compression mode.
 * Uses try-with-resources so the writer, reader and directory are closed even
 * when an assertion fails (the original leaked them on failure).
 */
private void assertCompressionEquals(Mode expected, Codec actual) throws Exception {
    try (Directory dir = newDirectory()) {
        IndexWriterConfig iwc = newIndexWriterConfig(null);
        iwc.setCodec(actual);
        try (IndexWriter iw = new IndexWriter(dir, iwc)) {
            iw.addDocument(new Document());
            iw.commit();
        }
        try (DirectoryReader ir = DirectoryReader.open(dir)) {
            // Single commit of a single doc: exactly one leaf, which is a SegmentReader.
            SegmentReader sr = (SegmentReader) ir.leaves().get(0).reader();
            String v = sr.getSegmentInfo().info.getAttribute(Lucene50StoredFieldsFormat.MODE_KEY);
            assertNotNull(v);
            assertEquals(expected, Mode.valueOf(v));
        }
    }
}
项目:Elasticsearch    文件:Engine.java   
/**
 * Global stats on segments.
 */
public final SegmentsStats segmentsStats() {
    ensureOpen();
    try (final Searcher searcher = acquireSearcher("segments_stats", false)) {
        final SegmentsStats stats = new SegmentsStats();
        for (LeafReaderContext ctx : searcher.reader().leaves()) {
            // Unwrap down to the SegmentReader so per-component memory can be reported.
            final SegmentReader segment = segmentReader(ctx.reader());
            stats.add(1, segment.ramBytesUsed());
            stats.addTermsMemoryInBytes(guardedRamBytesUsed(segment.getPostingsReader()));
            stats.addStoredFieldsMemoryInBytes(guardedRamBytesUsed(segment.getFieldsReader()));
            stats.addTermVectorsMemoryInBytes(guardedRamBytesUsed(segment.getTermVectorsReader()));
            stats.addNormsMemoryInBytes(guardedRamBytesUsed(segment.getNormsReader()));
            stats.addDocValuesMemoryInBytes(guardedRamBytesUsed(segment.getDocValuesReader()));
        }
        writerSegmentStats(stats);
        return stats;
    }
}
项目:search    文件:AnalyzingInfixSuggester.java   
/**
 * Estimates the heap memory used by this suggester, including the RAM held by
 * every SegmentReader leaf of the currently acquired searcher.
 *
 * @throws RuntimeException wrapping any IOException from acquiring the searcher
 */
@Override
public long ramBytesUsed() {
  long mem = RamUsageEstimator.shallowSizeOf(this);
  try {
    if (searcherMgr != null) {
      IndexSearcher searcher = searcherMgr.acquire();
      try {
        for (AtomicReaderContext context : searcher.getIndexReader().leaves()) {
          AtomicReader reader = FilterAtomicReader.unwrap(context.reader());
          if (reader instanceof SegmentReader) {
            // Fixed: cast the unwrapped reader, not context.reader() — the context may
            // hold a FilterAtomicReader wrapper, which caused a ClassCastException here.
            mem += ((SegmentReader) reader).ramBytesUsed();
          }
        }
      } finally {
        // Always release, even if leaf iteration throws.
        searcherMgr.release(searcher);
      }
    }
    return mem;
  } catch (IOException ioe) {
    throw new RuntimeException(ioe);
  }
}
项目:search    文件:TestMergePolicyConfig.java   
/**
 * Given an IndexReader, asserts that there is at least one AtomcReader leaf,
 * and that all AtomicReader leaves are SegmentReader's that have a compound 
 * file status that matches the expected input.
 */
private static void assertCompoundSegments(IndexReader reader, 
                                           boolean compound) {

  assertNotNull("Null leaves", reader.leaves());
  assertTrue("no leaves", 0 < reader.leaves().size());

  for (AtomicReaderContext atomic : reader.leaves()) {
    // Every leaf must be a concrete SegmentReader before we can inspect its SegmentInfo.
    assertTrue("not a segment reader: " + atomic.reader().toString(),
               atomic.reader() instanceof SegmentReader);
    final boolean actualCompound =
        ((SegmentReader) atomic.reader()).getSegmentInfo().info.getUseCompoundFile();
    assertEquals("Compound status incorrect for: " + atomic.reader().toString(),
                 compound,
                 actualCompound);
  }
}
项目:incubator-blur    文件:MergeSortRowIdMatcher.java   
/**
 * Writes every row id term of the given segment to the writer, reporting task
 * progress at most once every {@code _10_SECONDS} so the job is not marked hung.
 */
private void writeRowIds(Writer writer, SegmentReader segmentReader) throws IOException {
  Terms terms = segmentReader.terms(BlurConstants.ROW_ID);
  if (terms == null) {
    // Segment has no row id field; nothing to write.
    return;
  }
  TermsEnum termsEnum = terms.iterator(null);
  BytesRef rowId;
  long s = System.nanoTime();
  while ((rowId = termsEnum.next()) != null) {
    long n = System.nanoTime();
    // Fixed: the old check (n + _10_SECONDS > s) was always true because n >= s,
    // so progress was reported on every single term. Report only after the
    // interval has actually elapsed, then restart the clock.
    if (n - s > _10_SECONDS) {
      _progressable.progress();
      s = System.nanoTime();
    }
    writer.append(new Text(rowId.utf8ToString()), NullWritable.get());
  }
}
项目:read-open-source-code    文件:AnalyzingInfixSuggester.java   
/**
 * Estimates the heap memory used by this suggester, including the RAM held by
 * every SegmentReader leaf of the currently acquired searcher.
 *
 * @throws RuntimeException wrapping any IOException from acquiring the searcher
 */
@Override
public long ramBytesUsed() {
  long mem = RamUsageEstimator.shallowSizeOf(this);
  try {
    if (searcherMgr != null) {
      IndexSearcher searcher = searcherMgr.acquire();
      try {
        for (AtomicReaderContext context : searcher.getIndexReader().leaves()) {
          AtomicReader reader = FilterAtomicReader.unwrap(context.reader());
          if (reader instanceof SegmentReader) {
            // Fixed: cast the unwrapped reader, not context.reader() — the context may
            // hold a FilterAtomicReader wrapper, which caused a ClassCastException here.
            mem += ((SegmentReader) reader).ramBytesUsed();
          }
        }
      } finally {
        // Always release, even if leaf iteration throws.
        searcherMgr.release(searcher);
      }
    }
    return mem;
  } catch (IOException ioe) {
    throw new RuntimeException(ioe);
  }
}
项目:elasticsearch_my    文件:Engine.java   
/**
 * Tries to extract a segment reader from the given index reader.
 * If no SegmentReader can be extracted an {@link IllegalStateException} is thrown.
 */
protected static SegmentReader segmentReader(LeafReader reader) {
    if (reader instanceof SegmentReader) {
        return (SegmentReader) reader;
    }
    if (reader instanceof FilterLeafReader) {
        // Peel off one filter layer and retry on the delegate.
        return segmentReader(FilterLeafReader.unwrap((FilterLeafReader) reader));
    }
    // hard fail - we can't get a SegmentReader
    throw new IllegalStateException("Can not extract segment reader from given index reader [" + reader + "]");
}
项目:lams    文件:Lucene40StoredFieldsWriter.java   
/**
 * Merges stored fields from all incoming readers, enabling raw-bytes bulk copy
 * whenever the matching segment also uses the Lucene40 stored fields format.
 *
 * @return the total number of documents merged
 */
@Override
public int merge(MergeState mergeState) throws IOException {
  int docCount = 0;
  // Used for bulk-reading raw bytes for stored fields
  int rawDocLengths[] = new int[MAX_RAW_MERGE_DOCS];
  int idx = 0;

  for (AtomicReader reader : mergeState.readers) {
    final SegmentReader matchingSegmentReader = mergeState.matchingSegmentReaders[idx++];
    Lucene40StoredFieldsReader matchingFieldsReader = null;
    if (matchingSegmentReader != null) {
      final StoredFieldsReader fieldsReader = matchingSegmentReader.getFieldsReader();
      // we can only bulk-copy if the matching reader is also a Lucene40FieldsReader
      // (instanceof is false for null, so the former explicit null check was redundant)
      if (fieldsReader instanceof Lucene40StoredFieldsReader) {
        matchingFieldsReader = (Lucene40StoredFieldsReader) fieldsReader;
      }
    }

    // Segments with deletions need per-doc live checks; others can be copied wholesale.
    if (reader.getLiveDocs() != null) {
      docCount += copyFieldsWithDeletions(mergeState,
                                          reader, matchingFieldsReader, rawDocLengths);
    } else {
      docCount += copyFieldsNoDeletions(mergeState,
                                        reader, matchingFieldsReader, rawDocLengths);
    }
  }
  finish(mergeState.fieldInfos, docCount);
  return docCount;
}
项目:lams    文件:Lucene40TermVectorsWriter.java   
/**
 * Merges term vectors from all incoming readers, enabling raw-bytes bulk copy
 * whenever the matching segment also uses the Lucene40 term vectors format.
 *
 * @return the total number of documents merged
 */
@Override
public final int merge(MergeState mergeState) throws IOException {
  // Used for bulk-reading raw bytes for term vectors
  int rawDocLengths[] = new int[MAX_RAW_MERGE_DOCS];
  int rawDocLengths2[] = new int[MAX_RAW_MERGE_DOCS];

  int idx = 0;
  int numDocs = 0;
  for (int i = 0; i < mergeState.readers.size(); i++) {
    final AtomicReader reader = mergeState.readers.get(i);

    final SegmentReader matchingSegmentReader = mergeState.matchingSegmentReaders[idx++];
    Lucene40TermVectorsReader matchingVectorsReader = null;
    if (matchingSegmentReader != null) {
      TermVectorsReader vectorsReader = matchingSegmentReader.getTermVectorsReader();
      // bulk copy only works against the same on-disk format
      // (instanceof is false for null, so the former explicit null check was redundant)
      if (vectorsReader instanceof Lucene40TermVectorsReader) {
        matchingVectorsReader = (Lucene40TermVectorsReader) vectorsReader;
      }
    }
    // Segments with deletions need per-doc live checks; others can be copied wholesale.
    if (reader.getLiveDocs() != null) {
      numDocs += copyVectorsWithDeletions(mergeState, matchingVectorsReader, reader, rawDocLengths, rawDocLengths2);
    } else {
      numDocs += copyVectorsNoDeletions(mergeState, matchingVectorsReader, reader, rawDocLengths, rawDocLengths2);
    }
  }
  finish(mergeState.fieldInfos, numDocs);
  return numDocs;
}
项目:lams    文件:FieldCacheImpl.java   
// Registers the appropriate purge listener for this reader so cache entries
// are evicted when the reader (or its core) is closed.
private void initReader(AtomicReader reader) {
  if (reader instanceof SegmentReader) {
    ((SegmentReader) reader).addCoreClosedListener(purgeCore);
    return;
  }
  // we have a slow reader of some sort, try to register a purge event
  // rather than relying on gc:
  final Object key = reader.getCoreCacheKey();
  if (key instanceof AtomicReader) {
    ((AtomicReader) key).addReaderClosedListener(purgeReader);
  } else {
    // last chance
    reader.addReaderClosedListener(purgeReader);
  }
}
项目:Elasticsearch    文件:Engine.java   
/**
 * Tries to extract a segment reader from the given index reader.
 * If no SegmentReader can be extracted an {@link IllegalStateException} is thrown.
 */
protected static SegmentReader segmentReader(LeafReader reader) {
    if (reader instanceof SegmentReader) {
        return (SegmentReader) reader;
    }
    if (reader instanceof FilterLeafReader) {
        // Peel off one filter layer and retry on the delegate.
        return segmentReader(FilterLeafReader.unwrap((FilterLeafReader) reader));
    }
    // hard fail - we can't get a SegmentReader
    throw new IllegalStateException("Can not extract segment reader from given index reader [" + reader + "]");
}
项目:linden    文件:LindenFieldCacheImpl.java   
// Hooks a purge callback onto the reader so field-cache entries are dropped
// when the reader (or its underlying core) goes away.
private void initReader(AtomicReader reader) {
  if (reader instanceof SegmentReader) {
    reader.addCoreClosedListener(purgeCore);
    return;
  }
  // Not a plain segment reader: register an explicit purge event rather
  // than relying on gc.
  final Object key = reader.getCoreCacheKey();
  if (key instanceof AtomicReader) {
    ((AtomicReader) key).addReaderClosedListener(purgeReader);
  } else {
    // last chance
    reader.addReaderClosedListener(purgeReader);
  }
}
项目:search    文件:SortingMergePolicy.java   
/** Returns {@code true} if the given {@code reader} is sorted by the specified {@code sort}. */
public static boolean isSorted(AtomicReader reader, Sort sort) {
  if (!(reader instanceof SegmentReader)) {
    return false;
  }
  // The sort used at merge time is recorded in the segment diagnostics.
  final Map<String, String> diagnostics =
      ((SegmentReader) reader).getSegmentInfo().info.getDiagnostics();
  return diagnostics != null && sort.toString().equals(diagnostics.get(SORTER_ID_PROP));
}
项目:search    文件:LuceneTestCase.java   
/**
 * Some tests expect the directory to contain a single segment, and want to 
 * do tests on that segment's reader. This is an utility method to help them.
 */
public static SegmentReader getOnlySegmentReader(DirectoryReader reader) {
  final List<AtomicReaderContext> leaves = reader.leaves();
  if (leaves.size() != 1) {
    throw new IllegalArgumentException(reader + " has " + leaves.size() + " segments instead of exactly one");
  }
  final AtomicReader only = leaves.get(0).reader();
  assertTrue(only instanceof SegmentReader);
  return (SegmentReader) only;
}
项目:search    文件:Lucene40StoredFieldsWriter.java   
/**
 * Merges stored fields from all incoming readers, enabling raw-bytes bulk copy
 * whenever the matching segment also uses the Lucene40 stored fields format.
 *
 * @return the total number of documents merged
 */
@Override
public int merge(MergeState mergeState) throws IOException {
  int docCount = 0;
  // Used for bulk-reading raw bytes for stored fields
  int rawDocLengths[] = new int[MAX_RAW_MERGE_DOCS];
  int idx = 0;

  for (AtomicReader reader : mergeState.readers) {
    final SegmentReader matchingSegmentReader = mergeState.matchingSegmentReaders[idx++];
    Lucene40StoredFieldsReader matchingFieldsReader = null;
    if (matchingSegmentReader != null) {
      final StoredFieldsReader fieldsReader = matchingSegmentReader.getFieldsReader();
      // we can only bulk-copy if the matching reader is also a Lucene40FieldsReader
      // (instanceof is false for null, so the former explicit null check was redundant)
      if (fieldsReader instanceof Lucene40StoredFieldsReader) {
        matchingFieldsReader = (Lucene40StoredFieldsReader) fieldsReader;
      }
    }

    // Segments with deletions need per-doc live checks; others can be copied wholesale.
    if (reader.getLiveDocs() != null) {
      docCount += copyFieldsWithDeletions(mergeState,
                                          reader, matchingFieldsReader, rawDocLengths);
    } else {
      docCount += copyFieldsNoDeletions(mergeState,
                                        reader, matchingFieldsReader, rawDocLengths);
    }
  }
  finish(mergeState.fieldInfos, docCount);
  return docCount;
}
项目:search    文件:Lucene40TermVectorsWriter.java   
/**
 * Merges term vectors from all incoming readers, enabling raw-bytes bulk copy
 * whenever the matching segment also uses the Lucene40 term vectors format.
 *
 * @return the total number of documents merged
 */
@Override
public final int merge(MergeState mergeState) throws IOException {
  // Used for bulk-reading raw bytes for term vectors
  int rawDocLengths[] = new int[MAX_RAW_MERGE_DOCS];
  int rawDocLengths2[] = new int[MAX_RAW_MERGE_DOCS];

  int idx = 0;
  int numDocs = 0;
  for (int i = 0; i < mergeState.readers.size(); i++) {
    final AtomicReader reader = mergeState.readers.get(i);

    final SegmentReader matchingSegmentReader = mergeState.matchingSegmentReaders[idx++];
    Lucene40TermVectorsReader matchingVectorsReader = null;
    if (matchingSegmentReader != null) {
      TermVectorsReader vectorsReader = matchingSegmentReader.getTermVectorsReader();
      // bulk copy only works against the same on-disk format
      // (instanceof is false for null, so the former explicit null check was redundant)
      if (vectorsReader instanceof Lucene40TermVectorsReader) {
        matchingVectorsReader = (Lucene40TermVectorsReader) vectorsReader;
      }
    }
    // Segments with deletions need per-doc live checks; others can be copied wholesale.
    if (reader.getLiveDocs() != null) {
      numDocs += copyVectorsWithDeletions(mergeState, matchingVectorsReader, reader, rawDocLengths, rawDocLengths2);
    } else {
      numDocs += copyVectorsNoDeletions(mergeState, matchingVectorsReader, reader, rawDocLengths, rawDocLengths2);
    }
  }
  finish(mergeState.fieldInfos, numDocs);
  return numDocs;
}
项目:search    文件:FieldCacheImpl.java   
// Registers the appropriate purge listener for this reader so cache entries
// are evicted when the reader (or its core) is closed.
private void initReader(AtomicReader reader) {
  if (reader instanceof SegmentReader) {
    ((SegmentReader) reader).addCoreClosedListener(purgeCore);
    return;
  }
  // we have a slow reader of some sort, try to register a purge event
  // rather than relying on gc:
  final Object key = reader.getCoreCacheKey();
  if (key instanceof AtomicReader) {
    ((AtomicReader) key).addReaderClosedListener(purgeReader);
  } else {
    // last chance
    reader.addReaderClosedListener(purgeReader);
  }
}
项目:NYBC    文件:Lucene40StoredFieldsWriter.java   
/**
 * Merges stored fields from all incoming readers, enabling raw-bytes bulk copy
 * whenever the matching segment also uses the Lucene40 stored fields format.
 *
 * @return the total number of documents merged
 */
@Override
public int merge(MergeState mergeState) throws IOException {
  int docCount = 0;
  // Used for bulk-reading raw bytes for stored fields
  int rawDocLengths[] = new int[MAX_RAW_MERGE_DOCS];
  int idx = 0;

  for (AtomicReader reader : mergeState.readers) {
    final SegmentReader matchingSegmentReader = mergeState.matchingSegmentReaders[idx++];
    Lucene40StoredFieldsReader matchingFieldsReader = null;
    if (matchingSegmentReader != null) {
      final StoredFieldsReader fieldsReader = matchingSegmentReader.getFieldsReader();
      // we can only bulk-copy if the matching reader is also a Lucene40FieldsReader
      // (instanceof is false for null, so the former explicit null check was redundant)
      if (fieldsReader instanceof Lucene40StoredFieldsReader) {
        matchingFieldsReader = (Lucene40StoredFieldsReader) fieldsReader;
      }
    }

    // Segments with deletions need per-doc live checks; others can be copied wholesale.
    if (reader.getLiveDocs() != null) {
      docCount += copyFieldsWithDeletions(mergeState,
                                          reader, matchingFieldsReader, rawDocLengths);
    } else {
      docCount += copyFieldsNoDeletions(mergeState,
                                        reader, matchingFieldsReader, rawDocLengths);
    }
  }
  finish(mergeState.fieldInfos, docCount);
  return docCount;
}
项目:NYBC    文件:Lucene40TermVectorsWriter.java   
/**
 * Merges term vectors from all incoming readers, enabling raw-bytes bulk copy
 * whenever the matching segment also uses the Lucene40 term vectors format.
 *
 * @return the total number of documents merged
 */
@Override
public final int merge(MergeState mergeState) throws IOException {
  // Used for bulk-reading raw bytes for term vectors
  int rawDocLengths[] = new int[MAX_RAW_MERGE_DOCS];
  int rawDocLengths2[] = new int[MAX_RAW_MERGE_DOCS];

  int idx = 0;
  int numDocs = 0;
  for (int i = 0; i < mergeState.readers.size(); i++) {
    final AtomicReader reader = mergeState.readers.get(i);

    final SegmentReader matchingSegmentReader = mergeState.matchingSegmentReaders[idx++];
    Lucene40TermVectorsReader matchingVectorsReader = null;
    if (matchingSegmentReader != null) {
      TermVectorsReader vectorsReader = matchingSegmentReader.getTermVectorsReader();
      // bulk copy only works against the same on-disk format
      // (instanceof is false for null, so the former explicit null check was redundant)
      if (vectorsReader instanceof Lucene40TermVectorsReader) {
        matchingVectorsReader = (Lucene40TermVectorsReader) vectorsReader;
      }
    }
    // Segments with deletions need per-doc live checks; others can be copied wholesale.
    if (reader.getLiveDocs() != null) {
      numDocs += copyVectorsWithDeletions(mergeState, matchingVectorsReader, reader, rawDocLengths, rawDocLengths2);
    } else {
      numDocs += copyVectorsNoDeletions(mergeState, matchingVectorsReader, reader, rawDocLengths, rawDocLengths2);
    }
  }
  finish(mergeState.fieldInfos, numDocs);
  return numDocs;
}
项目:NYBC    文件:FieldCacheImpl.java   
// Registers the appropriate purge listener for this reader so cache entries
// are evicted when the reader (or its core) is closed.
private void initReader(AtomicReader reader) {
  if (reader instanceof SegmentReader) {
    ((SegmentReader) reader).addCoreClosedListener(purgeCore);
    return;
  }
  // we have a slow reader of some sort, try to register a purge event
  // rather than relying on gc:
  final Object key = reader.getCoreCacheKey();
  if (key instanceof AtomicReader) {
    ((AtomicReader) key).addReaderClosedListener(purgeReader);
  } else {
    // last chance
    reader.addReaderClosedListener(purgeReader);
  }
}
项目:incubator-blur    文件:MergeSortRowIdMatcher.java   
// Builds an on-disk cache of row ids for the segment identified by segmentKey.
// Data is first written to a temporary path and committed at the end, so readers
// never observe a partially written cache file.
private void createCacheFile(Path file, SegmentKey segmentKey) throws IOException {
  LOG.info("Building cache for segment [{0}] to [{1}]", segmentKey, file);
  Path tmpPath = getTmpWriterPath(file.getParent());
  try (Writer writer = createWriter(_configuration, tmpPath)) {
    DirectoryReader reader = getReader();
    // Find the single leaf that corresponds to the requested segment name.
    for (AtomicReaderContext context : reader.leaves()) {
      SegmentReader segmentReader = AtomicReaderUtil.getSegmentReader(context.reader());
      if (segmentReader.getSegmentName().equals(segmentKey.getSegmentName())) {
        writeRowIds(writer, segmentReader);
        break;
      }
    }
    // NOTE(review): reader is not closed here — presumably getReader() returns a
    // shared/managed instance; confirm its lifecycle to rule out a reader leak.
  }
  // Atomically (from the reader's perspective) publish the finished cache file.
  commitWriter(_configuration, file, tmpPath);
}
项目:incubator-blur    文件:IndexManager.java   
/**
 * Returns {@code true} when the given top-level document id is rejected by the
 * filter, {@code false} when the filter is null or accepts the document.
 *
 * @throws RuntimeException if the reader is not a BaseCompositeReader, or the
 *         addressed sub-reader cannot be resolved to a SegmentReader
 */
@SuppressWarnings("unchecked")
private static boolean isFiltered(int notAdjustedDocId, IndexReader reader, Filter filter) throws IOException {
  if (filter == null) {
    return false;
  }
  if (!(reader instanceof BaseCompositeReader)) {
    throw new RuntimeException("Reader has to be a BaseCompositeReader [" + reader + "]");
  }
  BaseCompositeReader<IndexReader> indexReader = (BaseCompositeReader<IndexReader>) reader;
  List<? extends IndexReader> sequentialSubReaders = BaseCompositeReaderUtil.getSequentialSubReaders(indexReader);
  // Translate the composite-level doc id into a per-segment doc id.
  int readerIndex = BaseCompositeReaderUtil.readerIndex(indexReader, notAdjustedDocId);
  int readerBase = BaseCompositeReaderUtil.readerBase(indexReader, readerIndex);
  int docId = notAdjustedDocId - readerBase;
  IndexReader orgReader = sequentialSubReaders.get(readerIndex);
  // getSegmentReader already returns a typed SegmentReader; the original's extra
  // cast of the same variable was redundant and has been removed.
  SegmentReader segmentReader = AtomicReaderUtil.getSegmentReader(orgReader);
  if (segmentReader == null) {
    throw new RuntimeException("Reader has to be a SegmentReader [" + orgReader + "]");
  }
  DocIdSet docIdSet = filter.getDocIdSet(segmentReader.getContext(), segmentReader.getLiveDocs());
  // Filter.getDocIdSet may return null (or a set with a null iterator) to mean
  // "no documents match" — both cases mean the doc is filtered out. The original
  // NPE'd on a null DocIdSet.
  DocIdSetIterator iterator = docIdSet == null ? null : docIdSet.iterator();
  if (iterator == null) {
    return true;
  }
  return iterator.advance(docId) != docId;
}
项目:incubator-blur    文件:FilterCache.java   
/**
 * Returns the (possibly cached) DocIdSet for this segment, restricted to acceptDocs.
 * On a cache miss the wrapped filter is evaluated once per reader core and the
 * result is cached under the reader's core cache key.
 */
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
  AtomicReader reader = context.reader();
  Object key = reader.getCoreCacheKey();
  DocIdSet docIdSet = _cache.get(key);
  if (docIdSet != null) {
    _hits.incrementAndGet();
    // Apply acceptDocs on the way out so the cached set stays acceptDocs-agnostic.
    return BitsFilteredDocIdSet.wrap(docIdSet, acceptDocs);
  }
  // This will only allow a single instance be created per reader per filter
  Object lock = getLock(key);
  synchronized (lock) {
    // NOTE(review): the cache is not re-checked after acquiring the lock, so a
    // concurrent miss on the same key may compute and overwrite — confirm intended.
    SegmentReader segmentReader = getSegmentReader(reader);
    if (segmentReader == null) {
      // Cannot identify a segment to cache against; fall back to the raw filter.
      LOG.warn("Could not find SegmentReader from [{0}]", reader);
      return _filter.getDocIdSet(context, acceptDocs);
    }
    Directory directory = getDirectory(segmentReader);
    if (directory == null) {
      LOG.warn("Could not find Directory from [{0}]", segmentReader);
      return _filter.getDocIdSet(context, acceptDocs);
    }
    _misses.incrementAndGet();
    String segmentName = segmentReader.getSegmentName();
    // Evaluate the filter with null acceptDocs so the cached set is reusable
    // for any live-docs state; acceptDocs is applied by the wrap below.
    docIdSet = docIdSetToCache(_filter.getDocIdSet(context, null), reader, segmentName, directory);
    _cache.put(key, docIdSet);
    return BitsFilteredDocIdSet.wrap(docIdSet, acceptDocs);
  }
}
项目:read-open-source-code    文件:Lucene40StoredFieldsWriter.java   
/**
 * Merges stored fields from all incoming readers, enabling raw-bytes bulk copy
 * whenever the matching segment also uses the Lucene40 stored fields format.
 *
 * @return the total number of documents merged
 */
@Override
public int merge(MergeState mergeState) throws IOException {
  int docCount = 0;
  // Used for bulk-reading raw bytes for stored fields
  int rawDocLengths[] = new int[MAX_RAW_MERGE_DOCS];
  int idx = 0;

  for (AtomicReader reader : mergeState.readers) {
    final SegmentReader matchingSegmentReader = mergeState.matchingSegmentReaders[idx++];
    Lucene40StoredFieldsReader matchingFieldsReader = null;
    if (matchingSegmentReader != null) {
      final StoredFieldsReader fieldsReader = matchingSegmentReader.getFieldsReader();
      // we can only bulk-copy if the matching reader is also a Lucene40FieldsReader
      // (instanceof is false for null, so the former explicit null check was redundant)
      if (fieldsReader instanceof Lucene40StoredFieldsReader) {
        matchingFieldsReader = (Lucene40StoredFieldsReader) fieldsReader;
      }
    }

    // Segments with deletions need per-doc live checks; others can be copied wholesale.
    if (reader.getLiveDocs() != null) {
      docCount += copyFieldsWithDeletions(mergeState,
                                          reader, matchingFieldsReader, rawDocLengths);
    } else {
      docCount += copyFieldsNoDeletions(mergeState,
                                        reader, matchingFieldsReader, rawDocLengths);
    }
  }
  finish(mergeState.fieldInfos, docCount);
  return docCount;
}
项目:read-open-source-code    文件:Lucene40TermVectorsWriter.java   
/**
 * Merges term vectors from all incoming readers, enabling raw-bytes bulk copy
 * whenever the matching segment also uses the Lucene40 term vectors format.
 *
 * @return the total number of documents merged
 */
@Override
public final int merge(MergeState mergeState) throws IOException {
  // Used for bulk-reading raw bytes for term vectors
  int rawDocLengths[] = new int[MAX_RAW_MERGE_DOCS];
  int rawDocLengths2[] = new int[MAX_RAW_MERGE_DOCS];

  int idx = 0;
  int numDocs = 0;
  for (int i = 0; i < mergeState.readers.size(); i++) {
    final AtomicReader reader = mergeState.readers.get(i);

    final SegmentReader matchingSegmentReader = mergeState.matchingSegmentReaders[idx++];
    Lucene40TermVectorsReader matchingVectorsReader = null;
    if (matchingSegmentReader != null) {
      TermVectorsReader vectorsReader = matchingSegmentReader.getTermVectorsReader();
      // bulk copy only works against the same on-disk format
      // (instanceof is false for null, so the former explicit null check was redundant)
      if (vectorsReader instanceof Lucene40TermVectorsReader) {
        matchingVectorsReader = (Lucene40TermVectorsReader) vectorsReader;
      }
    }
    // Segments with deletions need per-doc live checks; others can be copied wholesale.
    if (reader.getLiveDocs() != null) {
      numDocs += copyVectorsWithDeletions(mergeState, matchingVectorsReader, reader, rawDocLengths, rawDocLengths2);
    } else {
      numDocs += copyVectorsNoDeletions(mergeState, matchingVectorsReader, reader, rawDocLengths, rawDocLengths2);
    }
  }
  finish(mergeState.fieldInfos, numDocs);
  return numDocs;
}
项目:read-open-source-code    文件:FieldCacheImpl.java   
// Registers the appropriate purge listener for this reader so cache entries
// are evicted when the reader (or its core) is closed.
private void initReader(AtomicReader reader) {
  if (reader instanceof SegmentReader) {
    ((SegmentReader) reader).addCoreClosedListener(purgeCore);
    return;
  }
  // we have a slow reader of some sort, try to register a purge event
  // rather than relying on gc:
  final Object key = reader.getCoreCacheKey();
  if (key instanceof AtomicReader) {
    ((AtomicReader) key).addReaderClosedListener(purgeReader);
  } else {
    // last chance
    reader.addReaderClosedListener(purgeReader);
  }
}
项目:read-open-source-code    文件:SortingMergePolicy.java   
/** Returns true if the given reader is sorted by the given sorter. */
public static boolean isSorted(AtomicReader reader, Sorter sorter) {
  if (!(reader instanceof SegmentReader)) {
    return false;
  }
  // The sorter used at merge time is recorded in the segment diagnostics.
  final Map<String, String> diagnostics =
      ((SegmentReader) reader).getSegmentInfo().info.getDiagnostics();
  return diagnostics != null && sorter.getID().equals(diagnostics.get(SORTER_ID_PROP));
}
项目:read-open-source-code    文件:Lucene40StoredFieldsWriter.java   
/**
 * Merges stored fields from all incoming readers, enabling raw-bytes bulk copy
 * whenever the matching segment also uses the Lucene40 stored fields format.
 *
 * @return the total number of documents merged
 */
@Override
public int merge(MergeState mergeState) throws IOException {
  int docCount = 0;
  // Used for bulk-reading raw bytes for stored fields
  int rawDocLengths[] = new int[MAX_RAW_MERGE_DOCS];
  int idx = 0;

  for (AtomicReader reader : mergeState.readers) {
    final SegmentReader matchingSegmentReader = mergeState.matchingSegmentReaders[idx++];
    Lucene40StoredFieldsReader matchingFieldsReader = null;
    if (matchingSegmentReader != null) {
      final StoredFieldsReader fieldsReader = matchingSegmentReader.getFieldsReader();
      // we can only bulk-copy if the matching reader is also a Lucene40FieldsReader
      // (instanceof is false for null, so the former explicit null check was redundant)
      if (fieldsReader instanceof Lucene40StoredFieldsReader) {
        matchingFieldsReader = (Lucene40StoredFieldsReader) fieldsReader;
      }
    }

    // Segments with deletions need per-doc live checks; others can be copied wholesale.
    if (reader.getLiveDocs() != null) {
      docCount += copyFieldsWithDeletions(mergeState,
                                          reader, matchingFieldsReader, rawDocLengths);
    } else {
      docCount += copyFieldsNoDeletions(mergeState,
                                        reader, matchingFieldsReader, rawDocLengths);
    }
  }
  finish(mergeState.fieldInfos, docCount);
  return docCount;
}
项目:read-open-source-code    文件:Lucene40TermVectorsWriter.java   
/**
 * Merges term vectors from all incoming readers, enabling raw-bytes bulk copy
 * whenever the matching segment also uses the Lucene40 term vectors format.
 *
 * @return the total number of documents merged
 */
@Override
public final int merge(MergeState mergeState) throws IOException {
  // Used for bulk-reading raw bytes for term vectors
  int rawDocLengths[] = new int[MAX_RAW_MERGE_DOCS];
  int rawDocLengths2[] = new int[MAX_RAW_MERGE_DOCS];

  int idx = 0;
  int numDocs = 0;
  for (int i = 0; i < mergeState.readers.size(); i++) {
    final AtomicReader reader = mergeState.readers.get(i);

    final SegmentReader matchingSegmentReader = mergeState.matchingSegmentReaders[idx++];
    Lucene40TermVectorsReader matchingVectorsReader = null;
    if (matchingSegmentReader != null) {
      TermVectorsReader vectorsReader = matchingSegmentReader.getTermVectorsReader();
      // bulk copy only works against the same on-disk format
      // (instanceof is false for null, so the former explicit null check was redundant)
      if (vectorsReader instanceof Lucene40TermVectorsReader) {
        matchingVectorsReader = (Lucene40TermVectorsReader) vectorsReader;
      }
    }
    // Segments with deletions need per-doc live checks; others can be copied wholesale.
    if (reader.getLiveDocs() != null) {
      numDocs += copyVectorsWithDeletions(mergeState, matchingVectorsReader, reader, rawDocLengths, rawDocLengths2);
    } else {
      numDocs += copyVectorsNoDeletions(mergeState, matchingVectorsReader, reader, rawDocLengths, rawDocLengths2);
    }
  }
  finish(mergeState.fieldInfos, numDocs);
  return numDocs;
}
项目:read-open-source-code    文件:FieldCacheImpl.java   
// Registers the appropriate purge listener for this reader so cache entries
// are evicted when the reader (or its core) is closed.
private void initReader(AtomicReader reader) {
  if (reader instanceof SegmentReader) {
    ((SegmentReader) reader).addCoreClosedListener(purgeCore);
    return;
  }
  // we have a slow reader of some sort, try to register a purge event
  // rather than relying on gc:
  final Object key = reader.getCoreCacheKey();
  if (key instanceof AtomicReader) {
    ((AtomicReader) key).addReaderClosedListener(purgeReader);
  } else {
    // last chance
    reader.addReaderClosedListener(purgeReader);
  }
}
项目:read-open-source-code    文件:SortingMergePolicy.java   
/** Returns true if the given reader is sorted by the given sorter. */
public static boolean isSorted(AtomicReader reader, Sorter sorter) {
  if (!(reader instanceof SegmentReader)) {
    return false;
  }
  // The sorter used at merge time is recorded in the segment diagnostics.
  final Map<String, String> diagnostics =
      ((SegmentReader) reader).getSegmentInfo().info.getDiagnostics();
  return diagnostics != null && sorter.getID().equals(diagnostics.get(SORTER_ID_PROP));
}
项目:read-open-source-code    文件:Lucene40StoredFieldsWriter.java   
/**
 * Merges stored fields from all incoming readers, enabling raw-bytes bulk copy
 * whenever the matching segment also uses the Lucene40 stored fields format.
 *
 * @return the total number of documents merged
 */
@Override
public int merge(MergeState mergeState) throws IOException {
  int docCount = 0;
  // Used for bulk-reading raw bytes for stored fields
  int rawDocLengths[] = new int[MAX_RAW_MERGE_DOCS];
  int idx = 0;

  for (AtomicReader reader : mergeState.readers) {
    final SegmentReader matchingSegmentReader = mergeState.matchingSegmentReaders[idx++];
    Lucene40StoredFieldsReader matchingFieldsReader = null;
    if (matchingSegmentReader != null) {
      final StoredFieldsReader fieldsReader = matchingSegmentReader.getFieldsReader();
      // we can only bulk-copy if the matching reader is also a Lucene40FieldsReader
      // (instanceof is false for null, so the former explicit null check was redundant)
      if (fieldsReader instanceof Lucene40StoredFieldsReader) {
        matchingFieldsReader = (Lucene40StoredFieldsReader) fieldsReader;
      }
    }

    // Segments with deletions need per-doc live checks; others can be copied wholesale.
    if (reader.getLiveDocs() != null) {
      docCount += copyFieldsWithDeletions(mergeState,
                                          reader, matchingFieldsReader, rawDocLengths);
    } else {
      docCount += copyFieldsNoDeletions(mergeState,
                                        reader, matchingFieldsReader, rawDocLengths);
    }
  }
  finish(mergeState.fieldInfos, docCount);
  return docCount;
}
项目:read-open-source-code    文件:Lucene40TermVectorsWriter.java   
/**
 * Merges term vectors from all incoming readers, enabling raw-bytes bulk copy
 * whenever the matching segment also uses the Lucene40 term vectors format.
 *
 * @return the total number of documents merged
 */
@Override
public final int merge(MergeState mergeState) throws IOException {
  // Used for bulk-reading raw bytes for term vectors
  int rawDocLengths[] = new int[MAX_RAW_MERGE_DOCS];
  int rawDocLengths2[] = new int[MAX_RAW_MERGE_DOCS];

  int idx = 0;
  int numDocs = 0;
  for (int i = 0; i < mergeState.readers.size(); i++) {
    final AtomicReader reader = mergeState.readers.get(i);

    final SegmentReader matchingSegmentReader = mergeState.matchingSegmentReaders[idx++];
    Lucene40TermVectorsReader matchingVectorsReader = null;
    if (matchingSegmentReader != null) {
      TermVectorsReader vectorsReader = matchingSegmentReader.getTermVectorsReader();
      // bulk copy only works against the same on-disk format
      // (instanceof is false for null, so the former explicit null check was redundant)
      if (vectorsReader instanceof Lucene40TermVectorsReader) {
        matchingVectorsReader = (Lucene40TermVectorsReader) vectorsReader;
      }
    }
    // Segments with deletions need per-doc live checks; others can be copied wholesale.
    if (reader.getLiveDocs() != null) {
      numDocs += copyVectorsWithDeletions(mergeState, matchingVectorsReader, reader, rawDocLengths, rawDocLengths2);
    } else {
      numDocs += copyVectorsNoDeletions(mergeState, matchingVectorsReader, reader, rawDocLengths, rawDocLengths2);
    }
  }
  finish(mergeState.fieldInfos, numDocs);
  return numDocs;
}
项目:read-open-source-code    文件:FieldCacheImpl.java   
// Registers the appropriate purge listener for this reader so cache entries
// are evicted when the reader (or its core) is closed.
private void initReader(AtomicReader reader) {
  if (reader instanceof SegmentReader) {
    ((SegmentReader) reader).addCoreClosedListener(purgeCore);
    return;
  }
  // we have a slow reader of some sort, try to register a purge event
  // rather than relying on gc:
  final Object key = reader.getCoreCacheKey();
  if (key instanceof AtomicReader) {
    ((AtomicReader) key).addReaderClosedListener(purgeReader);
  } else {
    // last chance
    reader.addReaderClosedListener(purgeReader);
  }
}
项目:read-open-source-code    文件:SortingMergePolicy.java   
/** Returns {@code true} if the given {@code reader} is sorted by the specified {@code sort}. */
public static boolean isSorted(AtomicReader reader, Sort sort) {
  if (!(reader instanceof SegmentReader)) {
    return false;
  }
  // The sort used at merge time is recorded in the segment diagnostics.
  final Map<String, String> diagnostics =
      ((SegmentReader) reader).getSegmentInfo().info.getDiagnostics();
  return diagnostics != null && sort.toString().equals(diagnostics.get(SORTER_ID_PROP));
}
项目:Maskana-Gestor-de-Conocimiento    文件:SortingMergePolicy.java   
/** Returns {@code true} if the given {@code reader} is sorted by the given {@code sorter}. */
public static boolean isSorted(AtomicReader reader, Sorter sorter) {
  if (!(reader instanceof SegmentReader)) {
    // Only segment readers carry the diagnostics written at merge time.
    return false;
  }
  final Map<String, String> diagnostics =
      ((SegmentReader) reader).getSegmentInfo().info.getDiagnostics();
  // The sorter id is recorded under SORTER_ID_PROP when a sorting merge runs.
  return diagnostics != null && sorter.getID().equals(diagnostics.get(SORTER_ID_PROP));
}
项目:Maskana-Gestor-de-Conocimiento    文件:Lucene40StoredFieldsWriter.java   
/**
 * Merges stored fields from every reader in {@code mergeState} into this writer,
 * bulk-copying raw bytes whenever the source segment also uses the Lucene40
 * stored fields format (signalled by a non-null matching segment reader).
 *
 * @param mergeState state describing the segments being merged
 * @return total number of documents whose stored fields were written
 * @throws IOException if reading or writing stored fields fails
 */
@Override
public int merge(MergeState mergeState) throws IOException {
  int docCount = 0;
  // Scratch buffer reused across segments for bulk-reading raw stored-field bytes.
  int[] rawDocLengths = new int[MAX_RAW_MERGE_DOCS];
  int idx = 0;

  for (AtomicReader reader : mergeState.readers) {
    // matchingSegmentReaders is index-aligned with readers.
    final SegmentReader matchingSegmentReader = mergeState.matchingSegmentReaders[idx++];
    Lucene40StoredFieldsReader matchingFieldsReader = null;
    if (matchingSegmentReader != null) {
      final StoredFieldsReader fieldsReader = matchingSegmentReader.getFieldsReader();
      // instanceof is null-safe, so no separate null check is needed; we can
      // only bulk-copy if the matching reader is also a Lucene40 fields reader.
      if (fieldsReader instanceof Lucene40StoredFieldsReader) {
        matchingFieldsReader = (Lucene40StoredFieldsReader) fieldsReader;
      }
    }

    if (reader.getLiveDocs() != null) {
      // Segment has deletions: copy only the live documents' fields.
      docCount += copyFieldsWithDeletions(mergeState,
                                          reader, matchingFieldsReader, rawDocLengths);
    } else {
      docCount += copyFieldsNoDeletions(mergeState,
                                        reader, matchingFieldsReader, rawDocLengths);
    }
  }
  finish(mergeState.fieldInfos, docCount);
  return docCount;
}
项目:Maskana-Gestor-de-Conocimiento    文件:Lucene40TermVectorsWriter.java   
/**
 * Merges term vectors from every reader in {@code mergeState} into this writer,
 * bulk-copying raw bytes whenever the source segment also uses the Lucene40
 * term vectors format (signalled by a non-null matching segment reader).
 *
 * @param mergeState state describing the segments being merged
 * @return total number of documents whose term vectors were written
 * @throws IOException if reading or writing term vectors fails
 */
@Override
public final int merge(MergeState mergeState) throws IOException {
  // Scratch buffers reused across segments for bulk-reading raw term-vector bytes.
  int[] rawDocLengths = new int[MAX_RAW_MERGE_DOCS];
  int[] rawDocLengths2 = new int[MAX_RAW_MERGE_DOCS];

  int numDocs = 0;
  for (int i = 0; i < mergeState.readers.size(); i++) {
    final AtomicReader reader = mergeState.readers.get(i);

    // matchingSegmentReaders is index-aligned with readers, so the loop index
    // can be used directly (the original kept a redundant second counter).
    final SegmentReader matchingSegmentReader = mergeState.matchingSegmentReaders[i];
    Lucene40TermVectorsReader matchingVectorsReader = null;
    if (matchingSegmentReader != null) {
      TermVectorsReader vectorsReader = matchingSegmentReader.getTermVectorsReader();
      // instanceof is null-safe, so no separate null check is needed; only the
      // Lucene40 reader supports the raw bulk-copy path.
      if (vectorsReader instanceof Lucene40TermVectorsReader) {
        matchingVectorsReader = (Lucene40TermVectorsReader) vectorsReader;
      }
    }
    if (reader.getLiveDocs() != null) {
      // Segment has deletions: copy only the live documents' vectors.
      numDocs += copyVectorsWithDeletions(mergeState, matchingVectorsReader, reader, rawDocLengths, rawDocLengths2);
    } else {
      numDocs += copyVectorsNoDeletions(mergeState, matchingVectorsReader, reader, rawDocLengths, rawDocLengths2);
    }
  }
  finish(mergeState.fieldInfos, numDocs);
  return numDocs;
}
项目:Maskana-Gestor-de-Conocimiento    文件:FieldCacheImpl.java   
/**
 * Registers cache-purge listeners on {@code reader} so cached entries are
 * evicted when the reader (or its underlying core) is closed, rather than
 * waiting for garbage collection.
 */
private void initReader(AtomicReader reader) {
  if (reader instanceof SegmentReader) {
    // SegmentReader exposes core-close notifications directly.
    ((SegmentReader) reader).addCoreClosedListener(purgeCore);
    return;
  }
  // Slow/composite reader: try to hook a purge event on its core cache key.
  final Object coreKey = reader.getCoreCacheKey();
  if (coreKey instanceof AtomicReader) {
    ((AtomicReader) coreKey).addReaderClosedListener(purgeReader);
  } else {
    // Last resort: listen on the reader instance itself.
    reader.addReaderClosedListener(purgeReader);
  }
}