Java 类 org.apache.lucene.util.Counter 实例源码

项目:elasticsearch_my    文件:DefaultSearchContext.java   
/**
 * Builds a search context bound to a single shard for one search request,
 * wiring together the engine searcher, the per-phase result holders
 * (dfs/query/fetch) and the query shard context used to parse queries.
 *
 * Note: initialization order matters — {@code searcher} must be created
 * before {@code queryShardContext}, which reads its index reader.
 *
 * @param timeEstimateCounter coarse clock counter shared across contexts,
 *        used for cheap timeout estimation during the search
 */
DefaultSearchContext(long id, ShardSearchRequest request, SearchShardTarget shardTarget, Engine.Searcher engineSearcher,
                     IndexService indexService, IndexShard indexShard,
                     BigArrays bigArrays, Counter timeEstimateCounter, TimeValue timeout, FetchPhase fetchPhase) {
    this.id = id;
    this.request = request;
    this.fetchPhase = fetchPhase;
    this.searchType = request.searchType();
    this.shardTarget = shardTarget;
    this.engineSearcher = engineSearcher;
    // SearchContexts use a BigArrays that can circuit break
    this.bigArrays = bigArrays.withCircuitBreaking();
    this.dfsResult = new DfsSearchResult(id, shardTarget);
    this.queryResult = new QuerySearchResult(id, shardTarget);
    this.fetchResult = new FetchSearchResult(id, shardTarget);
    this.indexShard = indexShard;
    this.indexService = indexService;
    this.searcher = new ContextIndexSearcher(engineSearcher, indexService.cache().query(), indexShard.getQueryCachingPolicy());
    this.timeEstimateCounter = timeEstimateCounter;
    this.timeout = timeout;
    queryShardContext = indexService.newQueryShardContext(request.shardId().id(), searcher.getIndexReader(), request::nowInMillis);
    queryShardContext.setTypes(request.types());
    queryBoost = request.indexBoost();
}
项目:Elasticsearch    文件:CrateSearchContext.java   
/**
 * Crate-specific search context. Wraps the fixed {@code nowInMillis} and
 * optional scroll into a {@code CrateSearchShardRequest} and delegates all
 * remaining wiring to the parent context, using strict field matching and
 * no search timeout.
 */
public CrateSearchContext(long id,
                          final long nowInMillis,
                          SearchShardTarget shardTarget,
                          Engine.Searcher engineSearcher,
                          IndexService indexService,
                          final IndexShard indexShard,
                          ScriptService scriptService,
                          PageCacheRecycler pageCacheRecycler,
                          BigArrays bigArrays,
                          Counter timeEstimateCounter,
                          Optional<Scroll> scroll) {
    super(id, new CrateSearchShardRequest(nowInMillis, scroll, indexShard),
            shardTarget, engineSearcher, indexService,
            indexShard, scriptService, pageCacheRecycler,
            bigArrays, timeEstimateCounter, ParseFieldMatcher.STRICT, SearchService.NO_TIMEOUT);
    // Kept locally as well so subclass code can reach the engine searcher directly.
    this.engineSearcher = engineSearcher;
}
项目:Elasticsearch    文件:DefaultSearchContext.java   
/**
 * Builds the default per-shard search context: result holders for the
 * dfs/query/fetch phases, a circuit-breaking BigArrays, and the context
 * index searcher over the engine's searcher.
 *
 * @param timeEstimateCounter coarse clock counter shared across contexts,
 *        used for cheap timeout estimation during the search
 */
public DefaultSearchContext(long id, ShardSearchRequest request, SearchShardTarget shardTarget,
                            Engine.Searcher engineSearcher, IndexService indexService, IndexShard indexShard,
                            ScriptService scriptService, PageCacheRecycler pageCacheRecycler,
                            BigArrays bigArrays, Counter timeEstimateCounter, ParseFieldMatcher parseFieldMatcher,
                            TimeValue timeout
) {
    super(parseFieldMatcher, request);
    this.id = id;
    this.request = request;
    this.searchType = request.searchType();
    this.shardTarget = shardTarget;
    this.engineSearcher = engineSearcher;
    this.scriptService = scriptService;
    this.pageCacheRecycler = pageCacheRecycler;
    // SearchContexts use a BigArrays that can circuit break
    this.bigArrays = bigArrays.withCircuitBreaking();
    this.dfsResult = new DfsSearchResult(id, shardTarget);
    this.queryResult = new QuerySearchResult(id, shardTarget);
    this.fetchResult = new FetchSearchResult(id, shardTarget);
    this.indexShard = indexShard;
    this.indexService = indexService;
    this.searcher = new ContextIndexSearcher(engineSearcher, indexService.cache().query(), indexShard.getQueryCachingPolicy());
    this.timeEstimateCounter = timeEstimateCounter;
    // Timeout is stored in milliseconds for cheap comparison against the counter.
    this.timeoutInMillis = timeout.millis();
}
项目:siren-join    文件:BytesRefTermsSet.java   
/**
 * Rebuilds this terms set from its serialized byte form: a one-byte
 * pruned flag, an int size, then {@code size} length-prefixed terms.
 * Allocations of the rebuilt pool are tracked by a fresh counter.
 */
private void readFromBytes(BytesRef bytes) {
  // Read pruned flag (single byte: 1 = pruned). The redundant
  // "== 1 ? true : false" ternary was simplified to a plain comparison.
  this.setIsPruned(bytes.bytes[bytes.offset++] == 1);

  // Read size of the set
  int size = Bytes.readInt(bytes);

  // Fresh allocation counter and backing pool for the rebuilt hash.
  bytesUsed = Counter.newCounter();
  pool = new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(bytesUsed));
  set = new BytesRefHash(pool);

  // Read terms; one reusable BytesRef is enough because BytesRefHash
  // copies the bytes into its own pool on add().
  BytesRef reusable = new BytesRef();
  for (int i = 0; i < size; i++) {
    Bytes.readBytesRef(bytes, reusable);
    set.add(reusable);
  }
}
项目:community-edition-old    文件:MimetypeGroupingCollector.java   
/**
 * Emits the accumulated per-group counts into the response under
 * "analytics" -> "mimetype()" and forwards finish() down the collector
 * chain when the delegate supports it.
 *
 * @throws IOException if the delegating collector's finish fails
 */
public void finish() throws IOException 
{
    NamedList<Object> analytics = new NamedList<>();
    rb.rsp.add("analytics", analytics);
    NamedList<Object> fieldCounts = new NamedList<>(); 
    analytics.add("mimetype()", fieldCounts);
    // Iterate entries directly instead of keySet()+get(): one map lookup
    // per group instead of two. Fully qualified to avoid a new import.
    for (java.util.Map.Entry<String, Counter> entry : counters.entrySet())
    {
        fieldCounts.add(entry.getKey(), entry.getValue().get());
    }

    if(this.delegate instanceof DelegatingCollector) {
        ((DelegatingCollector)this.delegate).finish();
    }
}
项目:NYBC    文件:TermsHash.java   
/**
 * Holds the shared int/byte block pools used to build in-memory postings
 * for one DocumentsWriterPerThread, forwarding terms to the consumer.
 *
 * Allocations are charged to the writer's byte counter only when
 * {@code trackAllocations} is true; otherwise a private throw-away counter
 * keeps pool bookkeeping working without affecting flush accounting.
 */
public TermsHash(final DocumentsWriterPerThread docWriter, final TermsHashConsumer consumer, boolean trackAllocations, final TermsHash nextTermsHash) {
  this.docState = docWriter.docState;
  this.consumer = consumer;
  this.trackAllocations = trackAllocations; 
  this.nextTermsHash = nextTermsHash;
  this.bytesUsed = trackAllocations ? docWriter.bytesUsed : Counter.newCounter();
  intPool = new IntBlockPool(docWriter.intBlockAllocator);
  bytePool = new ByteBlockPool(docWriter.byteBlockAllocator);

  if (nextTermsHash != null) {
    // We are primary
    primary = true;
    // Primary and secondary share one term byte pool so term bytes are
    // stored only once.
    termBytePool = bytePool;
    nextTermsHash.termBytePool = bytePool;
  } else {
    primary = false;
  }
}
项目:NYBC    文件:DocumentsWriterPerThread.java   
/**
 * Per-thread document writer state: wraps the directory for file
 * tracking, sets up RAM accounting (bytesUsed) and the block allocators
 * that charge against it, then builds the indexing chain.
 */
public DocumentsWriterPerThread(Directory directory, DocumentsWriter parent,
    FieldInfos.Builder fieldInfos, IndexingChain indexingChain) {
  this.directoryOrig = directory;
  // Track every file created so aborted segments can be cleaned up.
  this.directory = new TrackingDirectoryWrapper(directory);
  this.parent = parent;
  this.fieldInfos = fieldInfos;
  this.writer = parent.indexWriter;
  this.infoStream = parent.infoStream;
  this.codec = parent.codec;
  this.docState = new DocState(this, infoStream);
  this.docState.similarity = parent.indexWriter.getConfig().getSimilarity();
  // Both allocators charge their blocks against this single counter.
  bytesUsed = Counter.newCounter();
  byteBlockAllocator = new DirectTrackingAllocator(bytesUsed);
  pendingDeletes = new BufferedDeletes();
  intBlockAllocator = new IntBlockAllocator(bytesUsed);
  initialize();
  // this should be the last call in the ctor 
  // it really sucks that we need to pull this within the ctor and pass this ref to the chain!
  consumer = indexingChain.getChain(this);
}
项目:read-open-source-code    文件:TermsHash.java   
/**
 * Builds the shared int/byte block pools for one DocumentsWriterPerThread
 * and hooks up the terms consumer.
 *
 * When {@code trackAllocations} is false a private counter is used so the
 * pools still account bytes without touching the writer's RAM budget.
 */
public TermsHash(final DocumentsWriterPerThread docWriter, final TermsHashConsumer consumer, boolean trackAllocations, final TermsHash nextTermsHash) {
  this.docState = docWriter.docState;
  this.consumer = consumer;
  this.trackAllocations = trackAllocations; 
  this.nextTermsHash = nextTermsHash;
  this.bytesUsed = trackAllocations ? docWriter.bytesUsed : Counter.newCounter();
  intPool = new IntBlockPool(docWriter.intBlockAllocator);
  bytePool = new ByteBlockPool(docWriter.byteBlockAllocator);

  if (nextTermsHash != null) {
    // We are primary
    primary = true;
    // Share the term byte pool with the downstream (secondary) hash.
    termBytePool = bytePool;
    nextTermsHash.termBytePool = bytePool;
  } else {
    primary = false;
  }
}
项目:read-open-source-code    文件:TermsHash.java   
/**
 * Constructor wiring the postings byte/int pools for one writer thread.
 * bytesUsed points at the writer's counter when allocations are tracked,
 * or at a fresh private counter otherwise.
 */
public TermsHash(final DocumentsWriterPerThread docWriter, final TermsHashConsumer consumer, boolean trackAllocations, final TermsHash nextTermsHash) {
  this.docState = docWriter.docState;
  this.consumer = consumer;
  this.trackAllocations = trackAllocations; 
  this.nextTermsHash = nextTermsHash;
  this.bytesUsed = trackAllocations ? docWriter.bytesUsed : Counter.newCounter();
  intPool = new IntBlockPool(docWriter.intBlockAllocator);
  bytePool = new ByteBlockPool(docWriter.byteBlockAllocator);

  if (nextTermsHash != null) {
    // We are primary
    primary = true;
    // The secondary hash reuses our byte pool for term bytes.
    termBytePool = bytePool;
    nextTermsHash.termBytePool = bytePool;
  } else {
    primary = false;
  }
}
项目:Maskana-Gestor-de-Conocimiento    文件:TermsHash.java   
/**
 * Sets up the int/byte block pools backing the in-memory terms hash for
 * one DocumentsWriterPerThread and links the primary/secondary pair so
 * they share a single term byte pool.
 */
public TermsHash(final DocumentsWriterPerThread docWriter, final TermsHashConsumer consumer, boolean trackAllocations, final TermsHash nextTermsHash) {
  this.docState = docWriter.docState;
  this.consumer = consumer;
  this.trackAllocations = trackAllocations; 
  this.nextTermsHash = nextTermsHash;
  // Untracked instances get a throw-away counter so pool code still works.
  this.bytesUsed = trackAllocations ? docWriter.bytesUsed : Counter.newCounter();
  intPool = new IntBlockPool(docWriter.intBlockAllocator);
  bytePool = new ByteBlockPool(docWriter.byteBlockAllocator);

  if (nextTermsHash != null) {
    // We are primary
    primary = true;
    termBytePool = bytePool;
    nextTermsHash.termBytePool = bytePool;
  } else {
    primary = false;
  }
}
项目:elasticsearch_my    文件:CollectionUtilsTests.java   
/**
 * sortAndDedup must report exactly the number of distinct values and
 * leave the indices enumerating them in sorted order, even when every
 * value was appended twice.
 */
public void testSortAndDedupByteRefArray() {
    SortedSet<BytesRef> expectedUnique = new TreeSet<>();
    final int count = scaledRandomIntBetween(0, 10000);
    List<BytesRef> inserted = new ArrayList<>();
    BytesRefArray refs = new BytesRefArray(Counter.newCounter());
    for (int i = 0; i < count; i++) {
        String value = randomRealisticUnicodeOfCodepointLengthBetween(1, 100);
        expectedUnique.add(new BytesRef(value));
        inserted.add(new BytesRef(value));
        refs.append(new BytesRef(value));
    }
    // Sometimes append every value a second time, shuffled: dedup must
    // collapse the duplicates.
    if (randomBoolean()) {
        Collections.shuffle(inserted, random());
        for (BytesRef duplicate : inserted) {
            refs.append(duplicate);
        }
    }
    int[] indices = new int[refs.size()];
    for (int slot = 0; slot < indices.length; slot++) {
        indices[slot] = slot;
    }
    int uniqueCount = CollectionUtils.sortAndDedup(refs, indices);
    assertThat(uniqueCount, equalTo(expectedUnique.size()));
    Iterator<BytesRef> expected = expectedUnique.iterator();

    // The first uniqueCount indices must walk the sorted unique values.
    BytesRefBuilder scratch = new BytesRefBuilder();
    for (int i = 0; i < uniqueCount; i++) {
        assertThat(expected.hasNext(), is(true));
        assertThat(refs.get(scratch, indices[i]), equalTo(expected.next()));
    }

}
项目:elasticsearch_my    文件:CollectionUtilsTests.java   
/**
 * Sorting a BytesRefArray through an index array must yield the same
 * order as sorting the equivalent List of BytesRef values.
 */
public void testSortByteRefArray() {
    List<BytesRef> expected = new ArrayList<>();
    final int count = scaledRandomIntBetween(0, 10000);
    BytesRefArray refs = new BytesRefArray(Counter.newCounter());
    for (int i = 0; i < count; i++) {
        String value = randomRealisticUnicodeOfCodepointLengthBetween(1, 100);
        expected.add(new BytesRef(value));
        refs.append(new BytesRef(value));
    }
    // Shuffling the reference list must not matter: both sides get sorted.
    if (randomBoolean()) {
        Collections.shuffle(expected, random());
    }
    int[] indices = new int[refs.size()];
    for (int slot = 0; slot < indices.length; slot++) {
        indices[slot] = slot;
    }
    CollectionUtils.sort(refs, indices);
    Collections.sort(expected);
    Iterator<BytesRef> it = expected.iterator();

    BytesRefBuilder scratch = new BytesRefBuilder();
    for (int i = 0; i < expected.size(); i++) {
        assertThat(it.hasNext(), is(true));
        assertThat(refs.get(scratch, indices[i]), equalTo(it.next()));
    }

}
项目:lams    文件:TermsHash.java   
/**
 * Package-private constructor: builds the int/byte block pools for one
 * writer thread. When {@code trackAllocations} is false, a private
 * counter absorbs the accounting instead of the writer's RAM budget.
 * A non-null {@code nextTermsHash} marks this instance as primary and
 * shares its byte pool with the secondary so term bytes exist once.
 */
TermsHash(final DocumentsWriterPerThread docWriter, boolean trackAllocations, TermsHash nextTermsHash) {
  this.docState = docWriter.docState;
  this.trackAllocations = trackAllocations; 
  this.nextTermsHash = nextTermsHash;
  this.bytesUsed = trackAllocations ? docWriter.bytesUsed : Counter.newCounter();
  intPool = new IntBlockPool(docWriter.intBlockAllocator);
  bytePool = new ByteBlockPool(docWriter.byteBlockAllocator);

  if (nextTermsHash != null) {
    // We are primary
    termBytePool = bytePool;
    nextTermsHash.termBytePool = bytePool;
  }
}
项目:lams    文件:BinaryDocValuesWriter.java   
/**
 * Buffers binary doc values for one field until flush: raw bytes go into
 * paged storage, per-document lengths into a packed delta builder.
 *
 * @param iwBytesUsed IndexWriter-wide RAM counter; the initial footprint
 *        of the docsWithField bitset is charged to it immediately
 */
public BinaryDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.bytes = new PagedBytes(BLOCK_BITS);
  this.bytesOut = bytes.getDataOutput();
  this.lengths = PackedLongValues.deltaPackedBuilder(PackedInts.COMPACT);
  this.iwBytesUsed = iwBytesUsed;
  this.docsWithField = new FixedBitSet(64);
  this.bytesUsed = docsWithFieldBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}
项目:lams    文件:SortedSetDocValuesWriter.java   
/**
 * Buffers sorted-set doc values for one field: unique terms in a
 * BytesRefHash, per-document ords in {@code pending} and per-document
 * ord counts in {@code pendingCounts}.
 *
 * @param iwBytesUsed IndexWriter-wide RAM counter; hash-pool allocations
 *        track against it directly, and the builders' initial footprint
 *        is charged up front
 */
public SortedSetDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
          BytesRefHash.DEFAULT_CAPACITY,
          new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  pending = PackedLongValues.packedBuilder(PackedInts.COMPACT);
  pendingCounts = PackedLongValues.deltaPackedBuilder(PackedInts.COMPACT);
  bytesUsed = pending.ramBytesUsed() + pendingCounts.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}
项目:lams    文件:SortedDocValuesWriter.java   
/**
 * Buffers sorted doc values for one field: unique terms in a
 * BytesRefHash, one ord per document in {@code pending}.
 *
 * @param iwBytesUsed IndexWriter-wide RAM counter; pool allocations track
 *        against it and the builder's initial footprint is charged here
 */
public SortedDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
          BytesRefHash.DEFAULT_CAPACITY,
          new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  pending = PackedLongValues.deltaPackedBuilder(PackedInts.COMPACT);
  bytesUsed = pending.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}
项目:lams    文件:NumericDocValuesWriter.java   
/**
 * Buffers numeric doc values for one field in a packed delta builder,
 * optionally tracking which documents actually have a value.
 *
 * @param iwBytesUsed IndexWriter-wide RAM counter, charged the initial
 *        footprint of the builder (and bitset, when tracked) up front
 */
public NumericDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed, boolean trackDocsWithField) {
  pending = PackedLongValues.deltaPackedBuilder(PackedInts.COMPACT);
  docsWithField = trackDocsWithField ? new FixedBitSet(64) : null;
  bytesUsed = pending.ramBytesUsed() + docsWithFieldBytesUsed();
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  iwBytesUsed.addAndGet(bytesUsed);
}
项目:lams    文件:SortedNumericDocValuesWriter.java   
/**
 * Buffers sorted-numeric doc values for one field: all values in
 * {@code pending}, per-document value counts in {@code pendingCounts}.
 *
 * @param iwBytesUsed IndexWriter-wide RAM counter, charged the builders'
 *        initial footprint up front
 */
public SortedNumericDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  pending = PackedLongValues.deltaPackedBuilder(PackedInts.COMPACT);
  pendingCounts = PackedLongValues.deltaPackedBuilder(PackedInts.COMPACT);
  bytesUsed = pending.ramBytesUsed() + pendingCounts.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}
项目:lams    文件:BytesRefArray.java   
/**
 * Creates a new {@link BytesRefArray} with a counter to track allocated bytes.
 *
 * @param bytesUsed counter that accumulates every allocation made by the
 *        backing byte pool; the fixed header overhead (array header plus
 *        one int) is charged to it immediately
 */
public BytesRefArray(Counter bytesUsed) {
  this.pool = new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(
      bytesUsed));
  // Allocate the first buffer eagerly so append() can write right away.
  pool.nextBuffer();
  bytesUsed.addAndGet(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER
      + RamUsageEstimator.NUM_BYTES_INT);
  this.bytesUsed = bytesUsed;
}
项目:siren-join    文件:BytesRefTermsSet.java   
/**
 * Deserializes this terms set from the stream: pruned flag, then the
 * number of terms, then the terms themselves. Rebuilds the backing pool
 * with a fresh allocation-tracking counter.
 *
 * @throws IOException on stream read failure
 */
@Override
public void readFrom(StreamInput in) throws IOException {
  this.setIsPruned(in.readBoolean());
  int size = in.readInt();

  bytesUsed = Counter.newCounter();
  pool = new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(bytesUsed));
  set = new BytesRefHash(pool);

  // size is an int, so an int index suffices (was a needless long).
  for (int i = 0; i < size; i++) {
    set.add(in.readBytesRef());
  }
}
项目:search    文件:TermsHash.java   
/**
 * Builds the int/byte block pools for one writer thread's terms hash.
 * bytesUsed is the writer's counter when allocations are tracked,
 * otherwise a fresh private counter. A non-null {@code nextTermsHash}
 * marks this instance as primary; the two share one term byte pool.
 */
TermsHash(final DocumentsWriterPerThread docWriter, boolean trackAllocations, TermsHash nextTermsHash) {
  this.docState = docWriter.docState;
  this.trackAllocations = trackAllocations; 
  this.nextTermsHash = nextTermsHash;
  this.bytesUsed = trackAllocations ? docWriter.bytesUsed : Counter.newCounter();
  intPool = new IntBlockPool(docWriter.intBlockAllocator);
  bytePool = new ByteBlockPool(docWriter.byteBlockAllocator);

  if (nextTermsHash != null) {
    // We are primary
    termBytePool = bytePool;
    nextTermsHash.termBytePool = bytePool;
  }
}
项目:search    文件:BinaryDocValuesWriter.java   
/**
 * Buffers binary doc values for one field: raw bytes in paged storage,
 * per-document lengths in a packed delta builder.
 *
 * @param iwBytesUsed IndexWriter-wide RAM counter; the bitset's initial
 *        footprint is charged to it immediately
 */
public BinaryDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.bytes = new PagedBytes(BLOCK_BITS);
  this.bytesOut = bytes.getDataOutput();
  this.lengths = PackedLongValues.deltaPackedBuilder(PackedInts.COMPACT);
  this.iwBytesUsed = iwBytesUsed;
  this.docsWithField = new FixedBitSet(64);
  this.bytesUsed = docsWithFieldBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}
项目:search    文件:SortedSetDocValuesWriter.java   
/**
 * Buffers sorted-set doc values: unique terms in a BytesRefHash whose
 * pool tracks directly against {@code iwBytesUsed}, per-document ords in
 * {@code pending}, per-document ord counts in {@code pendingCounts}.
 * The builders' initial footprint is charged to the counter up front.
 */
public SortedSetDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
          BytesRefHash.DEFAULT_CAPACITY,
          new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  pending = PackedLongValues.packedBuilder(PackedInts.COMPACT);
  pendingCounts = PackedLongValues.deltaPackedBuilder(PackedInts.COMPACT);
  bytesUsed = pending.ramBytesUsed() + pendingCounts.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}
项目:search    文件:SortedDocValuesWriter.java   
/**
 * Buffers sorted doc values: unique terms in a BytesRefHash whose pool
 * tracks directly against {@code iwBytesUsed}, one ord per document in
 * {@code pending}. The builder's initial footprint is charged up front.
 */
public SortedDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
          BytesRefHash.DEFAULT_CAPACITY,
          new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  pending = PackedLongValues.deltaPackedBuilder(PackedInts.COMPACT);
  bytesUsed = pending.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}
项目:search    文件:NumericDocValuesWriter.java   
/**
 * Buffers numeric doc values in a packed delta builder, optionally
 * tracking which documents carry a value in a bitset.
 *
 * @param iwBytesUsed IndexWriter-wide RAM counter, charged the initial
 *        footprint up front
 */
public NumericDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed, boolean trackDocsWithField) {
  pending = PackedLongValues.deltaPackedBuilder(PackedInts.COMPACT);
  docsWithField = trackDocsWithField ? new FixedBitSet(64) : null;
  bytesUsed = pending.ramBytesUsed() + docsWithFieldBytesUsed();
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  iwBytesUsed.addAndGet(bytesUsed);
}
项目:search    文件:SortedNumericDocValuesWriter.java   
/**
 * Buffers sorted-numeric doc values: all values in {@code pending} and
 * per-document value counts in {@code pendingCounts}; the builders'
 * initial footprint is charged to the IndexWriter RAM counter up front.
 */
public SortedNumericDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  pending = PackedLongValues.deltaPackedBuilder(PackedInts.COMPACT);
  pendingCounts = PackedLongValues.deltaPackedBuilder(PackedInts.COMPACT);
  bytesUsed = pending.ramBytesUsed() + pendingCounts.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}
项目:search    文件:BytesRefArray.java   
/**
 * Creates a new {@link BytesRefArray} with a counter to track allocated bytes.
 *
 * @param bytesUsed counter accumulating every pool allocation; the fixed
 *        header overhead (array header + one int) is charged immediately
 */
public BytesRefArray(Counter bytesUsed) {
  this.pool = new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(
      bytesUsed));
  // Eagerly grab the first buffer so append() has space from the start.
  pool.nextBuffer();
  bytesUsed.addAndGet(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER
      + RamUsageEstimator.NUM_BYTES_INT);
  this.bytesUsed = bytesUsed;
}
项目:search    文件:TestIntBlockPool.java   
/**
 * Writes a slice of sequential ints into an IntBlockPool, reads them
 * back in order, then resets the pool and checks the byte accounting.
 */
public void testSingleWriterReader() {
  Counter bytesUsed = Counter.newCounter();
  IntBlockPool pool = new IntBlockPool(new ByteTrackingAllocator(bytesUsed));

  for (int round = 0; round < 2; round++) {
    // Write phase: a single slice of ascending ints.
    IntBlockPool.SliceWriter writer = new IntBlockPool.SliceWriter(pool);
    int sliceStart = writer.startNewSlice();
    int count = atLeast(100);
    for (int value = 0; value < count; value++) {
      writer.writeInt(value);
    }

    // Read phase: values must come back in write order, then end-of-slice.
    int sliceEnd = writer.getCurrentOffset();
    IntBlockPool.SliceReader reader = new IntBlockPool.SliceReader(pool);
    reader.reset(sliceStart, sliceEnd);
    for (int expected = 0; expected < count; expected++) {
      assertEquals(expected, reader.readInt());
    }
    assertTrue(reader.endOfSlice());

    // Reset: without buffer reuse nothing stays accounted; with reuse
    // exactly one int block remains charged to the counter.
    if (random().nextBoolean()) {
      pool.reset(true, false);
      assertEquals(0, bytesUsed.get());
    } else {
      pool.reset(true, true);
      assertEquals(IntBlockPool.INT_BLOCK_SIZE
          * RamUsageEstimator.NUM_BYTES_INT, bytesUsed.get());
    }
  }
}
项目:community-edition-old    文件:MimetypeGroupingCollector.java   
/**
 * Counts the current document under its (possibly remapped) mimetype
 * group, lazily creating a Counter per group, then forwards the doc to
 * the delegate collector.
 *
 * @throws IOException if the delegate collector fails
 */
@Override
public void collect(int doc) throws IOException 
{
    if (sortedDocValues != null)
    {
        int ord = sortedDocValues.getOrd(doc);
        // ord of -1 means this document has no value for the field.
        if (ord > -1)
        {
            String value = (String) schemaField.getType().toObject(schemaField, sortedDocValues.lookupOrd(ord));
            String group;
            if (doGroup)
            {
                // Unmapped values fall back to themselves as the group key.
                String mapped = mappings.get(value);
                group = mapped != null ? mapped : value;
            }
            else
            {
                group = value;
            }

            Counter counter = counters.get(group);
            if (counter == null)
            {
                counter = Counter.newCounter();
                counters.put(group, counter);
            }
            counter.addAndGet(1);
        }
    }


    delegate.collect(doc);
}
项目:NYBC    文件:BytesRefArray.java   
/**
 * Creates a new {@link BytesRefArray} with a counter to track allocated bytes.
 *
 * @param bytesUsed counter that every pool allocation is charged to; the
 *        fixed header overhead (array header + one int) is added here
 */
public BytesRefArray(Counter bytesUsed) {
  this.pool = new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(
      bytesUsed));
  // First buffer is allocated eagerly so appends can proceed immediately.
  pool.nextBuffer();
  bytesUsed.addAndGet(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER
      + RamUsageEstimator.NUM_BYTES_INT);
  this.bytesUsed = bytesUsed;
}
项目:NYBC    文件:SortedSetDocValuesWriter.java   
/**
 * Buffers sorted-set doc values: unique terms in a BytesRefHash whose
 * pool tracks directly against {@code iwBytesUsed}; per-document ords and
 * ord counts in appending long buffers, whose initial footprint is
 * charged to the counter up front.
 */
public SortedSetDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
          BytesRefHash.DEFAULT_CAPACITY,
          new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  pending = new AppendingLongBuffer();
  pendingCounts = new AppendingLongBuffer();
  bytesUsed = pending.ramBytesUsed() + pendingCounts.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}
项目:NYBC    文件:SortedDocValuesWriter.java   
/**
 * Buffers sorted doc values: unique terms in a BytesRefHash tracked
 * against {@code iwBytesUsed}, one ord per document in an appending long
 * buffer whose initial footprint is charged to the counter up front.
 */
public SortedDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
          BytesRefHash.DEFAULT_CAPACITY,
          new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  pending = new AppendingLongBuffer();
  bytesUsed = pending.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}
项目:NYBC    文件:NumericDocValuesWriter.java   
/**
 * Buffers numeric doc values for one field in an appending long buffer;
 * the buffer's initial footprint is charged to the IndexWriter-wide RAM
 * counter up front.
 */
public NumericDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  pending = new AppendingLongBuffer();
  bytesUsed = pending.ramBytesUsed();
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  iwBytesUsed.addAndGet(bytesUsed);
}
项目:NYBC    文件:TestIntBlockPool.java   
/**
 * Round-trips sequential ints through one IntBlockPool slice and then
 * verifies the pool's byte accounting after reset.
 */
public void testSingleWriterReader() {
  Counter bytesUsed = Counter.newCounter();
  IntBlockPool pool = new IntBlockPool(new ByteTrackingAllocator(bytesUsed));

  for (int iteration = 0; iteration < 2; iteration++) {
    IntBlockPool.SliceWriter writer = new IntBlockPool.SliceWriter(pool);
    // Write ascending ints into a fresh slice.
    int begin = writer.startNewSlice();
    int total = atLeast(100);
    for (int n = 0; n < total; n++) {
      writer.writeInt(n);
    }

    int end = writer.getCurrentOffset();
    IntBlockPool.SliceReader reader = new IntBlockPool.SliceReader(pool);
    reader.reset(begin, end);
    // Values must read back in exactly the order written.
    for (int n = 0; n < total; n++) {
      assertEquals(n, reader.readInt());
    }
    assertTrue(reader.endOfSlice());
    // reset without reuse frees everything; with reuse one block stays
    // charged to the counter.
    if (random().nextBoolean()) {
      pool.reset(true, false);
      assertEquals(0, bytesUsed.get());
    } else {
      pool.reset(true, true);
      assertEquals(IntBlockPool.INT_BLOCK_SIZE
          * RamUsageEstimator.NUM_BYTES_INT, bytesUsed.get());
    }
  }
}
项目:read-open-source-code    文件:BytesRefArray.java   
/**
 * Creates a new {@link BytesRefArray} with a counter to track allocated bytes.
 *
 * @param bytesUsed counter accumulating all pool allocations; the fixed
 *        header overhead (array header + one int) is charged immediately
 */
public BytesRefArray(Counter bytesUsed) {
  this.pool = new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(
      bytesUsed));
  // Allocate the first buffer eagerly so append() can write right away.
  pool.nextBuffer();
  bytesUsed.addAndGet(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER
      + RamUsageEstimator.NUM_BYTES_INT);
  this.bytesUsed = bytesUsed;
}
项目:read-open-source-code    文件:BinaryDocValuesWriter.java   
/**
 * Buffers binary doc values: raw bytes in paged storage, per-document
 * lengths in an appending delta-packed buffer.
 *
 * @param iwBytesUsed IndexWriter-wide RAM counter; the bitset's initial
 *        footprint is charged to it immediately
 */
public BinaryDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.bytes = new PagedBytes(BLOCK_BITS);
  this.bytesOut = bytes.getDataOutput();
  this.lengths = new AppendingDeltaPackedLongBuffer(PackedInts.COMPACT);
  this.iwBytesUsed = iwBytesUsed;
  this.docsWithField = new FixedBitSet(64);
  this.bytesUsed = docsWithFieldBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}
项目:read-open-source-code    文件:SortedSetDocValuesWriter.java   
/**
 * Buffers sorted-set doc values: unique terms in a BytesRefHash tracked
 * against {@code iwBytesUsed}; per-document ords and ord counts in
 * appending packed buffers whose initial footprint is charged up front.
 */
public SortedSetDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
          BytesRefHash.DEFAULT_CAPACITY,
          new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  pending = new AppendingPackedLongBuffer(PackedInts.COMPACT);
  pendingCounts = new AppendingDeltaPackedLongBuffer(PackedInts.COMPACT);
  bytesUsed = pending.ramBytesUsed() + pendingCounts.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}
项目:read-open-source-code    文件:SortedDocValuesWriter.java   
/**
 * Buffers sorted doc values: unique terms in a BytesRefHash tracked
 * against {@code iwBytesUsed}, one ord per document in an appending
 * delta-packed buffer whose initial footprint is charged up front.
 */
public SortedDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
          BytesRefHash.DEFAULT_CAPACITY,
          new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  pending = new AppendingDeltaPackedLongBuffer(PackedInts.COMPACT);
  bytesUsed = pending.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}
项目:read-open-source-code    文件:NumericDocValuesWriter.java   
/**
 * Buffers numeric doc values in an appending delta-packed buffer,
 * optionally tracking which documents carry a value in a bitset.
 *
 * @param iwBytesUsed IndexWriter-wide RAM counter, charged the initial
 *        footprint up front
 */
public NumericDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed, boolean trackDocsWithField) {
  pending = new AppendingDeltaPackedLongBuffer(PackedInts.COMPACT);
  docsWithField = trackDocsWithField ? new FixedBitSet(64) : null;
  bytesUsed = pending.ramBytesUsed() + docsWithFieldBytesUsed();
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  iwBytesUsed.addAndGet(bytesUsed);
}
项目:read-open-source-code    文件:BytesRefArray.java   
/**
 * Creates a new {@link BytesRefArray} with a counter to track allocated bytes.
 *
 * @param bytesUsed counter that every byte-pool allocation is charged to;
 *        the fixed header overhead (array header + one int) is added here
 */
public BytesRefArray(Counter bytesUsed) {
  this.pool = new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(
      bytesUsed));
  // Grab the first buffer up front so appends can start immediately.
  pool.nextBuffer();
  bytesUsed.addAndGet(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER
      + RamUsageEstimator.NUM_BYTES_INT);
  this.bytesUsed = bytesUsed;
}