Java class org.apache.lucene.util.RamUsageEstimator usage examples
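
RamUsageEstimator estimates the heap footprint of Java objects from Lucene's model of JVM memory layout: sizeOf measures primitive arrays deeply, shallowSizeOf counts only an object's own header and fields, and the NUM_BYTES_* constants expose the layout parameters that the snippets below combine by hand. A minimal sketch of the API (against Lucene 4.x/5.x, where most of these snippets originate; some constants such as NUM_BYTES_INT were removed in later versions in favor of Integer.BYTES):

import org.apache.lucene.util.RamUsageEstimator;

public class RamUsageDemo {
    public static void main(String[] args) {
        long[] values = new long[1024];
        // Deep size of a primitive array: header + 8 bytes per slot, aligned.
        System.out.println(RamUsageEstimator.sizeOf(values));
        // Shallow size: object header + fields, ignoring referenced objects.
        System.out.println(RamUsageEstimator.shallowSizeOf(new java.util.ArrayList<String>()));
        // Layout constants, e.g. the size of one object reference.
        System.out.println(RamUsageEstimator.NUM_BYTES_OBJECT_REF);
        // Pretty-printed byte counts.
        System.out.println(RamUsageEstimator.humanReadableUnits(RamUsageEstimator.sizeOf(values)));
    }
}
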

Project: elasticsearch_my    File: CompositeBytesReference.java
public CompositeBytesReference(BytesReference... references) {
    this.references = Objects.requireNonNull(references, "references must not be null");
    this.offsets = new int[references.length];
    long ramBytesUsed = 0;
    int offset = 0;
    for (int i = 0; i < references.length; i++) {
        BytesReference reference = references[i];
        if (reference == null) {
            throw new IllegalArgumentException("references must not be null");
        }
        offsets[i] = offset; // we use the offsets to seek into the right BytesReference for random access and slicing
        offset += reference.length();
        ramBytesUsed += reference.ramBytesUsed();
    }
    this.ramBytesUsed = ramBytesUsed
        + (Integer.BYTES * offsets.length + RamUsageEstimator.NUM_BYTES_ARRAY_HEADER) // offsets
        + (references.length * RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_ARRAY_HEADER) // references
        + Integer.BYTES // length
        + Long.BYTES; // ramBytesUsed
    length = offset;
}
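
The constructor above tallies its own footprint by hand from the same layout constants RamUsageEstimator uses internally. A sketch of how the offsets-array term relates to a measured size (the class name here is illustrative):

import org.apache.lucene.util.RamUsageEstimator;

public class OffsetsFootprint {
    public static void main(String[] args) {
        int[] offsets = new int[16];
        // The constructor's term: array header + 4 bytes per int slot...
        long estimated = RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + Integer.BYTES * (long) offsets.length;
        // ...while sizeOf also rounds up to the JVM's object alignment.
        long measured = RamUsageEstimator.sizeOf(offsets);
        System.out.println(estimated + " <= " + measured);
    }
}
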
Project: elasticsearch_my    File: BigByteArray.java
/** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */
@Override
public void resize(long newSize) {
    final int numPages = numPages(newSize);
    if (numPages > pages.length) {
        pages = Arrays.copyOf(pages, ArrayUtil.oversize(numPages, RamUsageEstimator.NUM_BYTES_OBJECT_REF));
    }
    for (int i = numPages - 1; i >= 0 && pages[i] == null; --i) {
        pages[i] = newBytePage(i);
    }
    for (int i = numPages; i < pages.length && pages[i] != null; ++i) {
        pages[i] = null;
        releasePage(i);
    }
    this.size = newSize;
}
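
This resize pattern recurs in every Big*Array variant below: grow the page-reference array with ArrayUtil.oversize, walk backwards allocating the missing pages up to the new page count, then walk forwards releasing pages beyond it. oversize takes the per-element byte width (here the size of an object reference) so growth lands on allocation-friendly sizes; a minimal sketch of the growth step:

import java.util.Arrays;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.RamUsageEstimator;

public class OversizeDemo {
    public static void main(String[] args) {
        byte[][] pages = new byte[4][];
        int numPages = 100;
        // oversize returns a length >= numPages, with headroom so that
        // repeated resizes stay amortized O(1).
        pages = Arrays.copyOf(pages, ArrayUtil.oversize(numPages, RamUsageEstimator.NUM_BYTES_OBJECT_REF));
        System.out.println(pages.length + " >= " + numPages);
    }
}
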
Project: elasticsearch_my    File: BigObjectArray.java
/** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */
@Override
public void resize(long newSize) {
    final int numPages = numPages(newSize);
    if (numPages > pages.length) {
        pages = Arrays.copyOf(pages, ArrayUtil.oversize(numPages, RamUsageEstimator.NUM_BYTES_OBJECT_REF));
    }
    for (int i = numPages - 1; i >= 0 && pages[i] == null; --i) {
        pages[i] = newObjectPage(i);
    }
    for (int i = numPages; i < pages.length && pages[i] != null; ++i) {
        pages[i] = null;
        releasePage(i);
    }
    this.size = newSize;
}
Project: elasticsearch_my    File: BigIntArray.java
/** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */
@Override
public void resize(long newSize) {
    final int numPages = numPages(newSize);
    if (numPages > pages.length) {
        pages = Arrays.copyOf(pages, ArrayUtil.oversize(numPages, RamUsageEstimator.NUM_BYTES_OBJECT_REF));
    }
    for (int i = numPages - 1; i >= 0 && pages[i] == null; --i) {
        pages[i] = newIntPage(i);
    }
    for (int i = numPages; i < pages.length && pages[i] != null; ++i) {
        pages[i] = null;
        releasePage(i);
    }
    this.size = newSize;
}
Project: elasticsearch_my    File: BigLongArray.java
/** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */
@Override
public void resize(long newSize) {
    final int numPages = numPages(newSize);
    if (numPages > pages.length) {
        pages = Arrays.copyOf(pages, ArrayUtil.oversize(numPages, RamUsageEstimator.NUM_BYTES_OBJECT_REF));
    }
    for (int i = numPages - 1; i >= 0 && pages[i] == null; --i) {
        pages[i] = newLongPage(i);
    }
    for (int i = numPages; i < pages.length && pages[i] != null; ++i) {
        pages[i] = null;
        releasePage(i);
    }
    this.size = newSize;
}
Project: elasticsearch_my    File: BigFloatArray.java
/** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */
@Override
public void resize(long newSize) {
    final int numPages = numPages(newSize);
    if (numPages > pages.length) {
        pages = Arrays.copyOf(pages, ArrayUtil.oversize(numPages, RamUsageEstimator.NUM_BYTES_OBJECT_REF));
    }
    for (int i = numPages - 1; i >= 0 && pages[i] == null; --i) {
        pages[i] = newIntPage(i);
    }
    for (int i = numPages; i < pages.length && pages[i] != null; ++i) {
        pages[i] = null;
        releasePage(i);
    }
    this.size = newSize;
}
Project: lams    File: Lucene40DocValuesReader.java
private NumericDocValues loadIntField(FieldInfo field, IndexInput input) throws IOException {
  CodecUtil.checkHeader(input, Lucene40DocValuesFormat.INTS_CODEC_NAME,
                               Lucene40DocValuesFormat.INTS_VERSION_START,
                               Lucene40DocValuesFormat.INTS_VERSION_CURRENT);
  int valueSize = input.readInt();
  if (valueSize != 4) {
    throw new CorruptIndexException("invalid valueSize: " + valueSize);
  }
  int maxDoc = state.segmentInfo.getDocCount();
  final int[] values = new int[maxDoc];
  for (int i = 0; i < values.length; i++) {
    values[i] = input.readInt();
  }
  ramBytesUsed.addAndGet(RamUsageEstimator.sizeOf(values));
  return new NumericDocValues() {
    @Override
    public long get(int docID) {
      return values[docID];
    }
  };
}
Project: lams    File: Lucene40DocValuesReader.java
private NumericDocValues loadLongField(FieldInfo field, IndexInput input) throws IOException {
  CodecUtil.checkHeader(input, Lucene40DocValuesFormat.INTS_CODEC_NAME,
                               Lucene40DocValuesFormat.INTS_VERSION_START,
                               Lucene40DocValuesFormat.INTS_VERSION_CURRENT);
  int valueSize = input.readInt();
  if (valueSize != 8) {
    throw new CorruptIndexException("invalid valueSize: " + valueSize);
  }
  int maxDoc = state.segmentInfo.getDocCount();
  final long[] values = new long[maxDoc];
  for (int i = 0; i < values.length; i++) {
    values[i] = input.readLong();
  }
  ramBytesUsed.addAndGet(RamUsageEstimator.sizeOf(values));
  return new NumericDocValues() {
    @Override
    public long get(int docID) {
      return values[docID];
    }
  };
}
Project: lams    File: Lucene40DocValuesReader.java
private NumericDocValues loadFloatField(FieldInfo field, IndexInput input) throws IOException {
  CodecUtil.checkHeader(input, Lucene40DocValuesFormat.FLOATS_CODEC_NAME,
                               Lucene40DocValuesFormat.FLOATS_VERSION_START,
                               Lucene40DocValuesFormat.FLOATS_VERSION_CURRENT);
  int valueSize = input.readInt();
  if (valueSize != 4) {
    throw new CorruptIndexException("invalid valueSize: " + valueSize);
  }
  int maxDoc = state.segmentInfo.getDocCount();
  final int[] values = new int[maxDoc];
  for (int i = 0; i < values.length; i++) {
    values[i] = input.readInt();
  }
  ramBytesUsed.addAndGet(RamUsageEstimator.sizeOf(values));
  return new NumericDocValues() {
    @Override
    public long get(int docID) {
      return values[docID];
    }
  };
}
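
All three loaders share one shape: check the fixed value width, bulk-read maxDoc values into a primitive array, and charge that array to ramBytesUsed with RamUsageEstimator.sizeOf. (loadFloatField deliberately keeps the raw int bits; the 4.0 format's consumers presumably reinterpret them with Float.intBitsToFloat.) What the accounting line adds, as a sketch:

import org.apache.lucene.util.RamUsageEstimator;

public class LoaderFootprint {
    public static void main(String[] args) {
        int maxDoc = 1_000_000;
        int[] values = new int[maxDoc];
        // sizeOf(int[]) = align(array header + 4 * length): roughly 4 MB here,
        // exactly what ramBytesUsed.addAndGet(...) records above.
        System.out.println(RamUsageEstimator.humanReadableUnits(RamUsageEstimator.sizeOf(values)));
    }
}
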
Project: lams    File: Lucene49DocValuesProducer.java
/** returns an address instance for prefix-compressed binary values. */
private MonotonicBlockPackedReader getIntervalInstance(IndexInput data, FieldInfo field, BinaryEntry bytes) throws IOException {
  final MonotonicBlockPackedReader addresses;
  final long interval = bytes.addressInterval;
  synchronized (addressInstances) {
    MonotonicBlockPackedReader addrInstance = addressInstances.get(field.number);
    if (addrInstance == null) {
      data.seek(bytes.addressesOffset);
      final long size;
      if (bytes.count % interval == 0) {
        size = bytes.count / interval;
      } else {
        size = 1L + bytes.count / interval;
      }
      addrInstance = MonotonicBlockPackedReader.of(data, bytes.packedIntsVersion, bytes.blockSize, size, false);
      addressInstances.put(field.number, addrInstance);
      ramBytesUsed.addAndGet(addrInstance.ramBytesUsed() + RamUsageEstimator.NUM_BYTES_INT);
    }
    addresses = addrInstance;
  }
  return addresses;
}
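
The if/else computes a ceiling division: one address per interval of values, plus one more when a partial interval remains. A compact equivalent, alongside the power-of-two mask-and-shift variant that Lucene410DocValuesProducer uses further down this page:

public class CeilDiv {
    // Equivalent to the if/else above for any positive interval:
    static long ceilDiv(long count, long interval) {
        return (count + interval - 1) / interval;
    }
    public static void main(String[] args) {
        System.out.println(ceilDiv(10, 4)); // 3, same result as the branch for count=10, interval=4
    }
}
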
Project: Elasticsearch    File: BigFloatArray.java
/** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */
@Override
public void resize(long newSize) {
    final int numPages = numPages(newSize);
    if (numPages > pages.length) {
        pages = Arrays.copyOf(pages, ArrayUtil.oversize(numPages, RamUsageEstimator.NUM_BYTES_OBJECT_REF));
    }
    for (int i = numPages - 1; i >= 0 && pages[i] == null; --i) {
        pages[i] = newIntPage(i);
    }
    for (int i = numPages; i < pages.length && pages[i] != null; ++i) {
        pages[i] = null;
        releasePage(i);
    }
    this.size = newSize;
}
Project: Elasticsearch    File: Translog.java
/**
 * Writes all operations in the given iterable to the given output stream, including the size of the array.
 * Use {@link #readOperations(StreamInput)} to read it back.
 */
public static void writeOperations(StreamOutput outStream, List<Operation> toWrite) throws IOException {
    final ReleasableBytesStreamOutput out = new ReleasableBytesStreamOutput(BigArrays.NON_RECYCLING_INSTANCE);
    try {
        outStream.writeInt(toWrite.size());
        final BufferedChecksumStreamOutput checksumStreamOutput = new BufferedChecksumStreamOutput(out);
        for (Operation op : toWrite) {
            out.reset();
            final long start = out.position();
            out.skip(RamUsageEstimator.NUM_BYTES_INT);
            writeOperationNoSize(checksumStreamOutput, op);
            long end = out.position();
            int operationSize = (int) (out.position() - RamUsageEstimator.NUM_BYTES_INT - start);
            out.seek(start);
            out.writeInt(operationSize);
            out.seek(end);
            ReleasablePagedBytesReference bytes = out.bytes();
            bytes.writeTo(outStream);
        }
    } finally {
        Releasables.close(out.bytes());
    }
}
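
The loop uses a classic length-prefix framing trick: reserve four bytes, write the operation, then seek back and patch the actual size into the reserved slot. The same idea with a plain ByteBuffer (a simplified, hypothetical sketch):

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class FramingSketch {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(64);
        int start = buf.position();
        buf.position(start + Integer.BYTES);                        // skip the size slot
        buf.put("some operation".getBytes(StandardCharsets.UTF_8)); // the payload
        int end = buf.position();
        buf.putInt(start, end - start - Integer.BYTES);             // patch the size in
        buf.position(end);                                          // continue after the frame
        System.out.println("payload bytes: " + buf.getInt(start));
    }
}
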
Project: lams    File: SortedSetDocValuesWriter.java
private void addOneValue(BytesRef value) {
  int termID = hash.add(value);
  if (termID < 0) {
    termID = -termID-1;
  } else {
    // reserve additional space for each unique value:
    // 1. when indexing, when hash is 50% full, rehash() suddenly needs 2*size ints.
    //    TODO: can this same OOM happen in THPF?
    // 2. when flushing, we need 1 int per value (slot in the ordMap).
    iwBytesUsed.addAndGet(2 * RamUsageEstimator.NUM_BYTES_INT);
  }

  if (currentUpto == currentValues.length) {
    currentValues = ArrayUtil.grow(currentValues, currentValues.length+1);
    // reserve additional space for max # values per-doc
    // when flushing, we need an int[] to sort the mapped-ords within the doc
    iwBytesUsed.addAndGet((currentValues.length - currentUpto) * 2 * RamUsageEstimator.NUM_BYTES_INT);
  }

  currentValues[currentUpto] = termID;
  currentUpto++;
}
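
BytesRefHash.add returns a fresh non-negative term id for a new value, and -(id)-1 when the value was already added, which is what the sign check above decodes. A small sketch:

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefHash;

public class TermIdDemo {
    public static void main(String[] args) {
        BytesRefHash hash = new BytesRefHash();
        int first = hash.add(new BytesRef("lucene"));  // >= 0: newly added
        int again = hash.add(new BytesRef("lucene"));  // < 0: equals -first - 1
        System.out.println(first + " " + again);
    }
}
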
Project: Elasticsearch    File: BigByteArray.java
/** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */
@Override
public void resize(long newSize) {
    final int numPages = numPages(newSize);
    if (numPages > pages.length) {
        pages = Arrays.copyOf(pages, ArrayUtil.oversize(numPages, RamUsageEstimator.NUM_BYTES_OBJECT_REF));
    }
    for (int i = numPages - 1; i >= 0 && pages[i] == null; --i) {
        pages[i] = newBytePage(i);
    }
    for (int i = numPages; i < pages.length && pages[i] != null; ++i) {
        pages[i] = null;
        releasePage(i);
    }
    this.size = newSize;
}
Project: lams    File: WordDelimiterFilter.java
/**
 * Saves the existing attribute states
 */
private void saveState() {
  // otherwise, we have delimiters, save state
  savedStartOffset = offsetAttribute.startOffset();
  savedEndOffset = offsetAttribute.endOffset();
  // if the length implied by the start and end offsets doesn't match the term text, assume this is a synonym and don't adjust the offsets.
  hasIllegalOffsets = (savedEndOffset - savedStartOffset != termAttribute.length());
  savedType = typeAttribute.type();

  if (savedBuffer.length < termAttribute.length()) {
    savedBuffer = new char[ArrayUtil.oversize(termAttribute.length(), RamUsageEstimator.NUM_BYTES_CHAR)];
  }

  System.arraycopy(termAttribute.buffer(), 0, savedBuffer, 0, termAttribute.length());
  iterator.text = savedBuffer;

  hasSavedState = true;
}
Project: lams    File: Lucene47WordDelimiterFilter.java
/**
 * Saves the existing attribute states
 */
private void saveState() {
  // otherwise, we have delimiters, save state
  savedStartOffset = offsetAttribute.startOffset();
  savedEndOffset = offsetAttribute.endOffset();
  // if the length implied by the start and end offsets doesn't match the term text, assume this is a synonym and don't adjust the offsets.
  hasIllegalOffsets = (savedEndOffset - savedStartOffset != termAttribute.length());
  savedType = typeAttribute.type();

  if (savedBuffer.length < termAttribute.length()) {
    savedBuffer = new char[ArrayUtil.oversize(termAttribute.length(), RamUsageEstimator.NUM_BYTES_CHAR)];
  }

  System.arraycopy(termAttribute.buffer(), 0, savedBuffer, 0, termAttribute.length());
  iterator.text = savedBuffer;

  hasSavedState = true;
}
Project: Elasticsearch    File: BigObjectArray.java
/** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */
@Override
public void resize(long newSize) {
    final int numPages = numPages(newSize);
    if (numPages > pages.length) {
        pages = Arrays.copyOf(pages, ArrayUtil.oversize(numPages, RamUsageEstimator.NUM_BYTES_OBJECT_REF));
    }
    for (int i = numPages - 1; i >= 0 && pages[i] == null; --i) {
        pages[i] = newObjectPage(i);
    }
    for (int i = numPages; i < pages.length && pages[i] != null; ++i) {
        pages[i] = null;
        releasePage(i);
    }
    this.size = newSize;
}
Project: lams    File: FST.java
private long ramBytesUsed(Arc<T>[] arcs) {
  long size = 0;
  if (arcs != null) {
    size += RamUsageEstimator.shallowSizeOf(arcs);
    for (Arc<T> arc : arcs) {
      if (arc != null) {
        size += ARC_SHALLOW_RAM_BYTES_USED;
        if (arc.output != null && arc.output != outputs.getNoOutput()) {
          size += outputs.ramBytesUsed(arc.output);
        }
        if (arc.nextFinalOutput != null && arc.nextFinalOutput != outputs.getNoOutput()) {
          size += outputs.ramBytesUsed(arc.nextFinalOutput);
        }
      }
    }
  }
  return size;
}
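
Note the split: shallowSizeOf(arcs) charges only the array itself (header plus one reference per slot), while the loop adds the per-Arc shallow size and any non-shared outputs. The distinction, sketched:

import org.apache.lucene.util.RamUsageEstimator;

public class ShallowVsDeep {
    public static void main(String[] args) {
        long[][] nested = new long[8][128];
        // Shallow: just the outer array (header + 8 references, aligned).
        long shallow = RamUsageEstimator.shallowSizeOf(nested);
        // Deep, by hand: add each referenced array, as the FST loop does for arcs.
        long deep = shallow;
        for (long[] inner : nested) {
            deep += RamUsageEstimator.sizeOf(inner);
        }
        System.out.println(shallow + " vs " + deep);
    }
}
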
Project: Elasticsearch    File: BigDoubleArray.java
/** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */
@Override
public void resize(long newSize) {
    final int numPages = numPages(newSize);
    if (numPages > pages.length) {
        pages = Arrays.copyOf(pages, ArrayUtil.oversize(numPages, RamUsageEstimator.NUM_BYTES_OBJECT_REF));
    }
    for (int i = numPages - 1; i >= 0 && pages[i] == null; --i) {
        pages[i] = newLongPage(i);
    }
    for (int i = numPages; i < pages.length && pages[i] != null; ++i) {
        pages[i] = null;
        releasePage(i);
    }
    this.size = newSize;
}
Project: lams    File: Packed8ThreeBlocks.java
@Override
public long ramBytesUsed() {
  return RamUsageEstimator.alignObjectSize(
      RamUsageEstimator.NUM_BYTES_OBJECT_HEADER
      + 2 * RamUsageEstimator.NUM_BYTES_INT     // valueCount,bitsPerValue
      + RamUsageEstimator.NUM_BYTES_OBJECT_REF) // blocks ref
      + RamUsageEstimator.sizeOf(blocks);
}
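
This is the standard shallow-size recipe: object header, the primitive fields, one reference, rounded up to the JVM's object alignment, plus the deep size of the referenced array. With compressed oops (12-byte header, 4-byte references) the shallow part works out to align(12 + 8 + 4) = 24 bytes; a worked sketch:

import org.apache.lucene.util.RamUsageEstimator;

public class ShallowRecipe {
    public static void main(String[] args) {
        long shallow = RamUsageEstimator.alignObjectSize(
            RamUsageEstimator.NUM_BYTES_OBJECT_HEADER     // e.g. 12 with compressed oops
            + 2 * RamUsageEstimator.NUM_BYTES_INT         // valueCount, bitsPerValue
            + RamUsageEstimator.NUM_BYTES_OBJECT_REF);    // e.g. 4 -> aligned: 24
        byte[] blocks = new byte[3 * 1024];               // three blocks per value
        System.out.println(shallow + RamUsageEstimator.sizeOf(blocks));
    }
}
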
Project: elasticsearch_my    File: LatLonPointDVAtomicFieldData.java
@Override
public MultiGeoPointValues getGeoPointValues() {
    return new MultiGeoPointValues() {
        GeoPoint[] points = new GeoPoint[0];
        private int count = 0;

        @Override
        public void setDocument(int docId) {
            values.setDocument(docId);
            count = values.count();
            if (count > points.length) {
                final int previousLength = points.length;
                points = Arrays.copyOf(points, ArrayUtil.oversize(count, RamUsageEstimator.NUM_BYTES_OBJECT_REF));
                for (int i = previousLength; i < points.length; ++i) {
                    points[i] = new GeoPoint(Double.NaN, Double.NaN);
                }
            }
            long encoded;
            for (int i=0; i<count; ++i) {
                encoded = values.valueAt(i);
                points[i].reset(GeoEncodingUtils.decodeLatitude((int)(encoded >>> 32)), GeoEncodingUtils.decodeLongitude((int)encoded));
            }
        }

        @Override
        public int count() {
            return count;
        }

        @Override
        public GeoPoint valueAt(int index) {
            return points[index];
        }
    };
}
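
Each doc value is a single long with the encoded latitude in the high 32 bits and the encoded longitude in the low 32, which is what the reset(...) call above unpacks. A round-trip sketch, assuming Lucene 6.x's org.apache.lucene.geo.GeoEncodingUtils and the packing used by LatLonDocValuesField:

import org.apache.lucene.geo.GeoEncodingUtils;

public class LatLonPacking {
    public static void main(String[] args) {
        double lat = 48.8566, lon = 2.3522;
        long encoded = (((long) GeoEncodingUtils.encodeLatitude(lat)) << 32)
                     | (GeoEncodingUtils.encodeLongitude(lon) & 0xFFFFFFFFL);
        // Mirrors the decode in the loop above.
        System.out.println(GeoEncodingUtils.decodeLatitude((int) (encoded >>> 32)));
        System.out.println(GeoEncodingUtils.decodeLongitude((int) encoded));
    }
}
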
Project: lams    File: PackedLongValues.java
/** Build a {@link PackedLongValues} instance that contains values that
 *  have been added to this builder. This operation is destructive. */
public PackedLongValues build() {
  finish();
  pending = null;
  final PackedInts.Reader[] values = Arrays.copyOf(this.values, valuesOff);
  final long ramBytesUsed = PackedLongValues.BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(values);
  return new PackedLongValues(pageShift, pageMask, values, size, ramBytesUsed);
}
Project: Elasticsearch    File: BigArrays.java
/** Grow an array to a size that is larger than <code>minSize</code>, preserving content, and potentially reusing part of the provided array. */
public DoubleArray grow(DoubleArray array, long minSize) {
    if (minSize <= array.size()) {
        return array;
    }
    final long newSize = overSize(minSize, LONG_PAGE_SIZE, RamUsageEstimator.NUM_BYTES_LONG);
    return resize(array, newSize);
}
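
Growth is sized with NUM_BYTES_LONG because BigDoubleArray (shown earlier on this page) stores each double as its raw long bits inside long[] pages, just as the FloatArray variant below is sized with NUM_BYTES_FLOAT over int pages. The underlying representation, in two lines:

public class DoubleBits {
    public static void main(String[] args) {
        long bits = Double.doubleToRawLongBits(3.25);      // what a long-page slot stores
        System.out.println(Double.longBitsToDouble(bits)); // 3.25 again
    }
}
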
Project: elasticsearch_my    File: AbstractBigArray.java
private static <T> T[] grow(T[] array, int minSize) {
    if (array.length < minSize) {
        final int newLen = ArrayUtil.oversize(minSize, RamUsageEstimator.NUM_BYTES_OBJECT_REF);
        array = Arrays.copyOf(array, newLen);
    }
    return array;
}
Project: Elasticsearch    File: HyperLogLogPlusPlus.java
/**
 * Compute the required precision so that <code>count</code> distinct entries
 * would be counted with linear counting.
 */
public static int precisionFromThreshold(long count) {
    final long hashTableEntries = (long) Math.ceil(count / MAX_LOAD_FACTOR);
    int precision = PackedInts.bitsRequired(hashTableEntries * RamUsageEstimator.NUM_BYTES_INT);
    precision = Math.max(precision, MIN_PRECISION);
    precision = Math.min(precision, MAX_PRECISION);
    return precision;
}
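
A worked example, assuming the class constants MAX_LOAD_FACTOR = 0.75, MIN_PRECISION = 4, and MAX_PRECISION = 18 (verify against your version): for count = 1000, hashTableEntries = ceil(1000 / 0.75) = 1334; each entry costs NUM_BYTES_INT = 4 bytes, and bitsRequired(1334 * 4 = 5336) = 13 since 2^12 < 5336 <= 2^13; 13 already lies in [4, 18], so the precision is 13. The same arithmetic in code:

import org.apache.lucene.util.packed.PackedInts;

public class PrecisionDemo {
    public static void main(String[] args) {
        long count = 1000;
        long entries = (long) Math.ceil(count / 0.75);        // 1334
        int precision = PackedInts.bitsRequired(entries * 4); // bitsRequired(5336) = 13
        System.out.println(Math.min(18, Math.max(4, precision)));
    }
}
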
Project: lams    File: Lucene45DocValuesProducer.java
/** returns an address instance for variable-length binary values.
 *  @lucene.internal */
protected MonotonicBlockPackedReader getAddressInstance(IndexInput data, FieldInfo field, BinaryEntry bytes) throws IOException {
  final MonotonicBlockPackedReader addresses;
  synchronized (addressInstances) {
    MonotonicBlockPackedReader addrInstance = addressInstances.get(field.number);
    if (addrInstance == null) {
      data.seek(bytes.addressesOffset);
      addrInstance = MonotonicBlockPackedReader.of(data, bytes.packedIntsVersion, bytes.blockSize, bytes.count, false);
      addressInstances.put(field.number, addrInstance);
      ramBytesUsed.addAndGet(addrInstance.ramBytesUsed() + RamUsageEstimator.NUM_BYTES_INT);
    }
    addresses = addrInstance;
  }
  return addresses;
}
Project: Elasticsearch    File: BigArrays.java
/** Grow an array to a size that is larger than <code>minSize</code>, preserving content, and potentially reusing part of the provided array. */
public FloatArray grow(FloatArray array, long minSize) {
    if (minSize <= array.size()) {
        return array;
    }
    final long newSize = overSize(minSize, INT_PAGE_SIZE, RamUsageEstimator.NUM_BYTES_FLOAT);
    return resize(array, newSize);
}
Project: lams    File: Lucene45DocValuesProducer.java
/** returns an address instance for sortedset ordinal lists
 * @lucene.internal */
protected MonotonicBlockPackedReader getOrdIndexInstance(IndexInput data, FieldInfo field, NumericEntry entry) throws IOException {
  final MonotonicBlockPackedReader ordIndex;
  synchronized (ordIndexInstances) {
    MonotonicBlockPackedReader ordIndexInstance = ordIndexInstances.get(field.number);
    if (ordIndexInstance == null) {
      data.seek(entry.offset);
      ordIndexInstance = MonotonicBlockPackedReader.of(data, entry.packedIntsVersion, entry.blockSize, entry.count, false);
      ordIndexInstances.put(field.number, ordIndexInstance);
      ramBytesUsed.addAndGet(ordIndexInstance.ramBytesUsed() + RamUsageEstimator.NUM_BYTES_INT);
    }
    ordIndex = ordIndexInstance;
  }
  return ordIndex;
}
Project: lams    File: Lucene49DocValuesProducer.java
/** returns an address instance for variable-length binary values. */
private MonotonicBlockPackedReader getAddressInstance(IndexInput data, FieldInfo field, BinaryEntry bytes) throws IOException {
  final MonotonicBlockPackedReader addresses;
  synchronized (addressInstances) {
    MonotonicBlockPackedReader addrInstance = addressInstances.get(field.number);
    if (addrInstance == null) {
      data.seek(bytes.addressesOffset);
      addrInstance = MonotonicBlockPackedReader.of(data, bytes.packedIntsVersion, bytes.blockSize, bytes.count+1, false);
      addressInstances.put(field.number, addrInstance);
      ramBytesUsed.addAndGet(addrInstance.ramBytesUsed() + RamUsageEstimator.NUM_BYTES_INT);
    }
    addresses = addrInstance;
  }
  return addresses;
}
Project: lams    File: Lucene49DocValuesProducer.java
/** returns an address instance for sortedset ordinal lists */
private MonotonicBlockPackedReader getOrdIndexInstance(IndexInput data, FieldInfo field, NumericEntry entry) throws IOException {
  final MonotonicBlockPackedReader ordIndex;
  synchronized (ordIndexInstances) {
    MonotonicBlockPackedReader ordIndexInstance = ordIndexInstances.get(field.number);
    if (ordIndexInstance == null) {
      data.seek(entry.offset);
      ordIndexInstance = MonotonicBlockPackedReader.of(data, entry.packedIntsVersion, entry.blockSize, entry.count+1, false);
      ordIndexInstances.put(field.number, ordIndexInstance);
      ramBytesUsed.addAndGet(ordIndexInstance.ramBytesUsed() + RamUsageEstimator.NUM_BYTES_INT);
    }
    ordIndex = ordIndexInstance;
  }
  return ordIndex;
}
Project: lams    File: BlockTreeTermsReader.java
@Override
public long ramBytesUsed() {
  long sizeInBytes = BASE_RAM_BYTES_USED
      + ((postingsReader!=null) ? postingsReader.ramBytesUsed() : 0)
      + fields.size() * 2L * RamUsageEstimator.NUM_BYTES_OBJECT_REF;
  for(FieldReader reader : fields.values()) {
    sizeInBytes += reader.ramBytesUsed();
  }
  return sizeInBytes;
}
Project: lams    File: SegmentTermsEnum.java
private FST.Arc<BytesRef> getArc(int ord) {
  if (ord >= arcs.length) {
    @SuppressWarnings({"rawtypes","unchecked"}) final FST.Arc<BytesRef>[] next =
    new FST.Arc[ArrayUtil.oversize(1+ord, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
    System.arraycopy(arcs, 0, next, 0, arcs.length);
    for(int arcOrd=arcs.length;arcOrd<next.length;arcOrd++) {
      next[arcOrd] = new FST.Arc<>();
    }
    arcs = next;
  }
  return arcs[ord];
}
Project: lams    File: IntersectTermsEnum.java
private FST.Arc<BytesRef> getArc(int ord) {
  if (ord >= arcs.length) {
    @SuppressWarnings({"rawtypes","unchecked"}) final FST.Arc<BytesRef>[] next =
    new FST.Arc[ArrayUtil.oversize(1+ord, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
    System.arraycopy(arcs, 0, next, 0, arcs.length);
    for(int arcOrd=arcs.length;arcOrd<next.length;arcOrd++) {
      next[arcOrd] = new FST.Arc<>();
    }
    arcs = next;
  }
  return arcs[ord];
}
Project: lams    File: Lucene410DocValuesProducer.java
/** returns an address instance for prefix-compressed binary values. */
private synchronized MonotonicBlockPackedReader getIntervalInstance(FieldInfo field, BinaryEntry bytes) throws IOException {
  MonotonicBlockPackedReader addresses = addressInstances.get(field.number);
  if (addresses == null) {
    data.seek(bytes.addressesOffset);
    final long size = (bytes.count + INTERVAL_MASK) >>> INTERVAL_SHIFT;
    addresses = MonotonicBlockPackedReader.of(data, bytes.packedIntsVersion, bytes.blockSize, size, false);
    addressInstances.put(field.number, addresses);
    ramBytesUsed.addAndGet(addresses.ramBytesUsed() + RamUsageEstimator.NUM_BYTES_INT);
  }
  return addresses;
}
Project: lams    File: CachingCollector.java
private CachingCollector(Collector other, double maxRAMMB, boolean cacheScores) {
  this.other = other;

  cachedDocs = new ArrayList<>();
  curDocs = new int[INITIAL_ARRAY_SIZE];
  cachedDocs.add(curDocs);

  int bytesPerDoc = RamUsageEstimator.NUM_BYTES_INT;
  if (cacheScores) {
    bytesPerDoc += RamUsageEstimator.NUM_BYTES_FLOAT;
  }
  maxDocsToCache = (int) ((maxRAMMB * 1024 * 1024) / bytesPerDoc);
}
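
The budget arithmetic: a cached document costs one int (its docID) plus, when scores are cached, one float. With maxRAMMB = 64 and cacheScores = true that is 8 bytes per document, so maxDocsToCache = 64 * 1024 * 1024 / 8 = 8,388,608. In code:

import org.apache.lucene.util.RamUsageEstimator;

public class CacheBudget {
    public static void main(String[] args) {
        double maxRAMMB = 64;
        int bytesPerDoc = RamUsageEstimator.NUM_BYTES_INT + RamUsageEstimator.NUM_BYTES_FLOAT; // 4 + 4
        System.out.println((int) ((maxRAMMB * 1024 * 1024) / bytesPerDoc)); // 8388608
    }
}
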
Project: lams    File: FieldCacheImpl.java
@Override
public long ramBytesUsed() {
  long base = RamUsageEstimator.NUM_BYTES_OBJECT_REF;
  if (bits instanceof Bits.MatchAllBits || bits instanceof Bits.MatchNoBits) {
    return base;
  } else {
    return base + (bits.length() >>> 3);
  }
}
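
bits.length() >>> 3 converts a size in bits to bytes, since a materialized bit set packs eight documents per byte; the MatchAllBits/MatchNoBits singletons have no backing storage, so only the reference cost is charged. For instance:

public class BitsBytes {
    public static void main(String[] args) {
        int maxDoc = 1_000_000;
        System.out.println(maxDoc >>> 3); // 125000 bytes for a materialized bit set
    }
}
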
Project: lams    File: BytesRefArray.java
/**
 * Creates a new {@link BytesRefArray} with a counter to track allocated bytes
 */
public BytesRefArray(Counter bytesUsed) {
  this.pool = new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(
      bytesUsed));
  pool.nextBuffer();
  bytesUsed.addAndGet(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER
      + RamUsageEstimator.NUM_BYTES_INT);
  this.bytesUsed = bytesUsed;
}
Project: lams    File: MonotonicBlockPackedReader.java
@Override
public long ramBytesUsed() {
  long sizeInBytes = 0;
  sizeInBytes += RamUsageEstimator.sizeOf(minValues);
  sizeInBytes += RamUsageEstimator.sizeOf(averages);
  for(PackedInts.Reader reader: subReaders) {
    sizeInBytes += reader.ramBytesUsed();
  }
  return sizeInBytes;
}
Project: lams    File: MonotonicLongValues.java
@Override
public MonotonicLongValues build() {
  finish();
  pending = null;
  final PackedInts.Reader[] values = Arrays.copyOf(this.values, valuesOff);
  final long[] mins = Arrays.copyOf(this.mins, valuesOff);
  final float[] averages = Arrays.copyOf(this.averages, valuesOff);
  final long ramBytesUsed = MonotonicLongValues.BASE_RAM_BYTES_USED
      + RamUsageEstimator.sizeOf(values) + RamUsageEstimator.sizeOf(mins)
      + RamUsageEstimator.sizeOf(averages);
  return new MonotonicLongValues(pageShift, pageMask, values, mins, averages, size, ramBytesUsed);
}
Project: lams    File: TermVectorsConsumer.java
void addFieldToFlush(TermVectorsConsumerPerField fieldToFlush) {
  if (numVectorFields == perFields.length) {
    int newSize = ArrayUtil.oversize(numVectorFields + 1, RamUsageEstimator.NUM_BYTES_OBJECT_REF);
    TermVectorsConsumerPerField[] newArray = new TermVectorsConsumerPerField[newSize];
    System.arraycopy(perFields, 0, newArray, 0, numVectorFields);
    perFields = newArray;
  }

  perFields[numVectorFields++] = fieldToFlush;
}