Java 类org.apache.hadoop.hbase.util.ChecksumType 实例源码

项目:ditb    文件:ChecksumUtil.java   
/**
 * Computes checksums over a byte range of {@code indata} and writes the
 * resulting checksum values into {@code outdata}.
 *
 * @param indata source buffer containing the data to checksum
 * @param startOffset first offset in {@code indata} to include
 * @param endOffset offset in {@code indata} up to which checksums are computed
 * @param outdata destination buffer that receives the checksum values
 * @param outOffset first offset in {@code outdata} to write checksums to
 * @param checksumType checksum algorithm; {@code ChecksumType.NULL} means none
 * @param bytesPerChecksum number of data bytes covered by each checksum value
 */
static void generateChecksums(byte[] indata, int startOffset, int endOffset,
  byte[] outdata, int outOffset, ChecksumType checksumType,
  int bytesPerChecksum) throws IOException {

  // A NULL checksum type marks a block that carries no checksums at all.
  if (checksumType == ChecksumType.NULL) {
    return;
  }

  ByteBuffer dataChunk = ByteBuffer.wrap(indata, startOffset, endOffset - startOffset);
  ByteBuffer sumsChunk = ByteBuffer.wrap(outdata, outOffset, outdata.length - outOffset);

  DataChecksum
      .newDataChecksum(checksumType.getDataChecksumType(), bytesPerChecksum)
      .calculateChunkedSums(dataChunk, sumsChunk);
}
项目:ditb    文件:HFileBlock.java   
/**
 * Creates a block from an existing buffer starting with a header. Rewinds
 * and takes ownership of the buffer. By definition of rewind, ignores the
 * buffer position, but if you slice the buffer beforehand, it will rewind
 * to that point. The reason this has a minorNumber and not a majorNumber is
 * because majorNumbers indicate the format of a HFile whereas minorNumbers
 * indicate the format inside a HFileBlock.
 */
HFileBlock(ByteBuffer b, boolean usesHBaseChecksum) throws IOException {
  b.rewind();
  // BlockType.read(b) advances the position, but the header fields below are
  // read with absolute-index gets (Header.*_INDEX), so they are unaffected.
  blockType = BlockType.read(b);
  onDiskSizeWithoutHeader = b.getInt(Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX);
  uncompressedSizeWithoutHeader = b.getInt(Header.UNCOMPRESSED_SIZE_WITHOUT_HEADER_INDEX);
  prevBlockOffset = b.getLong(Header.PREV_BLOCK_OFFSET_INDEX);
  HFileContextBuilder contextBuilder = new HFileContextBuilder();
  contextBuilder.withHBaseCheckSum(usesHBaseChecksum);
  if (usesHBaseChecksum) {
    // Checksum-capable header: checksum metadata lives in the header itself.
    contextBuilder.withChecksumType(ChecksumType.codeToType(b.get(Header.CHECKSUM_TYPE_INDEX)));
    contextBuilder.withBytesPerCheckSum(b.getInt(Header.BYTES_PER_CHECKSUM_INDEX));
    this.onDiskDataSizeWithHeader = b.getInt(Header.ON_DISK_DATA_SIZE_WITH_HEADER_INDEX);
  } else {
    // Legacy header without checksum fields: no checksums, and the on-disk
    // data size is derived from the shorter no-checksum header length.
    contextBuilder.withChecksumType(ChecksumType.NULL);
    contextBuilder.withBytesPerCheckSum(0);
    this.onDiskDataSizeWithHeader = onDiskSizeWithoutHeader +
                                     HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM;
  }
  this.fileContext = contextBuilder.build();
  buf = b;
  buf.rewind();
}
项目:ditb    文件:HFileBlock.java   
/**
 * Returns a copy of this block prepared for the block cache. Checksums have
 * already been validated on read, so the cached copy carries no checksum
 * data; this is signalled via ChecksumType.NULL and bytesPerChecksum == 0 in
 * the new block's context, even when the header minor version is
 * MINOR_VERSION_WITH_CHECKSUM.
 */
public HFileBlock getBlockForCaching(CacheConfig cacheConf) {
  HFileContextBuilder ctx = new HFileContextBuilder();
  ctx.withBlockSize(fileContext.getBlocksize());
  ctx.withBytesPerCheckSum(0);              // cached data has no checksums
  ctx.withChecksumType(ChecksumType.NULL);  // ditto
  ctx.withCompression(fileContext.getCompression());
  ctx.withDataBlockEncoding(fileContext.getDataBlockEncoding());
  ctx.withHBaseCheckSum(fileContext.isUseHBaseChecksum());
  ctx.withCompressTags(fileContext.isCompressTags());
  ctx.withIncludesMvcc(fileContext.isIncludesMvcc());
  ctx.withIncludesTags(fileContext.isIncludesTags());
  HFileContext newContext = ctx.build();
  // Cache the on-disk (possibly compressed) image or the uncompressed one,
  // depending on the cache configuration for this block's category.
  return new HFileBlock(blockType, getOnDiskSizeWithoutHeader(),
      getUncompressedSizeWithoutHeader(), prevOffset,
      cacheConf.shouldCacheCompressed(blockType.getCategory())
          ? getOnDiskBufferWithHeader()
          : getUncompressedBufferWithHeader(),
      FILL_HEADER, startOffset,
      onDiskBytesWithHeader.length + onDiskChecksum.length, newContext);
}
项目:ditb    文件:HFileBlock.java   
/**
 * Renders the block header fields as a human readable string, mainly for
 * debugging. Assumes the block has minor version > 0, i.e. the header
 * contains the checksum fields.
 */
static String toStringHeader(ByteBuffer buf) throws IOException {
  byte[] magic = new byte[Math.min(buf.limit() - buf.position(), BlockType.MAGIC_LENGTH)];
  buf.get(magic);
  BlockType parsedType = BlockType.parse(magic, 0, BlockType.MAGIC_LENGTH);
  // Remaining header fields follow the magic in on-disk order.
  int compressedSizeNoHeader = buf.getInt();
  int uncompressedSizeNoHeader = buf.getInt();
  long prevOffset = buf.getLong();
  byte checksumCode = buf.get();
  long bytesPerChecksum = buf.getInt();
  long onDiskDataSizeWithHeader = buf.getInt();
  StringBuilder dump = new StringBuilder();
  dump.append(" Header dump: magic: ").append(Bytes.toString(magic));
  dump.append(" blockType ").append(parsedType);
  dump.append(" compressedBlockSizeNoHeader ").append(compressedSizeNoHeader);
  dump.append(" uncompressedBlockSizeNoHeader ").append(uncompressedSizeNoHeader);
  dump.append(" prevBlockOffset ").append(prevOffset);
  dump.append(" checksumType ").append(ChecksumType.codeToType(checksumCode));
  dump.append(" bytesPerChecksum ").append(bytesPerChecksum);
  dump.append(" onDiskDataSizeWithHeader ").append(onDiskDataSizeWithHeader);
  return dump.toString();
}
项目:ditb    文件:TestChecksum.java   
@Test
public void testNewBlocksHaveDefaultChecksum() throws IOException {
  // Write one DATA block to a fresh file using the default HFileContext.
  Path path = new Path(TEST_UTIL.getDataTestDir(), "default_checksum");
  FSDataOutputStream os = fs.create(path);
  HFileContext meta = new HFileContextBuilder().build();
  HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
  DataOutputStream dos = hbw.startWriting(BlockType.DATA);
  for (int i = 0; i < 1000; ++i) {
    dos.writeInt(i);
  }
  hbw.writeHeaderAndData(os);
  int totalSize = hbw.getOnDiskSizeWithHeader();
  os.close();

  // Use hbase checksums.
  assertEquals(true, hfs.useHBaseChecksum());

  // Read the block back and verify it carries the default checksum type.
  FSDataInputStreamWrapper is = new FSDataInputStreamWrapper(fs, path);
  meta = new HFileContextBuilder().withHBaseCheckSum(true).build();
  HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(
      is, totalSize, (HFileSystem) fs, path, meta);
  HFileBlock b = hbr.readBlockData(0, -1, -1, false);
  // JUnit convention: expected value first, actual second (was swapped).
  assertEquals(ChecksumType.getDefaultChecksumType().getCode(), b.getChecksumType());
}
项目:ditb    文件:TestHFileDataBlockEncoder.java   
/**
 * Verifies that a block built with the old-style (checksum-free) header
 * reports the no-checksum header length for its dummy header after encoding.
 */
private void testHeaderSizeInCacheWithoutChecksumInternals(boolean useTags) throws IOException {
  int headerSize = HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM;
  // Generate KVs and lay them out after a legacy (no-checksum) header.
  List<KeyValue> kvs = generator.generateTestKeyValues(60, useTags);
  ByteBuffer kvBuffer = RedundantKVGenerator.convertKvToByteBuffer(kvs, includesMemstoreTS);
  int kvSize = kvBuffer.limit();
  ByteBuffer blockBuffer = ByteBuffer.allocate(kvSize + headerSize);
  blockBuffer.position(headerSize);
  kvBuffer.rewind();
  blockBuffer.put(kvBuffer);
  // HBase checksums disabled: mimics a pre-checksum minor-version block.
  HFileContextBuilder ctxBuilder = new HFileContextBuilder();
  ctxBuilder.withHBaseCheckSum(false);
  ctxBuilder.withIncludesMvcc(includesMemstoreTS);
  ctxBuilder.withIncludesTags(useTags);
  ctxBuilder.withBlockSize(0);
  ctxBuilder.withChecksumType(ChecksumType.NULL);
  HFileBlock block = new HFileBlock(BlockType.DATA, kvSize, kvSize, -1, blockBuffer,
      HFileBlock.FILL_HEADER, 0, 0, ctxBuilder.build());
  HFileBlock cacheBlock = createBlockOnDisk(kvs, block, useTags);
  assertEquals(headerSize, cacheBlock.getDummyHeaderForVersion().length);
}
项目:ditb    文件:TestHFileDataBlockEncoder.java   
/**
 * Builds a sample DATA block in the current (checksum-capable) header format,
 * with HBase checksums enabled but ChecksumType.NULL, wrapping the given KVs.
 */
private HFileBlock getSampleHFileBlock(List<KeyValue> kvs, boolean useTag) {
  ByteBuffer kvBuffer = RedundantKVGenerator.convertKvToByteBuffer(kvs, includesMemstoreTS);
  int kvSize = kvBuffer.limit();
  // Reserve room for the full header ahead of the serialized KVs.
  ByteBuffer blockBuffer = ByteBuffer.allocate(kvSize + HConstants.HFILEBLOCK_HEADER_SIZE);
  blockBuffer.position(HConstants.HFILEBLOCK_HEADER_SIZE);
  kvBuffer.rewind();
  blockBuffer.put(kvBuffer);
  HFileContextBuilder metaBuilder = new HFileContextBuilder();
  metaBuilder.withIncludesMvcc(includesMemstoreTS);
  metaBuilder.withIncludesTags(useTag);
  metaBuilder.withHBaseCheckSum(true);
  metaBuilder.withCompression(Algorithm.NONE);
  metaBuilder.withBlockSize(0);
  metaBuilder.withChecksumType(ChecksumType.NULL);
  return new HFileBlock(BlockType.DATA, kvSize, kvSize, -1, blockBuffer,
      HFileBlock.FILL_HEADER, 0, 0, metaBuilder.build());
}
项目:ditb    文件:HFileContext.java   
/**
 * Full-field constructor for an HFileContext.
 *
 * @param useHBaseChecksum whether blocks carry HBase-level checksums
 * @param includesMvcc whether cells carry MVCC read points
 * @param includesTags whether cells carry tags
 * @param compressAlgo block compression algorithm
 * @param compressTags whether tags are compressed
 * @param checksumType checksum algorithm used for block data
 * @param bytesPerChecksum number of data bytes per checksum value
 * @param blockSize target uncompressed block size
 * @param encoding data block encoding; null keeps the field's default
 * @param cryptoContext encryption context for block data
 * @param fileCreateTime creation time recorded for the file
 */
public HFileContext(boolean useHBaseChecksum, boolean includesMvcc, boolean includesTags,
    Compression.Algorithm compressAlgo, boolean compressTags, ChecksumType checksumType,
    int bytesPerChecksum, int blockSize, DataBlockEncoding encoding,
    Encryption.Context cryptoContext, long fileCreateTime) {
  this.usesHBaseChecksum = useHBaseChecksum;
  this.includesMvcc =  includesMvcc;
  this.includesTags = includesTags;
  this.compressAlgo = compressAlgo;
  this.compressTags = compressTags;
  this.checksumType = checksumType;
  this.bytesPerChecksum = bytesPerChecksum;
  this.blocksize = blockSize;
  // A null encoding leaves the field at its default rather than overwriting it.
  if (encoding != null) {
    this.encoding = encoding;
  }
  this.cryptoContext = cryptoContext;
  this.fileCreateTime = fileCreateTime;
}
项目:LCIndex-HBase-0.94.16    文件:HFileWriterV2.java   
/**
 * Constructor that takes a path, creates and closes the output stream.
 * If {@code ostream} is null a new output stream is created for {@code path};
 * otherwise the given stream is used as-is.
 */
public HFileWriterV2(Configuration conf, CacheConfig cacheConf,
    FileSystem fs, Path path, FSDataOutputStream ostream, int blockSize,
    Compression.Algorithm compressAlgo, HFileDataBlockEncoder blockEncoder,
    final KeyComparator comparator, final ChecksumType checksumType,
    final int bytesPerChecksum, boolean includeMVCCReadpoint) throws IOException {
  super(cacheConf,
      ostream == null ? createOutputStream(conf, fs, path) : ostream,
      path, blockSize, compressAlgo, blockEncoder, comparator);
  SchemaMetrics.configureGlobally(conf);
  this.checksumType = checksumType;
  this.bytesPerChecksum = bytesPerChecksum;
  this.includeMemstoreTS = includeMVCCReadpoint;
  // When checksum verification is disabled in the config, fall back to the
  // pre-checksum block format (minor version 0).
  if (!conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, false)) {
    this.minorVersion = 0;
  }
  finishInit(conf);
}
项目:LCIndex-HBase-0.94.16    文件:HFileBlock.java   
/**
 * Creates a block from an existing buffer starting with a header. Rewinds and takes ownership of
 * the buffer. By definition of rewind, ignores the buffer position, but if you slice the buffer
 * beforehand, it will rewind to that point. The reason this has a minorNumber and not a
 * majorNumber is because majorNumbers indicate the format of a HFile whereas minorNumbers
 * indicate the format inside a HFileBlock.
 */
HFileBlock(ByteBuffer b, int minorVersion) throws IOException {
  b.rewind();
  // NOTE: these are sequential relative reads; their order must match the
  // on-disk header layout exactly.
  blockType = BlockType.read(b);
  onDiskSizeWithoutHeader = b.getInt();
  uncompressedSizeWithoutHeader = b.getInt();
  prevBlockOffset = b.getLong();
  this.minorVersion = minorVersion;
  if (minorVersion >= MINOR_VERSION_WITH_CHECKSUM) {
    // Checksum-capable header: checksum metadata follows the common fields.
    this.checksumType = b.get();
    this.bytesPerChecksum = b.getInt();
    this.onDiskDataSizeWithHeader = b.getInt();
  } else {
    // Legacy header without checksum fields: derive the on-disk data size
    // from the shorter no-checksum header length.
    this.checksumType = ChecksumType.NULL.getCode();
    this.bytesPerChecksum = 0;
    this.onDiskDataSizeWithHeader = onDiskSizeWithoutHeader + HEADER_SIZE_NO_CHECKSUM;
  }
  buf = b;
  buf.rewind();
}
项目:LCIndex-HBase-0.94.16    文件:HFileBlock.java   
/**
 * Renders the block header as a human readable string, mainly for debugging.
 * Assumes the block has minor version > 0, i.e. the header includes the
 * checksum fields.
 */
static String toStringHeader(ByteBuffer buf) throws IOException {
  byte[] arr = buf.array();
  int pos = buf.arrayOffset();
  // Read the magic directly from the backing array; BlockType.read(buf)
  // consumes the same bytes from the buffer to resolve the block type.
  long magic = Bytes.toLong(arr, pos);
  BlockType bt = BlockType.read(buf);
  pos += Bytes.SIZEOF_LONG;
  int compressedBlockSizeNoHeader = Bytes.toInt(arr, pos);
  pos += Bytes.SIZEOF_INT;
  int uncompressedBlockSizeNoHeader = Bytes.toInt(arr, pos);
  pos += Bytes.SIZEOF_INT;
  long prevBlockOffset = Bytes.toLong(arr, pos);
  pos += Bytes.SIZEOF_LONG;
  byte cksumtype = arr[pos];
  pos += Bytes.SIZEOF_BYTE;
  long bytesPerChecksum = Bytes.toInt(arr, pos);
  pos += Bytes.SIZEOF_INT;
  long onDiskDataSizeWithHeader = Bytes.toInt(arr, pos);
  StringBuilder dump = new StringBuilder();
  dump.append(" Header dump: magic: ").append(magic);
  dump.append(" blockType ").append(bt);
  dump.append(" compressedBlockSizeNoHeader ").append(compressedBlockSizeNoHeader);
  dump.append(" uncompressedBlockSizeNoHeader ").append(uncompressedBlockSizeNoHeader);
  dump.append(" prevBlockOffset ").append(prevBlockOffset);
  dump.append(" checksumType ").append(ChecksumType.codeToType(cksumtype));
  dump.append(" bytesPerChecksum ").append(bytesPerChecksum);
  dump.append(" onDiskDataSizeWithHeader ").append(onDiskDataSizeWithHeader);
  return dump.toString();
}
项目:pbase    文件:HFileBlock.java   
/**
 * Creates a block from an existing buffer starting with a header. Rewinds
 * and takes ownership of the buffer. By definition of rewind, ignores the
 * buffer position, but if you slice the buffer beforehand, it will rewind
 * to that point. The reason this has a minorNumber and not a majorNumber is
 * because majorNumbers indicate the format of a HFile whereas minorNumbers
 * indicate the format inside a HFileBlock.
 */
HFileBlock(ByteBuffer b, boolean usesHBaseChecksum) throws IOException {
  b.rewind();
  // NOTE: sequential relative reads; order must match the on-disk layout.
  blockType = BlockType.read(b);
  onDiskSizeWithoutHeader = b.getInt();
  uncompressedSizeWithoutHeader = b.getInt();
  prevBlockOffset = b.getLong();
  HFileContextBuilder contextBuilder = new HFileContextBuilder();
  contextBuilder.withHBaseCheckSum(usesHBaseChecksum);
  if (usesHBaseChecksum) {
    // Checksum-capable header: checksum metadata follows the common fields.
    contextBuilder.withChecksumType(ChecksumType.codeToType(b.get()));
    contextBuilder.withBytesPerCheckSum(b.getInt());
    this.onDiskDataSizeWithHeader = b.getInt();
  } else {
    // Legacy header without checksum fields: derive the on-disk data size
    // from the shorter no-checksum header length.
    contextBuilder.withChecksumType(ChecksumType.NULL);
    contextBuilder.withBytesPerCheckSum(0);
    this.onDiskDataSizeWithHeader = onDiskSizeWithoutHeader +
                                     HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM;
  }
  this.fileContext = contextBuilder.build();
  buf = b;
  buf.rewind();
}
项目:pbase    文件:HFileBlock.java   
/**
 * Returns a copy of this block prepared for the block cache. Checksums have
 * already been validated, so the cached copy carries no checksum data; this
 * is signalled via ChecksumType.NULL and a bytesPerChecksum of 0 in the new
 * block's context, even when the header minor version is
 * MINOR_VERSION_WITH_CHECKSUM.
 */
public HFileBlock getBlockForCaching(CacheConfig cacheConf) {
  HFileContextBuilder builder = new HFileContextBuilder();
  builder.withBlockSize(fileContext.getBlocksize());
  builder.withBytesPerCheckSum(0);              // no checksums in cached data
  builder.withChecksumType(ChecksumType.NULL);  // ditto
  builder.withCompression(fileContext.getCompression());
  builder.withDataBlockEncoding(fileContext.getDataBlockEncoding());
  builder.withHBaseCheckSum(fileContext.isUseHBaseChecksum());
  builder.withCompressTags(fileContext.isCompressTags());
  builder.withIncludesMvcc(fileContext.isIncludesMvcc());
  builder.withIncludesTags(fileContext.isIncludesTags());
  HFileContext newContext = builder.build();
  // Pick the on-disk (possibly compressed) or uncompressed image according
  // to the cache configuration for this block's category.
  return new HFileBlock(blockType, getOnDiskSizeWithoutHeader(),
      getUncompressedSizeWithoutHeader(), prevOffset,
      cacheConf.shouldCacheCompressed(blockType.getCategory())
          ? getOnDiskBufferWithHeader()
          : getUncompressedBufferWithHeader(),
      FILL_HEADER, startOffset,
      onDiskBytesWithHeader.length + onDiskChecksum.length, newContext);
}
项目:pbase    文件:HFileBlock.java   
/**
 * Convert the contents of the block header into a human readable string.
 * This is mostly helpful for debugging. This assumes that the block
 * has minor version > 0.
 */
static String toStringHeader(ByteBuffer buf) throws IOException {
  byte[] magicBuf = new byte[Math.min(buf.limit() - buf.position(), BlockType.MAGIC_LENGTH)];
  buf.get(magicBuf);
  BlockType bt = BlockType.parse(magicBuf, 0, BlockType.MAGIC_LENGTH);
  // (stray empty statements ';;' removed from the next two reads)
  int compressedBlockSizeNoHeader = buf.getInt();
  int uncompressedBlockSizeNoHeader = buf.getInt();
  long prevBlockOffset = buf.getLong();
  byte cksumtype = buf.get();
  long bytesPerChecksum = buf.getInt();
  long onDiskDataSizeWithHeader = buf.getInt();
  return " Header dump: magic: " + Bytes.toString(magicBuf) +
                 " blockType " + bt +
                 " compressedBlockSizeNoHeader " +
                 compressedBlockSizeNoHeader +
                 " uncompressedBlockSizeNoHeader " +
                 uncompressedBlockSizeNoHeader +
                 " prevBlockOffset " + prevBlockOffset +
                 " checksumType " + ChecksumType.codeToType(cksumtype) +
                 " bytesPerChecksum " + bytesPerChecksum +
                 " onDiskDataSizeWithHeader " + onDiskDataSizeWithHeader;
}
项目:pbase    文件:TestHFileDataBlockEncoder.java   
/**
 * Verifies that a block built with the old-style (checksum-free) header
 * reports the no-checksum header length for its dummy header after encoding.
 */
private void testHeaderSizeInCacheWithoutChecksumInternals(boolean useTags) throws IOException {
  int headerSize = HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM;
  // Create some KVs and create the block with old-style header.
  List<KeyValue> kvs = generator.generateTestKeyValues(60, useTags);
  ByteBuffer keyValues = RedundantKVGenerator.convertKvToByteBuffer(kvs, includesMemstoreTS);
  int size = keyValues.limit();
  ByteBuffer buf = ByteBuffer.allocate(size + headerSize);
  // Leave room for the legacy header, then copy in the serialized KVs.
  buf.position(headerSize);
  keyValues.rewind();
  buf.put(keyValues);
  // HBase checksums disabled: mimics a pre-checksum minor-version block.
  HFileContext hfileContext = new HFileContextBuilder().withHBaseCheckSum(false)
                      .withIncludesMvcc(includesMemstoreTS)
                      .withIncludesTags(useTags)
                      .withBlockSize(0)
                      .withChecksumType(ChecksumType.NULL)
                      .build();
  HFileBlock block = new HFileBlock(BlockType.DATA, size, size, -1, buf,
      HFileBlock.FILL_HEADER, 0,
      0, hfileContext);
  HFileBlock cacheBlock = createBlockOnDisk(kvs, block, useTags);
  assertEquals(headerSize, cacheBlock.getDummyHeaderForVersion().length);
}
项目:pbase    文件:TestHFileDataBlockEncoder.java   
/**
 * Builds a sample DATA block in the current (checksum-capable) header format,
 * with HBase checksums enabled but ChecksumType.NULL, wrapping the given KVs.
 */
private HFileBlock getSampleHFileBlock(List<KeyValue> kvs, boolean useTag) {
  ByteBuffer keyValues = RedundantKVGenerator.convertKvToByteBuffer(kvs, includesMemstoreTS);
  int size = keyValues.limit();
  ByteBuffer buf = ByteBuffer.allocate(size + HConstants.HFILEBLOCK_HEADER_SIZE);
  // Leave room for the full header, then copy in the serialized KVs.
  buf.position(HConstants.HFILEBLOCK_HEADER_SIZE);
  keyValues.rewind();
  buf.put(keyValues);
  HFileContext meta = new HFileContextBuilder()
                      .withIncludesMvcc(includesMemstoreTS)
                      .withIncludesTags(useTag)
                      .withHBaseCheckSum(true)
                      .withCompression(Algorithm.NONE)
                      .withBlockSize(0)
                      .withChecksumType(ChecksumType.NULL)
                      .build();
  HFileBlock b = new HFileBlock(BlockType.DATA, size, size, -1, buf,
      HFileBlock.FILL_HEADER, 0, 
       0, meta);
  return b;
}
项目:pbase    文件:HFileContext.java   
/**
 * Full-field constructor for an HFileContext.
 *
 * @param useHBaseChecksum whether blocks carry HBase-level checksums
 * @param includesMvcc whether cells carry MVCC read points
 * @param includesTags whether cells carry tags
 * @param compressAlgo block compression algorithm
 * @param compressTags whether tags are compressed
 * @param checksumType checksum algorithm used for block data
 * @param bytesPerChecksum number of data bytes per checksum value
 * @param blockSize target uncompressed block size
 * @param encoding data block encoding; null keeps the field's default
 * @param cryptoContext encryption context for block data
 */
public HFileContext(boolean useHBaseChecksum, boolean includesMvcc, boolean includesTags,
    Compression.Algorithm compressAlgo, boolean compressTags, ChecksumType checksumType,
    int bytesPerChecksum, int blockSize, DataBlockEncoding encoding,
    Encryption.Context cryptoContext) {
  this.usesHBaseChecksum = useHBaseChecksum;
  this.includesMvcc =  includesMvcc;
  this.includesTags = includesTags;
  this.compressAlgo = compressAlgo;
  this.compressTags = compressTags;
  this.checksumType = checksumType;
  this.bytesPerChecksum = bytesPerChecksum;
  this.blocksize = blockSize;
  // A null encoding leaves the field at its default rather than overwriting it.
  if (encoding != null) {
    this.encoding = encoding;
  }
  this.cryptoContext = cryptoContext;
}
项目:HIndex    文件:HFileBlock.java   
/**
 * Creates a block from an existing buffer starting with a header. Rewinds
 * and takes ownership of the buffer. By definition of rewind, ignores the
 * buffer position, but if you slice the buffer beforehand, it will rewind
 * to that point. The reason this has a minorNumber and not a majorNumber is
 * because majorNumbers indicate the format of a HFile whereas minorNumbers 
 * indicate the format inside a HFileBlock.
 */
HFileBlock(ByteBuffer b, boolean usesHBaseChecksum) throws IOException {
  b.rewind();
  // NOTE: sequential relative reads; order must match the on-disk layout.
  blockType = BlockType.read(b);
  onDiskSizeWithoutHeader = b.getInt();
  uncompressedSizeWithoutHeader = b.getInt();
  prevBlockOffset = b.getLong();
  HFileContextBuilder contextBuilder = new HFileContextBuilder();
  contextBuilder.withHBaseCheckSum(usesHBaseChecksum);
  if (usesHBaseChecksum) {
    // Checksum-capable header: checksum metadata follows the common fields.
    contextBuilder.withChecksumType(ChecksumType.codeToType(b.get()));
    contextBuilder.withBytesPerCheckSum(b.getInt());
    this.onDiskDataSizeWithHeader = b.getInt();
  } else {
    // Legacy header without checksum fields: derive the on-disk data size
    // from the shorter no-checksum header length.
    contextBuilder.withChecksumType(ChecksumType.NULL);
    contextBuilder.withBytesPerCheckSum(0);
    this.onDiskDataSizeWithHeader = onDiskSizeWithoutHeader +
                                     HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM;
  }
  this.fileContext = contextBuilder.build();
  buf = b;
  buf.rewind();
}
项目:HIndex    文件:HFileBlock.java   
/**
 * Returns a copy of this block prepared for the block cache. Checksums have
 * already been validated, so the cached copy carries no checksum data; this
 * is signalled via ChecksumType.NULL and a bytesPerChecksum of 0 in the new
 * block's context, even when the header minor version is
 * MINOR_VERSION_WITH_CHECKSUM.
 */
public HFileBlock getBlockForCaching() {
  HFileContextBuilder ctx = new HFileContextBuilder();
  ctx.withBlockSize(fileContext.getBlocksize());
  ctx.withBytesPerCheckSum(0);              // no checksums in cached data
  ctx.withChecksumType(ChecksumType.NULL);  // ditto
  ctx.withCompression(fileContext.getCompression());
  ctx.withDataBlockEncoding(fileContext.getDataBlockEncoding());
  ctx.withHBaseCheckSum(fileContext.isUseHBaseChecksum());
  ctx.withCompressTags(fileContext.isCompressTags());
  ctx.withIncludesMvcc(fileContext.isIncludesMvcc());
  ctx.withIncludesTags(fileContext.isIncludesTags());
  HFileContext newContext = ctx.build();
  // Always cache the uncompressed image; header is left unfilled here.
  return new HFileBlock(blockType, getOnDiskSizeWithoutHeader(),
      getUncompressedSizeWithoutHeader(), prevOffset, getUncompressedBufferWithHeader(),
      DONT_FILL_HEADER, startOffset,
      onDiskBytesWithHeader.length + onDiskChecksum.length, newContext);
}
项目:HIndex    文件:TestHFileDataBlockEncoder.java   
/**
 * Verifies that a block built with the old-style (checksum-free) header
 * reports the no-checksum header length for its dummy header after encoding.
 */
private void testHeaderSizeInCacheWithoutChecksumInternals(boolean useTags) throws IOException {
  int headerSize = HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM;
  // Serialize generated KVs after an old-style (no-checksum) header.
  ByteBuffer kvData = RedundantKVGenerator.convertKvToByteBuffer(
      generator.generateTestKeyValues(60, useTags), includesMemstoreTS);
  int dataSize = kvData.limit();
  ByteBuffer raw = ByteBuffer.allocate(dataSize + headerSize);
  raw.position(headerSize);
  kvData.rewind();
  raw.put(kvData);
  // HBase checksums disabled: mimics a pre-checksum minor-version block.
  HFileContextBuilder hcBuilder = new HFileContextBuilder();
  hcBuilder.withHBaseCheckSum(false);
  hcBuilder.withIncludesMvcc(includesMemstoreTS);
  hcBuilder.withIncludesTags(useTags);
  hcBuilder.withBlockSize(0);
  hcBuilder.withChecksumType(ChecksumType.NULL);
  HFileBlock block = new HFileBlock(BlockType.DATA, dataSize, dataSize, -1, raw,
      HFileBlock.FILL_HEADER, 0, 0, hcBuilder.build());
  HFileBlock cacheBlock = createBlockOnDisk(block, useTags);
  assertEquals(headerSize, cacheBlock.getDummyHeaderForVersion().length);
}
项目:HIndex    文件:TestHFileDataBlockEncoder.java   
/**
 * Builds a sample DATA block in the current (checksum-capable) header format,
 * with HBase checksums enabled but ChecksumType.NULL, over generated KVs.
 */
private HFileBlock getSampleHFileBlock(boolean useTag) {
  ByteBuffer kvData = RedundantKVGenerator.convertKvToByteBuffer(
      generator.generateTestKeyValues(60, useTag), includesMemstoreTS);
  int dataSize = kvData.limit();
  // Reserve room for the full header ahead of the serialized KVs.
  ByteBuffer raw = ByteBuffer.allocate(dataSize + HConstants.HFILEBLOCK_HEADER_SIZE);
  raw.position(HConstants.HFILEBLOCK_HEADER_SIZE);
  kvData.rewind();
  raw.put(kvData);
  HFileContextBuilder metaBuilder = new HFileContextBuilder();
  metaBuilder.withIncludesMvcc(includesMemstoreTS);
  metaBuilder.withIncludesTags(useTag);
  metaBuilder.withHBaseCheckSum(true);
  metaBuilder.withCompression(Algorithm.NONE);
  metaBuilder.withBlockSize(0);
  metaBuilder.withChecksumType(ChecksumType.NULL);
  return new HFileBlock(BlockType.DATA, dataSize, dataSize, -1, raw,
      HFileBlock.FILL_HEADER, 0, 0, metaBuilder.build());
}
项目:HIndex    文件:HFileContext.java   
/**
 * Full-field constructor for an HFileContext.
 *
 * @param useHBaseChecksum whether blocks carry HBase-level checksums
 * @param includesMvcc whether cells carry MVCC read points
 * @param includesTags whether cells carry tags
 * @param compressAlgo block compression algorithm
 * @param compressTags whether tags are compressed
 * @param checksumType checksum algorithm used for block data
 * @param bytesPerChecksum number of data bytes per checksum value
 * @param blockSize target uncompressed block size
 * @param encoding data block encoding; null keeps the field's default
 * @param cryptoContext encryption context for block data
 */
public HFileContext(boolean useHBaseChecksum, boolean includesMvcc, boolean includesTags,
    Compression.Algorithm compressAlgo, boolean compressTags, ChecksumType checksumType,
    int bytesPerChecksum, int blockSize, DataBlockEncoding encoding,
    Encryption.Context cryptoContext) {
  this.usesHBaseChecksum = useHBaseChecksum;
  this.includesMvcc =  includesMvcc;
  this.includesTags = includesTags;
  this.compressAlgo = compressAlgo;
  this.compressTags = compressTags;
  this.checksumType = checksumType;
  this.bytesPerChecksum = bytesPerChecksum;
  this.blocksize = blockSize;
  // A null encoding leaves the field at its default rather than overwriting it.
  if (encoding != null) {
    this.encoding = encoding;
  }
  this.cryptoContext = cryptoContext;
}
项目:IRIndex    文件:HFileWriterV2.java   
/**
 * Constructor that takes a path, creates and closes the output stream.
 * If {@code ostream} is null a new output stream is created for {@code path};
 * otherwise the given stream is used as-is.
 */
public HFileWriterV2(Configuration conf, CacheConfig cacheConf,
    FileSystem fs, Path path, FSDataOutputStream ostream, int blockSize,
    Compression.Algorithm compressAlgo, HFileDataBlockEncoder blockEncoder,
    final KeyComparator comparator, final ChecksumType checksumType,
    final int bytesPerChecksum, boolean includeMVCCReadpoint) throws IOException {
  super(cacheConf,
      ostream == null ? createOutputStream(conf, fs, path) : ostream,
      path, blockSize, compressAlgo, blockEncoder, comparator);
  SchemaMetrics.configureGlobally(conf);
  this.checksumType = checksumType;
  this.bytesPerChecksum = bytesPerChecksum;
  this.includeMemstoreTS = includeMVCCReadpoint;
  // When checksum verification is disabled in the config, fall back to the
  // pre-checksum block format (minor version 0).
  if (!conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, false)) {
    this.minorVersion = 0;
  }
  finishInit(conf);
}
项目:IRIndex    文件:HFileBlock.java   
/**
 * Creates a block from an existing buffer starting with a header. Rewinds
 * and takes ownership of the buffer. By definition of rewind, ignores the
 * buffer position, but if you slice the buffer beforehand, it will rewind
 * to that point. The reason this has a minorNumber and not a majorNumber is
 * because majorNumbers indicate the format of a HFile whereas minorNumbers 
 * indicate the format inside a HFileBlock.
 */
HFileBlock(ByteBuffer b, int minorVersion) throws IOException {
  b.rewind();
  // NOTE: sequential relative reads; order must match the on-disk layout.
  blockType = BlockType.read(b);
  onDiskSizeWithoutHeader = b.getInt();
  uncompressedSizeWithoutHeader = b.getInt();
  prevBlockOffset = b.getLong();
  this.minorVersion = minorVersion;
  if (minorVersion >= MINOR_VERSION_WITH_CHECKSUM) {
    // Checksum-capable header: checksum metadata follows the common fields.
    this.checksumType = b.get();
    this.bytesPerChecksum = b.getInt();
    this.onDiskDataSizeWithHeader = b.getInt();
  } else {
    // Legacy header without checksum fields: derive the on-disk data size
    // from the shorter no-checksum header length.
    this.checksumType = ChecksumType.NULL.getCode();
    this.bytesPerChecksum = 0;
    this.onDiskDataSizeWithHeader = onDiskSizeWithoutHeader +
                                     HEADER_SIZE_NO_CHECKSUM;
  }
  buf = b;
  buf.rewind();
}
项目:hbase    文件:ChecksumUtil.java   
/**
 * Generates a checksum for all the data in indata. The checksum is
 * written to outdata.
 * @param indata input data stream
 * @param startOffset starting offset in the indata stream from where to
 *                    compute checksums from
 * @param endOffset ending offset in the indata stream upto
 *                   which checksums needs to be computed
 * @param outdata the output buffer where checksum values are written
 * @param outOffset the starting offset in the outdata where the
 *                  checksum values are written
 * @param checksumType type of checksum
 * @param bytesPerChecksum number of bytes per checksum value
 */
static void generateChecksums(byte[] indata, int startOffset, int endOffset,
  byte[] outdata, int outOffset, ChecksumType checksumType,
  int bytesPerChecksum) throws IOException {

  if (checksumType == ChecksumType.NULL) {
    return; // No checksum for this block.
  }

  DataChecksum checksum = DataChecksum.newDataChecksum(
      checksumType.getDataChecksumType(), bytesPerChecksum);

  // Wrap the data range and the output range; calculateChunkedSums fills the
  // output buffer with one checksum per bytesPerChecksum chunk.
  checksum.calculateChunkedSums(
     ByteBuffer.wrap(indata, startOffset, endOffset - startOffset),
     ByteBuffer.wrap(outdata, outOffset, outdata.length - outOffset));
}
项目:hbase    文件:HFileBlock.java   
/**
 * Returns a copy of this block prepared for the block cache. Checksums have
 * already been validated, so the cached copy carries no checksum data even
 * when the header minor version is MINOR_VERSION_WITH_CHECKSUM; this is
 * signalled via ChecksumType.NULL and a bytesPerChecksum of 0. The on-disk
 * or uncompressed bytes are copied, so the result is used only while writing
 * blocks and caching.
 *
 * <p>TODO: Should there be an option where a cache can ask that hbase preserve
 * block checksums for checking after a block comes out of the cache? Otherwise
 * the cache is responsible for blocks being wholesome (ECC memory or, if
 * file-backed, its own checksumming).
 */
HFileBlock getBlockForCaching(CacheConfig cacheConf) {
  HFileContextBuilder ctx = new HFileContextBuilder();
  ctx.withBlockSize(fileContext.getBlocksize());
  ctx.withBytesPerCheckSum(0);              // no checksums in cached data
  ctx.withChecksumType(ChecksumType.NULL);  // ditto
  ctx.withCompression(fileContext.getCompression());
  ctx.withDataBlockEncoding(fileContext.getDataBlockEncoding());
  ctx.withHBaseCheckSum(fileContext.isUseHBaseChecksum());
  ctx.withCompressTags(fileContext.isCompressTags());
  ctx.withIncludesMvcc(fileContext.isIncludesMvcc());
  ctx.withIncludesTags(fileContext.isIncludesTags());
  HFileContext newContext = ctx.build();
  // Clone the on-disk (possibly compressed) image or the uncompressed one,
  // depending on the cache configuration for this block's category.
  return new HFileBlock(blockType, getOnDiskSizeWithoutHeader(),
      getUncompressedSizeWithoutHeader(), prevOffset,
      cacheConf.shouldCacheCompressed(blockType.getCategory())
          ? cloneOnDiskBufferWithHeader()
          : cloneUncompressedBufferWithHeader(),
      FILL_HEADER, startOffset, UNSET,
      onDiskBlockBytesWithHeader.size() + onDiskChecksum.length, newContext);
}
项目:hbase    文件:HFileBlock.java   
/**
 * Convert the contents of the block header into a human readable string.
 * This is mostly helpful for debugging. This assumes that the block
 * has minor version > 0, i.e. the header includes the checksum fields.
 */
@VisibleForTesting
static String toStringHeader(ByteBuff buf) throws IOException {
  // Read at most MAGIC_LENGTH bytes of magic from the current position.
  byte[] magicBuf = new byte[Math.min(buf.limit() - buf.position(), BlockType.MAGIC_LENGTH)];
  buf.get(magicBuf);
  BlockType bt = BlockType.parse(magicBuf, 0, BlockType.MAGIC_LENGTH);
  // Remaining header fields follow the magic in on-disk order.
  int compressedBlockSizeNoHeader = buf.getInt();
  int uncompressedBlockSizeNoHeader = buf.getInt();
  long prevBlockOffset = buf.getLong();
  byte cksumtype = buf.get();
  long bytesPerChecksum = buf.getInt();
  long onDiskDataSizeWithHeader = buf.getInt();
  return " Header dump: magic: " + Bytes.toString(magicBuf) +
                 " blockType " + bt +
                 " compressedBlockSizeNoHeader " +
                 compressedBlockSizeNoHeader +
                 " uncompressedBlockSizeNoHeader " +
                 uncompressedBlockSizeNoHeader +
                 " prevBlockOffset " + prevBlockOffset +
                 " checksumType " + ChecksumType.codeToType(cksumtype) +
                 " bytesPerChecksum " + bytesPerChecksum +
                 " onDiskDataSizeWithHeader " + onDiskDataSizeWithHeader;
}
项目:hbase    文件:TestChecksum.java   
/**
 * Writes a block with default settings and verifies that reading it back
 * reports the default checksum type.
 */
@Test
public void testNewBlocksHaveDefaultChecksum() throws IOException {
  Path path = new Path(TEST_UTIL.getDataTestDir(), "default_checksum");
  FSDataOutputStream os = fs.create(path);
  HFileContext meta = new HFileContextBuilder().build();
  HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
  DataOutputStream dos = hbw.startWriting(BlockType.DATA);
  for (int i = 0; i < 1000; ++i) {
    dos.writeInt(i);
  }
  hbw.writeHeaderAndData(os);
  int totalSize = hbw.getOnDiskSizeWithHeader();
  os.close();

  // Use hbase checksums.
  assertEquals(true, hfs.useHBaseChecksum());

  FSDataInputStreamWrapper is = new FSDataInputStreamWrapper(fs, path);
  meta = new HFileContextBuilder().withHBaseCheckSum(true).build();
  HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(
      is, totalSize, (HFileSystem) fs, path, meta);
  HFileBlock b = hbr.readBlockData(0, -1, false, false);
  // JUnit convention is assertEquals(expected, actual); the original had
  // the arguments reversed, which produces a misleading failure message.
  assertEquals(ChecksumType.getDefaultChecksumType().getCode(), b.getChecksumType());
}
项目:hbase    文件:TestHFileDataBlockEncoder.java   
private void testHeaderSizeInCacheWithoutChecksumInternals(boolean useTags) throws IOException {
  int headerSize = HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM;
  // Serialize generated KVs after an old-style (checksum-free) header.
  List<KeyValue> kvs = generator.generateTestKeyValues(60, useTags);
  ByteBuffer kvBuffer = RedundantKVGenerator.convertKvToByteBuffer(kvs, includesMemstoreTS);
  int dataSize = kvBuffer.limit();
  ByteBuffer blockBuf = ByteBuffer.allocate(dataSize + headerSize);
  blockBuf.position(headerSize);
  kvBuffer.rewind();
  blockBuf.put(kvBuffer);
  // Context mirrors a block written without HBase checksums.
  HFileContext hfileContext = new HFileContextBuilder()
      .withHBaseCheckSum(false)
      .withChecksumType(ChecksumType.NULL)
      .withBlockSize(0)
      .withIncludesMvcc(includesMemstoreTS)
      .withIncludesTags(useTags)
      .build();
  HFileBlock block = new HFileBlock(BlockType.DATA, dataSize, dataSize, -1, blockBuf,
      HFileBlock.FILL_HEADER, 0, 0, -1, hfileContext);
  HFileBlock cacheBlock = createBlockOnDisk(kvs, block, useTags);
  // The encoded block must report the checksum-free header size.
  assertEquals(headerSize, cacheBlock.getDummyHeaderForVersion().length);
}
项目:hbase    文件:TestHFileDataBlockEncoder.java   
private HFileBlock getSampleHFileBlock(List<KeyValue> kvs, boolean useTag) {
  // Serialize the KVs and place them after a full (checksum-capable) header.
  ByteBuffer kvBuffer = RedundantKVGenerator.convertKvToByteBuffer(kvs, includesMemstoreTS);
  int dataSize = kvBuffer.limit();
  ByteBuffer blockBuf = ByteBuffer.allocate(dataSize + HConstants.HFILEBLOCK_HEADER_SIZE);
  blockBuf.position(HConstants.HFILEBLOCK_HEADER_SIZE);
  kvBuffer.rewind();
  blockBuf.put(kvBuffer);
  // HBase checksums are declared enabled, but the NULL checksum type is used.
  HFileContext meta = new HFileContextBuilder()
      .withHBaseCheckSum(true)
      .withCompression(Algorithm.NONE)
      .withChecksumType(ChecksumType.NULL)
      .withBlockSize(0)
      .withIncludesMvcc(includesMemstoreTS)
      .withIncludesTags(useTag)
      .build();
  return new HFileBlock(BlockType.DATA, dataSize, dataSize, -1, blockBuf,
      HFileBlock.FILL_HEADER, 0, 0, -1, meta);
}
项目:hbase    文件:HFileContext.java   
HFileContext(boolean useHBaseChecksum, boolean includesMvcc, boolean includesTags,
    Compression.Algorithm compressAlgo, boolean compressTags, ChecksumType checksumType,
    int bytesPerChecksum, int blockSize, DataBlockEncoding encoding,
    Encryption.Context cryptoContext, long fileCreateTime, String hfileName) {
  // Checksum configuration.
  this.usesHBaseChecksum = useHBaseChecksum;
  this.checksumType = checksumType;
  this.bytesPerChecksum = bytesPerChecksum;
  // Per-cell features carried by this file.
  this.includesMvcc = includesMvcc;
  this.includesTags = includesTags;
  this.compressTags = compressTags;
  // Compression and block layout.
  this.compressAlgo = compressAlgo;
  this.blocksize = blockSize;
  // A null encoding keeps the field's default value.
  if (encoding != null) {
    this.encoding = encoding;
  }
  this.cryptoContext = cryptoContext;
  this.fileCreateTime = fileCreateTime;
  this.hfileName = hfileName;
}
项目:PyroDB    文件:HFileBlock.java   
/**
 * Builds a copy of this block suitable for the block cache. Checksums were
 * already validated when the block was read, so the cached copy carries
 * none: bytesPerChecksum is forced to 0 and the checksum type to NULL,
 * even though the header minor version is MINOR_VERSION_WITH_CHECKSUM.
 */
public HFileBlock getBlockForCaching() {
  HFileContextBuilder builder = new HFileContextBuilder();
  builder.withBlockSize(fileContext.getBlocksize());
  builder.withBytesPerCheckSum(0);
  builder.withChecksumType(ChecksumType.NULL); // no checksums in cached data
  builder.withCompression(fileContext.getCompression());
  builder.withDataBlockEncoding(fileContext.getDataBlockEncoding());
  builder.withHBaseCheckSum(fileContext.isUseHBaseChecksum());
  builder.withCompressTags(fileContext.isCompressTags());
  builder.withIncludesMvcc(fileContext.isIncludesMvcc());
  builder.withIncludesTags(fileContext.isIncludesTags());
  HFileContext newContext = builder.build();
  int onDiskSize = onDiskBytesWithHeader.length + onDiskChecksum.length;
  return new HFileBlock(blockType, getOnDiskSizeWithoutHeader(),
      getUncompressedSizeWithoutHeader(), prevOffset,
      getUncompressedBufferWithHeader(), DONT_FILL_HEADER, startOffset,
      onDiskSize, newContext);
}
项目:PyroDB    文件:TestHFileDataBlockEncoder.java   
private void testHeaderSizeInCacheWithoutChecksumInternals(boolean useTags) throws IOException {
  int headerSize = HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM;
  // Build a block payload from generated KVs, leaving room for an
  // old-style (checksum-free) header at the front of the buffer.
  List<KeyValue> kvs = generator.generateTestKeyValues(60, useTags);
  ByteBuffer kvBuffer = RedundantKVGenerator.convertKvToByteBuffer(kvs, includesMemstoreTS);
  int dataSize = kvBuffer.limit();
  ByteBuffer blockBuf = ByteBuffer.allocate(dataSize + headerSize);
  blockBuf.position(headerSize);
  kvBuffer.rewind();
  blockBuf.put(kvBuffer);
  HFileContext hfileContext = new HFileContextBuilder()
      .withHBaseCheckSum(false)
      .withChecksumType(ChecksumType.NULL)
      .withBlockSize(0)
      .withIncludesMvcc(includesMemstoreTS)
      .withIncludesTags(useTags)
      .build();
  HFileBlock block = new HFileBlock(BlockType.DATA, dataSize, dataSize, -1, blockBuf,
      HFileBlock.FILL_HEADER, 0, 0, hfileContext);
  HFileBlock cacheBlock = createBlockOnDisk(kvs, block, useTags);
  // The encoded block must report the checksum-free header size.
  assertEquals(headerSize, cacheBlock.getDummyHeaderForVersion().length);
}
项目:PyroDB    文件:TestHFileDataBlockEncoder.java   
private HFileBlock getSampleHFileBlock(List<KeyValue> kvs, boolean useTag) {
  // Serialize the KVs after a full (checksum-capable) header.
  ByteBuffer kvBuffer = RedundantKVGenerator.convertKvToByteBuffer(kvs, includesMemstoreTS);
  int dataSize = kvBuffer.limit();
  ByteBuffer blockBuf = ByteBuffer.allocate(dataSize + HConstants.HFILEBLOCK_HEADER_SIZE);
  blockBuf.position(HConstants.HFILEBLOCK_HEADER_SIZE);
  kvBuffer.rewind();
  blockBuf.put(kvBuffer);
  // HBase checksums are declared enabled, but the NULL checksum type is used.
  HFileContext meta = new HFileContextBuilder()
      .withHBaseCheckSum(true)
      .withCompression(Algorithm.NONE)
      .withChecksumType(ChecksumType.NULL)
      .withBlockSize(0)
      .withIncludesMvcc(includesMemstoreTS)
      .withIncludesTags(useTag)
      .build();
  return new HFileBlock(BlockType.DATA, dataSize, dataSize, -1, blockBuf,
      HFileBlock.FILL_HEADER, 0, 0, meta);
}
项目:PyroDB    文件:HFileContext.java   
public HFileContext(boolean useHBaseChecksum, boolean includesMvcc, boolean includesTags,
    Compression.Algorithm compressAlgo, boolean compressTags, ChecksumType checksumType,
    int bytesPerChecksum, int blockSize, DataBlockEncoding encoding,
    Encryption.Context cryptoContext) {
  // Checksum configuration.
  this.usesHBaseChecksum = useHBaseChecksum;
  this.checksumType = checksumType;
  this.bytesPerChecksum = bytesPerChecksum;
  // Per-cell features carried by this file.
  this.includesMvcc = includesMvcc;
  this.includesTags = includesTags;
  this.compressTags = compressTags;
  // Compression and block layout.
  this.compressAlgo = compressAlgo;
  this.blocksize = blockSize;
  // A null encoding keeps the field's default value.
  if (encoding != null) {
    this.encoding = encoding;
  }
  this.cryptoContext = cryptoContext;
}
项目:c5    文件:HFileBlock.java   
/**
 * Creates a block from an existing buffer starting with a header. Rewinds
 * and takes ownership of the buffer. By definition of rewind, ignores the
 * buffer position, but if you slice the buffer beforehand, it will rewind
 * to that point. The reason this has a minorNumber and not a majorNumber is
 * because majorNumbers indicate the format of a HFile whereas minorNumbers
 * indicate the format inside a HFileBlock.
 */
HFileBlock(ByteBuffer b, int minorVersion) throws IOException {
  b.rewind();
  // Header fields are read in their fixed on-disk order; do not reorder.
  blockType = BlockType.read(b);
  onDiskSizeWithoutHeader = b.getInt();
  uncompressedSizeWithoutHeader = b.getInt();
  prevBlockOffset = b.getLong();
  this.minorVersion = minorVersion;
  if (minorVersion >= MINOR_VERSION_WITH_CHECKSUM) {
    // Checksum-aware blocks persist the checksum parameters in the header.
    this.checksumType = b.get();
    this.bytesPerChecksum = b.getInt();
    this.onDiskDataSizeWithHeader = b.getInt();
  } else {
    // Pre-checksum blocks: synthesize values meaning "no checksums".
    this.checksumType = ChecksumType.NULL.getCode();
    this.bytesPerChecksum = 0;
    this.onDiskDataSizeWithHeader = onDiskSizeWithoutHeader +
                                     HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM;
  }
  // Take ownership of the buffer and leave it rewound for later readers.
  buf = b;
  buf.rewind();
}
项目:c5    文件:TestHFileDataBlockEncoder.java   
/** Test for HBASE-5746: blocks built without checksums must keep the checksum-free header size. */
@Test
public void testHeaderSizeInCacheWithoutChecksum() throws Exception {
  int headerSize = HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM;
  // Serialize generated KVs after an old-style (checksum-free) header.
  ByteBuffer kvData = RedundantKVGenerator.convertKvToByteBuffer(
      generator.generateTestKeyValues(60), includesMemstoreTS);
  int dataSize = kvData.limit();
  ByteBuffer blockBuf = ByteBuffer.allocate(dataSize + headerSize);
  blockBuf.position(headerSize);
  kvData.rewind();
  blockBuf.put(kvData);
  HFileBlock block = new HFileBlock(BlockType.DATA, dataSize, dataSize, -1, blockBuf,
      HFileBlock.FILL_HEADER, 0, includesMemstoreTS,
      HFileBlock.MINOR_VERSION_NO_CHECKSUM, 0, ChecksumType.NULL.getCode(), 0);
  HFileBlock cacheBlock = createBlockOnDisk(block);
  // The encoded block must report the checksum-free header size.
  assertEquals(headerSize, cacheBlock.getDummyHeaderForVersion().length);
}
项目:HBase-Research    文件:HFileWriterV2.java   
/**
 * Constructor that takes a path, creates and closes the output stream.
 * When {@code ostream} is null, a stream is created at {@code path}.
 */
public HFileWriterV2(Configuration conf, CacheConfig cacheConf,
    FileSystem fs, Path path, FSDataOutputStream ostream, int blockSize,
    Compression.Algorithm compressAlgo, HFileDataBlockEncoder blockEncoder,
    final KeyComparator comparator, final ChecksumType checksumType,
    final int bytesPerChecksum, boolean includeMVCCReadpoint) throws IOException {
  // super() must be the first statement, so stream creation is inlined here.
  super(cacheConf,
      ostream == null ? createOutputStream(conf, fs, path) : ostream,
      path, blockSize, compressAlgo, blockEncoder, comparator);
  SchemaMetrics.configureGlobally(conf);
  this.checksumType = checksumType;
  this.bytesPerChecksum = bytesPerChecksum;
  this.includeMemstoreTS = includeMVCCReadpoint;
  // With HBase-level checksum verification disabled, fall back to minor
  // version 0 (no checksum data written into blocks).
  if (!conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, false)) {
    this.minorVersion = 0;
  }
  finishInit(conf);
}
项目:HBase-Research    文件:HFileBlock.java   
/**
 * Creates a block from an existing buffer starting with a header. Rewinds
 * and takes ownership of the buffer. By definition of rewind, ignores the
 * buffer position, but if you slice the buffer beforehand, it will rewind
 * to that point. The reason this has a minorNumber and not a majorNumber is
 * because majorNumbers indicate the format of a HFile whereas minorNumbers
 * indicate the format inside a HFileBlock.
 */
HFileBlock(ByteBuffer b, int minorVersion) throws IOException {
  b.rewind();
  // Header fields are read in their fixed on-disk order; do not reorder.
  blockType = BlockType.read(b);
  onDiskSizeWithoutHeader = b.getInt();
  uncompressedSizeWithoutHeader = b.getInt();
  prevBlockOffset = b.getLong();
  this.minorVersion = minorVersion;
  if (minorVersion >= MINOR_VERSION_WITH_CHECKSUM) {
    // Checksum-aware blocks persist the checksum parameters in the header.
    this.checksumType = b.get();
    this.bytesPerChecksum = b.getInt();
    this.onDiskDataSizeWithHeader = b.getInt();
  } else {
    // Pre-checksum blocks: synthesize values meaning "no checksums".
    this.checksumType = ChecksumType.NULL.getCode();
    this.bytesPerChecksum = 0;
    this.onDiskDataSizeWithHeader = onDiskSizeWithoutHeader +
                                     HEADER_SIZE_NO_CHECKSUM;
  }
  // Take ownership of the buffer and leave it rewound for later readers.
  buf = b;
  buf.rewind();
}
项目:hbase-0.94.8-qod    文件:HFileWriterV2.java   
/** Constructor that takes a path, creates and closes the output stream. */
public HFileWriterV2(Configuration conf, CacheConfig cacheConf,
    FileSystem fs, Path path, FSDataOutputStream ostream, int blockSize,
    Compression.Algorithm compressAlgo, HFileDataBlockEncoder blockEncoder,
    final KeyComparator comparator, final ChecksumType checksumType,
    final int bytesPerChecksum, boolean includeMVCCReadpoint) throws IOException {
  super(cacheConf,
      ostream == null ? createOutputStream(conf, fs, path) : ostream,
      path, blockSize, compressAlgo, blockEncoder, comparator);
  SchemaMetrics.configureGlobally(conf);
  this.checksumType = checksumType;
  this.bytesPerChecksum = bytesPerChecksum;
  this.includeMemstoreTS = includeMVCCReadpoint;
  if (!conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, false)) {
    this.minorVersion = 0;
  }
  finishInit(conf);
}