Example source code for the Java class org.apache.hadoop.hbase.io.hfile.HFileReaderV2

Project: ditb    File: DataBlockEncodingTool.java
/**
 * Test a data block encoder on the given HFile. Output results to console.
 * @param kvLimit Maximum number of KeyValues to analyze.
 * @param hfilePath an HFile path on the file system.
 * @param compressionName Compression algorithm used for comparison.
 * @param doBenchmark Run performance benchmarks.
 * @param doVerify Verify correctness.
 * @throws IOException When hfilePath is incorrect.
 */
public static void testCodecs(Configuration conf, int kvLimit,
    String hfilePath, String compressionName, boolean doBenchmark,
    boolean doVerify) throws IOException {
  // create environment
  Path path = new Path(hfilePath);
  CacheConfig cacheConf = new CacheConfig(conf);
  FileSystem fs = FileSystem.get(conf);
  StoreFile hsf = new StoreFile(fs, path, conf, cacheConf,
    BloomType.NONE);

  StoreFile.Reader reader = hsf.createReader();
  reader.loadFileInfo();
  KeyValueScanner scanner = reader.getStoreFileScanner(true, true);

  // run the utilities
  DataBlockEncodingTool comp = new DataBlockEncodingTool(compressionName);
  int majorVersion = reader.getHFileVersion();
  comp.useHBaseChecksum = majorVersion > 2
      || (majorVersion == 2 && reader.getHFileMinorVersion() >= HFileReaderV2.MINOR_VERSION_WITH_CHECKSUM);
  comp.checkStatistics(scanner, kvLimit);
  if (doVerify) {
    comp.verifyCodecs(scanner, kvLimit);
  }
  if (doBenchmark) {
    comp.benchmarkCodecs();
  }
  comp.displayStatistics();

  // cleanup
  scanner.close();
  reader.close(cacheConf.shouldEvictOnClose());
}
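
For context, a minimal sketch of how the utility above might be invoked; the store-file path and the "GZ" compression name are placeholder values, not taken from the source:

// Hedged usage sketch: the HFile path below is a placeholder.
Configuration conf = HBaseConfiguration.create();
// Analyze at most 100000 KeyValues, verify codec correctness, skip benchmarks.
DataBlockEncodingTool.testCodecs(conf, 100000,
    "/tmp/example-store-file", "GZ", false /* doBenchmark */, true /* doVerify */);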
Project: pbase    File: DataBlockEncodingTool.java
/**
 * Test a data block encoder on the given HFile. Output results to console.
 * @param kvLimit Maximum number of KeyValues to analyze.
 * @param hfilePath an HFile path on the file system.
 * @param compressionName Compression algorithm used for comparison.
 * @param doBenchmark Run performance benchmarks.
 * @param doVerify Verify correctness.
 * @throws IOException When hfilePath is incorrect.
 */
public static void testCodecs(Configuration conf, int kvLimit,
    String hfilePath, String compressionName, boolean doBenchmark,
    boolean doVerify) throws IOException {
  // create environment
  Path path = new Path(hfilePath);
  CacheConfig cacheConf = new CacheConfig(conf);
  FileSystem fs = FileSystem.get(conf);
  StoreFile hsf = new StoreFile(fs, path, conf, cacheConf,
    BloomType.NONE);

  StoreFile.Reader reader = hsf.createReader();
  reader.loadFileInfo();
  KeyValueScanner scanner = reader.getStoreFileScanner(true, true);

  // run the utilities
  DataBlockEncodingTool comp = new DataBlockEncodingTool(compressionName);
  int majorVersion = reader.getHFileVersion();
  comp.useHBaseChecksum = majorVersion > 2
      || (majorVersion == 2 && reader.getHFileMinorVersion() >= HFileReaderV2.MINOR_VERSION_WITH_CHECKSUM);
  comp.checkStatistics(scanner, kvLimit);
  if (doVerify) {
    comp.verifyCodecs(scanner, kvLimit);
  }
  if (doBenchmark) {
    comp.benchmarkCodecs();
  }
  comp.displayStatistics();

  // cleanup
  scanner.close();
  reader.close(cacheConf.shouldEvictOnClose());
}
Project: HIndex    File: DataBlockEncodingTool.java
/**
 * Test a data block encoder on the given HFile. Output results to console.
 * @param kvLimit Maximum number of KeyValues to analyze.
 * @param hfilePath an HFile path on the file system.
 * @param compressionName Compression algorithm used for comparison.
 * @param doBenchmark Run performance benchmarks.
 * @param doVerify Verify correctness.
 * @throws IOException When hfilePath is incorrect.
 */
public static void testCodecs(Configuration conf, int kvLimit,
    String hfilePath, String compressionName, boolean doBenchmark,
    boolean doVerify) throws IOException {
  // create environment
  Path path = new Path(hfilePath);
  CacheConfig cacheConf = new CacheConfig(conf);
  FileSystem fs = FileSystem.get(conf);
  StoreFile hsf = new StoreFile(fs, path, conf, cacheConf,
    BloomType.NONE);

  StoreFile.Reader reader = hsf.createReader();
  reader.loadFileInfo();
  KeyValueScanner scanner = reader.getStoreFileScanner(true, true);

  // run the utilities
  DataBlockEncodingTool comp = new DataBlockEncodingTool(compressionName);
  int majorVersion = reader.getHFileVersion();
  comp.useHBaseChecksum = majorVersion > 2
      || (majorVersion == 2 && reader.getHFileMinorVersion() >= HFileReaderV2.MINOR_VERSION_WITH_CHECKSUM);
  comp.checkStatistics(scanner, kvLimit);
  if (doVerify) {
    comp.verifyCodecs(scanner, kvLimit);
  }
  if (doBenchmark) {
    comp.benchmarkCodecs();
  }
  comp.displayStatistics();

  // cleanup
  scanner.close();
  reader.close(cacheConf.shouldEvictOnClose());
}
Project: PyroDB    File: DataBlockEncodingTool.java
/**
 * Test a data block encoder on the given HFile. Output results to console.
 * @param kvLimit Maximum number of KeyValues to analyze.
 * @param hfilePath an HFile path on the file system.
 * @param compressionName Compression algorithm used for comparison.
 * @param doBenchmark Run performance benchmarks.
 * @param doVerify Verify correctness.
 * @throws IOException When hfilePath is incorrect.
 */
public static void testCodecs(Configuration conf, int kvLimit,
    String hfilePath, String compressionName, boolean doBenchmark,
    boolean doVerify) throws IOException {
  // create environment
  Path path = new Path(hfilePath);
  CacheConfig cacheConf = new CacheConfig(conf);
  FileSystem fs = FileSystem.get(conf);
  StoreFile hsf = new StoreFile(fs, path, conf, cacheConf,
    BloomType.NONE);

  StoreFile.Reader reader = hsf.createReader();
  reader.loadFileInfo();
  KeyValueScanner scanner = reader.getStoreFileScanner(true, true);

  // run the utilities
  DataBlockEncodingTool comp = new DataBlockEncodingTool(compressionName);
  int majorVersion = reader.getHFileVersion();
  comp.useHBaseChecksum = majorVersion > 2
      || (majorVersion == 2 && reader.getHFileMinorVersion() >= HFileReaderV2.MINOR_VERSION_WITH_CHECKSUM);
  comp.checkStatistics(scanner, kvLimit);
  if (doVerify) {
    comp.verifyCodecs(scanner, kvLimit);
  }
  if (doBenchmark) {
    comp.benchmarkCodecs();
  }
  comp.displayStatistics();

  // cleanup
  scanner.close();
  reader.close(cacheConf.shouldEvictOnClose());
}
Project: ditb    File: TestCacheOnWriteInSchema.java
private void readStoreFile(Path path) throws IOException {
  CacheConfig cacheConf = store.getCacheConfig();
  BlockCache cache = cacheConf.getBlockCache();
  StoreFile sf = new StoreFile(fs, path, conf, cacheConf,
    BloomType.ROWCOL);
  HFileReaderV2 reader = (HFileReaderV2) sf.createReader().getHFileReader();
  try {
    // Open a scanner with (on read) caching disabled
    HFileScanner scanner = reader.getScanner(false, false);
    assertTrue(testDescription, scanner.seekTo());
    // Cribbed from io.hfile.TestCacheOnWrite
    long offset = 0;
    HFileBlock prevBlock = null;
    while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
      long onDiskSize = -1;
      if (prevBlock != null) {
        onDiskSize = prevBlock.getNextBlockOnDiskSizeWithHeader();
      }
      // Flags: don't cache the block, use pread, this is not a compaction.
      // Also, pass null for expected block type to avoid checking it.
      HFileBlock block = reader.readBlock(offset, onDiskSize, false, true,
        false, true, null, DataBlockEncoding.NONE);
      BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(),
        offset);
      boolean isCached = cache.getBlock(blockCacheKey, true, false, true) != null;
      boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
      if (shouldBeCached != isCached) {
        throw new AssertionError(
          "shouldBeCached: " + shouldBeCached+ "\n" +
          "isCached: " + isCached + "\n" +
          "Test description: " + testDescription + "\n" +
          "block: " + block + "\n" +
          "blockCacheKey: " + blockCacheKey);
      }
      prevBlock = block;
      offset += block.getOnDiskSizeWithHeader();
    }
  } finally {
    reader.close();
  }
}
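
The loop above verifies cache-on-write behavior block by block. As a complementary sketch, here is a hypothetical helper (not part of these tests) that walks an already-open HFileReaderV2 cell by cell through its scanner, using only the seekTo()/next() calls already visible in the snippet:

// Hypothetical helper: count the cells in an already-open HFileReaderV2.
// Caching and pread are disabled, matching the test scanners above.
private static long countCells(HFileReaderV2 reader) throws IOException {
  HFileScanner scanner = reader.getScanner(false, false);
  long count = 0;
  if (scanner.seekTo()) {        // position on the first cell; false if the file is empty
    do {
      count++;                   // the current cell is available from the scanner here
    } while (scanner.next());    // advance; returns false after the last cell
  }
  return count;
}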
Project: LCIndex-HBase-0.94.16    File: TestCacheOnWriteInSchema.java
private void readStoreFile(Path path) throws IOException {
  CacheConfig cacheConf = store.getCacheConfig(); 
  BlockCache cache = cacheConf.getBlockCache();
  StoreFile sf = new StoreFile(fs, path, conf, cacheConf,
      BloomType.ROWCOL, null);
  store.passSchemaMetricsTo(sf);
  HFileReaderV2 reader = (HFileReaderV2) sf.createReader().getHFileReader();
  try {
    // Open a scanner with (on read) caching disabled
    HFileScanner scanner = reader.getScanner(false, false);
    assertTrue(testDescription, scanner.seekTo());
    // Cribbed from io.hfile.TestCacheOnWrite
    long offset = 0;
    HFileBlock prevBlock = null;
    while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
      long onDiskSize = -1;
      if (prevBlock != null) {
        onDiskSize = prevBlock.getNextBlockOnDiskSizeWithHeader();
      }
      // Flags: don't cache the block, use pread, this is not a compaction.
      // Also, pass null for expected block type to avoid checking it.
      HFileBlock block = reader.readBlock(offset, onDiskSize, false, true,
        false, null);
      BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(),
        offset);
      boolean isCached = cache.getBlock(blockCacheKey, true, false) != null;
      boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
      if (shouldBeCached != isCached) {
        throw new AssertionError(
          "shouldBeCached: " + shouldBeCached+ "\n" +
          "isCached: " + isCached + "\n" +
          "Test description: " + testDescription + "\n" +
          "block: " + block + "\n" +
          "blockCacheKey: " + blockCacheKey);
      }
      prevBlock = block;
      offset += block.getOnDiskSizeWithHeader();
    }
  } finally {
    reader.close();
  }
}
Project: pbase    File: TestCacheOnWriteInSchema.java
private void readStoreFile(Path path) throws IOException {
  CacheConfig cacheConf = store.getCacheConfig();
  BlockCache cache = cacheConf.getBlockCache();
  StoreFile sf = new StoreFile(fs, path, conf, cacheConf,
    BloomType.ROWCOL);
  HFileReaderV2 reader = (HFileReaderV2) sf.createReader().getHFileReader();
  try {
    // Open a scanner with (on read) caching disabled
    HFileScanner scanner = reader.getScanner(false, false);
    assertTrue(testDescription, scanner.seekTo());
    // Cribbed from io.hfile.TestCacheOnWrite
    long offset = 0;
    HFileBlock prevBlock = null;
    while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
      long onDiskSize = -1;
      if (prevBlock != null) {
        onDiskSize = prevBlock.getNextBlockOnDiskSizeWithHeader();
      }
      // Flags: don't cache the block, use pread, this is not a compaction.
      // Also, pass null for expected block type to avoid checking it.
      HFileBlock block = reader.readBlock(offset, onDiskSize, false, true,
        false, true, null, DataBlockEncoding.NONE);
      BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(),
        offset);
      boolean isCached = cache.getBlock(blockCacheKey, true, false, true) != null;
      boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
      if (shouldBeCached != isCached) {
        throw new AssertionError(
          "shouldBeCached: " + shouldBeCached+ "\n" +
          "isCached: " + isCached + "\n" +
          "Test description: " + testDescription + "\n" +
          "block: " + block + "\n" +
          "blockCacheKey: " + blockCacheKey);
      }
      prevBlock = block;
      offset += block.getOnDiskSizeWithHeader();
    }
  } finally {
    reader.close();
  }
}
Project: HIndex    File: TestCacheOnWriteInSchema.java
private void readStoreFile(Path path) throws IOException {
  CacheConfig cacheConf = store.getCacheConfig();
  BlockCache cache = cacheConf.getBlockCache();
  StoreFile sf = new StoreFile(fs, path, conf, cacheConf,
    BloomType.ROWCOL);
  HFileReaderV2 reader = (HFileReaderV2) sf.createReader().getHFileReader();
  try {
    // Open a scanner with (on read) caching disabled
    HFileScanner scanner = reader.getScanner(false, false);
    assertTrue(testDescription, scanner.seekTo());
    // Cribbed from io.hfile.TestCacheOnWrite
    long offset = 0;
    HFileBlock prevBlock = null;
    while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
      long onDiskSize = -1;
      if (prevBlock != null) {
        onDiskSize = prevBlock.getNextBlockOnDiskSizeWithHeader();
      }
      // Flags: don't cache the block, use pread, this is not a compaction.
      // Also, pass null for expected block type to avoid checking it.
      HFileBlock block = reader.readBlock(offset, onDiskSize, false, true,
        false, true, null);
      BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(),
        offset);
      boolean isCached = cache.getBlock(blockCacheKey, true, false, true) != null;
      boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
      if (shouldBeCached != isCached) {
        throw new AssertionError(
          "shouldBeCached: " + shouldBeCached+ "\n" +
          "isCached: " + isCached + "\n" +
          "Test description: " + testDescription + "\n" +
          "block: " + block + "\n" +
          "blockCacheKey: " + blockCacheKey);
      }
      prevBlock = block;
      offset += block.getOnDiskSizeWithHeader();
    }
  } finally {
    reader.close();
  }
}
Project: IRIndex    File: TestCacheOnWriteInSchema.java
private void readStoreFile(Path path) throws IOException {
  CacheConfig cacheConf = store.getCacheConfig(); 
  BlockCache cache = cacheConf.getBlockCache();
  StoreFile sf = new StoreFile(fs, path, conf, cacheConf,
      BloomType.ROWCOL, null);
  store.passSchemaMetricsTo(sf);
  HFileReaderV2 reader = (HFileReaderV2) sf.createReader().getHFileReader();
  try {
    // Open a scanner with (on read) caching disabled
    HFileScanner scanner = reader.getScanner(false, false);
    assertTrue(testDescription, scanner.seekTo());
    // Cribbed from io.hfile.TestCacheOnWrite
    long offset = 0;
    HFileBlock prevBlock = null;
    while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
      long onDiskSize = -1;
      if (prevBlock != null) {
        onDiskSize = prevBlock.getNextBlockOnDiskSizeWithHeader();
      }
      // Flags: don't cache the block, use pread, this is not a compaction.
      // Also, pass null for expected block type to avoid checking it.
      HFileBlock block = reader.readBlock(offset, onDiskSize, false, true,
        false, null);
      BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(),
        offset);
      boolean isCached = cache.getBlock(blockCacheKey, true, false) != null;
      boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
      if (shouldBeCached != isCached) {
        throw new AssertionError(
          "shouldBeCached: " + shouldBeCached+ "\n" +
          "isCached: " + isCached + "\n" +
          "Test description: " + testDescription + "\n" +
          "block: " + block + "\n" +
          "blockCacheKey: " + blockCacheKey);
      }
      prevBlock = block;
      offset += block.getOnDiskSizeWithHeader();
    }
  } finally {
    reader.close();
  }
}
Project: PyroDB    File: TestCacheOnWriteInSchema.java
private void readStoreFile(Path path) throws IOException {
  CacheConfig cacheConf = store.getCacheConfig();
  BlockCache cache = cacheConf.getBlockCache();
  StoreFile sf = new StoreFile(fs, path, conf, cacheConf,
    BloomType.ROWCOL);
  HFileReaderV2 reader = (HFileReaderV2) sf.createReader().getHFileReader();
  try {
    // Open a scanner with (on read) caching disabled
    HFileScanner scanner = reader.getScanner(false, false);
    assertTrue(testDescription, scanner.seekTo());
    // Cribbed from io.hfile.TestCacheOnWrite
    long offset = 0;
    HFileBlock prevBlock = null;
    while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
      long onDiskSize = -1;
      if (prevBlock != null) {
        onDiskSize = prevBlock.getNextBlockOnDiskSizeWithHeader();
      }
      // Flags: don't cache the block, use pread, this is not a compaction.
      // Also, pass null for expected block type to avoid checking it.
      HFileBlock block = reader.readBlock(offset, onDiskSize, false, true,
        false, true, null, DataBlockEncoding.NONE);
      BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(),
        offset);
      boolean isCached = cache.getBlock(blockCacheKey, true, false, true) != null;
      boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
      if (shouldBeCached != isCached) {
        throw new AssertionError(
          "shouldBeCached: " + shouldBeCached+ "\n" +
          "isCached: " + isCached + "\n" +
          "Test description: " + testDescription + "\n" +
          "block: " + block + "\n" +
          "blockCacheKey: " + blockCacheKey);
      }
      prevBlock = block;
      offset += block.getOnDiskSizeWithHeader();
    }
  } finally {
    reader.close();
  }
}
Project: c5    File: TestCacheOnWriteInSchema.java
private void readStoreFile(Path path) throws IOException {
  CacheConfig cacheConf = store.getCacheConfig();
  BlockCache cache = cacheConf.getBlockCache();
  StoreFile sf = new StoreFile(fs, path, conf, cacheConf,
    BloomType.ROWCOL);
  HFileReaderV2 reader = (HFileReaderV2) sf.createReader().getHFileReader();
  try {
    // Open a scanner with (on read) caching disabled
    HFileScanner scanner = reader.getScanner(false, false);
    assertTrue(testDescription, scanner.seekTo());
    // Cribbed from io.hfile.TestCacheOnWrite
    long offset = 0;
    HFileBlock prevBlock = null;
    while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
      long onDiskSize = -1;
      if (prevBlock != null) {
        onDiskSize = prevBlock.getNextBlockOnDiskSizeWithHeader();
      }
      // Flags: don't cache the block, use pread, this is not a compaction.
      // Also, pass null for expected block type to avoid checking it.
      HFileBlock block = reader.readBlock(offset, onDiskSize, false, true,
        false, null);
      BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(),
        offset);
      boolean isCached = cache.getBlock(blockCacheKey, true, false) != null;
      boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
      if (shouldBeCached != isCached) {
        throw new AssertionError(
          "shouldBeCached: " + shouldBeCached+ "\n" +
          "isCached: " + isCached + "\n" +
          "Test description: " + testDescription + "\n" +
          "block: " + block + "\n" +
          "blockCacheKey: " + blockCacheKey);
      }
      prevBlock = block;
      offset += block.getOnDiskSizeWithHeader();
    }
  } finally {
    reader.close();
  }
}
Project: HBase-Research    File: TestCacheOnWriteInSchema.java
private void readStoreFile(Path path) throws IOException {
  CacheConfig cacheConf = store.getCacheConfig(); 
  BlockCache cache = cacheConf.getBlockCache();
  StoreFile sf = new StoreFile(fs, path, conf, cacheConf,
      BloomType.ROWCOL, null);
  store.passSchemaMetricsTo(sf);
  HFileReaderV2 reader = (HFileReaderV2) sf.createReader().getHFileReader();
  try {
    // Open a scanner with (on read) caching disabled
    HFileScanner scanner = reader.getScanner(false, false);
    assertTrue(testDescription, scanner.seekTo());
    // Cribbed from io.hfile.TestCacheOnWrite
    long offset = 0;
    HFileBlock prevBlock = null;
    while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
      long onDiskSize = -1;
      if (prevBlock != null) {
        onDiskSize = prevBlock.getNextBlockOnDiskSizeWithHeader();
      }
      // Flags: don't cache the block, use pread, this is not a compaction.
      // Also, pass null for expected block type to avoid checking it.
      HFileBlock block = reader.readBlock(offset, onDiskSize, false, true,
        false, null);
      BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(),
        offset);
      boolean isCached = cache.getBlock(blockCacheKey, true, false) != null;
      boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
      if (shouldBeCached != isCached) {
        throw new AssertionError(
          "shouldBeCached: " + shouldBeCached+ "\n" +
          "isCached: " + isCached + "\n" +
          "Test description: " + testDescription + "\n" +
          "block: " + block + "\n" +
          "blockCacheKey: " + blockCacheKey);
      }
      prevBlock = block;
      offset += block.getOnDiskSizeWithHeader();
    }
  } finally {
    reader.close();
  }
}
Project: hbase-0.94.8-qod    File: TestCacheOnWriteInSchema.java
private void readStoreFile(Path path) throws IOException {
  CacheConfig cacheConf = store.getCacheConfig(); 
  BlockCache cache = cacheConf.getBlockCache();
  StoreFile sf = new StoreFile(fs, path, conf, cacheConf,
      BloomType.ROWCOL, null);
  store.passSchemaMetricsTo(sf);
  HFileReaderV2 reader = (HFileReaderV2) sf.createReader().getHFileReader();
  try {
    // Open a scanner with (on read) caching disabled
    HFileScanner scanner = reader.getScanner(false, false);
    assertTrue(testDescription, scanner.seekTo());
    // Cribbed from io.hfile.TestCacheOnWrite
    long offset = 0;
    HFileBlock prevBlock = null;
    while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
      long onDiskSize = -1;
      if (prevBlock != null) {
        onDiskSize = prevBlock.getNextBlockOnDiskSizeWithHeader();
      }
      // Flags: don't cache the block, use pread, this is not a compaction.
      // Also, pass null for expected block type to avoid checking it.
      HFileBlock block = reader.readBlock(offset, onDiskSize, false, true,
        false, null);
      BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(),
        offset);
      boolean isCached = cache.getBlock(blockCacheKey, true, false) != null;
      boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
      if (shouldBeCached != isCached) {
        throw new AssertionError(
          "shouldBeCached: " + shouldBeCached+ "\n" +
          "isCached: " + isCached + "\n" +
          "Test description: " + testDescription + "\n" +
          "block: " + block + "\n" +
          "blockCacheKey: " + blockCacheKey);
      }
      prevBlock = block;
      offset += block.getOnDiskSizeWithHeader();
    }
  } finally {
    reader.close();
  }
}
Project: DominoHBase    File: TestCacheOnWriteInSchema.java
private void readStoreFile(Path path) throws IOException {
  CacheConfig cacheConf = store.getCacheConfig(); 
  BlockCache cache = cacheConf.getBlockCache();
  StoreFile sf = new StoreFile(fs, path, conf, cacheConf,
      BloomType.ROWCOL, null);
  HFileReaderV2 reader = (HFileReaderV2) sf.createReader().getHFileReader();
  try {
    // Open a scanner with (on read) caching disabled
    HFileScanner scanner = reader.getScanner(false, false);
    assertTrue(testDescription, scanner.seekTo());
    // Cribbed from io.hfile.TestCacheOnWrite
    long offset = 0;
    HFileBlock prevBlock = null;
    while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
      long onDiskSize = -1;
      if (prevBlock != null) {
        onDiskSize = prevBlock.getNextBlockOnDiskSizeWithHeader();
      }
      // Flags: don't cache the block, use pread, this is not a compaction.
      // Also, pass null for expected block type to avoid checking it.
      HFileBlock block = reader.readBlock(offset, onDiskSize, false, true,
        false, null);
      BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(),
        offset);
      boolean isCached = cache.getBlock(blockCacheKey, true, false) != null;
      boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
      if (shouldBeCached != isCached) {
        throw new AssertionError(
          "shouldBeCached: " + shouldBeCached+ "\n" +
          "isCached: " + isCached + "\n" +
          "Test description: " + testDescription + "\n" +
          "block: " + block + "\n" +
          "blockCacheKey: " + blockCacheKey);
      }
      prevBlock = block;
      offset += block.getOnDiskSizeWithHeader();
    }
  } finally {
    reader.close();
  }
}
Project: hindex    File: TestCacheOnWriteInSchema.java
private void readStoreFile(Path path) throws IOException {
  CacheConfig cacheConf = store.getCacheConfig(); 
  BlockCache cache = cacheConf.getBlockCache();
  StoreFile sf = new StoreFile(fs, path, conf, cacheConf,
      BloomType.ROWCOL, null);
  store.passSchemaMetricsTo(sf);
  HFileReaderV2 reader = (HFileReaderV2) sf.createReader().getHFileReader();
  try {
    // Open a scanner with (on read) caching disabled
    HFileScanner scanner = reader.getScanner(false, false);
    assertTrue(testDescription, scanner.seekTo());
    // Cribbed from io.hfile.TestCacheOnWrite
    long offset = 0;
    HFileBlock prevBlock = null;
    while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
      long onDiskSize = -1;
      if (prevBlock != null) {
        onDiskSize = prevBlock.getNextBlockOnDiskSizeWithHeader();
      }
      // Flags: don't cache the block, use pread, this is not a compaction.
      // Also, pass null for expected block type to avoid checking it.
      HFileBlock block = reader.readBlock(offset, onDiskSize, false, true,
        false, null);
      BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(),
        offset);
      boolean isCached = cache.getBlock(blockCacheKey, true, false) != null;
      boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
      if (shouldBeCached != isCached) {
        throw new AssertionError(
          "shouldBeCached: " + shouldBeCached+ "\n" +
          "isCached: " + isCached + "\n" +
          "Test description: " + testDescription + "\n" +
          "block: " + block + "\n" +
          "blockCacheKey: " + blockCacheKey);
      }
      prevBlock = block;
      offset += block.getOnDiskSizeWithHeader();
    }
  } finally {
    reader.close();
  }
}