Example source code for the Java class org.apache.hadoop.hbase.io.HeapSize
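
All of the snippets below share one pattern: an object (a block cache, an HFileBlock, or a Cell) is cast to org.apache.hadoop.hbase.io.HeapSize and its heapSize() method is queried to learn roughly how much on-heap memory the object occupies. The interface declares only that single method. Below is a minimal sketch of an implementing class; the class name CachedEntry and the size constants are illustrative assumptions, not values HBase itself would report (real HBase code derives such estimates with org.apache.hadoop.hbase.util.ClassSize).

import org.apache.hadoop.hbase.io.HeapSize;

// Minimal sketch: a cache entry that reports an approximate on-heap footprint.
// The constants below are rough, assumed values (object/array headers, reference
// size); production HBase code computes equivalents via ClassSize.
public class CachedEntry implements HeapSize {

  private static final long OBJECT_HEADER = 16L;   // assumed JVM object header size
  private static final long REFERENCE_SIZE = 8L;   // assumed reference size
  private static final long ARRAY_HEADER = 16L;    // assumed byte[] header size

  private final byte[] payload;

  public CachedEntry(byte[] payload) {
    this.payload = payload;
  }

  @Override
  public long heapSize() {
    // Shallow object + one reference + the backing array's header and contents.
    return OBJECT_HEADER + REFERENCE_SIZE + ARRAY_HEADER + payload.length;
  }
}

A cache holding such entries can sum heapSize() over its contents, which is exactly what the testHeapSizeChanges utilities below verify: caching a block must raise the cache's reported heap size, and evicting it must restore the previous value.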

Project: ditb    File: CacheTestUtils.java
/**
 * Just checks if heapsize grows when something is cached, and gets smaller
 * when the same object is evicted
 */

public static void testHeapSizeChanges(final BlockCache toBeTested,
    final int blockSize) {
  HFileBlockPair[] blocks = generateHFileBlocks(blockSize, 1);
  long heapSize = ((HeapSize) toBeTested).heapSize();
  toBeTested.cacheBlock(blocks[0].blockName, blocks[0].block);

  /*When we cache something HeapSize should always increase */
  assertTrue(heapSize < ((HeapSize) toBeTested).heapSize());

  toBeTested.evictBlock(blocks[0].blockName);

  /*Post eviction, heapsize should be the same */
  assertEquals(heapSize, ((HeapSize) toBeTested).heapSize());
}
Project: ditb    File: TestHFileDataBlockEncoder.java
private void testEncodingWithCacheInternals(boolean useTag) throws IOException {
  List<KeyValue> kvs = generator.generateTestKeyValues(60, useTag);
  HFileBlock block = getSampleHFileBlock(kvs, useTag);
  HFileBlock cacheBlock = createBlockOnDisk(kvs, block, useTag);

  LruBlockCache blockCache =
      new LruBlockCache(8 * 1024 * 1024, 32 * 1024);
  BlockCacheKey cacheKey = new BlockCacheKey("test", 0);
  blockCache.cacheBlock(cacheKey, cacheBlock);

  HeapSize heapSize = blockCache.getBlock(cacheKey, false, false, true);
  assertTrue(heapSize instanceof HFileBlock);

  HFileBlock returnedBlock = (HFileBlock) heapSize;

  if (blockEncoder.getDataBlockEncoding() ==
      DataBlockEncoding.NONE) {
    assertEquals(block.getBufferWithHeader(),
        returnedBlock.getBufferWithHeader());
  } else {
    if (BlockType.ENCODED_DATA != returnedBlock.getBlockType()) {
      System.out.println(blockEncoder);
    }
    assertEquals(BlockType.ENCODED_DATA, returnedBlock.getBlockType());
  }
}
Project: LCIndex-HBase-0.94.16    File: CacheTestUtils.java
/**
 * Just checks if heapsize grows when something is cached, and gets smaller
 * when the same object is evicted
 */

public static void testHeapSizeChanges(final BlockCache toBeTested,
    final int blockSize) {
  HFileBlockPair[] blocks = generateHFileBlocks(blockSize, 1);
  long heapSize = ((HeapSize) toBeTested).heapSize();
  toBeTested.cacheBlock(blocks[0].blockName, blocks[0].block);

  /*When we cache something HeapSize should always increase */
  assertTrue(heapSize < ((HeapSize) toBeTested).heapSize());

  toBeTested.evictBlock(blocks[0].blockName);

  /*Post eviction, heapsize should be the same */
  assertEquals(heapSize, ((HeapSize) toBeTested).heapSize());
}
Project: LCIndex-HBase-0.94.16    File: TestHFileDataBlockEncoder.java
/**
 * Test putting and taking out blocks into cache with different
 * encoding options.
 */
@Test
public void testEncodingWithCache() {
  HFileBlock block = getSampleHFileBlock();
  LruBlockCache blockCache =
      new LruBlockCache(8 * 1024 * 1024, 32 * 1024, TEST_UTIL.getConfiguration());
  HFileBlock cacheBlock = blockEncoder.diskToCacheFormat(block, false);
  BlockCacheKey cacheKey = new BlockCacheKey("test", 0);
  blockCache.cacheBlock(cacheKey, cacheBlock);

  HeapSize heapSize = blockCache.getBlock(cacheKey, false, false);
  assertTrue(heapSize instanceof HFileBlock);

  HFileBlock returnedBlock = (HFileBlock) heapSize;

  if (blockEncoder.getEncodingInCache() ==
      DataBlockEncoding.NONE) {
    assertEquals(block.getBufferWithHeader(),
        returnedBlock.getBufferWithHeader());
  } else {
    if (BlockType.ENCODED_DATA != returnedBlock.getBlockType()) {
      System.out.println(blockEncoder);
    }
    assertEquals(BlockType.ENCODED_DATA, returnedBlock.getBlockType());
  }
}
Project: pbase    File: CacheTestUtils.java
/**
 * Just checks if heapsize grows when something is cached, and gets smaller
 * when the same object is evicted
 */

public static void testHeapSizeChanges(final BlockCache toBeTested,
    final int blockSize) {
  HFileBlockPair[] blocks = generateHFileBlocks(blockSize, 1);
  long heapSize = ((HeapSize) toBeTested).heapSize();
  toBeTested.cacheBlock(blocks[0].blockName, blocks[0].block);

  /*When we cache something HeapSize should always increase */
  assertTrue(heapSize < ((HeapSize) toBeTested).heapSize());

  toBeTested.evictBlock(blocks[0].blockName);

  /*Post eviction, heapsize should be the same */
  assertEquals(heapSize, ((HeapSize) toBeTested).heapSize());
}
Project: pbase    File: TestHFileDataBlockEncoder.java
private void testEncodingWithCacheInternals(boolean useTag) throws IOException {
  List<KeyValue> kvs = generator.generateTestKeyValues(60, useTag);
  HFileBlock block = getSampleHFileBlock(kvs, useTag);
  HFileBlock cacheBlock = createBlockOnDisk(kvs, block, useTag);

  LruBlockCache blockCache =
      new LruBlockCache(8 * 1024 * 1024, 32 * 1024);
  BlockCacheKey cacheKey = new BlockCacheKey("test", 0);
  blockCache.cacheBlock(cacheKey, cacheBlock);

  HeapSize heapSize = blockCache.getBlock(cacheKey, false, false, true);
  assertTrue(heapSize instanceof HFileBlock);

  HFileBlock returnedBlock = (HFileBlock) heapSize;

  if (blockEncoder.getDataBlockEncoding() ==
      DataBlockEncoding.NONE) {
    assertEquals(block.getBufferWithHeader(),
        returnedBlock.getBufferWithHeader());
  } else {
    if (BlockType.ENCODED_DATA != returnedBlock.getBlockType()) {
      System.out.println(blockEncoder);
    }
    assertEquals(BlockType.ENCODED_DATA, returnedBlock.getBlockType());
  }
}
Project: HIndex    File: CacheTestUtils.java
/**
 * Just checks if heapsize grows when something is cached, and gets smaller
 * when the same object is evicted
 */

public static void testHeapSizeChanges(final BlockCache toBeTested,
    final int blockSize) {
  HFileBlockPair[] blocks = generateHFileBlocks(blockSize, 1);
  long heapSize = ((HeapSize) toBeTested).heapSize();
  toBeTested.cacheBlock(blocks[0].blockName, blocks[0].block);

  /*When we cache something HeapSize should always increase */
  assertTrue(heapSize < ((HeapSize) toBeTested).heapSize());

  toBeTested.evictBlock(blocks[0].blockName);

  /*Post eviction, heapsize should be the same */
  assertEquals(heapSize, ((HeapSize) toBeTested).heapSize());
}
Project: HIndex    File: TestHFileDataBlockEncoder.java
private void testEncodingWithCacheInternals(boolean useTag) throws IOException {
  HFileBlock block = getSampleHFileBlock(useTag);
  HFileBlock cacheBlock = createBlockOnDisk(block, useTag);

  LruBlockCache blockCache =
      new LruBlockCache(8 * 1024 * 1024, 32 * 1024);
  BlockCacheKey cacheKey = new BlockCacheKey("test", 0);
  blockCache.cacheBlock(cacheKey, cacheBlock);

  HeapSize heapSize = blockCache.getBlock(cacheKey, false, false, true);
  assertTrue(heapSize instanceof HFileBlock);

  HFileBlock returnedBlock = (HFileBlock) heapSize;

  if (blockEncoder.getDataBlockEncoding() ==
      DataBlockEncoding.NONE) {
    assertEquals(block.getBufferWithHeader(),
        returnedBlock.getBufferWithHeader());
  } else {
    if (BlockType.ENCODED_DATA != returnedBlock.getBlockType()) {
      System.out.println(blockEncoder);
    }
    assertEquals(BlockType.ENCODED_DATA, returnedBlock.getBlockType());
  }
}
Project: IRIndex    File: CacheTestUtils.java
/**
 * Just checks if heapsize grows when something is cached, and gets smaller
 * when the same object is evicted
 */

public static void testHeapSizeChanges(final BlockCache toBeTested,
    final int blockSize) {
  HFileBlockPair[] blocks = generateHFileBlocks(blockSize, 1);
  long heapSize = ((HeapSize) toBeTested).heapSize();
  toBeTested.cacheBlock(blocks[0].blockName, blocks[0].block);

  /*When we cache something HeapSize should always increase */
  assertTrue(heapSize < ((HeapSize) toBeTested).heapSize());

  toBeTested.evictBlock(blocks[0].blockName);

  /*Post eviction, heapsize should be the same */
  assertEquals(heapSize, ((HeapSize) toBeTested).heapSize());
}
Project: IRIndex    File: TestHFileDataBlockEncoder.java
/**
 * Test putting and taking out blocks into cache with different
 * encoding options.
 */
@Test
public void testEncodingWithCache() {
  HFileBlock block = getSampleHFileBlock();
  LruBlockCache blockCache =
      new LruBlockCache(8 * 1024 * 1024, 32 * 1024, TEST_UTIL.getConfiguration());
  HFileBlock cacheBlock = blockEncoder.diskToCacheFormat(block, false);
  BlockCacheKey cacheKey = new BlockCacheKey("test", 0);
  blockCache.cacheBlock(cacheKey, cacheBlock);

  HeapSize heapSize = blockCache.getBlock(cacheKey, false, false);
  assertTrue(heapSize instanceof HFileBlock);

  HFileBlock returnedBlock = (HFileBlock) heapSize;

  if (blockEncoder.getEncodingInCache() ==
      DataBlockEncoding.NONE) {
    assertEquals(block.getBufferWithHeader(),
        returnedBlock.getBufferWithHeader());
  } else {
    if (BlockType.ENCODED_DATA != returnedBlock.getBlockType()) {
      System.out.println(blockEncoder);
    }
    assertEquals(BlockType.ENCODED_DATA, returnedBlock.getBlockType());
  }
}
Project: hbase    File: TestTagRewriteCell.java
@Test
public void testHeapSize() {
  Cell originalCell = CellUtil.createCell(Bytes.toBytes("row"), Bytes.toBytes("value"));
  final int fakeTagArrayLength = 10;
  Cell trCell = PrivateCellUtil.createCell(originalCell, new byte[fakeTagArrayLength]);

  // Get the heapSize before the internal tags array in trCell is nuked
  long trCellHeapSize = ((HeapSize)trCell).heapSize();

  // Make another TagRewriteCell with the original TagRewriteCell
  // This happens on systems with more than one RegionObserver/Coproc loaded (such as
  // VisibilityController and AccessController)
  Cell trCell2 = PrivateCellUtil.createCell(trCell, new byte[fakeTagArrayLength]);

  assertTrue("TagRewriteCell containing a TagRewriteCell's heapsize should be larger than a " +
      "single TagRewriteCell's heapsize", trCellHeapSize < ((HeapSize)trCell2).heapSize());
  assertTrue("TagRewriteCell should have had nulled out tags array", ((HeapSize)trCell).heapSize() <
      trCellHeapSize);
}
Project: hbase    File: CacheTestUtils.java
/**
 * Just checks if heapsize grows when something is cached, and gets smaller
 * when the same object is evicted
 */

public static void testHeapSizeChanges(final BlockCache toBeTested,
    final int blockSize) {
  HFileBlockPair[] blocks = generateHFileBlocks(blockSize, 1);
  long heapSize = ((HeapSize) toBeTested).heapSize();
  toBeTested.cacheBlock(blocks[0].blockName, blocks[0].block);

  /*When we cache something HeapSize should always increase */
  assertTrue(heapSize < ((HeapSize) toBeTested).heapSize());

  toBeTested.evictBlock(blocks[0].blockName);

  /*Post eviction, heapsize should be the same */
  assertEquals(heapSize, ((HeapSize) toBeTested).heapSize());
}
Project: hbase    File: TestHFileDataBlockEncoder.java
private void testEncodingWithCacheInternals(boolean useTag) throws IOException {
  List<KeyValue> kvs = generator.generateTestKeyValues(60, useTag);
  HFileBlock block = getSampleHFileBlock(kvs, useTag);
  HFileBlock cacheBlock = createBlockOnDisk(kvs, block, useTag);

  LruBlockCache blockCache =
      new LruBlockCache(8 * 1024 * 1024, 32 * 1024);
  BlockCacheKey cacheKey = new BlockCacheKey("test", 0);
  blockCache.cacheBlock(cacheKey, cacheBlock);

  HeapSize heapSize = blockCache.getBlock(cacheKey, false, false, true);
  assertTrue(heapSize instanceof HFileBlock);

  HFileBlock returnedBlock = (HFileBlock) heapSize;

  if (blockEncoder.getDataBlockEncoding() ==
      DataBlockEncoding.NONE) {
    assertEquals(block.getBufferReadOnly(), returnedBlock.getBufferReadOnly());
  } else {
    if (BlockType.ENCODED_DATA != returnedBlock.getBlockType()) {
      System.out.println(blockEncoder);
    }
    assertEquals(BlockType.ENCODED_DATA, returnedBlock.getBlockType());
  }
}
Project: PyroDB    File: CacheTestUtils.java
/**
 * Just checks if heapsize grows when something is cached, and gets smaller
 * when the same object is evicted
 */

public static void testHeapSizeChanges(final BlockCache toBeTested,
    final int blockSize) {
  HFileBlockPair[] blocks = generateHFileBlocks(blockSize, 1);
  long heapSize = ((HeapSize) toBeTested).heapSize();
  toBeTested.cacheBlock(blocks[0].blockName, blocks[0].block);

  /*When we cache something HeapSize should always increase */
  assertTrue(heapSize < ((HeapSize) toBeTested).heapSize());

  toBeTested.evictBlock(blocks[0].blockName);

  /*Post eviction, heapsize should be the same */
  assertEquals(heapSize, ((HeapSize) toBeTested).heapSize());
}
Project: PyroDB    File: TestHFileDataBlockEncoder.java
private void testEncodingWithCacheInternals(boolean useTag) throws IOException {
  List<KeyValue> kvs = generator.generateTestKeyValues(60, useTag);
  HFileBlock block = getSampleHFileBlock(kvs, useTag);
  HFileBlock cacheBlock = createBlockOnDisk(kvs, block, useTag);

  LruBlockCache blockCache =
      new LruBlockCache(8 * 1024 * 1024, 32 * 1024);
  BlockCacheKey cacheKey = new BlockCacheKey("test", 0);
  blockCache.cacheBlock(cacheKey, cacheBlock);

  HeapSize heapSize = blockCache.getBlock(cacheKey, false, false, true);
  assertTrue(heapSize instanceof HFileBlock);

  HFileBlock returnedBlock = (HFileBlock) heapSize;

  if (blockEncoder.getDataBlockEncoding() ==
      DataBlockEncoding.NONE) {
    assertEquals(block.getBufferWithHeader(),
        returnedBlock.getBufferWithHeader());
  } else {
    if (BlockType.ENCODED_DATA != returnedBlock.getBlockType()) {
      System.out.println(blockEncoder);
    }
    assertEquals(BlockType.ENCODED_DATA, returnedBlock.getBlockType());
  }
}
Project: c5    File: CacheTestUtils.java
/**
 * Just checks if heapsize grows when something is cached, and gets smaller
 * when the same object is evicted
 */

public static void testHeapSizeChanges(final BlockCache toBeTested,
    final int blockSize) {
  HFileBlockPair[] blocks = generateHFileBlocks(blockSize, 1);
  long heapSize = ((HeapSize) toBeTested).heapSize();
  toBeTested.cacheBlock(blocks[0].blockName, blocks[0].block);

  /*When we cache something HeapSize should always increase */
  assertTrue(heapSize < ((HeapSize) toBeTested).heapSize());

  toBeTested.evictBlock(blocks[0].blockName);

  /*Post eviction, heapsize should be the same */
  assertEquals(heapSize, ((HeapSize) toBeTested).heapSize());
}
Project: c5    File: TestHFileDataBlockEncoder.java
/**
 * Test putting and taking out blocks into cache with different
 * encoding options.
 * @throws IOException 
 */
@Test
public void testEncodingWithCache() throws IOException {
  HFileBlock block = getSampleHFileBlock();
  LruBlockCache blockCache =
      new LruBlockCache(8 * 1024 * 1024, 32 * 1024);
  HFileBlock cacheBlock = createBlockOnDisk(block);
  BlockCacheKey cacheKey = new BlockCacheKey("test", 0);
  blockCache.cacheBlock(cacheKey, cacheBlock);

  HeapSize heapSize = blockCache.getBlock(cacheKey, false, false);
  assertTrue(heapSize instanceof HFileBlock);

  HFileBlock returnedBlock = (HFileBlock) heapSize;

  if (blockEncoder.getDataBlockEncoding() ==
      DataBlockEncoding.NONE) {
    assertEquals(block.getBufferWithHeader(),
        returnedBlock.getBufferWithHeader());
  } else {
    if (BlockType.ENCODED_DATA != returnedBlock.getBlockType()) {
      System.out.println(blockEncoder);
    }
    assertEquals(BlockType.ENCODED_DATA, returnedBlock.getBlockType());
  }
}
Project: HBase-Research    File: CacheTestUtils.java
/**
 * Just checks if heapsize grows when something is cached, and gets smaller
 * when the same object is evicted
 */

public static void testHeapSizeChanges(final BlockCache toBeTested,
    final int blockSize) {
  HFileBlockPair[] blocks = generateHFileBlocks(blockSize, 1);
  long heapSize = ((HeapSize) toBeTested).heapSize();
  toBeTested.cacheBlock(blocks[0].blockName, blocks[0].block);

  /*When we cache something HeapSize should always increase */
  assertTrue(heapSize < ((HeapSize) toBeTested).heapSize());

  toBeTested.evictBlock(blocks[0].blockName);

  /*Post eviction, heapsize should be the same */
  assertEquals(heapSize, ((HeapSize) toBeTested).heapSize());
}
Project: HBase-Research    File: TestHFileDataBlockEncoder.java
/**
 * Test putting and taking out blocks into cache with different
 * encoding options.
 */
@Test
public void testEncodingWithCache() {
  HFileBlock block = getSampleHFileBlock();
  LruBlockCache blockCache =
      new LruBlockCache(8 * 1024 * 1024, 32 * 1024, TEST_UTIL.getConfiguration());
  HFileBlock cacheBlock = blockEncoder.diskToCacheFormat(block, false);
  BlockCacheKey cacheKey = new BlockCacheKey("test", 0);
  blockCache.cacheBlock(cacheKey, cacheBlock);

  HeapSize heapSize = blockCache.getBlock(cacheKey, false, false);
  assertTrue(heapSize instanceof HFileBlock);

  HFileBlock returnedBlock = (HFileBlock) heapSize;

  if (blockEncoder.getEncodingInCache() ==
      DataBlockEncoding.NONE) {
    assertEquals(block.getBufferWithHeader(),
        returnedBlock.getBufferWithHeader());
  } else {
    if (BlockType.ENCODED_DATA != returnedBlock.getBlockType()) {
      System.out.println(blockEncoder);
    }
    assertEquals(BlockType.ENCODED_DATA, returnedBlock.getBlockType());
  }
}
Project: hbase-0.94.8-qod    File: CacheTestUtils.java
/**
 * Just checks if heapsize grows when something is cached, and gets smaller
 * when the same object is evicted
 */

public static void testHeapSizeChanges(final BlockCache toBeTested,
    final int blockSize) {
  HFileBlockPair[] blocks = generateHFileBlocks(blockSize, 1);
  long heapSize = ((HeapSize) toBeTested).heapSize();
  toBeTested.cacheBlock(blocks[0].blockName, blocks[0].block);

  /*When we cache something HeapSize should always increase */
  assertTrue(heapSize < ((HeapSize) toBeTested).heapSize());

  toBeTested.evictBlock(blocks[0].blockName);

  /*Post eviction, heapsize should be the same */
  assertEquals(heapSize, ((HeapSize) toBeTested).heapSize());
}
Project: hbase-0.94.8-qod    File: TestHFileDataBlockEncoder.java
/**
 * Test putting and taking out blocks into cache with different
 * encoding options.
 */
@Test
public void testEncodingWithCache() {
  HFileBlock block = getSampleHFileBlock();
  LruBlockCache blockCache =
      new LruBlockCache(8 * 1024 * 1024, 32 * 1024, TEST_UTIL.getConfiguration());
  HFileBlock cacheBlock = blockEncoder.diskToCacheFormat(block, false);
  BlockCacheKey cacheKey = new BlockCacheKey("test", 0);
  blockCache.cacheBlock(cacheKey, cacheBlock);

  HeapSize heapSize = blockCache.getBlock(cacheKey, false, false);
  assertTrue(heapSize instanceof HFileBlock);

  HFileBlock returnedBlock = (HFileBlock) heapSize;

  if (blockEncoder.getEncodingInCache() ==
      DataBlockEncoding.NONE) {
    assertEquals(block.getBufferWithHeader(),
        returnedBlock.getBufferWithHeader());
  } else {
    if (BlockType.ENCODED_DATA != returnedBlock.getBlockType()) {
      System.out.println(blockEncoder);
    }
    assertEquals(BlockType.ENCODED_DATA, returnedBlock.getBlockType());
  }
}
Project: DominoHBase    File: CacheTestUtils.java
/**
 * Just checks if heapsize grows when something is cached, and gets smaller
 * when the same object is evicted
 */

public static void testHeapSizeChanges(final BlockCache toBeTested,
    final int blockSize) {
  HFileBlockPair[] blocks = generateHFileBlocks(blockSize, 1);
  long heapSize = ((HeapSize) toBeTested).heapSize();
  toBeTested.cacheBlock(blocks[0].blockName, blocks[0].block);

  /*When we cache something HeapSize should always increase */
  assertTrue(heapSize < ((HeapSize) toBeTested).heapSize());

  toBeTested.evictBlock(blocks[0].blockName);

  /*Post eviction, heapsize should be the same */
  assertEquals(heapSize, ((HeapSize) toBeTested).heapSize());
}
Project: DominoHBase    File: TestHFileDataBlockEncoder.java
/**
 * Test putting and taking out blocks into cache with different
 * encoding options.
 */
@Test
public void testEncodingWithCache() {
  HFileBlock block = getSampleHFileBlock();
  LruBlockCache blockCache =
      new LruBlockCache(8 * 1024 * 1024, 32 * 1024);
  HFileBlock cacheBlock = blockEncoder.diskToCacheFormat(block, false);
  BlockCacheKey cacheKey = new BlockCacheKey("test", 0);
  blockCache.cacheBlock(cacheKey, cacheBlock);

  HeapSize heapSize = blockCache.getBlock(cacheKey, false, false);
  assertTrue(heapSize instanceof HFileBlock);

  HFileBlock returnedBlock = (HFileBlock) heapSize;

  if (blockEncoder.getEncodingInCache() ==
      DataBlockEncoding.NONE) {
    assertEquals(block.getBufferWithHeader(),
        returnedBlock.getBufferWithHeader());
  } else {
    if (BlockType.ENCODED_DATA != returnedBlock.getBlockType()) {
      System.out.println(blockEncoder);
    }
    assertEquals(BlockType.ENCODED_DATA, returnedBlock.getBlockType());
  }
}
Project: hindex    File: CacheTestUtils.java
/**
 * Just checks if heapsize grows when something is cached, and gets smaller
 * when the same object is evicted
 */

public static void testHeapSizeChanges(final BlockCache toBeTested,
    final int blockSize) {
  HFileBlockPair[] blocks = generateHFileBlocks(blockSize, 1);
  long heapSize = ((HeapSize) toBeTested).heapSize();
  toBeTested.cacheBlock(blocks[0].blockName, blocks[0].block);

  /*When we cache something HeapSize should always increase */
  assertTrue(heapSize < ((HeapSize) toBeTested).heapSize());

  toBeTested.evictBlock(blocks[0].blockName);

  /*Post eviction, heapsize should be the same */
  assertEquals(heapSize, ((HeapSize) toBeTested).heapSize());
}
Project: hindex    File: TestHFileDataBlockEncoder.java
/**
 * Test putting and taking out blocks into cache with different
 * encoding options.
 */
@Test
public void testEncodingWithCache() {
  HFileBlock block = getSampleHFileBlock();
  LruBlockCache blockCache =
      new LruBlockCache(8 * 1024 * 1024, 32 * 1024, TEST_UTIL.getConfiguration());
  HFileBlock cacheBlock = blockEncoder.diskToCacheFormat(block, false);
  BlockCacheKey cacheKey = new BlockCacheKey("test", 0);
  blockCache.cacheBlock(cacheKey, cacheBlock);

  HeapSize heapSize = blockCache.getBlock(cacheKey, false, false);
  assertTrue(heapSize instanceof HFileBlock);

  HFileBlock returnedBlock = (HFileBlock) heapSize;

  if (blockEncoder.getEncodingInCache() ==
      DataBlockEncoding.NONE) {
    assertEquals(block.getBufferWithHeader(),
        returnedBlock.getBufferWithHeader());
  } else {
    if (BlockType.ENCODED_DATA != returnedBlock.getBlockType()) {
      System.out.println(blockEncoder);
    }
    assertEquals(BlockType.ENCODED_DATA, returnedBlock.getBlockType());
  }
}
Project: ditb    File: CombinedBlockCache.java
@Override
public long heapSize() {
  long l2size = 0;
  if (l2Cache instanceof HeapSize) {
    l2size = ((HeapSize) l2Cache).heapSize();
  }
  return lruCache.heapSize() + l2size;
}
Project: ditb    File: TestAtomicOperation.java
/**
 * Test written as a verifier for HBASE-7051, CheckAndPut should properly read
 * MVCC.
 *
 * Moved into TestAtomicOperation from its original location, TestHBase7051
 */
@Test
public void testPutAndCheckAndPutInParallel() throws Exception {

  final String tableName = "testPutAndCheckAndPut";
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setClass(HConstants.REGION_IMPL, MockHRegion.class, HeapSize.class);
  final MockHRegion region = (MockHRegion) TEST_UTIL.createLocalHRegion(Bytes.toBytes(tableName),
      null, null, tableName, conf, false, Durability.SYNC_WAL, null, Bytes.toBytes(family));

  Put[] puts = new Put[1];
  Put put = new Put(Bytes.toBytes("r1"));
  put.add(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("10"));
  puts[0] = put;

  region.batchMutate(puts, HConstants.NO_NONCE, HConstants.NO_NONCE);
  MultithreadedTestUtil.TestContext ctx =
    new MultithreadedTestUtil.TestContext(conf);
  ctx.addThread(new PutThread(ctx, region));
  ctx.addThread(new CheckAndPutThread(ctx, region));
  ctx.startThreads();
  while (testStep != TestStep.CHECKANDPUT_COMPLETED) {
    Thread.sleep(100);
  }
  ctx.stop();
  Scan s = new Scan();
  RegionScanner scanner = region.getScanner(s);
  List<Cell> results = new ArrayList<Cell>();
  ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(2).build();
  scanner.next(results, scannerContext);
  for (Cell keyValue : results) {
    assertEquals("50",Bytes.toString(CellUtil.cloneValue(keyValue)));
  }
}
Project: LCIndex-HBase-0.94.16    File: TestBatchHRegionLockingAndWrites.java
@Test
@SuppressWarnings("unchecked")
public void testRedundantRowKeys() throws Exception {

  final int batchSize = 100000;

  String tableName = getClass().getSimpleName();
  Configuration conf = HBaseConfiguration.create();
  conf.setClass(HConstants.REGION_IMPL, MockHRegion.class, HeapSize.class);
  MockHRegion region = (MockHRegion) TestHRegion.initHRegion(Bytes.toBytes(tableName), tableName, conf, Bytes.toBytes("a"));

  List<Pair<Mutation, Integer>> someBatch = Lists.newArrayList();
  int i = 0;
  while (i < batchSize) {
    if (i % 2 == 0) {
      someBatch.add(new Pair<Mutation, Integer>(new Put(Bytes.toBytes(0)), null));
    } else {
      someBatch.add(new Pair<Mutation, Integer>(new Put(Bytes.toBytes(1)), null));
    }
    i++;
  }
  long start = System.nanoTime();
  region.batchMutate(someBatch.toArray(new Pair[0]));
  long duration = System.nanoTime() - start;
  System.out.println("Batch mutate took: " + duration + "ns");
  assertEquals(2, region.getAcquiredLockCount());
}
Project: LCIndex-HBase-0.94.16    File: TestHBase7051.java
@Test
public void testPutAndCheckAndPutInParallel() throws Exception {

  final String tableName = "testPutAndCheckAndPut";
  Configuration conf = HBaseConfiguration.create();
  conf.setClass(HConstants.REGION_IMPL, MockHRegion.class, HeapSize.class);
  final MockHRegion region = (MockHRegion) TestHRegion.initHRegion(Bytes.toBytes(tableName),
      tableName, conf, Bytes.toBytes(family));

  List<Pair<Mutation, Integer>> putsAndLocks = Lists.newArrayList();
  Put[] puts = new Put[1];
  Put put = new Put(Bytes.toBytes("r1"));
  put.add(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("10"));
  puts[0] = put;
  Pair<Mutation, Integer> pair = new Pair<Mutation, Integer>(puts[0], null);

  putsAndLocks.add(pair);

  region.batchMutate(putsAndLocks.toArray(new Pair[0]));
  MultithreadedTestUtil.TestContext ctx =
    new MultithreadedTestUtil.TestContext(conf);
  ctx.addThread(new PutThread(ctx, region));
  ctx.addThread(new CheckAndPutThread(ctx, region));
  ctx.startThreads();
  while (testStep != TestStep.CHECKANDPUT_COMPLETED) {
    Thread.sleep(100);
  }
  ctx.stop();
  Scan s = new Scan();
  RegionScanner scanner = region.getScanner(s);
  List<KeyValue> results = new ArrayList<KeyValue>();
  scanner.next(results, 2);
  for (KeyValue keyValue : results) {
    assertEquals("50",Bytes.toString(keyValue.getValue()));
  }

}
Project: pbase    File: TestAtomicOperation.java
/**
 * Test written as a verifier for HBASE-7051, CheckAndPut should properly read
 * MVCC. 
 * 
 * Moved into TestAtomicOperation from its original location, TestHBase7051
 */
@Test
public void testPutAndCheckAndPutInParallel() throws Exception {

  final String tableName = "testPutAndCheckAndPut";
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setClass(HConstants.REGION_IMPL, MockHRegion.class, HeapSize.class);
  final MockHRegion region = (MockHRegion) TEST_UTIL.createLocalHRegion(Bytes.toBytes(tableName),
      null, null, tableName, conf, false, Durability.SYNC_WAL, null, Bytes.toBytes(family));

  Put[] puts = new Put[1];
  Put put = new Put(Bytes.toBytes("r1"));
  put.add(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("10"));
  puts[0] = put;

  region.batchMutate(puts);
  MultithreadedTestUtil.TestContext ctx =
    new MultithreadedTestUtil.TestContext(conf);
  ctx.addThread(new PutThread(ctx, region));
  ctx.addThread(new CheckAndPutThread(ctx, region));
  ctx.startThreads();
  while (testStep != TestStep.CHECKANDPUT_COMPLETED) {
    Thread.sleep(100);
  }
  ctx.stop();
  Scan s = new Scan();
  RegionScanner scanner = region.getScanner(s);
  List<Cell> results = new ArrayList<Cell>();
  scanner.next(results, 2);
  for (Cell keyValue : results) {
    assertEquals("50",Bytes.toString(CellUtil.cloneValue(keyValue)));
  }

}
Project: HIndex    File: TestAtomicOperation.java
/**
 * Test written as a verifier for HBASE-7051, CheckAndPut should properly read
 * MVCC. 
 * 
 * Moved into TestAtomicOperation from its original location, TestHBase7051
 */
@Test
public void testPutAndCheckAndPutInParallel() throws Exception {

  final String tableName = "testPutAndCheckAndPut";
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setClass(HConstants.REGION_IMPL, MockHRegion.class, HeapSize.class);
  final MockHRegion region = (MockHRegion) TEST_UTIL.createLocalHRegion(Bytes.toBytes(tableName),
      null, null, tableName, conf, false, Durability.SYNC_WAL, null, Bytes.toBytes(family));

  Put[] puts = new Put[1];
  Put put = new Put(Bytes.toBytes("r1"));
  put.add(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("10"));
  puts[0] = put;

  region.batchMutate(puts);
  MultithreadedTestUtil.TestContext ctx =
    new MultithreadedTestUtil.TestContext(conf);
  ctx.addThread(new PutThread(ctx, region));
  ctx.addThread(new CheckAndPutThread(ctx, region));
  ctx.startThreads();
  while (testStep != TestStep.CHECKANDPUT_COMPLETED) {
    Thread.sleep(100);
  }
  ctx.stop();
  Scan s = new Scan();
  RegionScanner scanner = region.getScanner(s);
  List<Cell> results = new ArrayList<Cell>();
  scanner.next(results, 2);
  for (Cell keyValue : results) {
    assertEquals("50",Bytes.toString(CellUtil.cloneValue(keyValue)));
  }

}
Project: IRIndex    File: TestBatchHRegionLockingAndWrites.java
@Test
@SuppressWarnings("unchecked")
public void testRedundantRowKeys() throws Exception {

  final int batchSize = 100000;

  String tableName = getClass().getSimpleName();
  Configuration conf = HBaseConfiguration.create();
  conf.setClass(HConstants.REGION_IMPL, MockHRegion.class, HeapSize.class);
  MockHRegion region = (MockHRegion) TestHRegion.initHRegion(Bytes.toBytes(tableName), tableName, conf, Bytes.toBytes("a"));

  List<Pair<Mutation, Integer>> someBatch = Lists.newArrayList();
  int i = 0;
  while (i < batchSize) {
    if (i % 2 == 0) {
      someBatch.add(new Pair<Mutation, Integer>(new Put(Bytes.toBytes(0)), null));
    } else {
      someBatch.add(new Pair<Mutation, Integer>(new Put(Bytes.toBytes(1)), null));
    }
    i++;
  }
  long start = System.nanoTime();
  region.batchMutate(someBatch.toArray(new Pair[0]));
  long duration = System.nanoTime() - start;
  System.out.println("Batch mutate took: " + duration + "ns");
  assertEquals(2, region.getAcquiredLockCount());
}
Project: IRIndex    File: TestHBase7051.java
@Test
public void testPutAndCheckAndPutInParallel() throws Exception {

  final String tableName = "testPutAndCheckAndPut";
  Configuration conf = HBaseConfiguration.create();
  conf.setClass(HConstants.REGION_IMPL, MockHRegion.class, HeapSize.class);
  final MockHRegion region = (MockHRegion) TestHRegion.initHRegion(Bytes.toBytes(tableName),
      tableName, conf, Bytes.toBytes(family));

  List<Pair<Mutation, Integer>> putsAndLocks = Lists.newArrayList();
  Put[] puts = new Put[1];
  Put put = new Put(Bytes.toBytes("r1"));
  put.add(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("10"));
  puts[0] = put;
  Pair<Mutation, Integer> pair = new Pair<Mutation, Integer>(puts[0], null);

  putsAndLocks.add(pair);

  region.batchMutate(putsAndLocks.toArray(new Pair[0]));
  MultithreadedTestUtil.TestContext ctx =
    new MultithreadedTestUtil.TestContext(conf);
  ctx.addThread(new PutThread(ctx, region));
  ctx.addThread(new CheckAndPutThread(ctx, region));
  ctx.startThreads();
  while (testStep != TestStep.CHECKANDPUT_COMPLETED) {
    Thread.sleep(100);
  }
  ctx.stop();
  Scan s = new Scan();
  RegionScanner scanner = region.getScanner(s);
  List<KeyValue> results = new ArrayList<KeyValue>();
  scanner.next(results, 2);
  for (KeyValue keyValue : results) {
    assertEquals("50",Bytes.toString(keyValue.getValue()));
  }

}
Project: hbase    File: CombinedBlockCache.java
@Override
public long heapSize() {
  long l2size = 0;
  if (l2Cache instanceof HeapSize) {
    l2size = ((HeapSize) l2Cache).heapSize();
  }
  return onHeapCache.heapSize() + l2size;
}
Project: hbase    File: TestAtomicOperation.java
/**
 * Test written as a verifier for HBASE-7051, CheckAndPut should properly read
 * MVCC.
 *
 * Moved into TestAtomicOperation from its original location, TestHBase7051
 */
@Test
public void testPutAndCheckAndPutInParallel() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setClass(HConstants.REGION_IMPL, MockHRegion.class, HeapSize.class);
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()))
      .addFamily(new HColumnDescriptor(family));
  this.region = TEST_UTIL.createLocalHRegion(htd, null, null);
  Put[] puts = new Put[1];
  Put put = new Put(Bytes.toBytes("r1"));
  put.addColumn(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("10"));
  puts[0] = put;

  region.batchMutate(puts, HConstants.NO_NONCE, HConstants.NO_NONCE);
  MultithreadedTestUtil.TestContext ctx =
    new MultithreadedTestUtil.TestContext(conf);
  ctx.addThread(new PutThread(ctx, region));
  ctx.addThread(new CheckAndPutThread(ctx, region));
  ctx.startThreads();
  while (testStep != TestStep.CHECKANDPUT_COMPLETED) {
    Thread.sleep(100);
  }
  ctx.stop();
  Scan s = new Scan();
  RegionScanner scanner = region.getScanner(s);
  List<Cell> results = new ArrayList<>();
  ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(2).build();
  scanner.next(results, scannerContext);
  for (Cell keyValue : results) {
    assertEquals("50",Bytes.toString(CellUtil.cloneValue(keyValue)));
  }
}
Project: PyroDB    File: TestAtomicOperation.java
/**
 * Test written as a verifier for HBASE-7051, CheckAndPut should properly read
 * MVCC. 
 * 
 * Moved into TestAtomicOperation from its original location, TestHBase7051
 */
@Test
public void testPutAndCheckAndPutInParallel() throws Exception {

  final String tableName = "testPutAndCheckAndPut";
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setClass(HConstants.REGION_IMPL, MockHRegion.class, HeapSize.class);
  final MockHRegion region = (MockHRegion) TEST_UTIL.createLocalHRegion(Bytes.toBytes(tableName),
      null, null, tableName, conf, false, Durability.SYNC_WAL, null, Bytes.toBytes(family));

  Put[] puts = new Put[1];
  Put put = new Put(Bytes.toBytes("r1"));
  put.add(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("10"));
  puts[0] = put;

  region.batchMutate(puts);
  MultithreadedTestUtil.TestContext ctx =
    new MultithreadedTestUtil.TestContext(conf);
  ctx.addThread(new PutThread(ctx, region));
  ctx.addThread(new CheckAndPutThread(ctx, region));
  ctx.startThreads();
  while (testStep != TestStep.CHECKANDPUT_COMPLETED) {
    Thread.sleep(100);
  }
  ctx.stop();
  Scan s = new Scan();
  RegionScanner scanner = region.getScanner(s);
  List<Cell> results = new ArrayList<Cell>();
  scanner.next(results, 2);
  for (Cell keyValue : results) {
    assertEquals("50",Bytes.toString(CellUtil.cloneValue(keyValue)));
  }

}
Project: c5    File: TestAtomicOperation.java
/**
 * Test written as a verifier for HBASE-7051, CheckAndPut should properly read
 * MVCC. 
 * 
 * Moved into TestAtomicOperation from its original location, TestHBase7051
 */
@Test
public void testPutAndCheckAndPutInParallel() throws Exception {

  final String tableName = "testPutAndCheckAndPut";
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setClass(HConstants.REGION_IMPL, MockHRegion.class, HeapSize.class);
  final MockHRegion region = (MockHRegion) TEST_UTIL.createLocalHRegion(Bytes.toBytes(tableName),
      null, null, tableName, conf, false, Durability.SYNC_WAL, null, Bytes.toBytes(family));

  Put[] puts = new Put[1];
  Put put = new Put(Bytes.toBytes("r1"));
  put.add(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("10"));
  puts[0] = put;

  region.batchMutate(puts);
  MultithreadedTestUtil.TestContext ctx =
    new MultithreadedTestUtil.TestContext(conf);
  ctx.addThread(new PutThread(ctx, region));
  ctx.addThread(new CheckAndPutThread(ctx, region));
  ctx.startThreads();
  while (testStep != TestStep.CHECKANDPUT_COMPLETED) {
    Thread.sleep(100);
  }
  ctx.stop();
  Scan s = new Scan();
  RegionScanner scanner = region.getScanner(s);
  List<Cell> results = new ArrayList<Cell>();
  scanner.next(results, 2);
  for (Cell keyValue : results) {
    assertEquals("50",Bytes.toString(CellUtil.cloneValue(keyValue)));
  }

}
Project: HBase-Research    File: TestHBase7051.java
@Test
public void testPutAndCheckAndPutInParallel() throws Exception {

  final String tableName = "testPutAndCheckAndPut";
  Configuration conf = HBaseConfiguration.create();
  conf.setClass(HConstants.REGION_IMPL, MockHRegion.class, HeapSize.class);
  final MockHRegion region = (MockHRegion) TestHRegion.initHRegion(Bytes.toBytes(tableName),
      tableName, conf, Bytes.toBytes(family));

  List<Pair<Mutation, Integer>> putsAndLocks = Lists.newArrayList();
  Put[] puts = new Put[1];
  Put put = new Put(Bytes.toBytes("r1"));
  put.add(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("10"));
  puts[0] = put;
  Pair<Mutation, Integer> pair = new Pair<Mutation, Integer>(puts[0], null);

  putsAndLocks.add(pair);

  region.batchMutate(putsAndLocks.toArray(new Pair[0]));
  MultithreadedTestUtil.TestContext ctx =
    new MultithreadedTestUtil.TestContext(conf);
  ctx.addThread(new PutThread(ctx, region));
  ctx.addThread(new CheckAndPutThread(ctx, region));
  ctx.startThreads();
  while (testStep != TestStep.CHECKANDPUT_COMPLETED) {
    Thread.sleep(100);
  }
  ctx.stop();
  Scan s = new Scan();
  RegionScanner scanner = region.getScanner(s);
  List<KeyValue> results = new ArrayList<KeyValue>();
  scanner.next(results, 2);
  for (KeyValue keyValue : results) {
    assertEquals("50",Bytes.toString(keyValue.getValue()));
  }

}