Java code examples for class org.apache.hadoop.hbase.io.hfile.BlockCache
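
The snippets below are collected from open-source projects and show common ways to obtain a BlockCache, inspect it, and shut it down. As orientation, here is a minimal standalone probe; this is a sketch assuming a pre-2.0 HBase classpath, where CacheConfig still exposes the process-wide cache, and the class name BlockCacheProbe is our own:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;

public class BlockCacheProbe {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Pre-2.0 API: CacheConfig hands out the global block cache instance.
    BlockCache bc = new CacheConfig(conf).getBlockCache();
    if (bc != null) {
      System.out.println("cached blocks: " + bc.getBlockCount());
      bc.shutdown(); // release cache resources, as the HRegion tool snippets below do
    }
  }
}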

Project: gemfirexd-oss    File: SortedOplogFactory.java
public SortedOplogConfiguration(String name, BlockCache blockCache, SortedOplogStatistics stats, HFileStoreStatistics storeStats) {
  this.name = name;
  this.stats = stats;

  // defaults
  bloom = true;
  blockSize = 1 << 16;        // 64 KB data blocks
  bytesPerChecksum = 1 << 14; // checksum every 16 KB of data
  checksum = Checksum.NONE;
  compression = Compression.NONE;
  keyEncoding = KeyEncoding.NONE;
  comparator = new ByteComparator();
  this.cacheDataBlocksOnRead = true;
  this.storeStats = storeStats;
  this.blockCache = blockCache;
}
Project: hbase    File: TestClearRegionBlockCache.java
@Test
public void testClearBlockCache() throws Exception {
  BlockCache blockCache1 = rs1.getCacheConfig().getBlockCache();
  BlockCache blockCache2 = rs2.getCacheConfig().getBlockCache();

  long initialBlockCount1 = blockCache1.getBlockCount();
  long initialBlockCount2 = blockCache2.getBlockCount();

  // scan will cause blocks to be added to the BlockCache
  scanAllRegionsForRS(rs1);
  assertEquals(blockCache1.getBlockCount() - initialBlockCount1,
    HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY));
  clearRegionBlockCache(rs1);

  scanAllRegionsForRS(rs2);
  assertEquals(blockCache2.getBlockCount() - initialBlockCount2,
    HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY));
  clearRegionBlockCache(rs2);

  assertEquals(initialBlockCount1, blockCache1.getBlockCount());
  assertEquals(initialBlockCount2, blockCache2.getBlockCount());
}
Project: hbase    File: TestClearRegionBlockCache.java
@Test
public void testClearBlockCacheFromAdmin() throws Exception {
  Admin admin = HTU.getAdmin();

  // All RSes run in the same process, so the block cache is the same for rs1 and rs2
  BlockCache blockCache = rs1.getCacheConfig().getBlockCache();
  long initialBlockCount = blockCache.getBlockCount();

  // scan will cause blocks to be added to the BlockCache
  scanAllRegionsForRS(rs1);
  assertEquals(blockCache.getBlockCount() - initialBlockCount,
    HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY));
  scanAllRegionsForRS(rs2);
  assertEquals(blockCache.getBlockCount() - initialBlockCount,
    HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY)
        + HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY));

  CacheEvictionStats stats = admin.clearBlockCache(TABLE_NAME);
  assertEquals(stats.getEvictedBlocks(), HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY)
      + HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY));
  assertEquals(initialBlockCount, blockCache.getBlockCount());
}
Project: hbase    File: TestClearRegionBlockCache.java
@Test
public void testClearBlockCacheFromAsyncAdmin() throws Exception {
  AsyncAdmin admin =
      ConnectionFactory.createAsyncConnection(HTU.getConfiguration()).get().getAdmin();

  // All RSes run in the same process, so the block cache is the same for rs1 and rs2
  BlockCache blockCache = rs1.getCacheConfig().getBlockCache();
  long initialBlockCount = blockCache.getBlockCount();

  // scan will cause blocks to be added to the BlockCache
  scanAllRegionsForRS(rs1);
  assertEquals(blockCache.getBlockCount() - initialBlockCount,
    HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY));
  scanAllRegionsForRS(rs2);
  assertEquals(blockCache.getBlockCount() - initialBlockCount,
    HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY)
        + HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY));

  CacheEvictionStats stats = admin.clearBlockCache(TABLE_NAME).get();
  assertEquals(stats.getEvictedBlocks(), HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY)
      + HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY));
  assertEquals(initialBlockCount, blockCache.getBlockCount());
}
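
For reference outside a test harness, the same eviction can be requested through a plain client. This is a sketch assuming an HBase 2.x client, where Admin.clearBlockCache is available; the table name "demo_table" and the class name are placeholders:

import org.apache.hadoop.hbase.CacheEvictionStats;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClearTableBlockCache {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Evict every cached block belonging to the table's store files.
      CacheEvictionStats stats = admin.clearBlockCache(TableName.valueOf("demo_table"));
      System.out.println("evicted blocks: " + stats.getEvictedBlocks());
    }
  }
}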
Project: bigbase    File: BlockCacheSimpleTest.java
/**
 * Test create cache.
 */
public void testCreateCache()
{
    LOG.info("Test create cache started");

    try{
        Class<?> cls = Class.forName("com.koda.integ.hbase.blockcache.OffHeapBlockCache");

        Constructor<?> ctr = cls.getDeclaredConstructor(Configuration.class );
        cache = (BlockCache) ctr.newInstance(conf);
        assertTrue(true);
        LOG.info("Test create cache finished.");
    } catch(Exception e){
        LOG.error("Could not instantiate 'com.koda.integ.hbase.blockcache.OffHeapBlockCache'+ class, will resort to standard cache impl.");
        assertTrue(false);
    }

}
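
The log message above refers to falling back to the standard cache implementation. A sketch of that pattern follows; the fallback sizes (32 MB capacity, 64 KB blocks) are illustrative and not taken from the project:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.LruBlockCache;

public final class BlockCacheLoader {
  public static BlockCache load(Configuration conf) {
    try {
      // Prefer the off-heap implementation when it is on the classpath.
      Class<?> cls = Class.forName("com.koda.integ.hbase.blockcache.OffHeapBlockCache");
      return (BlockCache) cls.getDeclaredConstructor(Configuration.class).newInstance(conf);
    } catch (Exception e) {
      // Fall back to the standard on-heap LRU cache.
      return new LruBlockCache(32L * 1024 * 1024, 64 * 1024);
    }
  }
}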
Project: ditb    File: HeapMemoryManager.java
public static HeapMemoryManager create(Configuration conf, FlushRequester memStoreFlusher,
              Server server, RegionServerAccounting regionServerAccounting) {
  BlockCache blockCache = CacheConfig.instantiateBlockCache(conf);
  if (blockCache instanceof ResizableBlockCache) {
    return new HeapMemoryManager((ResizableBlockCache) blockCache, memStoreFlusher, server,
               regionServerAccounting);
  }
  return null;
}
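
Note that create() returns null when the configured cache is not a ResizableBlockCache, so callers are expected to skip heap-memory tuning in that case.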
Project: ditb    File: HRegion.java
/**
 * Facility for dumping and compacting catalog tables. Only handles catalog tables since these
 * are the only tables whose schema we know for certain. For usage run:
 * <pre>
 *   ./bin/hbase org.apache.hadoop.hbase.regionserver.HRegion
 * </pre>
 *
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
  if (args.length < 1) {
    printUsageAndExit(null);
  }
  boolean majorCompact = false;
  if (args.length > 1) {
    if (!args[1].toLowerCase().startsWith("major")) {
      printUsageAndExit("ERROR: Unrecognized option <" + args[1] + ">");
    }
    majorCompact = true;
  }
  final Path tableDir = new Path(args[0]);
  final Configuration c = HBaseConfiguration.create();
  final FileSystem fs = FileSystem.get(c);
  final Path logdir = new Path(c.get("hbase.tmp.dir"));
  final String logname = "wal" + FSUtils.getTableName(tableDir) + System.currentTimeMillis();

  final Configuration walConf = new Configuration(c);
  FSUtils.setRootDir(walConf, logdir);
  final WALFactory wals = new WALFactory(walConf, null, logname);
  try {
    processTable(fs, tableDir, wals, c, majorCompact);
  } finally {
    wals.close();
    // TODO: is this still right?
    BlockCache bc = new CacheConfig(c).getBlockCache();
    if (bc != null) bc.shutdown();
  }
}
Project: ditb    File: TestAtomicOperation.java
@After
public void teardown() throws IOException {
  if (region != null) {
    BlockCache bc = region.getStores().get(0).getCacheConfig().getBlockCache();
    ((HRegion)region).close();
    WAL wal = ((HRegion)region).getWAL();
    if (wal != null) wal.close();
    if (bc != null) bc.shutdown();
    region = null;
  }
}
Project: gemfirexd-oss    File: HFileSortedOplog.java
public HFileSortedOplog(HDFSStoreImpl store, Path hfilePath,
    BlockCache blockCache, SortedOplogStatistics stats,
    HFileStoreStatistics storeStats) throws IOException {
  super(store, hfilePath, stats);
  cacheConf = getCacheConfInstance(blockCache, stats, storeStats);
  reader = getReaderContainer();
}
Project: gemfirexd-oss    File: HFileSortedOplog.java
private HFileSortedOplog(FileSystem inputFS, Path hfilePath,
    BlockCache blockCache, SortedOplogStatistics stats,
    HFileStoreStatistics storeStats) throws IOException {
  super(inputFS, hfilePath, stats);
  cacheConf = getCacheConfInstance(blockCache, stats, storeStats);
  reader = getReaderContainer();
}
Project: LCIndex-HBase-0.94.16    File: HRegion.java
/**
 * Facility for dumping and compacting catalog tables. Only handles catalog tables since these
 * are the only tables whose schema we know for certain. For usage run:
 * 
 * <pre>
 *   ./bin/hbase org.apache.hadoop.hbase.regionserver.HRegion
 * </pre>
 * @param args
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
  if (args.length < 1) {
    printUsageAndExit(null);
  }
  boolean majorCompact = false;
  if (args.length > 1) {
    if (!args[1].toLowerCase().startsWith("major")) {
      printUsageAndExit("ERROR: Unrecognized option <" + args[1] + ">");
    }
    majorCompact = true;
  }
  final Path tableDir = new Path(args[0]);
  final Configuration c = HBaseConfiguration.create();
  final FileSystem fs = FileSystem.get(c);
  final Path logdir =
      new Path(c.get("hbase.tmp.dir"), "hlog" + tableDir.getName()
          + EnvironmentEdgeManager.currentTimeMillis());
  final Path oldLogDir = new Path(c.get("hbase.tmp.dir"), HConstants.HREGION_OLDLOGDIR_NAME);
  final HLog log = new HLog(fs, logdir, oldLogDir, c);
  try {
    processTable(fs, tableDir, log, c, majorCompact);
  } finally {
    log.close();
    // TODO: is this still right?
    BlockCache bc = new CacheConfig(c).getBlockCache();
    if (bc != null) bc.shutdown();
  }
}
Project: LCIndex-HBase-0.94.16    File: TestStoreFileBlockCacheSummary.java
/**
 * This test inserts data into two tables and then reads them back to ensure
 * their blocks are in the block cache.
 *
 * @throws Exception exception
 */
@Test
public void testBlockCacheSummary() throws Exception {
  HTable ht = TEST_UTIL.createTable(Bytes.toBytes(TEST_TABLE), FAMILY);
  addRows(ht, FAMILY);

  HTable ht2 = TEST_UTIL.createTable(Bytes.toBytes(TEST_TABLE2), FAMILY);
  addRows(ht2, FAMILY);

  TEST_UTIL.flush();

  scan(ht, FAMILY);
  scan(ht2, FAMILY);

  BlockCache bc =
    new CacheConfig(TEST_UTIL.getConfiguration()).getBlockCache();
  List<BlockCacheColumnFamilySummary> bcs = 
    bc.getBlockCacheColumnFamilySummaries(TEST_UTIL.getConfiguration());
  LOG.info("blockCacheSummary: " + bcs);

  assertEquals("blockCache summary has entries", 3, bcs.size());

  BlockCacheColumnFamilySummary e = bcs.get(0);
  assertEquals("table", "-ROOT-", e.getTable());
  assertEquals("cf", "info", e.getColumnFamily());

  e = bcs.get(1);
  assertEquals("table", TEST_TABLE, e.getTable());
  assertEquals("cf", TEST_CF, e.getColumnFamily());

  e = bcs.get(2);
  assertEquals("table", TEST_TABLE2, e.getTable());
  assertEquals("cf", TEST_CF, e.getColumnFamily());

  ht.close();
  ht2.close();
}
Project: pbase    File: HeapMemoryManager.java
public static HeapMemoryManager create(Configuration conf, FlushRequester memStoreFlusher,
    Server server) {
  BlockCache blockCache = CacheConfig.instantiateBlockCache(conf);
  if (blockCache instanceof ResizableBlockCache) {
    return new HeapMemoryManager((ResizableBlockCache) blockCache, memStoreFlusher, server);
  }
  return null;
}
Project: pbase    File: HRegion.java
/**
 * Facility for dumping and compacting catalog tables.
 * Only handles catalog tables since these are the only tables whose
 * schema we know for certain.  For usage run:
 * <pre>
 *   ./bin/hbase org.apache.hadoop.hbase.regionserver.HRegion
 * </pre>
 *
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
    if (args.length < 1) {
        printUsageAndExit(null);
    }
    boolean majorCompact = false;
    if (args.length > 1) {
        if (!args[1].toLowerCase().startsWith("major")) {
            printUsageAndExit("ERROR: Unrecognized option <" + args[1] + ">");
        }
        majorCompact = true;
    }
    final Path tableDir = new Path(args[0]);
    final Configuration c = HBaseConfiguration.create();
    final FileSystem fs = FileSystem.get(c);
    final Path logdir = new Path(c.get("hbase.tmp.dir"));
    final String logname = "wal" + FSUtils.getTableName(tableDir) + System.currentTimeMillis();

    final Configuration walConf = new Configuration(c);
    FSUtils.setRootDir(walConf, logdir);
    final WALFactory wals = new WALFactory(walConf, null, logname);
    try {
        processTable(fs, tableDir, wals, c, majorCompact);
    } finally {
        wals.close();
        // TODO: is this still right?
        BlockCache bc = new CacheConfig(c).getBlockCache();
        if (bc != null) bc.shutdown();
    }
}
Project: HIndex    File: HRegion.java
/**
 * Facility for dumping and compacting catalog tables.
 * Only handles catalog tables since these are the only tables whose
 * schema we know for certain.  For usage run:
 * <pre>
 *   ./bin/hbase org.apache.hadoop.hbase.regionserver.HRegion
 * </pre>
 * @param args
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
  if (args.length < 1) {
    printUsageAndExit(null);
  }
  boolean majorCompact = false;
  if (args.length > 1) {
    if (!args[1].toLowerCase().startsWith("major")) {
      printUsageAndExit("ERROR: Unrecognized option <" + args[1] + ">");
    }
    majorCompact = true;
  }
  final Path tableDir = new Path(args[0]);
  final Configuration c = HBaseConfiguration.create();
  final FileSystem fs = FileSystem.get(c);
  final Path logdir = new Path(c.get("hbase.tmp.dir"));
  final String logname = "hlog" + FSUtils.getTableName(tableDir) + System.currentTimeMillis();

  final HLog log = HLogFactory.createHLog(fs, logdir, logname, c);
  try {
    processTable(fs, tableDir, log, c, majorCompact);
  } finally {
    log.close();
    // TODO: is this still right?
    BlockCache bc = new CacheConfig(c).getBlockCache();
    if (bc != null) bc.shutdown();
  }
}
Project: HIndex    File: TestStoreFileBlockCacheSummary.java
/**
 * This test inserts data into two tables and then reads them back to ensure
 * their blocks are in the block cache.
 *
 * @throws Exception exception
 */
@Test
public void testBlockCacheSummary() throws Exception {
  HTable ht = TEST_UTIL.createTable(Bytes.toBytes(TEST_TABLE), FAMILY);
  addRows(ht, FAMILY);

  HTable ht2 = TEST_UTIL.createTable(Bytes.toBytes(TEST_TABLE2), FAMILY);
  addRows(ht2, FAMILY);

  TEST_UTIL.flush();

  scan(ht, FAMILY);
  scan(ht2, FAMILY);

  BlockCache bc =
    new CacheConfig(TEST_UTIL.getConfiguration()).getBlockCache();
  List<BlockCacheColumnFamilySummary> bcs = 
    bc.getBlockCacheColumnFamilySummaries(TEST_UTIL.getConfiguration());
  LOG.info("blockCacheSummary: " + bcs);

  assertTrue("blockCache summary has " + bcs.size() + " entries", bcs.size() >= 2);

  BlockCacheColumnFamilySummary e = bcs.get(bcs.size()-2);
  assertEquals("table", TEST_TABLE, e.getTable());
  assertEquals("cf", TEST_CF, e.getColumnFamily());

  e = bcs.get(bcs.size()-1);
  assertEquals("table", TEST_TABLE2, e.getTable());
  assertEquals("cf", TEST_CF, e.getColumnFamily());

  ht.close();
  ht2.close();
}
Project: IRIndex    File: HRegion.java
/**
 * Facility for dumping and compacting catalog tables.
 * Only handles catalog tables since these are the only tables whose
 * schema we know for certain.  For usage run:
 * <pre>
 *   ./bin/hbase org.apache.hadoop.hbase.regionserver.HRegion
 * </pre>
 * @param args
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
  if (args.length < 1) {
    printUsageAndExit(null);
  }
  boolean majorCompact = false;
  if (args.length > 1) {
    if (!args[1].toLowerCase().startsWith("major")) {
      printUsageAndExit("ERROR: Unrecognized option <" + args[1] + ">");
    }
    majorCompact = true;
  }
  final Path tableDir = new Path(args[0]);
  final Configuration c = HBaseConfiguration.create();
  final FileSystem fs = FileSystem.get(c);
  final Path logdir = new Path(c.get("hbase.tmp.dir"),
      "hlog" + tableDir.getName()
      + EnvironmentEdgeManager.currentTimeMillis());
  final Path oldLogDir = new Path(c.get("hbase.tmp.dir"),
      HConstants.HREGION_OLDLOGDIR_NAME);
  final HLog log = new HLog(fs, logdir, oldLogDir, c);
  try {
    processTable(fs, tableDir, log, c, majorCompact);
  } finally {
    log.close();
    // TODO: is this still right?
    BlockCache bc = new CacheConfig(c).getBlockCache();
    if (bc != null) bc.shutdown();
  }
}
Project: IRIndex    File: TestStoreFileBlockCacheSummary.java
/**
 * This test inserts data into two tables and then reads them back to ensure
 * their blocks are in the block cache.
 *
 * @throws Exception exception
 */
@Test
public void testBlockCacheSummary() throws Exception {
  HTable ht = TEST_UTIL.createTable(Bytes.toBytes(TEST_TABLE), FAMILY);
  addRows(ht, FAMILY);

  HTable ht2 = TEST_UTIL.createTable(Bytes.toBytes(TEST_TABLE2), FAMILY);
  addRows(ht2, FAMILY);

  TEST_UTIL.flush();

  scan(ht, FAMILY);
  scan(ht2, FAMILY);

  BlockCache bc =
    new CacheConfig(TEST_UTIL.getConfiguration()).getBlockCache();
  List<BlockCacheColumnFamilySummary> bcs = 
    bc.getBlockCacheColumnFamilySummaries(TEST_UTIL.getConfiguration());
  LOG.info("blockCacheSummary: " + bcs);

  assertEquals("blockCache summary has entries", 3, bcs.size());

  BlockCacheColumnFamilySummary e = bcs.get(0);
  assertEquals("table", "-ROOT-", e.getTable());
  assertEquals("cf", "info", e.getColumnFamily());

  e = bcs.get(1);
  assertEquals("table", TEST_TABLE, e.getTable());
  assertEquals("cf", TEST_CF, e.getColumnFamily());

  e = bcs.get(2);
  assertEquals("table", TEST_TABLE2, e.getTable());
  assertEquals("cf", TEST_CF, e.getColumnFamily());

  ht.close();
  ht2.close();
}
Project: hbase    File: HRegionServer.java
public CacheEvictionStats clearRegionBlockCache(Region region) {
  BlockCache blockCache = this.getCacheConfig().getBlockCache();
  long evictedBlocks = 0;

  for(Store store : region.getStores()) {
    for(StoreFile hFile : store.getStorefiles()) {
      evictedBlocks += blockCache.evictBlocksByHfileName(hFile.getPath().getName());
    }
  }

  return CacheEvictionStats.builder()
      .withEvictedBlocks(evictedBlocks)
      .build();
}
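
Each call to evictBlocksByHfileName returns the number of blocks it evicted; the sum is what the clearBlockCache admin calls shown earlier report via CacheEvictionStats.getEvictedBlocks().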
Project: hbase    File: TestBlockEvictionFromClient.java
private BlockCache setCacheProperties(HRegion region) {
  Iterator<HStore> strItr = region.getStores().iterator();
  BlockCache cache = null;
  while (strItr.hasNext()) {
    HStore store = strItr.next();
    CacheConfig cacheConf = store.getCacheConfig();
    cacheConf.setCacheDataOnWrite(true);
    cacheConf.setEvictOnClose(true);
    // Use the last one
    cache = cacheConf.getBlockCache();
  }
  return cache;
}
Project: hbase    File: TestBlockEvictionFromClient.java
private void iterateBlockCache(BlockCache cache, Iterator<CachedBlock> iterator) {
  int refCount;
  while (iterator.hasNext()) {
    CachedBlock next = iterator.next();
    BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
    if (cache instanceof BucketCache) {
      refCount = ((BucketCache) cache).getRefCount(cacheKey);
    } else if (cache instanceof CombinedBlockCache) {
      refCount = ((CombinedBlockCache) cache).getRefCount(cacheKey);
    } else {
      continue;
    }
    assertEquals(0, refCount);
  }
}
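
Only BucketCache and CombinedBlockCache track per-block reference counts, which is why all other cache implementations are skipped in this loop.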
Project: hbase    File: TestCacheOnWriteInSchema.java
private void readStoreFile(Path path) throws IOException {
  CacheConfig cacheConf = store.getCacheConfig();
  BlockCache cache = cacheConf.getBlockCache();
  HStoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.ROWCOL, true);
  sf.initReader();
  HFile.Reader reader = sf.getReader().getHFileReader();
  try {
    // Open a scanner with (on read) caching disabled
    HFileScanner scanner = reader.getScanner(false, false);
    assertTrue(testDescription, scanner.seekTo());
    // Cribbed from io.hfile.TestCacheOnWrite
    long offset = 0;
    while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
      // Flags: don't cache the block, use pread, this is not a compaction.
      // Also, pass null for expected block type to avoid checking it.
      HFileBlock block = reader.readBlock(offset, -1, false, true,
        false, true, null, DataBlockEncoding.NONE);
      BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(),
        offset);
      boolean isCached = cache.getBlock(blockCacheKey, true, false, true) != null;
      boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
      if (shouldBeCached != isCached) {
        throw new AssertionError(
          "shouldBeCached: " + shouldBeCached+ "\n" +
          "isCached: " + isCached + "\n" +
          "Test description: " + testDescription + "\n" +
          "block: " + block + "\n" +
          "blockCacheKey: " + blockCacheKey);
      }
      offset += block.getOnDiskSizeWithHeader();
    }
  } finally {
    reader.close();
  }
}
Project: hbase    File: TestAtomicOperation.java
@After
public void teardown() throws IOException {
  if (region != null) {
    BlockCache bc = region.getStores().get(0).getCacheConfig().getBlockCache();
    region.close();
    WAL wal = region.getWAL();
    if (wal != null) wal.close();
    if (bc != null) bc.shutdown();
    region = null;
  }
}
Project: RStore    File: HRegion.java
/**
 * Facility for dumping and compacting catalog tables.
 * Only handles catalog tables since these are the only tables whose
 * schema we know for certain.  For usage run:
 * <pre>
 *   ./bin/hbase org.apache.hadoop.hbase.regionserver.HRegion
 * </pre>
 * @param args
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
  if (args.length < 1) {
    printUsageAndExit(null);
  }
  boolean majorCompact = false;
  if (args.length > 1) {
    if (!args[1].toLowerCase().startsWith("major")) {
      printUsageAndExit("ERROR: Unrecognized option <" + args[1] + ">");
    }
    majorCompact = true;
  }
  final Path tableDir = new Path(args[0]);
  final Configuration c = HBaseConfiguration.create();
  final FileSystem fs = FileSystem.get(c);
  final Path logdir = new Path(c.get("hbase.tmp.dir"),
      "hlog" + tableDir.getName()
      + EnvironmentEdgeManager.currentTimeMillis());
  final Path oldLogDir = new Path(c.get("hbase.tmp.dir"),
      HConstants.HREGION_OLDLOGDIR_NAME);
  final HLog log = new HLog(fs, logdir, oldLogDir, c);
  try {
    processTable(fs, tableDir, log, c, majorCompact);
  } finally {
    log.close();
    // TODO: is this still right?
    BlockCache bc = new CacheConfig(c).getBlockCache();
    if (bc != null) bc.shutdown();
  }
}
Project: PyroDB    File: HeapMemoryManager.java
public static HeapMemoryManager create(Configuration conf, FlushRequester memStoreFlusher,
    Server server) {
  BlockCache blockCache = CacheConfig.instantiateBlockCache(conf);
  if (blockCache instanceof ResizableBlockCache) {
    return new HeapMemoryManager((ResizableBlockCache) blockCache, memStoreFlusher, server);
  }
  return null;
}
Project: PyroDB    File: HRegion.java
/**
 * Facility for dumping and compacting catalog tables.
 * Only handles catalog tables since these are the only tables whose
 * schema we know for certain.  For usage run:
 * <pre>
 *   ./bin/hbase org.apache.hadoop.hbase.regionserver.HRegion
 * </pre>
 * @param args
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
  if (args.length < 1) {
    printUsageAndExit(null);
  }
  boolean majorCompact = false;
  if (args.length > 1) {
    if (!args[1].toLowerCase().startsWith("major")) {
      printUsageAndExit("ERROR: Unrecognized option <" + args[1] + ">");
    }
    majorCompact = true;
  }
  final Path tableDir = new Path(args[0]);
  final Configuration c = HBaseConfiguration.create();
  final FileSystem fs = FileSystem.get(c);
  final Path logdir = new Path(c.get("hbase.tmp.dir"));
  final String logname = "hlog" + FSUtils.getTableName(tableDir) + System.currentTimeMillis();

  final HLog log = HLogFactory.createHLog(fs, logdir, logname, c);
  try {
    processTable(fs, tableDir, log, c, majorCompact);
  } finally {
    log.close();
    // TODO: is this still right?
    BlockCache bc = new CacheConfig(c).getBlockCache();
    if (bc != null) bc.shutdown();
  }
}
Project: PyroDB    File: TestStoreFileBlockCacheSummary.java
/**
 * This test inserts data into two tables and then reads them back to ensure
 * their blocks are in the block cache.
 *
 * @throws Exception exception
 */
@Test
public void testBlockCacheSummary() throws Exception {
  HTable ht = TEST_UTIL.createTable(Bytes.toBytes(TEST_TABLE), FAMILY);
  addRows(ht, FAMILY);

  HTable ht2 = TEST_UTIL.createTable(Bytes.toBytes(TEST_TABLE2), FAMILY);
  addRows(ht2, FAMILY);

  TEST_UTIL.flush();

  scan(ht, FAMILY);
  scan(ht2, FAMILY);

  BlockCache bc =
    new CacheConfig(TEST_UTIL.getConfiguration()).getBlockCache();
  List<BlockCacheColumnFamilySummary> bcs = 
    bc.getBlockCacheColumnFamilySummaries(TEST_UTIL.getConfiguration());
  LOG.info("blockCacheSummary: " + bcs);

  assertTrue("blockCache summary has " + bcs.size() + " entries", bcs.size() >= 2);

  BlockCacheColumnFamilySummary e = bcs.get(bcs.size()-2);
  assertEquals("table", TEST_TABLE, e.getTable());
  assertEquals("cf", TEST_CF, e.getColumnFamily());

  e = bcs.get(bcs.size()-1);
  assertEquals("table", TEST_TABLE2, e.getTable());
  assertEquals("cf", TEST_CF, e.getColumnFamily());

  ht.close();
  ht2.close();
}
Project: c5    File: HRegion.java
/**
 * Facility for dumping and compacting catalog tables.
 * Only handles catalog tables since these are the only tables whose
 * schema we know for certain.  For usage run:
 * <pre>
 *   ./bin/hbase org.apache.hadoop.hbase.regionserver.HRegion
 * </pre>
 * @param args
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
  if (args.length < 1) {
    printUsageAndExit(null);
  }
  boolean majorCompact = false;
  if (args.length > 1) {
    if (!args[1].toLowerCase().startsWith("major")) {
      printUsageAndExit("ERROR: Unrecognized option <" + args[1] + ">");
    }
    majorCompact = true;
  }
  final Path tableDir = new Path(args[0]);
  final Configuration c = HBaseConfiguration.create();
  final FileSystem fs = FileSystem.get(c);
  final Path logdir = new Path(c.get("hbase.tmp.dir"));
  final String logname = "hlog" + FSUtils.getTableName(tableDir) + System.currentTimeMillis();

  final HLog log = HLogFactory.createHLog(fs, logdir, logname, c);
  try {
    processTable(fs, tableDir, log, c, majorCompact);
  } finally {
    log.close();
    // TODO: is this still right?
    BlockCache bc = new CacheConfig(c).getBlockCache();
    if (bc != null) bc.shutdown();
  }
}
Project: c5    File: TestStoreFileBlockCacheSummary.java
/**
 * This test inserts data into two tables and then reads them back to ensure
 * their blocks are in the block cache.
 *
 * @throws Exception exception
 */
@Test
public void testBlockCacheSummary() throws Exception {
  HTable ht = TEST_UTIL.createTable(Bytes.toBytes(TEST_TABLE), FAMILY);
  addRows(ht, FAMILY);

  HTable ht2 = TEST_UTIL.createTable(Bytes.toBytes(TEST_TABLE2), FAMILY);
  addRows(ht2, FAMILY);

  TEST_UTIL.flush();

  scan(ht, FAMILY);
  scan(ht2, FAMILY);

  BlockCache bc =
    new CacheConfig(TEST_UTIL.getConfiguration()).getBlockCache();
  List<BlockCacheColumnFamilySummary> bcs = 
    bc.getBlockCacheColumnFamilySummaries(TEST_UTIL.getConfiguration());
  LOG.info("blockCacheSummary: " + bcs);

  assertTrue("blockCache summary has " + bcs.size() + " entries", bcs.size() >= 2);

  BlockCacheColumnFamilySummary e = bcs.get(bcs.size()-2);
  assertEquals("table", TEST_TABLE, e.getTable());
  assertEquals("cf", TEST_CF, e.getColumnFamily());

  e = bcs.get(bcs.size()-1);
  assertEquals("table", TEST_TABLE2, e.getTable());
  assertEquals("cf", TEST_CF, e.getColumnFamily());

  ht.close();
  ht2.close();
}
Project: HBase-Research    File: HRegion.java
/**
 * Facility for dumping and compacting catalog tables.
 * Only handles catalog tables since these are the only tables whose
 * schema we know for certain.  For usage run:
 * <pre>
 *   ./bin/hbase org.apache.hadoop.hbase.regionserver.HRegion
 * </pre>
 * @param args
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
  if (args.length < 1) {
    printUsageAndExit(null);
  }
  boolean majorCompact = false;
  if (args.length > 1) {
    if (!args[1].toLowerCase().startsWith("major")) {
      printUsageAndExit("ERROR: Unrecognized option <" + args[1] + ">");
    }
    majorCompact = true;
  }
  final Path tableDir = new Path(args[0]);
  final Configuration c = HBaseConfiguration.create();
  final FileSystem fs = FileSystem.get(c);
  final Path logdir = new Path(c.get("hbase.tmp.dir"),
      "hlog" + tableDir.getName()
      + EnvironmentEdgeManager.currentTimeMillis());
  final Path oldLogDir = new Path(c.get("hbase.tmp.dir"),
      HConstants.HREGION_OLDLOGDIR_NAME);
  final HLog log = new HLog(fs, logdir, oldLogDir, c);
  try {
    processTable(fs, tableDir, log, c, majorCompact);
  } finally {
    log.close();
    // TODO: is this still right?
    BlockCache bc = new CacheConfig(c).getBlockCache();
    if (bc != null) bc.shutdown();
  }
}
Project: HBase-Research    File: TestStoreFileBlockCacheSummary.java
/**
 * This test inserts data into two tables and then reads them back to ensure
 * their blocks are in the block cache.
 *
 * @throws Exception exception
 */
@Test
public void testBlockCacheSummary() throws Exception {
  HTable ht = TEST_UTIL.createTable(Bytes.toBytes(TEST_TABLE), FAMILY);
  addRows(ht, FAMILY);

  HTable ht2 = TEST_UTIL.createTable(Bytes.toBytes(TEST_TABLE2), FAMILY);
  addRows(ht2, FAMILY);

  TEST_UTIL.flush();

  scan(ht, FAMILY);
  scan(ht2, FAMILY);

  BlockCache bc =
    new CacheConfig(TEST_UTIL.getConfiguration()).getBlockCache();
  List<BlockCacheColumnFamilySummary> bcs = 
    bc.getBlockCacheColumnFamilySummaries(TEST_UTIL.getConfiguration());
  LOG.info("blockCacheSummary: " + bcs);

  assertEquals("blockCache summary has entries", 3, bcs.size());

  BlockCacheColumnFamilySummary e = bcs.get(0);
  assertEquals("table", "-ROOT-", e.getTable());
  assertEquals("cf", "info", e.getColumnFamily());

  e = bcs.get(1);
  assertEquals("table", TEST_TABLE, e.getTable());
  assertEquals("cf", TEST_CF, e.getColumnFamily());

  e = bcs.get(2);
  assertEquals("table", TEST_TABLE2, e.getTable());
  assertEquals("cf", TEST_CF, e.getColumnFamily());

  ht.close();
  ht2.close();
}
Project: hbase-0.94.8-qod    File: HRegion.java
/**
 * Facility for dumping and compacting catalog tables.
 * Only handles catalog tables since these are the only tables whose
 * schema we know for certain.  For usage run:
 * <pre>
 *   ./bin/hbase org.apache.hadoop.hbase.regionserver.HRegion
 * </pre>
 * @param args
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
  if (args.length < 1) {
    printUsageAndExit(null);
  }
  boolean majorCompact = false;
  if (args.length > 1) {
    if (!args[1].toLowerCase().startsWith("major")) {
      printUsageAndExit("ERROR: Unrecognized option <" + args[1] + ">");
    }
    majorCompact = true;
  }
  final Path tableDir = new Path(args[0]);
  final Configuration c = HBaseConfiguration.create();
  final FileSystem fs = FileSystem.get(c);
  final Path logdir = new Path(c.get("hbase.tmp.dir"),
      "hlog" + tableDir.getName()
      + EnvironmentEdgeManager.currentTimeMillis());
  final Path oldLogDir = new Path(c.get("hbase.tmp.dir"),
      HConstants.HREGION_OLDLOGDIR_NAME);
  final HLog log = new HLog(fs, logdir, oldLogDir, c);
  try {
    processTable(fs, tableDir, log, c, majorCompact);
  } finally {
    log.close();
    // TODO: is this still right?
    BlockCache bc = new CacheConfig(c).getBlockCache();
    if (bc != null) bc.shutdown();
  }
}
Project: hbase-0.94.8-qod    File: TestStoreFileBlockCacheSummary.java
/**
 * This test inserts data into two tables and then reads them back to ensure
 * their blocks are in the block cache.
 *
 * @throws Exception exception
 */
@Test
public void testBlockCacheSummary() throws Exception {
  HTable ht = TEST_UTIL.createTable(Bytes.toBytes(TEST_TABLE), FAMILY);
  addRows(ht, FAMILY);

  HTable ht2 = TEST_UTIL.createTable(Bytes.toBytes(TEST_TABLE2), FAMILY);
  addRows(ht2, FAMILY);

  TEST_UTIL.flush();

  scan(ht, FAMILY);
  scan(ht2, FAMILY);

  BlockCache bc =
    new CacheConfig(TEST_UTIL.getConfiguration()).getBlockCache();
  List<BlockCacheColumnFamilySummary> bcs = 
    bc.getBlockCacheColumnFamilySummaries(TEST_UTIL.getConfiguration());
  LOG.info("blockCacheSummary: " + bcs);

  assertEquals("blockCache summary has entries", 3, bcs.size());

  BlockCacheColumnFamilySummary e = bcs.get(0);
  assertEquals("table", "-ROOT-", e.getTable());
  assertEquals("cf", "info", e.getColumnFamily());

  e = bcs.get(1);
  assertEquals("table", TEST_TABLE, e.getTable());
  assertEquals("cf", TEST_CF, e.getColumnFamily());

  e = bcs.get(2);
  assertEquals("table", TEST_TABLE2, e.getTable());
  assertEquals("cf", TEST_CF, e.getColumnFamily());

  ht.close();
  ht2.close();
}
Project: DominoHBase    File: HRegion.java
/**
 * Facility for dumping and compacting catalog tables.
 * Only handles catalog tables since these are the only tables whose
 * schema we know for certain.  For usage run:
 * <pre>
 *   ./bin/hbase org.apache.hadoop.hbase.regionserver.HRegion
 * </pre>
 * @param args
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
  if (args.length < 1) {
    printUsageAndExit(null);
  }
  boolean majorCompact = false;
  if (args.length > 1) {
    if (!args[1].toLowerCase().startsWith("major")) {
      printUsageAndExit("ERROR: Unrecognized option <" + args[1] + ">");
    }
    majorCompact = true;
  }
  final Path tableDir = new Path(args[0]);
  final Configuration c = HBaseConfiguration.create();
  final FileSystem fs = FileSystem.get(c);
  final Path logdir = new Path(c.get("hbase.tmp.dir"));
  final String logname = "hlog" + tableDir.getName()
    + EnvironmentEdgeManager.currentTimeMillis();

  final HLog log = HLogFactory.createHLog(fs, logdir, logname, c);
  try {
    processTable(fs, tableDir, log, c, majorCompact);
  } finally {
    log.close();
    // TODO: is this still right?
    BlockCache bc = new CacheConfig(c).getBlockCache();
    if (bc != null) bc.shutdown();
  }
}
Project: DominoHBase    File: TestStoreFileBlockCacheSummary.java
/**
 * This test inserts data into two tables and then reads them back to ensure
 * their blocks are in the block cache.
 *
 * @throws Exception exception
 */
@Test
public void testBlockCacheSummary() throws Exception {
  HTable ht = TEST_UTIL.createTable(Bytes.toBytes(TEST_TABLE), FAMILY);
  addRows(ht, FAMILY);

  HTable ht2 = TEST_UTIL.createTable(Bytes.toBytes(TEST_TABLE2), FAMILY);
  addRows(ht2, FAMILY);

  TEST_UTIL.flush();

  scan(ht, FAMILY);
  scan(ht2, FAMILY);

  BlockCache bc =
    new CacheConfig(TEST_UTIL.getConfiguration()).getBlockCache();
  List<BlockCacheColumnFamilySummary> bcs = 
    bc.getBlockCacheColumnFamilySummaries(TEST_UTIL.getConfiguration());
  LOG.info("blockCacheSummary: " + bcs);

  assertEquals("blockCache summary has entries", 3, bcs.size());

  BlockCacheColumnFamilySummary e = bcs.get(0);
  assertEquals("table", "-ROOT-", e.getTable());
  assertEquals("cf", "info", e.getColumnFamily());

  e = bcs.get(1);
  assertEquals("table", TEST_TABLE, e.getTable());
  assertEquals("cf", TEST_CF, e.getColumnFamily());

  e = bcs.get(2);
  assertEquals("table", TEST_TABLE2, e.getTable());
  assertEquals("cf", TEST_CF, e.getColumnFamily());

  ht.close();
  ht2.close();
}