Example source code for the Java class org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics

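The snippets below, collected from several HBase 0.94-derived projects (LCIndex-HBase-0.94.16, IRIndex, hindex, HBase-Research, hbase-0.94.8-qod), show the typical uses of SchemaMetrics: enabling per-table/per-column-family metrics with configureGlobally, building metric name prefixes, reading accumulated values from RegionMetricsStorage, and diffing metric snapshots in tests.
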
Project: LCIndex-HBase-0.94.16    File: HFileWriterV2.java
/** Constructor that takes a path, creates and closes the output stream. */
public HFileWriterV2(Configuration conf, CacheConfig cacheConf,
    FileSystem fs, Path path, FSDataOutputStream ostream, int blockSize,
    Compression.Algorithm compressAlgo, HFileDataBlockEncoder blockEncoder,
    final KeyComparator comparator, final ChecksumType checksumType,
    final int bytesPerChecksum, boolean includeMVCCReadpoint) throws IOException {
  super(cacheConf,
      ostream == null ? createOutputStream(conf, fs, path) : ostream,
      path, blockSize, compressAlgo, blockEncoder, comparator);
  SchemaMetrics.configureGlobally(conf);
  this.checksumType = checksumType;
  this.bytesPerChecksum = bytesPerChecksum;
  this.includeMemstoreTS = includeMVCCReadpoint;
  if (!conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, false)) {
    this.minorVersion = 0;
  }
  finishInit(conf);
}
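
As the constructor above shows, writers invoke SchemaMetrics.configureGlobally(conf) once before any per-table/per-column-family metrics are emitted. A minimal standalone sketch of that setup, assuming the standard HBase 0.94 package layout (ConfigureSchemaMetricsExample is an illustrative class name, not part of HBase):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;

public class ConfigureSchemaMetricsExample {
  public static void main(String[] args) {
    // Load hbase-default.xml / hbase-site.xml from the classpath.
    Configuration conf = HBaseConfiguration.create();
    // One-time global setup; the HFile writer and StoreFile constructors in the
    // snippets on this page call this before using any SchemaMetrics instance.
    SchemaMetrics.configureGlobally(conf);
  }
}
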
Project: LCIndex-HBase-0.94.16    File: TestRegionServerMetrics.java
private void assertTimeVaryingMetricCount(int expectedCount, String table, String cf,
    String regionName, String metricPrefix) {

  Integer expectedCountInteger = new Integer(expectedCount);

  if (cf != null) {
    String cfKey =
        SchemaMetrics.TABLE_PREFIX + table + "." + SchemaMetrics.CF_PREFIX + cf + "."
            + metricPrefix;
    Pair<Long, Integer> cfPair = RegionMetricsStorage.getTimeVaryingMetric(cfKey);
    assertEquals(expectedCountInteger, cfPair.getSecond());
  }

  if (regionName != null) {
    String rKey =
        SchemaMetrics.TABLE_PREFIX + table + "." + SchemaMetrics.REGION_PREFIX + regionName + "."
            + metricPrefix;

    Pair<Long, Integer> regionPair = RegionMetricsStorage.getTimeVaryingMetric(rKey);
    assertEquals(expectedCountInteger, regionPair.getSecond());
  }
}
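
The assertion above builds the time-varying metric key from SchemaMetrics.TABLE_PREFIX, CF_PREFIX and REGION_PREFIX and reads it back through RegionMetricsStorage.getTimeVaryingMetric, whose Pair carries the count in its second element. A hedged sketch of the same per-CF lookup outside the test (class and method names are illustrative; imports assume the HBase 0.94 package layout):

import org.apache.hadoop.hbase.regionserver.metrics.RegionMetricsStorage;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hbase.util.Pair;

public class TimeVaryingMetricLookup {
  /** Number of recorded operations for a per-CF time-varying metric. */
  static int cfMetricCount(String table, String cf, String metricPrefix) {
    String key = SchemaMetrics.TABLE_PREFIX + table + "."
        + SchemaMetrics.CF_PREFIX + cf + "." + metricPrefix;
    Pair<Long, Integer> pair = RegionMetricsStorage.getTimeVaryingMetric(key);
    // getSecond() is the operation count that the assertions above compare against.
    return pair.getSecond();
  }
}
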
Project: LCIndex-HBase-0.94.16    File: TestRegionServerMetrics.java
private void assertSizeMetric(String table, String[] cfs, int[] metrics) {
  // we have getsize & nextsize for each column family
  assertEquals(cfs.length * 2, metrics.length);

  for (int i =0; i < cfs.length; ++i) {
    String prefix = SchemaMetrics.generateSchemaMetricsPrefix(table, cfs[i]);
    String getMetric = prefix + SchemaMetrics.METRIC_GETSIZE;
    String nextMetric = prefix + SchemaMetrics.METRIC_NEXTSIZE;

    // verify getsize and nextsize match
    int getSize = RegionMetricsStorage.getNumericMetrics().containsKey(getMetric) ?
        RegionMetricsStorage.getNumericMetrics().get(getMetric).intValue() : 0;
    int nextSize = RegionMetricsStorage.getNumericMetrics().containsKey(nextMetric) ?
        RegionMetricsStorage.getNumericMetrics().get(nextMetric).intValue() : 0;

    assertEquals(metrics[i], getSize);
    assertEquals(metrics[cfs.length + i], nextSize);
  }
}
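
assertSizeMetric also shows the naming convention for the per-CF size counters: a prefix from SchemaMetrics.generateSchemaMetricsPrefix(table, cf) followed by METRIC_GETSIZE or METRIC_NEXTSIZE. A small sketch of reading both values directly (this uses RegionMetricsStorage.getNumericMetric, as the store-metric assertions further down do, instead of the containsKey guard used above):

import org.apache.hadoop.hbase.regionserver.metrics.RegionMetricsStorage;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;

public class SizeMetricLookup {
  /** Accumulated getsize/nextsize counters for one table/column-family pair. */
  static long[] readSizeMetrics(String table, String cf) {
    String prefix = SchemaMetrics.generateSchemaMetricsPrefix(table, cf);
    long getSize = RegionMetricsStorage.getNumericMetric(prefix + SchemaMetrics.METRIC_GETSIZE);
    long nextSize = RegionMetricsStorage.getNumericMetric(prefix + SchemaMetrics.METRIC_NEXTSIZE);
    return new long[] { getSize, nextSize };
  }
}
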
Project: IRIndex    File: HFileWriterV2.java
/** Constructor that takes a path, creates and closes the output stream. */
public HFileWriterV2(Configuration conf, CacheConfig cacheConf,
    FileSystem fs, Path path, FSDataOutputStream ostream, int blockSize,
    Compression.Algorithm compressAlgo, HFileDataBlockEncoder blockEncoder,
    final KeyComparator comparator, final ChecksumType checksumType,
    final int bytesPerChecksum, boolean includeMVCCReadpoint) throws IOException {
  super(cacheConf,
      ostream == null ? createOutputStream(conf, fs, path) : ostream,
      path, blockSize, compressAlgo, blockEncoder, comparator);
  SchemaMetrics.configureGlobally(conf);
  this.checksumType = checksumType;
  this.bytesPerChecksum = bytesPerChecksum;
  this.includeMemstoreTS = includeMVCCReadpoint;
  if (!conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, false)) {
    this.minorVersion = 0;
  }
  finishInit(conf);
}
Project: IRIndex    File: TestRegionServerMetrics.java
private void assertTimeVaryingMetricCount(int expectedCount, String table, String cf,
    String regionName, String metricPrefix) {

  Integer expectedCountInteger = new Integer(expectedCount);

  if (cf != null) {
    String cfKey =
        SchemaMetrics.TABLE_PREFIX + table + "." + SchemaMetrics.CF_PREFIX + cf + "."
            + metricPrefix;
    Pair<Long, Integer> cfPair = RegionMetricsStorage.getTimeVaryingMetric(cfKey);
    assertEquals(expectedCountInteger, cfPair.getSecond());
  }

  if (regionName != null) {
    String rKey =
        SchemaMetrics.TABLE_PREFIX + table + "." + SchemaMetrics.REGION_PREFIX + regionName + "."
            + metricPrefix;

    Pair<Long, Integer> regionPair = RegionMetricsStorage.getTimeVaryingMetric(rKey);
    assertEquals(expectedCountInteger, regionPair.getSecond());
  }
}
Project: IRIndex    File: TestRegionServerMetrics.java
private void assertSizeMetric(String table, String[] cfs, int[] metrics) {
  // we have getsize & nextsize for each column family
  assertEquals(cfs.length * 2, metrics.length);

  for (int i =0; i < cfs.length; ++i) {
    String prefix = SchemaMetrics.generateSchemaMetricsPrefix(table, cfs[i]);
    String getMetric = prefix + SchemaMetrics.METRIC_GETSIZE;
    String nextMetric = prefix + SchemaMetrics.METRIC_NEXTSIZE;

    // verify getsize and nextsize match
    int getSize = RegionMetricsStorage.getNumericMetrics().containsKey(getMetric) ?
        RegionMetricsStorage.getNumericMetrics().get(getMetric).intValue() : 0;
    int nextSize = RegionMetricsStorage.getNumericMetrics().containsKey(nextMetric) ?
        RegionMetricsStorage.getNumericMetrics().get(nextMetric).intValue() : 0;

    assertEquals(metrics[i], getSize);
    assertEquals(metrics[cfs.length + i], nextSize);
  }
}
Project: hindex    File: TestRegionServerMetrics.java
private void assertSizeMetric(String table, String[] cfs, int[] metrics) {
  // we have getsize & nextsize for each column family
  assertEquals(cfs.length * 2, metrics.length);

  for (int i =0; i < cfs.length; ++i) {
    String prefix = SchemaMetrics.generateSchemaMetricsPrefix(table, cfs[i]);
    String getMetric = prefix + SchemaMetrics.METRIC_GETSIZE;
    String nextMetric = prefix + SchemaMetrics.METRIC_NEXTSIZE;

    // verify getsize and nextsize match
    int getSize = RegionMetricsStorage.getNumericMetrics().containsKey(getMetric) ?
        RegionMetricsStorage.getNumericMetrics().get(getMetric).intValue() : 0;
    int nextSize = RegionMetricsStorage.getNumericMetrics().containsKey(nextMetric) ?
        RegionMetricsStorage.getNumericMetrics().get(nextMetric).intValue() : 0;

    assertEquals(metrics[i], getSize);
    assertEquals(metrics[cfs.length + i], nextSize);
  }
}
Project: HBase-Research    File: HFileWriterV2.java
/** Constructor that takes a path, creates and closes the output stream. */
public HFileWriterV2(Configuration conf, CacheConfig cacheConf,
    FileSystem fs, Path path, FSDataOutputStream ostream, int blockSize,
    Compression.Algorithm compressAlgo, HFileDataBlockEncoder blockEncoder,
    final KeyComparator comparator, final ChecksumType checksumType,
    final int bytesPerChecksum, boolean includeMVCCReadpoint) throws IOException {
  super(cacheConf,
      ostream == null ? createOutputStream(conf, fs, path) : ostream,
      path, blockSize, compressAlgo, blockEncoder, comparator);
  SchemaMetrics.configureGlobally(conf);
  this.checksumType = checksumType;
  this.bytesPerChecksum = bytesPerChecksum;
  this.includeMemstoreTS = includeMVCCReadpoint;
  if (!conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, false)) {
    this.minorVersion = 0;
  }
  finishInit(conf);
}
Project: HBase-Research    File: TestRegionServerMetrics.java
private void assertSizeMetric(String table, String[] cfs, int[] metrics) {
  // we have getsize & nextsize for each column family
  assertEquals(cfs.length * 2, metrics.length);

  for (int i =0; i < cfs.length; ++i) {
    String prefix = SchemaMetrics.generateSchemaMetricsPrefix(table, cfs[i]);
    String getMetric = prefix + SchemaMetrics.METRIC_GETSIZE;
    String nextMetric = prefix + SchemaMetrics.METRIC_NEXTSIZE;

    // verify getsize and nextsize match
    int getSize = RegionMetricsStorage.getNumericMetrics().containsKey(getMetric) ?
        RegionMetricsStorage.getNumericMetrics().get(getMetric).intValue() : 0;
    int nextSize = RegionMetricsStorage.getNumericMetrics().containsKey(nextMetric) ?
        RegionMetricsStorage.getNumericMetrics().get(nextMetric).intValue() : 0;

    assertEquals(metrics[i], getSize);
    assertEquals(metrics[cfs.length + i], nextSize);
  }
}
Project: hbase-0.94.8-qod    File: TestRegionServerMetrics.java
private void assertSizeMetric(String table, String[] cfs, int[] metrics) {
  // we have getsize & nextsize for each column family
  assertEquals(cfs.length * 2, metrics.length);

  for (int i =0; i < cfs.length; ++i) {
    String prefix = SchemaMetrics.generateSchemaMetricsPrefix(table, cfs[i]);
    String getMetric = prefix + SchemaMetrics.METRIC_GETSIZE;
    String nextMetric = prefix + SchemaMetrics.METRIC_NEXTSIZE;

    // verify getsize and nextsize match
    int getSize = RegionMetricsStorage.getNumericMetrics().containsKey(getMetric) ?
        RegionMetricsStorage.getNumericMetrics().get(getMetric).intValue() : 0;
    int nextSize = RegionMetricsStorage.getNumericMetrics().containsKey(nextMetric) ?
        RegionMetricsStorage.getNumericMetrics().get(nextMetric).intValue() : 0;

    assertEquals(metrics[i], getSize);
    assertEquals(metrics[cfs.length + i], nextSize);
  }
}
Project: LCIndex-HBase-0.94.16    File: StoreFile.java
/**
 * Constructor, loads a reader and its indices, etc. May allocate a substantial amount of RAM
 * depending on the underlying files (10-20MB?).
 * @param fs The current file system to use.
 * @param p The path of the file.
 * @param blockcache <code>true</code> if the block cache is enabled.
 * @param conf The current configuration.
 * @param cacheConf The cache configuration and block cache reference.
 * @param cfBloomType The bloom type to use for this store file as specified by column family
 *          configuration. This may or may not be the same as the Bloom filter type actually
 *          present in the HFile, because column family configuration might change. If this is
 *          {@link BloomType#NONE}, the existing Bloom filter is ignored.
 * @param dataBlockEncoder data block encoding algorithm.
 * @throws IOException When opening the reader fails.
 */
public StoreFile(final FileSystem fs, final Path p, final Configuration conf,
    final CacheConfig cacheConf, final BloomType cfBloomType,
    final HFileDataBlockEncoder dataBlockEncoder) throws IOException {
  this.fs = fs;
  this.path = p;
  this.cacheConf = cacheConf;
  this.dataBlockEncoder =
      dataBlockEncoder == null ? NoOpDataBlockEncoder.INSTANCE : dataBlockEncoder;
  if (BloomFilterFactory.isGeneralBloomEnabled(conf)) {
    this.cfBloomType = cfBloomType;
  } else {
    LOG.info("Ignoring bloom filter check for file " + path + ": " + "cfBloomType=" + cfBloomType
        + " (disabled in config)");
    this.cfBloomType = BloomType.NONE;
  }

  // cache the modification time stamp of this store file
  FileStatus[] stats = FSUtils.listStatus(fs, p, null);
  if (stats != null && stats.length == 1) {
    this.modificationTimeStamp = stats[0].getModificationTime();
  } else {
    this.modificationTimeStamp = 0;
  }
  SchemaMetrics.configureGlobally(conf);
  initPossibleIndexesAndReference(fs, p, conf);
}
Project: LCIndex-HBase-0.94.16    File: StoreScanner.java
/**
 * Method used internally to initialize metric names throughout the constructors. To be called
 * after the store variable has been initialized!
 */
private void initializeMetricNames() {
  String tableName = SchemaMetrics.UNKNOWN;
  String family = SchemaMetrics.UNKNOWN;
  if (store != null) {
    tableName = store.getTableName();
    family = Bytes.toString(store.getFamily().getName());
  }
  this.metricNamePrefix = SchemaMetrics.generateSchemaMetricsPrefix(tableName, family);
}
Project: LCIndex-HBase-0.94.16    File: HFileWriterV1.java
/** Constructor that takes a path, creates and closes the output stream. */
public HFileWriterV1(Configuration conf, CacheConfig cacheConf,
    FileSystem fs, Path path, FSDataOutputStream ostream,
    int blockSize, Compression.Algorithm compress,
    HFileDataBlockEncoder blockEncoder,
    final KeyComparator comparator) throws IOException {
  super(cacheConf, ostream == null ? createOutputStream(conf, fs, path) : ostream, path,
      blockSize, compress, blockEncoder, comparator);
  SchemaMetrics.configureGlobally(conf);
}
Project: LCIndex-HBase-0.94.16    File: HFile.java
/**
 * Returns the factory to be used to create {@link HFile} writers
 */
public static final WriterFactory getWriterFactory(Configuration conf, CacheConfig cacheConf) {
  SchemaMetrics.configureGlobally(conf);
  int version = getFormatVersion(conf);
  switch (version) {
  case 1:
    return new HFileWriterV1.WriterFactoryV1(conf, cacheConf);
  case 2:
    return new HFileWriterV2.WriterFactoryV2(conf, cacheConf);
  default:
    throw new IllegalArgumentException("Cannot create writer for HFile " + "format version "
        + version);
  }
}
Project: LCIndex-HBase-0.94.16    File: LruBlockCache.java
/**
 * Helper function that updates the local size counter and also updates any
 * per-cf or per-blocktype metrics it can discern from the given
 * {@link CachedBlock}
 *
 * @param cb
 * @param evict
 */
protected long updateSizeMetrics(CachedBlock cb, boolean evict) {
  long heapsize = cb.heapSize();
  if (evict) {
    heapsize *= -1;
  }
  Cacheable cachedBlock = cb.getBuffer();
  SchemaMetrics schemaMetrics = cachedBlock.getSchemaMetrics();
  if (schemaMetrics != null) {
    schemaMetrics.updateOnCachePutOrEvict(
        cachedBlock.getBlockType().getCategory(), heapsize, evict);
  }
  return size.addAndGet(heapsize);
}
Project: LCIndex-HBase-0.94.16    File: TestHRegion.java
/**
 * Test case to check whether the region initialization task state is set to
 * ABORTED when any exception occurs during initialization
 * 
 * @throws Exception
 */
@Test
public void testStatusSettingToAbortIfAnyExceptionDuringRegionInitilization() throws Exception {
  HRegionInfo info = null;
  try {
    FileSystem fs = Mockito.mock(FileSystem.class);
    Mockito.when(fs.exists((Path) Mockito.anyObject())).thenThrow(new IOException());
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor("cf"));
    info = new HRegionInfo(htd.getName(), HConstants.EMPTY_BYTE_ARRAY,
        HConstants.EMPTY_BYTE_ARRAY, false);
    Path path = new Path(DIR + "testStatusSettingToAbortIfAnyExceptionDuringRegionInitilization");
    // Nowhere in this test case do we instantiate HStore, so useTableNameGlobally is null. To
    // avoid a NullPointerException we set useTableNameGlobally to false.
    SchemaMetrics.setUseTableNameInTest(false);
    region = HRegion.newHRegion(path, null, fs, conf, info, htd, null);
    // region initialization throws IOException and set task state to ABORTED.
    region.initialize();
    fail("Region initialization should fail due to IOException");
  } catch (IOException io) {
    List<MonitoredTask> tasks = TaskMonitor.get().getTasks();
    for (MonitoredTask monitoredTask : tasks) {
      if (!(monitoredTask instanceof MonitoredRPCHandler)
          && monitoredTask.getDescription().contains(region.toString())) {
        assertTrue("Region state should be ABORTED.",
            monitoredTask.getState().equals(MonitoredTask.State.ABORTED));
        break;
      }
    }
  } finally {
    HRegion.closeHRegion(region);
  }
}
Project: LCIndex-HBase-0.94.16    File: TestRegionServerMetrics.java
private void assertStoreMetricEquals(long expected,
    SchemaMetrics schemaMetrics, StoreMetricType storeMetricType) {
  final String storeMetricName =
      schemaMetrics.getStoreMetricName(storeMetricType);
  Long startValue = startingMetrics.get(storeMetricName);
  assertEquals("Invalid value for store metric " + storeMetricName
      + " (type " + storeMetricType + ")", expected,
      RegionMetricsStorage.getNumericMetric(storeMetricName)
          - (startValue != null ? startValue : 0));
}
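
assertStoreMetricEquals resolves the metric name via a per-schema SchemaMetrics instance and then subtracts the value captured before the test from the current value in RegionMetricsStorage. A hedged sketch of that delta pattern (StoreMetricType is assumed to be the enum nested in SchemaMetrics, as the unqualified references above suggest; the helper name is made up):

import java.util.Map;
import org.apache.hadoop.hbase.regionserver.metrics.RegionMetricsStorage;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.StoreMetricType;

public class StoreMetricDelta {
  /** Growth of the store file count metric since a previously captured snapshot. */
  static long storeFileCountDelta(String table, String cf, Map<String, Long> startingMetrics) {
    SchemaMetrics schemaMetrics = SchemaMetrics.getInstance(table, cf);
    String name = schemaMetrics.getStoreMetricName(StoreMetricType.STORE_FILE_COUNT);
    Long start = startingMetrics.get(name);
    // Same subtraction as assertStoreMetricEquals: current value minus the starting value, if any.
    return RegionMetricsStorage.getNumericMetric(name) - (start != null ? start : 0);
  }
}
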
Project: LCIndex-HBase-0.94.16    File: TestRegionServerMetrics.java
@Test
public void testMultipleRegions() throws IOException, InterruptedException {

  TEST_UTIL.createRandomTable(
      TABLE_NAME,
      Arrays.asList(FAMILIES),
      MAX_VERSIONS, NUM_COLS_PER_ROW, NUM_FLUSHES, NUM_REGIONS, 1000);

  final HRegionServer rs =
      TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);

  assertEquals(NUM_REGIONS + META_AND_ROOT, rs.getOnlineRegions().size());

  rs.doMetrics();
  for (HRegion r : TEST_UTIL.getMiniHBaseCluster().getRegions(
      Bytes.toBytes(TABLE_NAME))) {
    for (Map.Entry<byte[], Store> storeEntry : r.getStores().entrySet()) {
      LOG.info("For region " + r.getRegionNameAsString() + ", CF " +
          Bytes.toStringBinary(storeEntry.getKey()) + " found store files " +
          ": " + storeEntry.getValue().getStorefiles());
    }
  }

  assertStoreMetricEquals(NUM_FLUSHES * NUM_REGIONS * FAMILIES.length
      + META_AND_ROOT, ALL_METRICS, StoreMetricType.STORE_FILE_COUNT);

  for (String cf : FAMILIES) {
    SchemaMetrics schemaMetrics = SchemaMetrics.getInstance(TABLE_NAME, cf);
    assertStoreMetricEquals(NUM_FLUSHES * NUM_REGIONS, schemaMetrics,
        StoreMetricType.STORE_FILE_COUNT);
  }

  // ensure that the max value is also maintained
  final String storeMetricName = ALL_METRICS
      .getStoreMetricNameMax(StoreMetricType.STORE_FILE_COUNT);
  assertEquals("Invalid value for store metric " + storeMetricName,
      NUM_FLUSHES, RegionMetricsStorage.getNumericMetric(storeMetricName));
}
Project: LCIndex-HBase-0.94.16    File: TestBlocksScanned.java
private void _testBlocksScanned(HTableDescriptor table) throws Exception {
  HRegion r = createNewHRegion(table, START_KEY, END_KEY,
      TEST_UTIL.getConfiguration());
  addContent(r, FAMILY, COL);
  r.flushcache();

  // Get the per-cf metrics
  SchemaMetrics schemaMetrics =
    SchemaMetrics.getInstance(Bytes.toString(table.getName()), Bytes.toString(FAMILY));
  Map<String, Long> schemaMetricSnapshot = SchemaMetrics.getMetricsSnapshot();

  // Do simple test of getting one row only first.
  Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz"));
  scan.addColumn(FAMILY, COL);
  scan.setMaxVersions(1);

  InternalScanner s = r.getScanner(scan);
  List<KeyValue> results = new ArrayList<KeyValue>();
  while (s.next(results));
  s.close();

  int expectResultSize = 'z' - 'a';
  Assert.assertEquals(expectResultSize, results.size());

  int kvPerBlock = (int) Math.ceil(BLOCK_SIZE / (double) results.get(0).getLength());
  Assert.assertEquals(2, kvPerBlock);

  long expectDataBlockRead = (long) Math.ceil(expectResultSize / (double) kvPerBlock);
  long expectIndexBlockRead = expectDataBlockRead;

  verifyDataAndIndexBlockRead(schemaMetricSnapshot, schemaMetrics,
      expectDataBlockRead, expectIndexBlockRead);
}
Project: LCIndex-HBase-0.94.16    File: TestBlocksScanned.java
private void verifyDataAndIndexBlockRead(Map<String, Long> previousMetricSnapshot,
    SchemaMetrics schemaMetrics, long expectDataBlockRead, long expectedIndexBlockRead){
  Map<String, Long> currentMetricsSnapshot = SchemaMetrics.getMetricsSnapshot();
  Map<String, Long> diffs =
    SchemaMetrics.diffMetrics(previousMetricSnapshot, currentMetricsSnapshot);

  long dataBlockRead = SchemaMetrics.getLong(diffs,
      schemaMetrics.getBlockMetricName(BlockCategory.DATA, false, BlockMetricType.READ_COUNT));
  long indexBlockRead = SchemaMetrics.getLong(diffs,
      schemaMetrics.getBlockMetricName(BlockCategory.INDEX, false, BlockMetricType.READ_COUNT));

  Assert.assertEquals(expectDataBlockRead, dataBlockRead);
  Assert.assertEquals(expectedIndexBlockRead, indexBlockRead);
}
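
verifyDataAndIndexBlockRead demonstrates the snapshot/diff idiom on SchemaMetrics: snapshot the metrics, run the workload, snapshot again, diff, and read individual block metrics from the diff. A condensed sketch of that sequence (the enum import locations follow the usual 0.94 layout and are assumptions; doSomeReads stands in for the scan performed in _testBlocksScanned):

import java.util.Map;
import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.BlockMetricType;

public class BlockReadDelta {
  /** DATA block reads observed for one table/CF while the given workload runs. */
  static long dataBlocksReadDuring(String table, String cf, Runnable doSomeReads) {
    SchemaMetrics schemaMetrics = SchemaMetrics.getInstance(table, cf);
    Map<String, Long> before = SchemaMetrics.getMetricsSnapshot();
    doSomeReads.run();  // e.g. the scan in _testBlocksScanned above
    Map<String, Long> after = SchemaMetrics.getMetricsSnapshot();
    Map<String, Long> diffs = SchemaMetrics.diffMetrics(before, after);
    // The boolean flag mirrors the 'false' passed in the test above.
    return SchemaMetrics.getLong(diffs,
        schemaMetrics.getBlockMetricName(BlockCategory.DATA, false, BlockMetricType.READ_COUNT));
  }
}
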
Project: LCIndex-HBase-0.94.16    File: TestMemStore.java
@Override
public void setUp() throws Exception {
  super.setUp();
  this.mvcc = new MultiVersionConsistencyControl();
  this.memstore = new MemStore();
  SchemaMetrics.setUseTableNameInTest(false);
}
Project: LCIndex-HBase-0.94.16    File: TestHFileReaderV1.java
@Before
public void setUp() throws IOException {
  startingMetrics = SchemaMetrics.getMetricsSnapshot();
  conf = TEST_UTIL.getConfiguration();
  fs = FileSystem.get(conf);
  SchemaMetrics.configureGlobally(conf);
}
Project: LCIndex-HBase-0.94.16    File: TestCachedBlockQueue.java
public CachedBlock(final long heapSize, String name, long accessTime) {
  super(new BlockCacheKey(name, 0),
      new Cacheable() {
        @Override
        public long heapSize() {
          return ((int)(heapSize - CachedBlock.PER_BLOCK_OVERHEAD));
        }

        @Override
        public int getSerializedLength() {
          return 0;
        }

        @Override
        public void serialize(ByteBuffer destination) {
        }

        @Override
        public CacheableDeserializer<Cacheable> getDeserializer() {
          // TODO Auto-generated method stub
          return null;
        }

        @Override
        public BlockType getBlockType() {
          return BlockType.DATA;
        }

        @Override
        public SchemaMetrics getSchemaMetrics() {
          return SchemaMetrics.ALL_SCHEMA_METRICS;
        }
      }, accessTime, false);
}
Project: IRIndex    File: StoreScanner.java
/**
 * Method used internally to initialize metric names throughout the
 * constructors.
 *
 * To be called after the store variable has been initialized!
 */
private void initializeMetricNames() {
  String tableName = SchemaMetrics.UNKNOWN;
  String family = SchemaMetrics.UNKNOWN;
  if (store != null) {
    tableName = store.getTableName();
    family = Bytes.toString(store.getFamily().getName());
  }
  this.metricNamePrefix =
      SchemaMetrics.generateSchemaMetricsPrefix(tableName, family);
}
Project: IRIndex    File: HFileWriterV1.java
/** Constructor that takes a path, creates and closes the output stream. */
public HFileWriterV1(Configuration conf, CacheConfig cacheConf,
    FileSystem fs, Path path, FSDataOutputStream ostream,
    int blockSize, Compression.Algorithm compress,
    HFileDataBlockEncoder blockEncoder,
    final KeyComparator comparator) throws IOException {
  super(cacheConf, ostream == null ? createOutputStream(conf, fs, path) : ostream, path,
      blockSize, compress, blockEncoder, comparator);
  SchemaMetrics.configureGlobally(conf);
}
Project: IRIndex    File: HFile.java
/**
 * Returns the factory to be used to create {@link HFile} writers
 */
public static final WriterFactory getWriterFactory(Configuration conf,
    CacheConfig cacheConf) {
  SchemaMetrics.configureGlobally(conf);
  int version = getFormatVersion(conf);
  switch (version) {
  case 1:
    return new HFileWriterV1.WriterFactoryV1(conf, cacheConf);
  case 2:
    return new HFileWriterV2.WriterFactoryV2(conf, cacheConf);
  default:
    throw new IllegalArgumentException("Cannot create writer for HFile " +
        "format version " + version);
  }
}
Project: IRIndex    File: LruBlockCache.java
/**
 * Helper function that updates the local size counter and also updates any
 * per-cf or per-blocktype metrics it can discern from the given
 * {@link CachedBlock}
 *
 * @param cb
 * @param evict
 */
protected long updateSizeMetrics(CachedBlock cb, boolean evict) {
  long heapsize = cb.heapSize();
  if (evict) {
    heapsize *= -1;
  }
  Cacheable cachedBlock = cb.getBuffer();
  SchemaMetrics schemaMetrics = cachedBlock.getSchemaMetrics();
  if (schemaMetrics != null) {
    schemaMetrics.updateOnCachePutOrEvict(
        cachedBlock.getBlockType().getCategory(), heapsize, evict);
  }
  return size.addAndGet(heapsize);
}
Project: IRIndex    File: TestHRegion.java
/**
 * Test case to check whether the region initialization task state is set to
 * ABORTED when any exception occurs during initialization
 * 
 * @throws Exception
 */
@Test
public void testStatusSettingToAbortIfAnyExceptionDuringRegionInitilization() throws Exception {
  HRegionInfo info = null;
  try {
    FileSystem fs = Mockito.mock(FileSystem.class);
    Mockito.when(fs.exists((Path) Mockito.anyObject())).thenThrow(new IOException());
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor("cf"));
    info = new HRegionInfo(htd.getName(), HConstants.EMPTY_BYTE_ARRAY,
        HConstants.EMPTY_BYTE_ARRAY, false);
    Path path = new Path(DIR + "testStatusSettingToAbortIfAnyExceptionDuringRegionInitilization");
    // Nowhere in this test case do we instantiate HStore, so useTableNameGlobally is null. To
    // avoid a NullPointerException we set useTableNameGlobally to false.
    SchemaMetrics.setUseTableNameInTest(false);
    region = HRegion.newHRegion(path, null, fs, conf, info, htd, null);
    // region initialization throws IOException and set task state to ABORTED.
    region.initialize();
    fail("Region initialization should fail due to IOException");
  } catch (IOException io) {
    List<MonitoredTask> tasks = TaskMonitor.get().getTasks();
    for (MonitoredTask monitoredTask : tasks) {
      if (!(monitoredTask instanceof MonitoredRPCHandler)
          && monitoredTask.getDescription().contains(region.toString())) {
        assertTrue("Region state should be ABORTED.",
            monitoredTask.getState().equals(MonitoredTask.State.ABORTED));
        break;
      }
    }
  } finally {
    HRegion.closeHRegion(region);
  }
}
Project: IRIndex    File: TestRegionServerMetrics.java
private void assertStoreMetricEquals(long expected,
    SchemaMetrics schemaMetrics, StoreMetricType storeMetricType) {
  final String storeMetricName =
      schemaMetrics.getStoreMetricName(storeMetricType);
  Long startValue = startingMetrics.get(storeMetricName);
  assertEquals("Invalid value for store metric " + storeMetricName
      + " (type " + storeMetricType + ")", expected,
      RegionMetricsStorage.getNumericMetric(storeMetricName)
          - (startValue != null ? startValue : 0));
}
Project: IRIndex    File: TestRegionServerMetrics.java
@Test
public void testMultipleRegions() throws IOException, InterruptedException {

  TEST_UTIL.createRandomTable(
      TABLE_NAME,
      Arrays.asList(FAMILIES),
      MAX_VERSIONS, NUM_COLS_PER_ROW, NUM_FLUSHES, NUM_REGIONS, 1000);

  final HRegionServer rs =
      TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);

  assertEquals(NUM_REGIONS + META_AND_ROOT, rs.getOnlineRegions().size());

  rs.doMetrics();
  for (HRegion r : TEST_UTIL.getMiniHBaseCluster().getRegions(
      Bytes.toBytes(TABLE_NAME))) {
    for (Map.Entry<byte[], Store> storeEntry : r.getStores().entrySet()) {
      LOG.info("For region " + r.getRegionNameAsString() + ", CF " +
          Bytes.toStringBinary(storeEntry.getKey()) + " found store files " +
          ": " + storeEntry.getValue().getStorefiles());
    }
  }

  assertStoreMetricEquals(NUM_FLUSHES * NUM_REGIONS * FAMILIES.length
      + META_AND_ROOT, ALL_METRICS, StoreMetricType.STORE_FILE_COUNT);

  for (String cf : FAMILIES) {
    SchemaMetrics schemaMetrics = SchemaMetrics.getInstance(TABLE_NAME, cf);
    assertStoreMetricEquals(NUM_FLUSHES * NUM_REGIONS, schemaMetrics,
        StoreMetricType.STORE_FILE_COUNT);
  }

  // ensure that the max value is also maintained
  final String storeMetricName = ALL_METRICS
      .getStoreMetricNameMax(StoreMetricType.STORE_FILE_COUNT);
  assertEquals("Invalid value for store metric " + storeMetricName,
      NUM_FLUSHES, RegionMetricsStorage.getNumericMetric(storeMetricName));
}
Project: IRIndex    File: TestBlocksScanned.java
private void _testBlocksScanned(HTableDescriptor table) throws Exception {
  HRegion r = createNewHRegion(table, START_KEY, END_KEY,
      TEST_UTIL.getConfiguration());
  addContent(r, FAMILY, COL);
  r.flushcache();

  // Get the per-cf metrics
  SchemaMetrics schemaMetrics =
    SchemaMetrics.getInstance(Bytes.toString(table.getName()), Bytes.toString(FAMILY));
  Map<String, Long> schemaMetricSnapshot = SchemaMetrics.getMetricsSnapshot();

  // Do simple test of getting one row only first.
  Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz"));
  scan.addColumn(FAMILY, COL);
  scan.setMaxVersions(1);

  InternalScanner s = r.getScanner(scan);
  List<KeyValue> results = new ArrayList<KeyValue>();
  while (s.next(results));
  s.close();

  int expectResultSize = 'z' - 'a';
  Assert.assertEquals(expectResultSize, results.size());

  int kvPerBlock = (int) Math.ceil(BLOCK_SIZE / (double) results.get(0).getLength());
  Assert.assertEquals(2, kvPerBlock);

  long expectDataBlockRead = (long) Math.ceil(expectResultSize / (double) kvPerBlock);
  long expectIndexBlockRead = expectDataBlockRead;

  verifyDataAndIndexBlockRead(schemaMetricSnapshot, schemaMetrics,
      expectDataBlockRead, expectIndexBlockRead);
}
Project: IRIndex    File: TestBlocksScanned.java
private void verifyDataAndIndexBlockRead(Map<String, Long> previousMetricSnapshot,
    SchemaMetrics schemaMetrics, long expectDataBlockRead, long expectedIndexBlockRead){
  Map<String, Long> currentMetricsSnapshot = SchemaMetrics.getMetricsSnapshot();
  Map<String, Long> diffs =
    SchemaMetrics.diffMetrics(previousMetricSnapshot, currentMetricsSnapshot);

  long dataBlockRead = SchemaMetrics.getLong(diffs,
      schemaMetrics.getBlockMetricName(BlockCategory.DATA, false, BlockMetricType.READ_COUNT));
  long indexBlockRead = SchemaMetrics.getLong(diffs,
      schemaMetrics.getBlockMetricName(BlockCategory.INDEX, false, BlockMetricType.READ_COUNT));

  Assert.assertEquals(expectDataBlockRead, dataBlockRead);
  Assert.assertEquals(expectedIndexBlockRead, indexBlockRead);
}
Project: IRIndex    File: TestMemStore.java
@Override
public void setUp() throws Exception {
  super.setUp();
  this.mvcc = new MultiVersionConsistencyControl();
  this.memstore = new MemStore();
  SchemaMetrics.setUseTableNameInTest(false);
}
Project: IRIndex    File: TestHFileReaderV1.java
@Before
public void setUp() throws IOException {
  startingMetrics = SchemaMetrics.getMetricsSnapshot();
  conf = TEST_UTIL.getConfiguration();
  fs = FileSystem.get(conf);
  SchemaMetrics.configureGlobally(conf);
}
Project: IRIndex    File: TestCachedBlockQueue.java
public CachedBlock(final long heapSize, String name, long accessTime) {
  super(new BlockCacheKey(name, 0),
      new Cacheable() {
        @Override
        public long heapSize() {
          return ((int)(heapSize - CachedBlock.PER_BLOCK_OVERHEAD));
        }

        @Override
        public int getSerializedLength() {
          return 0;
        }

        @Override
        public void serialize(ByteBuffer destination) {
        }

        @Override
        public CacheableDeserializer<Cacheable> getDeserializer() {
          // TODO Auto-generated method stub
          return null;
        }

        @Override
        public BlockType getBlockType() {
          return BlockType.DATA;
        }

        @Override
        public SchemaMetrics getSchemaMetrics() {
          return SchemaMetrics.ALL_SCHEMA_METRICS;
        }
      }, accessTime, false);
}
Project: HBase-Research    File: StoreScanner.java
/**
 * Method used internally to initialize metric names throughout the
 * constructors.
 *
 * To be called after the store variable has been initialized!
 */
private void initializeMetricNames() {
  String tableName = SchemaMetrics.UNKNOWN;
  String family = SchemaMetrics.UNKNOWN;
  if (store != null) {
    tableName = store.getTableName();
    family = Bytes.toString(store.getFamily().getName());
  }
  this.metricNamePrefix =
      SchemaMetrics.generateSchemaMetricsPrefix(tableName, family);
}
Project: HBase-Research    File: HFileWriterV1.java
/** Constructor that takes a path, creates and closes the output stream. */
public HFileWriterV1(Configuration conf, CacheConfig cacheConf,
    FileSystem fs, Path path, FSDataOutputStream ostream,
    int blockSize, Compression.Algorithm compress,
    HFileDataBlockEncoder blockEncoder,
    final KeyComparator comparator) throws IOException {
  super(cacheConf, ostream == null ? createOutputStream(conf, fs, path) : ostream, path,
      blockSize, compress, blockEncoder, comparator);
  SchemaMetrics.configureGlobally(conf);
}
Project: HBase-Research    File: HFile.java
/**
 * Returns the factory to be used to create {@link HFile} writers
 */
public static final WriterFactory getWriterFactory(Configuration conf,
    CacheConfig cacheConf) {
  SchemaMetrics.configureGlobally(conf);
  int version = getFormatVersion(conf);
  switch (version) {
  case 1:
    return new HFileWriterV1.WriterFactoryV1(conf, cacheConf);
  case 2:
    return new HFileWriterV2.WriterFactoryV2(conf, cacheConf);
  default:
    throw new IllegalArgumentException("Cannot create writer for HFile " +
        "format version " + version);
  }
}
Project: HBase-Research    File: LruBlockCache.java
/**
 * Helper function that updates the local size counter and also updates any
 * per-cf or per-blocktype metrics it can discern from the given
 * {@link CachedBlock}
 *
 * @param cb
 * @param evict
 */
protected long updateSizeMetrics(CachedBlock cb, boolean evict) {
  long heapsize = cb.heapSize();
  if (evict) {
    heapsize *= -1;
  }
  Cacheable cachedBlock = cb.getBuffer();
  SchemaMetrics schemaMetrics = cachedBlock.getSchemaMetrics();
  if (schemaMetrics != null) {
    schemaMetrics.updateOnCachePutOrEvict(
        cachedBlock.getBlockType().getCategory(), heapsize, evict);
  }
  return size.addAndGet(heapsize);
}
Project: hindex    File: TestRegionServerMetrics.java
@Test
public void testMultipleRegions() throws IOException, InterruptedException {

  TEST_UTIL.createRandomTable(
      TABLE_NAME,
      Arrays.asList(FAMILIES),
      MAX_VERSIONS, NUM_COLS_PER_ROW, NUM_FLUSHES, NUM_REGIONS, 1000);

  final HRegionServer rs =
      TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);

  assertEquals(NUM_REGIONS + META_AND_ROOT, rs.getOnlineRegions().size());

  rs.doMetrics();
  for (HRegion r : TEST_UTIL.getMiniHBaseCluster().getRegions(
      Bytes.toBytes(TABLE_NAME))) {
    for (Map.Entry<byte[], Store> storeEntry : r.getStores().entrySet()) {
      LOG.info("For region " + r.getRegionNameAsString() + ", CF " +
          Bytes.toStringBinary(storeEntry.getKey()) + " found store files " +
          ": " + storeEntry.getValue().getStorefiles());
    }
  }

  assertStoreMetricEquals(NUM_FLUSHES * NUM_REGIONS * FAMILIES.length
      + META_AND_ROOT, ALL_METRICS, StoreMetricType.STORE_FILE_COUNT);

  for (String cf : FAMILIES) {
    SchemaMetrics schemaMetrics = SchemaMetrics.getInstance(TABLE_NAME, cf);
    assertStoreMetricEquals(NUM_FLUSHES * NUM_REGIONS, schemaMetrics,
        StoreMetricType.STORE_FILE_COUNT);
  }

  // ensure that the max value is also maintained
  final String storeMetricName = ALL_METRICS
      .getStoreMetricNameMax(StoreMetricType.STORE_FILE_COUNT);
  assertEquals("Invalid value for store metric " + storeMetricName,
      NUM_FLUSHES, RegionMetricsStorage.getNumericMetric(storeMetricName));
}