Java 类org.apache.hadoop.hbase.regionserver.compactions.OffPeakHours 实例源码

项目:hbase    文件:PressureAwareFlushThroughputController.java   
/**
 * Reads the flush-throughput control settings from the given configuration:
 * upper/lower throughput bounds, off-peak hours, the per-size control check
 * interval, and the tuning period. Starts throttling at the lower bound;
 * the tuner adjusts upward from there. A {@code null} configuration is a
 * no-op beyond the {@code super} call.
 *
 * @param conf configuration to read from; may be {@code null}
 */
@Override
public void setConf(Configuration conf) {
  super.setConf(conf);
  if (conf == null) {
    return;
  }
  this.maxThroughputUpperBound =
      conf.getLong(HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_UPPER_BOUND,
        DEFAULT_HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_UPPER_BOUND);
  this.maxThroughputLowerBound =
      conf.getLong(HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_LOWER_BOUND,
        DEFAULT_HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_LOWER_BOUND);
  this.offPeakHours = OffPeakHours.getInstance(conf);
  this.controlPerSize =
      conf.getLong(HBASE_HSTORE_FLUSH_THROUGHPUT_CONTROL_CHECK_INTERVAL,
        DEFAULT_HBASE_HSTORE_FLUSH_THROUGHPUT_CONTROL_CHECK_INTERVAL);
  // Start conservatively at the lower bound; tuning raises it as needed.
  this.setMaxThroughput(this.maxThroughputLowerBound);
  // Read via 'conf' for consistency with the other lookups above
  // (super.setConf(conf) already stored it, so getConf() returns the same object).
  this.tuningPeriod =
      conf.getInt(HBASE_HSTORE_FLUSH_THROUGHPUT_TUNE_PERIOD,
        DEFAULT_HSTORE_FLUSH_THROUGHPUT_TUNE_PERIOD);
  LOG.info("Flush throughput configurations, upper bound: "
      + throughputDesc(maxThroughputUpperBound) + ", lower bound "
      + throughputDesc(maxThroughputLowerBound) + ", tuning period: " + tuningPeriod + " ms");
}
项目:pbase    文件:HStore.java   
/**
 * {@inheritDoc}
 * <p>
 * Rebuilds this store's layered configuration from the updated base config
 * plus the column-family value overrides, then propagates the new settings
 * to the compaction policy and refreshes the off-peak schedule.
 */
@Override
public void onConfigurationChange(Configuration conf) {
    // CompoundConfiguration.add/addWritableMap mutate and return the same
    // instance, so building it stepwise is equivalent to the chained form.
    CompoundConfiguration layered = new CompoundConfiguration();
    layered.add(conf);
    layered.addWritableMap(family.getValues());
    this.conf = layered;
    this.storeEngine.compactionPolicy.setConf(conf);
    this.offPeakHours = OffPeakHours.getInstance(conf);
}
项目:hbase    文件:PressureAwareCompactionThroughputController.java   
/**
 * Reads the compaction-throughput control settings from the given
 * configuration: higher/lower throughput bounds, the off-peak throughput
 * ceiling and off-peak hours, the per-size control check interval, and the
 * tuning period. Starts throttling at the lower bound; the tuner adjusts
 * upward from there. A {@code null} configuration is a no-op beyond the
 * {@code super} call.
 *
 * @param conf configuration to read from; may be {@code null}
 */
@Override
public void setConf(Configuration conf) {
  super.setConf(conf);
  if (conf == null) {
    return;
  }
  this.maxThroughputUpperBound =
      conf.getLong(HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND,
        DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND);
  this.maxThroughputLowerBound =
      conf.getLong(HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND,
        DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND);
  this.maxThroughputOffpeak =
      conf.getLong(HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_OFFPEAK,
        DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_OFFPEAK);
  this.offPeakHours = OffPeakHours.getInstance(conf);
  // NOTE(review): the default here is the lower throughput bound rather than a
  // dedicated DEFAULT_* check-interval constant (unlike the flush controller) —
  // this depends on maxThroughputLowerBound being assigned above; confirm intended.
  this.controlPerSize =
      conf.getLong(HBASE_HSTORE_COMPACTION_THROUGHPUT_CONTROL_CHECK_INTERVAL,
        this.maxThroughputLowerBound);
  // Start conservatively at the lower bound; tuning raises it as needed.
  this.setMaxThroughput(this.maxThroughputLowerBound);
  // Read via 'conf' for consistency with the other lookups above
  // (super.setConf(conf) already stored it, so getConf() returns the same object).
  this.tuningPeriod =
      conf.getInt(HBASE_HSTORE_COMPACTION_THROUGHPUT_TUNE_PERIOD,
        DEFAULT_HSTORE_COMPACTION_THROUGHPUT_TUNE_PERIOD);
  LOG.info("Compaction throughput configurations, higher bound: "
      + throughputDesc(maxThroughputUpperBound) + ", lower bound "
      + throughputDesc(maxThroughputLowerBound) + ", off peak: "
      + throughputDesc(maxThroughputOffpeak) + ", tuning period: " + tuningPeriod + " ms");
}
项目:hbase    文件:HStore.java   
/**
 * {@inheritDoc}
 * <p>
 * Rebuilds this store's layered configuration from the updated base config
 * plus the column-family value overrides, then propagates the new settings
 * to the compaction policy and refreshes the off-peak schedule.
 */
@Override
public void onConfigurationChange(Configuration conf) {
  // CompoundConfiguration.add/addBytesMap mutate and return the same
  // instance, so building it stepwise is equivalent to the chained form.
  CompoundConfiguration layered = new CompoundConfiguration();
  layered.add(conf);
  layered.addBytesMap(family.getValues());
  this.conf = layered;
  this.storeEngine.compactionPolicy.setConf(conf);
  this.offPeakHours = OffPeakHours.getInstance(conf);
}
项目:ditb    文件:HStore.java   
/** @return the off-peak hours schedule currently configured for this store. */
protected OffPeakHours getOffPeakHours() {
  return offPeakHours;
}
项目:ditb    文件:HStore.java   
/**
 * {@inheritDoc}
 * <p>
 * Rebuilds this store's layered configuration from the updated base config
 * plus the column-family value overrides, then propagates the new settings
 * to the compaction policy and refreshes the off-peak schedule.
 */
@Override
public void onConfigurationChange(Configuration conf) {
  this.conf = new CompoundConfiguration()
      .add(conf)
      .addWritableMap(family.getValues());
  this.storeEngine.compactionPolicy.setConf(conf);
  this.offPeakHours = OffPeakHours.getInstance(conf);
}
项目:pbase    文件:HStore.java   
/** @return the off-peak hours schedule currently configured for this store. */
protected OffPeakHours getOffPeakHours() {
    return offPeakHours;
}
项目:hbase    文件:HStore.java   
/** @return the off-peak hours schedule currently configured for this store. */
protected OffPeakHours getOffPeakHours() {
  return offPeakHours;
}
项目:c5    文件:HStore.java   
/**
 * Constructor. Builds the store for one column family of the given region:
 * creates the store directory, layers the configuration (global config, then
 * table overrides, then CF overrides, then CF metadata), and initializes the
 * memstore, cache config, store engine and store files.
 *
 * @param region the region this store belongs to
 * @param family HColumnDescriptor for this column family
 * @param confParam base configuration; table/CF settings are layered on top
 * @throws IOException if the store directory cannot be created or store files
 *         fail to load
 */
protected HStore(final HRegion region, final HColumnDescriptor family,
    final Configuration confParam) throws IOException {

  HRegionInfo info = region.getRegionInfo();
  this.fs = region.getRegionFileSystem();

  // Assemble the store's home directory and Ensure it exists.
  fs.createStoreDir(family.getNameAsString());
  this.region = region;
  this.family = family;
  // 'conf' renamed to 'confParam' b/c we use this.conf in the constructor
  // CompoundConfiguration will look for keys in reverse order of addition, so we'd
  // add global config first, then table and cf overrides, then cf metadata.
  this.conf = new CompoundConfiguration()
    .add(confParam)
    .addStringMap(region.getTableDesc().getConfiguration())
    .addStringMap(family.getConfiguration())
    .addWritableMap(family.getValues());
  this.blocksize = family.getBlocksize();

  this.dataBlockEncoder =
      new HFileDataBlockEncoderImpl(family.getDataBlockEncoding());

  this.comparator = info.getComparator();
  // used by ScanQueryMatcher
  // Math.max(..., 0) clamps a negative configured value to 0 (never purge early).
  long timeToPurgeDeletes =
      Math.max(conf.getLong("hbase.hstore.time.to.purge.deletes", 0), 0);
  LOG.trace("Time to purge deletes set to " + timeToPurgeDeletes +
      "ms in store " + this);
  // Get TTL
  long ttl = determineTTLFromFamily(family);
  // Why not just pass a HColumnDescriptor in here altogether?  Even if have
  // to clone it?
  scanInfo = new ScanInfo(family, ttl, timeToPurgeDeletes, this.comparator);
  this.memstore = new MemStore(conf, this.comparator);
  this.offPeakHours = OffPeakHours.getInstance(conf);

  // Setting up cache configuration for this family
  this.cacheConf = new CacheConfig(conf, family);

  this.verifyBulkLoads = conf.getBoolean("hbase.hstore.bulkload.verify", false);

  this.blockingFileCount =
      conf.getInt(BLOCKING_STOREFILES_KEY, DEFAULT_BLOCKING_STOREFILE_COUNT);
  this.compactionCheckMultiplier = conf.getInt(
      COMPACTCHECKER_INTERVAL_MULTIPLIER_KEY, DEFAULT_COMPACTCHECKER_INTERVAL_MULTIPLIER);
  // A non-positive multiplier is invalid; log and fall back to the default
  // rather than failing region open.
  if (this.compactionCheckMultiplier <= 0) {
    LOG.error("Compaction check period multiplier must be positive, setting default: "
        + DEFAULT_COMPACTCHECKER_INTERVAL_MULTIPLIER);
    this.compactionCheckMultiplier = DEFAULT_COMPACTCHECKER_INTERVAL_MULTIPLIER;
  }

  // Static field: initialized once per JVM by the first store constructed.
  // NOTE(review): this lazy init is unsynchronized — presumably benign because
  // all racers write the same conf-derived value; confirm.
  if (HStore.closeCheckInterval == 0) {
    HStore.closeCheckInterval = conf.getInt(
        "hbase.hstore.close.check.interval", 10*1000*1000 /* 10 MB */);
  }

  // Uses the layered this.conf (not confParam) so CF/table overrides apply.
  this.storeEngine = StoreEngine.create(this, this.conf, this.comparator);
  this.storeEngine.getStoreFileManager().loadFiles(loadStoreFiles());

  // Initialize checksum type from name. The names are CRC32, CRC32C, etc.
  this.checksumType = getChecksumType(conf);
  // initilize bytes per checksum
  this.bytesPerChecksum = getBytesPerChecksum(conf);
  flushRetriesNumber = conf.getInt(
      "hbase.hstore.flush.retries.number", DEFAULT_FLUSH_RETRIES_NUMBER);
  pauseTime = conf.getInt(HConstants.HBASE_SERVER_PAUSE, HConstants.DEFAULT_HBASE_SERVER_PAUSE);
  // Unlike the compaction multiplier above, an invalid retry count is fatal.
  if (flushRetriesNumber <= 0) {
    throw new IllegalArgumentException(
        "hbase.hstore.flush.retries.number must be > 0, not "
            + flushRetriesNumber);
  }
}