Java 类org.apache.hadoop.hbase.regionserver.compactions.CompactionPolicy 实例源码

项目:ditb    文件:StoreEngine.java   
/**
 * Exposes the compaction policy this store engine was configured with.
 *
 * @return the {@link CompactionPolicy} in use
 */
public CompactionPolicy getCompactionPolicy() {
  final CompactionPolicy policy = this.compactionPolicy;
  return policy;
}
项目:pbase    文件:StoreEngine.java   
/**
 * Returns the compaction policy currently associated with this engine.
 *
 * @return the configured {@link CompactionPolicy}
 */
public CompactionPolicy getCompactionPolicy() {
  final CompactionPolicy current = this.compactionPolicy;
  return current;
}
项目:HIndex    文件:StoreEngine.java   
/**
 * Accessor for the engine's compaction policy.
 *
 * @return the {@link CompactionPolicy} this engine applies
 */
public CompactionPolicy getCompactionPolicy() {
  final CompactionPolicy result = this.compactionPolicy;
  return result;
}
项目:hbase    文件:StoreEngine.java   
/**
 * Supplies the compaction policy held by this store engine.
 *
 * @return the active {@link CompactionPolicy}
 */
public CompactionPolicy getCompactionPolicy() {
  final CompactionPolicy active = this.compactionPolicy;
  return active;
}
项目:PyroDB    文件:StoreEngine.java   
/**
 * Gets the compaction policy assigned to this engine instance.
 *
 * @return the {@link CompactionPolicy} reference held by this engine
 */
public CompactionPolicy getCompactionPolicy() {
  final CompactionPolicy assigned = this.compactionPolicy;
  return assigned;
}
项目:c5    文件:StoreEngine.java   
/**
 * Retrieves this engine's compaction policy.
 *
 * @return the {@link CompactionPolicy} instance to use
 */
public CompactionPolicy getCompactionPolicy() {
  final CompactionPolicy cp = this.compactionPolicy;
  return cp;
}
项目:DominoHBase    文件:HStore.java   
/**
 * Constructor. Builds the store's merged configuration, creates its home
 * directory, loads any existing store files, and wires up the memstore,
 * cache config, compactor, and compaction policy for this column family.
 *
 * @param basedir qualified path under which the region directory lives;
 * generally the table subdirectory
 * @param region the region this store belongs to
 * @param family HColumnDescriptor for this column family
 * @param fs file system object
 * @param confParam configuration object; merged with the family's values
 * below, so the merged copy (not this parameter) is what the store reads
 * @throws IOException if the store home directory cannot be created or
 * existing store files cannot be loaded
 */
protected HStore(Path basedir, HRegion region, HColumnDescriptor family,
    FileSystem fs, Configuration confParam)
throws IOException {

  HRegionInfo info = region.getRegionInfo();
  this.fs = fs;
  // Assemble the store's home directory.
  Path p = getStoreHomedir(basedir, info.getEncodedName(), family.getName());
  // Ensure it exists.
  this.homedir = createStoreHomeDir(this.fs, p);
  this.region = region;
  this.family = family;
  // 'conf' renamed to 'confParam' b/c we use this.conf in the constructor.
  // Family-level values layered on top so they override the base config.
  this.conf = new CompoundConfiguration()
    .add(confParam)
    .add(family.getValues());
  this.blocksize = family.getBlocksize();

  this.dataBlockEncoder =
      new HFileDataBlockEncoderImpl(family.getDataBlockEncodingOnDisk(),
          family.getDataBlockEncoding());

  this.comparator = info.getComparator();
  // Get TTL
  this.ttl = determineTTLFromFamily(family);
  // used by ScanQueryMatcher; Math.max clamps negative config values to 0
  long timeToPurgeDeletes =
      Math.max(conf.getLong("hbase.hstore.time.to.purge.deletes", 0), 0);
  LOG.trace("Time to purge deletes set to " + timeToPurgeDeletes +
      "ms in store " + this);
  // Why not just pass a HColumnDescriptor in here altogether?  Even if have
  // to clone it?
  scanInfo = new ScanInfo(family, ttl, timeToPurgeDeletes, this.comparator);
  this.memstore = new MemStore(conf, this.comparator);

  // Setting up cache configuration for this family
  this.cacheConf = new CacheConfig(conf, family);
  this.blockingStoreFileCount =
    conf.getInt("hbase.hstore.blockingStoreFiles", 7);

  this.verifyBulkLoads = conf.getBoolean("hbase.hstore.bulkload.verify", false);

  // Static field: initialized once for all stores in the process; only the
  // first store to construct sets it (no lock — NOTE(review): confirm this
  // lazy init is intentionally racy / benign).
  if (HStore.closeCheckInterval == 0) {
    HStore.closeCheckInterval = conf.getInt(
        "hbase.hstore.close.check.interval", 10*1000*1000 /* 10 MB */);
  }
  this.storefiles = sortAndClone(loadStoreFiles());

  // Initialize checksum type from name. The names are CRC32, CRC32C, etc.
  this.checksumType = getChecksumType(conf);
  // initialize bytes per checksum
  this.bytesPerChecksum = getBytesPerChecksum(conf);
  // Create a compaction tool instance
  this.compactor = new Compactor(conf);
  // Create a compaction manager.
  this.compactionPolicy = new CompactionPolicy(conf, this);
}