@Override protected void config() { super.config(); // Set up policy conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, "org.apache.hadoop.hbase.regionserver.DateTieredStoreEngine"); conf.setLong(CompactionConfiguration.DATE_TIERED_MAX_AGE_MILLIS_KEY, 100); conf.setLong(CompactionConfiguration.DATE_TIERED_INCOMING_WINDOW_MIN_KEY, 3); conf.setLong(ExponentialCompactionWindowFactory.BASE_WINDOW_MILLIS_KEY, 6); conf.setInt(ExponentialCompactionWindowFactory.WINDOWS_PER_TIER_KEY, 4); conf.setBoolean(CompactionConfiguration.DATE_TIERED_SINGLE_OUTPUT_FOR_MINOR_COMPACTION_KEY, false); // Special settings for compaction policy per window this.conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 2); this.conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 12); this.conf.setFloat(CompactionConfiguration.HBASE_HSTORE_COMPACTION_RATIO_KEY, 1.2F); conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 20); conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 5); }
@Override protected void config() { super.config(); // Set up policy conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, "org.apache.hadoop.hbase.regionserver.DateTieredStoreEngine"); conf.setLong(CompactionConfiguration.DATE_TIERED_MAX_AGE_MILLIS_KEY, 100); conf.setLong(CompactionConfiguration.DATE_TIERED_INCOMING_WINDOW_MIN_KEY, 3); conf.setLong(ExponentialCompactionWindowFactory.BASE_WINDOW_MILLIS_KEY, Long.MAX_VALUE / 2); conf.setInt(ExponentialCompactionWindowFactory.WINDOWS_PER_TIER_KEY, 2); conf.setBoolean(CompactionConfiguration.DATE_TIERED_SINGLE_OUTPUT_FOR_MINOR_COMPACTION_KEY, false); // Special settings for compaction policy per window this.conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 2); this.conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 12); this.conf.setFloat(CompactionConfiguration.HBASE_HSTORE_COMPACTION_RATIO_KEY, 1.2F); conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 20); conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 10); }
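The two config() overrides above exercise the date-tiered knobs at toy scale (windows a few milliseconds wide, or one giant window). For orientation, here is a minimal sketch of what production-scale values might look like. It assumes the usual imports (HBaseConfiguration, StoreEngine, CompactionConfiguration, ExponentialCompactionWindowFactory, java.util.concurrent.TimeUnit); the helper name, the one-year max age, and the six-hour base window are illustrative assumptions, not values taken from the tests above.

// Hypothetical helper: a Configuration with date-tiered compaction enabled
// using illustrative production-scale values, not the test values above.
static Configuration dateTieredConf() {
  Configuration conf = HBaseConfiguration.create();
  conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY,
    "org.apache.hadoop.hbase.regionserver.DateTieredStoreEngine");
  // Stop tiering data older than a year (illustrative value).
  conf.setLong(CompactionConfiguration.DATE_TIERED_MAX_AGE_MILLIS_KEY,
    TimeUnit.DAYS.toMillis(365));
  // Newest window spans six hours (illustrative value).
  conf.setLong(ExponentialCompactionWindowFactory.BASE_WINDOW_MILLIS_KEY,
    TimeUnit.HOURS.toMillis(6));
  // Each older tier's windows are four times wider than the previous tier's.
  conf.setInt(ExponentialCompactionWindowFactory.WINDOWS_PER_TIER_KEY, 4);
  return conf;
}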
private long testCompactionWithoutThroughputLimit() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, DefaultStoreEngine.class.getName());
  conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 100);
  conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 200);
  conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);
  conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY,
    NoLimitThroughputController.class.getName());
  TEST_UTIL.startMiniCluster(1);
  try {
    HStore store = prepareData();
    assertEquals(10, store.getStorefilesCount());
    long startTime = System.currentTimeMillis();
    TEST_UTIL.getAdmin().majorCompact(tableName);
    while (store.getStorefilesCount() != 1) {
      Thread.sleep(20);
    }
    return System.currentTimeMillis() - startTime;
  } finally {
    TEST_UTIL.shutdownMiniCluster();
  }
}
public StripeStoreConfig(Configuration config, StoreConfigInformation sci) {
  this.level0CompactMinFiles = config.getInt(MIN_FILES_L0_KEY, 4);
  this.flushIntoL0 = config.getBoolean(FLUSH_TO_L0_KEY, false);
  int minMinFiles = flushIntoL0 ? 3 : 4; // make sure not to compact tiny files too often.
  int minFiles = config.getInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, -1);
  this.stripeCompactMinFiles = config.getInt(MIN_FILES_KEY, Math.max(minMinFiles, minFiles));
  this.stripeCompactMaxFiles = config.getInt(MAX_FILES_KEY,
    config.getInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 10));
  this.maxRegionSplitImbalance = getFloat(config, MAX_REGION_SPLIT_IMBALANCE_KEY, 1.5f, true);

  float splitPartCount = getFloat(config, SPLIT_PARTS_KEY, 2f, true);
  if (Math.abs(splitPartCount - 1.0) < EPSILON) {
    LOG.error("Split part count cannot be 1 (" + splitPartCount + "), using the default");
    splitPartCount = 2f;
  }
  this.splitPartCount = splitPartCount;
  // Arbitrary default split size - 4 times the size of one L0 compaction.
  // If we flush into L0 there's no split compaction, but for default value it is ok.
  double flushSize = sci.getMemstoreFlushSize();
  if (flushSize == 0) {
    flushSize = 128 * 1024 * 1024;
  }
  long defaultSplitSize = (long) (flushSize * getLevel0MinFiles() * 4 * splitPartCount);
  this.sizeToSplitAt = config.getLong(SIZE_TO_SPLIT_KEY, defaultSplitSize);
  int initialCount = config.getInt(INITIAL_STRIPE_COUNT_KEY, 1);
  if (initialCount == 0) {
    LOG.error("Initial stripe count is 0, using the default");
    initialCount = 1;
  }
  this.initialCount = initialCount;
  this.splitPartSize = (long) (this.sizeToSplitAt / this.splitPartCount);
}
public DefaultStoreFileManager(KVComparator kvComparator, Configuration conf,
    CompactionConfiguration comConf) {
  this.kvComparator = kvComparator;
  this.comConf = comConf;
  this.blockingFileCount =
    conf.getInt(HStore.BLOCKING_STOREFILES_KEY, HStore.DEFAULT_BLOCKING_STOREFILE_COUNT);
}
public StripeStoreConfig(Configuration config, StoreConfigInformation sci) {
  this.level0CompactMinFiles = config.getInt(MIN_FILES_L0_KEY, 4);
  this.flushIntoL0 = config.getBoolean(FLUSH_TO_L0_KEY, false);
  int minMinFiles = flushIntoL0 ? 3 : 4; // make sure not to compact tiny files too often.
  int minFiles = config.getInt(CompactionConfiguration.MIN_KEY, -1);
  this.stripeCompactMinFiles = config.getInt(MIN_FILES_KEY, Math.max(minMinFiles, minFiles));
  this.stripeCompactMaxFiles = config.getInt(MAX_FILES_KEY,
    config.getInt(CompactionConfiguration.MAX_KEY, 10));
  this.maxRegionSplitImbalance = getFloat(config, MAX_REGION_SPLIT_IMBALANCE_KEY, 1.5f, true);

  float splitPartCount = getFloat(config, SPLIT_PARTS_KEY, 2f, true);
  if (Math.abs(splitPartCount - 1.0) < EPSILON) {
    LOG.error("Split part count cannot be 1 (" + splitPartCount + "), using the default");
    splitPartCount = 2f;
  }
  this.splitPartCount = splitPartCount;
  // Arbitrary default split size - 4 times the size of one L0 compaction.
  // If we flush into L0 there's no split compaction, but for default value it is ok.
  double flushSize = sci.getMemstoreFlushSize();
  if (flushSize == 0) {
    flushSize = 128 * 1024 * 1024;
  }
  long defaultSplitSize = (long) (flushSize * getLevel0MinFiles() * 4 * splitPartCount);
  this.sizeToSplitAt = config.getLong(SIZE_TO_SPLIT_KEY, defaultSplitSize);
  int initialCount = config.getInt(INITIAL_STRIPE_COUNT_KEY, 1);
  if (initialCount == 0) {
    LOG.error("Initial stripe count is 0, using the default");
    initialCount = 1;
  }
  this.initialCount = initialCount;
  this.splitPartSize = (long) (this.sizeToSplitAt / this.splitPartCount);
}
public StripeStoreConfig(Configuration config, StoreConfigInformation sci) {
  this.level0CompactMinFiles = config.getInt(MIN_FILES_L0_KEY, 4);
  this.flushIntoL0 = config.getBoolean(FLUSH_TO_L0_KEY, false);
  int minMinFiles = flushIntoL0 ? 3 : 4; // make sure not to compact tiny files too often.
  int minFiles = config.getInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, -1);
  this.stripeCompactMinFiles = config.getInt(MIN_FILES_KEY, Math.max(minMinFiles, minFiles));
  this.stripeCompactMaxFiles = config.getInt(MAX_FILES_KEY,
    config.getInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 10));
  this.maxRegionSplitImbalance = getFloat(config, MAX_REGION_SPLIT_IMBALANCE_KEY, 1.5f, true);

  float splitPartCount = getFloat(config, SPLIT_PARTS_KEY, 2f, true);
  if (Math.abs(splitPartCount - 1.0) < EPSILON) {
    LOG.error("Split part count cannot be 1 (" + splitPartCount + "), using the default");
    splitPartCount = 2f;
  }
  this.splitPartCount = splitPartCount;
  // Arbitrary default split size - 4 times the size of one L0 compaction.
  // If we flush into L0 there's no split compaction, but for default value it is ok.
  double flushSize = sci.getMemStoreFlushSize();
  if (flushSize == 0) {
    flushSize = 128 * 1024 * 1024;
  }
  long defaultSplitSize = (long) (flushSize * getLevel0MinFiles() * 4 * splitPartCount);
  this.sizeToSplitAt = config.getLong(SIZE_TO_SPLIT_KEY, defaultSplitSize);
  int initialCount = config.getInt(INITIAL_STRIPE_COUNT_KEY, 1);
  if (initialCount == 0) {
    LOG.error("Initial stripe count is 0, using the default");
    initialCount = 1;
  }
  this.initialCount = initialCount;
  this.splitPartSize = (long) (this.sizeToSplitAt / this.splitPartCount);
}
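To make the default-split-size arithmetic in the StripeStoreConfig constructor concrete: with nothing configured, getLevel0MinFiles() returns 4, splitPartCount defaults to 2, and the flush size falls back to 128 MB, so defaultSplitSize = 128 MB × 4 × 4 × 2 = 4 GB, and each post-split part then targets splitPartSize = sizeToSplitAt / splitPartCount = 2 GB.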
public DefaultStoreFileManager(CellComparator cellComparator,
    Comparator<HStoreFile> storeFileComparator, Configuration conf,
    CompactionConfiguration comConf) {
  this.cellComparator = cellComparator;
  this.storeFileComparator = storeFileComparator;
  this.comConf = comConf;
  this.blockingFileCount =
    conf.getInt(HStore.BLOCKING_STOREFILES_KEY, HStore.DEFAULT_BLOCKING_STOREFILE_COUNT);
}
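In both DefaultStoreFileManager variants above, blockingFileCount is read from HStore.BLOCKING_STOREFILES_KEY: once a store accumulates that many files, HBase blocks further flushes for the region until compaction catches up or the blocking wait time expires. A minimal sketch of raising the threshold for a write-heavy workload; the value 64 is an arbitrary illustration, and HBaseConfiguration/HStore are assumed to be imported.

Configuration conf = HBaseConfiguration.create();
// Let more store files accumulate before writes stall; this trades extra read
// amplification for fewer flush blocks. 64 is an illustrative value only.
conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 64);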
/**
 * Set up the config values necessary for the store.
 */
protected void config() {
  this.conf = TEST_UTIL.getConfiguration();
  this.conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 0);
  this.conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, minFiles);
  this.conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, maxFiles);
  this.conf.setLong(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_SIZE_KEY, minSize);
  this.conf.setLong(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_SIZE_KEY, maxSize);
  this.conf.setFloat(CompactionConfiguration.HBASE_HSTORE_COMPACTION_RATIO_KEY, 1.0F);
}
private long testCompactionWithThroughputLimit() throws Exception {
  long throughputLimit = 1024L * 1024;
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, DefaultStoreEngine.class.getName());
  conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 100);
  conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 200);
  conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);
  conf.setLong(
    PressureAwareCompactionThroughputController.HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND,
    throughputLimit);
  conf.setLong(
    PressureAwareCompactionThroughputController.HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND,
    throughputLimit);
  conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY,
    PressureAwareCompactionThroughputController.class.getName());
  TEST_UTIL.startMiniCluster(1);
  try {
    HStore store = prepareData();
    assertEquals(10, store.getStorefilesCount());
    long startTime = System.currentTimeMillis();
    TEST_UTIL.getAdmin().majorCompact(tableName);
    while (store.getStorefilesCount() != 1) {
      Thread.sleep(20);
    }
    long duration = System.currentTimeMillis() - startTime;
    double throughput = (double) store.getStorefilesSize() / duration * 1000;
    // Confirm that the speed limit works properly (neither too fast nor too slow);
    // 20% is the max acceptable error rate.
    assertTrue(throughput < throughputLimit * 1.2);
    assertTrue(throughput > throughputLimit * 0.8);
    return duration;
  } finally {
    TEST_UTIL.shutdownMiniCluster();
  }
}
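The unthrottled variant, testCompactionWithoutThroughputLimit() above, serves as the baseline. Note the units in the assertion: throughput is bytes / milliseconds × 1000, i.e. bytes per second, so with the 1 MB/s limit a store holding S megabytes should compact in roughly S seconds. A sketch of how the two measurements might be compared in a driving test; the @Test method name and the LOG field are assumptions, not part of the snippets above.

@Test
public void testCompaction() throws Exception {
  // Run the throttled measurement first, then the unthrottled baseline.
  long limitTime = testCompactionWithThroughputLimit();
  long noLimitTime = testCompactionWithoutThroughputLimit();
  LOG.info("With 1M/s limit, compaction took " + limitTime
    + "ms; without limit, it took " + noLimitTime + "ms");
  // The throttled run should be measurably slower than the baseline.
  assertTrue(limitTime > noLimitTime);
}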
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  UTIL.getConfiguration().setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 2);
  UTIL.startMiniCluster(3);
}
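For reference, the constants used throughout these snippets resolve to plain configuration keys, so the same settings can also be applied via hbase-site.xml or the string key directly. A sketch of the string-key form; the literal "hbase.hstore.compaction.min" is believed to match HBASE_HSTORE_COMPACTION_MIN_KEY but should be verified against your HBase release.

// Equivalent string-key form of the setUpBeforeClass() setting above.
UTIL.getConfiguration().setInt("hbase.hstore.compaction.min", 2);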