Java class org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper: example usages from open-source projects
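The snippets below show the two halves of the EnvironmentEdgeManagerTestHelper pattern: inject a controllable EnvironmentEdge so that EnvironmentEdgeManager.currentTime() becomes deterministic, and reset() afterwards so the JVM-wide clock goes back to normal. A minimal sketch of that lifecycle, assembled from the calls that appear in the listings (the test body itself is assumed):

import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge;

// Swap in a deterministic clock before the test body runs.
EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
try {
  // ... test body: every EnvironmentEdgeManager.currentTime() call now hits the injected edge ...
} finally {
  // Always restore the default edge so later tests see real wall-clock time again.
  EnvironmentEdgeManagerTestHelper.reset();
}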

Project: ditb    File: TestHRegionReplayEvents.java
@After
public void tearDown() throws Exception {
  if (reader != null) {
    reader.close();
  }

  if (primaryRegion != null) {
    HRegion.closeHRegion(primaryRegion);
  }
  if (secondaryRegion != null) {
    HRegion.closeHRegion(secondaryRegion);
  }

  EnvironmentEdgeManagerTestHelper.reset();
  LOG.info("Cleaning test directory: " + TEST_UTIL.getDataTestDir());
  TEST_UTIL.cleanupTestDir();
}
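EnvironmentEdgeManager delegates to a single static edge for the whole JVM, so an injected edge would leak into every later test if it were not cleared; the reset() call in the @After method above restores the default wall-clock edge.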
Project: LCIndex-HBase-0.94.16    File: TestHRegion.java
public void testIncrementColumnValue_heapSize() throws IOException {
  EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());

  this.region = initHRegion(tableName, getName(), conf, fam1);
  try {
    long byAmount = 1L;
    long size;

    for (int i = 0; i < 1000; i++) {
      region.incrementColumnValue(row, fam1, qual1, byAmount, true);

      size = region.memstoreSize.get();
      assertTrue("memstore size: " + size, size >= 0);
    }
  } finally {
    HRegion.closeHRegion(this.region);
    this.region = null;
  }
}
Project: IRIndex    File: TestHRegion.java
public void testIncrementColumnValue_heapSize() throws IOException {
  EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());

  this.region = initHRegion(tableName, getName(), conf, fam1);
  try {
    long byAmount = 1L;
    long size;

    for (int i = 0; i < 1000; i++) {
      region.incrementColumnValue(row, fam1, qual1, byAmount, true);

      size = region.memstoreSize.get();
      assertTrue("memstore size: " + size, size >= 0);
    }
  } finally {
    HRegion.closeHRegion(this.region);
    this.region = null;
  }
}
Project: hbase    File: TestQuotaThrottle.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.getConfiguration().setBoolean(QuotaUtil.QUOTA_CONF_KEY, true);
  TEST_UTIL.getConfiguration().setInt(QuotaCache.REFRESH_CONF_KEY, REFRESH_TIME);
  TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 10);
  TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
  TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
  TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
  TEST_UTIL.getConfiguration().setBoolean("hbase.master.enabletable.roundrobin", true);
  TEST_UTIL.startMiniCluster(1);
  TEST_UTIL.waitTableAvailable(QuotaTableUtil.QUOTA_TABLE_NAME);
  QuotaCache.TEST_FORCE_REFRESH = true;

  tables = new Table[TABLE_NAMES.length];
  for (int i = 0; i < TABLE_NAMES.length; ++i) {
    tables[i] = TEST_UTIL.createTable(TABLE_NAMES[i], FAMILY);
  }

  envEdge = new ManualEnvironmentEdge();
  envEdge.setValue(EnvironmentEdgeManager.currentTime());
  EnvironmentEdgeManagerTestHelper.injectEdge(envEdge);
}
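Unlike IncrementingEnvironmentEdge, a ManualEnvironmentEdge keeps returning the same value until the test advances it explicitly, which lets quota tests step past refresh windows without sleeping. A minimal sketch of how the envEdge above might be advanced later in a test (the surrounding test body is assumed):

// Step the injected clock past the quota-cache refresh window; no Thread.sleep() needed.
envEdge.incValue(REFRESH_TIME + 1);
// Anything reading EnvironmentEdgeManager.currentTime() now sees the advanced value.
long now = EnvironmentEdgeManager.currentTime();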
Project: hbase    File: TestHRegionReplayEvents.java
@After
public void tearDown() throws Exception {
  if (reader != null) {
    reader.close();
  }

  if (primaryRegion != null) {
    HBaseTestingUtility.closeRegionAndWAL(primaryRegion);
  }
  if (secondaryRegion != null) {
    HBaseTestingUtility.closeRegionAndWAL(secondaryRegion);
  }

  EnvironmentEdgeManagerTestHelper.reset();
  LOG.info("Cleaning test directory: " + TEST_UTIL.getDataTestDir());
  TEST_UTIL.cleanupTestDir();
}
Project: HBase-Research    File: TestHRegion.java
public void testIncrementColumnValue_heapSize() throws IOException {
  EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());

  this.region = initHRegion(tableName, getName(), conf, fam1);
  try {
    long byAmount = 1L;
    long size;

    for (int i = 0; i < 1000; i++) {
      region.incrementColumnValue(row, fam1, qual1, byAmount, true);

      size = region.memstoreSize.get();
      assertTrue("memstore size: " + size, size >= 0);
    }
  } finally {
    HRegion.closeHRegion(this.region);
    this.region = null;
  }
}
Project: hbase-0.94.8-qod    File: TestHRegion.java
public void testIncrementColumnValue_heapSize() throws IOException {
  EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());

  this.region = initHRegion(tableName, getName(), conf, fam1);
  try {
    long byAmount = 1L;
    long size;

    for (int i = 0; i < 1000; i++) {
      region.incrementColumnValue(row, fam1, qual1, byAmount, true);

      size = region.memstoreSize.get();
      assertTrue("memstore size: " + size, size >= 0);
    }
  } finally {
    HRegion.closeHRegion(this.region);
    this.region = null;
  }
}
Project: hindex    File: TestHRegion.java
public void testIncrementColumnValue_heapSize() throws IOException {
  EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());

  this.region = initHRegion(tableName, getName(), conf, fam1);
  try {
    long byAmount = 1L;
    long size;

    for (int i = 0; i < 1000; i++) {
      region.incrementColumnValue(row, fam1, qual1, byAmount, true);

      size = region.memstoreSize.get();
      assertTrue("memstore size: " + size, size >= 0);
    }
  } finally {
    HRegion.closeHRegion(this.region);
    this.region = null;
  }
}
Project: ditb    File: TestThriftServerCmdLine.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.getConfiguration().setBoolean("hbase.table.sanity.checks", false);
  TEST_UTIL.startMiniCluster();
  //ensure that server time increments every time we do an operation, otherwise
  //successive puts having the same timestamp will override each other
  EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
}
Project: ditb    File: TestThriftHttpServer.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.getConfiguration().setBoolean("hbase.regionserver.thrift.http", true);
  TEST_UTIL.getConfiguration().setBoolean("hbase.table.sanity.checks", false);
  TEST_UTIL.startMiniCluster();
  //ensure that server time increments every time we do an operation, otherwise
  //successive puts having the same timestamp will override each other
  EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
}
Project: ditb    File: TestSnapshotDescriptionUtils.java
@After
public void cleanupFS() throws Exception {
  if (fs.exists(root)) {
    if (!fs.delete(root, true)) {
      throw new IOException("Failed to delete root test dir: " + root);
    }
    if (!fs.mkdirs(root)) {
      throw new IOException("Failed to create root test dir: " + root);
    }
  }
  EnvironmentEdgeManagerTestHelper.reset();
}
Project: ditb    File: TestKeepDeletes.java
@Before
public void setUp() throws Exception {
  /* HBASE-6832: [WINDOWS] Tests should use explicit timestamp for Puts, and not rely on
   * implicit RS timing.
   * Use an explicit timer (IncrementingEnvironmentEdge) so that the put, delete
   * compact timestamps are tracked. Otherwise, forced major compaction will not purge
   * Delete's having the same timestamp. see ScanQueryMatcher.match():
   * if (retainDeletesInOutput
   *     || (!isUserScan && (EnvironmentEdgeManager.currentTime() - timestamp)
   *     <= timeToPurgeDeletes) ... )
   *
   */
  EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
}
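A minimal sketch (not part of the quoted test) of the property the comment above relies on: an IncrementingEnvironmentEdge hands out a strictly larger timestamp on every call, so consecutive Puts and Deletes can never collide on one.

IncrementingEnvironmentEdge edge = new IncrementingEnvironmentEdge();
long t1 = edge.currentTime();
long t2 = edge.currentTime();
assert t2 == t1 + 1;  // each call advances the simulated clock by one millisecond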
Project: LCIndex-HBase-0.94.16    File: TestSnapshotDescriptionUtils.java
@After
public void cleanupFS() throws Exception {
  if (fs.exists(root)) {
    if (!fs.delete(root, true)) {
      throw new IOException("Failed to delete root test dir: " + root);
    }
    if (!fs.mkdirs(root)) {
      throw new IOException("Failed to create root test dir: " + root);
    }
  }
  EnvironmentEdgeManagerTestHelper.reset();
}
Project: LCIndex-HBase-0.94.16    File: TestHRegion.java
public void testIncrementColumnValue_BumpSnapshot() throws IOException {
  ManualEnvironmentEdge mee = new ManualEnvironmentEdge();
  EnvironmentEdgeManagerTestHelper.injectEdge(mee);
  this.region = initHRegion(tableName, getName(), conf, fam1);
  try {
    long value = 42L;
    long incr = 44L;

    // first put something in kvset, then snapshot it.
    Put put = new Put(row);
    put.add(fam1, qual1, Bytes.toBytes(value));
    region.put(put);

    // get the store in question:
    Store s = region.getStore(fam1);
    s.snapshot(); //bam

    // now increment:
    long newVal = region.incrementColumnValue(row, fam1, qual1,
        incr, false);

    assertEquals(value + incr, newVal);

    // get both versions:
    Get get = new Get(row);
    get.setMaxVersions();
    get.addColumn(fam1, qual1);

    Result r = region.get(get, null);
    assertEquals(2, r.size());
    KeyValue first = r.raw()[0];
    KeyValue second = r.raw()[1];

    assertTrue("ICV failed to upgrade timestamp",
        first.getTimestamp() != second.getTimestamp());
  } finally {
    HRegion.closeHRegion(this.region);
    this.region = null;
  }
}
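The ManualEnvironmentEdge matters here: with the clock frozen, the snapshotted cell and the subsequent increment would naturally carry identical timestamps, so the final assertion only passes if incrementColumnValue itself bumps the timestamp of the new version.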
Project: pbase    File: TestSnapshotDescriptionUtils.java
@After
public void cleanupFS() throws Exception {
  if (fs.exists(root)) {
    if (!fs.delete(root, true)) {
      throw new IOException("Failed to delete root test dir: " + root);
    }
    if (!fs.mkdirs(root)) {
      throw new IOException("Failed to create root test dir: " + root);
    }
  }
  EnvironmentEdgeManagerTestHelper.reset();
}
Project: pbase    File: TestKeepDeletes.java
@Before
public void setUp() throws Exception {
  /* HBASE-6832: [WINDOWS] Tests should use explicit timestamp for Puts, and not rely on
   * implicit RS timing.
   * Use an explicit timer (IncrementingEnvironmentEdge) so that the put, delete
   * compact timestamps are tracked. Otherwise, forced major compaction will not purge
   * Delete's having the same timestamp. see ScanQueryMatcher.match():
   * if (retainDeletesInOutput
   *     || (!isUserScan && (EnvironmentEdgeManager.currentTime() - timestamp)
   *     <= timeToPurgeDeletes) ... )
   *
   */
  EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
}
Project: HIndex    File: TestThriftServerCmdLine.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.startMiniCluster();
  //ensure that server time increments every time we do an operation, otherwise
  //successive puts having the same timestamp will override each other
  EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
}
Project: HIndex    File: TestSnapshotDescriptionUtils.java
@After
public void cleanupFS() throws Exception {
  if (fs.exists(root)) {
    if (!fs.delete(root, true)) {
      throw new IOException("Failed to delete root test dir: " + root);
    }
    if (!fs.mkdirs(root)) {
      throw new IOException("Failed to create root test dir: " + root);
    }
  }
  EnvironmentEdgeManagerTestHelper.reset();
}
Project: HIndex    File: TestKeepDeletes.java
@Before
public void setUp() throws Exception {
  /* HBASE-6832: [WINDOWS] Tests should use explicit timestamp for Puts, and not rely on
   * implicit RS timing.
   * Use an explicit timer (IncrementingEnvironmentEdge) so that the put, delete
   * compact timestamps are tracked. Otherwise, forced major compaction will not purge
   * Delete's having the same timestamp. see ScanQueryMatcher.match():
   * if (retainDeletesInOutput
   *     || (!isUserScan && (EnvironmentEdgeManager.currentTimeMillis() - timestamp)
   *     <= timeToPurgeDeletes) ... )
   *
   */
  EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
}
Project: IRIndex    File: TestSnapshotDescriptionUtils.java
@After
public void cleanupFS() throws Exception {
  if (fs.exists(root)) {
    if (!fs.delete(root, true)) {
      throw new IOException("Failed to delete root test dir: " + root);
    }
    if (!fs.mkdirs(root)) {
      throw new IOException("Failed to create root test dir: " + root);
    }
  }
  EnvironmentEdgeManagerTestHelper.reset();
}
Project: IRIndex    File: TestHRegion.java
public void testIncrementColumnValue_BumpSnapshot() throws IOException {
  ManualEnvironmentEdge mee = new ManualEnvironmentEdge();
  EnvironmentEdgeManagerTestHelper.injectEdge(mee);
  this.region = initHRegion(tableName, getName(), conf, fam1);
  try {
    long value = 42L;
    long incr = 44L;

    // first put something in kvset, then snapshot it.
    Put put = new Put(row);
    put.add(fam1, qual1, Bytes.toBytes(value));
    region.put(put);

    // get the store in question:
    Store s = region.getStore(fam1);
    s.snapshot(); //bam

    // now increment:
    long newVal = region.incrementColumnValue(row, fam1, qual1,
        incr, false);

    assertEquals(value + incr, newVal);

    // get both versions:
    Get get = new Get(row);
    get.setMaxVersions();
    get.addColumn(fam1, qual1);

    Result r = region.get(get, null);
    assertEquals(2, r.size());
    KeyValue first = r.raw()[0];
    KeyValue second = r.raw()[1];

    assertTrue("ICV failed to upgrade timestamp",
        first.getTimestamp() != second.getTimestamp());
  } finally {
    HRegion.closeHRegion(this.region);
    this.region = null;
  }
}
Project: hbase    File: TestThriftServerCmdLine.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.getConfiguration().setBoolean("hbase.table.sanity.checks", false);
  TEST_UTIL.startMiniCluster();
  //ensure that server time increments every time we do an operation, otherwise
  //successive puts having the same timestamp will override each other
  EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
}
Project: hbase    File: TestThriftHttpServer.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.getConfiguration().setBoolean("hbase.regionserver.thrift.http", true);
  TEST_UTIL.getConfiguration().setBoolean("hbase.table.sanity.checks", false);
  TEST_UTIL.startMiniCluster();
  //ensure that server time increments every time we do an operation, otherwise
  //successive puts having the same timestamp will override each other
  EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
}
Project: hbase    File: TestRegionObserverBypass.java
/**
 * Test that when bypass is called, we skip calling the remaining coprocessor methods stacked up
 * on that hook, in this case a prePut.
 * If the column family is 'test', then bypass is invoked.
 */
@Test
public void testBypassAlsoCompletes() throws IOException {
  //ensure that server time increments every time we do an operation, otherwise
  //previous deletes will eclipse successive puts having the same timestamp
  EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());

  Table t = util.getConnection().getTable(tableName);
  List<Put> puts = new ArrayList<>();
  Put p = new Put(row1);
  p.addColumn(dummy, dummy, dummy);
  puts.add(p);
  p = new Put(row2);
  p.addColumn(test, dummy, dummy);
  puts.add(p);
  p = new Put(row3);
  p.addColumn(test, dummy, dummy);
  puts.add(p);
  t.put(puts);
  // Ensure expected result.
  checkRowAndDelete(t, row1, 1);
  checkRowAndDelete(t, row2, 0);
  checkRowAndDelete(t, row3, 0);
  // We have three Coprocessors stacked up on the prePut. See the beforeClass setup. We did three
  // puts above, two of which bypassed. A bypass means do not call the other coprocessors in the
  // stack, so for the two 'test' calls above we should not call through to all
  // three coprocessors in the chain. So we should have:
  // 3 invocations for first put + 1 invocation + 1 bypass for second put + 1 invocation +
  // 1 bypass for the last put. Assert.
  assertEquals("Total CP invocation count", 5, TestCoprocessor.PREPUT_INVOCATIONS.get());
  assertEquals("Total CP bypasses", 2, TestCoprocessor.PREPUT_BYPASSES.get());
}
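For reference, a hypothetical RegionObserver of the kind this test stacks up; the class name and TEST_FAMILY constant are illustrative assumptions, not the actual TestCoprocessor from this file:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WALEdit;

public class BypassingObserver implements RegionObserver {
  private static final byte[] TEST_FAMILY = Bytes.toBytes("test");  // assumed family name

  @Override
  public void prePut(ObserverContext<RegionCoprocessorEnvironment> c, Put put,
      WALEdit edit, Durability durability) throws IOException {
    if (put.getFamilyCellMap().containsKey(TEST_FAMILY)) {
      c.bypass();  // skip the remaining coprocessors stacked on this prePut
    }
  }
}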
Project: hbase    File: TestSnapshotDescriptionUtils.java
@After
public void cleanupFS() throws Exception {
  if (fs.exists(root)) {
    if (!fs.delete(root, true)) {
      throw new IOException("Failed to delete root test dir: " + root);
    }
    if (!fs.mkdirs(root)) {
      throw new IOException("Failed to create root test dir: " + root);
    }
  }
  EnvironmentEdgeManagerTestHelper.reset();
}
Project: hbase    File: TestKeepDeletes.java
@Before
public void setUp() throws Exception {
  /* HBASE-6832: [WINDOWS] Tests should use explicit timestamp for Puts, and not rely on
   * implicit RS timing.
   * Use an explicit timer (IncrementingEnvironmentEdge) so that the put, delete
   * compact timestamps are tracked. Otherwise, forced major compaction will not purge
   * Delete's having the same timestamp. see ScanQueryMatcher.match():
   * if (retainDeletesInOutput
   *     || (!isUserScan && (EnvironmentEdgeManager.currentTime() - timestamp)
   *     <= timeToPurgeDeletes) ... )
   *
   */
  EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
}
Project: hbase    File: TestHStore.java
@After
public void tearDown() throws Exception {
  EnvironmentEdgeManagerTestHelper.reset();
  if (store != null) {
    try {
      store.close();
    } catch (IOException e) {
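      // Ignored: closing the store is best-effort during teardown.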
    }
    store = null;
  }
  if (region != null) {
    region.close();
    region = null;
  }
}
Project: hbase    File: TestScannerFromBucketCache.java
@After
public void tearDown() throws Exception {
  EnvironmentEdgeManagerTestHelper.reset();
  LOG.info("Cleaning test directory: " + test_util.getDataTestDir());
  test_util.cleanupTestDir();
  CacheConfig.clearGlobalInstances();
}
Project: PyroDB    File: TestThriftServerCmdLine.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.getConfiguration().setBoolean("hbase.table.sanity.checks", false);
  TEST_UTIL.startMiniCluster();
  //ensure that server time increments every time we do an operation, otherwise
  //successive puts having the same timestamp will override each other
  EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
}
Project: PyroDB    File: TestSnapshotDescriptionUtils.java
@After
public void cleanupFS() throws Exception {
  if (fs.exists(root)) {
    if (!fs.delete(root, true)) {
      throw new IOException("Failed to delete root test dir: " + root);
    }
    if (!fs.mkdirs(root)) {
      throw new IOException("Failed to create root test dir: " + root);
    }
  }
  EnvironmentEdgeManagerTestHelper.reset();
}
Project: PyroDB    File: TestKeepDeletes.java
@Before
public void setUp() throws Exception {
  /* HBASE-6832: [WINDOWS] Tests should use explicit timestamp for Puts, and not rely on
   * implicit RS timing.
   * Use an explicit timer (IncrementingEnvironmentEdge) so that the put, delete
   * compact timestamps are tracked. Otherwise, forced major compaction will not purge
   * Delete's having the same timestamp. see ScanQueryMatcher.match():
   * if (retainDeletesInOutput
   *     || (!isUserScan && (EnvironmentEdgeManager.currentTimeMillis() - timestamp)
   *     <= timeToPurgeDeletes) ... )
   *
   */
  EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
}
Project: c5    File: TestSnapshotDescriptionUtils.java
@After
public void cleanupFS() throws Exception {
  if (fs.exists(root)) {
    if (!fs.delete(root, true)) {
      throw new IOException("Failed to delete root test dir: " + root);
    }
    if (!fs.mkdirs(root)) {
      throw new IOException("Failed to create root test dir: " + root);
    }
  }
  EnvironmentEdgeManagerTestHelper.reset();
}
Project: c5    File: TestKeepDeletes.java
@Before
public void setUp() throws Exception {
  /* HBASE-6832: [WINDOWS] Tests should use explicit timestamp for Puts, and not rely on
   * implicit RS timing.
   * Use an explicit timer (IncrementingEnvironmentEdge) so that the put, delete
   * compact timestamps are tracked. Otherwise, forced major compaction will not purge
   * Delete's having the same timestamp. see ScanQueryMatcher.match():
   * if (retainDeletesInOutput
   *     || (!isUserScan && (EnvironmentEdgeManager.currentTimeMillis() - timestamp)
   *     <= timeToPurgeDeletes) ... )
   *
   */
  EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
}
Project: HBase-Research    File: TestSnapshotDescriptionUtils.java
@After
public void cleanupFS() throws Exception {
  if (fs.exists(root)) {
    if (!fs.delete(root, true)) {
      throw new IOException("Failed to delete root test dir: " + root);
    }
    if (!fs.mkdirs(root)) {
      throw new IOException("Failed to create root test dir: " + root);
    }
  }
  EnvironmentEdgeManagerTestHelper.reset();
}
Project: HBase-Research    File: TestHRegion.java
public void testIncrementColumnValue_BumpSnapshot() throws IOException {
  ManualEnvironmentEdge mee = new ManualEnvironmentEdge();
  EnvironmentEdgeManagerTestHelper.injectEdge(mee);
  this.region = initHRegion(tableName, getName(), conf, fam1);
  try {
    long value = 42L;
    long incr = 44L;

    // first put something in kvset, then snapshot it.
    Put put = new Put(row);
    put.add(fam1, qual1, Bytes.toBytes(value));
    region.put(put);

    // get the store in question:
    Store s = region.getStore(fam1);
    s.snapshot(); //bam

    // now increment:
    long newVal = region.incrementColumnValue(row, fam1, qual1,
        incr, false);

    assertEquals(value + incr, newVal);

    // get both versions:
    Get get = new Get(row);
    get.setMaxVersions();
    get.addColumn(fam1, qual1);

    Result r = region.get(get, null);
    assertEquals(2, r.size());
    KeyValue first = r.raw()[0];
    KeyValue second = r.raw()[1];

    assertTrue("ICV failed to upgrade timestamp",
        first.getTimestamp() != second.getTimestamp());
  } finally {
    HRegion.closeHRegion(this.region);
    this.region = null;
  }
}
Project: hbase-0.94.8-qod    File: TestSnapshotDescriptionUtils.java
@After
public void cleanupFS() throws Exception {
  if (fs.exists(root)) {
    if (!fs.delete(root, true)) {
      throw new IOException("Failed to delete root test dir: " + root);
    }
    if (!fs.mkdirs(root)) {
      throw new IOException("Failed to create root test dir: " + root);
    }
  }
  EnvironmentEdgeManagerTestHelper.reset();
}
Project: hbase-0.94.8-qod    File: TestHRegion.java
public void testIncrementColumnValue_BumpSnapshot() throws IOException {
  ManualEnvironmentEdge mee = new ManualEnvironmentEdge();
  EnvironmentEdgeManagerTestHelper.injectEdge(mee);
  this.region = initHRegion(tableName, getName(), conf, fam1);
  try {
    long value = 42L;
    long incr = 44L;

    // first put something in kvset, then snapshot it.
    Put put = new Put(row);
    put.add(fam1, qual1, Bytes.toBytes(value));
    region.put(put);

    // get the store in question:
    Store s = region.getStore(fam1);
    s.snapshot(); //bam

    // now increment:
    long newVal = region.incrementColumnValue(row, fam1, qual1,
        incr, false);

    assertEquals(value + incr, newVal);

    // get both versions:
    Get get = new Get(row);
    get.setMaxVersions();
    get.addColumn(fam1, qual1);

    Result r = region.get(get, null);
    assertEquals(2, r.size());
    KeyValue first = r.raw()[0];
    KeyValue second = r.raw()[1];

    assertTrue("ICV failed to upgrade timestamp",
        first.getTimestamp() != second.getTimestamp());
  } finally {
    HRegion.closeHRegion(this.region);
    this.region = null;
  }
}