Java 类org.apache.hadoop.hbase.HBaseTestCase 实例源码

项目:ditb    文件:TestScanner.java   
/**
 * Test that closing a scanner while a client is using it doesn't throw
 * NPEs but instead a UnknownScannerException. HBASE-2503
 * @throws Exception
 */
@Test
public void testRaceBetweenClientAndTimeout() throws Exception {
  try {
    this.r = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
    HBaseTestCase.addContent(this.r, HConstants.CATALOG_FAMILY);
    InternalScanner scanner = r.getScanner(new Scan());
    List<Cell> rows = new ArrayList<Cell>();
    try {
      // First read succeeds; then the scanner is closed out from under the client.
      scanner.next(rows);
      scanner.close();
      scanner.next(rows);
      fail("We don't want anything more, we should be failing");
    } catch (UnknownScannerException expected) {
      // Contract under test: a closed scanner reports UnknownScannerException, not NPE.
      return;
    }
  } finally {
    HRegion.closeHRegion(this.r);
  }
}
项目:ditb    文件:TestScanner.java   
/**
 * Tests to do a sync flush during the middle of a scan. This is testing the StoreScanner
 * update readers code essentially.  This is not highly concurrent, since its all 1 thread.
 * HBase-910.
 * @throws Exception
 */
@Test
public void testScanAndSyncFlush() throws Exception {
  this.r = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
  HRegionIncommon incommon = new HRegionIncommon(r);
  try {
    LOG.info("Added: "
        + HBaseTestCase.addContent(incommon, Bytes.toString(HConstants.CATALOG_FAMILY),
            Bytes.toString(HConstants.REGIONINFO_QUALIFIER)));
    int rowsSeen = count(incommon, -1, false);
    // A synchronous flush part-way through must not change the scan's row count.
    assertEquals(rowsSeen, count(incommon, 100, false)); // do a sync flush.
  } catch (Exception e) {
    LOG.error("Failed", e);
    throw e;
  } finally {
    HRegion.closeHRegion(this.r);
  }
}
项目:ditb    文件:TestScanner.java   
/**
 * Tests to do a concurrent flush (using a 2nd thread) while scanning.  This tests both
 * the StoreScanner update readers and the transition from memstore -> snapshot -> store file.
 *
 * @throws Exception
 */
@Test
public void testScanAndRealConcurrentFlush() throws Exception {
  this.r = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
  HRegionIncommon incommon = new HRegionIncommon(r);
  try {
    LOG.info("Added: "
        + HBaseTestCase.addContent(incommon, Bytes.toString(HConstants.CATALOG_FAMILY),
            Bytes.toString(HConstants.REGIONINFO_QUALIFIER)));
    int rowsSeen = count(incommon, -1, false);
    // A background-thread flush part-way through must not change the scan's row count.
    assertEquals(rowsSeen, count(incommon, 100, true)); // do a true concurrent background thread flush
  } catch (Exception e) {
    LOG.error("Failed", e);
    throw e;
  } finally {
    HRegion.closeHRegion(this.r);
  }
}
项目:pbase    文件:TestScanner.java   
/**
 * Test that closing a scanner while a client is using it doesn't throw
 * NPEs but instead a UnknownScannerException. HBASE-2503
 * @throws Exception
 */
@Test
public void testRaceBetweenClientAndTimeout() throws Exception {
  try {
    this.r = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
    HBaseTestCase.addContent(this.r, HConstants.CATALOG_FAMILY);
    InternalScanner scanner = r.getScanner(new Scan());
    List<Cell> rows = new ArrayList<Cell>();
    try {
      // First read succeeds; then the scanner is closed out from under the client.
      scanner.next(rows);
      scanner.close();
      scanner.next(rows);
      fail("We don't want anything more, we should be failing");
    } catch (UnknownScannerException expected) {
      // Contract under test: a closed scanner reports UnknownScannerException, not NPE.
      return;
    }
  } finally {
    HRegion.closeHRegion(this.r);
  }
}
项目:pbase    文件:TestScanner.java   
/**
 * Tests to do a sync flush during the middle of a scan. This is testing the StoreScanner
 * update readers code essentially.  This is not highly concurrent, since its all 1 thread.
 * HBase-910.
 * @throws Exception
 */
@Test
public void testScanAndSyncFlush() throws Exception {
  this.r = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
  HRegionIncommon incommon = new HRegionIncommon(r);
  try {
    LOG.info("Added: "
        + HBaseTestCase.addContent(incommon, Bytes.toString(HConstants.CATALOG_FAMILY),
            Bytes.toString(HConstants.REGIONINFO_QUALIFIER)));
    int rowsSeen = count(incommon, -1, false);
    // A synchronous flush part-way through must not change the scan's row count.
    assertEquals(rowsSeen, count(incommon, 100, false)); // do a sync flush.
  } catch (Exception e) {
    LOG.error("Failed", e);
    throw e;
  } finally {
    HRegion.closeHRegion(this.r);
  }
}
项目:pbase    文件:TestScanner.java   
/**
 * Tests to do a concurrent flush (using a 2nd thread) while scanning.  This tests both
 * the StoreScanner update readers and the transition from memstore -> snapshot -> store file.
 *
 * @throws Exception
 */
@Test
public void testScanAndRealConcurrentFlush() throws Exception {
  this.r = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
  HRegionIncommon incommon = new HRegionIncommon(r);
  try {
    LOG.info("Added: "
        + HBaseTestCase.addContent(incommon, Bytes.toString(HConstants.CATALOG_FAMILY),
            Bytes.toString(HConstants.REGIONINFO_QUALIFIER)));
    int rowsSeen = count(incommon, -1, false);
    // A background-thread flush part-way through must not change the scan's row count.
    assertEquals(rowsSeen, count(incommon, 100, true)); // do a true concurrent background thread flush
  } catch (Exception e) {
    LOG.error("Failed", e);
    throw e;
  } finally {
    HRegion.closeHRegion(this.r);
  }
}
项目:HIndex    文件:TestScanner.java   
/**
 * Test that closing a scanner while a client is using it doesn't throw
 * NPEs but instead a UnknownScannerException. HBASE-2503
 * @throws Exception
 */
@Test
public void testRaceBetweenClientAndTimeout() throws Exception {
  try {
    this.r = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
    HBaseTestCase.addContent(this.r, HConstants.CATALOG_FAMILY);
    InternalScanner scanner = r.getScanner(new Scan());
    List<Cell> rows = new ArrayList<Cell>();
    try {
      // First read succeeds; then the scanner is closed out from under the client.
      scanner.next(rows);
      scanner.close();
      scanner.next(rows);
      fail("We don't want anything more, we should be failing");
    } catch (UnknownScannerException expected) {
      // Contract under test: a closed scanner reports UnknownScannerException, not NPE.
      return;
    }
  } finally {
    HRegion.closeHRegion(this.r);
  }
}
项目:HIndex    文件:TestScanner.java   
/**
 * Tests to do a sync flush during the middle of a scan. This is testing the StoreScanner
 * update readers code essentially.  This is not highly concurrent, since its all 1 thread.
 * HBase-910.
 * @throws Exception
 */
@Test
public void testScanAndSyncFlush() throws Exception {
  this.r = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
  HRegionIncommon incommon = new HRegionIncommon(r);
  try {
    LOG.info("Added: "
        + HBaseTestCase.addContent(incommon, Bytes.toString(HConstants.CATALOG_FAMILY),
            Bytes.toString(HConstants.REGIONINFO_QUALIFIER)));
    int rowsSeen = count(incommon, -1, false);
    // A synchronous flush part-way through must not change the scan's row count.
    assertEquals(rowsSeen, count(incommon, 100, false)); // do a sync flush.
  } catch (Exception e) {
    LOG.error("Failed", e);
    throw e;
  } finally {
    HRegion.closeHRegion(this.r);
  }
}
项目:HIndex    文件:TestScanner.java   
/**
 * Tests to do a concurrent flush (using a 2nd thread) while scanning.  This tests both
 * the StoreScanner update readers and the transition from memstore -> snapshot -> store file.
 *
 * @throws Exception
 */
@Test
public void testScanAndRealConcurrentFlush() throws Exception {
  this.r = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
  HRegionIncommon incommon = new HRegionIncommon(r);
  try {
    LOG.info("Added: "
        + HBaseTestCase.addContent(incommon, Bytes.toString(HConstants.CATALOG_FAMILY),
            Bytes.toString(HConstants.REGIONINFO_QUALIFIER)));
    int rowsSeen = count(incommon, -1, false);
    // A background-thread flush part-way through must not change the scan's row count.
    assertEquals(rowsSeen, count(incommon, 100, true)); // do a true concurrent background thread flush
  } catch (Exception e) {
    LOG.error("Failed", e);
    throw e;
  } finally {
    HRegion.closeHRegion(this.r);
  }
}
项目:hbase    文件:TestScanner.java   
/**
 * Test that closing a scanner while a client is using it doesn't throw
 * NPEs but instead a UnknownScannerException. HBASE-2503
 * @throws Exception
 */
@Test
public void testRaceBetweenClientAndTimeout() throws Exception {
  try {
    this.region = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
    HBaseTestCase.addContent(this.region, HConstants.CATALOG_FAMILY);
    InternalScanner scanner = region.getScanner(new Scan());
    List<Cell> rows = new ArrayList<>();
    try {
      // First read succeeds; then the scanner is closed out from under the client.
      scanner.next(rows);
      scanner.close();
      scanner.next(rows);
      fail("We don't want anything more, we should be failing");
    } catch (UnknownScannerException expected) {
      // Contract under test: a closed scanner reports UnknownScannerException, not NPE.
      return;
    }
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
}
项目:hbase    文件:TestScanner.java   
/**
 * Tests to do a sync flush during the middle of a scan. This is testing the StoreScanner
 * update readers code essentially.  This is not highly concurrent, since its all 1 thread.
 * HBase-910.
 * @throws Exception
 */
@Test
public void testScanAndSyncFlush() throws Exception {
  this.region = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
  Table table = new RegionAsTable(region);
  try {
    LOG.info("Added: "
        + HBaseTestCase.addContent(table, Bytes.toString(HConstants.CATALOG_FAMILY),
            Bytes.toString(HConstants.REGIONINFO_QUALIFIER)));
    int rowsSeen = count(table, -1, false);
    // A synchronous flush part-way through must not change the scan's row count.
    assertEquals(rowsSeen, count(table, 100, false)); // do a sync flush.
  } catch (Exception e) {
    LOG.error("Failed", e);
    throw e;
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
}
项目:hbase    文件:TestScanner.java   
/**
 * Tests to do a concurrent flush (using a 2nd thread) while scanning.  This tests both
 * the StoreScanner update readers and the transition from memstore -> snapshot -> store file.
 *
 * @throws Exception
 */
@Test
public void testScanAndRealConcurrentFlush() throws Exception {
  this.region = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
  Table table = new RegionAsTable(region);
  try {
    LOG.info("Added: "
        + HBaseTestCase.addContent(table, Bytes.toString(HConstants.CATALOG_FAMILY),
            Bytes.toString(HConstants.REGIONINFO_QUALIFIER)));
    int rowsSeen = count(table, -1, false);
    // A background-thread flush part-way through must not change the scan's row count.
    assertEquals(rowsSeen, count(table, 100, true)); // do a true concurrent background thread flush
  } catch (Exception e) {
    LOG.error("Failed", e);
    throw e;
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
}
项目:PyroDB    文件:TestScanner.java   
/**
 * Test that closing a scanner while a client is using it doesn't throw
 * NPEs but instead a UnknownScannerException. HBASE-2503
 * @throws Exception
 */
@Test
public void testRaceBetweenClientAndTimeout() throws Exception {
  try {
    this.r = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
    HBaseTestCase.addContent(this.r, HConstants.CATALOG_FAMILY);
    InternalScanner scanner = r.getScanner(new Scan());
    List<Cell> rows = new ArrayList<Cell>();
    try {
      // First read succeeds; then the scanner is closed out from under the client.
      scanner.next(rows);
      scanner.close();
      scanner.next(rows);
      fail("We don't want anything more, we should be failing");
    } catch (UnknownScannerException expected) {
      // Contract under test: a closed scanner reports UnknownScannerException, not NPE.
      return;
    }
  } finally {
    HRegion.closeHRegion(this.r);
  }
}
项目:PyroDB    文件:TestScanner.java   
/**
 * Tests to do a sync flush during the middle of a scan. This is testing the StoreScanner
 * update readers code essentially.  This is not highly concurrent, since its all 1 thread.
 * HBase-910.
 * @throws Exception
 */
@Test
public void testScanAndSyncFlush() throws Exception {
  this.r = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
  HRegionIncommon incommon = new HRegionIncommon(r);
  try {
    LOG.info("Added: "
        + HBaseTestCase.addContent(incommon, Bytes.toString(HConstants.CATALOG_FAMILY),
            Bytes.toString(HConstants.REGIONINFO_QUALIFIER)));
    int rowsSeen = count(incommon, -1, false);
    // A synchronous flush part-way through must not change the scan's row count.
    assertEquals(rowsSeen, count(incommon, 100, false)); // do a sync flush.
  } catch (Exception e) {
    LOG.error("Failed", e);
    throw e;
  } finally {
    HRegion.closeHRegion(this.r);
  }
}
项目:PyroDB    文件:TestScanner.java   
/**
 * Tests to do a concurrent flush (using a 2nd thread) while scanning.  This tests both
 * the StoreScanner update readers and the transition from memstore -> snapshot -> store file.
 *
 * @throws Exception
 */
@Test
public void testScanAndRealConcurrentFlush() throws Exception {
  this.r = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
  HRegionIncommon incommon = new HRegionIncommon(r);
  try {
    LOG.info("Added: "
        + HBaseTestCase.addContent(incommon, Bytes.toString(HConstants.CATALOG_FAMILY),
            Bytes.toString(HConstants.REGIONINFO_QUALIFIER)));
    int rowsSeen = count(incommon, -1, false);
    // A background-thread flush part-way through must not change the scan's row count.
    assertEquals(rowsSeen, count(incommon, 100, true)); // do a true concurrent background thread flush
  } catch (Exception e) {
    LOG.error("Failed", e);
    throw e;
  } finally {
    HRegion.closeHRegion(this.r);
  }
}
项目:c5    文件:TestScanner.java   
/**
 * Test that closing a scanner while a client is using it doesn't throw
 * NPEs but instead a UnknownScannerException. HBASE-2503
 * @throws Exception
 */
@Test
public void testRaceBetweenClientAndTimeout() throws Exception {
  try {
    this.r = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
    HBaseTestCase.addContent(this.r, HConstants.CATALOG_FAMILY);
    InternalScanner scanner = r.getScanner(new Scan());
    List<Cell> rows = new ArrayList<Cell>();
    try {
      // First read succeeds; then the scanner is closed out from under the client.
      scanner.next(rows);
      scanner.close();
      scanner.next(rows);
      fail("We don't want anything more, we should be failing");
    } catch (UnknownScannerException expected) {
      // Contract under test: a closed scanner reports UnknownScannerException, not NPE.
      return;
    }
  } finally {
    HRegion.closeHRegion(this.r);
  }
}
项目:c5    文件:TestScanner.java   
/**
 * Tests to do a sync flush during the middle of a scan. This is testing the StoreScanner
 * update readers code essentially.  This is not highly concurrent, since its all 1 thread.
 * HBase-910.
 * @throws Exception
 */
@Test
public void testScanAndSyncFlush() throws Exception {
  this.r = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
  HRegionIncommon incommon = new HRegionIncommon(r);
  try {
    LOG.info("Added: "
        + HBaseTestCase.addContent(incommon, Bytes.toString(HConstants.CATALOG_FAMILY),
            Bytes.toString(HConstants.REGIONINFO_QUALIFIER)));
    int rowsSeen = count(incommon, -1, false);
    // A synchronous flush part-way through must not change the scan's row count.
    assertEquals(rowsSeen, count(incommon, 100, false)); // do a sync flush.
  } catch (Exception e) {
    LOG.error("Failed", e);
    throw e;
  } finally {
    HRegion.closeHRegion(this.r);
  }
}
项目:c5    文件:TestScanner.java   
/**
 * Tests to do a concurrent flush (using a 2nd thread) while scanning.  This tests both
 * the StoreScanner update readers and the transition from memstore -> snapshot -> store file.
 *
 * @throws Exception
 */
@Test
public void testScanAndRealConcurrentFlush() throws Exception {
  this.r = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
  HRegionIncommon incommon = new HRegionIncommon(r);
  try {
    LOG.info("Added: "
        + HBaseTestCase.addContent(incommon, Bytes.toString(HConstants.CATALOG_FAMILY),
            Bytes.toString(HConstants.REGIONINFO_QUALIFIER)));
    int rowsSeen = count(incommon, -1, false);
    // A background-thread flush part-way through must not change the scan's row count.
    assertEquals(rowsSeen, count(incommon, 100, true)); // do a true concurrent background thread flush
  } catch (Exception e) {
    LOG.error("Failed", e);
    throw e;
  } finally {
    HRegion.closeHRegion(this.r);
  }
}
项目:hbase    文件:TestCoprocessorInterface.java   
/**
 * Exercises the coprocessor lifecycle hooks (start/open/flush/compact/close/stop)
 * and verifies the HBASE-4197 postScannerOpen scanner wrapping.
 * @throws IOException on region operations
 */
@Test
public void testCoprocessorInterface() throws IOException {
  TableName tableName = TableName.valueOf(name.getMethodName());
  byte [][] families = { fam1, fam2, fam3 };

  Configuration hc = initConfig();
  HRegion region = initHRegion(tableName, name.getMethodName(), hc,
    new Class<?>[]{CoprocessorImpl.class}, families);
  // Three load/flush rounds create multiple store files so compact() does real work.
  for (int i = 0; i < 3; i++) {
    HBaseTestCase.addContent(region, fam3);
    region.flush(true);
  }

  region.compact(false);

  // HBASE-4197: the coprocessor host must be able to wrap the region scanner.
  Scan s = new Scan();
  RegionScanner scanner = region.getCoprocessorHost().postScannerOpen(s, region.getScanner(s));
  assertTrue(scanner instanceof CustomScanner);
  // this would throw an exception before HBASE-4197
  scanner.next(new ArrayList<>());

  // Close the region first: wasClosed()/wasStopped() are asserted below, and they
  // can only be true once the region (and its coprocessor host) has shut down.
  HBaseTestingUtility.closeRegionAndWAL(region);
  Coprocessor c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class);

  assertTrue("Coprocessor not started", ((CoprocessorImpl)c).wasStarted());
  assertTrue("Coprocessor not stopped", ((CoprocessorImpl)c).wasStopped());
  assertTrue(((CoprocessorImpl)c).wasOpened());
  assertTrue(((CoprocessorImpl)c).wasClosed());
  assertTrue(((CoprocessorImpl)c).wasFlushed());
  assertTrue(((CoprocessorImpl)c).wasCompacted());
}
项目:ditb    文件:TestCoprocessorInterface.java   
/**
 * Verifies coprocessor shared-data semantics: different coprocessor classes get
 * distinct shared-data maps, all instances of one class share the same map across
 * regions, and a class's map is recreated once every environment holding it is gone.
 * @throws IOException on region open/flush/compact/split failures
 */
@Test
public void testSharedData() throws IOException {
  TableName tableName = TableName.valueOf(name.getMethodName());
  byte [][] families = { fam1, fam2, fam3 };

  Configuration hc = initSplit();
  // Open with no coprocessors; they are attached on reopenRegion() below.
  Region region = initHRegion(tableName, name.getMethodName(), hc,
    new Class<?>[]{}, families);

  // Load and flush three times to make the region large enough to be splittable.
  for (int i = 0; i < 3; i++) {
    HBaseTestCase.addContent(region, fam3);
    region.flush(true);
  }

  region.compact(false);

  byte [] splitRow = ((HRegion)region).checkSplit();
  assertNotNull(splitRow);
  Region [] regions = split(region, splitRow);
  // Reopen each daughter region with both coprocessors loaded.
  for (int i = 0; i < regions.length; i++) {
    regions[i] = reopenRegion(regions[i], CoprocessorImpl.class, CoprocessorII.class);
  }
  Coprocessor c = regions[0].getCoprocessorHost().
      findCoprocessor(CoprocessorImpl.class.getName());
  Coprocessor c2 = regions[0].getCoprocessorHost().
      findCoprocessor(CoprocessorII.class.getName());
  Object o = ((CoprocessorImpl)c).getSharedData().get("test1");
  Object o2 = ((CoprocessorII)c2).getSharedData().get("test2");
  assertNotNull(o);
  assertNotNull(o2);
  // Two different coprocessor classes must get different sharedData maps.
  assertFalse(((CoprocessorImpl)c).getSharedData() == ((CoprocessorII)c2).getSharedData());
  for (int i = 1; i < regions.length; i++) {
    c = regions[i].getCoprocessorHost().
        findCoprocessor(CoprocessorImpl.class.getName());
    c2 = regions[i].getCoprocessorHost().
        findCoprocessor(CoprocessorII.class.getName());
    // make sure that all coprocessors of a class have identical sharedData maps
    assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o);
    assertTrue(((CoprocessorII)c2).getSharedData().get("test2") == o2);
  }
  // now have all Environments fail
  // (presumably CoprocessorII throws from the Get hook, which unloads it — verify
  // against the CoprocessorII implementation elsewhere in this test class)
  for (int i = 0; i < regions.length; i++) {
    try {
      byte [] r = regions[i].getRegionInfo().getStartKey();
      if (r == null || r.length <= 0) {
        // Its the start row.  Can't ask for null.  Ask for minimal key instead.
        r = new byte [] {0};
      }
      Get g = new Get(r);
      regions[i].get(g);
      fail();
    } catch (org.apache.hadoop.hbase.DoNotRetryIOException xc) {
      // expected: the failing coprocessor aborts the Get
    }
    // After the failure the CoprocessorII environment must have been removed.
    assertNull(regions[i].getCoprocessorHost().
        findCoprocessor(CoprocessorII.class.getName()));
  }
  c = regions[0].getCoprocessorHost().
      findCoprocessor(CoprocessorImpl.class.getName());
  assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o);
  c = c2 = null;
  // perform a GC so the now-unreferenced CoprocessorII shared map can be collected
  System.gc();
  // reopen the region
  region = reopenRegion(regions[0], CoprocessorImpl.class, CoprocessorII.class);
  c = region.getCoprocessorHost().
      findCoprocessor(CoprocessorImpl.class.getName());
  // CPimpl is unaffected, still the same reference
  assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o);
  c2 = region.getCoprocessorHost().
      findCoprocessor(CoprocessorII.class.getName());
  // new map and object created, hence the reference is different
  // hence the old entry was indeed removed by the GC and new one has been created
  Object o3 = ((CoprocessorII)c2).getSharedData().get("test2");
  assertFalse(o3 == o2);
}
项目:ditb    文件:TestCoprocessorInterface.java   
/**
 * Exercises the full coprocessor lifecycle including split
 * (start/open/flush/compact/split/close/stop) and the HBASE-4197 scanner wrapping,
 * asserting every lifecycle flag on the coprocessor instance.
 * @throws IOException on region operations
 */
@Test
public void testCoprocessorInterface() throws IOException {
  TableName tableName = TableName.valueOf(name.getMethodName());
  byte [][] families = { fam1, fam2, fam3 };

  Configuration hc = initSplit();
  Region region = initHRegion(tableName, name.getMethodName(), hc,
    new Class<?>[]{CoprocessorImpl.class}, families);
  // Three load/flush rounds create enough data on disk for a split point.
  for (int i = 0; i < 3; i++) {
    HBaseTestCase.addContent(region, fam3);
    region.flush(true);
  }

  region.compact(false);

  byte [] splitRow = ((HRegion)region).checkSplit();

  assertNotNull(splitRow);
  Region [] regions = split(region, splitRow);
  for (int i = 0; i < regions.length; i++) {
    regions[i] = reopenRegion(regions[i], CoprocessorImpl.class);
  }
  // Close the parent so its coprocessor's wasClosed()/wasStopped() flags get set
  // before they are asserted below.
  HRegion.closeHRegion((HRegion)region);
  Coprocessor c = region.getCoprocessorHost().
    findCoprocessor(CoprocessorImpl.class.getName());

  // HBASE-4197: the daughter's coprocessor host must wrap the region scanner.
  Scan s = new Scan();
  RegionScanner scanner = regions[0].getCoprocessorHost().postScannerOpen(s, regions[0].getScanner(s));
  assertTrue(scanner instanceof CustomScanner);
  // this would throw an exception before HBASE-4197
  scanner.next(new ArrayList<Cell>());

  assertTrue("Coprocessor not started", ((CoprocessorImpl)c).wasStarted());
  assertTrue("Coprocessor not stopped", ((CoprocessorImpl)c).wasStopped());
  assertTrue(((CoprocessorImpl)c).wasOpened());
  assertTrue(((CoprocessorImpl)c).wasClosed());
  assertTrue(((CoprocessorImpl)c).wasFlushed());
  assertTrue(((CoprocessorImpl)c).wasCompacted());
  assertTrue(((CoprocessorImpl)c).wasSplit());

  for (int i = 0; i < regions.length; i++) {
    HRegion.closeHRegion((HRegion)regions[i]);
    // NOTE(review): this re-fetches the coprocessor from the already-closed parent
    // 'region', not from 'regions[i]' — confirm whether the daughters' instances
    // were meant to be checked here instead.
    c = region.getCoprocessorHost()
          .findCoprocessor(CoprocessorImpl.class.getName());
    assertTrue("Coprocessor not started", ((CoprocessorImpl)c).wasStarted());
    assertTrue("Coprocessor not stopped", ((CoprocessorImpl)c).wasStopped());
    assertTrue(((CoprocessorImpl)c).wasOpened());
    assertTrue(((CoprocessorImpl)c).wasClosed());
    assertTrue(((CoprocessorImpl)c).wasCompacted());
  }
}
项目:ditb    文件:TestCompaction.java   
/**
 * Verify that you can stop a long-running compaction (used during RS shutdown).
 * A Mockito spy disables writes at compaction-prep time; the test then asserts
 * that the aborted compaction left all original store files intact and wrote
 * nothing to the region's temp dir.
 * @throws Exception
 */
@Test
public void testInterruptCompaction() throws Exception {
  assertEquals(0, count());

  // lower the polling interval for this test
  // (closeCheckInterval is how many bytes a compaction writes between checks
  // of whether the region is closing — lowered so the abort is noticed fast)
  int origWI = HStore.closeCheckInterval;
  HStore.closeCheckInterval = 10 * 1000; // 10 KB

  try {
    // Create a couple store files w/ 15KB (over 10KB interval)
    int jmax = (int) Math.ceil(15.0 / compactionThreshold);
    byte[] pad = new byte[1000]; // 1 KB chunk
    for (int i = 0; i < compactionThreshold; i++) {
      HRegionIncommon loader = new HRegionIncommon(r);
      Put p = new Put(Bytes.add(STARTROW, Bytes.toBytes(i)));
      p.setDurability(Durability.SKIP_WAL);
      for (int j = 0; j < jmax; j++) {
        p.add(COLUMN_FAMILY, Bytes.toBytes(j), pad);
      }
      HBaseTestCase.addContent(loader, Bytes.toString(COLUMN_FAMILY));
      loader.put(p);
      loader.flushcache();
    }

    // Flip writesEnabled off the moment compaction prep runs, so the in-flight
    // compaction observes the "closing" state and aborts (asserted below).
    HRegion spyR = spy(r);
    doAnswer(new Answer() {
      public Object answer(InvocationOnMock invocation) throws Throwable {
        r.writestate.writesEnabled = false;
        return invocation.callRealMethod();
      }
    }).when(spyR).doRegionCompactionPrep();

    // force a minor compaction, but not before requesting a stop
    spyR.compactStores();

    // ensure that the compaction stopped, all old files are intact,
    Store s = r.stores.get(COLUMN_FAMILY);
    assertEquals(compactionThreshold, s.getStorefilesCount());
    assertTrue(s.getStorefilesSize() > 15 * 1000);
    // and no new store files persisted past compactStores()
    FileStatus[] ls = r.getFilesystem().listStatus(r.getRegionFileSystem().getTempDir());
    assertEquals(0, ls.length);

  } finally {
    // don't mess up future tests: restore the static interval and writes flag
    r.writestate.writesEnabled = true;
    HStore.closeCheckInterval = origWI;

    // Delete all Store information once done using
    for (int i = 0; i < compactionThreshold; i++) {
      Delete delete = new Delete(Bytes.add(STARTROW, Bytes.toBytes(i)));
      byte[][] famAndQf = { COLUMN_FAMILY, null };
      delete.deleteFamily(famAndQf[0]);
      r.delete(delete);
    }
    r.flush(true);

    // Multiple versions allowed for an entry, so the delete isn't enough
    // Lower TTL and expire to ensure that all our entries have been wiped
    final int ttl = 1000;
    for (Store hstore : this.r.stores.values()) {
      HStore store = (HStore) hstore;
      ScanInfo old = store.getScanInfo();
      ScanInfo si =
          new ScanInfo(old.getConfiguration(), old.getFamily(), old.getMinVersions(),
              old.getMaxVersions(), ttl, old.getKeepDeletedCells(), 0, old.getComparator());
      store.setScanInfo(si);
    }
    // Wait out the TTL so a major compaction can drop every expired cell.
    Thread.sleep(ttl);

    r.compact(true);
    assertEquals(0, count());
  }
}
项目:ditb    文件:TestCompaction.java   
/**
 * Loads a round of test data into {@code family} of the given region and
 * flushes the memstore, producing one new store file on disk.
 */
private void createStoreFile(final HRegion region, String family) throws IOException {
  HRegionIncommon writer = new HRegionIncommon(region);
  HBaseTestCase.addContent(writer, family);
  writer.flushcache();
}
项目:ditb    文件:TestMinorCompaction.java   
/**
 * Shared driver for the minor-compaction-with-delete tests: loads
 * compactionThreshold+1 flushed batches across two families/columns, applies
 * {@code delete}, and asserts that a minor compaction neither resurrects the
 * deleted cells nor drops the surviving ones.
 *
 * @param delete the Delete to apply before compacting
 * @param expectedResultsAfterDelete number of versions expected for fam2/col2
 *        of the second row after the delete has been applied
 * @throws Exception on region read/write/compaction failures
 */
private void testMinorCompactionWithDelete(Delete delete, int expectedResultsAfterDelete) throws Exception {
  HRegionIncommon loader = new HRegionIncommon(r);
  // Each iteration writes one version to every family/column pair and flushes,
  // leaving compactionThreshold+1 store files.
  for (int i = 0; i < compactionThreshold + 1; i++) {
    HBaseTestCase.addContent(loader, Bytes.toString(fam1), Bytes.toString(col1), firstRowBytes,
      thirdRowBytes, i);
    HBaseTestCase.addContent(loader, Bytes.toString(fam1), Bytes.toString(col2), firstRowBytes,
      thirdRowBytes, i);
    HBaseTestCase.addContent(loader, Bytes.toString(fam2), Bytes.toString(col1), firstRowBytes,
      thirdRowBytes, i);
    HBaseTestCase.addContent(loader, Bytes.toString(fam2), Bytes.toString(col2), firstRowBytes,
      thirdRowBytes, i);
    r.flush(true);
  }

  Result result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100));
  assertEquals(compactionThreshold, result.size());
  result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100));
  assertEquals(compactionThreshold, result.size());

  // Now add deletes to memstore and then flush it.  That will put us over
  // the compaction threshold of 3 store files.  Compacting these store files
  // should result in a compacted store file that has no references to the
  // deleted row.
  r.delete(delete);

  // Make sure that we have only deleted family2 from secondRowBytes
  result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100));
  assertEquals(expectedResultsAfterDelete, result.size());
  // but we still have firstrow
  result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100));
  assertEquals(compactionThreshold, result.size());

  r.flush(true);
  // should not change anything.
  // Let us check again

  // Make sure that we have only deleted family2 from secondRowBytes
  result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100));
  assertEquals(expectedResultsAfterDelete, result.size());
  // but we still have firstrow
  result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100));
  assertEquals(compactionThreshold, result.size());

  // do a compaction
  Store store2 = r.getStore(fam2);
  int numFiles1 = store2.getStorefiles().size();
  assertTrue("Was expecting to see 4 store files", numFiles1 > compactionThreshold); // > 3
  ((HStore)store2).compactRecentForTestingAssumingDefaultPolicy(compactionThreshold);   // = 3
  int numFiles2 = store2.getStorefiles().size();
  // Check that we did compact
  assertTrue("Number of store files should go down", numFiles1 > numFiles2);
  // Check that it was a minor compaction.
  assertTrue("Was not supposed to be a major compaction", numFiles2 > 1);

  // Make sure that we have only deleted family2 from secondRowBytes
  result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100));
  assertEquals(expectedResultsAfterDelete, result.size());
  // but we still have firstrow
  result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100));
  assertEquals(compactionThreshold, result.size());
}
项目:ditb    文件:TestHRegion.java   
/**
 * Splits a loaded region into two daughters, then merges them back with
 * {@code HRegion.mergeAdjacent} and cleans up the intermediate region
 * directories.
 * @throws IOException on split, merge, or filesystem failures
 */
@Test
public void testMerge() throws IOException {
  byte[][] families = { fam1, fam2, fam3 };
  Configuration hc = initSplit();
  // Setting up region
  String method = this.getName();
  this.region = initHRegion(tableName, method, hc, families);
  try {
    LOG.info("" + HBaseTestCase.addContent(region, fam3));
    region.flush(true);
    region.compactStores();
    byte[] splitRow = region.checkSplit();
    assertNotNull(splitRow);
    LOG.info("SplitRow: " + Bytes.toString(splitRow));
    HRegion[] subregions = splitRegion(region, splitRow);
    try {
      // Need to open the regions.
      for (int i = 0; i < subregions.length; i++) {
        HRegion.openHRegion(subregions[i], null);
        subregions[i].compactStores();
      }
      // Remember the on-disk dirs so they can be deleted after the merge.
      Path oldRegionPath = region.getRegionFileSystem().getRegionDir();
      Path oldRegion1 = subregions[0].getRegionFileSystem().getRegionDir();
      Path oldRegion2 = subregions[1].getRegionFileSystem().getRegionDir();
      long startTime = System.currentTimeMillis();
      region = HRegion.mergeAdjacent(subregions[0], subregions[1]);
      LOG.info("Merge regions elapsed time: "
          + ((System.currentTimeMillis() - startTime) / 1000.0));
      FILESYSTEM.delete(oldRegion1, true);
      FILESYSTEM.delete(oldRegion2, true);
      FILESYSTEM.delete(oldRegionPath, true);
      LOG.info("splitAndMerge completed.");
    } finally {
      for (int i = 0; i < subregions.length; i++) {
        try {
          HRegion.closeHRegion(subregions[i]);
        } catch (IOException e) {
          // Ignore: mergeAdjacent may already have closed this daughter.
        }
      }
    }
  } finally {
    HRegion.closeHRegion(this.region);
    this.region = null;
  }
}
项目:ditb    文件:TestScanner.java   
/**
 * Make sure scanner returns correct result when we run a major compaction
 * with deletes.
 *
 * @throws Exception
 */
@Test
@SuppressWarnings("deprecation")
public void testScanAndConcurrentMajorCompact() throws Exception {
  HTableDescriptor htd = TEST_UTIL.createTableDescriptor(name.getMethodName());
  this.r = TEST_UTIL.createLocalHRegion(htd, null, null);
  HRegionIncommon hri = new HRegionIncommon(r);

  try {
    // First batch covers rows [firstRow, secondRow) in both families.
    HBaseTestCase.addContent(hri, Bytes.toString(fam1), Bytes.toString(col1),
        firstRowBytes, secondRowBytes);
    HBaseTestCase.addContent(hri, Bytes.toString(fam2), Bytes.toString(col1),
        firstRowBytes, secondRowBytes);

    Delete dc = new Delete(firstRowBytes);
    /* delete column1 of firstRow */
    dc.deleteColumns(fam1, col1);
    r.delete(dc);
    r.flush(true);

    // Second batch covers rows [secondRow, thirdRow) in both families.
    HBaseTestCase.addContent(hri, Bytes.toString(fam1), Bytes.toString(col1),
        secondRowBytes, thirdRowBytes);
    HBaseTestCase.addContent(hri, Bytes.toString(fam2), Bytes.toString(col1),
        secondRowBytes, thirdRowBytes);
    r.flush(true);

    // Open the scanner BEFORE compacting; it must keep returning a consistent
    // view while the major compaction rewrites the store files underneath it.
    InternalScanner s = r.getScanner(new Scan());
    // run a major compact, column1 of firstRow will be cleaned.
    r.compact(true);

    List<Cell> results = new ArrayList<Cell>();
    s.next(results);

    // make sure returns column2 of firstRow
    assertTrue("result is not correct, keyValues : " + results,
        results.size() == 1);
    assertTrue(CellUtil.matchingRow(results.get(0), firstRowBytes)); 
    assertTrue(CellUtil.matchingFamily(results.get(0), fam2));

    results = new ArrayList<Cell>();
    s.next(results);

    // get secondRow: both families survive, so two cells come back
    assertTrue(results.size() == 2);
    assertTrue(CellUtil.matchingRow(results.get(0), secondRowBytes));
    assertTrue(CellUtil.matchingFamily(results.get(0), fam1));
    assertTrue(CellUtil.matchingFamily(results.get(1), fam2));
  } finally {
    HRegion.closeHRegion(this.r);
  }
}
项目:ditb    文件:TestMajorCompaction.java   
/**
 * Loads a round of test data into {@code family} of the given region and
 * flushes the memstore, producing one new store file on disk.
 */
private void createStoreFile(final Region region, String family) throws IOException {
  HRegionIncommon writer = new HRegionIncommon(region);
  HBaseTestCase.addContent(writer, family);
  writer.flushcache();
}
项目:ditb    文件:TestMajorCompaction.java   
/**
 * Flushes a store file whose keys start at "bbb", so it spans a narrower
 * key range than the files produced by a full {@code createStoreFile} load.
 */
private void createSmallerStoreFile(final Region region) throws IOException {
  HRegionIncommon writer = new HRegionIncommon(region);
  HBaseTestCase.addContent(writer, Bytes.toString(COLUMN_FAMILY), "bbb".getBytes(), null);
  writer.flushcache();
}
项目:pbase    文件:TestCoprocessorInterface.java   
/**
 * Verifies the coprocessor sharedData contract: different coprocessor
 * classes get distinct sharedData maps; all instances of one class across
 * regions share the same map; and once every environment holding a class's
 * map is gone (and a GC has run), reopening the region yields a new map.
 *
 * @throws IOException on region load/split/reopen failure
 */
@Test
public void testSharedData() throws IOException {
  TableName tableName = TableName.valueOf(name.getMethodName());
  byte [][] families = { fam1, fam2, fam3 };

  Configuration hc = initSplit();
  // Start with no coprocessors registered; they are attached on reopen below.
  HRegion region = initHRegion(tableName, name.getMethodName(), hc,
    new Class<?>[]{}, families);

  // Create a few store files so checkSplit() has data to pick a split point.
  for (int i = 0; i < 3; i++) {
    HBaseTestCase.addContent(region, fam3);
    region.flushcache();
  }

  region.compactStores();

  byte [] splitRow = region.checkSplit();

  assertNotNull(splitRow);
  HRegion [] regions = split(region, splitRow);
  for (int i = 0; i < regions.length; i++) {
    regions[i] = reopenRegion(regions[i], CoprocessorImpl.class, CoprocessorII.class);
  }
  Coprocessor c = regions[0].getCoprocessorHost().
      findCoprocessor(CoprocessorImpl.class.getName());
  Coprocessor c2 = regions[0].getCoprocessorHost().
      findCoprocessor(CoprocessorII.class.getName());
  Object o = ((CoprocessorImpl)c).getSharedData().get("test1");
  Object o2 = ((CoprocessorII)c2).getSharedData().get("test2");
  assertNotNull(o);
  assertNotNull(o2);
  // two different coprocessor classes must get different sharedData maps
  assertFalse(((CoprocessorImpl)c).getSharedData() == ((CoprocessorII)c2).getSharedData());
  for (int i = 1; i < regions.length; i++) {
    c = regions[i].getCoprocessorHost().
        findCoprocessor(CoprocessorImpl.class.getName());
    c2 = regions[i].getCoprocessorHost().
        findCoprocessor(CoprocessorII.class.getName());
    // make sure that all coprocessors of a class share the identical sharedData
    assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o);
    assertTrue(((CoprocessorII)c2).getSharedData().get("test2") == o2);
  }
  // now have all Environments fail
  // NOTE(review): the Get below is expected to throw DoNotRetryIOException and
  // cause CoprocessorII's environment to be removed from the host (asserted
  // via assertNull) -- confirm against CoprocessorII's implementation.
  for (int i = 0; i < regions.length; i++) {
    try {
      byte [] r = regions[i].getStartKey();
      if (r == null || r.length <= 0) {
        // Its the start row.  Can't ask for null.  Ask for minimal key instead.
        r = new byte [] {0};
      }
      Get g = new Get(r);
      regions[i].get(g);
      fail();
    } catch (org.apache.hadoop.hbase.DoNotRetryIOException xc) {
    }
    // CoprocessorII must have been unloaded after its environment failed
    assertNull(regions[i].getCoprocessorHost().
        findCoprocessor(CoprocessorII.class.getName()));
  }
  // CoprocessorImpl was unaffected by the failures: same sharedData entry
  c = regions[0].getCoprocessorHost().
      findCoprocessor(CoprocessorImpl.class.getName());
  assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o);
  // drop our references so GC can reclaim CoprocessorII's sharedData entry
  // (verified by the fresh-map assertion at the end)
  c = c2 = null;
  // perform a GC
  System.gc();
  // reopen the region
  region = reopenRegion(regions[0], CoprocessorImpl.class, CoprocessorII.class);
  c = region.getCoprocessorHost().
      findCoprocessor(CoprocessorImpl.class.getName());
  // CPimpl is unaffected, still the same reference
  assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o);
  c2 = region.getCoprocessorHost().
      findCoprocessor(CoprocessorII.class.getName());
  // new map and object created, hence the reference is different
  // hence the old entry was indeed removed by the GC and new one has been created
  Object o3 = ((CoprocessorII)c2).getSharedData().get("test2");
  assertFalse(o3 == o2);
}
项目:pbase    文件:TestCoprocessorInterface.java   
/**
 * Exercises the basic coprocessor lifecycle (start/stop, open/close, flush,
 * compact, split) and verifies that postScannerOpen can wrap the region
 * scanner without breaking subsequent next() calls (HBASE-4197).
 *
 * @throws IOException on region load/split/reopen failure
 */
@Test
public void testCoprocessorInterface() throws IOException {
  TableName tableName = TableName.valueOf(name.getMethodName());
  byte [][] families = { fam1, fam2, fam3 };

  Configuration hc = initSplit();
  HRegion region = initHRegion(tableName, name.getMethodName(), hc,
    new Class<?>[]{CoprocessorImpl.class}, families);
  // Create a few store files so checkSplit() has data to pick a split point.
  for (int i = 0; i < 3; i++) {
    HBaseTestCase.addContent(region, fam3);
    region.flushcache();
  }

  region.compactStores();

  byte [] splitRow = region.checkSplit();

  assertNotNull(splitRow);
  HRegion [] regions = split(region, splitRow);
  for (int i = 0; i < regions.length; i++) {
    regions[i] = reopenRegion(regions[i], CoprocessorImpl.class);
  }
  // Close the parent region now; its coprocessor is interrogated below to
  // check that the full lifecycle (including stop/close) has been observed.
  HRegion.closeHRegion(region);
  Coprocessor c = region.getCoprocessorHost().
    findCoprocessor(CoprocessorImpl.class.getName());

  // HBASE-4197
  Scan s = new Scan();
  RegionScanner scanner = regions[0].getCoprocessorHost().postScannerOpen(s, regions[0].getScanner(s));
  assertTrue(scanner instanceof CustomScanner);
  // this would throw an exception before HBASE-4197
  scanner.next(new ArrayList<Cell>());

  assertTrue("Coprocessor not started", ((CoprocessorImpl)c).wasStarted());
  assertTrue("Coprocessor not stopped", ((CoprocessorImpl)c).wasStopped());
  assertTrue(((CoprocessorImpl)c).wasOpened());
  assertTrue(((CoprocessorImpl)c).wasClosed());
  assertTrue(((CoprocessorImpl)c).wasFlushed());
  assertTrue(((CoprocessorImpl)c).wasCompacted());
  assertTrue(((CoprocessorImpl)c).wasSplit());

  for (int i = 0; i < regions.length; i++) {
    HRegion.closeHRegion(regions[i]);
    // NOTE(review): this looks up the coprocessor on the parent 'region',
    // not on regions[i] -- possibly intentional (the parent's flags were
    // already verified above), but confirm it is not a copy/paste slip.
    c = region.getCoprocessorHost()
          .findCoprocessor(CoprocessorImpl.class.getName());
    assertTrue("Coprocessor not started", ((CoprocessorImpl)c).wasStarted());
    assertTrue("Coprocessor not stopped", ((CoprocessorImpl)c).wasStopped());
    assertTrue(((CoprocessorImpl)c).wasOpened());
    assertTrue(((CoprocessorImpl)c).wasClosed());
    assertTrue(((CoprocessorImpl)c).wasCompacted());
  }
}
项目:pbase    文件:TestCompaction.java   
/**
 * Verify that you can stop a long-running compaction
 * (used during RS shutdown): writes are disabled from inside the compaction
 * prep step, so the compaction must abort, leaving all original store files
 * intact and no new files in the region's temp dir.
 * @throws Exception
 */
@Test
public void testInterruptCompaction() throws Exception {
  assertEquals(0, count());

  // lower the polling interval for this test
  int origWI = HStore.closeCheckInterval;
  HStore.closeCheckInterval = 10*1000; // 10 KB

  try {
    // Create a couple store files w/ 15KB (over 10KB interval)
    int jmax = (int) Math.ceil(15.0/compactionThreshold);
    byte [] pad = new byte[1000]; // 1 KB chunk
    for (int i = 0; i < compactionThreshold; i++) {
      HRegionIncommon loader = new HRegionIncommon(r);
      Put p = new Put(Bytes.add(STARTROW, Bytes.toBytes(i)));
      p.setDurability(Durability.SKIP_WAL);
      for (int j = 0; j < jmax; j++) {
        p.add(COLUMN_FAMILY, Bytes.toBytes(j), pad);
      }
      HBaseTestCase.addContent(loader, Bytes.toString(COLUMN_FAMILY));
      loader.put(p);
      loader.flushcache();
    }

    // Disable writes from within doRegionCompactionPrep() so the stop
    // request is already pending when the compaction itself starts.
    HRegion spyR = spy(r);
    doAnswer(new Answer() {
      public Object answer(InvocationOnMock invocation) throws Throwable {
        r.writestate.writesEnabled = false;
        return invocation.callRealMethod();
      }
    }).when(spyR).doRegionCompactionPrep();

    // force a minor compaction, but not before requesting a stop
    spyR.compactStores();

    // ensure that the compaction stopped, all old files are intact,
    Store s = r.stores.get(COLUMN_FAMILY);
    assertEquals(compactionThreshold, s.getStorefilesCount());
    assertTrue(s.getStorefilesSize() > 15*1000);
    // and no new store files persisted past compactStores()
    FileStatus[] ls = r.getFilesystem().listStatus(r.getRegionFileSystem().getTempDir());
    assertEquals(0, ls.length);

  } finally {
    // don't mess up future tests
    r.writestate.writesEnabled = true;
    HStore.closeCheckInterval = origWI;

    // Delete all Store information once done using
    for (int i = 0; i < compactionThreshold; i++) {
      Delete delete = new Delete(Bytes.add(STARTROW, Bytes.toBytes(i)));
      byte [][] famAndQf = {COLUMN_FAMILY, null};
      delete.deleteFamily(famAndQf[0]);
      r.delete(delete);
    }
    r.flushcache();

    // Multiple versions allowed for an entry, so the delete isn't enough
    // Lower TTL and expire to ensure that all our entries have been wiped
    final int ttl = 1000;
    for (Store hstore: this.r.stores.values()) {
      HStore store = (HStore)hstore;
      ScanInfo old = store.getScanInfo();
      ScanInfo si = new ScanInfo(old.getFamily(),
          old.getMinVersions(), old.getMaxVersions(), ttl,
          old.getKeepDeletedCells(), 0, old.getComparator());
      store.setScanInfo(si);
    }
    // Wait out the lowered TTL so a major compaction can drop everything.
    Thread.sleep(ttl);

    r.compactStores(true);
    assertEquals(0, count());
  }
}
项目:pbase    文件:TestCompaction.java   
/**
 * Populates the given column family of {@code region} with default test
 * content and flushes the memstore so the data lands in a new store file.
 *
 * @param region region to write into
 * @param family name of the column family to populate
 * @throws IOException if loading or flushing fails
 */
private void createStoreFile(final HRegion region, String family) throws IOException {
  final HRegionIncommon writer = new HRegionIncommon(region);
  HBaseTestCase.addContent(writer, family);
  writer.flushcache();
}
项目:pbase    文件:TestMinorCompaction.java   
/**
 * Shared driver for the minor-compaction delete tests: loads
 * compactionThreshold + 1 flushes of data into fam1/fam2, applies the given
 * delete, minor-compacts fam2, and checks after every stage (delete, flush,
 * compaction) that only the deleted cells are gone while firstRow/fam1
 * keeps all its versions.
 *
 * @param delete delete to apply once loading is done
 * @param expectedResultsAfterDelete number of versions still expected from
 *        fam2:col2 of secondRow after the delete has been applied
 * @throws Exception on any load/flush/compaction failure
 */
private void testMinorCompactionWithDelete(Delete delete, int expectedResultsAfterDelete) throws Exception {
  HRegionIncommon loader = new HRegionIncommon(r);
  for (int i = 0; i < compactionThreshold + 1; i++) {
    HBaseTestCase.addContent(loader, Bytes.toString(fam1), Bytes.toString(col1), firstRowBytes,
      thirdRowBytes, i);
    HBaseTestCase.addContent(loader, Bytes.toString(fam1), Bytes.toString(col2), firstRowBytes,
      thirdRowBytes, i);
    HBaseTestCase.addContent(loader, Bytes.toString(fam2), Bytes.toString(col1), firstRowBytes,
      thirdRowBytes, i);
    HBaseTestCase.addContent(loader, Bytes.toString(fam2), Bytes.toString(col2), firstRowBytes,
      thirdRowBytes, i);
    r.flushcache();
  }

  // Sanity check the load: compactionThreshold versions visible per cell.
  Result result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100));
  assertEquals(compactionThreshold, result.size());
  result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100));
  assertEquals(compactionThreshold, result.size());

  // Now add deletes to memstore and then flush it.  That will put us over
  // the compaction threshold of 3 store files.  Compacting these store files
  // should result in a compacted store file that has no references to the
  // deleted row.
  r.delete(delete);

  // Make sure that we have only deleted family2 from secondRowBytes
  result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100));
  assertEquals(expectedResultsAfterDelete, result.size());
  // but we still have firstrow
  result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100));
  assertEquals(compactionThreshold, result.size());

  r.flushcache();
  // should not change anything.
  // Let us check again

  // Make sure that we have only deleted family2 from secondRowBytes
  result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100));
  assertEquals(expectedResultsAfterDelete, result.size());
  // but we still have firstrow
  result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100));
  assertEquals(compactionThreshold, result.size());

  // do a compaction
  Store store2 = this.r.stores.get(fam2);
  int numFiles1 = store2.getStorefiles().size();
  assertTrue("Was expecting to see 4 store files", numFiles1 > compactionThreshold); // > 3
  ((HStore)store2).compactRecentForTestingAssumingDefaultPolicy(compactionThreshold);   // = 3
  int numFiles2 = store2.getStorefiles().size();
  // Check that we did compact
  assertTrue("Number of store files should go down", numFiles1 > numFiles2);
  // Check that it was a minor compaction.
  assertTrue("Was not supposed to be a major compaction", numFiles2 > 1);

  // Make sure that we have only deleted family2 from secondRowBytes
  result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100));
  assertEquals(expectedResultsAfterDelete, result.size());
  // but we still have firstrow
  result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100));
  assertEquals(compactionThreshold, result.size());
}
项目:pbase    文件:TestHRegion.java   
/**
 * Loads a region, splits it into two daughters, then merges the daughters
 * back together with HRegion.mergeAdjacent, deleting the stale region
 * directories afterwards.
 *
 * @throws IOException on split/merge/filesystem failure
 */
@Test
public void testMerge() throws IOException {
  byte[][] families = { fam1, fam2, fam3 };
  Configuration hc = initSplit();
  // Setting up region
  String method = this.getName();
  this.region = initHRegion(tableName, method, hc, families);
  try {
    LOG.info("" + HBaseTestCase.addContent(region, fam3));
    region.flushcache();
    region.compactStores();
    byte[] splitRow = region.checkSplit();
    assertNotNull(splitRow);
    LOG.info("SplitRow: " + Bytes.toString(splitRow));
    HRegion[] subregions = splitRegion(region, splitRow);
    try {
      // Need to open the regions.
      for (int i = 0; i < subregions.length; i++) {
        HRegion.openHRegion(subregions[i], null);
        subregions[i].compactStores();
      }
      Path oldRegionPath = region.getRegionFileSystem().getRegionDir();
      Path oldRegion1 = subregions[0].getRegionFileSystem().getRegionDir();
      Path oldRegion2 = subregions[1].getRegionFileSystem().getRegionDir();
      long startTime = System.currentTimeMillis();
      region = HRegion.mergeAdjacent(subregions[0], subregions[1]);
      LOG.info("Merge regions elapsed time: "
          + ((System.currentTimeMillis() - startTime) / 1000.0));
      // The merged region replaces the originals; their dirs are now stale.
      FILESYSTEM.delete(oldRegion1, true);
      FILESYSTEM.delete(oldRegion2, true);
      FILESYSTEM.delete(oldRegionPath, true);
      LOG.info("splitAndMerge completed.");
    } finally {
      for (int i = 0; i < subregions.length; i++) {
        try {
          HRegion.closeHRegion(subregions[i]);
        } catch (IOException e) {
          // Ignore.
        }
      }
    }
  } finally {
    HRegion.closeHRegion(this.region);
    this.region = null;
  }
}
项目:pbase    文件:TestScanner.java   
/**
 * Test basic stop row filter works: a scan over a range holding no rows
 * returns nothing, and a bounded scan starts at the start row and never
 * reaches the (exclusive) stop row.
 * @throws Exception
 */
@Test
public void testStopRow() throws Exception {
  byte [] startrow = Bytes.toBytes("bbb");
  byte [] stoprow = Bytes.toBytes("ccc");
  try {
    this.r = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
    HBaseTestCase.addContent(this.r, HConstants.CATALOG_FAMILY);
    List<Cell> results = new ArrayList<Cell>();
    // Do simple test of getting one row only first.
    // Presumably no loaded row falls in [abc, abd), so nothing comes back.
    Scan scan = new Scan(Bytes.toBytes("abc"), Bytes.toBytes("abd"));
    scan.addFamily(HConstants.CATALOG_FAMILY);

    InternalScanner s = r.getScanner(scan);
    int count = 0;
    while (s.next(results)) {
      count++;
    }
    s.close();
    assertEquals(0, count);
    // Now do something a bit more involved.
    scan = new Scan(startrow, stoprow);
    scan.addFamily(HConstants.CATALOG_FAMILY);

    s = r.getScanner(scan);
    count = 0;
    Cell kv = null;
    results = new ArrayList<Cell>();
    for (boolean first = true; s.next(results);) {
      kv = results.get(0);
      if (first) {
        // The very first cell returned must sit on the start row.
        assertTrue(CellUtil.matchingRow(kv,  startrow));
        first = false;
      }
      count++;
    }
    // Last row seen must sort strictly before the (exclusive) stop row.
    assertTrue(Bytes.BYTES_COMPARATOR.compare(stoprow, CellUtil.cloneRow(kv)) > 0);
    // We got something back.
    assertTrue(count > 10);
    s.close();
  } finally {
    HRegion.closeHRegion(this.r);
  }
}
项目:pbase    文件:TestScanner.java   
/**
 * Make sure scanner returns correct result when we run a major compaction
 * with deletes: a scanner opened before the compaction must still see the
 * pre-compaction view -- column2 of firstRow first, then both columns of
 * secondRow.
 *
 * @throws Exception
 */
@Test
@SuppressWarnings("deprecation")
public void testScanAndConcurrentMajorCompact() throws Exception {
  HTableDescriptor htd = TEST_UTIL.createTableDescriptor(name.getMethodName());
  this.r = TEST_UTIL.createLocalHRegion(htd, null, null);
  HRegionIncommon hri = new HRegionIncommon(r);

  try {
    HBaseTestCase.addContent(hri, Bytes.toString(fam1), Bytes.toString(col1),
        firstRowBytes, secondRowBytes);
    HBaseTestCase.addContent(hri, Bytes.toString(fam2), Bytes.toString(col1),
        firstRowBytes, secondRowBytes);

    Delete dc = new Delete(firstRowBytes);
    /* delete column1 of firstRow */
    dc.deleteColumns(fam1, col1);
    r.delete(dc);
    r.flushcache();

    HBaseTestCase.addContent(hri, Bytes.toString(fam1), Bytes.toString(col1),
        secondRowBytes, thirdRowBytes);
    HBaseTestCase.addContent(hri, Bytes.toString(fam2), Bytes.toString(col1),
        secondRowBytes, thirdRowBytes);
    r.flushcache();

    // Open the scanner BEFORE compacting so it holds the old store files.
    InternalScanner s = r.getScanner(new Scan());
    // run a major compact, column1 of firstRow will be cleaned.
    r.compactStores(true);

    List<Cell> results = new ArrayList<Cell>();
    s.next(results);

    // make sure returns column2 of firstRow
    assertTrue("result is not correct, keyValues : " + results,
        results.size() == 1);
    assertTrue(CellUtil.matchingRow(results.get(0), firstRowBytes)); 
    assertTrue(CellUtil.matchingFamily(results.get(0), fam2));

    results = new ArrayList<Cell>();
    s.next(results);

    // get secondRow
    assertTrue(results.size() == 2);
    assertTrue(CellUtil.matchingRow(results.get(0), secondRowBytes));
    assertTrue(CellUtil.matchingFamily(results.get(0), fam1));
    assertTrue(CellUtil.matchingFamily(results.get(1), fam2));
  } finally {
    HRegion.closeHRegion(this.r);
  }
}
项目:pbase    文件:TestMajorCompaction.java   
/**
 * Populates the given column family of {@code region} with default test
 * content and flushes the memstore so the data lands in a new store file.
 *
 * @param region region to write into
 * @param family name of the column family to populate
 * @throws IOException if loading or flushing fails
 */
private void createStoreFile(final HRegion region, String family) throws IOException {
  final HRegionIncommon writer = new HRegionIncommon(region);
  HBaseTestCase.addContent(writer, family);
  writer.flushcache();
}
项目:pbase    文件:TestMajorCompaction.java   
/**
 * Creates a single, smaller store file in {@code region}'s default column
 * family by loading content starting from row "bbb" and flushing the
 * memstore.
 *
 * @param region region to load and flush
 * @throws IOException if the load or flush fails
 */
private void createSmallerStoreFile(final HRegion region) throws IOException {
  HRegionIncommon loader = new HRegionIncommon(region);
  // Use Bytes.toBytes instead of ("" + "bbb").getBytes(): String.getBytes()
  // depends on the platform default charset, while Bytes.toBytes is always
  // UTF-8 and matches how row keys are built elsewhere in these tests.
  HBaseTestCase.addContent(loader, Bytes.toString(COLUMN_FAMILY),
      Bytes.toBytes("bbb"), null);
  loader.flushcache();
}
项目:HIndex    文件:TestCoprocessorInterface.java   
/**
 * Verifies the coprocessor sharedData contract: different coprocessor
 * classes get distinct sharedData maps; all instances of one class across
 * regions share the same map; and once every environment holding a class's
 * map is gone (and a GC has run), reopening the region yields a new map.
 *
 * @throws IOException on region load/split/reopen failure
 */
@Test
public void testSharedData() throws IOException {
  TableName tableName = TableName.valueOf(name.getMethodName());
  byte [][] families = { fam1, fam2, fam3 };

  Configuration hc = initSplit();
  // Start with no coprocessors registered; they are attached on reopen below.
  HRegion region = initHRegion(tableName, name.getMethodName(), hc,
    new Class<?>[]{}, families);

  // Create a few store files so checkSplit() has data to pick a split point.
  for (int i = 0; i < 3; i++) {
    HBaseTestCase.addContent(region, fam3);
    region.flushcache();
  }

  region.compactStores();

  byte [] splitRow = region.checkSplit();

  assertNotNull(splitRow);
  HRegion [] regions = split(region, splitRow);
  for (int i = 0; i < regions.length; i++) {
    regions[i] = reopenRegion(regions[i], CoprocessorImpl.class, CoprocessorII.class);
  }
  Coprocessor c = regions[0].getCoprocessorHost().
      findCoprocessor(CoprocessorImpl.class.getName());
  Coprocessor c2 = regions[0].getCoprocessorHost().
      findCoprocessor(CoprocessorII.class.getName());
  Object o = ((CoprocessorImpl)c).getSharedData().get("test1");
  Object o2 = ((CoprocessorII)c2).getSharedData().get("test2");
  assertNotNull(o);
  assertNotNull(o2);
  // two different coprocessor classes must get different sharedData maps
  assertFalse(((CoprocessorImpl)c).getSharedData() == ((CoprocessorII)c2).getSharedData());
  for (int i = 1; i < regions.length; i++) {
    c = regions[i].getCoprocessorHost().
        findCoprocessor(CoprocessorImpl.class.getName());
    c2 = regions[i].getCoprocessorHost().
        findCoprocessor(CoprocessorII.class.getName());
    // make sure that all coprocessors of a class share the identical sharedData
    assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o);
    assertTrue(((CoprocessorII)c2).getSharedData().get("test2") == o2);
  }
  // now have all Environments fail
  // NOTE(review): the Get below is expected to throw DoNotRetryIOException and
  // cause CoprocessorII's environment to be removed from the host (asserted
  // via assertNull) -- confirm against CoprocessorII's implementation.
  for (int i = 0; i < regions.length; i++) {
    try {
      byte [] r = regions[i].getStartKey();
      if (r == null || r.length <= 0) {
        // Its the start row.  Can't ask for null.  Ask for minimal key instead.
        r = new byte [] {0};
      }
      Get g = new Get(r);
      regions[i].get(g);
      fail();
    } catch (org.apache.hadoop.hbase.DoNotRetryIOException xc) {
    }
    // CoprocessorII must have been unloaded after its environment failed
    assertNull(regions[i].getCoprocessorHost().
        findCoprocessor(CoprocessorII.class.getName()));
  }
  // CoprocessorImpl was unaffected by the failures: same sharedData entry
  c = regions[0].getCoprocessorHost().
      findCoprocessor(CoprocessorImpl.class.getName());
  assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o);
  // drop our references so GC can reclaim CoprocessorII's sharedData entry
  // (verified by the fresh-map assertion at the end)
  c = c2 = null;
  // perform a GC
  System.gc();
  // reopen the region
  region = reopenRegion(regions[0], CoprocessorImpl.class, CoprocessorII.class);
  c = region.getCoprocessorHost().
      findCoprocessor(CoprocessorImpl.class.getName());
  // CPimpl is unaffected, still the same reference
  assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o);
  c2 = region.getCoprocessorHost().
      findCoprocessor(CoprocessorII.class.getName());
  // new map and object created, hence the reference is different
  // hence the old entry was indeed removed by the GC and new one has been created
  Object o3 = ((CoprocessorII)c2).getSharedData().get("test2");
  assertFalse(o3 == o2);
}
项目:HIndex    文件:TestCoprocessorInterface.java   
/**
 * Exercises the basic coprocessor lifecycle (start/stop, open/close, flush,
 * compact, split) and verifies that postScannerOpen can wrap the region
 * scanner without breaking subsequent next() calls (HBASE-4197).
 *
 * @throws IOException on region load/split/reopen failure
 */
@Test
public void testCoprocessorInterface() throws IOException {
  TableName tableName = TableName.valueOf(name.getMethodName());
  byte [][] families = { fam1, fam2, fam3 };

  Configuration hc = initSplit();
  HRegion region = initHRegion(tableName, name.getMethodName(), hc,
    new Class<?>[]{CoprocessorImpl.class}, families);
  // Create a few store files so checkSplit() has data to pick a split point.
  for (int i = 0; i < 3; i++) {
    HBaseTestCase.addContent(region, fam3);
    region.flushcache();
  }

  region.compactStores();

  byte [] splitRow = region.checkSplit();

  assertNotNull(splitRow);
  HRegion [] regions = split(region, splitRow);
  for (int i = 0; i < regions.length; i++) {
    regions[i] = reopenRegion(regions[i], CoprocessorImpl.class);
  }
  // Close the parent region now; its coprocessor is interrogated below to
  // check that the full lifecycle (including stop/close) has been observed.
  HRegion.closeHRegion(region);
  Coprocessor c = region.getCoprocessorHost().
    findCoprocessor(CoprocessorImpl.class.getName());

  // HBASE-4197
  Scan s = new Scan();
  RegionScanner scanner = regions[0].getCoprocessorHost().postScannerOpen(s, regions[0].getScanner(s));
  assertTrue(scanner instanceof CustomScanner);
  // this would throw an exception before HBASE-4197
  scanner.next(new ArrayList<Cell>());

  assertTrue("Coprocessor not started", ((CoprocessorImpl)c).wasStarted());
  assertTrue("Coprocessor not stopped", ((CoprocessorImpl)c).wasStopped());
  assertTrue(((CoprocessorImpl)c).wasOpened());
  assertTrue(((CoprocessorImpl)c).wasClosed());
  assertTrue(((CoprocessorImpl)c).wasFlushed());
  assertTrue(((CoprocessorImpl)c).wasCompacted());
  assertTrue(((CoprocessorImpl)c).wasSplit());

  for (int i = 0; i < regions.length; i++) {
    HRegion.closeHRegion(regions[i]);
    // NOTE(review): this looks up the coprocessor on the parent 'region',
    // not on regions[i] -- possibly intentional (the parent's flags were
    // already verified above), but confirm it is not a copy/paste slip.
    c = region.getCoprocessorHost()
          .findCoprocessor(CoprocessorImpl.class.getName());
    assertTrue("Coprocessor not started", ((CoprocessorImpl)c).wasStarted());
    assertTrue("Coprocessor not stopped", ((CoprocessorImpl)c).wasStopped());
    assertTrue(((CoprocessorImpl)c).wasOpened());
    assertTrue(((CoprocessorImpl)c).wasClosed());
    assertTrue(((CoprocessorImpl)c).wasCompacted());
  }
}