Java class org.apache.hadoop.hbase.regionserver.HRegion: example source code

Project: ditb    File: HMerge.java
@Override
protected void updateMeta(final byte [] oldRegion1,
    final byte [] oldRegion2,
  HRegion newRegion)
throws IOException {
  byte[][] regionsToDelete = {oldRegion1, oldRegion2};
  for (int r = 0; r < regionsToDelete.length; r++) {
    if(Bytes.equals(regionsToDelete[r], latestRegion.getRegionName())) {
      latestRegion = null;
    }
    Delete delete = new Delete(regionsToDelete[r]);
    table.delete(delete);
    if(LOG.isDebugEnabled()) {
      LOG.debug("updated columns in row: " + Bytes.toStringBinary(regionsToDelete[r]));
    }
  }
  newRegion.getRegionInfo().setOffline(true);

  MetaTableAccessor.addRegionToMeta(table, newRegion.getRegionInfo());

  if(LOG.isDebugEnabled()) {
    LOG.debug("updated columns in row: "
        + Bytes.toStringBinary(newRegion.getRegionInfo().getRegionName()));
  }
}
Project: ditb    File: ZKSplitTransactionCoordination.java
/**
 * Creates a new ephemeral node in the PENDING_SPLIT state for the specified region. Create it
 * ephemeral in case the regionserver dies mid-split.
 * <p>
 * Does not transition nodes from other states. If a node already exists for this region, an
 * Exception will be thrown.
 * @param parent region to be created as offline
 * @param serverName server event originates from
 * @param hri_a daughter region
 * @param hri_b daughter region
 * @throws IOException
 */

@Override
public void startSplitTransaction(HRegion parent, ServerName serverName, HRegionInfo hri_a,
    HRegionInfo hri_b) throws IOException {

  HRegionInfo region = parent.getRegionInfo();
  try {

    LOG.debug(watcher.prefix("Creating ephemeral node for " + region.getEncodedName()
        + " in PENDING_SPLIT state"));
    byte[] payload = HRegionInfo.toDelimitedByteArray(hri_a, hri_b);
    RegionTransition rt =
        RegionTransition.createRegionTransition(RS_ZK_REQUEST_REGION_SPLIT,
          region.getRegionName(), serverName, payload);
    String node = ZKAssign.getNodeName(watcher, region.getEncodedName());
    if (!ZKUtil.createEphemeralNodeAndWatch(watcher, node, rt.toByteArray())) {
      throw new IOException("Failed create of ephemeral " + node);
    }

  } catch (KeeperException e) {
    throw new IOException("Failed creating PENDING_SPLIT znode on "
        + parent.getRegionInfo().getRegionNameAsString(), e);
  }

}
Project: ditb    File: TestCoprocessorInterface.java
Region initHRegion (TableName tableName, String callingMethod,
    Configuration conf, Class<?> [] implClasses, byte [][] families)
    throws IOException {
  HTableDescriptor htd = new HTableDescriptor(tableName);
  for(byte [] family : families) {
    htd.addFamily(new HColumnDescriptor(family));
  }
  HRegionInfo info = new HRegionInfo(tableName, null, null, false);
  Path path = new Path(DIR + callingMethod);
  HRegion r = HRegion.createHRegion(info, path, conf, htd);

  // this following piece is a hack.
  RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);
  r.setCoprocessorHost(host);

  for (Class<?> implClass : implClasses) {
    host.load(implClass, Coprocessor.PRIORITY_USER, conf);
    Coprocessor c = host.findCoprocessor(implClass.getName());
    assertNotNull(c);
  }

  // Here we have to call pre and postOpen explicitly.
  host.preOpen();
  host.postOpen();
  return r;
}
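A minimal usage sketch for the helper above, assuming a test body in the same class; CoprocessorImpl, the table name, and the column family are placeholders rather than part of the original test. It shows how the coprocessor classes and families are passed in.

// Hypothetical call site; CoprocessorImpl and "f" are placeholder names.
Configuration conf = TEST_UTIL.getConfiguration();
Region r = initHRegion(TableName.valueOf("testtable"), "testSharedData", conf,
    new Class<?>[] { CoprocessorImpl.class }, new byte[][] { Bytes.toBytes("f") });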
Project: ditb    File: TestCoprocessorConfiguration.java
@Test
public void testRegionCoprocessorHostTableLoadingDisabled() throws Exception {
  Configuration conf = new Configuration(CONF);
  conf.setBoolean(CoprocessorHost.COPROCESSORS_ENABLED_CONF_KEY, true); // if defaults change
  conf.setBoolean(CoprocessorHost.USER_COPROCESSORS_ENABLED_CONF_KEY, false);
  HRegion region = mock(HRegion.class);
  when(region.getRegionInfo()).thenReturn(REGIONINFO);
  when(region.getTableDesc()).thenReturn(TABLEDESC);
  RegionServerServices rsServices = mock(RegionServerServices.class);
  systemCoprocessorLoaded.set(false);
  tableCoprocessorLoaded.set(false);
  new RegionCoprocessorHost(region, rsServices, conf);
  assertTrue("System coprocessors should have been loaded",
    systemCoprocessorLoaded.get());
  assertFalse("Table coprocessors should not have been loaded",
    tableCoprocessorLoaded.get());
}
Project: ditb    File: RegionLocationFinder.java
/**
 * Returns an ordered list of hosts that are hosting the blocks for this
 * region. The weight of each host is the sum of the block lengths of all
 * files on that host, so the first host in the list is the server which holds
 * the most bytes of the given region's HFiles.
 *
 * @param region region
 * @return ordered list of hosts holding blocks of the specified region
 */
protected HDFSBlocksDistribution internalGetTopBlockLocation(HRegionInfo region) {
  try {
    HTableDescriptor tableDescriptor = getTableDescriptor(region.getTable());
    if (tableDescriptor != null) {
      HDFSBlocksDistribution blocksDistribution =
          HRegion.computeHDFSBlocksDistribution(getConf(), tableDescriptor, region);
      return blocksDistribution;
    }
  } catch (IOException ioe) {
    LOG.warn("IOException during HDFSBlocksDistribution computation. for " + "region = "
        + region.getEncodedName(), ioe);
  }

  return new HDFSBlocksDistribution();
}
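A minimal sketch of consuming the distribution computed above, assuming HDFSBlocksDistribution exposes getTopHosts() (hosts ordered by block weight) and getWeight(String) as described in the Javadoc; hri stands in for the HRegionInfo of interest.

// Hypothetical consumer: log the hosts holding the most bytes of the region's HFiles.
HDFSBlocksDistribution dist = internalGetTopBlockLocation(hri);
for (String host : dist.getTopHosts()) {
  LOG.debug("host " + host + " holds " + dist.getWeight(host)
      + " bytes of region " + hri.getEncodedName());
}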
Project: ditb    File: HBaseFsck.java
/**
 * This is a special case hole -- when the first region of a table is
 * missing from META, HBase doesn't acknowledge the existence of the
 * table.
 */
@Override
public void handleRegionStartKeyNotEmpty(HbckInfo next) throws IOException {
  errors.reportError(ERROR_CODE.FIRST_REGION_STARTKEY_NOT_EMPTY,
      "First region should start with an empty key.  Creating a new " +
      "region and regioninfo in HDFS to plug the hole.",
      getTableInfo(), next);
  HTableDescriptor htd = getTableInfo().getHTD();
  // from special EMPTY_START_ROW to next region's startKey
  HRegionInfo newRegion = new HRegionInfo(htd.getTableName(),
      HConstants.EMPTY_START_ROW, next.getStartKey());

  // TODO test
  HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
  LOG.info("Table region start key was not empty.  Created new empty region: "
      + newRegion + " " +region);
  fixes++;
}
Project: ditb    File: HBaseFsck.java
/**
 * There is a hole in the hdfs regions that violates the table integrity
 * rules.  Create a new empty region that patches the hole.
 */
@Override
public void handleHoleInRegionChain(byte[] holeStartKey, byte[] holeStopKey) throws IOException {
  errors.reportError(
      ERROR_CODE.HOLE_IN_REGION_CHAIN,
      "There is a hole in the region chain between "
          + Bytes.toStringBinary(holeStartKey) + " and "
          + Bytes.toStringBinary(holeStopKey)
          + ".  Creating a new regioninfo and region "
          + "dir in hdfs to plug the hole.");
  HTableDescriptor htd = getTableInfo().getHTD();
  HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), holeStartKey, holeStopKey);
  HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
  LOG.info("Plugged hole by creating new empty region: "+ newRegion + " " +region);
  fixes++;
}
Project: ditb    File: ModifyRegionUtils.java
/**
 * Create a new region on the specified file-system.
 * @param conf {@link Configuration}
 * @param rootDir Root directory for HBase instance
 * @param tableDir table directory
 * @param hTableDescriptor description of the table
 * @param newRegion {@link HRegionInfo} that describes the region to create
 * @param task {@link RegionFillTask} custom code to populate region after creation
 * @throws IOException
 */
public static HRegionInfo createRegion(final Configuration conf, final Path rootDir,
    final Path tableDir, final HTableDescriptor hTableDescriptor, final HRegionInfo newRegion,
    final RegionFillTask task) throws IOException {
  // 1. Create HRegion
  HRegion region = HRegion.createHRegion(newRegion,
    rootDir, tableDir, conf, hTableDescriptor, null,
    false, true);
  try {
    // 2. Custom user code to interact with the created region
    if (task != null) {
      task.fillRegion(region);
    }
  } finally {
    // 3. Close the new region to flush to disk. Close log file too.
    region.close();
  }
  return region.getRegionInfo();
}
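A minimal caller-side sketch for the helper above; hTableDescriptor and newRegionInfo are assumed to be prepared elsewhere, and passing null for the RegionFillTask (which the method explicitly allows) skips the custom fill step.

// Hypothetical caller; hTableDescriptor and newRegionInfo are placeholders.
Configuration conf = HBaseConfiguration.create();
Path rootDir = FSUtils.getRootDir(conf);
Path tableDir = FSUtils.getTableDir(rootDir, hTableDescriptor.getTableName());
HRegionInfo created = ModifyRegionUtils.createRegion(conf, rootDir, tableDir,
    hTableDescriptor, newRegionInfo, null); // null task: region is created, closed, left empty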
Project: ditb    File: HFileArchiveUtil.java
/**
 * Get the archive directory for a given region under the specified table
 * @param tableName the table name. Cannot be null.
 * @param regiondir the path to the region directory. Cannot be null.
 * @return {@link Path} to the directory to archive the given region, or <tt>null</tt> if it
 *         should not be archived
 */
public static Path getRegionArchiveDir(Path rootDir,
                                       TableName tableName,
                                       Path regiondir) {
  // get the archive directory for a table
  Path archiveDir = getTableArchivePath(rootDir, tableName);

  // then add on the region path under the archive
  String encodedRegionName = regiondir.getName();
  return HRegion.getRegionDir(archiveDir, encodedRegionName);
}
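A minimal sketch of calling the helper above from outside the class; rootDir, tableName and regionDir are placeholders the caller is assumed to already hold. The HFileArchiveTestingUtil snippet further down shows the variant that takes an encoded region name instead of a region directory.

// Hypothetical caller: resolve where a region's files would be archived.
Path archiveDir = HFileArchiveUtil.getRegionArchiveDir(rootDir, tableName, regionDir);
LOG.debug("Region " + regionDir.getName() + " archives to " + archiveDir);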
Project: ditb    File: TestCoprocessorInterface.java
Region reopenRegion(final Region closedRegion, Class<?> ... implClasses)
    throws IOException {
  //HRegionInfo info = new HRegionInfo(tableName, null, null, false);
  Region r = HRegion.openHRegion(closedRegion, null);

  // this following piece is a hack. currently a coprocessorHost
  // is secretly loaded at OpenRegionHandler. we don't really
  // start a region server here, so just manually create cphost
  // and set it to region.
  Configuration conf = TEST_UTIL.getConfiguration();
  RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);
  ((HRegion)r).setCoprocessorHost(host);

  for (Class<?> implClass : implClasses) {
    host.load(implClass, Coprocessor.PRIORITY_USER, conf);
  }
  // we need to manually call pre- and postOpen here since the
  // above load() is not the real case for CP loading. A CP is
  // expected to be loaded by default from 1) configuration; or 2)
  // HTableDescriptor. If it's loaded after HRegion initialized,
  // the pre- and postOpen() won't be triggered automatically.
  // Here we have to call pre and postOpen explicitly.
  host.preOpen();
  host.postOpen();
  return r;
}
Project: ditb    File: ScanPreprocess.java
public static ConditionTree preprocess(HRegion region, Filter filter, float maxScale) {
  if (filter == null) return null;
  ConditionTree tree = null;
  if (isIndexFilter(region, filter)) {
    System.out.println("preprocess A");
    tree = new ConditionTreeNoneLeafNode(region, (SingleColumnValueFilter) filter, maxScale);
  } else if (filter instanceof FilterList) {
    System.out.println("preprocess B");
    tree = new ConditionTreeNoneLeafNode(region, (FilterList) filter, maxScale);
  }
  if (tree == null || tree.isPrune()) {
    System.out.println("return null for prune");
    return null;
  } else {
    return tree;
  }
}
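A minimal, hypothetical caller-side sketch for the preprocessing above: build the condition tree from a scan's filter and fall back to a plain region scan when no usable tree comes back; the 0.5f maxScale threshold is a placeholder value.

// Hypothetical caller in the ditb scan path.
ConditionTree tree = ScanPreprocess.preprocess(region, scan.getFilter(), 0.5f);
if (tree == null) {
  // filter absent, unsupported, or pruned: run a normal region scan
} else {
  // use the tree to drive an index-assisted scan
}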
Project: ditb    File: TestAccessController.java
@Test (timeout=180000)
public void testMergeRegions() throws Exception {
  final TableName tname = TableName.valueOf("testMergeRegions");
  createTestTable(tname);
  try {
    final List<HRegion> regions = TEST_UTIL.getHBaseCluster().findRegionsForTable(tname);
    assertTrue("not enough regions: " + regions.size(), regions.size() >= 2);

    AccessTestAction action = new AccessTestAction() {
      @Override
      public Object run() throws Exception {
        ACCESS_CONTROLLER.preMerge(ObserverContext.createAndPrepare(RSCP_ENV, null),
          regions.get(0), regions.get(1));
        return null;
      }
    };

    verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_OWNER, USER_GROUP_ADMIN);
    verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ,
      USER_GROUP_WRITE, USER_GROUP_CREATE);
  } finally {
    deleteTable(TEST_UTIL, tname);
  }
}
Project: ditb    File: TestDurability.java
private HRegion createHRegion (byte [] tableName, String callingMethod,
  WAL log, Durability durability)
throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  htd.setDurability(durability);
  HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  Path path = new Path(DIR + callingMethod);
  if (FS.exists(path)) {
    if (!FS.delete(path, true)) {
      throw new IOException("Failed delete of " + path);
    }
  }
  return HRegion.createHRegion(info, path, CONF, htd, log);
}
Project: ditb    File: TestCoprocessorConfiguration.java
@Test
public void testRegionCoprocessorHostAllDisabled() throws Exception {
  Configuration conf = new Configuration(CONF);
  conf.setBoolean(CoprocessorHost.COPROCESSORS_ENABLED_CONF_KEY, false);
  HRegion region = mock(HRegion.class);
  when(region.getRegionInfo()).thenReturn(REGIONINFO);
  when(region.getTableDesc()).thenReturn(TABLEDESC);
  RegionServerServices rsServices = mock(RegionServerServices.class);
  systemCoprocessorLoaded.set(false);
  tableCoprocessorLoaded.set(false);
  new RegionCoprocessorHost(region, rsServices, conf);
  assertFalse("System coprocessors should not have been loaded",
    systemCoprocessorLoaded.get());
  assertFalse("Table coprocessors should not have been loaded",
    tableCoprocessorLoaded.get());
}
Project: ditb    File: TestInvocationRecordFilter.java
@Before
public void setUp() throws Exception {
  HTableDescriptor htd = new HTableDescriptor(
      TableName.valueOf(TABLE_NAME_BYTES));
  htd.addFamily(new HColumnDescriptor(FAMILY_NAME_BYTES));
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  this.region = HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(),
      TEST_UTIL.getConfiguration(), htd);

  Put put = new Put(ROW_BYTES);
  for (int i = 0; i < 10; i += 2) {
    // puts 0, 2, 4, 6 and 8
    put.add(FAMILY_NAME_BYTES, Bytes.toBytes(QUALIFIER_PREFIX + i), i,
        Bytes.toBytes(VALUE_PREFIX + i));
  }
  this.region.put(put);
  this.region.flush(true);
}
Project: ditb    File: HBaseTestingUtility.java
public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow,
    final boolean present) throws IOException {
  for (int i = startRow; i < endRow; i++) {
    String failMsg = "Failed verification of row: " + i;
    byte[] data = Bytes.toBytes(String.valueOf(i));
    Result result = region.get(new Get(data));

    boolean hasResult = result != null && !result.isEmpty();
    assertEquals(failMsg + result, present, hasResult);
    if (!present) continue;

    assertTrue(failMsg, result.containsColumn(f, null));
    assertEquals(failMsg, result.getColumnCells(f, null).size(), 1);
    Cell cell = result.getColumnLatestCell(f, null);
    assertTrue(failMsg,
      Bytes.equals(data, 0, data.length, cell.getValueArray(), cell.getValueOffset(),
        cell.getValueLength()));
  }
}
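A minimal sketch of calling the verification helper above from a test, assuming rows 0..99 were previously written to family f of the region; TEST_UTIL stands in for the HBaseTestingUtility instance.

// Hypothetical test-side calls.
TEST_UTIL.verifyNumericRows(region, Bytes.toBytes("f"), 0, 100, true);   // all rows present
// ... after deleting the rows ...
TEST_UTIL.verifyNumericRows(region, Bytes.toBytes("f"), 0, 100, false);  // none should remain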
Project: ditb    File: TestDurability.java
@Test
public void testIncrementWithReturnResultsSetToFalse() throws Exception {
  byte[] row1 = Bytes.toBytes("row1");
  byte[] col1 = Bytes.toBytes("col1");

  // Setting up region
  final WALFactory wals = new WALFactory(CONF, null, "testIncrementWithReturnResultsSetToFalse");
  byte[] tableName = Bytes.toBytes("testIncrementWithReturnResultsSetToFalse");
  final WAL wal = wals.getWAL(tableName);
  HRegion region = createHRegion(tableName, "increment", wal, Durability.USE_DEFAULT);

  Increment inc1 = new Increment(row1);
  inc1.setReturnResults(false);
  inc1.addColumn(FAMILY, col1, 1);
  Result res = region.increment(inc1);
  assertNull(res);
}
Project: ditb    File: TestCoprocessorConfiguration.java
@Test
public void testRegionCoprocessorHostDefaults() throws Exception {
  Configuration conf = new Configuration(CONF);
  HRegion region = mock(HRegion.class);
  when(region.getRegionInfo()).thenReturn(REGIONINFO);
  when(region.getTableDesc()).thenReturn(TABLEDESC);
  RegionServerServices rsServices = mock(RegionServerServices.class);
  systemCoprocessorLoaded.set(false);
  tableCoprocessorLoaded.set(false);
  new RegionCoprocessorHost(region, rsServices, conf);
  assertEquals("System coprocessors loading default was not honored",
    systemCoprocessorLoaded.get(),
    CoprocessorHost.DEFAULT_COPROCESSORS_ENABLED);
  assertEquals("Table coprocessors loading default was not honored",
    tableCoprocessorLoaded.get(), 
    CoprocessorHost.DEFAULT_COPROCESSORS_ENABLED &&
    CoprocessorHost.DEFAULT_USER_COPROCESSORS_ENABLED);
}
Project: ditb    File: TestRegionObserverStacking.java
HRegion initHRegion (byte [] tableName, String callingMethod,
    Configuration conf, byte [] ... families) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  for(byte [] family : families) {
    htd.addFamily(new HColumnDescriptor(family));
  }
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  Path path = new Path(DIR + callingMethod);
  HRegion r = HRegion.createHRegion(info, path, conf, htd);
  // this following piece is a hack. currently a coprocessorHost
  // is secretly loaded at OpenRegionHandler. we don't really
  // start a region server here, so just manually create cphost
  // and set it to region.
  RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);
  r.setCoprocessorHost(host);
  return r;
}
Project: ditb    File: HFileArchiveTestingUtil.java
/**
 * Helper method to get the archive directory for the specified region
 * @param conf {@link Configuration} to check for the name of the archive directory
 * @param region region that is being archived
 * @return {@link Path} to the archive directory for the given region
 */
public static Path getRegionArchiveDir(Configuration conf, HRegion region) throws IOException {
  return HFileArchiveUtil.getRegionArchiveDir(
      FSUtils.getRootDir(conf),
      region.getTableDesc().getTableName(),
      region.getRegionInfo().getEncodedName());
}
Project: ditb    File: ZkOpenRegionCoordination.java
/**
 * @param r Region we're working on.
 * @return whether znode is successfully transitioned to OPENED state.
 * @throws java.io.IOException
 */
@Override
public boolean transitionToOpened(final HRegion r, OpenRegionDetails ord) throws IOException {
  ZkOpenRegionDetails zkOrd = (ZkOpenRegionDetails) ord;

  boolean result = false;
  HRegionInfo hri = r.getRegionInfo();
  final String name = hri.getRegionNameAsString();
  // Finally, Transition ZK node to OPENED
  try {
    if (ZKAssign.transitionNodeOpened(watcher, hri,
      zkOrd.getServerName(), zkOrd.getVersion()) == -1) {
      String warnMsg = "Completed the OPEN of region " + name +
        " but when transitioning from OPENING to OPENED ";
      try {
        String node = ZKAssign.getNodeName(watcher, hri.getEncodedName());
        if (ZKUtil.checkExists(watcher, node) < 0) {
          // the znode disappeared, so we cannot recover; abort the server
          coordination.getServer().abort(warnMsg + "the znode disappeared", null);
        } else {
          LOG.warn(warnMsg + "got a version mismatch, someone else clashed; " +
            "so now unassigning -- closing region on server: " + zkOrd.getServerName());
        }
      } catch (KeeperException ke) {
        coordination.getServer().abort(warnMsg, ke);
      }
    } else {
      LOG.debug("Transitioned " + r.getRegionInfo().getEncodedName() +
        " to OPENED in zk on " + zkOrd.getServerName());
      result = true;
    }
  } catch (KeeperException e) {
    LOG.error("Failed transitioning node " + name +
      " from OPENING to OPENED -- closing region", e);
  }
  return result;
}
Project: ditb    File: TestOpenRegionHandler.java
@Test
public void testTransitionToFailedOpenEvenIfCleanupFails() throws Exception {
  Server server = new MockServer(HTU);
  RegionServerServices rsServices = HTU.createMockRegionServerService();
  // Create it OFFLINE, which is what it expects
  ZKAssign.createNodeOffline(server.getZooKeeper(), TEST_HRI, server.getServerName());
  // Create the handler
  ZkCoordinatedStateManager csm = new ZkCoordinatedStateManager();
  csm.initialize(server);
  csm.start();

  ZkOpenRegionCoordination.ZkOpenRegionDetails zkCrd =
    new ZkOpenRegionCoordination.ZkOpenRegionDetails();
  zkCrd.setServerName(server.getServerName());

  OpenRegionHandler handler = new OpenRegionHandler(server, rsServices, TEST_HRI, TEST_HTD,
    -1, csm.getOpenRegionCoordination(), zkCrd) {
    @Override
    boolean updateMeta(HRegion r, long masterSystemTime) {
      return false;
    };

    @Override
    void cleanupFailedOpen(HRegion region) throws IOException {
      throw new IOException("FileSystem got closed.");
    }
  };
  rsServices.getRegionsInTransitionInRS().put(TEST_HRI.getEncodedNameAsBytes(), Boolean.TRUE);
  try {
    handler.process();
  } catch (Exception e) {
    // Ignore the IOException that we have thrown from cleanupFailedOpen
  }
  RegionTransition rt = RegionTransition.parseFrom(ZKAssign.getData(server.getZooKeeper(),
      TEST_HRI.getEncodedName()));
  assertEquals(EventType.RS_ZK_REGION_FAILED_OPEN, rt.getEventType());
}
Project: ditb    File: TestMergeTool.java
@Override
public void tearDown() throws Exception {
  super.tearDown();
  for (int i = 0; i < sourceRegions.length; i++) {
    HRegion r = regions[i];
    if (r != null) {
      HRegion.closeHRegion(r);
    }
  }
  wals.close();
  TEST_UTIL.shutdownMiniCluster();
}
Project: ditb    File: SnapshotManifest.java
/**
 * Creates a 'manifest' for the specified region, by reading directly from the HRegion object.
 * This is used by the "online snapshot" when the table is enabled.
 */
public void addRegion(final HRegion region) throws IOException {
  // 0. Get the ManifestBuilder/RegionVisitor
  RegionVisitor visitor = createRegionVisitor(desc);

  // 1. dump region meta info into the snapshot directory
  LOG.debug("Storing '" + region + "' region-info for snapshot.");
  Object regionData = visitor.regionOpen(region.getRegionInfo());
  monitor.rethrowException();

  // 2. iterate through all the stores in the region
  LOG.debug("Creating references for hfiles");

  for (Store store : region.getStores()) {
    // 2.1. build the snapshot reference for the store
    Object familyData = visitor.familyOpen(regionData, store.getFamily().getName());
    monitor.rethrowException();

    List<StoreFile> storeFiles = new ArrayList<StoreFile>(store.getStorefiles());
    if (LOG.isDebugEnabled()) {
      LOG.debug("Adding snapshot references for " + storeFiles  + " hfiles");
    }

    // 2.2. iterate through all the store's files and create "references".
    for (int i = 0, sz = storeFiles.size(); i < sz; i++) {
      StoreFile storeFile = storeFiles.get(i);
      monitor.rethrowException();

      // create "reference" to this store file.
      LOG.debug("Adding reference for file (" + (i+1) + "/" + sz + "): " + storeFile.getPath());
      visitor.storeFile(regionData, familyData, storeFile.getFileInfo());
    }
    visitor.familyClose(regionData, familyData);
  }
  visitor.regionClose(regionData);
}
Project: ditb    File: RestoreSnapshotHelper.java
/**
 * Clone region directory content from the snapshot info.
 *
 * Each region is encoded with the table name, so the cloned region will have
 * a different region name.
 *
 * Instead of copying the hfiles, an HFileLink is created.
 *
 * @param region {@link HRegion} cloned
 * @param snapshotRegionInfo
 */
private void cloneRegion(final HRegion region, final HRegionInfo snapshotRegionInfo,
    final SnapshotRegionManifest manifest) throws IOException {
  final Path regionDir = new Path(tableDir, region.getRegionInfo().getEncodedName());
  final String tableName = tableDesc.getTableName().getNameAsString();
  for (SnapshotRegionManifest.FamilyFiles familyFiles: manifest.getFamilyFilesList()) {
    Path familyDir = new Path(regionDir, familyFiles.getFamilyName().toStringUtf8());
    for (SnapshotRegionManifest.StoreFile storeFile: familyFiles.getStoreFilesList()) {
      LOG.info("Adding HFileLink " + storeFile.getName() + " to table=" + tableName);
      restoreStoreFile(familyDir, snapshotRegionInfo, storeFile, createBackRefs);
    }
  }
}
Project: ditb    File: TestFuzzyRowFilterEndToEnd.java
private void runScanner(Table hTable, int expectedSize, Filter filter) throws IOException {
  String cf = "f";
  Scan scan = new Scan();
  scan.addFamily(cf.getBytes());
  scan.setFilter(filter);
  List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(table.getBytes());
  HRegion first = regions.get(0);
  // open a single scanner on the first region
  RegionScanner scanner = first.getScanner(scan);
  List<Cell> results = new ArrayList<Cell>();
  long timeBeforeScan = System.currentTimeMillis();
  int found = 0;
  while (scanner.next(results)) {
    found += results.size();
    results.clear();
  }
  found += results.size();
  long scanTime = System.currentTimeMillis() - timeBeforeScan;
  scanner.close();

  LOG.info("\nscan time = " + scanTime + "ms");
  LOG.info("found " + found + " results\n");

  assertEquals(expectedSize, found);
}
Project: ditb    File: Merge.java
private HRegion merge(final HTableDescriptor htd, HRegion meta,
                      HRegionInfo info1, HRegionInfo info2)
throws IOException {
  if (info1 == null) {
    throw new IOException("Could not find " + Bytes.toStringBinary(region1) + " in " +
        Bytes.toStringBinary(meta.getRegionInfo().getRegionName()));
  }
  if (info2 == null) {
    throw new IOException("Could not find " + Bytes.toStringBinary(region2) + " in " +
        Bytes.toStringBinary(meta.getRegionInfo().getRegionName()));
  }
  HRegion merged = null;
  HRegion r1 = HRegion.openHRegion(info1, htd, utils.getLog(info1), getConf());
  try {
    HRegion r2 = HRegion.openHRegion(info2, htd, utils.getLog(info2), getConf());
    try {
      merged = HRegion.merge(r1, r2);
    } finally {
      if (!r2.isClosed()) {
        r2.close();
      }
    }
  } finally {
    if (!r1.isClosed()) {
      r1.close();
    }
  }

  // Remove the old regions from meta.
  // HRegion.merge has already deleted their files

  removeRegionFromMeta(meta, info1);
  removeRegionFromMeta(meta, info2);

  this.mergeInfo = merged.getRegionInfo();
  return merged;
}
Project: ditb    File: Merge.java
private void removeRegionFromMeta(HRegion meta, HRegionInfo regioninfo)
throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Removing region: " + regioninfo + " from " + meta);
  }

  Delete delete  = new Delete(regioninfo.getRegionName(),
      System.currentTimeMillis());
  meta.delete(delete);
}
Project: ditb    File: TestMergeTool.java
/**
 * Test merge tool.
 * @throws Exception
 */
public void testMergeTool() throws Exception {
  // First verify we can read the rows from the source regions and that they
  // contain the right data.
  for (int i = 0; i < regions.length; i++) {
    for (int j = 0; j < rows[i].length; j++) {
      Get get = new Get(rows[i][j]);
      get.addFamily(FAMILY);
      Result result = regions[i].get(get);
      byte [] bytes =  CellUtil.cloneValue(result.rawCells()[0]);
      assertNotNull(bytes);
      assertTrue(Bytes.equals(bytes, rows[i][j]));
    }
    // Close the region and delete the log
    HRegion.closeHRegion(regions[i]);
  }
  WAL log = wals.getWAL(new byte[]{});
  // Merge Region 0 and Region 1
  HRegion merged = mergeAndVerify("merging regions 0 and 1 ",
    this.sourceRegions[0].getRegionNameAsString(),
    this.sourceRegions[1].getRegionNameAsString(), log, 2);

  // Merge the result of merging regions 0 and 1 with region 2
  merged = mergeAndVerify("merging regions 0+1 and 2",
    merged.getRegionInfo().getRegionNameAsString(),
    this.sourceRegions[2].getRegionNameAsString(), log, 3);

  // Merge the result of merging regions 0, 1 and 2 with region 3
  merged = mergeAndVerify("merging regions 0+1+2 and 3",
    merged.getRegionInfo().getRegionNameAsString(),
    this.sourceRegions[3].getRegionNameAsString(), log, 4);

  // Merge the result of merging regions 0, 1, 2 and 3 with region 4
  merged = mergeAndVerify("merging regions 0+1+2+3 and 4",
    merged.getRegionInfo().getRegionNameAsString(),
    this.sourceRegions[4].getRegionNameAsString(), log, rows.length);
}
Project: ditb    File: HBaseFsck.java
@Override
public void handleRegionEndKeyNotEmpty(byte[] curEndKey) throws IOException {
  errors.reportError(ERROR_CODE.LAST_REGION_ENDKEY_NOT_EMPTY,
      "Last region should end with an empty key.  Creating a new "
          + "region and regioninfo in HDFS to plug the hole.", getTableInfo());
  HTableDescriptor htd = getTableInfo().getHTD();
  // from curEndKey to EMPTY_START_ROW
  HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), curEndKey,
      HConstants.EMPTY_START_ROW);

  HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
  LOG.info("Table region end key was not empty.  Created new empty region: " + newRegion
      + " " + region);
  fixes++;
}
Project: ditb    File: HBaseFsckRepair.java
/**
 * Creates, flushes, and closes a new region.
 */
public static HRegion createHDFSRegionDir(Configuration conf,
    HRegionInfo hri, HTableDescriptor htd) throws IOException {
  // Create HRegion
  Path root = FSUtils.getRootDir(conf);
  HRegion region = HRegion.createHRegion(hri, root, conf, htd, null);

  // Close the new region to flush to disk. Close log file too.
  HRegion.closeHRegion(region);
  return region;
}
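A minimal sketch, mirroring the HBaseFsck handlers shown earlier, of plugging a hole with the helper above; conf, htd, holeStartKey and holeStopKey are assumed to come from the surrounding repair logic.

// Hypothetical repair call: materialize an empty region covering the hole on HDFS.
HRegionInfo holeRegion = new HRegionInfo(htd.getTableName(), holeStartKey, holeStopKey);
HRegion plugged = HBaseFsckRepair.createHDFSRegionDir(conf, holeRegion, htd);
LOG.info("Created empty region " + plugged + " to plug the hole");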
Project: ditb    File: CompactionTool.java
/**
 * Create a "mock" HStore that uses the tmpDir specified by the user and
 * the store dir to compact as source.
 */
private static HStore getStore(final Configuration conf, final FileSystem fs,
    final Path tableDir, final HTableDescriptor htd, final HRegionInfo hri,
    final String familyName, final Path tempDir) throws IOException {
  HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, hri) {
    @Override
    public Path getTempDir() {
      return tempDir;
    }
  };
  HRegion region = new HRegion(regionFs, null, conf, htd, null);
  return new HStore(region, htd.getFamily(Bytes.toBytes(familyName)), conf);
}
Project: ditb    File: TestOpenRegionHandler.java
@Test
public void testFailedUpdateMeta() throws Exception {
  Server server = new MockServer(HTU);
  RegionServerServices rsServices = HTU.createMockRegionServerService();

  // Create it OFFLINE, which is what it expects
  ZKAssign.createNodeOffline(server.getZooKeeper(), TEST_HRI, server.getServerName());

  // Create the handler
  ZkCoordinatedStateManager csm = new ZkCoordinatedStateManager();
  csm.initialize(server);
  csm.start();

  ZkOpenRegionCoordination.ZkOpenRegionDetails zkCrd =
    new ZkOpenRegionCoordination.ZkOpenRegionDetails();
  zkCrd.setServerName(server.getServerName());

  OpenRegionHandler handler = new OpenRegionHandler(server, rsServices, TEST_HRI, TEST_HTD,
    -1, csm.getOpenRegionCoordination(), zkCrd) {
      @Override
      boolean updateMeta(final HRegion r, long masterSystemTime) {
        // Fake failure of updating META
        return false;
      }
  };
  rsServices.getRegionsInTransitionInRS().put(
    TEST_HRI.getEncodedNameAsBytes(), Boolean.TRUE);
  handler.process();

  // Handler should have transitioned it to FAILED_OPEN
  RegionTransition rt = RegionTransition.parseFrom(
    ZKAssign.getData(server.getZooKeeper(), TEST_HRI.getEncodedName()));
  assertEquals(EventType.RS_ZK_REGION_FAILED_OPEN, rt.getEventType());
}
Project: ditb    File: TestMergeTable.java
protected void setupMeta(Path rootdir, final HRegion [] regions)
throws IOException {
  HRegion meta =
    HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO, rootdir,
    UTIL.getConfiguration(), UTIL.getMetaTableDescriptor());
  for (HRegion r: regions) {
    HRegion.addRegionToMETA(meta, r);
  }
  HRegion.closeHRegion(meta);
}
Project: ditb    File: OpenRegionHandler.java
void cleanupFailedOpen(final HRegion region) throws IOException {
  if (region != null) {
    byte[] encodedName = regionInfo.getEncodedNameAsBytes();
    try {
      rsServices.getRegionsInTransitionInRS().put(encodedName,Boolean.FALSE);
      this.rsServices.removeFromOnlineRegions(region, null);
      region.close();
    } finally {
      rsServices.getRegionsInTransitionInRS().remove(encodedName);
    }
  }
}
Project: ditb    File: RegionReplicaFlushHandler.java
public RegionReplicaFlushHandler(Server server, ClusterConnection connection,
    RpcRetryingCallerFactory rpcRetryingCallerFactory, RpcControllerFactory rpcControllerFactory,
    int operationTimeout, HRegion region) {
  super(server, EventType.RS_REGION_REPLICA_FLUSH);
  this.connection = connection;
  this.rpcRetryingCallerFactory = rpcRetryingCallerFactory;
  this.rpcControllerFactory = rpcControllerFactory;
  this.operationTimeout = operationTimeout;
  this.region = region;
}
Project: ditb    File: FinishRegionRecoveringHandler.java
@Override
public void process() throws IOException {
  Region region = this.rss.getRecoveringRegions().remove(regionName);
  if (region != null) {
    ((HRegion)region).setRecovering(false);
    LOG.info(path + " deleted; " + regionName + " recovered.");
  }
}
Project: ditb    File: ScanPreprocess.java
public ConditionTreeLeafNode(HRegion region, Range range, float maxScale) {
  this.range = range;
  this.scale = region.getStore(range.getFamily()).getIndexScanScale(range);
  // prune large scale node
  if (this.scale > maxScale) {
    Log.info("LCINFO: A prune=true because (this.scale) " + this.scale + " > " + maxScale
        + "(max scale)");
    this.prune = true;
  }
}
Project: ditb    File: FlushSnapshotSubprocedure.java
@Override
public Void call() throws Exception {
  // Taking the region read lock prevents the individual region from being closed while a
  // snapshot is in progress.  This is helpful but not sufficient for preventing races with
  // snapshots that involve multiple regions and regionservers.  It is still possible to have
  // an interleaving such that globally regions are missing, so we still need the verification
  // step.
  LOG.debug("Starting region operation on " + region);
  region.startRegionOperation();
  try {
    if (snapshotSkipFlush) {
    /*
     * This is to take an online snapshot without forcing a coordinated flush, to prevent pauses.
     * The snapshot type is defined inside the snapshot description. FlushSnapshotSubprocedure
     * should be renamed to distributedSnapshotSubprocedure, and the flush() behavior can be
     * turned on/off based on the flush type.
     * To minimize the code change, the class name is not changed.
     */
      LOG.debug("take snapshot without flush memstore first");
    } else {
      LOG.debug("Flush Snapshotting region " + region.toString() + " started...");
      region.flush(true);
    }
    ((HRegion)region).addRegionToSnapshot(snapshot, monitor);
    if (snapshotSkipFlush) {
      LOG.debug("... SkipFlush Snapshotting region " + region.toString() + " completed.");
    } else {
      LOG.debug("... Flush Snapshotting region " + region.toString() + " completed.");
    }
  } finally {
    LOG.debug("Closing region operation on " + region);
    region.closeRegionOperation();
  }
  return null;
}
Project: ditb    File: TestMultiSlaveReplication.java
private void rollWALAndWait(final HBaseTestingUtility utility, final TableName table,
    final byte[] row) throws IOException {
  final Admin admin = utility.getHBaseAdmin();
  final MiniHBaseCluster cluster = utility.getMiniHBaseCluster();

  // find the region that corresponds to the given row.
  HRegion region = null;
  for (HRegion candidate : cluster.getRegions(table)) {
    if (HRegion.rowIsInRange(candidate.getRegionInfo(), row)) {
      region = candidate;
      break;
    }
  }
  assertNotNull("Couldn't find the region for row '" + Arrays.toString(row) + "'", region);

  final CountDownLatch latch = new CountDownLatch(1);

  // listen for successful log rolls
  final WALActionsListener listener = new WALActionsListener.Base() {
        @Override
        public void postLogRoll(final Path oldPath, final Path newPath) throws IOException {
          latch.countDown();
        }
      };
  region.getWAL().registerWALActionsListener(listener);

  // request a roll
  admin.rollWALWriter(cluster.getServerHoldingRegion(region.getTableDesc().getTableName(),
    region.getRegionInfo().getRegionName()));

  // wait
  try {
    latch.await();
  } catch (InterruptedException exception) {
    LOG.warn("Interrupted while waiting for the wal of '" + region + "' to roll. If later " +
        "replication tests fail, it's probably because we should still be waiting.");
    Thread.currentThread().interrupt();
  }
  region.getWAL().unregisterWALActionsListener(listener);
}