Example usages (实例源码) of the Java class org.apache.hadoop.hbase.util.ModifyRegionUtils

项目:ditb    文件:CreateTableProcedure.java   
/**
 * Marks the table ENABLING, assigns all of its regions round-robin, then
 * flips the table to ENABLED.
 *
 * @param env the master procedure environment
 * @param tableName table whose state is transitioned
 * @param regions regions of the new table to assign
 * @throws HBaseException if a table-state transition fails
 * @throws IOException if waiting for region servers or assignment fails
 */
protected static void assignRegions(final MasterProcedureEnv env,
    final TableName tableName, final List<HRegionInfo> regions)
    throws HBaseException, IOException {
  // Block until enough region servers have checked in to host the regions.
  ProcedureSyncWait.waitRegionServers(env);

  final AssignmentManager am = env.getMasterServices().getAssignmentManager();

  // Flag the table ENABLING before handing out any region.
  am.getTableStateManager().setTableState(tableName, ZooKeeperProtos.Table.State.ENABLING);

  // Round-robin assignment of every region of the new table.
  ModifyRegionUtils.assignRegions(am, regions);

  // All regions handed out: the table is now ENABLED.
  am.getTableStateManager().setTableState(tableName, ZooKeeperProtos.Table.State.ENABLED);
}
项目:ditb    文件:TestCreateTableProcedure.java   
/**
 * Submitting a create for a table name that already exists must fail the
 * second procedure with {@link TableExistsException} (checked via
 * {@code expected=} on the annotation).
 */
@Test(timeout=60000, expected=TableExistsException.class)
public void testCreateExisting() throws Exception {
  final TableName tableName = TableName.valueOf("testCreateExisting");
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
  final HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f");
  final HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, null);

  // First submission: creates the table and must succeed.
  final long firstProcId = procExec.submitProcedure(
    new CreateTableProcedure(procExec.getEnvironment(), htd, regions), nonceGroup, nonce);

  // Second submission with the same table name (distinct nonce so it is not
  // deduplicated); the latch will surface its failure.
  final ProcedurePrepareLatch secondLatch = new ProcedurePrepareLatch.CompatibilityLatch();
  final long secondProcId = procExec.submitProcedure(
    new CreateTableProcedure(procExec.getEnvironment(), htd, regions, secondLatch),
    nonceGroup + 1,
    nonce + 1);

  ProcedureTestingUtility.waitProcedure(procExec, firstProcId);
  ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(firstProcId));

  ProcedureTestingUtility.waitProcedure(procExec, secondProcId);
  // Rethrows the TableExistsException recorded by the second procedure.
  secondLatch.await();
}
项目:ditb    文件:TestCreateTableProcedure.java   
/**
 * Two submissions sharing the same nonce pair must collapse into a single
 * procedure: both callers observe the same, successful procedure id.
 */
@Test(timeout=60000)
public void testCreateTwiceWithSameNonce() throws Exception {
  final TableName tableName = TableName.valueOf("testCreateTwiceWithSameNonce");
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
  final HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f");
  final HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, null);

  // Submit the identical create twice, reusing the same (nonceGroup, nonce).
  final long firstProcId = procExec.submitProcedure(
    new CreateTableProcedure(procExec.getEnvironment(), htd, regions), nonceGroup, nonce);
  final long secondProcId = procExec.submitProcedure(
    new CreateTableProcedure(procExec.getEnvironment(), htd, regions), nonceGroup, nonce);

  // Both ids must resolve to a non-failed procedure...
  ProcedureTestingUtility.waitProcedure(procExec, firstProcId);
  ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(firstProcId));
  ProcedureTestingUtility.waitProcedure(procExec, secondProcId);
  ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(secondProcId));

  // ...and the nonce must have deduplicated them into one procedure.
  assertTrue(firstProcId == secondProcId);
}
项目:ditb    文件:TestCreateTableProcedure.java   
/**
 * Kills the procedure executor before every store update and verifies a
 * create-table procedure survives repeated restart/replay of each step.
 */
@Test(timeout=60000)
public void testRecoveryAndDoubleExecution() throws Exception {
  final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecution");

  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
  // Kill the executor before each persist, toggling the flag each time.
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);

  // No split keys: a single-region table with two families.
  final HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2");
  final HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, null);
  final long procId = procExec.submitProcedure(
    new CreateTableProcedure(procExec.getEnvironment(), htd, regions), nonceGroup, nonce);

  // Restart the executor and run each step twice.
  // NOTE: 6 is the hardcoded CreateTableState step count — revisit this test
  // whenever a new step is added.
  MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(
    procExec, procId, 6, CreateTableState.values());

  MasterProcedureTestingUtility.validateTableCreation(
    UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");
}
项目:hbase    文件:HMaster.java   
/**
 * Submits a CreateTableProcedure for a system table. This path is invoked
 * locally on the master, so no nonce is needed: without an RPC there can be
 * no duplicated request to detect.
 *
 * @param tableDescriptor descriptor of the system table to create
 * @return id of the submitted procedure
 * @throws MasterNotRunningException if the master has been stopped
 * @throws IllegalArgumentException if the table is not a system table
 */
@Override
public long createSystemTable(final TableDescriptor tableDescriptor) throws IOException {
  if (isStopped()) {
    throw new MasterNotRunningException();
  }

  final TableName tableName = tableDescriptor.getTableName();
  if (!tableName.isSystemTable()) {
    throw new IllegalArgumentException(
      "Only system table creation can use this createSystemTable API");
  }

  // No split keys: system tables are created with a single region.
  final RegionInfo[] newRegions = ModifyRegionUtils.createRegionInfos(tableDescriptor, null);

  LOG.info(getClientIdAuditPrefix() + " create " + tableDescriptor);

  return this.procedureExecutor.submitProcedure(
    new CreateTableProcedure(procedureExecutor.getEnvironment(), tableDescriptor, newRegions));
}
项目:hbase    文件:TestMasterFailoverWithProcedures.java   
/**
 * Creates a table while forcing a master failover at the given procedure
 * step, then verifies the table still comes out fully created.
 *
 * @param step index of the create-table step at which to fail over
 */
private void testCreateWithFailoverAtStep(final int step) throws Exception {
  final TableName tableName = TableName.valueOf("testCreateWithFailoverAtStep" + step);

  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
  // Kill the executor before each store update, toggling the flag each time.
  ProcedureTestingUtility.setKillBeforeStoreUpdate(procExec, true);
  ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExec, true);

  // No split keys: a single-region table with two families.
  final TableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2");
  final RegionInfo[] regions = ModifyRegionUtils.createRegionInfos(htd, null);
  final long procId = procExec.submitProcedure(
      new CreateTableProcedure(procExec.getEnvironment(), htd, regions));
  testRecoveryAndDoubleExecution(UTIL, procId, step);

  MasterProcedureTestingUtility.validateTableCreation(
      UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");
}
项目:hbase    文件:TestCreateTableProcedure.java   
/**
 * Creating a table with zero column families must fail the procedure with a
 * DoNotRetryIOException. Sanity checks are disabled so submission is allowed
 * through and the failure comes from the create itself.
 */
@Test
public void testCreateWithoutColumnFamily() throws Exception {
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
  final TableName tableName = TableName.valueOf(name.getMethodName());
  // Descriptor with 0 families: the create is expected to fail.
  final TableDescriptorBuilder builder =
      TableDescriptorBuilder.newBuilder(MasterProcedureTestingUtility.createHTD(tableName));

  // disable sanity check
  builder.setValue("hbase.table.sanity.checks", Boolean.FALSE.toString());
  TableDescriptor htd = builder.build();
  final RegionInfo[] regions = ModifyRegionUtils.createRegionInfos(htd, null);

  long procId =
      ProcedureTestingUtility.submitAndWait(procExec,
          new CreateTableProcedure(procExec.getEnvironment(), htd, regions));
  final Procedure<?> result = procExec.getResult(procId);
  // assertTrue is the idiomatic boolean check (was assertEquals(true, ...)).
  assertTrue(result.isFailed());
  Throwable cause = ProcedureTestingUtility.getExceptionCause(result);
  assertTrue("expected DoNotRetryIOException, got " + cause,
      cause instanceof DoNotRetryIOException);
}
项目:hbase    文件:TestCreateTableProcedure.java   
/**
 * A second create for an already-existing table name must raise
 * {@link TableExistsException} (checked via {@code expected=}).
 */
@Test(expected=TableExistsException.class)
public void testCreateExisting() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
  final TableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f");
  final RegionInfo[] regions = ModifyRegionUtils.createRegionInfos(htd, null);

  // First create: expected to succeed.
  final long firstProcId = procExec.submitProcedure(
    new CreateTableProcedure(procExec.getEnvironment(), htd, regions));

  // Second create of the same table; the latch propagates its failure.
  final ProcedurePrepareLatch secondLatch = new ProcedurePrepareLatch.CompatibilityLatch();
  final long secondProcId = procExec.submitProcedure(
    new CreateTableProcedure(procExec.getEnvironment(), htd, regions, secondLatch));

  ProcedureTestingUtility.waitProcedure(procExec, firstProcId);
  ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(firstProcId));

  ProcedureTestingUtility.waitProcedure(procExec, secondProcId);
  // Rethrows the TableExistsException recorded by the second procedure.
  secondLatch.await();
}
项目:hbase    文件:TestCreateTableProcedure.java   
/**
 * Replays every step of a create-table procedure twice (killing the executor
 * before each store update) and verifies the table is created correctly.
 */
@Test
public void testRecoveryAndDoubleExecution() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());

  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);

  // No split keys: a single-region table with two families.
  final TableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2");
  final RegionInfo[] regions = ModifyRegionUtils.createRegionInfos(htd, null);
  final long procId = procExec.submitProcedure(
    new CreateTableProcedure(procExec.getEnvironment(), htd, regions));

  // Restart the executor, execute each step twice, then validate the result.
  MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId);
  MasterProcedureTestingUtility.validateTableCreation(getMaster(), tableName, regions, F1, F2);
}
项目:hbase    文件:TestCreateTableProcedure.java   
/**
 * Drives a create-table procedure into rollback (failing before the first
 * step) and verifies the table is cleaned up and re-creatable afterwards.
 *
 * @param builder descriptor builder for the table under test
 */
private void testRollbackAndDoubleExecution(TableDescriptorBuilder builder) throws Exception {
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);

  // Three split keys -> four regions, each replicated three times.
  final byte[][] splitKeys = new byte[][] {
    Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
  };
  builder.setRegionReplication(3);
  final TableDescriptor htd = builder.build();
  final RegionInfo[] regions = ModifyRegionUtils.createRegionInfos(htd, splitKeys);
  final long procId = procExec.submitProcedure(
    new CreateTableProcedure(procExec.getEnvironment(), htd, regions));

  // 0 steps: fail at the pre-operation and roll everything back.
  MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, 0);

  final TableName tableName = htd.getTableName();
  MasterProcedureTestingUtility.validateTableDeletion(getMaster(), tableName);

  // After the rollback the same table must be creatable again.
  resetProcExecutorTestingKillFlag();
  testSimpleCreate(tableName, splitKeys);
}
项目:ditb    文件:CreateTableHandler.java   
/**
 * Create the on-disk structure for the table, and returns the regions info.
 * Delegates to ModifyRegionUtils.createRegions with this handler's
 * configuration, table descriptor and precomputed region list (instance
 * fields); the trailing null means no per-region fill task is run.
 * @param tableRootDir directory where the table is being created
 * @param tableName name of the table under construction
 * @return the list of regions created
 * @throws IOException if creating the regions on the filesystem fails
 */
protected List<HRegionInfo> handleCreateHdfsRegions(final Path tableRootDir,
  final TableName tableName)
    throws IOException {
  return ModifyRegionUtils.createRegions(conf, tableRootDir,
      hTableDescriptor, newRegions, null);
}
项目:ditb    文件:HMaster.java   
/**
 * Creates a table by submitting a CreateTableProcedure and blocking on its
 * prepare latch before returning the procedure id.
 *
 * NOTE(review): region infos are computed and the namespace is checked
 * before checkInitialized()/sanity checks run — preserve this ordering
 * unless the dependency is understood; it looks deliberate.
 *
 * @param hTableDescriptor descriptor of the table to create
 * @param splitKeys split boundaries for pre-splitting (null for one region)
 * @param nonceGroup nonce group for duplicate-RPC detection
 * @param nonce nonce for duplicate-RPC detection
 * @return id of the submitted procedure
 * @throws IOException if the master is stopped, validation fails, or
 *     submission fails
 */
@Override
public long createTable(
    final HTableDescriptor hTableDescriptor,
    final byte [][] splitKeys,
    final long nonceGroup,
    final long nonce) throws IOException {
  if (isStopped()) {
    throw new MasterNotRunningException();
  }

  String namespace = hTableDescriptor.getTableName().getNamespaceAsString();
  ensureNamespaceExists(namespace);

  HRegionInfo[] newRegions = ModifyRegionUtils.createHRegionInfos(hTableDescriptor, splitKeys);
  checkInitialized();
  sanityCheckTableDescriptor(hTableDescriptor);

  // Coprocessor hook runs before anything is persisted.
  if (cpHost != null) {
    cpHost.preCreateTable(hTableDescriptor, newRegions);
  }
  LOG.info(getClientIdAuditPrefix() + " create " + hTableDescriptor);

  // TODO: We can handle/merge duplicate requests, and differentiate the case of
  //       TableExistsException by saying if the schema is the same or not.
  ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch();
  long procId = this.procedureExecutor.submitProcedure(
    new CreateTableProcedure(
      procedureExecutor.getEnvironment(), hTableDescriptor, newRegions, latch),
    nonceGroup,
    nonce);
  // Block until the procedure has passed its prepare phase (or failed it).
  latch.await();

  if (cpHost != null) {
    cpHost.postCreateTable(hTableDescriptor, newRegions);
  }

  return procId;
}
项目:ditb    文件:RestoreSnapshotHelper.java   
/**
 * Remove specified regions from the file-system, using the archiver.
 * No-op when {@code regions} is null or empty.
 *
 * @param exec thread pool the edits run on
 * @param regions regions to remove; may be null
 * @throws IOException if archiving any region fails
 */
private void removeHdfsRegions(final ThreadPoolExecutor exec, final List<HRegionInfo> regions)
    throws IOException {
  // isEmpty() is the idiomatic emptiness check (matches the newer versions).
  if (regions == null || regions.isEmpty()) return;
  ModifyRegionUtils.editRegions(exec, regions, new ModifyRegionUtils.RegionEditTask() {
    @Override
    public void editRegion(final HRegionInfo hri) throws IOException {
      // Move the region's files to the archive rather than deleting outright.
      HFileArchiver.archiveRegion(conf, fs, hri);
    }
  });
}
项目:ditb    文件:RestoreSnapshotHelper.java   
/**
 * Restore specified regions by restoring content to the snapshot state.
 * No-op when {@code regions} is null or empty.
 *
 * @param exec thread pool the edits run on
 * @param regionManifests snapshot manifests keyed by encoded region name
 * @param regions regions to restore; may be null
 * @throws IOException if restoring any region fails
 */
private void restoreHdfsRegions(final ThreadPoolExecutor exec,
    final Map<String, SnapshotRegionManifest> regionManifests,
    final List<HRegionInfo> regions) throws IOException {
  // isEmpty() is the idiomatic emptiness check (matches the newer versions).
  if (regions == null || regions.isEmpty()) return;
  ModifyRegionUtils.editRegions(exec, regions, new ModifyRegionUtils.RegionEditTask() {
    @Override
    public void editRegion(final HRegionInfo hri) throws IOException {
      restoreRegion(hri, regionManifests.get(hri.getEncodedName()));
    }
  });
}
项目:ditb    文件:RestoreSnapshotHelper.java   
/**
 * Clone specified regions. For each region create a new region
 * and create a HFileLink for each hfile.
 *
 * @param exec thread pool the region creation runs on
 * @param regionManifests snapshot manifests keyed by encoded region name
 * @param regions snapshot regions to clone; may be null
 * @return the cloned region infos, or null when there is nothing to clone
 * @throws IOException if creating the regions on disk fails
 */
private HRegionInfo[] cloneHdfsRegions(final ThreadPoolExecutor exec,
    final Map<String, SnapshotRegionManifest> regionManifests,
    final List<HRegionInfo> regions) throws IOException {
  // isEmpty() is the idiomatic emptiness check (matches the newer versions).
  if (regions == null || regions.isEmpty()) return null;

  final Map<String, HRegionInfo> snapshotRegions =
    new HashMap<>(regions.size());

  // clone region info (change embedded tableName with the new one)
  HRegionInfo[] clonedRegionsInfo = new HRegionInfo[regions.size()];
  for (int i = 0; i < clonedRegionsInfo.length; ++i) {
    // clone the region info from the snapshot region info
    HRegionInfo snapshotRegionInfo = regions.get(i);
    clonedRegionsInfo[i] = cloneRegionInfo(snapshotRegionInfo);

    // add the region name mapping between snapshot and cloned
    String snapshotRegionName = snapshotRegionInfo.getEncodedName();
    String clonedRegionName = clonedRegionsInfo[i].getEncodedName();
    regionsMap.put(Bytes.toBytes(snapshotRegionName), Bytes.toBytes(clonedRegionName));
    LOG.info("clone region=" + snapshotRegionName + " as " + clonedRegionName);

    // Add mapping between cloned region name and snapshot region info
    snapshotRegions.put(clonedRegionName, snapshotRegionInfo);
  }

  // create the regions on disk
  ModifyRegionUtils.createRegions(exec, conf, rootDir, tableDir,
    tableDesc, clonedRegionsInfo, new ModifyRegionUtils.RegionFillTask() {
      @Override
      public void fillRegion(final HRegion region) throws IOException {
        HRegionInfo snapshotHri = snapshotRegions.get(region.getRegionInfo().getEncodedName());
        cloneRegion(region, snapshotHri, regionManifests.get(snapshotHri.getEncodedName()));
      }
    });

  return clonedRegionsInfo;
}
项目:ditb    文件:TestCreateTableProcedure.java   
/**
 * Rolls back a create-table procedure after step 4 and verifies the table is
 * fully removed and can subsequently be re-created.
 */
@Test(timeout=90000)
public void testRollbackAndDoubleExecution() throws Exception {
  final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecution");

  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);

  // Three split keys -> four regions; replication factor 3.
  final byte[][] splitKeys = new byte[][] {
    Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
  };
  final HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2");
  htd.setRegionReplication(3);
  final HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys);
  final long procId = procExec.submitProcedure(
    new CreateTableProcedure(procExec.getEnvironment(), htd, regions), nonceGroup, nonce);

  // NOTE: 4 is the hardcoded CreateTableState step count — revisit this test
  // whenever a new step is added.
  MasterProcedureTestingUtility.testRollbackAndDoubleExecution(
      procExec, procId, 4, CreateTableState.values());

  MasterProcedureTestingUtility.validateTableDeletion(
    UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");

  // After the rollback the same table must be creatable again.
  resetProcExecutorTestingKillFlag();
  testSimpleCreate(tableName, splitKeys);
}
项目:ditb    文件:TestCreateTableProcedure.java   
/**
 * Uses a FaultyCreateTableProcedure so rollback hits retriable failures,
 * then verifies the table is removed and can be re-created.
 */
@Test(timeout=90000)
public void testRollbackRetriableFailure() throws Exception {
  final TableName tableName = TableName.valueOf("testRollbackRetriableFailure");

  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);

  // Three split keys -> four regions.
  final byte[][] splitKeys = new byte[][] {
    Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
  };
  final HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2");
  final HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys);
  final long procId = procExec.submitProcedure(
    new FaultyCreateTableProcedure(procExec.getEnvironment(), htd, regions), nonceGroup, nonce);

  // NOTE: 4 is the hardcoded CreateTableState step count — revisit this test
  // whenever a new step is added.
  MasterProcedureTestingUtility.testRollbackRetriableFailure(
      procExec, procId, 4, CreateTableState.values());

  MasterProcedureTestingUtility.validateTableDeletion(
    UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");

  // After the rollback the same table must be creatable again.
  resetProcExecutorTestingKillFlag();
  testSimpleCreate(tableName, splitKeys);
}
项目:ditb    文件:MasterProcedureTestingUtility.java   
/**
 * Creates a table through a CreateTableProcedure, waits for it to finish,
 * asserts success, and returns the regions that were created.
 *
 * @param procExec procedure executor to submit against
 * @param tableName name of the table to create
 * @param splitKeys split boundaries (null for a single region)
 * @param family one or more column family names
 * @return the region infos of the created table
 * @throws IOException if building the descriptor or regions fails
 */
public static HRegionInfo[] createTable(final ProcedureExecutor<MasterProcedureEnv> procExec,
    final TableName tableName, final byte[][] splitKeys, String... family) throws IOException {
  final HTableDescriptor htd = createHTD(tableName, family);
  final HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys);
  final long procId = ProcedureTestingUtility.submitAndWait(procExec,
    new CreateTableProcedure(procExec.getEnvironment(), htd, regions));
  ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId));
  return regions;
}
项目:LCIndex-HBase-0.94.16    文件:CreateTableHandler.java   
/**
 * Create the on-disk structure for the table, and returns the regions info.
 * Delegates to ModifyRegionUtils.createRegions with this handler's
 * configuration, table descriptor and precomputed region list (instance
 * fields); the trailing null means no per-region fill task is run.
 * @param tableRootDir directory where the table is being created
 * @param tableName name of the table under construction (String in this
 *     pre-TableName API)
 * @return the list of regions created
 * @throws IOException if creating the regions on the filesystem fails
 */
protected List<HRegionInfo> handleCreateHdfsRegions(final Path tableRootDir,
  final String tableName)
    throws IOException {
  return ModifyRegionUtils.createRegions(conf, tableRootDir,
      hTableDescriptor, newRegions, null);
}
项目:LCIndex-HBase-0.94.16    文件:RestoreSnapshotHelper.java   
/**
 * Clone specified regions. For each region create a new region
 * and create a HFileLink for each hfile.
 *
 * @param regions snapshot regions to clone; may be null
 * @return the cloned region infos, or null when there is nothing to clone
 * @throws IOException if creating the regions on disk fails
 */
private HRegionInfo[] cloneHdfsRegions(final List<HRegionInfo> regions) throws IOException {
  // isEmpty() is the idiomatic emptiness check.
  if (regions == null || regions.isEmpty()) return null;

  final Map<String, HRegionInfo> snapshotRegions =
    new HashMap<String, HRegionInfo>(regions.size());

  // clone region info (change embedded tableName with the new one)
  HRegionInfo[] clonedRegionsInfo = new HRegionInfo[regions.size()];
  for (int i = 0; i < clonedRegionsInfo.length; ++i) {
    // clone the region info from the snapshot region info
    HRegionInfo snapshotRegionInfo = regions.get(i);
    clonedRegionsInfo[i] = cloneRegionInfo(snapshotRegionInfo);

    // add the region name mapping between snapshot and cloned
    String snapshotRegionName = snapshotRegionInfo.getEncodedName();
    String clonedRegionName = clonedRegionsInfo[i].getEncodedName();
    regionsMap.put(Bytes.toBytes(snapshotRegionName), Bytes.toBytes(clonedRegionName));
    LOG.info("clone region=" + snapshotRegionName + " as " + clonedRegionName);

    // Add mapping between cloned region name and snapshot region info
    snapshotRegions.put(clonedRegionName, snapshotRegionInfo);
  }

  // create the regions on disk
  ModifyRegionUtils.createRegions(conf, tableDir.getParent(),
    tableDesc, clonedRegionsInfo, new ModifyRegionUtils.RegionFillTask() {
      // @Override added for consistency with the sibling RegionFillTask impls.
      @Override
      public void fillRegion(final HRegion region) throws IOException {
        cloneRegion(region, snapshotRegions.get(region.getRegionInfo().getEncodedName()));
      }
    });

  return clonedRegionsInfo;
}
项目:pbase    文件:CreateTableHandler.java   
/**
 * Create the on-disk structure for the table, and returns the regions info.
 * Delegates to ModifyRegionUtils.createRegions with this handler's
 * configuration, table descriptor and precomputed region list (instance
 * fields); the trailing null means no per-region fill task is run.
 * @param tableRootDir directory where the table is being created
 * @param tableName name of the table under construction
 * @return the list of regions created
 * @throws IOException if creating the regions on the filesystem fails
 */
protected List<HRegionInfo> handleCreateHdfsRegions(final Path tableRootDir,
  final TableName tableName)
    throws IOException {
  return ModifyRegionUtils.createRegions(conf, tableRootDir,
      hTableDescriptor, newRegions, null);
}
项目:pbase    文件:RestoreSnapshotHelper.java   
/**
 * Remove specified regions from the file-system, using the archiver.
 * No-op when {@code regions} is null or empty.
 *
 * @param exec thread pool the edits run on
 * @param regions regions to remove; may be null
 * @throws IOException if archiving any region fails
 */
private void removeHdfsRegions(final ThreadPoolExecutor exec, final List<HRegionInfo> regions)
    throws IOException {
  // isEmpty() is the idiomatic emptiness check (matches the newer versions).
  if (regions == null || regions.isEmpty()) return;
  ModifyRegionUtils.editRegions(exec, regions, new ModifyRegionUtils.RegionEditTask() {
    @Override
    public void editRegion(final HRegionInfo hri) throws IOException {
      // Move the region's files to the archive rather than deleting outright.
      HFileArchiver.archiveRegion(conf, fs, hri);
    }
  });
}
项目:pbase    文件:RestoreSnapshotHelper.java   
/**
 * Restore specified regions by restoring content to the snapshot state.
 * No-op when {@code regions} is null or empty.
 *
 * @param exec thread pool the edits run on
 * @param regionManifests snapshot manifests keyed by encoded region name
 * @param regions regions to restore; may be null
 * @throws IOException if restoring any region fails
 */
private void restoreHdfsRegions(final ThreadPoolExecutor exec,
    final Map<String, SnapshotRegionManifest> regionManifests,
    final List<HRegionInfo> regions) throws IOException {
  // isEmpty() is the idiomatic emptiness check (matches the newer versions).
  if (regions == null || regions.isEmpty()) return;
  ModifyRegionUtils.editRegions(exec, regions, new ModifyRegionUtils.RegionEditTask() {
    @Override
    public void editRegion(final HRegionInfo hri) throws IOException {
      restoreRegion(hri, regionManifests.get(hri.getEncodedName()));
    }
  });
}
项目:pbase    文件:RestoreSnapshotHelper.java   
/**
 * Clone specified regions. For each region create a new region
 * and create a HFileLink for each hfile.
 *
 * @param exec thread pool the region creation runs on
 * @param regionManifests snapshot manifests keyed by encoded region name
 * @param regions snapshot regions to clone; may be null
 * @return the cloned region infos, or null when there is nothing to clone
 * @throws IOException if creating the regions on disk fails
 */
private HRegionInfo[] cloneHdfsRegions(final ThreadPoolExecutor exec,
    final Map<String, SnapshotRegionManifest> regionManifests,
    final List<HRegionInfo> regions) throws IOException {
  // isEmpty() is the idiomatic emptiness check (matches the newer versions).
  if (regions == null || regions.isEmpty()) return null;

  final Map<String, HRegionInfo> snapshotRegions =
    new HashMap<>(regions.size());

  // clone region info (change embedded tableName with the new one)
  HRegionInfo[] clonedRegionsInfo = new HRegionInfo[regions.size()];
  for (int i = 0; i < clonedRegionsInfo.length; ++i) {
    // clone the region info from the snapshot region info
    HRegionInfo snapshotRegionInfo = regions.get(i);
    clonedRegionsInfo[i] = cloneRegionInfo(snapshotRegionInfo);

    // add the region name mapping between snapshot and cloned
    String snapshotRegionName = snapshotRegionInfo.getEncodedName();
    String clonedRegionName = clonedRegionsInfo[i].getEncodedName();
    regionsMap.put(Bytes.toBytes(snapshotRegionName), Bytes.toBytes(clonedRegionName));
    LOG.info("clone region=" + snapshotRegionName + " as " + clonedRegionName);

    // Add mapping between cloned region name and snapshot region info
    snapshotRegions.put(clonedRegionName, snapshotRegionInfo);
  }

  // create the regions on disk
  ModifyRegionUtils.createRegions(exec, conf, rootDir, tableDir,
    tableDesc, clonedRegionsInfo, new ModifyRegionUtils.RegionFillTask() {
      @Override
      public void fillRegion(final HRegion region) throws IOException {
        HRegionInfo snapshotHri = snapshotRegions.get(region.getRegionInfo().getEncodedName());
        cloneRegion(region, snapshotHri, regionManifests.get(snapshotHri.getEncodedName()));
      }
    });

  return clonedRegionsInfo;
}
项目:HIndex    文件:CreateTableHandler.java   
/**
 * Create the on-disk structure for the table, and returns the regions info.
 * Delegates to ModifyRegionUtils.createRegions with this handler's
 * configuration, table descriptor and precomputed region list (instance
 * fields); the trailing null means no per-region fill task is run.
 * @param tableRootDir directory where the table is being created
 * @param tableName name of the table under construction
 * @return the list of regions created
 * @throws IOException if creating the regions on the filesystem fails
 */
protected List<HRegionInfo> handleCreateHdfsRegions(final Path tableRootDir,
  final TableName tableName)
    throws IOException {
  return ModifyRegionUtils.createRegions(conf, tableRootDir,
      hTableDescriptor, newRegions, null);
}
项目:HIndex    文件:RestoreSnapshotHelper.java   
/**
 * Clone specified regions. For each region create a new region
 * and create a HFileLink for each hfile.
 *
 * @param regions snapshot regions to clone; may be null
 * @return the cloned region infos, or null when there is nothing to clone
 * @throws IOException if creating the regions on disk fails
 */
private HRegionInfo[] cloneHdfsRegions(final List<HRegionInfo> regions) throws IOException {
  // isEmpty() is the idiomatic emptiness check.
  if (regions == null || regions.isEmpty()) return null;

  final Map<String, HRegionInfo> snapshotRegions =
    new HashMap<String, HRegionInfo>(regions.size());

  // clone region info (change embedded tableName with the new one)
  HRegionInfo[] clonedRegionsInfo = new HRegionInfo[regions.size()];
  for (int i = 0; i < clonedRegionsInfo.length; ++i) {
    // clone the region info from the snapshot region info
    HRegionInfo snapshotRegionInfo = regions.get(i);
    clonedRegionsInfo[i] = cloneRegionInfo(snapshotRegionInfo);

    // add the region name mapping between snapshot and cloned
    String snapshotRegionName = snapshotRegionInfo.getEncodedName();
    String clonedRegionName = clonedRegionsInfo[i].getEncodedName();
    regionsMap.put(Bytes.toBytes(snapshotRegionName), Bytes.toBytes(clonedRegionName));
    LOG.info("clone region=" + snapshotRegionName + " as " + clonedRegionName);

    // Add mapping between cloned region name and snapshot region info
    snapshotRegions.put(clonedRegionName, snapshotRegionInfo);
  }

  // create the regions on disk
  ModifyRegionUtils.createRegions(conf, rootDir, tableDir,
    tableDesc, clonedRegionsInfo, new ModifyRegionUtils.RegionFillTask() {
      @Override
      public void fillRegion(final HRegion region) throws IOException {
        cloneRegion(region, snapshotRegions.get(region.getRegionInfo().getEncodedName()));
      }
    });

  return clonedRegionsInfo;
}
项目:IRIndex    文件:CreateTableHandler.java   
/**
 * Create the on-disk structure for the table, and returns the regions info.
 * Delegates to ModifyRegionUtils.createRegions with this handler's
 * configuration, table descriptor and precomputed region list (instance
 * fields); the trailing null means no per-region fill task is run.
 * @param tableRootDir directory where the table is being created
 * @param tableName name of the table under construction (String in this
 *     pre-TableName API)
 * @return the list of regions created
 * @throws IOException if creating the regions on the filesystem fails
 */
protected List<HRegionInfo> handleCreateHdfsRegions(final Path tableRootDir,
  final String tableName)
    throws IOException {
  return ModifyRegionUtils.createRegions(conf, tableRootDir,
      hTableDescriptor, newRegions, null);
}
项目:IRIndex    文件:RestoreSnapshotHelper.java   
/**
 * Clone specified regions. For each region create a new region
 * and create a HFileLink for each hfile.
 *
 * @param regions snapshot regions to clone; may be null
 * @return the cloned region infos, or null when there is nothing to clone
 * @throws IOException if creating the regions on disk fails
 */
private HRegionInfo[] cloneHdfsRegions(final List<HRegionInfo> regions) throws IOException {
  // isEmpty() is the idiomatic emptiness check.
  if (regions == null || regions.isEmpty()) return null;

  final Map<String, HRegionInfo> snapshotRegions =
    new HashMap<String, HRegionInfo>(regions.size());

  // clone region info (change embedded tableName with the new one)
  HRegionInfo[] clonedRegionsInfo = new HRegionInfo[regions.size()];
  for (int i = 0; i < clonedRegionsInfo.length; ++i) {
    // clone the region info from the snapshot region info
    HRegionInfo snapshotRegionInfo = regions.get(i);
    clonedRegionsInfo[i] = cloneRegionInfo(snapshotRegionInfo);

    // add the region name mapping between snapshot and cloned
    String snapshotRegionName = snapshotRegionInfo.getEncodedName();
    String clonedRegionName = clonedRegionsInfo[i].getEncodedName();
    regionsMap.put(Bytes.toBytes(snapshotRegionName), Bytes.toBytes(clonedRegionName));
    LOG.info("clone region=" + snapshotRegionName + " as " + clonedRegionName);

    // Add mapping between cloned region name and snapshot region info
    snapshotRegions.put(clonedRegionName, snapshotRegionInfo);
  }

  // create the regions on disk
  ModifyRegionUtils.createRegions(conf, tableDir.getParent(),
    tableDesc, clonedRegionsInfo, new ModifyRegionUtils.RegionFillTask() {
      // @Override added for consistency with the sibling RegionFillTask impls.
      @Override
      public void fillRegion(final HRegion region) throws IOException {
        cloneRegion(region, snapshotRegions.get(region.getRegionInfo().getEncodedName()));
      }
    });

  return clonedRegionsInfo;
}
项目:hbase    文件:HMaster.java   
/**
 * Creates a table by submitting a CreateTableProcedure through the nonce
 * framework and blocking on its prepare latch inside the runnable.
 *
 * NOTE(review): region infos are computed and the namespace resolved before
 * sanityCheckTableDescriptor runs — preserve this ordering unless the
 * dependency is understood; it looks deliberate.
 *
 * @param tableDescriptor descriptor of the table to create
 * @param splitKeys split boundaries for pre-splitting (null for one region)
 * @param nonceGroup nonce group for duplicate-RPC detection
 * @param nonce nonce for duplicate-RPC detection
 * @return id of the submitted procedure
 * @throws IOException if validation or submission fails
 */
@Override
public long createTable(
    final TableDescriptor tableDescriptor,
    final byte [][] splitKeys,
    final long nonceGroup,
    final long nonce) throws IOException {
  checkInitialized();

  // Throws if the namespace does not exist.
  String namespace = tableDescriptor.getTableName().getNamespaceAsString();
  this.clusterSchemaService.getNamespace(namespace);

  RegionInfo[] newRegions = ModifyRegionUtils.createRegionInfos(tableDescriptor, splitKeys);
  sanityCheckTableDescriptor(tableDescriptor);

  // Nonce framework deduplicates retried client RPCs of this create.
  return MasterProcedureUtil.submitProcedure(
      new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
    @Override
    protected void run() throws IOException {
      // Coprocessor hook runs before anything is persisted.
      getMaster().getMasterCoprocessorHost().preCreateTable(tableDescriptor, newRegions);

      LOG.info(getClientIdAuditPrefix() + " create " + tableDescriptor);

      // TODO: We can handle/merge duplicate requests, and differentiate the case of
      //       TableExistsException by saying if the schema is the same or not.
      ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch();
      submitProcedure(new CreateTableProcedure(
          procedureExecutor.getEnvironment(), tableDescriptor, newRegions, latch));
      // Block until the procedure has passed its prepare phase (or failed it).
      latch.await();

      getMaster().getMasterCoprocessorHost().postCreateTable(tableDescriptor, newRegions);
    }

    @Override
    protected String getDescription() {
      return "CreateTableProcedure";
    }
  });
}
项目:hbase    文件:RestoreSnapshotHelper.java   
/**
 * Remove specified regions from the file-system, using the archiver.
 * No-op when {@code regions} is null or empty.
 */
private void removeHdfsRegions(final ThreadPoolExecutor exec, final List<RegionInfo> regions)
    throws IOException {
  if (regions == null || regions.isEmpty()) {
    return;
  }
  // Archive (rather than delete) each region's files, in parallel on exec.
  ModifyRegionUtils.editRegions(exec, regions,
    hri -> HFileArchiver.archiveRegion(conf, fs, hri));
}
项目:hbase    文件:RestoreSnapshotHelper.java   
/**
 * Restore specified regions by restoring content to the snapshot state.
 * No-op when {@code regions} is null or empty.
 */
private void restoreHdfsRegions(final ThreadPoolExecutor exec,
    final Map<String, SnapshotRegionManifest> regionManifests,
    final List<RegionInfo> regions) throws IOException {
  if (regions == null || regions.isEmpty()) {
    return;
  }
  // Restore each region from its snapshot manifest, in parallel on exec.
  ModifyRegionUtils.editRegions(exec, regions,
    hri -> restoreRegion(hri, regionManifests.get(hri.getEncodedName())));
}
项目:hbase    文件:RestoreSnapshotHelper.java   
/**
 * Restore specified mob regions by restoring content to the snapshot state.
 * No-op when {@code regions} is null or empty.
 */
private void restoreHdfsMobRegions(final ThreadPoolExecutor exec,
    final Map<String, SnapshotRegionManifest> regionManifests,
    final List<RegionInfo> regions) throws IOException {
  if (regions == null || regions.isEmpty()) {
    return;
  }
  // Restore each mob region from its snapshot manifest, in parallel on exec.
  ModifyRegionUtils.editRegions(exec, regions,
    hri -> restoreMobRegion(hri, regionManifests.get(hri.getEncodedName())));
}
项目:hbase    文件:RestoreSnapshotHelper.java   
/**
 * Clone specified regions. For each region create a new region
 * and create a HFileLink for each hfile.
 */
private RegionInfo[] cloneHdfsRegions(final ThreadPoolExecutor exec,
    final Map<String, SnapshotRegionManifest> regionManifests,
    final List<RegionInfo> regions) throws IOException {
  if (regions == null || regions.isEmpty()) {
    return null;
  }

  final int regionCount = regions.size();
  // cloned-region encoded name -> originating snapshot region info
  final Map<String, RegionInfo> snapshotRegions = new HashMap<>(regionCount);

  // Clone each region info (the clone carries the new table name embedded).
  RegionInfo[] clonedRegionsInfo = new RegionInfo[regionCount];
  int index = 0;
  for (RegionInfo snapshotRegionInfo : regions) {
    RegionInfo cloned = cloneRegionInfo(snapshotRegionInfo);
    clonedRegionsInfo[index++] = cloned;

    String snapshotRegionName = snapshotRegionInfo.getEncodedName();
    String clonedRegionName = cloned.getEncodedName();

    // Record the snapshot -> clone encoded-name mapping.
    regionsMap.put(Bytes.toBytes(snapshotRegionName), Bytes.toBytes(clonedRegionName));
    LOG.info("clone region=" + snapshotRegionName + " as " + clonedRegionName);

    // Reverse lookup used by the fill task below.
    snapshotRegions.put(clonedRegionName, snapshotRegionInfo);
  }

  // Create the cloned regions on disk, filling each from its snapshot manifest.
  ModifyRegionUtils.createRegions(exec, conf, rootDir,
    tableDesc, clonedRegionsInfo, new ModifyRegionUtils.RegionFillTask() {
      @Override
      public void fillRegion(final HRegion region) throws IOException {
        RegionInfo snapshotHri = snapshotRegions.get(region.getRegionInfo().getEncodedName());
        cloneRegion(region, snapshotHri, regionManifests.get(snapshotHri.getEncodedName()));
      }
    });

  return clonedRegionsInfo;
}
项目:hbase    文件:MasterProcedureTestingUtility.java   
/**
 * Creates a table through a CreateTableProcedure, waits for completion,
 * and asserts the procedure did not fail.
 * @return the region infos the table was created with
 */
public static RegionInfo[] createTable(final ProcedureExecutor<MasterProcedureEnv> procExec,
    final TableName tableName, final byte[][] splitKeys, String... family) throws IOException {
  final TableDescriptor htd = createHTD(tableName, family);
  final RegionInfo[] regions = ModifyRegionUtils.createRegionInfos(htd, splitKeys);
  final CreateTableProcedure proc =
      new CreateTableProcedure(procExec.getEnvironment(), htd, regions);
  final long procId = ProcedureTestingUtility.submitAndWait(procExec, proc);
  ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId));
  return regions;
}
项目:PyroDB    文件:CreateTableHandler.java   
/**
 * Responsible of table creation (on-disk and META) and assignment.
 * - Create the table directory and descriptor (temp folder)
 * - Create the on-disk regions (temp folder)
 *   [If something fails here: we've just some trash in temp]
 * - Move the table from temp to the root directory
 *   [If something fails here: we've the table in place but some of the rows required
 *    present in META. (hbck needed)]
 * - Add regions to META
 *   [If something fails here: we don't have regions assigned: table disabled]
 * - Assign regions to Region Servers
 *   [If something fails here: we still have the table in disabled state]
 * - Update ZooKeeper with the enabled state
 */
private void handleCreateTable(TableName tableName)
    throws IOException, CoordinatedStateException {
  final Path tempDir = fileSystemManager.getTempDir();
  final FileSystem fs = fileSystemManager.getFileSystem();

  // 1. Write the table descriptor under the temp table directory.
  final Path tempTableDir = FSUtils.getTableDir(tempDir, tableName);
  new FSTableDescriptors(this.conf).createTableDescriptorForTableDirectory(
    tempTableDir, this.hTableDescriptor, false);
  final Path tableDir = FSUtils.getTableDir(fileSystemManager.getRootDir(), tableName);

  // 2. Create the on-disk regions (still under temp).
  final List<HRegionInfo> regionInfos = handleCreateHdfsRegions(tempDir, tableName);

  // 3. Move the fully-built table directory from temp into the hbase root location.
  if (!fs.rename(tempTableDir, tableDir)) {
    throw new IOException("Unable to move table from temp=" + tempTableDir +
      " to hbase root=" + tableDir);
  }

  if (regionInfos != null && !regionInfos.isEmpty()) {
    // 4. Add regions to META
    addRegionsToMeta(this.catalogTracker, regionInfos);

    // 5. Trigger immediate assignment of the regions in round-robin fashion
    ModifyRegionUtils.assignRegions(assignmentManager, regionInfos);
  }

  // 6. Set table enabled flag up in zk.
  try {
    assignmentManager.getTableStateManager().setTableState(tableName,
      ZooKeeperProtos.Table.State.ENABLED);
  } catch (CoordinatedStateException e) {
    throw new IOException("Unable to ensure that " + tableName + " will be" +
      " enabled because of a ZooKeeper issue", e);
  }
}
项目:PyroDB    文件:CreateTableHandler.java   
/**
 * Create the on-disk structure for the table, and returns the regions info.
 * @param tableRootDir directory where the table is being created
 * @param tableName name of the table under construction
 * @return the list of regions created
 */
protected List<HRegionInfo> handleCreateHdfsRegions(final Path tableRootDir,
  final TableName tableName)
    throws IOException {
  // Delegate region creation to the shared utility; no per-region fill task is needed.
  return ModifyRegionUtils.createRegions(
      conf, tableRootDir, hTableDescriptor, newRegions, null);
}
项目:PyroDB    文件:RestoreSnapshotHelper.java   
/**
 * Remove specified regions from the file-system, using the archiver.
 * @param exec thread pool used to run the per-region archiving in parallel
 * @param regions regions to remove; {@code null} or empty is a no-op
 * @throws IOException if archiving any region fails
 */
private void removeHdfsRegions(final ThreadPoolExecutor exec, final List<HRegionInfo> regions)
    throws IOException {
  // isEmpty() over size() == 0: clearer intent, and matches the style used elsewhere.
  if (regions == null || regions.isEmpty()) return;
  ModifyRegionUtils.editRegions(exec, regions, new ModifyRegionUtils.RegionEditTask() {
    @Override
    public void editRegion(final HRegionInfo hri) throws IOException {
      HFileArchiver.archiveRegion(conf, fs, hri);
    }
  });
}
项目:PyroDB    文件:RestoreSnapshotHelper.java   
/**
 * Restore specified regions by restoring content to the snapshot state.
 * @param exec thread pool used to run the per-region restore in parallel
 * @param regionManifests snapshot manifests keyed by region encoded name
 * @param regions regions to restore; {@code null} or empty is a no-op
 * @throws IOException if restoring any region fails
 */
private void restoreHdfsRegions(final ThreadPoolExecutor exec,
    final Map<String, SnapshotRegionManifest> regionManifests,
    final List<HRegionInfo> regions) throws IOException {
  // isEmpty() over size() == 0: clearer intent, and matches the style used elsewhere.
  if (regions == null || regions.isEmpty()) return;
  ModifyRegionUtils.editRegions(exec, regions, new ModifyRegionUtils.RegionEditTask() {
    @Override
    public void editRegion(final HRegionInfo hri) throws IOException {
      restoreRegion(hri, regionManifests.get(hri.getEncodedName()));
    }
  });
}
项目:PyroDB    文件:RestoreSnapshotHelper.java   
/**
 * Clone specified regions. For each region create a new region
 * and create a HFileLink for each hfile.
 * @param exec thread pool used to create the regions in parallel
 * @param regionManifests snapshot manifests keyed by region encoded name
 * @param regions snapshot regions to clone; {@code null} or empty returns {@code null}
 * @return the cloned region infos, or {@code null} when there is nothing to clone
 * @throws IOException if creating any region on disk fails
 */
private HRegionInfo[] cloneHdfsRegions(final ThreadPoolExecutor exec,
    final Map<String, SnapshotRegionManifest> regionManifests,
    final List<HRegionInfo> regions) throws IOException {
  // isEmpty() over size() == 0: clearer intent, and matches the style used elsewhere.
  if (regions == null || regions.isEmpty()) return null;

  // cloned-region encoded name -> originating snapshot region info
  final Map<String, HRegionInfo> snapshotRegions =
    new HashMap<String, HRegionInfo>(regions.size());

  // clone region info (change embedded tableName with the new one)
  HRegionInfo[] clonedRegionsInfo = new HRegionInfo[regions.size()];
  for (int i = 0; i < clonedRegionsInfo.length; ++i) {
    // clone the region info from the snapshot region info
    HRegionInfo snapshotRegionInfo = regions.get(i);
    clonedRegionsInfo[i] = cloneRegionInfo(snapshotRegionInfo);

    // add the region name mapping between snapshot and cloned
    String snapshotRegionName = snapshotRegionInfo.getEncodedName();
    String clonedRegionName = clonedRegionsInfo[i].getEncodedName();
    regionsMap.put(Bytes.toBytes(snapshotRegionName), Bytes.toBytes(clonedRegionName));
    LOG.info("clone region=" + snapshotRegionName + " as " + clonedRegionName);

    // Add mapping between cloned region name and snapshot region info
    snapshotRegions.put(clonedRegionName, snapshotRegionInfo);
  }

  // create the regions on disk, filling each from its snapshot manifest
  ModifyRegionUtils.createRegions(exec, conf, rootDir, tableDir,
    tableDesc, clonedRegionsInfo, new ModifyRegionUtils.RegionFillTask() {
      @Override
      public void fillRegion(final HRegion region) throws IOException {
        HRegionInfo snapshotHri = snapshotRegions.get(region.getRegionInfo().getEncodedName());
        cloneRegion(region, snapshotHri, regionManifests.get(snapshotHri.getEncodedName()));
      }
    });

  return clonedRegionsInfo;
}
项目:c5    文件:CreateTableHandler.java   
/**
 * Create the on-disk structure for the table, and returns the regions info.
 * @param tableRootDir directory where the table is being created
 * @param tableName name of the table under construction
 * @return the list of regions created
 */
protected List<HRegionInfo> handleCreateHdfsRegions(final Path tableRootDir,
  final TableName tableName)
    throws IOException {
  // Hand off to the shared utility; a null fill task means regions are created empty.
  return ModifyRegionUtils.createRegions(conf, tableRootDir,
      hTableDescriptor, newRegions,
      null);
}