Java class org.apache.hadoop.hbase.regionserver.HRegionFileSystem: example source code
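The snippets below, collected from several open-source projects, mostly revolve around a handful of HRegionFileSystem entry points: createRegionOnFileSystem, openRegionFromFileSystem, loadRegionInfoFileContent, the REGION_INFO_FILE constant, and the commit helpers for store files, splits and merges. As a quick orientation, here is a minimal sketch of the create/open calls in isolation; it assumes an hbase-1.x style classpath and a reachable HBase root directory, and the table name "myTable" is a placeholder, not something taken from the projects quoted below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.util.FSUtils;

public class HRegionFileSystemSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    Path rootDir = FSUtils.getRootDir(conf);
    TableName table = TableName.valueOf("myTable");   // placeholder table name
    Path tableDir = FSUtils.getTableDir(rootDir, table);

    // Create the on-disk layout (region directory plus .regioninfo) for a new region.
    HRegionInfo hri = new HRegionInfo(table);
    HRegionFileSystem created =
        HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, hri);
    System.out.println("region dir: " + created.getRegionDir());

    // Re-open an existing region read-only, the way CatalogJanitor does below.
    HRegionFileSystem reopened =
        HRegionFileSystem.openRegionFromFileSystem(conf, fs, tableDir, hri, true);
    System.out.println("reopened region: " + reopened.getRegionInfo().getEncodedName());
  }
}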

Project: ditb    File: CatalogJanitor.java
/**
 * If the merged region no longer holds references to the regions it was merged from,
 * archive those merging regions on HDFS and delete the merge references from hbase:meta
 * @param mergedRegion
 * @param regionA
 * @param regionB
 * @return true if we delete references in merged region on hbase:meta and archive
 *         the files on the file system
 * @throws IOException
 */
boolean cleanMergeRegion(final HRegionInfo mergedRegion,
    final HRegionInfo regionA, final HRegionInfo regionB) throws IOException {
  FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
  Path rootdir = this.services.getMasterFileSystem().getRootDir();
  Path tabledir = FSUtils.getTableDir(rootdir, mergedRegion.getTable());
  HTableDescriptor htd = getTableDescriptor(mergedRegion.getTable());
  HRegionFileSystem regionFs = null;
  try {
    regionFs = HRegionFileSystem.openRegionFromFileSystem(
        this.services.getConfiguration(), fs, tabledir, mergedRegion, true);
  } catch (IOException e) {
    LOG.warn("Merged region does not exist: " + mergedRegion.getEncodedName());
  }
  if (regionFs == null || !regionFs.hasReferences(htd)) {
    LOG.debug("Deleting region " + regionA.getRegionNameAsString() + " and "
        + regionB.getRegionNameAsString()
        + " from fs because merged region no longer holds references");
    HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionA);
    HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionB);
    MetaTableAccessor.deleteMergeQualifiers(server.getConnection(),
      mergedRegion);
    return true;
  }
  return false;
}
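The decision above hinges on openRegionFromFileSystem plus hasReferences. A stripped-down sketch of just that check, assuming the caller already holds a Configuration, FileSystem, table directory, the merged region's HRegionInfo and the table's HTableDescriptor (this is not the CatalogJanitor's actual code):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;

public final class MergedRegionReferenceCheck {
  /** Returns true when the merged region's directory is gone or holds no reference files. */
  public static boolean safeToArchiveParents(Configuration conf, FileSystem fs, Path tableDir,
      HRegionInfo mergedRegion, HTableDescriptor htd) throws IOException {
    HRegionFileSystem regionFs = null;
    try {
      // Open read-only; as in the snippet above, a missing region directory surfaces as an IOException.
      regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, fs, tableDir, mergedRegion, true);
    } catch (IOException e) {
      // Nothing on disk, so nothing can still reference the merge parents.
    }
    return regionFs == null || !regionFs.hasReferences(htd);
  }
}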
Project: ditb    File: OfflineMetaRebuildTestCore.java
protected HRegionInfo createRegion(Configuration conf, final Table htbl,
    byte[] startKey, byte[] endKey) throws IOException {
  Table meta = new HTable(conf, TableName.META_TABLE_NAME);
  HTableDescriptor htd = htbl.getTableDescriptor();
  HRegionInfo hri = new HRegionInfo(htbl.getName(), startKey, endKey);

  LOG.info("manually adding regioninfo and hdfs data: " + hri.toString());
  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = rootDir.getFileSystem(conf);
  Path p = new Path(FSUtils.getTableDir(rootDir, htbl.getName()),
      hri.getEncodedName());
  fs.mkdirs(p);
  Path riPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE);
  FSDataOutputStream out = fs.create(riPath);
  out.write(hri.toDelimitedByteArray());
  out.close();

  // add to meta.
  MetaTableAccessor.addRegionToMeta(meta, hri);
  meta.close();
  return hri;
}
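The .regioninfo file written by hand above can be read back through the same class; loadRegionInfoFileContent appears again in the RestoreSnapshotHelper and CompactionTool snippets further down. A minimal sketch, assuming an hbase-1.x classpath and that the region directory path is passed as the first program argument:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.util.FSUtils;

public class RegionInfoReadBack {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Path rootDir = FSUtils.getRootDir(conf);
    FileSystem fs = rootDir.getFileSystem(conf);
    // Placeholder path: <table dir>/<encoded region name>, as created by createRegion above.
    Path regionDir = new Path(args[0]);
    // Parses the delimited HRegionInfo stored in the region's .regioninfo file.
    HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
    System.out.println(hri.getRegionNameAsString() + " -> " + hri.getEncodedName());
  }
}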
Project: incubator-tephra    File: TransactionProcessorTest.java
private HRegion createRegion(String tableName, byte[] family, long ttl) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  HColumnDescriptor cfd = new HColumnDescriptor(family);
  if (ttl > 0) {
    cfd.setValue(TxConstants.PROPERTY_TTL, String.valueOf(ttl));
  }
  cfd.setMaxVersions(10);
  htd.addFamily(cfd);
  htd.addCoprocessor(TransactionProcessor.class.getName());
  Path tablePath = FSUtils.getTableDir(FSUtils.getRootDir(conf), htd.getTableName());
  FileSystem fs = FileSystem.get(conf);
  assertTrue(fs.mkdirs(tablePath));
  WALFactory walFactory = new WALFactory(conf, null, tableName + ".hlog");
  WAL hLog = walFactory.getWAL(new byte[]{1}, null);
  HRegionInfo regionInfo = new HRegionInfo(TableName.valueOf(tableName));
  HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tablePath, regionInfo);
  return new HRegion(regionFS, hLog, conf, htd,
      new LocalRegionServerServices(conf, ServerName.valueOf(
          InetAddress.getLocalHost().getHostName(), 0, System.currentTimeMillis())));
}
Project: incubator-tephra    File: TransactionProcessorTest.java
private HRegion createRegion(String tableName, byte[] family, long ttl) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  HColumnDescriptor cfd = new HColumnDescriptor(family);
  if (ttl > 0) {
    cfd.setValue(TxConstants.PROPERTY_TTL, String.valueOf(ttl));
  }
  cfd.setMaxVersions(10);
  htd.addFamily(cfd);
  htd.addCoprocessor(TransactionProcessor.class.getName());
  Path tablePath = FSUtils.getTableDir(FSUtils.getRootDir(conf), htd.getTableName());
  Path hlogPath = new Path(FSUtils.getRootDir(conf) + "/hlog");
  FileSystem fs = FileSystem.get(conf);
  assertTrue(fs.mkdirs(tablePath));
  HLog hLog = HLogFactory.createHLog(fs, hlogPath, tableName, conf);
  HRegionInfo regionInfo = new HRegionInfo(TableName.valueOf(tableName));
  HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tablePath, regionInfo);
  return new HRegion(regionFS, hLog, conf, htd, new MockRegionServerServices(conf, null));
}
Project: incubator-tephra    File: TransactionProcessorTest.java
private HRegion createRegion(String tableName, byte[] family, long ttl) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  HColumnDescriptor cfd = new HColumnDescriptor(family);
  if (ttl > 0) {
    cfd.setValue(TxConstants.PROPERTY_TTL, String.valueOf(ttl));
  }
  cfd.setMaxVersions(10);
  htd.addFamily(cfd);
  htd.addCoprocessor(TransactionProcessor.class.getName());
  Path tablePath = FSUtils.getTableDir(FSUtils.getRootDir(conf), htd.getTableName());
  FileSystem fs = FileSystem.get(conf);
  assertTrue(fs.mkdirs(tablePath));
  WALFactory walFactory = new WALFactory(conf, null, tableName + ".hlog");
  WAL hLog = walFactory.getWAL(new byte[]{1});
  HRegionInfo regionInfo = new HRegionInfo(TableName.valueOf(tableName));
  HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tablePath, regionInfo);
  return new HRegion(regionFS, hLog, conf, htd,
                     new LocalRegionServerServices(conf, ServerName.valueOf(
                       InetAddress.getLocalHost().getHostName(), 0, System.currentTimeMillis())));
}
Project: pbase    File: CatalogJanitor.java
/**
 * If the merged region no longer holds references to the regions it was merged from,
 * archive those merging regions on HDFS and delete the merge references from hbase:meta
 * @param mergedRegion
 * @param regionA
 * @param regionB
 * @return true if we delete references in merged region on hbase:meta and archive
 *         the files on the file system
 * @throws IOException
 */
boolean cleanMergeRegion(final HRegionInfo mergedRegion,
    final HRegionInfo regionA, final HRegionInfo regionB) throws IOException {
  FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
  Path rootdir = this.services.getMasterFileSystem().getRootDir();
  Path tabledir = FSUtils.getTableDir(rootdir, mergedRegion.getTable());
  HTableDescriptor htd = getTableDescriptor(mergedRegion.getTable());
  HRegionFileSystem regionFs = null;
  try {
    regionFs = HRegionFileSystem.openRegionFromFileSystem(
        this.services.getConfiguration(), fs, tabledir, mergedRegion, true);
  } catch (IOException e) {
    LOG.warn("Merged region does not exist: " + mergedRegion.getEncodedName());
  }
  if (regionFs == null || !regionFs.hasReferences(htd)) {
    LOG.debug("Deleting region " + regionA.getRegionNameAsString() + " and "
        + regionB.getRegionNameAsString()
        + " from fs because merged region no longer holds references");
    HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionA);
    HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionB);
    MetaTableAccessor.deleteMergeQualifiers(server.getConnection(),
      mergedRegion);
    return true;
  }
  return false;
}
Project: pbase    File: OfflineMetaRebuildTestCore.java
protected HRegionInfo createRegion(Configuration conf, final Table htbl,
    byte[] startKey, byte[] endKey) throws IOException {
  Table meta = new HTable(conf, TableName.META_TABLE_NAME);
  HTableDescriptor htd = htbl.getTableDescriptor();
  HRegionInfo hri = new HRegionInfo(htbl.getName(), startKey, endKey);

  LOG.info("manually adding regioninfo and hdfs data: " + hri.toString());
  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = rootDir.getFileSystem(conf);
  Path p = new Path(FSUtils.getTableDir(rootDir, htbl.getName()),
      hri.getEncodedName());
  fs.mkdirs(p);
  Path riPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE);
  FSDataOutputStream out = fs.create(riPath);
  out.write(hri.toDelimitedByteArray());
  out.close();

  // add to meta.
  MetaTableAccessor.addRegionToMeta(meta, hri);
  meta.close();
  return hri;
}
Project: HIndex    File: CatalogJanitor.java
/**
 * If the merged region no longer holds references to the regions it was merged from,
 * archive those merging regions on HDFS and delete the merge references from hbase:meta
 * @param mergedRegion
 * @param regionA
 * @param regionB
 * @return true if we delete references in merged region on hbase:meta and archive
 *         the files on the file system
 * @throws IOException
 */
boolean cleanMergeRegion(final HRegionInfo mergedRegion,
    final HRegionInfo regionA, final HRegionInfo regionB) throws IOException {
  FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
  Path rootdir = this.services.getMasterFileSystem().getRootDir();
  Path tabledir = FSUtils.getTableDir(rootdir, mergedRegion.getTable());
  HTableDescriptor htd = getTableDescriptor(mergedRegion.getTable());
  HRegionFileSystem regionFs = null;
  try {
    regionFs = HRegionFileSystem.openRegionFromFileSystem(
        this.services.getConfiguration(), fs, tabledir, mergedRegion, true);
  } catch (IOException e) {
    LOG.warn("Merged region does not exist: " + mergedRegion.getEncodedName());
  }
  if (regionFs == null || !regionFs.hasReferences(htd)) {
    LOG.debug("Deleting region " + regionA.getRegionNameAsString() + " and "
        + regionB.getRegionNameAsString()
        + " from fs because merged region no longer holds references");
    HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionA);
    HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionB);
    MetaEditor.deleteMergeQualifiers(server.getCatalogTracker(), mergedRegion);
    return true;
  }
  return false;
}
Project: HIndex    File: TakeSnapshotHandler.java
/**
 * Take a snapshot of the specified disabled region
 */
protected void snapshotDisabledRegion(final HRegionInfo regionInfo)
    throws IOException {
  // 2 copy the regionInfo files to the snapshot
  HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs,
    workingDir, regionInfo);

  // check for error for each region
  monitor.rethrowException();

  // 2 for each region, copy over its recovered.edits directory
  Path regionDir = HRegion.getRegionDir(rootDir, regionInfo);
  Path snapshotRegionDir = regionFs.getRegionDir();
  new CopyRecoveredEditsTask(snapshot, monitor, fs, regionDir, snapshotRegionDir).call();
  monitor.rethrowException();
  status.setStatus("Completed copying recovered edits for offline snapshot of table: "
      + snapshotTable);

  // 2 reference all the files in the region
  new ReferenceRegionHFilesTask(snapshot, monitor, regionDir, fs, snapshotRegionDir).call();
  monitor.rethrowException();
  status.setStatus("Completed referencing HFiles for offline snapshot of table: " +
      snapshotTable);
}
Project: HIndex    File: TestRestoreSnapshotHelper.java
private void createSnapshot(final Path rootDir, final Path snapshotDir, final HTableDescriptor htd)
    throws IOException {
  // First region, simple with one plain hfile.
  HRegionInfo hri = new HRegionInfo(htd.getTableName());
  HRegionFileSystem r0fs = HRegionFileSystem.createRegionOnFileSystem(conf,
    fs, FSUtils.getTableDir(archiveDir, hri.getTable()), hri);
  Path storeFile = new Path(rootDir, TEST_HFILE);
  fs.createNewFile(storeFile);
  r0fs.commitStoreFile(TEST_FAMILY, storeFile);

  // Second region, used to test the split case.
  // This region contains a reference to the hfile in the first region.
  hri = new HRegionInfo(htd.getTableName());
  HRegionFileSystem r1fs = HRegionFileSystem.createRegionOnFileSystem(conf,
    fs, FSUtils.getTableDir(archiveDir, hri.getTable()), hri);
  storeFile = new Path(rootDir, TEST_HFILE + '.' + r0fs.getRegionInfo().getEncodedName());
  fs.createNewFile(storeFile);
  r1fs.commitStoreFile(TEST_FAMILY, storeFile);

  Path tableDir = FSUtils.getTableDir(archiveDir, htd.getTableName());
  FileUtil.copy(fs, tableDir, fs, snapshotDir, false, conf);
}
Project: HIndex    File: OfflineMetaRebuildTestCore.java
protected HRegionInfo createRegion(Configuration conf, final HTable htbl,
    byte[] startKey, byte[] endKey) throws IOException {
  HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
  HTableDescriptor htd = htbl.getTableDescriptor();
  HRegionInfo hri = new HRegionInfo(htbl.getName(), startKey, endKey);

  LOG.info("manually adding regioninfo and hdfs data: " + hri.toString());
  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = rootDir.getFileSystem(conf);
  Path p = new Path(FSUtils.getTableDir(rootDir, htbl.getName()),
      hri.getEncodedName());
  fs.mkdirs(p);
  Path riPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE);
  FSDataOutputStream out = fs.create(riPath);
  out.write(hri.toDelimitedByteArray());
  out.close();

  // add to meta.
  MetaEditor.addRegionToMeta(meta, hri);
  meta.close();
  return hri;
}
Project: hbase    File: MergeTableRegionsProcedure.java
/**
 * Create merged region
 * @param env MasterProcedureEnv
 * @throws IOException
 */
private void createMergedRegion(final MasterProcedureEnv env) throws IOException {
  final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
  final Path tabledir = FSUtils.getTableDir(mfs.getRootDir(), regionsToMerge[0].getTable());
  final FileSystem fs = mfs.getFileSystem();
  HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(
    env.getMasterConfiguration(), fs, tabledir, regionsToMerge[0], false);
  regionFs.createMergesDir();

  mergeStoreFiles(env, regionFs, regionFs.getMergesDir());
  HRegionFileSystem regionFs2 = HRegionFileSystem.openRegionFromFileSystem(
    env.getMasterConfiguration(), fs, tabledir, regionsToMerge[1], false);
  mergeStoreFiles(env, regionFs2, regionFs.getMergesDir());

  regionFs.commitMergedRegion(mergedRegion);

  //Prepare to create merged regions
  env.getAssignmentManager().getRegionStates().
      getOrCreateRegionStateNode(mergedRegion).setState(State.MERGING_NEW);
}
Project: hbase    File: MergeTableRegionsProcedure.java
/**
 * Create reference file(s) of merging regions under the merges directory
 * @param env MasterProcedureEnv
 * @param regionFs region file system
 * @param mergedDir the temp directory of merged region
 * @throws IOException
 */
private void mergeStoreFiles(
    final MasterProcedureEnv env, final HRegionFileSystem regionFs, final Path mergedDir)
    throws IOException {
  final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
  final Configuration conf = env.getMasterConfiguration();
  final TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());

  for (String family: regionFs.getFamilies()) {
    final ColumnFamilyDescriptor hcd = htd.getColumnFamily(Bytes.toBytes(family));
    final Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family);

    if (storeFiles != null && storeFiles.size() > 0) {
      final CacheConfig cacheConf = new CacheConfig(conf, hcd);
      for (StoreFileInfo storeFileInfo: storeFiles) {
        // Create reference file(s) of the region in mergedDir
        regionFs.mergeStoreFile(mergedRegion, family, new HStoreFile(mfs.getFileSystem(),
            storeFileInfo, conf, cacheConf, hcd.getBloomFilterType(), true),
          mergedDir);
      }
    }
  }
}
Project: hbase    File: SplitTableRegionProcedure.java
/**
 * Create daughter regions
 * @param env MasterProcedureEnv
 * @throws IOException
 */
@VisibleForTesting
public void createDaughterRegions(final MasterProcedureEnv env) throws IOException {
  final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
  final Path tabledir = FSUtils.getTableDir(mfs.getRootDir(), getTableName());
  final FileSystem fs = mfs.getFileSystem();
  HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(
    env.getMasterConfiguration(), fs, tabledir, getParentRegion(), false);
  regionFs.createSplitsDir();

  Pair<Integer, Integer> expectedReferences = splitStoreFiles(env, regionFs);

  assertReferenceFileCount(fs, expectedReferences.getFirst(),
    regionFs.getSplitsDir(daughter_1_RI));
  //Move the files from the temporary .splits to the final /table/region directory
  regionFs.commitDaughterRegion(daughter_1_RI);
  assertReferenceFileCount(fs, expectedReferences.getFirst(),
    new Path(tabledir, daughter_1_RI.getEncodedName()));

  assertReferenceFileCount(fs, expectedReferences.getSecond(),
    regionFs.getSplitsDir(daughter_2_RI));
  regionFs.commitDaughterRegion(daughter_2_RI);
  assertReferenceFileCount(fs, expectedReferences.getSecond(),
    new Path(tabledir, daughter_2_RI.getEncodedName()));
}
Project: hbase    File: SplitTableRegionProcedure.java
private Pair<Path, Path> splitStoreFile(HRegionFileSystem regionFs, byte[] family, HStoreFile sf)
  throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("pid=" + getProcId() + " splitting started for store file: " +
        sf.getPath() + " for region: " + getParentRegion().getShortNameToLog());
  }

  final byte[] splitRow = getSplitRow();
  final String familyName = Bytes.toString(family);
  final Path path_first = regionFs.splitStoreFile(this.daughter_1_RI, familyName, sf, splitRow,
      false, splitPolicy);
  final Path path_second = regionFs.splitStoreFile(this.daughter_2_RI, familyName, sf, splitRow,
     true, splitPolicy);
  if (LOG.isDebugEnabled()) {
    LOG.debug("pid=" + getProcId() + " splitting complete for store file: " +
        sf.getPath() + " for region: " + getParentRegion().getShortNameToLog());
  }
  return new Pair<Path,Path>(path_first, path_second);
}
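The split path is spread over the two hbase snippets above (createDaughterRegions and splitStoreFile). Condensed into one sketch below, assuming an hbase-2.x style classpath and that the caller supplies the already-opened parent HRegionFileSystem, the table descriptor, the two daughter RegionInfos, the split row and a RegionSplitPolicy; this is a sketch of the same calls, not the procedure's actual code.

import java.io.IOException;
import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.regionserver.HStoreFile;
import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.util.Bytes;

public final class SplitRegionFsSketch {
  static void splitOnFilesystem(Configuration conf, FileSystem fs, HRegionFileSystem parentFs,
      TableDescriptor htd, RegionInfo daughterA, RegionInfo daughterB, byte[] splitRow,
      RegionSplitPolicy splitPolicy) throws IOException {
    parentFs.createSplitsDir();                               // staging dir under the parent region
    for (String family : parentFs.getFamilies()) {
      Collection<StoreFileInfo> storeFiles = parentFs.getStoreFiles(family);
      if (storeFiles == null || storeFiles.isEmpty()) {
        continue;
      }
      ColumnFamilyDescriptor cfd = htd.getColumnFamily(Bytes.toBytes(family));
      CacheConfig cacheConf = new CacheConfig(conf, cfd);
      for (StoreFileInfo storeFileInfo : storeFiles) {
        HStoreFile sf = new HStoreFile(fs, storeFileInfo, conf, cacheConf,
            cfd.getBloomFilterType(), true);
        // Bottom half for daughter A, top half for daughter B, mirroring splitStoreFile above.
        parentFs.splitStoreFile(daughterA, family, sf, splitRow, false, splitPolicy);
        parentFs.splitStoreFile(daughterB, family, sf, splitRow, true, splitPolicy);
      }
    }
    // Promote the staged daughter directories to <table dir>/<daughter encoded name>.
    parentFs.commitDaughterRegion(daughterA);
    parentFs.commitDaughterRegion(daughterB);
  }
}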
Project: hbase    File: TestMajorCompactionRequest.java
@Test public void testIfWeHaveNewReferenceFilesButOldStoreFiles() throws Exception {
  // this tests that reference files that are new, but have older timestamps for the files
  // they reference still will get compacted.
  TableName table = TableName.valueOf("TestMajorCompactor");
  TableDescriptor htd = UTILITY.createTableDescriptor(table, Bytes.toBytes(FAMILY));
  RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName()).build();
  HRegion region =
      HBaseTestingUtility.createRegionAndWAL(hri, rootRegionDir, UTILITY.getConfiguration(), htd);

  Configuration configuration = mock(Configuration.class);
  // the reference file timestamp is newer
  List<StoreFileInfo> storeFiles = mockStoreFiles(regionStoreDir, 4, 101);
  List<Path> paths = storeFiles.stream().map(StoreFileInfo::getPath).collect(Collectors.toList());
  // the files that are referenced are older, thus we still compact.
  HRegionFileSystem fileSystem =
      mockFileSystem(region.getRegionInfo(), true, storeFiles, 50);
  MajorCompactionRequest majorCompactionRequest = spy(new MajorCompactionRequest(configuration,
      region.getRegionInfo(), Sets.newHashSet(FAMILY), 100));
  doReturn(mock(Connection.class)).when(majorCompactionRequest).getConnection(eq(configuration));
  doReturn(paths).when(majorCompactionRequest).getReferenceFilePaths(any(FileSystem.class),
      any(Path.class));
  doReturn(fileSystem).when(majorCompactionRequest).getFileSystem(any(Connection.class));
  Set<String> result = majorCompactionRequest.getStoresRequiringCompaction(Sets.newHashSet("a"));
  assertEquals(FAMILY, Iterables.getOnlyElement(result));
}
Project: hbase    File: TestMajorCompactionRequest.java
private HRegionFileSystem mockFileSystem(RegionInfo info, boolean hasReferenceFiles,
    List<StoreFileInfo> storeFiles, long referenceFileTimestamp) throws IOException {
  FileSystem fileSystem = mock(FileSystem.class);
  if (hasReferenceFiles) {
    FileStatus fileStatus = mock(FileStatus.class);
    doReturn(referenceFileTimestamp).when(fileStatus).getModificationTime();
    doReturn(fileStatus).when(fileSystem).getFileLinkStatus(isA(Path.class));
  }
  HRegionFileSystem mockSystem = mock(HRegionFileSystem.class);
  doReturn(info).when(mockSystem).getRegionInfo();
  doReturn(regionStoreDir).when(mockSystem).getStoreDir(FAMILY);
  doReturn(hasReferenceFiles).when(mockSystem).hasReferences(anyString());
  doReturn(storeFiles).when(mockSystem).getStoreFiles(anyString());
  doReturn(fileSystem).when(mockSystem).getFileSystem();
  return mockSystem;
}
Project: hbase    File: OfflineMetaRebuildTestCore.java
protected RegionInfo createRegion(Configuration conf, final Table htbl,
    byte[] startKey, byte[] endKey) throws IOException {
  Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
  HTableDescriptor htd = htbl.getTableDescriptor();
  RegionInfo hri = RegionInfoBuilder.newBuilder(htbl.getName())
      .setStartKey(startKey)
      .setEndKey(endKey)
      .build();

  LOG.info("manually adding regioninfo and hdfs data: " + hri.toString());
  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = rootDir.getFileSystem(conf);
  Path p = new Path(FSUtils.getTableDir(rootDir, htbl.getName()),
      hri.getEncodedName());
  fs.mkdirs(p);
  Path riPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE);
  FSDataOutputStream out = fs.create(riPath);
  out.write(RegionInfo.toDelimitedByteArray(hri));
  out.close();

  // add to meta.
  MetaTableAccessor.addRegionToMeta(TEST_UTIL.getConnection(), hri);
  meta.close();
  return hri;
}
Project: PyroDB    File: CatalogJanitor.java
/**
 * If the merged region no longer holds references to the regions it was merged from,
 * archive those merging regions on HDFS and delete the merge references from hbase:meta
 * @param mergedRegion
 * @param regionA
 * @param regionB
 * @return true if we delete references in merged region on hbase:meta and archive
 *         the files on the file system
 * @throws IOException
 */
boolean cleanMergeRegion(final HRegionInfo mergedRegion,
    final HRegionInfo regionA, final HRegionInfo regionB) throws IOException {
  FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
  Path rootdir = this.services.getMasterFileSystem().getRootDir();
  Path tabledir = FSUtils.getTableDir(rootdir, mergedRegion.getTable());
  HTableDescriptor htd = getTableDescriptor(mergedRegion.getTable());
  HRegionFileSystem regionFs = null;
  try {
    regionFs = HRegionFileSystem.openRegionFromFileSystem(
        this.services.getConfiguration(), fs, tabledir, mergedRegion, true);
  } catch (IOException e) {
    LOG.warn("Merged region does not exist: " + mergedRegion.getEncodedName());
  }
  if (regionFs == null || !regionFs.hasReferences(htd)) {
    LOG.debug("Deleting region " + regionA.getRegionNameAsString() + " and "
        + regionB.getRegionNameAsString()
        + " from fs because merged region no longer holds references");
    HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionA);
    HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionB);
    MetaEditor.deleteMergeQualifiers(server.getCatalogTracker(), mergedRegion);
    return true;
  }
  return false;
}
Project: PyroDB    File: OfflineMetaRebuildTestCore.java
protected HRegionInfo createRegion(Configuration conf, final HTable htbl,
    byte[] startKey, byte[] endKey) throws IOException {
  HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
  HTableDescriptor htd = htbl.getTableDescriptor();
  HRegionInfo hri = new HRegionInfo(htbl.getName(), startKey, endKey);

  LOG.info("manually adding regioninfo and hdfs data: " + hri.toString());
  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = rootDir.getFileSystem(conf);
  Path p = new Path(FSUtils.getTableDir(rootDir, htbl.getName()),
      hri.getEncodedName());
  fs.mkdirs(p);
  Path riPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE);
  FSDataOutputStream out = fs.create(riPath);
  out.write(hri.toDelimitedByteArray());
  out.close();

  // add to meta.
  MetaEditor.addRegionToMeta(meta, hri);
  meta.close();
  return hri;
}
Project: c5    File: CatalogJanitor.java
/**
 * If the merged region no longer holds references to the regions it was merged from,
 * archive those merging regions on HDFS and delete the merge references from hbase:meta
 * @param mergedRegion
 * @param regionA
 * @param regionB
 * @return true if we delete references in merged region on hbase:meta and archive
 *         the files on the file system
 * @throws IOException
 */
boolean cleanMergeRegion(final HRegionInfo mergedRegion,
    final HRegionInfo regionA, final HRegionInfo regionB) throws IOException {
  FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
  Path rootdir = this.services.getMasterFileSystem().getRootDir();
  Path tabledir = FSUtils.getTableDir(rootdir, mergedRegion.getTable());
  HTableDescriptor htd = getTableDescriptor(mergedRegion.getTable());
  HRegionFileSystem regionFs = null;
  try {
    regionFs = HRegionFileSystem.openRegionFromFileSystem(
        this.services.getConfiguration(), fs, tabledir, mergedRegion, true);
  } catch (IOException e) {
    LOG.warn("Merged region does not exist: " + mergedRegion.getEncodedName());
  }
  if (regionFs == null || !regionFs.hasReferences(htd)) {
    LOG.debug("Deleting region " + regionA.getRegionNameAsString() + " and "
        + regionB.getRegionNameAsString()
        + " from fs because merged region no longer holds references");
    HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionA);
    HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionB);
    MetaEditor.deleteMergeQualifiers(server.getCatalogTracker(), mergedRegion);
    return true;
  }
  return false;
}
Project: c5    File: TakeSnapshotHandler.java
/**
 * Take a snapshot of the specified disabled region
 */
protected void snapshotDisabledRegion(final HRegionInfo regionInfo)
    throws IOException {
  // 2 copy the regionInfo files to the snapshot
  HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs,
    workingDir, regionInfo);

  // check for error for each region
  monitor.rethrowException();

  // 2 for each region, copy over its recovered.edits directory
  Path regionDir = HRegion.getRegionDir(rootDir, regionInfo);
  Path snapshotRegionDir = regionFs.getRegionDir();
  new CopyRecoveredEditsTask(snapshot, monitor, fs, regionDir, snapshotRegionDir).call();
  monitor.rethrowException();
  status.setStatus("Completed copying recovered edits for offline snapshot of table: "
      + snapshotTable);

  // 2 reference all the files in the region
  new ReferenceRegionHFilesTask(snapshot, monitor, regionDir, fs, snapshotRegionDir).call();
  monitor.rethrowException();
  status.setStatus("Completed referencing HFiles for offline snapshot of table: " +
      snapshotTable);
}
Project: c5    File: TestRestoreSnapshotHelper.java
private void createSnapshot(final Path rootDir, final Path snapshotDir, final HTableDescriptor htd)
    throws IOException {
  // First region, simple with one plain hfile.
  HRegionInfo hri = new HRegionInfo(htd.getTableName());
  HRegionFileSystem r0fs = HRegionFileSystem.createRegionOnFileSystem(conf,
    fs, FSUtils.getTableDir(archiveDir, hri.getTable()), hri);
  Path storeFile = new Path(rootDir, TEST_HFILE);
  fs.createNewFile(storeFile);
  r0fs.commitStoreFile(TEST_FAMILY, storeFile);

  // Second region, used to test the split case.
  // This region contains a reference to the hfile in the first region.
  hri = new HRegionInfo(htd.getTableName());
  HRegionFileSystem r1fs = HRegionFileSystem.createRegionOnFileSystem(conf,
    fs, FSUtils.getTableDir(archiveDir, hri.getTable()), hri);
  storeFile = new Path(rootDir, TEST_HFILE + '.' + r0fs.getRegionInfo().getEncodedName());
  fs.createNewFile(storeFile);
  r1fs.commitStoreFile(TEST_FAMILY, storeFile);

  Path tableDir = FSUtils.getTableDir(archiveDir, htd.getTableName());
  FileUtil.copy(fs, tableDir, fs, snapshotDir, false, conf);
}
Project: c5    File: OfflineMetaRebuildTestCore.java
protected HRegionInfo createRegion(Configuration conf, final HTable htbl,
    byte[] startKey, byte[] endKey) throws IOException {
  HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
  HTableDescriptor htd = htbl.getTableDescriptor();
  HRegionInfo hri = new HRegionInfo(htbl.getName(), startKey, endKey);

  LOG.info("manually adding regioninfo and hdfs data: " + hri.toString());
  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = rootDir.getFileSystem(conf);
  Path p = new Path(FSUtils.getTableDir(rootDir, htbl.getName()),
      hri.getEncodedName());
  fs.mkdirs(p);
  Path riPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE);
  FSDataOutputStream out = fs.create(riPath);
  out.write(hri.toDelimitedByteArray());
  out.close();

  // add to meta.
  MetaEditor.addRegionToMeta(meta, hri);
  meta.close();
  return hri;
}
Project: ditb    File: RestoreSnapshotHelper.java
/**
 * @return the regions contained in the table
 */
private List<HRegionInfo> getTableRegions() throws IOException {
  LOG.debug("get table regions: " + tableDir);
  FileStatus[] regionDirs = FSUtils.listStatus(fs, tableDir, new FSUtils.RegionDirFilter(fs));
  if (regionDirs == null) return null;

  List<HRegionInfo> regions = new ArrayList<HRegionInfo>(regionDirs.length);
  for (int i = 0; i < regionDirs.length; ++i) {
    HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDirs[i].getPath());
    regions.add(hri);
  }
  LOG.debug("found " + regions.size() + " regions for table=" +
      tableDesc.getTableName().getNameAsString());
  return regions;
}
Project: ditb    File: HBaseFsck.java
/**
 * Merge HDFS data by moving files from the contained HbckInfo's region directory
 * into targetRegionDir.
 * @return number of file move fixes done to merge regions.
 */
public int mergeRegionDirs(Path targetRegionDir, HbckInfo contained) throws IOException {
  int fileMoves = 0;
  String thread = Thread.currentThread().getName();
  LOG.debug("[" + thread + "] Contained region dir after close and pause");
  debugLsr(contained.getHdfsRegionDir());

  // rename the contained into the container.
  FileSystem fs = targetRegionDir.getFileSystem(getConf());
  FileStatus[] dirs = null;
  try {
    dirs = fs.listStatus(contained.getHdfsRegionDir());
  } catch (FileNotFoundException fnfe) {
    // region we are attempting to merge in is not present!  Since this is a merge, there is
    // no harm skipping this region if it does not exist.
    if (!fs.exists(contained.getHdfsRegionDir())) {
      LOG.warn("[" + thread + "] HDFS region dir " + contained.getHdfsRegionDir()
          + " is missing. Assuming already sidelined or moved.");
    } else {
      sidelineRegionDir(fs, contained);
    }
    return fileMoves;
  }

  if (dirs == null) {
    if (!fs.exists(contained.getHdfsRegionDir())) {
      LOG.warn("[" + thread + "] HDFS region dir " + contained.getHdfsRegionDir()
          + " already sidelined.");
    } else {
      sidelineRegionDir(fs, contained);
    }
    return fileMoves;
  }

  for (FileStatus cf : dirs) {
    Path src = cf.getPath();
    Path dst =  new Path(targetRegionDir, src.getName());

    if (src.getName().equals(HRegionFileSystem.REGION_INFO_FILE)) {
      // do not copy the old .regioninfo file.
      continue;
    }

    if (src.getName().equals(HConstants.HREGION_OLDLOGDIR_NAME)) {
      // do not copy the .oldlogs files
      continue;
    }

    LOG.info("[" + thread + "] Moving files from " + src + " into containing region " + dst);
    // FileSystem.rename is inconsistent with directories -- if the
    // dst (foo/a) exists and is a dir, and the src (foo/b) is a dir,
    // it moves the src into the dst dir resulting in (foo/a/b).  If
    // the dst does not exist, and the src a dir, src becomes dst. (foo/b)
    for (FileStatus hfile : fs.listStatus(src)) {
      boolean success = fs.rename(hfile.getPath(), dst);
      if (success) {
        fileMoves++;
      }
    }
    LOG.debug("[" + thread + "] Sideline directory contents:");
    debugLsr(targetRegionDir);
  }

  // if all success.
  sidelineRegionDir(fs, contained);
  LOG.info("[" + thread + "] Sidelined region dir "+ contained.getHdfsRegionDir() + " into " +
      getSidelineDir());
  debugLsr(contained.getHdfsRegionDir());

  return fileMoves;
}
Project: ditb    File: CompactionTool.java
private void compactRegion(final Path tableDir, final HTableDescriptor htd,
    final Path regionDir, final boolean compactOnce, final boolean major)
    throws IOException {
  HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
  for (Path familyDir: FSUtils.getFamilyDirs(fs, regionDir)) {
    compactStoreFiles(tableDir, htd, hri, familyDir.getName(), compactOnce, major);
  }
}
Project: ditb    File: CompactionTool.java
/**
 * Create a "mock" HStore that uses the user-specified tmpDir and treats the
 * store directory being compacted as its source.
 */
private static HStore getStore(final Configuration conf, final FileSystem fs,
    final Path tableDir, final HTableDescriptor htd, final HRegionInfo hri,
    final String familyName, final Path tempDir) throws IOException {
  HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, hri) {
    @Override
    public Path getTempDir() {
      return tempDir;
    }
  };
  HRegion region = new HRegion(regionFs, null, conf, htd, null);
  return new HStore(region, htd.getFamily(Bytes.toBytes(familyName)), conf);
}
Project: ditb    File: TestUpgradeTo96.java
/**
 * Creates a corrupt file and runs the HFileV1 detector tool
 * @throws Exception
 */
@Test
public void testHFileV1DetectorWithCorruptFiles() throws Exception {
  // add a corrupt file.
  Path tablePath = new Path(hbaseRootDir, "foo");
  FileStatus[] regionsDir = fs.listStatus(tablePath);
  if (regionsDir == null) throw new IOException("No Regions found for table " + "foo");
  Path columnFamilyDir = null;
  Path targetRegion = null;
  for (FileStatus s : regionsDir) {
    if (fs.exists(new Path(s.getPath(), HRegionFileSystem.REGION_INFO_FILE))) {
      targetRegion = s.getPath();
      break;
    }
  }
  FileStatus[] cfs = fs.listStatus(targetRegion);
  for (FileStatus f : cfs) {
    if (f.isDirectory()) {
      columnFamilyDir = f.getPath();
      break;
    }
  }
  LOG.debug("target columnFamilyDir: " + columnFamilyDir);
  // now insert a corrupt file in the columnfamily.
  Path corruptFile = new Path(columnFamilyDir, "corrupt_file");
  if (!fs.createNewFile(corruptFile)) throw new IOException("Couldn't create corrupt file: "
      + corruptFile);
  assertEquals(1, ToolRunner.run(TEST_UTIL.getConfiguration(), new HFileV1Detector(), null));
  // remove the corrupt file
  FileSystem.get(TEST_UTIL.getConfiguration()).delete(corruptFile, false);
}
Project: pbase    File: CatalogJanitor.java
/**
 * Checks if a daughter region -- either splitA or splitB -- still holds
 * references to parent.
 * @param parent Parent region
 * @param daughter Daughter region
 * @return A pair where the first boolean says whether or not the daughter
 * region directory exists in the filesystem and then the second boolean says
 * whether the daughter has references to the parent.
 * @throws IOException
 */
Pair<Boolean, Boolean> checkDaughterInFs(final HRegionInfo parent, final HRegionInfo daughter)
throws IOException {
  if (daughter == null)  {
    return new Pair<Boolean, Boolean>(Boolean.FALSE, Boolean.FALSE);
  }

  FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
  Path rootdir = this.services.getMasterFileSystem().getRootDir();
  Path tabledir = FSUtils.getTableDir(rootdir, daughter.getTable());

  HRegionFileSystem regionFs = null;
  try {
    regionFs = HRegionFileSystem.openRegionFromFileSystem(
        this.services.getConfiguration(), fs, tabledir, daughter, true);
  } catch (IOException e) {
    LOG.warn("Daughter region does not exist: " + daughter.getEncodedName()
      + ", parent is: " + parent.getEncodedName());
    return new Pair<Boolean, Boolean>(Boolean.FALSE, Boolean.FALSE);
  }

  boolean references = false;
  HTableDescriptor parentDescriptor = getTableDescriptor(parent.getTable());
  for (HColumnDescriptor family: parentDescriptor.getFamilies()) {
    if ((references = regionFs.hasReferences(family.getNameAsString()))) {
      break;
    }
  }
  return new Pair<Boolean, Boolean>(Boolean.TRUE, Boolean.valueOf(references));
}
Project: pbase    File: RestoreSnapshotHelper.java
/**
 * @return the regions contained in the table
 */
private List<HRegionInfo> getTableRegions() throws IOException {
  LOG.debug("get table regions: " + tableDir);
  FileStatus[] regionDirs = FSUtils.listStatus(fs, tableDir, new FSUtils.RegionDirFilter(fs));
  if (regionDirs == null) return null;

  List<HRegionInfo> regions = new LinkedList<HRegionInfo>();
  for (FileStatus regionDir: regionDirs) {
    HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir.getPath());
    regions.add(hri);
  }
  LOG.debug("found " + regions.size() + " regions for table=" +
      tableDesc.getTableName().getNameAsString());
  return regions;
}
Project: pbase    File: CompactionTool.java
private void compactRegion(final Path tableDir, final HTableDescriptor htd,
    final Path regionDir, final boolean compactOnce, final boolean major)
    throws IOException {
  HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
  for (Path familyDir: FSUtils.getFamilyDirs(fs, regionDir)) {
    compactStoreFiles(tableDir, htd, hri, familyDir.getName(), compactOnce, major);
  }
}
Project: pbase    File: CompactionTool.java
/**
 * Create a "mock" HStore that uses the user-specified tmpDir and treats the
 * store directory being compacted as its source.
 */
private static HStore getStore(final Configuration conf, final FileSystem fs,
    final Path tableDir, final HTableDescriptor htd, final HRegionInfo hri,
    final String familyName, final Path tempDir) throws IOException {
  HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, hri) {
    @Override
    public Path getTempDir() {
      return tempDir;
    }
  };
  HRegion region = new HRegion(regionFs, null, conf, htd, null);
  return new HStore(region, htd.getFamily(Bytes.toBytes(familyName)), conf);
}
Project: pbase    File: TestUpgradeTo96.java
/**
 * Creates a corrupt file and runs the HFileV1 detector tool
 * @throws Exception
 */
@Test
public void testHFileV1DetectorWithCorruptFiles() throws Exception {
  // add a corrupt file.
  Path tablePath = new Path(hbaseRootDir, "foo");
  FileStatus[] regionsDir = fs.listStatus(tablePath);
  if (regionsDir == null) throw new IOException("No Regions found for table " + "foo");
  Path columnFamilyDir = null;
  Path targetRegion = null;
  for (FileStatus s : regionsDir) {
    if (fs.exists(new Path(s.getPath(), HRegionFileSystem.REGION_INFO_FILE))) {
      targetRegion = s.getPath();
      break;
    }
  }
  FileStatus[] cfs = fs.listStatus(targetRegion);
  for (FileStatus f : cfs) {
    if (f.isDirectory()) {
      columnFamilyDir = f.getPath();
      break;
    }
  }
  LOG.debug("target columnFamilyDir: " + columnFamilyDir);
  // now insert a corrupt file in the columnfamily.
  Path corruptFile = new Path(columnFamilyDir, "corrupt_file");
  if (!fs.createNewFile(corruptFile)) throw new IOException("Couldn't create corrupt file: "
      + corruptFile);
  assertEquals(1, ToolRunner.run(TEST_UTIL.getConfiguration(), new HFileV1Detector(), null));
  // remove the corrupt file
  FileSystem.get(TEST_UTIL.getConfiguration()).delete(corruptFile, false);
}
Project: pbase    File: TestHFileArchiving.java
private List<String> getRegionStoreFiles(final HRegion region) throws IOException {
  Path regionDir = region.getRegionFileSystem().getRegionDir();
  FileSystem fs = region.getRegionFileSystem().getFileSystem();
  List<String> storeFiles = getAllFileNames(fs, regionDir);
  // remove all the non-storefile named files for the region
  for (int i = 0; i < storeFiles.size(); i++) {
    String file = storeFiles.get(i);
    if (file.contains(HRegionFileSystem.REGION_INFO_FILE) || file.contains("wal")) {
      storeFiles.remove(i--);
    }
  }
  storeFiles.remove(HRegionFileSystem.REGION_INFO_FILE);
  return storeFiles;
}
Project: pbase    File: SnapshotTestingUtils.java
private RegionData[] createTable(final HTableDescriptor htd, final int nregions)
    throws IOException {
  Path tableDir = FSUtils.getTableDir(rootDir, htd.getTableName());
  new FSTableDescriptors(conf).createTableDescriptorForTableDirectory(tableDir, htd, false);

  assertTrue(nregions % 2 == 0);
  RegionData[] regions = new RegionData[nregions];
  for (int i = 0; i < regions.length; i += 2) {
    byte[] startKey = Bytes.toBytes(0 + i * 2);
    byte[] endKey = Bytes.toBytes(1 + i * 2);

    // First region, simple with one plain hfile.
    HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKey, endKey);
    HRegionFileSystem rfs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, hri);
    regions[i] = new RegionData(tableDir, hri, 3);
    for (int j = 0; j < regions[i].files.length; ++j) {
      Path storeFile = createStoreFile(rfs.createTempName());
      regions[i].files[j] = rfs.commitStoreFile(TEST_FAMILY, storeFile);
    }

    // Second region, used to test the split case.
    // This region contains a reference to the hfile in the first region.
    startKey = Bytes.toBytes(2 + i * 2);
    endKey = Bytes.toBytes(3 + i * 2);
    hri = new HRegionInfo(htd.getTableName());
    rfs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, hri);
    regions[i+1] = new RegionData(tableDir, hri, regions[i].files.length);
    for (int j = 0; j < regions[i].files.length; ++j) {
      String refName = regions[i].files[j].getName() + '.' + regions[i].hri.getEncodedName();
      Path refFile = createStoreFile(new Path(rootDir, refName));
      regions[i+1].files[j] = rfs.commitStoreFile(TEST_FAMILY, refFile);
    }
  }
  return regions;
}