Java class org.apache.hadoop.hbase.util.FSUtils example source code

Project: ditb    File: TestNamespace.java
@Ignore @Test
public void testNamespaceJanitor() throws Exception {
  FileSystem fs = TEST_UTIL.getTestFileSystem();

  int fsCount = fs.listStatus(new Path(FSUtils.getRootDir(TEST_UTIL.getConfiguration()),
      HConstants.BASE_NAMESPACE_DIR)).length;
  Path fakeNSPath =
      FSUtils.getNamespaceDir(FSUtils.getRootDir(TEST_UTIL.getConfiguration()), "foo");
  assertTrue(fs.mkdirs(fakeNSPath));

  String fakeZnode = ZKUtil.joinZNode(ZooKeeperWatcher.namespaceZNode, "foo");
  int zkCount = ZKUtil.listChildrenNoWatch(TEST_UTIL.getZooKeeperWatcher(),
      ZooKeeperWatcher.namespaceZNode).size();
  ZKUtil.createWithParents(TEST_UTIL.getZooKeeperWatcher(), fakeZnode);
  Thread.sleep(10000);

  // verify the namespace count is the same and the orphan is removed
  assertFalse(fs.exists(fakeNSPath));
  assertEquals(fsCount, fs.listStatus(new Path(FSUtils.getRootDir(TEST_UTIL.getConfiguration()),
          HConstants.BASE_NAMESPACE_DIR)).length);

  assertEquals(-1, ZKUtil.checkExists(TEST_UTIL.getZooKeeperWatcher(), fakeZnode));
  assertEquals(zkCount,
      ZKUtil.listChildrenNoWatch(TEST_UTIL.getZooKeeperWatcher(),
          ZooKeeperWatcher.namespaceZNode).size());
}
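
For reference, a minimal sketch of the two FSUtils lookups the test above leans on: resolving the cluster root directory from configuration and deriving a namespace directory beneath it. The configuration source and the "demo_ns" namespace name are assumptions for illustration, not taken from the ditb code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.FSUtils;

public class NamespaceDirSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Fully-qualified value of hbase.rootdir, e.g. hdfs://nn:8020/hbase
    Path rootDir = FSUtils.getRootDir(conf);
    // Namespace dir under the root (rootdir/data/demo_ns in the 0.96+ layout)
    Path nsDir = FSUtils.getNamespaceDir(rootDir, "demo_ns");
    FileSystem fs = rootDir.getFileSystem(conf);
    System.out.println(nsDir + " exists: " + fs.exists(nsDir));
  }
}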
Project: ditb    File: UpgradeTo96.java
/**
 * Performs log splitting for all regionserver directories.
 * @throws Exception
 */
private void doOfflineLogSplitting() throws Exception {
  LOG.info("Starting Log splitting");
  final Path rootDir = FSUtils.getRootDir(getConf());
  final Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
  // since this is the singleton, we needn't close it.
  final WALFactory factory = WALFactory.getInstance(getConf());
  FileSystem fs = FSUtils.getCurrentFileSystem(getConf());
  Path logDir = new Path(rootDir, HConstants.HREGION_LOGDIR_NAME);
  FileStatus[] regionServerLogDirs = FSUtils.listStatus(fs, logDir);
  if (regionServerLogDirs == null || regionServerLogDirs.length == 0) {
    LOG.info("No log directories to split, returning");
    return;
  }
  try {
    for (FileStatus regionServerLogDir : regionServerLogDirs) {
      // split its log dir, if it exists
      WALSplitter.split(rootDir, regionServerLogDir.getPath(), oldLogDir, fs, getConf(), factory);
    }
    LOG.info("Successfully completed Log splitting");
  } catch (Exception e) {
    LOG.error("Got exception while doing Log splitting ", e);
    throw e;
  }
}
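
Note the null check above: unlike FileSystem.listStatus, FSUtils.listStatus returns null for a missing or empty directory instead of throwing FileNotFoundException. A hedged sketch of that contract, assuming only a configured root dir:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.FSUtils;

public class ListStatusSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FSUtils.getCurrentFileSystem(conf);
    Path logDir = new Path(FSUtils.getRootDir(conf), HConstants.HREGION_LOGDIR_NAME);
    FileStatus[] dirs = FSUtils.listStatus(fs, logDir);
    if (dirs == null || dirs.length == 0) {
      // null stands for "missing or empty" -- no exception to catch
      System.out.println("No log directories to split");
      return;
    }
    for (FileStatus dir : dirs) {
      System.out.println("would split: " + dir.getPath());
    }
  }
}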
Project: ditb    File: RegionStates.java
/**
 * This method does an RPC to hbase:meta. Do not call this method with a lock/synchronize held.
 * @param hris The regions to check; any with no entry in hbase:meta are cleaned up.
 */
private void cleanIfNoMetaEntry(Set<HRegionInfo> hris) {
  if (hris.isEmpty()) return;
  for (HRegionInfo hri: hris) {
    try {
      // This is RPC to meta table. It is done while we have a synchronize on
      // regionstates. No progress will be made if meta is not available at this time.
      // This is a cleanup task. Not critical.
      if (MetaTableAccessor.getRegion(server.getConnection(), hri.getEncodedNameAsBytes()) ==
          null) {
        regionOffline(hri);
        FSUtils.deleteRegionDir(server.getConfiguration(), hri);
      }
    } catch (IOException e) {
      LOG.warn("Got exception while deleting " + hri + " directories from file system.", e);
    }
  }
}
Project: ditb    File: CatalogJanitor.java
/**
 * If the merged region no longer holds references to the regions it was merged from,
 * archive those parent regions on HDFS and delete the merge references from hbase:meta
 * @param mergedRegion
 * @param regionA
 * @param regionB
 * @return true if we deleted the references in hbase:meta and archived
 *         the parent regions' files on the file system
 * @throws IOException
 */
boolean cleanMergeRegion(final HRegionInfo mergedRegion,
    final HRegionInfo regionA, final HRegionInfo regionB) throws IOException {
  FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
  Path rootdir = this.services.getMasterFileSystem().getRootDir();
  Path tabledir = FSUtils.getTableDir(rootdir, mergedRegion.getTable());
  HTableDescriptor htd = getTableDescriptor(mergedRegion.getTable());
  HRegionFileSystem regionFs = null;
  try {
    regionFs = HRegionFileSystem.openRegionFromFileSystem(
        this.services.getConfiguration(), fs, tabledir, mergedRegion, true);
  } catch (IOException e) {
    LOG.warn("Merged region does not exist: " + mergedRegion.getEncodedName());
  }
  if (regionFs == null || !regionFs.hasReferences(htd)) {
    LOG.debug("Deleting region " + regionA.getRegionNameAsString() + " and "
        + regionB.getRegionNameAsString()
        + " from fs because merged region no longer holds references");
    HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionA);
    HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionB);
    MetaTableAccessor.deleteMergeQualifiers(server.getConnection(),
      mergedRegion);
    return true;
  }
  return false;
}
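
The tabledir lookup above is worth isolating: FSUtils.getTableDir maps a root directory plus a TableName to the table's on-disk directory. A small sketch; the "my_ns:my_table" name is hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.FSUtils;

public class TableDirSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Path rootDir = FSUtils.getRootDir(conf);
    // Resolves to rootdir/data/my_ns/my_table in the namespaced layout
    Path tableDir = FSUtils.getTableDir(rootDir, TableName.valueOf("my_ns:my_table"));
    System.out.println("table dir: " + tableDir);
  }
}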
Project: ditb    File: MasterFileSystem.java
public MasterFileSystem(Server master, MasterServices services)
throws IOException {
  this.conf = master.getConfiguration();
  this.master = master;
  this.services = services;
  // Set filesystem to be that of this.rootdir else we get complaints about
  // mismatched filesystems if hbase.rootdir is hdfs and fs.defaultFS is
  // default localfs.  Presumption is that rootdir is fully-qualified before
  // we get to here with appropriate fs scheme.
  this.rootdir = FSUtils.getRootDir(conf);
  this.tempdir = new Path(this.rootdir, HConstants.HBASE_TEMP_DIRECTORY);
  // Cover both bases, the old way of setting default fs and the new.
  // We're supposed to run on 0.20 and 0.21 anyway.
  this.fs = this.rootdir.getFileSystem(conf);
  FSUtils.setFsDefault(conf, new Path(this.fs.getUri()));
  // make sure the fs has the same conf
  fs.setConf(conf);
  // setup the filesystem variable
  // set up the archived logs path
  this.oldLogDir = createInitialFileSystemLayout();
  HFileSystem.addLocationsOrderInterceptor(conf);
  this.splitLogManager =
      new SplitLogManager(master, master.getConfiguration(), master, services,
          master.getServerName());
  this.distributedLogReplay = this.splitLogManager.isLogReplaying();
}
Project: ditb    File: OfflineMetaRebuildTestCore.java
protected HRegionInfo createRegion(Configuration conf, final Table htbl,
    byte[] startKey, byte[] endKey) throws IOException {
  Table meta = new HTable(conf, TableName.META_TABLE_NAME);
  HTableDescriptor htd = htbl.getTableDescriptor();
  HRegionInfo hri = new HRegionInfo(htbl.getName(), startKey, endKey);

  LOG.info("manually adding regioninfo and hdfs data: " + hri.toString());
  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = rootDir.getFileSystem(conf);
  Path p = new Path(FSUtils.getTableDir(rootDir, htbl.getName()),
      hri.getEncodedName());
  fs.mkdirs(p);
  Path riPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE);
  FSDataOutputStream out = fs.create(riPath);
  out.write(hri.toDelimitedByteArray());
  out.close();

  // add to meta.
  MetaTableAccessor.addRegionToMeta(meta, hri);
  meta.close();
  return hri;
}
Project: ditb    File: TestUpgradeTo96.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // Start up the mini cluster on top of an 0.94 root.dir that has data from
  // a 0.94 hbase run and see if we can migrate to 0.96
  TEST_UTIL.startMiniZKCluster();
  TEST_UTIL.startMiniDFSCluster(1);

  hbaseRootDir = TEST_UTIL.getDefaultRootDirPath();
  fs = FileSystem.get(TEST_UTIL.getConfiguration());
  FSUtils.setRootDir(TEST_UTIL.getConfiguration(), hbaseRootDir);
  zkw = TEST_UTIL.getZooKeeperWatcher();

  Path testdir = TEST_UTIL.getDataTestDir("TestUpgradeTo96");
  // get the untarred 0.94 file structure

  set94FSLayout(testdir);
  setUp94Znodes();
}
Project: ditb    File: MasterFileSystem.java
public void deleteFamilyFromFS(HRegionInfo region, byte[] familyName)
    throws IOException {
  // archive family store files
  Path tableDir = FSUtils.getTableDir(rootdir, region.getTable());
  HFileArchiver.archiveFamily(fs, conf, region, tableDir, familyName);

  // delete the family folder
  Path familyDir = new Path(tableDir,
    new Path(region.getEncodedName(), Bytes.toString(familyName)));
  if (!fs.delete(familyDir, true)) {
    if (fs.exists(familyDir)) {
      throw new IOException("Could not delete family "
          + Bytes.toString(familyName) + " from FileSystem for region "
          + region.getRegionNameAsString() + "(" + region.getEncodedName()
          + ")");
    }
  }
}
Project: ditb    File: SplitLogManager.java
/**
 * Get a list of paths that need to be split given a set of server-specific directories and
 * optionally a filter.
 *
 * See {@link DefaultWALProvider#getServerNameFromWALDirectoryName} for more info on directory
 * layout.
 *
 * Should be package-private, but is needed by
 * {@link org.apache.hadoop.hbase.wal.WALSplitter#split(Path, Path, Path, FileSystem,
 *     Configuration, WALFactory)} for tests.
 */
@VisibleForTesting
public static FileStatus[] getFileList(final Configuration conf, final List<Path> logDirs,
    final PathFilter filter)
    throws IOException {
  List<FileStatus> fileStatus = new ArrayList<FileStatus>();
  for (Path logDir : logDirs) {
    final FileSystem fs = logDir.getFileSystem(conf);
    if (!fs.exists(logDir)) {
      LOG.warn(logDir + " doesn't exist. Nothing to do!");
      continue;
    }
    FileStatus[] logfiles = FSUtils.listStatus(fs, logDir, filter);
    if (logfiles == null || logfiles.length == 0) {
      LOG.info(logDir + " is an empty dir, no logs to split");
    } else {
      Collections.addAll(fileStatus, logfiles);
    }
  }
  FileStatus[] a = new FileStatus[fileStatus.size()];
  return fileStatus.toArray(a);
}
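
getFileList threads an optional PathFilter through to FSUtils.listStatus. A minimal sketch of the same pattern with a made-up filter that skips ".tmp" files; only the FSUtils and Hadoop calls are real, the filter criterion is an assumption.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.FSUtils;

public class FilteredListingSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Path logDir = new Path(FSUtils.getRootDir(conf), HConstants.HREGION_LOGDIR_NAME);
    FileSystem fs = logDir.getFileSystem(conf);
    // Hypothetical filter: ignore in-flight temporary files
    PathFilter noTmp = new PathFilter() {
      @Override
      public boolean accept(Path p) {
        return !p.getName().endsWith(".tmp");
      }
    };
    List<FileStatus> found = new ArrayList<FileStatus>();
    FileStatus[] logfiles = FSUtils.listStatus(fs, logDir, noTmp);
    if (logfiles != null) {
      Collections.addAll(found, logfiles);
    }
    System.out.println(found.size() + " candidate files");
  }
}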
Project: ditb    File: IOTestProvider.java
/**
 * @param factory factory that made us, identity used for FS layout. may not be null
 * @param conf may not be null
 * @param listeners may be null
 * @param providerId differentiate between providers from one factory, used for FS layout. may be
 *                   null
 */
@Override
public void init(final WALFactory factory, final Configuration conf,
    final List<WALActionsListener> listeners, String providerId) throws IOException {
  if (null != log) {
    throw new IllegalStateException("WALProvider.init should only be called once.");
  }
  if (null == providerId) {
    providerId = DEFAULT_PROVIDER_ID;
  }
  final String logPrefix = factory.factoryId + WAL_FILE_NAME_DELIMITER + providerId;
  log = new IOTestWAL(FileSystem.get(conf), FSUtils.getRootDir(conf),
      DefaultWALProvider.getWALDirectoryName(factory.factoryId),
      HConstants.HREGION_OLDLOGDIR_NAME, conf, listeners,
      true, logPrefix, META_WAL_PROVIDER_ID.equals(providerId) ? META_WAL_PROVIDER_ID : null);
}
Project: ditb    File: SnapshotLogCleaner.java
/**
 * This method should only be called <b>once</b>, as it starts a thread to keep the cache
 * up-to-date.
 * <p>
 * {@inheritDoc}
 */
@Override
public void setConf(Configuration conf) {
  super.setConf(conf);
  try {
    long cacheRefreshPeriod = conf.getLong(
      WAL_CACHE_REFRESH_PERIOD_CONF_KEY, DEFAULT_WAL_CACHE_REFRESH_PERIOD);
    final FileSystem fs = FSUtils.getCurrentFileSystem(conf);
    Path rootDir = FSUtils.getRootDir(conf);
    cache = new SnapshotFileCache(fs, rootDir, cacheRefreshPeriod, cacheRefreshPeriod,
        "snapshot-log-cleaner-cache-refresher", new SnapshotFileCache.SnapshotFileInspector() {
          public Collection<String> filesUnderSnapshot(final Path snapshotDir)
              throws IOException {
            return SnapshotReferenceUtil.getWALNames(fs, snapshotDir);
          }
        });
  } catch (IOException e) {
    LOG.error("Failed to create snapshot log cleaner", e);
  }
}
Project: ditb    File: TestCatalogJanitor.java
/**
 * @param services Master services instance.
 * @param htd
 * @param parent
 * @param daughter
 * @param midkey
 * @param top True if we are to write a 'top' reference.
 * @return Path to reference we created.
 * @throws IOException
 */
private Path createReferences(final MasterServices services,
    final HTableDescriptor htd, final HRegionInfo parent,
    final HRegionInfo daughter, final byte [] midkey, final boolean top)
throws IOException {
  Path rootdir = services.getMasterFileSystem().getRootDir();
  Path tabledir = FSUtils.getTableDir(rootdir, parent.getTable());
  Path storedir = HStore.getStoreHomedir(tabledir, daughter,
    htd.getColumnFamilies()[0].getName());
  Reference ref =
    top? Reference.createTopReference(midkey): Reference.createBottomReference(midkey);
  long now = System.currentTimeMillis();
  // Reference name has this format: StoreFile#REF_NAME_PARSER
  Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
  FileSystem fs = services.getMasterFileSystem().getFileSystem();
  ref.write(fs, p);
  return p;
}
Project: ditb    File: HFileLink.java
/**
 * Get the full path of the HFile referenced by the back reference
 *
 * @param rootDir root hbase directory
 * @param linkRefPath Link Back Reference path
 * @return full path of the referenced hfile
 */
public static Path getHFileFromBackReference(final Path rootDir, final Path linkRefPath) {
  Pair<TableName, String> p = parseBackReferenceName(linkRefPath.getName());
  TableName linkTableName = p.getFirst();
  String linkRegionName = p.getSecond();

  String hfileName = getBackReferenceFileName(linkRefPath.getParent());
  Path familyPath = linkRefPath.getParent().getParent();
  Path regionPath = familyPath.getParent();
  Path tablePath = regionPath.getParent();

  String linkName = createHFileLinkName(FSUtils.getTableName(tablePath),
          regionPath.getName(), hfileName);
  Path linkTableDir = FSUtils.getTableDir(rootDir, linkTableName);
  Path regionDir = HRegion.getRegionDir(linkTableDir, linkRegionName);
  return new Path(new Path(regionDir, familyPath.getName()), linkName);
}
Project: ditb    File: TableSnapshotInputFormatImpl.java
public void initialize(InputSplit split, Configuration conf) throws IOException {
  this.scan = TableMapReduceUtil.convertStringToScan(split.getScan());
  this.split = split;
  HTableDescriptor htd = split.htd;
  HRegionInfo hri = this.split.getRegionInfo();
  FileSystem fs = FSUtils.getCurrentFileSystem(conf);


  // region is immutable, this should be fine,
  // otherwise we have to set the thread read point
  scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
  // disable caching of data blocks
  scan.setCacheBlocks(false);

  scanner =
      new ClientSideRegionScanner(conf, fs, new Path(split.restoreDir), htd, hri, scan, null);
}
Project: ditb    File: TableSnapshotInputFormatImpl.java
public static List<InputSplit> getSplits(Configuration conf) throws IOException {
  String snapshotName = getSnapshotName(conf);

  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = rootDir.getFileSystem(conf);

  SnapshotManifest manifest = getSnapshotManifest(conf, snapshotName, rootDir, fs);

  List<HRegionInfo> regionInfos = getRegionInfosFromManifest(manifest);

  // TODO: mapred does not support scan as input API. Work around for now.
  Scan scan = extractScanFromConf(conf);
  // the temp dir where the snapshot is restored
  Path restoreDir = new Path(conf.get(RESTORE_DIR_KEY));

  return getSplits(scan, manifest, regionInfos, restoreDir, conf);
}
Project: ditb    File: LongTermArchivingHFileCleaner.java
@Override
public boolean isFileDeletable(FileStatus fStat) {
  try {
    // if it's a directory, then it can be deleted
    if (fStat.isDirectory()) return true;

    Path file = fStat.getPath();
    // check to see if the file still exists
    FileStatus[] deleteStatus = FSUtils.listStatus(this.fs, file, null);
    // if the file doesn't exist, then it can be deleted (but should never
    // happen since deleted files shouldn't get passed in)
    if (deleteStatus == null) return true;

    // otherwise, we need to check the file's table and see if it's being archived
    Path family = file.getParent();
    Path region = family.getParent();
    Path table = region.getParent();

    String tableName = table.getName();
    boolean ret = !archiveTracker.keepHFiles(tableName);
    LOG.debug("Archiver says to [" + (ret ? "delete" : "keep") + "] files for table:" + tableName);
    return ret;
  } catch (IOException e) {
    LOG.error("Failed to lookup status of:" + fStat.getPath() + ", keeping it just incase.", e);
    return false;
  }
}
Project: ditb    File: HFileArchiver.java
/**
 * Remove from the specified region the store files of the specified column family,
 * either by archiving them or outright deletion
 * @param fs the filesystem where the store files live
 * @param conf {@link Configuration} to examine to determine the archive directory
 * @param parent Parent region hosting the store files
 * @param tableDir {@link Path} to where the table is being stored (for building the archive path)
 * @param family the family hosting the store files
 * @throws IOException if the files could not be correctly disposed.
 */
public static void archiveFamily(FileSystem fs, Configuration conf,
    HRegionInfo parent, Path tableDir, byte[] family) throws IOException {
  Path familyDir = new Path(tableDir, new Path(parent.getEncodedName(), Bytes.toString(family)));
  FileStatus[] storeFiles = FSUtils.listStatus(fs, familyDir);
  if (storeFiles == null) {
    LOG.debug("No store files to dispose for region=" + parent.getRegionNameAsString() +
        ", family=" + Bytes.toString(family));
    return;
  }

  FileStatusConverter getAsFile = new FileStatusConverter(fs);
  Collection<File> toArchive = Lists.transform(Arrays.asList(storeFiles), getAsFile);
  Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, parent, tableDir, family);

  // do the actual archive
  if (!resolveAndArchive(fs, storeArchiveDir, toArchive)) {
    throw new IOException("Failed to archive/delete all the files for region:"
        + Bytes.toString(parent.getRegionName()) + ", family:" + Bytes.toString(family)
        + " into " + storeArchiveDir + ". Something is probably awry on the filesystem.");
  }
}
Project: ditb    File: TestFSHLog.java
/**
 * A loaded WAL coprocessor won't break existing WAL test cases.
 */
@Test
public void testWALCoprocessorLoaded() throws Exception {
  // test to see whether the coprocessor is loaded or not.
  FSHLog log = null;
  try {
    log = new FSHLog(fs, FSUtils.getRootDir(conf), dir.toString(),
        HConstants.HREGION_OLDLOGDIR_NAME, conf, null, true, null, null);
    WALCoprocessorHost host = log.getCoprocessorHost();
    Coprocessor c = host.findCoprocessor(SampleRegionWALObserver.class.getName());
    assertNotNull(c);
  } finally {
    if (log != null) {
      log.close();
    }
  }
}
Project: ditb    File: TestZooKeeperTableArchiveClient.java
/**
 * Get all the files (non-directory entries) in the file system under the passed directory
 * @param dir directory to investigate
 * @return all files under the directory
 */
private List<Path> getAllFiles(FileSystem fs, Path dir) throws IOException {
  FileStatus[] files = FSUtils.listStatus(fs, dir, null);
  if (files == null) {
    LOG.warn("No files under:" + dir);
    return null;
  }

  List<Path> allFiles = new ArrayList<Path>();
  for (FileStatus file : files) {
    if (file.isDirectory()) {
      List<Path> subFiles = getAllFiles(fs, file.getPath());
      if (subFiles != null) allFiles.addAll(subFiles);
      continue;
    }
    allFiles.add(file.getPath());
  }
  return allFiles;
}
Project: ditb    File: TestClusterId.java
@Test
public void testRewritingClusterIdToPB() throws Exception {
  TEST_UTIL.startMiniZKCluster();
  TEST_UTIL.startMiniDFSCluster(1);
  TEST_UTIL.createRootDir();
  TEST_UTIL.getConfiguration().setBoolean("hbase.replication", true);
  Path rootDir = FSUtils.getRootDir(TEST_UTIL.getConfiguration());
  FileSystem fs = rootDir.getFileSystem(TEST_UTIL.getConfiguration());
  Path filePath = new Path(rootDir, HConstants.CLUSTER_ID_FILE_NAME);
  FSDataOutputStream s = null;
  try {
    s = fs.create(filePath);
    s.writeUTF(UUID.randomUUID().toString());
  } finally {
    if (s != null) {
      s.close();
    }
  }
  TEST_UTIL.startMiniHBaseCluster(1, 1);
  HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
  assertEquals(1, master.getServerManager().getOnlineServersList().size());
}
Project: ditb    File: CompactionTool.java
/**
 * Return the top hosts of the store files, used by the Split.
 */
private static String[] getStoreDirHosts(final FileSystem fs, final Path path)
    throws IOException {
  FileStatus[] files = FSUtils.listStatus(fs, path);
  if (files == null) {
    return new String[] {};
  }

  HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
  for (FileStatus hfileStatus: files) {
    HDFSBlocksDistribution storeFileBlocksDistribution =
      FSUtils.computeHDFSBlocksDistribution(fs, hfileStatus, 0, hfileStatus.getLen());
    hdfsBlocksDistribution.add(storeFileBlocksDistribution);
  }

  List<String> hosts = hdfsBlocksDistribution.getTopHosts();
  return hosts.toArray(new String[hosts.size()]);
}
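
CompactionTool sums block locality over every file in a store directory; the same FSUtils.computeHDFSBlocksDistribution call works for a single file. A sketch under the assumption that args[0] names an existing file on the configured filesystem:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.util.FSUtils;

public class LocalitySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Path file = new Path(args[0]); // an existing HFile path (assumed)
    FileSystem fs = file.getFileSystem(conf);
    FileStatus status = fs.getFileStatus(file);
    // Whole file: offset 0, length = file length
    HDFSBlocksDistribution dist =
        FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
    System.out.println("top hosts: " + dist.getTopHosts());
  }
}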
Project: ditb    File: IntegrationTestZKAndFSPermissions.java
private void testFSPerms() throws IOException {
  Path rootDir = FSUtils.getRootDir(conf);

  LOG.info("");
  LOG.info("***********************************************************************************");
  LOG.info("Checking FS permissions for root dir:" + rootDir);
  LOG.info("***********************************************************************************");
  LOG.info("");
  FileSystem fs = rootDir.getFileSystem(conf);

  short expectedPerms = Short.valueOf(fsPerms, 8);

  assertEquals(
    FsPermission.createImmutable(expectedPerms),
    fs.getFileStatus(rootDir).getPermission());

  LOG.info("Checking FS permissions: SUCCESS");
}
Project: ditb    File: HRegionFileSystem.java
/**
 * Clean up any split detritus that may have been left around from previous split attempts. Call
 * this method on initial region deploy.
 *
 * @throws IOException
 */
void cleanupAnySplitDetritus() throws IOException {
  Path splitdir = this.getSplitsDir();
  if (!fs.exists(splitdir)) return;
  // Look at the splitdir. It could have the encoded names of the daughter
  // regions we tried to make. See if the daughter regions actually got made
  // out under the tabledir. If here under splitdir still, then the split did
  // not complete. Try and do cleanup. This code WILL NOT catch the case
  // where we successfully created daughter a but regionserver crashed during
  // the creation of region b. In this case, there'll be an orphan daughter
  // dir in the filesystem. TODO: Fix.
  FileStatus[] daughters = FSUtils.listStatus(fs, splitdir, new FSUtils.DirFilter(fs));
  if (daughters != null) {
    for (FileStatus daughter : daughters) {
      Path daughterDir = new Path(getTableDir(), daughter.getPath().getName());
      if (fs.exists(daughterDir) && !deleteDir(daughterDir)) {
        throw new IOException("Failed delete of " + daughterDir);
      }
    }
  }
  cleanupSplitsDir();
  LOG.info("Cleaned up old failed split transaction detritus: " + splitdir);
}
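
The FSUtils.DirFilter passed to listStatus above restricts the result to directories. A minimal sketch of the same idiom against the root directory; nothing beyond the calls shown on this page is assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.FSUtils;

public class DirFilterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Path parent = FSUtils.getRootDir(conf);
    FileSystem fs = parent.getFileSystem(conf);
    // Only subdirectories survive the DirFilter; plain files are skipped
    FileStatus[] subdirs = FSUtils.listStatus(fs, parent, new FSUtils.DirFilter(fs));
    if (subdirs != null) {
      for (FileStatus d : subdirs) {
        System.out.println("dir: " + d.getPath().getName());
      }
    }
  }
}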
Project: ditb    File: HRegionFileSystem.java
/**
 * Remove the region from the table directory, archiving the region's hfiles.
 *
 * @param conf       the {@link Configuration} to use
 * @param fs         {@link FileSystem} from which to remove the region
 * @param tableDir   {@link Path} to where the table is being stored
 * @param regionInfo {@link HRegionInfo} for region to be deleted
 * @throws IOException if the request cannot be completed
 */
public static void deleteRegionFromFileSystem(final Configuration conf, final FileSystem fs,
    final Path tableDir, final HRegionInfo regionInfo) throws IOException {
  HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
  Path regionDir = regionFs.getRegionDir();

  if (!fs.exists(regionDir)) {
    LOG.warn("Trying to delete a region that do not exists on disk: " + regionDir);
    return;
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug("DELETING region " + regionDir);
  }

  // Archive region
  Path rootDir = FSUtils.getRootDir(conf);
  HFileArchiver.archiveRegion(fs, rootDir, tableDir, regionDir);

  // Delete empty region dir
  if (!fs.delete(regionDir, true)) {
    LOG.warn("Failed delete of " + regionDir);
  }
}
Project: ditb    File: ReaderBase.java
@Override
public void init(FileSystem fs, Path path, Configuration conf, FSDataInputStream stream)
    throws IOException {
  this.conf = conf;
  this.path = path;
  this.fs = fs;
  this.fileLength = this.fs.getFileStatus(path).getLen();
  String cellCodecClsName = initReader(stream);

  boolean compression = hasCompression();
  if (compression) {
    // If compression is enabled, new dictionaries are created here.
    try {
      if (compressionContext == null) {
        compressionContext = new CompressionContext(LRUDictionary.class,
            FSUtils.isRecoveredEdits(path), hasTagCompression());
      } else {
        compressionContext.clear();
      }
    } catch (Exception e) {
      throw new IOException("Failed to initialize CompressionContext", e);
    }
  }
  initAfterCompression(cellCodecClsName);
}
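
FSUtils.isRecoveredEdits decides from the path alone whether a WAL sits under a region's recovered.edits directory, which changes how the compression dictionary above is built. A quick illustration; both paths are invented:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.util.FSUtils;

public class RecoveredEditsSketch {
  public static void main(String[] args) {
    Path normalWal = new Path("/hbase/WALs/rs1,16020,1/rs1%2C16020%2C1.default.123");
    Path recovered = new Path("/hbase/data/default/t1/abc123/recovered.edits/0000042");
    System.out.println(FSUtils.isRecoveredEdits(normalWal)); // false
    System.out.println(FSUtils.isRecoveredEdits(recovered)); // true
  }
}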
Project: ditb    File: TestGenerateDelegationToken.java
@BeforeClass
public static void setUp() throws Exception {
  Properties conf = MiniKdc.createConf();
  conf.put(MiniKdc.DEBUG, true);
  KDC = new MiniKdc(conf, new File(TEST_UTIL.getDataTestDir("kdc").toUri().getPath()));
  KDC.start();
  USERNAME = UserGroupInformation.getLoginUser().getShortUserName();
  PRINCIPAL = USERNAME + "/" + HOST;
  HTTP_PRINCIPAL = "HTTP/" + HOST;
  KDC.createPrincipal(KEYTAB_FILE, PRINCIPAL, HTTP_PRINCIPAL);
  TEST_UTIL.startMiniZKCluster();

  HBaseKerberosUtils.setKeytabFileForTesting(KEYTAB_FILE.getAbsolutePath());
  HBaseKerberosUtils.setPrincipalForTesting(PRINCIPAL + "@" + KDC.getRealm());
  HBaseKerberosUtils.setSecuredConfiguration(TEST_UTIL.getConfiguration());
  setHdfsSecuredConfiguration(TEST_UTIL.getConfiguration());
  UserGroupInformation.setConfiguration(TEST_UTIL.getConfiguration());
  TEST_UTIL.getConfiguration().setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
    TokenProvider.class.getName());
  TEST_UTIL.startMiniDFSCluster(1);
  Path rootdir = TEST_UTIL.getDataTestDirOnTestFS("TestGenerateDelegationToken");
  FSUtils.setRootDir(TEST_UTIL.getConfiguration(), rootdir);
  CLUSTER = new LocalHBaseCluster(TEST_UTIL.getConfiguration(), 1);
  CLUSTER.startup();
}
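
FSUtils.setRootDir, used twice in the tests on this page, is the write-side counterpart of getRootDir: it stores hbase.rootdir in the configuration. A minimal sketch with an assumed local path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.FSUtils;

public class SetRootDirSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FSUtils.setRootDir(conf, new Path("file:///tmp/hbase-sketch")); // assumed dir
    System.out.println("hbase.rootdir = " + FSUtils.getRootDir(conf));
  }
}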
Project: ditb    File: TestZooKeeperTableArchiveClient.java
@After
public void tearDown() throws Exception {
  try {
    FileSystem fs = UTIL.getTestFileSystem();
    // cleanup each of the files/directories registered
    for (Path file : toCleanup) {
    // remove the table and archive directories
      FSUtils.delete(fs, file, true);
    }
  } catch (IOException e) {
    LOG.warn("Failure to delete archive directory", e);
  } finally {
    toCleanup.clear();
  }
  // make sure that backups are off for all tables
  archivingClient.disableHFileBackup();
}
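
The teardown relies on FSUtils.delete, a thin recursive-delete helper over FileSystem.delete. A hedged sketch; the scratch path is illustrative only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.FSUtils;

public class DeleteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Path scratch = new Path("file:///tmp/hbase-sketch-archive"); // assumed dir
    FileSystem fs = scratch.getFileSystem(conf);
    // true = recursive, mirroring FSUtils.delete(fs, file, true) above
    FSUtils.delete(fs, scratch, true);
  }
}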
Project: ditb    File: HRegion.java
public static void warmupHRegion(final HRegionInfo info, final HTableDescriptor htd,
    final WAL wal, final Configuration conf, final RegionServerServices rsServices,
    final CancelableProgressable reporter) throws IOException {

  if (info == null) throw new NullPointerException("Passed region info is null");

  if (LOG.isDebugEnabled()) {
    LOG.debug("HRegion.Warming up region: " + info);
  }

  Path rootDir = FSUtils.getRootDir(conf);
  Path tableDir = FSUtils.getTableDir(rootDir, info.getTable());

  FileSystem fs = null;
  if (rsServices != null) {
    fs = rsServices.getFileSystem();
  }
  if (fs == null) {
    fs = FileSystem.get(conf);
  }

  HRegion r = HRegion.newHRegion(tableDir, wal, fs, conf, info, htd, null);
  r.initializeWarmup(reporter);
}
Project: ditb    File: TestNamespaceUpgrade.java
@Test (timeout=300000)
public void testSnapshots() throws IOException, InterruptedException {
  String snapshots[][] = {snapshot1Keys, snapshot2Keys};
  for(int i = 1; i <= snapshots.length; i++) {
    for(TableName table: tables) {
      TEST_UTIL.getHBaseAdmin().cloneSnapshot(table+"_snapshot"+i, TableName.valueOf(table+"_clone"+i));
      FSUtils.logFileSystemState(FileSystem.get(TEST_UTIL.getConfiguration()),
          FSUtils.getRootDir(TEST_UTIL.getConfiguration()),
          LOG);
      int count = 0;
      for(Result res: new HTable(TEST_UTIL.getConfiguration(), table+"_clone"+i).getScanner(new
          Scan())) {
        assertEquals(snapshots[i-1][count++], Bytes.toString(res.getRow()));
      }
      Assert.assertEquals(table+"_snapshot"+i, snapshots[i-1].length, count);
    }
  }
}
Project: ditb    File: StoreFileInfo.java
/**
 * Helper function to compute the HDFS blocks distribution of a given reference file. For a
 * reference file, we don't compute the exact value; we use an estimate instead, given it might be
 * good enough. We assume the bottom part takes the first half of the reference file and the top
 * part takes the second half. This is just an estimate, given that the midkey of the region !=
 * the midkey of the HFile, and the number and size of keys vary. If this estimate isn't good
 * enough, we can improve it later.
 *
 * @param fs        The FileSystem
 * @param reference The reference
 * @param status    The reference FileStatus
 * @return HDFS blocks distribution
 */
private static HDFSBlocksDistribution computeRefFileHDFSBlockDistribution(final FileSystem fs,
    final Reference reference, final FileStatus status) throws IOException {
  if (status == null) {
    return null;
  }

  long start = 0;
  long length = 0;

  if (Reference.isTopFileRegion(reference.getFileRegion())) {
    start = status.getLen() / 2;
    length = status.getLen() - status.getLen() / 2;
  } else {
    start = 0;
    length = status.getLen() / 2;
  }
  return FSUtils.computeHDFSBlocksDistribution(fs, status, start, length);
}
Project: ditb    File: HBaseTestingUtility.java
public MiniDFSCluster startMiniDFSCluster(int servers, final  String racks[], String hosts[])
    throws Exception {
  createDirsAndSetProperties();
  this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
      true, null, racks, hosts, null);

  // Set this just-started cluster as our filesystem.
  FileSystem fs = this.dfsCluster.getFileSystem();
  FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

  // Wait for the cluster to be totally up
  this.dfsCluster.waitClusterUp();

  //reset the test directory for test file system
  dataTestDirOnTestFS = null;

  return this.dfsCluster;
}
Project: ditb    File: HFileLink.java
/**
 * Convert an HFileLink path to a table-relative path.
 * e.g. the link: /hbase/test/0123/cf/testtb=4567-abcd
 *      becomes: /hbase/testtb/4567/cf/abcd
 *
 * @param path HFileLink path
 * @return Relative table path
 * @throws IllegalArgumentException if the path does not match the HFileLink pattern.
 */
private static Path getHFileLinkPatternRelativePath(final Path path) {
  // table=region-hfile
  Matcher m = REF_OR_HFILE_LINK_PATTERN.matcher(path.getName());
  if (!m.matches()) {
    throw new IllegalArgumentException(path.getName() + " is not a valid HFileLink pattern!");
  }

  // Convert the HFileLink name into a real table/region/cf/hfile path.
  TableName tableName = TableName.valueOf(m.group(1), m.group(2));
  String regionName = m.group(3);
  String hfileName = m.group(4);
  String familyName = path.getParent().getName();
  Path tableDir = FSUtils.getTableDir(new Path("./"), tableName);
  return new Path(tableDir, new Path(regionName, new Path(familyName,
      hfileName)));
}
Project: ditb    File: NamespaceUpgrade.java
/**
 * Migrate all tables into respective namespaces, either default or system.  We put them into
 * a temporary location, '.data', in case a user table is named 'data'. In a later method we will
 * move stuff from .data to data.
 * @throws IOException
 */
public void migrateTables() throws IOException {
  List<String> sysTables = Lists.newArrayList("-ROOT-",".META.", ".META");

  // Migrate tables including archive and tmp
  for (Path baseDir: baseDirs) {
    if (!fs.exists(baseDir)) continue;
    List<Path> oldTableDirs = FSUtils.getLocalTableDirs(fs, baseDir);
    for (Path oldTableDir: oldTableDirs) {
      if (NON_USER_TABLE_DIRS.contains(oldTableDir.getName())) continue;
      if (sysTables.contains(oldTableDir.getName())) continue;
      // Make the new directory under the ns to which we will move the table.
      Path nsDir = new Path(this.defNsDir,
        TableName.valueOf(oldTableDir.getName()).getQualifierAsString());
      LOG.info("Moving " + oldTableDir + " to " + nsDir);
      if (!fs.exists(nsDir.getParent())) {
        if (!fs.mkdirs(nsDir.getParent())) {
          throw new IOException("Failed to create namespace dir "+nsDir.getParent());
        }
      }
      if (sysTables.indexOf(oldTableDir.getName()) < 0) {
        LOG.info("Migrating table " + oldTableDir.getName() + " to " + nsDir);
        if (!fs.rename(oldTableDir, nsDir)) {
          throw new IOException("Failed to move "+oldTableDir+" to namespace dir "+nsDir);
        }
      }
    }
  }
}
Project: ditb    File: NamespaceUpgrade.java
/**
 * Removes .tableinfo files laid out in the pre-96 format (i.e., tableinfo files directly under
 * the table directory).
 * @param tableName
 * @throws IOException
 */
private void removeTableInfoInPre96Format(TableName tableName) throws IOException {
  Path tableDir = FSUtils.getTableDir(rootDir, tableName);
  FileStatus[] status = FSUtils.listStatus(fs, tableDir, TABLEINFO_PATHFILTER);
  if (status == null) return;
  for (FileStatus fStatus : status) {
    FSUtils.delete(fs, fStatus.getPath(), false);
  }
}
Project: ditb    File: NamespaceUpgrade.java
static FileStatus getCurrentTableInfoStatus(FileSystem fs, Path dir)
throws IOException {
  FileStatus [] status = FSUtils.listStatus(fs, dir, TABLEINFO_PATHFILTER);
  if (status == null || status.length < 1) return null;
  FileStatus mostCurrent = null;
  for (FileStatus file : status) {
    if (mostCurrent == null || TABLEINFO_FILESTATUS_COMPARATOR.compare(file, mostCurrent) < 0) {
      mostCurrent = file;
    }
  }
  return mostCurrent;
}
Project: ditb    File: NamespaceUpgrade.java
public static boolean verifyNSUpgrade(FileSystem fs, Path rootDir)
    throws IOException {
  try {
    return FSUtils.getVersion(fs, rootDir).equals(HConstants.FILE_SYSTEM_VERSION);
  } catch (DeserializationException e) {
    throw new IOException("Failed to verify namespace upgrade", e);
  }
}
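
verifyNSUpgrade leans on FSUtils.getVersion, which reads the hbase.version file under the root directory and can fail with a DeserializationException. A hedged sketch of reading it directly:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.util.FSUtils;

public class VersionFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Path rootDir = FSUtils.getRootDir(conf);
    FileSystem fs = rootDir.getFileSystem(conf);
    try {
      // Contents of rootdir/hbase.version; compared against
      // HConstants.FILE_SYSTEM_VERSION in verifyNSUpgrade above
      System.out.println("fs version: " + FSUtils.getVersion(fs, rootDir));
    } catch (DeserializationException e) {
      System.err.println("unreadable version file: " + e.getMessage());
    }
  }
}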
Project: ditb    File: DisabledWALProvider.java
@Override
public void init(final WALFactory factory, final Configuration conf,
    final List<WALActionsListener> listeners, String providerId) throws IOException {
  if (null != disabled) {
    throw new IllegalStateException("WALProvider.init should only be called once.");
  }
  if (null == providerId) {
    providerId = "defaultDisabled";
  }
  disabled = new DisabledWAL(new Path(FSUtils.getRootDir(conf), providerId), conf, null);
}
Project: ditb    File: HBaseTestingUtility.java
/**
 * Shuts down instance created by call to {@link #startMiniDFSCluster(int)}
 * or does nothing.
 * @throws IOException
 */
public void shutdownMiniDFSCluster() throws IOException {
  if (this.dfsCluster != null) {
    // The below throws an exception per dn, AsynchronousCloseException.
    this.dfsCluster.shutdown();
    dfsCluster = null;
    dataTestDirOnTestFS = null;
    FSUtils.setFsDefault(this.conf, new Path("file:///"));
  }
}
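
shutdownMiniDFSCluster resets fs.defaultFS to the local filesystem through FSUtils.setFsDefault; the same call is how these tests point a configuration at whatever filesystem they just started. A minimal sketch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.FSUtils;

public class SetFsDefaultSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FSUtils.setFsDefault(conf, new Path("file:///"));
    System.out.println(conf.get("fs.defaultFS")); // file:///
  }
}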
Project: ditb    File: CleanerChore.java
@Override
protected void chore() {
  try {
    FileStatus[] files = FSUtils.listStatus(this.fs, this.oldFileDir);
    checkAndDeleteEntries(files);
  } catch (IOException e) {
    e = RemoteExceptionHandler.checkIOException(e);
    LOG.warn("Error while cleaning the logs", e);
  }
}
Project: ditb    File: HBaseTestingUtility.java
private void setFs() throws IOException {
  if(this.dfsCluster == null){
    LOG.info("Skipping setting fs because dfsCluster is null");
    return;
  }
  FileSystem fs = this.dfsCluster.getFileSystem();
  FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));
  if (this.conf.getBoolean(USE_LOCAL_FILESYSTEM, false)) {
    FSUtils.setFsDefault(this.conf, new Path("file:///"));
  }
}