Java Class org.apache.hadoop.fs.FileUtil Example Source Code
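
The snippets below are collected from open-source projects and show typical uses of org.apache.hadoop.fs.FileUtil: recursive deletion (fullyDelete, fullyDeleteContents), copies across filesystems (copy), permission changes (chmod, setExecutable), symlinks (symLink), directory listings (list, listFiles, stat2Paths), and shell-path conversion (makeShellPath). As a warm-up, here is a minimal standalone sketch, not taken from any of the projects below and using illustrative paths, of the two calls that recur most often:

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;

public class FileUtilBasics {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem localFs = FileSystem.getLocal(conf);

    File srcDir = new File("/tmp/fileutil-demo/src");  // illustrative path
    File dstDir = new File("/tmp/fileutil-demo/dst");  // illustrative path
    srcDir.mkdirs();

    // Copy a local directory into a FileSystem; 'false' keeps the source.
    FileUtil.copy(srcDir, localFs, new Path(dstDir.getAbsolutePath()), false, conf);

    // Recursively delete a directory tree. fullyDelete returns false on
    // failure, which is why the snippets below wrap it in checks or asserts.
    if (!FileUtil.fullyDelete(dstDir)) {
      throw new IOException("Unable to delete: " + dstDir);
    }
  }
}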

Project: hadoop    File: MiniDFSCluster.java
public static void copyNameDirs(Collection<URI> srcDirs, Collection<URI> dstDirs,
    Configuration dstConf) throws IOException {
  URI srcDir = Lists.newArrayList(srcDirs).get(0);
  FileSystem dstFS = FileSystem.getLocal(dstConf).getRaw();
  for (URI dstDir : dstDirs) {
    Preconditions.checkArgument(!dstDir.equals(srcDir),
        "src and dst are the same: " + dstDir);
    File dstDirF = new File(dstDir);
    if (dstDirF.exists()) {
      if (!FileUtil.fullyDelete(dstDirF)) {
        throw new IOException("Unable to delete: " + dstDirF);
      }
    }
    LOG.info("Copying namedir from primary node dir "
        + srcDir + " to " + dstDir);
    FileUtil.copy(
        new File(srcDir),
        dstFS, new Path(dstDir), false, dstConf);
  }
}
Project: ditb    File: TestTableMapReduceUtil.java
@Test
@SuppressWarnings("deprecation")
public void shoudBeValidMapReduceWithPartitionerEvaluation()
    throws IOException {
  Configuration cfg = UTIL.getConfiguration();
  JobConf jobConf = new JobConf(cfg);
  try {
    jobConf.setJobName("process row task");
    jobConf.setNumReduceTasks(2);
    TableMapReduceUtil.initTableMapJob(TABLE_NAME, new String(COLUMN_FAMILY),
        ClassificatorMapper.class, ImmutableBytesWritable.class, Put.class,
        jobConf);

    TableMapReduceUtil.initTableReduceJob(TABLE_NAME,
        ClassificatorRowReduce.class, jobConf, HRegionPartitioner.class);
    RunningJob job = JobClient.runJob(jobConf);
    assertTrue(job.isSuccessful());
  } finally {
    if (jobConf != null)
      FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir")));
  }
}
Project: hadoop    File: TestShell.java
public void testShellCommandTimeout() throws Throwable {
  if(Shell.WINDOWS) {
    // setExecutable does not work on Windows
    return;
  }
  String rootDir = new File(System.getProperty(
      "test.build.data", "/tmp")).getAbsolutePath();
  File shellFile = new File(rootDir, "timeout.sh");
  String timeoutCommand = "sleep 4; echo \"hello\"";
  PrintWriter writer = new PrintWriter(new FileOutputStream(shellFile));
  writer.println(timeoutCommand);
  writer.close();
  FileUtil.setExecutable(shellFile, true);
  Shell.ShellCommandExecutor shexc =
      new Shell.ShellCommandExecutor(new String[]{shellFile.getAbsolutePath()},
                                     null, null, 100);
  try {
    shexc.execute();
  } catch (Exception e) {
    // Expected: an exception is thrown when the command times out.
  }
  shellFile.delete();
  assertTrue("Script did not timeout", shexc.isTimedOut());
}
Project: hadoop    File: FSImageTestUtil.java
/**
 * Return a standalone instance of FSEditLog that will log into the given
 * log directory. The returned instance is not yet opened.
 */
public static FSEditLog createStandaloneEditLog(File logDir)
    throws IOException {
  assertTrue(logDir.mkdirs() || logDir.exists());
  if (!FileUtil.fullyDeleteContents(logDir)) {
    throw new IOException("Unable to delete contents of " + logDir);
  }
  NNStorage storage = Mockito.mock(NNStorage.class);
  StorageDirectory sd 
    = FSImageTestUtil.mockStorageDirectory(logDir, NameNodeDirType.EDITS);
  List<StorageDirectory> sds = Lists.newArrayList(sd);
  Mockito.doReturn(sds).when(storage).dirIterable(NameNodeDirType.EDITS);
  Mockito.doReturn(sd).when(storage)
    .getStorageDirectory(Matchers.<URI>anyObject());

  FSEditLog editLog = new FSEditLog(new Configuration(), 
                       storage,
                       ImmutableList.of(logDir.toURI()));
  editLog.initJournalsForWrite();
  return editLog;
}
Project: hadoop    File: TestBlockRecovery.java
/**
 * Cleans the resources and closes the instance of datanode
 * @throws IOException if an error occurred
 */
@After
public void tearDown() throws IOException {
  if (dn != null) {
    try {
      dn.shutdown();
    } catch(Exception e) {
      LOG.error("Cannot close: ", e);
    } finally {
      File dir = new File(DATA_DIR);
      if (dir.exists())
        Assert.assertTrue(
            "Cannot delete data-node dirs", FileUtil.fullyDelete(dir));
    }
  }
}
Project: ditb    File: HRegionFileSystem.java
/**
 * Bulk load: Add a specified store file to the specified family. If the source file is on
 * the same file-system as the destination, it is moved from the source location to the
 * destination location; otherwise it is copied over first.
 *
 * @param familyName Family that will gain the file
 * @param srcPath    {@link Path} to the file to import
 * @param seqNum     Bulk Load sequence number
 * @return The destination {@link Path} of the bulk loaded file
 * @throws IOException
 */
Path bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum) throws IOException {
  // Copy the file if it's on another filesystem
  FileSystem srcFs = srcPath.getFileSystem(conf);
  FileSystem desFs = fs instanceof HFileSystem ? ((HFileSystem) fs).getBackingFs() : fs;

  // We can't compare FileSystem instances as equals() includes UGI instance
  // as part of the comparison and won't work when doing SecureBulkLoad
  // TODO deal with viewFS
  if (!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs)) {
    LOG.info("Bulk-load file " + srcPath + " is on different filesystem than "
        + "the destination store. Copying file over to destination filesystem.");
    Path tmpPath = createTempName();
    FileUtil.copy(srcFs, srcPath, fs, tmpPath, false, conf);
    LOG.info("Copied " + srcPath + " to temporary path on destination filesystem: " + tmpPath);
    srcPath = tmpPath;
  }

  return commitStoreFile(familyName, srcPath, seqNum, true);
}
Project: hadoop    File: JobResourceUploader.java
private Path copyRemoteFiles(Path parentDir, Path originalPath,
    Configuration conf, short replication) throws IOException {
  // Check whether we can skip the copy: is the JT using the same file
  // system? We compare only URI strings (no DNS lookups) to decide whether
  // the filesystems are the same. This is not optimal, but it avoids name
  // resolution.

  FileSystem remoteFs = originalPath.getFileSystem(conf);
  if (compareFs(remoteFs, jtFs)) {
    return originalPath;
  }
  // This might have name collisions, in which case copy will throw an
  // exception. Derive the new path from the original path's name.
  Path newPath = new Path(parentDir, originalPath.getName());
  FileUtil.copy(remoteFs, originalPath, jtFs, newPath, false, conf);
  jtFs.setReplication(newPath, replication);
  return newPath;
}
Project: hadoop    File: LocalDistributedCacheManager.java
/**
 * Utility method for creating a symlink and warning on errors.
 *
 * If link is null, does nothing.
 */
private void symlink(File workDir, String target, String link)
    throws IOException {
  if (link != null) {
    link = workDir.toString() + Path.SEPARATOR + link;
    File flink = new File(link);
    if (!flink.exists()) {
      LOG.info(String.format("Creating symlink: %s <- %s", target, link));
      if (0 != FileUtil.symLink(target, link)) {
        LOG.warn(String.format("Failed to create symlink: %s <- %s", target,
            link));
      } else {
        symlinksCreated.add(new File(link));
      }
    }
  }
}
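
FileUtil.symLink returns the exit code of the underlying link operation rather than throwing on failure, which is why the helper above tests for a non-zero return. A minimal self-contained sketch, with illustrative paths:

import java.io.IOException;
import org.apache.hadoop.fs.FileUtil;

public class SymlinkSketch {
  public static void main(String[] args) throws IOException {
    // symLink reports failure via its return value (0 means success).
    int ret = FileUtil.symLink("/tmp/data/original.txt", "/tmp/work/link.txt");
    if (ret != 0) {
      System.err.println("Failed to create symlink, exit code " + ret);
    }
  }
}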
Project: hadoop-oss    File: TestSharedFileDescriptorFactory.java
@Test(timeout=10000)
public void testReadAndWrite() throws Exception {
  File path = new File(TEST_BASE, "testReadAndWrite");
  path.mkdirs();
  SharedFileDescriptorFactory factory =
      SharedFileDescriptorFactory.create("woot_",
          new String[] { path.getAbsolutePath() });
  FileInputStream inStream =
      factory.createDescriptor("testReadAndWrite", 4096);
  FileOutputStream outStream = new FileOutputStream(inStream.getFD());
  outStream.write(101);
  inStream.getChannel().position(0);
  Assert.assertEquals(101, inStream.read());
  inStream.close();
  outStream.close();
  FileUtil.fullyDelete(path);
}
Project: hadoop-oss    File: TestSharedFileDescriptorFactory.java
@Test(timeout=60000)
public void testDirectoryFallbacks() throws Exception {
  File nonExistentPath = new File(TEST_BASE, "nonexistent");
  File permissionDeniedPath = new File("/");
  File goodPath = new File(TEST_BASE, "testDirectoryFallbacks");
  goodPath.mkdirs();
  try {
    SharedFileDescriptorFactory.create("shm_", 
        new String[] { nonExistentPath.getAbsolutePath(),
                        permissionDeniedPath.getAbsolutePath() });
    Assert.fail();
  } catch (IOException e) {
    // Expected: no usable directory in the supplied list.
  }
  SharedFileDescriptorFactory factory =
      SharedFileDescriptorFactory.create("shm_", 
          new String[] { nonExistentPath.getAbsolutePath(),
                          permissionDeniedPath.getAbsolutePath(),
                          goodPath.getAbsolutePath() } );
  Assert.assertEquals(goodPath.getAbsolutePath(), factory.getPath());
  FileUtil.fullyDelete(goodPath);
}
Project: lustre-connector-for-hadoop    File: LustreFsJavaImpl.java
@Override
public void chmod(String path, int mode) throws IOException {
  File f = new File(path);
  FsPermission perm = FsPermission.createImmutable((short) mode);
  LinkedList<String> args = new LinkedList<String>();
  args.add("/usr/bin/setfacl");
  args.add("-m");
  args.add(
      "u::" + perm.getUserAction().SYMBOL +
      ",g::" + perm.getGroupAction().SYMBOL +
      ",o::" + perm.getOtherAction().SYMBOL);
  args.add(FileUtil.makeShellPath(f, true));
  org.apache.hadoop.fs.util.Shell.runPrivileged(args.toArray(new String[0]));

  // Set default ACLs on directories so children can inherit.
  if (f.isDirectory()) {
    args.add(1, "-d");
    org.apache.hadoop.fs.util.Shell.runPrivileged(args.toArray(new String[0]));
  }
}
Project: hadoop    File: TestHDFSServerPorts.java
/**
 * Start the BackupNode
 */
public BackupNode startBackupNode(Configuration conf) throws IOException {
  // Set up testing environment directories
  hdfsDir = new File(TEST_DATA_DIR, "backupNode");
  if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  File currDir = new File(hdfsDir, "name2");
  File currDir2 = new File(currDir, "current");
  File currDir3 = new File(currDir, "image");

  assertTrue(currDir.mkdirs());
  assertTrue(currDir2.mkdirs());
  assertTrue(currDir3.mkdirs());

  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      fileAsURI(new File(hdfsDir, "name2")).toString());
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      "${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");

  // Start BackupNode
  String[] args = new String [] { StartupOption.BACKUP.getName() };
  BackupNode bu = (BackupNode)NameNode.createNameNode(args, conf);

  return bu;
}
Project: hadoop    File: TestFileOutputCommitter.java
private void testMapOnlyNoOutputInternal(int version) throws Exception {
  JobConf conf = new JobConf();
  // The output path is intentionally not set here: FileOutputFormat.setOutputPath(conf, outDir);
  conf.set(JobContext.TASK_ATTEMPT_ID, attempt);
  conf.setInt(org.apache.hadoop.mapreduce.lib.output.
      FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION, version);
  JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
  TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
  FileOutputCommitter committer = new FileOutputCommitter();    

  // setup
  committer.setupJob(jContext);
  committer.setupTask(tContext);

  if(committer.needsTaskCommit(tContext)) {
    // do commit
    committer.commitTask(tContext);
  }
  committer.commitJob(jContext);

  // validate output
  FileUtil.fullyDelete(new File(outDir.toString()));
}
Project: hadoop    File: Storage.java
/**
 * @return true if the storage directory should prompt the user prior
 * to formatting (i.e if the directory appears to contain some data)
 * @throws IOException if the SD cannot be accessed due to an IO error
 */
@Override
public boolean hasSomeData() throws IOException {
  // It's alright for a dir not to exist, or to exist (properly accessible)
  // and be completely empty.
  if (!root.exists()) return false;

  if (!root.isDirectory()) {
    // a file where you expect a directory should not cause silent
    // formatting
    return true;
  }

  if (FileUtil.listFiles(root).length == 0) {
    // Empty dir can format without prompt.
    return false;
  }

  return true;
}
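
The emptiness check above relies on FileUtil.listFiles(File) rather than File.listFiles(): the FileUtil variant throws an IOException when the listing fails instead of returning null, so the length test cannot trip a NullPointerException. A minimal sketch, assuming dir points at an accessible directory:

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.fs.FileUtil;

public class ListFilesSketch {
  static boolean isEmptyDir(File dir) throws IOException {
    // Never null: FileUtil.listFiles throws IOException if the listing fails.
    return FileUtil.listFiles(dir).length == 0;
  }
}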
Project: monarch    File: HDFSQuasiService.java
private MiniDFSCluster createCluster() throws HDFSQuasiServiceException {
  MiniDFSCluster hdfsCluster = null;

  File baseDir = new File(getWorkingDir()).getAbsoluteFile();
  FileUtil.fullyDelete(baseDir);
  this.conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());

  LOG.info("Using base dir " + baseDir.getAbsolutePath());

  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(this.conf);
  builder.numDataNodes(getNumberOfDataNodes());
  try {
    hdfsCluster = builder.build();
  } catch (IOException e) {
    LOG.error("Error in creating mini DFS cluster ", e);
    throw new HDFSQuasiServiceException("Error in creating mini DFS cluster ", e);
  }
  ListIterator<DataNode> itr = hdfsCluster.getDataNodes().listIterator();
  LOG.info("NameNode: " + hdfsCluster.getNameNode().getNameNodeAddressHostPortString());
  while (itr.hasNext()) {
    DataNode dn = itr.next();
    LOG.info("DataNode: " + dn.getDisplayName());
  }
  return hdfsCluster;
}
Project: hadoop    File: FSDownload.java
private Path copy(Path sCopy, Path dstdir) throws IOException {
  FileSystem sourceFs = sCopy.getFileSystem(conf);
  Path dCopy = new Path(dstdir, "tmp_"+sCopy.getName());
  FileStatus sStat = sourceFs.getFileStatus(sCopy);
  if (sStat.getModificationTime() != resource.getTimestamp()) {
    throw new IOException("Resource " + sCopy +
        " changed on src filesystem (expected " + resource.getTimestamp() +
        ", was " + sStat.getModificationTime());
  }
  if (resource.getVisibility() == LocalResourceVisibility.PUBLIC) {
    if (!isPublic(sourceFs, sCopy, sStat, statCache)) {
      throw new IOException("Resource " + sCopy +
          " is not publicly accessable and as such cannot be part of the" +
          " public cache.");
    }
  }

  FileUtil.copy(sourceFs, sStat, FileSystem.getLocal(conf), dCopy, false,
      true, conf);
  return dCopy;
}
Project: hadoop    File: TestAggregatedLogsBlock.java
/**
 * Try to read bad logs.
 *
 * @throws Exception
 */
@Test
public void testBadLogs() throws Exception {

  FileUtil.fullyDelete(new File("target/logs"));
  Configuration configuration = getConfiguration();

  writeLogs("target/logs/logs/application_0_0001/container_0_0001_01_000001");

  writeLog(configuration, "owner");

  AggregatedLogsBlockForTest aggregatedBlock = getAggregatedLogsBlockForTest(
      configuration, "admin", "container_0_0001_01_000001");
  ByteArrayOutputStream data = new ByteArrayOutputStream();
  PrintWriter printWriter = new PrintWriter(data);
  HtmlBlock html = new HtmlBlockForTest();
  HtmlBlock.Block block = new BlockForTest(html, printWriter, 10, false);
  aggregatedBlock.render(block);

  block.getWriter().flush();
  String out = data.toString();
  assertTrue(out
      .contains("Logs not available for entity. Aggregation may not be complete, Check back later or try the nodemanager at localhost:1234"));

}
Project: hadoop    File: TestHDFSServerPorts.java
/**
 * Start the namenode.
 */
public NameNode startNameNode(boolean withService) throws IOException {
  hdfsDir = new File(TEST_DATA_DIR, "dfs");
  if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  config = new HdfsConfiguration();
  config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      fileAsURI(new File(hdfsDir, "name1")).toString());
  FileSystem.setDefaultUri(config, "hdfs://" + THIS_HOST);
  if (withService) {
    NameNode.setServiceAddress(config, THIS_HOST);      
  }
  config.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, THIS_HOST);
  DFSTestUtil.formatNameNode(config);

  String[] args = new String[] {};
  // NameNode will modify config with the ports it bound to
  return NameNode.createNameNode(args, config);
}
Project: hadoop    File: TestStreamAggregate.java
@Test
public void testCommandLine() throws Exception {
  try {
    try {
      FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
    } catch (Exception e) {
      // Ignore: the output directory may not exist yet.
    }

    createInput();
    boolean mayExit = false;

    // During tests, the default Configuration will use a local mapred
    // So don't specify -config or -cluster
    job = new StreamJob(genArgs(), mayExit);      
    job.go();
    File outFile = new File(OUTPUT_DIR, "part-00000").getAbsoluteFile();
    String output = StreamUtil.slurp(outFile);
    outFile.delete();
    System.err.println("outEx1=" + outputExpect);
    System.err.println("  out1=" + output);
    assertEquals(outputExpect, output);
  } finally {
    INPUT_FILE.delete();
    FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
  }
}
Project: hadoop    File: FsVolumeImpl.java
boolean isBPDirEmpty(String bpid) throws IOException {
  File volumeCurrentDir = this.getCurrentDir();
  File bpDir = new File(volumeCurrentDir, bpid);
  File bpCurrentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
  File finalizedDir = new File(bpCurrentDir,
      DataStorage.STORAGE_DIR_FINALIZED);
  File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);
  if (finalizedDir.exists() && !DatanodeUtil.dirNoFilesRecursive(
      finalizedDir)) {
    return false;
  }
  if (rbwDir.exists() && FileUtil.list(rbwDir).length != 0) {
    return false;
  }
  return true;
}
Project: hadoop    File: TestAuxServices.java
@Test
public void testAuxServiceRecoverySetup() throws IOException {
  Configuration conf = new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true);
  conf.set(YarnConfiguration.NM_RECOVERY_DIR, TEST_DIR.toString());
  conf.setStrings(YarnConfiguration.NM_AUX_SERVICES,
      new String[] { "Asrv", "Bsrv" });
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Asrv"),
      RecoverableServiceA.class, Service.class);
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Bsrv"),
      RecoverableServiceB.class, Service.class);
  try {
    final AuxServices aux = new AuxServices();
    aux.init(conf);
    Assert.assertEquals(2, aux.getServices().size());
    File auxStorageDir = new File(TEST_DIR,
        AuxServices.STATE_STORE_ROOT_NAME);
    Assert.assertEquals(2, auxStorageDir.listFiles().length);
    aux.close();
  } finally {
    FileUtil.fullyDelete(TEST_DIR);
  }
}
Project: hadoop    File: TestTimelineWebServicesWithSSL.java
@BeforeClass
public static void setupServer() throws Exception {
  conf = new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
  conf.setClass(YarnConfiguration.TIMELINE_SERVICE_STORE,
      MemoryTimelineStore.class, TimelineStore.class);
  conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY, "HTTPS_ONLY");

  File base = new File(BASEDIR);
  FileUtil.fullyDelete(base);
  base.mkdirs();
  keystoresDir = new File(BASEDIR).getAbsolutePath();
  sslConfDir =
      KeyStoreTestUtil.getClasspathDir(TestTimelineWebServicesWithSSL.class);

  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
  conf.addResource("ssl-server.xml");
  conf.addResource("ssl-client.xml");

  timelineServer = new ApplicationHistoryServer();
  timelineServer.init(conf);
  timelineServer.start();
  store = timelineServer.getTimelineStore();
}
Project: hadoop    File: TaskLog.java
/**
 * Construct the command line for running the debug script.
 * @param cmd The command and the arguments that should be run
 * @param debugout The file to which the script's stdout and stderr
 *        should be redirected
 * @return the command line as a String
 * @throws IOException
 */
static String buildDebugScriptCommandLine(List<String> cmd, String debugout)
    throws IOException {
  StringBuilder mergedCmd = new StringBuilder();
  mergedCmd.append("exec ");
  boolean isExecutable = true;
  for(String s: cmd) {
    if (isExecutable) {
      // the executable name needs to be expressed as a shell path for the  
      // shell to find it.
      mergedCmd.append(FileUtil.makeShellPath(new File(s)));
      isExecutable = false; 
    } else {
      mergedCmd.append(s);
    }
    mergedCmd.append(" ");
  }
  mergedCmd.append(" < /dev/null ");
  mergedCmd.append(" >");
  mergedCmd.append(debugout);
  mergedCmd.append(" 2>&1 ");
  return mergedCmd.toString();
}
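
FileUtil.makeShellPath(File) renders a local File as a canonical path string for interpolation into a shell command line, which is how the executable name above is made findable by the shell. A minimal sketch, with an illustrative script location and output file:

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.fs.FileUtil;

public class ShellPathSketch {
  static String debugCommand(String script) throws IOException {
    // Canonicalize the script path before embedding it in a command line.
    return "exec " + FileUtil.makeShellPath(new File(script))
        + " < /dev/null > debug.out 2>&1";
  }
}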
Project: hadoop    File: TestFileJournalManager.java
@Test(expected=IllegalStateException.class)
public void testFinalizeErrorReportedToNNStorage() throws IOException, InterruptedException {
  File f = new File(TestEditLog.TEST_DIR + "/filejournaltestError");
  // abort after 10th roll
  NNStorage storage = setupEdits(Collections.<URI>singletonList(f.toURI()),
                                 10, new AbortSpec(10, 0));
  StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();

  FileJournalManager jm = new FileJournalManager(conf, sd, storage);
  String sdRootPath = sd.getRoot().getAbsolutePath();
  FileUtil.chmod(sdRootPath, "-w", true);
  try {
    jm.finalizeLogSegment(0, 1);
  } finally {
    FileUtil.chmod(sdRootPath, "+w", true);
    assertTrue(storage.getRemovedStorageDirs().contains(sd));
  }
}
Project: hadoop    File: DumpTypedBytes.java
/**
 * The main driver for <code>DumpTypedBytes</code>.
 */
public int run(String[] args) throws Exception {
  if (args.length == 0) {
    System.err.println("Too few arguments!");
    printUsage();
    return 1;
  }
  Path pattern = new Path(args[0]);
  FileSystem fs = pattern.getFileSystem(getConf());
  fs.setVerifyChecksum(true);
  for (Path p : FileUtil.stat2Paths(fs.globStatus(pattern), pattern)) {
    List<FileStatus> inputFiles = new ArrayList<FileStatus>();
    FileStatus status = fs.getFileStatus(p);
    if (status.isDirectory()) {
      FileStatus[] files = fs.listStatus(p);
      Collections.addAll(inputFiles, files);
    } else {
      inputFiles.add(status);
    }
    return dumpTypedBytes(inputFiles);
  }
  return -1;
}
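
FileUtil.stat2Paths converts a FileStatus[] into the corresponding Path[]; the two-argument overload used above additionally falls back to the supplied pattern path when globStatus returns null (no match). A minimal sketch, using an illustrative directory:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;

public class Stat2PathsSketch {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    FileStatus[] stats = fs.listStatus(new Path("/user/demo"));  // illustrative
    // Convert the statuses back to plain paths.
    for (Path p : FileUtil.stat2Paths(stats)) {
      System.out.println(p);
    }
  }
}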
Project: hadoop    File: BlockPoolSliceStorage.java
/**
 * Cleanup the detachDir.
 * 
 * If the directory is not empty, report an error; otherwise remove the
 * directory.
 * 
 * @param detachDir detach directory
 * @throws IOException if the directory is not empty or it can not be removed
 */
private void cleanupDetachDir(File detachDir) throws IOException {
  if (!DataNodeLayoutVersion.supports(
      LayoutVersion.Feature.APPEND_RBW_DIR, layoutVersion)
      && detachDir.exists() && detachDir.isDirectory()) {

    if (FileUtil.list(detachDir).length != 0) {
      throw new IOException("Detached directory " + detachDir
          + " is not empty. Please manually move each file under this "
          + "directory to the finalized directory if the finalized "
          + "directory tree does not have the file.");
    } else if (!detachDir.delete()) {
      throw new IOException("Cannot remove directory " + detachDir);
    }
  }
}
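
Like FileUtil.listFiles, the FileUtil.list(File) call above is a null-safe wrapper around File.list(): it throws an IOException when the listing fails rather than returning null, so the length comparison is safe. A minimal sketch, assuming dir is an accessible directory:

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.fs.FileUtil;

public class ListSketch {
  static void removeIfEmpty(File dir) throws IOException {
    // Never null: FileUtil.list throws IOException if the listing fails.
    if (FileUtil.list(dir).length == 0 && !dir.delete()) {
      throw new IOException("Cannot remove directory " + dir);
    }
  }
}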
Project: MRNMF    File: MatrixUpdater.java
public static void addInpuPath(Job job, Path path) throws IOException {
    FileSystem fs = path.getFileSystem(new Configuration());
    if (fs.isDirectory(path)) {
        for (Path p : FileUtil.stat2Paths(fs.listStatus(path))) {
            if (p.toString().contains("part"))
                FileInputFormat.addInputPath(job, p);
        }
    } else {
        FileInputFormat.addInputPath(job, path);
    }
}
Project: ditb    File: TestCellCounter.java
@BeforeClass
public static void beforeClass() throws Exception {
  UTIL.setJobWithoutMRCluster();
  UTIL.startMiniCluster();
  FQ_OUTPUT_DIR = new Path(OUTPUT_DIR).makeQualified(new LocalFileSystem());
  FileUtil.fullyDelete(new File(OUTPUT_DIR));
}
Project: hadoop    File: TestAllowFormat.java
@BeforeClass
public static void setUp() throws Exception {
  config = new Configuration();
  if ( DFS_BASE_DIR.exists() && !FileUtil.fullyDelete(DFS_BASE_DIR) ) {
    throw new IOException("Could not delete hdfs directory '" + DFS_BASE_DIR +
                          "'");
  }

  // Test has multiple name directories.
  // Format should not really prompt us if one of the directories exist,
  // but is empty. So in case the test hangs on an input, it means something
  // could be wrong in the format prompting code. (HDFS-1636)
  LOG.info("hdfsdir is " + DFS_BASE_DIR.getAbsolutePath());
  File nameDir1 = new File(DFS_BASE_DIR, "name1");
  File nameDir2 = new File(DFS_BASE_DIR, "name2");

  // To test multiple directory handling, we pre-create one of the name directories.
  nameDir1.mkdirs();

  // Set multiple name directories.
  config.set(DFS_NAMENODE_NAME_DIR_KEY, nameDir1.getPath() + "," + nameDir2.getPath());
  config.set(DFS_DATANODE_DATA_DIR_KEY, new File(DFS_BASE_DIR, "data").getPath());

  config.set(DFS_NAMENODE_CHECKPOINT_DIR_KEY,new File(DFS_BASE_DIR, "secondary").getPath());

  FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
}
Project: hadoop    File: TestHftpFileSystem.java
@AfterClass
public static void tearDown() throws Exception {
  if (cluster != null) {
    cluster.shutdown();
  }
  FileUtil.fullyDelete(new File(BASEDIR));
  KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
}
Project: hadoop-oss    File: TestSSLHttpServer.java
@AfterClass
public static void cleanup() throws Exception {
  server.stop();
  FileUtil.fullyDelete(new File(BASEDIR));
  KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
  clientSslFactory.destroy();
}
Project: hadoop-oss    File: TestHttpCookieFlag.java
@BeforeClass
public static void setUp() throws Exception {
  Configuration conf = new Configuration();
  conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
          DummyFilterInitializer.class.getName());

  File base = new File(BASEDIR);
  FileUtil.fullyDelete(base);
  base.mkdirs();
  keystoresDir = new File(BASEDIR).getAbsolutePath();
  sslConfDir = KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);

  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
  Configuration sslConf = KeyStoreTestUtil.getSslConfig();

  clientSslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, sslConf);
  clientSslFactory.init();

  server = new HttpServer2.Builder()
          .setName("test")
          .addEndpoint(new URI("http://localhost"))
          .addEndpoint(new URI("https://localhost"))
          .setConf(conf)
          .keyPassword(sslConf.get("ssl.server.keystore.keypassword"))
          .keyStore(sslConf.get("ssl.server.keystore.location"),
                  sslConf.get("ssl.server.keystore.password"),
                  sslConf.get("ssl.server.keystore.type", "jks"))
          .trustStore(sslConf.get("ssl.server.truststore.location"),
                  sslConf.get("ssl.server.truststore.password"),
                  sslConf.get("ssl.server.truststore.type", "jks"))
          .excludeCiphers(
                  sslConf.get("ssl.server.exclude.cipher.list"))
          .build();
  server.addServlet("echo", "/echo", TestHttpServer.EchoServlet.class);
  server.start();
}
Project: hadoop    File: TestUnconsumedInput.java
@Test
public void testUnconsumedInput() throws Exception
{
  String outFileName = "part-00000";
  File outFile = null;
  try {
    try {
      FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
    } catch (Exception e) {
      // Ignore: the output directory may not exist yet.
    }

    createInput();

    // setup config to ignore unconsumed input
    Configuration conf = new Configuration();
    conf.set("stream.minRecWrittenToEnableSkip_", "0");

    job = new StreamJob();
    job.setConf(conf);
    int exitCode = job.run(genArgs());
    assertEquals("Job failed", 0, exitCode);
    outFile = new File(OUTPUT_DIR, outFileName).getAbsoluteFile();
    String output = StreamUtil.slurp(outFile);
    assertEquals("Output was truncated", EXPECTED_OUTPUT_SIZE,
        StringUtils.countMatches(output, "\t"));
  } finally {
    INPUT_FILE.delete();
    FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
  }
}
Project: scheduling-connector-for-hadoop    File: FSDownload.java
private Path copy(Path sCopy, Path dstdir) throws IOException {
  FileSystem sourceFs = sCopy.getFileSystem(conf);
  Path dCopy = new Path(dstdir, "tmp_" + sCopy.getName());
  FileStatus sStat = sourceFs.getFileStatus(sCopy);
  FileUtil.copy(sourceFs, sStat, FileSystem.getLocal(conf), dCopy, false,
      true, conf);
  return dCopy;
}
Project: hadoop    File: TestWinUtils.java
private void testChmodInternalR(String mode, String expectedPerm,
    String expectedPermx) throws IOException {
  // Setup test folder hierarchy
  File a = new File(TEST_DIR, "a");
  assertTrue(a.mkdir());
  chmod("700", a);
  File aa = new File(a, "a");
  assertTrue(aa.createNewFile());
  chmod("600", aa);
  File ab = new File(a, "b");
  assertTrue(ab.mkdir());
  chmod("700", ab);
  File aba = new File(ab, "a");
  assertTrue(aba.mkdir());
  chmod("700", aba);
  File abb = new File(ab, "b");
  assertTrue(abb.createNewFile());
  chmod("600", abb);
  File abx = new File(ab, "x");
  assertTrue(abx.createNewFile());
  chmod("u+x", abx);

  // Run chmod recursively
  chmodR(mode, a);

  // Verify outcome
  assertPermissions(a, "d" + expectedPermx);
  assertPermissions(aa, "-" + expectedPerm);
  assertPermissions(ab, "d" + expectedPermx);
  assertPermissions(aba, "d" + expectedPermx);
  assertPermissions(abb, "-" + expectedPerm);
  assertPermissions(abx, "-" + expectedPermx);

  assertTrue(FileUtil.fullyDelete(a));
}
Project: hadoop    File: TestHftpFileSystem.java
@BeforeClass
public static void setUp() throws Exception {
  config = new Configuration();
  cluster = new MiniDFSCluster.Builder(config).numDataNodes(2).build();
  blockPoolId = cluster.getNamesystem().getBlockPoolId();
  hftpUri = "hftp://"
      + config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
  File base = new File(BASEDIR);
  FileUtil.fullyDelete(base);
  base.mkdirs();
  keystoresDir = new File(BASEDIR).getAbsolutePath();
  sslConfDir = KeyStoreTestUtil.getClasspathDir(TestHftpFileSystem.class);

  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, config, false);
}
Project: hadoop    File: FileJournalManager.java
@Override
public void purgeLogsOlderThan(long minTxIdToKeep)
    throws IOException {
  LOG.info("Purging logs older than " + minTxIdToKeep);
  File[] files = FileUtil.listFiles(sd.getCurrentDir());
  List<EditLogFile> editLogs = matchEditLogs(files, true);
  for (EditLogFile log : editLogs) {
    if (log.getFirstTxId() < minTxIdToKeep &&
        log.getLastTxId() < minTxIdToKeep) {
      purger.purgeLog(log);
    }
  }
}
Project: lustre-connector-for-hadoop    File: LustreFsJavaImpl.java
@Override
public boolean mkdirs(String path, short permissions) throws IOException {
  File f = new File(path), p = f;
  while(!p.getParentFile().exists()) {
    p = p.getParentFile();
  }
  if(!p.getParentFile().isDirectory()) {
    throw new FileAlreadyExistsException("Not a directory: "+ p.getParent());
  }
  boolean success = f.mkdirs();
  if (success) {
    if (-1 != permissions) {
      chmod(path, permissions);
    }
    // We give explicit permissions to the user who submitted the job using ACLs
    String user = UserGroupInformation.getCurrentUser().getShortUserName();
    LinkedList<String> args = new LinkedList<String>();
    args.add("/usr/bin/setfacl");
    args.add("-R");
    args.add("-m");
    args.add("u:" + user + ":" + FsAction.ALL.SYMBOL);
    args.add(FileUtil.makeShellPath(p, true));  
    org.apache.hadoop.fs.util.Shell.runPrivileged(args.toArray(new String[0]));
    args.add(2, "-d");
    org.apache.hadoop.fs.util.Shell.runPrivileged(args.toArray(new String[0]));
  }
  return (success || (f.exists() && f.isDirectory()));
}
Project: hadoop    File: TestFileOutputCommitter.java
private void testMapFileOutputCommitterInternal(int version)
    throws Exception {
  Job job = Job.getInstance();
  FileOutputFormat.setOutputPath(job, outDir);
  Configuration conf = job.getConfiguration();
  conf.set(MRJobConfig.TASK_ATTEMPT_ID, attempt);
  conf.setInt(FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION,
      version);
  JobContext jContext = new JobContextImpl(conf, taskID.getJobID());    
  TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
  FileOutputCommitter committer = new FileOutputCommitter(outDir, tContext);

  // setup
  committer.setupJob(jContext);
  committer.setupTask(tContext);

  // write output
  MapFileOutputFormat theOutputFormat = new MapFileOutputFormat();
  RecordWriter theRecordWriter = theOutputFormat.getRecordWriter(tContext);
  writeMapFileOutput(theRecordWriter, tContext);

  // do commit
  committer.commitTask(tContext);
  committer.commitJob(jContext);

  // validate output
  validateMapFileOutputContent(FileSystem.get(job.getConfiguration()), outDir);
  FileUtil.fullyDelete(new File(outDir.toString()));
}