Java class org.apache.hadoop.fs.FileContext: example source code
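
Before the per-project snippets, here is a minimal, self-contained sketch of the core FileContext calls they all share: obtain a context, create a file, read it back, and delete it. This is an illustrative example, not taken from any of the projects below; the path name is made up.

import java.util.EnumSet;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

public class FileContextQuickStart {
  public static void main(String[] args) throws Exception {
    FileContext fc = FileContext.getLocalFSFileContext();
    Path p = new Path("/tmp/filecontext-demo.txt"); // illustrative path

    // Create (or overwrite) the file and write a few bytes
    FSDataOutputStream out =
        fc.create(p, EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE));
    out.writeUTF("hello");
    out.close();

    // Read the bytes back
    FSDataInputStream in = fc.open(p);
    System.out.println(in.readUTF()); // prints "hello"
    in.close();

    fc.delete(p, false); // clean up
  }
}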

Project: hadoop    File: TestChRootedFs.java
@Before
public void setUp() throws Exception {
  // create the test root on local_fs
  fcTarget = FileContext.getLocalFSFileContext();
  chrootedTo = fileContextTestHelper.getAbsoluteTestRootPath(fcTarget);
  // In case previous test was killed before cleanup
  fcTarget.delete(chrootedTo, true);

  fcTarget.mkdir(chrootedTo, FileContext.DEFAULT_PERM, true);

  Configuration conf = new Configuration();

  // ChRoot to the root of the testDirectory
  fc = FileContext.getFileContext(
      new ChRootedFs(fcTarget.getDefaultFileSystem(), chrootedTo), conf);
}
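
A hedged sketch of what the chrooted context above implies, reusing the fixture's fc, fcTarget, and chrootedTo (the same behavior is exercised by the testMkdirDelete snippet further down): a path handed to fc resolves under chrootedTo on the target file system.

  // Hypothetical follow-on to setUp(): "/dirX" through the chrooted context
  // materializes as <chrootedTo>/dirX on the underlying local file system.
  fc.mkdir(new Path("/dirX"), FileContext.DEFAULT_PERM, false);
  Assert.assertTrue(fcTarget.util().exists(new Path(chrootedTo, "dirX")));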
Project: hadoop    File: LoadGeneratorMR.java
@Override
public void close() throws IOException {
  // Output the results to a file named "Results" in the output dir
  FileContext fc;
  try {
    fc = FileContext.getFileContext(jobConf);
  } catch (IOException ioe) {
    System.err.println("Can not initialize the file system: " + 
        ioe.getLocalizedMessage());
    return;
  }
  FSDataOutputStream o = fc.create(FileOutputFormat.getTaskOutputPath(jobConf, "Results"),
      EnumSet.of(CreateFlag.CREATE));

  PrintStream out = new PrintStream(o);
  printResults(out);
  out.close();
  o.close();
}
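
Since the snippet above closes its streams by hand and leaks them if printResults throws, a behavior-preserving variant of the tail with try-with-resources (assuming the same fc, jobConf, and printResults) might look like:

  // Sketch: both streams are closed even if printResults throws.
  try (FSDataOutputStream o = fc.create(
           FileOutputFormat.getTaskOutputPath(jobConf, "Results"),
           EnumSet.of(CreateFlag.CREATE));
       PrintStream out = new PrintStream(o)) {
    printResults(out);
  }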
Project: hadoop-oss    File: ViewFsBaseTest.java
@Test
public void testResolvePathThroughMountPoints() throws IOException {
  fileContextTestHelper.createFile(fcView, "/user/foo");
  Assert.assertEquals(new Path(targetTestRoot,"user/foo"),
                        fcView.resolvePath(new Path("/user/foo")));

  fcView.mkdir(
      fileContextTestHelper.getTestRootPath(fcView, "/user/dirX"),
      FileContext.DEFAULT_PERM, false);
  Assert.assertEquals(new Path(targetTestRoot,"user/dirX"),
      fcView.resolvePath(new Path("/user/dirX")));


  fcView.mkdir(
      fileContextTestHelper.getTestRootPath(fcView, "/user/dirX/dirY"),
      FileContext.DEFAULT_PERM, false);
  Assert.assertEquals(new Path(targetTestRoot,"user/dirX/dirY"),
      fcView.resolvePath(new Path("/user/dirX/dirY")));
}
Project: hadoop    File: LoadGeneratorMR.java
/** Main function called by the tool runner.
 * It first initializes data by parsing the command line arguments,
 * then submits the load generation as a MapReduce job.
 */
@Override
public int run(String[] args) throws Exception {
  int exitCode = parseArgsMR(args);
  if (exitCode != 0) {
    return exitCode;
  }
  System.out.println("Running LoadGeneratorMR against fileSystem: " + 
  FileContext.getFileContext().getDefaultFileSystem().getUri());

  return submitAsMapReduce(); // reducer will print the results
}
Project: hadoop-oss    File: TestChRootedFs.java
@Before
public void setUp() throws Exception {
  // create the test root on local_fs
  fcTarget = FileContext.getLocalFSFileContext();
  chrootedTo = fileContextTestHelper.getAbsoluteTestRootPath(fcTarget);
  // In case previous test was killed before cleanup
  fcTarget.delete(chrootedTo, true);

  fcTarget.mkdir(chrootedTo, FileContext.DEFAULT_PERM, true);

  Configuration conf = new Configuration();

  // ChRoot to the root of the testDirectory
  fc = FileContext.getFileContext(
      new ChRootedFs(fcTarget.getDefaultFileSystem(), chrootedTo), conf);
}
Project: hadoop-oss    File: TestChRootedFs.java
@Test
public void testMkdirDelete() throws IOException {
  fc.mkdir(fileContextTestHelper.getTestRootPath(fc, "/dirX"), FileContext.DEFAULT_PERM, false);
  Assert.assertTrue(isDir(fc, new Path("/dirX")));
  Assert.assertTrue(isDir(fcTarget, new Path(chrootedTo,"dirX")));

  fc.mkdir(fileContextTestHelper.getTestRootPath(fc, "/dirX/dirY"), FileContext.DEFAULT_PERM, false);
  Assert.assertTrue(isDir(fc, new Path("/dirX/dirY")));
  Assert.assertTrue(isDir(fcTarget, new Path(chrootedTo,"dirX/dirY")));


  // Delete the created dir
  Assert.assertTrue(fc.delete(new Path("/dirX/dirY"), false));
  Assert.assertFalse(exists(fc, new Path("/dirX/dirY")));
  Assert.assertFalse(exists(fcTarget, new Path(chrootedTo,"dirX/dirY")));

  Assert.assertTrue(fc.delete(new Path("/dirX"), false));
  Assert.assertFalse(exists(fc, new Path("/dirX")));
  Assert.assertFalse(exists(fcTarget, new Path(chrootedTo,"dirX")));

}
Project: hadoop-oss    File: TestChRootedFs.java
@Test
public void testRename() throws IOException {
  // Rename a file
  fileContextTestHelper.createFile(fc, "/newDir/foo");
  fc.rename(new Path("/newDir/foo"), new Path("/newDir/fooBar"));
  Assert.assertFalse(exists(fc, new Path("/newDir/foo")));
  Assert.assertFalse(exists(fcTarget, new Path(chrootedTo,"newDir/foo")));
  Assert.assertTrue(isFile(fc, fileContextTestHelper.getTestRootPath(fc,"/newDir/fooBar")));
  Assert.assertTrue(isFile(fcTarget, new Path(chrootedTo,"newDir/fooBar")));


  // Rename a dir
  fc.mkdir(new Path("/newDir/dirFoo"), FileContext.DEFAULT_PERM, false);
  fc.rename(new Path("/newDir/dirFoo"), new Path("/newDir/dirFooBar"));
  Assert.assertFalse(exists(fc, new Path("/newDir/dirFoo")));
  Assert.assertFalse(exists(fcTarget, new Path(chrootedTo,"newDir/dirFoo")));
  Assert.assertTrue(isDir(fc, fileContextTestHelper.getTestRootPath(fc,"/newDir/dirFooBar")));
  Assert.assertTrue(isDir(fcTarget, new Path(chrootedTo,"newDir/dirFooBar")));
}
Project: hadoop    File: TestWriteRead.java
@Before
public void initJunitModeTest() throws Exception {
  LOG.info("initJunitModeTest");

  conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); // 100K block size

  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  cluster.waitActive();

  mfs = cluster.getFileSystem();
  mfc = FileContext.getFileContext();

  Path rootdir = new Path(ROOT_DIR);
  mfs.mkdirs(rootdir);
}
Project: hadoop    File: TestPermissionSymlinks.java
private void doRenameLinkTargetNotWritableFC() throws Exception {
  // Rename the link when the target and parent are not writable
  user.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws IOException {
      // First FileContext
      FileContext myfc = FileContext.getFileContext(conf);
      Path newlink = new Path(linkParent, "newlink");
      myfc.rename(link, newlink, Rename.NONE);
      Path linkTarget = myfc.getLinkTarget(newlink);
      assertEquals("Expected link's target to match target!",
          target, linkTarget);
      return null;
    }
  });
  assertTrue("Expected target to exist", wrapper.exists(target));
}
Project: hadoop    File: HistoryFileManager.java
private void mkdir(FileContext fc, Path path, FsPermission fsp)
    throws IOException {
  if (!fc.util().exists(path)) {
    try {
      fc.mkdir(path, fsp, true);

      FileStatus fsStatus = fc.getFileStatus(path);
      LOG.info("Perms after creating " + fsStatus.getPermission().toShort()
          + ", Expected: " + fsp.toShort());
      if (fsStatus.getPermission().toShort() != fsp.toShort()) {
        LOG.info("Explicitly setting permissions to : " + fsp.toShort()
            + ", " + fsp);
        fc.setPermission(path, fsp);
      }
    } catch (FileAlreadyExistsException e) {
      LOG.info("Directory: [" + path + "] already exists.");
    }
  }
}
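
The explicit setPermission call above exists because the file system applies the process umask when creating a directory, which can leave fewer permission bits than requested. A brief illustration with made-up values:

  // Hypothetical values: mkdir requests 0770, but under a 022 umask the
  // directory may come back as 0750, so the caller compares and corrects.
  FsPermission requested = new FsPermission((short) 0770);
  FsPermission onDisk = fc.getFileStatus(path).getPermission();
  if (onDisk.toShort() != requested.toShort()) {
    fc.setPermission(path, requested); // force the exact requested bits
  }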
Project: hadoop    File: TestLinuxContainerExecutor.java
@Test
public void testNonSecureRunAsSubmitter() throws Exception {
  Assume.assumeTrue(shouldRun());
  Assume.assumeFalse(UserGroupInformation.isSecurityEnabled());
  String expectedRunAsUser = appSubmitter;
  conf.set(YarnConfiguration.NM_NONSECURE_MODE_LIMIT_USERS, "false");
  exec.setConf(conf);
  File touchFile = new File(workSpace, "touch-file");
  int ret = runAndBlock("touch", touchFile.getAbsolutePath());

  assertEquals(0, ret);
  FileStatus fileStatus =
      FileContext.getLocalFSFileContext().getFileStatus(
        new Path(touchFile.getAbsolutePath()));
  assertEquals(expectedRunAsUser, fileStatus.getOwner());
  cleanupAppFiles(expectedRunAsUser);
  // reset conf
  conf.unset(YarnConfiguration.NM_NONSECURE_MODE_LIMIT_USERS);
  exec.setConf(conf);
}
Project: hadoop    File: TestFSDownload.java
static LocalResource createJar(FileContext files, Path p,
    LocalResourceVisibility vis) throws IOException {
  LOG.info("Create jar file " + p);
  File jarFile = new File((files.makeQualified(p)).toUri());
  FileOutputStream stream = new FileOutputStream(jarFile);
  LOG.info("Create jar out stream ");
  JarOutputStream out = new JarOutputStream(stream, new Manifest());
  LOG.info("Done writing jar stream ");
  out.close();
  LocalResource ret = recordFactory.newRecordInstance(LocalResource.class);
  ret.setResource(ConverterUtils.getYarnUrlFromPath(p));
  FileStatus status = files.getFileStatus(p);
  ret.setSize(status.getLen());
  ret.setTimestamp(status.getModificationTime());
  ret.setType(LocalResourceType.PATTERN);
  ret.setVisibility(vis);
  ret.setPattern("classes/.*");
  return ret;
}
Project: hadoop    File: TestFSDownload.java
static LocalResource createJarFile(FileContext files, Path p, int len,
    Random r, LocalResourceVisibility vis) throws IOException,
    URISyntaxException {
  byte[] bytes = new byte[len];
  r.nextBytes(bytes);

  File archiveFile = new File(p.toUri().getPath() + ".jar");
  archiveFile.createNewFile();
  JarOutputStream out = new JarOutputStream(
      new FileOutputStream(archiveFile));
  out.putNextEntry(new JarEntry(p.getName()));
  out.write(bytes);
  out.closeEntry();
  out.close();

  LocalResource ret = recordFactory.newRecordInstance(LocalResource.class);
  ret.setResource(ConverterUtils.getYarnUrlFromPath(new Path(p.toString()
      + ".jar")));
  ret.setSize(len);
  ret.setType(LocalResourceType.ARCHIVE);
  ret.setVisibility(vis);
  ret.setTimestamp(files.getFileStatus(new Path(p.toString() + ".jar"))
      .getModificationTime());
  return ret;
}
Project: hadoop    File: TestViewFsWithXAttrs.java
@Before
public void setUp() throws Exception {
  fcTarget = fc;
  fcTarget2 = fc2;
  targetTestRoot = fileContextTestHelper.getAbsoluteTestRootPath(fc);
  targetTestRoot2 = fileContextTestHelper.getAbsoluteTestRootPath(fc2);

  fcTarget.delete(targetTestRoot, true);
  fcTarget2.delete(targetTestRoot2, true);
  fcTarget.mkdir(targetTestRoot, new FsPermission((short) 0750), true);
  fcTarget2.mkdir(targetTestRoot2, new FsPermission((short) 0750), true);

  fsViewConf = ViewFileSystemTestSetup.createConfig();
  setupMountPoints();
  fcView = FileContext.getFileContext(FsConstants.VIEWFS_URI, fsViewConf);
}
Project: hadoop    File: TestPermissionSymlinks.java
private void doRenameSrcNotWritableFC() throws Exception {
  // Try to rename the link when the link's parent is not writable
  try {
    user.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws IOException {
        FileContext myfc = FileContext.getFileContext(conf);
        Path newlink = new Path(targetParent, "newlink");
        myfc.rename(link, newlink, Rename.NONE);
        return null;
      }
    });
    fail("Renamed link even though link's parent is not writable!");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("Permission denied", e);
  }
}
Project: hadoop    File: ResourceLocalizationService.java
private void cleanUpLocalDir(FileContext lfs, DeletionService del,
    String localDir) {
  long currentTimeStamp = System.currentTimeMillis();
  renameLocalDir(lfs, localDir, ContainerLocalizer.USERCACHE,
    currentTimeStamp);
  renameLocalDir(lfs, localDir, ContainerLocalizer.FILECACHE,
    currentTimeStamp);
  renameLocalDir(lfs, localDir, ResourceLocalizationService.NM_PRIVATE_DIR,
    currentTimeStamp);
  try {
    deleteLocalDir(lfs, del, localDir);
  } catch (IOException e) {
    // Do nothing, just give the warning
    LOG.warn("Failed to delete localDir: " + localDir);
  }
}
Project: hadoop    File: DataGenerator.java
/** Parse the command line arguments and initialize the data */
private int init(String[] args) {
  try { // initialize file system handle
    fc = FileContext.getFileContext(getConf());
  } catch (IOException ioe) {
    System.err.println("Can not initialize the file system: " + 
        ioe.getLocalizedMessage());
    return -1;
  }

  for (int i = 0; i < args.length; i++) { // parse command line
    if (args[i].equals("-root")) {
      root = new Path(args[++i]);
    } else if (args[i].equals("-inDir")) {
      inDir = new File(args[++i]);
    } else {
      System.err.println(USAGE);
      ToolRunner.printGenericCommandUsage(System.err);
      System.exit(-1);
    }
  }
  return 0;
}
Project: hadoop    File: HistoryFileManager.java
@VisibleForTesting
protected static List<FileStatus> scanDirectory(Path path, FileContext fc,
    PathFilter pathFilter) throws IOException {
  path = fc.makeQualified(path);
  List<FileStatus> jhStatusList = new ArrayList<FileStatus>();
  try {
    RemoteIterator<FileStatus> fileStatusIter = fc.listStatus(path);
    while (fileStatusIter.hasNext()) {
      FileStatus fileStatus = fileStatusIter.next();
      Path filePath = fileStatus.getPath();
      if (fileStatus.isFile() && pathFilter.accept(filePath)) {
        jhStatusList.add(fileStatus);
      }
    }
  } catch (FileNotFoundException fe) {
    LOG.error("Error while scanning directory " + path, fe);
  }
  return jhStatusList;
}
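
A hedged usage sketch for the method above: PathFilter declares a single accept(Path) method, so on Java 8+ a lambda can serve as the filter. The directory and the ".jhist" suffix here are illustrative, not mandated by the API.

  // Hypothetical call: collect job-history files directly under a done-dir.
  FileContext fc = FileContext.getFileContext(conf);
  List<FileStatus> jhist = scanDirectory(
      new Path("/mr-history/done"), fc,
      p -> p.getName().endsWith(".jhist"));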
Project: hadoop    File: TestPermissionSymlinks.java
private void doDeleteLinkParentNotWritable() throws Exception {
  // Try to delete where the symlink's parent dir is not writable
  try {
    user.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws IOException {
        FileContext myfc = FileContext.getFileContext(conf);
        myfc.delete(link, false);
        return null;
      }
    });
    fail("Deleted symlink without write permissions on parent!");
  } catch (AccessControlException e) {
    GenericTestUtils.assertExceptionContains("Permission denied", e);
  }
}
Project: hadoop    File: DirectoryCollection.java
/**
 * Create any non-existent directories and parent directories, updating the
 * list of valid directories if necessary.
 * @param localFs local file system to use
 * @param perm absolute permissions to use for any directories created
 * @return true if there were no errors, false if at least one error occurred
 */
synchronized boolean createNonExistentDirs(FileContext localFs,
    FsPermission perm) {
  boolean failed = false;
  for (final String dir : localDirs) {
    try {
      createDir(localFs, new Path(dir), perm);
    } catch (IOException e) {
      LOG.warn("Unable to create directory " + dir + " error " +
          e.getMessage() + ", removing from the list of valid directories.");
      localDirs.remove(dir);
      errorDirs.add(dir);
      numFailures++;
      failed = true;
    }
  }
  return !failed;
}
Project: hadoop    File: TestContainerLocalizer.java
@Test
@SuppressWarnings("unchecked") // mocked generics
public void testContainerLocalizerClosesFilesystems() throws Exception {
  // verify filesystems are closed when localizer doesn't fail
  FileContext fs = FileContext.getLocalFSFileContext();
  spylfs = spy(fs.getDefaultFileSystem());
  ContainerLocalizer localizer = setupContainerLocalizerForTest();
  doNothing().when(localizer).localizeFiles(any(LocalizationProtocol.class),
      any(CompletionService.class), any(UserGroupInformation.class));
  verify(localizer, never()).closeFileSystems(
      any(UserGroupInformation.class));
  localizer.runLocalization(nmAddr);
  verify(localizer).closeFileSystems(any(UserGroupInformation.class));

  spylfs = spy(fs.getDefaultFileSystem());
  // verify filesystems are closed when localizer fails
  localizer = setupContainerLocalizerForTest();
  doThrow(new YarnRuntimeException("Forced Failure")).when(localizer).localizeFiles(
      any(LocalizationProtocol.class), any(CompletionService.class),
      any(UserGroupInformation.class));
  verify(localizer, never()).closeFileSystems(
      any(UserGroupInformation.class));
  localizer.runLocalization(nmAddr);
  verify(localizer).closeFileSystems(any(UserGroupInformation.class));
}
Project: hadoop    File: TestLinuxContainerExecutor.java
@Test
public void testContainerLaunch() throws Exception {
  Assume.assumeTrue(shouldRun());
  String expectedRunAsUser =
      conf.get(YarnConfiguration.NM_NONSECURE_MODE_LOCAL_USER_KEY,
        YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER);

  File touchFile = new File(workSpace, "touch-file");
  int ret = runAndBlock("touch", touchFile.getAbsolutePath());

  assertEquals(0, ret);
  FileStatus fileStatus =
      FileContext.getLocalFSFileContext().getFileStatus(
        new Path(touchFile.getAbsolutePath()));
  assertEquals(expectedRunAsUser, fileStatus.getOwner());
  cleanupAppFiles(expectedRunAsUser);

}
Project: hadoop    File: TestReservedRawPaths.java
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  File testRootDir = new File(testRoot).getAbsoluteFile();
  final Path jksPath = new Path(testRootDir.toString(), "test.jks");
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri()
  );
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
  fs = cluster.getFileSystem();
  fsWrapper = new FileSystemTestWrapper(cluster.getFileSystem());
  fcWrapper = new FileContextTestWrapper(
      FileContext.getFileContext(cluster.getURI(), conf));
  dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
  // Need to set the client's KeyProvider to the NN's for JKS,
  // else the updates do not get flushed properly
  fs.getClient().setKeyProvider(cluster.getNameNode().getNamesystem()
      .getProvider());
  DFSTestUtil.createKey(TEST_KEY, cluster, conf);
}
Project: hadoop    File: TestJobHistoryUtils.java
private Path createPath(FileContext fc, Path root, String year, String month,
                        String day, String id) throws IOException {
  Path path = new Path(root, year + Path.SEPARATOR + month + Path.SEPARATOR +
          day + Path.SEPARATOR + id);
  fc.mkdir(path, FsPermission.getDirDefault(), true);
  return path;
}
Project: hadoop-oss    File: ViewFsBaseTest.java
@Test(expected=FileNotFoundException.class) 
public void testResolvePathMissingThroughMountPoints2() throws IOException {
  fcView.mkdir(
      fileContextTestHelper.getTestRootPath(fcView, "/user/dirX"),
      FileContext.DEFAULT_PERM, false);
  fcView.resolvePath(new Path("/user/dirX/nonExisting"));
}
Project: hadoop    File: LocalContainerLauncher.java
public LocalContainerLauncher(AppContext context,
                              TaskUmbilicalProtocol umbilical) {
  super(LocalContainerLauncher.class.getName());
  this.context = context;
  this.umbilical = umbilical;
      // umbilical:  MRAppMaster creates (taskAttemptListener), passes to us
      // (TODO/FIXME:  pointless to use RPC to talk to self; should create
      // LocalTaskAttemptListener or similar:  implement umbilical protocol
      // but skip RPC stuff)

  try {
    curFC = FileContext.getFileContext(curDir.toURI());
  } catch (UnsupportedFileSystemException ufse) {
    LOG.error("Local filesystem " + curDir.toURI().toString()
              + " is unsupported?? (should never happen)");
  }

  // Save list of files/dirs that are supposed to be present so can delete
  // any extras created by one task before starting subsequent task.  Note
  // that there's no protection against deleted or renamed localization;
  // users who do that get what they deserve (and will have to disable
  // uberization in order to run correctly).
  File[] curLocalFiles = curDir.listFiles();
  localizedFiles = new HashSet<File>(curLocalFiles.length);
  for (int j = 0; j < curLocalFiles.length; ++j) {
    localizedFiles.add(curLocalFiles[j]);
  }

  // Relocalization note/future FIXME (per chrisdo, 20110315):  At moment,
  // full localization info is in AppSubmissionContext passed from client to
  // RM and then to NM for AM-container launch:  no difference between AM-
  // localization and MapTask- or ReduceTask-localization, so can assume all
  // OK.  Longer-term, will need to override uber-AM container-localization
  // request ("needed resources") with union of regular-AM-resources + task-
  // resources (and, if maps and reduces ever differ, then union of all three
  // types), OR will need localizer service/API that uber-AM can request
  // after running (e.g., "localizeForTask()" or "localizeForMapTask()").
}
Project: hadoop    File: TestNodeHealthScriptRunner.java
@After
public void tearDown() throws Exception {
  if (testRootDir.exists()) {
    FileContext.getLocalFSFileContext().delete(
        new Path(testRootDir.getAbsolutePath()), true);
  }
}
Project: hadoop    File: JobImpl.java
@Override
public Configuration loadConfFile() throws IOException {
  Path confPath = getConfFile();
  FileContext fc = FileContext.getFileContext(confPath.toUri(), conf);
  Configuration jobConf = new Configuration(false);
  jobConf.addResource(fc.open(confPath), confPath.toString());
  return jobConf;
}
Project: hadoop    File: TestViewFsURIs.java
@Test
public void testURIEmptyPath() throws Exception {
  Configuration conf = new Configuration();
  ConfigUtil.addLink(conf, "/user", new URI("file://foo"));

  FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
}
Project: hadoop-oss    File: TestViewFsURIs.java
@Test
public void testURIEmptyPath() throws Exception {
  Configuration conf = new Configuration();
  ConfigUtil.addLink(conf, "/user", new URI("file://foo"));

  FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
}
Project: hadoop-oss    File: ViewFsTestSetup.java
static public FileContext setupForViewFsLocalFs(FileContextTestHelper helper) throws Exception {
  /**
   * create the test root on local_fs - the mount table will point here
   */
  FileContext fsTarget = FileContext.getLocalFSFileContext();
  Path targetOfTests = helper.getTestRootPath(fsTarget);
  // In case previous test was killed before cleanup
  fsTarget.delete(targetOfTests, true);

  fsTarget.mkdir(targetOfTests, FileContext.DEFAULT_PERM, true);
  Configuration conf = new Configuration();

  // Set up viewfs link for test dir as described above
  String testDir = helper.getTestRootPath(fsTarget).toUri()
      .getPath();
  linkUpFirstComponents(conf, testDir, fsTarget, "test dir");


  // Set up viewfs link for home dir as described above
  setUpHomeDir(conf, fsTarget);

  // the test path may be relative to working dir - we need to make that work:
  // Set up viewfs link for wd as described above
  String wdDir = fsTarget.getWorkingDirectory().toUri().getPath();
  linkUpFirstComponents(conf, wdDir, fsTarget, "working dir");

  FileContext fc = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
  fc.setWorkingDirectory(new Path(wdDir)); // in case testdir relative to wd.
  Log.info("Working dir is: " + fc.getWorkingDirectory());
  //System.out.println("SRCOfTests = "+ getTestRootPath(fc, "test"));
  //System.out.println("TargetOfTests = "+ targetOfTests.toUri());
  return fc;
}
Project: hadoop-oss    File: ViewFsTestSetup.java
static void setUpHomeDir(Configuration conf, FileContext fsTarget) {
  String homeDir = fsTarget.getHomeDirectory().toUri().getPath();
  int indexOf2ndSlash = homeDir.indexOf('/', 1);
  if (indexOf2ndSlash >0) {
    linkUpFirstComponents(conf, homeDir, fsTarget, "home dir");
  } else { // home dir is at root. Just link the home dir itself
    URI linkTarget = fsTarget.makeQualified(new Path(homeDir)).toUri();
    ConfigUtil.addLink(conf, homeDir, linkTarget);
    Log.info("Added link for home dir " + homeDir + "->" + linkTarget);
  }
  // Now set the root of the home dir for viewfs
  String homeDirRoot = fsTarget.getHomeDirectory().getParent().toUri().getPath();
  ConfigUtil.setHomeDirConf(conf, homeDirRoot);
  Log.info("Home dir base for viewfs" + homeDirRoot);  
}
Project: hadoop    File: ViewFsBaseTest.java
@Before
public void setUp() throws Exception {
  initializeTargetTestRoot();

  // Make user and data dirs - we create links to them in the mount table
  fcTarget.mkdir(new Path(targetTestRoot,"user"),
      FileContext.DEFAULT_PERM, true);
  fcTarget.mkdir(new Path(targetTestRoot,"data"),
      FileContext.DEFAULT_PERM, true);
  fcTarget.mkdir(new Path(targetTestRoot,"dir2"),
      FileContext.DEFAULT_PERM, true);
  fcTarget.mkdir(new Path(targetTestRoot,"dir3"),
      FileContext.DEFAULT_PERM, true);
  FileContextTestHelper.createFile(fcTarget, new Path(targetTestRoot,"aFile"));


  // Now we use the mount fs to set links to user and dir
  // in the test root

  // Set up the defaultMT in the config with our mount point links
  conf = new Configuration();
  ConfigUtil.addLink(conf, "/targetRoot", targetTestRoot.toUri());
  ConfigUtil.addLink(conf, "/user",
      new Path(targetTestRoot,"user").toUri());
  ConfigUtil.addLink(conf, "/user2",
      new Path(targetTestRoot,"user").toUri());
  ConfigUtil.addLink(conf, "/data",
      new Path(targetTestRoot,"data").toUri());
  ConfigUtil.addLink(conf, "/internalDir/linkToDir2",
      new Path(targetTestRoot,"dir2").toUri());
  ConfigUtil.addLink(conf, "/internalDir/internalDir2/linkToDir3",
      new Path(targetTestRoot,"dir3").toUri());
  ConfigUtil.addLink(conf, "/danglingLink",
      new Path(targetTestRoot,"missingTarget").toUri());
  ConfigUtil.addLink(conf, "/linkToAFile",
      new Path(targetTestRoot,"aFile").toUri());

  fcView = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
  // Also try viewfs://default/    - note authority is name of mount table
}
Project: hadoop-oss    File: TestChRootedFs.java
/**
 * We would have liked renames across file systems to fail, but
 * unfortunately there is no way to distinguish the two file systems.
 * @throws IOException
 */
@Test
public void testRenameAcrossFs() throws IOException {
  fc.mkdir(new Path("/newDir/dirFoo"), FileContext.DEFAULT_PERM, true);
  // the root will get interpreted to the root of the chrooted fs.
  fc.rename(new Path("/newDir/dirFoo"), new Path("file:///dirFooBar"));
  FileContextTestHelper.isDir(fc, new Path("/dirFooBar"));
}
Project: hadoop    File: JobHistoryUtils.java
/**
 * Ensure that the path belongs to the cluster's default file system unless
 * 1. it is already fully qualified,
 * 2. the current job configuration uses the default file system, or
 * 3. we are running from a test case without core-site.xml.
 *
 * @param sourcePath source path
 * @param conf the job configuration
 * @return fully qualified path (if necessary) in the default file system
 */
private static String ensurePathInDefaultFileSystem(String sourcePath, Configuration conf) {
  Path path = new Path(sourcePath);
  FileContext fc = getDefaultFileContext();
  if (fc == null ||
      fc.getDefaultFileSystem().getUri().toString().equals(
          conf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, "")) ||
      path.toUri().getAuthority() != null ||
      path.toUri().getScheme()!= null) {
    return sourcePath;
  }

  return fc.makeQualified(path).toString();
}
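
To illustrate the qualification step at the end, a hedged example with a made-up NameNode authority:

  // Assuming fs.defaultFS = hdfs://nn.example.com:8020 (hypothetical):
  FileContext fc = FileContext.getFileContext(conf);
  fc.makeQualified(new Path("/history/done"));
  //   -> hdfs://nn.example.com:8020/history/done
  fc.makeQualified(new Path("file:///tmp/local"));
  //   -> keeps its explicit scheme; already qualified, returned unchanged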
Project: hadoop-oss    File: LoadGenerator.java
/** Main function called by the tool runner.
 * It first initializes data by parsing the command line arguments,
 * then runs the load generator against the NameNode and prints the results.
 */
@Override
public int run(String[] args) throws Exception {
  int exitCode = parseArgs(false, args);
  if (exitCode != 0) {
    return exitCode;
  }
  System.out.println("Running LoadGenerator against fileSystem: " + 
  FileContext.getFileContext().getDefaultFileSystem().getUri());
  exitCode = generateLoadOnNN();
  printResults(System.out);
  return exitCode;
}
Project: hadoop    File: TestViewFsHdfs.java
@BeforeClass
public static void clusterSetupAtBegining() throws IOException,
    LoginException, URISyntaxException {
  SupportsBlocks = true;
  CONF.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);

  cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
  cluster.waitClusterUp();
  fc = FileContext.getFileContext(cluster.getURI(0), CONF);
  Path defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + 
      UserGroupInformation.getCurrentUser().getShortUserName()));
  fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
}
Project: hadoop    File: TestFsck.java
/** Test fsck with symlinks in the filesystem */
@Test
public void testFsckSymlink() throws Exception {
  final DFSTestUtil util = new DFSTestUtil.Builder().
      setName(getClass().getSimpleName()).setNumFiles(1).build();
  final Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);

  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    final long precision = 1L;
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    fs = cluster.getFileSystem();
    final String fileName = "/srcdat";
    util.createFiles(fs, fileName);
    final FileContext fc = FileContext.getFileContext(
        cluster.getConfiguration(0));
    final Path file = new Path(fileName);
    final Path symlink = new Path("/srcdat-symlink");
    fc.createSymlink(file, symlink, false);
    util.waitReplication(fs, fileName, (short)3);
    long aTime = fc.getFileStatus(symlink).getAccessTime();
    Thread.sleep(precision);
    setupAuditLogs();
    String outStr = runFsck(conf, 0, true, "/");
    verifyAuditLogs();
    assertEquals(aTime, fc.getFileStatus(symlink).getAccessTime());
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    assertTrue(outStr.contains("Total symlinks:\t\t1"));
    util.cleanup(fs, fileName);
  } finally {
    if (fs != null) {try{fs.close();} catch(Exception e){}}
    if (cluster != null) { cluster.shutdown(); }
  }
}
Project: hadoop    File: TestDockerContainerExecutor.java
@Before
public void setup() {
  try {
    lfs = FileContext.getLocalFSFileContext();
    workDir = new Path("/tmp/temp-" + System.currentTimeMillis());
    workSpace = new File(workDir.toUri().getPath());
    lfs.mkdir(workDir, FsPermission.getDirDefault(), true);
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
  Configuration conf = new Configuration();
  yarnImage = "yarnImage";
  long time = System.currentTimeMillis();
  conf.set(YarnConfiguration.NM_LOCAL_DIRS, "/tmp/nm-local-dir" + time);
  conf.set(YarnConfiguration.NM_LOG_DIRS, "/tmp/userlogs" + time);

  dockerUrl = System.getProperty("docker-service-url");
  LOG.info("dockerUrl: " + dockerUrl);
  if (Strings.isNullOrEmpty(dockerUrl)) {
    return;
  }
  dockerUrl = " -H " + dockerUrl;
  dockerExec = "docker " + dockerUrl;
  conf.set(YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME, yarnImage);
  conf.set(YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_EXEC_NAME, dockerExec);
  exec = new DockerContainerExecutor();
  dirsHandler = new LocalDirsHandlerService();
  dirsHandler.init(conf);
  exec.setConf(conf);
  appSubmitter = System.getProperty("application.submitter");
  if (appSubmitter == null || appSubmitter.isEmpty()) {
    appSubmitter = "nobody";
  }
  shellExec(dockerExec + " pull " + testImage);

}
Project: elasticsearch_my    File: HdfsBlobStoreContainerTests.java
@Override
protected BlobStore newBlobStore() throws IOException {
    FileContext fileContext;
    try {
        fileContext = AccessController.doPrivileged((PrivilegedExceptionAction<FileContext>)
            () -> createContext(new URI("hdfs:///")));
    } catch (PrivilegedActionException e) {
        throw new RuntimeException(e.getCause());
    }
    return new HdfsBlobStore(fileContext, "temp", 1024);
}