Java 类 org.apache.hadoop.fs.permission.FsAction 实例源码

项目:hadoop    文件:TestHttpFSFileSystemLocalFileSystem.java   
@Override
protected void testSetPermission() throws Exception {
  if (Path.WINDOWS) {
    // On Windows the superclass test exercises the sticky bit, which the
    // local file system does not support there; run a reduced check instead.
    FileSystem fs = FileSystem.get(getProxiedFSConf());
    Path path = new Path(getProxiedFSTestDir(), "foodir");
    fs.mkdirs(path);

    // Set rw------- through the HttpFS client...
    fs = getHttpFSFileSystem();
    FsPermission permission1 = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
    fs.setPermission(path, permission1);
    fs.close();

    // ...then read the permission back through the proxied file system.
    fs = FileSystem.get(getProxiedFSConf());
    FileStatus status1 = fs.getFileStatus(path);
    fs.close();
    FsPermission permission2 = status1.getPermission();
    // JUnit convention: expected value first, actual second, so a failure
    // message reads "expected <set value> but was <observed value>".
    Assert.assertEquals(permission1, permission2);

    // sticky bit not supported on Windows with local file system, so the
    // subclass skips that part of the test
  } else {
    super.testSetPermission();
  }
}
项目:hadoop-oss    文件:FileSystem.java   
/**
 * This method provides the default implementation of
 * {@link #access(Path, FsAction)}.
 *
 * <p>Exactly one permission class applies — owner, then group, then other —
 * and only that class's bits are consulted, mirroring POSIX semantics.
 *
 * @param stat FileStatus to check
 * @param mode type of access to check
 * @throws IOException for any error
 */
@InterfaceAudience.Private
static void checkAccessPermissions(FileStatus stat, FsAction mode)
    throws IOException {
  FsPermission perm = stat.getPermission();
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  String user = ugi.getShortUserName();
  List<String> groups = Arrays.asList(ugi.getGroupNames());

  // Select the single applicable permission class for the caller.
  final FsAction granted;
  if (user.equals(stat.getOwner())) {
    granted = perm.getUserAction();
  } else if (groups.contains(stat.getGroup())) {
    granted = perm.getGroupAction();
  } else {
    granted = perm.getOtherAction();
  }
  if (granted.implies(mode)) {
    return;
  }

  // Denied: report an ls-style rendering of the path's owner/group/mode.
  throw new AccessControlException(String.format(
    "Permission denied: user=%s, path=\"%s\":%s:%s:%s%s", user, stat.getPath(),
    stat.getOwner(), stat.getGroup(), stat.isDirectory() ? "d" : "-", perm));
}
项目:hadoop    文件:FileSystem.java   
/**
 * This method provides the default implementation of
 * {@link #access(Path, FsAction)}.
 *
 * @param stat FileStatus to check
 * @param mode type of access to check
 * @throws IOException for any error
 */
@InterfaceAudience.Private
static void checkAccessPermissions(FileStatus stat, FsAction mode)
    throws IOException {
  FsPermission perm = stat.getPermission();
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  String user = ugi.getShortUserName();
  List<String> groups = Arrays.asList(ugi.getGroupNames());
  // POSIX-style check: exactly one class (owner, group, other) applies, and
  // only that class's bits are consulted — an owner lacking the bit is
  // denied even if "other" would have been allowed.
  if (user.equals(stat.getOwner())) {
    if (perm.getUserAction().implies(mode)) {
      return;
    }
  } else if (groups.contains(stat.getGroup())) {
    if (perm.getGroupAction().implies(mode)) {
      return;
    }
  } else {
    if (perm.getOtherAction().implies(mode)) {
      return;
    }
  }
  // No applicable class granted the requested action: fail with an
  // ls-style rendering of the path's owner, group and mode.
  throw new AccessControlException(String.format(
    "Permission denied: user=%s, path=\"%s\":%s:%s:%s%s", user, stat.getPath(),
    stat.getOwner(), stat.getGroup(), stat.isDirectory() ? "d" : "-", perm));
}
项目:hadoop-oss    文件:AclCommands.java   
/**
 * Prints a single extended ACL entry.  If the mask restricts the
 * permissions of the entry, then also prints the restricted version as the
 * effective permissions.  The mask applies to all named entries and also
 * the unnamed group entry.
 * @param aclStatus AclStatus for the path
 * @param fsPerm FsPermission for the path
 * @param entry AclEntry extended ACL entry to print
 */
private void printExtendedAclEntry(AclStatus aclStatus,
    FsPermission fsPerm, AclEntry entry) {
  // Only named entries and the unnamed group entry are subject to the mask.
  boolean subjectToMask =
      entry.getName() != null || entry.getType() == AclEntryType.GROUP;
  if (!subjectToMask) {
    out.println(entry);
    return;
  }
  FsAction entryPerm = entry.getPermission();
  FsAction effectivePerm = aclStatus.getEffectivePermission(entry, fsPerm);
  // FsAction is an enum, so reference comparison is sufficient here.
  if (entryPerm != effectivePerm) {
    out.println(String.format("%s\t#effective:%s", entry,
      effectivePerm.SYMBOL));
  } else {
    out.println(entry);
  }
}
项目:hadoop-oss    文件:TestAclCommands.java   
@Test
public void testMultipleAclSpecParsing() throws Exception {
  // Parse a spec that mixes an unnamed group entry, named user/group
  // entries, and a default-scoped entry.
  List<AclEntry> actual = AclEntry.parseAclSpec(
      "group::rwx,user:user1:rwx,user:user2:rw-,"
          + "group:group1:rw-,default:group:group1:rw-", true);

  List<AclEntry> expected = new ArrayList<AclEntry>();
  // Unnamed group entry: rwx.
  expected.add(new AclEntry.Builder().setType(AclEntryType.GROUP)
      .setPermission(FsAction.ALL).build());
  // Named user entries.
  expected.add(new AclEntry.Builder().setName("user1")
      .setType(AclEntryType.USER).setPermission(FsAction.ALL).build());
  expected.add(new AclEntry.Builder().setName("user2")
      .setType(AclEntryType.USER).setPermission(FsAction.READ_WRITE).build());
  // Named group entry.
  expected.add(new AclEntry.Builder().setName("group1")
      .setType(AclEntryType.GROUP).setPermission(FsAction.READ_WRITE).build());
  // Default-scoped named group entry.
  expected.add(new AclEntry.Builder().setName("group1")
      .setType(AclEntryType.GROUP).setPermission(FsAction.READ_WRITE)
      .setScope(AclEntryScope.DEFAULT).build());
  assertEquals("Parsed Acl not correct", expected, actual);
}
项目:hadoop    文件:FSDownload.java   
/**
 * Returns a boolean to denote whether a cache file is visible to all (public)
 * or not
 *
 * @return true if the path in the current path is visible to all, false
 * otherwise
 */
@Private
public static boolean isPublic(FileSystem fs, Path current, FileStatus sStat,
    LoadingCache<Path,Future<FileStatus>> statCache) throws IOException {
  current = fs.makeQualified(current);
  //the leaf level file should be readable by others
  // Directories must grant r-x to "other"; plain files just r. The helper
  // recurses through directory contents as well.
  if (!checkPublicPermsForAll(fs, sStat, FsAction.READ_EXECUTE, FsAction.READ)) {
    return false;
  }

  if (Shell.WINDOWS && fs instanceof LocalFileSystem) {
    // Relax the requirement for public cache on LFS on Windows since default
    // permissions are "700" all the way up to the drive letter. In this
    // model, the only requirement for a user is to give EVERYONE group
    // permission on the file and the file will be considered public.
    // This code path is only hit when fs.default.name is file:/// (mainly
    // in tests).
    return true;
  }
  // Additionally, every ancestor directory must be traversable by others.
  return ancestorsHaveExecutePermissions(fs, current.getParent(), statCache);
}
项目:hadoop    文件:FSDownload.java   
/**
 * Recursively verifies that "other" users hold the {@code dir} action on
 * every directory and the {@code file} action on every plain file in the
 * tree rooted at {@code status}.
 */
private static boolean checkPublicPermsForAll(FileSystem fs, 
      FileStatus status, FsAction dir, FsAction file) 
  throws IOException {
  FsAction otherAction = status.getPermission().getOtherAction();

  // Plain file: only the file-level action matters.
  if (!status.isDirectory()) {
    return otherAction.implies(file);
  }

  // Directory: it must grant the directory action itself...
  if (!otherAction.implies(dir)) {
    return false;
  }
  // ...and so must every entry beneath it.
  for (FileStatus child : fs.listStatus(status.getPath())) {
    if (!checkPublicPermsForAll(fs, child, dir, file)) {
      return false;
    }
  }
  return true;
}
项目:hadoop    文件:BaseTestHttpFSWith.java   
/**
 * Creates a file through the HttpFS file system with explicit permission,
 * replication and block size, then verifies the resulting status and content
 * through the proxied file system.
 *
 * @param path file to create
 * @param override whether the create call may overwrite an existing file
 */
private void testCreate(Path path, boolean override) throws Exception {
  FileSystem fs = getHttpFSFileSystem();
  FsPermission permission = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
  try {
    // try-with-resources guarantees the stream is closed even if write()
    // throws; the original leaked both the stream and fs on failure.
    try (OutputStream os = fs.create(new Path(path.toUri().getPath()), permission, override,
        1024, (short) 2, 100 * 1024 * 1024, null)) {
      os.write(1);
    }
  } finally {
    fs.close();
  }

  fs = FileSystem.get(getProxiedFSConf());
  try {
    FileStatus status = fs.getFileStatus(path);
    if (!isLocalFS()) {
      // Replication and block size are only meaningful on a real DFS.
      // JUnit convention: expected value first, actual second.
      Assert.assertEquals(2, status.getReplication());
      Assert.assertEquals(100 * 1024 * 1024, status.getBlockSize());
    }
    Assert.assertEquals(permission, status.getPermission());
    try (InputStream is = fs.open(path)) {
      Assert.assertEquals(1, is.read());
    }
  } finally {
    fs.close();
  }
}
项目:hadoop    文件:AclCommands.java   
/**
 * Prints a single extended ACL entry.  If the mask restricts the
 * permissions of the entry, then also prints the restricted version as the
 * effective permissions.  The mask applies to all named entries and also
 * the unnamed group entry.
 * @param aclStatus AclStatus for the path
 * @param fsPerm FsPermission for the path
 * @param entry AclEntry extended ACL entry to print
 */
private void printExtendedAclEntry(AclStatus aclStatus,
    FsPermission fsPerm, AclEntry entry) {
  // Only named entries and the unnamed group entry are subject to the mask.
  if (entry.getName() != null || entry.getType() == AclEntryType.GROUP) {
    FsAction entryPerm = entry.getPermission();
    FsAction effectivePerm = aclStatus
        .getEffectivePermission(entry, fsPerm);
    // FsAction is an enum, so reference comparison is sufficient here.
    if (entryPerm != effectivePerm) {
      out.println(String.format("%s\t#effective:%s", entry,
        effectivePerm.SYMBOL));
    } else {
      out.println(entry);
    }
  } else {
    out.println(entry);
  }
}
项目:hadoop    文件:FSPermissionChecker.java   
/**
 * Guarded by {@link FSNamesystem#readLock()}.
 * Checks that {@code access} is granted on every directory in the sub-tree
 * rooted at {@code inode}; empty directories may be skipped when
 * {@code ignoreEmptyDir} is set.
 */
private void checkSubAccess(byte[][] pathByNameArr, int pathIdx, INode inode,
    int snapshotId, FsAction access, boolean ignoreEmptyDir)
    throws AccessControlException {
  // Only directories carry a sub-tree to verify.
  if (inode == null || !inode.isDirectory()) {
    return;
  }

  // Iterative depth-first traversal of the directory tree.
  Stack<INodeDirectory> directories = new Stack<INodeDirectory>();
  for(directories.push(inode.asDirectory()); !directories.isEmpty(); ) {
    INodeDirectory d = directories.pop();
    ReadOnlyList<INode> cList = d.getChildrenList(snapshotId);
    if (!(cList.isEmpty() && ignoreEmptyDir)) {
      //TODO have to figure this out with inodeattribute provider
      // NOTE(review): the path passed on failure is the root inode's full
      // path rather than d's — confirm whether the error is meant to name
      // the exact subdirectory that failed the check.
      check(getINodeAttrs(pathByNameArr, pathIdx, d, snapshotId),
          inode.getFullPathName(), access);
    }

    // Queue child directories for the same check.
    for(INode child : cList) {
      if (child.isDirectory()) {
        directories.push(child.asDirectory());
      }
    }
  }
}
项目:hadoop    文件:FSPermissionChecker.java   
/**
 * Whether a cache pool can be accessed by the current context
 *
 * <p>Superusers always pass. Otherwise access is granted when the caller is
 * the owner with the matching owner bits, a member of the pool's group with
 * the matching group bits, or when the "other" bits allow the action.
 *
 * @param pool CachePool being accessed
 * @param access type of action being performed on the cache pool
 * @throws AccessControlException if pool cannot be accessed
 */
public void checkPermission(CachePool pool, FsAction access)
    throws AccessControlException {
  FsPermission mode = pool.getMode();
  if (isSuperUser()) {
    return;
  }
  // Note: unlike strict POSIX, a non-matching owner/group clause falls
  // through to the next clause rather than failing immediately — this
  // mirrors the original if/return cascade exactly.
  boolean allowed =
      (getUser().equals(pool.getOwnerName())
          && mode.getUserAction().implies(access))
      || (getGroups().contains(pool.getGroupName())
          && mode.getGroupAction().implies(access))
      || mode.getOtherAction().implies(access);
  if (!allowed) {
    throw new AccessControlException("Permission denied while accessing pool "
        + pool.getPoolName() + ": user " + getUser() + " does not have "
        + access.toString() + " permissions.");
  }
}
项目:ditb    文件:FSUtils.java   
/**
 * Throw an exception if an action is not permitted by a user on a file.
 *
 * <p>Exactly one permission class applies — owner, then group, then other —
 * and only that class's bits are consulted.
 *
 * @param ugi
 *          the user
 * @param file
 *          the file
 * @param action
 *          the action
 */
public static void checkAccess(UserGroupInformation ugi, FileStatus file,
    FsAction action) throws AccessDeniedException {
  FsPermission perm = file.getPermission();

  // Select the single applicable permission class for this user.
  final FsAction granted;
  if (ugi.getShortUserName().equals(file.getOwner())) {
    granted = perm.getUserAction();
  } else if (contains(ugi.getGroupNames(), file.getGroup())) {
    granted = perm.getGroupAction();
  } else {
    granted = perm.getOtherAction();
  }

  if (!granted.implies(action)) {
    throw new AccessDeniedException("Permission denied:" + " action=" + action
        + " path=" + file.getPath() + " user=" + ugi.getShortUserName());
  }
}
项目:hadoop    文件:TestDFSPermission.java   
@Test
public void testAccessGroupMember() throws IOException, InterruptedException {
  // Create /p2 owned by the current user with group GROUP1 and mode 0740:
  // the group class may read but not execute.
  FileSystem rootFs = FileSystem.get(conf);
  Path p2 = new Path("/p2");
  rootFs.mkdirs(p2);
  rootFs.setOwner(p2, UserGroupInformation.getCurrentUser().getShortUserName(), GROUP1_NAME);
  rootFs.setPermission(p2, new FsPermission((short) 0740));
  // Access the path as USER1 (presumably a member of GROUP1, per the test
  // name — membership is configured elsewhere in the test class).
  fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {
    @Override
    public FileSystem run() throws Exception {
      return FileSystem.get(conf);
    }
  });
  // READ is granted by the group bits of 0740...
  fs.access(p2, FsAction.READ);
  try {
    // ...but EXECUTE is not, so this must fail.
    fs.access(p2, FsAction.EXECUTE);
    fail("The access call should have failed.");
  } catch (AccessControlException e) {
    assertTrue("Permission denied messages must carry the username",
            e.getMessage().contains(USER1_NAME));
    assertTrue("Permission denied messages must carry the path parent",
            e.getMessage().contains(
                p2.getParent().toUri().getPath()));
  }
}
项目:hadoop    文件:TestDFSPermission.java   
@Test
public void testAccessOthers() throws IOException, InterruptedException {
  // Create /p3 with mode 0774: the "other" class may only read.
  FileSystem rootFs = FileSystem.get(conf);
  Path p3 = new Path("/p3");
  rootFs.mkdirs(p3);
  rootFs.setPermission(p3, new FsPermission((short) 0774));
  // Access the path as USER1, who falls into the "other" class here.
  fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {
    @Override
    public FileSystem run() throws Exception {
      return FileSystem.get(conf);
    }
  });
  // READ is granted by the "other" bits of 0774...
  fs.access(p3, FsAction.READ);
  try {
    // ...but WRITE is not, so requesting READ_WRITE must fail.
    fs.access(p3, FsAction.READ_WRITE);
    fail("The access call should have failed.");
  } catch (AccessControlException e) {
    assertTrue("Permission denied messages must carry the username",
            e.getMessage().contains(USER1_NAME));
    assertTrue("Permission denied messages must carry the path parent",
            e.getMessage().contains(
                p3.getParent().toUri().getPath()));
  }
}
项目:hadoop    文件:TestWebHdfsUrl.java   
@Test(timeout=60000)
public void testCheckAccessUrl() throws IOException {
  Configuration conf = new Configuration();

  // Log in as a fixed remote user so the generated URL's user parameter
  // is deterministic.
  UserGroupInformation ugi =
      UserGroupInformation.createRemoteUser("test-user");
  UserGroupInformation.setLoginUser(ugi);

  WebHdfsFileSystem webhdfs = getWebHdfsFileSystem(ugi, conf);
  Path fsPath = new Path("/p1");

  // Build the CHECKACCESS URL for rw- and verify its query parameters:
  // the op name, the user name, and the fsaction symbol.
  URL checkAccessUrl = webhdfs.toUrl(GetOpParam.Op.CHECKACCESS,
      fsPath, new FsActionParam(FsAction.READ_WRITE));
  checkQueryParams(
      new String[]{
          GetOpParam.Op.CHECKACCESS.toQueryString(),
          new UserParam(ugi.getShortUserName()).toString(),
          FsActionParam.NAME + "=" + FsAction.READ_WRITE.SYMBOL
      },
      checkAccessUrl);
}
项目:ditb    文件:HBaseFsck.java   
/**
 * Verifies that the current user has WRITE access to every entry directly
 * under the HBase root directory before any repair is attempted; reports
 * and rethrows on the first entry that is not writable.
 */
private void preCheckPermission() throws IOException, AccessDeniedException {
  if (shouldIgnorePreCheckPermission()) {
    return;
  }

  Path hbaseDir = FSUtils.getRootDir(getConf());
  FileSystem fs = hbaseDir.getFileSystem(getConf());
  UserProvider userProvider = UserProvider.instantiate(getConf());
  UserGroupInformation ugi = userProvider.getCurrent().getUGI();
  // Check only the direct children of the root dir, not the full tree.
  FileStatus[] files = fs.listStatus(hbaseDir);
  for (FileStatus file : files) {
    try {
      FSUtils.checkAccess(ugi, file, FsAction.WRITE);
    } catch (AccessDeniedException ace) {
      // Surface a usage error with a suggested remedy, then abort the
      // pre-check by propagating the original exception.
      LOG.warn("Got AccessDeniedException when preCheckPermission ", ace);
      errors.reportError(ERROR_CODE.WRONG_USAGE, "Current user " + ugi.getUserName()
        + " does not have write perms to " + file.getPath()
        + ". Please rerun hbck as hdfs user " + file.getOwner());
      throw ace;
    }
  }
}
项目:hadoop    文件:TestFSPermissionChecker.java   
/**
 * Asserts that {@code user} is denied {@code access} on {@code path} and
 * that the resulting message names both the user and the parent path.
 */
private void assertPermissionDenied(UserGroupInformation user, String path,
    FsAction access) throws IOException {
  try {
    INodesInPath iip = dir.getINodesInPath(path, true);
    dir.getPermissionChecker(SUPERUSER, SUPERGROUP, user).checkPermission(iip,
      false, null, null, access, null, false);
    // Reaching this line means the check unexpectedly passed.
    fail("expected AccessControlException for user + " + user + ", path = " +
      path + ", access = " + access);
  } catch (AccessControlException e) {
    assertTrue("Permission denied messages must carry the username",
            e.getMessage().contains(user.getUserName().toString()));
    assertTrue("Permission denied messages must carry the path parent",
            e.getMessage().contains(
                new Path(path).getParent().toUri().getPath()));
  }
}
项目:hadoop    文件:TestBlobMetadata.java   
@Test
public void testFolderMetadata() throws Exception {
  // Create a folder with mode r--r--r-- and verify that the backing Azure
  // blob store records both the folder marker and the permission string.
  Path folder = new Path("/folder");
  FsPermission justRead = new FsPermission(FsAction.READ, FsAction.READ,
      FsAction.READ);
  fs.mkdirs(folder, justRead);
  HashMap<String, String> metadata = backingStore
      .getMetadata(AzureBlobStorageTestAccount.toMockUri(folder));
  assertNotNull(metadata);
  // "hdi_isfolder" marks the blob as a directory placeholder.
  assertEquals("true", metadata.get("hdi_isfolder"));
  assertEquals(getExpectedPermissionString("r--r--r--"),
      metadata.get("hdi_permission"));
}
项目:angel    文件:ClientDistributedCacheManager.java   
/**
 * Returns true when every component of {@code path}, up to the root, grants
 * EXECUTE to "other" — i.e. arbitrary users can traverse down to the leaf.
 */
public boolean ancestorsHaveExecutePermissions(FileSystem fs, Path path,
    Map<URI, FileStatus> statCache) throws IOException {
  // Walk upward; getParent() returns null past the root, ending the loop.
  for (Path dir = path; dir != null; dir = dir.getParent()) {
    if (!checkPermissionOfOther(fs, dir, FsAction.EXECUTE, statCache)) {
      return false;
    }
  }
  return true;
}
项目:angel    文件:ClientDistributedCacheManager.java   
/**
 * Checks for a given path whether the Other permissions on it imply the
 * permission in the passed FsAction.
 *
 * @return true if the path in the uri is visible to all, false otherwise
 * @throws IOException
 */
public boolean checkPermissionOfOther(FileSystem fs, Path path, FsAction action,
    Map<URI, FileStatus> statCache) throws IOException {
  // Status lookups go through the cache to avoid repeated RPCs.
  FileStatus status = getFileStatus(fs, path.toUri(), statCache);
  return status.getPermission().getOtherAction().implies(action);
}
项目:dremio-oss    文件:FileSystemWrapper.java   
/**
 * Delegates the permission probe to the wrapped file system, translating
 * low-level {@code FSError} faults into the wrapper's exception convention.
 */
@Override
public void access(final Path path, final FsAction mode) throws AccessControlException, FileNotFoundException, IOException {
  try {
    underlyingFs.access(path, mode);
  } catch(FSError e) {
    // FSError is a serious fault from the Hadoop FS layer; rethrow it via
    // the wrapper's propagation helper instead of letting it escape raw.
    throw propagateFSError(e);
  }
}
项目:hadoop-oss    文件:FTPFileSystem.java   
/**
 * Maps one permission triad (user/group/world) of an FTP listing entry onto
 * an {@link FsAction}.
 *
 * @param accessGroup which triad to inspect (FTPFile.USER_ACCESS,
 *                    GROUP_ACCESS or WORLD_ACCESS)
 * @param ftpFile the FTP listing entry
 * @return the union of the READ/WRITE/EXECUTE bits present in that triad
 */
private FsAction getFsAction(int accessGroup, FTPFile ftpFile) {
  FsAction action = FsAction.NONE;
  // FsAction is an immutable enum: or() returns the combined value rather
  // than mutating the receiver, so the result must be assigned back. The
  // original discarded it and therefore always returned NONE.
  if (ftpFile.hasPermission(accessGroup, FTPFile.READ_PERMISSION)) {
    action = action.or(FsAction.READ);
  }
  if (ftpFile.hasPermission(accessGroup, FTPFile.WRITE_PERMISSION)) {
    action = action.or(FsAction.WRITE);
  }
  if (ftpFile.hasPermission(accessGroup, FTPFile.EXECUTE_PERMISSION)) {
    action = action.or(FsAction.EXECUTE);
  }
  return action;
}
项目:hadoop-oss    文件:FTPFileSystem.java   
/**
 * Builds an {@link FsPermission} from the three permission triads
 * (user, group, world) of an FTP listing entry.
 */
private FsPermission getPermissions(FTPFile ftpFile) {
  return new FsPermission(
      getFsAction(FTPFile.USER_ACCESS, ftpFile),
      getFsAction(FTPFile.GROUP_ACCESS, ftpFile),
      getFsAction(FTPFile.WORLD_ACCESS, ftpFile));
}
项目:hadoop    文件:TestDistCacheEmulation.java   
/**
 * Validate setupGenerateDistCacheData by validating <li>permissions of the
 * distributed cache directory and <li>content of the generated sequence file.
 * This includes validation of dist cache file paths and their file sizes.
 */
private void doValidateSetupGenDC(
    RecordReader<LongWritable, BytesWritable> reader, FileSystem fs,
    long[] sortedFileSizes) throws IOException, InterruptedException {

  // Validate permissions of dist cache directory: "other" must at least
  // have EXECUTE so arbitrary users can traverse into it.
  Path distCacheDir = dce.getDistributedCacheDir();
  assertEquals(
      "Wrong permissions for distributed cache dir " + distCacheDir,
      fs.getFileStatus(distCacheDir).getPermission().getOtherAction()
          .and(FsAction.EXECUTE), FsAction.EXECUTE);

  // Validate the content of the sequence file generated by
  // dce.setupGenerateDistCacheData().
  LongWritable key = new LongWritable();
  BytesWritable val = new BytesWritable();
  // One record is expected per entry in sortedFileSizes, in the same order.
  for (int i = 0; i < sortedFileSizes.length; i++) {
    assertTrue("Number of files written to the sequence file by "
        + "setupGenerateDistCacheData is less than the expected.",
        reader.nextKeyValue());
    key = reader.getCurrentKey();
    val = reader.getCurrentValue();
    // Record layout: key = file size, value = file path bytes.
    long fileSize = key.get();
    String file = new String(val.getBytes(), 0, val.getLength());

    // Dist Cache files should be sorted based on file size.
    assertEquals("Dist cache file size is wrong.", sortedFileSizes[i],
        fileSize);

    // Validate dist cache file path.

    // parent dir of dist cache file
    Path parent = new Path(file).getParent().makeQualified(fs.getUri(),fs.getWorkingDirectory());
    // should exist in dist cache dir
    assertTrue("Public dist cache file path is wrong.",
        distCacheDir.equals(parent));
  }
}
项目:hadoop-oss    文件:ViewFileSystem.java   
/**
 * Resolves the view path to its backing mount point and forwards the
 * access check to the target file system.
 */
@Override
public void access(Path path, FsAction mode) throws AccessControlException,
    FileNotFoundException, IOException {
  // resolveLastComponent = true: the leaf itself is resolved through the
  // mount table before delegation.
  InodeTree.ResolveResult<FileSystem> res =
    fsState.resolve(getUriPath(path), true);
  res.targetFileSystem.access(res.remainingPath, mode);
}
项目:hadoop-oss    文件:ViewFs.java   
/**
 * Resolves the view path to its backing mount point and forwards the
 * access check to the target {@code AbstractFileSystem}.
 */
@Override
public void access(Path path, FsAction mode) throws AccessControlException,
    FileNotFoundException, UnresolvedLinkException, IOException {
  // resolveLastComponent = true: the leaf itself is resolved through the
  // mount table before delegation.
  InodeTree.ResolveResult<AbstractFileSystem> res =
    fsState.resolve(getUriPath(path), true);
  res.targetFileSystem.access(res.remainingPath, mode);
}
项目:hadoop    文件:FTPFileSystem.java   
/**
 * Builds an {@link FsPermission} from the three permission triads
 * (user, group, world) of an FTP listing entry.
 */
private FsPermission getPermissions(FTPFile ftpFile) {
  FsAction user, group, others;
  user = getFsAction(FTPFile.USER_ACCESS, ftpFile);
  group = getFsAction(FTPFile.GROUP_ACCESS, ftpFile);
  others = getFsAction(FTPFile.WORLD_ACCESS, ftpFile);
  return new FsPermission(user, group, others);
}
项目:hadoop-oss    文件:TestAclCommands.java   
/**
 * Stub listing used by the test: always reports a single directory "/foo"
 * with mode rwxr-xr-x, regardless of the requested path.
 */
@Override
public FileStatus[] listStatus(Path f) throws IOException {
  FsPermission mode = new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE,
    FsAction.READ_EXECUTE);
  FileStatus dirStatus = new FileStatus(1000, true, 3, 1000, 0, 0, mode,
    "owner", "group", new Path("/foo"));
  return new FileStatus[] { dirStatus };
}
项目:hadoop-oss    文件:ViewFileSystemBaseTest.java   
/**
 * Verifies that the view file system's root is listable and that its
 * permission bits grant read+execute to user, group and other.
 *
 * @param located whether to use the located-status variant of listStatus
 */
private void testRootReadableExecutableInternal(boolean located)
    throws IOException {
  // verify executable permission on root: cd /
  //
  Assert.assertFalse("In root before cd",
      fsView.getWorkingDirectory().isRoot());
  fsView.setWorkingDirectory(new Path("/"));
  Assert.assertTrue("Not in root dir after cd",
    fsView.getWorkingDirectory().isRoot());

  // verify readable
  //
  verifyRootChildren(listStatusInternal(located,
      fsView.getWorkingDirectory()));

  // verify permissions
  //
  final FileStatus rootStatus =
      fsView.getFileStatus(fsView.getWorkingDirectory());
  final FsPermission perms = rootStatus.getPermission();

  // r and x are asserted for each of the three permission classes.
  Assert.assertTrue("User-executable permission not set!",
      perms.getUserAction().implies(FsAction.EXECUTE));
  Assert.assertTrue("User-readable permission not set!",
      perms.getUserAction().implies(FsAction.READ));
  Assert.assertTrue("Group-executable permission not set!",
      perms.getGroupAction().implies(FsAction.EXECUTE));
  Assert.assertTrue("Group-readable permission not set!",
      perms.getGroupAction().implies(FsAction.READ));
  Assert.assertTrue("Other-executable permission not set!",
      perms.getOtherAction().implies(FsAction.EXECUTE));
  Assert.assertTrue("Other-readable permission not set!",
      perms.getOtherAction().implies(FsAction.READ));
}
项目:lustre-connector-for-hadoop    文件:LustreFsJavaImpl.java   
/**
 * Creates the directory (and any missing parents), optionally chmods it,
 * and grants the submitting user full access via POSIX ACLs (both as an
 * access ACL and as a default ACL for future children).
 */
@Override
public boolean mkdirs(String path, short permissions) throws IOException {
  // p ends up pointing at the highest-level directory that did not yet
  // exist, so the recursive setfacl below covers everything just created.
  File f = new File(path), p = f;
  // NOTE(review): if every ancestor up to the filesystem root were missing,
  // p.getParentFile() could become null and this loop would NPE — confirm
  // callers always pass paths under an existing root.
  while(!p.getParentFile().exists()) {
    p = p.getParentFile();
  }
  if(!p.getParentFile().isDirectory()) {
    throw new FileAlreadyExistsException("Not a directory: "+ p.getParent());
  }
  boolean success = f.mkdirs();
  if (success) {
    // A permissions value of -1 means "keep the default mode".
    if(-1 != permissions) {
    chmod(path, permissions);
    }
    // We give explicit permissions to the user who submitted the job using ACLs
    String user = UserGroupInformation.getCurrentUser().getShortUserName();
    LinkedList<String> args = new LinkedList<String>();
    args.add("/usr/bin/setfacl");
    args.add("-R");
    args.add("-m");
    args.add("u:" + user + ":" + FsAction.ALL.SYMBOL);
    args.add(FileUtil.makeShellPath(p, true));  
    org.apache.hadoop.fs.util.Shell.runPrivileged(args.toArray(new String[0]));
    // Re-run with "-d" inserted so the same entry also becomes the default
    // ACL, inherited by files created later under these directories.
    args.add(2, "-d");
    org.apache.hadoop.fs.util.Shell.runPrivileged(args.toArray(new String[0]));
  }
  // mkdirs() returns false when the directory already existed; still report
  // success as long as the path is present as a directory.
  return (success || (f.exists() && f.isDirectory()));
}
项目:EasyML    文件:HDFSIO.java   
/**
 * Make directory in the uri position and open its permissions to everyone
 * (rwxrwxrwx).
 * @param uri target position, relative to the configured name node
 * @return whether success or not
 * @throws IOException
 */
public static boolean mkdirs(String uri) throws IOException {
    Path path = new Path(Constants.NAME_NODE + "/" + uri);
    System.out.println("[mkdirs]" + path.toString());

    FsPermission dirPerm = new FsPermission(FsAction.ALL,FsAction.ALL,FsAction.ALL);
    // Use the primitive result directly (no Boolean boxing), and apply the
    // permission object as-is: wrapping it in another FsPermission copy, as
    // the original did, was redundant.
    boolean created = fs.mkdirs(path);
    if (created) {
        // setPermission (rather than mkdirs(path, perm)) deliberately
        // bypasses the client umask so the mode is exactly 777.
        fs.setPermission(path, dirPerm);
    }
    return created;
}
项目:monarch    文件:HDFSQuasiService.java   
/**
 * Creates /user/&lt;userName&gt; on the secured HDFS instance and assigns it
 * to that user, authenticating as the HDFS principal from the keytab first.
 */
public void createSecuredUserDir(String userName, String keytabdir) {
  try {
    // Log in as the HDFS superuser before creating the home directory.
    UserGroupInformation.setConfiguration(conf);
    UserGroupInformation.loginUserFromKeytab(getHDFSPrincipal(""),
        keytabdir + File.separator + "hdfs.keytab");
    FileSystem fs = FileSystem.get(conf);
    Path userDir = new Path("/user" + File.separator + userName);
    // Owner gets full access; group/other keep the filesystem defaults.
    fs.mkdirs(userDir, new FsPermission(FsAction.ALL, FsPermission.getDefault().getGroupAction(),
        FsPermission.getDefault().getOtherAction()));
    fs.setOwner(userDir, userName, "hadoop");
  } catch (IOException e) {
    // NOTE(review): the failure is only printed, so callers cannot detect
    // that the directory was not created — consider propagating instead.
    e.printStackTrace();
  }

}
项目:hadoop    文件:FSDownload.java   
/**
 * Returns true if all ancestors of the specified path have the 'execute'
 * permission set for all users (i.e. that other users can traverse
 * the directory hierarchy to the given path)
 */
@VisibleForTesting
static boolean ancestorsHaveExecutePermissions(FileSystem fs,
    Path path, LoadingCache<Path,Future<FileStatus>> statCache)
    throws IOException {
  Path current = path;
  while (current != null) {
    //the subdirs in the path should have execute permissions for others
    if (!checkPermissionOfOther(fs, current, FsAction.EXECUTE, statCache)) {
      return false;
    }
    // Walk up one component; getParent() returns null past the root,
    // which terminates the loop.
    current = current.getParent();
  }
  return true;
}
项目:hadoop    文件:ClientDistributedCacheManager.java   
/**
 * Returns a boolean to denote whether a cache file is visible to all(public)
 * or not
 * @param conf configuration used to obtain the file system for the uri
 * @param uri location of the cache file
 * @return true if the path in the uri is visible to all, false otherwise
 * @throws IOException
 */
static boolean isPublic(Configuration conf, URI uri,
    Map<URI, FileStatus> statCache) throws IOException {
  FileSystem fs = FileSystem.get(uri, conf);
  Path current = new Path(uri.getPath());
  //the leaf level file should be readable by others
  if (!checkPermissionOfOther(fs, current, FsAction.READ, statCache)) {
    return false;
  }
  // In addition, every ancestor directory must be traversable by others.
  return ancestorsHaveExecutePermissions(fs, current.getParent(), statCache);
}
项目:hadoop    文件:ClientDistributedCacheManager.java   
/**
 * Returns true if all ancestors of the specified path have the 'execute'
 * permission set for all users (i.e. that other users can traverse
 * the directory hierarchy to the given path).
 */
static boolean ancestorsHaveExecutePermissions(FileSystem fs, Path path,
    Map<URI, FileStatus> statCache) throws IOException {
  // Walk upward; getParent() returns null past the root, ending the loop.
  for (Path dir = path; dir != null; dir = dir.getParent()) {
    if (!checkPermissionOfOther(fs, dir, FsAction.EXECUTE, statCache)) {
      return false;
    }
  }
  return true;
}
项目:hadoop    文件:ClientDistributedCacheManager.java   
/**
 * Checks for a given path whether the Other permissions on it 
 * imply the permission in the passed FsAction
 * @param fs file system holding the path
 * @param path path whose "other" permission bits are inspected
 * @param action required action
 * @return true if the path in the uri is visible to all, false otherwise
 * @throws IOException
 */
private static boolean checkPermissionOfOther(FileSystem fs, Path path,
    FsAction action, Map<URI, FileStatus> statCache) throws IOException {
  // Status lookups go through the cache to avoid repeated RPCs.
  FileStatus status = getFileStatus(fs, path.toUri(), statCache);
  // Idiom fix: return the boolean directly instead of
  // "if (x) return true; return false;".
  return status.getPermission().getOtherAction().implies(action);
}
项目:hadoop    文件:DFSClient.java   
/**
 * Asks the NameNode whether the current user may perform {@code mode} on
 * {@code src}, tracing the call.
 *
 * @param src path to check
 * @param mode requested access
 * @throws IOException on RPC failure or when access is denied
 */
public void checkAccess(String src, FsAction mode) throws IOException {
  checkOpen();
  TraceScope scope = getPathTraceScope("checkAccess", src);
  try {
    namenode.checkAccess(src, mode);
  } catch (RemoteException re) {
    // Unwrap the RPC envelope so callers see the original exception types
    // they are prepared to catch.
    throw re.unwrapRemoteException(AccessControlException.class,
        FileNotFoundException.class,
        UnresolvedPathException.class);
  } finally {
    scope.close();
  }
}
项目:hadoop    文件:FSDirStatAndListingOp.java   
/**
 * Resolves {@code srcArg}, enforces the appropriate permission, and returns
 * the directory listing starting after {@code startAfter}.
 */
static DirectoryListing getListingInt(FSDirectory fsd, final String srcArg,
    byte[] startAfter, boolean needLocation) throws IOException {
  FSPermissionChecker pc = fsd.getPermissionChecker();
  byte[][] pathComponents = FSDirectory
      .getPathComponentsForReservedPath(srcArg);
  final String startAfterString = new String(startAfter, Charsets.UTF_8);
  final String src = fsd.resolvePath(pc, srcArg, pathComponents);
  final INodesInPath iip = fsd.getINodesInPath(src, true);

  // Get file name when startAfter is an INodePath
  if (FSDirectory.isReservedName(startAfterString)) {
    byte[][] startAfterComponents = FSDirectory
        .getPathComponentsForReservedPath(startAfterString);
    try {
      String tmp = FSDirectory.resolvePath(src, startAfterComponents, fsd);
      byte[][] regularPath = INode.getPathComponents(tmp);
      // Keep only the last component: the listing resumes after that name.
      startAfter = regularPath[regularPath.length - 1];
    } catch (IOException e) {
      // Possibly the inode is deleted
      throw new DirectoryListingStartAfterNotFoundException(
          "Can't find startAfter " + startAfterString);
    }
  }

  boolean isSuperUser = true;
  if (fsd.isPermissionEnabled()) {
    // Listing a directory requires READ_EXECUTE on the directory itself;
    // listing a single file only requires traversal of its ancestors.
    if (iip.getLastINode() != null && iip.getLastINode().isDirectory()) {
      fsd.checkPathAccess(pc, iip, FsAction.READ_EXECUTE);
    } else {
      fsd.checkTraverse(pc, iip);
    }
    isSuperUser = pc.isSuperUser();
  }
  return getListing(fsd, iip, src, startAfter, needLocation, isSuperUser);
}
项目:hadoop    文件:FSDirStatAndListingOp.java   
/**
 * Resolves {@code src} and computes its content summary, performing a
 * READ_EXECUTE permission check first when permissions are enabled.
 */
static ContentSummary getContentSummary(
    FSDirectory fsd, String src) throws IOException {
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
  FSPermissionChecker pc = fsd.getPermissionChecker();
  src = fsd.resolvePath(pc, src, pathComponents);
  final INodesInPath iip = fsd.getINodesInPath(src, false);
  if (fsd.isPermissionEnabled()) {
    // NOTE(review): READ_EXECUTE is passed in a trailing argument slot —
    // confirm against checkPermission's signature whether this applies to
    // the node itself or to the whole sub-tree.
    fsd.checkPermission(pc, iip, false, null, null, null,
        FsAction.READ_EXECUTE);
  }
  return getContentSummaryInt(fsd, iip);
}
项目:hadoop    文件:TestAclCommands.java   
/**
 * Stub listing used by the test: always reports a single directory "/foo"
 * with mode rwxr-xr-x, regardless of the requested path.
 */
@Override
public FileStatus[] listStatus(Path f) throws IOException {
  FsPermission perm = new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE,
    FsAction.READ_EXECUTE);
  Path path = new Path("/foo");
  FileStatus stat = new FileStatus(1000, true, 3, 1000, 0, 0, perm, "owner",
    "group", path);
  return new FileStatus[] { stat };
}