Java 类org.apache.hadoop.fs.InvalidPathException 实例源码

项目:hadoop    文件:FSDirStatAndListingOp.java   
/**
 * Look up the file status for a path.
 *
 * @param srcArg string form of the path to query
 * @param resolveLink whether to throw UnresolvedLinkException when the
 *        path refers to a symlink
 * @return status of the file, or null when it does not exist
 * @throws IOException if the name is invalid or resolution fails
 */
static HdfsFileStatus getFileInfo(
    FSDirectory fsd, String srcArg, boolean resolveLink)
    throws IOException {
  if (!DFSUtil.isValidName(srcArg)) {
    throw new InvalidPathException("Invalid file name: " + srcArg);
  }
  FSPermissionChecker pc = fsd.getPermissionChecker();
  byte[][] components = FSDirectory.getPathComponentsForReservedPath(srcArg);
  String resolved = fsd.resolvePath(pc, srcArg, components);
  final INodesInPath iip = fsd.getINodesInPath(resolved, resolveLink);
  // With permissions disabled, every caller is effectively a superuser.
  boolean superUser = true;
  if (fsd.isPermissionEnabled()) {
    fsd.checkPermission(pc, iip, false, null, null, null, null, false);
    superUser = pc.isSuperUser();
  }
  return getFileInfo(fsd, resolved, resolveLink,
      FSDirectory.isReservedRawName(srcArg), superUser);
}
项目:hadoop    文件:TestDFSMkdirs.java   
/**
 * Regression test for HDFS-3626. Issues mkdirs RPCs with non-canonical
 * paths (extra slashes between components) and verifies that the NameNode
 * rejects each one with an InvalidPathException.
 */
@Test
public void testMkdirRpcNonCanonicalPath() throws IOException {
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
  try {
    final NamenodeProtocols nnRpc = cluster.getNameNodeRpc();
    for (String badPath : NON_CANONICAL_PATHS) {
      boolean rejected = false;
      try {
        nnRpc.mkdirs(badPath, new FsPermission((short) 0755), true);
      } catch (InvalidPathException expected) {
        rejected = true; // the NN correctly refused the path
      }
      if (!rejected) {
        fail("Did not fail when called with a non-canonicalized path: "
           + badPath);
      }
    }
  } finally {
    cluster.shutdown();
  }
}
项目:aliyun-oss-hadoop-fs    文件:DFSClient.java   
/**
 * Same as {@link #mkdirs(String, FsPermission, boolean)} except that the
 * supplied permission has already been masked against the umask.
 *
 * @param src path of the directory to create
 * @param absPermission pre-masked permission; when null the default
 *        umask-masked permission is used
 * @param createParent whether to create missing parent directories
 * @return true if the operation succeeded
 */
public boolean primitiveMkdir(String src, FsPermission absPermission,
    boolean createParent) throws IOException {
  checkOpen();
  final FsPermission masked =
      (absPermission != null) ? absPermission : applyUMask(null);
  LOG.debug("{}: masked={}", src, masked);
  try (TraceScope ignored = tracer.newScope("mkdir")) {
    return namenode.mkdirs(src, masked, createParent);
  } catch (RemoteException re) {
    // Surface the NN-side exception types callers expect to catch.
    throw re.unwrapRemoteException(AccessControlException.class,
        InvalidPathException.class,
        FileAlreadyExistsException.class,
        FileNotFoundException.class,
        ParentNotDirectoryException.class,
        SafeModeException.class,
        NSQuotaExceededException.class,
        DSQuotaExceededException.class,
        QuotaByStorageTypeExceededException.class,
        UnresolvedPathException.class,
        SnapshotAccessControlException.class);
  }
}
项目:aliyun-oss-hadoop-fs    文件:FSDirStatAndListingOp.java   
/**
 * Look up the file status for a path.
 *
 * @param srcArg string form of the path to query
 * @param resolveLink whether to throw UnresolvedLinkException when the
 *        path refers to a symlink
 * @return status of the file, or null when it does not exist
 * @throws IOException if the name is invalid or resolution fails
 */
static HdfsFileStatus getFileInfo(
    FSDirectory fsd, String srcArg, boolean resolveLink)
    throws IOException {
  if (!DFSUtil.isValidName(srcArg)) {
    throw new InvalidPathException("Invalid file name: " + srcArg);
  }
  byte[][] components = FSDirectory.getPathComponentsForReservedPath(srcArg);
  final String resolved;
  if (fsd.isPermissionEnabled()) {
    // Resolve through the permission checker and verify read access.
    FSPermissionChecker pc = fsd.getPermissionChecker();
    resolved = fsd.resolvePath(pc, srcArg, components);
    fsd.checkPermission(pc, fsd.getINodesInPath(resolved, resolveLink),
        false, null, null, null, null, false);
  } else {
    resolved = FSDirectory.resolvePath(srcArg, components, fsd);
  }
  return getFileInfo(fsd, resolved, FSDirectory.isReservedRawName(srcArg),
                     resolveLink);
}
项目:aliyun-oss-hadoop-fs    文件:FSDirRenameOp.java   
/**
 * Validate a rename destination against the source inode, rejecting
 * combinations that can never succeed.
 *
 * @param src source path of the rename
 * @param dst destination path of the rename
 * @param srcInode inode currently at {@code src}
 * @throws FileAlreadyExistsException if a symlink is renamed onto its target
 * @throws InvalidPathException if either side is exactly /.reserved
 * @throws IOException if dst lies underneath src
 */
private static void validateDestination(
    String src, String dst, INode srcInode)
    throws IOException {
  String error;
  // A symlink must not be renamed onto the exact path it points to.
  if (srcInode.isSymlink() &&
      dst.equals(srcInode.asSymlink().getSymlinkString())) {
    throw new FileAlreadyExistsException("Cannot rename symlink " + src
        + " to its target " + dst);
  }
  // dst cannot be a directory or a file under src
  // NOTE(review): assumes dst is never exactly equal to src here — otherwise
  // charAt(src.length()) would be out of bounds; confirm the caller rejects
  // the src == dst case before reaching this check.
  if (dst.startsWith(src)
      && dst.charAt(src.length()) == Path.SEPARATOR_CHAR) {
    error = "Rename destination " + dst
        + " is a directory or file under source " + src;
    NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
        + error);
    throw new IOException(error);
  }

  // The /.reserved root itself may never be moved in either direction.
  if (FSDirectory.isExactReservedName(src)
      || FSDirectory.isExactReservedName(dst)) {
    error = "Cannot rename to or from /.reserved";
    throw new InvalidPathException(error);
  }
}
项目:aliyun-oss-hadoop-fs    文件:FSDirAttrOp.java   
/**
 * Set the permission bits on a file or directory.
 *
 * @param fsd directory tree to operate on
 * @param srcArg path whose permission is changed
 * @param permission new permission to apply
 * @return file status of the path, for audit logging
 * @throws IOException if the path is the reserved root, the caller is not
 *         the owner, or the update fails
 */
static HdfsFileStatus setPermission(
    FSDirectory fsd, final String srcArg, FsPermission permission)
    throws IOException {
  String src = srcArg;
  // The /.reserved root itself can never have its permission changed.
  if (FSDirectory.isExactReservedName(src)) {
    throw new InvalidPathException(src);
  }
  FSPermissionChecker pc = fsd.getPermissionChecker();
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
  INodesInPath iip;
  fsd.writeLock();
  try {
    src = fsd.resolvePath(pc, src, pathComponents);
    iip = fsd.getINodesInPath4Write(src);
    // Only the owner may change permissions.
    fsd.checkOwner(pc, iip);
    unprotectedSetPermission(fsd, src, permission);
  } finally {
    fsd.writeUnlock();
  }
  // Persist to the edit log only after the in-memory change succeeded.
  fsd.getEditLog().logSetPermissions(src, permission);
  return fsd.getAuditFileInfo(iip);
}
项目:aliyun-oss-hadoop-fs    文件:TestDFSMkdirs.java   
/**
 * Regression test for HDFS-3626. Issues mkdirs RPCs with non-canonical
 * paths (extra slashes between components) and verifies that the NameNode
 * rejects each one with an InvalidPathException.
 */
@Test
public void testMkdirRpcNonCanonicalPath() throws IOException {
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
  try {
    final NamenodeProtocols nnRpc = cluster.getNameNodeRpc();
    for (String badPath : NON_CANONICAL_PATHS) {
      boolean rejected = false;
      try {
        nnRpc.mkdirs(badPath, new FsPermission((short) 0755), true);
      } catch (InvalidPathException expected) {
        rejected = true; // the NN correctly refused the path
      }
      if (!rejected) {
        fail("Did not fail when called with a non-canonicalized path: "
           + badPath);
      }
    }
  } finally {
    cluster.shutdown();
  }
}
项目:big-c    文件:FSDirStatAndListingOp.java   
/**
 * Look up the file status for a path.
 *
 * @param srcArg string form of the path to query
 * @param resolveLink whether to throw UnresolvedLinkException when the
 *        path refers to a symlink
 * @return status of the file, or null when it does not exist
 * @throws IOException if the name is invalid or resolution fails
 */
static HdfsFileStatus getFileInfo(
    FSDirectory fsd, String srcArg, boolean resolveLink)
    throws IOException {
  if (!DFSUtil.isValidName(srcArg)) {
    throw new InvalidPathException("Invalid file name: " + srcArg);
  }
  FSPermissionChecker pc = fsd.getPermissionChecker();
  byte[][] components = FSDirectory.getPathComponentsForReservedPath(srcArg);
  String resolved = fsd.resolvePath(pc, srcArg, components);
  final INodesInPath iip = fsd.getINodesInPath(resolved, resolveLink);
  // With permissions disabled, every caller is effectively a superuser.
  boolean superUser = true;
  if (fsd.isPermissionEnabled()) {
    fsd.checkPermission(pc, iip, false, null, null, null, null, false);
    superUser = pc.isSuperUser();
  }
  return getFileInfo(fsd, resolved, resolveLink,
      FSDirectory.isReservedRawName(srcArg), superUser);
}
项目:big-c    文件:TestDFSMkdirs.java   
/**
 * Regression test for HDFS-3626. Issues mkdirs RPCs with non-canonical
 * paths (extra slashes between components) and verifies that the NameNode
 * rejects each one with an InvalidPathException.
 */
@Test
public void testMkdirRpcNonCanonicalPath() throws IOException {
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
  try {
    final NamenodeProtocols nnRpc = cluster.getNameNodeRpc();
    for (String badPath : NON_CANONICAL_PATHS) {
      boolean rejected = false;
      try {
        nnRpc.mkdirs(badPath, new FsPermission((short) 0755), true);
      } catch (InvalidPathException expected) {
        rejected = true; // the NN correctly refused the path
      }
      if (!rejected) {
        fail("Did not fail when called with a non-canonicalized path: "
           + badPath);
      }
    }
  } finally {
    cluster.shutdown();
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestDFSMkdirs.java   
/**
 * Regression test for HDFS-3626. Issues mkdirs RPCs with non-canonical
 * paths (extra slashes between components) and verifies that the NameNode
 * rejects each one with an InvalidPathException.
 */
@Test
public void testMkdirRpcNonCanonicalPath() throws IOException {
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
  try {
    final NamenodeProtocols nnRpc = cluster.getNameNodeRpc();
    for (String badPath : NON_CANONICAL_PATHS) {
      boolean rejected = false;
      try {
        nnRpc.mkdirs(badPath, new FsPermission((short) 0755), true);
      } catch (InvalidPathException expected) {
        rejected = true; // the NN correctly refused the path
      }
      if (!rejected) {
        fail("Did not fail when called with a non-canonicalized path: "
           + badPath);
      }
    }
  } finally {
    cluster.shutdown();
  }
}
项目:hadoop-plus    文件:FSNamesystem.java   
/**
 * Create a symbolic link.
 *
 * @param target path the new link will point to (must not be reserved)
 * @param link path of the link to create (must be a valid HDFS name)
 * @param dirPerms permissions to apply when creating parent directories
 * @param createParent whether to create missing parent directories
 * @throws InvalidPathException if the link name or target is not allowed
 */
void createSymlink(String target, String link,
    PermissionStatus dirPerms, boolean createParent) 
    throws IOException, UnresolvedLinkException {
  // Short-circuit retried RPCs that have already completed successfully.
  CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return; // Return previous response
  }
  if (!DFSUtil.isValidName(link)) {
    throw new InvalidPathException("Invalid link name: " + link);
  }
  // Links may not point into the /.reserved namespace.
  if (FSDirectory.isReservedName(target)) {
    throw new InvalidPathException("Invalid target name: " + target);
  }
  boolean success = false;
  try {
    createSymlinkInt(target, link, dirPerms, createParent, cacheEntry != null);
    success = true;
  } catch (AccessControlException e) {
    // Audit the denied attempt before propagating the failure.
    logAuditEvent(false, "createSymlink", link, target, null);
    throw e;
  } finally {
    // Record the outcome so retries of this RPC are answered consistently.
    RetryCache.setState(cacheEntry, success);
  }
}
项目:hadoop-plus    文件:TestDFSMkdirs.java   
/**
 * Regression test for HDFS-3626. Issues mkdirs RPCs with non-canonical
 * paths (extra slashes between components) and verifies that the NameNode
 * rejects each one with an InvalidPathException.
 */
@Test
public void testMkdirRpcNonCanonicalPath() throws IOException {
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
  try {
    final NamenodeProtocols nnRpc = cluster.getNameNodeRpc();
    for (String badPath : NON_CANONICAL_PATHS) {
      boolean rejected = false;
      try {
        nnRpc.mkdirs(badPath, new FsPermission((short) 0755), true);
      } catch (InvalidPathException expected) {
        rejected = true; // the NN correctly refused the path
      }
      if (!rejected) {
        fail("Did not fail when called with a non-canonicalized path: "
           + badPath);
      }
    }
  } finally {
    cluster.shutdown();
  }
}
项目:FlexMap    文件:TestDFSMkdirs.java   
/**
 * Regression test for HDFS-3626. Issues mkdirs RPCs with non-canonical
 * paths (extra slashes between components) and verifies that the NameNode
 * rejects each one with an InvalidPathException.
 */
@Test
public void testMkdirRpcNonCanonicalPath() throws IOException {
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
  try {
    final NamenodeProtocols nnRpc = cluster.getNameNodeRpc();
    for (String badPath : NON_CANONICAL_PATHS) {
      boolean rejected = false;
      try {
        nnRpc.mkdirs(badPath, new FsPermission((short) 0755), true);
      } catch (InvalidPathException expected) {
        rejected = true; // the NN correctly refused the path
      }
      if (!rejected) {
        fail("Did not fail when called with a non-canonicalized path: "
           + badPath);
      }
    }
  } finally {
    cluster.shutdown();
  }
}
项目:hops    文件:TestDFSMkdirs.java   
/**
 * Regression test for HDFS-3626. Issues mkdirs RPCs with non-canonical
 * paths (extra slashes between components) and verifies that the NameNode
 * rejects each one with an InvalidPathException.
 */
@Test
public void testMkdirRpcNonCanonicalPath() throws IOException {
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
  try {
    final NamenodeProtocols nnRpc = cluster.getNameNodeRpc();
    for (String badPath : NON_CANONICAL_PATHS) {
      boolean rejected = false;
      try {
        nnRpc.mkdirs(badPath, new FsPermission((short) 0755), true);
      } catch (InvalidPathException expected) {
        rejected = true; // the NN correctly refused the path
      }
      if (!rejected) {
        fail("Did not fail when called with a non-canonicalized path: "
            + badPath);
      }
    }
  } finally {
    cluster.shutdown();
  }
}
项目:hadoop-TCP    文件:TestDFSMkdirs.java   
/**
 * Regression test for HDFS-3626. Issues mkdirs RPCs with non-canonical
 * paths (extra slashes between components) and verifies that the NameNode
 * rejects each one with an InvalidPathException.
 */
@Test
public void testMkdirRpcNonCanonicalPath() throws IOException {
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
  try {
    final NamenodeProtocols nnRpc = cluster.getNameNodeRpc();
    for (String badPath : NON_CANONICAL_PATHS) {
      boolean rejected = false;
      try {
        nnRpc.mkdirs(badPath, new FsPermission((short) 0755), true);
      } catch (InvalidPathException expected) {
        rejected = true; // the NN correctly refused the path
      }
      if (!rejected) {
        fail("Did not fail when called with a non-canonicalized path: "
           + badPath);
      }
    }
  } finally {
    cluster.shutdown();
  }
}
项目:hardfs    文件:TestDFSMkdirs.java   
/**
 * Regression test for HDFS-3626. Issues mkdirs RPCs with non-canonical
 * paths (extra slashes between components) and verifies that the NameNode
 * rejects each one with an InvalidPathException.
 */
@Test
public void testMkdirRpcNonCanonicalPath() throws IOException {
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
  try {
    final NamenodeProtocols nnRpc = cluster.getNameNodeRpc();
    for (String badPath : NON_CANONICAL_PATHS) {
      boolean rejected = false;
      try {
        nnRpc.mkdirs(badPath, new FsPermission((short) 0755), true);
      } catch (InvalidPathException expected) {
        rejected = true; // the NN correctly refused the path
      }
      if (!rejected) {
        fail("Did not fail when called with a non-canonicalized path: "
           + badPath);
      }
    }
  } finally {
    cluster.shutdown();
  }
}
项目:hadoop-on-lustre2    文件:TestDFSMkdirs.java   
/**
 * Regression test for HDFS-3626. Issues mkdirs RPCs with non-canonical
 * paths (extra slashes between components) and verifies that the NameNode
 * rejects each one with an InvalidPathException.
 */
@Test
public void testMkdirRpcNonCanonicalPath() throws IOException {
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
  try {
    final NamenodeProtocols nnRpc = cluster.getNameNodeRpc();
    for (String badPath : NON_CANONICAL_PATHS) {
      boolean rejected = false;
      try {
        nnRpc.mkdirs(badPath, new FsPermission((short) 0755), true);
      } catch (InvalidPathException expected) {
        rejected = true; // the NN correctly refused the path
      }
      if (!rejected) {
        fail("Did not fail when called with a non-canonicalized path: "
           + badPath);
      }
    }
  } finally {
    cluster.shutdown();
  }
}
项目:cumulus    文件:DFSClient.java   
/**
 * Create a directory (or hierarchy of directories) with the given
 * name and permission.
 *
 * @param src path of the directory being created
 * @param permission permission for the new directory; when null,
 *        {@link FsPermission#getDefault()} is used
 * @param createParent whether to create missing parent directories
 * @return true if the operation succeeded
 * @see ClientProtocol#mkdirs(String, FsPermission, boolean)
 */
public boolean mkdirs(String src, FsPermission permission,
    boolean createParent) throws IOException {
  checkOpen();
  final FsPermission requested =
      (permission != null) ? permission : FsPermission.getDefault();
  // Apply the client-side umask before sending the request to the NN.
  final FsPermission masked =
      requested.applyUMask(FsPermission.getUMask(conf));
  if (LOG.isDebugEnabled()) {
    LOG.debug(src + ": masked=" + masked);
  }
  try {
    return namenode.mkdirs(src, masked, createParent);
  } catch (RemoteException re) {
    // Surface the NN-side exception types callers expect to catch.
    throw re.unwrapRemoteException(AccessControlException.class,
                                   InvalidPathException.class,
                                   FileAlreadyExistsException.class,
                                   FileNotFoundException.class,
                                   ParentNotDirectoryException.class,
                                   SafeModeException.class,
                                   NSQuotaExceededException.class,
                                   UnresolvedPathException.class);
  }
}
项目:cumulus    文件:FSNamesystem.java   
/**
 * Rename src to dst with the supplied rename options, holding the
 * namesystem write lock for the whole operation.
 *
 * @param src source path
 * @param dst destination path (must be a valid HDFS name)
 * @param options rename options (e.g. OVERWRITE)
 * @throws SafeModeException if the namesystem is in safe mode
 * @throws InvalidPathException if dst is not a valid name
 */
private void renameToInternal(String src, String dst,
    Options.Rename... options) throws IOException {
  writeLock();
  try {
  if (NameNode.stateChangeLog.isDebugEnabled()) {
    NameNode.stateChangeLog.debug("DIR* NameSystem.renameTo: with options - "
        + src + " to " + dst);
  }
  if (isInSafeMode()) {
    throw new SafeModeException("Cannot rename " + src, safeMode);
  }
  if (!DFSUtil.isValidName(dst)) {
    throw new InvalidPathException("Invalid name: " + dst);
  }
  if (isPermissionEnabled) {
    // Renaming requires write access to src's parent and dst's ancestor.
    checkParentAccess(src, FsAction.WRITE);
    checkAncestorAccess(dst, FsAction.WRITE);
  }

  // Capture dst's status before the rename so the lease can be moved.
  HdfsFileStatus dinfo = dir.getFileInfo(dst, false);
  dir.renameTo(src, dst, options);
  changeLease(src, dst, dinfo); // update lease with new filename
  } finally {
    writeUnlock();
  }
}
项目:hadoop    文件:DFSClient.java   
/**
 * Same as {@link #mkdirs(String, FsPermission, boolean)} except that the
 * supplied permission has already been masked against the umask.
 *
 * @param src path of the directory to create
 * @param absPermission pre-masked permission; when null the default
 *        umask-masked permission is used
 * @param createParent whether to create missing parent directories
 * @return true if the operation succeeded
 */
public boolean primitiveMkdir(String src, FsPermission absPermission,
    boolean createParent) throws IOException {
  checkOpen();
  if (absPermission == null) {
    absPermission = FsPermission.getDefault().applyUMask(dfsClientConf.uMask);
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug(src + ": masked=" + absPermission);
  }
  // Trace the RPC; the span is closed whether or not the call succeeds.
  TraceScope scope = Trace.startSpan("mkdir", traceSampler);
  try {
    return namenode.mkdirs(src, absPermission, createParent);
  } catch (RemoteException re) {
    // Surface the NN-side exception types callers expect to catch.
    throw re.unwrapRemoteException(AccessControlException.class,
        InvalidPathException.class,
        FileAlreadyExistsException.class,
        FileNotFoundException.class,
        ParentNotDirectoryException.class,
        SafeModeException.class,
        NSQuotaExceededException.class,
        DSQuotaExceededException.class,
        UnresolvedPathException.class,
        SnapshotAccessControlException.class);
  } finally {
    scope.close();
  }
}
项目:hadoop    文件:FSDirRenameOp.java   
/**
 * The new rename which has the POSIX semantic.
 *
 * @param fsd directory tree to operate on
 * @param srcArg source path
 * @param dstArg destination path (must be a valid HDFS name)
 * @param logRetryCache whether the edit-log entry should record the RPC
 *        for the retry cache
 * @param options rename options (e.g. OVERWRITE)
 * @return the blocks of any overwritten file (for deferred deletion)
 *         paired with the resulting status of the destination
 */
static Map.Entry<BlocksMapUpdateInfo, HdfsFileStatus> renameToInt(
    FSDirectory fsd, final String srcArg, final String dstArg,
    boolean logRetryCache, Options.Rename... options)
    throws IOException {
  String src = srcArg;
  String dst = dstArg;
  if (NameNode.stateChangeLog.isDebugEnabled()) {
    NameNode.stateChangeLog.debug("DIR* NameSystem.renameTo: with options -" +
        " " + src + " to " + dst);
  }
  if (!DFSUtil.isValidName(dst)) {
    throw new InvalidPathException("Invalid name: " + dst);
  }
  final FSPermissionChecker pc = fsd.getPermissionChecker();

  // Translate /.reserved paths before performing the rename.
  byte[][] srcComponents = FSDirectory.getPathComponentsForReservedPath(src);
  byte[][] dstComponents = FSDirectory.getPathComponentsForReservedPath(dst);
  // Collects blocks of files overwritten by the rename, deleted later.
  BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
  src = fsd.resolvePath(pc, src, srcComponents);
  dst = fsd.resolvePath(pc, dst, dstComponents);
  renameTo(fsd, pc, src, dst, collectedBlocks, logRetryCache, options);
  INodesInPath dstIIP = fsd.getINodesInPath(dst, false);
  // Status of the destination after the rename, used for audit logging.
  HdfsFileStatus resultingStat = fsd.getAuditFileInfo(dstIIP);

  return new AbstractMap.SimpleImmutableEntry<>(
      collectedBlocks, resultingStat);
}
项目:hadoop    文件:FSDirSnapshotOp.java   
/**
 * Create a snapshot
 * @param snapshotRoot The directory path where the snapshot is taken
 * @param snapshotName The name of the snapshot; when null or empty a
 *        generated default name is used
 * @param logRetryCache whether the edit-log entry should record the RPC
 *        for the retry cache
 * @return the full path of the created snapshot
 * @throws InvalidPathException if the supplied name is not a valid
 *         path component
 */
static String createSnapshot(
    FSDirectory fsd, SnapshotManager snapshotManager, String snapshotRoot,
    String snapshotName, boolean logRetryCache)
    throws IOException {
  final INodesInPath iip = fsd.getINodesInPath4Write(snapshotRoot);
  // Only the owner of the snapshot root may take snapshots of it.
  if (fsd.isPermissionEnabled()) {
    FSPermissionChecker pc = fsd.getPermissionChecker();
    fsd.checkOwner(pc, iip);
  }

  // Fall back to a generated default name when none is supplied.
  if (snapshotName == null || snapshotName.isEmpty()) {
    snapshotName = Snapshot.generateDefaultSnapshotName();
  } else if (!DFSUtil.isValidNameForComponent(snapshotName)) {
    throw new InvalidPathException("Invalid snapshot name: " + snapshotName);
  }

  String snapshotPath = null;
  verifySnapshotName(fsd, snapshotName, snapshotRoot);
  fsd.writeLock();
  try {
    snapshotPath = snapshotManager.createSnapshot(iip, snapshotRoot,
        snapshotName);
  } finally {
    fsd.writeUnlock();
  }
  // Persist to the edit log after the in-memory snapshot was created.
  fsd.getEditLog().logCreateSnapshot(snapshotRoot, snapshotName,
      logRetryCache);

  return snapshotPath;
}
项目:hadoop    文件:TestINodeFile.java   
/**
 * Asserts that creating a symlink with the given invalid target is
 * rejected by the NameNode with an InvalidPathException.
 */
private void testInvalidSymlinkTarget(NamenodeProtocols nnRpc,
    String invalidTarget, String link) throws IOException {
  final FsPermission perm = FsPermission.createImmutable((short) 0755);
  boolean rejected = false;
  try {
    nnRpc.createSymlink(invalidTarget, link, perm, false);
  } catch (InvalidPathException expected) {
    rejected = true; // the NN correctly refused the target
  }
  if (!rejected) {
    fail("Symbolic link creation of target " + invalidTarget + " should fail");
  }
}
项目:aliyun-oss-hadoop-fs    文件:FSDirRenameOp.java   
/**
 * The new rename which has the POSIX semantic.
 *
 * @param fsd directory tree to operate on
 * @param srcArg source path
 * @param dstArg destination path (must be a valid HDFS name)
 * @param logRetryCache whether the edit-log entry should record the RPC
 *        for the retry cache
 * @param options rename options (e.g. OVERWRITE)
 * @return the blocks of any overwritten file (for deferred deletion)
 *         paired with the resulting status of the destination
 */
static Map.Entry<BlocksMapUpdateInfo, HdfsFileStatus> renameToInt(
    FSDirectory fsd, final String srcArg, final String dstArg,
    boolean logRetryCache, Options.Rename... options)
    throws IOException {
  String src = srcArg;
  String dst = dstArg;
  if (NameNode.stateChangeLog.isDebugEnabled()) {
    NameNode.stateChangeLog.debug("DIR* NameSystem.renameTo: with options -" +
        " " + src + " to " + dst);
  }
  if (!DFSUtil.isValidName(dst)) {
    throw new InvalidPathException("Invalid name: " + dst);
  }
  final FSPermissionChecker pc = fsd.getPermissionChecker();

  // Translate /.reserved paths before performing the rename.
  byte[][] srcComponents = FSDirectory.getPathComponentsForReservedPath(src);
  byte[][] dstComponents = FSDirectory.getPathComponentsForReservedPath(dst);
  // Collects blocks of files overwritten by the rename, deleted later.
  BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
  src = fsd.resolvePath(pc, src, srcComponents);
  dst = fsd.resolvePath(pc, dst, dstComponents);
  renameTo(fsd, pc, src, dst, collectedBlocks, logRetryCache, options);
  INodesInPath dstIIP = fsd.getINodesInPath(dst, false);
  // Status of the destination after the rename, used for audit logging.
  HdfsFileStatus resultingStat = fsd.getAuditFileInfo(dstIIP);

  return new AbstractMap.SimpleImmutableEntry<>(
      collectedBlocks, resultingStat);
}
项目:aliyun-oss-hadoop-fs    文件:FSDirAttrOp.java   
/**
 * Set the owner and/or group of a file or directory.
 *
 * @param fsd directory tree to operate on
 * @param src path whose ownership is changed
 * @param username new owner, or null to leave unchanged
 * @param group new group, or null to leave unchanged
 * @return file status of the path, for audit logging
 * @throws AccessControlException if a non-superuser tries to give the
 *         file away or to a group it is not a member of
 */
static HdfsFileStatus setOwner(
    FSDirectory fsd, String src, String username, String group)
    throws IOException {
  // The /.reserved root itself can never have its ownership changed.
  if (FSDirectory.isExactReservedName(src)) {
    throw new InvalidPathException(src);
  }
  FSPermissionChecker pc = fsd.getPermissionChecker();
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
  INodesInPath iip;
  fsd.writeLock();
  try {
    src = fsd.resolvePath(pc, src, pathComponents);
    iip = fsd.getINodesInPath4Write(src);
    fsd.checkOwner(pc, iip);
    // Non-superusers may only "change" owner to themselves and group to
    // one of their own groups.
    if (!pc.isSuperUser()) {
      if (username != null && !pc.getUser().equals(username)) {
        throw new AccessControlException("Non-super user cannot change owner");
      }
      if (group != null && !pc.containsGroup(group)) {
        throw new AccessControlException("User does not belong to " + group);
      }
    }
    unprotectedSetOwner(fsd, src, username, group);
  } finally {
    fsd.writeUnlock();
  }
  // Persist to the edit log only after the in-memory change succeeded.
  fsd.getEditLog().logSetOwner(src, username, group);
  return fsd.getAuditFileInfo(iip);
}
项目:aliyun-oss-hadoop-fs    文件:FSDirSnapshotOp.java   
/**
 * Create a snapshot
 * @param snapshotRoot The directory path where the snapshot is taken
 * @param snapshotName The name of the snapshot; when null or empty a
 *        generated default name is used
 * @param logRetryCache whether the edit-log entry should record the RPC
 *        for the retry cache
 * @return the full path of the created snapshot
 * @throws InvalidPathException if the supplied name is not a valid
 *         path component
 */
static String createSnapshot(
    FSDirectory fsd, SnapshotManager snapshotManager, String snapshotRoot,
    String snapshotName, boolean logRetryCache)
    throws IOException {
  final INodesInPath iip = fsd.getINodesInPath4Write(snapshotRoot);
  // Only the owner of the snapshot root may take snapshots of it.
  if (fsd.isPermissionEnabled()) {
    FSPermissionChecker pc = fsd.getPermissionChecker();
    fsd.checkOwner(pc, iip);
  }

  // Fall back to a generated default name when none is supplied.
  if (snapshotName == null || snapshotName.isEmpty()) {
    snapshotName = Snapshot.generateDefaultSnapshotName();
  } else if (!DFSUtil.isValidNameForComponent(snapshotName)) {
    throw new InvalidPathException("Invalid snapshot name: " + snapshotName);
  }

  String snapshotPath = null;
  verifySnapshotName(fsd, snapshotName, snapshotRoot);
  fsd.writeLock();
  try {
    snapshotPath = snapshotManager.createSnapshot(iip, snapshotRoot,
        snapshotName);
  } finally {
    fsd.writeUnlock();
  }
  // Persist to the edit log after the in-memory snapshot was created.
  fsd.getEditLog().logCreateSnapshot(snapshotRoot, snapshotName,
      logRetryCache);

  return snapshotPath;
}
项目:aliyun-oss-hadoop-fs    文件:TestINodeFile.java   
/**
 * Asserts that creating a symlink with the given invalid target is
 * rejected by the NameNode with an InvalidPathException.
 */
private void testInvalidSymlinkTarget(NamenodeProtocols nnRpc,
    String invalidTarget, String link) throws IOException {
  final FsPermission perm = FsPermission.createImmutable((short) 0755);
  boolean rejected = false;
  try {
    nnRpc.createSymlink(invalidTarget, link, perm, false);
  } catch (InvalidPathException expected) {
    rejected = true; // the NN correctly refused the target
  }
  if (!rejected) {
    fail("Symbolic link creation of target " + invalidTarget + " should fail");
  }
}
项目:big-c    文件:DFSClient.java   
/**
 * Same as {@link #mkdirs(String, FsPermission, boolean)} except that the
 * supplied permission has already been masked against the umask.
 *
 * @param src path of the directory to create
 * @param absPermission pre-masked permission; when null the default
 *        umask-masked permission is used
 * @param createParent whether to create missing parent directories
 * @return true if the operation succeeded
 */
public boolean primitiveMkdir(String src, FsPermission absPermission,
    boolean createParent) throws IOException {
  checkOpen();
  if (absPermission == null) {
    absPermission = FsPermission.getDefault().applyUMask(dfsClientConf.uMask);
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug(src + ": masked=" + absPermission);
  }
  // Trace the RPC; the span is closed whether or not the call succeeds.
  TraceScope scope = Trace.startSpan("mkdir", traceSampler);
  try {
    return namenode.mkdirs(src, absPermission, createParent);
  } catch (RemoteException re) {
    // Surface the NN-side exception types callers expect to catch.
    throw re.unwrapRemoteException(AccessControlException.class,
        InvalidPathException.class,
        FileAlreadyExistsException.class,
        FileNotFoundException.class,
        ParentNotDirectoryException.class,
        SafeModeException.class,
        NSQuotaExceededException.class,
        DSQuotaExceededException.class,
        UnresolvedPathException.class,
        SnapshotAccessControlException.class);
  } finally {
    scope.close();
  }
}
项目:big-c    文件:FSDirRenameOp.java   
/**
 * The new rename which has the POSIX semantic.
 *
 * @param fsd directory tree to operate on
 * @param srcArg source path
 * @param dstArg destination path (must be a valid HDFS name)
 * @param logRetryCache whether the edit-log entry should record the RPC
 *        for the retry cache
 * @param options rename options (e.g. OVERWRITE)
 * @return the blocks of any overwritten file (for deferred deletion)
 *         paired with the resulting status of the destination
 */
static Map.Entry<BlocksMapUpdateInfo, HdfsFileStatus> renameToInt(
    FSDirectory fsd, final String srcArg, final String dstArg,
    boolean logRetryCache, Options.Rename... options)
    throws IOException {
  String src = srcArg;
  String dst = dstArg;
  if (NameNode.stateChangeLog.isDebugEnabled()) {
    NameNode.stateChangeLog.debug("DIR* NameSystem.renameTo: with options -" +
        " " + src + " to " + dst);
  }
  if (!DFSUtil.isValidName(dst)) {
    throw new InvalidPathException("Invalid name: " + dst);
  }
  final FSPermissionChecker pc = fsd.getPermissionChecker();

  // Translate /.reserved paths before performing the rename.
  byte[][] srcComponents = FSDirectory.getPathComponentsForReservedPath(src);
  byte[][] dstComponents = FSDirectory.getPathComponentsForReservedPath(dst);
  // Collects blocks of files overwritten by the rename, deleted later.
  BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
  src = fsd.resolvePath(pc, src, srcComponents);
  dst = fsd.resolvePath(pc, dst, dstComponents);
  renameTo(fsd, pc, src, dst, collectedBlocks, logRetryCache, options);
  INodesInPath dstIIP = fsd.getINodesInPath(dst, false);
  // Status of the destination after the rename, used for audit logging.
  HdfsFileStatus resultingStat = fsd.getAuditFileInfo(dstIIP);

  return new AbstractMap.SimpleImmutableEntry<>(
      collectedBlocks, resultingStat);
}
项目:big-c    文件:FSDirSnapshotOp.java   
/**
 * Create a snapshot
 * @param snapshotRoot The directory path where the snapshot is taken
 * @param snapshotName The name of the snapshot; when null or empty a
 *        generated default name is used
 * @param logRetryCache whether the edit-log entry should record the RPC
 *        for the retry cache
 * @return the full path of the created snapshot
 * @throws InvalidPathException if the supplied name is not a valid
 *         path component
 */
static String createSnapshot(
    FSDirectory fsd, SnapshotManager snapshotManager, String snapshotRoot,
    String snapshotName, boolean logRetryCache)
    throws IOException {
  final INodesInPath iip = fsd.getINodesInPath4Write(snapshotRoot);
  // Only the owner of the snapshot root may take snapshots of it.
  if (fsd.isPermissionEnabled()) {
    FSPermissionChecker pc = fsd.getPermissionChecker();
    fsd.checkOwner(pc, iip);
  }

  // Fall back to a generated default name when none is supplied.
  if (snapshotName == null || snapshotName.isEmpty()) {
    snapshotName = Snapshot.generateDefaultSnapshotName();
  } else if (!DFSUtil.isValidNameForComponent(snapshotName)) {
    throw new InvalidPathException("Invalid snapshot name: " + snapshotName);
  }

  String snapshotPath = null;
  verifySnapshotName(fsd, snapshotName, snapshotRoot);
  fsd.writeLock();
  try {
    snapshotPath = snapshotManager.createSnapshot(iip, snapshotRoot,
        snapshotName);
  } finally {
    fsd.writeUnlock();
  }
  // Persist to the edit log after the in-memory snapshot was created.
  fsd.getEditLog().logCreateSnapshot(snapshotRoot, snapshotName,
      logRetryCache);

  return snapshotPath;
}
项目:big-c    文件:TestINodeFile.java   
/**
 * Asserts that creating a symlink with the given invalid target is
 * rejected by the NameNode with an InvalidPathException.
 */
private void testInvalidSymlinkTarget(NamenodeProtocols nnRpc,
    String invalidTarget, String link) throws IOException {
  final FsPermission perm = FsPermission.createImmutable((short) 0755);
  boolean rejected = false;
  try {
    nnRpc.createSymlink(invalidTarget, link, perm, false);
  } catch (InvalidPathException expected) {
    rejected = true; // the NN correctly refused the target
  }
  if (!rejected) {
    fail("Symbolic link creation of target " + invalidTarget + " should fail");
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:DFSClient.java   
/**
 * Same as {@link #mkdirs(String, FsPermission, boolean)} except that the
 * supplied permission has already been masked against the umask.
 *
 * @param src path of the directory to create
 * @param absPermission pre-masked permission; when null the default
 *        umask-masked permission is used
 * @param createParent whether to create missing parent directories
 * @return true if the operation succeeded
 */
public boolean primitiveMkdir(String src, FsPermission absPermission,
    boolean createParent) throws IOException {
  checkOpen();
  if (absPermission == null) {
    absPermission = FsPermission.getDefault().applyUMask(dfsClientConf.uMask);
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug(src + ": masked=" + absPermission);
  }
  try {
    return namenode.mkdirs(src, absPermission, createParent);
  } catch (RemoteException re) {
    // Surface the NN-side exception types callers expect to catch.
    throw re.unwrapRemoteException(AccessControlException.class,
        InvalidPathException.class,
        FileAlreadyExistsException.class,
        FileNotFoundException.class,
        ParentNotDirectoryException.class,
        SafeModeException.class,
        NSQuotaExceededException.class,
        DSQuotaExceededException.class,
        UnresolvedPathException.class,
        SnapshotAccessControlException.class);
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:FSNamesystem.java   
/**
 * Create a symbolic link.
 *
 * @param target path the new link will point to (must not be reserved)
 * @param link path of the link to create (must be a valid HDFS name)
 * @param dirPerms permissions to apply when creating parent directories
 * @param createParent whether to create missing parent directories
 * @throws UnsupportedOperationException if symlinks are disabled
 * @throws InvalidPathException if the link name or target is not allowed
 */
@SuppressWarnings("deprecation")
void createSymlink(String target, String link,
    PermissionStatus dirPerms, boolean createParent) 
    throws IOException, UnresolvedLinkException {
  if (!FileSystem.areSymlinksEnabled()) {
    throw new UnsupportedOperationException("Symlinks not supported");
  }
  if (!DFSUtil.isValidName(link)) {
    throw new InvalidPathException("Invalid link name: " + link);
  }
  // Links may not point into the /.reserved namespace.
  if (FSDirectory.isReservedName(target)) {
    throw new InvalidPathException("Invalid target name: " + target);
  }
  // Short-circuit retried RPCs that have already completed successfully.
  CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return; // Return previous response
  }
  boolean success = false;
  try {
    createSymlinkInt(target, link, dirPerms, createParent, cacheEntry != null);
    success = true;
  } catch (AccessControlException e) {
    // Audit the denied attempt before propagating the failure.
    logAuditEvent(false, "createSymlink", link, target, null);
    throw e;
  } finally {
    // Record the outcome so retries of this RPC are answered consistently.
    RetryCache.setState(cacheEntry, success);
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:FSNamesystem.java   
/**
 * Get the file info for a specific file.
 *
 * @param srcArg The string representation of the path to the file
 * @param resolveLink whether to throw UnresolvedLinkException 
 *        if src refers to a symlink
 *
 * @throws AccessControlException if access is denied
 * @throws UnresolvedLinkException if a symlink is encountered.
 *
 * @return object containing information regarding the file
 *         or null if file not found
 * @throws StandbyException 
 */
HdfsFileStatus getFileInfo(final String srcArg, boolean resolveLink)
  throws AccessControlException, UnresolvedLinkException,
         StandbyException, IOException {
  String src = srcArg;
  if (!DFSUtil.isValidName(src)) {
    throw new InvalidPathException("Invalid file name: " + src);
  }
  HdfsFileStatus stat = null;
  FSPermissionChecker pc = getPermissionChecker();
  checkOperation(OperationCategory.READ);
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
  readLock();
  try {
    // Re-check the operation category after taking the lock; the NN's
    // HA state may have changed while this thread was waiting.
    checkOperation(OperationCategory.READ);
    src = resolvePath(src, pathComponents);
    boolean isSuperUser = true;
    if (isPermissionEnabled) {
      checkPermission(pc, src, false, null, null, null, null, false,
          resolveLink);
      isSuperUser = pc.isSuperUser();
    }
    stat = dir.getFileInfo(src, resolveLink,
        FSDirectory.isReservedRawName(srcArg), isSuperUser);
  } catch (AccessControlException e) {
    // Audit with the caller-supplied path, not the resolved one.
    logAuditEvent(false, "getfileinfo", srcArg);
    throw e;
  } finally {
    readUnlock();
  }
  logAuditEvent(true, "getfileinfo", srcArg);
  return stat;
}
项目:hadoop-2.6.0-cdh5.4.3    文件:FSNamesystem.java   
/**
 * Internal implementation of mkdirs: validates the path, takes the
 * namesystem write lock, creates the directory (and optionally parents),
 * syncs the edit log, and emits an audit event on success.
 *
 * @param srcArg path of the directory to create
 * @param permissions permission status for the new directory
 * @param createParent whether to create missing parent directories
 * @return true if the directory was created
 */
private boolean mkdirsInt(final String srcArg, PermissionStatus permissions,
    boolean createParent) throws IOException, UnresolvedLinkException {
  String src = srcArg;
  if(NameNode.stateChangeLog.isDebugEnabled()) {
    NameNode.stateChangeLog.debug("DIR* NameSystem.mkdirs: " + src);
  }
  if (!DFSUtil.isValidName(src)) {
    throw new InvalidPathException(src);
  }
  FSPermissionChecker pc = getPermissionChecker();
  checkOperation(OperationCategory.WRITE);
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
  HdfsFileStatus resultingStat = null;
  boolean status = false;
  writeLock();
  try {
    // Re-check after taking the lock; HA state may have changed.
    checkOperation(OperationCategory.WRITE);   
    checkNameNodeSafeMode("Cannot create directory " + src);
    src = resolvePath(src, pathComponents);
    status = mkdirsInternal(pc, src, permissions, createParent);
    if (status) {
      resultingStat = getAuditFileInfo(src, false);
    }
  } finally {
    writeUnlock();
  }
  // Persist the edit outside the lock before acknowledging success.
  getEditLog().logSync();
  if (status) {
    logAuditEvent(true, "mkdirs", srcArg, null, resultingStat);
  }
  return status;
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestINodeFile.java   
/**
 * Asserts that the NameNode rejects symlink creation for the given
 * invalid target by throwing {@link InvalidPathException}.
 *
 * @param nnRpc RPC proxy to the NameNode under test
 * @param invalidTarget target path expected to be rejected
 * @param link path at which the symlink would be created
 */
private void testInvalidSymlinkTarget(NamenodeProtocols nnRpc,
    String invalidTarget, String link) throws IOException {
  try {
    FsPermission perm = FsPermission.createImmutable((short)0755);
    nnRpc.createSymlink(invalidTarget, link, perm, false);
    fail("Symbolic link creation of target " + invalidTarget + " should fail");
  } catch (InvalidPathException expected) {
    // Expected: the NameNode refused the invalid target.
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:AbstractFileSystem.java   
/**
 * Get the path-part of a pathname. Checks that URI matches this file system
 * and that the path-part is a valid name.
 * 
 * @param p path
 * 
 * @return path-part of the Path p
 */
public String getUriPath(final Path p) {
  checkPath(p);
  final String pathPart = p.toUri().getPath();
  if (isValidName(pathPart)) {
    return pathPart;
  }
  // Reject names this file system considers invalid.
  throw new InvalidPathException("Path part " + pathPart + " from URI " + p
      + " is not a valid filename.");
}
项目:hadoop-plus    文件:DFSClient.java   
/**
 * Same as {@link #mkdirs(String, FsPermission, boolean)} except that the
 * supplied permission is assumed to have been masked against the umask
 * already.
 *
 * @param src path of the directory to create
 * @param absPermission permission applied as-is; when null, the default
 *        permission masked by the client umask is used
 * @param createParent whether missing parents should be created
 * @return true if the directory was created
 * @throws IOException on failure; well-known remote exception types are
 *         unwrapped to their local equivalents
 */
public boolean primitiveMkdir(String src, FsPermission absPermission, 
  boolean createParent)
  throws IOException {
  checkOpen();

  if (absPermission == null) {
    // No permission given: use the default, masked by the client umask.
    absPermission = FsPermission.getDefault().applyUMask(dfsClientConf.uMask);
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug(src + ": masked=" + absPermission);
  }

  try {
    return namenode.mkdirs(src, absPermission, createParent);
  } catch (RemoteException re) {
    // Translate server-side failures into their local exception types so
    // callers can catch the specific condition.
    throw re.unwrapRemoteException(AccessControlException.class,
        InvalidPathException.class,
        FileAlreadyExistsException.class,
        FileNotFoundException.class,
        ParentNotDirectoryException.class,
        SafeModeException.class,
        NSQuotaExceededException.class,
        DSQuotaExceededException.class,
        UnresolvedPathException.class,
        SnapshotAccessControlException.class);
  }
}
项目:hadoop-plus    文件:FSNamesystem.java   
/**
 * Get the file info for a specific file.
 *
 * @param src The string representation of the path to the file
 * @param resolveLink whether to throw UnresolvedLinkException 
 *        if src refers to a symlink
 *
 * @throws AccessControlException if access is denied
 * @throws UnresolvedLinkException if a symlink is encountered.
 *
 * @return object containing information regarding the file
 *         or null if file not found
 * @throws StandbyException 
 */
HdfsFileStatus getFileInfo(String src, boolean resolveLink) 
  throws AccessControlException, UnresolvedLinkException,
         StandbyException, IOException {
  // Validate the name exactly once up front (the original repeated this
  // identical check a second time below; the duplicate was dead code).
  if (!DFSUtil.isValidName(src)) {
    throw new InvalidPathException("Invalid file name: " + src);
  }
  HdfsFileStatus stat = null;
  FSPermissionChecker pc = getPermissionChecker();
  checkOperation(OperationCategory.READ);
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
  readLock();
  try {
    // Re-check after taking the lock: the NN's HA state may have changed
    // while this thread was waiting.
    checkOperation(OperationCategory.READ);
    src = FSDirectory.resolvePath(src, pathComponents, dir);
    if (isPermissionEnabled) {
      checkTraverse(pc, src);
    }
    stat = dir.getFileInfo(src, resolveLink);
  } catch (AccessControlException e) {
    // Audit the denied access before propagating.
    logAuditEvent(false, "getfileinfo", src);
    throw e;
  } finally {
    readUnlock();
  }
  logAuditEvent(true, "getfileinfo", src);
  return stat;
}
项目:hadoop-plus    文件:FSNamesystem.java   
/**
 * Internal implementation of mkdirs: validates the path, takes the
 * namesystem write lock, creates the directory (and optionally parents),
 * syncs the edit log, and emits an audit event on success.
 *
 * @param src path of the directory to create
 * @param permissions permission status for the new directory
 * @param createParent whether to create missing parent directories
 * @return true if the directory was created
 */
private boolean mkdirsInt(String src, PermissionStatus permissions,
    boolean createParent) throws IOException, UnresolvedLinkException {
  if(NameNode.stateChangeLog.isDebugEnabled()) {
    NameNode.stateChangeLog.debug("DIR* NameSystem.mkdirs: " + src);
  }
  if (!DFSUtil.isValidName(src)) {
    throw new InvalidPathException(src);
  }
  FSPermissionChecker pc = getPermissionChecker();
  checkOperation(OperationCategory.WRITE);
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
  HdfsFileStatus resultingStat = null;
  boolean status = false;
  writeLock();
  try {
    // Re-check after taking the lock; HA state may have changed.
    checkOperation(OperationCategory.WRITE);   
    if (isInSafeMode()) {
      throw new SafeModeException("Cannot create directory " + src, safeMode);
    }
    src = FSDirectory.resolvePath(src, pathComponents, dir);
    status = mkdirsInternal(pc, src, permissions, createParent);
    if (status) {
      resultingStat = dir.getFileInfo(src, false);
    }
  } finally {
    writeUnlock();
  }
  // Persist the edit outside the lock before acknowledging success.
  getEditLog().logSync();
  if (status) {
    logAuditEvent(true, "mkdirs", src, null, resultingStat);
  }
  return status;
}