Example source code for the Java class org.apache.hadoop.util.Shell

Project: hadoop-oss    File: LocalJavaKeyStoreProvider.java
@Override
protected void stashOriginalFilePermissions() throws IOException {
  // save off permissions in case we need to
  // rewrite the keystore in flush()
  if (!Shell.WINDOWS) {
    Path path = Paths.get(file.getCanonicalPath());
    permissions = Files.getPosixFilePermissions(path);
  } else {
    // On Windows, the JDK does not support the POSIX file permission APIs.
    // Instead, we can do a winutils call and translate.
    String[] cmd = Shell.getGetPermissionCommand();
    String[] args = new String[cmd.length + 1];
    System.arraycopy(cmd, 0, args, 0, cmd.length);
    args[cmd.length] = file.getCanonicalPath();
    String out = Shell.execCommand(args);
    StringTokenizer t = new StringTokenizer(out, Shell.TOKEN_SEPARATOR_REGEX);
    // The winutils output consists of 10 characters because of the leading
    // directory indicator, i.e. "drwx------".  The JDK parsing method expects
    // a 9-character string, so remove the leading character.
    String permString = t.nextToken().substring(1);
    permissions = PosixFilePermissions.fromString(permString);
  }
}
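For reference, here is a minimal standalone sketch of the winutils-output translation described in the comments above (the sample mode string is hypothetical):

import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.PosixFilePermissions;
import java.util.Set;

public class WinutilsPermStringDemo {
  public static void main(String[] args) {
    // The first token of a winutils listing is a 10-character mode string.
    String winutilsMode = "drwx------";
    // Drop the leading directory indicator; the JDK parser expects 9 characters.
    Set<PosixFilePermission> perms =
        PosixFilePermissions.fromString(winutilsMode.substring(1));
    System.out.println(perms); // e.g. [OWNER_READ, OWNER_WRITE, OWNER_EXECUTE]
  }
}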
Project: hadoop-oss    File: LocalJavaKeyStoreProvider.java
@Override
public void flush() throws IOException {
  super.flush();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Reseting permissions to '" + permissions + "'");
  }
  if (!Shell.WINDOWS) {
    Files.setPosixFilePermissions(Paths.get(file.getCanonicalPath()),
        permissions);
  } else {
    // FsPermission expects a 10-character string because of the leading
    // directory indicator, i.e. "drwx------". The JDK toString method returns
    // a 9-character string, so prepend a leading character.
    FsPermission fsPermission = FsPermission.valueOf(
        "-" + PosixFilePermissions.toString(permissions));
    FileUtil.setPermission(file, fsPermission);
  }
}
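The reverse mapping used in flush() can be exercised in isolation; a minimal sketch (the 9-character permission string is illustrative):

import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.PosixFilePermissions;
import java.util.Set;
import org.apache.hadoop.fs.permission.FsPermission;

public class PermStringRoundTrip {
  public static void main(String[] args) {
    Set<PosixFilePermission> perms = PosixFilePermissions.fromString("rwx------");
    // Prepend the directory indicator that FsPermission.valueOf() expects.
    FsPermission fsPermission =
        FsPermission.valueOf("-" + PosixFilePermissions.toString(perms));
    System.out.println(fsPermission); // rwx------
  }
}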
Project: hadoop    File: TestContainerLaunch.java
@Test (timeout = 10000)
public void testWindowsShellScriptBuilderMkdir() throws IOException {
  String mkDirCmd = "@if not exist \"\" mkdir \"\"";

  // Test is only relevant on Windows
  Assume.assumeTrue(Shell.WINDOWS);

  // The tests assume a maximum Windows command line length of 8191 characters.
  // (The misspelling in WINDOWS_MAX_SHELL_LENGHT is Hadoop's own constant name.)
  assertEquals(8191, Shell.WINDOWS_MAX_SHELL_LENGHT);

  ShellScriptBuilder builder = ShellScriptBuilder.create();

  // test mkdir
  builder.mkdir(new Path(org.apache.commons.lang.StringUtils.repeat("A", 1024)));
  builder.mkdir(new Path(org.apache.commons.lang.StringUtils.repeat(
      "E", (Shell.WINDOWS_MAX_SHELL_LENGHT - mkDirCmd.length())/2)));
  try {
    builder.mkdir(new Path(org.apache.commons.lang.StringUtils.repeat(
        "X", (Shell.WINDOWS_MAX_SHELL_LENGHT - mkDirCmd.length())/2 +1)));
    fail("long mkdir was expected to throw");
  } catch(IOException e) {
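    // expectedMessage is a field of the enclosing test class (not shown in
    // this excerpt) containing the error text expected for over-long commands.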
    assertThat(e.getMessage(), CoreMatchers.containsString(expectedMessage));
  }    
}
Project: hadoop    File: MiniYARNCluster.java
@Override
protected synchronized void serviceStop() throws Exception {
  if (resourceManagers[index] != null) {
    waitForAppMastersToFinish(5000);
    resourceManagers[index].stop();
  }

  if (Shell.WINDOWS) {
    // On Windows, clean up the short temporary symlink that was created to
    // work around path length limitation.
    String testWorkDirPath = testWorkDir.getAbsolutePath();
    try {
      FileContext.getLocalFSFileContext().delete(new Path(testWorkDirPath),
        true);
    } catch (IOException e) {
      LOG.warn("could not cleanup symlink: " +
        testWorkDir.getAbsolutePath());
    }
  }
  super.serviceStop();
}
Project: hadoop    File: TestContainerExecutor.java
@Test (timeout = 5000)
public void testRunCommandWithCpuAndMemoryResources() {
  // Windows only test
  assumeTrue(Shell.WINDOWS);
  Configuration conf = new Configuration();
  conf.set(YarnConfiguration.NM_WINDOWS_CONTAINER_CPU_LIMIT_ENABLED, "true");
  conf.set(YarnConfiguration.NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED, "true");
  String[] command = containerExecutor.getRunCommand("echo", "group1", null, null,
      conf, Resource.newInstance(1024, 1));
  float yarnProcessors = NodeManagerHardwareUtils.getContainersCores(
      ResourceCalculatorPlugin.getResourceCalculatorPlugin(null, conf),
      conf);
  int cpuRate = Math.min(10000, (int) ((1 * 10000) / yarnProcessors));
  // Assert the cpu and memory limits are set correctly in the command
  String[] expected = { Shell.WINUTILS, "task", "create", "-m", "1024", "-c",
      String.valueOf(cpuRate), "group1", "cmd /c " + "echo" };
  Assert.assertTrue(Arrays.equals(expected, command));
}
Project: hadoop-oss    File: Stat.java
@Override
protected String[] getExecString() {
  String derefFlag = "-";
  if (dereference) {
    derefFlag = "-L";
  }
  if (Shell.LINUX) {
    return new String[] {
        "stat", derefFlag + "c", "%s,%F,%Y,%X,%a,%U,%G,%N", path.toString() };
  } else if (Shell.FREEBSD || Shell.MAC) {
    return new String[] {
        "stat", derefFlag + "f", "%z,%HT,%m,%a,%Op,%Su,%Sg,`link' -> `%Y'",
        path.toString() };
  } else {
    throw new UnsupportedOperationException(
        "stat is not supported on this platform");
  }
}
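For reference, on Linux the command assembled above resolves to stat -c %s,%F,%Y,%X,%a,%U,%G,%N <path> (or -Lc when dereferencing symlinks), while the FreeBSD/macOS branch uses stat -f with the equivalent format specifiers.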
Project: hadoop    File: TestNodeManagerShutdown.java
/**
 * Creates a script to run a container that will run forever unless
 * stopped by external means.
 */
private static File createUnhaltingScriptFile(ContainerId cId,
    File scriptFileDir, File processStartFile) throws IOException {
  File scriptFile = Shell.appendScriptExtension(scriptFileDir, "scriptFile");
  PrintWriter fileWriter = new PrintWriter(scriptFile);
  if (Shell.WINDOWS) {
    fileWriter.println("@echo \"Running testscript for delayed kill\"");
    fileWriter.println("@echo \"Writing pid to start file\"");
    fileWriter.println("@echo " + cId + ">> " + processStartFile);
    fileWriter.println("@pause");
  } else {
    fileWriter.write("#!/bin/bash\n\n");
    fileWriter.write("echo \"Running testscript for delayed kill\"\n");
    fileWriter.write("hello=\"Got SIGTERM\"\n");
    fileWriter.write("umask 0\n");
    fileWriter.write("trap \"echo $hello >> " + processStartFile +
      "\" SIGTERM\n");
    fileWriter.write("echo \"Writing pid to start file\"\n");
    fileWriter.write("echo $$ >> " + processStartFile + "\n");
    fileWriter.write("while true; do\ndate >> /dev/null;\n done\n");
  }

  fileWriter.close();
  return scriptFile;
}
Project: hadoop-oss    File: RawLocalFileSystem.java
@Override
public boolean rename(Path src, Path dst) throws IOException {
  // Attempt rename using Java API.
  File srcFile = pathToFile(src);
  File dstFile = pathToFile(dst);
  if (srcFile.renameTo(dstFile)) {
    return true;
  }

  // Else try POSIX style rename on Windows only
  if (Shell.WINDOWS &&
      handleEmptyDstDirectoryOnWindows(src, srcFile, dst, dstFile)) {
    return true;
  }

  // The fallback behavior accomplishes the rename by a full copy.
  if (LOG.isDebugEnabled()) {
    LOG.debug("Falling through to a copy of " + src + " to " + dst);
  }
  return FileUtil.copy(this, src, this, dst, true, getConf());
}
Project: ditb    File: HttpServer.java
@InterfaceAudience.Private
public static Connector createDefaultChannelConnector() {
  SelectChannelConnector ret = new SelectChannelConnector();
  ret.setLowResourceMaxIdleTime(10000);
  ret.setAcceptQueueSize(128);
  ret.setResolveNames(false);
  ret.setUseDirectBuffers(false);
  if(Shell.WINDOWS) {
    // result of setting the SO_REUSEADDR flag is different on Windows
    // http://msdn.microsoft.com/en-us/library/ms740621(v=vs.85).aspx
    // without this 2 NN's can start on the same machine and listen on
    // the same port with indeterminate routing of incoming requests to them
    ret.setReuseAddress(false);
  }
  ret.setHeaderBufferSize(1024*64);
  return ret;
}
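The SO_REUSEADDR workaround is independent of Jetty; a minimal java.net sketch of the same idea (the port number is illustrative):

import java.net.InetSocketAddress;
import java.net.ServerSocket;

public class ReuseAddressDemo {
  public static void main(String[] args) throws Exception {
    ServerSocket socket = new ServerSocket();
    // On Windows, leaving SO_REUSEADDR enabled would let a second process
    // bind the same port, so disable it before binding.
    socket.setReuseAddress(false);
    socket.bind(new InetSocketAddress(50070)); // illustrative port
    socket.close();
  }
}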
Project: hadoop-oss    File: NativeIO.java
public static void chmod(String path, int mode) throws IOException {
  if (!Shell.WINDOWS) {
    chmodImpl(path, mode);
  } else {
    try {
      chmodImpl(path, mode);
    } catch (NativeIOException nioe) {
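      // Windows system error code 3 is ERROR_PATH_NOT_FOUND; map it to the
      // POSIX-style ENOENT so callers see a familiar error.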
      if (nioe.getErrorCode() == 3) {
        throw new NativeIOException("No such file or directory",
            Errno.ENOENT);
      } else {
        LOG.warn(String.format("NativeIO.chmod error (%d): %s",
            nioe.getErrorCode(), nioe.getMessage()));
        throw new NativeIOException("Unknown error", Errno.UNKNOWN);
      }
    }
  }
}
Project: hadoop-oss    File: NativeIO.java
public static String getOwner(FileDescriptor fd) throws IOException {
  ensureInitialized();
  if (Shell.WINDOWS) {
    String owner = Windows.getOwner(fd);
    owner = stripDomain(owner);
    return owner;
  } else {
    long uid = POSIX.getUIDforFDOwnerforOwner(fd);
    CachedUid cUid = uidCache.get(uid);
    long now = System.currentTimeMillis();
    if (cUid != null && (cUid.timestamp + cacheTimeout) > now) {
      return cUid.username;
    }
    String user = POSIX.getUserName(uid);
    LOG.info("Got UserName " + user + " for UID " + uid
        + " from the native implementation");
    cUid = new CachedUid(user, now);
    uidCache.put(uid, cUid);
    return user;
  }
}
Project: hadoop-oss    File: NativeIO.java
/**
 * Create a FileInputStream that shares delete permission on the
 * file opened at a given offset, i.e. other processes can delete
 * the file while the FileInputStream is reading it. Only the Windows
 * implementation uses the native interface.
 */
public static FileInputStream getShareDeleteFileInputStream(File f, long seekOffset)
    throws IOException {
  if (!Shell.WINDOWS) {
    RandomAccessFile rf = new RandomAccessFile(f, "r");
    if (seekOffset > 0) {
      rf.seek(seekOffset);
    }
    return new FileInputStream(rf.getFD());
  } else {
    // Use Windows native interface to create a FileInputStream that
    // shares delete permission on the file opened, and set it to the
    // given offset.
    //
    FileDescriptor fd = NativeIO.Windows.createFile(
        f.getAbsolutePath(),
        NativeIO.Windows.GENERIC_READ,
        NativeIO.Windows.FILE_SHARE_READ |
            NativeIO.Windows.FILE_SHARE_WRITE |
            NativeIO.Windows.FILE_SHARE_DELETE,
        NativeIO.Windows.OPEN_EXISTING);
    if (seekOffset > 0)
      NativeIO.Windows.setFilePointer(fd, seekOffset, NativeIO.Windows.FILE_BEGIN);
    return new FileInputStream(fd);
  }
}
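A hedged usage sketch (the file name and offset are illustrative):

// Read a file that another process may delete while we hold it open,
// starting at byte offset 128.
try (FileInputStream in =
         NativeIO.getShareDeleteFileInputStream(new File("data.bin"), 128L)) {
  int firstByte = in.read();
}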
Project: hadoop-oss    File: TestLocalDirAllocator.java
/** Two buffer dirs. The first dir does not exist & is on a read-only disk;
 * The second dir exists & is RW
 * @throws Exception
 */
@Test (timeout = 30000)
public void test0() throws Exception {
  if (isWindows) return;
  String dir0 = buildBufferDir(ROOT, 0);
  String dir1 = buildBufferDir(ROOT, 1);
  try {
    conf.set(CONTEXT, dir0 + "," + dir1);
    assertTrue(localFs.mkdirs(new Path(dir1)));
    BUFFER_ROOT.setReadOnly();
    validateTempDirCreation(dir1);
    validateTempDirCreation(dir1);
  } finally {
    Shell.execCommand(Shell.getSetPermissionCommand("u+w", false,
                                                    BUFFER_DIR_ROOT));
    rmBufferDirs();
  }
}
Project: hadoop-oss    File: TestLocalDirAllocator.java
/** Two buffer dirs. The first dir exists & is on a read-only disk;
 * The second dir exists & is RW
 * @throws Exception
 */
@Test (timeout = 30000)
public void testROBufferDirAndRWBufferDir() throws Exception {
  if (isWindows) return;
  String dir1 = buildBufferDir(ROOT, 1);
  String dir2 = buildBufferDir(ROOT, 2);
  try {
    conf.set(CONTEXT, dir1 + "," + dir2);
    assertTrue(localFs.mkdirs(new Path(dir2)));
    BUFFER_ROOT.setReadOnly();
    validateTempDirCreation(dir2);
    validateTempDirCreation(dir2);
  } finally {
    Shell.execCommand(Shell.getSetPermissionCommand("u+w", false,
                                                    BUFFER_DIR_ROOT));
    rmBufferDirs();
  }
}
Project: hadoop-oss    File: TestLocalDirAllocator.java
/** Test that no side-effect files are left over. After creating a temp
 * file, remove both the temp file and its parent. Verify that no files
 * or directories are left over, as can happen when File objects are
 * mistakenly created from fully qualified path strings.
 * @throws IOException
 */
@Test (timeout = 30000)
public void testNoSideEffects() throws IOException {
  assumeTrue(!isWindows);
  String dir = buildBufferDir(ROOT, 0);
  try {
    conf.set(CONTEXT, dir);
    File result = dirAllocator.createTmpFileForWrite(FILENAME, -1, conf);
    assertTrue(result.delete());
    assertTrue(result.getParentFile().delete());
    assertFalse(new File(dir).exists());
  } finally {
    Shell.execCommand(Shell.getSetPermissionCommand("u+w", false,
                                                    BUFFER_DIR_ROOT));
    rmBufferDirs();
  }
}
Project: hadoop-oss    File: TestLocalDirAllocator.java
/**
 * Test that getLocalPathToRead() returns the correct filename and the "file" scheme.
 *
 * @throws IOException
 */
@Test (timeout = 30000)
public void testGetLocalPathToRead() throws IOException {
  assumeTrue(!isWindows);
  String dir = buildBufferDir(ROOT, 0);
  try {
    conf.set(CONTEXT, dir);
    assertTrue(localFs.mkdirs(new Path(dir)));
    File f1 = dirAllocator.createTmpFileForWrite(FILENAME, SMALL_FILE_SIZE,
        conf);
    Path p1 = dirAllocator.getLocalPathToRead(f1.getName(), conf);
    assertEquals(f1.getName(), p1.getName());
    assertEquals("file", p1.getFileSystem(conf).getUri().getScheme());
  } finally {
    Shell.execCommand(Shell.getSetPermissionCommand("u+w", false,
                                                    BUFFER_DIR_ROOT));
    rmBufferDirs();
  }
}
Project: hadoop    File: HttpServer2.java
@InterfaceAudience.Private
public static Connector createDefaultChannelConnector() {
  SelectChannelConnector ret = new SelectChannelConnectorWithSafeStartup();
  ret.setLowResourceMaxIdleTime(10000);
  ret.setAcceptQueueSize(128);
  ret.setResolveNames(false);
  ret.setUseDirectBuffers(false);
  if(Shell.WINDOWS) {
    // result of setting the SO_REUSEADDR flag is different on Windows
    // http://msdn.microsoft.com/en-us/library/ms740621(v=vs.85).aspx
    // without this 2 NN's can start on the same machine and listen on
    // the same port with indeterminate routing of incoming requests to them
    ret.setReuseAddress(false);
  }
  ret.setHeaderBufferSize(1024*64);
  return ret;
}
Project: hadoop    File: RawLocalFileSystem.java
protected boolean mkOneDirWithMode(Path p, File p2f, FsPermission permission)
    throws IOException {
  if (permission == null) {
    return p2f.mkdir();
  } else {
    if (Shell.WINDOWS && NativeIO.isAvailable()) {
      try {
        NativeIO.Windows.createDirectoryWithMode(p2f, permission.toShort());
        return true;
      } catch (IOException e) {
        if (LOG.isDebugEnabled()) {
          LOG.debug(String.format(
              "NativeIO.createDirectoryWithMode error, path = %s, mode = %o",
              p2f, permission.toShort()), e);
        }
        return false;
      }
    } else {
      boolean b = p2f.mkdir();
      if (b) {
        setPermission(p, permission);
      }
      return b;
    }
  }
}
Project: hadoop    File: FileUtil.java
/**
 * Change the permissions on a file / directory, recursively, if
 * needed.
 * @param filename name of the file whose permissions are to change
 * @param perm permission string
 * @param recursive true, if permissions should be changed recursively
 * @return the exit code from the command.
 * @throws IOException
 */
public static int chmod(String filename, String perm, boolean recursive)
                          throws IOException {
  String [] cmd = Shell.getSetPermissionCommand(perm, recursive);
  String[] args = new String[cmd.length + 1];
  System.arraycopy(cmd, 0, args, 0, cmd.length);
  args[cmd.length] = new File(filename).getPath();
  ShellCommandExecutor shExec = new ShellCommandExecutor(args);
  try {
    shExec.execute();
  }catch(IOException e) {
    if(LOG.isDebugEnabled()) {
      LOG.debug("Error while changing permission : " + filename 
                +" Exception: " + StringUtils.stringifyException(e));
    }
  }
  return shExec.getExitCode();
}
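A brief usage sketch (the path and permission string are illustrative):

// Set mode 754 (owner rwx, group r-x, other r--) without recursing;
// returns the exit code of the underlying chmod/winutils command.
int exitCode = FileUtil.chmod("/tmp/launch.sh", "754", false);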
Project: hadoop    File: NativeIO.java
public static void chmod(String path, int mode) throws IOException {
  if (!Shell.WINDOWS) {
    chmodImpl(path, mode);
  } else {
    try {
      chmodImpl(path, mode);
    } catch (NativeIOException nioe) {
      if (nioe.getErrorCode() == 3) {
        throw new NativeIOException("No such file or directory",
            Errno.ENOENT);
      } else {
        LOG.warn(String.format("NativeIO.chmod error (%d): %s",
            nioe.getErrorCode(), nioe.getMessage()));
        throw new NativeIOException("Unknown error", Errno.UNKNOWN);
      }
    }
  }
}
Project: ditb    File: TestNodeHealthCheckChore.java
private Configuration getConfForNodeHealthScript() throws IOException {
  Configuration conf = UTIL.getConfiguration();
  File tempDir = new File(UTIL.getDataTestDir().toString());
  if (!tempDir.exists()) {
    if (!tempDir.mkdirs()) {
      throw new IOException("Failed mkdirs " + tempDir);
    }
  }
  String scriptName = "HealthScript" + UUID.randomUUID().toString()
      + (Shell.WINDOWS ? ".cmd" : ".sh");
  healthScriptFile = new File(tempDir.getAbsolutePath(), scriptName);
  conf.set(HConstants.HEALTH_SCRIPT_LOC, healthScriptFile.getAbsolutePath());
  conf.setLong(HConstants.HEALTH_FAILURE_THRESHOLD, 3);
  conf.setLong(HConstants.HEALTH_SCRIPT_TIMEOUT, SCRIPT_TIMEOUT);
  return conf;
}
Project: hadoop-oss    File: TestDiskChecker.java
private void _checkDirs(boolean isDir, FsPermission perm, boolean success)
    throws Throwable {
  File localDir = File.createTempFile("test", "tmp");
  if (isDir) {
    localDir.delete();
    localDir.mkdir();
  }
  Shell.execCommand(Shell.getSetPermissionCommand(String.format("%04o",
    perm.toShort()), false, localDir.getAbsolutePath()));
  try {
    DiskChecker.checkDir(FileSystem.getLocal(new Configuration()),
      new Path(localDir.getAbsolutePath()), perm);
    assertTrue("checkDir success", success);
  } catch (DiskErrorException e) {
    assertFalse("checkDir success", success);
  }
  localDir.delete();
}
Project: hadoop    File: TestShellDecryptionKeyProvider.java
@Test
public void testValidScript() throws Exception {
  if (!Shell.WINDOWS) {
    return;
  }
  String expectedResult = "decretedKey";

  // Create a simple script which echoes the given key plus the given
  // expected result (so that we validate both script input and output)
  File scriptFile = new File(TEST_ROOT_DIR, "testScript.cmd");
  FileUtils.writeStringToFile(scriptFile, "@echo %1 " + expectedResult);

  ShellDecryptionKeyProvider provider = new ShellDecryptionKeyProvider();
  Configuration conf = new Configuration();
  String account = "testacct";
  String key = "key1";
  conf.set(SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX + account, key);
  conf.set(ShellDecryptionKeyProvider.KEY_ACCOUNT_SHELLKEYPROVIDER_SCRIPT,
      "cmd /c " + scriptFile.getAbsolutePath());

  String result = provider.getStorageAccountKey(account, conf);
  assertEquals(key + " " + expectedResult, result);
}
Project: big_data    File: NativeIOaa.java
/**
 * Returns the file stat for a file descriptor.
 *
 * @param fd
 *            file descriptor.
 * @return the file descriptor file stat.
 * @throws IOException
 *             thrown if there was an IO error while obtaining the file
 *             stat.
 */
public static Stat getFstat(FileDescriptor fd) throws IOException {
    Stat stat = null;
    if (!Shell.WINDOWS) {
        stat = fstat(fd);
        stat.owner = getName(IdCache.USER, stat.ownerId);
        stat.group = getName(IdCache.GROUP, stat.groupId);
    } else {
        try {
            stat = fstat(fd);
        } catch (NativeIOException nioe) {
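            // Windows system error code 6 is ERROR_INVALID_HANDLE; map it to
            // the POSIX-style EBADF so callers see a familiar error.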
            if (nioe.getErrorCode() == 6) {
                throw new NativeIOException("The handle is invalid.", Errno.EBADF);
            } else {
                LOG.warn(String.format("NativeIO.getFstat error (%d): %s", nioe.getErrorCode(),
                        nioe.getMessage()));
                throw new NativeIOException("Unknown error", Errno.UNKNOWN);
            }
        }
    }
    return stat;
}
Project: big_data    File: NativeIOaa.java
public static String getOwner(FileDescriptor fd) throws IOException {
    ensureInitialized();
    if (Shell.WINDOWS) {
        String owner = Windows.getOwner(fd);
        owner = stripDomain(owner);
        return owner;
    } else {
        long uid = POSIX.getUIDforFDOwnerforOwner(fd);
        CachedUid cUid = uidCache.get(uid);
        long now = System.currentTimeMillis();
        if (cUid != null && (cUid.timestamp + cacheTimeout) > now) {
            return cUid.username;
        }
        String user = POSIX.getUserName(uid);
        LOG.info("Got UserName " + user + " for UID " + uid + " from the native implementation");
        cUid = new CachedUid(user, now);
        uidCache.put(uid, cUid);
        return user;
    }
}
Project: big_data    File: NativeIOaa.java
/**
 * Create a FileInputStream that shares delete permission on the file
 * opened, i.e. other processes can delete the file while the
 * FileInputStream is reading it. Only the Windows implementation uses
 * the native interface.
 */
public static FileInputStream getShareDeleteFileInputStream(File f) throws IOException {
    if (!Shell.WINDOWS) {
        // On Linux the default FileInputStream shares delete permission
        // on the file opened.
        //
        return new FileInputStream(f);
    } else {
        // Use Windows native interface to create a FileInputStream that
        // shares delete permission on the file opened.
        //
        FileDescriptor fd = Windows.createFile(f.getAbsolutePath(), Windows.GENERIC_READ,
                Windows.FILE_SHARE_READ | Windows.FILE_SHARE_WRITE | Windows.FILE_SHARE_DELETE,
                Windows.OPEN_EXISTING);
        return new FileInputStream(fd);
    }
}
Project: lustre-connector-for-hadoop    File: LustreFsJavaImpl.java
@Override
public void chmod(String path, int mode) throws IOException {
  File f = new File(path);
  FsPermission perm = FsPermission.createImmutable((short) mode);
  LinkedList<String> args = new LinkedList<String>();
  args.add("/usr/bin/setfacl");
  args.add("-m");
  args.add(
      "u::" + perm.getUserAction().SYMBOL +
      ",g::" + perm.getGroupAction().SYMBOL +
      ",o::" + perm.getOtherAction().SYMBOL);
  args.add(FileUtil.makeShellPath(f, true));
  org.apache.hadoop.fs.util.Shell.runPrivileged(args.toArray(new String[0]));

  // Set default ACLs on directories so children can inherit them.
  if (f.isDirectory()) {
    args.add(1, "-d");
    org.apache.hadoop.fs.util.Shell.runPrivileged(args.toArray(new String[0]));
  }
}
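To see the ACL specification this method builds, a small standalone sketch (mode 0750 is illustrative):

import org.apache.hadoop.fs.permission.FsPermission;

public class SetfaclSpecDemo {
  public static void main(String[] args) {
    FsPermission perm = FsPermission.createImmutable((short) 0750);
    String spec = "u::" + perm.getUserAction().SYMBOL
        + ",g::" + perm.getGroupAction().SYMBOL
        + ",o::" + perm.getOtherAction().SYMBOL;
    // chmod above would then run: /usr/bin/setfacl -m u::rwx,g::r-x,o::--- <path>
    System.out.println(spec); // u::rwx,g::r-x,o::---
  }
}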
Project: hadoop    File: TestPathData.java
@Test (timeout = 30000)
public void testAbsoluteGlob() throws Exception {
  PathData[] items = PathData.expandAsGlob(testDir+"/d1/f1*", conf);
  assertEquals(
      sortedString(testDir+"/d1/f1", testDir+"/d1/f1.1"),
      sortedString(items)
  );

  String absolutePathNoDriveLetter = testDir+"/d1/f1";
  if (Shell.WINDOWS) {
    // testDir is an absolute path with a drive letter on Windows, i.e.
    // c:/some/path
    // and for the test we want something like the following
    // /some/path
    absolutePathNoDriveLetter = absolutePathNoDriveLetter.substring(2);
  }
  items = PathData.expandAsGlob(absolutePathNoDriveLetter, conf);
  assertEquals(
      sortedString(absolutePathNoDriveLetter),
      sortedString(items)
  );
  items = PathData.expandAsGlob(".", conf);
  assertEquals(
      sortedString("."),
      sortedString(items)
  );
}
Project: hadoop-oss    File: ShellBasedUnixGroupsMapping.java
/**
 * Attempt to parse group names given that some names are not resolvable.
 * Use the group id list to identify those that are not resolved.
 *
 * @param groupNames a string representing a list of group names
 * @param groupIDs a string representing a list of group ids
 * @return a linked list of group names
 * @throws PartialGroupNameException
 */
private List<String> parsePartialGroupNames(String groupNames,
    String groupIDs) throws PartialGroupNameException {
  StringTokenizer nameTokenizer =
      new StringTokenizer(groupNames, Shell.TOKEN_SEPARATOR_REGEX);
  StringTokenizer idTokenizer =
      new StringTokenizer(groupIDs, Shell.TOKEN_SEPARATOR_REGEX);
  List<String> groups = new LinkedList<String>();
  while (nameTokenizer.hasMoreTokens()) {
    // check for unresolvable group names.
    if (!idTokenizer.hasMoreTokens()) {
      throw new PartialGroupNameException("Number of group names and ids do"
      + " not match. group name =" + groupNames + ", group id = " + groupIDs);
    }
    String groupName = nameTokenizer.nextToken();
    String groupID = idTokenizer.nextToken();
    if (!StringUtils.isNumeric(groupName) ||
        !groupName.equals(groupID)) {
      // if the group name is non-numeric, it is resolved.
      // if the group name is numeric, but is not the same as group id,
      // regard it as a group name.
      // if unfortunately, some group names are not resolvable, and
      // the group name is the same as the group id, regard it as not
      // resolved.
      groups.add(groupName);
    }
  }
  return groups;
}
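The filtering rule in the loop above can be illustrated with hypothetical id -Gn / id -G output:

import java.util.LinkedList;
import java.util.List;
import java.util.StringTokenizer;
import org.apache.commons.lang.StringUtils;

public class PartialGroupFilterDemo {
  public static void main(String[] args) {
    String groupNames = "hadoop 9999 staff"; // hypothetical name list
    String groupIDs = "1001 9999 20";        // hypothetical id list
    StringTokenizer names = new StringTokenizer(groupNames);
    StringTokenizer ids = new StringTokenizer(groupIDs);
    List<String> groups = new LinkedList<String>();
    while (names.hasMoreTokens()) {
      String name = names.nextToken();
      String id = ids.nextToken();
      // Keep non-numeric names, and numeric names that differ from their id;
      // drop the unresolved "9999" whose name equals its id.
      if (!StringUtils.isNumeric(name) || !name.equals(id)) {
        groups.add(name);
      }
    }
    System.out.println(groups); // [hadoop, staff]
  }
}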
Project: hadoop-oss    File: ShellBasedUnixGroupsMapping.java
/**
 * Attempt to partially resolve group names.
 *
 * @param userName the user's name
 * @param errMessage error message from the shell command
 * @param groupNames the incomplete list of group names
 * @return a list of resolved group names
 * @throws PartialGroupNameException
 */
private List<String> resolvePartialGroupNames(String userName,
    String errMessage, String groupNames) throws PartialGroupNameException {
  // Exception may indicate that some group names are not resolvable.
  // Shell-based implementation should tolerate unresolvable groups names,
  // and return resolvable ones, similar to what JNI-based implementation
  // does.
  if (Shell.WINDOWS) {
    throw new PartialGroupNameException("Does not support partial group"
    + " name resolution on Windows. " + errMessage);
  }
  if (groupNames.isEmpty()) {
    throw new PartialGroupNameException("The user name '" + userName
        + "' is not found. " + errMessage);
  } else {
    LOG.warn("Some group names for '" + userName + "' are not resolvable. "
        + errMessage);
    // attempt to partially resolve group names
    try {
      ShellCommandExecutor exec2 = createGroupIDExecutor(userName);
      exec2.execute();
      return parsePartialGroupNames(groupNames, exec2.getOutput());
    } catch (ExitCodeException ece) {
      // If exception is thrown trying to get group id list,
      // something is terribly wrong, so give up.
      throw new PartialGroupNameException("failed to get group id list for " +
      "user '" + userName + "'", ece);
    } catch (IOException ioe) {
      throw new PartialGroupNameException("can't execute the shell command to"
      + " get the list of group id for user '" + userName + "'", ioe);
    }
  }
}
Project: hadoop    File: ProcfsBasedProcessTree.java
/**
 * Checks if the ProcfsBasedProcessTree is available on this system.
 *
 * @return true if ProcfsBasedProcessTree is available. False otherwise.
 */
public static boolean isAvailable() {
  try {
    if (!Shell.LINUX) {
      LOG.info("ProcfsBasedProcessTree currently is supported only on "
          + "Linux.");
      return false;
    }
  } catch (SecurityException se) {
    LOG.warn("Failed to get Operating System name. " + se);
    return false;
  }
  return true;
}
Project: hadoop-oss    File: KDiag.java
/**
 * Locate the {@code krb5.conf} file and dump it.
 *
 * No-op on windows.
 * @throws IOException problems reading the file.
 */
private void validateKrb5File() throws IOException {
  if (!Shell.WINDOWS) {
    title("Locating Kerberos configuration file");
    String krbPath = ETC_KRB5_CONF;
    String jvmKrbPath = System.getProperty(JAVA_SECURITY_KRB5_CONF);
    if (jvmKrbPath != null && !jvmKrbPath.isEmpty()) {
      println("Setting kerberos path from sysprop %s: \"%s\"",
        JAVA_SECURITY_KRB5_CONF, jvmKrbPath);
      krbPath = jvmKrbPath;
    }

    String krb5name = System.getenv(KRB5_CCNAME);
    if (krb5name != null) {
      println("Setting kerberos path from environment variable %s: \"%s\"",
        KRB5_CCNAME, krb5name);
      krbPath = krb5name;
      if (jvmKrbPath != null) {
        println("Warning - both %s and %s were set - %s takes priority",
          JAVA_SECURITY_KRB5_CONF, KRB5_CCNAME, KRB5_CCNAME);
      }
    }

    File krbFile = new File(krbPath);
    println("Kerberos configuration file = %s", krbFile);
    dump(krbFile);
    endln();
  }
}
Project: hadoop-oss    File: KDiag.java
private void validateNTPConf() throws IOException {
  if (!Shell.WINDOWS) {
    File ntpfile = new File(ETC_NTP);
    if (ntpfile.exists()
        && verifyFileIsValid(ntpfile, CAT_OS,
        "NTP file: " + ntpfile)) {
      title("NTP");
      dump(ntpfile);
      endln();
    }
  }
}
Project: hadoop-oss    File: Stat.java
/**
 * Whether stat is supported on the current platform.
 * @return true if stat is available on this platform, false otherwise
 */
public static boolean isAvailable() {
  return Shell.LINUX || Shell.FREEBSD || Shell.MAC;
}
Project: scheduling-connector-for-hadoop    File: SlurmApplicationClient.java
private String[] getJobStateAndNodeName(int jobId, String squeueCmd)
    throws IOException {
  String cmdResult = Shell.execCommand(squeueCmd, "-h", "-o", "%t:%N", "-j",
      String.valueOf(jobId));
  String[] result = cmdResult != null ? cmdResult.trim().split(":") : null;
  return result;
}
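For context, squeue -h -o %t:%N -j <id> prints a single state:nodelist pair; a hedged sketch of the parsing (the sample output is hypothetical):

String cmdResult = "R:node017\n";              // hypothetical squeue output
String[] result = cmdResult.trim().split(":");
System.out.println(result[0]); // R (job state)
System.out.println(result[1]); // node017 (node name)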
Project: hadoop    File: ShortCircuitShm.java
/**
 * Create the ShortCircuitShm.
 * 
 * @param shmId       The ID to use.
 * @param stream      The stream that we're going to use to create this 
 *                    shared memory segment.
 *                    
 *                    Although this is a FileInputStream, we are going to
 *                    assume that the underlying file descriptor is writable
 *                    as well as readable. It would be more appropriate to use
 *                    a RandomAccessFile here, but that class does not have
 *                    any public accessor which returns a FileDescriptor,
 *                    unlike FileInputStream.
 */
public ShortCircuitShm(ShmId shmId, FileInputStream stream)
      throws IOException {
  if (!NativeIO.isAvailable()) {
    throw new UnsupportedOperationException("NativeIO is not available.");
  }
  if (Shell.WINDOWS) {
    throw new UnsupportedOperationException(
        "DfsClientShm is not yet implemented for Windows.");
  }
  if (unsafe == null) {
    throw new UnsupportedOperationException(
        "can't use DfsClientShm because we failed to " +
        "load misc.Unsafe.");
  }
  this.shmId = shmId;
  this.mmappedLength = getUsableLength(stream);
  this.baseAddress = POSIX.mmap(stream.getFD(), 
      POSIX.MMAP_PROT_READ | POSIX.MMAP_PROT_WRITE, true, mmappedLength);
  this.slots = new Slot[mmappedLength / BYTES_PER_SLOT];
  this.allocatedSlots = new BitSet(slots.length);
  if (LOG.isTraceEnabled()) {
    LOG.trace("creating " + this.getClass().getSimpleName() +
        "(shmId=" + shmId +
        ", mmappedLength=" + mmappedLength +
        ", baseAddress=" + String.format("%x", baseAddress) +
        ", slots.length=" + slots.length + ")");
  }
}
Project: hadoop-oss    File: DF.java
@Override
protected String[] getExecString() {
  // ignoring the error since the exit code is enough
  if (Shell.WINDOWS){
    throw new AssertionError(
        "DF.getExecString() should never be called on Windows");
  } else {
    return new String[] {"bash","-c","exec 'df' '-k' '-P' '" + dirPath 
                    + "' 2>/dev/null"};
  }
}
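For reference, the exec string above expands to bash -c "exec 'df' '-k' '-P' '<dirPath>' 2>/dev/null", requesting POSIX-format (-P) output in 1K blocks (-k) while discarding stderr, since the exit code and stdout are all the caller needs.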
Project: hadoop    File: ViewFileSystemTestSetup.java
static void linkUpFirstComponents(Configuration conf, String path, FileSystem fsTarget, String info) {
  int indexOfEnd = path.indexOf('/', 1);
  if (Shell.WINDOWS) {
    indexOfEnd = path.indexOf('/', indexOfEnd + 1);
  }
  String firstComponent = path.substring(0, indexOfEnd);
  URI linkTarget = fsTarget.makeQualified(new Path(firstComponent)).toUri();
  ConfigUtil.addLink(conf, firstComponent, linkTarget);
  Log.info("Added link for " + info + " " 
      + firstComponent + "->" + linkTarget);    
}
Project: hadoop    File: TestFileUtil.java
/**
 * Test that length on a symlink works as expected.
 */
@Test (timeout = 30000)
public void testSymlinkLength() throws Exception {
  Assert.assertFalse(del.exists());
  del.mkdirs();

  byte[] data = "testSymLinkData".getBytes();

  File file = new File(del, FILE);
  File link = new File(del, "_link");

  // write some data to the file
  FileOutputStream os = new FileOutputStream(file);
  os.write(data);
  os.close();

  Assert.assertEquals(0, link.length());

  // create the symlink
  FileUtil.symLink(file.getAbsolutePath(), link.getAbsolutePath());

  // ensure that File#length returns the target file and link size
  Assert.assertEquals(data.length, file.length());
  Assert.assertEquals(data.length, link.length());

  file.delete();
  Assert.assertFalse(file.exists());

  if (Shell.WINDOWS && !Shell.isJava7OrAbove()) {
    // On Java6 on Windows, we copied the file
    Assert.assertEquals(data.length, link.length());
  } else {
    // Otherwise, the target file size is zero
    Assert.assertEquals(0, link.length());
  }

  link.delete();
  Assert.assertFalse(link.exists());
}
Project: hadoop-oss    File: FileUtil.java
/**
 * Set the ownership on a file / directory. User name and group name
 * cannot both be null.
 * @param file the file to change
 * @param username the new user owner name
 * @param groupname the new group owner name
 * @throws IOException
 */
public static void setOwner(File file, String username,
    String groupname) throws IOException {
  if (username == null && groupname == null) {
    throw new IOException("username == null && groupname == null");
  }
  String arg = (username == null ? "" : username)
      + (groupname == null ? "" : ":" + groupname);
  String [] cmd = Shell.getSetOwnerCommand(arg);
  execCommand(file, cmd);
}
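A brief usage sketch (the path and names are illustrative):

// Change both the user and the group owner:
FileUtil.setOwner(new File("/tmp/data"), "alice", "staff");
// Group-only change: pass null for the username.
FileUtil.setOwner(new File("/tmp/data"), null, "staff");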