Example source code for the Java class org.apache.hadoop.util.DiskChecker

Project: aliyun-oss-hadoop-fs    File: DirectoryCollection.java
/**
 * Function to test whether a dir is working correctly by actually creating a
 * random directory.
 *
 * @param dir
 *          the dir to test
 */
private void verifyDirUsingMkdir(File dir) throws IOException {

  String randomDirName = RandomStringUtils.randomAlphanumeric(5);
  File target = new File(dir, randomDirName);
  int i = 0;
  while (target.exists()) {

    randomDirName = RandomStringUtils.randomAlphanumeric(5) + i;
    target = new File(dir, randomDirName);
    i++;
  }
  try {
    DiskChecker.checkDir(target);
  } finally {
    FileUtils.deleteQuietly(target);
  }
}
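
Every snippet in this listing leans on the same contract: DiskChecker.checkDir(File) creates the directory if it is missing and throws DiskChecker.DiskErrorException (a subclass of IOException) when the path cannot be created or is not readable, writable, and executable. A minimal standalone sketch of that contract; the class name and fallback path are illustrative, not taken from any project above:

import java.io.File;

import org.apache.hadoop.util.DiskChecker;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;

public class DiskCheckerDemo {
  public static void main(String[] args) {
    File dir = new File(args.length > 0 ? args[0] : "/tmp/diskchecker-demo");
    try {
      // mkdirs the path if needed, then probes read/write/execute access
      DiskChecker.checkDir(dir);
      System.out.println(dir + " passed the disk check");
    } catch (DiskErrorException e) {
      System.err.println(dir + " failed the disk check: " + e.getMessage());
    }
  }
}
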
Project: aliyun-oss-hadoop-fs    File: DataStorage.java
/**
 * Create physical directory for block pools on the data node
 * 
 * @param dataDirs
 *          List of data directories
 * @param conf
 *          Configuration instance to use.
 * @throws IOException on errors
 */
static void makeBlockPoolDataDir(Collection<File> dataDirs,
    Configuration conf) throws IOException {
  if (conf == null)
    conf = new HdfsConfiguration();

  LocalFileSystem localFS = FileSystem.getLocal(conf);
  FsPermission permission = new FsPermission(conf.get(
      DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY,
      DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT));
  for (File data : dataDirs) {
    try {
      DiskChecker.checkDir(localFS, new Path(data.toURI()), permission);
    } catch ( IOException e ) {
      LOG.warn("Invalid directory in: " + data.getCanonicalPath() + ": "
          + e.getMessage());
    }
  }
}
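
The three-argument overload used here, DiskChecker.checkDir(LocalFileSystem, Path, FsPermission), goes through the given local FileSystem and, in addition to the existence and access checks, enforces the expected permission on the directory. A hedged sketch; the path is made up, and "700" mirrors the dfs.datanode.data.dir.perm default used in these versions:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.DiskChecker;

public class DataDirPermissionDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    LocalFileSystem localFS = FileSystem.getLocal(conf);
    FsPermission expected = new FsPermission("700");  // rwx------
    // Creates the directory if needed and sets it to 700 when it differs
    DiskChecker.checkDir(localFS, new Path("/tmp/dn-data-demo"), expected);
  }
}
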
Project: big-c    File: DirectoryCollection.java   (identical to the aliyun-oss-hadoop-fs version above)
Project: big-c    File: DataStorage.java   (identical to the aliyun-oss-hadoop-fs version above)
Project: hadoop-2.6.0-cdh5.4.3    File: DirectoryCollection.java   (identical to the aliyun-oss-hadoop-fs version above)
Project: hadoop-2.6.0-cdh5.4.3    File: DataStorage.java   (identical to the aliyun-oss-hadoop-fs version above)
Project: hadoop-EAR    File: TaskTracker.java
/**
 * Check if the given local directories
 * (and parent directories, if necessary) can be created.
 * @param localDirs where the new TaskTracker should keep its local files.
 * @throws DiskErrorException if none of the local directories is writable
 */
private static void checkLocalDirs(String[] localDirs)
  throws DiskErrorException {
  boolean writable = false;

  if (localDirs != null) {
    for (int i = 0; i < localDirs.length; i++) {
      try {
        DiskChecker.checkDir(new File(localDirs[i]));
        writable = true;
      } catch(DiskErrorException e) {
        LOG.warn("Task Tracker local " + e.getMessage());
      }
    }
  }

  if (!writable)
    throw new DiskErrorException(
                                 "all local directories are not writable");
}
Project: hadoop-EAR    File: AvatarDataNode.java
public static AvatarDataNode makeInstance(String[] dataDirs, Configuration conf)
  throws IOException {
  ArrayList<File> dirs = new ArrayList<File>();
  for (int i = 0; i < dataDirs.length; i++) {
    File data = new File(dataDirs[i]);
    try {
      DiskChecker.checkDir(data);
      dirs.add(data);
    } catch(DiskErrorException e) {
      LOG.warn("Invalid directory in dfs.data.dir: " + e.getMessage());
    }
  }
  if (dirs.size() > 0) {
    String dnThreadName = "AvatarDataNode: [" +
      StringUtils.arrayToString(dataDirs) + "]";
    return new AvatarDataNode(conf, dirs, dnThreadName);
  }
  LOG.error("All directories in dfs.data.dir are invalid.");
  return null;
}
Project: hadoop-EAR    File: DataNode.java
static ArrayList<File> getDataDirsFromURIs(Collection<URI> dataDirs) {
  ArrayList<File> dirs = new ArrayList<File>();
  for (URI dirURI : dataDirs) {
    if (!"file".equalsIgnoreCase(dirURI.getScheme())) {
      LOG.warn("Unsupported URI schema in " + dirURI + ". Ignoring ...");
      continue;
    }
    // drop any (illegal) authority in the URI for backwards compatibility
    File data = new File(dirURI.getPath());
    try {
      DiskChecker.checkDir(data);
      dirs.add(data);
    } catch (IOException e) {
      LOG.warn("Invalid directory in dfs.data.dir: "
               + e.getMessage());
    }
  }
  return dirs;
}
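
getDataDirsFromURIs accepts only file: URIs and works from URI.getPath(), silently dropping any authority component. A plain-JDK illustration of what the loop sees; the URIs are made up:

import java.net.URI;

public class SchemeFilterDemo {
  public static void main(String[] args) throws Exception {
    URI local = new URI("file:///data/dfs/1");
    URI remote = new URI("hdfs://namenode/data");
    System.out.println(local.getScheme() + " -> " + local.getPath());  // file -> /data/dfs/1
    System.out.println(remote.getScheme());  // "hdfs": the loop above warns and skips it
  }
}
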
Project: hadoop-EAR    File: DataNode.java
/**
 * Make an instance of DataNode after ensuring that at least one of the
 * given data directories (and their parent directories, if necessary)
 * can be created.
 * @param dataDirs List of directories, where the new DataNode instance should
 * keep its files.
 * @param conf Configuration instance to use.
 * @return DataNode instance for given list of data dirs and conf, or null if
 * no directory from this directory list can be created.
 * @throws IOException
 */
public static DataNode makeInstance(String[] dataDirs, Configuration conf)
  throws IOException {
  ArrayList<File> dirs = new ArrayList<File>();
  for (int i = 0; i < dataDirs.length; i++) {
    File data = new File(dataDirs[i]);
    try {
      DiskChecker.checkDir(data);
      dirs.add(data);
    } catch(DiskErrorException e) {
      LOG.warn("Invalid directory in dfs.data.dir: " + e.getMessage());
    }
  }
  if (dirs.size() > 0)
    return new DataNode(conf, dirs);
  LOG.error("All directories in dfs.data.dir are invalid.");
  return null;
}
Project: hadoop-plus    File: DirectoryCollection.java
/**
 * Check the health of the current set of local directories, updating the list
 * of valid directories if necessary.
 * @return <em>true</em> if a new disk failure was identified during this
 *         check, <em>false</em> otherwise.
 */
synchronized boolean checkDirs() {
  int oldNumFailures = numFailures;
  for (final String dir : localDirs) {
    try {
      DiskChecker.checkDir(new File(dir));
    } catch (DiskErrorException de) {
      LOG.warn("Directory " + dir + " error " +
          de.getMessage() + ", removing from the list of valid directories.");
      localDirs.remove(dir);
      failedDirs.add(dir);
      numFailures++;
    }
  }
  return numFailures > oldNumFailures;
}
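
Note that checkDirs removes entries from localDirs while iterating over it with a for-each loop. On a plain ArrayList that would throw ConcurrentModificationException; it works here because upstream DirectoryCollection declares localDirs as a CopyOnWriteArrayList, whose iterator walks a snapshot. A minimal illustration of that property; the directory names are made up:

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;

public class CowRemoveDemo {
  public static void main(String[] args) {
    List<String> dirs =
        new CopyOnWriteArrayList<String>(Arrays.asList("/a", "/bad", "/c"));
    for (String d : dirs) {
      if (d.equals("/bad")) {
        dirs.remove(d);  // safe: the for-each iterates over a snapshot
      }
    }
    System.out.println(dirs);  // [/a, /c]
  }
}
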
Project: hadoop-plus    File: DataStorage.java   (identical to the aliyun-oss-hadoop-fs version above)
Project: FlexMap    File: DataStorage.java   (identical to the aliyun-oss-hadoop-fs version above)
Project: hops    File: DirectoryCollection.java   (verifyDirUsingMkdir, identical to the aliyun-oss-hadoop-fs version above)
Project: hops    File: DataStorage.java   (same as the aliyun-oss-hadoop-fs version above, differing only in brace and indentation style)
Project: hadoop-TCP    File: DirectoryCollection.java   (checkDirs, identical to the hadoop-plus version above)
Project: hadoop-TCP    File: DataStorage.java   (identical to the aliyun-oss-hadoop-fs version above)
Project: hadoop-on-lustre    File: TaskTracker.java
/**
 * Check the current set of local directories, updating the list
 * of valid directories if necessary.
 * @throws DiskErrorException if no directories are writable
 */
synchronized void checkDirs() throws DiskErrorException {
  for (String dir : localDirs) {
    try {
      DiskChecker.checkDir(new File(dir));
    } catch (DiskErrorException de) {
      LOG.warn("TaskTracker local dir " + dir + " error " + 
          de.getMessage() + ", removing from local dirs");
      localDirs.remove(dir);
      numFailures++;
    }
  }
  if (localDirs.isEmpty()) {
    throw new DiskErrorException(
        "No mapred local directories are writable");
  }
}
Project: hadoop-on-lustre    File: DataNode.java
/**
 * Make an instance of DataNode after ensuring that at least one of the
 * given data directories (and their parent directories, if necessary)
 * can be created.
 * @param dataDirs List of directories, where the new DataNode instance should
 * keep its files.
 * @param conf Configuration instance to use.
 * @param resources Secure resources needed to run under Kerberos
 * @return DataNode instance for given list of data dirs and conf, or null if
 * no directory from this directory list can be created.
 * @throws IOException
 */
public static DataNode makeInstance(String[] dataDirs, Configuration conf, 
    SecureResources resources) throws IOException {
  UserGroupInformation.setConfiguration(conf);
  LocalFileSystem localFS = FileSystem.getLocal(conf);
  ArrayList<File> dirs = new ArrayList<File>();
  FsPermission dataDirPermission = 
    new FsPermission(conf.get(DATA_DIR_PERMISSION_KEY, 
                              DEFAULT_DATA_DIR_PERMISSION));
  for (String dir : dataDirs) {
    try {
      DiskChecker.checkDir(localFS, new Path(dir), dataDirPermission);
      dirs.add(new File(dir));
    } catch(IOException e) {
      LOG.warn("Invalid directory in " + DATA_DIR_KEY +  ": " + 
               e.getMessage());
    }
  }
  if (dirs.size() > 0) 
    return new DataNode(conf, dirs, resources);
  LOG.error("All directories in " + DATA_DIR_KEY + " are invalid.");
  return null;
}
Project: hardfs    File: DirectoryCollection.java   (checkDirs, identical to the hadoop-plus version above)
Project: hardfs    File: DataStorage.java   (identical to the aliyun-oss-hadoop-fs version above)
Project: hadoop-on-lustre2    File: DataStorage.java   (identical to the aliyun-oss-hadoop-fs version above)
Project: RDFS    File: TaskTracker.java   (checkLocalDirs, identical to the hadoop-EAR version above)
Project: RDFS    File: AvatarDataNode.java   (identical to the hadoop-EAR version above)
Project: RDFS    File: DataNode.java   (getDataDirsFromURIs and makeInstance, identical to the hadoop-EAR versions above)
Project: hadoop-0.20    File: TaskTracker.java   (checkLocalDirs, identical to the hadoop-EAR version above)
Project: hadoop-0.20    File: DataNode.java   (makeInstance, identical to the hadoop-EAR version above)
Project: mapreduce-fork    File: TaskController.java
/**
 * Sets up the permissions of the following directories on all the configured
 * disks:
 * <ul>
 * <li>mapreduce.cluster.local.directories</li>
 * <li>Hadoop log directories</li>
 * </ul>
 */
public void setup() throws IOException {
  FileSystem localFs = FileSystem.getLocal(conf);

  for (String localDir : this.mapredLocalDirs) {
    // Set up the mapreduce.cluster.local.directories.
    File mapredlocalDir = new File(localDir);
    if (!mapredlocalDir.isDirectory() && !mapredlocalDir.mkdirs()) {
      LOG.warn("Unable to create mapreduce.cluster.local.directory : "
          + mapredlocalDir.getPath());
    } else {
      localFs.setPermission(new Path(mapredlocalDir.getCanonicalPath()),
                            new FsPermission((short)0755));
    }
  }

  // Set up the user log directory
  File taskLog = TaskLog.getUserLogDir();
  if (!taskLog.isDirectory() && !taskLog.mkdirs()) {
    LOG.warn("Unable to create taskLog directory : " + taskLog.getPath());
  } else {
    localFs.setPermission(new Path(taskLog.getCanonicalPath()),
                          new FsPermission((short)0755));
  }
  DiskChecker.checkDir(TaskLog.getUserLogDir());
}
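
The (short) 0755 literals above are octal, i.e. rwxr-xr-x. A one-line check, purely illustrative:

import org.apache.hadoop.fs.permission.FsPermission;

public class OctalPermissionDemo {
  public static void main(String[] args) {
    System.out.println(new FsPermission((short) 0755));  // prints rwxr-xr-x
  }
}
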
Project: mapreduce-fork    File: TaskTracker.java   (checkLocalDirs, identical to the hadoop-EAR version above)
Project: mammoth    File: TaskTracker.java   (checkDirs, identical to the hadoop-on-lustre version above)
Project: hortonworks-extension    File: TaskTracker.java   (checkDirs, identical to the hadoop-on-lustre version above)
Project: hortonworks-extension    File: DataNode.java   (makeInstance, identical to the hadoop-on-lustre version above)
Project: hadoop-gpu    File: TaskTracker.java   (checkLocalDirs, identical to the hadoop-EAR version above)
Project: hadoop-gpu    File: DataNode.java   (makeInstance, identical to the hadoop-EAR version above)
Project: hadoop    File: DirectoryCollection.java
Map<String, DiskErrorInformation> testDirs(List<String> dirs) {
  HashMap<String, DiskErrorInformation> ret =
      new HashMap<String, DiskErrorInformation>();
  for (final String dir : dirs) {
    String msg;
    try {
      File testDir = new File(dir);
      DiskChecker.checkDir(testDir);
      if (isDiskUsageOverPercentageLimit(testDir)) {
        msg =
            "used space above threshold of "
                + diskUtilizationPercentageCutoff
                + "%";
        ret.put(dir,
          new DiskErrorInformation(DiskErrorCause.DISK_FULL, msg));
        continue;
      } else if (isDiskFreeSpaceUnderLimit(testDir)) {
        msg =
            "free space below limit of " + diskUtilizationSpaceCutoff
                + "MB";
        ret.put(dir,
          new DiskErrorInformation(DiskErrorCause.DISK_FULL, msg));
        continue;
      }

      // create a random dir to make sure fs isn't in read-only mode
      verifyDirUsingMkdir(testDir);
    } catch (IOException ie) {
      ret.put(dir,
        new DiskErrorInformation(DiskErrorCause.OTHER, ie.getMessage()));
    }
  }
  return ret;
}
Project: hadoop    File: JournalNode.java
private static void validateAndCreateJournalDir(File dir) throws IOException {
  if (!dir.isAbsolute()) {
    throw new IllegalArgumentException(
        "Journal dir '" + dir + "' should be an absolute path");
  }

  DiskChecker.checkDir(dir);
}
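
Here the only validation beyond DiskChecker.checkDir is File.isAbsolute(), which rejects relative journal paths. For illustration:

import java.io.File;

public class AbsolutePathDemo {
  public static void main(String[] args) {
    System.out.println(new File("/data/jn").isAbsolute());  // true: accepted above
    System.out.println(new File("data/jn").isAbsolute());   // false: rejected above
  }
}
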
Project: aliyun-oss-hadoop-fs    File: DirectoryCollection.java
Map<String, DiskErrorInformation> testDirs(List<String> dirs,
    Set<String> goodDirs) {
  HashMap<String, DiskErrorInformation> ret =
      new HashMap<String, DiskErrorInformation>();
  for (final String dir : dirs) {
    String msg;
    try {
      File testDir = new File(dir);
      DiskChecker.checkDir(testDir);
      float diskUtilizationPercentageCutoff = goodDirs.contains(dir) ?
          diskUtilizationPercentageCutoffHigh : diskUtilizationPercentageCutoffLow;
      if (isDiskUsageOverPercentageLimit(testDir,
          diskUtilizationPercentageCutoff)) {
        msg =
            "used space above threshold of "
                + diskUtilizationPercentageCutoff
                + "%";
        ret.put(dir,
          new DiskErrorInformation(DiskErrorCause.DISK_FULL, msg));
        continue;
      } else if (isDiskFreeSpaceUnderLimit(testDir)) {
        msg =
            "free space below limit of " + diskUtilizationSpaceCutoff
                + "MB";
        ret.put(dir,
          new DiskErrorInformation(DiskErrorCause.DISK_FULL, msg));
        continue;
      }

      // create a random dir to make sure fs isn't in read-only mode
      verifyDirUsingMkdir(testDir);
    } catch (IOException ie) {
      ret.put(dir,
        new DiskErrorInformation(DiskErrorCause.OTHER, ie.getMessage()));
    }
  }
  return ret;
}
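
Unlike the hadoop version of testDirs above, this variant threads the current set of good dirs through and picks between two utilization cutoffs: a dir that is currently good only fails once usage crosses the high cutoff, while a failed dir must drop below the low cutoff to be re-admitted, which prevents flapping around a single threshold. A hedged sketch of that hysteresis idea; the names and thresholds are illustrative, not the project's API:

public class HysteresisDemo {
  static boolean passesUsageCheck(boolean currentlyGood, float usedPercent,
                                  float lowCutoff, float highCutoff) {
    float cutoff = currentlyGood ? highCutoff : lowCutoff;
    return usedPercent <= cutoff;
  }

  public static void main(String[] args) {
    // At 92% usage with cutoffs low=90, high=95: a good dir stays good,
    // but a failed dir is not re-admitted.
    System.out.println(passesUsageCheck(true, 92f, 90f, 95f));   // true
    System.out.println(passesUsageCheck(false, 92f, 90f, 95f));  // false
  }
}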