Java Class org.apache.hadoop.fs.DU Example Source Code
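org.apache.hadoop.fs.DU tracks the disk space consumed by a directory by running the platform du command and refreshing the result from a background thread. The examples below, collected from several Hadoop-derived projects, all follow the same pattern: construct a DU for a data directory, call start() to launch the refresh thread, and read the cached value while the volume is in service. The following minimal sketch is not taken from any of the projects listed here (the directory path is a placeholder) and shows that pattern in isolation against the pre-2.8 DU constructors these snippets use.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DU;

public class DuExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // DU reads its refresh interval (milliseconds) from fs.du.interval.
    conf.setLong("fs.du.interval", 10 * 60 * 1000L);

    File dataDir = new File("/tmp/du-example");  // placeholder directory
    dataDir.mkdirs();

    // Without a cached initial value, the constructor runs du once synchronously.
    DU du = new DU(dataDir, conf);
    du.start();  // background thread keeps the value fresh

    try {
      System.out.println("bytes used under " + du.getDirPath() + ": " + du.getUsed());
    } finally {
      du.shutdown();  // stop the refresh thread
    }
  }
}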

Project: hadoop-EAR    File: FSDataset.java
FSVolume(FSDataset dataset, File currentDir, Configuration conf) throws IOException {
  this.currentDir = currentDir; 
  File parent = currentDir.getParentFile();
  this.usage = new DF(parent, conf);
  this.reserved = usage.getReserved();
  this.dataset = dataset;
  this.namespaceMap = new NamespaceMap();
  this.dfsUsage = new DU(currentDir, conf);
  this.dfsUsage.start();
  this.nativeIOExecutor = Executors.newSingleThreadExecutor();
}
Project: cumulus    File: FSDataset.java
FSVolume(File currentDir, Configuration conf) throws IOException {
  this.reserved = conf.getLong("dfs.datanode.du.reserved", 0);
  this.currentDir = currentDir; 
  File parent = currentDir.getParentFile();
  final File finalizedDir = new File(
      currentDir, DataStorage.STORAGE_DIR_FINALIZED);

  // Files that were being written when the datanode was last shutdown
  // are now moved back to the data directory. It is possible that
  // in the future, we might want to do some sort of datanode-local
  // recovery for these blocks. For example, crc validation.
  //
  this.tmpDir = new File(parent, "tmp");
  if (tmpDir.exists()) {
    FileUtil.fullyDelete(tmpDir);
  }
  this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW);
  if (rbwDir.exists() && !supportAppends) {
    FileUtil.fullyDelete(rbwDir);
  }
  this.dataDir = new FSDir(finalizedDir);
  if (!rbwDir.mkdirs()) {  // create rbw directory if not exist
    if (!rbwDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + rbwDir.toString());
    }
  }
  if (!tmpDir.mkdirs()) {
    if (!tmpDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + tmpDir.toString());
    }
  }
  this.usage = new DF(parent, conf);
  this.dfsUsage = new DU(parent, conf);
  this.dfsUsage.start();
}
Project: RDFS    File: FSDataset.java
FSVolume(FSDataset dataset, File currentDir, Configuration conf) throws IOException {
  this.currentDir = currentDir; 
  File parent = currentDir.getParentFile();
  this.usage = new DF(parent, conf);
  this.reserved = usage.getReserved();
  this.dataset = dataset;
  this.namespaceMap = new NamespaceMap();
  this.dfsUsage = new DU(currentDir, conf);
  this.dfsUsage.start();
}
Project: hadoop    File: BlockPoolSlice.java
/**
 * Create a block pool slice
 * @param bpid Block pool Id
 * @param volume {@link FsVolumeImpl} to which this BlockPool belongs
 * @param bpDir directory corresponding to the BlockPool
 * @param conf configuration
 * @throws IOException
 */
BlockPoolSlice(String bpid, FsVolumeImpl volume, File bpDir,
    Configuration conf) throws IOException {
  this.bpid = bpid;
  this.volume = volume;
  this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT); 
  this.finalizedDir = new File(
      currentDir, DataStorage.STORAGE_DIR_FINALIZED);
  this.lazypersistDir = new File(currentDir, DataStorage.STORAGE_DIR_LAZY_PERSIST);
  if (!this.finalizedDir.exists()) {
    if (!this.finalizedDir.mkdirs()) {
      throw new IOException("Failed to mkdirs " + this.finalizedDir);
    }
  }

  this.deleteDuplicateReplicas = conf.getBoolean(
      DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION,
      DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION_DEFAULT);

  // Files that were being written when the datanode was last shutdown
  // are now moved back to the data directory. It is possible that
  // in the future, we might want to do some sort of datanode-local
  // recovery for these blocks. For example, crc validation.
  //
  this.tmpDir = new File(bpDir, DataStorage.STORAGE_DIR_TMP);
  if (tmpDir.exists()) {
    FileUtil.fullyDelete(tmpDir);
  }
  this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW);
  final boolean supportAppends = conf.getBoolean(
      DFSConfigKeys.DFS_SUPPORT_APPEND_KEY,
      DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT);
  if (rbwDir.exists() && !supportAppends) {
    FileUtil.fullyDelete(rbwDir);
  }
  if (!rbwDir.mkdirs()) {  // create rbw directory if not exist
    if (!rbwDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + rbwDir.toString());
    }
  }
  if (!tmpDir.mkdirs()) {
    if (!tmpDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + tmpDir.toString());
    }
  }
  // Use cached value initially if available. Otherwise the following call
  // will block until the initial du command completes.
  this.dfsUsage = new DU(bpDir, conf, loadDfsUsed());
  this.dfsUsage.start();

  // Make sure the dfs usage is saved during shutdown.
  ShutdownHookManager.get().addShutdownHook(
    new Runnable() {
      @Override
      public void run() {
        if (!dfsUsedSaved) {
          saveDfsUsed();
        }
      }
    }, SHUTDOWN_HOOK_PRIORITY);
}
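The hadoop and later BlockPoolSlice variants seed DU with a value loaded from disk (loadDfsUsed()), so the constructor can skip the blocking initial du run, and they register a shutdown hook that saves the latest figure for the next restart. A rough sketch of that idea follows; the cache file name, priority constant, and helper class are hypothetical stand-ins for HDFS's actual loadDfsUsed()/saveDfsUsed() logic.

import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DU;
import org.apache.hadoop.util.ShutdownHookManager;

class CachedDuSketch {
  private static final int SHUTDOWN_HOOK_PRIORITY = 10;  // assumed priority

  static DU startMonitor(File dir, Configuration conf) throws IOException {
    final File cacheFile = new File(dir, "du_cached");  // hypothetical cache file

    long cached = -1L;  // -1 makes the DU constructor run du synchronously
    if (cacheFile.exists()) {
      cached = Long.parseLong(new String(
          Files.readAllBytes(cacheFile.toPath()), StandardCharsets.UTF_8).trim());
    }

    // With a non-negative cached value the constructor does not block on du.
    final DU du = new DU(dir, conf, cached);
    du.start();

    // Persist the last known value on shutdown so the next start can reuse it.
    ShutdownHookManager.get().addShutdownHook(new Runnable() {
      @Override
      public void run() {
        try {
          Files.write(cacheFile.toPath(),
              Long.toString(du.getUsed()).getBytes(StandardCharsets.UTF_8));
        } catch (IOException e) {
          // Best effort: losing the cache only means a slower next startup.
        }
      }
    }, SHUTDOWN_HOOK_PRIORITY);

    return du;
  }
}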
Project: aliyun-oss-hadoop-fs    File: BlockPoolSlice.java
/**
 * Create a block pool slice
 * @param bpid Block pool Id
 * @param volume {@link FsVolumeImpl} to which this BlockPool belongs
 * @param bpDir directory corresponding to the BlockPool
 * @param conf configuration
 * @throws IOException
 */
BlockPoolSlice(String bpid, FsVolumeImpl volume, File bpDir,
    Configuration conf) throws IOException {
  this.bpid = bpid;
  this.volume = volume;
  this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT); 
  this.finalizedDir = new File(
      currentDir, DataStorage.STORAGE_DIR_FINALIZED);
  this.lazypersistDir = new File(currentDir, DataStorage.STORAGE_DIR_LAZY_PERSIST);
  if (!this.finalizedDir.exists()) {
    if (!this.finalizedDir.mkdirs()) {
      throw new IOException("Failed to mkdirs " + this.finalizedDir);
    }
  }

  this.ioFileBufferSize = DFSUtilClient.getIoFileBufferSize(conf);

  this.deleteDuplicateReplicas = conf.getBoolean(
      DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION,
      DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION_DEFAULT);

  // Files that were being written when the datanode was last shutdown
  // are now moved back to the data directory. It is possible that
  // in the future, we might want to do some sort of datanode-local
  // recovery for these blocks. For example, crc validation.
  //
  this.tmpDir = new File(bpDir, DataStorage.STORAGE_DIR_TMP);
  if (tmpDir.exists()) {
    FileUtil.fullyDelete(tmpDir);
  }
  this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW);
  if (!rbwDir.mkdirs()) {  // create rbw directory if not exist
    if (!rbwDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + rbwDir.toString());
    }
  }
  if (!tmpDir.mkdirs()) {
    if (!tmpDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + tmpDir.toString());
    }
  }
  // Use cached value initially if available. Otherwise the following call
  // will block until the initial du command completes.
  this.dfsUsage = new DU(bpDir, conf, loadDfsUsed());
  this.dfsUsage.start();

  // Make sure the dfs usage is saved during shutdown.
  ShutdownHookManager.get().addShutdownHook(
    new Runnable() {
      @Override
      public void run() {
        if (!dfsUsedSaved) {
          saveDfsUsed();
        }
      }
    }, SHUTDOWN_HOOK_PRIORITY);
}
Project: big-c    File: BlockPoolSlice.java
/**
 * Create a block pool slice
 * @param bpid Block pool Id
 * @param volume {@link FsVolumeImpl} to which this BlockPool belongs
 * @param bpDir directory corresponding to the BlockPool
 * @param conf configuration
 * @throws IOException
 */
BlockPoolSlice(String bpid, FsVolumeImpl volume, File bpDir,
    Configuration conf) throws IOException {
  this.bpid = bpid;
  this.volume = volume;
  this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT); 
  this.finalizedDir = new File(
      currentDir, DataStorage.STORAGE_DIR_FINALIZED);
  this.lazypersistDir = new File(currentDir, DataStorage.STORAGE_DIR_LAZY_PERSIST);
  if (!this.finalizedDir.exists()) {
    if (!this.finalizedDir.mkdirs()) {
      throw new IOException("Failed to mkdirs " + this.finalizedDir);
    }
  }

  this.deleteDuplicateReplicas = conf.getBoolean(
      DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION,
      DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION_DEFAULT);

  // Files that were being written when the datanode was last shutdown
  // are now moved back to the data directory. It is possible that
  // in the future, we might want to do some sort of datanode-local
  // recovery for these blocks. For example, crc validation.
  //
  this.tmpDir = new File(bpDir, DataStorage.STORAGE_DIR_TMP);
  if (tmpDir.exists()) {
    FileUtil.fullyDelete(tmpDir);
  }
  this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW);
  final boolean supportAppends = conf.getBoolean(
      DFSConfigKeys.DFS_SUPPORT_APPEND_KEY,
      DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT);
  if (rbwDir.exists() && !supportAppends) {
    FileUtil.fullyDelete(rbwDir);
  }
  if (!rbwDir.mkdirs()) {  // create rbw directory if not exist
    if (!rbwDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + rbwDir.toString());
    }
  }
  if (!tmpDir.mkdirs()) {
    if (!tmpDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + tmpDir.toString());
    }
  }
  // Use cached value initially if available. Otherwise the following call
  // will block until the initial du command completes.
  this.dfsUsage = new DU(bpDir, conf, loadDfsUsed());
  this.dfsUsage.start();

  // Make sure the dfs usage is saved during shutdown.
  ShutdownHookManager.get().addShutdownHook(
    new Runnable() {
      @Override
      public void run() {
        if (!dfsUsedSaved) {
          saveDfsUsed();
        }
      }
    }, SHUTDOWN_HOOK_PRIORITY);
}
Project: hadoop-2.6.0-cdh5.4.3    File: BlockPoolSlice.java
/**
 * Create a block pool slice
 * @param bpid Block pool Id
 * @param volume {@link FsVolumeImpl} to which this BlockPool belongs
 * @param bpDir directory corresponding to the BlockPool
 * @param conf configuration
 * @throws IOException
 */
BlockPoolSlice(String bpid, FsVolumeImpl volume, File bpDir,
    Configuration conf) throws IOException {
  this.bpid = bpid;
  this.volume = volume;
  this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT); 
  this.finalizedDir = new File(
      currentDir, DataStorage.STORAGE_DIR_FINALIZED);
  this.lazypersistDir = new File(currentDir, DataStorage.STORAGE_DIR_LAZY_PERSIST);
  if (!this.finalizedDir.exists()) {
    if (!this.finalizedDir.mkdirs()) {
      throw new IOException("Failed to mkdirs " + this.finalizedDir);
    }
  }

  this.deleteDuplicateReplicas = conf.getBoolean(
      DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION,
      DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION_DEFAULT);

  // Files that were being written when the datanode was last shutdown
  // are now moved back to the data directory. It is possible that
  // in the future, we might want to do some sort of datanode-local
  // recovery for these blocks. For example, crc validation.
  //
  this.tmpDir = new File(bpDir, DataStorage.STORAGE_DIR_TMP);
  if (tmpDir.exists()) {
    FileUtil.fullyDelete(tmpDir);
  }
  this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW);
  final boolean supportAppends = conf.getBoolean(
      DFSConfigKeys.DFS_SUPPORT_APPEND_KEY,
      DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT);
  if (rbwDir.exists() && !supportAppends) {
    FileUtil.fullyDelete(rbwDir);
  }
  if (!rbwDir.mkdirs()) {  // create rbw directory if not exist
    if (!rbwDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + rbwDir.toString());
    }
  }
  if (!tmpDir.mkdirs()) {
    if (!tmpDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + tmpDir.toString());
    }
  }
  // Use cached value initially if available. Otherwise the following call
  // will block until the initial du command completes.
  this.dfsUsage = new DU(bpDir, conf, loadDfsUsed());
  this.dfsUsage.start();

  // Make sure the dfs usage is saved during shutdown.
  ShutdownHookManager.get().addShutdownHook(
    new Runnable() {
      @Override
      public void run() {
        if (!dfsUsedSaved) {
          saveDfsUsed();
        }
      }
    }, SHUTDOWN_HOOK_PRIORITY);
}
Project: hadoop-plus    File: BlockPoolSlice.java
/**
 * Create a block pool slice
 * @param bpid Block pool Id
 * @param volume {@link FsVolumeImpl} to which this BlockPool belongs
 * @param bpDir directory corresponding to the BlockPool
 * @param conf configuration
 * @throws IOException
 */
BlockPoolSlice(String bpid, FsVolumeImpl volume, File bpDir,
    Configuration conf) throws IOException {
  this.bpid = bpid;
  this.volume = volume;
  this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT); 
  final File finalizedDir = new File(
      currentDir, DataStorage.STORAGE_DIR_FINALIZED);

  // Files that were being written when the datanode was last shutdown
  // are now moved back to the data directory. It is possible that
  // in the future, we might want to do some sort of datanode-local
  // recovery for these blocks. For example, crc validation.
  //
  this.tmpDir = new File(bpDir, DataStorage.STORAGE_DIR_TMP);
  if (tmpDir.exists()) {
    FileUtil.fullyDelete(tmpDir);
  }
  this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW);
  final boolean supportAppends = conf.getBoolean(
      DFSConfigKeys.DFS_SUPPORT_APPEND_KEY,
      DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT);
  if (rbwDir.exists() && !supportAppends) {
    FileUtil.fullyDelete(rbwDir);
  }
  final int maxBlocksPerDir = conf.getInt(
      DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_KEY,
      DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_DEFAULT);
  this.finalizedDir = new LDir(finalizedDir, maxBlocksPerDir);
  if (!rbwDir.mkdirs()) {  // create rbw directory if not exist
    if (!rbwDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + rbwDir.toString());
    }
  }
  if (!tmpDir.mkdirs()) {
    if (!tmpDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + tmpDir.toString());
    }
  }
  this.dfsUsage = new DU(bpDir, conf);
  this.dfsUsage.start();
}
Project: FlexMap    File: BlockPoolSlice.java
/**
 * Create a block pool slice
 * @param bpid Block pool Id
 * @param volume {@link FsVolumeImpl} to which this BlockPool belongs
 * @param bpDir directory corresponding to the BlockPool
 * @param conf configuration
 * @throws IOException
 */
BlockPoolSlice(String bpid, FsVolumeImpl volume, File bpDir,
    Configuration conf) throws IOException {
  this.bpid = bpid;
  this.volume = volume;
  this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT); 
  this.finalizedDir = new File(
      currentDir, DataStorage.STORAGE_DIR_FINALIZED);
  this.lazypersistDir = new File(currentDir, DataStorage.STORAGE_DIR_LAZY_PERSIST);
  if (!this.finalizedDir.exists()) {
    if (!this.finalizedDir.mkdirs()) {
      throw new IOException("Failed to mkdirs " + this.finalizedDir);
    }
  }

  this.deleteDuplicateReplicas = conf.getBoolean(
      DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION,
      DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION_DEFAULT);

  // Files that were being written when the datanode was last shutdown
  // are now moved back to the data directory. It is possible that
  // in the future, we might want to do some sort of datanode-local
  // recovery for these blocks. For example, crc validation.
  //
  this.tmpDir = new File(bpDir, DataStorage.STORAGE_DIR_TMP);
  if (tmpDir.exists()) {
    FileUtil.fullyDelete(tmpDir);
  }
  this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW);
  final boolean supportAppends = conf.getBoolean(
      DFSConfigKeys.DFS_SUPPORT_APPEND_KEY,
      DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT);
  if (rbwDir.exists() && !supportAppends) {
    FileUtil.fullyDelete(rbwDir);
  }
  if (!rbwDir.mkdirs()) {  // create rbw directory if not exist
    if (!rbwDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + rbwDir.toString());
    }
  }
  if (!tmpDir.mkdirs()) {
    if (!tmpDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + tmpDir.toString());
    }
  }
  // Use cached value initially if available. Otherwise the following call
  // will block until the initial du command completes.
  this.dfsUsage = new DU(bpDir, conf, loadDfsUsed());
  this.dfsUsage.start();

  // Make sure the dfs usage is saved during shutdown.
  ShutdownHookManager.get().addShutdownHook(
    new Runnable() {
      @Override
      public void run() {
        if (!dfsUsedSaved) {
          saveDfsUsed();
        }
      }
    }, SHUTDOWN_HOOK_PRIORITY);
}
Project: hops    File: BlockPoolSlice.java
/**
 * Create a block pool slice
 *
 * @param bpid
 *     Block pool Id
 * @param volume
 *     {@link FsVolumeImpl} to which this BlockPool belongs
 * @param bpDir
 *     directory corresponding to the BlockPool
 * @param conf
 *     configuration
 * @throws IOException
 */
BlockPoolSlice(String bpid, FsVolumeImpl volume, File bpDir,
    Configuration conf) throws IOException {
  this.bpid = bpid;
  this.volume = volume;
  this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
  final File finalizedDir =
      new File(currentDir, DataStorage.STORAGE_DIR_FINALIZED);

  // Files that were being written when the datanode was last shutdown
  // are now moved back to the data directory. It is possible that
  // in the future, we might want to do some sort of datanode-local
  // recovery for these blocks. For example, crc validation.
  //
  this.tmpDir = new File(bpDir, DataStorage.STORAGE_DIR_TMP);
  if (tmpDir.exists()) {
    FileUtil.fullyDelete(tmpDir);
  }
  this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW);
  final boolean supportAppends =
      conf.getBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY,
          DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT);
  if (rbwDir.exists() && !supportAppends) {
    FileUtil.fullyDelete(rbwDir);
  }
  final int maxBlocksPerDir =
      conf.getInt(DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_KEY,
          DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_DEFAULT);
  this.finalizedDir = new LDir(finalizedDir, maxBlocksPerDir);
  if (!rbwDir.mkdirs()) {  // create rbw directory if not exist
    if (!rbwDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + rbwDir.toString());
    }
  }
  if (!tmpDir.mkdirs()) {
    if (!tmpDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + tmpDir.toString());
    }
  }
  this.dfsUsage = new DU(bpDir, conf);
  this.dfsUsage.start();
}
Project: hadoop-TCP    File: BlockPoolSlice.java
/**
 * Create a block pool slice
 * @param bpid Block pool Id
 * @param volume {@link FsVolumeImpl} to which this BlockPool belongs
 * @param bpDir directory corresponding to the BlockPool
 * @param conf configuration
 * @throws IOException
 */
BlockPoolSlice(String bpid, FsVolumeImpl volume, File bpDir,
    Configuration conf) throws IOException {
  this.bpid = bpid;
  this.volume = volume;
  this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT); 
  final File finalizedDir = new File(
      currentDir, DataStorage.STORAGE_DIR_FINALIZED);

  // Files that were being written when the datanode was last shutdown
  // are now moved back to the data directory. It is possible that
  // in the future, we might want to do some sort of datanode-local
  // recovery for these blocks. For example, crc validation.
  //
  this.tmpDir = new File(bpDir, DataStorage.STORAGE_DIR_TMP);
  if (tmpDir.exists()) {
    FileUtil.fullyDelete(tmpDir);
  }
  this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW);
  final boolean supportAppends = conf.getBoolean(
      DFSConfigKeys.DFS_SUPPORT_APPEND_KEY,
      DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT);
  if (rbwDir.exists() && !supportAppends) {
    FileUtil.fullyDelete(rbwDir);
  }
  final int maxBlocksPerDir = conf.getInt(
      DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_KEY,
      DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_DEFAULT);
  this.finalizedDir = new LDir(finalizedDir, maxBlocksPerDir);
  if (!rbwDir.mkdirs()) {  // create rbw directory if not exist
    if (!rbwDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + rbwDir.toString());
    }
  }
  if (!tmpDir.mkdirs()) {
    if (!tmpDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + tmpDir.toString());
    }
  }
  this.dfsUsage = new DU(bpDir, conf);
  this.dfsUsage.start();
}
Project: hadoop-on-lustre    File: FSDataset.java
FSVolume(File currentDir, Configuration conf) throws IOException {
  this.reserved = conf.getLong("dfs.datanode.du.reserved", 0);
  this.dataDir = new FSDir(currentDir);
  this.currentDir = currentDir;
  boolean supportAppends = conf.getBoolean("dfs.support.append", false);
  File parent = currentDir.getParentFile();

  this.detachDir = new File(parent, "detach");
  if (detachDir.exists()) {
    recoverDetachedBlocks(currentDir, detachDir);
  }

  // remove all blocks from "tmp" directory. These were either created
  // by pre-append clients (0.18.x) or are part of replication request.
  // They can be safely removed.
  this.tmpDir = new File(parent, "tmp");
  if (tmpDir.exists()) {
    FileUtil.fullyDelete(tmpDir);
  }

  // Files that were being written when the datanode was last shutdown
  // should not be deleted.
  blocksBeingWritten = new File(parent, "blocksBeingWritten");
  if (blocksBeingWritten.exists()) {
    if (supportAppends) {  
      recoverBlocksBeingWritten(blocksBeingWritten);
    } else {
      FileUtil.fullyDelete(blocksBeingWritten);
    }
  }

  if (!blocksBeingWritten.mkdirs()) {
    if (!blocksBeingWritten.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + blocksBeingWritten.toString());
    }
  }
  if (!tmpDir.mkdirs()) {
    if (!tmpDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + tmpDir.toString());
    }
  }
  if (!detachDir.mkdirs()) {
    if (!detachDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + detachDir.toString());
    }
  }
  this.usage = new DF(parent, conf);
  this.dfsUsage = new DU(parent, conf);
  this.dfsUsage.start();
}
Project: hardfs    File: BlockPoolSlice.java
/**
 * Create a block pool slice
 * @param bpid Block pool Id
 * @param volume {@link FsVolumeImpl} to which this BlockPool belongs
 * @param bpDir directory corresponding to the BlockPool
 * @param conf configuration
 * @throws IOException
 */
BlockPoolSlice(String bpid, FsVolumeImpl volume, File bpDir,
    Configuration conf) throws IOException {
  this.bpid = bpid;
  this.volume = volume;
  this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT); 
  final File finalizedDir = new File(
      currentDir, DataStorage.STORAGE_DIR_FINALIZED);

  // Files that were being written when the datanode was last shutdown
  // are now moved back to the data directory. It is possible that
  // in the future, we might want to do some sort of datanode-local
  // recovery for these blocks. For example, crc validation.
  //
  this.tmpDir = new File(bpDir, DataStorage.STORAGE_DIR_TMP);
  if (tmpDir.exists()) {
    FileUtil.fullyDelete(tmpDir);
  }
  this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW);
  final boolean supportAppends = conf.getBoolean(
      DFSConfigKeys.DFS_SUPPORT_APPEND_KEY,
      DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT);
  if (rbwDir.exists() && !supportAppends) {
    FileUtil.fullyDelete(rbwDir);
  }
  final int maxBlocksPerDir = conf.getInt(
      DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_KEY,
      DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_DEFAULT);
  this.finalizedDir = new LDir(finalizedDir, maxBlocksPerDir);
  if (!rbwDir.mkdirs()) {  // create rbw directory if not exist
    if (!rbwDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + rbwDir.toString());
    }
  }
  if (!tmpDir.mkdirs()) {
    if (!tmpDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + tmpDir.toString());
    }
  }
  this.dfsUsage = new DU(bpDir, conf);
  this.dfsUsage.start();
}
Project: hadoop-on-lustre2    File: BlockPoolSlice.java
/**
 * Create a block pool slice
 * @param bpid Block pool Id
 * @param volume {@link FsVolumeImpl} to which this BlockPool belongs
 * @param bpDir directory corresponding to the BlockPool
 * @param conf configuration
 * @throws IOException
 */
BlockPoolSlice(String bpid, FsVolumeImpl volume, File bpDir,
    Configuration conf) throws IOException {
  this.bpid = bpid;
  this.volume = volume;
  this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT); 
  final File finalizedDir = new File(
      currentDir, DataStorage.STORAGE_DIR_FINALIZED);

  // Files that were being written when the datanode was last shutdown
  // are now moved back to the data directory. It is possible that
  // in the future, we might want to do some sort of datanode-local
  // recovery for these blocks. For example, crc validation.
  //
  this.tmpDir = new File(bpDir, DataStorage.STORAGE_DIR_TMP);
  if (tmpDir.exists()) {
    FileUtil.fullyDelete(tmpDir);
  }
  this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW);
  final boolean supportAppends = conf.getBoolean(
      DFSConfigKeys.DFS_SUPPORT_APPEND_KEY,
      DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT);
  if (rbwDir.exists() && !supportAppends) {
    FileUtil.fullyDelete(rbwDir);
  }
  final int maxBlocksPerDir = conf.getInt(
      DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_KEY,
      DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_DEFAULT);
  this.finalizedDir = new LDir(finalizedDir, maxBlocksPerDir);
  if (!rbwDir.mkdirs()) {  // create rbw directory if not exist
    if (!rbwDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + rbwDir.toString());
    }
  }
  if (!tmpDir.mkdirs()) {
    if (!tmpDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + tmpDir.toString());
    }
  }
  // Use cached value initially if available. Otherwise the following call
  // will block until the initial du command completes.
  this.dfsUsage = new DU(bpDir, conf, loadDfsUsed());
  this.dfsUsage.start();

  // Make sure the dfs usage is saved during shutdown.
  ShutdownHookManager.get().addShutdownHook(
    new Runnable() {
      @Override
      public void run() {
        if (!dfsUsedSaved) {
          saveDfsUsed();
        }
      }
    }, SHUTDOWN_HOOK_PRIORITY);
}
Project: hortonworks-extension    File: FSDataset.java
FSVolume(File currentDir, Configuration conf) throws IOException {
  this.reserved = conf.getLong("dfs.datanode.du.reserved", 0);
  this.dataDir = new FSDir(currentDir);
  this.currentDir = currentDir;
  boolean supportAppends = conf.getBoolean("dfs.support.append", false);
  File parent = currentDir.getParentFile();

  this.detachDir = new File(parent, "detach");
  if (detachDir.exists()) {
    recoverDetachedBlocks(currentDir, detachDir);
  }

  // remove all blocks from "tmp" directory. These were either created
  // by pre-append clients (0.18.x) or are part of replication request.
  // They can be safely removed.
  this.tmpDir = new File(parent, "tmp");
  if (tmpDir.exists()) {
    FileUtil.fullyDelete(tmpDir);
  }

  // Files that were being written when the datanode was last shutdown
  // should not be deleted.
  blocksBeingWritten = new File(parent, "blocksBeingWritten");
  if (blocksBeingWritten.exists()) {
    if (supportAppends) {  
      recoverBlocksBeingWritten(blocksBeingWritten);
    } else {
      FileUtil.fullyDelete(blocksBeingWritten);
    }
  }

  if (!blocksBeingWritten.mkdirs()) {
    if (!blocksBeingWritten.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + blocksBeingWritten.toString());
    }
  }
  if (!tmpDir.mkdirs()) {
    if (!tmpDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + tmpDir.toString());
    }
  }
  if (!detachDir.mkdirs()) {
    if (!detachDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + detachDir.toString());
    }
  }
  this.usage = new DF(parent, conf);
  this.dfsUsage = new DU(parent, conf);
  this.dfsUsage.start();
}
Project: hortonworks-extension    File: FSDataset.java
FSVolume(File currentDir, Configuration conf) throws IOException {
  this.reserved = conf.getLong("dfs.datanode.du.reserved", 0);
  this.dataDir = new FSDir(currentDir);
  this.currentDir = currentDir;
  boolean supportAppends = conf.getBoolean("dfs.support.append", false);
  File parent = currentDir.getParentFile();

  this.detachDir = new File(parent, "detach");
  if (detachDir.exists()) {
    recoverDetachedBlocks(currentDir, detachDir);
  }

  // remove all blocks from "tmp" directory. These were either created
  // by pre-append clients (0.18.x) or are part of replication request.
  // They can be safely removed.
  this.tmpDir = new File(parent, "tmp");
  if (tmpDir.exists()) {
    FileUtil.fullyDelete(tmpDir);
  }

  // Files that were being written when the datanode was last shutdown
  // should not be deleted.
  blocksBeingWritten = new File(parent, "blocksBeingWritten");
  if (blocksBeingWritten.exists()) {
    if (supportAppends) {  
      recoverBlocksBeingWritten(blocksBeingWritten);
    } else {
      FileUtil.fullyDelete(blocksBeingWritten);
    }
  }

  if (!blocksBeingWritten.mkdirs()) {
    if (!blocksBeingWritten.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + blocksBeingWritten.toString());
    }
  }
  if (!tmpDir.mkdirs()) {
    if (!tmpDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + tmpDir.toString());
    }
  }
  if (!detachDir.mkdirs()) {
    if (!detachDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + detachDir.toString());
    }
  }
  this.usage = new DF(parent, conf);
  this.dfsUsage = new DU(parent, conf);
  this.dfsUsage.start();
}
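In every snippet above DU appears alongside DF and a dfs.datanode.du.reserved setting: DU measures what has been written under the data directory, DF reports what the underlying partition still has free, and the reserve keeps part of the disk out of HDFS's hands. A compact sketch of how a volume might combine the three (class and method names are illustrative, not the actual FSVolume API) could look like this:

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DF;
import org.apache.hadoop.fs.DU;

class VolumeUsageSketch {
  private final DF usage;      // whole-partition statistics, via df
  private final DU dfsUsage;   // bytes under the data directory, via du
  private final long reserved; // space kept free for non-HDFS use

  VolumeUsageSketch(File dataDir, Configuration conf) throws IOException {
    this.reserved = conf.getLong("dfs.datanode.du.reserved", 0);
    this.usage = new DF(dataDir.getParentFile(), conf);
    this.dfsUsage = new DU(dataDir, conf);
    this.dfsUsage.start();
  }

  long getDfsUsed() throws IOException {
    return dfsUsage.getUsed();
  }

  long getAvailable() throws IOException {
    // Free space on the partition minus the configured reserve, never negative.
    long remaining = usage.getAvailable() - reserved;
    return remaining > 0 ? remaining : 0;
  }

  void shutdown() {
    dfsUsage.shutdown();
  }
}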