Java class org.apache.hadoop.fs.HardLink — example source code

Project: hadoop    File: ReplicaInfo.java
/**
 * Remove a hard link by copying the block to a temporary place and 
 * then moving it back
 * @param numLinks number of hard links
 * @return true if copy is successful; 
 *         false if it is already detached or does not need to be detached
 * @throws IOException if there is any copy error
 */
public boolean unlinkBlock(int numLinks) throws IOException {
  if (isUnlinked()) {
    return false;
  }
  File file = getBlockFile();
  if (file == null || getVolume() == null) {
    throw new IOException("detachBlock:Block not found. " + this);
  }
  File meta = getMetaFile();

  if (HardLink.getLinkCount(file) > numLinks) {
    DataNode.LOG.info("CopyOnWrite for block " + this);
    unlinkFile(file, this);
  }
  if (HardLink.getLinkCount(meta) > numLinks) {
    unlinkFile(meta, this);
  }
  setUnlinked();
  return true;
}
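
Each of the ReplicaInfo variants in this listing drives its copy-on-write decision off HardLink.getLinkCount(). The following minimal sketch (hypothetical /tmp paths, not part of any of the projects listed here) shows the two HardLink calls involved: createHardLink ties a second name to the same inode, and getLinkCount reads the resulting count.

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.fs.HardLink;

public class HardLinkCountDemo {
  public static void main(String[] args) throws IOException {
    File block = new File("/tmp/blk_1001");      // hypothetical block file
    File alias = new File("/tmp/blk_1001.link"); // hypothetical second name
    if (!block.exists() && !block.createNewFile()) {
      throw new IOException("could not create " + block);
    }
    alias.delete(); // clear a leftover from a previous run, if any
    HardLink.createHardLink(block, alias);
    // Both names now share one inode, so the count is at least 2 and
    // unlinkBlock(1) above would take its copy-on-write path.
    System.out.println("link count = " + HardLink.getLinkCount(block));
  }
}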
Project: aliyun-oss-hadoop-fs    File: ReplicaInfo.java
/**
 * This function "breaks hardlinks" to the current replica file.
 *
 * When doing a DataNode upgrade, we create a bunch of hardlinks to each block
 * file.  This cleverly ensures that both the old and the new storage
 * directories can contain the same block file, without using additional space
 * for the data.
 *
 * However, when we want to append to the replica file, we need to "break" the
 * hardlink to ensure that the old snapshot continues to contain the old data
 * length.  If we failed to do that, we could roll back to the previous/
 * directory during a downgrade, and find that the block contents were longer
 * than they were at the time of upgrade.
 *
 * @return true only if data was copied.
 * @throws IOException
 */
public boolean breakHardLinksIfNeeded() throws IOException {
  File file = getBlockFile();
  if (file == null || getVolume() == null) {
    throw new IOException("detachBlock:Block not found. " + this);
  }
  File meta = getMetaFile();

  int linkCount = HardLink.getLinkCount(file);
  if (linkCount > 1) {
    DataNode.LOG.info("Breaking hardlink for " + linkCount + "x-linked " +
        "block " + this);
    breakHardlinks(file, this);
  }
  if (HardLink.getLinkCount(meta) > 1) {
    breakHardlinks(meta, this);
  }
  return true;
}
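
The "break" itself is the copy-and-rename trick described in the unlinkBlock javadoc above. Below is a self-contained sketch of that pattern using only java.nio.file — an illustration of what the private breakHardlinks() helper does, not the HDFS API itself. Copying to a temporary file allocates a fresh single-link inode, and moving it back makes the original name point at that new inode while the old snapshot keeps the old one.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class BreakHardLinkSketch {
  static void breakLink(Path file) throws IOException {
    // Copy the data out; the temp file is a brand-new inode with one link.
    Path tmp = Files.createTempFile(file.getParent(),
        file.getFileName().toString(), ".tmp");
    Files.copy(file, tmp, StandardCopyOption.REPLACE_EXISTING);
    // Rename it over the original name; the other hard links still point
    // at the old inode and are unaffected from here on.
    Files.move(tmp, file, StandardCopyOption.REPLACE_EXISTING,
        StandardCopyOption.ATOMIC_MOVE);
  }
}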
Project: big-c    File: ReplicaInfo.java
/**
 * Remove a hard link by copying the block to a temporary place and 
 * then moving it back
 * @param numLinks number of hard links
 * @return true if copy is successful; 
 *         false if it is already detached or does not need to be detached
 * @throws IOException if there is any copy error
 */
public boolean unlinkBlock(int numLinks) throws IOException {
  if (isUnlinked()) {
    return false;
  }
  File file = getBlockFile();
  if (file == null || getVolume() == null) {
    throw new IOException("detachBlock:Block not found. " + this);
  }
  File meta = getMetaFile();

  if (HardLink.getLinkCount(file) > numLinks) {
    DataNode.LOG.info("CopyOnWrite for block " + this);
    unlinkFile(file, this);
  }
  if (HardLink.getLinkCount(meta) > numLinks) {
    unlinkFile(meta, this);
  }
  setUnlinked();
  return true;
}
Project: hadoop-2.6.0-cdh5.4.3    File: ReplicaInfo.java
/**
 * Remove a hard link by copying the block to a temporary place and 
 * then moving it back
 * @param numLinks number of hard links
 * @return true if copy is successful; 
 *         false if it is already detached or does not need to be detached
 * @throws IOException if there is any copy error
 */
public boolean unlinkBlock(int numLinks) throws IOException {
  if (isUnlinked()) {
    return false;
  }
  File file = getBlockFile();
  if (file == null || getVolume() == null) {
    throw new IOException("detachBlock:Block not found. " + this);
  }
  File meta = getMetaFile();

  if (HardLink.getLinkCount(file) > numLinks) {
    DataNode.LOG.info("CopyOnWrite for block " + this);
    unlinkFile(file, this);
  }
  if (HardLink.getLinkCount(meta) > numLinks) {
    unlinkFile(meta, this);
  }
  setUnlinked();
  return true;
}
Project: hadoop-EAR    File: FastCopySetupUtil.java
private void verifyHardLinks(DatanodeInfo srcInfo, DatanodeInfo dstInfo,
    int srcNamespaceId, Block srcBlock, int dstNamespaceId, Block dstBlock,
    boolean hardlink) throws IOException {
  // Verify hard links.
  DataNode dnSrc = dnMap.get(srcInfo.getPort());
  File blockFileSrc = dnSrc.data.getBlockFile(srcNamespaceId, srcBlock);
  LOG.warn("Link count for : " + blockFileSrc + " is : "
      + HardLink.getLinkCount(blockFileSrc));
  if (hardlink) {
    assertTrue(HardLink.getLinkCount(blockFileSrc) > 1);
  } else {
    assertEquals(1, HardLink.getLinkCount(blockFileSrc));
  }

  DataNode dnDst = dnMap.get(dstInfo.getPort());
  File blockFileDst = dnDst.data.getBlockFile(dstNamespaceId, dstBlock);
  if (hardlink) {
    assertTrue(HardLink.getLinkCount(blockFileDst) > 1);
  } else {
    assertEquals(1, HardLink.getLinkCount(blockFileDst));
  }
}
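
For what the assertions above measure: on a POSIX JVM, the same st_nlink value that HardLink.getLinkCount() reports can also be read through NIO, which can serve as a cross-check in tests. A hedged sketch (assumes the "unix" attribute view is available, i.e. not Windows):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

public class LinkCountViaNio {
  static int linkCount(Path p) throws IOException {
    // "unix:nlink" is the file's hard-link count (st_nlink).
    return (Integer) Files.getAttribute(p, "unix:nlink");
  }
}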
Project: hadoop-plus    File: ReplicaInfo.java
/**
 * Remove a hard link by copying the block to a temporary place and 
 * then moving it back
 * @param numLinks number of hard links
 * @return true if copy is successful; 
 *         false if it is already detached or does not need to be detached
 * @throws IOException if there is any copy error
 */
public boolean unlinkBlock(int numLinks) throws IOException {
  if (isUnlinked()) {
    return false;
  }
  File file = getBlockFile();
  if (file == null || getVolume() == null) {
    throw new IOException("detachBlock:Block not found. " + this);
  }
  File meta = getMetaFile();

  if (HardLink.getLinkCount(file) > numLinks) {
    DataNode.LOG.info("CopyOnWrite for block " + this);
    unlinkFile(file, this);
  }
  if (HardLink.getLinkCount(meta) > numLinks) {
    unlinkFile(meta, this);
  }
  setUnlinked();
  return true;
}
Project: FlexMap    File: ReplicaInfo.java
/**
 * Remove a hard link by copying the block to a temporary place and 
 * then moving it back
 * @param numLinks number of hard links
 * @return true if copy is successful; 
 *         false if it is already detached or does not need to be detached
 * @throws IOException if there is any copy error
 */
public boolean unlinkBlock(int numLinks) throws IOException {
  if (isUnlinked()) {
    return false;
  }
  File file = getBlockFile();
  if (file == null || getVolume() == null) {
    throw new IOException("detachBlock:Block not found. " + this);
  }
  File meta = getMetaFile();

  if (HardLink.getLinkCount(file) > numLinks) {
    DataNode.LOG.info("CopyOnWrite for block " + this);
    unlinkFile(file, this);
  }
  if (HardLink.getLinkCount(meta) > numLinks) {
    unlinkFile(meta, this);
  }
  setUnlinked();
  return true;
}
Project: hops    File: ReplicaInfo.java
/**
 * Remove a hard link by copying the block to a temporary place and
 * then moving it back
 *
 * @param numLinks
 *     number of hard links
 * @return true if copy is successful;
 * false if it is already detached or does not need to be detached
 * @throws IOException
 *     if there is any copy error
 */
public boolean unlinkBlock(int numLinks) throws IOException {
  if (isUnlinked()) {
    return false;
  }
  File file = getBlockFile();
  if (file == null || getVolume() == null) {
    throw new IOException("detachBlock:Block not found. " + this);
  }
  File meta = getMetaFile();

  if (HardLink.getLinkCount(file) > numLinks) {
    DataNode.LOG.info("CopyOnWrite for block " + this);
    unlinkFile(file, this);
  }
  if (HardLink.getLinkCount(meta) > numLinks) {
    unlinkFile(meta, this);
  }
  setUnlinked();
  return true;
}
Project: hadoop-TCP    File: ReplicaInfo.java
/**
 * Remove a hard link by copying the block to a temporary place and 
 * then moving it back
 * @param numLinks number of hard links
 * @return true if copy is successful; 
 *         false if it is already detached or does not need to be detached
 * @throws IOException if there is any copy error
 */
public boolean unlinkBlock(int numLinks) throws IOException {
  if (isUnlinked()) {
    return false;
  }
  File file = getBlockFile();
  if (file == null || getVolume() == null) {
    throw new IOException("detachBlock:Block not found. " + this);
  }
  File meta = getMetaFile();

  if (HardLink.getLinkCount(file) > numLinks) {
    DataNode.LOG.info("CopyOnWrite for block " + this);
    unlinkFile(file, this);
  }
  if (HardLink.getLinkCount(meta) > numLinks) {
    unlinkFile(meta, this);
  }
  setUnlinked();
  return true;
}
Project: hadoop-on-lustre    File: DatanodeBlockInfo.java
/**
 * Returns true if this block was copied, otherwise returns false.
 */
boolean detachBlock(Block block, int numLinks) throws IOException {
  if (isDetached()) {
    return false;
  }
  if (file == null || volume == null) {
    throw new IOException("detachBlock:Block not found. " + block);
  }
  File meta = FSDataset.getMetaFile(file, block);
  if (meta == null) {
    throw new IOException("Meta file not found for block " + block);
  }

  if (HardLink.getLinkCount(file) > numLinks) {
    DataNode.LOG.info("CopyOnWrite for block " + block);
    detachFile(file, block);
  }
  if (HardLink.getLinkCount(meta) > numLinks) {
    detachFile(meta, block);
  }
  setDetached();
  return true;
}
Project: hardfs    File: ReplicaInfo.java
/**
 * Remove a hard link by copying the block to a temporary place and 
 * then moving it back
 * @param numLinks number of hard links
 * @return true if copy is successful; 
 *         false if it is already detached or does not need to be detached
 * @throws IOException if there is any copy error
 */
public boolean unlinkBlock(int numLinks) throws IOException {
  if (isUnlinked()) {
    return false;
  }
  File file = getBlockFile();
  if (file == null || getVolume() == null) {
    throw new IOException("detachBlock:Block not found. " + this);
  }
  File meta = getMetaFile();

  if (HardLink.getLinkCount(file) > numLinks) {
    DataNode.LOG.info("CopyOnWrite for block " + this);
    unlinkFile(file, this);
  }
  if (HardLink.getLinkCount(meta) > numLinks) {
    unlinkFile(meta, this);
  }
  setUnlinked();
  return true;
}
Project: hadoop-on-lustre2    File: ReplicaInfo.java
/**
 * Remove a hard link by copying the block to a temporary place and 
 * then moving it back
 * @param numLinks number of hard links
 * @return true if copy is successful; 
 *         false if it is already detached or does not need to be detached
 * @throws IOException if there is any copy error
 */
public boolean unlinkBlock(int numLinks) throws IOException {
  if (isUnlinked()) {
    return false;
  }
  File file = getBlockFile();
  if (file == null || getVolume() == null) {
    throw new IOException("detachBlock:Block not found. " + this);
  }
  File meta = getMetaFile();

  if (HardLink.getLinkCount(file) > numLinks) {
    DataNode.LOG.info("CopyOnWrite for block " + this);
    unlinkFile(file, this);
  }
  if (HardLink.getLinkCount(meta) > numLinks) {
    unlinkFile(meta, this);
  }
  setUnlinked();
  return true;
}
Project: RDFS    File: FastCopySetupUtil.java
private void verifyHardLinks(DatanodeInfo srcInfo, DatanodeInfo dstInfo,
    int srcNamespaceId, Block srcBlock, int dstNamespaceId, Block dstBlock,
    boolean hardlink) throws IOException {
  // Verify hard links.
  DataNode dnSrc = dnMap.get(srcInfo.getPort());
  File blockFileSrc = dnSrc.data.getBlockFile(srcNamespaceId, srcBlock);
  LOG.warn("Link count for : " + blockFileSrc + " is : "
      + HardLink.getLinkCount(blockFileSrc));
  if (hardlink) {
    assertTrue(HardLink.getLinkCount(blockFileSrc) > 1);
  } else {
    assertEquals(1, HardLink.getLinkCount(blockFileSrc));
  }

  DataNode dnDst = dnMap.get(dstInfo.getPort());
  File blockFileDst = dnDst.data.getBlockFile(dstNamespaceId, dstBlock);
  if (hardlink) {
    assertTrue(HardLink.getLinkCount(blockFileDst) > 1);
  } else {
    assertEquals(1, HardLink.getLinkCount(blockFileDst));
  }
}
Project: RDFS    File: DatanodeBlockInfo.java
/**
 * Returns true if this block was copied, otherwise returns false.
 */
boolean detachBlock(int namespaceId, Block block, int numLinks) throws IOException {
  if (isDetached()) {
    return false;
  }
  if (file == null || volume == null) {
    throw new IOException("detachBlock:Block not found. " + block);
  }
  File meta = FSDataset.getMetaFile(file, block);
  if (meta == null) {
    throw new IOException("Meta file not found for block " + block);
  }

  if (HardLink.getLinkCount(file) > numLinks) {
    DataNode.LOG.info("CopyOnWrite for block " + block);
    detachFile(namespaceId, file, block);
  }
  if (HardLink.getLinkCount(meta) > numLinks) {
    detachFile(namespaceId, meta, block);
  }
  setDetached();
  return true;
}
Project: hortonworks-extension    File: DatanodeBlockInfo.java
/**
 * Returns true if this block was copied, otherwise returns false.
 */
boolean detachBlock(Block block, int numLinks) throws IOException {
  if (isDetached()) {
    return false;
  }
  if (file == null || volume == null) {
    throw new IOException("detachBlock:Block not found. " + block);
  }
  File meta = FSDataset.getMetaFile(file, block);
  if (meta == null) {
    throw new IOException("Meta file not found for block " + block);
  }

  if (HardLink.getLinkCount(file) > numLinks) {
    DataNode.LOG.info("CopyOnWrite for block " + block);
    detachFile(file, block);
  }
  if (HardLink.getLinkCount(meta) > numLinks) {
    detachFile(meta, block);
  }
  setDetached();
  return true;
}
Project: big_data    File: NativeIOaa.java
public static void link(File src, File dst) throws IOException {
    if (!nativeLoaded) {
        HardLink.createHardLink(src, dst);
    } else {
        link0(src.getAbsolutePath(), dst.getAbsolutePath());
    }
}
Project: hadoop    File: BlockPoolSliceStorage.java
/**
 * Hardlink all finalized and RBW blocks in fromDir to toDir
 * 
 * @param fromDir directory where the snapshot is stored
 * @param toDir the current data directory
 * @throws IOException if error occurs during hardlink
 */
private void linkAllBlocks(DataNode datanode, File fromDir, File toDir)
    throws IOException {
  // do the link
  int diskLayoutVersion = this.getLayoutVersion();
  // hardlink finalized blocks in tmpDir
  HardLink hardLink = new HardLink();
  DataStorage.linkBlocks(datanode, new File(fromDir, DataStorage.STORAGE_DIR_FINALIZED),
      new File(toDir, DataStorage.STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink);
  DataStorage.linkBlocks(datanode, new File(fromDir, DataStorage.STORAGE_DIR_RBW),
      new File(toDir, DataStorage.STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
  LOG.info( hardLink.linkStats.report() );
}
Project: hadoop    File: DataStorage.java
/**
 * Hardlink all finalized and RBW blocks in fromDir to toDir
 *
 * @param fromDir      The directory where the 'from' snapshot is stored
 * @param fromBbwDir   In HDFS 1.x, the directory where blocks
 *                     that are under construction are stored.
 * @param toDir        The current data directory
 *
 * @throws IOException If error occurs during hardlink
 */
private void linkAllBlocks(DataNode datanode, File fromDir, File fromBbwDir,
    File toDir) throws IOException {
  HardLink hardLink = new HardLink();
  // do the link
  int diskLayoutVersion = this.getLayoutVersion();
  if (DataNodeLayoutVersion.supports(
      LayoutVersion.Feature.APPEND_RBW_DIR, diskLayoutVersion)) {
    // hardlink finalized blocks in tmpDir/finalized
    linkBlocks(datanode, new File(fromDir, STORAGE_DIR_FINALIZED),
        new File(toDir, STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink);
    // hardlink rbw blocks in tmpDir/rbw
    linkBlocks(datanode, new File(fromDir, STORAGE_DIR_RBW),
        new File(toDir, STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
  } else { // pre-RBW version
    // hardlink finalized blocks in tmpDir
    linkBlocks(datanode, fromDir, new File(toDir, STORAGE_DIR_FINALIZED),
        diskLayoutVersion, hardLink);      
    if (fromBbwDir.exists()) {
      /*
       * We need to put the 'blocksBeingWritten' from HDFS 1.x into the rbw
       * directory.  It's a little messy, because the blocksBeingWritten was
       * NOT underneath the 'current' directory in those releases.  See
       * HDFS-3731 for details.
       */
      linkBlocks(datanode, fromBbwDir,
          new File(toDir, STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
    }
  } 
  LOG.info( hardLink.linkStats.report() );
}
Project: hadoop    File: NativeIO.java
public static void link(File src, File dst) throws IOException {
  if (!nativeLoaded) {
    HardLink.createHardLink(src, dst);
  } else {
    link0(src.getAbsolutePath(), dst.getAbsolutePath());
  }
}
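
NativeIO.link and the identical wrappers elsewhere in this listing all dispatch the same way: the JNI link0 binding when libhadoop has loaded, the portable HardLink.createHardLink fallback otherwise. A hypothetical call site (paths invented for illustration); either branch leaves both names sharing one inode:

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.io.nativeio.NativeIO;

public class LinkCaller {
  public static void main(String[] args) throws IOException {
    File src = new File("/tmp/blk_1001");          // assumed to already exist
    File dst = new File("/tmp/blk_1001.hardlink"); // must not exist yet
    // Same call whether or not the native library loaded.
    NativeIO.link(src, dst);
  }
}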
Project: aliyun-oss-hadoop-fs    File: BlockPoolSliceStorage.java
/**
 * Hardlink all finalized and RBW blocks in fromDir to toDir
 * 
 * @param fromDir directory where the snapshot is stored
 * @param toDir the current data directory
 * @throws IOException if error occurs during hardlink
 */
private void linkAllBlocks(DataNode datanode, File fromDir, File toDir)
    throws IOException {
  // do the link
  int diskLayoutVersion = this.getLayoutVersion();
  // hardlink finalized blocks in tmpDir
  HardLink hardLink = new HardLink();
  DataStorage.linkBlocks(datanode, new File(fromDir, DataStorage.STORAGE_DIR_FINALIZED),
      new File(toDir, DataStorage.STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink);
  DataStorage.linkBlocks(datanode, new File(fromDir, DataStorage.STORAGE_DIR_RBW),
      new File(toDir, DataStorage.STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
  LOG.info( hardLink.linkStats.report() );
}
Project: aliyun-oss-hadoop-fs    File: DataStorage.java
/**
 * Hardlink all finalized and RBW blocks in fromDir to toDir
 *
 * @param fromDir      The directory where the 'from' snapshot is stored
 * @param fromBbwDir   In HDFS 1.x, the directory where blocks
 *                     that are under construction are stored.
 * @param toDir        The current data directory
 *
 * @throws IOException If error occurs during hardlink
 */
private void linkAllBlocks(DataNode datanode, File fromDir, File fromBbwDir,
    File toDir) throws IOException {
  HardLink hardLink = new HardLink();
  // do the link
  int diskLayoutVersion = this.getLayoutVersion();
  if (DataNodeLayoutVersion.supports(
      LayoutVersion.Feature.APPEND_RBW_DIR, diskLayoutVersion)) {
    // hardlink finalized blocks in tmpDir/finalized
    linkBlocks(datanode, new File(fromDir, STORAGE_DIR_FINALIZED),
        new File(toDir, STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink);
    // hardlink rbw blocks in tmpDir/rbw
    linkBlocks(datanode, new File(fromDir, STORAGE_DIR_RBW),
        new File(toDir, STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
  } else { // pre-RBW version
    // hardlink finalized blocks in tmpDir
    linkBlocks(datanode, fromDir, new File(toDir, STORAGE_DIR_FINALIZED),
        diskLayoutVersion, hardLink);      
    if (fromBbwDir.exists()) {
      /*
       * We need to put the 'blocksBeingWritten' from HDFS 1.x into the rbw
       * directory.  It's a little messy, because the blocksBeingWritten was
       * NOT underneath the 'current' directory in those releases.  See
       * HDFS-3731 for details.
       */
      linkBlocks(datanode, fromBbwDir,
          new File(toDir, STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
    }
  } 
  LOG.info( hardLink.linkStats.report() );
}
Project: big-c    File: BlockPoolSliceStorage.java
/**
 * Hardlink all finalized and RBW blocks in fromDir to toDir
 * 
 * @param fromDir directory where the snapshot is stored
 * @param toDir the current data directory
 * @throws IOException if error occurs during hardlink
 */
private void linkAllBlocks(DataNode datanode, File fromDir, File toDir)
    throws IOException {
  // do the link
  int diskLayoutVersion = this.getLayoutVersion();
  // hardlink finalized blocks in tmpDir
  HardLink hardLink = new HardLink();
  DataStorage.linkBlocks(datanode, new File(fromDir, DataStorage.STORAGE_DIR_FINALIZED),
      new File(toDir, DataStorage.STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink);
  DataStorage.linkBlocks(datanode, new File(fromDir, DataStorage.STORAGE_DIR_RBW),
      new File(toDir, DataStorage.STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
  LOG.info( hardLink.linkStats.report() );
}
Project: big-c    File: DataStorage.java
/**
 * Hardlink all finalized and RBW blocks in fromDir to toDir
 *
 * @param fromDir      The directory where the 'from' snapshot is stored
 * @param fromBbwDir   In HDFS 1.x, the directory where blocks
 *                     that are under construction are stored.
 * @param toDir        The current data directory
 *
 * @throws IOException If error occurs during hardlink
 */
private void linkAllBlocks(DataNode datanode, File fromDir, File fromBbwDir,
    File toDir) throws IOException {
  HardLink hardLink = new HardLink();
  // do the link
  int diskLayoutVersion = this.getLayoutVersion();
  if (DataNodeLayoutVersion.supports(
      LayoutVersion.Feature.APPEND_RBW_DIR, diskLayoutVersion)) {
    // hardlink finalized blocks in tmpDir/finalized
    linkBlocks(datanode, new File(fromDir, STORAGE_DIR_FINALIZED),
        new File(toDir, STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink);
    // hardlink rbw blocks in tmpDir/rbw
    linkBlocks(datanode, new File(fromDir, STORAGE_DIR_RBW),
        new File(toDir, STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
  } else { // pre-RBW version
    // hardlink finalized blocks in tmpDir
    linkBlocks(datanode, fromDir, new File(toDir, STORAGE_DIR_FINALIZED),
        diskLayoutVersion, hardLink);      
    if (fromBbwDir.exists()) {
      /*
       * We need to put the 'blocksBeingWritten' from HDFS 1.x into the rbw
       * directory.  It's a little messy, because the blocksBeingWritten was
       * NOT underneath the 'current' directory in those releases.  See
       * HDFS-3731 for details.
       */
      linkBlocks(datanode, fromBbwDir,
          new File(toDir, STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
    }
  } 
  LOG.info( hardLink.linkStats.report() );
}
Project: big-c    File: NativeIO.java
public static void link(File src, File dst) throws IOException {
  if (!nativeLoaded) {
    HardLink.createHardLink(src, dst);
  } else {
    link0(src.getAbsolutePath(), dst.getAbsolutePath());
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: BlockPoolSliceStorage.java
/**
 * Hardlink all finalized and RBW blocks in fromDir to toDir
 * 
 * @param fromDir directory where the snapshot is stored
 * @param toDir the current data directory
 * @throws IOException if error occurs during hardlink
 */
private void linkAllBlocks(DataNode datanode, File fromDir, File toDir)
    throws IOException {
  // do the link
  int diskLayoutVersion = this.getLayoutVersion();
  // hardlink finalized blocks in tmpDir
  HardLink hardLink = new HardLink();
  DataStorage.linkBlocks(datanode, new File(fromDir, DataStorage.STORAGE_DIR_FINALIZED),
      new File(toDir, DataStorage.STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink);
  DataStorage.linkBlocks(datanode, new File(fromDir, DataStorage.STORAGE_DIR_RBW),
      new File(toDir, DataStorage.STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
  LOG.info( hardLink.linkStats.report() );
}
Project: hadoop-2.6.0-cdh5.4.3    File: DataStorage.java
/**
 * Hardlink all finalized and RBW blocks in fromDir to toDir
 *
 * @param fromDir      The directory where the 'from' snapshot is stored
 * @param fromBbwDir   In HDFS 1.x, the directory where blocks
 *                     that are under construction are stored.
 * @param toDir        The current data directory
 *
 * @throws IOException If error occurs during hardlink
 */
private void linkAllBlocks(DataNode datanode, File fromDir, File fromBbwDir,
    File toDir) throws IOException {
  HardLink hardLink = new HardLink();
  // do the link
  int diskLayoutVersion = this.getLayoutVersion();
  if (DataNodeLayoutVersion.supports(
      LayoutVersion.Feature.APPEND_RBW_DIR, diskLayoutVersion)) {
    // hardlink finalized blocks in tmpDir/finalized
    linkBlocks(datanode, new File(fromDir, STORAGE_DIR_FINALIZED),
        new File(toDir, STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink);
    // hardlink rbw blocks in tmpDir/rbw
    linkBlocks(datanode, new File(fromDir, STORAGE_DIR_RBW),
        new File(toDir, STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
  } else { // pre-RBW version
    // hardlink finalized blocks in tmpDir
    linkBlocks(datanode, fromDir, new File(toDir, STORAGE_DIR_FINALIZED),
        diskLayoutVersion, hardLink);      
    if (fromBbwDir.exists()) {
      /*
       * We need to put the 'blocksBeingWritten' from HDFS 1.x into the rbw
       * directory.  It's a little messy, because the blocksBeingWritten was
       * NOT underneath the 'current' directory in those releases.  See
       * HDFS-3731 for details.
       */
      linkBlocks(datanode, fromBbwDir,
          new File(toDir, STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
    }
  } 
  LOG.info( hardLink.linkStats.report() );
}
Project: hadoop-2.6.0-cdh5.4.3    File: NativeIO.java
public static void link(File src, File dst) throws IOException {
  if (!nativeLoaded) {
    HardLink.createHardLink(src, dst);
  } else {
    link0(src.getAbsolutePath(), dst.getAbsolutePath());
  }
}
Project: hadoop-EAR    File: LocalStripeStore.java
private void persistent(LocalStripeInfo si, File tmpFile) throws IOException {
  blockToStripeStore.put(si.getKey(), si);
  File f = new File(storeDir, si.getKey());
  if (f.exists()) {
    f.delete();
  }
  HardLink.createHardLink(tmpFile, f);
}
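
persistent() publishes the temp file under its final key by removing any stale copy and hard-linking the temp file into place; after the link, the store entry and tmpFile are two names for one inode, so the caller can later delete tmpFile without touching the published copy. A sketch of the same publish step with the failure cases made explicit (assumed semantics: createHardLink fails if the target name already exists, hence the delete-first step):

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.fs.HardLink;

public class PublishViaHardLink {
  static void publish(File tmpFile, File target) throws IOException {
    // link(2) refuses to overwrite, so clear any stale copy first.
    if (target.exists() && !target.delete()) {
      throw new IOException("could not remove stale copy: " + target);
    }
    HardLink.createHardLink(tmpFile, target);
  }
}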
Project: hadoop-EAR    File: NativeIO.java
/**
 * Wrapper around native link()
 */
public static void link(File src, File dst) throws IOException {
  if (src == null || dst == null) {
    throw new IllegalArgumentException("Null parameter passed");
  }
  if (isAvailable()) {
    link(src.getAbsolutePath(), dst.getAbsolutePath());
  } else {
    HardLink.createHardLink(src, dst);
  }
}
Project: hadoop-EAR    File: DatanodeBlockInfo.java
/**
 * Returns true if this block was copied, otherwise returns false.
 */
boolean detachBlock(int namespaceId, Block block, int numLinks) throws IOException {
  if (isDetached()) {
    return false;
  }
  if (blockDataFile.getFile() == null || blockDataFile.volume == null) {
    throw new IOException("detachBlock:Block not found. " + block);
  }

  File meta = null;
  if (!inlineChecksum) {
    meta = BlockWithChecksumFileWriter.getMetaFile(blockDataFile.getFile(), block);
    if (meta == null) {
      throw new IOException("Meta file not found for block " + block);
    }
  }

  if (HardLink.getLinkCount(blockDataFile.getFile()) > numLinks) {
    DataNode.LOG.info("CopyOnWrite for block " + block);
    detachFile(namespaceId, blockDataFile.getFile(), block);
  }
  if (!inlineChecksum) {
    if (HardLink.getLinkCount(meta) > numLinks) {
      detachFile(namespaceId, meta, block);
    }
  }
  setDetached();
  return true;
}
Project: hadoop-plus    File: BlockPoolSliceStorage.java
/**
 * Hardlink all finalized and RBW blocks in fromDir to toDir
 * 
 * @param fromDir directory where the snapshot is stored
 * @param toDir the current data directory
 * @throws IOException if error occurs during hardlink
 */
private void linkAllBlocks(File fromDir, File toDir) throws IOException {
  // do the link
  int diskLayoutVersion = this.getLayoutVersion();
  // hardlink finalized blocks in tmpDir
  HardLink hardLink = new HardLink();
  DataStorage.linkBlocks(new File(fromDir, DataStorage.STORAGE_DIR_FINALIZED), 
      new File(toDir, DataStorage.STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink);
  DataStorage.linkBlocks(new File(fromDir, DataStorage.STORAGE_DIR_RBW), 
      new File(toDir, DataStorage.STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
  LOG.info( hardLink.linkStats.report() );
}
Project: hadoop-plus    File: DataStorage.java
/**
 * Hardlink all finalized and RBW blocks in fromDir to toDir
 *
 * @param fromDir      The directory where the 'from' snapshot is stored
 * @param fromBbwDir   In HDFS 1.x, the directory where blocks
 *                     that are under construction are stored.
 * @param toDir        The current data directory
 *
 * @throws IOException If error occurs during hardlink
 */
private void linkAllBlocks(File fromDir, File fromBbwDir, File toDir)
    throws IOException {
  HardLink hardLink = new HardLink();
  // do the link
  int diskLayoutVersion = this.getLayoutVersion();
  if (LayoutVersion.supports(Feature.APPEND_RBW_DIR, diskLayoutVersion)) {
    // hardlink finalized blocks in tmpDir/finalized
    linkBlocks(new File(fromDir, STORAGE_DIR_FINALIZED), 
        new File(toDir, STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink);
    // hardlink rbw blocks in tmpDir/rbw
    linkBlocks(new File(fromDir, STORAGE_DIR_RBW), 
        new File(toDir, STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
  } else { // pre-RBW version
    // hardlink finalized blocks in tmpDir
    linkBlocks(fromDir, new File(toDir, STORAGE_DIR_FINALIZED), 
        diskLayoutVersion, hardLink);      
    if (fromBbwDir.exists()) {
      /*
       * We need to put the 'blocksBeingWritten' from HDFS 1.x into the rbw
       * directory.  It's a little messy, because the blocksBeingWritten was
       * NOT underneath the 'current' directory in those releases.  See
       * HDFS-3731 for details.
       */
      linkBlocks(fromBbwDir,
          new File(toDir, STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
    }
  } 
  LOG.info( hardLink.linkStats.report() );
}
Project: StuJava    File: NativeIO.java
public static void link(File src, File dst) throws IOException {
  if (!nativeLoaded) {
    HardLink.createHardLink(src, dst);
  } else {
    link0(src.getAbsolutePath(), dst.getAbsolutePath());
  }
}
Project: FlexMap    File: BlockPoolSliceStorage.java
/**
 * Hardlink all finalized and RBW blocks in fromDir to toDir
 * 
 * @param fromDir directory where the snapshot is stored
 * @param toDir the current data directory
 * @throws IOException if error occurs during hardlink
 */
private void linkAllBlocks(DataNode datanode, File fromDir, File toDir)
    throws IOException {
  // do the link
  int diskLayoutVersion = this.getLayoutVersion();
  // hardlink finalized blocks in tmpDir
  HardLink hardLink = new HardLink();
  DataStorage.linkBlocks(datanode, new File(fromDir, DataStorage.STORAGE_DIR_FINALIZED),
      new File(toDir, DataStorage.STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink);
  DataStorage.linkBlocks(datanode, new File(fromDir, DataStorage.STORAGE_DIR_RBW),
      new File(toDir, DataStorage.STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
  LOG.info( hardLink.linkStats.report() );
}
Project: FlexMap    File: DataStorage.java
/**
 * Hardlink all finalized and RBW blocks in fromDir to toDir
 *
 * @param fromDir      The directory where the 'from' snapshot is stored
 * @param fromBbwDir   In HDFS 1.x, the directory where blocks
 *                     that are under construction are stored.
 * @param toDir        The current data directory
 *
 * @throws IOException If error occurs during hardlink
 */
private void linkAllBlocks(DataNode datanode, File fromDir, File fromBbwDir,
    File toDir) throws IOException {
  HardLink hardLink = new HardLink();
  // do the link
  int diskLayoutVersion = this.getLayoutVersion();
  if (DataNodeLayoutVersion.supports(
      LayoutVersion.Feature.APPEND_RBW_DIR, diskLayoutVersion)) {
    // hardlink finalized blocks in tmpDir/finalized
    linkBlocks(datanode, new File(fromDir, STORAGE_DIR_FINALIZED),
        new File(toDir, STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink);
    // hardlink rbw blocks in tmpDir/rbw
    linkBlocks(datanode, new File(fromDir, STORAGE_DIR_RBW),
        new File(toDir, STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
  } else { // pre-RBW version
    // hardlink finalized blocks in tmpDir
    linkBlocks(datanode, fromDir, new File(toDir, STORAGE_DIR_FINALIZED),
        diskLayoutVersion, hardLink);      
    if (fromBbwDir.exists()) {
      /*
       * We need to put the 'blocksBeingWritten' from HDFS 1.x into the rbw
       * directory.  It's a little messy, because the blocksBeingWritten was
       * NOT underneath the 'current' directory in those releases.  See
       * HDFS-3731 for details.
       */
      linkBlocks(datanode, fromBbwDir,
          new File(toDir, STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
    }
  } 
  LOG.info( hardLink.linkStats.report() );
}
Project: FlexMap    File: DataStorage.java
static void linkBlocks(DataNode datanode, File from, File to, int oldLV,
    HardLink hl) throws IOException {
  boolean upgradeToIdBasedLayout = false;
  // If we are upgrading from a version older than the one where we introduced
  // block ID-based layout AND we're working with the finalized directory,
  // we'll need to upgrade from the old flat layout to the block ID-based one
  if (oldLV > DataNodeLayoutVersion.Feature.BLOCKID_BASED_LAYOUT.getInfo().
      getLayoutVersion() && to.getName().equals(STORAGE_DIR_FINALIZED)) {
    upgradeToIdBasedLayout = true;
  }

  final List<LinkArgs> idBasedLayoutSingleLinks = Lists.newArrayList();
  linkBlocksHelper(from, to, oldLV, hl, upgradeToIdBasedLayout, to,
      idBasedLayoutSingleLinks);
  int numLinkWorkers = datanode.getConf().getInt(
      DFSConfigKeys.DFS_DATANODE_BLOCK_ID_LAYOUT_UPGRADE_THREADS_KEY,
      DFSConfigKeys.DFS_DATANODE_BLOCK_ID_LAYOUT_UPGRADE_THREADS);
  ExecutorService linkWorkers = Executors.newFixedThreadPool(numLinkWorkers);
  final int step = idBasedLayoutSingleLinks.size() / numLinkWorkers + 1;
  List<Future<Void>> futures = Lists.newArrayList();
  for (int i = 0; i < idBasedLayoutSingleLinks.size(); i += step) {
    final int iCopy = i;
    futures.add(linkWorkers.submit(new Callable<Void>() {
      @Override
      public Void call() throws IOException {
        int upperBound = Math.min(iCopy + step,
            idBasedLayoutSingleLinks.size());
        for (int j = iCopy; j < upperBound; j++) {
          LinkArgs cur = idBasedLayoutSingleLinks.get(j);
          NativeIO.link(cur.src, cur.dst);
        }
        return null;
      }
    }));
  }
  linkWorkers.shutdown();
  for (Future<Void> f : futures) {
    Futures.get(f, IOException.class);
  }
}
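
The chunking above submits at most numLinkWorkers contiguous slices (stride = size / workers + 1) and then drains the futures so the first IOException propagates. The same scheme, extracted into a generic hedged sketch (plain java.util.concurrent, no Guava):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.function.Consumer;

public class ChunkedWork {
  static <T> void forAll(List<T> items, int workers, Consumer<T> action)
      throws InterruptedException, ExecutionException {
    ExecutorService pool = Executors.newFixedThreadPool(workers);
    int step = items.size() / workers + 1; // stride yields at most `workers` chunks
    List<Future<?>> futures = new ArrayList<>();
    for (int i = 0; i < items.size(); i += step) {
      List<T> chunk = items.subList(i, Math.min(i + step, items.size()));
      futures.add(pool.submit(() -> chunk.forEach(action)));
    }
    pool.shutdown();
    for (Future<?> f : futures) {
      f.get(); // rethrows the first worker failure, like Futures.get above
    }
  }
}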
Project: hops    File: BlockPoolSliceStorage.java
/**
 * Hardlink all finalized and RBW blocks in fromDir to toDir
 *
 * @param fromDir
 *     directory where the snapshot is stored
 * @param toDir
 *     the current data directory
 * @throws IOException
 *     if error occurs during hardlink
 */
private void linkAllBlocks(File fromDir, File toDir) throws IOException {
  // do the link
  int diskLayoutVersion = this.getLayoutVersion();
  // hardlink finalized blocks in tmpDir
  HardLink hardLink = new HardLink();
  DataStorage.linkBlocks(new File(fromDir, DataStorage.STORAGE_DIR_FINALIZED),
      new File(toDir, DataStorage.STORAGE_DIR_FINALIZED), diskLayoutVersion,
      hardLink);
  DataStorage.linkBlocks(new File(fromDir, DataStorage.STORAGE_DIR_RBW),
      new File(toDir, DataStorage.STORAGE_DIR_RBW), diskLayoutVersion,
      hardLink);
  LOG.info(hardLink.linkStats.report());
}
Project: hops    File: DataStorage.java
/**
 * Hardlink all finalized and RBW blocks in fromDir to toDir
 *
 * @param fromDir
 *     The directory where the 'from' snapshot is stored
 * @param fromBbwDir
 *     In HDFS 1.x, the directory where blocks
 *     that are under construction are stored.
 * @param toDir
 *     The current data directory
 * @throws IOException
 *     If error occurs during hardlink
 */
private void linkAllBlocks(File fromDir, File fromBbwDir, File toDir)
    throws IOException {
  HardLink hardLink = new HardLink();
  // do the link
  int diskLayoutVersion = this.getLayoutVersion();
  if (LayoutVersion.supports(Feature.APPEND_RBW_DIR, diskLayoutVersion)) {
    // hardlink finalized blocks in tmpDir/finalized
    linkBlocks(new File(fromDir, STORAGE_DIR_FINALIZED),
        new File(toDir, STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink);
    // hardlink rbw blocks in tmpDir/rbw
    linkBlocks(new File(fromDir, STORAGE_DIR_RBW),
        new File(toDir, STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
  } else { // pre-RBW version
    // hardlink finalized blocks in tmpDir
    linkBlocks(fromDir, new File(toDir, STORAGE_DIR_FINALIZED),
        diskLayoutVersion, hardLink);
    if (fromBbwDir.exists()) {
      /*
       * We need to put the 'blocksBeingWritten' from HDFS 1.x into the rbw
       * directory.  It's a little messy, because the blocksBeingWritten was
       * NOT underneath the 'current' directory in those releases.  See
       * HDFS-3731 for details.
       */
      linkBlocks(fromBbwDir, new File(toDir, STORAGE_DIR_RBW),
          diskLayoutVersion, hardLink);
    }
  }
  LOG.info(hardLink.linkStats.report());
}
Project: yuzhouwan    File: NativeIO.java
public static void link(File src, File dst) throws IOException {
    if (!nativeLoaded) {
        HardLink.createHardLink(src, dst);
    } else {
        link0(src.getAbsolutePath(), dst.getAbsolutePath());
    }
}