Example source code for the Java class org.apache.hadoop.util.DataChecksum

Project: hadoop-oss    File: FileSystem.java
/**
 * Return a set of server default configuration values
 * @return server default configuration values
 * @throws IOException
 * @deprecated use {@link #getServerDefaults(Path)} instead
 */
@Deprecated
public FsServerDefaults getServerDefaults() throws IOException {
  Configuration conf = getConf();
  // CRC32 is chosen as default as it is available in all 
  // releases that support checksum.
  // The client trash configuration is ignored.
  return new FsServerDefaults(getDefaultBlockSize(), 
      conf.getInt("io.bytes.per.checksum", 512), 
      64 * 1024, 
      getDefaultReplication(),
      conf.getInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT),
      false,
      FS_TRASH_INTERVAL_DEFAULT,
      DataChecksum.Type.CRC32);
}
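A minimal sketch (not part of the original listing) of building a DataChecksum that matches the server-side defaults returned above; it assumes a configured FileSystem, a readable path, and hadoop-common on the classpath.
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.DataChecksum;

public class ServerDefaultChecksum {
  /** Creates a DataChecksum using the server's checksum type and chunk size. */
  public static DataChecksum fromServerDefaults(FileSystem fs, Path path)
      throws IOException {
    FsServerDefaults defaults = fs.getServerDefaults(path);
    // With the defaults above this yields CRC32 with 512 bytes per checksum.
    return DataChecksum.newDataChecksum(defaults.getChecksumType(),
        defaults.getBytesPerChecksum());
  }
}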
Project: hadoop-oss    File: ChecksumFileSystem.java
public ChecksumFSOutputSummer(ChecksumFileSystem fs, 
                      Path file, 
                      boolean overwrite,
                      int bufferSize,
                      short replication,
                      long blockSize,
                      Progressable progress,
                      FsPermission permission)
  throws IOException {
  super(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,
      fs.getBytesPerSum()));
  int bytesPerSum = fs.getBytesPerSum();
  this.datas = fs.getRawFileSystem().create(file, permission, overwrite,
                                     bufferSize, replication, blockSize,
                                     progress);
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
  this.sums = fs.getRawFileSystem().create(fs.getChecksumFile(file),
                                           permission, true, sumBufferSize,
                                           replication, blockSize, null);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerSum);
}
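The constructor above stores a small header in the ".crc" sums file: the CHECKSUM_VERSION magic followed by an int bytesPerSum, with the raw checksum values after it. A minimal sketch, assuming that layout, of reading the header back (the helper name is illustrative).
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;

public class ChecksumFileHeader {
  /** Validates the version magic and returns the bytesPerSum that follows it. */
  public static int read(InputStream rawSums, byte[] expectedVersion)
      throws IOException {
    DataInputStream in = new DataInputStream(rawSums);
    byte[] version = new byte[expectedVersion.length];
    in.readFully(version);                       // CHECKSUM_VERSION magic
    if (!Arrays.equals(version, expectedVersion)) {
      throw new IOException("not a checksum (.crc) file");
    }
    return in.readInt();                         // bytesPerSum written above
  }
}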
Project: hadoop    File: IFileInputStream.java
/**
 * Create a checksum input stream that reads from the given stream and
 * verifies its checksum.
 * @param in The input stream to be verified for checksum.
 * @param len The length of the input stream including checksum bytes.
 */
public IFileInputStream(InputStream in, long len, Configuration conf) {
  this.in = in;
  this.inFd = getFileDescriptorIfAvail(in);
  sum = DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 
      Integer.MAX_VALUE);
  checksumSize = sum.getChecksumSize();
  length = len;
  dataLength = length - checksumSize;

  conf = (conf != null) ? conf : new Configuration();
  readahead = conf.getBoolean(MRConfig.MAPRED_IFILE_READAHEAD,
      MRConfig.DEFAULT_MAPRED_IFILE_READAHEAD);
  readaheadLength = conf.getInt(MRConfig.MAPRED_IFILE_READAHEAD_BYTES,
      MRConfig.DEFAULT_MAPRED_IFILE_READAHEAD_BYTES);

  doReadahead();
}
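IFileInputStream uses one CRC32 DataChecksum over the entire payload (bytesPerChecksum = Integer.MAX_VALUE), so dataLength is the stream length minus a single 4-byte checksum. Since DataChecksum implements java.util.zip.Checksum, the underlying whole-buffer computation looks roughly like this sketch (illustrative, not from the listing).
import org.apache.hadoop.util.DataChecksum;

public class WholeStreamCrc {
  /** Computes one CRC32 over the full payload, as IFileInputStream assumes. */
  public static long crcOf(byte[] payload) {
    DataChecksum sum = DataChecksum.newDataChecksum(
        DataChecksum.Type.CRC32, Integer.MAX_VALUE);
    sum.update(payload, 0, payload.length);
    return sum.getValue();   // 4 significant bytes for CRC32
  }
}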
Project: hadoop    File: DFSOutputStream.java
/** Construct a new output stream for append. */
private DFSOutputStream(DFSClient dfsClient, String src,
    EnumSet<CreateFlag> flags, Progressable progress, LocatedBlock lastBlock,
    HdfsFileStatus stat, DataChecksum checksum) throws IOException {
  this(dfsClient, src, progress, stat, checksum);
  initialFileSize = stat.getLen(); // length of file when opened
  this.shouldSyncBlock = flags.contains(CreateFlag.SYNC_BLOCK);

  boolean toNewBlock = flags.contains(CreateFlag.NEW_BLOCK);

  // The last partial block of the file has to be filled.
  if (!toNewBlock && lastBlock != null) {
    // indicate that we are appending to an existing block
    bytesCurBlock = lastBlock.getBlockSize();
    streamer = new DataStreamer(lastBlock, stat, bytesPerChecksum);
  } else {
    computePacketChunkSize(dfsClient.getConf().writePacketSize,
        bytesPerChecksum);
    streamer = new DataStreamer(stat,
        lastBlock != null ? lastBlock.getBlock() : null);
  }
  this.fileEncryptionInfo = stat.getFileEncryptionInfo();
}
Project: hadoop    File: DFSOutputStream.java
static DFSOutputStream newStreamForAppend(DFSClient dfsClient, String src,
    EnumSet<CreateFlag> flags, int bufferSize, Progressable progress,
    LocatedBlock lastBlock, HdfsFileStatus stat, DataChecksum checksum,
    String[] favoredNodes) throws IOException {
  TraceScope scope =
      dfsClient.getPathTraceScope("newStreamForAppend", src);
  try {
    final DFSOutputStream out = new DFSOutputStream(dfsClient, src, flags,
        progress, lastBlock, stat, checksum);
    if (favoredNodes != null && favoredNodes.length != 0) {
      out.streamer.setFavoredNodes(favoredNodes);
    }
    out.start();
    return out;
  } finally {
    scope.close();
  }
}
Project: hadoop    File: RemoteBlockReader2.java
protected RemoteBlockReader2(String file, String bpid, long blockId,
    DataChecksum checksum, boolean verifyChecksum,
    long startOffset, long firstChunkOffset, long bytesToRead, Peer peer,
    DatanodeID datanodeID, PeerCache peerCache) {
  this.isLocal = DFSClient.isLocalAddress(NetUtils.
      createSocketAddr(datanodeID.getXferAddr()));
  // Path is used only for printing block and file information in debug
  this.peer = peer;
  this.datanodeID = datanodeID;
  this.in = peer.getInputStreamChannel();
  this.checksum = checksum;
  this.verifyChecksum = verifyChecksum;
  this.startOffset = Math.max( startOffset, 0 );
  this.filename = file;
  this.peerCache = peerCache;
  this.blockId = blockId;

  // The total number of bytes that we need to transfer from the DN is
  // the amount that the user wants (bytesToRead), plus the padding at
  // the beginning in order to chunk-align. Note that the DN may elect
  // to send more than this amount if the read starts/ends mid-chunk.
  this.bytesNeededToFinish = bytesToRead + (startOffset - firstChunkOffset);
  bytesPerChecksum = this.checksum.getBytesPerChecksum();
  checksumSize = this.checksum.getChecksumSize();
}
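A short worked example of the chunk-alignment arithmetic described in the comment above; the values are illustrative, not taken from the page.
public class ChunkAlignExample {
  public static void main(String[] args) {
    long bytesPerChecksum = 512;
    long startOffset = 1000;                    // user-requested start
    long bytesToRead = 4096;                    // user-requested length
    // The datanode sends whole chunks, so the read is aligned back to the
    // previous chunk boundary and the padding is consumed by the reader.
    long firstChunkOffset = startOffset - (startOffset % bytesPerChecksum); // 512
    long bytesNeededToFinish = bytesToRead + (startOffset - firstChunkOffset);
    System.out.println(bytesNeededToFinish);    // 4096 + 488 = 4584
  }
}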
Project: hadoop    File: DFSClient.java
/**
 * Same as {@link #create(String, FsPermission, EnumSet, short, long,
 *  Progressable, int, ChecksumOpt)} except that the permission
 *  is absolute (i.e. it has already been masked with umask).
 */
public DFSOutputStream primitiveCreate(String src, 
                           FsPermission absPermission,
                           EnumSet<CreateFlag> flag,
                           boolean createParent,
                           short replication,
                           long blockSize,
                           Progressable progress,
                           int buffersize,
                           ChecksumOpt checksumOpt)
    throws IOException, UnresolvedLinkException {
  checkOpen();
  CreateFlag.validate(flag);
  DFSOutputStream result = primitiveAppend(src, flag, buffersize, progress);
  if (result == null) {
    DataChecksum checksum = dfsClientConf.createChecksum(checksumOpt);
    result = DFSOutputStream.newStreamForCreate(this, src, absPermission,
        flag, createParent, replication, blockSize, progress, buffersize,
        checksum, null);
  }
  beginFileLease(result.getFileId(), result);
  return result;
}
Project: hadoop    File: FSEditLogOp.java
/**
 * Construct the reader
 * @param in The stream to read from.
 * @param logVersion The version of the data coming from the stream.
 */
public Reader(DataInputStream in, StreamLimiter limiter, int logVersion) {
  this.logVersion = logVersion;
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITS_CHESKUM, logVersion)) {
    this.checksum = DataChecksum.newCrc32();
  } else {
    this.checksum = null;
  }
  // It is possible that the logVersion is actually a future layoutversion
  // during the rolling upgrade (e.g., the NN gets upgraded first). We
  // assume future layout will also support length of editlog op.
  this.supportEditLogLength = NameNodeLayoutVersion.supports(
      NameNodeLayoutVersion.Feature.EDITLOG_LENGTH, logVersion)
      || logVersion < NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION;

  if (this.checksum != null) {
    this.in = new DataInputStream(
        new CheckedInputStream(in, this.checksum));
  } else {
    this.in = in;
  }
  this.limiter = limiter;
  this.cache = new OpInstanceCache();
  this.maxOpSize = DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_DEFAULT;
}
Project: hadoop    File: BlockMetadataHeader.java
/**
 * Read the header without changing the position of the FileChannel.
 *
 * @param fc The FileChannel to read.
 * @return the Metadata Header.
 * @throws IOException on error.
 */
public static BlockMetadataHeader preadHeader(FileChannel fc)
    throws IOException {
  final byte arr[] = new byte[getHeaderSize()];
  ByteBuffer buf = ByteBuffer.wrap(arr);

  while (buf.hasRemaining()) {
    if (fc.read(buf, 0) <= 0) {
      throw new EOFException("unexpected EOF while reading " +
          "metadata file header");
    }
  }
  short version = (short)((arr[0] << 8) | (arr[1] & 0xff));
  DataChecksum dataChecksum = DataChecksum.newDataChecksum(arr, 2);
  return new BlockMetadataHeader(version, dataChecksum);
}
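preadHeader relies on a fixed layout: a 2-byte big-endian version followed by the DataChecksum header (a 1-byte type id and a 4-byte bytesPerChecksum), which is what DataChecksum.newDataChecksum(arr, 2) parses. A minimal sketch, assuming that layout, of producing such a header in memory.
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.util.DataChecksum;

public class MetaHeaderLayout {
  /** Builds the bytes that preadHeader expects: version, then checksum header. */
  public static byte[] encode(short version, DataChecksum checksum)
      throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);
    out.writeShort(version);           // big-endian, matching the parse above
    checksum.writeHeader(out);         // checksum type id + bytesPerChecksum
    out.flush();
    return bytes.toByteArray();
  }
}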
Project: hadoop    File: TestShortCircuitCache.java
public TestFileDescriptorPair() throws IOException {
  fis = new FileInputStream[2];
  for (int i = 0; i < 2; i++) {
    String name = dir.getDir() + "/file" + i;
    FileOutputStream fos = new FileOutputStream(name);
    if (i == 0) {
      // write 'data' file
      fos.write(1);
    } else {
      // write 'metadata' file
      BlockMetadataHeader header =
          new BlockMetadataHeader((short)1,
              DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 4));
      DataOutputStream dos = new DataOutputStream(fos);
      BlockMetadataHeader.writeHeader(dos, header);
      dos.close();
    }
    fos.close();
    fis[i] = new FileInputStream(name);
  }
}
Project: hadoop    File: FileSystem.java
/**
 * Return a set of server default configuration values
 * @return server default configuration values
 * @throws IOException
 * @deprecated use {@link #getServerDefaults(Path)} instead
 */
@Deprecated
public FsServerDefaults getServerDefaults() throws IOException {
  Configuration conf = getConf();
  // CRC32 is chosen as default as it is available in all 
  // releases that support checksum.
  // The client trash configuration is ignored.
  return new FsServerDefaults(getDefaultBlockSize(), 
      conf.getInt("io.bytes.per.checksum", 512), 
      64 * 1024, 
      getDefaultReplication(),
      conf.getInt("io.file.buffer.size", 4096),
      false,
      CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT,
      DataChecksum.Type.CRC32);
}
Project: hadoop    File: ChecksumFileSystem.java
public ChecksumFSOutputSummer(ChecksumFileSystem fs, 
                      Path file, 
                      boolean overwrite,
                      int bufferSize,
                      short replication,
                      long blockSize,
                      Progressable progress,
                      FsPermission permission)
  throws IOException {
  super(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,
      fs.getBytesPerSum()));
  int bytesPerSum = fs.getBytesPerSum();
  this.datas = fs.getRawFileSystem().create(file, permission, overwrite,
                                     bufferSize, replication, blockSize,
                                     progress);
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
  this.sums = fs.getRawFileSystem().create(fs.getChecksumFile(file),
                                           permission, true, sumBufferSize,
                                           replication, blockSize, null);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerSum);
}
Project: ditb    File: ChecksumUtil.java
/**
 * Generates a checksum for all the data in indata. The checksum is
 * written to outdata.
 * @param indata input data stream
 * @param startOffset starting offset in the indata stream from which to
 *                    compute checksums
 * @param endOffset ending offset in the indata stream up to
 *                   which checksums need to be computed
 * @param outdata the output buffer where checksum values are written
 * @param outOffset the starting offset in the outdata where the
 *                  checksum values are written
 * @param checksumType type of checksum
 * @param bytesPerChecksum number of bytes per checksum value
 */
static void generateChecksums(byte[] indata, int startOffset, int endOffset,
  byte[] outdata, int outOffset, ChecksumType checksumType,
  int bytesPerChecksum) throws IOException {

  if (checksumType == ChecksumType.NULL) {
    return; // No checksum for this block.
  }

  DataChecksum checksum = DataChecksum.newDataChecksum(
      checksumType.getDataChecksumType(), bytesPerChecksum);

  checksum.calculateChunkedSums(
     ByteBuffer.wrap(indata, startOffset, endOffset - startOffset),
     ByteBuffer.wrap(outdata, outOffset, outdata.length - outOffset));
}
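generateChecksums above delegates to DataChecksum.calculateChunkedSums. A minimal sketch of the same pattern using DataChecksum directly, including the output-buffer sizing of one checksum value per bytesPerChecksum chunk; the CRC32C choice and class name are illustrative.
import java.nio.ByteBuffer;
import org.apache.hadoop.util.DataChecksum;

public class ChunkedSums {
  /** Returns one checksum value per bytesPerChecksum chunk of data. */
  public static byte[] checksum(byte[] data, int bytesPerChecksum) {
    DataChecksum sum = DataChecksum.newDataChecksum(
        DataChecksum.Type.CRC32C, bytesPerChecksum);
    int numChunks = (data.length + bytesPerChecksum - 1) / bytesPerChecksum;
    byte[] out = new byte[numChunks * sum.getChecksumSize()]; // 4 bytes per chunk
    sum.calculateChunkedSums(ByteBuffer.wrap(data), ByteBuffer.wrap(out));
    return out;
  }
}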
Project: aliyun-oss-hadoop-fs    File: IFileInputStream.java
/**
 * Create a checksum input stream that reads from the given stream and
 * verifies its checksum.
 * @param in The input stream to be verified for checksum.
 * @param len The length of the input stream including checksum bytes.
 */
public IFileInputStream(InputStream in, long len, Configuration conf) {
  this.in = in;
  this.inFd = getFileDescriptorIfAvail(in);
  sum = DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 
      Integer.MAX_VALUE);
  checksumSize = sum.getChecksumSize();
  length = len;
  dataLength = length - checksumSize;

  conf = (conf != null) ? conf : new Configuration();
  readahead = conf.getBoolean(MRConfig.MAPRED_IFILE_READAHEAD,
      MRConfig.DEFAULT_MAPRED_IFILE_READAHEAD);
  readaheadLength = conf.getInt(MRConfig.MAPRED_IFILE_READAHEAD_BYTES,
      MRConfig.DEFAULT_MAPRED_IFILE_READAHEAD_BYTES);

  doReadahead();
}
Project: aliyun-oss-hadoop-fs    File: DFSOutputStream.java
static DFSOutputStream newStreamForAppend(DFSClient dfsClient, String src,
    EnumSet<CreateFlag> flags, Progressable progress, LocatedBlock lastBlock,
    HdfsFileStatus stat, DataChecksum checksum, String[] favoredNodes)
    throws IOException {
  if(stat.getErasureCodingPolicy() != null) {
    throw new IOException(
        "Not support appending to a striping layout file yet.");
  }
  try (TraceScope ignored =
           dfsClient.newPathTraceScope("newStreamForAppend", src)) {
    final DFSOutputStream out = new DFSOutputStream(dfsClient, src, flags,
        progress, lastBlock, stat, checksum, favoredNodes);
    out.start();
    return out;
  }
}
Project: big-c    File: ChecksumFileSystem.java
public ChecksumFSOutputSummer(ChecksumFileSystem fs, 
                      Path file, 
                      boolean overwrite,
                      int bufferSize,
                      short replication,
                      long blockSize,
                      Progressable progress,
                      FsPermission permission)
  throws IOException {
  super(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,
      fs.getBytesPerSum()));
  int bytesPerSum = fs.getBytesPerSum();
  this.datas = fs.getRawFileSystem().create(file, permission, overwrite,
                                     bufferSize, replication, blockSize,
                                     progress);
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
  this.sums = fs.getRawFileSystem().create(fs.getChecksumFile(file),
                                           permission, true, sumBufferSize,
                                           replication, blockSize, null);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerSum);
}
Project: aliyun-oss-hadoop-fs    File: RemoteBlockReader2.java
protected RemoteBlockReader2(String file, long blockId,
    DataChecksum checksum, boolean verifyChecksum,
    long startOffset, long firstChunkOffset, long bytesToRead, Peer peer,
    DatanodeID datanodeID, PeerCache peerCache, Tracer tracer) {
  this.isLocal = DFSUtilClient.isLocalAddress(NetUtils.
      createSocketAddr(datanodeID.getXferAddr()));
  // Path is used only for printing block and file information in debug
  this.peer = peer;
  this.datanodeID = datanodeID;
  this.in = peer.getInputStreamChannel();
  this.checksum = checksum;
  this.verifyChecksum = verifyChecksum;
  this.startOffset = Math.max( startOffset, 0 );
  this.filename = file;
  this.peerCache = peerCache;
  this.blockId = blockId;

  // The total number of bytes that we need to transfer from the DN is
  // the amount that the user wants (bytesToRead), plus the padding at
  // the beginning in order to chunk-align. Note that the DN may elect
  // to send more than this amount if the read starts/ends mid-chunk.
  this.bytesNeededToFinish = bytesToRead + (startOffset - firstChunkOffset);
  bytesPerChecksum = this.checksum.getBytesPerChecksum();
  checksumSize = this.checksum.getChecksumSize();
  this.tracer = tracer;
}
Project: big-c    File: BlockMetadataHeader.java
/**
 * Read the header without changing the position of the FileChannel.
 *
 * @param fc The FileChannel to read.
 * @return the Metadata Header.
 * @throws IOException on error.
 */
public static BlockMetadataHeader preadHeader(FileChannel fc)
    throws IOException {
  final byte arr[] = new byte[getHeaderSize()];
  ByteBuffer buf = ByteBuffer.wrap(arr);

  while (buf.hasRemaining()) {
    if (fc.read(buf, 0) <= 0) {
      throw new EOFException("unexpected EOF while reading " +
          "metadata file header");
    }
  }
  short version = (short)((arr[0] << 8) | (arr[1] & 0xff));
  DataChecksum dataChecksum = DataChecksum.newDataChecksum(arr, 2);
  return new BlockMetadataHeader(version, dataChecksum);
}
Project: big-c    File: FSEditLogOp.java
/**
 * Construct the reader
 * @param in The stream to read from.
 * @param logVersion The version of the data coming from the stream.
 */
public Reader(DataInputStream in, StreamLimiter limiter, int logVersion) {
  this.logVersion = logVersion;
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITS_CHESKUM, logVersion)) {
    this.checksum = DataChecksum.newCrc32();
  } else {
    this.checksum = null;
  }
  // It is possible that the logVersion is actually a future layoutversion
  // during the rolling upgrade (e.g., the NN gets upgraded first). We
  // assume future layout will also support length of editlog op.
  this.supportEditLogLength = NameNodeLayoutVersion.supports(
      NameNodeLayoutVersion.Feature.EDITLOG_LENGTH, logVersion)
      || logVersion < NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION;

  if (this.checksum != null) {
    this.in = new DataInputStream(
        new CheckedInputStream(in, this.checksum));
  } else {
    this.in = in;
  }
  this.limiter = limiter;
  this.cache = new OpInstanceCache();
  this.maxOpSize = DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_DEFAULT;
}
Project: aliyun-oss-hadoop-fs    File: TestShortCircuitCache.java
public TestFileDescriptorPair() throws IOException {
  fis = new FileInputStream[2];
  for (int i = 0; i < 2; i++) {
    String name = dir.getDir() + "/file" + i;
    FileOutputStream fos = new FileOutputStream(name);
    if (i == 0) {
      // write 'data' file
      fos.write(1);
    } else {
      // write 'metadata' file
      BlockMetadataHeader header =
          new BlockMetadataHeader((short)1,
              DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 4));
      DataOutputStream dos = new DataOutputStream(fos);
      BlockMetadataHeader.writeHeader(dos, header);
      dos.close();
    }
    fos.close();
    fis[i] = new FileInputStream(name);
  }
}
Project: aliyun-oss-hadoop-fs    File: TestSimulatedFSDataset.java
@Test
public void testGetMetaData() throws IOException {
  final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
  ExtendedBlock b = new ExtendedBlock(bpid, FIRST_BLK_ID, 5, 0);
  try {
    assertTrue(fsdataset.getMetaDataInputStream(b) == null);
    assertTrue("Expected an IO exception", false);
  } catch (IOException e) {
    // ok - as expected
  }
  addSomeBlocks(fsdataset); // Only need to add one but ....
  b = new ExtendedBlock(bpid, FIRST_BLK_ID, 0, 0);
  InputStream metaInput = fsdataset.getMetaDataInputStream(b);
  DataInputStream metaDataInput = new DataInputStream(metaInput);
  short version = metaDataInput.readShort();
  assertEquals(BlockMetadataHeader.VERSION, version);
  DataChecksum checksum = DataChecksum.newDataChecksum(metaDataInput);
  assertEquals(DataChecksum.Type.NULL, checksum.getChecksumType());
  assertEquals(0, checksum.getChecksumSize());  
}
Project: aliyun-oss-hadoop-fs    File: TestDataXceiverLazyPersistHint.java
/**
 * Issue a write block call with dummy parameters. The only parameter useful
 * for this test is the value of lazyPersist.
 */
private void issueWriteBlockCall(DataXceiver xceiver, boolean lazyPersist)
    throws IOException {
  xceiver.writeBlock(
      new ExtendedBlock("Dummy-pool", 0L),
      StorageType.RAM_DISK,
      null,
      "Dummy-Client",
      new DatanodeInfo[0],
      new StorageType[0],
      mock(DatanodeInfo.class),
      BlockConstructionStage.PIPELINE_SETUP_CREATE,
      0, 0, 0, 0,
      DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 0),
      CachingStrategy.newDefaultStrategy(),
      lazyPersist,
      false, null);
}
Project: big-c    File: TestShortCircuitCache.java
public TestFileDescriptorPair() throws IOException {
  fis = new FileInputStream[2];
  for (int i = 0; i < 2; i++) {
    String name = dir.getDir() + "/file" + i;
    FileOutputStream fos = new FileOutputStream(name);
    if (i == 0) {
      // write 'data' file
      fos.write(1);
    } else {
      // write 'metadata' file
      BlockMetadataHeader header =
          new BlockMetadataHeader((short)1,
              DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 4));
      DataOutputStream dos = new DataOutputStream(fos);
      BlockMetadataHeader.writeHeader(dos, header);
      dos.close();
    }
    fos.close();
    fis[i] = new FileInputStream(name);
  }
}
Project: aliyun-oss-hadoop-fs    File: FileSystem.java
/**
 * Return a set of server default configuration values
 * @return server default configuration values
 * @throws IOException
 * @deprecated use {@link #getServerDefaults(Path)} instead
 */
@Deprecated
public FsServerDefaults getServerDefaults() throws IOException {
  Configuration conf = getConf();
  // CRC32 is chosen as default as it is available in all 
  // releases that support checksum.
  // The client trash configuration is ignored.
  return new FsServerDefaults(getDefaultBlockSize(), 
      conf.getInt("io.bytes.per.checksum", 512), 
      64 * 1024, 
      getDefaultReplication(),
      conf.getInt("io.file.buffer.size", 4096),
      false,
      CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT,
      DataChecksum.Type.CRC32);
}
Project: big-c    File: DFSOutputStream.java
/** Construct a new output stream for append. */
private DFSOutputStream(DFSClient dfsClient, String src,
    EnumSet<CreateFlag> flags, Progressable progress, LocatedBlock lastBlock,
    HdfsFileStatus stat, DataChecksum checksum) throws IOException {
  this(dfsClient, src, progress, stat, checksum);
  initialFileSize = stat.getLen(); // length of file when opened
  this.shouldSyncBlock = flags.contains(CreateFlag.SYNC_BLOCK);

  boolean toNewBlock = flags.contains(CreateFlag.NEW_BLOCK);

  // The last partial block of the file has to be filled.
  if (!toNewBlock && lastBlock != null) {
    // indicate that we are appending to an existing block
    bytesCurBlock = lastBlock.getBlockSize();
    streamer = new DataStreamer(lastBlock, stat, bytesPerChecksum);
  } else {
    computePacketChunkSize(dfsClient.getConf().writePacketSize,
        bytesPerChecksum);
    streamer = new DataStreamer(stat,
        lastBlock != null ? lastBlock.getBlock() : null);
  }
  this.fileEncryptionInfo = stat.getFileEncryptionInfo();
}
Project: hadoop-oss    File: FsServerDefaults.java
public FsServerDefaults(long blockSize, int bytesPerChecksum,
    int writePacketSize, short replication, int fileBufferSize,
    boolean encryptDataTransfer, long trashInterval,
    DataChecksum.Type checksumType) {
  this.blockSize = blockSize;
  this.bytesPerChecksum = bytesPerChecksum;
  this.writePacketSize = writePacketSize;
  this.replication = replication;
  this.fileBufferSize = fileBufferSize;
  this.encryptDataTransfer = encryptDataTransfer;
  this.trashInterval = trashInterval;
  this.checksumType = checksumType;
}
Project: hadoop-oss    File: FsServerDefaults.java
@Override
@InterfaceAudience.Private
public void readFields(DataInput in) throws IOException {
  blockSize = in.readLong();
  bytesPerChecksum = in.readInt();
  writePacketSize = in.readInt();
  replication = in.readShort();
  fileBufferSize = in.readInt();
  checksumType = WritableUtils.readEnum(in, DataChecksum.Type.class);
}
Project: hadoop-oss    File: ChecksumFs.java
public ChecksumFSOutputSummer(final ChecksumFs fs, final Path file, 
  final EnumSet<CreateFlag> createFlag,
  final FsPermission absolutePermission, final int bufferSize,
  final short replication, final long blockSize, 
  final Progressable progress, final ChecksumOpt checksumOpt,
  final boolean createParent) throws IOException {
  super(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,
      fs.getBytesPerSum()));

  // checksumOpt is passed down to the raw fs. Unless the raw fs implements
  // checksumming internally, checksumOpt will be ignored.
  // If the raw fs does checksum internally, we will end up with
  // two layers of checksumming, i.e. checksumming the checksum file.
  this.datas = fs.getRawFs().createInternal(file, createFlag,
      absolutePermission, bufferSize, replication, blockSize, progress,
       checksumOpt,  createParent);

  // Now create the checksum file; adjust the buffer size
  int bytesPerSum = fs.getBytesPerSum();
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
  this.sums = fs.getRawFs().createInternal(fs.getChecksumFile(file),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
      absolutePermission, sumBufferSize, replication, blockSize, progress,
      checksumOpt, createParent);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerSum);
}
Project: hadoop-oss    File: MD5MD5CRC32FileChecksum.java
public static DataChecksum.Type getCrcTypeFromAlgorithmName(String algorithm)
    throws IOException {
  if (algorithm.endsWith(DataChecksum.Type.CRC32.name())) {
    return DataChecksum.Type.CRC32;
  } else if (algorithm.endsWith(DataChecksum.Type.CRC32C.name())) {
    return DataChecksum.Type.CRC32C;
  }

  throw new IOException("Unknown checksum type in " + algorithm);
}
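A small, illustrative example of the mapping above; the algorithm-name prefix is an assumption (only the CRC32/CRC32C suffix is inspected).
import java.io.IOException;
import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
import org.apache.hadoop.util.DataChecksum;

public class AlgorithmNameExample {
  public static void main(String[] args) throws IOException {
    DataChecksum.Type t = MD5MD5CRC32FileChecksum
        .getCrcTypeFromAlgorithmName("MD5-of-131072MD5-of-512CRC32C");
    System.out.println(t);   // CRC32C
  }
}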
Project: hadoop-oss    File: Options.java
/**
 * A helper method for processing user input and default value to 
 * create a combined checksum option. This is a bit complicated because
 * bytesPerChecksum is kept for backward compatibility.
 *
 * @param defaultOpt Default checksum option
 * @param userOpt User-specified checksum option. Ignored if null.
 * @param userBytesPerChecksum User-specified bytesPerChecksum.
 *                Ignored if < 0.
 */
public static ChecksumOpt processChecksumOpt(ChecksumOpt defaultOpt, 
    ChecksumOpt userOpt, int userBytesPerChecksum) {
  final boolean useDefaultType;
  final DataChecksum.Type type;
  if (userOpt != null 
      && userOpt.getChecksumType() != DataChecksum.Type.DEFAULT) {
    useDefaultType = false;
    type = userOpt.getChecksumType();
  } else {
    useDefaultType = true;
    type = defaultOpt.getChecksumType();
  }

  //  bytesPerChecksum - order of preference
  //    user specified value in bytesPerChecksum
  //    user specified value in checksumOpt
  //    default.
  if (userBytesPerChecksum > 0) {
    return new ChecksumOpt(type, userBytesPerChecksum);
  } else if (userOpt != null && userOpt.getBytesPerChecksum() > 0) {
    return !useDefaultType? userOpt
        : new ChecksumOpt(type, userOpt.getBytesPerChecksum());
  } else {
    return useDefaultType? defaultOpt
        : new ChecksumOpt(type, defaultOpt.getBytesPerChecksum());
  }
}
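A hedged usage example of the preference order documented above, with illustrative values; it assumes the helper is exposed as Options.ChecksumOpt.processChecksumOpt, as in mainline Hadoop.
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.util.DataChecksum;

public class ChecksumOptExample {
  public static void main(String[] args) {
    ChecksumOpt serverDefault = new ChecksumOpt(DataChecksum.Type.CRC32, 512);
    ChecksumOpt userOpt = new ChecksumOpt(DataChecksum.Type.CRC32C, 1024);

    // No legacy bytesPerChecksum: the user option wins for type and size.
    ChecksumOpt a = ChecksumOpt.processChecksumOpt(serverDefault, userOpt, -1);
    System.out.println(a.getChecksumType() + "/" + a.getBytesPerChecksum()); // CRC32C/1024

    // A legacy bytesPerChecksum overrides the chunk size but keeps the type.
    ChecksumOpt b = ChecksumOpt.processChecksumOpt(serverDefault, userOpt, 256);
    System.out.println(b.getChecksumType() + "/" + b.getBytesPerChecksum()); // CRC32C/256
  }
}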
Project: hadoop    File: IFileOutputStream.java
/**
 * Create a checksum output stream that writes
 * the bytes to the given stream.
 * @param out
 */
public IFileOutputStream(OutputStream out) {
  super(out);
  sum = DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,
      Integer.MAX_VALUE);
  barray = new byte[sum.getChecksumSize()];
}
Project: hadoop    File: DFSOutputStream.java
/** 
 * @return the object for computing checksum.
 *         The type is NULL if checksum is not computed.
 */
private static DataChecksum getChecksum4Compute(DataChecksum checksum,
    HdfsFileStatus stat) {
  if (isLazyPersist(stat) && stat.getReplication() == 1) {
    // do not compute checksum for writing to single replica to memory
    return DataChecksum.newDataChecksum(Type.NULL,
        checksum.getBytesPerChecksum());
  }
  return checksum;
}
Project: hadoop    File: DFSOutputStream.java
private DFSOutputStream(DFSClient dfsClient, String src, Progressable progress,
    HdfsFileStatus stat, DataChecksum checksum) throws IOException {
  super(getChecksum4Compute(checksum, stat));
  this.dfsClient = dfsClient;
  this.src = src;
  this.fileId = stat.getFileId();
  this.blockSize = stat.getBlockSize();
  this.blockReplication = stat.getReplication();
  this.fileEncryptionInfo = stat.getFileEncryptionInfo();
  this.progress = progress;
  this.cachingStrategy = new AtomicReference<CachingStrategy>(
      dfsClient.getDefaultWriteCachingStrategy());
  if ((progress != null) && DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug(
        "Set non-null progress callback on DFSOutputStream " + src);
  }

  this.bytesPerChecksum = checksum.getBytesPerChecksum();
  if (bytesPerChecksum <= 0) {
    throw new HadoopIllegalArgumentException(
        "Invalid value: bytesPerChecksum = " + bytesPerChecksum + " <= 0");
  }
  if (blockSize % bytesPerChecksum != 0) {
    throw new HadoopIllegalArgumentException("Invalid values: "
        + DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY + " (=" + bytesPerChecksum
        + ") must divide block size (=" + blockSize + ").");
  }
  this.checksum4WriteBlock = checksum;

  this.dfsclientSlowLogThresholdMs =
    dfsClient.getConf().dfsclientSlowIoWarningThresholdMs;
  this.byteArrayManager = dfsClient.getClientContext().getByteArrayManager();
}
Project: hadoop    File: DFSOutputStream.java
/** Construct a new output stream for creating a file. */
private DFSOutputStream(DFSClient dfsClient, String src, HdfsFileStatus stat,
    EnumSet<CreateFlag> flag, Progressable progress,
    DataChecksum checksum, String[] favoredNodes) throws IOException {
  this(dfsClient, src, progress, stat, checksum);
  this.shouldSyncBlock = flag.contains(CreateFlag.SYNC_BLOCK);

  computePacketChunkSize(dfsClient.getConf().writePacketSize, bytesPerChecksum);

  streamer = new DataStreamer(stat, null);
  if (favoredNodes != null && favoredNodes.length != 0) {
    streamer.setFavoredNodes(favoredNodes);
  }
}
Project: hadoop    File: RemoteBlockReader.java
private RemoteBlockReader(String file, String bpid, long blockId,
    DataInputStream in, DataChecksum checksum, boolean verifyChecksum,
    long startOffset, long firstChunkOffset, long bytesToRead, Peer peer,
    DatanodeID datanodeID, PeerCache peerCache) {
  // Path is used only for printing block and file information in debug
  super(new Path("/" + Block.BLOCK_FILE_PREFIX + blockId +
                  ":" + bpid + ":of:"+ file)/*too non path-like?*/,
        1, verifyChecksum,
        checksum.getChecksumSize() > 0? checksum : null, 
        checksum.getBytesPerChecksum(),
        checksum.getChecksumSize());

  this.isLocal = DFSClient.isLocalAddress(NetUtils.
      createSocketAddr(datanodeID.getXferAddr()));

  this.peer = peer;
  this.datanodeID = datanodeID;
  this.in = in;
  this.checksum = checksum;
  this.startOffset = Math.max( startOffset, 0 );
  this.blockId = blockId;

  // The total number of bytes that we need to transfer from the DN is
  // the amount that the user wants (bytesToRead), plus the padding at
  // the beginning in order to chunk-align. Note that the DN may elect
  // to send more than this amount if the read starts/ends mid-chunk.
  this.bytesNeededToFinish = bytesToRead + (startOffset - firstChunkOffset);

  this.firstChunkOffset = firstChunkOffset;
  lastChunkOffset = firstChunkOffset;
  lastChunkLen = -1;

  bytesPerChecksum = this.checksum.getBytesPerChecksum();
  checksumSize = this.checksum.getChecksumSize();
  this.peerCache = peerCache;
}
Project: hadoop    File: BlockReaderLocal.java
private BlockReaderLocal(Builder builder) {
  this.replica = builder.replica;
  this.dataIn = replica.getDataStream().getChannel();
  this.dataPos = builder.dataPos;
  this.checksumIn = replica.getMetaStream().getChannel();
  BlockMetadataHeader header = builder.replica.getMetaHeader();
  this.checksum = header.getChecksum();
  this.verifyChecksum = builder.verifyChecksum &&
      (this.checksum.getChecksumType().id != DataChecksum.CHECKSUM_NULL);
  this.filename = builder.filename;
  this.block = builder.block;
  this.bytesPerChecksum = checksum.getBytesPerChecksum();
  this.checksumSize = checksum.getChecksumSize();

  this.maxAllocatedChunks = (bytesPerChecksum == 0) ? 0 :
      ((builder.bufferSize + bytesPerChecksum - 1) / bytesPerChecksum);
  // Calculate the effective maximum readahead.
  // We can't do more readahead than there is space in the buffer.
  int maxReadaheadChunks = (bytesPerChecksum == 0) ? 0 :
      ((Math.min(builder.bufferSize, builder.maxReadahead) +
          bytesPerChecksum - 1) / bytesPerChecksum);
  if (maxReadaheadChunks == 0) {
    this.zeroReadaheadRequested = true;
    maxReadaheadChunks = 1;
  } else {
    this.zeroReadaheadRequested = false;
  }
  this.maxReadaheadLength = maxReadaheadChunks * bytesPerChecksum;
  this.storageType = builder.storageType;
}
Project: hadoop    File: BlockReaderLocalLegacy.java
private BlockReaderLocalLegacy(DFSClient.Conf conf, String hdfsfile,
    ExtendedBlock block, Token<BlockTokenIdentifier> token, long startOffset,
    long length, BlockLocalPathInfo pathinfo, FileInputStream dataIn)
    throws IOException {
  this(conf, hdfsfile, block, token, startOffset, length, pathinfo,
      DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 4), false,
      dataIn, startOffset, null);
}
Project: hadoop    File: DataTransferProtoUtil.java
public static ChecksumProto toProto(DataChecksum checksum) {
  ChecksumTypeProto type = PBHelper.convert(checksum.getChecksumType());
  // ChecksumType#valueOf never returns null
  return ChecksumProto.newBuilder()
    .setBytesPerChecksum(checksum.getBytesPerChecksum())
    .setType(type)
    .build();
}
Project: hadoop    File: DataTransferProtoUtil.java
public static DataChecksum fromProto(ChecksumProto proto) {
  if (proto == null) return null;

  int bytesPerChecksum = proto.getBytesPerChecksum();
  DataChecksum.Type type = PBHelper.convert(proto.getType());
  return DataChecksum.newDataChecksum(type, bytesPerChecksum);
}
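A minimal round trip through the two helpers above; the package paths are assumed from mainline Hadoop and the values are illustrative.
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto;
import org.apache.hadoop.util.DataChecksum;

public class ChecksumProtoRoundTrip {
  public static void main(String[] args) {
    DataChecksum original =
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 512);
    ChecksumProto proto = DataTransferProtoUtil.toProto(original);
    DataChecksum restored = DataTransferProtoUtil.fromProto(proto);
    // Type and chunk size survive the round trip: CRC32C/512.
    System.out.println(restored.getChecksumType()
        + "/" + restored.getBytesPerChecksum());
  }
}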
Project: hadoop    File: DFSClient.java
private DataChecksum.Type getChecksumType(Configuration conf) {
  final String checksum = conf.get(
      DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY,
      DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
  try {
    return DataChecksum.Type.valueOf(checksum);
  } catch(IllegalArgumentException iae) {
    LOG.warn("Bad checksum type: " + checksum + ". Using default "
        + DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
    return DataChecksum.Type.valueOf(
        DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT); 
  }
}
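The type resolved above is read from DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY. A minimal sketch of overriding it in configuration; the class name is illustrative and the value must be a DataChecksum.Type enum name.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class ChecksumTypeConfig {
  public static Configuration withCrc32c() {
    Configuration conf = new Configuration();
    // An unknown name falls back to the default type, as the code above shows.
    conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, "CRC32C");
    return conf;
  }
}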