Java class org.apache.hadoop.util.PureJavaCrc32: example source code
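PureJavaCrc32 is Hadoop's pure-Java CRC-32 implementation. It implements java.util.zip.Checksum and produces the same values as java.util.zip.CRC32, so the two are interchangeable wherever a Checksum is expected. The snippets below, collected from Hadoop and a number of its forks and related projects, show the typical ways it is wired into checksummed streams. As a minimal standalone sketch (not taken from any of the projects below):

import java.util.zip.Checksum;
import org.apache.hadoop.util.PureJavaCrc32;

public class Crc32Demo {
  public static void main(String[] args) {
    byte[] data = "hello, crc".getBytes();
    Checksum crc = new PureJavaCrc32();   // same contract as java.util.zip.CRC32
    crc.update(data, 0, data.length);
    System.out.printf("crc32 = 0x%08x%n", crc.getValue());
    crc.reset();                          // the instance is reusable after reset()
  }
}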

Project: hadoop    File: TestShuffleHandler.java
private static void createIndexFile(File indexFile, Configuration conf)
    throws IOException {
  if (indexFile.exists()) {
    System.out.println("Deleting existing file");
    indexFile.delete();
  }
  indexFile.createNewFile();
  FSDataOutputStream output = FileSystem.getLocal(conf).getRaw().append(
      new Path(indexFile.getAbsolutePath()));
  Checksum crc = new PureJavaCrc32();
  crc.reset();
  CheckedOutputStream chk = new CheckedOutputStream(output, crc);
  String msg = "Writing new index file. This file will be used only " +
      "for the testing.";
  chk.write(Arrays.copyOf(msg.getBytes(),
      MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH));
  output.writeLong(chk.getChecksum().getValue());
  output.close();
}
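This createIndexFile helper, repeated across the forks below, follows one pattern: pad the record to MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH, stream it through a CheckedOutputStream backed by PureJavaCrc32, and append the accumulated CRC as a trailing long. A reader verifies such a record by mirroring that layout; the following is an illustrative sketch (verifyIndexRecord is a hypothetical helper, not part of Hadoop, assuming the usual java.io and java.util.zip imports):

// Hypothetical sketch: verify a record followed by its CRC, as written above.
static boolean verifyIndexRecord(DataInputStream in, int recordLength)
    throws IOException {
  Checksum crc = new PureJavaCrc32();
  byte[] record = new byte[recordLength];
  in.readFully(record);                  // the padded record body
  crc.update(record, 0, record.length);
  long stored = in.readLong();           // the value appended via writeLong()
  return stored == crc.getValue();
}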
Project: aliyun-oss-hadoop-fs    File: TestShuffleHandler.java
private static void createIndexFile(File indexFile, Configuration conf)
    throws IOException {
  if (indexFile.exists()) {
    System.out.println("Deleting existing file");
    indexFile.delete();
  }
  indexFile.createNewFile();
  FSDataOutputStream output = FileSystem.getLocal(conf).getRaw().append(
      new Path(indexFile.getAbsolutePath()));
  Checksum crc = new PureJavaCrc32();
  crc.reset();
  CheckedOutputStream chk = new CheckedOutputStream(output, crc);
  String msg = "Writing new index file. This file will be used only " +
      "for the testing.";
  chk.write(Arrays.copyOf(msg.getBytes(),
      MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH));
  output.writeLong(chk.getChecksum().getValue());
  output.close();
}
Project: big-c    File: TestShuffleHandler.java
private static void createIndexFile(File indexFile, Configuration conf)
    throws IOException {
  if (indexFile.exists()) {
    System.out.println("Deleting existing file");
    indexFile.delete();
  }
  indexFile.createNewFile();
  FSDataOutputStream output = FileSystem.getLocal(conf).getRaw().append(
      new Path(indexFile.getAbsolutePath()));
  Checksum crc = new PureJavaCrc32();
  crc.reset();
  CheckedOutputStream chk = new CheckedOutputStream(output, crc);
  String msg = "Writing new index file. This file will be used only " +
      "for the testing.";
  chk.write(Arrays.copyOf(msg.getBytes(),
      MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH));
  output.writeLong(chk.getChecksum().getValue());
  output.close();
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestShuffleHandler.java
private static void createIndexFile(File indexFile, Configuration conf)
    throws IOException {
  if (indexFile.exists()) {
    System.out.println("Deleting existing file");
    indexFile.delete();
  }
  indexFile.createNewFile();
  FSDataOutputStream output = FileSystem.getLocal(conf).getRaw().append(
      new Path(indexFile.getAbsolutePath()));
  Checksum crc = new PureJavaCrc32();
  crc.reset();
  CheckedOutputStream chk = new CheckedOutputStream(output, crc);
  String msg = "Writing new index file. This file will be used only " +
      "for the testing.";
  chk.write(Arrays.copyOf(msg.getBytes(),
      MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH));
  output.writeLong(chk.getChecksum().getValue());
  output.close();
}
Project: hadoop-EAR    File: ChecksumFileSystem.java
public ChecksumFSOutputSummer(ChecksumFileSystem fs, 
                      Path file, 
                      boolean overwrite,
                      int bufferSize,
                      short replication,
                      long blockSize,
                      int bytesPerChecksum,
                      Progressable progress)
  throws IOException {
  super(new PureJavaCrc32(), bytesPerChecksum, 4, null);
  this.datas = fs.getRawFileSystem().create(file, overwrite, bufferSize, 
                                     replication, blockSize, progress);
  int sumBufferSize = fs.getSumBufferSize(bytesPerChecksum, bufferSize);
  this.sums = fs.getRawFileSystem().create(fs.getChecksumFile(file), true, 
                                           sumBufferSize, replication,
                                           blockSize);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerChecksum);
}
Project: hadoop-plus    File: TestShuffleHandler.java
public static void createIndexFile(File indexFile, Configuration conf)
    throws IOException {
  if (indexFile.exists()) {
    System.out.println("Deleting existing file");
    indexFile.delete();
  }
  indexFile.createNewFile();
  FSDataOutputStream output = FileSystem.getLocal(conf).getRaw().append(
      new Path(indexFile.getAbsolutePath()));
  Checksum crc = new PureJavaCrc32();
  crc.reset();
  CheckedOutputStream chk = new CheckedOutputStream(output, crc);
  String msg = "Writing new index file. This file will be used only " +
      "for the testing.";
  chk.write(Arrays.copyOf(msg.getBytes(),
      MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH));
  output.writeLong(chk.getChecksum().getValue());
  output.close();
}
Project: hadoop-plus    File: FSEditLogOp.java
/**
 * Construct the reader
 * @param in The stream to read from.
 * @param logVersion The version of the data coming from the stream.
 */
@SuppressWarnings("deprecation")
public Reader(DataInputStream in, StreamLimiter limiter,
    int logVersion) {
  this.logVersion = logVersion;
  if (LayoutVersion.supports(Feature.EDITS_CHESKUM, logVersion)) {
    this.checksum = new PureJavaCrc32();
  } else {
    this.checksum = null;
  }

  if (this.checksum != null) {
    this.in = new DataInputStream(
        new CheckedInputStream(in, this.checksum));
  } else {
    this.in = in;
  }
  this.limiter = limiter;
  this.cache = new OpInstanceCache();
  this.maxOpSize = DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_DEFAULT;
}
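When the layout version supports edit-log checksums, the Reader wraps its input in a CheckedInputStream, so the CRC accumulates as each op is decoded and can then be compared against the checksum stored after the op. A simplified sketch of that validation step (modelled on the check in FSEditLogOp; shown as an illustration, not the exact Hadoop code):

// Illustrative sketch: after decoding one op through the CheckedInputStream,
// compare the accumulated CRC with the value stored in the log.
private void validateChecksum(DataInputStream in, Checksum checksum, long txid)
    throws IOException {
  if (checksum != null) {
    int calculated = (int) checksum.getValue(); // CRC accumulated while reading the op
    int stored = in.readInt();                  // CRC recorded after the op
    if (stored != calculated) {
      throw new IOException("Transaction " + txid + " has a corrupt checksum");
    }
  }
}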
Project: hadoop-plus    File: ChecksumFileSystem.java
public ChecksumFSOutputSummer(ChecksumFileSystem fs, 
                      Path file, 
                      boolean overwrite,
                      int bufferSize,
                      short replication,
                      long blockSize,
                      Progressable progress)
  throws IOException {
  super(new PureJavaCrc32(), fs.getBytesPerSum(), 4);
  int bytesPerSum = fs.getBytesPerSum();
  this.datas = fs.getRawFileSystem().create(file, overwrite, bufferSize, 
                                     replication, blockSize, progress);
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
  this.sums = fs.getRawFileSystem().create(fs.getChecksumFile(file), true, 
                                           sumBufferSize, replication,
                                           blockSize);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerSum);
}
Project: hadoop-plus    File: ChecksumFs.java
public ChecksumFSOutputSummer(final ChecksumFs fs, final Path file, 
  final EnumSet<CreateFlag> createFlag,
  final FsPermission absolutePermission, final int bufferSize,
  final short replication, final long blockSize, 
  final Progressable progress, final ChecksumOpt checksumOpt,
  final boolean createParent) throws IOException {
  super(new PureJavaCrc32(), fs.getBytesPerSum(), 4);

  // checksumOpt is passed down to the raw fs. Unless the raw fs
  // implements checksums internally, checksumOpt will be ignored.
  // If the raw fs does checksum internally, we will end up with
  // two layers of checksumming, i.e. checksumming the checksum file.
  this.datas = fs.getRawFs().createInternal(file, createFlag,
      absolutePermission, bufferSize, replication, blockSize, progress,
       checksumOpt,  createParent);

  // Now create the checksum file; adjust the buffer size
  int bytesPerSum = fs.getBytesPerSum();
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
  this.sums = fs.getRawFs().createInternal(fs.getChecksumFile(file),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
      absolutePermission, sumBufferSize, replication, blockSize, progress,
      checksumOpt, createParent);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerSum);
}
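The ChecksumFSOutputSummer constructors above finish by writing a small header to the sums file: the CHECKSUM_VERSION magic bytes followed by bytesPerSum as an int; the per-chunk CRCs produced by the summer follow that header. A reader therefore recovers the chunk size before verifying any data, roughly as in this sketch (readChecksumHeader is hypothetical; CHECKSUM_VERSION is the magic defined in ChecksumFileSystem):

// Hypothetical sketch: parse the header written by the constructors above.
static int readChecksumHeader(DataInputStream sums, byte[] expectedVersion)
    throws IOException {
  byte[] version = new byte[expectedVersion.length];
  sums.readFully(version);                        // CHECKSUM_VERSION magic
  if (!Arrays.equals(version, expectedVersion)) {
    throw new IOException("Not a checksum file");
  }
  return sums.readInt();                          // bytesPerSum used by the writer
}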
Project: FlexMap    File: TestShuffleHandler.java
private static void createIndexFile(File indexFile, Configuration conf)
    throws IOException {
  if (indexFile.exists()) {
    System.out.println("Deleting existing file");
    indexFile.delete();
  }
  indexFile.createNewFile();
  FSDataOutputStream output = FileSystem.getLocal(conf).getRaw().append(
      new Path(indexFile.getAbsolutePath()));
  Checksum crc = new PureJavaCrc32();
  crc.reset();
  CheckedOutputStream chk = new CheckedOutputStream(output, crc);
  String msg = "Writing new index file. This file will be used only " +
      "for the testing.";
  chk.write(Arrays.copyOf(msg.getBytes(),
      MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH));
  output.writeLong(chk.getChecksum().getValue());
  output.close();
}
Project: hops    File: TestShuffleHandler.java
private static void createIndexFile(File indexFile, Configuration conf)
    throws IOException {
  if (indexFile.exists()) {
    System.out.println("Deleting existing file");
    indexFile.delete();
  }
  indexFile.createNewFile();
  FSDataOutputStream output = FileSystem.getLocal(conf).getRaw().append(
      new Path(indexFile.getAbsolutePath()));
  Checksum crc = new PureJavaCrc32();
  crc.reset();
  CheckedOutputStream chk = new CheckedOutputStream(output, crc);
  String msg = "Writing new index file. This file will be used only " +
      "for the testing.";
  chk.write(Arrays.copyOf(msg.getBytes(),
      MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH));
  output.writeLong(chk.getChecksum().getValue());
  output.close();
}
Project: hops    File: ChecksumFileSystem.java
public ChecksumFSOutputSummer(ChecksumFileSystem fs, 
                      Path file, 
                      boolean overwrite,
                      int bufferSize,
                      short replication,
                      long blockSize,
                      Progressable progress,
                      FsPermission permission)
  throws IOException {
  super(new PureJavaCrc32(), fs.getBytesPerSum(), 4);
  int bytesPerSum = fs.getBytesPerSum();
  this.datas = fs.getRawFileSystem().create(file, permission, overwrite,
                                     bufferSize, replication, blockSize,
                                     progress);
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
  this.sums = fs.getRawFileSystem().create(fs.getChecksumFile(file),
                                           permission, true, sumBufferSize,
                                           replication, blockSize, null);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerSum);
}
Project: hops    File: ChecksumFs.java
public ChecksumFSOutputSummer(final ChecksumFs fs, final Path file, 
  final EnumSet<CreateFlag> createFlag,
  final FsPermission absolutePermission, final int bufferSize,
  final short replication, final long blockSize, 
  final Progressable progress, final ChecksumOpt checksumOpt,
  final boolean createParent) throws IOException {
  super(new PureJavaCrc32(), fs.getBytesPerSum(), 4);

  // checksumOpt is passed down to the raw fs. Unless the raw fs
  // implements checksums internally, checksumOpt will be ignored.
  // If the raw fs does checksum internally, we will end up with
  // two layers of checksumming, i.e. checksumming the checksum file.
  this.datas = fs.getRawFs().createInternal(file, createFlag,
      absolutePermission, bufferSize, replication, blockSize, progress,
       checksumOpt,  createParent);

  // Now create the checksum file; adjust the buffer size
  int bytesPerSum = fs.getBytesPerSum();
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
  this.sums = fs.getRawFs().createInternal(fs.getChecksumFile(file),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
      absolutePermission, sumBufferSize, replication, blockSize, progress,
      checksumOpt, createParent);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerSum);
}
Project: hadoop-TCP    File: TestShuffleHandler.java
public static void createIndexFile(File indexFile, Configuration conf)
    throws IOException {
  if (indexFile.exists()) {
    System.out.println("Deleting existing file");
    indexFile.delete();
  }
  indexFile.createNewFile();
  FSDataOutputStream output = FileSystem.getLocal(conf).getRaw().append(
      new Path(indexFile.getAbsolutePath()));
  Checksum crc = new PureJavaCrc32();
  crc.reset();
  CheckedOutputStream chk = new CheckedOutputStream(output, crc);
  String msg = "Writing new index file. This file will be used only " +
      "for the testing.";
  chk.write(Arrays.copyOf(msg.getBytes(),
      MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH));
  output.writeLong(chk.getChecksum().getValue());
  output.close();
}
Project: hadoop-TCP    File: FSEditLogOp.java
/**
 * Construct the reader
 * @param in The stream to read from.
 * @param logVersion The version of the data coming from the stream.
 */
@SuppressWarnings("deprecation")
public Reader(DataInputStream in, StreamLimiter limiter,
    int logVersion) {
  this.logVersion = logVersion;
  if (LayoutVersion.supports(Feature.EDITS_CHESKUM, logVersion)) {
    this.checksum = new PureJavaCrc32();
  } else {
    this.checksum = null;
  }

  if (this.checksum != null) {
    this.in = new DataInputStream(
        new CheckedInputStream(in, this.checksum));
  } else {
    this.in = in;
  }
  this.limiter = limiter;
  this.cache = new OpInstanceCache();
  this.maxOpSize = DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_DEFAULT;
}
Project: hadoop-TCP    File: ChecksumFileSystem.java
public ChecksumFSOutputSummer(ChecksumFileSystem fs, 
                      Path file, 
                      boolean overwrite,
                      int bufferSize,
                      short replication,
                      long blockSize,
                      Progressable progress)
  throws IOException {
  super(new PureJavaCrc32(), fs.getBytesPerSum(), 4);
  int bytesPerSum = fs.getBytesPerSum();
  this.datas = fs.getRawFileSystem().create(file, overwrite, bufferSize, 
                                     replication, blockSize, progress);
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
  this.sums = fs.getRawFileSystem().create(fs.getChecksumFile(file), true, 
                                           sumBufferSize, replication,
                                           blockSize);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerSum);
}
Project: hadoop-TCP    File: ChecksumFs.java
public ChecksumFSOutputSummer(final ChecksumFs fs, final Path file, 
  final EnumSet<CreateFlag> createFlag,
  final FsPermission absolutePermission, final int bufferSize,
  final short replication, final long blockSize, 
  final Progressable progress, final ChecksumOpt checksumOpt,
  final boolean createParent) throws IOException {
  super(new PureJavaCrc32(), fs.getBytesPerSum(), 4);

  // checksumOpt is passed down to the raw fs. Unless the raw fs
  // implements checksums internally, checksumOpt will be ignored.
  // If the raw fs does checksum internally, we will end up with
  // two layers of checksumming, i.e. checksumming the checksum file.
  this.datas = fs.getRawFs().createInternal(file, createFlag,
      absolutePermission, bufferSize, replication, blockSize, progress,
       checksumOpt,  createParent);

  // Now create the checksum file; adjust the buffer size
  int bytesPerSum = fs.getBytesPerSum();
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
  this.sums = fs.getRawFs().createInternal(fs.getChecksumFile(file),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
      absolutePermission, sumBufferSize, replication, blockSize, progress,
      checksumOpt, createParent);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerSum);
}
Project: hadoop-on-lustre    File: ChecksumFileSystem.java
public ChecksumFSOutputSummer(ChecksumFileSystem fs, 
                      Path file, 
                      boolean overwrite,
                      int bufferSize,
                      short replication,
                      long blockSize,
                      Progressable progress)
  throws IOException {
  super(new PureJavaCrc32(), fs.getBytesPerSum(), 4);
  int bytesPerSum = fs.getBytesPerSum();
  this.datas = fs.getRawFileSystem().create(file, overwrite, bufferSize, 
                                     replication, blockSize, progress);
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
  this.sums = fs.getRawFileSystem().create(fs.getChecksumFile(file), true, 
                                           sumBufferSize, replication,
                                           blockSize);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerSum);
}
Project: hardfs    File: TestShuffleHandler.java
public static void createIndexFile(File indexFile, Configuration conf)
    throws IOException {
  if (indexFile.exists()) {
    System.out.println("Deleting existing file");
    indexFile.delete();
  }
  indexFile.createNewFile();
  FSDataOutputStream output = FileSystem.getLocal(conf).getRaw().append(
      new Path(indexFile.getAbsolutePath()));
  Checksum crc = new PureJavaCrc32();
  crc.reset();
  CheckedOutputStream chk = new CheckedOutputStream(output, crc);
  String msg = "Writing new index file. This file will be used only " +
      "for the testing.";
  chk.write(Arrays.copyOf(msg.getBytes(),
      MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH));
  output.writeLong(chk.getChecksum().getValue());
  output.close();
}
Project: hardfs    File: FSEditLogOp.java
/**
 * Construct the reader
 * @param in The stream to read from.
 * @param logVersion The version of the data coming from the stream.
 */
@SuppressWarnings("deprecation")
public Reader(DataInputStream in, StreamLimiter limiter,
    int logVersion) {
  this.logVersion = logVersion;
  if (LayoutVersion.supports(Feature.EDITS_CHESKUM, logVersion)) {
    this.checksum = new PureJavaCrc32();
  } else {
    this.checksum = null;
  }

  if (this.checksum != null) {
    this.in = new DataInputStream(
        new CheckedInputStream(in, this.checksum));
  } else {
    this.in = in;
  }
  this.limiter = limiter;
  this.cache = new OpInstanceCache();
  this.maxOpSize = DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_DEFAULT;
}
Project: hardfs    File: ChecksumFileSystem.java
public ChecksumFSOutputSummer(ChecksumFileSystem fs, 
                      Path file, 
                      boolean overwrite,
                      int bufferSize,
                      short replication,
                      long blockSize,
                      Progressable progress)
  throws IOException {
  super(new PureJavaCrc32(), fs.getBytesPerSum(), 4);
  int bytesPerSum = fs.getBytesPerSum();
  this.datas = fs.getRawFileSystem().create(file, overwrite, bufferSize, 
                                     replication, blockSize, progress);
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
  this.sums = fs.getRawFileSystem().create(fs.getChecksumFile(file), true, 
                                           sumBufferSize, replication,
                                           blockSize);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerSum);
}
Project: hardfs    File: ChecksumFs.java
public ChecksumFSOutputSummer(final ChecksumFs fs, final Path file, 
  final EnumSet<CreateFlag> createFlag,
  final FsPermission absolutePermission, final int bufferSize,
  final short replication, final long blockSize, 
  final Progressable progress, final ChecksumOpt checksumOpt,
  final boolean createParent) throws IOException {
  super(new PureJavaCrc32(), fs.getBytesPerSum(), 4);

  // checksumOpt is passed down to the raw fs. Unless the raw fs
  // implements checksums internally, checksumOpt will be ignored.
  // If the raw fs does checksum internally, we will end up with
  // two layers of checksumming, i.e. checksumming the checksum file.
  this.datas = fs.getRawFs().createInternal(file, createFlag,
      absolutePermission, bufferSize, replication, blockSize, progress,
       checksumOpt,  createParent);

  // Now create the checksum file; adjust the buffer size
  int bytesPerSum = fs.getBytesPerSum();
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
  this.sums = fs.getRawFs().createInternal(fs.getChecksumFile(file),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
      absolutePermission, sumBufferSize, replication, blockSize, progress,
      checksumOpt, createParent);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerSum);
}
Project: hadoop-on-lustre2    File: TestShuffleHandler.java
private static void createIndexFile(File indexFile, Configuration conf)
    throws IOException {
  if (indexFile.exists()) {
    System.out.println("Deleting existing file");
    indexFile.delete();
  }
  indexFile.createNewFile();
  FSDataOutputStream output = FileSystem.getLocal(conf).getRaw().append(
      new Path(indexFile.getAbsolutePath()));
  Checksum crc = new PureJavaCrc32();
  crc.reset();
  CheckedOutputStream chk = new CheckedOutputStream(output, crc);
  String msg = "Writing new index file. This file will be used only " +
      "for the testing.";
  chk.write(Arrays.copyOf(msg.getBytes(),
      MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH));
  output.writeLong(chk.getChecksum().getValue());
  output.close();
}
Project: hadoop-on-lustre2    File: FSEditLogOp.java
/**
 * Construct the reader
 * @param in The stream to read from.
 * @param logVersion The version of the data coming from the stream.
 */
public Reader(DataInputStream in, StreamLimiter limiter, int logVersion) {
  this.logVersion = logVersion;
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITS_CHESKUM, logVersion)) {
    this.checksum = new PureJavaCrc32();
  } else {
    this.checksum = null;
  }
  // It is possible that the logVersion is actually a future layoutversion
  // during the rolling upgrade (e.g., the NN gets upgraded first). We
  // assume future layout will also support length of editlog op.
  this.supportEditLogLength = NameNodeLayoutVersion.supports(
      NameNodeLayoutVersion.Feature.EDITLOG_LENGTH, logVersion)
      || logVersion < NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION;

  if (this.checksum != null) {
    this.in = new DataInputStream(
        new CheckedInputStream(in, this.checksum));
  } else {
    this.in = in;
  }
  this.limiter = limiter;
  this.cache = new OpInstanceCache();
  this.maxOpSize = DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_DEFAULT;
}
Project: hadoop-on-lustre2    File: ChecksumFileSystem.java
public ChecksumFSOutputSummer(ChecksumFileSystem fs, 
                      Path file, 
                      boolean overwrite,
                      int bufferSize,
                      short replication,
                      long blockSize,
                      Progressable progress)
  throws IOException {
  super(new PureJavaCrc32(), fs.getBytesPerSum(), 4);
  int bytesPerSum = fs.getBytesPerSum();
  this.datas = fs.getRawFileSystem().create(file, overwrite, bufferSize, 
                                     replication, blockSize, progress);
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
  this.sums = fs.getRawFileSystem().create(fs.getChecksumFile(file), true, 
                                           sumBufferSize, replication,
                                           blockSize);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerSum);
}
Project: hadoop-on-lustre2    File: ChecksumFs.java
public ChecksumFSOutputSummer(final ChecksumFs fs, final Path file, 
  final EnumSet<CreateFlag> createFlag,
  final FsPermission absolutePermission, final int bufferSize,
  final short replication, final long blockSize, 
  final Progressable progress, final ChecksumOpt checksumOpt,
  final boolean createParent) throws IOException {
  super(new PureJavaCrc32(), fs.getBytesPerSum(), 4);

  // checksumOpt is passed down to the raw fs. Unless the raw fs
  // implements checksums internally, checksumOpt will be ignored.
  // If the raw fs does checksum internally, we will end up with
  // two layers of checksumming, i.e. checksumming the checksum file.
  this.datas = fs.getRawFs().createInternal(file, createFlag,
      absolutePermission, bufferSize, replication, blockSize, progress,
       checksumOpt,  createParent);

  // Now create the checksum file; adjust the buffer size
  int bytesPerSum = fs.getBytesPerSum();
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
  this.sums = fs.getRawFs().createInternal(fs.getChecksumFile(file),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
      absolutePermission, sumBufferSize, replication, blockSize, progress,
      checksumOpt, createParent);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerSum);
}
Project: cumulus    File: DFSOutputStream.java
private DFSOutputStream(DFSClient dfsClient, String src, long blockSize,
    Progressable progress, int bytesPerChecksum, short replication)
    throws IOException {
  super(new PureJavaCrc32(), bytesPerChecksum, 4);
  this.dfsClient = dfsClient;
  this.conf = dfsClient.conf;
  this.src = src;
  // this.blockSize = blockSize;
  this.blockReplication = replication;
  this.progress = progress;
  if ((progress != null) && DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("Set non-null progress callback on DFSOutputStream "
        + src);
  }

  if (bytesPerChecksum < 1 || blockSize % bytesPerChecksum != 0) {
    throw new IOException("io.bytes.per.checksum(" + bytesPerChecksum
        + ") and blockSize(" + blockSize + ") do not match. "
        + "blockSize should be a " + "multiple of io.bytes.per.checksum");

  }
  checksum = DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32,
      bytesPerChecksum);
}
Project: cumulus    File: DFSOutputStream.java
private DFSOutputStream(DFSClient dfsClient, String src, long blockSize,
    Progressable progress, int bytesPerChecksum) throws IOException {
  super(new PureJavaCrc32(), bytesPerChecksum, 4);
  this.dfsClient = dfsClient;
  this.conf = dfsClient.conf;
  this.src = src;
  // this.blockSize = blockSize;
  this.progress = progress;
  if ((progress != null) && DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("Set non-null progress callback on DFSOutputStream "
        + src);
  }

  if (bytesPerChecksum < 1 || blockSize % bytesPerChecksum != 0) {
    throw new IOException("io.bytes.per.checksum(" + bytesPerChecksum
        + ") and blockSize(" + blockSize + ") do not match. "
        + "blockSize should be a " + "multiple of io.bytes.per.checksum");

  }
  checksum = DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32,
      bytesPerChecksum);
}
Project: cumulus    File: TestParallelImageWrite.java
private void checkImages(FSNamesystem fsn) throws Exception {
  Iterator<StorageDirectory> iter = fsn.
          getFSImage().dirIterator(FSImage.NameNodeDirType.IMAGE);
  List<Long> checksums = new ArrayList<Long>();
  while (iter.hasNext()) {
    StorageDirectory sd = iter.next();
    File fsImage = FSImage.getImageFile(sd, FSImage.NameNodeFile.IMAGE);
    PureJavaCrc32 crc = new PureJavaCrc32();
    FileInputStream in = new FileInputStream(fsImage);
    byte[] buff = new byte[4096];
    int read = 0;
    while ((read = in.read(buff)) != -1) {
      crc.update(buff, 0, read);
    }
    in.close();
    long val = crc.getValue();
    checksums.add(val);
  }
  assertTrue("Not enough fsimage copies in MiniDFSCluster " + 
             "to test parallel write", checksums.size() > 1);
  for (int i = 1; i < checksums.size(); i++) {
    assertEquals(checksums.get(i - 1), checksums.get(i));
  }
}
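checkImages computes a whole-file CRC per storage directory and asserts that every fsimage copy hashes to the same value. The same loop can be packaged as a small helper; a sketch using try-with-resources (not taken from the cumulus sources):

// Sketch: CRC-32 of an entire file, closing the stream deterministically.
static long fileCrc32(File f) throws IOException {
  PureJavaCrc32 crc = new PureJavaCrc32();
  try (FileInputStream in = new FileInputStream(f)) {
    byte[] buff = new byte[4096];
    int read;
    while ((read = in.read(buff)) != -1) {
      crc.update(buff, 0, read);
    }
  }
  return crc.getValue();
}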
Project: RDFS    File: BlockXCodingMerger.java
public BlockXCodingMerger(Block block, int namespaceId,
        DataInputStream[] childInputStreams, long offsetInBlock,
        long length, String[] childAddrs, String myAddr,
        DataTransferThrottler throttler,
        int mergerLevel) throws IOException{
    super();
    this.block = block;
    this.namespaceId = namespaceId;
    this.childInputStreams = childInputStreams;
    this.offsetInBlock = offsetInBlock;
    this.length = length;
    this.childAddrs = childAddrs;
    this.myAddr = myAddr;
    this.throttler = throttler;
    this.mergerLevel = mergerLevel;
    Configuration conf = new Configuration();
    this.packetSize = conf.getInt("raid.blockreconstruct.packetsize", 4096);
    this.bytesPerChecksum = conf.getInt("io.bytes.per.checksum", 512);
    this.checksum = DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32,
            bytesPerChecksum, new PureJavaCrc32());
    this.checksumSize = checksum.getChecksumSize();
}
Project: tez    File: TestShuffleHandler.java
private static void createIndexFile(File indexFile, Configuration conf)
    throws IOException {
  if (indexFile.exists()) {
    System.out.println("Deleting existing file");
    indexFile.delete();
  }
  indexFile.createNewFile();
  FSDataOutputStream output = FileSystem.getLocal(conf).getRaw().append(
      new Path(indexFile.getAbsolutePath()));
  Checksum crc = new PureJavaCrc32();
  crc.reset();
  CheckedOutputStream chk = new CheckedOutputStream(output, crc);
  String msg = "Writing new index file. This file will be used only " +
      "for the testing.";
  chk.write(Arrays.copyOf(msg.getBytes(),
      MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH));
  output.writeLong(chk.getChecksum().getValue());
  output.close();
}
Project: hortonworks-extension    File: ChecksumFileSystem.java
public ChecksumFSOutputSummer(ChecksumFileSystem fs, 
                      Path file, 
                      boolean overwrite,
                      int bufferSize,
                      short replication,
                      long blockSize,
                      Progressable progress)
  throws IOException {
  super(new PureJavaCrc32(), fs.getBytesPerSum(), 4);
  int bytesPerSum = fs.getBytesPerSum();
  this.datas = fs.getRawFileSystem().create(file, overwrite, bufferSize, 
                                     replication, blockSize, progress);
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
  this.sums = fs.getRawFileSystem().create(fs.getChecksumFile(file), true, 
                                           sumBufferSize, replication,
                                           blockSize);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerSum);
}
Project: hadoop    File: GenSort.java
public static void outputRecords(OutputStream out,
                                 boolean useAscii,
                                 Unsigned16 firstRecordNumber,
                                 Unsigned16 recordsToGenerate,
                                 Unsigned16 checksum
                                 ) throws IOException {
  byte[] row = new byte[100];
  Unsigned16 recordNumber = new Unsigned16(firstRecordNumber);
  Unsigned16 lastRecordNumber = new Unsigned16(firstRecordNumber);
  Checksum crc = new PureJavaCrc32();
  Unsigned16 tmp = new Unsigned16();
  lastRecordNumber.add(recordsToGenerate);
  Unsigned16 ONE = new Unsigned16(1);
  Unsigned16 rand = Random16.skipAhead(firstRecordNumber);
  while (!recordNumber.equals(lastRecordNumber)) {
    Random16.nextRand(rand);
    if (useAscii) {
      generateAsciiRecord(row, rand, recordNumber);
    } else {
      generateRecord(row, rand, recordNumber);
    }
    if (checksum != null) {
      crc.reset();
      crc.update(row, 0, row.length);
      tmp.set(crc.getValue());
      checksum.add(tmp);
    }
    recordNumber.add(ONE);
    out.write(row);
  }
}
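outputRecords folds a per-row CRC into a running 128-bit total: one reused PureJavaCrc32 computes each row's CRC-32, the value is widened into an Unsigned16, and the result is added to the caller's checksum accumulator. The same aggregation in isolation (a sketch, assuming the Unsigned16 helper from the terasort examples used above):

// Sketch: aggregate CRC over a batch of rows, mirroring the loop above.
static Unsigned16 sumOfRowCrcs(byte[][] rows) {
  Checksum crc = new PureJavaCrc32();
  Unsigned16 total = new Unsigned16();
  Unsigned16 tmp = new Unsigned16();
  for (byte[] row : rows) {
    crc.reset();                     // reuse one checksum object per row
    crc.update(row, 0, row.length);
    tmp.set(crc.getValue());         // widen the 32-bit CRC into 128 bits
    total.add(tmp);                  // order-independent running total
  }
  return total;
}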
Project: aliyun-oss-hadoop-fs    File: GenSort.java
public static void outputRecords(OutputStream out,
                                 boolean useAscii,
                                 Unsigned16 firstRecordNumber,
                                 Unsigned16 recordsToGenerate,
                                 Unsigned16 checksum
                                 ) throws IOException {
  byte[] row = new byte[100];
  Unsigned16 recordNumber = new Unsigned16(firstRecordNumber);
  Unsigned16 lastRecordNumber = new Unsigned16(firstRecordNumber);
  Checksum crc = new PureJavaCrc32();
  Unsigned16 tmp = new Unsigned16();
  lastRecordNumber.add(recordsToGenerate);
  Unsigned16 ONE = new Unsigned16(1);
  Unsigned16 rand = Random16.skipAhead(firstRecordNumber);
  while (!recordNumber.equals(lastRecordNumber)) {
    Random16.nextRand(rand);
    if (useAscii) {
      generateAsciiRecord(row, rand, recordNumber);
    } else {
      generateRecord(row, rand, recordNumber);
    }
    if (checksum != null) {
      crc.reset();
      crc.update(row, 0, row.length);
      tmp.set(crc.getValue());
      checksum.add(tmp);
    }
    recordNumber.add(ONE);
    out.write(row);
  }
}
Project: big-c    File: GenSort.java
public static void outputRecords(OutputStream out,
                                 boolean useAscii,
                                 Unsigned16 firstRecordNumber,
                                 Unsigned16 recordsToGenerate,
                                 Unsigned16 checksum
                                 ) throws IOException {
  byte[] row = new byte[100];
  Unsigned16 recordNumber = new Unsigned16(firstRecordNumber);
  Unsigned16 lastRecordNumber = new Unsigned16(firstRecordNumber);
  Checksum crc = new PureJavaCrc32();
  Unsigned16 tmp = new Unsigned16();
  lastRecordNumber.add(recordsToGenerate);
  Unsigned16 ONE = new Unsigned16(1);
  Unsigned16 rand = Random16.skipAhead(firstRecordNumber);
  while (!recordNumber.equals(lastRecordNumber)) {
    Random16.nextRand(rand);
    if (useAscii) {
      generateAsciiRecord(row, rand, recordNumber);
    } else {
      generateRecord(row, rand, recordNumber);
    }
    if (checksum != null) {
      crc.reset();
      crc.update(row, 0, row.length);
      tmp.set(crc.getValue());
      checksum.add(tmp);
    }
    recordNumber.add(ONE);
    out.write(row);
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: GenSort.java
public static void outputRecords(OutputStream out,
                                 boolean useAscii,
                                 Unsigned16 firstRecordNumber,
                                 Unsigned16 recordsToGenerate,
                                 Unsigned16 checksum
                                 ) throws IOException {
  byte[] row = new byte[100];
  Unsigned16 recordNumber = new Unsigned16(firstRecordNumber);
  Unsigned16 lastRecordNumber = new Unsigned16(firstRecordNumber);
  Checksum crc = new PureJavaCrc32();
  Unsigned16 tmp = new Unsigned16();
  lastRecordNumber.add(recordsToGenerate);
  Unsigned16 ONE = new Unsigned16(1);
  Unsigned16 rand = Random16.skipAhead(firstRecordNumber);
  while (!recordNumber.equals(lastRecordNumber)) {
    Random16.nextRand(rand);
    if (useAscii) {
      generateAsciiRecord(row, rand, recordNumber);
    } else {
      generateRecord(row, rand, recordNumber);
    }
    if (checksum != null) {
      crc.reset();
      crc.update(row, 0, row.length);
      tmp.set(crc.getValue());
      checksum.add(tmp);
    }
    recordNumber.add(ONE);
    out.write(row);
  }
}
Project: hadoop-EAR    File: IFileOutputStream.java
/**
 * Create a checksum output stream that writes
 * the bytes to the given stream.
 * @param out
 */
public IFileOutputStream(OutputStream out) {
  super(out);
  sum = DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32,
      Integer.MAX_VALUE, new PureJavaCrc32());
  barray = new byte[sum.getChecksumSize()];
}
Project: hadoop-EAR    File: IFileInputStream.java
/**
 * Create a checksum input stream that reads
 * @param in The input stream to be verified for checksum.
 * @param len The length of the input stream including checksum bytes.
 */
public IFileInputStream(InputStream in, long len) {
  this.in = in;
  sum = DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 
      Integer.MAX_VALUE, new PureJavaCrc32());
  checksumSize = sum.getChecksumSize();
  length = len;
  dataLength = length - checksumSize;
}
Project: hadoop-EAR    File: BlockReaderAccelerator.java
/**
 * object constructor
 */
public BlockReaderAccelerator(
  Configuration conf,
  InetSocketAddress targetAddress,
  DatanodeInfo chosenNode, 
  int dataTransferVersion,
  int namespaceId,
  String clientName,
  Socket sock,
  String hdfsfile, 
  LocatedBlock blk,
  long startOffset, 
  long length,
  boolean verifyChecksum,
  DFSClientMetrics metrics) throws IOException {

  this.conf = conf;
  this.targetAddress = targetAddress;
  this.datanodeInfo = chosenNode;
  this.dataTransferVersion = dataTransferVersion;
  this.namespaceId = namespaceId;
  this.clientName = clientName;
  this.sock = sock;
  this.hdfsfile = hdfsfile;
  this.blk = blk;
  this.startOffset = startOffset;
  this.length = length;
  this.verifyChecksum = verifyChecksum;
  this.metrics = metrics;

  // create a checksum checker
  if (this.verifyChecksum) {
    this.checker = new PureJavaCrc32();
  }
}
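A closing note on interchangeability: because PureJavaCrc32 implements java.util.zip.Checksum, it can stand in wherever java.util.zip.CRC32 is accepted, and the two yield identical values for identical input. A quick sketch confirming that property:

// Sketch: PureJavaCrc32 and java.util.zip.CRC32 agree on the same input.
byte[] data = "checksum me".getBytes();
java.util.zip.CRC32 jdk = new java.util.zip.CRC32();
PureJavaCrc32 pure = new PureJavaCrc32();
jdk.update(data, 0, data.length);
pure.update(data, 0, data.length);
assert jdk.getValue() == pure.getValue();  // same CRC-32 polynomial and result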