Java class org.apache.hadoop.fs.ChecksumFileSystem: example source code

Project: hadoop-EAR    File: TestFSInputChecker.java    (an identical method also appears in hadoop-on-lustre, RDFS, hadoop-0.20, hortonworks-extension, and hadoop-gpu)
/**
 * Tests read/seek/getPos/skip operations for an input stream.
 */
private void testChecker(ChecksumFileSystem fileSys, boolean readCS)
throws Exception {
  Path file = new Path("try.dat");
  if (readCS) {
    writeFile(fileSys, file);
  } else {
    writeFile(fileSys.getRawFileSystem(), file);
  }
  stm = fileSys.open(file);
  checkReadAndGetPos();
  checkSeek();
  checkSkip();
  // mark/reset is not supported by checksum input streams
  assertFalse(stm.markSupported());
  stm.close();
  cleanupFile(fileSys, file);
}
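The method above writes the file either through the checksum layer or through the raw filesystem, then reads it back through the checksum layer. Below is a minimal, self-contained sketch of the same idea on the local filesystem; the file name demo.dat and the byte values are illustrative assumptions, not part of the original tests.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

public class ChecksumReadDemo {
  public static void main(String[] args) throws Exception {
    // LocalFileSystem extends ChecksumFileSystem
    LocalFileSystem fs = FileSystem.getLocal(new Configuration());
    Path file = new Path("demo.dat");  // hypothetical file name
    try (FSDataOutputStream out = fs.create(file, true)) {
      out.write(new byte[]{1, 2, 3, 4});  // also writes a demo.dat.crc side file
    }
    try (FSDataInputStream in = fs.open(file)) {
      System.out.println("first byte (verified): " + in.read());
    }
    fs.setVerifyChecksum(false);  // subsequent opens skip checksum verification
    try (FSDataInputStream in = fs.open(file)) {
      System.out.println("first byte (unverified): " + in.read());
    }
    fs.delete(file, false);
  }
}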
Project: hadoop    File: TestSeekBug.java    (an identical method also appears in aliyun-oss-hadoop-fs, big-c, hadoop-2.6.0-cdh5.4.3, hadoop-EAR, hadoop-plus, FlexMap, hops (there also as TestSmallFileSeek.java), hadoop-TCP, hadoop-on-lustre, hardfs, hadoop-on-lustre2, cumulus, RDFS, hadoop-0.20, hortonworks-extension, and hadoop-gpu)
private void smallReadSeek(FileSystem fileSys, Path name) throws IOException {
  if (fileSys instanceof ChecksumFileSystem) {
    fileSys = ((ChecksumFileSystem)fileSys).getRawFileSystem();
  }
  // Make the buffer size small to trigger code for HADOOP-922
  FSDataInputStream stmRaw = fileSys.open(name, 1);
  byte[] expected = new byte[ONEMB];
  Random rand = new Random(seed);
  rand.nextBytes(expected);

  // Issue a simple read first.
  byte[] actual = new byte[128];
  stmRaw.seek(100000);
  stmRaw.read(actual, 0, actual.length);
  checkAndEraseData(actual, 100000, expected, "First Small Read Test");

  // now do a small seek of 4 bytes, within the same block.
  int newpos1 = 100000 + 128 + 4;
  stmRaw.seek(newpos1);
  stmRaw.read(actual, 0, actual.length);
  checkAndEraseData(actual, newpos1, expected, "Small Seek Bug 1");

  // seek another 256 bytes this time
  int newpos2 = newpos1 + 256;
  stmRaw.seek(newpos2);
  stmRaw.read(actual, 0, actual.length);
  checkAndEraseData(actual, newpos2, expected, "Small Seek Bug 2");

  // all done
  stmRaw.close();
}
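smallReadSeek opens the raw stream with a one-byte buffer to exercise the HADOOP-922 code path, then interleaves short forward seeks with 128-byte reads. Below is a hedged, standalone sketch of that seek-then-read pattern on the raw local filesystem; the file name, sizes, and offsets are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SmallBufferSeekDemo {
  public static void main(String[] args) throws Exception {
    // unwrap the checksum layer so reads go straight to the raw file
    FileSystem rawFs = FileSystem.getLocal(new Configuration()).getRawFileSystem();
    Path file = new Path("seek-demo.dat");  // hypothetical file name
    byte[] data = new byte[4096];
    new java.util.Random(42).nextBytes(data);
    try (FSDataOutputStream out = rawFs.create(file, true)) {
      out.write(data);
    }
    byte[] buf = new byte[128];
    try (FSDataInputStream in = rawFs.open(file, 1)) {  // 1-byte buffer
      in.seek(1000);
      in.readFully(buf);                 // read spans many buffer refills
      in.seek(1000 + 128 + 4);           // short forward seek, as in the test
      in.readFully(buf);
      System.out.println("final pos: " + in.getPos());
    }
    rawFs.delete(file, false);
  }
}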
Project: hadoop-EAR    File: TestFSInputChecker.java    (an identical method also appears in hadoop-on-lustre, RDFS, hadoop-0.20, hortonworks-extension, and hadoop-gpu)
public void testFSInputChecker() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong("dfs.block.size", BLOCK_SIZE);
  conf.setInt("io.bytes.per.checksum", BYTES_PER_SUM);
  conf.set("fs.hdfs.impl",
           "org.apache.hadoop.hdfs.ChecksumDistributedFileSystem");
  rand.nextBytes(expected);

  // test DFS
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
  ChecksumFileSystem fileSys = (ChecksumFileSystem)cluster.getFileSystem();
  try {
    testChecker(fileSys, true);
    testChecker(fileSys, false);
    testSeekAndRead(fileSys);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }

  // test Local FS
  fileSys = FileSystem.getLocal(conf);
  try {
    testChecker(fileSys, true);
    testChecker(fileSys, false);
    testFileCorruption((LocalFileSystem)fileSys);
    testSeekAndRead(fileSys);
  } finally {
    fileSys.close();
  }
}
Project: hadoop-EAR    File: TestFSInputChecker.java    (an identical method also appears in hadoop-on-lustre, RDFS, hadoop-0.20, and hortonworks-extension)
private void testSeekAndRead(ChecksumFileSystem fileSys)
throws IOException {
  Path file = new Path("try.dat");
  writeFile(fileSys, file);
  stm = fileSys.open(file,
      fileSys.getConf().getInt("io.file.buffer.size", 4096));
  checkSeekAndRead();
  stm.close();
  cleanupFile(fileSys, file);
}
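testSeekAndRead opens the stream with the buffer size taken from configuration rather than a hard-coded value. Below is a small sketch of that lookup, reusing the io.file.buffer.size key and 4096 default from the snippet above; it assumes try.dat already exists (for example, written by one of the tests).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

public class BufferedOpenDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    LocalFileSystem fs = FileSystem.getLocal(conf);
    // same key and default as the test above
    int bufferSize = conf.getInt("io.file.buffer.size", 4096);
    try (FSDataInputStream in = fs.open(new Path("try.dat"), bufferSize)) {
      in.seek(0);
      System.out.println("read " + in.read() + " with buffer size " + bufferSize);
    }
  }
}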
Project: parquet-mr    File: BaseCommand.java
private FSDataOutputStream create(String filename, boolean noChecksum)
    throws IOException {
  Path filePath = qualifiedPath(filename);
  // even though it was qualified using the default FS, it may not be in it
  FileSystem fs = filePath.getFileSystem(getConf());
  if (noChecksum && fs instanceof ChecksumFileSystem) {
    fs = ((ChecksumFileSystem) fs).getRawFileSystem();
  }
  return fs.create(filePath, true /* overwrite */);
}
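create bypasses checksum generation on write by unwrapping the ChecksumFileSystem before calling create. Below is a minimal sketch of the same unwrap-then-write pattern on the local filesystem; the path and payload are illustrative assumptions, not parquet-mr code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

public class RawWriteExample {
  public static void main(String[] args) throws Exception {
    LocalFileSystem localFs = FileSystem.getLocal(new Configuration());
    FileSystem rawFs = localFs.getRawFileSystem();  // writes produce no .crc side file
    Path p = new Path("/tmp/raw-write-example.dat");  // hypothetical path
    try (FSDataOutputStream out = rawFs.create(p, true /* overwrite */)) {
      out.writeUTF("written without a checksum side file");
    }
  }
}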