Usage examples for the Java class org.apache.hadoop.fs.contract.ContractTestUtils
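ContractTestUtils is the helper class used by the Hadoop filesystem contract tests to generate test data, create and verify files, and assert on path state against a FileSystem. The snippets below show how different projects call it. As orientation, here is a minimal sketch of typical usage; it is not taken from any of the projects below, uses only methods that appear in the following examples, and assumes an already initialized FileSystem instance and a writable test path.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.ContractTestUtils;

public static void contractTestUtilsSketch(FileSystem fs) throws IOException {
  // Generate deterministic test data: 1 KB of bytes cycling through 'a'..'z'.
  byte[] data = ContractTestUtils.dataset(1024, 'a', 'z');
  Path file = new Path("/test/example.txt");
  // Write the file (overwriting if present), then read it back and compare.
  ContractTestUtils.createFile(fs, file, true, data);
  ContractTestUtils.verifyFileContents(fs, file, data);
  ContractTestUtils.assertPathExists(fs, "test file was not created", file);
  // Delete the file (non-recursively) and assert that the path is gone.
  ContractTestUtils.assertDeleted(fs, file, false, false);
}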

Project: cloudup    File: CloudupTestUtils.java
public static int createTestFiles(File sourceDir, int size)
    throws IOException {
  File subdir = new File(sourceDir, "subdir");
  int expected = 0;
  mkdirs(subdir);
  File top = new File(sourceDir, "top");
  FileUtils.write(top, "toplevel");
  expected++;
  for (int i = 0; i < size; i++) {
    String text = String.format("file-%02d", i);
    File f = new File(subdir, text);
    FileUtils.write(f, f.toString());
  }
  expected += size;

  // and write the largest file
  File largest = new File(subdir, "largest");
  FileUtils.writeByteArrayToFile(largest,
      ContractTestUtils.dataset(8192, 32, 64));
  expected++;
  return expected;
}
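A hedged usage sketch for the helper above; the JUnit TemporaryFolder rule named temp and the copy step are assumptions for illustration, not part of the original snippet.

// Hypothetical JUnit 4 usage of createTestFiles().
File sourceDir = temp.newFolder("source");
int expected = CloudupTestUtils.createTestFiles(sourceDir, 10);
// ... run the upload/copy under test, then assert that "expected" files arrived ...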
Project: hadoop    File: TestS3ADeleteManyFiles.java
@Test
public void testOpenCreate() throws IOException {
  Path dir = new Path("/tests3a");
  ContractTestUtils.createAndVerifyFile(fs, dir, 1024);
  ContractTestUtils.createAndVerifyFile(fs, dir, 5 * 1024 * 1024);
  ContractTestUtils.createAndVerifyFile(fs, dir, 20 * 1024 * 1024);


  /*
  Enable to test the multipart upload
  try {
    ContractTestUtils.createAndVerifyFile(fs, dir,
        (long)6 * 1024 * 1024 * 1024);
  } catch (IOException e) {
    fail(e.getMessage());
  }
  */
}
Project: aliyun-oss-hadoop-fs    File: TestS3ADeleteManyFiles.java
@Test
public void testOpenCreate() throws IOException {
  Path dir = new Path("/tests3a");
  ContractTestUtils.createAndVerifyFile(fs, dir, 1024);
  ContractTestUtils.createAndVerifyFile(fs, dir, 5 * 1024 * 1024);
  ContractTestUtils.createAndVerifyFile(fs, dir, 20 * 1024 * 1024);


  /*
  Enable to test the multipart upload
  try {
    ContractTestUtils.createAndVerifyFile(fs, dir,
        (long)6 * 1024 * 1024 * 1024);
  } catch (IOException e) {
    fail(e.getMessage());
  }
  */
}
Project: big-c    File: TestS3ADeleteManyFiles.java
@Test
public void testOpenCreate() throws IOException {
  Path dir = new Path("/tests3a");
  ContractTestUtils.createAndVerifyFile(fs, dir, 1024);
  ContractTestUtils.createAndVerifyFile(fs, dir, 5 * 1024 * 1024);
  ContractTestUtils.createAndVerifyFile(fs, dir, 20 * 1024 * 1024);


  /*
  Enable to test the multipart upload
  try {
    ContractTestUtils.createAndVerifyFile(fs, dir,
        (long)6 * 1024 * 1024 * 1024);
  } catch (IOException e) {
    fail(e.getMessage());
  }
  */
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestS3ADeleteManyFiles.java
@Test
public void testOpenCreate() throws IOException {
  Path dir = new Path("/tests3a");
  ContractTestUtils.createAndVerifyFile(fs, dir, 1024);
  ContractTestUtils.createAndVerifyFile(fs, dir, 5 * 1024 * 1024);
  ContractTestUtils.createAndVerifyFile(fs, dir, 20 * 1024 * 1024);


  /*
  Enable to test the multipart upload
  try {
    ContractTestUtils.createAndVerifyFile(fs, dir,
        (long)6 * 1024 * 1024 * 1024);
  } catch (IOException e) {
    fail(e.getMessage());
  }
  */
}
Project: hops    File: AbstractContractDistCpTest.java
/**
 * Executes a test using a file system sub-tree with multiple nesting levels.
 *
 * @param srcFS source FileSystem
 * @param srcDir source directory
 * @param dstFS destination FileSystem
 * @param dstDir destination directory
 * @throws Exception if there is a failure
 */
private void deepDirectoryStructure(FileSystem srcFS, Path srcDir,
    FileSystem dstFS, Path dstDir) throws Exception {
  Path inputDir = new Path(srcDir, "inputDir");
  Path inputSubDir1 = new Path(inputDir, "subDir1");
  Path inputSubDir2 = new Path(inputDir, "subDir2/subDir3");
  Path inputFile1 = new Path(inputDir, "file1");
  Path inputFile2 = new Path(inputSubDir1, "file2");
  Path inputFile3 = new Path(inputSubDir2, "file3");
  mkdirs(srcFS, inputSubDir1);
  mkdirs(srcFS, inputSubDir2);
  byte[] data1 = dataset(100, 33, 43);
  createFile(srcFS, inputFile1, true, data1);
  byte[] data2 = dataset(200, 43, 53);
  createFile(srcFS, inputFile2, true, data2);
  byte[] data3 = dataset(300, 53, 63);
  createFile(srcFS, inputFile3, true, data3);
  Path target = new Path(dstDir, "outputDir");
  runDistCp(inputDir, target);
  ContractTestUtils.assertIsDirectory(dstFS, target);
  verifyFileContents(dstFS, new Path(target, "inputDir/file1"), data1);
  verifyFileContents(dstFS,
      new Path(target, "inputDir/subDir1/file2"), data2);
  verifyFileContents(dstFS,
      new Path(target, "inputDir/subDir2/subDir3/file3"), data3);
}
Project: hops    File: AbstractContractDistCpTest.java
/**
 * Executes a test using multiple large files.
 *
 * @param srcFS source FileSystem
 * @param srcDir source directory
 * @param dstFS destination FileSystem
 * @param dstDir destination directory
 * @throws Exception if there is a failure
 */
private void largeFiles(FileSystem srcFS, Path srcDir, FileSystem dstFS,
    Path dstDir) throws Exception {
  Path inputDir = new Path(srcDir, "inputDir");
  Path inputFile1 = new Path(inputDir, "file1");
  Path inputFile2 = new Path(inputDir, "file2");
  Path inputFile3 = new Path(inputDir, "file3");
  mkdirs(srcFS, inputDir);
  int fileSizeKb = conf.getInt("scale.test.distcp.file.size.kb", 10 * 1024);
  int fileSizeMb = fileSizeKb / 1024;
  getLog().info("{} with file size {}", testName.getMethodName(), fileSizeMb);
  byte[] data1 = dataset((fileSizeMb + 1) * 1024 * 1024, 33, 43);
  createFile(srcFS, inputFile1, true, data1);
  byte[] data2 = dataset((fileSizeMb + 2) * 1024 * 1024, 43, 53);
  createFile(srcFS, inputFile2, true, data2);
  byte[] data3 = dataset((fileSizeMb + 3) * 1024 * 1024, 53, 63);
  createFile(srcFS, inputFile3, true, data3);
  Path target = new Path(dstDir, "outputDir");
  runDistCp(inputDir, target);
  ContractTestUtils.assertIsDirectory(dstFS, target);
  verifyFileContents(dstFS, new Path(target, "inputDir/file1"), data1);
  verifyFileContents(dstFS, new Path(target, "inputDir/file2"), data2);
  verifyFileContents(dstFS, new Path(target, "inputDir/file3"), data3);
}
Project: hadoop-oss    File: LocalFSContract.java
/**
 *  tweak some of the contract parameters based on the local system
 *  state
 */
protected void adjustContractToLocalEnvironment() {
  if (Shell.WINDOWS) {
    //NTFS doesn't do case sensitivity, and its permissions are ACL-based
    getConf().setBoolean(getConfKey(ContractOptions.IS_CASE_SENSITIVE), false);
    getConf().setBoolean(getConfKey(ContractOptions.SUPPORTS_UNIX_PERMISSIONS), false);
  } else if (ContractTestUtils.isOSX()) {
    //OSX HFS+ is not case sensitive
    getConf().setBoolean(getConfKey(ContractOptions.IS_CASE_SENSITIVE),
                         false);
  }
}
Project: hadoop-oss    File: TestRawlocalContractRename.java
/**
 * Test the fallback rename code <code>handleEmptyDstDirectoryOnWindows()</code>
 * even on non-Windows platforms, where the normal <code>File.renameTo()</code>
 * is expected to work. This test was added for HADOOP-9805.
 * 
 * @see AbstractContractRenameTest#testRenameWithNonEmptySubDirPOSIX()
 */
@Test
public void testRenameWithNonEmptySubDirPOSIX() throws Throwable {
  final Path renameTestDir = path("testRenameWithNonEmptySubDir");
  final Path srcDir = new Path(renameTestDir, "src1");
  final Path srcSubDir = new Path(srcDir, "sub");
  final Path finalDir = new Path(renameTestDir, "dest");
  FileSystem fs = getFileSystem();
  ContractTestUtils.rm(fs, renameTestDir, true, false);

  fs.mkdirs(srcDir);
  fs.mkdirs(finalDir);
  ContractTestUtils.writeTextFile(fs, new Path(srcDir, "source.txt"),
      "this is the file in src dir", false);
  ContractTestUtils.writeTextFile(fs, new Path(srcSubDir, "subfile.txt"),
      "this is the file in src/sub dir", false);

  ContractTestUtils.assertPathExists(fs, "not created in src dir",
      new Path(srcDir, "source.txt"));
  ContractTestUtils.assertPathExists(fs, "not created in src/sub dir",
      new Path(srcSubDir, "subfile.txt"));

  RawLocalFileSystem rlfs = (RawLocalFileSystem) fs;
  rlfs.handleEmptyDstDirectoryOnWindows(srcDir, rlfs.pathToFile(srcDir),
      finalDir, rlfs.pathToFile(finalDir));

  // Accept only POSIX rename behavior in this test
  ContractTestUtils.assertPathExists(fs, "not renamed into dest dir",
      new Path(finalDir, "source.txt"));
  ContractTestUtils.assertPathExists(fs, "not renamed into dest/sub dir",
      new Path(finalDir, "sub/subfile.txt"));

  ContractTestUtils.assertPathDoesNotExist(fs, "not deleted",
      new Path(srcDir, "source.txt"));
}
Project: hadoop    File: TestHDFSContractAppend.java
@Override
public void testRenameFileBeingAppended() throws Throwable {
  try {
    super.testRenameFileBeingAppended();
    fail("Expected a FileNotFoundException");
  } catch (FileNotFoundException e) {
    // downgrade
    ContractTestUtils.downgrade("Renaming an open file " +
                                "still creates the old path", e);

  }
}
Project: hadoop    File: LocalFSContract.java
/**
 *  tweak some of the contract parameters based on the local system
 *  state
 */
protected void adjustContractToLocalEnvironment() {
  if (Shell.WINDOWS) {
    //NTFS doesn't do case sensitivity, and its permissions are ACL-based
    getConf().setBoolean(getConfKey(ContractOptions.IS_CASE_SENSITIVE), false);
    getConf().setBoolean(getConfKey(ContractOptions.SUPPORTS_UNIX_PERMISSIONS), false);
  } else if (ContractTestUtils.isOSX()) {
    //OSX HFS+ is not case sensitive
    getConf().setBoolean(getConfKey(ContractOptions.IS_CASE_SENSITIVE),
                         false);
  }
}
Project: aliyun-oss-hadoop-fs    File: TestS3ABlockingThreadPool.java
@Test
public void testFastMultiPartUpload() throws Exception {
  conf.setBoolean(Constants.FAST_UPLOAD, true);
  fs = S3ATestUtils.createTestFileSystem(conf);
  ContractTestUtils.createAndVerifyFile(fs, getTestPath(), 16 * 1024 *
      1024);

}
Project: aliyun-oss-hadoop-fs    File: LocalFSContract.java
/**
 *  tweak some of the contract parameters based on the local system
 *  state
 */
protected void adjustContractToLocalEnvironment() {
  if (Shell.WINDOWS) {
    //NTFS doesn't do case sensitivity, and its permissions are ACL-based
    getConf().setBoolean(getConfKey(ContractOptions.IS_CASE_SENSITIVE), false);
    getConf().setBoolean(getConfKey(ContractOptions.SUPPORTS_UNIX_PERMISSIONS), false);
  } else if (ContractTestUtils.isOSX()) {
    //OSX HFS+ is not case sensitive
    getConf().setBoolean(getConfKey(ContractOptions.IS_CASE_SENSITIVE),
                         false);
  }
}
Project: aliyun-oss-hadoop-fs    File: TestRawlocalContractRename.java
/**
 * Test the fallback rename code <code>handleEmptyDstDirectoryOnWindows()</code>
 * even on non-Windows platforms, where the normal <code>File.renameTo()</code>
 * is expected to work. This test was added for HADOOP-9805.
 * 
 * @see AbstractContractRenameTest#testRenameWithNonEmptySubDirPOSIX()
 */
@Test
public void testRenameWithNonEmptySubDirPOSIX() throws Throwable {
  final Path renameTestDir = path("testRenameWithNonEmptySubDir");
  final Path srcDir = new Path(renameTestDir, "src1");
  final Path srcSubDir = new Path(srcDir, "sub");
  final Path finalDir = new Path(renameTestDir, "dest");
  FileSystem fs = getFileSystem();
  ContractTestUtils.rm(fs, renameTestDir, true, false);

  fs.mkdirs(srcDir);
  fs.mkdirs(finalDir);
  ContractTestUtils.writeTextFile(fs, new Path(srcDir, "source.txt"),
      "this is the file in src dir", false);
  ContractTestUtils.writeTextFile(fs, new Path(srcSubDir, "subfile.txt"),
      "this is the file in src/sub dir", false);

  ContractTestUtils.assertPathExists(fs, "not created in src dir",
      new Path(srcDir, "source.txt"));
  ContractTestUtils.assertPathExists(fs, "not created in src/sub dir",
      new Path(srcSubDir, "subfile.txt"));

  RawLocalFileSystem rlfs = (RawLocalFileSystem) fs;
  rlfs.handleEmptyDstDirectoryOnWindows(srcDir, rlfs.pathToFile(srcDir),
      finalDir, rlfs.pathToFile(finalDir));

  // Accept only POSIX rename behavior in this test
  ContractTestUtils.assertPathExists(fs, "not renamed into dest dir",
      new Path(finalDir, "source.txt"));
  ContractTestUtils.assertPathExists(fs, "not renamed into dest/sub dir",
      new Path(finalDir, "sub/subfile.txt"));

  ContractTestUtils.assertPathDoesNotExist(fs, "not deleted",
      new Path(srcDir, "source.txt"));
}
Project: big-c    File: TestHDFSContractAppend.java
@Override
public void testRenameFileBeingAppended() throws Throwable {
  try {
    super.testRenameFileBeingAppended();
    fail("Expected a FileNotFoundException");
  } catch (FileNotFoundException e) {
    // downgrade
    ContractTestUtils.downgrade("Renaming an open file " +
                                "still creates the old path", e);

  }
}
Project: big-c    File: LocalFSContract.java
/**
 *  tweak some of the contract parameters based on the local system
 *  state
 */
protected void adjustContractToLocalEnvironment() {
  if (Shell.WINDOWS) {
    //NTFS doesn't do case sensitivity, and its permissions are ACL-based
    getConf().setBoolean(getConfKey(ContractOptions.IS_CASE_SENSITIVE), false);
    getConf().setBoolean(getConfKey(ContractOptions.SUPPORTS_UNIX_PERMISSIONS), false);
  } else if (ContractTestUtils.isOSX()) {
    //OSX HFS+ is not case sensitive
    getConf().setBoolean(getConfKey(ContractOptions.IS_CASE_SENSITIVE),
                         false);
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestHDFSContractAppend.java
@Override
public void testRenameFileBeingAppended() throws Throwable {
  try {
    super.testRenameFileBeingAppended();
    fail("Expected a FileNotFoundException");
  } catch (FileNotFoundException e) {
    // downgrade
    ContractTestUtils.downgrade("Renaming an open file " +
                                "still creates the old path", e);

  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: LocalFSContract.java
/**
 *  tweak some of the contract parameters based on the local system
 *  state
 */
protected void adjustContractToLocalEnvironment() {
  if (Shell.WINDOWS) {
    //NTFS doesn't do case sensitivity, and its permissions are ACL-based
    getConf().setBoolean(getConfKey(ContractOptions.IS_CASE_SENSITIVE), false);
    getConf().setBoolean(getConfKey(ContractOptions.SUPPORTS_UNIX_PERMISSIONS), false);
  } else if (ContractTestUtils.isOSX()) {
    //OSX HFS+ is not case sensitive
    getConf().setBoolean(getConfKey(ContractOptions.IS_CASE_SENSITIVE),
                         false);
  }
}
Project: FlexMap    File: TestHDFSContractAppend.java
@Override
public void testRenameFileBeingAppended() throws Throwable {
  try {
    super.testRenameFileBeingAppended();
    fail("Expected a FileNotFoundException");
  } catch (FileNotFoundException e) {
    // downgrade
    ContractTestUtils.downgrade("Renaming an open file " +
                                "still creates the old path", e);

  }
}
Project: hops    File: LocalFSContract.java
/**
 *  tweak some of the contract parameters based on the local system
 *  state
 */
protected void adjustContractToLocalEnvironment() {
  if (Shell.WINDOWS) {
    //NTFS doesn't do case sensitivity, and its permissions are ACL-based
    getConf().setBoolean(getConfKey(ContractOptions.IS_CASE_SENSITIVE), false);
    getConf().setBoolean(getConfKey(ContractOptions.SUPPORTS_UNIX_PERMISSIONS), false);
  } else if (ContractTestUtils.isOSX()) {
    //OSX HFS+ is not case sensitive
    getConf().setBoolean(getConfKey(ContractOptions.IS_CASE_SENSITIVE),
                         false);
  }
}
Project: hops    File: TestRawlocalContractRename.java
/**
 * Test the fallback rename code <code>handleEmptyDstDirectoryOnWindows()</code>
 * even on non-Windows platforms, where the normal <code>File.renameTo()</code>
 * is expected to work. This test was added for HADOOP-9805.
 * 
 * @see AbstractContractRenameTest#testRenameWithNonEmptySubDirPOSIX()
 */
@Test
public void testRenameWithNonEmptySubDirPOSIX() throws Throwable {
  final Path renameTestDir = path("testRenameWithNonEmptySubDir");
  final Path srcDir = new Path(renameTestDir, "src1");
  final Path srcSubDir = new Path(srcDir, "sub");
  final Path finalDir = new Path(renameTestDir, "dest");
  FileSystem fs = getFileSystem();
  ContractTestUtils.rm(fs, renameTestDir, true, false);

  fs.mkdirs(srcDir);
  fs.mkdirs(finalDir);
  ContractTestUtils.writeTextFile(fs, new Path(srcDir, "source.txt"),
      "this is the file in src dir", false);
  ContractTestUtils.writeTextFile(fs, new Path(srcSubDir, "subfile.txt"),
      "this is the file in src/sub dir", false);

  ContractTestUtils.assertPathExists(fs, "not created in src dir",
      new Path(srcDir, "source.txt"));
  ContractTestUtils.assertPathExists(fs, "not created in src/sub dir",
      new Path(srcSubDir, "subfile.txt"));

  RawLocalFileSystem rlfs = (RawLocalFileSystem) fs;
  rlfs.handleEmptyDstDirectoryOnWindows(srcDir, rlfs.pathToFile(srcDir),
      finalDir, rlfs.pathToFile(finalDir));

  // Accept only POSIX rename behavior in this test
  ContractTestUtils.assertPathExists(fs, "not renamed into dest dir",
      new Path(finalDir, "source.txt"));
  ContractTestUtils.assertPathExists(fs, "not renamed into dest/sub dir",
      new Path(finalDir, "sub/subfile.txt"));

  ContractTestUtils.assertPathDoesNotExist(fs, "not deleted",
      new Path(srcDir, "source.txt"));
}
Project: cephfs-hadoop    File: CephFSContract.java
/**
 *  tweak some of the contract parameters based on the local system
 *  state
 */
protected void adjustContractToLocalEnvironment() {
  if (Shell.WINDOWS) {
    //NTFS doesn't do case sensitivity, and its permissions are ACL-based
    getConf().setBoolean(getConfKey(ContractOptions.IS_CASE_SENSITIVE), false);
    getConf().setBoolean(getConfKey(ContractOptions.SUPPORTS_UNIX_PERMISSIONS), false);
  } else if (ContractTestUtils.isOSX()) {
    //OSX HFS+ is not case sensitive
    getConf().setBoolean(getConfKey(ContractOptions.IS_CASE_SENSITIVE),
                         false);
  }
}
Project: hadoop    File: TestSwiftContractOpen.java
@Override
public void testOpenReadDir() throws Throwable {
  ContractTestUtils.skip("Skipping object-store quirk");
}
Project: hadoop    File: TestSwiftContractOpen.java
@Override
public void testOpenReadDirWithChild() throws Throwable {
  ContractTestUtils.skip("Skipping object-store quirk");
}
Project: hadoop    File: TestSwiftContractCreate.java
@Override
public void testOverwriteEmptyDirectory() throws Throwable {
  ContractTestUtils.skip("blobstores can't distinguish empty directories from files");
}
Project: hadoop    File: TestS3NContractCreate.java
@Override
public void testOverwriteEmptyDirectory() throws Throwable {
  ContractTestUtils.skip(
      "blobstores can't distinguish empty directories from files");
}
Project: hadoop    File: TestS3AContractCreate.java
@Override
public void testOverwriteEmptyDirectory() throws Throwable {
  ContractTestUtils.skip(
      "blobstores can't distinguish empty directories from files");
}
Project: hadoop    File: TestS3AFastOutputStream.java
@Test
public void testRegularUpload() throws IOException {
  ContractTestUtils.createAndVerifyFile(fs, getTestPath(), 1024 * 1024);
}
Project: hadoop    File: TestS3AFastOutputStream.java
@Test
public void testMultiPartUpload() throws IOException {
  ContractTestUtils.createAndVerifyFile(fs, getTestPath(), 6 * 1024 *
      1024);
}
Project: hadoop    File: TestS3ADeleteManyFiles.java
@Test
public void testBulkRenameAndDelete() throws Throwable {
  final Path scaleTestDir = getTestPath();
  final Path srcDir = new Path(scaleTestDir, "src");
  final Path finalDir = new Path(scaleTestDir, "final");
  final long count = getOperationCount();
  ContractTestUtils.rm(fs, scaleTestDir, true, false);

  fs.mkdirs(srcDir);
  fs.mkdirs(finalDir);

  int testBufferSize = fs.getConf()
      .getInt(ContractTestUtils.IO_CHUNK_BUFFER_SIZE,
          ContractTestUtils.DEFAULT_IO_CHUNK_BUFFER_SIZE);
  // use Executor to speed up file creation
  ExecutorService exec = Executors.newFixedThreadPool(16);
  final ExecutorCompletionService<Boolean> completionService =
      new ExecutorCompletionService<Boolean>(exec);
  try {
    final byte[] data = ContractTestUtils.dataset(testBufferSize, 'a', 'z');

    for (int i = 0; i < count; ++i) {
      final String fileName = "foo-" + i;
      completionService.submit(new Callable<Boolean>() {
        @Override
        public Boolean call() throws IOException {
          ContractTestUtils.createFile(fs, new Path(srcDir, fileName),
              false, data);
          return fs.exists(new Path(srcDir, fileName));
        }
      });
    }
    for (int i = 0; i < count; ++i) {
      final Future<Boolean> future = completionService.take();
      try {
        if (!future.get()) {
          LOG.warn("cannot create file");
        }
      } catch (ExecutionException e) {
        LOG.warn("Error while uploading file", e.getCause());
        throw e;
      }
    }
  } finally {
    exec.shutdown();
  }

  int nSrcFiles = fs.listStatus(srcDir).length;
  fs.rename(srcDir, finalDir);
  assertEquals(nSrcFiles, fs.listStatus(finalDir).length);
  ContractTestUtils.assertPathDoesNotExist(fs, "not deleted after rename",
      new Path(srcDir, "foo-" + 0));
  ContractTestUtils.assertPathDoesNotExist(fs, "not deleted after rename",
      new Path(srcDir, "foo-" + count / 2));
  ContractTestUtils.assertPathDoesNotExist(fs, "not deleted after rename",
      new Path(srcDir, "foo-" + (count - 1)));
  ContractTestUtils.assertPathExists(fs, "not renamed to dest dir",
      new Path(finalDir, "foo-" + 0));
  ContractTestUtils.assertPathExists(fs, "not renamed to dest dir",
      new Path(finalDir, "foo-" + count/2));
  ContractTestUtils.assertPathExists(fs, "not renamed to dest dir",
      new Path(finalDir, "foo-" + (count-1)));

  ContractTestUtils.assertDeleted(fs, finalDir, true, false);
}
Project: hadoop    File: S3AScaleTestBase.java
@After
public void tearDown() throws Exception {
  ContractTestUtils.rm(fs, getTestPath(), true, true);
}
Project: aliyun-oss-hadoop-fs    File: TestSwiftContractOpen.java
@Override
public void testOpenReadDir() throws Throwable {
  ContractTestUtils.skip("Skipping object-store quirk");
}
Project: aliyun-oss-hadoop-fs    File: TestSwiftContractOpen.java
@Override
public void testOpenReadDirWithChild() throws Throwable {
  ContractTestUtils.skip("Skipping object-store quirk");
}
Project: aliyun-oss-hadoop-fs    File: TestSwiftContractCreate.java
@Override
public void testOverwriteEmptyDirectory() throws Throwable {
  ContractTestUtils.skip("blobstores can't distinguish empty directories from files");
}
Project: aliyun-oss-hadoop-fs    File: TestOSSContractCreate.java
@Override
public void testOverwriteEmptyDirectory() throws Throwable {
  ContractTestUtils.skip(
          "blobstores can't distinguish empty directories from files");
}
Project: aliyun-oss-hadoop-fs    File: TestS3NContractCreate.java
@Override
public void testOverwriteEmptyDirectory() throws Throwable {
  ContractTestUtils.skip(
      "blobstores can't distinguish empty directories from files");
}
Project: aliyun-oss-hadoop-fs    File: TestS3AContractCreate.java
@Override
public void testOverwriteEmptyDirectory() throws Throwable {
  ContractTestUtils.skip(
      "blobstores can't distinguish empty directories from files");
}
Project: aliyun-oss-hadoop-fs    File: TestS3AFastOutputStream.java
@Test
public void testRegularUpload() throws IOException {
  ContractTestUtils.createAndVerifyFile(fs, getTestPath(), 1024 * 1024);
}
Project: aliyun-oss-hadoop-fs    File: TestS3AFastOutputStream.java
@Test
public void testMultiPartUpload() throws IOException {
  ContractTestUtils.createAndVerifyFile(fs, getTestPath(), 6 * 1024 *
      1024);
}
Project: aliyun-oss-hadoop-fs    File: TestS3ADeleteManyFiles.java
@Test
public void testBulkRenameAndDelete() throws Throwable {
  final Path scaleTestDir = getTestPath();
  final Path srcDir = new Path(scaleTestDir, "src");
  final Path finalDir = new Path(scaleTestDir, "final");
  final long count = getOperationCount();
  ContractTestUtils.rm(fs, scaleTestDir, true, false);

  fs.mkdirs(srcDir);
  fs.mkdirs(finalDir);

  int testBufferSize = fs.getConf()
      .getInt(ContractTestUtils.IO_CHUNK_BUFFER_SIZE,
          ContractTestUtils.DEFAULT_IO_CHUNK_BUFFER_SIZE);
  // use Executor to speed up file creation
  ExecutorService exec = Executors.newFixedThreadPool(16);
  final ExecutorCompletionService<Boolean> completionService =
      new ExecutorCompletionService<Boolean>(exec);
  try {
    final byte[] data = ContractTestUtils.dataset(testBufferSize, 'a', 'z');

    for (int i = 0; i < count; ++i) {
      final String fileName = "foo-" + i;
      completionService.submit(new Callable<Boolean>() {
        @Override
        public Boolean call() throws IOException {
          ContractTestUtils.createFile(fs, new Path(srcDir, fileName),
              false, data);
          return fs.exists(new Path(srcDir, fileName));
        }
      });
    }
    for (int i = 0; i < count; ++i) {
      final Future<Boolean> future = completionService.take();
      try {
        if (!future.get()) {
          LOG.warn("cannot create file");
        }
      } catch (ExecutionException e) {
        LOG.warn("Error while uploading file", e.getCause());
        throw e;
      }
    }
  } finally {
    exec.shutdown();
  }

  int nSrcFiles = fs.listStatus(srcDir).length;
  fs.rename(srcDir, finalDir);
  assertEquals(nSrcFiles, fs.listStatus(finalDir).length);
  ContractTestUtils.assertPathDoesNotExist(fs, "not deleted after rename",
      new Path(srcDir, "foo-" + 0));
  ContractTestUtils.assertPathDoesNotExist(fs, "not deleted after rename",
      new Path(srcDir, "foo-" + count / 2));
  ContractTestUtils.assertPathDoesNotExist(fs, "not deleted after rename",
      new Path(srcDir, "foo-" + (count - 1)));
  ContractTestUtils.assertPathExists(fs, "not renamed to dest dir",
      new Path(finalDir, "foo-" + 0));
  ContractTestUtils.assertPathExists(fs, "not renamed to dest dir",
      new Path(finalDir, "foo-" + count/2));
  ContractTestUtils.assertPathExists(fs, "not renamed to dest dir",
      new Path(finalDir, "foo-" + (count-1)));

  ContractTestUtils.assertDeleted(fs, finalDir, true, false);
}