Java 类org.apache.hadoop.fs.Options.CreateOpts 实例源码

项目:hadoop-oss    文件:SequenceFile.java   
/**
 * Construct the preferred type of SequenceFile Writer.
 * @param fs The configured filesystem.
 * @param conf The configuration.
 * @param name The name of the file.
 * @param keyClass The 'key' type.
 * @param valClass The 'value' type.
 * @param bufferSize buffer size for the underlying outputstream.
 * @param replication replication factor for the file.
 * @param blockSize block size for the file.
 * @param createParent create parent directory if non-existent
 * @param compressionType The compression type.
 * @param codec The compression codec.
 * @param metadata The metadata of the file.
 * @return Returns the handle to the constructed SequenceFile Writer.
 * @throws IOException if the underlying file cannot be created.
 * @deprecated This overload merely delegates to the
 *             {@code FileContext}-based {@code createWriter}; prefer
 *             calling that overload directly.
 */
@Deprecated
public static Writer
createWriter(FileSystem fs, Configuration conf, Path name,
             Class keyClass, Class valClass, int bufferSize,
             short replication, long blockSize, boolean createParent,
             CompressionType compressionType, CompressionCodec codec,
             Metadata metadata) throws IOException {
  // CREATE | OVERWRITE is passed unconditionally: an existing file at
  // 'name' is always truncated and rewritten.
  return createWriter(FileContext.getFileContext(fs.getUri(), conf),
      conf, name, keyClass, valClass, compressionType, codec,
      metadata, EnumSet.of(CreateFlag.CREATE,CreateFlag.OVERWRITE),
      CreateOpts.bufferSize(bufferSize),
      createParent ? CreateOpts.createParent()
                   : CreateOpts.donotCreateParent(),
      CreateOpts.repFac(replication),
      CreateOpts.blockSize(blockSize)
    );
}
项目:hadoop-oss    文件:FileContextMainOperationsBaseTest.java   
@Test
/*
 * Test method
 *  org.apache.hadoop.fs.FileContext.getFileContext(AbstractFileSystem)
 */
public void testGetFileContext1() throws IOException {
  final Path rootPath = getTestRootPath(fc, "test");
  AbstractFileSystem asf = fc.getDefaultFileSystem();
  // create FileContext using the protected #getFileContext(1) method:
  FileContext fc2 = FileContext.getFileContext(asf);
  // Now just check that this context can do something reasonable:
  final Path path = new Path(rootPath, "zoo");
  FSDataOutputStream out = fc2.create(path, EnumSet.of(CREATE),
      Options.CreateOpts.createParent());
  out.close();
  Path pathResolved = fc2.resolvePath(path);
  // JUnit convention: expected value first, actual second, so a failure
  // message reads "expected <path> but was <resolved>".
  assertEquals(path.toUri().getPath(), pathResolved.toUri().getPath());
}
项目:hadoop-oss    文件:SymlinkBaseTest.java   
@Test(timeout=10000)
/** Try to create a file with parent that is a dangling link */
public void testCreateFileViaDanglingLinkParent() throws IOException {
  // "dangling" is a symlink to /doesNotExist, so it cannot act as the
  // parent directory of a new file.
  Path dir  = new Path(testBaseDir1()+"/dangling");
  Path file = new Path(testBaseDir1()+"/dangling/file");
  wrapper.createSymlink(new Path("/doesNotExist"), dir, false);
  FSDataOutputStream out;
  try {
    // No createParent option: resolution of the dangling parent must fail.
    out = wrapper.create(file, EnumSet.of(CreateFlag.CREATE),
                    CreateOpts.repFac((short) 1),
                    CreateOpts.blockSize(blockSize));
    out.close();
    fail("Created a link with dangling link parent");
  } catch (FileNotFoundException e) {
    // Expected. The parent is dangling.
  }
}
项目:hadoop-oss    文件:LoadGenerator.java   
/** Create a file with a length of <code>fileSize</code>.
 * The file is filled with 'a'.
 * @param file destination path; parent directories are created.
 * @param fileSize exact number of bytes to write.
 * @throws IOException if creation or writing fails.
 */
private void genFile(Path file, long fileSize) throws IOException {
  long startTime = Time.now();
  FSDataOutputStream out = null;
  try {
    out = fc.create(file,
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
        CreateOpts.createParent(), CreateOpts.bufferSize(4096),
        CreateOpts.repFac((short) 3));
    executionTime[CREATE] += (Time.now() - startTime);
    numOfOps[CREATE]++;

    long i = fileSize;
    while (i > 0) {
      // Cap each chunk by the REMAINING count, not fileSize; using
      // fileSize made the final chunk overshoot and write more than
      // the requested number of bytes.
      long s = Math.min(i, WRITE_CONTENTS.length);
      out.write(WRITE_CONTENTS, 0, (int) s);
      i -= s;
    }

    // Close inside the timed window; previously close() only ran in the
    // finally block, so WRITE_CLOSE always measured ~0.
    startTime = Time.now();
    out.close();
    executionTime[WRITE_CLOSE] += (Time.now() - startTime);
    numOfOps[WRITE_CLOSE]++;
  } finally {
    // No-op when already closed above; guarantees cleanup on failure.
    IOUtils.cleanup(LOG, out);
  }
}
项目:hadoop    文件:SequenceFile.java   
/**
 * Construct the preferred type of SequenceFile Writer.
 * @param fs The configured filesystem.
 * @param conf The configuration.
 * @param name The name of the file.
 * @param keyClass The 'key' type.
 * @param valClass The 'value' type.
 * @param bufferSize buffer size for the underlaying outputstream.
 * @param replication replication factor for the file.
 * @param blockSize block size for the file.
 * @param createParent create parent directory if non-existent
 * @param compressionType The compression type.
 * @param codec The compression codec.
 * @param metadata The metadata of the file.
 * @return Returns the handle to the constructed SequenceFile Writer.
 * @throws IOException
 */
@Deprecated
public static Writer
createWriter(FileSystem fs, Configuration conf, Path name,
             Class keyClass, Class valClass, int bufferSize,
             short replication, long blockSize, boolean createParent,
             CompressionType compressionType, CompressionCodec codec,
             Metadata metadata) throws IOException {
  return createWriter(FileContext.getFileContext(fs.getUri(), conf),
      conf, name, keyClass, valClass, compressionType, codec,
      metadata, EnumSet.of(CreateFlag.CREATE,CreateFlag.OVERWRITE),
      CreateOpts.bufferSize(bufferSize),
      createParent ? CreateOpts.createParent()
                   : CreateOpts.donotCreateParent(),
      CreateOpts.repFac(replication),
      CreateOpts.blockSize(blockSize)
    );
}
项目:hadoop    文件:FileContextMainOperationsBaseTest.java   
@Test
/*
 * Test method
 *  org.apache.hadoop.fs.FileContext.getFileContext(AbstractFileSystem)
 */
public void testGetFileContext1() throws IOException {
  final Path rootPath = getTestRootPath(fc, "test");
  AbstractFileSystem asf = fc.getDefaultFileSystem();
  // create FileContext using the protected #getFileContext(1) method:
  FileContext fc2 = FileContext.getFileContext(asf);
  // Now just check that this context can do something reasonable:
  final Path path = new Path(rootPath, "zoo");
  FSDataOutputStream out = fc2.create(path, EnumSet.of(CREATE),
      Options.CreateOpts.createParent());
  out.close();
  Path pathResolved = fc2.resolvePath(path);
  // JUnit convention: expected value first, actual second, so a failure
  // message reads "expected <path> but was <resolved>".
  assertEquals(path.toUri().getPath(), pathResolved.toUri().getPath());
}
项目:hadoop    文件:SymlinkBaseTest.java   
@Test(timeout=10000)
/** Try to create a file with parent that is a dangling link */
public void testCreateFileViaDanglingLinkParent() throws IOException {
  // "dangling" is a symlink to /doesNotExist, so it cannot act as the
  // parent directory of a new file.
  Path dir  = new Path(testBaseDir1()+"/dangling");
  Path file = new Path(testBaseDir1()+"/dangling/file");
  wrapper.createSymlink(new Path("/doesNotExist"), dir, false);
  FSDataOutputStream out;
  try {
    // No createParent option: resolution of the dangling parent must fail.
    out = wrapper.create(file, EnumSet.of(CreateFlag.CREATE),
                    CreateOpts.repFac((short) 1),
                    CreateOpts.blockSize(blockSize));
    out.close();
    fail("Created a link with dangling link parent");
  } catch (FileNotFoundException e) {
    // Expected. The parent is dangling.
  }
}
项目:hadoop    文件:LoadGenerator.java   
/** Create a file with a length of <code>fileSize</code>.
 * The file is filled with 'a'.
 * @param file destination path; parent directories are created.
 * @param fileSize exact number of bytes to write.
 * @throws IOException if creation or writing fails.
 */
private void genFile(Path file, long fileSize) throws IOException {
  long startTime = Time.now();
  FSDataOutputStream out = null;
  try {
    out = fc.create(file,
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
        CreateOpts.createParent(), CreateOpts.bufferSize(4096),
        CreateOpts.repFac((short) 3));
    executionTime[CREATE] += (Time.now() - startTime);
    numOfOps[CREATE]++;

    long i = fileSize;
    while (i > 0) {
      // Cap each chunk by the REMAINING count, not fileSize; using
      // fileSize made the final chunk overshoot and write more than
      // the requested number of bytes.
      long s = Math.min(i, WRITE_CONTENTS.length);
      out.write(WRITE_CONTENTS, 0, (int) s);
      i -= s;
    }

    // Close inside the timed window; previously close() only ran in the
    // finally block, so WRITE_CLOSE always measured ~0.
    startTime = Time.now();
    out.close();
    executionTime[WRITE_CLOSE] += (Time.now() - startTime);
    numOfOps[WRITE_CLOSE]++;
  } finally {
    // No-op when already closed above; guarantees cleanup on failure.
    IOUtils.cleanup(LOG, out);
  }
}
项目:aliyun-oss-hadoop-fs    文件:SequenceFile.java   
/**
 * Construct the preferred type of SequenceFile Writer.
 * @param fs The configured filesystem.
 * @param conf The configuration.
 * @param name The name of the file.
 * @param keyClass The 'key' type.
 * @param valClass The 'value' type.
 * @param bufferSize buffer size for the underlaying outputstream.
 * @param replication replication factor for the file.
 * @param blockSize block size for the file.
 * @param createParent create parent directory if non-existent
 * @param compressionType The compression type.
 * @param codec The compression codec.
 * @param metadata The metadata of the file.
 * @return Returns the handle to the constructed SequenceFile Writer.
 * @throws IOException
 */
@Deprecated
public static Writer
createWriter(FileSystem fs, Configuration conf, Path name,
             Class keyClass, Class valClass, int bufferSize,
             short replication, long blockSize, boolean createParent,
             CompressionType compressionType, CompressionCodec codec,
             Metadata metadata) throws IOException {
  return createWriter(FileContext.getFileContext(fs.getUri(), conf),
      conf, name, keyClass, valClass, compressionType, codec,
      metadata, EnumSet.of(CreateFlag.CREATE,CreateFlag.OVERWRITE),
      CreateOpts.bufferSize(bufferSize),
      createParent ? CreateOpts.createParent()
                   : CreateOpts.donotCreateParent(),
      CreateOpts.repFac(replication),
      CreateOpts.blockSize(blockSize)
    );
}
项目:aliyun-oss-hadoop-fs    文件:FileContextMainOperationsBaseTest.java   
@Test
/*
 * Test method
 *  org.apache.hadoop.fs.FileContext.getFileContext(AbstractFileSystem)
 */
public void testGetFileContext1() throws IOException {
  final Path rootPath = getTestRootPath(fc, "test");
  AbstractFileSystem asf = fc.getDefaultFileSystem();
  // create FileContext using the protected #getFileContext(1) method:
  FileContext fc2 = FileContext.getFileContext(asf);
  // Now just check that this context can do something reasonable:
  final Path path = new Path(rootPath, "zoo");
  FSDataOutputStream out = fc2.create(path, EnumSet.of(CREATE),
      Options.CreateOpts.createParent());
  out.close();
  Path pathResolved = fc2.resolvePath(path);
  // JUnit convention: expected value first, actual second, so a failure
  // message reads "expected <path> but was <resolved>".
  assertEquals(path.toUri().getPath(), pathResolved.toUri().getPath());
}
项目:aliyun-oss-hadoop-fs    文件:SymlinkBaseTest.java   
@Test(timeout=10000)
/** Try to create a file with parent that is a dangling link */
public void testCreateFileViaDanglingLinkParent() throws IOException {
  // "dangling" is a symlink to /doesNotExist, so it cannot act as the
  // parent directory of a new file.
  Path dir  = new Path(testBaseDir1()+"/dangling");
  Path file = new Path(testBaseDir1()+"/dangling/file");
  wrapper.createSymlink(new Path("/doesNotExist"), dir, false);
  FSDataOutputStream out;
  try {
    // No createParent option: resolution of the dangling parent must fail.
    out = wrapper.create(file, EnumSet.of(CreateFlag.CREATE),
                    CreateOpts.repFac((short) 1),
                    CreateOpts.blockSize(blockSize));
    out.close();
    fail("Created a link with dangling link parent");
  } catch (FileNotFoundException e) {
    // Expected. The parent is dangling.
  }
}
项目:aliyun-oss-hadoop-fs    文件:LoadGenerator.java   
/** Create a file with a length of <code>fileSize</code>.
 * The file is filled with 'a'.
 * @param file destination path; parent directories are created.
 * @param fileSize exact number of bytes to write.
 * @throws IOException if creation or writing fails.
 */
private void genFile(Path file, long fileSize) throws IOException {
  long startTime = Time.now();
  FSDataOutputStream out = null;
  try {
    out = fc.create(file,
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
        CreateOpts.createParent(), CreateOpts.bufferSize(4096),
        CreateOpts.repFac((short) 3));
    executionTime[CREATE] += (Time.now() - startTime);
    numOfOps[CREATE]++;

    long i = fileSize;
    while (i > 0) {
      // Cap each chunk by the REMAINING count, not fileSize; using
      // fileSize made the final chunk overshoot and write more than
      // the requested number of bytes.
      long s = Math.min(i, WRITE_CONTENTS.length);
      out.write(WRITE_CONTENTS, 0, (int) s);
      i -= s;
    }

    // Close inside the timed window; previously close() only ran in the
    // finally block, so WRITE_CLOSE always measured ~0.
    startTime = Time.now();
    out.close();
    executionTime[WRITE_CLOSE] += (Time.now() - startTime);
    numOfOps[WRITE_CLOSE]++;
  } finally {
    // No-op when already closed above; guarantees cleanup on failure.
    IOUtils.cleanup(LOG, out);
  }
}
项目:gemfirexd-oss    文件:SequenceFile.java   
/**
 * Construct the preferred type of SequenceFile Writer.
 * @param fs The configured filesystem.
 * @param conf The configuration.
 * @param name The name of the file.
 * @param keyClass The 'key' type.
 * @param valClass The 'value' type.
 * @param bufferSize buffer size for the underlaying outputstream.
 * @param replication replication factor for the file.
 * @param blockSize block size for the file.
 * @param createParent create parent directory if non-existent
 * @param compressionType The compression type.
 * @param codec The compression codec.
 * @param metadata The metadata of the file.
 * @return Returns the handle to the constructed SequenceFile Writer.
 * @throws IOException
 */
@Deprecated
public static Writer
createWriter(FileSystem fs, Configuration conf, Path name,
             Class keyClass, Class valClass, int bufferSize,
             short replication, long blockSize, boolean createParent,
             CompressionType compressionType, CompressionCodec codec,
             Metadata metadata) throws IOException {
  return createWriter(FileContext.getFileContext(fs.getUri(), conf),
      conf, name, keyClass, valClass, compressionType, codec,
      metadata, EnumSet.of(CreateFlag.CREATE,CreateFlag.OVERWRITE),
      CreateOpts.bufferSize(bufferSize),
      createParent ? CreateOpts.createParent()
                   : CreateOpts.donotCreateParent(),
      CreateOpts.repFac(replication),
      CreateOpts.blockSize(blockSize)
    );
}
项目:big-c    文件:SequenceFile.java   
/**
 * Construct the preferred type of SequenceFile Writer.
 * @param fs The configured filesystem.
 * @param conf The configuration.
 * @param name The name of the file.
 * @param keyClass The 'key' type.
 * @param valClass The 'value' type.
 * @param bufferSize buffer size for the underlaying outputstream.
 * @param replication replication factor for the file.
 * @param blockSize block size for the file.
 * @param createParent create parent directory if non-existent
 * @param compressionType The compression type.
 * @param codec The compression codec.
 * @param metadata The metadata of the file.
 * @return Returns the handle to the constructed SequenceFile Writer.
 * @throws IOException
 */
@Deprecated
public static Writer
createWriter(FileSystem fs, Configuration conf, Path name,
             Class keyClass, Class valClass, int bufferSize,
             short replication, long blockSize, boolean createParent,
             CompressionType compressionType, CompressionCodec codec,
             Metadata metadata) throws IOException {
  return createWriter(FileContext.getFileContext(fs.getUri(), conf),
      conf, name, keyClass, valClass, compressionType, codec,
      metadata, EnumSet.of(CreateFlag.CREATE,CreateFlag.OVERWRITE),
      CreateOpts.bufferSize(bufferSize),
      createParent ? CreateOpts.createParent()
                   : CreateOpts.donotCreateParent(),
      CreateOpts.repFac(replication),
      CreateOpts.blockSize(blockSize)
    );
}
项目:big-c    文件:FileContextMainOperationsBaseTest.java   
@Test
/*
 * Test method
 *  org.apache.hadoop.fs.FileContext.getFileContext(AbstractFileSystem)
 */
public void testGetFileContext1() throws IOException {
  final Path rootPath = getTestRootPath(fc, "test");
  AbstractFileSystem asf = fc.getDefaultFileSystem();
  // create FileContext using the protected #getFileContext(1) method:
  FileContext fc2 = FileContext.getFileContext(asf);
  // Now just check that this context can do something reasonable:
  final Path path = new Path(rootPath, "zoo");
  FSDataOutputStream out = fc2.create(path, EnumSet.of(CREATE),
      Options.CreateOpts.createParent());
  out.close();
  Path pathResolved = fc2.resolvePath(path);
  // JUnit convention: expected value first, actual second, so a failure
  // message reads "expected <path> but was <resolved>".
  assertEquals(path.toUri().getPath(), pathResolved.toUri().getPath());
}
项目:big-c    文件:SymlinkBaseTest.java   
@Test(timeout=10000)
/** Try to create a file with parent that is a dangling link */
public void testCreateFileViaDanglingLinkParent() throws IOException {
  // "dangling" is a symlink to /doesNotExist, so it cannot act as the
  // parent directory of a new file.
  Path dir  = new Path(testBaseDir1()+"/dangling");
  Path file = new Path(testBaseDir1()+"/dangling/file");
  wrapper.createSymlink(new Path("/doesNotExist"), dir, false);
  FSDataOutputStream out;
  try {
    // No createParent option: resolution of the dangling parent must fail.
    out = wrapper.create(file, EnumSet.of(CreateFlag.CREATE),
                    CreateOpts.repFac((short) 1),
                    CreateOpts.blockSize(blockSize));
    out.close();
    fail("Created a link with dangling link parent");
  } catch (FileNotFoundException e) {
    // Expected. The parent is dangling.
  }
}
项目:big-c    文件:LoadGenerator.java   
/** Create a file with a length of <code>fileSize</code>.
 * The file is filled with 'a'.
 * @param file destination path; parent directories are created.
 * @param fileSize exact number of bytes to write.
 * @throws IOException if creation or writing fails.
 */
private void genFile(Path file, long fileSize) throws IOException {
  long startTime = Time.now();
  FSDataOutputStream out = null;
  try {
    out = fc.create(file,
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
        CreateOpts.createParent(), CreateOpts.bufferSize(4096),
        CreateOpts.repFac((short) 3));
    executionTime[CREATE] += (Time.now() - startTime);
    numOfOps[CREATE]++;

    long i = fileSize;
    while (i > 0) {
      // Cap each chunk by the REMAINING count, not fileSize; using
      // fileSize made the final chunk overshoot and write more than
      // the requested number of bytes.
      long s = Math.min(i, WRITE_CONTENTS.length);
      out.write(WRITE_CONTENTS, 0, (int) s);
      i -= s;
    }

    // Close inside the timed window; previously close() only ran in the
    // finally block, so WRITE_CLOSE always measured ~0.
    startTime = Time.now();
    out.close();
    executionTime[WRITE_CLOSE] += (Time.now() - startTime);
    numOfOps[WRITE_CLOSE]++;
  } finally {
    // No-op when already closed above; guarantees cleanup on failure.
    IOUtils.cleanup(LOG, out);
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:SequenceFile.java   
/**
 * Construct the preferred type of SequenceFile Writer.
 * @param fs The configured filesystem.
 * @param conf The configuration.
 * @param name The name of the file.
 * @param keyClass The 'key' type.
 * @param valClass The 'value' type.
 * @param bufferSize buffer size for the underlaying outputstream.
 * @param replication replication factor for the file.
 * @param blockSize block size for the file.
 * @param createParent create parent directory if non-existent
 * @param compressionType The compression type.
 * @param codec The compression codec.
 * @param metadata The metadata of the file.
 * @return Returns the handle to the constructed SequenceFile Writer.
 * @throws IOException
 */
@Deprecated
public static Writer
createWriter(FileSystem fs, Configuration conf, Path name,
             Class keyClass, Class valClass, int bufferSize,
             short replication, long blockSize, boolean createParent,
             CompressionType compressionType, CompressionCodec codec,
             Metadata metadata) throws IOException {
  return createWriter(FileContext.getFileContext(fs.getUri(), conf),
      conf, name, keyClass, valClass, compressionType, codec,
      metadata, EnumSet.of(CreateFlag.CREATE,CreateFlag.OVERWRITE),
      CreateOpts.bufferSize(bufferSize),
      createParent ? CreateOpts.createParent()
                   : CreateOpts.donotCreateParent(),
      CreateOpts.repFac(replication),
      CreateOpts.blockSize(blockSize)
    );
}
项目:hadoop-2.6.0-cdh5.4.3    文件:FileContextMainOperationsBaseTest.java   
@Test
/*
 * Test method
 *  org.apache.hadoop.fs.FileContext.getFileContext(AbstractFileSystem)
 */
public void testGetFileContext1() throws IOException {
  final Path rootPath = getTestRootPath(fc, "test");
  AbstractFileSystem asf = fc.getDefaultFileSystem();
  // create FileContext using the protected #getFileContext(1) method:
  FileContext fc2 = FileContext.getFileContext(asf);
  // Now just check that this context can do something reasonable:
  final Path path = new Path(rootPath, "zoo");
  FSDataOutputStream out = fc2.create(path, EnumSet.of(CREATE),
      Options.CreateOpts.createParent());
  out.close();
  Path pathResolved = fc2.resolvePath(path);
  // JUnit convention: expected value first, actual second, so a failure
  // message reads "expected <path> but was <resolved>".
  assertEquals(path.toUri().getPath(), pathResolved.toUri().getPath());
}
项目:hadoop-2.6.0-cdh5.4.3    文件:SymlinkBaseTest.java   
@Test(timeout=10000)
/** Try to create a file with parent that is a dangling link */
public void testCreateFileViaDanglingLinkParent() throws IOException {
  // "dangling" is a symlink to /doesNotExist, so it cannot act as the
  // parent directory of a new file.
  Path dir  = new Path(testBaseDir1()+"/dangling");
  Path file = new Path(testBaseDir1()+"/dangling/file");
  wrapper.createSymlink(new Path("/doesNotExist"), dir, false);
  FSDataOutputStream out;
  try {
    // No createParent option: resolution of the dangling parent must fail.
    out = wrapper.create(file, EnumSet.of(CreateFlag.CREATE),
                    CreateOpts.repFac((short) 1),
                    CreateOpts.blockSize(blockSize));
    out.close();
    fail("Created a link with dangling link parent");
  } catch (FileNotFoundException e) {
    // Expected. The parent is dangling.
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:LoadGenerator.java   
/** Create a file with a length of <code>fileSize</code>.
 * The file is filled with 'a'.
 * @param file destination path; parent directories are created.
 * @param fileSize exact number of bytes to write.
 * @throws IOException if creation or writing fails.
 */
private void genFile(Path file, long fileSize) throws IOException {
  long startTime = Time.now();
  FSDataOutputStream out = null;
  try {
    out = fc.create(file,
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
        CreateOpts.createParent(), CreateOpts.bufferSize(4096),
        CreateOpts.repFac((short) 3));
    executionTime[CREATE] += (Time.now() - startTime);
    totalNumOfOps[CREATE]++;

    long i = fileSize;
    while (i > 0) {
      // Cap each chunk by the REMAINING count, not fileSize; using
      // fileSize made the final chunk overshoot and write more than
      // the requested number of bytes.
      long s = Math.min(i, WRITE_CONTENTS.length);
      out.write(WRITE_CONTENTS, 0, (int) s);
      i -= s;
    }

    // Close inside the timed window; previously close() only ran in the
    // finally block, so WRITE_CLOSE always measured ~0.
    startTime = Time.now();
    out.close();
    executionTime[WRITE_CLOSE] += (Time.now() - startTime);
    totalNumOfOps[WRITE_CLOSE]++;
  } finally {
    // No-op when already closed above; guarantees cleanup on failure.
    IOUtils.cleanup(LOG, out);
  }
}
项目:hops    文件:LoadGenerator.java   
/** Create a file with a length of <code>fileSize</code>.
 * The file is filled with 'a'.
 * @param file destination path; parent directories are created.
 * @param fileSize exact number of bytes to write.
 * @throws IOException if creation or writing fails.
 */
private void genFile(Path file, long fileSize) throws IOException {
  long startTime = Time.now();
  FSDataOutputStream out = null;
  try {
    out = fc.create(file,
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
        CreateOpts.createParent(), CreateOpts.bufferSize(4096),
        CreateOpts.repFac((short) 3));
    executionTime[CREATE] += (Time.now() - startTime);
    numOfOps[CREATE]++;

    long i = fileSize;
    while (i > 0) {
      // Cap each chunk by the REMAINING count, not fileSize; using
      // fileSize made the final chunk overshoot and write more than
      // the requested number of bytes.
      long s = Math.min(i, WRITE_CONTENTS.length);
      out.write(WRITE_CONTENTS, 0, (int) s);
      i -= s;
    }

    // Close inside the timed window; previously close() only ran in the
    // finally block, so WRITE_CLOSE always measured ~0.
    startTime = Time.now();
    out.close();
    executionTime[WRITE_CLOSE] += (Time.now() - startTime);
    numOfOps[WRITE_CLOSE]++;
  } finally {
    // No-op when already closed above; guarantees cleanup on failure.
    IOUtils.cleanup(LOG, out);
  }
}
项目:hadoop-plus    文件:SequenceFile.java   
/**
 * Construct the preferred type of SequenceFile Writer.
 * @param fs The configured filesystem.
 * @param conf The configuration.
 * @param name The name of the file.
 * @param keyClass The 'key' type.
 * @param valClass The 'value' type.
 * @param bufferSize buffer size for the underlaying outputstream.
 * @param replication replication factor for the file.
 * @param blockSize block size for the file.
 * @param createParent create parent directory if non-existent
 * @param compressionType The compression type.
 * @param codec The compression codec.
 * @param metadata The metadata of the file.
 * @return Returns the handle to the constructed SequenceFile Writer.
 * @throws IOException
 */
@Deprecated
public static Writer
createWriter(FileSystem fs, Configuration conf, Path name,
             Class keyClass, Class valClass, int bufferSize,
             short replication, long blockSize, boolean createParent,
             CompressionType compressionType, CompressionCodec codec,
             Metadata metadata) throws IOException {
  return createWriter(FileContext.getFileContext(fs.getUri(), conf),
      conf, name, keyClass, valClass, compressionType, codec,
      metadata, EnumSet.of(CreateFlag.CREATE,CreateFlag.OVERWRITE),
      CreateOpts.bufferSize(bufferSize),
      createParent ? CreateOpts.createParent()
                   : CreateOpts.donotCreateParent(),
      CreateOpts.repFac(replication),
      CreateOpts.blockSize(blockSize)
    );
}
项目:hadoop-plus    文件:SymlinkBaseTest.java   
@Test(timeout=10000)
/** Try to create a file with parent that is a dangling link */
public void testCreateFileViaDanglingLinkParent() throws IOException {
  // "dangling" is a symlink to /doesNotExist, so it cannot act as the
  // parent directory of a new file.
  Path dir  = new Path(testBaseDir1()+"/dangling");
  Path file = new Path(testBaseDir1()+"/dangling/file");
  wrapper.createSymlink(new Path("/doesNotExist"), dir, false);
  FSDataOutputStream out;
  try {
    // No createParent option: resolution of the dangling parent must fail.
    out = wrapper.create(file, EnumSet.of(CreateFlag.CREATE),
                    CreateOpts.repFac((short) 1),
                    CreateOpts.blockSize(blockSize));
    out.close();
    fail("Created a link with dangling link parent");
  } catch (FileNotFoundException e) {
    // Expected. The parent is dangling.
  }
}
项目:hadoop-plus    文件:FileContextSymlinkBaseTest.java   
@Test
/** Try to create a file with parent that is a dangling link */
public void testCreateFileViaDanglingLinkParent() throws IOException {
  // "dangling" is a symlink to /doesNotExist, so it cannot act as the
  // parent directory of a new file.
  Path dir  = new Path(testBaseDir1()+"/dangling");
  Path file = new Path(testBaseDir1()+"/dangling/file");
  fc.createSymlink(new Path("/doesNotExist"), dir, false);
  FSDataOutputStream out;
  try {
    // No createParent option: resolution of the dangling parent must fail.
    out = fc.create(file, EnumSet.of(CreateFlag.CREATE), 
                    CreateOpts.repFac((short) 1),
                    CreateOpts.blockSize(blockSize));
    out.close();
    fail("Created a link with dangling link parent");
  } catch (FileNotFoundException e) {
    // Expected. The parent is dangling.
  }
}
项目:hadoop-plus    文件:LoadGenerator.java   
/** Create a file with a length of <code>fileSize</code>.
 * The file is filled with 'a'.
 * @param file destination path; parent directories are created.
 * @param fileSize exact number of bytes to write.
 * @throws IOException if creation or writing fails.
 */
private void genFile(Path file, long fileSize) throws IOException {
  long startTime = Time.now();
  FSDataOutputStream out = fc.create(file,
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
      CreateOpts.createParent(), CreateOpts.bufferSize(4096),
      CreateOpts.repFac((short) 3));
  boolean closed = false;
  try {
    executionTime[CREATE] += (Time.now()-startTime);
    totalNumOfOps[CREATE]++;

    // Write in chunks instead of one writeByte() call per byte; the
    // resulting file contents are identical ('a' repeated fileSize times)
    // but we avoid fileSize per-byte virtual calls.
    byte[] chunk = new byte[4096];
    java.util.Arrays.fill(chunk, (byte) 'a');
    for (long remaining = fileSize; remaining > 0; ) {
      int n = (int) Math.min(remaining, chunk.length);
      out.write(chunk, 0, n);
      remaining -= n;
    }
    startTime = Time.now();
    out.close();
    closed = true;
    executionTime[WRITE_CLOSE] += (Time.now()-startTime);
    totalNumOfOps[WRITE_CLOSE]++;
  } finally {
    if (!closed) {
      out.close(); // don't leak the stream when a write fails
    }
  }
}
项目:hops    文件:SequenceFile.java   
/**
 * Construct the preferred type of SequenceFile Writer.
 * @param fs The configured filesystem.
 * @param conf The configuration.
 * @param name The name of the file.
 * @param keyClass The 'key' type.
 * @param valClass The 'value' type.
 * @param bufferSize buffer size for the underlaying outputstream.
 * @param replication replication factor for the file.
 * @param blockSize block size for the file.
 * @param createParent create parent directory if non-existent
 * @param compressionType The compression type.
 * @param codec The compression codec.
 * @param metadata The metadata of the file.
 * @return Returns the handle to the constructed SequenceFile Writer.
 * @throws IOException
 */
@Deprecated
public static Writer
createWriter(FileSystem fs, Configuration conf, Path name,
             Class keyClass, Class valClass, int bufferSize,
             short replication, long blockSize, boolean createParent,
             CompressionType compressionType, CompressionCodec codec,
             Metadata metadata) throws IOException {
  return createWriter(FileContext.getFileContext(fs.getUri(), conf),
      conf, name, keyClass, valClass, compressionType, codec,
      metadata, EnumSet.of(CreateFlag.CREATE,CreateFlag.OVERWRITE),
      CreateOpts.bufferSize(bufferSize),
      createParent ? CreateOpts.createParent()
                   : CreateOpts.donotCreateParent(),
      CreateOpts.repFac(replication),
      CreateOpts.blockSize(blockSize)
    );
}
项目:hops    文件:FileContextMainOperationsBaseTest.java   
@Test
/*
 * Test method
 *  org.apache.hadoop.fs.FileContext.getFileContext(AbstractFileSystem)
 */
public void testGetFileContext1() throws IOException {
  final Path rootPath = getTestRootPath(fc, "test");
  AbstractFileSystem asf = fc.getDefaultFileSystem();
  // create FileContext using the protected #getFileContext(1) method:
  FileContext fc2 = FileContext.getFileContext(asf);
  // Now just check that this context can do something reasonable:
  final Path path = new Path(rootPath, "zoo");
  FSDataOutputStream out = fc2.create(path, EnumSet.of(CREATE),
      Options.CreateOpts.createParent());
  out.close();
  Path pathResolved = fc2.resolvePath(path);
  // JUnit convention: expected value first, actual second, so a failure
  // message reads "expected <path> but was <resolved>".
  assertEquals(path.toUri().getPath(), pathResolved.toUri().getPath());
}
项目:hops    文件:SymlinkBaseTest.java   
@Test(timeout=10000)
/** Try to create a file with parent that is a dangling link */
public void testCreateFileViaDanglingLinkParent() throws IOException {
  // "dangling" is a symlink to /doesNotExist, so it cannot act as the
  // parent directory of a new file.
  Path dir  = new Path(testBaseDir1()+"/dangling");
  Path file = new Path(testBaseDir1()+"/dangling/file");
  wrapper.createSymlink(new Path("/doesNotExist"), dir, false);
  FSDataOutputStream out;
  try {
    // No createParent option: resolution of the dangling parent must fail.
    out = wrapper.create(file, EnumSet.of(CreateFlag.CREATE),
                    CreateOpts.repFac((short) 1),
                    CreateOpts.blockSize(blockSize));
    out.close();
    fail("Created a link with dangling link parent");
  } catch (FileNotFoundException e) {
    // Expected. The parent is dangling.
  }
}
项目:hadoop-oss    文件:FileContextTestWrapper.java   
/**
 * Creates a file of {@code numBlocks} blocks of generated data at
 * {@code path} and returns the number of bytes written.
 * @param path destination file.
 * @param numBlocks number of blocks of test data to write.
 * @param options create options; a BlockSize option, if present, sets the
 *        block size (otherwise DEFAULT_BLOCK_SIZE is used).
 * @return total bytes written.
 * @throws IOException if creation or writing fails.
 */
public long createFile(Path path, int numBlocks, CreateOpts... options)
    throws IOException {
  BlockSize blockSizeOpt = CreateOpts.getOpt(CreateOpts.BlockSize.class, options);
  long blockSize = blockSizeOpt != null ? blockSizeOpt.getValue()
      : DEFAULT_BLOCK_SIZE;
  byte[] data = getFileData(numBlocks, blockSize);
  // try-with-resources: the stream previously leaked if write() threw.
  try (FSDataOutputStream out =
      fc.create(path, EnumSet.of(CreateFlag.CREATE), options)) {
    out.write(data, 0, data.length);
  }
  return data.length;
}
项目:hadoop-oss    文件:FileContextTestWrapper.java   
/**
 * Appends {@code numBlocks} blocks of generated data to {@code path}.
 * @param path file to append to.
 * @param numBlocks number of blocks of test data to append.
 * @param options consulted only for a BlockSize option (data sizing).
 * @throws IOException if the append fails.
 */
public void appendToFile(Path path, int numBlocks, CreateOpts... options)
    throws IOException {
  BlockSize blockSizeOpt = CreateOpts.getOpt(CreateOpts.BlockSize.class, options);
  long blockSize = blockSizeOpt != null ? blockSizeOpt.getValue()
      : DEFAULT_BLOCK_SIZE;
  byte[] data = getFileData(numBlocks, blockSize);
  // NOTE(review): 'options' is not forwarded to create() here, unlike
  // createFile — confirm this asymmetry is intentional.
  // try-with-resources: the stream previously leaked if write() threw.
  try (FSDataOutputStream out =
      fc.create(path, EnumSet.of(CreateFlag.APPEND))) {
    out.write(data, 0, data.length);
  }
}
项目:hadoop-oss    文件:FileContextTestWrapper.java   
/** Pure delegate: forwards all arguments to {@link FileContext#create}. */
@Override
public FSDataOutputStream create(Path f, EnumSet<CreateFlag> createFlag,
    CreateOpts... opts) throws AccessControlException,
    FileAlreadyExistsException, FileNotFoundException,
    ParentNotDirectoryException, UnsupportedFileSystemException, IOException {
  return fc.create(f, createFlag, opts);
}
项目:hadoop-oss    文件:FileContextMainOperationsBaseTest.java   
/**
 * Round-trip helper: writes the first {@code len} bytes of the shared
 * {@code data} buffer to test/hadoop/file, reads them back, verifies the
 * contents byte-for-byte, then deletes the file.
 * @param len number of bytes to write and verify.
 * @throws IOException if any filesystem operation fails.
 */
private void writeReadAndDelete(int len) throws IOException {
  Path path = getTestRootPath(fc, "test/hadoop/file");

  fc.mkdir(path.getParent(), FsPermission.getDefault(), true);

  // try-with-resources: streams previously leaked if write/read threw.
  try (FSDataOutputStream out = fc.create(path, EnumSet.of(CREATE),
      CreateOpts.repFac((short) 1),
      CreateOpts.blockSize(getDefaultBlockSize()))) {
    out.write(data, 0, len);
  }

  Assert.assertTrue("Exists", exists(fc, path));
  Assert.assertEquals("Length", len, fc.getFileStatus(path).getLen());

  byte[] buf = new byte[len];
  try (FSDataInputStream in = fc.open(path)) {
    in.readFully(0, buf);
  }

  Assert.assertEquals(len, buf.length);
  for (int i = 0; i < buf.length; i++) {
    Assert.assertEquals("Position " + i, data[i], buf[i]);
  }

  Assert.assertTrue("Deleted", fc.delete(path, false));

  Assert.assertFalse("No longer exists", exists(fc, path));

}
项目:hadoop-oss    文件:FileContextMainOperationsBaseTest.java   
/**
 * Verifies that a second close() on an already-closed output stream is a
 * no-op, per the Closeable#close() contract (HADOOP-4760).
 */
@Test
public void testOutputStreamClosedTwice() throws IOException {
  //HADOOP-4760 according to Closeable#close() closing already-closed 
  //streams should have no effect. 
  Path src = getTestRootPath(fc, "test/hadoop/file");
  FSDataOutputStream out = fc.create(src, EnumSet.of(CREATE),
          Options.CreateOpts.createParent());

  out.writeChar('H'); //write some data
  out.close();
  // Intentionally closed twice — the second close() must not throw.
  // Do NOT "simplify" this into a single close(); it is the test.
  out.close();
}
项目:hadoop-oss    文件:FileContextMainOperationsBaseTest.java   
/**
 * Enables checksum verification via setVerifyChecksum(true, path) during
 * a write, then confirms the file's length and contents still round-trip
 * correctly against the shared {@code data} buffer.
 */
@Test
public void testSetVerifyChecksum() throws IOException {
  final Path rootPath = getTestRootPath(fc, "test");
  final Path path = new Path(rootPath, "zoo");

  FSDataOutputStream out = fc.create(path, EnumSet.of(CREATE),
      Options.CreateOpts.createParent());
  try {
    // instruct FS to verify checksum through the FileContext:
    fc.setVerifyChecksum(true, path);
    out.write(data, 0, data.length);
  } finally {
    out.close();
  }

  // NB: underlying FS may be different (this is an abstract test),
  // so we cannot assert .zoo.crc existence.
  // Instead, we check that the file is read correctly:
  FileStatus fileStatus = fc.getFileStatus(path);
  final long len = fileStatus.getLen();
  assertTrue(len == data.length);
  byte[] bb = new byte[(int)len];
  FSDataInputStream fsdis = fc.open(path);
  try {
    fsdis.readFully(bb);
  } finally {
    fsdis.close();
  }
  assertArrayEquals(data, bb);
}
项目:hadoop-oss    文件:FileContextTestHelper.java   
/**
 * Creates a file of {@code numBlocks} blocks of generated data at
 * {@code path} and returns the number of bytes written.
 * @param fc the FileContext to create through.
 * @param path destination file.
 * @param numBlocks number of blocks of test data to write.
 * @param options create options; a BlockSize option, if present, sets the
 *        block size (otherwise DEFAULT_BLOCK_SIZE is used).
 * @return total bytes written.
 * @throws IOException if creation or writing fails.
 */
public static long createFile(FileContext fc, Path path, int numBlocks,
    CreateOpts... options) throws IOException {
  BlockSize blockSizeOpt = CreateOpts.getOpt(CreateOpts.BlockSize.class, options);
  long blockSize = blockSizeOpt != null ? blockSizeOpt.getValue()
      : DEFAULT_BLOCK_SIZE;
  byte[] data = getFileData(numBlocks, blockSize);
  // try-with-resources: the stream previously leaked if write() threw.
  try (FSDataOutputStream out =
      fc.create(path, EnumSet.of(CreateFlag.CREATE), options)) {
    out.write(data, 0, data.length);
  }
  return data.length;
}
项目:hadoop-oss    文件:FileContextTestHelper.java   
/**
 * Appends {@code numBlocks} blocks of generated data to {@code path}.
 * @param fc the FileContext to write through.
 * @param path file to append to.
 * @param numBlocks number of blocks of test data to append.
 * @param options consulted only for a BlockSize option (data sizing).
 * @throws IOException if the append fails.
 */
public static void appendToFile(FileContext fc, Path path, int numBlocks,
    CreateOpts... options) throws IOException {
  BlockSize blockSizeOpt = CreateOpts.getOpt(CreateOpts.BlockSize.class, options);
  long blockSize = blockSizeOpt != null ? blockSizeOpt.getValue()
      : DEFAULT_BLOCK_SIZE;
  byte[] data = getFileData(numBlocks, blockSize);
  // NOTE(review): 'options' is not forwarded to create() here, unlike
  // createFile — confirm this asymmetry is intentional.
  // try-with-resources: the stream previously leaked if write() threw.
  try (FSDataOutputStream out =
      fc.create(path, EnumSet.of(CreateFlag.APPEND))) {
    out.write(data, 0, data.length);
  }
}
项目:hadoop-oss    文件:FileContextTestHelper.java   
/**
 * Writes the bytes {@code b} to a new file at {@code path}, creating
 * parent directories as needed.
 * @param fc the FileContext to write through.
 * @param path destination file.
 * @param b bytes to write.
 * @throws IOException if creation or writing fails.
 */
public static void writeFile(FileContext fc, Path path, byte b[])
    throws IOException {
  // try-with-resources: the stream previously leaked if write() threw.
  try (FSDataOutputStream out =
      fc.create(path, EnumSet.of(CreateFlag.CREATE), CreateOpts.createParent())) {
    out.write(b);
  }
}
项目:hadoop-oss    文件:FileSystemTestWrapper.java   
/**
 * Creates a file of {@code numBlocks} blocks of generated data at
 * {@code path} and returns the number of bytes written.
 * @param path destination file.
 * @param numBlocks number of blocks of test data to write.
 * @param options create options; a BlockSize option, if present, sets the
 *        block size (otherwise DEFAULT_BLOCK_SIZE is used).
 * @return total bytes written.
 * @throws IOException if creation or writing fails.
 */
public long createFile(Path path, int numBlocks, CreateOpts... options)
    throws IOException {
  BlockSize blockSizeOpt = CreateOpts.getOpt(CreateOpts.BlockSize.class, options);
  long blockSize = blockSizeOpt != null ? blockSizeOpt.getValue()
      : DEFAULT_BLOCK_SIZE;
  byte[] data = getFileData(numBlocks, blockSize);
  // try-with-resources: the stream previously leaked if write() threw.
  try (FSDataOutputStream out =
      create(path, EnumSet.of(CreateFlag.CREATE), options)) {
    out.write(data, 0, data.length);
  }
  return data.length;
}
项目:hadoop-oss    文件:FileSystemTestWrapper.java   
/**
 * Appends {@code numBlocks} blocks of generated data to {@code path}
 * via {@code fs.append}.
 * @param path file to append to.
 * @param numBlocks number of blocks of test data to append.
 * @param options consulted only for a BlockSize option (data sizing).
 * @throws IOException if the append fails.
 */
public void appendToFile(Path path, int numBlocks, CreateOpts... options)
    throws IOException {
  BlockSize blockSizeOpt = CreateOpts.getOpt(CreateOpts.BlockSize.class, options);
  long blockSize = blockSizeOpt != null ? blockSizeOpt.getValue()
      : DEFAULT_BLOCK_SIZE;
  byte[] data = getFileData(numBlocks, blockSize);
  // try-with-resources: the stream previously leaked if write() threw.
  try (FSDataOutputStream out = fs.append(path)) {
    out.write(data, 0, data.length);
  }
}