Example source code for the Java class org.apache.hadoop.util.Progressable

Project: hadoop    File: DFSClient.java
/**
 * Append to an existing file if {@link CreateFlag#APPEND} is present
 */
private DFSOutputStream primitiveAppend(String src, EnumSet<CreateFlag> flag,
    int buffersize, Progressable progress) throws IOException {
  if (flag.contains(CreateFlag.APPEND)) {
    HdfsFileStatus stat = getFileInfo(src);
    if (stat == null) { // No file to append to
      // New file needs to be created if create option is present
      if (!flag.contains(CreateFlag.CREATE)) {
        throw new FileNotFoundException("failed to append to non-existent file "
            + src + " on client " + clientName);
      }
      return null;
    }
    return callAppend(src, buffersize, flag, progress, null);
  }
  return null;
}
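For orientation, here is a minimal caller-side sketch (an illustration, not code from any of the listed projects) of the CREATE plus APPEND combination that primitiveAppend handles, expressed through the public FileSystem API; the path, buffer size, and callback are invented:

// Caller-side sketch; path, buffer size, and callback are illustrative only.
import java.io.IOException;
import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class AppendOrCreateExample {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    Path path = new Path("/tmp/appendOrCreate.txt");
    // CREATE + APPEND: append if the file exists, create it otherwise,
    // which is the branch primitiveAppend handles above.
    EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.CREATE, CreateFlag.APPEND);
    try (FSDataOutputStream out = fs.create(path,
        FsPermission.getFileDefault(), flags, 4096,
        fs.getDefaultReplication(path), fs.getDefaultBlockSize(path),
        () -> System.out.print("."))) {  // Progressable callback
      out.writeBytes("one more record\n");
    }
  }
}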
Project: hadoop-oss    File: FileSystem.java
@Deprecated
protected FSDataOutputStream primitiveCreate(Path f,
   FsPermission absolutePermission, EnumSet<CreateFlag> flag, int bufferSize,
   short replication, long blockSize, Progressable progress,
   ChecksumOpt checksumOpt) throws IOException {

  boolean pathExists = exists(f);
  CreateFlag.validate(f, pathExists, flag);

  // The default implementation assumes that neither permissions nor
  // bytesPerChecksum matter here, so delegating to the regular create
  // is good enough. FileSystems that implement permissions should
  // override this method.

  if (pathExists && flag.contains(CreateFlag.APPEND)) {
    return append(f, bufferSize, progress);
  }

  return this.create(f, absolutePermission,
      flag.contains(CreateFlag.OVERWRITE), bufferSize, replication,
      blockSize, progress);
}
Project: hadoop-oss    File: ChecksumFileSystem.java
public ChecksumFSOutputSummer(ChecksumFileSystem fs, 
                      Path file, 
                      boolean overwrite,
                      int bufferSize,
                      short replication,
                      long blockSize,
                      Progressable progress,
                      FsPermission permission)
  throws IOException {
  super(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,
      fs.getBytesPerSum()));
  int bytesPerSum = fs.getBytesPerSum();
  this.datas = fs.getRawFileSystem().create(file, permission, overwrite,
                                     bufferSize, replication, blockSize,
                                     progress);
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
  this.sums = fs.getRawFileSystem().create(fs.getChecksumFile(file),
                                           permission, true, sumBufferSize,
                                           replication, blockSize, null);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerSum);
}
Project: hadoop    File: FileSystem.java
@Deprecated
protected FSDataOutputStream primitiveCreate(Path f,
   FsPermission absolutePermission, EnumSet<CreateFlag> flag, int bufferSize,
   short replication, long blockSize, Progressable progress,
   ChecksumOpt checksumOpt) throws IOException {

  boolean pathExists = exists(f);
  CreateFlag.validate(f, pathExists, flag);

  // The default implementation assumes that neither permissions nor
  // bytesPerChecksum matter here, so delegating to the regular create
  // is good enough. FileSystems that implement permissions should
  // override this method.

  if (pathExists && flag.contains(CreateFlag.APPEND)) {
    return append(f, bufferSize, progress);
  }

  return this.create(f, absolutePermission,
      flag.contains(CreateFlag.OVERWRITE), bufferSize, replication,
      blockSize, progress);
}
Project: hadoop    File: DFSClient.java
/**
 * Same as {@link #create(String, FsPermission, EnumSet, boolean, short, long,
 * Progressable, int, ChecksumOpt)} with the addition of favoredNodes, a hint
 * to the namenode about where to place the file blocks. The favored-nodes
 * hint is not persisted in HDFS, so it may be honored only at creation time;
 * HDFS may later move the blocks away from the favored nodes during balancing
 * or replication. A value of null means no favored nodes for this create.
 */
public DFSOutputStream create(String src, 
                           FsPermission permission,
                           EnumSet<CreateFlag> flag, 
                           boolean createParent,
                           short replication,
                           long blockSize,
                           Progressable progress,
                           int buffersize,
                           ChecksumOpt checksumOpt,
                           InetSocketAddress[] favoredNodes) throws IOException {
  checkOpen();
  if (permission == null) {
    permission = FsPermission.getFileDefault();
  }
  FsPermission masked = permission.applyUMask(dfsClientConf.uMask);
  if(LOG.isDebugEnabled()) {
    LOG.debug(src + ": masked=" + masked);
  }
  final DFSOutputStream result = DFSOutputStream.newStreamForCreate(this,
      src, masked, flag, createParent, replication, blockSize, progress,
      buffersize, dfsClientConf.createChecksum(checksumOpt),
      getFavoredNodesStr(favoredNodes));
  beginFileLease(result.getFileId(), result);
  return result;
}
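The favored-nodes hint described in the javadoc is also reachable from DistributedFileSystem; a hedged sketch follows (the datanode addresses and file path are invented, and, as noted above, the hint is best-effort only):

// Sketch of supplying a favored-nodes hint; addresses are made up.
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class FavoredNodesExample {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    // Hypothetical datanodes we would like the blocks placed on.
    InetSocketAddress[] favored = {
        new InetSocketAddress("datanode1.example.com", 50010),
        new InetSocketAddress("datanode2.example.com", 50010)
    };
    Path f = new Path("/tmp/favored.dat");
    dfs.create(f, FsPermission.getFileDefault(), true /* overwrite */,
        4096, (short) 3, dfs.getDefaultBlockSize(f),
        null /* progress */, favored).close();
  }
}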
Project: hadoop    File: Task.java
public ValuesIterator (RawKeyValueIterator in, 
                       RawComparator<KEY> comparator, 
                       Class<KEY> keyClass,
                       Class<VALUE> valClass, Configuration conf, 
                       Progressable reporter)
  throws IOException {
  this.in = in;
  this.comparator = comparator;
  this.reporter = reporter;
  SerializationFactory serializationFactory = new SerializationFactory(conf);
  this.keyDeserializer = serializationFactory.getDeserializer(keyClass);
  this.keyDeserializer.open(keyIn);
  this.valDeserializer = serializationFactory.getDeserializer(valClass);
  this.valDeserializer.open(this.valueIn);
  readNextKey();
  key = nextKey;
  nextKey = null; // force new instance creation
  hasNext = more;
}
Project: hadoop    File: NativeAzureFileSystem.java
@Override
@SuppressWarnings("deprecation")
public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
    EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize,
    Progressable progress) throws IOException {

  // Check whether the file should be appended or overwritten. Assume the
  // file is overwritten if both the CREATE and OVERWRITE create flags are
  // set; any other combination of create flags results in opening a new
  // file or opening with append.
  final EnumSet<CreateFlag> createflags =
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE);
  boolean overwrite = flags.containsAll(createflags);

  // Delegate the create non-recursive call.
  return this.createNonRecursive(f, permission, overwrite,
      bufferSize, replication, blockSize, progress);
}
Project: ditb    File: TestHRegionFileSystem.java
@Override
public FSDataOutputStream create(Path arg0, FsPermission arg1, boolean arg2, int arg3,
    short arg4, long arg5, Progressable arg6) throws IOException {
  LOG.debug("Create, " + retryCount);
  if (retryCount++ < successRetryCount) throw new IOException("Something bad happened");
  return null;
}
Project: hadoop-oss    File: ViewFs.java
@Override
public FSDataOutputStream createInternal(final Path f,
    final EnumSet<CreateFlag> flag, final FsPermission absolutePermission,
    final int bufferSize, final short replication, final long blockSize,
    final Progressable progress, final ChecksumOpt checksumOpt,
    final boolean createParent) throws AccessControlException,
    FileAlreadyExistsException, FileNotFoundException,
    ParentNotDirectoryException, UnsupportedFileSystemException,
    UnresolvedLinkException, IOException {
  InodeTree.ResolveResult<AbstractFileSystem> res;
  try {
    res = fsState.resolve(getUriPath(f), false);
  } catch (FileNotFoundException e) {
    if (createParent) {
      throw readOnlyMountTable("create", f);
    } else {
      throw e;
    }
  }
  assert(res.remainingPath != null);
  return res.targetFileSystem.createInternal(res.remainingPath, flag,
      absolutePermission, bufferSize, replication,
      blockSize, progress, checksumOpt,
      createParent);
}
Project: hadoop    File: DFSClient.java
/**
 * Same as {@link #create(String, FsPermission, EnumSet, short, long,
 * Progressable, int, ChecksumOpt)} except that the permission
 * is absolute (i.e. it has already been masked with the umask).
 */
public DFSOutputStream primitiveCreate(String src, 
                           FsPermission absPermission,
                           EnumSet<CreateFlag> flag,
                           boolean createParent,
                           short replication,
                           long blockSize,
                           Progressable progress,
                           int buffersize,
                           ChecksumOpt checksumOpt)
    throws IOException, UnresolvedLinkException {
  checkOpen();
  CreateFlag.validate(flag);
  DFSOutputStream result = primitiveAppend(src, flag, buffersize, progress);
  if (result == null) {
    DataChecksum checksum = dfsClientConf.createChecksum(checksumOpt);
    result = DFSOutputStream.newStreamForCreate(this, src, absPermission,
        flag, createParent, replication, blockSize, progress, buffersize,
        checksum, null);
  }
  beginFileLease(result.getFileId(), result);
  return result;
}
Project: hadoop    File: Merger.java
public static <K extends Object, V extends Object>
RawKeyValueIterator merge(Configuration conf, FileSystem fs,
                          Class<K> keyClass, Class<V> valueClass,
                          CompressionCodec codec,
                          List<Segment<K, V>> segments,
                          int mergeFactor, Path tmpDir,
                          RawComparator<K> comparator, Progressable reporter,
                          boolean sortSegments,
                          Counters.Counter readsCounter,
                          Counters.Counter writesCounter,
                          Progress mergePhase,
                          TaskType taskType)
    throws IOException {
  return new MergeQueue<K, V>(conf, fs, segments, comparator, reporter,
                         sortSegments, codec,
                         taskType).merge(keyClass, valueClass,
                                             mergeFactor, tmpDir,
                                             readsCounter, writesCounter,
                                             mergePhase);
}
Project: lustre-connector-for-hadoop    File: LustreFileSystem.java
@Override
public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite,
    int bufferSize, short replication, long blockSize, Progressable progress) throws IOException {
  String file = mapToLocal(f).toUri().getPath();
  return new FSDataOutputStream(
      fsDelegate.open(file, overwrite ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
              : EnumSet.of(CreateFlag.CREATE), permission.toShort(), true, 
            bufferSize, getDefaultStripeSize(), getDefaultStripeCount(), -1, null), statistics);
}
Project: dremio-oss    File: FileSystemWrapper.java
@Override
public FSDataOutputStream create(Path f, Progressable progress) throws IOException {
  try {
    return newFSDataOutputStreamWrapper(underlyingFs.create(f, progress));
  } catch(FSError e) {
    throw propagateFSError(e);
  }
}
Project: hadoop-oss    File: FileSystem.java
/**
 * Create an FSDataOutputStream at the indicated Path with write-progress
 * reporting.
 * @param f the path of the file to open
 * @param overwrite if a file with this name already exists, then if true,
 *   the file will be overwritten, and if false an error will be thrown.
 * @param bufferSize the size of the buffer to be used.
 * @param progress callback for reporting write progress.
 */
public FSDataOutputStream create(Path f, 
                                 boolean overwrite,
                                 int bufferSize,
                                 Progressable progress
                                 ) throws IOException {
  return create(f, overwrite, bufferSize, 
                getDefaultReplication(f),
                getDefaultBlockSize(f), progress);
}
Project: hadoop-oss    File: FileSystem.java
/**
 * Create an FSDataOutputStream at the indicated Path with write-progress
 * reporting.
 * @param f the file name to open
 * @param overwrite if a file with this name already exists, then if true,
 *   the file will be overwritten, and if false an error will be thrown.
 * @param bufferSize the size of the buffer to be used.
 * @param replication required block replication for the file.
 * @param blockSize the block size to be used for the file.
 * @param progress callback for reporting write progress.
 */
public FSDataOutputStream create(Path f,
                                          boolean overwrite,
                                          int bufferSize,
                                          short replication,
                                          long blockSize,
                                          Progressable progress
                                          ) throws IOException {
  return this.create(f, FsPermission.getFileDefault().applyUMask(
      FsPermission.getUMask(getConf())), overwrite, bufferSize,
      replication, blockSize, progress);
}
Project: hadoop    File: S3OutputStream.java
public S3OutputStream(Configuration conf, FileSystemStore store,
                      Path path, long blockSize, Progressable progress,
                      int buffersize) throws IOException {

  this.conf = conf;
  this.store = store;
  this.path = path;
  this.blockSize = blockSize;
  this.backupFile = newBackupFile();
  this.backupStream = new FileOutputStream(backupFile);
  this.bufferSize = buffersize;
  this.outBuf = new byte[bufferSize];

}
Project: hadoop    File: DistributedFileSystem.java
@Override
protected HdfsDataOutputStream primitiveCreate(Path f,
  FsPermission absolutePermission, EnumSet<CreateFlag> flag, int bufferSize,
  short replication, long blockSize, Progressable progress,
  ChecksumOpt checksumOpt) throws IOException {
  statistics.incrementWriteOps(1);
  final DFSOutputStream dfsos = dfs.primitiveCreate(
    getPathName(fixRelativePart(f)),
    absolutePermission, flag, true, replication, blockSize,
    progress, bufferSize, checksumOpt);
  return dfs.createWrappedOutputStream(dfsos, statistics);
}
Project: hadoop-oss    File: ChecksumFileSystem.java
private FSDataOutputStream create(Path f, FsPermission permission,
    boolean overwrite, boolean createParent, int bufferSize,
    short replication, long blockSize,
    Progressable progress) throws IOException {
  Path parent = f.getParent();
  if (parent != null) {
    if (!createParent && !exists(parent)) {
      throw new FileNotFoundException("Parent directory doesn't exist: "
          + parent);
    } else if (!mkdirs(parent)) {
      throw new IOException("Mkdirs failed to create " + parent
          + " (exists=" + exists(parent) + ", cwd=" + getWorkingDirectory()
          + ")");
    }
  }
  final FSDataOutputStream out;
  if (writeChecksum) {
    out = new FSDataOutputStream(
        new ChecksumFSOutputSummer(this, f, overwrite, bufferSize, replication,
            blockSize, progress, permission), null);
  } else {
    out = fs.create(f, permission, overwrite, bufferSize, replication,
        blockSize, progress);
    // remove the checksum file since we aren't writing one
    Path checkFile = getChecksumFile(f);
    if (fs.exists(checkFile)) {
      fs.delete(checkFile, true);
    }
  }
  return out;
}
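The writeChecksum branch above can be exercised through FileSystem.setWriteChecksum; a sketch against the local filesystem (the file name is made up):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

public class ChecksumToggleExample {
  public static void main(String[] args) throws Exception {
    LocalFileSystem fs = FileSystem.getLocal(new Configuration());
    Path p = new Path("/tmp/no-crc.txt");
    // Take the writeChecksum == false branch: no .crc sidecar is written,
    // and any stale checksum file for this path is removed.
    fs.setWriteChecksum(false);
    fs.create(p, true).close();
    System.out.println(".crc exists? "
        + fs.getRawFileSystem().exists(fs.getChecksumFile(p)));
  }
}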
Project: hadoop    File: ViewFileSystem.java
@Override
public FSDataOutputStream create(final Path f, final FsPermission permission,
    final boolean overwrite, final int bufferSize, final short replication,
    final long blockSize, final Progressable progress) throws IOException {
  InodeTree.ResolveResult<FileSystem> res;
  try {
    res = fsState.resolve(getUriPath(f), false);
  } catch (FileNotFoundException e) {
      throw readOnlyMountTable("create", f);
  }
  assert(res.remainingPath != null);
  return res.targetFileSystem.create(res.remainingPath, permission,
       overwrite, bufferSize, replication, blockSize, progress);
}
Project: hadoop    File: S3AOutputStream.java
public ProgressableProgressListener(Upload upload, Progressable progress, 
  FileSystem.Statistics statistics) {
  this.upload = upload;
  this.progress = progress;
  this.statistics = statistics;
  this.lastBytesTransferred = 0;
}
Project: hadoop-oss    File: ChecksumFs.java
public ChecksumFSOutputSummer(final ChecksumFs fs, final Path file, 
  final EnumSet<CreateFlag> createFlag,
  final FsPermission absolutePermission, final int bufferSize,
  final short replication, final long blockSize, 
  final Progressable progress, final ChecksumOpt checksumOpt,
  final boolean createParent) throws IOException {
  super(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,
      fs.getBytesPerSum()));

  // checksumOpt is passed down to the raw fs. Unless the raw fs implements
  // checksums internally, checksumOpt will be ignored. If the raw fs does
  // checksum internally, we will end up with two layers of checksumming,
  // i.e. checksumming the checksum file.
  this.datas = fs.getRawFs().createInternal(file, createFlag,
      absolutePermission, bufferSize, replication, blockSize, progress,
       checksumOpt,  createParent);

  // Now create the checksum file; adjust the buffer size
  int bytesPerSum = fs.getBytesPerSum();
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
  this.sums = fs.getRawFs().createInternal(fs.getChecksumFile(file),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
      absolutePermission, sumBufferSize, replication, blockSize, progress,
      checksumOpt, createParent);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerSum);
}
Project: hadoop    File: BloomMapFile.java
@Deprecated
public Writer(Configuration conf, FileSystem fs, String dirName,
    Class<? extends WritableComparable> keyClass,
    Class<? extends Writable> valClass, CompressionType compress,
    CompressionCodec codec, Progressable progress) throws IOException {
  this(conf, new Path(dirName), keyClass(keyClass), valueClass(valClass), 
       compression(compress, codec), progressable(progress));
}
Project: hadoop    File: ChecksumFs.java
public ChecksumFSOutputSummer(final ChecksumFs fs, final Path file, 
  final EnumSet<CreateFlag> createFlag,
  final FsPermission absolutePermission, final int bufferSize,
  final short replication, final long blockSize, 
  final Progressable progress, final ChecksumOpt checksumOpt,
  final boolean createParent) throws IOException {
  super(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,
      fs.getBytesPerSum()));

  // checksumOpt is passed down to the raw fs. Unless the raw fs implements
  // checksums internally, checksumOpt will be ignored. If the raw fs does
  // checksum internally, we will end up with two layers of checksumming,
  // i.e. checksumming the checksum file.
  this.datas = fs.getRawFs().createInternal(file, createFlag,
      absolutePermission, bufferSize, replication, blockSize, progress,
       checksumOpt,  createParent);

  // Now create the checksum file; adjust the buffer size
  int bytesPerSum = fs.getBytesPerSum();
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
  this.sums = fs.getRawFs().createInternal(fs.getChecksumFile(file),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
      absolutePermission, sumBufferSize, replication, blockSize, progress,
      checksumOpt, createParent);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerSum);
}
Project: dremio-oss    File: FileSystemWrapper.java
@Override
public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException {
  try {
    return newFSDataOutputStreamWrapper(underlyingFs.create(f, overwrite, bufferSize, replication, blockSize, progress));
  } catch(FSError e) {
    throw propagateFSError(e);
  }
}
Project: hadoop    File: ChecksumFileSystem.java
private FSDataOutputStream create(Path f, FsPermission permission,
    boolean overwrite, boolean createParent, int bufferSize,
    short replication, long blockSize,
    Progressable progress) throws IOException {
  Path parent = f.getParent();
  if (parent != null) {
    if (!createParent && !exists(parent)) {
      throw new FileNotFoundException("Parent directory doesn't exist: "
          + parent);
    } else if (!mkdirs(parent)) {
      throw new IOException("Mkdirs failed to create " + parent
          + " (exists=" + exists(parent) + ", cwd=" + getWorkingDirectory()
          + ")");
    }
  }
  final FSDataOutputStream out;
  if (writeChecksum) {
    out = new FSDataOutputStream(
        new ChecksumFSOutputSummer(this, f, overwrite, bufferSize, replication,
            blockSize, progress, permission), null);
  } else {
    out = fs.create(f, permission, overwrite, bufferSize, replication,
        blockSize, progress);
    // remove the checksum file since we aren't writing one
    Path checkFile = getChecksumFile(f);
    if (fs.exists(checkFile)) {
      fs.delete(checkFile, true);
    }
  }
  return out;
}
Project: hadoop-oss    File: ChRootedFileSystem.java
@Override
public FSDataOutputStream create(final Path f, final FsPermission permission,
    final boolean overwrite, final int bufferSize, final short replication,
    final long blockSize, final Progressable progress) throws IOException {
  return super.create(fullPath(f), permission, overwrite, bufferSize,
      replication, blockSize, progress);
}
Project: dremio-oss    File: FileSystemWrapper.java
@Override
public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize, Progressable progress) throws IOException {
  try {
    return newFSDataOutputStreamWrapper(underlyingFs.create(f, overwrite, bufferSize, progress));
  } catch(FSError e) {
    throw propagateFSError(e);
  }
}
Project: ditb    File: TableOutputFormat.java
@Override
public RecordWriter getRecordWriter(FileSystem ignored, JobConf job, String name,
    Progressable progress)
throws IOException {
  // expecting exactly one path
  TableName tableName = TableName.valueOf(job.get(OUTPUT_TABLE));
  BufferedMutator mutator = null;
  // The Connection is not closed; it dies with the JVM. No cleanup is possible.
  Connection connection = ConnectionFactory.createConnection(job);
  mutator = connection.getBufferedMutator(tableName);
  // "Clear write buffer on fail" is true by default, so no need to reset it.
  return new TableRecordWriter(mutator);
}
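For reference, a hedged sketch of the old-API job wiring that leads into this getRecordWriter, assuming the stock HBase mapred package layout; the table name is invented:

import org.apache.hadoop.hbase.mapred.TableOutputFormat;
import org.apache.hadoop.mapred.JobConf;

public class TableOutputJobSetup {
  public static JobConf configure(JobConf job) {
    // OUTPUT_TABLE is the key read by getRecordWriter above.
    job.set(TableOutputFormat.OUTPUT_TABLE, "web_metrics");
    job.setOutputFormat(TableOutputFormat.class);
    return job;
  }
}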
Project: hadoop    File: Hdfs.java
@Override
public HdfsDataOutputStream createInternal(Path f,
    EnumSet<CreateFlag> createFlag, FsPermission absolutePermission,
    int bufferSize, short replication, long blockSize, Progressable progress,
    ChecksumOpt checksumOpt, boolean createParent) throws IOException {

  final DFSOutputStream dfsos = dfs.primitiveCreate(getUriPath(f),
    absolutePermission, createFlag, createParent, replication, blockSize,
    progress, bufferSize, checksumOpt);
  return dfs.createWrappedOutputStream(dfsos, statistics,
      dfsos.getInitialLen());
}
Project: hadoop    File: LazyOutputFormat.java
public LazyRecordWriter(JobConf job, OutputFormat of, String name,
    Progressable progress)  throws IOException {
  this.of = of;
  this.job = job;
  this.name = name;
  this.progress = progress;
}
Project: hadoop    File: TestFilterFileSystem.java
public FSDataOutputStream create(Path f,
    FsPermission permission,
    EnumSet<CreateFlag> flags,
    int bufferSize,
    short replication,
    long blockSize,
    Progressable progress) throws IOException {
  return null;
}
Project: hadoop    File: FilterFileSystem.java
@Override
protected FSDataOutputStream primitiveCreate(Path f,
    FsPermission absolutePermission, EnumSet<CreateFlag> flag,
    int bufferSize, short replication, long blockSize,
    Progressable progress, ChecksumOpt checksumOpt)
    throws IOException {
  return fs.primitiveCreate(f, absolutePermission, flag,
      bufferSize, replication, blockSize, progress, checksumOpt);
}
Project: hadoop-oss    File: BloomMapFile.java
@Deprecated
public Writer(Configuration conf, FileSystem fs, String dirName,
    Class<? extends WritableComparable> keyClass,
    Class<? extends Writable> valClass, CompressionType compress,
    CompressionCodec codec, Progressable progress) throws IOException {
  this(conf, new Path(dirName), keyClass(keyClass), valueClass(valClass), 
       compression(compress, codec), progressable(progress));
}
Project: dremio-oss    File: FileSystemWrapper.java
@Override
public FSDataOutputStream create(Path f, FsPermission permission, EnumSet<CreateFlag> flags, int bufferSize,
    short replication, long blockSize, Progressable progress, ChecksumOpt checksumOpt) throws IOException {
  try {
    return newFSDataOutputStreamWrapper(underlyingFs.create(f, permission, flags, bufferSize, replication,
        blockSize, progress, checksumOpt));
  } catch(FSError e) {
    throw propagateFSError(e);
  }
}
Project: hadoop    File: Merger.java
public MergeQueue(Configuration conf, FileSystem fs,
    List<Segment<K, V>> segments, RawComparator<K> comparator,
    Progressable reporter, boolean sortSegments, CompressionCodec codec,
    TaskType taskType) {
  this(conf, fs, segments, comparator, reporter, sortSegments,
      taskType);
  this.codec = codec;
}
Project: hadoop-oss    File: MapFile.java
/** Create the named map for keys of the named class. 
 * @deprecated Use Writer(Configuration, Path, Option...) instead.
 */
@Deprecated
public Writer(Configuration conf, FileSystem fs, String dirName,
              Class<? extends WritableComparable> keyClass, Class valClass,
              CompressionType compress, 
              Progressable progress) throws IOException {
  this(conf, new Path(dirName), keyClass(keyClass), valueClass(valClass),
       compression(compress), progressable(progress));
}
Project: hadoop    File: TestFilterFileSystem.java
public FSDataOutputStream create(Path f,
    FsPermission permission,
    EnumSet<CreateFlag> flags,
    int bufferSize,
    short replication,
    long blockSize,
    Progressable progress,
    ChecksumOpt checksumOpt) throws IOException {
  return null;
}
Project: WIFIProbe    File: HDFSTool.java
public static void uploadFiles(final InputStream in, final String outputFile) throws IOException {
  String dst = NodeConfig.HDFS_PATH + outputFile;
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(URI.create(dst), conf);
  OutputStream out = fs.create(new Path(dst), new Progressable() {
    public void progress() {
      System.out.print(". ");
    }
  });
  IOUtils.copyBytes(in, out, BUFFER_SIZE, true);
}
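A possible call site for uploadFiles (the local path is invented; NodeConfig.HDFS_PATH and BUFFER_SIZE come from the surrounding HDFSTool class):

import java.io.FileInputStream;
import java.io.InputStream;

public class HDFSToolUsage {
  public static void main(String[] args) throws Exception {
    // Prints a dot for each progress callback while copying to HDFS.
    try (InputStream in = new FileInputStream("/tmp/probe-data.log")) {
      HDFSTool.uploadFiles(in, "/probe-data.log");
    }
  }
}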
Project: hadoop    File: DFSOutputStream.java
private DFSOutputStream(DFSClient dfsClient, String src, Progressable progress,
    HdfsFileStatus stat, DataChecksum checksum) throws IOException {
  super(getChecksum4Compute(checksum, stat));
  this.dfsClient = dfsClient;
  this.src = src;
  this.fileId = stat.getFileId();
  this.blockSize = stat.getBlockSize();
  this.blockReplication = stat.getReplication();
  this.fileEncryptionInfo = stat.getFileEncryptionInfo();
  this.progress = progress;
  this.cachingStrategy = new AtomicReference<CachingStrategy>(
      dfsClient.getDefaultWriteCachingStrategy());
  if ((progress != null) && DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug(
        "Set non-null progress callback on DFSOutputStream " + src);
  }

  this.bytesPerChecksum = checksum.getBytesPerChecksum();
  if (bytesPerChecksum <= 0) {
    throw new HadoopIllegalArgumentException(
        "Invalid value: bytesPerChecksum = " + bytesPerChecksum + " <= 0");
  }
  if (blockSize % bytesPerChecksum != 0) {
    throw new HadoopIllegalArgumentException("Invalid values: "
        + DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY + " (=" + bytesPerChecksum
        + ") must divide block size (=" + blockSize + ").");
  }
  this.checksum4WriteBlock = checksum;

  this.dfsclientSlowLogThresholdMs =
    dfsClient.getConf().dfsclientSlowIoWarningThresholdMs;
  this.byteArrayManager = dfsClient.getClientContext().getByteArrayManager();
}
Project: hadoop    File: FilterFileSystem.java
@Override
public FSDataOutputStream create(Path f,
      FsPermission permission,
      EnumSet<CreateFlag> flags,
      int bufferSize,
      short replication,
      long blockSize,
      Progressable progress,
      ChecksumOpt checksumOpt) throws IOException {
  return fs.create(f, permission,
    flags, bufferSize, replication, blockSize, progress, checksumOpt);
}