Java class org.apache.hadoop.mapred.SequenceFileRecordReader: example source code
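
Every snippet below sits in the same spot in its project: either an InputFormat.getRecordReader(...) override or the constructor of a wrapping RecordReader, where a SequenceFileRecordReader is built from a JobConf (or Configuration) and a FileSplit. For orientation, here is a minimal self-contained sketch of driving that reader directly with the old mapred API; the Text/Text key and value types, the class name SequenceFileReadSketch, and the single whole-file split are illustrative assumptions, not taken from any project listed here.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.SequenceFileRecordReader;

public class SequenceFileReadSketch {
  // Reads every record of one SequenceFile whose keys and values are Text.
  public static void readAll(Path file) throws IOException {
    JobConf job = new JobConf(new Configuration());
    long length = file.getFileSystem(job).getFileStatus(file).getLen();
    // One split covering the whole file; in a real job the InputFormat computes the splits.
    FileSplit split = new FileSplit(file, 0, length, (String[]) null);

    SequenceFileRecordReader<Text, Text> reader =
        new SequenceFileRecordReader<Text, Text>(job, split);
    try {
      Text key = reader.createKey();
      Text value = reader.createValue();
      while (reader.next(key, value)) {
        // process key and value here
      }
    } finally {
      reader.close();
    }
  }
}
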

Project: GeoCrawler    File: ContentAsTextInputFormat.java
public ContentAsTextRecordReader(Configuration conf, FileSplit split)
    throws IOException {
  sequenceFileRecordReader = new SequenceFileRecordReader<Text, Content>(
      conf, split);
  innerKey = sequenceFileRecordReader.createKey();
  innerValue = sequenceFileRecordReader.createValue();
}
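
The constructor above only wires up the wrapped reader; the conversion from Nutch's Content value to Text typically happens in next(). A hedged sketch of such a delegating method follows; the exact Content-to-Text conversion is an assumption about this class, not code copied from GeoCrawler.

public synchronized boolean next(Text key, Text value) throws IOException {
  // Pull the next record through the wrapped SequenceFileRecordReader,
  // then expose the fetched page content as plain text.
  if (!sequenceFileRecordReader.next(innerKey, innerValue)) {
    return false;
  }
  key.set(innerKey);
  // Content.getContent() returns the raw bytes of the fetched page;
  // using the platform default charset here is a simplifying assumption.
  value.set(new String(innerValue.getContent()));
  return true;
}
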
Project: systemml    File: SamplingSortMRInputFormat.java
@Override
public RecordReader<K, V> getRecordReader(InputSplit split, JobConf job, Reporter reporter)
    throws IOException {
  if (reporter != null)
    reporter.setStatus(split.toString());
  return new SequenceFileRecordReader<>(job, (FileSplit) split);
}
Project: anthelion    File: ContentAsTextInputFormat.java
public ContentAsTextRecordReader(Configuration conf, FileSplit split)
  throws IOException {
  sequenceFileRecordReader = new SequenceFileRecordReader<Text, Content>(
    conf, split);
  innerKey = (Text)sequenceFileRecordReader.createKey();
  innerValue = (Content)sequenceFileRecordReader.createValue();
}
Project: pss    File: CustomSequenceFileInputFormat.java
@Override
public RecordReader<K, V> getRecordReader(InputSplit split, JobConf job, Reporter reporter)
        throws IOException {

    reporter.setStatus(split.toString());

    return new SequenceFileRecordReader<K, V>(job, (FileSplit) split);
}
Project: pss    File: NonSplitableSequenceInputFormat.java
@Override
public RecordReader getRecordReader(InputSplit split,
        JobConf job, Reporter reporter) throws IOException {

    // Remember split metadata in the enclosing format's fields for later use.
    SplitLen = split.getLength();
    FileSplit f = (FileSplit) split;
    path = f.getPath();
    j = job;

    return new SequenceFileRecordReader(job, (FileSplit) split);
}
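
Judging by its name, this input format also disables splitting so that a single SequenceFileRecordReader reads each file end to end. In the old mapred API that is done by overriding FileInputFormat.isSplitable; the override below is a sketch of that usual pattern, an assumption rather than code taken from pss.

@Override
protected boolean isSplitable(FileSystem fs, Path filename) {
  // Hand each file to exactly one split, and therefore one record reader.
  return false;
}
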
Project: hadoop    File: HadoopArchives.java
@Override
public RecordReader<LongWritable, HarEntry> getRecordReader(InputSplit split,
    JobConf job, Reporter reporter) throws IOException {
  return new SequenceFileRecordReader<LongWritable, HarEntry>(job,
             (FileSplit)split);
}
Project: hadoop    File: DistCh.java
/** {@inheritDoc} */
public RecordReader<Text, FileOperation> getRecordReader(InputSplit split,
    JobConf job, Reporter reporter) throws IOException {
  return new SequenceFileRecordReader<Text, FileOperation>(job,
      (FileSplit)split);
}
Project: hadoop    File: DistCpV1.java
/**
 * Returns a reader for this split of the src file list.
 */
public RecordReader<Text, Text> getRecordReader(InputSplit split,
    JobConf job, Reporter reporter) throws IOException {
  return new SequenceFileRecordReader<Text, Text>(job, (FileSplit)split);
}
Project: aliyun-oss-hadoop-fs    File: HadoopArchives.java
@Override
public RecordReader<LongWritable, HarEntry> getRecordReader(InputSplit split,
    JobConf job, Reporter reporter) throws IOException {
  return new SequenceFileRecordReader<LongWritable, HarEntry>(job,
             (FileSplit)split);
}
Project: aliyun-oss-hadoop-fs    File: DistCh.java
/** {@inheritDoc} */
public RecordReader<Text, FileOperation> getRecordReader(InputSplit split,
    JobConf job, Reporter reporter) throws IOException {
  return new SequenceFileRecordReader<Text, FileOperation>(job,
      (FileSplit)split);
}
Project: big-c    File: HadoopArchives.java
@Override
public RecordReader<LongWritable, HarEntry> getRecordReader(InputSplit split,
    JobConf job, Reporter reporter) throws IOException {
  return new SequenceFileRecordReader<LongWritable, HarEntry>(job,
             (FileSplit)split);
}
Project: big-c    File: DistCh.java
/** {@inheritDoc} */
public RecordReader<Text, FileOperation> getRecordReader(InputSplit split,
    JobConf job, Reporter reporter) throws IOException {
  return new SequenceFileRecordReader<Text, FileOperation>(job,
      (FileSplit)split);
}
Project: big-c    File: DistCpV1.java
/**
 * Returns a reader for this split of the src file list.
 */
public RecordReader<Text, Text> getRecordReader(InputSplit split,
    JobConf job, Reporter reporter) throws IOException {
  return new SequenceFileRecordReader<Text, Text>(job, (FileSplit)split);
}
Project: hadoop-2.6.0-cdh5.4.3    File: HadoopArchives.java
@Override
public RecordReader<LongWritable, HarEntry> getRecordReader(InputSplit split,
    JobConf job, Reporter reporter) throws IOException {
  return new SequenceFileRecordReader<LongWritable, HarEntry>(job,
             (FileSplit)split);
}
Project: hadoop-2.6.0-cdh5.4.3    File: DistCh.java
/** {@inheritDoc} */
public RecordReader<Text, FileOperation> getRecordReader(InputSplit split,
    JobConf job, Reporter reporter) throws IOException {
  return new SequenceFileRecordReader<Text, FileOperation>(job,
      (FileSplit)split);
}
Project: hadoop-2.6.0-cdh5.4.3    File: DistCpV1.java
/**
 * Returns a reader for this split of the src file list.
 */
public RecordReader<Text, Text> getRecordReader(InputSplit split,
    JobConf job, Reporter reporter) throws IOException {
  return new SequenceFileRecordReader<Text, Text>(job, (FileSplit)split);
}
Project: hadoop-2.6.0-cdh5.4.3    File: HadoopArchives.java
public RecordReader<LongWritable, Text> getRecordReader(InputSplit split,
    JobConf job, Reporter reporter) throws IOException {
  return new SequenceFileRecordReader<LongWritable, Text>(job,
             (FileSplit)split);
}
Project: hadoop-2.6.0-cdh5.4.3    File: DistCp.java
/**
 * Returns a reader for this split of the src file list.
 */
public RecordReader<Text, Text> getRecordReader(InputSplit split,
    JobConf job, Reporter reporter) throws IOException {
  return new SequenceFileRecordReader<Text, Text>(job, (FileSplit)split);
}
Project: hadoop-EAR    File: HadoopArchives.java
public RecordReader<LongWritable, HarEntry> getRecordReader(InputSplit split,
    JobConf job, Reporter reporter) throws IOException {
  return new SequenceFileRecordReader<LongWritable, HarEntry>(job,
             (FileSplit)split);
}
Project: hadoop-EAR    File: DistCh.java
/** {@inheritDoc} */
public RecordReader<Text, FileOperation> getRecordReader(InputSplit split,
    JobConf job, Reporter reporter) throws IOException {
  return new SequenceFileRecordReader<Text, FileOperation>(job,
      (FileSplit)split);
}
Project: hadoop-EAR    File: DistCp.java
/**
 * Returns a reader for this split of the src file list.
 */
public RecordReader<Text, Text> getRecordReader(InputSplit split,
    JobConf job, Reporter reporter) throws IOException {
  return new SequenceFileRecordReader<Text, Text>(job, (FileSplit)split);
}
Project: hadoop-EAR    File: DataFsck.java
/** {@inheritDoc} */
public RecordReader<Text, Text> getRecordReader(InputSplit split,
    JobConf job, Reporter reporter) throws IOException {
  return new SequenceFileRecordReader<Text, Text>(job,
      (FileSplit) split);
}
Project: hadoop-EAR    File: FastFileCheck.java
@Override
public RecordReader<Text, Text> getRecordReader(InputSplit split,
    JobConf job, Reporter reporter) throws IOException {
  return new SequenceFileRecordReader<Text, Text>(job, (FileSplit) split);
}
Project: hadoop-plus    File: HadoopArchives.java
@Override
public RecordReader<LongWritable, HarEntry> getRecordReader(InputSplit split,
    JobConf job, Reporter reporter) throws IOException {
  return new SequenceFileRecordReader<LongWritable, HarEntry>(job,
             (FileSplit)split);
}
Project: hadoop-plus    File: DistCh.java
/** {@inheritDoc} */
public RecordReader<Text, FileOperation> getRecordReader(InputSplit split,
    JobConf job, Reporter reporter) throws IOException {
  return new SequenceFileRecordReader<Text, FileOperation>(job,
      (FileSplit)split);
}
Project: hadoop-plus    File: DistCpV1.java
/**
 * Returns a reader for this split of the src file list.
 */
public RecordReader<Text, Text> getRecordReader(InputSplit split,
    JobConf job, Reporter reporter) throws IOException {
  return new SequenceFileRecordReader<Text, Text>(job, (FileSplit)split);
}
Project: DistCPPlus    File: CopyInputFormat.java
/**
 * Returns a reader for this split of the src file list.
 */
public RecordReader<Text, Text> getRecordReader(InputSplit split,
        JobConf job, Reporter reporter) throws IOException
{
    return new SequenceFileRecordReader<Text, Text>(job, (FileSplit)split);
}
Project: hops    File: MapReduceEncoder.java
/**
 * {@inheritDoc}
 */
public RecordReader<Text, PolicyInfo> getRecordReader(InputSplit split,
    JobConf job, Reporter reporter) throws IOException {
  return new SequenceFileRecordReader<Text, PolicyInfo>(job,
      (FileSplit) split);
}
Project: hops    File: HadoopArchives.java
@Override
public RecordReader<LongWritable, HarEntry> getRecordReader(InputSplit split,
    JobConf job, Reporter reporter) throws IOException {
  return new SequenceFileRecordReader<LongWritable, HarEntry>(job,
             (FileSplit)split);
}
Project: hops    File: DistCh.java
/** {@inheritDoc} */
public RecordReader<Text, FileOperation> getRecordReader(InputSplit split,
    JobConf job, Reporter reporter) throws IOException {
  return new SequenceFileRecordReader<Text, FileOperation>(job,
      (FileSplit)split);
}
Project: hops    File: DistCpV1.java
/**
 * Returns a reader for this split of the src file list.
 */
public RecordReader<Text, Text> getRecordReader(InputSplit split,
    JobConf job, Reporter reporter) throws IOException {
  return new SequenceFileRecordReader<Text, Text>(job, (FileSplit)split);
}
Project: guagua    File: GuaguaSequenceRecordReader.java
@Override
public void initialize(GuaguaFileSplit split) throws IOException {
    FileSplit fileSplit = new FileSplit(new Path(split.getPath()), split.getOffset(), split.getLength(),
            (String[]) null);
    this.sequenceReader = new SequenceFileRecordReader<KEY, VALUE>(conf, fileSplit);
}
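
Beyond initialize, Guagua's wrapper mostly delegates to the sequenceReader field; cleanup, for example, can simply close the wrapped reader. The close() signature below is an assumption about the Guagua reader interface, not code from the project.

@Override
public void close() throws IOException {
  // Release the underlying SequenceFile reader once Guagua is done with the split.
  if (this.sequenceReader != null) {
    this.sequenceReader.close();
  }
}
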
Project: hadoop-TCP    File: HadoopArchives.java
@Override
public RecordReader<LongWritable, HarEntry> getRecordReader(InputSplit split,
    JobConf job, Reporter reporter) throws IOException {
  return new SequenceFileRecordReader<LongWritable, HarEntry>(job,
             (FileSplit)split);
}
Project: hadoop-TCP    File: DistCh.java
/** {@inheritDoc} */
public RecordReader<Text, FileOperation> getRecordReader(InputSplit split,
    JobConf job, Reporter reporter) throws IOException {
  return new SequenceFileRecordReader<Text, FileOperation>(job,
      (FileSplit)split);
}
Project: hadoop-TCP    File: DistCpV1.java
/**
 * Returns a reader for this split of the src file list.
 */
public RecordReader<Text, Text> getRecordReader(InputSplit split,
    JobConf job, Reporter reporter) throws IOException {
  return new SequenceFileRecordReader<Text, Text>(job, (FileSplit)split);
}
Project: hadoop-on-lustre    File: HadoopArchives.java
@Override
public RecordReader<LongWritable, HarEntry> getRecordReader(InputSplit split,
    JobConf job, Reporter reporter) throws IOException {
  return new SequenceFileRecordReader<LongWritable, HarEntry>(job,
             (FileSplit)split);
}