Example source code for the Java class org.apache.hadoop.mapred.OutputFormat
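
The snippets below are collected from open-source projects and all program against the legacy "mapred" API. As a point of reference, here is a minimal sketch (a hypothetical class, not taken from any project on this page) of the contract those snippets implement or consume: an OutputFormat validates the output specification up front and hands out one RecordWriter per task attempt.

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.OutputFormat;
import org.apache.hadoop.mapred.RecordWriter;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.Progressable;

/** Hypothetical no-op format: every record is accepted and dropped. */
public class DiscardingOutputFormat<K, V> implements OutputFormat<K, V> {

  @Override
  public RecordWriter<K, V> getRecordWriter(FileSystem ignored, JobConf job,
      String name, Progressable progress) throws IOException {
    // One writer per task attempt; "name" is the per-task output name.
    return new RecordWriter<K, V>() {
      @Override
      public void write(K key, V value) {
        // discard the record
      }

      @Override
      public void close(Reporter reporter) {
        // nothing to flush or release
      }
    };
  }

  @Override
  public void checkOutputSpecs(FileSystem ignored, JobConf job) throws IOException {
    // No output directory or permissions to validate for this no-op format.
  }
}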

Project: ditb    File: TableMapReduceUtil.java
/**
 * @see org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil#addDependencyJars(org.apache.hadoop.mapreduce.Job)
 */
public static void addDependencyJars(JobConf job) throws IOException {
  org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addHBaseDependencyJars(job);
  org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJars(
    job,
    // when making changes here, consider also mapreduce.TableMapReduceUtil
    // pull job classes
    job.getMapOutputKeyClass(),
    job.getMapOutputValueClass(),
    job.getOutputKeyClass(),
    job.getOutputValueClass(),
    job.getPartitionerClass(),
    job.getClass("mapred.input.format.class", TextInputFormat.class, InputFormat.class),
    job.getClass("mapred.output.format.class", TextOutputFormat.class, OutputFormat.class),
    job.getCombinerClass());
}
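
A hedged usage sketch for the helper above: addDependencyJars ships the jars of whatever classes are already configured on the JobConf, so formats and key/value classes are set first. The class name, formats and key/value types below are illustrative assumptions, not part of the ditb code.

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapred.TableMapReduceUtil;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;

public class DependencyJarsUsageSketch {
  public static JobConf newJob() throws IOException {
    JobConf job = new JobConf(HBaseConfiguration.create());
    job.setInputFormat(TextInputFormat.class);    // read back as "mapred.input.format.class"
    job.setOutputFormat(TextOutputFormat.class);  // read back as "mapred.output.format.class"
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);
    // Ship the HBase jars plus the jars of the classes configured on this JobConf.
    TableMapReduceUtil.addDependencyJars(job);
    return job;
  }
}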
Project: mnemonic    File: MneMapredBufferDataTest.java
@Test(enabled = true)
public void testWriteBufferData() throws Exception {
  NullWritable nada = NullWritable.get();
  MneDurableOutputSession<DurableBuffer<?>> sess =
      new MneDurableOutputSession<DurableBuffer<?>>(null, m_conf,
          MneConfigHelper.DEFAULT_OUTPUT_CONFIG_PREFIX);
  MneDurableOutputValue<DurableBuffer<?>> mdvalue =
      new MneDurableOutputValue<DurableBuffer<?>>(sess);
  OutputFormat<NullWritable, MneDurableOutputValue<DurableBuffer<?>>> outputFormat =
      new MneOutputFormat<MneDurableOutputValue<DurableBuffer<?>>>();
  RecordWriter<NullWritable, MneDurableOutputValue<DurableBuffer<?>>> writer =
      outputFormat.getRecordWriter(m_fs, m_conf, null, null);
  DurableBuffer<?> dbuf = null;
  Checksum cs = new CRC32();
  cs.reset();
  for (int i = 0; i < m_reccnt; ++i) {
    dbuf = genupdDurableBuffer(sess, cs);
    Assert.assertNotNull(dbuf);
    writer.write(nada, mdvalue.of(dbuf));
  }
  m_checksum = cs.getValue();
  writer.close(null);
  sess.close();
}
Project: flink    File: HadoopOutputFormatTest.java
@Test
public void testOpen() throws Exception {

    OutputFormat<String, Long> dummyOutputFormat = mock(DummyOutputFormat.class);
    DummyOutputCommitter outputCommitter = mock(DummyOutputCommitter.class);
    JobConf jobConf = Mockito.spy(new JobConf());
    when(jobConf.getOutputCommitter()).thenReturn(outputCommitter);

    HadoopOutputFormat<String, Long> outputFormat = new HadoopOutputFormat<>(dummyOutputFormat, jobConf);

    outputFormat.open(1, 1);

    verify(jobConf, times(2)).getOutputCommitter();
    verify(outputCommitter, times(1)).setupJob(any(JobContext.class));
    verify(dummyOutputFormat, times(1)).getRecordWriter(any(FileSystem.class), any(JobConf.class), anyString(), any(Progressable.class));
}
Project: flink    File: HadoopOutputFormatTest.java
@Test
public void testCloseWithTaskCommit() throws Exception {
    OutputFormat<String, Long> dummyOutputFormat = mock(DummyOutputFormat.class);
    DummyOutputCommitter outputCommitter = mock(DummyOutputCommitter.class);
    when(outputCommitter.needsTaskCommit(any(TaskAttemptContext.class))).thenReturn(true);
    DummyRecordWriter recordWriter = mock(DummyRecordWriter.class);
    JobConf jobConf = mock(JobConf.class);

    HadoopOutputFormat<String, Long> outputFormat = new HadoopOutputFormat<>(dummyOutputFormat, jobConf);
    outputFormat.recordWriter = recordWriter;
    outputFormat.outputCommitter = outputCommitter;

    outputFormat.close();

    verify(recordWriter, times(1)).close(any(Reporter.class));
    verify(outputCommitter, times(1)).commitTask(any(TaskAttemptContext.class));
}
Project: flink    File: HadoopOutputFormatTest.java
@Test
public void testCloseWithoutTaskCommit() throws Exception {
    OutputFormat<String, Long> dummyOutputFormat = mock(DummyOutputFormat.class);
    DummyOutputCommitter outputCommitter = mock(DummyOutputCommitter.class);
    when(outputCommitter.needsTaskCommit(any(TaskAttemptContext.class))).thenReturn(false);
    DummyRecordWriter recordWriter = mock(DummyRecordWriter.class);
    JobConf jobConf = mock(JobConf.class);

    HadoopOutputFormat<String, Long> outputFormat = new HadoopOutputFormat<>(dummyOutputFormat, jobConf);
    outputFormat.recordWriter = recordWriter;
    outputFormat.outputCommitter = outputCommitter;

    outputFormat.close();

    verify(recordWriter, times(1)).close(any(Reporter.class));
    verify(outputCommitter, times(0)).commitTask(any(TaskAttemptContext.class));
}
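
For context on these tests, a hedged sketch of how the wrapper is used outside of mocks: Flink's HadoopOutputFormat adapts a mapred OutputFormat so a DataSet of Tuple2 records can be written through it. The class name, output path and key/value types are assumptions for illustration, and flink-hadoop-compatibility is assumed to be on the classpath.

import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.hadoop.mapred.HadoopOutputFormat;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TextOutputFormat;

public class MapredSinkSketch {
  public static void main(String[] args) throws Exception {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

    JobConf jobConf = new JobConf();
    FileOutputFormat.setOutputPath(jobConf, new Path("/tmp/flink-mapred-out")); // hypothetical path

    DataSet<Tuple2<Text, LongWritable>> result =
        env.fromElements(Tuple2.of(new Text("key"), new LongWritable(1L)));

    // Flink drives open()/writeRecord()/close() on the wrapper, as exercised in the tests above.
    result.output(new HadoopOutputFormat<>(new TextOutputFormat<Text, LongWritable>(), jobConf));
    env.execute("mapred OutputFormat via Flink");
  }
}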
Project: hazelcast-jet    File: WriteHdfsP.java
@Override @Nonnull
public List<Processor> get(int count) {
    return processorList = range(0, count).mapToObj(i -> {
        try {
            String uuid = context.jetInstance().getCluster().getLocalMember().getUuid();
            TaskAttemptID taskAttemptID = new TaskAttemptID("jet-node-" + uuid, jobContext.getJobID().getId(),
                    JOB_SETUP, i, 0);
            jobConf.set("mapred.task.id", taskAttemptID.toString());
            jobConf.setInt("mapred.task.partition", i);

            TaskAttemptContextImpl taskAttemptContext = new TaskAttemptContextImpl(jobConf, taskAttemptID);
            @SuppressWarnings("unchecked")
            OutputFormat<K, V> outFormat = jobConf.getOutputFormat();
            RecordWriter<K, V> recordWriter = outFormat.getRecordWriter(
                    null, jobConf, uuid + '-' + valueOf(i), Reporter.NULL);
            return new WriteHdfsP<>(
                    recordWriter, taskAttemptContext, outputCommitter, extractKeyFn, extractValueFn);
        } catch (IOException e) {
            throw new JetException(e);
        }

    }).collect(toList());
}
Project: pbase    File: TableMapReduceUtil.java
/**
 * @see org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil#addDependencyJars(org.apache.hadoop.mapreduce.Job)
 */
public static void addDependencyJars(JobConf job) throws IOException {
  org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addHBaseDependencyJars(job);
  org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJars(
    job,
    // when making changes here, consider also mapreduce.TableMapReduceUtil
    // pull job classes
    job.getMapOutputKeyClass(),
    job.getMapOutputValueClass(),
    job.getOutputKeyClass(),
    job.getOutputValueClass(),
    job.getPartitionerClass(),
    job.getClass("mapred.input.format.class", TextInputFormat.class, InputFormat.class),
    job.getClass("mapred.output.format.class", TextOutputFormat.class, OutputFormat.class),
    job.getCombinerClass());
}
Project: ignite    File: HadoopV1OutputCollector.java
/**
 * @param jobConf Job configuration.
 * @param taskCtx Task context.
 * @param directWrite Direct write flag.
 * @param fileName File name.
 * @throws IOException In case of IO exception.
 */
HadoopV1OutputCollector(JobConf jobConf, HadoopTaskContext taskCtx, boolean directWrite,
    @Nullable String fileName, TaskAttemptID attempt) throws IOException {
    this.jobConf = jobConf;
    this.taskCtx = taskCtx;
    this.attempt = attempt;

    if (directWrite) {
        jobConf.set("mapreduce.task.attempt.id", attempt.toString());

        OutputFormat outFormat = jobConf.getOutputFormat();

        writer = outFormat.getRecordWriter(null, jobConf, fileName, Reporter.NULL);
    }
    else
        writer = null;
}
Project: HIndex    File: TableMapReduceUtil.java
/**
 * @see org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil#addDependencyJars(org.apache.hadoop.mapreduce.Job)
 */
public static void addDependencyJars(JobConf job) throws IOException {
  org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addHBaseDependencyJars(job);
  org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJars(
    job,
    // when making changes here, consider also mapreduce.TableMapReduceUtil
    // pull job classes
    job.getMapOutputKeyClass(),
    job.getMapOutputValueClass(),
    job.getOutputKeyClass(),
    job.getOutputValueClass(),
    job.getPartitionerClass(),
    job.getClass("mapred.input.format.class", TextInputFormat.class, InputFormat.class),
    job.getClass("mapred.output.format.class", TextOutputFormat.class, OutputFormat.class),
    job.getCombinerClass());
}
Project: PyroDB    File: TableMapReduceUtil.java
/**
 * @see org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil#addDependencyJars(org.apache.hadoop.mapreduce.Job)
 */
public static void addDependencyJars(JobConf job) throws IOException {
  org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addHBaseDependencyJars(job);
  org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJars(
    job,
    // when making changes here, consider also mapreduce.TableMapReduceUtil
    // pull job classes
    job.getMapOutputKeyClass(),
    job.getMapOutputValueClass(),
    job.getOutputKeyClass(),
    job.getOutputValueClass(),
    job.getPartitionerClass(),
    job.getClass("mapred.input.format.class", TextInputFormat.class, InputFormat.class),
    job.getClass("mapred.output.format.class", TextOutputFormat.class, OutputFormat.class),
    job.getCombinerClass());
}
Project: c5    File: TableMapReduceUtil.java
/**
 * @see org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil#addDependencyJars(org.apache.hadoop.mapreduce.Job)
 */
public static void addDependencyJars(JobConf job) throws IOException {
  org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addHBaseDependencyJars(job);
  org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJars(
    job,
    // when making changes here, consider also mapreduce.TableMapReduceUtil
    // pull job classes
    job.getMapOutputKeyClass(),
    job.getMapOutputValueClass(),
    job.getOutputKeyClass(),
    job.getOutputValueClass(),
    job.getPartitionerClass(),
    job.getClass("mapred.input.format.class", TextInputFormat.class, InputFormat.class),
    job.getClass("mapred.output.format.class", TextOutputFormat.class, OutputFormat.class),
    job.getCombinerClass());
}
Project: HBase-Research    File: TableMapReduceUtil.java
/**
 * @see org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil#addDependencyJars(Job)
 */
public static void addDependencyJars(JobConf job) throws IOException {
  org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJars(
    job,
    org.apache.zookeeper.ZooKeeper.class,
    com.google.common.base.Function.class,
    com.google.protobuf.Message.class,
    job.getMapOutputKeyClass(),
    job.getMapOutputValueClass(),
    job.getOutputKeyClass(),
    job.getOutputValueClass(),
    job.getPartitionerClass(),
    job.getClass("mapred.input.format.class", TextInputFormat.class, InputFormat.class),
    job.getClass("mapred.output.format.class", TextOutputFormat.class, OutputFormat.class),
    job.getCombinerClass());
}
Project: hbase-0.94.8-qod    File: TableMapReduceUtil.java
/**
 * @see org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil#addDependencyJars(Job)
 */
public static void addDependencyJars(JobConf job) throws IOException {
  org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJars(
    job,
    org.apache.zookeeper.ZooKeeper.class,
    com.google.common.base.Function.class,
    com.google.protobuf.Message.class,
    job.getMapOutputKeyClass(),
    job.getMapOutputValueClass(),
    job.getOutputKeyClass(),
    job.getOutputValueClass(),
    job.getPartitionerClass(),
    job.getClass("mapred.input.format.class", TextInputFormat.class, InputFormat.class),
    job.getClass("mapred.output.format.class", TextOutputFormat.class, OutputFormat.class),
    job.getCombinerClass());
}
Project: DominoHBase    File: TableMapReduceUtil.java
/**
 * @see org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil#addDependencyJars(Job)
 */
public static void addDependencyJars(JobConf job) throws IOException {
  org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJars(
    job,
    org.apache.zookeeper.ZooKeeper.class,
    com.google.common.base.Function.class,
    com.google.protobuf.Message.class,
    job.getMapOutputKeyClass(),
    job.getMapOutputValueClass(),
    job.getOutputKeyClass(),
    job.getOutputValueClass(),
    job.getPartitionerClass(),
    job.getClass("mapred.input.format.class", TextInputFormat.class, InputFormat.class),
    job.getClass("mapred.output.format.class", TextOutputFormat.class, OutputFormat.class),
    job.getCombinerClass());
}
Project: hindex    File: TableMapReduceUtil.java
/**
 * @see org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil#addDependencyJars(Job)
 */
public static void addDependencyJars(JobConf job) throws IOException {
  org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJars(
    job,
    org.apache.zookeeper.ZooKeeper.class,
    com.google.common.base.Function.class,
    com.google.protobuf.Message.class,
    job.getMapOutputKeyClass(),
    job.getMapOutputValueClass(),
    job.getOutputKeyClass(),
    job.getOutputValueClass(),
    job.getPartitionerClass(),
    job.getClass("mapred.input.format.class", TextInputFormat.class, InputFormat.class),
    job.getClass("mapred.output.format.class", TextOutputFormat.class, OutputFormat.class),
    job.getCombinerClass());
}
Project: hadoop    File: LazyOutputFormat.java
/**
 * Set the underlying output format for LazyOutputFormat.
 * @param job the {@link JobConf} to modify
 * @param theClass the underlying class
 */
@SuppressWarnings("unchecked")
public static void  setOutputFormatClass(JobConf job, 
    Class<? extends OutputFormat> theClass) {
    job.setOutputFormat(LazyOutputFormat.class);
    job.setClass("mapreduce.output.lazyoutputformat.outputformat", theClass, OutputFormat.class);
}
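
A hedged usage sketch for the setter above: the lazy wrapper is installed as the job's output format and the concrete format is registered behind it, so idle tasks never create empty part files. The class name, path and key/value types are illustrative assumptions.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.mapred.lib.LazyOutputFormat;

public class LazyOutputUsageSketch {
  public static JobConf newJob() {
    JobConf conf = new JobConf();
    conf.setOutputKeyClass(Text.class);
    conf.setOutputValueClass(IntWritable.class);
    FileOutputFormat.setOutputPath(conf, new Path("/tmp/lazy-out")); // hypothetical path
    // TextOutputFormat becomes the real format behind the lazy wrapper;
    // part files are only created once the first record is written.
    LazyOutputFormat.setOutputFormatClass(conf, TextOutputFormat.class);
    return conf;
  }
}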
Project: hadoop    File: LazyOutputFormat.java
@SuppressWarnings("unchecked")
private void getBaseOutputFormat(JobConf job) throws IOException {
  baseOut = ReflectionUtils.newInstance(
      job.getClass("mapreduce.output.lazyoutputformat.outputformat", null, OutputFormat.class), 
      job); 
  if (baseOut == null) {
    throw new IOException("Ouput format not set for LazyOutputFormat");
  }
}
Project: hadoop    File: LazyOutputFormat.java
public LazyRecordWriter(JobConf job, OutputFormat of, String name,
    Progressable progress)  throws IOException {
  this.of = of;
  this.job = job;
  this.name = name;
  this.progress = progress;
}
Project: aliyun-oss-hadoop-fs    File: LazyOutputFormat.java
/**
 * Set the underlying output format for LazyOutputFormat.
 * @param job the {@link JobConf} to modify
 * @param theClass the underlying class
 */
@SuppressWarnings("unchecked")
public static void  setOutputFormatClass(JobConf job, 
    Class<? extends OutputFormat> theClass) {
    job.setOutputFormat(LazyOutputFormat.class);
    job.setClass("mapreduce.output.lazyoutputformat.outputformat", theClass, OutputFormat.class);
}
Project: aliyun-oss-hadoop-fs    File: LazyOutputFormat.java
@SuppressWarnings("unchecked")
private void getBaseOutputFormat(JobConf job) throws IOException {
  baseOut = ReflectionUtils.newInstance(
      job.getClass("mapreduce.output.lazyoutputformat.outputformat", null, OutputFormat.class), 
      job); 
  if (baseOut == null) {
    throw new IOException("Ouput format not set for LazyOutputFormat");
  }
}
Project: aliyun-oss-hadoop-fs    File: LazyOutputFormat.java
public LazyRecordWriter(JobConf job, OutputFormat of, String name,
    Progressable progress)  throws IOException {
  this.of = of;
  this.job = job;
  this.name = name;
  this.progress = progress;
}
Project: big-c    File: LazyOutputFormat.java
/**
 * Set the underlying output format for LazyOutputFormat.
 * @param job the {@link JobConf} to modify
 * @param theClass the underlying class
 */
@SuppressWarnings("unchecked")
public static void  setOutputFormatClass(JobConf job, 
    Class<? extends OutputFormat> theClass) {
    job.setOutputFormat(LazyOutputFormat.class);
    job.setClass("mapreduce.output.lazyoutputformat.outputformat", theClass, OutputFormat.class);
}
Project: big-c    File: LazyOutputFormat.java
@SuppressWarnings("unchecked")
private void getBaseOutputFormat(JobConf job) throws IOException {
  baseOut = ReflectionUtils.newInstance(
      job.getClass("mapreduce.output.lazyoutputformat.outputformat", null, OutputFormat.class), 
      job); 
  if (baseOut == null) {
    throw new IOException("Ouput format not set for LazyOutputFormat");
  }
}
Project: big-c    File: LazyOutputFormat.java
public LazyRecordWriter(JobConf job, OutputFormat of, String name,
    Progressable progress)  throws IOException {
  this.of = of;
  this.job = job;
  this.name = name;
  this.progress = progress;
}
Project: flink    File: HadoopOutputFormatTest.java
@Test
public void testWriteRecord() throws Exception {
    OutputFormat<String, Long> dummyOutputFormat = mock(DummyOutputFormat.class);
    DummyRecordWriter recordWriter = mock(DummyRecordWriter.class);
    JobConf jobConf = mock(JobConf.class);

    HadoopOutputFormat<String, Long> outputFormat = new HadoopOutputFormat<>(dummyOutputFormat, jobConf);
    outputFormat.recordWriter = recordWriter;

    outputFormat.writeRecord(new Tuple2<>("key", 1L));

    verify(recordWriter, times(1)).write(anyString(), anyLong());
}
Project: flink    File: HadoopOutputFormatTest.java
@Test
public void testFinalizeGlobal() throws Exception {
    OutputFormat<String, Long> dummyOutputFormat = mock(DummyOutputFormat.class);
    DummyOutputCommitter outputCommitter = mock(DummyOutputCommitter.class);
    JobConf jobConf = Mockito.spy(new JobConf());
    when(jobConf.getOutputCommitter()).thenReturn(outputCommitter);

    HadoopOutputFormat<String, Long> outputFormat = new HadoopOutputFormat<>(dummyOutputFormat, jobConf);

    outputFormat.finalizeGlobal(1);

    verify(outputCommitter, times(1)).commitJob(any(JobContext.class));
}
Project: LCIndex-HBase-0.94.16    File: TableMapReduceUtil.java
/**
 * @see org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil#addDependencyJars(org.apache.hadoop.mapreduce.Job)
 */
public static void addDependencyJars(JobConf job) throws IOException {
  org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addHBaseDependencyJars(job);
  org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJars(
    job,
    // when making changes here, consider also mapreduce.TableMapReduceUtil
    job.getMapOutputKeyClass(),
    job.getMapOutputValueClass(),
    job.getOutputKeyClass(),
    job.getOutputValueClass(),
    job.getPartitionerClass(),
    job.getClass("mapred.input.format.class", TextInputFormat.class, InputFormat.class),
    job.getClass("mapred.output.format.class", TextOutputFormat.class, OutputFormat.class),
    job.getCombinerClass());
}
Project: hadoop-2.6.0-cdh5.4.3    File: LazyOutputFormat.java
/**
 * Set the underlying output format for LazyOutputFormat.
 * @param job the {@link JobConf} to modify
 * @param theClass the underlying class
 */
@SuppressWarnings("unchecked")
public static void  setOutputFormatClass(JobConf job, 
    Class<? extends OutputFormat> theClass) {
    job.setOutputFormat(LazyOutputFormat.class);
    job.setClass("mapreduce.output.lazyoutputformat.outputformat", theClass, OutputFormat.class);
}
Project: hadoop-2.6.0-cdh5.4.3    File: LazyOutputFormat.java
@SuppressWarnings("unchecked")
private void getBaseOutputFormat(JobConf job) throws IOException {
  baseOut = ReflectionUtils.newInstance(
      job.getClass("mapreduce.output.lazyoutputformat.outputformat", null, OutputFormat.class), 
      job); 
  if (baseOut == null) {
    throw new IOException("Ouput format not set for LazyOutputFormat");
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: LazyOutputFormat.java
public LazyRecordWriter(JobConf job, OutputFormat of, String name,
    Progressable progress)  throws IOException {
  this.of = of;
  this.job = job;
  this.name = name;
  this.progress = progress;
}
Project: hadoop-plus    File: LazyOutputFormat.java
/**
 * Set the underlying output format for LazyOutputFormat.
 * @param job the {@link JobConf} to modify
 * @param theClass the underlying class
 */
@SuppressWarnings("unchecked")
public static void  setOutputFormatClass(JobConf job, 
    Class<? extends OutputFormat> theClass) {
    job.setOutputFormat(LazyOutputFormat.class);
    job.setClass("mapreduce.output.lazyoutputformat.outputformat", theClass, OutputFormat.class);
}
Project: hadoop-plus    File: LazyOutputFormat.java
@SuppressWarnings("unchecked")
private void getBaseOutputFormat(JobConf job) throws IOException {
  baseOut = ReflectionUtils.newInstance(
      job.getClass("mapreduce.output.lazyoutputformat.outputformat", null, OutputFormat.class), 
      job); 
  if (baseOut == null) {
    throw new IOException("Ouput format not set for LazyOutputFormat");
  }
}
Project: hadoop-plus    File: LazyOutputFormat.java
public LazyRecordWriter(JobConf job, OutputFormat of, String name,
    Progressable progress)  throws IOException {
  this.of = of;
  this.job = job;
  this.name = name;
  this.progress = progress;
}
Project: systemml    File: OutputInfo.java
public OutputInfo(Class<? extends OutputFormat> formatCls,
        Class<? extends Writable> keyCls, Class<? extends Writable> valueCls)
{
    outputFormatClass=formatCls;
    outputKeyClass=keyCls;
    outputValueClass=valueCls;
}
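
A hedged construction sketch for the constructor above, with an illustrative combination of format and Writable classes (imports assumed: org.apache.hadoop.mapred.TextOutputFormat, org.apache.hadoop.io.NullWritable, org.apache.hadoop.io.Text):

// Describe output written through TextOutputFormat with NullWritable keys and Text values.
OutputInfo textCellInfo =
    new OutputInfo(TextOutputFormat.class, NullWritable.class, Text.class);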