Java 类org.apache.hadoop.mapred.OutputCommitter 实例源码

项目:ignite    文件:HadoopV1CleanupTask.java   
/** {@inheritDoc} */
@Override public void run(HadoopTaskContext taskCtx) throws IgniteCheckedException {
    HadoopV2TaskContext v2Ctx = (HadoopV2TaskContext)taskCtx;

    JobContext hadoopJobCtx = v2Ctx.jobContext();

    try {
        // Finalize the job through the committer configured on the job conf.
        OutputCommitter cmtr = hadoopJobCtx.getJobConf().getOutputCommitter();

        if (!abort)
            cmtr.commitJob(hadoopJobCtx);
        else
            cmtr.abortJob(hadoopJobCtx, JobStatus.State.FAILED);
    }
    catch (IOException e) {
        // Wrap Hadoop I/O failures into Ignite's checked exception, preserving the cause.
        throw new IgniteCheckedException(e);
    }
}
项目:ignite    文件:HadoopV1SetupTask.java   
/** {@inheritDoc} */
@Override public void run(HadoopTaskContext taskCtx) throws IgniteCheckedException {
    HadoopV2TaskContext v2Ctx = (HadoopV2TaskContext)taskCtx;

    try {
        // Validate output specs before any setup work is done.
        v2Ctx.jobConf().getOutputFormat().checkOutputSpecs(null, v2Ctx.jobConf());

        OutputCommitter cmtr = v2Ctx.jobConf().getOutputCommitter();

        // Committer may be absent; only run job setup when one is configured.
        if (cmtr != null)
            cmtr.setupJob(v2Ctx.jobContext());
    }
    catch (IOException e) {
        // Wrap Hadoop I/O failures into Ignite's checked exception, preserving the cause.
        throw new IgniteCheckedException(e);
    }
}
项目:flink    文件:HadoopOutputFormatBase.java   
@Override
public void finalizeGlobal(int parallelism) throws IOException {
    try {
        // Build a job-level context and let the configured committer finalize the output.
        JobContext ctx = new JobContextImpl(this.jobConf, new JobID());
        this.jobConf.getOutputCommitter().commitJob(ctx);
    } catch (Exception e) {
        // Preserve the original failure as the cause.
        throw new RuntimeException(e);
    }
}
项目:flink    文件:HadoopOutputFormatBase.java   
@Override
public void finalizeGlobal(int parallelism) throws IOException {
    try {
        OutputCommitter cmtr = this.jobConf.getOutputCommitter();

        // Commit the whole job once all parallel writers have finished.
        cmtr.commitJob(new JobContextImpl(this.jobConf, new JobID()));
    } catch (Exception e) {
        // Preserve the original failure as the cause.
        throw new RuntimeException(e);
    }
}
项目:hazelcast-jet    文件:WriteHdfsP.java   
/**
 * Creates a processor that writes items to HDFS via the given record writer.
 * Keys and values are derived from each item with the supplied extractor
 * functions; the committer/context pair is used to finalize the task output.
 */
private WriteHdfsP(RecordWriter<K, V> recordWriter,
                   TaskAttemptContextImpl taskAttemptContext,
                   OutputCommitter outputCommitter,
                   DistributedFunction<? super T, K> extractKeyFn,
                   DistributedFunction<? super T, V> extractValueFn
) {
    // Assignments are order-independent: plain field capture, no validation here.
    this.extractKeyFn = extractKeyFn;
    this.extractValueFn = extractValueFn;
    this.recordWriter = recordWriter;
    this.taskAttemptContext = taskAttemptContext;
    this.outputCommitter = outputCommitter;
}
项目:ignite    文件:HadoopV1OutputCollector.java   
/**
 * Commits the task output, if there is anything to commit.
 * <p>
 * No-op when no writer was ever created; otherwise asks the configured
 * committer whether a task-level commit is needed and performs it.
 *
 * @throws IOException If the commit failed.
 */
public void commit() throws IOException {
    // Nothing was written — nothing to commit.
    if (writer == null)
        return;

    OutputCommitter cmtr = jobConf.getOutputCommitter();

    TaskAttemptContext attemptCtx = new TaskAttemptContextImpl(jobConf, attempt);

    if (cmtr.needsTaskCommit(attemptCtx))
        cmtr.commitTask(attemptCtx);
}
项目:flink    文件:HadoopOutputFormat.java   
/**
 * Creates a Hadoop output format wrapper and registers an explicit output committer
 * on the job configuration.
 *
 * @param mapredOutputFormat underlying {@code mapred} output format to wrap
 * @param outputCommitterClass committer class to set on the job conf; the bound is
 *        widened to {@code Class<? extends OutputCommitter>} (matching
 *        {@code JobConf.setOutputCommitter}) so committer subclasses are accepted —
 *        same erasure, so the change is source- and binary-compatible
 * @param job job configuration the committer is registered on
 */
public HadoopOutputFormat(org.apache.hadoop.mapred.OutputFormat<K, V> mapredOutputFormat, Class<? extends OutputCommitter> outputCommitterClass, JobConf job) {
    this(mapredOutputFormat, job);
    super.getJobConf().setOutputCommitter(outputCommitterClass);
}
项目:flink    文件:HadoopOutputFormat.java   
/**
 * Creates a Hadoop output format wrapper and registers an explicit output committer
 * on the job configuration.
 *
 * @param mapredOutputFormat underlying {@code mapred} output format to wrap
 * @param outputCommitterClass committer class to set on the job conf; the bound is
 *        widened to {@code Class<? extends OutputCommitter>} (matching
 *        {@code JobConf.setOutputCommitter}) so committer subclasses are accepted —
 *        same erasure, so the change is source- and binary-compatible
 * @param job job configuration the committer is registered on
 */
public HadoopOutputFormat(org.apache.hadoop.mapred.OutputFormat<K, V> mapredOutputFormat, Class<? extends OutputCommitter> outputCommitterClass, JobConf job) {
    this(mapredOutputFormat, job);
    super.getJobConf().setOutputCommitter(outputCommitterClass);
}
项目:bigdata-interop    文件:BigQueryMapredOutputCommitter.java   
/**
 * Injects the {@code mapreduce}-level committer this adapter delegates to.
 * Exposed (package-private) for tests only, as marked by {@code @VisibleForTesting}.
 *
 * @param outputCommitter the mapreduce {@code OutputCommitter} to delegate to
 */
@VisibleForTesting
void setMapreduceOutputCommitter(
    org.apache.hadoop.mapreduce.OutputCommitter outputCommitter) {
  this.mapreduceOutputCommitter = outputCommitter;
}