Java class org.apache.hadoop.mapred.TaskCompletionEvent — example source code
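
org.apache.hadoop.mapred.TaskCompletionEvent is the old-API record describing the outcome of a task attempt: its event ID, attempt ID, status, and the HTTP address serving its map output. Client code normally pages through these records while a job runs. A minimal polling sketch, assuming a job already submitted through JobClient ('job' and its cluster setup are hypothetical; exception handling is elided):

import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapred.TaskCompletionEvent;

// Page through completion events until the job finishes ('job' is a RunningJob).
int from = 0;
while (!job.isComplete()) {
  TaskCompletionEvent[] batch = job.getTaskCompletionEvents(from);
  for (TaskCompletionEvent e : batch) {
    System.out.println(e.getEventId() + " " + e.getTaskStatus()
        + " " + e.getTaskAttemptId());
  }
  from += batch.length;  // advance the event cursor past what we have seen
  Thread.sleep(1000);
}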

Project: hadoop    File: JobImpl.java   (the same implementation appears verbatim in aliyun-oss-hadoop-fs, big-c, hadoop-2.6.0-cdh5.4.3, hadoop-plus, hops, hadoop-TCP, hardfs, and hadoop-on-lustre2)
@Override
public TaskCompletionEvent[] getMapAttemptCompletionEvents(
    int startIndex, int maxEvents) {
  TaskCompletionEvent[] events = EMPTY_TASK_COMPLETION_EVENTS;
  readLock.lock();
  try {
    if (mapAttemptCompletionEvents.size() > startIndex) {
      int actualMax = Math.min(maxEvents,
          (mapAttemptCompletionEvents.size() - startIndex));
      events = mapAttemptCompletionEvents.subList(startIndex,
          actualMax + startIndex).toArray(events);
    }
    return events;
  } finally {
    readLock.unlock();
  }
}
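A note on the windowing arithmetic: with 12 buffered events, startIndex = 5 and maxEvents = 10 give actualMax = min(10, 12 - 5) = 7, so subList(5, 12) returns the last seven events; a startIndex at or past the list size yields the shared empty array.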
Project: hadoop    File: TypeConverter.java   (identical in aliyun-oss-hadoop-fs, big-c, hadoop-2.6.0-cdh5.4.3, hadoop-plus, FlexMap, hops, hadoop-TCP, hardfs, and hadoop-on-lustre2)
public static TaskCompletionEvent.Status fromYarn(
    TaskAttemptCompletionEventStatus newStatus) {
  switch (newStatus) {
  case FAILED:
    return TaskCompletionEvent.Status.FAILED;
  case KILLED:
    return TaskCompletionEvent.Status.KILLED;
  case OBSOLETE:
    return TaskCompletionEvent.Status.OBSOLETE;
  case SUCCEEDED:
    return TaskCompletionEvent.Status.SUCCEEDED;
  case TIPFAILED:
    return TaskCompletionEvent.Status.TIPFAILED;
  }
  throw new YarnRuntimeException("Unrecognized status: " + newStatus);
}
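The conversion is purely name-for-name between the YARN enum and the old-API enum. A minimal sanity-check sketch, assuming the usual hadoop-mapreduce client jars on the classpath:

import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEventStatus;

// Every YARN status converts to the old-API constant of the same name.
for (TaskAttemptCompletionEventStatus s : TaskAttemptCompletionEventStatus.values()) {
  assert TypeConverter.fromYarn(s).name().equals(s.name());
}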
Project: hadoop    File: ShuffleSchedulerImpl.java   (identical in aliyun-oss-hadoop-fs, big-c, hadoop-2.6.0-cdh5.4.3, hadoop-plus, hops, hadoop-TCP, hardfs, and hadoop-on-lustre2; FlexMap's variant appears further down)
@Override
public void resolve(TaskCompletionEvent event) {
  switch (event.getTaskStatus()) {
  case SUCCEEDED:
    URI u = getBaseURI(reduceId, event.getTaskTrackerHttp());
    addKnownMapOutput(u.getHost() + ":" + u.getPort(),
        u.toString(),
        event.getTaskAttemptId());
    maxMapRuntime = Math.max(maxMapRuntime, event.getTaskRunTime());
    break;
  case FAILED:
  case KILLED:
  case OBSOLETE:
    obsoleteMapOutput(event.getTaskAttemptId());
    LOG.info("Ignoring obsolete output of " + event.getTaskStatus() +
        " map-task: '" + event.getTaskAttemptId() + "'");
    break;
  case TIPFAILED:
    tipFailed(event.getTaskAttemptId().getTaskID());
    LOG.info("Ignoring output of failed map TIP: '" +
        event.getTaskAttemptId() + "'");
    break;
  }
}
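resolve() is the shuffle scheduler's entry point for completion events: SUCCEEDED registers the map output's location for fetching, FAILED/KILLED/OBSOLETE retire one attempt's output, and TIPFAILED writes off the whole task. For illustration, the host:port key the SUCCEEDED branch derives (the URL below is hypothetical):

import java.net.URI;

URI u = URI.create("http://node1.example.com:13562/mapOutput");
String hostPort = u.getHost() + ":" + u.getPort();  // "node1.example.com:13562"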
Project: hadoop-EAR    File: TestReduceTaskNoMapOutput.java   (RDFS carries the same helper without the @SuppressWarnings annotation)
@SuppressWarnings({ "deprecation", "unchecked" })
public static TaskCompletionEvent[] runJob(JobConf conf, Class mapperClass,
                boolean enableNoFetchEmptyMapOutputs) throws Exception {
  conf.setMapperClass(mapperClass);
  conf.setReducerClass(IdentityReducer.class);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(Text.class);
  conf.setNumMapTasks(3);
  conf.setNumReduceTasks(1);
  conf.setInputFormat(FakeIF.class);
  conf.setBoolean("mapred.enable.no.fetch.map.outputs", enableNoFetchEmptyMapOutputs);
  FileInputFormat.setInputPaths(conf, new Path("/in"));
  final Path outp = new Path("/out");
  FileOutputFormat.setOutputPath(conf, outp);
  RunningJob job = JobClient.runJob(conf);
  assertTrue(job.isSuccessful());
  return job.getTaskCompletionEvents(0);
}
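The helper wires a three-map, one-reduce job around a fake InputFormat, toggles the fork-specific mapred.enable.no.fetch.map.outputs switch, runs the job synchronously, and hands the full completion-event list back to the caller for inspection.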
Project: FlexMap    File: JobImpl.java   (upstream implementation plus an extra INFO-level logging pass over the buffered events)
@Override
public TaskCompletionEvent[] getMapAttemptCompletionEvents(
    int startIndex, int maxEvents) {
  TaskCompletionEvent[] events = EMPTY_TASK_COMPLETION_EVENTS;
  readLock.lock();
  try {
    if (mapAttemptCompletionEvents.size() > startIndex) {
      for (TaskCompletionEvent event : mapAttemptCompletionEvents) {
        LOG.info("map completion event " + event.getTaskId().toString()
            + " status: " + event.getStatus().toString());
      }
      int actualMax = Math.min(maxEvents,
          (mapAttemptCompletionEvents.size() - startIndex));
      events = mapAttemptCompletionEvents.subList(startIndex,
          actualMax + startIndex).toArray(events);
    }
    return events;
  } finally {
    readLock.unlock();
  }
}
Project: FlexMap    File: ShuffleSchedulerImpl.java   (variant: KILLED attempts are grouped with TIPFAILED and retire the whole task, rather than being treated as obsolete single-attempt output)
@Override
public void resolve(TaskCompletionEvent event) {
  switch (event.getTaskStatus()) {
  case SUCCEEDED:
    URI u = getBaseURI(reduceId, event.getTaskTrackerHttp());
    addKnownMapOutput(u.getHost() + ":" + u.getPort(),
        u.toString(),
        event.getTaskAttemptId());
    maxMapRuntime = Math.max(maxMapRuntime, event.getTaskRunTime());
    break;
  case FAILED:
  case OBSOLETE:
    obsoleteMapOutput(event.getTaskAttemptId());
    LOG.info("Ignoring obsolete output of " + event.getTaskStatus() +
        " map-task: '" + event.getTaskAttemptId() + "'");
    break;
  case TIPFAILED:
  case KILLED:
    tipFailed(event.getTaskAttemptId().getTaskID());
    LOG.info("Ignoring output of failed map TIP: '" +
        event.getTaskAttemptId() + "'");
    break;
  }
}
Project: compiler    File: BoaOutputCommitter.java
@Override
public void abortJob(JobContext context, JobStatus.State runState) throws java.io.IOException {
    super.abortJob(context, runState);

    final JobClient jobClient = new JobClient(new JobConf(context.getConfiguration()));
    final RunningJob job = jobClient.getJob((org.apache.hadoop.mapred.JobID) JobID.forName(context.getConfiguration().get("mapred.job.id")));
    String diag = "";
    for (final TaskCompletionEvent event : job.getTaskCompletionEvents(0))
        switch (event.getTaskStatus()) {
            case SUCCEEDED:
                break;
            default:
                diag += "Diagnostics for: " + event.getTaskTrackerHttp() + "\n";
                for (final String s : job.getTaskDiagnostics(event.getTaskAttemptId()))
                    diag += s + "\n";
                diag += "\n";
                break;
        }
    updateStatus(diag, context.getConfiguration().getInt("boa.hadoop.jobid", 0));
}
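abortJob gathers, for every attempt that did not succeed, that attempt's diagnostics from the JobClient and forwards the aggregate text to Boa's job-status store; building diag with a StringBuilder would be the more idiomatic choice, but the behavior is the same.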
Project: ivry-security    File: LogMover.java   (incubator-falcon carries the identical method)
private String getTTlogURL(String jobId) throws Exception {
    JobConf jobConf = new JobConf(getConf());
    JobClient jobClient = new JobClient(jobConf);
    RunningJob job = jobClient.getJob(JobID.forName(jobId));
    if (job == null) {
        LOG.warn("No running job for job id: " + jobId);
        return null;
    }
    TaskCompletionEvent[] tasks = job.getTaskCompletionEvents(0);
    // event 0 is setup, event 1 is the launcher, event 2 is cleanup
    if (tasks != null && tasks.length == 3 && tasks[1] != null) {
        return tasks[1].getTaskTrackerHttp() + "/tasklog?attemptid="
                + tasks[1].getTaskAttemptId() + "&all=true";
    } else {
        LOG.warn("No running task for job: " + jobId);
    }
    return null;
}
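The method assumes an Oozie-launcher-style job whose three completion events are setup, launcher, and cleanup, and returns a link to the launcher attempt's TaskTracker log servlet. A hypothetical result, using the classic TaskTracker HTTP port: http://tt-host:50060/tasklog?attemptid=attempt_201201011200_0001_m_000001_0&all=true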
Project: hadoop    File: TypeConverter.java
public static TaskCompletionEvent[] fromYarn(
    TaskAttemptCompletionEvent[] newEvents) {
  TaskCompletionEvent[] oldEvents =
      new TaskCompletionEvent[newEvents.length];
  int i = 0;
  for (TaskAttemptCompletionEvent newEvent
      : newEvents) {
    oldEvents[i++] = fromYarn(newEvent);
  }
  return oldEvents;
}
Project: hadoop    File: TypeConverter.java
public static TaskCompletionEvent fromYarn(
    TaskAttemptCompletionEvent newEvent) {
  return new TaskCompletionEvent(newEvent.getEventId(),
            fromYarn(newEvent.getAttemptId()), newEvent.getAttemptId().getId(),
            newEvent.getAttemptId().getTaskId().getTaskType().equals(TaskType.MAP),
            fromYarn(newEvent.getStatus()),
            newEvent.getMapOutputServerAddress());
}
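Note how the old-API event's isMap flag is derived by comparing the attempt's TaskType against TaskType.MAP; the event ID, attempt ID, status, and map-output server address carry over one-for-one.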
Project: hadoop    File: TestEventFetcher.java
private MapTaskCompletionEventsUpdate getMockedCompletionEventsUpdate(
    int startIdx, int numEvents) {
  ArrayList<TaskCompletionEvent> tceList =
      new ArrayList<TaskCompletionEvent>(numEvents);
  for (int i = 0; i < numEvents; ++i) {
    int eventIdx = startIdx + i;
    TaskCompletionEvent tce = new TaskCompletionEvent(eventIdx,
        new TaskAttemptID("12345", 1, TaskType.MAP, eventIdx, 0),
        eventIdx, true, TaskCompletionEvent.Status.SUCCEEDED,
        "http://somehost:8888");
    tceList.add(tce);
  }
  TaskCompletionEvent[] events = {};
  return new MapTaskCompletionEventsUpdate(tceList.toArray(events), false);
}
Project: hadoop    File: CompletedJob.java
@Override
public synchronized TaskCompletionEvent[] getMapAttemptCompletionEvents(
    int startIndex, int maxEvents) {
  if (mapCompletionEvents == null) {
    constructTaskAttemptCompletionEvents();
  }
  return TypeConverter.fromYarn(getAttemptCompletionEvents(
      mapCompletionEvents, startIndex, maxEvents));
}
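Here the map-attempt completion events are materialized lazily from the job history on first request, then windowed by the same startIndex/maxEvents contract as the AM-side JobImpl at the top of this page.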
Project: hadoop    File: TestJobHistoryEntities.java
/**
 * Simple test of some methods of CompletedJob
 * @throws Exception
 */
@Test (timeout=30000)
public void testGetTaskAttemptCompletionEvent() throws Exception{
  HistoryFileInfo info = mock(HistoryFileInfo.class);
  when(info.getConfFile()).thenReturn(fullConfPath);
  completedJob =
    new CompletedJob(conf, jobId, fullHistoryPath, loadTasks, "user",
        info, jobAclsManager);
  TaskCompletionEvent[] events = completedJob.getMapAttemptCompletionEvents(0, 1000);
  assertEquals(10, completedJob.getMapAttemptCompletionEvents(0, 10).length);
  // event IDs must be non-decreasing
  int currentEventId = 0;
  for (TaskCompletionEvent taskAttemptCompletionEvent : events) {
    int eventId = taskAttemptCompletionEvent.getEventId();
    assertTrue(eventId >= currentEventId);
    currentEventId = eventId;
  }
  assertNull(completedJob.loadConfFile());
  // job name
  assertEquals("Sleep job", completedJob.getName());
  // queue name
  assertEquals("default", completedJob.getQueueName());
  // progress
  assertEquals(1.0, completedJob.getProgress(), 0.001);
  // 12 events in total
  assertEquals(12, completedJob.getTaskAttemptCompletionEvents(0, 1000).length);
  // the first 10 events
  assertEquals(10, completedJob.getTaskAttemptCompletionEvents(0, 10).length);
  // up to 10 events starting at index 5: 7 remain
  assertEquals(7, completedJob.getTaskAttemptCompletionEvents(5, 10).length);

  // no errors recorded
  assertEquals(1, completedJob.getDiagnostics().size());
  assertEquals("", completedJob.getDiagnostics().get(0));

  assertEquals(0, completedJob.getJobACLs().size());

}
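The windowing assertions here (12 events total, 10 from index 0, 7 from index 5) line up with the min(maxEvents, size - startIndex) arithmetic in getMapAttemptCompletionEvents shown at the top of this page.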