Java class org.apache.hadoop.util.StringInterner: example source code
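
StringInterner provides weakly-referenced string interning: weakIntern() returns a canonical copy of its argument, so values that recur across many records (host names, queue names, counter names, state strings) share one String instance without being pinned in memory permanently. A minimal sketch of the behavior (the demo class name is ours, for illustration only):

import org.apache.hadoop.util.StringInterner;

public class StringInternerDemo {
  public static void main(String[] args) {
    // weakIntern() maps equal strings to one canonical instance. The
    // pool holds weak references, so an entry can be reclaimed once no
    // strong references to the interned string remain.
    String a = StringInterner.weakIntern(new String("node-1:8042"));
    String b = StringInterner.weakIntern(new String("node-1:8042"));
    System.out.println(a == b);  // true: same canonical instance
  }
}

The snippets below show how Hadoop and its forks apply this to diagnostics strings, container events, and Writable deserialization.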

Project: angel    File: WorkerAttempt.java
@SuppressWarnings("unchecked")
private void cleanContainer() {
  AngelDeployMode deployMode = context.getDeployMode();
  ContainerLauncherEvent launchEvent = null;

  if (deployMode == AngelDeployMode.LOCAL) {
    launchEvent =
        new LocalContainerLauncherEvent(ContainerLauncherEventType.CONTAINER_REMOTE_CLEANUP, id);
  } else {
    launchEvent =
        new YarnContainerLauncherEvent(id, container.getId(), StringInterner.weakIntern(container
            .getNodeId().toString()), container.getContainerToken(),
            ContainerLauncherEventType.CONTAINER_REMOTE_CLEANUP);
  }
  context.getEventHandler().handle(launchEvent);
}
Project: hadoop    File: TaskAttemptListenerImpl.java
@Override
public void reportDiagnosticInfo(TaskAttemptID taskAttemptID,
    String diagnosticInfo) throws IOException {
  diagnosticInfo = StringInterner.weakIntern(diagnosticInfo);
  LOG.info("Diagnostics report from " + taskAttemptID.toString() + ": "
      + diagnosticInfo);

  org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
      TypeConverter.toYarn(taskAttemptID);
  taskHeartbeatHandler.progressing(attemptID);

  // This is mainly used for cases where we want to propagate exception traces
  // of tasks that fail.

  // This call exists as a hadoop mapreduce legacy wherein all changes in
  // counters/progress/phase/output-size are reported through statusUpdate()
  // call but not diagnosticInformation.
  context.getEventHandler().handle(
      new TaskAttemptDiagnosticsUpdateEvent(attemptID, diagnosticInfo));
}
Project: hadoop    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
private void sendLaunchedEvents() {
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptId.getTaskId()
      .getJobId());
  jce.addCounterUpdate(attemptId.getTaskId().getTaskType() == TaskType.MAP ?
      JobCounter.TOTAL_LAUNCHED_MAPS : JobCounter.TOTAL_LAUNCHED_REDUCES, 1);
  eventHandler.handle(jce);

  LOG.info("TaskAttempt: [" + attemptId
      + "] using containerId: [" + container.getId() + " on NM: ["
      + StringInterner.weakIntern(container.getNodeId().toString()) + "]");
  TaskAttemptStartedEvent tase =
    new TaskAttemptStartedEvent(TypeConverter.fromYarn(attemptId),
        TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
        launchTime, trackerName, httpPort, shufflePort, container.getId(),
        locality.toString(), avataar.toString());
  eventHandler.handle(
      new JobHistoryEvent(attemptId.getTaskId().getJobId(), tase));
}
Project: hadoop    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt, 
    TaskAttemptEvent event) {
  // unregister it from TaskAttemptListener so that it stops listening
  // for it
  taskAttempt.taskAttemptListener.unregister(
      taskAttempt.attemptId, taskAttempt.jvmID);

  if (event instanceof TaskAttemptKillEvent) {
    taskAttempt.addDiagnosticInfo(
        ((TaskAttemptKillEvent) event).getMessage());
  }

  taskAttempt.reportedStatus.progress = 1.0f;
  taskAttempt.updateProgressSplits();
  //send the cleanup event to containerLauncher
  taskAttempt.eventHandler.handle(new ContainerLauncherEvent(
      taskAttempt.attemptId, 
      taskAttempt.container.getId(), StringInterner
          .weakIntern(taskAttempt.container.getNodeId().toString()),
      taskAttempt.container.getContainerToken(),
      ContainerLauncher.EventType.CONTAINER_REMOTE_CLEANUP));
}
Project: hadoop    File: TaskImpl.java
private void handleTaskAttemptCompletion(TaskAttemptId attemptId,
    TaskAttemptCompletionEventStatus status) {
  TaskAttempt attempt = attempts.get(attemptId);
  //raise the completion event only if the container is assigned
  // to nextAttemptNumber
  if (attempt.getNodeHttpAddress() != null) {
    TaskAttemptCompletionEvent tce = recordFactory
        .newRecordInstance(TaskAttemptCompletionEvent.class);
    tce.setEventId(-1);
    String scheme = (encryptedShuffle) ? "https://" : "http://";
    tce.setMapOutputServerAddress(StringInterner.weakIntern(scheme
       + attempt.getNodeHttpAddress().split(":")[0] + ":"
       + attempt.getShufflePort()));
    tce.setStatus(status);
    tce.setAttemptId(attempt.getID());
    int runTime = 0;
    if (attempt.getFinishTime() != 0 && attempt.getLaunchTime() != 0) {
      runTime = (int) (attempt.getFinishTime() - attempt.getLaunchTime());
    }
    tce.setAttemptRunTime(runTime);

    //raise the event to job so that it adds the completion event to its
    //data structures
    eventHandler.handle(new JobTaskAttemptCompletedEvent(tce));
  }
}
Project: hadoop    File: TaskStatus.java
public void readFields(DataInput in) throws IOException {
  this.taskid.readFields(in);
  setProgress(in.readFloat());
  this.numSlots = in.readInt();
  this.runState = WritableUtils.readEnum(in, State.class);
  setDiagnosticInfo(StringInterner.weakIntern(Text.readString(in)));
  setStateString(StringInterner.weakIntern(Text.readString(in)));
  this.phase = WritableUtils.readEnum(in, Phase.class); 
  this.startTime = in.readLong(); 
  this.finishTime = in.readLong(); 
  counters = new Counters();
  this.includeAllCounters = in.readBoolean();
  this.outputSize = in.readLong();
  counters.readFields(in);
  nextRecordRange.readFields(in);
}
Project: hadoop    File: TaskReport.java
public void readFields(DataInput in) throws IOException {
  this.taskid.readFields(in);
  this.progress = in.readFloat();
  this.state = StringInterner.weakIntern(Text.readString(in));
  this.startTime = in.readLong(); 
  this.finishTime = in.readLong();

  diagnostics = WritableUtils.readStringArray(in);
  counters = new Counters();
  counters.readFields(in);
  currentStatus = WritableUtils.readEnum(in, TIPStatus.class);
  if (currentStatus == TIPStatus.RUNNING) {
    int num = WritableUtils.readVInt(in);    
    for (int i = 0; i < num; i++) {
      TaskAttemptID t = new TaskAttemptID();
      t.readFields(in);
      runningAttempts.add(t);
    }
  } else if (currentStatus == TIPStatus.COMPLETE) {
    successfulAttempt.readFields(in);
  }
}
Project: hadoop    File: QueueInfo.java
@Override
public void readFields(DataInput in) throws IOException {
  queueName = StringInterner.weakIntern(Text.readString(in));
  queueState = WritableUtils.readEnum(in, QueueState.class);
  schedulingInfo = StringInterner.weakIntern(Text.readString(in));
  int length = in.readInt();
  stats = new JobStatus[length];
  for (int i = 0; i < length; i++) {
    stats[i] = new JobStatus();
    stats[i].readFields(in);
  }
  int count = in.readInt();
  children.clear();
  for (int i = 0; i < count; i++) {
    QueueInfo childQueueInfo = new QueueInfo();
    childQueueInfo.readFields(in);
    children.add(childQueueInfo);
  }
}
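
The readFields() implementations above (TaskStatus, TaskReport, QueueInfo) share one intern-on-read pattern: each string is read with Text.readString(in) and passed through weakIntern() before assignment, so duplicates deserialized from many records collapse to a single instance. A minimal sketch of that pattern, using a hypothetical HostRecord writable of our own:

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.util.StringInterner;

// Hypothetical Writable illustrating intern-on-read; not part of Hadoop.
public class HostRecord implements Writable {
  private String hostname;   // heavily repeated across records
  private long timestamp;

  @Override
  public void write(DataOutput out) throws IOException {
    Text.writeString(out, hostname);   // no interning needed on write
    out.writeLong(timestamp);
  }

  @Override
  public void readFields(DataInput in) throws IOException {
    // Intern on deserialization: records from the same host end up
    // sharing one String instance instead of one copy per record.
    hostname = StringInterner.weakIntern(Text.readString(in));
    timestamp = in.readLong();
  }
}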
Project: hadoop    File: JobHistoryParser.java
private void handleReduceAttemptFinishedEvent(
    ReduceAttemptFinishedEvent event) {
  TaskInfo taskInfo = info.tasksMap.get(event.getTaskId());
  TaskAttemptInfo attemptInfo = 
    taskInfo.attemptsMap.get(event.getAttemptId());
  attemptInfo.finishTime = event.getFinishTime();
  attemptInfo.status = StringInterner.weakIntern(event.getTaskStatus());
  attemptInfo.state = StringInterner.weakIntern(event.getState());
  attemptInfo.shuffleFinishTime = event.getShuffleFinishTime();
  attemptInfo.sortFinishTime = event.getSortFinishTime();
  attemptInfo.counters = event.getCounters();
  attemptInfo.hostname = StringInterner.weakIntern(event.getHostname());
  attemptInfo.port = event.getPort();
  attemptInfo.rackname = StringInterner.weakIntern(event.getRackName());
  info.completedTaskAttemptsMap.put(event.getAttemptId(), attemptInfo);
}
Project: aliyun-oss-hadoop-fs    File: TaskAttemptListenerImpl.java
@Override
public void reportDiagnosticInfo(TaskAttemptID taskAttemptID,
    String diagnosticInfo) throws IOException {
  diagnosticInfo = StringInterner.weakIntern(diagnosticInfo);
  LOG.info("Diagnostics report from " + taskAttemptID.toString() + ": "
      + diagnosticInfo);

  org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
      TypeConverter.toYarn(taskAttemptID);
  taskHeartbeatHandler.progressing(attemptID);

  // This is mainly used for cases where we want to propagate exception traces
  // of tasks that fail.

  // This call exists as a hadoop mapreduce legacy wherein all changes in
  // counters/progress/phase/output-size are reported through statusUpdate()
  // call but not diagnosticInformation.
  context.getEventHandler().handle(
      new TaskAttemptDiagnosticsUpdateEvent(attemptID, diagnosticInfo));
}
Project: aliyun-oss-hadoop-fs    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
private void sendLaunchedEvents() {
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptId.getTaskId()
      .getJobId());
  jce.addCounterUpdate(attemptId.getTaskId().getTaskType() == TaskType.MAP ?
      JobCounter.TOTAL_LAUNCHED_MAPS : JobCounter.TOTAL_LAUNCHED_REDUCES, 1);
  eventHandler.handle(jce);

  LOG.info("TaskAttempt: [" + attemptId
      + "] using containerId: [" + container.getId() + " on NM: ["
      + StringInterner.weakIntern(container.getNodeId().toString()) + "]");
  TaskAttemptStartedEvent tase =
    new TaskAttemptStartedEvent(TypeConverter.fromYarn(attemptId),
        TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
        launchTime, trackerName, httpPort, shufflePort, container.getId(),
        locality.toString(), avataar.toString());
  eventHandler.handle(
      new JobHistoryEvent(attemptId.getTaskId().getJobId(), tase));
}
Project: aliyun-oss-hadoop-fs    File: TaskImpl.java
private void handleTaskAttemptCompletion(TaskAttemptId attemptId,
    TaskAttemptCompletionEventStatus status) {
  TaskAttempt attempt = attempts.get(attemptId);
  //raise the completion event only if the container is assigned
  // to nextAttemptNumber
  if (attempt.getNodeHttpAddress() != null) {
    TaskAttemptCompletionEvent tce = recordFactory
        .newRecordInstance(TaskAttemptCompletionEvent.class);
    tce.setEventId(-1);
    String scheme = (encryptedShuffle) ? "https://" : "http://";
    tce.setMapOutputServerAddress(StringInterner.weakIntern(scheme
       + attempt.getNodeHttpAddress().split(":")[0] + ":"
       + attempt.getShufflePort()));
    tce.setStatus(status);
    tce.setAttemptId(attempt.getID());
    int runTime = 0;
    if (attempt.getFinishTime() != 0 && attempt.getLaunchTime() != 0) {
      runTime = (int) (attempt.getFinishTime() - attempt.getLaunchTime());
    }
    tce.setAttemptRunTime(runTime);

    //raise the event to job so that it adds the completion event to its
    //data structures
    eventHandler.handle(new JobTaskAttemptCompletedEvent(tce));
  }
}
Project: aliyun-oss-hadoop-fs    File: TaskStatus.java
public void readFields(DataInput in) throws IOException {
  this.taskid.readFields(in);
  setProgress(in.readFloat());
  this.numSlots = in.readInt();
  this.runState = WritableUtils.readEnum(in, State.class);
  setDiagnosticInfo(StringInterner.weakIntern(Text.readString(in)));
  setStateString(StringInterner.weakIntern(Text.readString(in)));
  this.phase = WritableUtils.readEnum(in, Phase.class); 
  this.startTime = in.readLong(); 
  this.finishTime = in.readLong(); 
  counters = new Counters();
  this.includeAllCounters = in.readBoolean();
  this.outputSize = in.readLong();
  counters.readFields(in);
  nextRecordRange.readFields(in);
}
Project: aliyun-oss-hadoop-fs    File: TaskReport.java
public void readFields(DataInput in) throws IOException {
  this.taskid.readFields(in);
  this.progress = in.readFloat();
  this.state = StringInterner.weakIntern(Text.readString(in));
  this.startTime = in.readLong(); 
  this.finishTime = in.readLong();

  diagnostics = WritableUtils.readStringArray(in);
  counters = new Counters();
  counters.readFields(in);
  currentStatus = WritableUtils.readEnum(in, TIPStatus.class);
  if (currentStatus == TIPStatus.RUNNING) {
    int num = WritableUtils.readVInt(in);    
    for (int i = 0; i < num; i++) {
      TaskAttemptID t = new TaskAttemptID();
      t.readFields(in);
      runningAttempts.add(t);
    }
  } else if (currentStatus == TIPStatus.COMPLETE) {
    successfulAttempt.readFields(in);
  }
}
Project: aliyun-oss-hadoop-fs    File: QueueInfo.java
@Override
public void readFields(DataInput in) throws IOException {
  queueName = StringInterner.weakIntern(Text.readString(in));
  queueState = WritableUtils.readEnum(in, QueueState.class);
  schedulingInfo = StringInterner.weakIntern(Text.readString(in));
  int length = in.readInt();
  stats = new JobStatus[length];
  for (int i = 0; i < length; i++) {
    stats[i] = new JobStatus();
    stats[i].readFields(in);
  }
  int count = in.readInt();
  children.clear();
  for (int i = 0; i < count; i++) {
    QueueInfo childQueueInfo = new QueueInfo();
    childQueueInfo.readFields(in);
    children.add(childQueueInfo);
  }
}
Project: aliyun-oss-hadoop-fs    File: EventReader.java
static Counters fromAvro(JhCounters counters) {
  Counters result = new Counters();
  if (counters != null) {
    for (JhCounterGroup g : counters.getGroups()) {
      CounterGroup group =
          result.addGroup(StringInterner.weakIntern(g.getName().toString()),
              StringInterner.weakIntern(g.getDisplayName().toString()));
      for (JhCounter c : g.getCounts()) {
        group.addCounter(StringInterner.weakIntern(c.getName().toString()),
            StringInterner.weakIntern(c.getDisplayName().toString()),
                c.getValue());
      }
    }
  }
  return result;
}
Project: aliyun-oss-hadoop-fs    File: JobHistoryParser.java
private void handleReduceAttemptFinishedEvent(
    ReduceAttemptFinishedEvent event) {
  TaskInfo taskInfo = info.tasksMap.get(event.getTaskId());
  TaskAttemptInfo attemptInfo = 
    taskInfo.attemptsMap.get(event.getAttemptId());
  attemptInfo.finishTime = event.getFinishTime();
  attemptInfo.status = StringInterner.weakIntern(event.getTaskStatus());
  attemptInfo.state = StringInterner.weakIntern(event.getState());
  attemptInfo.shuffleFinishTime = event.getShuffleFinishTime();
  attemptInfo.sortFinishTime = event.getSortFinishTime();
  attemptInfo.counters = event.getCounters();
  attemptInfo.hostname = StringInterner.weakIntern(event.getHostname());
  attemptInfo.port = event.getPort();
  attemptInfo.rackname = StringInterner.weakIntern(event.getRackName());
  info.completedTaskAttemptsMap.put(event.getAttemptId(), attemptInfo);
}
Project: big-c    File: TaskAttemptListenerImpl.java
@Override
public void reportDiagnosticInfo(TaskAttemptID taskAttemptID,
    String diagnosticInfo) throws IOException {
  diagnosticInfo = StringInterner.weakIntern(diagnosticInfo);
  LOG.info("Diagnostics report from " + taskAttemptID.toString() + ": "
      + diagnosticInfo);

  org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
      TypeConverter.toYarn(taskAttemptID);
  taskHeartbeatHandler.progressing(attemptID);

  // This is mainly used for cases where we want to propagate exception traces
  // of tasks that fail.

  // This call exists as a hadoop mapreduce legacy wherein all changes in
  // counters/progress/phase/output-size are reported through statusUpdate()
  // call but not diagnosticInformation.
  context.getEventHandler().handle(
      new TaskAttemptDiagnosticsUpdateEvent(attemptID, diagnosticInfo));
}
Project: big-c    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
private void sendLaunchedEvents() {
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptId.getTaskId()
      .getJobId());
  jce.addCounterUpdate(attemptId.getTaskId().getTaskType() == TaskType.MAP ?
      JobCounter.TOTAL_LAUNCHED_MAPS : JobCounter.TOTAL_LAUNCHED_REDUCES, 1);
  eventHandler.handle(jce);

  LOG.info("TaskAttempt: [" + attemptId
      + "] using containerId: [" + container.getId() + " on NM: ["
      + StringInterner.weakIntern(container.getNodeId().toString()) + "]");
  TaskAttemptStartedEvent tase =
    new TaskAttemptStartedEvent(TypeConverter.fromYarn(attemptId),
        TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
        launchTime, trackerName, httpPort, shufflePort, container.getId(),
        locality.toString(), avataar.toString());
  eventHandler.handle(
      new JobHistoryEvent(attemptId.getTaskId().getJobId(), tase));
}
Project: big-c    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt, 
    TaskAttemptEvent event) {
  // unregister it from TaskAttemptListener so that it stops listening
  // for it
  taskAttempt.taskAttemptListener.unregister(
      taskAttempt.attemptId, taskAttempt.jvmID);

  if (event instanceof TaskAttemptKillEvent) {
    taskAttempt.addDiagnosticInfo(
        ((TaskAttemptKillEvent) event).getMessage());
  }

  taskAttempt.reportedStatus.progress = 1.0f;
  taskAttempt.updateProgressSplits();
  //send the cleanup event to containerLauncher
  taskAttempt.eventHandler.handle(new ContainerLauncherEvent(
      taskAttempt.attemptId, 
      taskAttempt.container.getId(), StringInterner
          .weakIntern(taskAttempt.container.getNodeId().toString()),
      taskAttempt.container.getContainerToken(),
      ContainerLauncher.EventType.CONTAINER_REMOTE_CLEANUP));
}
Project: big-c    File: TaskImpl.java
private void handleTaskAttemptCompletion(TaskAttemptId attemptId,
    TaskAttemptCompletionEventStatus status) {
  TaskAttempt attempt = attempts.get(attemptId);
  //raise the completion event only if the container is assigned
  // to nextAttemptNumber
  if (attempt.getNodeHttpAddress() != null) {
    TaskAttemptCompletionEvent tce = recordFactory
        .newRecordInstance(TaskAttemptCompletionEvent.class);
    tce.setEventId(-1);
    String scheme = (encryptedShuffle) ? "https://" : "http://";
    tce.setMapOutputServerAddress(StringInterner.weakIntern(scheme
       + attempt.getNodeHttpAddress().split(":")[0] + ":"
       + attempt.getShufflePort()));
    tce.setStatus(status);
    tce.setAttemptId(attempt.getID());
    int runTime = 0;
    if (attempt.getFinishTime() != 0 && attempt.getLaunchTime() != 0) {
      runTime = (int) (attempt.getFinishTime() - attempt.getLaunchTime());
    }
    tce.setAttemptRunTime(runTime);

    //raise the event to job so that it adds the completion event to its
    //data structures
    eventHandler.handle(new JobTaskAttemptCompletedEvent(tce));
  }
}
Project: big-c    File: TaskStatus.java
public void readFields(DataInput in) throws IOException {
  this.taskid.readFields(in);
  setProgress(in.readFloat());
  this.numSlots = in.readInt();
  this.runState = WritableUtils.readEnum(in, State.class);
  setDiagnosticInfo(StringInterner.weakIntern(Text.readString(in)));
  setStateString(StringInterner.weakIntern(Text.readString(in)));
  this.phase = WritableUtils.readEnum(in, Phase.class); 
  this.startTime = in.readLong(); 
  this.finishTime = in.readLong(); 
  counters = new Counters();
  this.includeAllCounters = in.readBoolean();
  this.outputSize = in.readLong();
  counters.readFields(in);
  nextRecordRange.readFields(in);
}
Project: big-c    File: TaskReport.java
public void readFields(DataInput in) throws IOException {
  this.taskid.readFields(in);
  this.progress = in.readFloat();
  this.state = StringInterner.weakIntern(Text.readString(in));
  this.startTime = in.readLong(); 
  this.finishTime = in.readLong();

  diagnostics = WritableUtils.readStringArray(in);
  counters = new Counters();
  counters.readFields(in);
  currentStatus = WritableUtils.readEnum(in, TIPStatus.class);
  if (currentStatus == TIPStatus.RUNNING) {
    int num = WritableUtils.readVInt(in);    
    for (int i = 0; i < num; i++) {
      TaskAttemptID t = new TaskAttemptID();
      t.readFields(in);
      runningAttempts.add(t);
    }
  } else if (currentStatus == TIPStatus.COMPLETE) {
    successfulAttempt.readFields(in);
  }
}
Project: big-c    File: QueueInfo.java
@Override
public void readFields(DataInput in) throws IOException {
  queueName = StringInterner.weakIntern(Text.readString(in));
  queueState = WritableUtils.readEnum(in, QueueState.class);
  schedulingInfo = StringInterner.weakIntern(Text.readString(in));
  int length = in.readInt();
  stats = new JobStatus[length];
  for (int i = 0; i < length; i++) {
    stats[i] = new JobStatus();
    stats[i].readFields(in);
  }
  int count = in.readInt();
  children.clear();
  for (int i = 0; i < count; i++) {
    QueueInfo childQueueInfo = new QueueInfo();
    childQueueInfo.readFields(in);
    children.add(childQueueInfo);
  }
}
Project: big-c    File: JobHistoryParser.java
private void handleReduceAttemptFinishedEvent(
    ReduceAttemptFinishedEvent event) {
  TaskInfo taskInfo = info.tasksMap.get(event.getTaskId());
  TaskAttemptInfo attemptInfo = 
    taskInfo.attemptsMap.get(event.getAttemptId());
  attemptInfo.finishTime = event.getFinishTime();
  attemptInfo.status = StringInterner.weakIntern(event.getTaskStatus());
  attemptInfo.state = StringInterner.weakIntern(event.getState());
  attemptInfo.shuffleFinishTime = event.getShuffleFinishTime();
  attemptInfo.sortFinishTime = event.getSortFinishTime();
  attemptInfo.counters = event.getCounters();
  attemptInfo.hostname = StringInterner.weakIntern(event.getHostname());
  attemptInfo.port = event.getPort();
  attemptInfo.rackname = StringInterner.weakIntern(event.getRackName());
  info.completedTaskAttemptsMap.put(event.getAttemptId(), attemptInfo);
}
Project: hadoop-2.6.0-cdh5.4.3    File: TaskAttemptListenerImpl.java
@Override
public void reportDiagnosticInfo(TaskAttemptID taskAttemptID,
    String diagnosticInfo) throws IOException {
  diagnosticInfo = StringInterner.weakIntern(diagnosticInfo);
  LOG.info("Diagnostics report from " + taskAttemptID.toString() + ": "
      + diagnosticInfo);

  org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
      TypeConverter.toYarn(taskAttemptID);
  taskHeartbeatHandler.progressing(attemptID);

  // This is mainly used for cases where we want to propagate exception traces
  // of tasks that fail.

  // This call exists as a hadoop mapreduce legacy wherein all changes in
  // counters/progress/phase/output-size are reported through statusUpdate()
  // call but not diagnosticInformation.
  context.getEventHandler().handle(
      new TaskAttemptDiagnosticsUpdateEvent(attemptID, diagnosticInfo));
}
Project: hadoop-2.6.0-cdh5.4.3    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
private void sendLaunchedEvents() {
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptId.getTaskId()
      .getJobId());
  jce.addCounterUpdate(attemptId.getTaskId().getTaskType() == TaskType.MAP ?
      JobCounter.TOTAL_LAUNCHED_MAPS : JobCounter.TOTAL_LAUNCHED_REDUCES, 1);
  eventHandler.handle(jce);

  LOG.info("TaskAttempt: [" + attemptId
      + "] using containerId: [" + container.getId() + " on NM: ["
      + StringInterner.weakIntern(container.getNodeId().toString()) + "]");
  TaskAttemptStartedEvent tase =
    new TaskAttemptStartedEvent(TypeConverter.fromYarn(attemptId),
        TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
        launchTime, trackerName, httpPort, shufflePort, container.getId(),
        locality.toString(), avataar.toString());
  eventHandler.handle(
      new JobHistoryEvent(attemptId.getTaskId().getJobId(), tase));
}
Project: hops    File: EventReader.java
static Counters fromAvro(JhCounters counters) {
  Counters result = new Counters();
  if (counters != null) {
    for (JhCounterGroup g : counters.getGroups()) {
      CounterGroup group =
          result.addGroup(StringInterner.weakIntern(g.getName().toString()),
              StringInterner.weakIntern(g.getDisplayName().toString()));
      for (JhCounter c : g.getCounts()) {
        group.addCounter(StringInterner.weakIntern(c.getName().toString()),
            StringInterner.weakIntern(c.getDisplayName().toString()),
                c.getValue());
      }
    }
  }
  return result;
}
Project: hadoop-2.6.0-cdh5.4.3    File: TaskImpl.java
private void handleTaskAttemptCompletion(TaskAttemptId attemptId,
    TaskAttemptCompletionEventStatus status) {
  TaskAttempt attempt = attempts.get(attemptId);
  //raise the completion event only if the container is assigned
  // to nextAttemptNumber
  if (attempt.getNodeHttpAddress() != null) {
    TaskAttemptCompletionEvent tce = recordFactory
        .newRecordInstance(TaskAttemptCompletionEvent.class);
    tce.setEventId(-1);
    String scheme = (encryptedShuffle) ? "https://" : "http://";
    tce.setMapOutputServerAddress(StringInterner.weakIntern(scheme
       + attempt.getNodeHttpAddress().split(":")[0] + ":"
       + attempt.getShufflePort()));
    tce.setStatus(status);
    tce.setAttemptId(attempt.getID());
    int runTime = 0;
    if (attempt.getFinishTime() != 0 && attempt.getLaunchTime() != 0) {
      runTime = (int) (attempt.getFinishTime() - attempt.getLaunchTime());
    }
    tce.setAttemptRunTime(runTime);

    //raise the event to job so that it adds the completion event to its
    //data structures
    eventHandler.handle(new JobTaskAttemptCompletedEvent(tce));
  }
}
Project: hops    File: TaskReport.java
public void readFields(DataInput in) throws IOException {
  this.taskid.readFields(in);
  this.progress = in.readFloat();
  this.state = StringInterner.weakIntern(Text.readString(in));
  this.startTime = in.readLong(); 
  this.finishTime = in.readLong();

  diagnostics = WritableUtils.readStringArray(in);
  counters = new Counters();
  counters.readFields(in);
  currentStatus = WritableUtils.readEnum(in, TIPStatus.class);
  if (currentStatus == TIPStatus.RUNNING) {
    int num = WritableUtils.readVInt(in);    
    for (int i = 0; i < num; i++) {
      TaskAttemptID t = new TaskAttemptID();
      t.readFields(in);
      runningAttempts.add(t);
    }
  } else if (currentStatus == TIPStatus.COMPLETE) {
    successfulAttempt.readFields(in);
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: Task.java
public void readFields(DataInput in) throws IOException {
  jobFile = StringInterner.weakIntern(Text.readString(in));
  taskId = TaskAttemptID.read(in);
  partition = in.readInt();
  numSlotsRequired = in.readInt();
  taskStatus.readFields(in);
  skipRanges.readFields(in);
  currentRecIndexIterator = skipRanges.skipRangeIterator();
  currentRecStartIndex = currentRecIndexIterator.next();
  skipping = in.readBoolean();
  jobCleanup = in.readBoolean();
  if (jobCleanup) {
    jobRunStateForCleanup = 
      WritableUtils.readEnum(in, JobStatus.State.class);
  }
  jobSetup = in.readBoolean();
  writeSkipRecs = in.readBoolean();
  taskCleanup = in.readBoolean();
  if (taskCleanup) {
    setPhase(TaskStatus.Phase.CLEANUP);
  }
  user = StringInterner.weakIntern(Text.readString(in));
  extraData.readFields(in);
}
Project: hadoop-2.6.0-cdh5.4.3    File: TaskReport.java
public void readFields(DataInput in) throws IOException {
  this.taskid.readFields(in);
  this.progress = in.readFloat();
  this.state = StringInterner.weakIntern(Text.readString(in));
  this.startTime = in.readLong(); 
  this.finishTime = in.readLong();

  diagnostics = WritableUtils.readStringArray(in);
  counters = new Counters();
  counters.readFields(in);
  currentStatus = WritableUtils.readEnum(in, TIPStatus.class);
  if (currentStatus == TIPStatus.RUNNING) {
    int num = WritableUtils.readVInt(in);    
    for (int i = 0; i < num; i++) {
      TaskAttemptID t = new TaskAttemptID();
      t.readFields(in);
      runningAttempts.add(t);
    }
  } else if (currentStatus == TIPStatus.COMPLETE) {
    successfulAttempt.readFields(in);
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: QueueInfo.java
@Override
public void readFields(DataInput in) throws IOException {
  queueName = StringInterner.weakIntern(Text.readString(in));
  queueState = WritableUtils.readEnum(in, QueueState.class);
  schedulingInfo = StringInterner.weakIntern(Text.readString(in));
  int length = in.readInt();
  stats = new JobStatus[length];
  for (int i = 0; i < length; i++) {
    stats[i] = new JobStatus();
    stats[i].readFields(in);
  }
  int count = in.readInt();
  children.clear();
  for (int i = 0; i < count; i++) {
    QueueInfo childQueueInfo = new QueueInfo();
    childQueueInfo.readFields(in);
    children.add(childQueueInfo);
  }
}
Project: hops    File: TaskImpl.java
private void handleTaskAttemptCompletion(TaskAttemptId attemptId,
    TaskAttemptCompletionEventStatus status) {
  TaskAttempt attempt = attempts.get(attemptId);
  //raise the completion event only if the container is assigned
  // to nextAttemptNumber
  if (attempt.getNodeHttpAddress() != null) {
    TaskAttemptCompletionEvent tce = recordFactory
        .newRecordInstance(TaskAttemptCompletionEvent.class);
    tce.setEventId(-1);
    String scheme = (encryptedShuffle) ? "https://" : "http://";
    tce.setMapOutputServerAddress(StringInterner.weakIntern(scheme
       + attempt.getNodeHttpAddress().split(":")[0] + ":"
       + attempt.getShufflePort()));
    tce.setStatus(status);
    tce.setAttemptId(attempt.getID());
    int runTime = 0;
    if (attempt.getFinishTime() != 0 && attempt.getLaunchTime() != 0) {
      runTime = (int) (attempt.getFinishTime() - attempt.getLaunchTime());
    }
    tce.setAttemptRunTime(runTime);

    //raise the event to job so that it adds the completion event to its
    //data structures
    eventHandler.handle(new JobTaskAttemptCompletedEvent(tce));
  }
}
Project: hServer    File: SubmittedJob.java
@SuppressWarnings("unchecked")
private static <T> T getSplitDetails(FSDataInputStream inFile, long offset, Configuration configuration)
        throws IOException {
    inFile.seek(offset);
    String className = StringInterner.weakIntern(Text.readString(inFile));
    Class<T> cls;
    try {
        cls = (Class<T>) configuration.getClassByName(className);
    } catch (ClassNotFoundException ce) {
        IOException wrap = new IOException("Split class " + className +
                " not found");
        wrap.initCause(ce);
        throw wrap;
    }
    SerializationFactory factory = new SerializationFactory(configuration);
    Deserializer<T> deserializer =
            (Deserializer<T>) factory.getDeserializer(cls);
    deserializer.open(inFile);
    T split = deserializer.deserialize(null);
    return split;
}
Project: hops    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
private static void sendContainerCleanup(TaskAttemptImpl taskAttempt,
    TaskAttemptEvent event) {
  if (event instanceof TaskAttemptKillEvent) {
    taskAttempt.addDiagnosticInfo(
        ((TaskAttemptKillEvent) event).getMessage());
  }
  //send the cleanup event to containerLauncher
  taskAttempt.eventHandler.handle(new ContainerLauncherEvent(
      taskAttempt.attemptId,
      taskAttempt.container.getId(), StringInterner
      .weakIntern(taskAttempt.container.getNodeId().toString()),
      taskAttempt.container.getContainerToken(),
      ContainerLauncher.EventType.CONTAINER_REMOTE_CLEANUP,
      event.getType() == TaskAttemptEventType.TA_TIMED_OUT));
}
Project: hadoop-plus    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
private void sendLaunchedEvents() {
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptId.getTaskId()
      .getJobId());
  jce.addCounterUpdate(attemptId.getTaskId().getTaskType() == TaskType.MAP ?
      JobCounter.TOTAL_LAUNCHED_MAPS : JobCounter.TOTAL_LAUNCHED_REDUCES, 1);
  eventHandler.handle(jce);

  LOG.info("TaskAttempt: [" + attemptId
      + "] using containerId: [" + container.getId() + " on NM: ["
      + StringInterner.weakIntern(container.getNodeId().toString()) + "]");
  TaskAttemptStartedEvent tase =
    new TaskAttemptStartedEvent(TypeConverter.fromYarn(attemptId),
        TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
        launchTime, trackerName, httpPort, shufflePort, container.getId(),
        locality.toString(), avataar.toString());
  eventHandler.handle(
      new JobHistoryEvent(attemptId.getTaskId().getJobId(), tase));
}
Project: hadoop-plus    File: TaskImpl.java
private void handleTaskAttemptCompletion(TaskAttemptId attemptId,
    TaskAttemptCompletionEventStatus status) {
  TaskAttempt attempt = attempts.get(attemptId);
  //raise the completion event only if the container is assigned
  // to nextAttemptNumber
  if (attempt.getNodeHttpAddress() != null) {
    TaskAttemptCompletionEvent tce = recordFactory
        .newRecordInstance(TaskAttemptCompletionEvent.class);
    tce.setEventId(-1);
    String scheme = (encryptedShuffle) ? "https://" : "http://";
    tce.setMapOutputServerAddress(StringInterner.weakIntern(scheme
       + attempt.getNodeHttpAddress().split(":")[0] + ":"
       + attempt.getShufflePort()));
    tce.setStatus(status);
    tce.setAttemptId(attempt.getID());
    int runTime = 0;
    if (attempt.getFinishTime() != 0 && attempt.getLaunchTime() != 0) {
      runTime = (int) (attempt.getFinishTime() - attempt.getLaunchTime());
    }
    tce.setAttemptRunTime(runTime);

    //raise the event to job so that it adds the completion event to its
    //data structures
    eventHandler.handle(new JobTaskAttemptCompletedEvent(tce));
  }
}
Project: hadoop-plus    File: TaskStatus.java
public void readFields(DataInput in) throws IOException {
  this.taskid.readFields(in);
  setProgress(in.readFloat());
  this.numSlots = in.readInt();
  this.runState = WritableUtils.readEnum(in, State.class);
  setDiagnosticInfo(StringInterner.weakIntern(Text.readString(in)));
  setStateString(StringInterner.weakIntern(Text.readString(in)));
  this.phase = WritableUtils.readEnum(in, Phase.class); 
  this.startTime = in.readLong(); 
  this.finishTime = in.readLong(); 
  counters = new Counters();
  this.includeAllCounters = in.readBoolean();
  this.outputSize = in.readLong();
  counters.readFields(in);
  nextRecordRange.readFields(in);
}
Project: hops    File: TaskAttemptListenerImpl.java
@Override
public void reportDiagnosticInfo(TaskAttemptID taskAttemptID,
    String diagnosticInfo) throws IOException {
  diagnosticInfo = StringInterner.weakIntern(diagnosticInfo);
  LOG.info("Diagnostics report from " + taskAttemptID.toString() + ": "
      + diagnosticInfo);

  org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
      TypeConverter.toYarn(taskAttemptID);
  taskHeartbeatHandler.progressing(attemptID);

  // This is mainly used for cases where we want to propagate exception traces
  // of tasks that fail.

  // This call exists as a hadoop mapreduce legacy wherein all changes in
  // counters/progress/phase/output-size are reported through statusUpdate()
  // call but not diagnosticInformation.
  context.getEventHandler().handle(
      new TaskAttemptDiagnosticsUpdateEvent(attemptID, diagnosticInfo));
}