Example source code for the Java class org.apache.hadoop.mapred.JobStatus

Project: hadoop    File: HistoryViewer.java   (identical code appears in aliyun-oss-hadoop-fs, big-c, hadoop-2.6.0-cdh5.4.3, hadoop-plus, FlexMap, hops, hadoop-TCP, hardfs, hadoop-on-lustre2, and mapreduce-fork)
private void printJobAnalysis() {
  if (!job.getJobStatus().equals
      (JobStatus.getJobRunState(JobStatus.SUCCEEDED))) {
    System.out.println("No Analysis available as job did not finish");
    return;
  }

  AnalyzedJob avg = new AnalyzedJob(job);

  System.out.println("\nAnalysis");
  System.out.println("=========");
  printAnalysis(avg.getMapTasks(), cMap, "map", avg.getAvgMapTime(), 10);
  printLast(avg.getMapTasks(), "map", cFinishMapRed);

  if (avg.getReduceTasks().length > 0) {
    printAnalysis(avg.getReduceTasks(), cShuffle, "shuffle", 
        avg.getAvgShuffleTime(), 10);
    printLast(avg.getReduceTasks(), "shuffle", cFinishShuffle);

    printAnalysis(avg.getReduceTasks(), cReduce, "reduce",
        avg.getAvgReduceTime(), 10);
    printLast(avg.getReduceTasks(), "reduce", cFinishMapRed);
  }
  System.out.println("=========");
}
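Every HistoryViewer variant above guards on JobStatus.getJobRunState(int), the static helper that maps the classic run-state int constants to their display strings; job.getJobStatus() returns one of those strings for a finished job. A minimal sketch of what the guard compares:

import org.apache.hadoop.mapred.JobStatus;

public class RunStateDemo {
  public static void main(String[] args) {
    // getJobRunState maps the run-state constants to their string names.
    System.out.println(JobStatus.getJobRunState(JobStatus.SUCCEEDED)); // SUCCEEDED
    System.out.println(JobStatus.getJobRunState(JobStatus.RUNNING));   // RUNNING
    System.out.println(JobStatus.getJobRunState(JobStatus.FAILED));    // FAILED
  }
}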
Project: hadoop-2.6.0-cdh5.4.3    File: JobHistoryParser.java
/**
 * Only used for unit tests.
 */
@Private
public synchronized JobInfo parse(EventReader reader) throws IOException {

  if (info != null) {
    return info;
  }

  info = new JobInfo();
  parse(reader, this);
  if (info.getJobStatus() == null) {
    info.jobStatus = JobStatus.getJobRunState(JobStatus.FAILED);
    if (info.getErrorInfo() == null || info.getErrorInfo().equals("")) {
      info.errorInfo = "Application failed due to failed ApplicationMaster.\n"
              + "Only partial information is available; some values may be "
              + "inaccurate.";
    }
  }
  if (info.getFinishTime() == -1L) {
    info.finishTime = info.getLaunchTime();
  }
  return info;
}
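This parse(EventReader) overload is test-only; production callers go through parse(). A hedged usage sketch (the history-file path and configuration are illustrative, not from the source):

// Sketch: recover a job's final status from its history file.
// Needs: org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem,
// org.apache.hadoop.fs.Path, org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
JobHistoryParser parser = new JobHistoryParser(
    fs, new Path("/history/job_1407251005_0815.jhist")); // path is illustrative
JobHistoryParser.JobInfo jobInfo = parser.parse();
// If the AM died before logging a finish event, this falls back to "FAILED",
// as in the method above.
System.out.println("status: " + jobInfo.getJobStatus());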
Project: hadoop-2.6.0-cdh5.4.3    File: JTClient.java   (identical code appears in hadoop-on-lustre, hanoi-hadoop-2.0.0-cdh, and twice in hortonworks-extension)
/**
 * Checks whether the job has started, i.e. reached the RUNNING state,
 * polling for up to 60 seconds.
 * @param id job id
 * @return true if the job is running.
 * @throws IOException if an I/O error occurs.
 */
public boolean isJobStarted(JobID id) throws IOException {
  JobInfo jInfo = getJobInfo(id);
  int counter = 0;
  while (counter < 60) {
    if (jInfo.getStatus().getRunState() == JobStatus.RUNNING) {
      break;
    } else {
      UtilsForTests.waitFor(1000);
      jInfo = getJobInfo(jInfo.getID());
      Assert.assertNotNull("Job information is null", jInfo);
    }
    counter++;
  }
  return counter != 60;
}
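The loop probes once per second and gives up after 60 attempts, so the method returns true only if the job reached RUNNING within roughly a minute. The same pattern can be written against the plain JobClient API; a sketch, assuming a configured client (needs org.apache.hadoop.mapred.{JobClient, JobID, JobStatus, RunningJob}):

// Sketch: wait up to 60 seconds for a job to reach the RUNNING state.
public static boolean waitUntilRunning(JobClient client, JobID id)
    throws IOException, InterruptedException {
  for (int attempt = 0; attempt < 60; attempt++) {
    RunningJob job = client.getJob(id);
    if (job != null && job.getJobState() == JobStatus.RUNNING) {
      return true;
    }
    Thread.sleep(1000);                 // probe once per second
  }
  return false;                         // never reached RUNNING
}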
Project: hadoop-2.6.0-cdh5.4.3    File: HadoopJob.java   (identical code appears in hadoop-EAR, hadoop-on-lustre, RDFS, hadoop-0.20, hanoi-hadoop-2.0.0-cdh, mapreduce-fork, hadoop-gpu, and twice in hortonworks-extension)
/**
 * Update this job's cached state from the given JobStatus.
 *
 * @param status the latest status reported for this job
 */
void update(JobStatus status) {
  this.status = status;
  try {
    this.counters = running.getCounters();
    this.completed = running.isComplete();
    this.successful = running.isSuccessful();
    this.mapProgress = running.mapProgress();
    this.reduceProgress = running.reduceProgress();
    // running.getTaskCompletionEvents(fromEvent);

  } catch (IOException ioe) {
    ioe.printStackTrace();
  }

  this.completedMaps = (int) (this.totalMaps * this.mapProgress);
  this.completedReduces = (int) (this.totalReduces * this.reduceProgress);
}
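Note that the completed-task counts are derived by truncating the float progress fractions, so they can lag the true count; for example, 10 maps at 97% progress report as 9 completed:

int totalMaps = 10;
float mapProgress = 0.97f;              // as returned by running.mapProgress()
int completedMaps = (int) (totalMaps * mapProgress);
System.out.println(completedMaps);      // 9 -- the cast truncates toward zero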
Project: hadoop-EAR    File: SimulatorJobTracker.java   (identical code appears in RDFS)
@Override
public synchronized JobStatus submitJob(JobID jobId) throws IOException {
  boolean loggingEnabled = LOG.isDebugEnabled();
  if (loggingEnabled) {
    LOG.debug("submitJob for jobname = " + jobId);
  }
  if (jobs.containsKey(jobId)) {
    // job already running, don't start twice
    if (loggingEnabled) {
      LOG.debug("Job '" + jobId.getId() + "' already present ");
    }
    return jobs.get(jobId).getStatus();
  }
  JobStory jobStory = SimulatorJobCache.get(jobId);
  if (jobStory == null) {
    throw new IllegalArgumentException("Job not found in SimulatorJobCache: " + jobId);
  }
  validateAndSetClock(jobStory.getSubmissionTime());

  SimulatorJobInProgress job = new SimulatorJobInProgress(jobId, this,
                                                          this.conf, 
                                                          jobStory);
  return addJob(jobId, job);
}
Project: hadoop-EAR    File: SimulatorJobTracker.java   (identical code appears in RDFS and mapreduce-fork)
/**
 * Safely clean-up all data structures at the end of the 
 * job (success/failure/killed). In addition to performing the tasks that the
 * original finalizeJob does, we also inform the SimulatorEngine about the 
 * completion of this job. 
 *  
 * @param job completed job.
 */
@Override
synchronized void finalizeJob(JobInProgress job) {

  // Let the SimulatorEngine know that the job is done
  JobStatus cloneStatus = (JobStatus)job.getStatus().clone();
  engine.markCompletedJob(cloneStatus, 
                          SimulatorJobTracker.getClock().getTime());

  JobID jobId = job.getStatus().getJobID();
  LOG.info("Finished job " + jobId + " endtime = " +
            getClock().getTime() + " with status: " +
            JobStatus.getJobRunState(job.getStatus().getRunState()));

  // for updating the metrics and JobHistory, invoke the original 
  // finalizeJob.
  super.finalizeJob(job);

  // now placing this job in queue for future nuking
  cleanupJob(job);
}
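The status is cloned before being handed to the SimulatorEngine so that later updates to the live JobStatus cannot mutate the engine's record of the completed job. JobStatus exposes a public clone(), which is why the cast-and-clone idiom works; a sketch:

// Sketch: snapshot a job's status before handing it to another component.
JobStatus live = jip.getStatus();               // jip: a JobInProgress (assumed in scope)
JobStatus snapshot = (JobStatus) live.clone();  // independent copy of all fields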
Project: sqoop-on-spark    File: MapreduceSubmissionEngine.java
/**
 * Convert map-reduce specific job status constants to Sqoop job status
 * constants.
 *
 * @param status Map-reduce job constant
 * @return Equivalent submission status
 */
private SubmissionStatus convertMapreduceState(int status) {
  if (status == JobStatus.PREP) {
    return SubmissionStatus.BOOTING;
  } else if (status == JobStatus.RUNNING) {
    return SubmissionStatus.RUNNING;
  } else if (status == JobStatus.FAILED) {
    return SubmissionStatus.FAILED;
  } else if (status == JobStatus.KILLED) {
    return SubmissionStatus.FAILED;
  } else if (status == JobStatus.SUCCEEDED) {
    return SubmissionStatus.SUCCEEDED;
  }

  throw new SqoopException(MapreduceSubmissionError.MAPREDUCE_0004,
    "Unknown status " + status);
}
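Note that KILLED is deliberately folded into FAILED; Sqoop's status model does not distinguish the two. A usage sketch (from within the engine class, since the helper is private):

// Both a failed and a killed MapReduce job surface as FAILED in Sqoop.
SubmissionStatus a = convertMapreduceState(JobStatus.FAILED); // FAILED
SubmissionStatus b = convertMapreduceState(JobStatus.KILLED); // FAILED
SubmissionStatus c = convertMapreduceState(JobStatus.PREP);   // BOOTING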
Project: ignite    File: HadoopV1CleanupTask.java
/** {@inheritDoc} */
@Override public void run(HadoopTaskContext taskCtx) throws IgniteCheckedException {
    HadoopV2TaskContext ctx = (HadoopV2TaskContext)taskCtx;

    JobContext jobCtx = ctx.jobContext();

    try {
        OutputCommitter committer = jobCtx.getJobConf().getOutputCommitter();

        if (abort)
            committer.abortJob(jobCtx, JobStatus.State.FAILED);
        else
            committer.commitJob(jobCtx);
    }
    catch (IOException e) {
        throw new IgniteCheckedException(e);
    }
}
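Unlike the other snippets on this page, the Ignite task passes the new-API enum org.apache.hadoop.mapreduce.JobStatus.State rather than the old int constants. The two encodings are backed by the same numeric values:

// Old-API int constant vs. new-API enum for the same terminal state.
int oldFailed = org.apache.hadoop.mapred.JobStatus.FAILED;
org.apache.hadoop.mapreduce.JobStatus.State newFailed =
    org.apache.hadoop.mapreduce.JobStatus.State.FAILED;
System.out.println(oldFailed == newFailed.getValue()); // true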
Project: ankush    File: JobStatusProvider.java
public Map<String, Object> getJobDetails(JobClient jobClient, String jobId)
        throws AnkushException {
    String errMsg = "Unable to fetch Hadoop job details; could not connect to Hadoop JobClient.";
    try {
        if (jobClient != null) {
            // Get the jobs that are submitted.
            JobStatus[] jobStatus = jobClient.getAllJobs();
            for (JobStatus jobSts : jobStatus) {
                // The loop body is empty in the original snippet; the lookup
                // below is an assumed minimal completion, not from the source.
                if (jobSts.getJobID().toString().equals(jobId)) {
                    Map<String, Object> details = new HashMap<String, Object>();
                    details.put("jobId", jobId);
                    details.put("state",
                            JobStatus.getJobRunState(jobSts.getRunState()));
                    details.put("mapProgress", jobSts.mapProgress());
                    details.put("reduceProgress", jobSts.reduceProgress());
                    return details;
                }
            }
        }
    } catch (Exception e) {
        HadoopUtils.addAndLogError(this.LOG, this.clusterConfig, errMsg,
                Constant.Component.Name.HADOOP, e);
        throw new AnkushException(errMsg);
    }
    return null;
}
Project: mapreduce-fork    File: JobInfoImpl.java
public JobInfoImpl(
    JobID id, boolean setupLaunched, boolean setupFinished,
    boolean cleanupLaunched, int runningMaps, int runningReduces,
    int waitingMaps, int waitingReduces, int finishedMaps,
    int finishedReduces, JobStatus status, String historyUrl,
    List<String> blackListedTracker, boolean isComplete, int numMaps,
    int numReduces, boolean historyCopied) {
  super();
  this.blackListedTracker = blackListedTracker;
  this.historyUrl = historyUrl;
  this.id = id;
  this.setupLaunched = setupLaunched;
  this.setupFinished = setupFinished;
  this.cleanupLaunched = cleanupLaunched;
  this.status = status;
  this.runningMaps = runningMaps;
  this.runningReduces = runningReduces;
  this.waitingMaps = waitingMaps;
  this.waitingReduces = waitingReduces;
  this.finishedMaps = finishedMaps;
  this.finishedReduces = finishedReduces;
  this.numMaps = numMaps;
  this.numReduces = numReduces;
  this.historyCopied = historyCopied;
}
Project: mapreduce-fork    File: HadoopJob.java
static JobState ofInt(int state) {
  if (state == JobStatus.PREP) {
    return PREPARE;
  }
  else if (state == JobStatus.RUNNING) {
    return RUNNING;
  }
  else if (state == JobStatus.FAILED) {
    return FAILED;
  }
  else if (state == JobStatus.SUCCEEDED) {
    return SUCCEEDED;
  }
  else {
    return null;
  }
}
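The if/else chain is equivalent to a switch over the run-state constants (which are compile-time ints); KILLED and any unknown state fall through to null:

static JobState ofInt(int state) {
  switch (state) {
    case JobStatus.PREP:      return PREPARE;
    case JobStatus.RUNNING:   return RUNNING;
    case JobStatus.FAILED:    return FAILED;
    case JobStatus.SUCCEEDED: return SUCCEEDED;
    default:                  return null;  // includes JobStatus.KILLED
  }
}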
Project: mapreduce-fork    File: UtilsForGridmix.java
/**
 * List the JobIDs of the most recently executed Gridmix jobs, skipping the
 * Gridmix data-generation jobs.
 * @param client job client.
 * @param execJobCount number of executed jobs to examine.
 * @return list of Gridmix JobIDs, or null if none were found.
 */
public static List<JobID> listGridmixJobIDs(JobClient client, 
    int execJobCount) throws IOException { 
  List<JobID> jobids = new ArrayList<JobID>();
  JobStatus [] jobStatus = client.getAllJobs();
  int numJobs = jobStatus.length;
  for (int index = 1; index <= execJobCount; index++) {
    JobStatus js = jobStatus[numJobs - index];
    JobID jobid = js.getJobID();
    String jobName = js.getJobName();
    if (!jobName.equals("GRIDMIX_GENERATE_INPUT_DATA") && 
        !jobName.equals("GRIDMIX_GENERATE_DISTCACHE_DATA")) {
      jobids.add(jobid);
    }
  }
  return jobids.isEmpty() ? null : jobids;
}
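The method walks the getAllJobs() array from the end, on the assumption that the newest jobs appear last, and filters out the two data-generation jobs by name. A hedged usage sketch (client setup assumed):

JobClient client = new JobClient(new JobConf());
List<JobID> ids = UtilsForGridmix.listGridmixJobIDs(client, 5);
if (ids != null) {
  for (JobID id : ids) {
    System.out.println("gridmix job: " + id);
  }
}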
Project: hbase-indexer    File: BatchStateUpdaterTest.java
@Test
public void testRun_Running() throws Exception {
    String jobId = "job_201407251005_0815";
    createDefinition("mytest", jobId);
    RunningJob job = createJob(jobId, JobStatus.RUNNING);

    when(job.getJobState()).thenReturn(JobStatus.RUNNING);

    Assert.assertEquals(0, executorService.getQueue().size());
    checkAllIndexes();

    Assert.assertEquals(1, executorService.getQueue().size());
    verify(model, VerificationModeFactory.times(1)).getIndexer(anyString());
    verify(model, VerificationModeFactory.times(0)).updateIndexerInternal(any(IndexerDefinition.class));
    Thread.sleep(60);
    Assert.assertEquals(1, executorService.getQueue().size());
    verify(model, VerificationModeFactory.times(2)).getIndexer(anyString());
    verify(model, VerificationModeFactory.times(0)).updateIndexerInternal(any(IndexerDefinition.class));


    when(job.getJobState()).thenReturn(JobStatus.SUCCEEDED);
    Thread.sleep(60);
    Assert.assertEquals(0, executorService.getQueue().size());
    verify(model, VerificationModeFactory.times(3)).getIndexer(anyString());
    verify(model, VerificationModeFactory.times(1)).updateIndexerInternal(any(IndexerDefinition.class));
}
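The test drives the state machine entirely through Mockito stubbing: re-stubbing getJobState() between sleeps flips the mocked job from RUNNING to SUCCEEDED, and the verify counts confirm the updater reacted exactly once. The core pattern, reduced to a sketch:

// Restubbing a mock mid-test changes what later polls observe.
RunningJob job = mock(RunningJob.class);
when(job.getJobState()).thenReturn(JobStatus.RUNNING);
// ... poller observes a running job, no indexer update ...
when(job.getJobState()).thenReturn(JobStatus.SUCCEEDED);
// ... next poll observes success and triggers the indexer update ...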
Project: hadoop    File: JobHistoryParser.java
private void handleJobFinishedEvent(JobFinishedEvent event) {
  info.finishTime = event.getFinishTime();
  info.finishedMaps = event.getFinishedMaps();
  info.finishedReduces = event.getFinishedReduces();
  info.failedMaps = event.getFailedMaps();
  info.failedReduces = event.getFailedReduces();
  info.totalCounters = event.getTotalCounters();
  info.mapCounters = event.getMapCounters();
  info.reduceCounters = event.getReduceCounters();
  info.jobStatus = JobStatus.getJobRunState(JobStatus.SUCCEEDED);
}