Java 类org.apache.hadoop.mapred.JobClient.NetworkedJob 实例源码

项目:hadoop    文件:TestNetworkedJob.java   
@Test (timeout=5000)
public void testGetNullCounters() throws Exception {
  // Stub a mapreduce Job whose counters are absent (null).
  Job backingJob = mock(Job.class);
  when(backingJob.getCounters()).thenReturn(null);

  // NetworkedJob must propagate the null rather than throw.
  RunningJob networkedJob = new JobClient.NetworkedJob(backingJob);
  assertNull(networkedJob.getCounters());

  // The call must have been delegated to the wrapped Job.
  verify(backingJob).getCounters();
}
项目:aliyun-oss-hadoop-fs    文件:TestNetworkedJob.java   
@Test (timeout=5000)
public void testGetNullCounters() throws Exception {
  // Stub a mapreduce Job whose counters are absent (null).
  Job backingJob = mock(Job.class);
  when(backingJob.getCounters()).thenReturn(null);

  // NetworkedJob must propagate the null rather than throw.
  RunningJob networkedJob = new JobClient.NetworkedJob(backingJob);
  assertNull(networkedJob.getCounters());

  // The call must have been delegated to the wrapped Job.
  verify(backingJob).getCounters();
}
项目:big-c    文件:TestNetworkedJob.java   
@Test (timeout=5000)
public void testGetNullCounters() throws Exception {
  // Stub a mapreduce Job whose counters are absent (null).
  Job backingJob = mock(Job.class);
  when(backingJob.getCounters()).thenReturn(null);

  // NetworkedJob must propagate the null rather than throw.
  RunningJob networkedJob = new JobClient.NetworkedJob(backingJob);
  assertNull(networkedJob.getCounters());

  // The call must have been delegated to the wrapped Job.
  verify(backingJob).getCounters();
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestNetworkedJob.java   
@Test (timeout=5000)
public void testGetNullCounters() throws Exception {
  // Stub a mapreduce Job whose counters are absent (null).
  Job backingJob = mock(Job.class);
  when(backingJob.getCounters()).thenReturn(null);

  // NetworkedJob must propagate the null rather than throw.
  RunningJob networkedJob = new JobClient.NetworkedJob(backingJob);
  assertNull(networkedJob.getCounters());

  // The call must have been delegated to the wrapped Job.
  verify(backingJob).getCounters();
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestTaskKilling.java   
/**
 * Verifying the running job status whether it succeeds or not
 * after failing some of its tasks.
 */
@Test
public void testFailedTaskJobStatus() throws IOException, 
        InterruptedException {
  conf = remoteJTClient.getDaemonConf();
  TaskInfo taskInfo = null;
  // Submit a sleep job (1 map, 1 reduce) so there is a long-running
  // map attempt available to fail.
  SleepJob job = new SleepJob();
  job.setConf(conf);
  JobConf jobConf = job.setupJobConf(1, 1, 10000, 4000, 100, 100);
  RunningJob runJob = jobClient.submitJob(jobConf);
  JobID jobId = runJob.getID();
  JobInfo jInfo = remoteJTClient.getJobInfo(jobId);
  Assert.assertTrue("Job has not been started for 1 min.", 
      jtClient.isJobStarted(jobId));
  // Pick the first real (non setup/cleanup) map task.
  TaskInfo[] taskInfos = remoteJTClient.getTaskInfo(jobId);
  for (TaskInfo taskinfo : taskInfos) {
    if (!taskinfo.isSetupOrCleanup() && taskinfo.getTaskID().isMap()) {
      taskInfo = taskinfo;
      break;
    }
  }
  // BUG FIX: guard against an NPE in isTaskStarted() if no map task
  // was found in the returned array.
  Assert.assertNotNull("No running map task was found for the job.", taskInfo);
  Assert.assertTrue("Task has not been started for 1 min.", 
      jtClient.isTaskStarted(taskInfo));

  // Fail (shouldFail = true) the running task attempt; the job is still
  // expected to succeed because the attempt is retried.
  NetworkedJob networkJob = jobClient.new NetworkedJob(jInfo.getStatus());
  TaskID tID = TaskID.downgrade(taskInfo.getTaskID());
  TaskAttemptID taskAttID = new TaskAttemptID(tID , 0);
  networkJob.killTask(taskAttID, true);

  LOG.info("Waiting till the job is completed...");
  while (!jInfo.getStatus().isJobComplete()) {
    UtilsForTests.waitFor(100);
    jInfo = remoteJTClient.getJobInfo(jobId);
  }
  Assert.assertEquals("JobStatus", JobStatus.SUCCEEDED, 
     jInfo.getStatus().getRunState());
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestTaskKilling.java   
/**
 * Polls the job's task completion events until the given task attempt
 * shows up, failing the test if it does not appear within ~10 retries
 * (roughly 100 seconds of polling).
 *
 * @param taskAttemptID the attempt expected to appear in the events
 * @param jInfo job info used to look up the job's completion events
 * @throws Exception if interrupted while sleeping between polls
 */
public void checkTaskCompletionEvent (TaskAttemptID taskAttemptID,
    JobInfo jInfo) throws Exception {
  boolean match = false;
  int count = 0;
  while (!match) {
    TaskCompletionEvent[] taskCompletionEvents =  jobClient.new
      NetworkedJob(jInfo.getStatus()).getTaskCompletionEvents(0);
    for (TaskCompletionEvent taskCompletionEvent : taskCompletionEvents) {
      LOG.info("taskCompletionEvent.getTaskAttemptId().toString() is : " + 
        taskCompletionEvent.getTaskAttemptId().toString());
      LOG.info("compared to taskAttemptID.toString() :" + 
        taskAttemptID.toString());
      if ((taskCompletionEvent.getTaskAttemptId().toString()).
          equals(taskAttemptID.toString())){
        match = true;
        // Sleeping for 10 seconds giving time for the next task
        // attempt to run
        Thread.sleep(10000);
        break;
      }
    }
    if (!match) {
      LOG.info("Thread is sleeping for 10 seconds");
      Thread.sleep(10000);
      count++;
    }
    // If the count goes beyond a point, then break; This is to avoid
    // infinite loop under unforeseen circumstances. Testcase will anyway
    // fail later.
    if (count > 10) {
      // BUG FIX: the two concatenated literals previously rendered as
      // "...in theTaskCompletionEvent" (missing space between them).
      Assert.fail("Since the task attemptid is not appearing in the " +
          "TaskCompletionEvent, it seems this task attempt was not killed");
    }
  }
}
项目:hadoop-plus    文件:TestNetworkedJob.java   
@Test (timeout=5000)
public void testGetNullCounters() throws Exception {
  // Stub a mapreduce Job whose counters are absent (null).
  Job backingJob = mock(Job.class);
  when(backingJob.getCounters()).thenReturn(null);

  // NetworkedJob must propagate the null rather than throw.
  RunningJob networkedJob = new JobClient.NetworkedJob(backingJob);
  assertNull(networkedJob.getCounters());

  // The call must have been delegated to the wrapped Job.
  verify(backingJob).getCounters();
}
项目:FlexMap    文件:TestNetworkedJob.java   
@Test (timeout=5000)
public void testGetNullCounters() throws Exception {
  // Stub a mapreduce Job whose counters are absent (null).
  Job backingJob = mock(Job.class);
  when(backingJob.getCounters()).thenReturn(null);

  // NetworkedJob must propagate the null rather than throw.
  RunningJob networkedJob = new JobClient.NetworkedJob(backingJob);
  assertNull(networkedJob.getCounters());

  // The call must have been delegated to the wrapped Job.
  verify(backingJob).getCounters();
}
项目:hops    文件:TestNetworkedJob.java   
@Test (timeout=5000)
public void testGetNullCounters() throws Exception {
  // Stub a mapreduce Job whose counters are absent (null).
  Job backingJob = mock(Job.class);
  when(backingJob.getCounters()).thenReturn(null);

  // NetworkedJob must propagate the null rather than throw.
  RunningJob networkedJob = new JobClient.NetworkedJob(backingJob);
  assertNull(networkedJob.getCounters());

  // The call must have been delegated to the wrapped Job.
  verify(backingJob).getCounters();
}
项目:hadoop-TCP    文件:TestNetworkedJob.java   
@Test (timeout=5000)
public void testGetNullCounters() throws Exception {
  // Stub a mapreduce Job whose counters are absent (null).
  Job backingJob = mock(Job.class);
  when(backingJob.getCounters()).thenReturn(null);

  // NetworkedJob must propagate the null rather than throw.
  RunningJob networkedJob = new JobClient.NetworkedJob(backingJob);
  assertNull(networkedJob.getCounters());

  // The call must have been delegated to the wrapped Job.
  verify(backingJob).getCounters();
}
项目:hardfs    文件:TestNetworkedJob.java   
@Test (timeout=5000)
public void testGetNullCounters() throws Exception {
  // Stub a mapreduce Job whose counters are absent (null).
  Job backingJob = mock(Job.class);
  when(backingJob.getCounters()).thenReturn(null);

  // NetworkedJob must propagate the null rather than throw.
  RunningJob networkedJob = new JobClient.NetworkedJob(backingJob);
  assertNull(networkedJob.getCounters());

  // The call must have been delegated to the wrapped Job.
  verify(backingJob).getCounters();
}
项目:hadoop-on-lustre2    文件:TestNetworkedJob.java   
@Test (timeout=5000)
public void testGetNullCounters() throws Exception {
  // Stub a mapreduce Job whose counters are absent (null).
  Job backingJob = mock(Job.class);
  when(backingJob.getCounters()).thenReturn(null);

  // NetworkedJob must propagate the null rather than throw.
  RunningJob networkedJob = new JobClient.NetworkedJob(backingJob);
  assertNull(networkedJob.getCounters());

  // The call must have been delegated to the wrapped Job.
  verify(backingJob).getCounters();
}
项目:hanoi-hadoop-2.0.0-cdh    文件:TestTaskKilling.java   
/**
 * Verifying the running job status whether it succeeds or not
 * after failing some of its tasks.
 */
@Test
public void testFailedTaskJobStatus() throws IOException, 
        InterruptedException {
  conf = remoteJTClient.getDaemonConf();
  TaskInfo taskInfo = null;
  // Submit a sleep job (1 map, 1 reduce) so there is a long-running
  // map attempt available to fail.
  SleepJob job = new SleepJob();
  job.setConf(conf);
  JobConf jobConf = job.setupJobConf(1, 1, 10000, 4000, 100, 100);
  RunningJob runJob = jobClient.submitJob(jobConf);
  JobID jobId = runJob.getID();
  JobInfo jInfo = remoteJTClient.getJobInfo(jobId);
  Assert.assertTrue("Job has not been started for 1 min.", 
      jtClient.isJobStarted(jobId));
  // Pick the first real (non setup/cleanup) map task.
  TaskInfo[] taskInfos = remoteJTClient.getTaskInfo(jobId);
  for (TaskInfo taskinfo : taskInfos) {
    if (!taskinfo.isSetupOrCleanup() && taskinfo.getTaskID().isMap()) {
      taskInfo = taskinfo;
      break;
    }
  }
  // BUG FIX: guard against an NPE in isTaskStarted() if no map task
  // was found in the returned array.
  Assert.assertNotNull("No running map task was found for the job.", taskInfo);
  Assert.assertTrue("Task has not been started for 1 min.", 
      jtClient.isTaskStarted(taskInfo));

  // Fail (shouldFail = true) the running task attempt; the job is still
  // expected to succeed because the attempt is retried.
  NetworkedJob networkJob = jobClient.new NetworkedJob(jInfo.getStatus());
  TaskID tID = TaskID.downgrade(taskInfo.getTaskID());
  TaskAttemptID taskAttID = new TaskAttemptID(tID , 0);
  networkJob.killTask(taskAttID, true);

  LOG.info("Waiting till the job is completed...");
  while (!jInfo.getStatus().isJobComplete()) {
    UtilsForTests.waitFor(100);
    jInfo = remoteJTClient.getJobInfo(jobId);
  }
  Assert.assertEquals("JobStatus", JobStatus.SUCCEEDED, 
     jInfo.getStatus().getRunState());
}
项目:hanoi-hadoop-2.0.0-cdh    文件:TestTaskKilling.java   
/**
 * Polls the job's task completion events until the given task attempt
 * shows up, failing the test if it does not appear within ~10 retries
 * (roughly 100 seconds of polling).
 *
 * @param taskAttemptID the attempt expected to appear in the events
 * @param jInfo job info used to look up the job's completion events
 * @throws Exception if interrupted while sleeping between polls
 */
public void checkTaskCompletionEvent (TaskAttemptID taskAttemptID,
    JobInfo jInfo) throws Exception {
  boolean match = false;
  int count = 0;
  while (!match) {
    TaskCompletionEvent[] taskCompletionEvents =  jobClient.new
      NetworkedJob(jInfo.getStatus()).getTaskCompletionEvents(0);
    for (TaskCompletionEvent taskCompletionEvent : taskCompletionEvents) {
      LOG.info("taskCompletionEvent.getTaskAttemptId().toString() is : " + 
        taskCompletionEvent.getTaskAttemptId().toString());
      LOG.info("compared to taskAttemptID.toString() :" + 
        taskAttemptID.toString());
      if ((taskCompletionEvent.getTaskAttemptId().toString()).
          equals(taskAttemptID.toString())){
        match = true;
        // Sleeping for 10 seconds giving time for the next task
        // attempt to run
        Thread.sleep(10000);
        break;
      }
    }
    if (!match) {
      LOG.info("Thread is sleeping for 10 seconds");
      Thread.sleep(10000);
      count++;
    }
    // If the count goes beyond a point, then break; This is to avoid
    // infinite loop under unforeseen circumstances. Testcase will anyway
    // fail later.
    if (count > 10) {
      // BUG FIX: the two concatenated literals previously rendered as
      // "...in theTaskCompletionEvent" (missing space between them).
      Assert.fail("Since the task attemptid is not appearing in the " +
          "TaskCompletionEvent, it seems this task attempt was not killed");
    }
  }
}
项目:mapreduce-fork    文件:TestTaskKilling.java   
/**
 * Polls the job's task completion events until the given task attempt
 * shows up, failing the test if it does not appear within ~10 retries
 * (roughly 100 seconds of polling).
 *
 * @param taskAttemptID the attempt expected to appear in the events
 * @param jInfo job info used to look up the job's completion events
 * @throws Exception if interrupted while sleeping between polls
 */
public void checkTaskCompletionEvent(
    TaskAttemptID taskAttemptID, JobInfo jInfo) throws Exception {
  boolean match = false;
  int count = 0;
  while (!match) {
    TaskCompletionEvent[] taskCompletionEvents =
        new JobClient.NetworkedJob(jInfo.getStatus(),jobClient.cluster)
            .getTaskCompletionEvents(0);
    for (TaskCompletionEvent taskCompletionEvent : taskCompletionEvents) {
      if ((taskCompletionEvent.getTaskAttemptId().toString())
          .equals(taskAttemptID.toString())) {
        match = true;
        // Sleeping for 10 seconds giving time for the next task
        // attempt to run
        Thread.sleep(10000);
        break;
      }
    }
    if (!match) {
      LOG.info("Thread is sleeping for 10 seconds");
      Thread.sleep(10000);
      count++;
    }
    // If the count goes beyond a point, then break; This is to avoid
    // infinite loop under unforeseen circumstances. Testcase will anyway
    // fail later.
    if (count > 10) {
      // BUG FIX: the two concatenated literals previously rendered as
      // "...in theTaskCompletionEvent" (missing space between them).
      Assert.fail("Since the task attemptid is not appearing in the "
          + "TaskCompletionEvent, it seems this task attempt was not killed");
    }
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestJobCacheDirectoriesCleanUp.java   
/**
 * Submit a job and create folders and files in work folder with 
 * non-writable permissions under task attempt id folder.
 * Fail the job and verify whether the files and folders
 * are cleaned up or not.
 * @throws IOException
 */
@Test
public void testJobCleanupAfterJobFail() throws IOException {
  HashMap<TTClient,ArrayList<String>> map = 
      new HashMap<TTClient,ArrayList<String>>();
  conf = rtClient.getDaemonConf();
  // Submit a map-only sleep job whose single map runs long enough
  // for the test to fail all of its attempts.
  SleepJob job = new SleepJob();
  job.setConf(conf);
  JobConf jobConf = job.setupJobConf(1, 0, 10000,0, 10, 10);
  JobClient client = jtClient.getClient();
  RunningJob runJob = client.submitJob(jobConf);
  JobID jobId = runJob.getID();
  JobInfo jobInfo = rtClient.getJobInfo(jobId);
  Assert.assertTrue("Job has not been started for 1 min", 
      jtClient.isJobStarted(jobId));
  TaskInfo [] taskInfos = rtClient.getTaskInfo(jobId);
  boolean isFailTask = false;
  for (TaskInfo taskinfo : taskInfos) {
    if (!taskinfo.isSetupOrCleanup()) {
      Assert.assertTrue("Task has not been started for 1 min ",
          jtClient.isTaskStarted(taskinfo));
      String tasktracker = getTaskTracker(taskinfo);
      Assert.assertNotNull("TaskTracker has not been found", tasktracker);
      TTClient ttclient = getTTClient(tasktracker);
      // BUG FIX: previously this used the class-level "ttClient" field
      // instead of the tracker-specific "ttclient" local just looked up,
      // so local dirs were recorded against the wrong task tracker.
      map.put(ttclient, getTTClientMapRedLocalDirs(ttclient, 
          taskinfo, jobId));
      if (!isFailTask) {
        Assert.assertNotNull("TaskInfo is null.", taskinfo);
        TaskID taskId = TaskID.downgrade(taskinfo.getTaskID());
        TaskAttemptID taskAttID = new TaskAttemptID(taskId, 
            taskinfo.numFailedAttempts());
        int MAX_MAP_TASK_ATTEMPTS = Integer.
             parseInt(jobConf.get("mapred.map.max.attempts"));
        // Keep failing attempts until the configured maximum is reached,
        // which causes the whole job to fail.
        while(taskinfo.numFailedAttempts() < MAX_MAP_TASK_ATTEMPTS) {
          NetworkedJob networkJob = jtClient.getClient().
             new NetworkedJob(jobInfo.getStatus());
          networkJob.killTask(taskAttID, true);
          taskinfo = rtClient.getTaskInfo(taskinfo.getTaskID());
          taskAttID = new TaskAttemptID(taskId, taskinfo.numFailedAttempts());
          jobInfo = rtClient.getJobInfo(jobId);
        }
        isFailTask=true;
      }
    }
  }
  LOG.info("Waiting till the job is completed...");
  Assert.assertTrue("Job has not been completed for 1 min",
      jtClient.isJobStopped(jobId));
  jobInfo = rtClient.getJobInfo(jobId);
  Assert.assertEquals("Job has not been failed", 
          jobInfo.getStatus().getRunState(), JobStatus.FAILED);
  UtilsForTests.waitFor(3000); 
  Assert.assertTrue("Directories have not been cleaned up " + 
      "after completion of job", verifyJobDirectoryCleanup(map));
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestTaskKillingOfStreamingJob.java   
/**
 * Set the sleep time for the tasks is 3 seconds and kill the task using sigkill.
 * Verify whether task is killed after 3 seconds or not. 
 */
@Test
public void testStatusOfKilledTaskWithSignalSleepTime() 
    throws IOException, Exception {
  // Run a streaming job with a 3 second grace period between SIGTERM
  // and SIGKILL for its tasks.
  String runtimeArgs [] = {
      "-D", "mapred.job.name=Numbers Sum",
      "-D", "mapred.map.tasks=1",
      "-D", "mapred.reduce.tasks=1",
      "-D", "mapred.tasktracker.tasks.sleeptime-before-sigkill=3000" };

  JobID jobId = getJobIdOfRunningStreamJob(runtimeArgs);    
  Assert.assertNotNull("Job ID not found for 1 min", jobId);
  Assert.assertTrue("Job has not been started for 1 min.", 
      jtClient.isJobStarted(jobId));

  TaskInfo taskInfo = getTaskInfoOfRunningStreamJob(jobId);
  Assert.assertNotNull("TaskInfo is null",taskInfo);
  Assert.assertTrue("Task has not been started for 1 min.", 
      jtClient.isTaskStarted(taskInfo));

  // Kill (shouldFail = false) the first attempt of the running task.
  JobInfo jInfo = wovenClient.getJobInfo(jobId); 
  NetworkedJob networkJob = client.new NetworkedJob(jInfo.getStatus());
  TaskID tID = TaskID.downgrade(taskInfo.getTaskID());
  TaskAttemptID taskAttID = new TaskAttemptID(tID, 0);
  networkJob.killTask(taskAttID, false);

  // Poll (up to 60s, 1s intervals) until the task leaves the
  // no-status/RUNNING/KILLED_UNCLEAN states, i.e. the kill completed.
  int counter = 0;
  while (counter++ < 60) {
    if (taskInfo.getTaskStatus().length == 0) {
      // No status reported yet; keep waiting.
      UtilsForTests.waitFor(1000);
      taskInfo = wovenClient.getTaskInfo(taskInfo.getTaskID());
    } else if (taskInfo.getTaskStatus()[0].getRunState() == 
        TaskStatus.State.RUNNING) {
      UtilsForTests.waitFor(1000);
      taskInfo = wovenClient.getTaskInfo(taskInfo.getTaskID());
    } else if (taskInfo.getTaskStatus()[0].getRunState() == 
        TaskStatus.State.KILLED_UNCLEAN) {
      UtilsForTests.waitFor(1000);
      taskInfo = wovenClient.getTaskInfo(taskInfo.getTaskID());
    } else {
      break;
    }
  }
  // counter > 3 approximates "at least ~3 seconds elapsed", matching the
  // configured sleeptime-before-sigkill; the final state must be KILLED.
  Assert.assertTrue("Task has been killed before sigkill " + 
      "sleep time of 3 secs.", counter > 3 && TaskStatus.State.KILLED == 
      taskInfo.getTaskStatus()[0].getRunState());

  LOG.info("Waiting till the job is completed...");
  while (!jInfo.getStatus().isJobComplete()) {
    UtilsForTests.waitFor(100);
    jInfo = wovenClient.getJobInfo(jobId);
  }
  // Killing a single attempt must not fail the streaming job.
  Assert.assertEquals("Job has not been succeeded.", 
          jInfo.getStatus().getRunState(), JobStatus.SUCCEEDED);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestTaskKillingOfStreamingJob.java   
/**
 * Set the maximum attempts for the maps and reducers are one.
 * Failed the task and verify whether streaming job is failed or not.
 */
@Test
public void testStreamingJobStatusForFailedTask() throws IOException {
  // Limit every map and reduce to a single attempt so one failed
  // attempt fails the whole streaming job.
  String runtimeArgs [] = {
      "-D", "mapred.job.name=Numbers Sum",
      "-D", "mapred.map.tasks=1",
      "-D", "mapred.reduce.tasks=1",
      "-D", "mapred.map.max.attempts=1",
      "-D", "mapred.reduce.max.attempts=1"};

  JobID jobId = getJobIdOfRunningStreamJob(runtimeArgs);
  Assert.assertNotNull("Job ID not found for 1 min", jobId);
  Assert.assertTrue("Job has not been started for 1 min.", 
      jtClient.isJobStarted(jobId));

  TaskInfo taskInfo = getTaskInfoOfRunningStreamJob(jobId);
  Assert.assertNotNull("TaskInfo is null",taskInfo);
  Assert.assertTrue("Task has not been started for 1 min.", 
      jtClient.isTaskStarted(taskInfo));

  // Fail (shouldFail = true) the first attempt of the running task.
  JobInfo jInfo = wovenClient.getJobInfo(jobId);
  NetworkedJob networkJob = client.new NetworkedJob(jInfo.getStatus());
  TaskID tID = TaskID.downgrade(taskInfo.getTaskID());
  TaskAttemptID taskAttID = new TaskAttemptID(tID, 0);
  networkJob.killTask(taskAttID, true);

  // Poll (up to 60s, 1s intervals) until the task leaves the
  // no-status/RUNNING/FAILED_UNCLEAN states, i.e. settles terminally.
  int counter = 0;
  while (counter++ < 60) {
    if (taskInfo.getTaskStatus().length == 0) {
      // No status reported yet; keep waiting.
      UtilsForTests.waitFor(1000);
      taskInfo = wovenClient.getTaskInfo(taskInfo.getTaskID());
    }else if (taskInfo.getTaskStatus()[0].getRunState() == 
        TaskStatus.State.RUNNING) {
      UtilsForTests.waitFor(1000);
      taskInfo = wovenClient.getTaskInfo(taskInfo.getTaskID());
    } else if (taskInfo.getTaskStatus()[0].getRunState() == 
        TaskStatus.State.FAILED_UNCLEAN) {
      UtilsForTests.waitFor(1000);
      taskInfo = wovenClient.getTaskInfo(taskInfo.getTaskID());
    } else {
      break;
    }
  }
  Assert.assertTrue("Task has not been Failed" , TaskStatus.State.FAILED == 
      taskInfo.getTaskStatus()[0].getRunState());

  LOG.info("Waiting till the job is completed...");
  while (!jInfo.getStatus().isJobComplete()) {
    UtilsForTests.waitFor(100);
    jInfo = wovenClient.getJobInfo(jobId);
  }
  // With max attempts = 1, the single failed attempt must fail the job.
  Assert.assertEquals("Job has not been failed", 
      jInfo.getStatus().getRunState(), JobStatus.FAILED);
}
项目:hanoi-hadoop-2.0.0-cdh    文件:TestJobCacheDirectoriesCleanUp.java   
/**
 * Submit a job and create folders and files in work folder with 
 * non-writable permissions under task attempt id folder.
 * Fail the job and verify whether the files and folders
 * are cleaned up or not.
 * @throws IOException
 */
@Test
public void testJobCleanupAfterJobFail() throws IOException {
  HashMap<TTClient,ArrayList<String>> map = 
      new HashMap<TTClient,ArrayList<String>>();
  conf = rtClient.getDaemonConf();
  // Submit a map-only sleep job whose single map runs long enough
  // for the test to fail all of its attempts.
  SleepJob job = new SleepJob();
  job.setConf(conf);
  JobConf jobConf = job.setupJobConf(1, 0, 10000,0, 10, 10);
  JobClient client = jtClient.getClient();
  RunningJob runJob = client.submitJob(jobConf);
  JobID jobId = runJob.getID();
  JobInfo jobInfo = rtClient.getJobInfo(jobId);
  Assert.assertTrue("Job has not been started for 1 min", 
      jtClient.isJobStarted(jobId));
  TaskInfo [] taskInfos = rtClient.getTaskInfo(jobId);
  boolean isFailTask = false;
  for (TaskInfo taskinfo : taskInfos) {
    if (!taskinfo.isSetupOrCleanup()) {
      Assert.assertTrue("Task has not been started for 1 min ",
          jtClient.isTaskStarted(taskinfo));
      String tasktracker = getTaskTracker(taskinfo);
      Assert.assertNotNull("TaskTracker has not been found", tasktracker);
      TTClient ttclient = getTTClient(tasktracker);
      // BUG FIX: previously this used the class-level "ttClient" field
      // instead of the tracker-specific "ttclient" local just looked up,
      // so local dirs were recorded against the wrong task tracker.
      map.put(ttclient, getTTClientMapRedLocalDirs(ttclient, 
          taskinfo, jobId));
      if (!isFailTask) {
        Assert.assertNotNull("TaskInfo is null.", taskinfo);
        TaskID taskId = TaskID.downgrade(taskinfo.getTaskID());
        TaskAttemptID taskAttID = new TaskAttemptID(taskId, 
            taskinfo.numFailedAttempts());
        int MAX_MAP_TASK_ATTEMPTS = Integer.
             parseInt(jobConf.get("mapred.map.max.attempts"));
        // Keep failing attempts until the configured maximum is reached,
        // which causes the whole job to fail.
        while(taskinfo.numFailedAttempts() < MAX_MAP_TASK_ATTEMPTS) {
          NetworkedJob networkJob = jtClient.getClient().
             new NetworkedJob(jobInfo.getStatus());
          networkJob.killTask(taskAttID, true);
          taskinfo = rtClient.getTaskInfo(taskinfo.getTaskID());
          taskAttID = new TaskAttemptID(taskId, taskinfo.numFailedAttempts());
          jobInfo = rtClient.getJobInfo(jobId);
        }
        isFailTask=true;
      }
    }
  }
  LOG.info("Waiting till the job is completed...");
  Assert.assertTrue("Job has not been completed for 1 min",
      jtClient.isJobStopped(jobId));
  jobInfo = rtClient.getJobInfo(jobId);
  Assert.assertEquals("Job has not been failed", 
          jobInfo.getStatus().getRunState(), JobStatus.FAILED);
  UtilsForTests.waitFor(3000); 
  Assert.assertTrue("Directories have not been cleaned up " + 
      "after completion of job", verifyJobDirectoryCleanup(map));
}
项目:hanoi-hadoop-2.0.0-cdh    文件:TestTaskKillingOfStreamingJob.java   
/**
 * Set the sleep time for the tasks is 3 seconds and kill the task using sigkill.
 * Verify whether task is killed after 3 seconds or not. 
 */
@Test
public void testStatusOfKilledTaskWithSignalSleepTime() 
    throws IOException, Exception {
  // Run a streaming job with a 3 second grace period between SIGTERM
  // and SIGKILL for its tasks.
  String runtimeArgs [] = {
      "-D", "mapred.job.name=Numbers Sum",
      "-D", "mapred.map.tasks=1",
      "-D", "mapred.reduce.tasks=1",
      "-D", "mapred.tasktracker.tasks.sleeptime-before-sigkill=3000" };

  JobID jobId = getJobIdOfRunningStreamJob(runtimeArgs);    
  Assert.assertNotNull("Job ID not found for 1 min", jobId);
  Assert.assertTrue("Job has not been started for 1 min.", 
      jtClient.isJobStarted(jobId));

  TaskInfo taskInfo = getTaskInfoOfRunningStreamJob(jobId);
  Assert.assertNotNull("TaskInfo is null",taskInfo);
  Assert.assertTrue("Task has not been started for 1 min.", 
      jtClient.isTaskStarted(taskInfo));

  // Kill (shouldFail = false) the first attempt of the running task.
  JobInfo jInfo = wovenClient.getJobInfo(jobId); 
  NetworkedJob networkJob = client.new NetworkedJob(jInfo.getStatus());
  TaskID tID = TaskID.downgrade(taskInfo.getTaskID());
  TaskAttemptID taskAttID = new TaskAttemptID(tID, 0);
  networkJob.killTask(taskAttID, false);

  // Poll (up to 60s, 1s intervals) until the task leaves the
  // no-status/RUNNING/KILLED_UNCLEAN states, i.e. the kill completed.
  int counter = 0;
  while (counter++ < 60) {
    if (taskInfo.getTaskStatus().length == 0) {
      // No status reported yet; keep waiting.
      UtilsForTests.waitFor(1000);
      taskInfo = wovenClient.getTaskInfo(taskInfo.getTaskID());
    } else if (taskInfo.getTaskStatus()[0].getRunState() == 
        TaskStatus.State.RUNNING) {
      UtilsForTests.waitFor(1000);
      taskInfo = wovenClient.getTaskInfo(taskInfo.getTaskID());
    } else if (taskInfo.getTaskStatus()[0].getRunState() == 
        TaskStatus.State.KILLED_UNCLEAN) {
      UtilsForTests.waitFor(1000);
      taskInfo = wovenClient.getTaskInfo(taskInfo.getTaskID());
    } else {
      break;
    }
  }
  // counter > 3 approximates "at least ~3 seconds elapsed", matching the
  // configured sleeptime-before-sigkill; the final state must be KILLED.
  Assert.assertTrue("Task has been killed before sigkill " + 
      "sleep time of 3 secs.", counter > 3 && TaskStatus.State.KILLED == 
      taskInfo.getTaskStatus()[0].getRunState());

  LOG.info("Waiting till the job is completed...");
  while (!jInfo.getStatus().isJobComplete()) {
    UtilsForTests.waitFor(100);
    jInfo = wovenClient.getJobInfo(jobId);
  }
  // Killing a single attempt must not fail the streaming job.
  Assert.assertEquals("Job has not been succeeded.", 
          jInfo.getStatus().getRunState(), JobStatus.SUCCEEDED);
}
项目:hanoi-hadoop-2.0.0-cdh    文件:TestTaskKillingOfStreamingJob.java   
/**
 * Set the maximum attempts for the maps and reducers are one.
 * Failed the task and verify whether streaming job is failed or not.
 */
@Test
public void testStreamingJobStatusForFailedTask() throws IOException {
  // Limit every map and reduce to a single attempt so one failed
  // attempt fails the whole streaming job.
  String runtimeArgs [] = {
      "-D", "mapred.job.name=Numbers Sum",
      "-D", "mapred.map.tasks=1",
      "-D", "mapred.reduce.tasks=1",
      "-D", "mapred.map.max.attempts=1",
      "-D", "mapred.reduce.max.attempts=1"};

  JobID jobId = getJobIdOfRunningStreamJob(runtimeArgs);
  Assert.assertNotNull("Job ID not found for 1 min", jobId);
  Assert.assertTrue("Job has not been started for 1 min.", 
      jtClient.isJobStarted(jobId));

  TaskInfo taskInfo = getTaskInfoOfRunningStreamJob(jobId);
  Assert.assertNotNull("TaskInfo is null",taskInfo);
  Assert.assertTrue("Task has not been started for 1 min.", 
      jtClient.isTaskStarted(taskInfo));

  // Fail (shouldFail = true) the first attempt of the running task.
  JobInfo jInfo = wovenClient.getJobInfo(jobId);
  NetworkedJob networkJob = client.new NetworkedJob(jInfo.getStatus());
  TaskID tID = TaskID.downgrade(taskInfo.getTaskID());
  TaskAttemptID taskAttID = new TaskAttemptID(tID, 0);
  networkJob.killTask(taskAttID, true);

  // Poll (up to 60s, 1s intervals) until the task leaves the
  // no-status/RUNNING/FAILED_UNCLEAN states, i.e. settles terminally.
  int counter = 0;
  while (counter++ < 60) {
    if (taskInfo.getTaskStatus().length == 0) {
      // No status reported yet; keep waiting.
      UtilsForTests.waitFor(1000);
      taskInfo = wovenClient.getTaskInfo(taskInfo.getTaskID());
    }else if (taskInfo.getTaskStatus()[0].getRunState() == 
        TaskStatus.State.RUNNING) {
      UtilsForTests.waitFor(1000);
      taskInfo = wovenClient.getTaskInfo(taskInfo.getTaskID());
    } else if (taskInfo.getTaskStatus()[0].getRunState() == 
        TaskStatus.State.FAILED_UNCLEAN) {
      UtilsForTests.waitFor(1000);
      taskInfo = wovenClient.getTaskInfo(taskInfo.getTaskID());
    } else {
      break;
    }
  }
  Assert.assertTrue("Task has not been Failed" , TaskStatus.State.FAILED == 
      taskInfo.getTaskStatus()[0].getRunState());

  LOG.info("Waiting till the job is completed...");
  while (!jInfo.getStatus().isJobComplete()) {
    UtilsForTests.waitFor(100);
    jInfo = wovenClient.getJobInfo(jobId);
  }
  // With max attempts = 1, the single failed attempt must fail the job.
  Assert.assertEquals("Job has not been failed", 
      jInfo.getStatus().getRunState(), JobStatus.FAILED);
}
项目:mapreduce-fork    文件:TestTaskKilling.java   
/**
 * Verifying the running job status whether it succeeds or not after failing
 * some of its tasks.
 * 
 * @throws ClassNotFoundException
 */
@Test
public void testFailedTaskJobStatus()
    throws IOException, InterruptedException, ClassNotFoundException {
  Configuration conf = new Configuration(cluster.getConf());
  TaskInfo taskInfo = null;
  SleepJob job = new SleepJob();
  job.setConf(conf);
  Job slpJob = job.createJob(3, 1, 4000, 4000, 100, 100);
  // NOTE(review): these max-attempt settings are applied to a local
  // JobConf that is never passed to slpJob before submit() — confirm
  // whether they are intended to take effect.
  JobConf jobConf = new JobConf(conf);
  jobConf.setMaxMapAttempts(20);
  jobConf.setMaxReduceAttempts(20);
  slpJob.submit();
  RunningJob runJob =
      jobClient.getJob(org.apache.hadoop.mapred.JobID.downgrade(slpJob
          .getJobID()));
  JobID id = runJob.getID();
  JobInfo jInfo = remoteJTClient.getJobInfo(id);
  // Wait up to 60 seconds for the job to reach the RUNNING state.
  int counter = 0;
  while (counter < 60) {
    if (jInfo.getStatus().getRunState() == JobStatus.RUNNING) {
      break;
    } else {
      UtilsForTests.waitFor(1000);
      jInfo = remoteJTClient.getJobInfo(id);
    }
    counter++;
  }
  Assert.assertTrue("Job has not been started for 1 min.", counter != 60);

  // Picks the LAST non-setup/cleanup task (no break in the loop).
  // NOTE(review): taskInfo stays null if only setup/cleanup tasks are
  // returned, which would NPE below — presumably the 3 maps guarantee
  // at least one real task; verify against the cluster setup.
  TaskInfo[] taskInfos = remoteJTClient.getTaskInfo(id);
  for (TaskInfo taskinfo : taskInfos) {
    if (!taskinfo.isSetupOrCleanup()) {
      taskInfo = taskinfo;
    }
  }

  // Wait up to 60 seconds for the chosen task to start running.
  counter = 0;
  taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
  while (counter < 60) {
    if (taskInfo.getTaskStatus().length > 0) {
      if (taskInfo.getTaskStatus()[0].getRunState() == TaskStatus.State.RUNNING) {
        break;
      }
    }
    UtilsForTests.waitFor(1000);
    taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
    counter++;
  }
  Assert.assertTrue("Task has not been started for 1 min.", counter != 60);

  // Kill (shouldFail = false) attempt 0 of the task; the job should
  // still succeed because the attempt is retried.
  NetworkedJob networkJob = new JobClient.NetworkedJob(jInfo.getStatus(),jobClient.cluster);
  TaskID tID = TaskID.downgrade(taskInfo.getTaskID());
  TaskAttemptID taskAttID = new TaskAttemptID(tID, 0);
  networkJob.killTask(taskAttID, false);

  LOG.info("Waiting till the job is completed...");
  while (!jInfo.getStatus().isJobComplete()) {
    UtilsForTests.waitFor(100);
    jInfo = remoteJTClient.getJobInfo(id);
  }

  Assert.assertEquals(
      "JobStatus", jInfo.getStatus().getRunState(), JobStatus.SUCCEEDED);
}