Java class org.apache.hadoop.mapred.JobID usage examples
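Before the per-project snippets, a short orientation: org.apache.hadoop.mapred.JobID is the old-API job identifier, a subclass of org.apache.hadoop.mapreduce.JobID. The following is a minimal, self-contained sketch (not taken from any of the projects below) of the construction, canonical string form, and parsing the snippets rely on; the "job_test_0000" strings asserted in the tests follow this format.

import org.apache.hadoop.mapred.JobID;

public class JobIdDemo {
  public static void main(String[] args) {
    // Construct from a jobtracker identifier and a sequence number.
    JobID id = new JobID("test", 0);
    // toString() renders the canonical form "job_<jtIdentifier>_<id>",
    // with the numeric part zero-padded to at least four digits.
    System.out.println(id);                   // job_test_0000
    System.out.println(id.getJtIdentifier()); // test
    System.out.println(id.getId());           // 0

    // forName() parses the canonical string back into a JobID and
    // throws IllegalArgumentException on malformed input.
    JobID parsed = JobID.forName("job_test_0001");
    System.out.println(parsed.getId());       // 1
  }
}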

Project: hadoop    File: TestJobControl.java
@SuppressWarnings("deprecation")
@Test(timeout = 30000)
public void testJobState() throws Exception {
  Job job_1 = getCopyJob();
  JobControl jc = new JobControl("Test");
  jc.addJob(job_1);
  Assert.assertEquals(Job.WAITING, job_1.getState());
  job_1.setState(Job.SUCCESS);
  Assert.assertEquals(Job.WAITING, job_1.getState());

  org.apache.hadoop.mapreduce.Job mockjob =
      mock(org.apache.hadoop.mapreduce.Job.class);
  org.apache.hadoop.mapreduce.JobID jid =
      new org.apache.hadoop.mapreduce.JobID("test", 0);
  when(mockjob.getJobID()).thenReturn(jid);
  job_1.setJob(mockjob);
  Assert.assertEquals("job_test_0000", job_1.getMapredJobID());
  job_1.setMapredJobID("job_test_0001");
  Assert.assertEquals("job_test_0000", job_1.getMapredJobID());
  jc.stop();
}
Project: hadoop    File: DataJoinJob.java
/**
 * Submit/run a map/reduce job.
 *
 * @param job the job configuration to run
 * @return true for success
 * @throws IOException if the job could not be submitted or monitored
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean success = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
        // ignore and poll again
      }
      running = jc.getJob(jobId);
    }
    success = running.isSuccessful();
  } finally {
    if (!success && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return success;
}
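The manual sleep/poll loop above can also be replaced by JobClient's one-call helper. A minimal sketch, assuming a fully configured JobConf (RunJobDemo and runAndReport are illustrative names, not part of the project):

import java.io.IOException;

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.RunningJob;

public class RunJobDemo {
  // JobClient.runJob() submits the job, polls until completion, and
  // throws IOException if the job fails.
  public static JobID runAndReport(JobConf conf) throws IOException {
    RunningJob running = JobClient.runJob(conf); // blocks until done
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " successful=" + running.isSuccessful());
    return jobId;
  }
}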
Project: aliyun-oss-hadoop-fs    File: TestJobControl.java
@SuppressWarnings("deprecation")
@Test(timeout = 30000)
public void testJobState() throws Exception {
  Job job_1 = getCopyJob();
  JobControl jc = new JobControl("Test");
  jc.addJob(job_1);
  Assert.assertEquals(Job.WAITING, job_1.getState());
  job_1.setState(Job.SUCCESS);
  Assert.assertEquals(Job.WAITING, job_1.getState());

  org.apache.hadoop.mapreduce.Job mockjob =
      mock(org.apache.hadoop.mapreduce.Job.class);
  org.apache.hadoop.mapreduce.JobID jid =
      new org.apache.hadoop.mapreduce.JobID("test", 0);
  when(mockjob.getJobID()).thenReturn(jid);
  job_1.setJob(mockjob);
  Assert.assertEquals("job_test_0000", job_1.getMapredJobID());
  job_1.setMapredJobID("job_test_0001");
  Assert.assertEquals("job_test_0000", job_1.getMapredJobID());
  jc.stop();
}
Project: aliyun-oss-hadoop-fs    File: DataJoinJob.java
/**
 * Submit/run a map/reduce job.
 *
 * @param job the job configuration to run
 * @return true for success
 * @throws IOException if the job could not be submitted or monitored
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean success = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
        // ignore and poll again
      }
      running = jc.getJob(jobId);
    }
    success = running.isSuccessful();
  } finally {
    if (!success && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return success;
}
Project: big-c    File: TestJobControl.java
@SuppressWarnings("deprecation")
@Test(timeout = 30000)
public void testJobState() throws Exception {
  Job job_1 = getCopyJob();
  JobControl jc = new JobControl("Test");
  jc.addJob(job_1);
  Assert.assertEquals(Job.WAITING, job_1.getState());
  job_1.setState(Job.SUCCESS);
  Assert.assertEquals(Job.WAITING, job_1.getState());

  org.apache.hadoop.mapreduce.Job mockjob =
      mock(org.apache.hadoop.mapreduce.Job.class);
  org.apache.hadoop.mapreduce.JobID jid =
      new org.apache.hadoop.mapreduce.JobID("test", 0);
  when(mockjob.getJobID()).thenReturn(jid);
  job_1.setJob(mockjob);
  Assert.assertEquals("job_test_0000", job_1.getMapredJobID());
  job_1.setMapredJobID("job_test_0001");
  Assert.assertEquals("job_test_0000", job_1.getMapredJobID());
  jc.stop();
}
Project: big-c    File: DataJoinJob.java
/**
 * Submit/run a map/reduce job.
 *
 * @param job the job configuration to run
 * @return true for success
 * @throws IOException if the job could not be submitted or monitored
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean success = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
        // ignore and poll again
      }
      running = jc.getJob(jobId);
    }
    success = running.isSuccessful();
  } finally {
    if (!success && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return success;
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestJobControl.java
@SuppressWarnings("deprecation")
@Test(timeout = 30000)
public void testJobState() throws Exception {
  Job job_1 = getCopyJob();
  JobControl jc = new JobControl("Test");
  jc.addJob(job_1);
  Assert.assertEquals(Job.WAITING, job_1.getState());
  job_1.setState(Job.SUCCESS);
  Assert.assertEquals(Job.WAITING, job_1.getState());

  org.apache.hadoop.mapreduce.Job mockjob =
      mock(org.apache.hadoop.mapreduce.Job.class);
  org.apache.hadoop.mapreduce.JobID jid =
      new org.apache.hadoop.mapreduce.JobID("test", 0);
  when(mockjob.getJobID()).thenReturn(jid);
  job_1.setJob(mockjob);
  Assert.assertEquals("job_test_0000", job_1.getMapredJobID());
  job_1.setMapredJobID("job_test_0001");
  Assert.assertEquals("job_test_0000", job_1.getMapredJobID());
  jc.stop();
}
Project: hadoop-2.6.0-cdh5.4.3    File: DataJoinJob.java
/**
 * Submit/run a map/reduce job.
 *
 * @param job the job configuration to run
 * @return true for success
 * @throws IOException if the job could not be submitted or monitored
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean success = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
        // ignore and poll again
      }
      running = jc.getJob(jobId);
    }
    success = running.isSuccessful();
  } finally {
    if (!success && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return success;
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestTrackerDistributedCacheManager.java
public void testRemoveTaskDistributedCacheManager() throws Exception {
  if (!canRun()) {
    return;
  }
  TrackerDistributedCacheManager manager = new TrackerDistributedCacheManager(
      conf, taskController);
  JobID jobId = new JobID("jobtracker", 1);
  manager.newTaskDistributedCacheManager(jobId, conf);

  TaskDistributedCacheManager taskDistributedCacheManager = manager
      .getTaskDistributedCacheManager(jobId);
  assertNotNull(taskDistributedCacheManager);

  manager.removeTaskDistributedCacheManager(jobId);

  taskDistributedCacheManager = manager.getTaskDistributedCacheManager(jobId);
  assertNull(taskDistributedCacheManager);
}
Project: hadoop-2.6.0-cdh5.4.3    File: MRCluster.java
/**
  * Allow the job to continue by signalling all of its tasks through the
  * MR control action.
  * @param id the id of the job
  * @throws IOException if task info could not be retrieved
  */
public void signalAllTasks(JobID id) throws IOException {
  TaskInfo[] taskInfos = getJTClient().getProxy().getTaskInfo(id);
  if (taskInfos != null) {
    for (TaskInfo taskInfoRemaining : taskInfos) {
      if (taskInfoRemaining != null) {
        FinishTaskControlAction action = new FinishTaskControlAction(TaskID
            .downgrade(taskInfoRemaining.getTaskID()));
        Collection<TTClient> tts = getTTClients();
        for (TTClient cli : tts) {
          cli.getProxy().sendAction(action);
        }
      }
    }
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: DataJoinJob.java
/**
 * Submit/run a map/reduce job.
 *
 * @param job the job configuration to run
 * @return true for success
 * @throws IOException if the job could not be submitted or monitored
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean success = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
        // ignore and poll again
      }
      running = jc.getJob(jobId);
    }
    success = running.isSuccessful();
  } finally {
    if (!success && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return success;
}
Project: hadoop-EAR    File: TestNoJobSetupCleanup.java
private Job submitAndValidateJob(JobConf conf, int numMaps, int numReds)
    throws IOException, InterruptedException, ClassNotFoundException {
  conf.setJobSetupCleanupNeeded(false);
  Job job = MapReduceTestUtil.createJob(conf, inDir, outDir,
              numMaps, numReds);

  job.setOutputFormatClass(MyOutputFormat.class);
  job.waitForCompletion(true);
  assertTrue(job.isSuccessful());
  JobID jobid = (org.apache.hadoop.mapred.JobID)job.getID();

  JobClient jc = new JobClient(conf);
  assertTrue(jc.getSetupTaskReports(jobid).length == 0);
  assertTrue(jc.getCleanupTaskReports(jobid).length == 0);
  assertTrue(jc.getMapTaskReports(jobid).length == numMaps);
  assertTrue(jc.getReduceTaskReports(jobid).length == numReds);
  FileSystem fs = FileSystem.get(conf);
  assertTrue("Job output directory doesn't exit!", fs.exists(outDir));
  FileStatus[] list = fs.listStatus(outDir, new OutputFilter());
  int numPartFiles = numReds == 0 ? numMaps : numReds;
  assertTrue("Number of part-files is " + list.length + " and not "
      + numPartFiles, list.length == numPartFiles);
  return job;
}
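The cast from job.getID() above works because the old-API JobID subclasses the new-API type; JobID.downgrade() expresses the same conversion explicitly. A minimal sketch (DowngradeDemo is an illustrative name, not part of the project):

import org.apache.hadoop.mapred.JobID;

public class DowngradeDemo {
  public static void main(String[] args) {
    // A new-API id, e.g. as returned by org.apache.hadoop.mapreduce.Job#getJobID().
    org.apache.hadoop.mapreduce.JobID newApiId =
        new org.apache.hadoop.mapreduce.JobID("test", 7);
    // downgrade() converts to the old-API subtype without a cast.
    JobID oldApiId = JobID.downgrade(newApiId);
    System.out.println(oldApiId); // job_test_0007
  }
}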
Project: hadoop-EAR    File: TestReleaseManager.java
public void testNewTag() throws IOException {
  LOG.info("Start testNewTag");
  JobID jobid = new JobID("TestJob", 1);
  long oldTimeStamp = releaseTimeStamp;
  long currentTimeStamp = System.currentTimeMillis();
  try {
    Thread.sleep(1000);
  } catch (InterruptedException e) {
    // ignore; the sleep only needs to separate the two timestamps
  }
  String workingPath = getRelease(releaseTimeStamp, jobid);
  String workingTag = workingPath + "/RELEASE_COPY_DONE";
  FileStatus tagStatus = fs.getFileStatus(new Path(workingTag));
  long newTimeStamp = tagStatus.getModificationTime();
  LOG.info("Before getRelease, " + workingTag + " timestamp is " + oldTimeStamp);
  LOG.info("After getRelease, the timestamp is " + newTimeStamp);
  assertTrue(newTimeStamp > currentTimeStamp);
  assertTrue(newTimeStamp > oldTimeStamp);
  LOG.info("Done with the testing for testNewTag");
}
Project: hadoop-EAR    File: DataJoinJob.java
/**
 * Submit/run a map/reduce job.
 *
 * @param job the job configuration to run
 * @return true for success
 * @throws IOException if the job could not be submitted or monitored
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean success = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
        // ignore and poll again
      }
      running = jc.getJob(jobId);
    }
    success = running.isSuccessful();
  } finally {
    if (!success && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return success;
}
Project: hadoop-plus    File: TestJobControl.java
@SuppressWarnings("deprecation")
@Test(timeout = 30000)
public void testJobState() throws Exception {
  Job job_1 = getCopyJob();
  JobControl jc = new JobControl("Test");
  jc.addJob(job_1);
  Assert.assertEquals(Job.WAITING, job_1.getState());
  job_1.setState(Job.SUCCESS);
  Assert.assertEquals(Job.WAITING, job_1.getState());

  org.apache.hadoop.mapreduce.Job mockjob =
      mock(org.apache.hadoop.mapreduce.Job.class);
  org.apache.hadoop.mapreduce.JobID jid =
      new org.apache.hadoop.mapreduce.JobID("test", 0);
  when(mockjob.getJobID()).thenReturn(jid);
  job_1.setJob(mockjob);
  Assert.assertEquals("job_test_0000", job_1.getMapredJobID());
  job_1.setMapredJobID("job_test_0001");
  Assert.assertEquals("job_test_0000", job_1.getMapredJobID());
  jc.stop();
}
Project: hadoop-plus    File: DataJoinJob.java
/**
 * Submit/run a map/reduce job.
 *
 * @param job the job configuration to run
 * @return true for success
 * @throws IOException if the job could not be submitted or monitored
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean success = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
        // ignore and poll again
      }
      running = jc.getJob(jobId);
    }
    success = running.isSuccessful();
  } finally {
    if (!success && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return success;
}
Project: FlexMap    File: TestJobControl.java
@SuppressWarnings("deprecation")
@Test(timeout = 30000)
public void testJobState() throws Exception {
  Job job_1 = getCopyJob();
  JobControl jc = new JobControl("Test");
  jc.addJob(job_1);
  Assert.assertEquals(Job.WAITING, job_1.getState());
  job_1.setState(Job.SUCCESS);
  Assert.assertEquals(Job.WAITING, job_1.getState());

  org.apache.hadoop.mapreduce.Job mockjob =
      mock(org.apache.hadoop.mapreduce.Job.class);
  org.apache.hadoop.mapreduce.JobID jid =
      new org.apache.hadoop.mapreduce.JobID("test", 0);
  when(mockjob.getJobID()).thenReturn(jid);
  job_1.setJob(mockjob);
  Assert.assertEquals("job_test_0000", job_1.getMapredJobID());
  job_1.setMapredJobID("job_test_0001");
  Assert.assertEquals("job_test_0000", job_1.getMapredJobID());
  jc.stop();
}
Project: hServer    File: HServerJobClient.java
@Override
public RunningJob submitJob(final JobConf job) throws IOException
{
    ensureInvocationGridPresent();
    ExecutorService async = Executors.newSingleThreadExecutor();
    final JobID jobID = JobID.forName("job_"+job.getJobName()+"_0");

    Future<Object> jobSubmitted = async.submit(new Callable<Object>() {
        @Override
        public Object call() throws Exception {
            try {
                JobScheduler.getInstance().runOldApiJob(job, jobID, sortEnabled, null, grid);
            } finally {
                if (unloadGrid) {
                    grid.unload();
                }
            }
            return null;
        }
    });
    async.shutdown(); // will shut down once the submitted task completes

    return new HServerRunningJob(jobID, jobSubmitted);
}
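One caveat in the snippet above: JobID.forName() accepts only the canonical job_<jtIdentifier>_<id> shape and throws IllegalArgumentException otherwise, so formatting the string from an arbitrary job name assumes the name itself parses cleanly (an underscore in the name, for instance, would likely break it). Constructing the id directly avoids the round-trip; a minimal sketch (JobIdFromName and idForJobName are illustrative names, not part of hServer):

import org.apache.hadoop.mapred.JobID;

public class JobIdFromName {
  // Build the id directly rather than formatting and re-parsing a string.
  public static JobID idForJobName(String jobName) {
    return new JobID(jobName, 0);
  }
}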
Project: hops    File: MapReduceEncodingManager.java
private void cleanRecovery() throws IOException {
  new LightWeightRequestHandler(HDFSOperationType.DELETE_ENCODING_JOBS) {
    @Override
    public Object performTask() throws IOException {
      EncodingJobsDataAccess da = (EncodingJobsDataAccess)
          HdfsStorageFactory.getDataAccess(EncodingJobsDataAccess.class);
      Iterator<MapReduceEncoder> it = completedJobs.iterator();
      while (it.hasNext()) {
        MapReduceEncoder job = it.next();
        JobID jobId = job.getJobID();
        da.delete(new EncodingJob(jobId.getJtIdentifier(), jobId.getId()));
        it.remove();
      }
      return null;
    }
  }.handle();
}
Project: hops    File: TestJobControl.java
@SuppressWarnings("deprecation")
@Test(timeout = 30000)
public void testJobState() throws Exception {
  Job job_1 = getCopyJob();
  JobControl jc = new JobControl("Test");
  jc.addJob(job_1);
  Assert.assertEquals(Job.WAITING, job_1.getState());
  job_1.setState(Job.SUCCESS);
  Assert.assertEquals(Job.WAITING, job_1.getState());

  org.apache.hadoop.mapreduce.Job mockjob =
      mock(org.apache.hadoop.mapreduce.Job.class);
  org.apache.hadoop.mapreduce.JobID jid =
      new org.apache.hadoop.mapreduce.JobID("test", 0);
  when(mockjob.getJobID()).thenReturn(jid);
  job_1.setJob(mockjob);
  Assert.assertEquals("job_test_0000", job_1.getMapredJobID());
  job_1.setMapredJobID("job_test_0001");
  Assert.assertEquals("job_test_0000", job_1.getMapredJobID());
  jc.stop();
}
Project: hops    File: DataJoinJob.java
/**
 * Submit/run a map/reduce job.
 *
 * @param job the job configuration to run
 * @return true for success
 * @throws IOException if the job could not be submitted or monitored
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean success = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
        // ignore and poll again
      }
      running = jc.getJob(jobId);
    }
    success = running.isSuccessful();
  } finally {
    if (!success && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return success;
}
Project: compiler    File: BoaOutputCommitter.java
@Override
public void abortJob(JobContext context, JobStatus.State runState) throws java.io.IOException {
    super.abortJob(context, runState);

    final JobClient jobClient = new JobClient(new JobConf(context.getConfiguration()));
    final RunningJob job = jobClient.getJob((org.apache.hadoop.mapred.JobID) JobID.forName(context.getConfiguration().get("mapred.job.id")));
    String diag = "";
    for (final TaskCompletionEvent event : job.getTaskCompletionEvents(0))
        switch (event.getTaskStatus()) {
            case SUCCEEDED:
                break;
            default:
                diag += "Diagnostics for: " + event.getTaskTrackerHttp() + "\n";
                for (final String s : job.getTaskDiagnostics(event.getTaskAttemptId()))
                    diag += s + "\n";
                diag += "\n";
                break;
        }
    updateStatus(diag, context.getConfiguration().getInt("boa.hadoop.jobid", 0));
}
Project: hadoop-TCP    File: TestJobControl.java
@SuppressWarnings("deprecation")
@Test(timeout = 30000)
public void testJobState() throws Exception {
  Job job_1 = getCopyJob();
  JobControl jc = new JobControl("Test");
  jc.addJob(job_1);
  Assert.assertEquals(Job.WAITING, job_1.getState());
  job_1.setState(Job.SUCCESS);
  Assert.assertEquals(Job.WAITING, job_1.getState());

  org.apache.hadoop.mapreduce.Job mockjob =
      mock(org.apache.hadoop.mapreduce.Job.class);
  org.apache.hadoop.mapreduce.JobID jid =
      new org.apache.hadoop.mapreduce.JobID("test", 0);
  when(mockjob.getJobID()).thenReturn(jid);
  job_1.setJob(mockjob);
  Assert.assertEquals("job_test_0000", job_1.getMapredJobID());
  job_1.setMapredJobID("job_test_0001");
  Assert.assertEquals("job_test_0000", job_1.getMapredJobID());
  jc.stop();
}
Project: hadoop-TCP    File: DataJoinJob.java
/**
 * Submit/run a map/reduce job.
 *
 * @param job the job configuration to run
 * @return true for success
 * @throws IOException if the job could not be submitted or monitored
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean success = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
        // ignore and poll again
      }
      running = jc.getJob(jobId);
    }
    success = running.isSuccessful();
  } finally {
    if (!success && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return success;
}
Project: spork-streaming    File: SimplePigStats.java
@SuppressWarnings("deprecation")
JobStats addJobStats(Job job) {
    MapReduceOper mro = jobMroMap.get(job);

    if (mro == null) {
        LOG.warn("unable to get MR oper for job: " + job.toString());
        return null;
    }
    JobStats js = mroJobMap.get(mro);

    JobID jobId = job.getAssignedJobID();
    js.setId(jobId);
    js.setAlias(mro);
    js.setConf(job.getJobConf());
    return js;
}
Project: spork-streaming    File: GruntParser.java
@Override
protected void processKill(String jobid) throws IOException
{
    if (mJobConf != null) {
        JobClient jc = new JobClient(mJobConf);
        JobID id = JobID.forName(jobid);
        RunningJob job = jc.getJob(id);
        if (job == null) {
            System.out.println("Job with id " + jobid + " is not active");
        } else {
            job.killJob();
            log.info("Kill " + id + " submitted.");
        }
    }
}
Project: spork-streaming    File: TestJobStats.java
@Test
public void testMedianMapReduceTime() throws Exception {

    JobConf jobConf = new JobConf();
    JobClient jobClient = Mockito.mock(JobClient.class);

    // mock methods to return the predefined map and reduce task reports
    Mockito.when(jobClient.getMapTaskReports(jobID)).thenReturn(mapTaskReports);
    Mockito.when(jobClient.getReduceTaskReports(jobID)).thenReturn(reduceTaskReports);

    PigStats.JobGraph jobGraph = new PigStats.JobGraph();
    JobStats jobStats = createJobStats("JobStatsTest", jobGraph);
    getJobStatsMethod("setId", JobID.class).invoke(jobStats, jobID);
    getJobStatsMethod("setSuccessful", boolean.class).invoke(jobStats, true);

    getJobStatsMethod("addMapReduceStatistics", JobClient.class, Configuration.class)
        .invoke(jobStats, jobClient, jobConf);
    String msg = (String)getJobStatsMethod("getDisplayString", boolean.class)
        .invoke(jobStats, false);

    System.out.println(JobStats.SUCCESS_HEADER);
    System.out.println(msg);

    assertTrue(msg.startsWith(ASSERT_STRING));
}
Project: spork    File: MapReduceLauncher.java
@Override
public void killJob(String jobID, Configuration conf) throws BackendException {
    try {
        if (conf != null) {
            JobConf jobConf = new JobConf(conf);
            JobClient jc = new JobClient(jobConf);
            JobID id = JobID.forName(jobID);
            RunningJob job = jc.getJob(id);
            if (job == null) {
                System.out.println("Job with id " + jobID + " is not active");
            } else {
                job.killJob();
                log.info("Kill " + id + " submitted.");
            }
        }
    } catch (IOException e) {
        throw new BackendException(e);
    }
}
Project: spork    File: TestMRJobStats.java
@Test
public void testMedianMapReduceTime() throws Exception {
    JobClient jobClient = Mockito.mock(JobClient.class);

    // mock methods to return the predefined map and reduce task reports
    Mockito.when(jobClient.getMapTaskReports(jobID)).thenReturn(mapTaskReports);
    Mockito.when(jobClient.getReduceTaskReports(jobID)).thenReturn(reduceTaskReports);

    PigStats.JobGraph jobGraph = new PigStats.JobGraph();
    MRJobStats jobStats = createJobStats("JobStatsTest", jobGraph);
    getJobStatsMethod("setId", JobID.class).invoke(jobStats, jobID);
    jobStats.setSuccessful(true);

    getJobStatsMethod("addMapReduceStatistics", Iterator.class, Iterator.class)
        .invoke(jobStats, Arrays.asList(mapTaskReports).iterator(), Arrays.asList(reduceTaskReports).iterator());
    String msg = (String)getJobStatsMethod("getDisplayString")
        .invoke(jobStats);

    System.out.println(JobStats.SUCCESS_HEADER);
    System.out.println(msg);

    assertTrue(msg.startsWith(ASSERT_STRING));
}
Project: hadoop-on-lustre    File: TestNetworkedJob.java
@SuppressWarnings("deprecation")
@Test
public void testBadUpdate() throws Exception {
  JobStatus mockStatus = mock(JobStatus.class);
  JobProfile mockProf = mock(JobProfile.class);
  JobSubmissionProtocol mockClient = mock(JobSubmissionProtocol.class);

  JobID id = new JobID("test",0);

  RunningJob rj = new JobClient.NetworkedJob(mockStatus, mockProf, mockClient);

  when(mockProf.getJobID()).thenReturn(id);
  when(mockClient.getJobStatus(id)).thenReturn(null);

  boolean caught = false;
  try {
    rj.isSuccessful();
  } catch(IOException e) {
    caught = true;
  }
  assertTrue("Expected updateStatus to throw an IOException bt it did not", caught);

  //verification
  verify(mockProf).getJobID();
  verify(mockClient).getJobStatus(id);
}
Project: hadoop-on-lustre    File: TestNetworkedJob.java
@SuppressWarnings("deprecation")
@Test
public void testGetNullCounters() throws Exception {
  JobStatus mockStatus = mock(JobStatus.class);
  JobProfile mockProf = mock(JobProfile.class);
  JobSubmissionProtocol mockClient = mock(JobSubmissionProtocol.class);
  RunningJob underTest =
    new JobClient.NetworkedJob(mockStatus, mockProf, mockClient); 

  JobID id = new JobID("test", 0);
  when(mockProf.getJobID()).thenReturn(id);
  when(mockClient.getJobCounters(id)).thenReturn(null);
  assertNull(underTest.getCounters());
  //verification
  verify(mockClient).getJobCounters(id);
}
Project: hadoop-on-lustre    File: TestTrackerDistributedCacheManager.java
public void testRemoveTaskDistributedCacheManager() throws Exception {
  if (!canRun()) {
    return;
  }
  TrackerDistributedCacheManager manager = new TrackerDistributedCacheManager(
      conf, taskController);
  JobID jobId = new JobID("jobtracker", 1);
  manager.newTaskDistributedCacheManager(jobId, conf);

  TaskDistributedCacheManager taskDistributedCacheManager = manager
      .getTaskDistributedCacheManager(jobId);
  assertNotNull(taskDistributedCacheManager);

  manager.removeTaskDistributedCacheManager(jobId);

  taskDistributedCacheManager = manager.getTaskDistributedCacheManager(jobId);
  assertNull(taskDistributedCacheManager);
}
Project: hadoop-on-lustre    File: MRCluster.java
/**
  * Allow the job to continue by signalling all of its tasks through the
  * MR control action.
  * @param id the id of the job
  * @throws IOException if task info could not be retrieved
  */
public void signalAllTasks(JobID id) throws IOException {
  TaskInfo[] taskInfos = getJTClient().getProxy().getTaskInfo(id);
  if (taskInfos != null) {
    for (TaskInfo taskInfoRemaining : taskInfos) {
      if (taskInfoRemaining != null) {
        FinishTaskControlAction action = new FinishTaskControlAction(TaskID
            .downgrade(taskInfoRemaining.getTaskID()));
        Collection<TTClient> tts = getTTClients();
        for (TTClient cli : tts) {
          cli.getProxy().sendAction(action);
        }
      }
    }
  }
}
Project: hadoop-on-lustre    File: DataJoinJob.java
/**
 * Submit/run a map/reduce job.
 *
 * @param job the job configuration to run
 * @return true for success
 * @throws IOException if the job could not be submitted or monitored
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean success = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
        // ignore and poll again
      }
      running = jc.getJob(jobId);
    }
    success = running.isSuccessful();
  } finally {
    if (!success && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return success;
}
Project: hardfs    File: TestJobControl.java
@SuppressWarnings("deprecation")
@Test(timeout = 30000)
public void testJobState() throws Exception {
  Job job_1 = getCopyJob();
  JobControl jc = new JobControl("Test");
  jc.addJob(job_1);
  Assert.assertEquals(Job.WAITING, job_1.getState());
  job_1.setState(Job.SUCCESS);
  Assert.assertEquals(Job.WAITING, job_1.getState());

  org.apache.hadoop.mapreduce.Job mockjob =
      mock(org.apache.hadoop.mapreduce.Job.class);
  org.apache.hadoop.mapreduce.JobID jid =
      new org.apache.hadoop.mapreduce.JobID("test", 0);
  when(mockjob.getJobID()).thenReturn(jid);
  job_1.setJob(mockjob);
  Assert.assertEquals("job_test_0000", job_1.getMapredJobID());
  job_1.setMapredJobID("job_test_0001");
  Assert.assertEquals("job_test_0000", job_1.getMapredJobID());
  jc.stop();
}
Project: hardfs    File: DataJoinJob.java
/**
 * Submit/run a map/reduce job.
 *
 * @param job the job configuration to run
 * @return true for success
 * @throws IOException if the job could not be submitted or monitored
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean success = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
        // ignore and poll again
      }
      running = jc.getJob(jobId);
    }
    success = running.isSuccessful();
  } finally {
    if (!success && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return success;
}
Project: hadoop-on-lustre2    File: TestJobControl.java
@SuppressWarnings("deprecation")
@Test(timeout = 30000)
public void testJobState() throws Exception {
  Job job_1 = getCopyJob();
  JobControl jc = new JobControl("Test");
  jc.addJob(job_1);
  Assert.assertEquals(Job.WAITING, job_1.getState());
  job_1.setState(Job.SUCCESS);
  Assert.assertEquals(Job.WAITING, job_1.getState());

  org.apache.hadoop.mapreduce.Job mockjob =
      mock(org.apache.hadoop.mapreduce.Job.class);
  org.apache.hadoop.mapreduce.JobID jid =
      new org.apache.hadoop.mapreduce.JobID("test", 0);
  when(mockjob.getJobID()).thenReturn(jid);
  job_1.setJob(mockjob);
  Assert.assertEquals("job_test_0000", job_1.getMapredJobID());
  job_1.setMapredJobID("job_test_0001");
  Assert.assertEquals("job_test_0000", job_1.getMapredJobID());
  jc.stop();
}
Project: hadoop-on-lustre2    File: DataJoinJob.java
/**
 * Submit/run a map/reduce job.
 *
 * @param job the job configuration to run
 * @return true for success
 * @throws IOException if the job could not be submitted or monitored
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean success = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
        // ignore and poll again
      }
      running = jc.getJob(jobId);
    }
    success = running.isSuccessful();
  } finally {
    if (!success && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return success;
}
Project: eoulsan    File: HadoopJobEmergencyStopTask.java
@Override
public void stop() {

  getLogger().info("Try to kill " + this.jobId + " Hadoop job");

  // Create configuration object
  final Configuration conf = createConfiguration();

  try {

    final JobClient client = new JobClient(conf);

    if (client != null) {

      final RunningJob job = client.getJob(JobID.forName(this.jobId));

      if (job != null) {
        job.killJob();
      }
    }
  } catch (IOException e) {
    getLogger().severe(e.getMessage());
  }

  getLogger().info("Hadoop job " + this.jobId + " killed");
}
Project: RDFS    File: TestNoJobSetupCleanup.java
private Job submitAndValidateJob(JobConf conf, int numMaps, int numReds)
    throws IOException, InterruptedException, ClassNotFoundException {
  conf.setJobSetupCleanupNeeded(false);
  Job job = MapReduceTestUtil.createJob(conf, inDir, outDir,
              numMaps, numReds);

  job.setOutputFormatClass(MyOutputFormat.class);
  job.waitForCompletion(true);
  assertTrue(job.isSuccessful());
  JobID jobid = (org.apache.hadoop.mapred.JobID)job.getID();

  JobClient jc = new JobClient(conf);
  assertTrue(jc.getSetupTaskReports(jobid).length == 0);
  assertTrue(jc.getCleanupTaskReports(jobid).length == 0);
  assertTrue(jc.getMapTaskReports(jobid).length == numMaps);
  assertTrue(jc.getReduceTaskReports(jobid).length == numReds);
  FileSystem fs = FileSystem.get(conf);
  assertTrue("Job output directory doesn't exit!", fs.exists(outDir));
  FileStatus[] list = fs.listStatus(outDir, new OutputFilter());
  int numPartFiles = numReds == 0 ? numMaps : numReds;
  assertTrue("Number of part-files is " + list.length + " and not "
      + numPartFiles, list.length == numPartFiles);
  return job;
}