Java class org.apache.hadoop.mapred.RunningJob - example source code
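
The snippets below, collected from a number of open-source projects, all follow the same RunningJob lifecycle: a JobConf is submitted through JobClient, the job is waited on or polled for completion, its success flag and counters are inspected, and failed or abandoned jobs are killed. As a minimal, self-contained sketch of that pattern (the class and method names here are illustrative only and do not come from any of the projects below):

import java.io.IOException;

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;

// Minimal sketch of the RunningJob lifecycle used throughout the snippets below:
// submit a configured JobConf, wait for completion, report success, kill on early exit.
public class RunningJobSketch {
  public static boolean run(JobConf conf) throws IOException {
    JobClient client = new JobClient(conf);
    RunningJob job = null;
    try {
      job = client.submitJob(conf); // asynchronous submission
      job.waitForCompletion();      // or poll job.isComplete() in a loop
      return job.isSuccessful();    // counters are available via job.getCounters()
    } finally {
      if (job != null && !job.isComplete()) {
        job.killJob();              // kill the job if we are bailing out early
      }
      client.close();
    }
  }
}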

Project: ditb    File: TestTableInputFormat.java
void testInputFormat(Class<? extends InputFormat> clazz) throws IOException {
  final JobConf job = MapreduceTestingShim.getJobConf(mrCluster);
  job.setInputFormat(clazz);
  job.setOutputFormat(NullOutputFormat.class);
  job.setMapperClass(ExampleVerifier.class);
  job.setNumReduceTasks(0);
  LOG.debug("submitting job.");
  final RunningJob run = JobClient.runJob(job);
  assertTrue("job failed!", run.isSuccessful());
  assertEquals("Saw the wrong number of instances of the filtered-for row.", 2, run.getCounters()
      .findCounter(TestTableInputFormat.class.getName() + ":row", "aaa").getCounter());
  assertEquals("Saw any instances of the filtered out row.", 0, run.getCounters()
      .findCounter(TestTableInputFormat.class.getName() + ":row", "bbb").getCounter());
  assertEquals("Saw the wrong number of instances of columnA.", 1, run.getCounters()
      .findCounter(TestTableInputFormat.class.getName() + ":family", "columnA").getCounter());
  assertEquals("Saw the wrong number of instances of columnB.", 1, run.getCounters()
      .findCounter(TestTableInputFormat.class.getName() + ":family", "columnB").getCounter());
  assertEquals("Saw the wrong count of values for the filtered-for row.", 2, run.getCounters()
      .findCounter(TestTableInputFormat.class.getName() + ":value", "value aaa").getCounter());
  assertEquals("Saw the wrong count of values for the filtered-out row.", 0, run.getCounters()
      .findCounter(TestTableInputFormat.class.getName() + ":value", "value bbb").getCounter());
}
Project: hadoop    File: TestMiniMRProxyUser.java
private void mrRun() throws Exception {
  FileSystem fs = FileSystem.get(getJobConf());
  Path inputDir = new Path("input");
  fs.mkdirs(inputDir);
  Writer writer = new OutputStreamWriter(fs.create(new Path(inputDir, "data.txt")));
  writer.write("hello");
  writer.close();

  Path outputDir = new Path("output", "output");

  JobConf jobConf = new JobConf(getJobConf());
  jobConf.setInt("mapred.map.tasks", 1);
  jobConf.setInt("mapred.map.max.attempts", 1);
  jobConf.setInt("mapred.reduce.max.attempts", 1);
  jobConf.set("mapred.input.dir", inputDir.toString());
  jobConf.set("mapred.output.dir", outputDir.toString());

  JobClient jobClient = new JobClient(jobConf);
  RunningJob runJob = jobClient.submitJob(jobConf);
  runJob.waitForCompletion();
  assertTrue(runJob.isComplete());
  assertTrue(runJob.isSuccessful());
}
Project: hadoop    File: DataJoinJob.java
/**
 * Submit/run a map/reduce job.
 * 
 * @param job
 * @return true for success
 * @throws IOException
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean success = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
      }
      running = jc.getJob(jobId);
    }
    success = running.isSuccessful();
  } finally {
    if (!success && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return success;
}
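The empty catch block in the polling loop above silently swallows InterruptedException and keeps waiting. A variant that preserves the thread's interrupt status and stops polling is sketched below; this is an illustrative rewrite (the class and method names are made up), not part of DataJoinJob:

import java.io.IOException;

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.RunningJob;

// Sketch only: poll a submitted job once a minute, but propagate interrupts
// instead of swallowing them as the loop above does.
public class PollUntilComplete {
  static boolean poll(JobClient jc, RunningJob running) throws IOException {
    JobID jobId = running.getID();
    while (!running.isComplete()) {
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // preserve the interrupt status
        break;                              // stop waiting; the caller decides whether to kill the job
      }
      running = jc.getJob(jobId);           // refresh the job status from the cluster
    }
    return running.isSuccessful();
  }
}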
Project: ditb    File: TestTableMapReduceUtil.java
@Test
@SuppressWarnings("deprecation")
public void shouldBeValidMapReduceEvaluation() throws Exception {
  Configuration cfg = UTIL.getConfiguration();
  JobConf jobConf = new JobConf(cfg);
  try {
    jobConf.setJobName("process row task");
    jobConf.setNumReduceTasks(1);
    TableMapReduceUtil.initTableMapJob(TABLE_NAME, new String(COLUMN_FAMILY),
        ClassificatorMapper.class, ImmutableBytesWritable.class, Put.class,
        jobConf);
    TableMapReduceUtil.initTableReduceJob(TABLE_NAME,
        ClassificatorRowReduce.class, jobConf);
    RunningJob job = JobClient.runJob(jobConf);
    assertTrue(job.isSuccessful());
  } finally {
    if (jobConf != null)
      FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir")));
  }
}
Project: ditb    File: TestTableMapReduceUtil.java
@Test
@SuppressWarnings("deprecation")
public void shouldBeValidMapReduceWithPartitionerEvaluation()
    throws IOException {
  Configuration cfg = UTIL.getConfiguration();
  JobConf jobConf = new JobConf(cfg);
  try {
    jobConf.setJobName("process row task");
    jobConf.setNumReduceTasks(2);
    TableMapReduceUtil.initTableMapJob(TABLE_NAME, new String(COLUMN_FAMILY),
        ClassificatorMapper.class, ImmutableBytesWritable.class, Put.class,
        jobConf);

    TableMapReduceUtil.initTableReduceJob(TABLE_NAME,
        ClassificatorRowReduce.class, jobConf, HRegionPartitioner.class);
    RunningJob job = JobClient.runJob(jobConf);
    assertTrue(job.isSuccessful());
  } finally {
    if (jobConf != null)
      FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir")));
  }
}
Project: ditb    File: TestMultiTableSnapshotInputFormat.java
@Override
protected void runJob(String jobName, Configuration c, List<Scan> scans)
    throws IOException, InterruptedException, ClassNotFoundException {
  JobConf job = new JobConf(TEST_UTIL.getConfiguration());

  job.setJobName(jobName);
  job.setMapperClass(Mapper.class);
  job.setReducerClass(Reducer.class);

  TableMapReduceUtil.initMultiTableSnapshotMapperJob(getSnapshotScanMapping(scans), Mapper.class,
      ImmutableBytesWritable.class, ImmutableBytesWritable.class, job, true, restoreDir);

  TableMapReduceUtil.addDependencyJars(job);

  job.setReducerClass(Reducer.class);
  job.setNumReduceTasks(1); // one to get final "first" and "last" key
  FileOutputFormat.setOutputPath(job, new Path(job.getJobName()));
  LOG.info("Started " + job.getJobName());

  RunningJob runningJob = JobClient.runJob(job);
  runningJob.waitForCompletion();
  assertTrue(runningJob.isSuccessful());
  LOG.info("After map/reduce completion - job " + jobName);
}
Project: aliyun-oss-hadoop-fs    File: TestMiniMRProxyUser.java
private void mrRun() throws Exception {
  FileSystem fs = FileSystem.get(getJobConf());
  Path inputDir = new Path("input");
  fs.mkdirs(inputDir);
  Writer writer = new OutputStreamWriter(fs.create(new Path(inputDir, "data.txt")));
  writer.write("hello");
  writer.close();

  Path outputDir = new Path("output", "output");

  JobConf jobConf = new JobConf(getJobConf());
  jobConf.setInt("mapred.map.tasks", 1);
  jobConf.setInt("mapred.map.max.attempts", 1);
  jobConf.setInt("mapred.reduce.max.attempts", 1);
  jobConf.set("mapred.input.dir", inputDir.toString());
  jobConf.set("mapred.output.dir", outputDir.toString());

  JobClient jobClient = new JobClient(jobConf);
  RunningJob runJob = jobClient.submitJob(jobConf);
  runJob.waitForCompletion();
  assertTrue(runJob.isComplete());
  assertTrue(runJob.isSuccessful());
}
Project: aliyun-oss-hadoop-fs    File: DataJoinJob.java
/**
 * Submit/run a map/reduce job.
 * 
 * @param job
 * @return true for success
 * @throws IOException
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean success = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
      }
      running = jc.getJob(jobId);
    }
    success = running.isSuccessful();
  } finally {
    if (!success && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return success;
}
Project: big-c    File: TestMiniMRProxyUser.java
private void mrRun() throws Exception {
  FileSystem fs = FileSystem.get(getJobConf());
  Path inputDir = new Path("input");
  fs.mkdirs(inputDir);
  Writer writer = new OutputStreamWriter(fs.create(new Path(inputDir, "data.txt")));
  writer.write("hello");
  writer.close();

  Path outputDir = new Path("output", "output");

  JobConf jobConf = new JobConf(getJobConf());
  jobConf.setInt("mapred.map.tasks", 1);
  jobConf.setInt("mapred.map.max.attempts", 1);
  jobConf.setInt("mapred.reduce.max.attempts", 1);
  jobConf.set("mapred.input.dir", inputDir.toString());
  jobConf.set("mapred.output.dir", outputDir.toString());

  JobClient jobClient = new JobClient(jobConf);
  RunningJob runJob = jobClient.submitJob(jobConf);
  runJob.waitForCompletion();
  assertTrue(runJob.isComplete());
  assertTrue(runJob.isSuccessful());
}
Project: big-c    File: DataJoinJob.java
/**
 * Submit/run a map/reduce job.
 * 
 * @param job
 * @return true for success
 * @throws IOException
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean success = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
      }
      running = jc.getJob(jobId);
    }
    success = running.isSuccessful();
  } finally {
    if (!success && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return success;
}
Project: LCIndex-HBase-0.94.16    File: TestTableMapReduce.java
private void runTestOnTable(HTable table) throws IOException {
  JobConf jobConf = null;
  try {
    LOG.info("Before map/reduce startup");
    jobConf = new JobConf(UTIL.getConfiguration(), TestTableMapReduce.class);
    jobConf.setJobName("process column contents");
    jobConf.setNumReduceTasks(1);
    TableMapReduceUtil.initTableMapJob(Bytes.toString(table.getTableName()),
      Bytes.toString(INPUT_FAMILY), ProcessContentsMapper.class,
      ImmutableBytesWritable.class, Put.class, jobConf);
    TableMapReduceUtil.initTableReduceJob(Bytes.toString(table.getTableName()),
      IdentityTableReduce.class, jobConf);

    LOG.info("Started " + Bytes.toString(table.getTableName()));
    RunningJob job = JobClient.runJob(jobConf);
    assertTrue(job.isSuccessful());
    LOG.info("After map/reduce completion");

    // verify map-reduce results
    verify(Bytes.toString(table.getTableName()));
  } finally {
    if (jobConf != null) {
      FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir")));
    }
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestMiniMRProxyUser.java
private void mrRun() throws Exception {
  FileSystem fs = FileSystem.get(getJobConf());
  Path inputDir = new Path("input");
  fs.mkdirs(inputDir);
  Writer writer = new OutputStreamWriter(fs.create(new Path(inputDir, "data.txt")));
  writer.write("hello");
  writer.close();

  Path outputDir = new Path("output", "output");

  JobConf jobConf = new JobConf(getJobConf());
  jobConf.setInt("mapred.map.tasks", 1);
  jobConf.setInt("mapred.map.max.attempts", 1);
  jobConf.setInt("mapred.reduce.max.attempts", 1);
  jobConf.set("mapred.input.dir", inputDir.toString());
  jobConf.set("mapred.output.dir", outputDir.toString());

  JobClient jobClient = new JobClient(jobConf);
  RunningJob runJob = jobClient.submitJob(jobConf);
  runJob.waitForCompletion();
  assertTrue(runJob.isComplete());
  assertTrue(runJob.isSuccessful());
}
Project: hops    File: DataJoinJob.java
/**
 * Submit/run a map/reduce job.
 * 
 * @param job
 * @return true for success
 * @throws IOException
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean success = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
      }
      running = jc.getJob(jobId);
    }
    success = running.isSuccessful();
  } finally {
    if (!success && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return success;
}
Project: hadoop-2.6.0-cdh5.4.3    File: DataJoinJob.java
/**
 * Submit/run a map/reduce job.
 * 
 * @param job
 * @return true for success
 * @throws IOException
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean success = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
      }
      running = jc.getJob(jobId);
    }
    success = running.isSuccessful();
  } finally {
    if (!success && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return success;
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestJobACLs.java
/**
 * Verify JobContext.JOB_ACL_VIEW_JOB
 * 
 * @throws IOException
 * @throws InterruptedException
 */
private void verifyACLViewJob() throws IOException, InterruptedException {

  // Set the job up.
  final JobConf myConf = mr.createJobConf();
  myConf.set(JobContext.JOB_ACL_VIEW_JOB, viewColleague);

  // Submit the job as user1
  RunningJob job = submitJobAsUser(myConf, jobSubmitter);

  final JobID jobId = job.getID();

  // Try operations as an unauthorized user.
  verifyViewJobAsUnauthorizedUser(myConf, jobId, modifyColleague);

  // Try operations as an authorized user, who is part of view-job-acl.
  verifyViewJobAsAuthorizedUser(myConf, jobId, viewColleague);

  // Try operations as an authorized user, who is a queue administrator.
  verifyViewJobAsAuthorizedUser(myConf, jobId, qAdmin);

  // Clean up the job
  job.killJob();
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestJobACLs.java
/**
 * Submits a sleep job with 1 map task that runs for a long time (2000 sec)
 * @param clusterConf
 * @param user the jobOwner
 * @return RunningJob that is started
 * @throws IOException
 * @throws InterruptedException
 */
private RunningJob submitJobAsUser(final JobConf clusterConf, String user)
    throws IOException, InterruptedException {
  UserGroupInformation ugi =
      UserGroupInformation.createUserForTesting(user, new String[] {});
  RunningJob job = (RunningJob) ugi.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      JobClient jobClient = new JobClient(clusterConf);
      SleepJob sleepJob = new SleepJob();
      sleepJob.setConf(clusterConf);
      JobConf jobConf = sleepJob.setupJobConf(1, 0, 2000, 1000, 1000, 1000);
      RunningJob runningJob = jobClient.submitJob(jobConf);
      return runningJob;
    }
  });
  return job;
}
Project: hadoop-2.6.0-cdh5.4.3    File: DataJoinJob.java
/**
 * Submit/run a map/reduce job.
 * 
 * @param job
 * @return true for success
 * @throws IOException
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean success = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
      }
      running = jc.getJob(jobId);
    }
    success = running.isSuccessful();
  } finally {
    if (!success && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return success;
}
Project: compiler    File: BoaOutputCommitter.java
@Override
public void abortJob(JobContext context, JobStatus.State runState) throws java.io.IOException {
    super.abortJob(context, runState);

    final JobClient jobClient = new JobClient(new JobConf(context.getConfiguration()));
    final RunningJob job = jobClient.getJob((org.apache.hadoop.mapred.JobID) JobID.forName(context.getConfiguration().get("mapred.job.id")));
    String diag = "";
    for (final TaskCompletionEvent event : job.getTaskCompletionEvents(0))
        switch (event.getTaskStatus()) {
            case SUCCEEDED:
                break;
            default:
                diag += "Diagnostics for: " + event.getTaskTrackerHttp() + "\n";
                for (final String s : job.getTaskDiagnostics(event.getTaskAttemptId()))
                    diag += s + "\n";
                diag += "\n";
                break;
        }
    updateStatus(diag, context.getConfiguration().getInt("boa.hadoop.jobid", 0));
}
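abortJob() above reads only the first batch of task completion events (offset 0). RunningJob.getTaskCompletionEvents(int) returns events in batches starting at the given index, so collecting diagnostics for every attempt requires paging through the batches. A hedged sketch of that paging (the class and method names are made up for illustration):

import java.io.IOException;

import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapred.TaskCompletionEvent;

// Sketch only: page through every batch of task completion events instead of
// reading just the first batch, collecting diagnostics for non-successful attempts.
public class TaskEventPager {
  static String collectDiagnostics(RunningJob job) throws IOException {
    StringBuilder diag = new StringBuilder();
    int offset = 0;
    TaskCompletionEvent[] events;
    while ((events = job.getTaskCompletionEvents(offset)).length > 0) {
      offset += events.length;
      for (TaskCompletionEvent event : events) {
        if (event.getTaskStatus() != TaskCompletionEvent.Status.SUCCEEDED) {
          diag.append("Diagnostics for: ").append(event.getTaskTrackerHttp()).append('\n');
          for (String s : job.getTaskDiagnostics(event.getTaskAttemptId())) {
            diag.append(s).append('\n');
          }
        }
      }
    }
    return diag.toString();
  }
}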
Project: hadoop-EAR    File: DataJoinJob.java
/**
 * Submit/run a map/reduce job.
 * 
 * @param job
 * @return true for success
 * @throws IOException
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean success = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
      }
      running = jc.getJob(jobId);
    }
    success = running.isSuccessful();
  } finally {
    if (!success && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return success;
}
Project: hops    File: TestMiniMRProxyUser.java
private void mrRun() throws Exception {
  FileSystem fs = FileSystem.get(getJobConf());
  Path inputDir = new Path("input");
  fs.mkdirs(inputDir);
  Writer writer = new OutputStreamWriter(fs.create(new Path(inputDir, "data.txt")));
  writer.write("hello");
  writer.close();

  Path outputDir = new Path("output", "output");

  JobConf jobConf = new JobConf(getJobConf());
  jobConf.setInt("mapred.map.tasks", 1);
  jobConf.setInt("mapred.map.max.attempts", 1);
  jobConf.setInt("mapred.reduce.max.attempts", 1);
  jobConf.set("mapred.input.dir", inputDir.toString());
  jobConf.set("mapred.output.dir", outputDir.toString());

  JobClient jobClient = new JobClient(jobConf);
  RunningJob runJob = jobClient.submitJob(jobConf);
  runJob.waitForCompletion();
  assertTrue(runJob.isComplete());
  assertTrue(runJob.isSuccessful());
}
Project: hadoop-plus    File: TestMiniMRProxyUser.java
private void mrRun() throws Exception {
  FileSystem fs = FileSystem.get(getJobConf());
  Path inputDir = new Path("input");
  fs.mkdirs(inputDir);
  Writer writer = new OutputStreamWriter(fs.create(new Path(inputDir, "data.txt")));
  writer.write("hello");
  writer.close();

  Path outputDir = new Path("output", "output");

  JobConf jobConf = new JobConf(getJobConf());
  jobConf.setInt("mapred.map.tasks", 1);
  jobConf.setInt("mapred.map.max.attempts", 1);
  jobConf.setInt("mapred.reduce.max.attempts", 1);
  jobConf.set("mapred.input.dir", inputDir.toString());
  jobConf.set("mapred.output.dir", outputDir.toString());

  JobClient jobClient = new JobClient(jobConf);
  RunningJob runJob = jobClient.submitJob(jobConf);
  runJob.waitForCompletion();
  assertTrue(runJob.isComplete());
  assertTrue(runJob.isSuccessful());
}
Project: hadoop-plus    File: DataJoinJob.java
/**
 * Submit/run a map/reduce job.
 * 
 * @param job
 * @return true for success
 * @throws IOException
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean success = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
      }
      running = jc.getJob(jobId);
    }
    success = running.isSuccessful();
  } finally {
    if (!success && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return success;
}
Project: pbase    File: TestTableMapReduceUtil.java
@Test
@SuppressWarnings("deprecation")
public void shouldBeValidMapReduceEvaluation() throws Exception {
  Configuration cfg = UTIL.getConfiguration();
  JobConf jobConf = new JobConf(cfg);
  try {
    jobConf.setJobName("process row task");
    jobConf.setNumReduceTasks(1);
    TableMapReduceUtil.initTableMapJob(TABLE_NAME, new String(COLUMN_FAMILY),
        ClassificatorMapper.class, ImmutableBytesWritable.class, Put.class,
        jobConf);
    TableMapReduceUtil.initTableReduceJob(TABLE_NAME,
        ClassificatorRowReduce.class, jobConf);
    RunningJob job = JobClient.runJob(jobConf);
    assertTrue(job.isSuccessful());
  } finally {
    if (jobConf != null)
      FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir")));
  }
}
Project: pbase    File: TestTableMapReduceUtil.java
@Test
@SuppressWarnings("deprecation")
public void shouldBeValidMapReduceWithPartitionerEvaluation()
    throws IOException {
  Configuration cfg = UTIL.getConfiguration();
  JobConf jobConf = new JobConf(cfg);
  try {
    jobConf.setJobName("process row task");
    jobConf.setNumReduceTasks(2);
    TableMapReduceUtil.initTableMapJob(TABLE_NAME, new String(COLUMN_FAMILY),
        ClassificatorMapper.class, ImmutableBytesWritable.class, Put.class,
        jobConf);

    TableMapReduceUtil.initTableReduceJob(TABLE_NAME,
        ClassificatorRowReduce.class, jobConf, HRegionPartitioner.class);
    RunningJob job = JobClient.runJob(jobConf);
    assertTrue(job.isSuccessful());
  } finally {
    if (jobConf != null)
      FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir")));
  }
}
Project: pbase    File: TestTableInputFormat.java
void testInputFormat(Class<? extends InputFormat> clazz) throws IOException {
  final JobConf job = MapreduceTestingShim.getJobConf(mrCluster);
  job.setInputFormat(clazz);
  job.setOutputFormat(NullOutputFormat.class);
  job.setMapperClass(ExampleVerifier.class);
  job.setNumReduceTasks(0);
  LOG.debug("submitting job.");
  final RunningJob run = JobClient.runJob(job);
  assertTrue("job failed!", run.isSuccessful());
  assertEquals("Saw the wrong number of instances of the filtered-for row.", 2, run.getCounters()
      .findCounter(TestTableInputFormat.class.getName() + ":row", "aaa").getCounter());
  assertEquals("Saw any instances of the filtered out row.", 0, run.getCounters()
      .findCounter(TestTableInputFormat.class.getName() + ":row", "bbb").getCounter());
  assertEquals("Saw the wrong number of instances of columnA.", 1, run.getCounters()
      .findCounter(TestTableInputFormat.class.getName() + ":family", "columnA").getCounter());
  assertEquals("Saw the wrong number of instances of columnB.", 1, run.getCounters()
      .findCounter(TestTableInputFormat.class.getName() + ":family", "columnB").getCounter());
  assertEquals("Saw the wrong count of values for the filtered-for row.", 2, run.getCounters()
      .findCounter(TestTableInputFormat.class.getName() + ":value", "value aaa").getCounter());
  assertEquals("Saw the wrong count of values for the filtered-out row.", 0, run.getCounters()
      .findCounter(TestTableInputFormat.class.getName() + ":value", "value bbb").getCounter());
}
Project: HIndex    File: TestTableMapReduceUtil.java
@Test
@SuppressWarnings("deprecation")
public void shouldBeValidMapReduceEvaluation() throws Exception {
  Configuration cfg = UTIL.getConfiguration();
  JobConf jobConf = new JobConf(cfg);
  try {
    jobConf.setJobName("process row task");
    jobConf.setNumReduceTasks(1);
    TableMapReduceUtil.initTableMapJob(TABLE_NAME, new String(COLUMN_FAMILY),
        ClassificatorMapper.class, ImmutableBytesWritable.class, Put.class,
        jobConf);
    TableMapReduceUtil.initTableReduceJob(TABLE_NAME,
        ClassificatorRowReduce.class, jobConf);
    RunningJob job = JobClient.runJob(jobConf);
    assertTrue(job.isSuccessful());
  } finally {
    if (jobConf != null)
      FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir")));
  }
}
Project: HIndex    File: TestTableMapReduceUtil.java
@Test
@SuppressWarnings("deprecation")
public void shouldBeValidMapReduceWithPartitionerEvaluation()
    throws IOException {
  Configuration cfg = UTIL.getConfiguration();
  JobConf jobConf = new JobConf(cfg);
  try {
    jobConf.setJobName("process row task");
    jobConf.setNumReduceTasks(2);
    TableMapReduceUtil.initTableMapJob(TABLE_NAME, new String(COLUMN_FAMILY),
        ClassificatorMapper.class, ImmutableBytesWritable.class, Put.class,
        jobConf);

    TableMapReduceUtil.initTableReduceJob(TABLE_NAME,
        ClassificatorRowReduce.class, jobConf, HRegionPartitioner.class);
    RunningJob job = JobClient.runJob(jobConf);
    assertTrue(job.isSuccessful());
  } finally {
    if (jobConf != null)
      FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir")));
  }
}
Project: hops    File: MapReduceBlockRepairManager.java
private void initialize() {
  if (initialized) {
    return;
  }

  try {
    JobClient jobClient = new JobClient(getConf());
    for (RepairJob job : recoverActiveJobs()) {
      org.apache.hadoop.mapred.JobID jobId =
          new org.apache.hadoop.mapred.JobID(job.getJtIdentifier(),
              job.getJobId());
      RunningJob runningJob = jobClient.getJob(jobId);
      if (runningJob == null) {
        throw new IOException("Failed to recover");
      }
      ActiveRepair recovered = new ActiveRepair(jobId, runningJob,
          new Path(job.getInDir()), new Path(job.getOutDir()));
      currentRepairs.put(job.getPath(), recovered);
    }
  } catch (IOException e) {
    LOG.error("Encoding job recovery failed", e);
    throw new RuntimeException(e);
  }

  initialized = true;
}
Project: hadoop    File: TestKeyFieldBasedComparator.java
public void configure(String keySpec, int expect) throws Exception {
  Path testdir = new Path(TEST_DIR.getAbsolutePath());
  Path inDir = new Path(testdir, "in");
  Path outDir = new Path(testdir, "out");
  FileSystem fs = getFileSystem();
  fs.delete(testdir, true);
  conf.setInputFormat(TextInputFormat.class);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(LongWritable.class);

  conf.setNumMapTasks(1);
  conf.setNumReduceTasks(1);

  conf.setOutputFormat(TextOutputFormat.class);
  conf.setOutputKeyComparatorClass(KeyFieldBasedComparator.class);
  conf.setKeyFieldComparatorOptions(keySpec);
  conf.setKeyFieldPartitionerOptions("-k1.1,1.1");
  conf.set(JobContext.MAP_OUTPUT_KEY_FIELD_SEPERATOR, " ");
  conf.setMapperClass(InverseMapper.class);
  conf.setReducerClass(IdentityReducer.class);
  if (!fs.mkdirs(testdir)) {
    throw new IOException("Mkdirs failed to create " + testdir.toString());
  }
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  // set up input data in 2 files 
  Path inFile = new Path(inDir, "part0");
  FileOutputStream fos = new FileOutputStream(inFile.toString());
  fos.write((line1 + "\n").getBytes());
  fos.write((line2 + "\n").getBytes());
  fos.close();
  JobClient jc = new JobClient(conf);
  RunningJob r_job = jc.submitJob(conf);
  while (!r_job.isComplete()) {
    Thread.sleep(1000);
  }

  if (!r_job.isSuccessful()) {
    fail("Oops! The job broke due to an unexpected error");
  }
  Path[] outputFiles = FileUtil.stat2Paths(
      getFileSystem().listStatus(outDir,
      new Utils.OutputFileUtils.OutputFilesFilter()));
  if (outputFiles.length > 0) {
    InputStream is = getFileSystem().open(outputFiles[0]);
    BufferedReader reader = new BufferedReader(new InputStreamReader(is));
    String line = reader.readLine();
    //make sure we get what we expect as the first line, and also
    //that we have two lines
    if (expect == 1) {
      assertTrue(line.startsWith(line1));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line2));
    }
    line = reader.readLine();
    if (expect == 1) {
      assertTrue(line.startsWith(line2));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line1));
    }
    reader.close();
  }
}
Project: hadoop    File: TestEncryptedShuffle.java
private void encryptedShuffleWithCerts(boolean useClientCerts)
  throws Exception {
  try {
    Configuration conf = new Configuration();
    String keystoresDir = new File(BASEDIR).getAbsolutePath();
    String sslConfsDir =
      KeyStoreTestUtil.getClasspathDir(TestEncryptedShuffle.class);
    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfsDir, conf,
                                    useClientCerts);
    conf.setBoolean(MRConfig.SHUFFLE_SSL_ENABLED_KEY, true);
    startCluster(conf);
    FileSystem fs = FileSystem.get(getJobConf());
    Path inputDir = new Path("input");
    fs.mkdirs(inputDir);
    Writer writer =
      new OutputStreamWriter(fs.create(new Path(inputDir, "data.txt")));
    writer.write("hello");
    writer.close();

    Path outputDir = new Path("output", "output");

    JobConf jobConf = new JobConf(getJobConf());
    jobConf.setInt("mapred.map.tasks", 1);
    jobConf.setInt("mapred.map.max.attempts", 1);
    jobConf.setInt("mapred.reduce.max.attempts", 1);
    jobConf.set("mapred.input.dir", inputDir.toString());
    jobConf.set("mapred.output.dir", outputDir.toString());
    JobClient jobClient = new JobClient(jobConf);
    RunningJob runJob = jobClient.submitJob(jobConf);
    runJob.waitForCompletion();
    Assert.assertTrue(runJob.isComplete());
    Assert.assertTrue(runJob.isSuccessful());
  } finally {
    stopCluster();
  }
}
Project: hadoop    File: TestMROldApiJobs.java
static boolean runJob(JobConf conf, Path inDir, Path outDir, int numMaps, 
                         int numReds) throws IOException, InterruptedException {

  FileSystem fs = FileSystem.get(conf);
  if (fs.exists(outDir)) {
    fs.delete(outDir, true);
  }
  if (!fs.exists(inDir)) {
    fs.mkdirs(inDir);
  }
  String input = "The quick brown fox\n" + "has many silly\n"
      + "red fox sox\n";
  for (int i = 0; i < numMaps; ++i) {
    DataOutputStream file = fs.create(new Path(inDir, "part-" + i));
    file.writeBytes(input);
    file.close();
  }

  DistributedCache.addFileToClassPath(TestMRJobs.APP_JAR, conf, fs);
  conf.setOutputCommitter(CustomOutputCommitter.class);
  conf.setInputFormat(TextInputFormat.class);
  conf.setOutputKeyClass(LongWritable.class);
  conf.setOutputValueClass(Text.class);

  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setNumMapTasks(numMaps);
  conf.setNumReduceTasks(numReds);

  JobClient jobClient = new JobClient(conf);

  RunningJob job = jobClient.submitJob(conf);
  return jobClient.monitorAndPrintJob(conf, job);
}
Project: ditb    File: TestTableSnapshotInputFormat.java
public static void doTestWithMapReduce(HBaseTestingUtility util, TableName tableName,
    String snapshotName, byte[] startRow, byte[] endRow, Path tableDir, int numRegions,
    int expectedNumSplits, boolean shutdownCluster) throws Exception {

  //create the table and snapshot
  createTableAndSnapshot(util, tableName, snapshotName, startRow, endRow, numRegions);

  if (shutdownCluster) {
    util.shutdownMiniHBaseCluster();
  }

  try {
    // create the job
    JobConf jobConf = new JobConf(util.getConfiguration());

    jobConf.setJarByClass(util.getClass());
    org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJars(jobConf,
      TestTableSnapshotInputFormat.class);

    TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS,
      TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
      NullWritable.class, jobConf, true, tableDir);

    jobConf.setReducerClass(TestTableSnapshotInputFormat.TestTableSnapshotReducer.class);
    jobConf.setNumReduceTasks(1);
    jobConf.setOutputFormat(NullOutputFormat.class);

    RunningJob job = JobClient.runJob(jobConf);
    Assert.assertTrue(job.isSuccessful());
  } finally {
    if (!shutdownCluster) {
      util.getHBaseAdmin().deleteSnapshot(snapshotName);
      util.deleteTable(tableName);
    }
  }
}
Project: aliyun-oss-hadoop-fs    File: TestEncryptedShuffle.java
private void encryptedShuffleWithCerts(boolean useClientCerts)
  throws Exception {
  try {
    Configuration conf = new Configuration();
    String keystoresDir = new File(BASEDIR).getAbsolutePath();
    String sslConfsDir =
      KeyStoreTestUtil.getClasspathDir(TestEncryptedShuffle.class);
    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfsDir, conf,
                                    useClientCerts);
    conf.setBoolean(MRConfig.SHUFFLE_SSL_ENABLED_KEY, true);
    startCluster(conf);
    FileSystem fs = FileSystem.get(getJobConf());
    Path inputDir = new Path("input");
    fs.mkdirs(inputDir);
    Writer writer =
      new OutputStreamWriter(fs.create(new Path(inputDir, "data.txt")));
    writer.write("hello");
    writer.close();

    Path outputDir = new Path("output", "output");

    JobConf jobConf = new JobConf(getJobConf());
    jobConf.setInt("mapred.map.tasks", 1);
    jobConf.setInt("mapred.map.max.attempts", 1);
    jobConf.setInt("mapred.reduce.max.attempts", 1);
    jobConf.set("mapred.input.dir", inputDir.toString());
    jobConf.set("mapred.output.dir", outputDir.toString());
    JobClient jobClient = new JobClient(jobConf);
    RunningJob runJob = jobClient.submitJob(jobConf);
    runJob.waitForCompletion();
    Assert.assertTrue(runJob.isComplete());
    Assert.assertTrue(runJob.isSuccessful());
  } finally {
    stopCluster();
  }
}
Project: aliyun-oss-hadoop-fs    File: TestMROldApiJobs.java
static boolean runJob(JobConf conf, Path inDir, Path outDir, int numMaps, 
                         int numReds) throws IOException, InterruptedException {

  FileSystem fs = FileSystem.get(conf);
  if (fs.exists(outDir)) {
    fs.delete(outDir, true);
  }
  if (!fs.exists(inDir)) {
    fs.mkdirs(inDir);
  }
  String input = "The quick brown fox\n" + "has many silly\n"
      + "red fox sox\n";
  for (int i = 0; i < numMaps; ++i) {
    DataOutputStream file = fs.create(new Path(inDir, "part-" + i));
    file.writeBytes(input);
    file.close();
  }

  DistributedCache.addFileToClassPath(TestMRJobs.APP_JAR, conf, fs);
  conf.setOutputCommitter(CustomOutputCommitter.class);
  conf.setInputFormat(TextInputFormat.class);
  conf.setOutputKeyClass(LongWritable.class);
  conf.setOutputValueClass(Text.class);

  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setNumMapTasks(numMaps);
  conf.setNumReduceTasks(numReds);

  JobClient jobClient = new JobClient(conf);

  RunningJob job = jobClient.submitJob(conf);
  return jobClient.monitorAndPrintJob(conf, job);
}
Project: aliyun-oss-hadoop-fs    File: OldAPICombinerTest.java
@Test
public void testWordCountCombinerWithOldAPI() throws Exception {
  final Configuration nativeConf = ScenarioConfiguration.getNativeConfiguration();
  nativeConf.addResource(TestConstants.COMBINER_CONF_PATH);
  final String nativeoutput = TestConstants.NATIVETASK_OLDAPI_COMBINER_TEST_NATIVE_OUTPUTPATH;
  final JobConf nativeJob = getOldAPIJobconf(nativeConf, "nativeCombinerWithOldAPI",
                                             inputpath, nativeoutput);
  RunningJob nativeRunning = JobClient.runJob(nativeJob);

  Counter nativeReduceGroups = nativeRunning.getCounters().findCounter(
    TaskCounter.REDUCE_INPUT_RECORDS);

  final Configuration normalConf = ScenarioConfiguration.getNormalConfiguration();
  normalConf.addResource(TestConstants.COMBINER_CONF_PATH);
  final String normaloutput = TestConstants.NATIVETASK_OLDAPI_COMBINER_TEST_NORMAL_OUTPUTPATH;
  final JobConf normalJob = getOldAPIJobconf(normalConf, "normalCombinerWithOldAPI",
                                             inputpath, normaloutput);

  RunningJob normalRunning = JobClient.runJob(normalJob);
  Counter normalReduceGroups = normalRunning.getCounters().findCounter(
    TaskCounter.REDUCE_INPUT_RECORDS);

  final boolean compareRet = ResultVerifier.verify(nativeoutput, normaloutput);
  assertEquals("file compare result: if they are the same, then return true", true, compareRet);

  assertEquals("The input reduce record count must be same",
               nativeReduceGroups.getValue(), normalReduceGroups.getValue());
}
Project: big-c    File: TestEncryptedShuffle.java
private void encryptedShuffleWithCerts(boolean useClientCerts)
  throws Exception {
  try {
    Configuration conf = new Configuration();
    String keystoresDir = new File(BASEDIR).getAbsolutePath();
    String sslConfsDir =
      KeyStoreTestUtil.getClasspathDir(TestEncryptedShuffle.class);
    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfsDir, conf,
                                    useClientCerts);
    conf.setBoolean(MRConfig.SHUFFLE_SSL_ENABLED_KEY, true);
    startCluster(conf);
    FileSystem fs = FileSystem.get(getJobConf());
    Path inputDir = new Path("input");
    fs.mkdirs(inputDir);
    Writer writer =
      new OutputStreamWriter(fs.create(new Path(inputDir, "data.txt")));
    writer.write("hello");
    writer.close();

    Path outputDir = new Path("output", "output");

    JobConf jobConf = new JobConf(getJobConf());
    jobConf.setInt("mapred.map.tasks", 1);
    jobConf.setInt("mapred.map.max.attempts", 1);
    jobConf.setInt("mapred.reduce.max.attempts", 1);
    jobConf.set("mapred.input.dir", inputDir.toString());
    jobConf.set("mapred.output.dir", outputDir.toString());
    JobClient jobClient = new JobClient(jobConf);
    RunningJob runJob = jobClient.submitJob(jobConf);
    runJob.waitForCompletion();
    Assert.assertTrue(runJob.isComplete());
    Assert.assertTrue(runJob.isSuccessful());
  } finally {
    stopCluster();
  }
}
Project: big-c    File: TestMROldApiJobs.java
static boolean runJob(JobConf conf, Path inDir, Path outDir, int numMaps, 
                         int numReds) throws IOException, InterruptedException {

  FileSystem fs = FileSystem.get(conf);
  if (fs.exists(outDir)) {
    fs.delete(outDir, true);
  }
  if (!fs.exists(inDir)) {
    fs.mkdirs(inDir);
  }
  String input = "The quick brown fox\n" + "has many silly\n"
      + "red fox sox\n";
  for (int i = 0; i < numMaps; ++i) {
    DataOutputStream file = fs.create(new Path(inDir, "part-" + i));
    file.writeBytes(input);
    file.close();
  }

  DistributedCache.addFileToClassPath(TestMRJobs.APP_JAR, conf, fs);
  conf.setOutputCommitter(CustomOutputCommitter.class);
  conf.setInputFormat(TextInputFormat.class);
  conf.setOutputKeyClass(LongWritable.class);
  conf.setOutputValueClass(Text.class);

  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setNumMapTasks(numMaps);
  conf.setNumReduceTasks(numReds);

  JobClient jobClient = new JobClient(conf);

  RunningJob job = jobClient.submitJob(conf);
  return jobClient.monitorAndPrintJob(conf, job);
}
Project: tracing-framework    File: JobRunner.java
public String status() {
    RunningJob rj = this.rj;
    if (rj != null)
        return rj.toString();
    else
        return status;
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestEncryptedShuffle.java
private void encryptedShuffleWithCerts(boolean useClientCerts)
  throws Exception {
  try {
    Configuration conf = new Configuration();
    String keystoresDir = new File(BASEDIR).getAbsolutePath();
    String sslConfsDir =
      KeyStoreTestUtil.getClasspathDir(TestEncryptedShuffle.class);
    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfsDir, conf,
                                    useClientCerts);
    conf.setBoolean(MRConfig.SHUFFLE_SSL_ENABLED_KEY, true);
    startCluster(conf);
    FileSystem fs = FileSystem.get(getJobConf());
    Path inputDir = new Path("input");
    fs.mkdirs(inputDir);
    Writer writer =
      new OutputStreamWriter(fs.create(new Path(inputDir, "data.txt")));
    writer.write("hello");
    writer.close();

    Path outputDir = new Path("output", "output");

    JobConf jobConf = new JobConf(getJobConf());
    jobConf.setInt("mapred.map.tasks", 1);
    jobConf.setInt("mapred.map.max.attempts", 1);
    jobConf.setInt("mapred.reduce.max.attempts", 1);
    jobConf.set("mapred.input.dir", inputDir.toString());
    jobConf.set("mapred.output.dir", outputDir.toString());
    JobClient jobClient = new JobClient(jobConf);
    RunningJob runJob = jobClient.submitJob(jobConf);
    runJob.waitForCompletion();
    Assert.assertTrue(runJob.isComplete());
    Assert.assertTrue(runJob.isSuccessful());
  } finally {
    stopCluster();
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: OldAPICombinerTest.java
@Test
public void testWordCountCombinerWithOldAPI() throws Exception {
  final Configuration nativeConf = ScenarioConfiguration.getNativeConfiguration();
  nativeConf.addResource(TestConstants.COMBINER_CONF_PATH);
  final String nativeoutput = TestConstants.NATIVETASK_OLDAPI_COMBINER_TEST_NATIVE_OUTPUTPATH;
  final JobConf nativeJob = getOldAPIJobconf(nativeConf, "nativeCombinerWithOldAPI",
                                             inputpath, nativeoutput);
  RunningJob nativeRunning = JobClient.runJob(nativeJob);

  Counter nativeReduceGroups = nativeRunning.getCounters().findCounter(
    TaskCounter.REDUCE_INPUT_RECORDS);

  final Configuration normalConf = ScenarioConfiguration.getNormalConfiguration();
  normalConf.addResource(TestConstants.COMBINER_CONF_PATH);
  final String normaloutput = TestConstants.NATIVETASK_OLDAPI_COMBINER_TEST_NORMAL_OUTPUTPATH;
  final JobConf normalJob = getOldAPIJobconf(normalConf, "normalCombinerWithOldAPI",
                                             inputpath, normaloutput);

  RunningJob normalRunning = JobClient.runJob(normalJob);
  Counter normalReduceGroups = normalRunning.getCounters().findCounter(
    TaskCounter.REDUCE_INPUT_RECORDS);

  final boolean compareRet = ResultVerifier.verify(nativeoutput, normaloutput);
  assertEquals("file compare result: if they are the same, then return true", true, compareRet);

  assertEquals("The input reduce record count must be same",
               nativeReduceGroups.getValue(), normalReduceGroups.getValue());
}