Usage examples for the Java class org.apache.hadoop.mapred.TestMapCollection.FakeIF
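Every snippet below wires FakeIF in as the job's InputFormat so the test can run against synthetic splits instead of real input files. The class itself lives in Hadoop's test sources and is not reproduced on this page; what follows is a rough reconstruction for illustration only, assuming the usual pattern for synthetic InputFormats in the old org.apache.hadoop.mapred API, and it may differ from the real class in detail.

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;

// Illustrative sketch, not the actual Hadoop test source.
public class FakeIF implements InputFormat<NullWritable, NullWritable> {

  // A split that carries no data; every map task gets one.
  public static class FakeSplit implements InputSplit {
    public void write(DataOutput out) throws IOException { }
    public void readFields(DataInput in) throws IOException { }
    public long getLength() { return 0L; }
    public String[] getLocations() { return new String[0]; }
  }

  public InputSplit[] getSplits(JobConf conf, int numSplits) {
    InputSplit[] splits = new InputSplit[numSplits];
    for (int i = 0; i < numSplits; ++i) {
      splits[i] = new FakeSplit();
    }
    return splits;
  }

  public RecordReader<NullWritable, NullWritable> getRecordReader(
      InputSplit ignored, JobConf conf, Reporter reporter) {
    return new RecordReader<NullWritable, NullWritable>() {
      private boolean done = false;
      public boolean next(NullWritable key, NullWritable value) {
        // Emit exactly one empty record so the mapper body runs once.
        if (done) return false;
        done = true;
        return true;
      }
      public NullWritable createKey() { return NullWritable.get(); }
      public NullWritable createValue() { return NullWritable.get(); }
      public long getPos() { return 0L; }
      public void close() { }
      public float getProgress() { return done ? 1.0f : 0.0f; }
    };
  }
}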

Project: hadoop-2.6.0-cdh5.4.3    File: TestReduceFetch.java
public static Counters runJob(JobConf conf) throws Exception {
  conf.setMapperClass(MapMB.class);
  conf.setReducerClass(IdentityReducer.class);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(Text.class);
  conf.setNumReduceTasks(1);
  // FakeIF fabricates its own splits, so the /in path is never actually read.
  conf.setInputFormat(FakeIF.class);
  FileInputFormat.setInputPaths(conf, new Path("/in"));
  final Path outp = new Path("/out");
  FileOutputFormat.setOutputPath(conf, outp);
  RunningJob job = null;
  try {
    job = JobClient.runJob(conf);
    assertTrue(job.isSuccessful());
  } finally {
    // Remove the output directory even if the job fails or the assert trips.
    FileSystem fs = dfsCluster.getFileSystem();
    if (fs.exists(outp)) {
      fs.delete(outp, true);
    }
  }
  return job.getCounters();
}
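The dfsCluster referenced in the finally block is a MiniDFSCluster field supplied by the surrounding test class, which these excerpts omit. A minimal sketch of that harness follows; mini-cluster constructor signatures changed across Hadoop releases, so treat the exact calls as illustrative rather than as the test's actual setup code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.MiniMRCluster;

public class TestReduceFetch {
  protected static MiniDFSCluster dfsCluster;
  protected static MiniMRCluster mrCluster;

  public static void setUp() throws Exception {
    Configuration conf = new Configuration();
    // Two datanodes, freshly formatted, default rack topology.
    dfsCluster = new MiniDFSCluster(conf, 2, true, null);
    // Two tasktrackers pointed at the mini-DFS namenode.
    mrCluster = new MiniMRCluster(2,
        dfsCluster.getFileSystem().getUri().toString(), 1);
  }

  public static void tearDown() throws Exception {
    if (mrCluster != null) { mrCluster.shutdown(); }
    if (dfsCluster != null) { dfsCluster.shutdown(); }
  }
}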
Project: hadoop-EAR    File: TestReduceFetch.java
public static Counters runJob(JobConf conf) throws Exception {
  conf.setMapperClass(MapMB.class);
  conf.setReducerClass(IdentityReducer.class);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(Text.class);
  conf.setNumReduceTasks(1);
  conf.setInputFormat(FakeIF.class);
  FileInputFormat.setInputPaths(conf, new Path("/in"));
  final Path outp = new Path("/out");
  FileOutputFormat.setOutputPath(conf, outp);
  RunningJob job = null;
  try {
    job = JobClient.runJob(conf);
    assertTrue(job.isSuccessful());
  } finally {
    FileSystem fs = dfsCluster.getFileSystem();
    if (fs.exists(outp)) {
      fs.delete(outp, true);
    }
  }
  return job.getCounters();
}
Project: hadoop-EAR    File: TestReduceTaskNoMapOutput.java
@SuppressWarnings({ "deprecation", "unchecked" })
public static TaskCompletionEvent[] runJob(JobConf conf, Class mapperClass,
                boolean enableNoFetchEmptyMapOutputs) throws Exception {
  conf.setMapperClass(mapperClass);
  conf.setReducerClass(IdentityReducer.class);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(Text.class);
  conf.setNumMapTasks(3);
  conf.setNumReduceTasks(1);
  conf.setInputFormat(FakeIF.class);
  // Toggle the behavior under test: when enabled, the reducer skips
  // fetching map outputs that are known to be empty.
  conf.setBoolean("mapred.enable.no.fetch.map.outputs", enableNoFetchEmptyMapOutputs);
  FileInputFormat.setInputPaths(conf, new Path("/in"));
  final Path outp = new Path("/out");
  FileOutputFormat.setOutputPath(conf, outp);

  RunningJob job = JobClient.runJob(conf);
  assertTrue(job.isSuccessful());
  // Hand back the completion events so callers can inspect fetch behavior.
  return job.getTaskCompletionEvents(0);
}
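Unlike the TestReduceFetch variant, this runJob returns the job's task completion events rather than its counters, so callers can check how each map attempt was reported. A hypothetical caller, sketched for illustration only:

// SomeMapper stands in for whatever mapper class the real test passes.
// Run with empty-map-output fetch skipping enabled and verify that every
// map attempt reported success.
TaskCompletionEvent[] events = runJob(conf, SomeMapper.class, true);
for (TaskCompletionEvent event : events) {
  if (event.isMapTask()) {
    assertEquals(TaskCompletionEvent.Status.SUCCEEDED, event.getTaskStatus());
  }
}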
Project: hadoop-on-lustre    File: TestReduceFetch.java
public static Counters runJob(JobConf conf) throws Exception {
  conf.setMapperClass(MapMB.class);
  conf.setReducerClass(IdentityReducer.class);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(Text.class);
  conf.setNumReduceTasks(1);
  conf.setInputFormat(FakeIF.class);
  FileInputFormat.setInputPaths(conf, new Path("/in"));
  final Path outp = new Path("/out");
  FileOutputFormat.setOutputPath(conf, outp);
  RunningJob job = null;
  try {
    job = JobClient.runJob(conf);
    assertTrue(job.isSuccessful());
  } finally {
    FileSystem fs = dfsCluster.getFileSystem();
    if (fs.exists(outp)) {
      fs.delete(outp, true);
    }
  }
  return job.getCounters();
}
Project: RDFS    File: TestReduceFetch.java
public static Counters runJob(JobConf conf) throws Exception {
  conf.setMapperClass(MapMB.class);
  conf.setReducerClass(IdentityReducer.class);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(Text.class);
  conf.setNumReduceTasks(1);
  conf.setInputFormat(FakeIF.class);
  FileInputFormat.setInputPaths(conf, new Path("/in"));
  final Path outp = new Path("/out");
  FileOutputFormat.setOutputPath(conf, outp);
  RunningJob job = null;
  try {
    job = JobClient.runJob(conf);
    assertTrue(job.isSuccessful());
  } finally {
    FileSystem fs = dfsCluster.getFileSystem();
    if (fs.exists(outp)) {
      fs.delete(outp, true);
    }
  }
  return job.getCounters();
}
Project: RDFS    File: TestReduceTaskNoMapOutput.java
public static TaskCompletionEvent[] runJob(JobConf conf, Class mapperClass,
                boolean enableNoFetchEmptyMapOutputs) throws Exception {
  conf.setMapperClass(mapperClass);
  conf.setReducerClass(IdentityReducer.class);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(Text.class);
  conf.setNumMapTasks(3);
  conf.setNumReduceTasks(1);
  conf.setInputFormat(FakeIF.class);
  conf.setBoolean("mapred.enable.no.fetch.map.outputs", enableNoFetchEmptyMapOutputs);
  FileInputFormat.setInputPaths(conf, new Path("/in"));
  final Path outp = new Path("/out");
  FileOutputFormat.setOutputPath(conf, outp);
  RunningJob job = null;

  job = JobClient.runJob(conf);
  assertTrue(job.isSuccessful());
  return job.getTaskCompletionEvents(0);
}
Project: hadoop-0.20    File: TestReduceFetch.java
public static Counters runJob(JobConf conf) throws Exception {
  conf.setMapperClass(MapMB.class);
  conf.setReducerClass(IdentityReducer.class);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(Text.class);
  conf.setNumReduceTasks(1);
  conf.setInputFormat(FakeIF.class);
  FileInputFormat.setInputPaths(conf, new Path("/in"));
  final Path outp = new Path("/out");
  FileOutputFormat.setOutputPath(conf, outp);
  RunningJob job = null;
  try {
    job = JobClient.runJob(conf);
    assertTrue(job.isSuccessful());
  } finally {
    FileSystem fs = dfsCluster.getFileSystem();
    if (fs.exists(outp)) {
      fs.delete(outp, true);
    }
  }
  return job.getCounters();
}
Project: hanoi-hadoop-2.0.0-cdh    File: TestReduceFetch.java
public static Counters runJob(JobConf conf) throws Exception {
  conf.setMapperClass(MapMB.class);
  conf.setReducerClass(IdentityReducer.class);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(Text.class);
  conf.setNumReduceTasks(1);
  conf.setInputFormat(FakeIF.class);
  FileInputFormat.setInputPaths(conf, new Path("/in"));
  final Path outp = new Path("/out");
  FileOutputFormat.setOutputPath(conf, outp);
  RunningJob job = null;
  try {
    job = JobClient.runJob(conf);
    assertTrue(job.isSuccessful());
  } finally {
    FileSystem fs = dfsCluster.getFileSystem();
    if (fs.exists(outp)) {
      fs.delete(outp, true);
    }
  }
  return job.getCounters();
}
Project: hortonworks-extension    File: TestReduceFetch.java
public static Counters runJob(JobConf conf) throws Exception {
  conf.setMapperClass(MapMB.class);
  conf.setReducerClass(IdentityReducer.class);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(Text.class);
  conf.setNumReduceTasks(1);
  conf.setInputFormat(FakeIF.class);
  FileInputFormat.setInputPaths(conf, new Path("/in"));
  final Path outp = new Path("/out");
  FileOutputFormat.setOutputPath(conf, outp);
  RunningJob job = null;
  try {
    job = JobClient.runJob(conf);
    assertTrue(job.isSuccessful());
  } finally {
    FileSystem fs = dfsCluster.getFileSystem();
    if (fs.exists(outp)) {
      fs.delete(outp, true);
    }
  }
  return job.getCounters();
}
Project: hadoop-gpu    File: TestReduceFetch.java
public static Counters runJob(JobConf conf) throws Exception {
  conf.setMapperClass(MapMB.class);
  conf.setReducerClass(IdentityReducer.class);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(Text.class);
  conf.setNumReduceTasks(1);
  conf.setInputFormat(FakeIF.class);
  FileInputFormat.setInputPaths(conf, new Path("/in"));
  final Path outp = new Path("/out");
  FileOutputFormat.setOutputPath(conf, outp);
  RunningJob job = null;
  try {
    job = JobClient.runJob(conf);
    assertTrue(job.isSuccessful());
  } finally {
    FileSystem fs = dfsCluster.getFileSystem();
    if (fs.exists(outp)) {
      fs.delete(outp, true);
    }
  }
  return job.getCounters();
}
Project: hadoop-EAR    File: TestNewCollector.java
private void runTest(String name, int keyLen, int valLen,
    int bigKeyLen, int bigValLen, int recordsNumPerMapper,
    int sortMb, float spillPer, int numMapperTasks,
    int numReducerTask, double[] reducerRecPercents,
    int[] numBigRecordsStart, int[] numBigRecordsMiddle,
    int[] numBigRecordsEnd) throws Exception {
  JobConf conf = mrCluster.createJobConf();
  // Size the map-side sort buffer and its spill threshold for this run.
  conf.setInt("io.sort.mb", sortMb);
  conf.set("io.sort.spill.percent", Float.toString(spillPer));
  conf.setInt("test.key.length", keyLen);
  conf.setInt("test.value.length", valLen);
  conf.setInt("test.bigkey.length", bigKeyLen);
  conf.setInt("test.bigvalue.length", bigValLen);
  conf.setNumMapTasks(numMapperTasks);
  conf.setNumReduceTasks(numReducerTask);
  // FakeIF fabricates input splits and NullOutputFormat discards reducer
  // output, so only the collector path in between is exercised.
  conf.setInputFormat(FakeIF.class);
  conf.setOutputFormat(NullOutputFormat.class);
  conf.setMapperClass(TestNewCollectorMapper.class);
  conf.setReducerClass(TestNewCollectorReducer.class);
  conf.setMapOutputKeyClass(TestNewCollectorKey.class);
  conf.setMapOutputValueClass(BytesWritable.class);
  conf.setBoolean("mapred.map.output.blockcollector", true);

  // Publish the expected record distribution so the mapper and reducer
  // can generate and then verify exact record counts.
  RecordNumStore.setJobConf(numReducerTask, numMapperTasks,
      recordsNumPerMapper, reducerRecPercents, numBigRecordsStart,
      numBigRecordsMiddle, numBigRecordsEnd, conf);
  RecordNumStore.getInst(conf);
  LOG.info("Running " + name);
  JobClient.runJob(conf);
}
Project: RDFS    File: TestNewCollector.java
private void runTest(String name, int keyLen, int valLen,
    int bigKeyLen, int bigValLen, int recordsNumPerMapper,
    int sortMb, float spillPer, int numMapperTasks,
    int numReducerTask, double[] reducerRecPercents,
    int[] numBigRecordsStart, int[] numBigRecordsMiddle,
    int[] numBigRecordsEnd) throws Exception {
  JobConf conf = mrCluster.createJobConf();
  conf.setInt("io.sort.mb", sortMb);
  conf.set("io.sort.spill.percent", Float.toString(spillPer));
  conf.setInt("test.key.length", keyLen);
  conf.setInt("test.value.length", valLen);
  conf.setInt("test.bigkey.length", bigKeyLen);
  conf.setInt("test.bigvalue.length", bigValLen);
  conf.setNumMapTasks(numMapperTasks);
  conf.setNumReduceTasks(numReducerTask);
  conf.setInputFormat(FakeIF.class);
  conf.setOutputFormat(NullOutputFormat.class);
  conf.setMapperClass(TestNewCollectorMapper.class);
  conf.setReducerClass(TestNewCollectorReducer.class);
  conf.setMapOutputKeyClass(TestNewCollectorKey.class);
  conf.setMapOutputValueClass(BytesWritable.class);
  conf.setBoolean("mapred.map.output.blockcollector", true);

  RecordNumStore.setJobConf(numReducerTask, numMapperTasks,
      recordsNumPerMapper, reducerRecPercents, numBigRecordsStart,
      numBigRecordsMiddle, numBigRecordsEnd, conf);
  RecordNumStore.getInst(conf);
  LOG.info("Running " + name);
  JobClient.runJob(conf);
}
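runTest takes its record-shape parameters positionally, which is easy to misread at the call site. A hypothetical invocation, with every value invented purely to label the parameters:

// Illustrative only; these values do not come from the test source.
// One mapper feeds one reducer 100 small records, with no oversized
// records injected at the start, middle, or end of the stream.
runTest("basic",                 // name, used only for logging
    10, 100,                     // keyLen, valLen (bytes)
    0, 0,                        // bigKeyLen, bigValLen (unused here)
    100,                         // recordsNumPerMapper
    1, 0.5f,                     // sortMb, spillPer
    1, 1,                        // numMapperTasks, numReducerTask
    new double[] { 1.0 },        // reducerRecPercents: each reducer's share
    new int[] { 0 },             // numBigRecordsStart
    new int[] { 0 },             // numBigRecordsMiddle
    new int[] { 0 });            // numBigRecordsEnd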