Java class org.apache.hadoop.util.ToolRunner usage examples
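Every snippet on this page follows the same contract: a class implements org.apache.hadoop.util.Tool, and ToolRunner.run(...) parses the generic Hadoop options into the Configuration before handing the remaining arguments to Tool.run(String[]), whose int return value is used as the process exit code. A minimal sketch of that pattern (the class name and output are illustrative, not taken from any project below):

import java.util.Arrays;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

// Hypothetical minimal Tool: ToolRunner strips the generic options
// (-conf, -D, -fs, -jt, -files, -libjars, -archives) into the
// Configuration before run() is invoked with the leftover arguments.
public class MyTool extends Configured implements Tool {
  @Override
  public int run(String[] args) throws Exception {
    Configuration conf = getConf(); // already holds any -D overrides
    System.out.println("tool-specific args: " + Arrays.toString(args));
    return 0; // convention: 0 on success, non-zero on failure
  }

  public static void main(String[] args) throws Exception {
    // ToolRunner.run returns the Tool's exit code; propagate it to the JVM.
    System.exit(ToolRunner.run(new Configuration(), new MyTool(), args));
  }
}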

Project: hadoop    File: TestHadoopArchives.java
private String makeArchiveWithRepl() throws Exception {
  final String inputPathStr = inputPath.toUri().getPath();
  System.out.println("inputPathStr = " + inputPathStr);

  final URI uri = fs.getUri();
  final String prefix = "har://hdfs-" + uri.getHost() + ":" + uri.getPort()
      + archivePath.toUri().getPath() + Path.SEPARATOR;

  final String harName = "foo.har";
  final String fullHarPathStr = prefix + harName;
  final String[] args = { "-archiveName", harName, "-p", inputPathStr,
      "-r 3", "*", archivePath.toString() };
  System.setProperty(HadoopArchives.TEST_HADOOP_ARCHIVES_JAR_PATH,
      HADOOP_ARCHIVES_JAR);
  final HadoopArchives har = new HadoopArchives(conf);
  assertEquals(0, ToolRunner.run(har, args));
  return fullHarPathStr;
}
Project: hadoop    File: TestHadoopArchives.java
private String makeArchive(Path parentPath, String relGlob) throws Exception {
  final String parentPathStr = parentPath.toUri().getPath();
  final String relPathGlob = relGlob == null ? "*" : relGlob;
  System.out.println("parentPathStr = " + parentPathStr);

  final URI uri = fs.getUri();
  final String prefix = "har://hdfs-" + uri.getHost() + ":" + uri.getPort()
      + archivePath.toUri().getPath() + Path.SEPARATOR;

  final String harName = "foo.har";
  final String fullHarPathStr = prefix + harName;
  final String[] args = { "-archiveName", harName, "-p", parentPathStr,
      relPathGlob, archivePath.toString() };
  System.setProperty(HadoopArchives.TEST_HADOOP_ARCHIVES_JAR_PATH,
      HADOOP_ARCHIVES_JAR);
  final HadoopArchives har = new HadoopArchives(conf);
  assertEquals(0, ToolRunner.run(har, args));
  return fullHarPathStr;
}
Project: hadoop-oss    File: ZKFailoverController.java
private boolean confirmFormat() {
  String parentZnode = getParentZnode();
  System.err.println(
      "===============================================\n" +
      "The configured parent znode " + parentZnode + " already exists.\n" +
      "Are you sure you want to clear all failover information from\n" +
      "ZooKeeper?\n" +
      "WARNING: Before proceeding, ensure that all HDFS services and\n" +
      "failover controllers are stopped!\n" +
      "===============================================");
  try {
    return ToolRunner.confirmPrompt("Proceed formatting " + parentZnode + "?");
  } catch (IOException e) {
    LOG.debug("Failed to confirm", e);
    return false;
  }
}
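ToolRunner.confirmPrompt(String), used above, writes the prompt to the console and blocks reading standard input until it gets a yes/no answer, which it returns as a boolean; the IOException path covers a closed or unreadable stdin. A minimal usage sketch (the prompt text and class name are illustrative):

import java.io.IOException;

import org.apache.hadoop.util.ToolRunner;

public class ConfirmPromptExample {
  public static void main(String[] args) throws IOException {
    // Blocks until the user answers the prompt on stdin.
    boolean proceed = ToolRunner.confirmPrompt("Clear all failover state?");
    System.out.println(proceed ? "proceeding" : "aborted");
  }
}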
Project: hadoop    File: TestGetConf.java
private String runTool(HdfsConfiguration conf, String[] args, boolean success)
    throws Exception {
  ByteArrayOutputStream o = new ByteArrayOutputStream();
  PrintStream out = new PrintStream(o, true);
  try {
    int ret = ToolRunner.run(new GetConf(conf, out, out), args);
    out.flush();
    System.err.println("Output: " + o.toString());
    assertEquals("Expected " + (success?"success":"failure") +
        " for args: " + Joiner.on(" ").join(args) + "\n" +
        "Output: " + o.toString(),
        success, ret == 0);
    return o.toString();
  } finally {
    o.close();
    out.close();
  }
}
Project: ditb    File: IntegrationTestBigLinkedList.java
public int run(Path inputDir, int numMappers) throws Exception {
  getConf().set(SEARCHER_INPUTDIR_KEY, inputDir.toString());
  SortedSet<byte []> keys = readKeysToSearch(getConf());
  if (keys.isEmpty()) throw new RuntimeException("No keys to find");
  LOG.info("Count of keys to find: " + keys.size());
  for(byte [] key: keys)  LOG.info("Key: " + Bytes.toStringBinary(key));
  Path hbaseDir = new Path(getConf().get(HConstants.HBASE_DIR));
  // Now read all WALs. In two dirs. Presumes certain layout.
  Path walsDir = new Path(hbaseDir, HConstants.HREGION_LOGDIR_NAME);
  Path oldWalsDir = new Path(hbaseDir, HConstants.HREGION_OLDLOGDIR_NAME);
  LOG.info("Running Search with keys inputDir=" + inputDir +", numMappers=" + numMappers +
    " against " + getConf().get(HConstants.HBASE_DIR));
  int ret = ToolRunner.run(new WALSearcher(getConf()), new String [] {walsDir.toString(), ""});
  if (ret != 0) return ret;
  return ToolRunner.run(new WALSearcher(getConf()), new String [] {oldWalsDir.toString(), ""});
}
Project: hadoop-oss    File: DataGenerator.java
/** Parse the command line arguments and initialize the data */
private int init(String[] args) {
  try { // initialize file system handle
    fc = FileContext.getFileContext(getConf());
  } catch (IOException ioe) {
    System.err.println("Can not initialize the file system: " + 
        ioe.getLocalizedMessage());
    return -1;
  }

  for (int i = 0; i < args.length; i++) { // parse command line
    if (args[i].equals("-root")) {
      root = new Path(args[++i]);
    } else if (args[i].equals("-inDir")) {
      inDir = new File(args[++i]);
    } else {
      System.err.println(USAGE);
      ToolRunner.printGenericCommandUsage(System.err);
      System.exit(-1);
    }
  }
  return 0;
}
Project: hadoop    File: HadoopArchives.java
/** The main function. */
public static void main(String[] args) {
  JobConf job = new JobConf(HadoopArchives.class);

  HadoopArchives harchives = new HadoopArchives(job);
  int ret = 0;

  try{
    ret = ToolRunner.run(harchives, args);
  } catch(Exception e) {
    LOG.debug("Exception in archives  ", e);
    System.err.println(e.getClass().getSimpleName() + " in archives");
    final String s = e.getLocalizedMessage();
    if (s != null) {
      System.err.println(s);
    } else {
      e.printStackTrace(System.err);
    }
    System.exit(1);
  }
  System.exit(ret);
}
Project: mumu-mapreduce    File: DailyTrafficStatisticsMapRedTest.java
@Test
public void mapreduce() {
    MapReduceConfiguration mapReduceConfiguration = new MapReduceConfiguration();
    long start = System.currentTimeMillis();
    log.info("开始计算nginx访问日志IP统计量");
    try {
        String inputPath = mapReduceConfiguration.url() + "/mapreduce/nginxlog/access/input";
        String outputPath = mapReduceConfiguration.url() + "/mapreduce/nginxlog/access/output/daily" + DateFormatUtils.format(new Date(), "yyyyMMddHHmmss");
        ToolRunner.run(new DailyTrafficStatisticsMapRed(), new String[]{inputPath, outputPath});
        mapReduceConfiguration.print(outputPath);
    } catch (Exception e) {
        log.error(e);
    }
    long end = System.currentTimeMillis();
    log.info("运行mapreduce程序花费时间:" + (end - start) / 1000 + "s");
}
Project: hadoop    File: TestCopyFiles.java
public void testCopyDuplication() throws Exception {
  final FileSystem localfs = FileSystem.get(LOCAL_FS, new Configuration());
  try {    
    MyFile[] files = createFiles(localfs, TEST_ROOT_DIR+"/srcdat");
    ToolRunner.run(new DistCpV1(new Configuration()),
        new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat",
                      "file:///"+TEST_ROOT_DIR+"/src2/srcdat"});
    assertTrue("Source and destination directories do not match.",
               checkFiles(localfs, TEST_ROOT_DIR+"/src2/srcdat", files));

    assertEquals(DistCpV1.DuplicationException.ERROR_CODE,
        ToolRunner.run(new DistCpV1(new Configuration()),
        new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat",
                      "file:///"+TEST_ROOT_DIR+"/src2/srcdat",
                      "file:///"+TEST_ROOT_DIR+"/destdat",}));
  }
  finally {
    deldir(localfs, TEST_ROOT_DIR+"/destdat");
    deldir(localfs, TEST_ROOT_DIR+"/srcdat");
    deldir(localfs, TEST_ROOT_DIR+"/src2");
  }
}
Project: hadoop    File: SCMAdmin.java
private static void printHelp(String cmd) {
  String summary = "scmadmin is the command to execute shared cache manager" +
      "administrative commands.\n" +
      "The full syntax is: \n\n" +
      "hadoop scmadmin" +
      " [-runCleanerTask]" +
      " [-help [cmd]]\n";

  String runCleanerTask =
      "-runCleanerTask: Run cleaner task right away.\n";

  String help = "-help [cmd]: \tDisplays help for the given command or all commands if none\n" +
      "\t\tis specified.\n";

  if ("runCleanerTask".equals(cmd)) {
    System.out.println(runCleanerTask);
  } else if ("help".equals(cmd)) {
    System.out.println(help);
  } else {
    System.out.println(summary);
    System.out.println(runCleanerTask);
    System.out.println(help);
    System.out.println();
    ToolRunner.printGenericCommandUsage(System.out);
  }
}
Project: mumu-parquet    File: MapReduceParquetMapReducer.java
@Override
public int run(String[] args) throws Exception {
    if (args.length != 2) {
        System.err.printf("Usage: %s [generic options] <input> <output>\n",
                getClass().getSimpleName());
        ToolRunner.printGenericCommandUsage(System.err);
        return -1;
    }

    Job job = Job.getInstance(getConf(), "Text to Parquet");
    job.setJarByClass(getClass());
    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    job.setMapperClass(TextToParquetMapper.class);
    job.setNumReduceTasks(0);
    job.setOutputFormatClass(AvroParquetOutputFormat.class);
    AvroParquetOutputFormat.setSchema(job, SCHEMA);
    job.setOutputKeyClass(Void.class);
    job.setOutputValueClass(Group.class);
    return job.waitForCompletion(true) ? 0 : 1;
}
Project: hadoop    File: TestLocalJobSubmission.java
/**
 * Test the local job submission options
 * -jt local and -libjars.
 * @throws IOException
 */
@Test
public void testLocalJobLibjarsOption() throws IOException {
  Path jarPath = makeJar(new Path(TEST_ROOT_DIR, "test.jar"));

  Configuration conf = new Configuration();
  conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://testcluster");
  final String[] args = {
      "-jt" , "local", "-libjars", jarPath.toString(),
      "-m", "1", "-r", "1", "-mt", "1", "-rt", "1"
  };
  int res = -1;
  try {
    res = ToolRunner.run(conf, new SleepJob(), args);
  } catch (Exception e) {
    System.out.println("Job failed with " + e.getLocalizedMessage());
    e.printStackTrace(System.out);
    fail("Job failed");
  }
  assertEquals("dist job res is not 0:", 0, res);
}
Project: hadoop    File: TestTeraSort.java
private void runTeraGen(Configuration conf, Path sortInput)
    throws Exception {
  String[] genArgs = {NUM_ROWS, sortInput.toString()};

  // Run TeraGen
  assertEquals(0, ToolRunner.run(conf, new TeraGen(), genArgs));
}
Project: hadoop    File: DataGenerator.java
/** Parse the command line arguments and initialize the data */
private int init(String[] args) {
  try { // initialize file system handle
    fc = FileContext.getFileContext(getConf());
  } catch (IOException ioe) {
    System.err.println("Can not initialize the file system: " + 
        ioe.getLocalizedMessage());
    return -1;
  }

  for (int i = 0; i < args.length; i++) { // parse command line
    if (args[i].equals("-root")) {
      root = new Path(args[++i]);
    } else if (args[i].equals("-inDir")) {
      inDir = new File(args[++i]);
    } else {
      System.err.println(USAGE);
      ToolRunner.printGenericCommandUsage(System.err);
      System.exit(-1);
    }
  }
  return 0;
}
Project: ditb    File: LoadTestTool.java
@Override
public void run() {
  try {
    int ret = ToolRunner.run(HBaseConfiguration.create(), new LoadTestTool(), workerArgs);
    if (ret != 0) {
      throw new RuntimeException("LoadTestTool exit with non-zero return code.");
    }
  } catch (Exception ex) {
    LOG.error("Error in worker thread", ex);
    workerThreadError(ex);
  }
}
Project: hadoop-oss    File: HAAdmin.java
protected void printUsage(PrintStream errOut) {
  errOut.println(getUsageString());
  for (Map.Entry<String, UsageInfo> e : USAGE.entrySet()) {
    String cmd = e.getKey();
    UsageInfo usage = e.getValue();

    errOut.println("    [" + cmd + " " + usage.args + "]"); 
  }
  errOut.println();
  ToolRunner.printGenericCommandUsage(errOut);    
}
Project: hadoop-oss    File: GetGroupsTestBase.java
private String runTool(Configuration conf, String[] args, boolean success)
    throws Exception {
  ByteArrayOutputStream o = new ByteArrayOutputStream();
  PrintStream out = new PrintStream(o, true);
  try {
    int ret = ToolRunner.run(getTool(out), args);
    assertEquals(success, ret == 0);
    return o.toString();
  } finally {
    o.close();
    out.close();
  }
}
Project: hadoop    File: TestTeraSort.java
private void runTeraSort(Configuration conf,
    Path sortInput, Path sortOutput) throws Exception {

  // Setup command-line arguments to 'sort'
  String[] sortArgs = {sortInput.toString(), sortOutput.toString()};

  // Run Sort
  assertEquals(0, ToolRunner.run(conf, new TeraSort(), sortArgs));
}
Project: aliyun-maxcompute-data-collectors    File: MainframeImportTool.java
@Override
public void printHelp(ToolOptions toolOptions) {
  System.out.println("usage: sqoop " + getToolName()
      + " [GENERIC-ARGS] [TOOL-ARGS]\n");

  toolOptions.printHelp();

  System.out.println("\nGeneric Hadoop command-line arguments:");
  System.out.println("(must preceed any tool-specific arguments)");
  ToolRunner.printGenericCommandUsage(System.out);
  System.out.println(
      "\nAt minimum, you must specify --connect and --" + DS_ARG);
}
Project: aliyun-maxcompute-data-collectors    File: JobTool.java
@Override
/** {@inheritDoc} */
public void printHelp(ToolOptions opts) {
  System.out.println("usage: sqoop " + getToolName()
      + " [GENERIC-ARGS] [JOB-ARGS] [-- [<tool-name>] [TOOL-ARGS]]");
  System.out.println("");

  opts.printHelp();

  System.out.println("");
  System.out.println("Generic Hadoop command-line arguments:");
  System.out.println("(must preceed any tool-specific arguments)");
  ToolRunner.printGenericCommandUsage(System.out);
}
Project: hadoop    File: KeyShell.java
@Override
public boolean validate() {
  provider = getKeyProvider();
  if (provider == null) {
    out.println("There are no valid KeyProviders configured. Nothing\n"
      + "was deleted. Use the -provider option to specify a provider.");
    return false;
  }
  if (keyName == null) {
    out.println("There is no keyName specified. Please specify a " +
        "<keyname>. See the usage description with -help.");
    return false;
  }
  if (interactive) {
    try {
      cont = ToolRunner
          .confirmPrompt("You are about to DELETE all versions of "
              + " key " + keyName + " from KeyProvider "
              + provider + ". Continue? ");
      if (!cont) {
        out.println(keyName + " has not been deleted.");
      }
      return cont;
    } catch (IOException e) {
      out.println(keyName + " will not be deleted.");
      e.printStackTrace(err);
    }
  }
  return true;
}
Project: hadoop    File: TestZKRMStateStorePerf.java
static public void main(String[] args) throws Exception {
  TestZKRMStateStorePerf perf = new TestZKRMStateStorePerf();

  int res = -1;
  try {
    res = ToolRunner.run(perf, args);
  } catch(Exception e) {
    System.err.print(StringUtils.stringifyException(e));
    res = -2;
  }
  if(res == -1) {
    System.err.print(USAGE);
  }
  System.exit(res);
}
Project: hadoop    File: ClusterCLI.java
public static void main(String[] args) throws Exception {
  ClusterCLI cli = new ClusterCLI();
  cli.setSysOutPrintStream(System.out);
  cli.setSysErrPrintStream(System.err);
  int res = ToolRunner.run(cli, args);
  cli.stop();
  System.exit(res);
}
Project: hadoop    File: TestCopyFiles.java
/** Copy files from DFS to DFS. */
public void testCopyFromDfsToDfs() throws Exception {
  String namenode = null;
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    final FileSystem hdfs = cluster.getFileSystem();
    namenode = FileSystem.getDefaultUri(conf).toString();
    if (namenode.startsWith("hdfs://")) {
      MyFile[] files = createFiles(URI.create(namenode), "/srcdat");
      ToolRunner.run(new DistCpV1(conf), new String[] {
                                       "-log",
                                       namenode+"/logs",
                                       namenode+"/srcdat",
                                       namenode+"/destdat"});
      assertTrue("Source and destination directories do not match.",
                 checkFiles(hdfs, "/destdat", files));
      FileSystem fs = FileSystem.get(URI.create(namenode+"/logs"), conf);
      assertTrue("Log directory does not exist.",
                 fs.exists(new Path(namenode+"/logs")));
      deldir(hdfs, "/destdat");
      deldir(hdfs, "/srcdat");
      deldir(hdfs, "/logs");
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
Project: hadoop    File: QueueCLI.java
public static void main(String[] args) throws Exception {
  QueueCLI cli = new QueueCLI();
  cli.setSysOutPrintStream(System.out);
  cli.setSysErrPrintStream(System.err);
  int res = ToolRunner.run(cli, args);
  cli.stop();
  System.exit(res);
}
Project: hadoop    File: NodeCLI.java
public static void main(String[] args) throws Exception {
  NodeCLI cli = new NodeCLI();
  cli.setSysOutPrintStream(System.out);
  cli.setSysErrPrintStream(System.err);
  int res = ToolRunner.run(cli, args);
  cli.stop();
  System.exit(res);
}
Project: hadoop    File: TestRPCCallBenchmark.java
@Test(timeout=20000)
public void testBenchmarkWithProto() throws Exception {
  int rc = ToolRunner.run(new RPCCallBenchmark(),
      new String[] {
    "--clientThreads", "30",
    "--serverThreads", "30",
    "--time", "5",
    "--serverReaderThreads", "4",
    "--messageSize", "1024",
    "--engine", "protobuf"});
  assertEquals(0, rc);
}
Project: hadoop    File: TestDFSIO.java
public static void main(String[] args) {
  TestDFSIO bench = new TestDFSIO();
  int res = -1;
  try {
    res = ToolRunner.run(bench, args);
  } catch(Exception e) {
    System.err.print(StringUtils.stringifyException(e));
    res = -2;
  }
  if(res == -1)
    System.err.print(USAGE);
  System.exit(res);
}
Project: ditb    File: Merge.java
public static void main(String[] args) {
  int status;
  try {
    status = ToolRunner.run(HBaseConfiguration.create(), new Merge(), args);
  } catch (Exception e) {
    LOG.error("exiting due to error", e);
    status = -1;
  }
  System.exit(status);
}
Project: hadoop    File: Balancer.java
/**
 * Run a balancer
 * @param args Command line arguments
 */
public static void main(String[] args) {
  if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
    System.exit(0);
  }

  try {
    System.exit(ToolRunner.run(new HdfsConfiguration(), new Cli(), args));
  } catch (Throwable e) {
    LOG.error("Exiting balancer due an exception", e);
    System.exit(-1);
  }
}
Project: hadoop    File: Util.java
/** Print usage messages */
public static int printUsage(String[] args, String usage) {
  err.println("args = " + Arrays.asList(args));
  err.println();
  err.println("Usage: java " + usage);
  err.println();
  ToolRunner.printGenericCommandUsage(err);
  return -1;
}
Project: hadoop    File: TestWordStats.java
@Test public void testGetTheMean() throws Exception {
  String args[] = new String[2];
  args[0] = INPUT;
  args[1] = MEAN_OUTPUT;

  WordMean wm = new WordMean();
  ToolRunner.run(new Configuration(), wm, args);
  double mean = wm.getMean();

  // outputs MUST match
  WordMeanReader wr = new WordMeanReader();
  assertEquals(mean, wr.read(INPUT), 0.0);
}
Project: ditb    File: TestAcidGuarantees.java
public static void main(String args[]) throws Exception {
  Configuration c = HBaseConfiguration.create();
  int status;
  try {
    TestAcidGuarantees test = new TestAcidGuarantees();
    status = ToolRunner.run(c, test, args);
  } catch (Exception e) {
    LOG.error("Exiting due to error", e);
    status = -1;
  }
  System.exit(status);
}
Project: hadoop    File: TestRPCCallBenchmark.java
@Test(timeout=20000)
public void testBenchmarkWithWritable() throws Exception {
  int rc = ToolRunner.run(new RPCCallBenchmark(),
      new String[] {
    "--clientThreads", "30",
    "--serverThreads", "30",
    "--time", "5",
    "--serverReaderThreads", "4",
    "--messageSize", "1024",
    "--engine", "writable"});
  assertEquals(0, rc);
}
Project: hadoop    File: TestMRCredentials.java
/**
 * run a distributed job and verify that TokenCache is available
 * @throws IOException
 */
@Test
public void test () throws IOException {

  // make sure JT starts
  Configuration jobConf =  new JobConf(mrCluster.getConfig());

  // provide namenodes names for the job to get the delegation tokens for
  //String nnUri = dfsCluster.getNameNode().getUri(namenode).toString();
  NameNode nn = dfsCluster.getNameNode();
  URI nnUri = NameNode.getUri(nn.getNameNodeAddress());
  jobConf.set(JobContext.JOB_NAMENODES, nnUri + "," + nnUri.toString());


  jobConf.set("mapreduce.job.credentials.json" , "keys.json");

  // using argument to pass the file name
  String[] args = {
      "-m", "1", "-r", "1", "-mt", "1", "-rt", "1"
  };

  int res = -1;
  try {
    res = ToolRunner.run(jobConf, new CredentialsTestJob(), args);
  } catch (Exception e) {
    System.out.println("Job failed with" + e.getLocalizedMessage());
    e.printStackTrace(System.out);
    fail("Job failed");
  }
  assertEquals("dist job res is not 0", res, 0);

}
Project: ditb    File: TestImportTSVWithTTLs.java
protected static Tool doMROnTableTest(HBaseTestingUtility util, String family, String data,
    String[] args, int valueMultiplier) throws Exception {
  TableName table = TableName.valueOf(args[args.length - 1]);
  Configuration conf = new Configuration(util.getConfiguration());

  // populate input file
  FileSystem fs = FileSystem.get(conf);
  Path inputPath = fs.makeQualified(new Path(util
      .getDataTestDirOnTestFS(table.getNameAsString()), "input.dat"));
  FSDataOutputStream op = fs.create(inputPath, true);
  op.write(Bytes.toBytes(data));
  op.close();
  LOG.debug(String.format("Wrote test data to file: %s", inputPath));

  if (conf.getBoolean(FORCE_COMBINER_CONF, true)) {
    LOG.debug("Forcing combiner.");
    conf.setInt("mapreduce.map.combine.minspills", 1);
  }

  // run the import
  List<String> argv = new ArrayList<String>(Arrays.asList(args));
  argv.add(inputPath.toString());
  Tool tool = new ImportTsv();
  LOG.debug("Running ImportTsv with arguments: " + argv);
  try {
    // Job will fail if observer rejects entries without TTL
    assertEquals(0, ToolRunner.run(conf, tool, argv.toArray(args)));
  } finally {
    // Clean up
    if (conf.getBoolean(DELETE_AFTER_LOAD_CONF, true)) {
      LOG.debug("Deleting test subdirectory");
      util.cleanupDataTestDirOnTestFS(table.getNameAsString());
    }
  }

  return tool;
}
Project: hadoop    File: FailJob.java
public int run(String[] args) throws Exception {
  if(args.length < 1) {
    System.err.println("FailJob " +
        " (-failMappers|-failReducers)");
    ToolRunner.printGenericCommandUsage(System.err);
    return 2;
  }
  boolean failMappers = false, failReducers = false;

  for (int i = 0; i < args.length; i++ ) {
    if (args[i].equals("-failMappers")) {
      failMappers = true;
    }
    else if(args[i].equals("-failReducers")) {
      failReducers = true;
    }
  }
  if (!(failMappers ^ failReducers)) {
    System.err.println("Exactly one of -failMappers or -failReducers must be specified.");
    return 3;
  }

  // Write a file with one line per mapper.
  final FileSystem fs = FileSystem.get(getConf());
  Path inputDir = new Path(FailJob.class.getSimpleName() + "_in");
  fs.mkdirs(inputDir);
  for (int i = 0; i < getConf().getInt("mapred.map.tasks", 1); ++i) {
    BufferedWriter w = new BufferedWriter(new OutputStreamWriter(
        fs.create(new Path(inputDir, Integer.toString(i)))));
    w.write(Integer.toString(i) + "\n");
    w.close();
  }

  Job job = createJob(failMappers, failReducers, inputDir);
  return job.waitForCompletion(true) ? 0 : 1;
}
Project: hadoop    File: RandomTextWriter.java
static int printUsage() {
  System.out.println("randomtextwriter " +
                     "[-outFormat <output format class>] " + 
                     "<output>");
  ToolRunner.printGenericCommandUsage(System.out);
  return 2;
}
Project: hadoop    File: GetGroupsTestBase.java
private String runTool(Configuration conf, String[] args, boolean success)
    throws Exception {
  ByteArrayOutputStream o = new ByteArrayOutputStream();
  PrintStream out = new PrintStream(o, true);
  try {
    int ret = ToolRunner.run(getTool(out), args);
    assertEquals(success, ret == 0);
    return o.toString();
  } finally {
    o.close();
    out.close();
  }
}
Project: ditb    File: HFilePrettyPrinter.java
public static void main(String[] args) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  // no need for a block cache
  conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0);
  int ret = ToolRunner.run(conf, new HFilePrettyPrinter(), args);
  System.exit(ret);
}