Java class org.apache.hadoop.util.ProgramDriver: example source code
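
The snippets below, collected from a range of open-source projects, all follow the same pattern: register one or more runnable classes with org.apache.hadoop.util.ProgramDriver under a command name, then dispatch on the first command-line argument. As a minimal sketch of that pattern (the ExampleRunner and HelloTool names are made up for illustration, and run(String[]) assumes a Hadoop 2.x-era ProgramDriver, which added it alongside the older driver(String[])):

import org.apache.hadoop.util.ProgramDriver;

public class ExampleRunner {
  // ProgramDriver looks up and invokes each registered class's main(String[]).
  public static class HelloTool {
    public static void main(String[] args) {
      System.out.println("hello " + (args.length > 0 ? args[0] : "world"));
    }
  }

  // Typical invocation once ExampleRunner is the jar's Main-Class
  // (hypothetical jar name):
  //   hadoop jar mytools.jar hello Bob
  public static void main(String[] args) {
    int exitCode = -1;
    ProgramDriver pgd = new ProgramDriver();
    try {
      pgd.addClass("hello", HelloTool.class, "Prints a greeting.");
      // run() consumes the first argument as the program name, forwards the
      // rest to that class's main(), and returns non-zero on bad usage.
      exitCode = pgd.run(args);
    } catch (Throwable e) {
      e.printStackTrace();
    }
    System.exit(exitCode);
  }
}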

Project: ditb    File: Driver.java
/**
 * @param args
 * @throws Throwable
 */
public static void main(String[] args) throws Throwable {
  ProgramDriver pgd = new ProgramDriver();

  pgd.addClass(RowCounter.NAME, RowCounter.class,
    "Count rows in HBase table.");
  pgd.addClass(CellCounter.NAME, CellCounter.class,
    "Count cells in HBase table.");
  pgd.addClass(Export.NAME, Export.class, "Write table data to HDFS.");
  pgd.addClass(Import.NAME, Import.class, "Import data written by Export.");
  pgd.addClass(ImportTsv.NAME, ImportTsv.class, "Import data in TSV format.");
  pgd.addClass(LoadIncrementalHFiles.NAME, LoadIncrementalHFiles.class,
               "Complete a bulk data load.");
  pgd.addClass(CopyTable.NAME, CopyTable.class,
      "Export a table from local cluster to peer cluster.");
  pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, "Compare" +
      " the data from tables in two different clusters. WARNING: It" +
      " doesn't work for incrementColumnValues'd cells since the" +
      " timestamp is changed after being appended to the log.");
  pgd.addClass(WALPlayer.NAME, WALPlayer.class, "Replay WAL files.");
  pgd.addClass(ExportSnapshot.NAME, ExportSnapshot.class, "Export" +
      " the specific snapshot to a given FileSystem.");

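  // The call below goes through reflection rather than calling pgd.driver(args)
  // directly; presumably this keeps the class binary-compatible across Hadoop
  // versions in which driver()'s return type differs (void vs. int).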
  ProgramDriver.class.getMethod("driver", new Class [] {String[].class}).
    invoke(pgd, new Object[]{args});
}
Project: LCIndex-HBase-0.94.16    File: Driver.java
/**
 * @param args
 * @throws Throwable
 */
public static void main(String[] args) throws Throwable {
  ProgramDriver pgd = new ProgramDriver();
  pgd.addClass(RowCounter.NAME, RowCounter.class,
    "Count rows in HBase table");
  pgd.addClass(CellCounter.NAME, CellCounter.class,
    "Count cells in HBase table");
  pgd.addClass(Export.NAME, Export.class, "Write table data to HDFS.");
  pgd.addClass(Import.NAME, Import.class, "Import data written by Export.");
  pgd.addClass(ImportTsv.NAME, ImportTsv.class, "Import data in TSV format.");
  pgd.addClass(LoadIncrementalHFiles.NAME, LoadIncrementalHFiles.class,
               "Complete a bulk data load.");
  pgd.addClass(CopyTable.NAME, CopyTable.class,
      "Export a table from local cluster to peer cluster");
  pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, "Compare" +
      " the data from tables in two different clusters. WARNING: It" +
      " doesn't work for incrementColumnValues'd cells since the" +
      " timestamp is changed after being appended to the log.");
  ProgramDriver.class.getMethod("driver", new Class [] {String[].class}).
    invoke(pgd, new Object[]{args});
}
Project: hadoop-EAR    File: AllTestDriver.java
/**
 * A description of the test program for running all the tests using jar file
 */
public static void main(String argv[]){
  ProgramDriver pgd = new ProgramDriver();
  try {
    pgd.addClass("gentest", DFSGeneralTest.class, "A map/reduce benchmark that supports running multi-threaded operations on multiple machines");
    pgd.addClass("locktest", DFSLockTest.class, "A benchmark that spawns many threads, each running many configurable read/write FileSystem operations, to test the concurrency of the FSNamesystem lock.");
    pgd.addClass("dirtest", DFSDirTest.class, "A map/reduce benchmark that creates many jobs, each of which spawns many threads, and each thread creates/deletes many dirs.");
    pgd.addClass("dfstest", DFSIOTest.class, "A map/reduce benchmark that creates many jobs, each of which can create many files, to test the per-task i/o rate of a hadoop cluster.");
    pgd.addClass("structure-gen", StructureGenerator.class, "Create a structure of files and directories as an input for data-gen");
    pgd.addClass("data-gen", DataGenerator.class, "Create files and directories on cluster as inputs for load-gen");
    pgd.addClass("load-gen", LoadGenerator.class, "A tool to test the behavior of NameNode with different client loads.");
    pgd.addClass("testnn", TestNNThroughputBenchmark.class, "Test the behavior of the namenode on localhost." +
        " Here the namenode is real and the other components are simulated");
    pgd.driver(argv);
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
Project: pbase    File: Driver.java
/**
 * @param args
 * @throws Throwable
 */
public static void main(String[] args) throws Throwable {
  ProgramDriver pgd = new ProgramDriver();
  pgd.addClass(RowCounter.NAME, RowCounter.class,
    "Count rows in HBase table");
  pgd.addClass(CellCounter.NAME, CellCounter.class,
    "Count cells in HBase table");
  pgd.addClass(Export.NAME, Export.class, "Write table data to HDFS.");
  pgd.addClass(Import.NAME, Import.class, "Import data written by Export.");
  pgd.addClass(ImportTsv.NAME, ImportTsv.class, "Import data in TSV format.");
  pgd.addClass(LoadIncrementalHFiles.NAME, LoadIncrementalHFiles.class,
               "Complete a bulk data load.");
  pgd.addClass(CopyTable.NAME, CopyTable.class,
      "Export a table from local cluster to peer cluster");
  pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, "Compare" +
      " the data from tables in two different clusters. WARNING: It" +
      " doesn't work for incrementColumnValues'd cells since the" +
      " timestamp is changed after being appended to the log.");
  ProgramDriver.class.getMethod("driver", new Class [] {String[].class}).
    invoke(pgd, new Object[]{args});
}
Project: HIndex    File: Driver.java
/**
 * @param args
 * @throws Throwable
 */
public static void main(String[] args) throws Throwable {
  ProgramDriver pgd = new ProgramDriver();
  pgd.addClass(RowCounter.NAME, RowCounter.class,
    "Count rows in HBase table");
  pgd.addClass(CellCounter.NAME, CellCounter.class,
    "Count cells in HBase table");
  pgd.addClass(Export.NAME, Export.class, "Write table data to HDFS.");
  pgd.addClass(Import.NAME, Import.class, "Import data written by Export.");
  pgd.addClass(ImportTsv.NAME, ImportTsv.class, "Import data in TSV format.");
  pgd.addClass(LoadIncrementalHFiles.NAME, LoadIncrementalHFiles.class,
               "Complete a bulk data load.");
  pgd.addClass(CopyTable.NAME, CopyTable.class,
      "Export a table from local cluster to peer cluster");
  pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, "Compare" +
      " the data from tables in two different clusters. WARNING: It" +
      " doesn't work for incrementColumnValues'd cells since the" +
      " timestamp is changed after being appended to the log.");
  ProgramDriver.class.getMethod("driver", new Class [] {String[].class}).
    invoke(pgd, new Object[]{args});
}
Project: bigdata-tutorial    File: ExampleDriver.java
public static void main(String argv[]) {
    int exitCode = -1;
    ProgramDriver pgd = new ProgramDriver();
    try {
        pgd.addClass("wordcount", WordCount.class,
                "A map/reduce program that counts the words in the input files.");

        pgd.addClass("xflowstatic", XflowStatic.class,
                "A map/reduce program that computes xflow statistics from data files.");

        exitCode = pgd.run(argv);
    } catch (Throwable e) {
        e.printStackTrace();
    }

    System.exit(exitCode);
}
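Note the use of pgd.run(argv) rather than pgd.driver(argv) in the snippet above: run(String[]) returns an exit code and leaves the System.exit call to the caller, whereas the older driver(String[]) entry point used in several other snippets historically handled bad-usage exits itself. run() exists only in newer Hadoop releases, so this snippet assumes a correspondingly recent dependency.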
Project: pss    File: PartDriver.java
public static void main(String args[]) throws UnsupportedEncodingException {
    int exitCode = -1;
    ProgramDriver pgd = new ProgramDriver();
    try {
        JobConf job = new JobConf();
        new GenericOptionsParser(job, args);
        String metric = job.get(Config.METRIC_PROPERTY, Config.METRIC_VALUE).toLowerCase();
        if (metric.contains("j")) {
            JaccardCoarsePartitionMain.main(args);
        } else {
            HolderCosinePartitionMain.main(args);
        }
        // pgd.addClass("cpartitionw", CosineWeightPartitionMain.class,
        //         "\tCosine static partitioning on weight sorted documents");
        // pgd.addClass("cpartitiona", CosineAllPartitionMain.class,
        //         "\tCosine static partitioning on ALL sorted documents");
        // pgd.driver(args);
        exitCode = 0;
    } catch (Throwable e) {
        e.printStackTrace();
    }
    System.exit(exitCode);
}
Project: pss    File: CleanPagesDriver.java
/**
 * Input is files with one document per line. The files are text and can be either compressed (.warc.gz) or uncompressed.
 * MARCH: NOT FINISHED
 * Prints these options to choose from:<br>
 * - [html] for html pages to be cleaned. <br>
 * - [warc] for .warc.gz files to be cleaned.<br>
 *
 * @param argv : command line inputs
 */
public static void main(String argv[]) {
    int exitCode = -1;
    ProgramDriver pgd = new ProgramDriver();
    try {
        pgd.addClass("warc", WarcFileCleaner.class,
                "A MapReduce job to clean .warc.gz webpages of html and stray characters into a set of features.");
        pgd.addClass(
                "html",
                PageCleaner.class,
                "A MapReduce job to clean html pages of stopwords and stray characters, even alphanumerics. It further converts letters to lowercase.");
        pgd.driver(argv);
    } catch (Throwable e) {
        e.printStackTrace();
    }

    System.exit(exitCode);
}
Project: pss    File: PreprocessDriver.java
/**
 * Prints these options to choose from:<br>
 * - [clean] documents to produce document ID: bag of cleaned words. <br>
 * - [hash] bag of words into bag of hashed tokens.<br>
 * - Produce [sequence] records [LongWritable,FeatureWeightArrayWritable] <br>
 * - [seq] deals with writing/reading/combining sequence files.
 * 
 * @param argv : command line inputs
 */
public static void main(String argv[]) {
    int exitCode = -1;
    ProgramDriver pgd = new ProgramDriver();
    try {
        pgd.addClass("clean", CleanPagesDriver.class,
                "A MapReduce job to clean input pages. See options.");
        pgd.addClass(
                "hash", HashPagesDriver.class,
                "A MapReduce job to collect features, then hash input data into [docId <features>] with associated weights if desired.");
        pgd.addClass("seq", SequenceDriver.class,
                "For writing/reading/merging sequence files. See options.\n\n");
        pgd.driver(argv);
    } catch (Throwable e) {
        e.printStackTrace();
    }

    System.exit(exitCode);
}
Project: pss    File: SequenceDriver.java
/**
 * Prints these options to choose from:<br>
 * - [read] read sequence files and print into console. <br>
 * - [convert] convert text files into sequence files.<br>
 * 
 * @param argv : command line inputs
 */
public static void main(String argv[]) {
    int exitCode = -1;
    ProgramDriver pgd = new ProgramDriver();
    try {
        pgd.addClass("write", SeqWriter.class,
                "A MapReduce job to convert hashed pages into sequence files.");
        pgd.addClass("read", SeqReader.class,
                "Print out sequence pages in readable format.");
        pgd.addClass("combine", SeqFilesCombiner.class,
                "A regular java program to combine sequence records from multiple files into one file in hdfs.");
        pgd.driver(argv);
    } catch (Throwable e) {
        e.printStackTrace();
    }

    System.exit(exitCode);
}
Project: pss    File: LshDriver.java
public static void main(String argv[]) {
    int exitCode = -1;
    ProgramDriver pgd = new ProgramDriver();
    try {
        pgd.addClass("randomlsh", ProjectionLshDriver.class,
                "Partition input vectors according to random projections.");
        pgd.addClass("minhashlsh", MinHashLshDriver.class,
                "Partition input vectors according to minhash values.");
        pgd.addClass("lshstat", LshStat.class, "Collect statistics from binary LSH partitions.");
        pgd.driver(argv);
    } catch (Throwable e) {
        e.printStackTrace();
    }

    System.exit(exitCode);
}
Project: IRIndex    File: Driver.java
/**
 * @param args
 * @throws Throwable
 */
public static void main(String[] args) throws Throwable {
  ProgramDriver pgd = new ProgramDriver();
  pgd.addClass(RowCounter.NAME, RowCounter.class,
    "Count rows in HBase table");
  pgd.addClass(CellCounter.NAME, CellCounter.class,
    "Count cells in HBase table");
  pgd.addClass(Export.NAME, Export.class, "Write table data to HDFS.");
  pgd.addClass(Import.NAME, Import.class, "Import data written by Export.");
  pgd.addClass(ImportTsv.NAME, ImportTsv.class, "Import data in TSV format.");
  pgd.addClass(LoadIncrementalHFiles.NAME, LoadIncrementalHFiles.class,
               "Complete a bulk data load.");
  pgd.addClass(CopyTable.NAME, CopyTable.class,
      "Export a table from local cluster to peer cluster");
  pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, "Compare" +
      " the data from tables in two different clusters. WARNING: It" +
      " doesn't work for incrementColumnValues'd cells since the" +
      " timestamp is changed after being appended to the log.");
  ProgramDriver.class.getMethod("driver", new Class [] {String[].class}).
    invoke(pgd, new Object[]{args});
}
Project: hbase    File: Driver.java
/**
 * @param args
 * @throws Throwable
 */
public static void main(String[] args) throws Throwable {
  ProgramDriver pgd = new ProgramDriver();

  pgd.addClass(RowCounter.NAME, RowCounter.class,
    "Count rows in HBase table.");
  pgd.addClass(CellCounter.NAME, CellCounter.class,
    "Count cells in HBase table.");
  pgd.addClass(Export.NAME, Export.class, "Write table data to HDFS.");
  pgd.addClass(Import.NAME, Import.class, "Import data written by Export.");
  pgd.addClass(ImportTsv.NAME, ImportTsv.class, "Import data in TSV format.");
  pgd.addClass(LoadIncrementalHFiles.NAME, LoadIncrementalHFiles.class,
               "Complete a bulk data load.");
  pgd.addClass(CopyTable.NAME, CopyTable.class,
      "Export a table from local cluster to peer cluster.");
  pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, "Compare" +
      " data from tables in two different clusters. It" +
      " doesn't work for incrementColumnValues'd cells since" +
      " timestamp is changed after appending to WAL.");
  pgd.addClass(WALPlayer.NAME, WALPlayer.class, "Replay WAL files.");
  pgd.addClass(ExportSnapshot.NAME, ExportSnapshot.class, "Export" +
      " the specific snapshot to a given FileSystem.");

  ProgramDriver.class.getMethod("driver", new Class [] {String[].class}).
    invoke(pgd, new Object[]{args});
}
Project: RStore    File: Driver.java
/**
 * @param args
 * @throws Throwable
 */
public static void main(String[] args) throws Throwable {
  ProgramDriver pgd = new ProgramDriver();
  pgd.addClass(RowCounter.NAME, RowCounter.class,
    "Count rows in HBase table");
  pgd.addClass(CellCounter.NAME, CellCounter.class,
    "Count cells in HBase table");
  pgd.addClass(Export.NAME, Export.class, "Write table data to HDFS.");
  pgd.addClass(Import.NAME, Import.class, "Import data written by Export.");
  pgd.addClass(ImportTsv.NAME, ImportTsv.class, "Import data in TSV format.");
  pgd.addClass(LoadIncrementalHFiles.NAME, LoadIncrementalHFiles.class,
               "Complete a bulk data load.");
  pgd.addClass(CopyTable.NAME, CopyTable.class,
      "Export a table from local cluster to peer cluster");
  pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, "Compare" +
      " the data from tables in two different clusters. WARNING: It" +
      " doesn't work for incrementColumnValues'd cells since the" +
      " timestamp is changed after being appended to the log.");
  ProgramDriver.class.getMethod("driver", new Class [] {String[].class}).
    invoke(pgd, new Object[]{args});
}
Project: cloud-bigtable-examples    File: WordCountDriver.java
public static void main(String[] args) {
  ProgramDriver programDriver = new ProgramDriver();
  int exitCode = -1;
  try {
    programDriver.addClass("wordcount-hbase", WordCountHBase.class,
        "A map/reduce program that counts the words in the input files.");
    programDriver.addClass("export-table", Export.class,
        "A map/reduce program that exports a table to a file.");
    //programDriver.addClass("cellcounter", CellCounter.class, "Count them cells!");
    exitCode = programDriver.run(args);
  } catch (Throwable e) {
    e.printStackTrace();
  }
  System.exit(exitCode);
}
Project: PyroDB    File: Driver.java
/**
 * @param args
 * @throws Throwable
 */
public static void main(String[] args) throws Throwable {
  ProgramDriver pgd = new ProgramDriver();
  pgd.addClass(RowCounter.NAME, RowCounter.class,
    "Count rows in HBase table");
  pgd.addClass(CellCounter.NAME, CellCounter.class,
    "Count cells in HBase table");
  pgd.addClass(Export.NAME, Export.class, "Write table data to HDFS.");
  pgd.addClass(Import.NAME, Import.class, "Import data written by Export.");
  pgd.addClass(ImportTsv.NAME, ImportTsv.class, "Import data in TSV format.");
  pgd.addClass(LoadIncrementalHFiles.NAME, LoadIncrementalHFiles.class,
               "Complete a bulk data load.");
  pgd.addClass(CopyTable.NAME, CopyTable.class,
      "Export a table from local cluster to peer cluster");
  pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, "Compare" +
      " the data from tables in two different clusters. WARNING: It" +
      " doesn't work for incrementColumnValues'd cells since the" +
      " timestamp is changed after being appended to the log.");
  ProgramDriver.class.getMethod("driver", new Class [] {String[].class}).
    invoke(pgd, new Object[]{args});
}
Project: c5    File: Driver.java
/**
 * @param args
 * @throws Throwable
 */
public static void main(String[] args) throws Throwable {
  ProgramDriver pgd = new ProgramDriver();
  pgd.addClass(RowCounter.NAME, RowCounter.class,
    "Count rows in HBase table");
  pgd.addClass(CellCounter.NAME, CellCounter.class,
    "Count cells in HBase table");
  pgd.addClass(Export.NAME, Export.class, "Write table data to HDFS.");
  pgd.addClass(Import.NAME, Import.class, "Import data written by Export.");
  pgd.addClass(ImportTsv.NAME, ImportTsv.class, "Import data in TSV format.");
  pgd.addClass(LoadIncrementalHFiles.NAME, LoadIncrementalHFiles.class,
               "Complete a bulk data load.");
  pgd.addClass(CopyTable.NAME, CopyTable.class,
      "Export a table from local cluster to peer cluster");
  pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, "Compare" +
      " the data from tables in two different clusters. WARNING: It" +
      " doesn't work for incrementColumnValues'd cells since the" +
      " timestamp is changed after being appended to the log.");
  ProgramDriver.class.getMethod("driver", new Class [] {String[].class}).
    invoke(pgd, new Object[]{args});
}
Project: varcrunch    File: VarCrunchDriver.java
public static void main(String[] args) throws Exception {

        ProgramDriver programDriver = new ProgramDriver();
        int exitCode = -1;
        try {
            programDriver.addClass("readdepth-intervals", ComputeReadDepthInInterval.class, "Computes read depth over a given size interval");
            programDriver.addClass("readdepth-hist", DepthHistogram.class, "Computes distribution of read depths");
            programDriver.addClass("germline", GermlinePipeline.class, "Standard germline variant caller");
            programDriver.addClass("somatic", SomaticPipeline.class, "Standard somatic variant caller, takes tumor/normal input");

            programDriver.driver(args);

            exitCode = 0;
        } catch (Throwable e) {
            e.printStackTrace();
        }

        System.exit(exitCode);
    }
Project: RDFS    File: AllTestDriver.java
/**
 * A description of the test program for running all the tests using jar file
 */
public static void main(String argv[]){
  ProgramDriver pgd = new ProgramDriver();
  try {
    pgd.addClass("gentest", DFSGeneralTest.class, "A map/reduce benchmark that supports running multi-threaded operations on multiple machines");
    pgd.addClass("locktest", DFSLockTest.class, "A benchmark that spawns many threads, each running many configurable read/write FileSystem operations, to test the concurrency of the FSNamesystem lock.");
    pgd.addClass("dirtest", DFSDirTest.class, "A map/reduce benchmark that creates many jobs, each of which spawns many threads, and each thread creates/deletes many dirs.");
    pgd.addClass("dfstest", DFSIOTest.class, "A map/reduce benchmark that creates many jobs, each of which can create many files, to test the per-task i/o rate of a hadoop cluster.");
    pgd.addClass("structure-gen", StructureGenerator.class, "Create a structure of files and directories as an input for data-gen");
    pgd.addClass("data-gen", DataGenerator.class, "Create files and directories on cluster as inputs for load-gen");
    pgd.addClass("load-gen", LoadGenerator.class, "A tool to test the behavior of NameNode with different client loads.");
    pgd.addClass("testnn", TestNNThroughputBenchmark.class, "Test the behavior of the namenode on localhost." +
        " Here the namenode is real and the other components are simulated");
    pgd.driver(argv);
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
Project: HBase-Research    File: Driver.java
/**
 * @param args
 * @throws Throwable
 */
public static void main(String[] args) throws Throwable {
  ProgramDriver pgd = new ProgramDriver();
  pgd.addClass(RowCounter.NAME, RowCounter.class,
    "Count rows in HBase table");
  pgd.addClass(CellCounter.NAME, CellCounter.class,
    "Count cells in HBase table");
  pgd.addClass(Export.NAME, Export.class, "Write table data to HDFS.");
  pgd.addClass(Import.NAME, Import.class, "Import data written by Export.");
  pgd.addClass(ImportTsv.NAME, ImportTsv.class, "Import data in TSV format.");
  pgd.addClass(LoadIncrementalHFiles.NAME, LoadIncrementalHFiles.class,
               "Complete a bulk data load.");
  pgd.addClass(CopyTable.NAME, CopyTable.class,
      "Export a table from local cluster to peer cluster");
  pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, "Compare" +
      " the data from tables in two different clusters. WARNING: It" +
      " doesn't work for incrementColumnValues'd cells since the" +
      " timestamp is changed after being appended to the log.");
  ProgramDriver.class.getMethod("driver", new Class [] {String[].class}).
    invoke(pgd, new Object[]{args});
}
Project: hbase-0.94.8-qod    File: Driver.java
/**
 * @param args
 * @throws Throwable
 */
public static void main(String[] args) throws Throwable {
  ProgramDriver pgd = new ProgramDriver();
  pgd.addClass(RowCounter.NAME, RowCounter.class,
    "Count rows in HBase table");
  pgd.addClass(CellCounter.NAME, CellCounter.class,
    "Count cells in HBase table");
  pgd.addClass(Export.NAME, Export.class, "Write table data to HDFS.");
  pgd.addClass(Import.NAME, Import.class, "Import data written by Export.");
  pgd.addClass(ImportTsv.NAME, ImportTsv.class, "Import data in TSV format.");
  pgd.addClass(LoadIncrementalHFiles.NAME, LoadIncrementalHFiles.class,
               "Complete a bulk data load.");
  pgd.addClass(CopyTable.NAME, CopyTable.class,
      "Export a table from local cluster to peer cluster");
  pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, "Compare" +
      " the data from tables in two different clusters. WARNING: It" +
      " doesn't work for incrementColumnValues'd cells since the" +
      " timestamp is changed after being appended to the log.");
  ProgramDriver.class.getMethod("driver", new Class [] {String[].class}).
    invoke(pgd, new Object[]{args});
}
Project: DominoHBase    File: Driver.java
/**
 * @param args
 * @throws Throwable
 */
public static void main(String[] args) throws Throwable {
  ProgramDriver pgd = new ProgramDriver();
  pgd.addClass(RowCounter.NAME, RowCounter.class,
    "Count rows in HBase table");
  pgd.addClass(CellCounter.NAME, CellCounter.class,
    "Count cells in HBase table");
  pgd.addClass(Export.NAME, Export.class, "Write table data to HDFS.");
  pgd.addClass(Import.NAME, Import.class, "Import data written by Export.");
  pgd.addClass(ImportTsv.NAME, ImportTsv.class, "Import data in TSV format.");
  pgd.addClass(LoadIncrementalHFiles.NAME, LoadIncrementalHFiles.class,
               "Complete a bulk data load.");
  pgd.addClass(CopyTable.NAME, CopyTable.class,
      "Export a table from local cluster to peer cluster");
  pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, "Compare" +
      " the data from tables in two different clusters. WARNING: It" +
      " doesn't work for incrementColumnValues'd cells since the" +
      " timestamp is changed after being appended to the log.");
  ProgramDriver.class.getMethod("driver", new Class [] {String[].class}).
    invoke(pgd, new Object[]{args});
}
Project: hindex    File: Driver.java
/**
 * @param args
 * @throws Throwable
 */
public static void main(String[] args) throws Throwable {
  ProgramDriver pgd = new ProgramDriver();
  pgd.addClass(RowCounter.NAME, RowCounter.class,
    "Count rows in HBase table");
  pgd.addClass(CellCounter.NAME, CellCounter.class,
    "Count cells in HBase table");
  pgd.addClass(Export.NAME, Export.class, "Write table data to HDFS.");
  pgd.addClass(Import.NAME, Import.class, "Import data written by Export.");
  pgd.addClass(ImportTsv.NAME, ImportTsv.class, "Import data in TSV format.");
  pgd.addClass(LoadIncrementalHFiles.NAME, LoadIncrementalHFiles.class,
               "Complete a bulk data load.");
  pgd.addClass(CopyTable.NAME, CopyTable.class,
      "Export a table from local cluster to peer cluster");
  pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, "Compare" +
      " the data from tables in two different clusters. WARNING: It" +
      " doesn't work for incrementColumnValues'd cells since the" +
      " timestamp is changed after being appended to the log.");
  ProgramDriver.class.getMethod("driver", new Class [] {String[].class}).
    invoke(pgd, new Object[]{args});
}
Project: hadoop-oss    File: CoreTestDriver.java
public CoreTestDriver(ProgramDriver pgd) {
  this.pgd = pgd;
  try {
    pgd.addClass("testsetfile", TestSetFile.class, 
        "A test for flat files of binary key/value pairs.");
    pgd.addClass("testarrayfile", TestArrayFile.class, 
        "A test for flat files of binary key/value pairs.");
    pgd.addClass("testrpc", TestRPC.class, "A test for rpc.");
    pgd.addClass("testipc", TestIPC.class, "A test for ipc.");
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
Project: hadoop    File: YarnTestDriver.java
public YarnTestDriver(ProgramDriver pgd) {
  this.pgd = pgd;
  try {
    pgd.addClass(TestZKRMStateStorePerf.class.getSimpleName(),
        TestZKRMStateStorePerf.class,
        "ZKRMStateStore i/o benchmark.");
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
Project: hadoop    File: HdfsTestDriver.java
public HdfsTestDriver(ProgramDriver pgd) {
  this.pgd = pgd;
  try {
    pgd.addClass("dfsthroughput", BenchmarkThroughput.class, 
        "measure hdfs throughput");
    pgd.addClass("minidfscluster", MiniDFSClusterManager.class, 
        "Run a single-process mini DFS cluster");
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
Project: hadoop    File: CoreTestDriver.java
public CoreTestDriver(ProgramDriver pgd) {
  this.pgd = pgd;
  try {
    pgd.addClass("testsetfile", TestSetFile.class, 
        "A test for flat files of binary key/value pairs.");
    pgd.addClass("testarrayfile", TestArrayFile.class, 
        "A test for flat files of binary key/value pairs.");
    pgd.addClass("testrpc", TestRPC.class, "A test for rpc.");
    pgd.addClass("testipc", TestIPC.class, "A test for ipc.");
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
Project: ditb    File: TestDriver.java
@Test
public void testDriverMainMethod() throws Throwable {
  ProgramDriver programDriverMock = mock(ProgramDriver.class);
  Driver.setProgramDriver(programDriverMock);
  Driver.main(new String[]{});
  verify(programDriverMock).driver(Mockito.any(String[].class));    
}
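Unlike the other snippets, this one tests a driver rather than defining one: it injects a Mockito mock through Driver.setProgramDriver, then verifies that Driver.main delegates its arguments to ProgramDriver.driver.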
Project: aliyun-oss-hadoop-fs    File: YarnTestDriver.java
public YarnTestDriver(ProgramDriver pgd) {
  this.pgd = pgd;
  try {
    pgd.addClass(TestZKRMStateStorePerf.class.getSimpleName(),
        TestZKRMStateStorePerf.class,
        "ZKRMStateStore i/o benchmark.");
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
Project: aliyun-oss-hadoop-fs    File: HdfsTestDriver.java
public HdfsTestDriver(ProgramDriver pgd) {
  this.pgd = pgd;
  try {
    pgd.addClass("dfsthroughput", BenchmarkThroughput.class, 
        "measure hdfs throughput");
    pgd.addClass("minidfscluster", MiniDFSClusterManager.class, 
        "Run a single-process mini DFS cluster");
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
Project: aliyun-oss-hadoop-fs    File: CoreTestDriver.java
public CoreTestDriver(ProgramDriver pgd) {
  this.pgd = pgd;
  try {
    pgd.addClass("testsetfile", TestSetFile.class, 
        "A test for flat files of binary key/value pairs.");
    pgd.addClass("testarrayfile", TestArrayFile.class, 
        "A test for flat files of binary key/value pairs.");
    pgd.addClass("testrpc", TestRPC.class, "A test for rpc.");
    pgd.addClass("testipc", TestIPC.class, "A test for ipc.");
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
Project: big-c    File: YarnTestDriver.java
public YarnTestDriver(ProgramDriver pgd) {
  this.pgd = pgd;
  try {
    pgd.addClass(TestZKRMStateStorePerf.class.getSimpleName(),
        TestZKRMStateStorePerf.class,
        "ZKRMStateStore i/o benchmark.");
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
Project: big-c    File: HdfsTestDriver.java
public HdfsTestDriver(ProgramDriver pgd) {
  this.pgd = pgd;
  try {
    pgd.addClass("dfsthroughput", BenchmarkThroughput.class, 
        "measure hdfs throughput");
    pgd.addClass("minidfscluster", MiniDFSClusterManager.class, 
        "Run a single-process mini DFS cluster");
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
Project: big-c    File: CoreTestDriver.java
public CoreTestDriver(ProgramDriver pgd) {
  this.pgd = pgd;
  try {
    pgd.addClass("testsetfile", TestSetFile.class, 
        "A test for flat files of binary key/value pairs.");
    pgd.addClass("testarrayfile", TestArrayFile.class, 
        "A test for flat files of binary key/value pairs.");
    pgd.addClass("testrpc", TestRPC.class, "A test for rpc.");
    pgd.addClass("testipc", TestIPC.class, "A test for ipc.");
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
Project: hadoop-wiki-index    File: ExampleDriver.java
public static void main(String argv[]){
  int exitCode = -1;
  ProgramDriver pgd = new ProgramDriver();
  try {
    pgd.addClass("WikiTitleExtract", WikiTitleExtract.class,
            "A map/reduce program that extracts the content between <title></title> in xml files");

    pgd.addClass("WikiWordCount", WikiWordCount.class,
            "A map/reduce program that counts word frequencies in the files");

    pgd.addClass("BadDocCount", BadDocCount.class,
            "A map/reduce program that counts the documents which do not contain <title></title>");

    pgd.addClass("InvertedIndex", InvertedIndex.class,
            "A map/reduce program that calculates the inverted index");

    pgd.addClass("IDOffsetTitle", IDOffsetTitle.class,
            "A map/reduce program that extracts the <ID> <Offset> <title> triple");

    pgd.addClass("RedirectCount", RedirectCount.class,
            "A map/reduce program that counts the number of redirect pages");

    pgd.addClass("PageWordCount", PageWordCount.class,
            "A map/reduce program that counts the words of each page");

    pgd.addClass("PageMaxWordCount", PageMaxWordCount.class,
            "A map/reduce program that finds the most frequently occurring word on each page");

    exitCode = pgd.run(argv);
  }
  catch(Throwable e){
    e.printStackTrace();
  }

  System.exit(exitCode);
}
Project: LCIndex-HBase-0.94.16    File: Driver.java
/**
 * @param args
 * @throws Throwable
 */
public static void main(String[] args) throws Throwable {
  ProgramDriver pgd = new ProgramDriver();
  pgd.addClass(RowCounter.NAME, RowCounter.class,
    "Count rows in HBase table");
  ProgramDriver.class.getMethod("driver", new Class [] {String[].class}).
    invoke(pgd, new Object[]{args});
}
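This shorter variant registers only the rowcounter program; it appears to be the project's old-API (org.apache.hadoop.hbase.mapred) Driver, as opposed to the fuller org.apache.hadoop.hbase.mapreduce Driver from the same project shown earlier.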
Project: hadoop-2.6.0-cdh5.4.3    File: HdfsTestDriver.java
public HdfsTestDriver(ProgramDriver pgd) {
  this.pgd = pgd;
  try {
    pgd.addClass("dfsthroughput", BenchmarkThroughput.class, 
        "measure hdfs throughput");
    pgd.addClass("minidfscluster", MiniDFSClusterManager.class, 
        "Run a single-process mini DFS cluster");
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: CoreTestDriver.java
public CoreTestDriver(ProgramDriver pgd) {
  this.pgd = pgd;
  try {
    pgd.addClass("testsetfile", TestSetFile.class, 
        "A test for flat files of binary key/value pairs.");
    pgd.addClass("testarrayfile", TestArrayFile.class, 
        "A test for flat files of binary key/value pairs.");
    pgd.addClass("testrpc", TestRPC.class, "A test for rpc.");
    pgd.addClass("testipc", TestIPC.class, "A test for ipc.");
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: AllTestDriver.java
/**
 * A description of the test program for running all the tests using jar file
 */
public static void main(String argv[]){
  ProgramDriver pgd = new ProgramDriver();
  try {
    pgd.addClass("threadedmapbench", ThreadedMapBenchmark.class, 
                 "A map/reduce benchmark that compares the performance " + 
                 "of maps with multiple spills over maps with 1 spill");
    pgd.addClass("mrbench", MRBench.class, "A map/reduce benchmark that can create many small jobs");
    pgd.addClass("nnbench", NNBench.class, "A benchmark that stresses the namenode.");
    pgd.addClass("mapredtest", TestMapRed.class, "A map/reduce test check.");
    pgd.addClass("testfilesystem", TestFileSystem.class, "A test for FileSystem read/write.");
    pgd.addClass("testsequencefile", TestSequenceFile.class, "A test for flat files of binary key value pairs.");
    pgd.addClass("testsetfile", TestSetFile.class, "A test for flat files of binary key/value pairs.");
    pgd.addClass("testarrayfile", TestArrayFile.class, "A test for flat files of binary key/value pairs.");
    pgd.addClass("testrpc", TestRPC.class, "A test for rpc.");
    pgd.addClass("testsequencefileinputformat", TestSequenceFileInputFormat.class, "A test for sequence file input format.");
    pgd.addClass("testtextinputformat", TestTextInputFormat.class, "A test for text input format.");
    pgd.addClass("TestDFSIO", TestDFSIO.class, "Distributed i/o benchmark.");
    pgd.addClass("DFSCIOTest", DFSCIOTest.class, "Distributed i/o benchmark of libhdfs.");
    pgd.addClass("DistributedFSCheck", DistributedFSCheck.class, "Distributed checkup of the file system consistency.");
    pgd.addClass("testmapredsort", SortValidator.class, 
                 "A map/reduce program that validates the map-reduce framework's sort.");
    pgd.addClass("testbigmapoutput", BigMapOutput.class, 
                 "A map/reduce program that works on a very big " + 
                 "non-splittable file and does identity map/reduce");
    pgd.addClass("loadgen", GenericMRLoadGenerator.class, "Generic map/reduce load generator");
    pgd.addClass("filebench", FileBench.class, "Benchmark SequenceFile(Input|Output)Format (block,record compressed and uncompressed), Text(Input|Output)Format (compressed and uncompressed)");
    pgd.addClass("dfsthroughput", BenchmarkThroughput.class, 
                 "measure hdfs throughput");
    pgd.addClass("MRReliabilityTest", ReliabilityTest.class,
        "A program that tests the reliability of the MR framework by " +
        "injecting faults/failures");
    pgd.addClass("minicluster", MiniHadoopClusterManager.class,
        "Single process HDFS and MR cluster.");
    pgd.driver(argv);
  } catch(Throwable e) {
    e.printStackTrace();
  }
}