Java class org.apache.hadoop.mapred.GenericMRLoadGenerator — example source code

Project: hadoop-2.6.0-cdh5.4.3    File: AllTestDriver.java
/**
 * A description of the test program for running all the tests using jar file
 */
public static void main(String argv[]){
  ProgramDriver pgd = new ProgramDriver();
  try {
    pgd.addClass("threadedmapbench", ThreadedMapBenchmark.class, 
                 "A map/reduce benchmark that compares the performance " + 
                 "of maps with multiple spills over maps with 1 spill");
    pgd.addClass("mrbench", MRBench.class, "A map/reduce benchmark that can create many small jobs");
    pgd.addClass("nnbench", NNBench.class, "A benchmark that stresses the namenode.");
    pgd.addClass("mapredtest", TestMapRed.class, "A map/reduce test check.");
    pgd.addClass("testfilesystem", TestFileSystem.class, "A test for FileSystem read/write.");
    pgd.addClass("testsequencefile", TestSequenceFile.class, "A test for flat files of binary key value pairs.");
    pgd.addClass("testsetfile", TestSetFile.class, "A test for flat files of binary key/value pairs.");
    pgd.addClass("testarrayfile", TestArrayFile.class, "A test for flat files of binary key/value pairs.");
    pgd.addClass("testrpc", TestRPC.class, "A test for rpc.");
    pgd.addClass("testsequencefileinputformat", TestSequenceFileInputFormat.class, "A test for sequence file input format.");
    pgd.addClass("testtextinputformat", TestTextInputFormat.class, "A test for text input format.");
    pgd.addClass("TestDFSIO", TestDFSIO.class, "Distributed i/o benchmark.");
    pgd.addClass("DFSCIOTest", DFSCIOTest.class, "Distributed i/o benchmark of libhdfs.");
    pgd.addClass("DistributedFSCheck", DistributedFSCheck.class, "Distributed checkup of the file system consistency.");
    pgd.addClass("testmapredsort", SortValidator.class, 
                 "A map/reduce program that validates the map-reduce framework's sort.");
    pgd.addClass("testbigmapoutput", BigMapOutput.class, 
                 "A map/reduce program that works on a very big " + 
                 "non-splittable file and does identity map/reduce");
    pgd.addClass("loadgen", GenericMRLoadGenerator.class, "Generic map/reduce load generator");
    pgd.addClass("filebench", FileBench.class, "Benchmark SequenceFile(Input|Output)Format (block,record compressed and uncompressed), Text(Input|Output)Format (compressed and uncompressed)");
    pgd.addClass("dfsthroughput", BenchmarkThroughput.class, 
                 "measure hdfs throughput");
    pgd.addClass("MRReliabilityTest", ReliabilityTest.class,
        "A program that tests the reliability of the MR framework by " +
        "injecting faults/failures");
    pgd.addClass("minicluster", MiniHadoopClusterManager.class,
        "Single process HDFS and MR cluster.");
    pgd.driver(argv);
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
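
The "loadgen" entry above simply maps the program name to org.apache.hadoop.mapred.GenericMRLoadGenerator, which implements Tool, so the same program can also be launched directly with ToolRunner instead of going through ProgramDriver. The launcher below is a minimal sketch added for illustration; the class name and argument handling are assumptions, not part of any of the listed projects.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.GenericMRLoadGenerator;
import org.apache.hadoop.util.ToolRunner;

public class LoadGenLauncher {
  public static void main(String[] args) throws Exception {
    // Arguments are forwarded unchanged to GenericMRLoadGenerator.run(String[]),
    // much as ProgramDriver would after stripping the program name ("loadgen").
    int exitCode = ToolRunner.run(new Configuration(),
                                  new GenericMRLoadGenerator(), args);
    System.exit(exitCode);
  }
}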
Project: hadoop-on-lustre    File: AllTestDriver.java
/**
 * A description of the test program for running all the tests using jar file
 */
public static void main(String argv[]){
  ProgramDriver pgd = new ProgramDriver();
  try {
    pgd.addClass("threadedmapbench", ThreadedMapBenchmark.class, 
                 "A map/reduce benchmark that compares the performance " + 
                 "of maps with multiple spills over maps with 1 spill");
    pgd.addClass("mrbench", MRBench.class, "A map/reduce benchmark that can create many small jobs");
    pgd.addClass("nnbench", NNBench.class, "A benchmark that stresses the namenode.");
    pgd.addClass("mapredtest", TestMapRed.class, "A map/reduce test check.");
    pgd.addClass("testfilesystem", TestFileSystem.class, "A test for FileSystem read/write.");
    pgd.addClass("testsequencefile", TestSequenceFile.class, "A test for flat files of binary key value pairs.");
    pgd.addClass("testsetfile", TestSetFile.class, "A test for flat files of binary key/value pairs.");
    pgd.addClass("testarrayfile", TestArrayFile.class, "A test for flat files of binary key/value pairs.");
    pgd.addClass("testrpc", TestRPC.class, "A test for rpc.");
    pgd.addClass("testipc", TestIPC.class, "A test for ipc.");
    pgd.addClass("testsequencefileinputformat", TestSequenceFileInputFormat.class, "A test for sequence file input format.");
    pgd.addClass("testtextinputformat", TestTextInputFormat.class, "A test for text input format.");
    pgd.addClass("TestDFSIO", TestDFSIO.class, "Distributed i/o benchmark.");
    pgd.addClass("DFSCIOTest", DFSCIOTest.class, "Distributed i/o benchmark of libhdfs.");
    pgd.addClass("DistributedFSCheck", DistributedFSCheck.class, "Distributed checkup of the file system consistency.");
    pgd.addClass("testmapredsort", SortValidator.class, 
                 "A map/reduce program that validates the map-reduce framework's sort.");
    pgd.addClass("testbigmapoutput", BigMapOutput.class, 
                 "A map/reduce program that works on a very big " + 
                 "non-splittable file and does identity map/reduce");
    pgd.addClass("loadgen", GenericMRLoadGenerator.class, "Generic map/reduce load generator");
    pgd.addClass("filebench", FileBench.class, "Benchmark SequenceFile(Input|Output)Format (block,record compressed and uncompressed), Text(Input|Output)Format (compressed and uncompressed)");
    pgd.addClass("dfsthroughput", BenchmarkThroughput.class, 
                 "measure hdfs throughput");
    pgd.addClass("MRReliabilityTest", ReliabilityTest.class,
        "A program that tests the reliability of the MR framework by " +
        "injecting faults/failures");
    pgd.driver(argv);
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
Project: RDFS    File: AllTestDriver.java
/**
 * A description of the test program for running all the tests using jar file
 */
public static void main(String argv[]){
  ProgramDriver pgd = new ProgramDriver();
  try {
    pgd.addClass("threadedmapbench", ThreadedMapBenchmark.class, 
                 "A map/reduce benchmark that compares the performance " + 
                 "of maps with multiple spills over maps with 1 spill");
    pgd.addClass("mrbench", MRBench.class, "A map/reduce benchmark that can create many small jobs");
    pgd.addClass("nnbench", NNBench.class, "A benchmark that stresses the namenode.");
    pgd.addClass("mapredtest", TestMapRed.class, "A map/reduce test check.");
    pgd.addClass("testfilesystem", TestFileSystem.class, "A test for FileSystem read/write.");
    pgd.addClass("testsequencefile", TestSequenceFile.class, "A test for flat files of binary key value pairs.");
    pgd.addClass("testsetfile", TestSetFile.class, "A test for flat files of binary key/value pairs.");
    pgd.addClass("testarrayfile", TestArrayFile.class, "A test for flat files of binary key/value pairs.");
    pgd.addClass("testrpc", TestRPC.class, "A test for rpc.");
    pgd.addClass("testipc", TestIPC.class, "A test for ipc.");
    pgd.addClass("testsequencefileinputformat", TestSequenceFileInputFormat.class, "A test for sequence file input format.");
    pgd.addClass("testtextinputformat", TestTextInputFormat.class, "A test for text input format.");
    pgd.addClass("TestDFSIO", TestDFSIO.class, "Distributed i/o benchmark.");
    pgd.addClass("DFSCIOTest", DFSCIOTest.class, "Distributed i/o benchmark of libhdfs.");
    pgd.addClass("DistributedFSCheck", DistributedFSCheck.class, "Distributed checkup of the file system consistency.");
    pgd.addClass("testmapredsort", SortValidator.class, 
                 "A map/reduce program that validates the map-reduce framework's sort.");
    pgd.addClass("testbigmapoutput", BigMapOutput.class, 
                 "A map/reduce program that works on a very big " + 
                 "non-splittable file and does identity map/reduce");
    pgd.addClass("loadgen", GenericMRLoadGenerator.class, "Generic map/reduce load generator");
    pgd.addClass("filebench", FileBench.class, "Benchmark SequenceFile(Input|Output)Format (block,record compressed and uncompressed), Text(Input|Output)Format (compressed and uncompressed)");
    pgd.addClass("dfsthroughput", BenchmarkThroughput.class, 
                 "measure hdfs throughput");
    pgd.addClass("MRReliabilityTest", ReliabilityTest.class,
        "A program that tests the reliability of the MR framework by " +
        "injecting faults/failures");
    pgd.driver(argv);
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
Project: hadoop-0.20    File: AllTestDriver.java
/**
 * A description of the test program for running all the tests using jar file
 */
public static void main(String argv[]){
  ProgramDriver pgd = new ProgramDriver();
  try {
    pgd.addClass("threadedmapbench", ThreadedMapBenchmark.class, 
                 "A map/reduce benchmark that compares the performance " + 
                 "of maps with multiple spills over maps with 1 spill");
    pgd.addClass("mrbench", MRBench.class, "A map/reduce benchmark that can create many small jobs");
    pgd.addClass("nnbench", NNBench.class, "A benchmark that stresses the namenode.");
    pgd.addClass("mapredtest", TestMapRed.class, "A map/reduce test check.");
    pgd.addClass("testfilesystem", TestFileSystem.class, "A test for FileSystem read/write.");
    pgd.addClass("testsequencefile", TestSequenceFile.class, "A test for flat files of binary key value pairs.");
    pgd.addClass("testsetfile", TestSetFile.class, "A test for flat files of binary key/value pairs.");
    pgd.addClass("testarrayfile", TestArrayFile.class, "A test for flat files of binary key/value pairs.");
    pgd.addClass("testrpc", TestRPC.class, "A test for rpc.");
    pgd.addClass("testipc", TestIPC.class, "A test for ipc.");
    pgd.addClass("testsequencefileinputformat", TestSequenceFileInputFormat.class, "A test for sequence file input format.");
    pgd.addClass("testtextinputformat", TestTextInputFormat.class, "A test for text input format.");
    pgd.addClass("TestDFSIO", TestDFSIO.class, "Distributed i/o benchmark.");
    pgd.addClass("DFSCIOTest", DFSCIOTest.class, "Distributed i/o benchmark of libhdfs.");
    pgd.addClass("DistributedFSCheck", DistributedFSCheck.class, "Distributed checkup of the file system consistency.");
    pgd.addClass("testmapredsort", SortValidator.class, 
                 "A map/reduce program that validates the map-reduce framework's sort.");
    pgd.addClass("testbigmapoutput", BigMapOutput.class, 
                 "A map/reduce program that works on a very big " + 
                 "non-splittable file and does identity map/reduce");
    pgd.addClass("loadgen", GenericMRLoadGenerator.class, "Generic map/reduce load generator");
    pgd.addClass("filebench", FileBench.class, "Benchmark SequenceFile(Input|Output)Format (block,record compressed and uncompressed), Text(Input|Output)Format (compressed and uncompressed)");
    pgd.addClass("dfsthroughput", BenchmarkThroughput.class, 
                 "measure hdfs throughput");
    pgd.addClass("MRReliabilityTest", ReliabilityTest.class,
        "A program that tests the reliability of the MR framework by " +
        "injecting faults/failures");
    pgd.driver(argv);
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
Project: hanoi-hadoop-2.0.0-cdh    File: AllTestDriver.java
/**
 * A description of the test program for running all the tests using jar file
 */
public static void main(String argv[]){
  ProgramDriver pgd = new ProgramDriver();
  try {
    pgd.addClass("threadedmapbench", ThreadedMapBenchmark.class, 
                 "A map/reduce benchmark that compares the performance " + 
                 "of maps with multiple spills over maps with 1 spill");
    pgd.addClass("mrbench", MRBench.class, "A map/reduce benchmark that can create many small jobs");
    pgd.addClass("nnbench", NNBench.class, "A benchmark that stresses the namenode.");
    pgd.addClass("mapredtest", TestMapRed.class, "A map/reduce test check.");
    pgd.addClass("testfilesystem", TestFileSystem.class, "A test for FileSystem read/write.");
    pgd.addClass("testsequencefile", TestSequenceFile.class, "A test for flat files of binary key value pairs.");
    pgd.addClass("testsetfile", TestSetFile.class, "A test for flat files of binary key/value pairs.");
    pgd.addClass("testarrayfile", TestArrayFile.class, "A test for flat files of binary key/value pairs.");
    pgd.addClass("testrpc", TestRPC.class, "A test for rpc.");
    pgd.addClass("testsequencefileinputformat", TestSequenceFileInputFormat.class, "A test for sequence file input format.");
    pgd.addClass("testtextinputformat", TestTextInputFormat.class, "A test for text input format.");
    pgd.addClass("TestDFSIO", TestDFSIO.class, "Distributed i/o benchmark.");
    pgd.addClass("DFSCIOTest", DFSCIOTest.class, "Distributed i/o benchmark of libhdfs.");
    pgd.addClass("DistributedFSCheck", DistributedFSCheck.class, "Distributed checkup of the file system consistency.");
    pgd.addClass("testmapredsort", SortValidator.class, 
                 "A map/reduce program that validates the map-reduce framework's sort.");
    pgd.addClass("testbigmapoutput", BigMapOutput.class, 
                 "A map/reduce program that works on a very big " + 
                 "non-splittable file and does identity map/reduce");
    pgd.addClass("loadgen", GenericMRLoadGenerator.class, "Generic map/reduce load generator");
    pgd.addClass("filebench", FileBench.class, "Benchmark SequenceFile(Input|Output)Format (block,record compressed and uncompressed), Text(Input|Output)Format (compressed and uncompressed)");
    pgd.addClass("dfsthroughput", BenchmarkThroughput.class, 
                 "measure hdfs throughput");
    pgd.addClass("MRReliabilityTest", ReliabilityTest.class,
        "A program that tests the reliability of the MR framework by " +
        "injecting faults/failures");
    pgd.addClass("minicluster", MiniHadoopClusterManager.class,
        "Single process HDFS and MR cluster.");
    pgd.driver(argv);
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
Project: hortonworks-extension    File: AllTestDriver.java
/**
 * A description of the test program for running all the tests using jar file
 */
public static void main(String argv[]){
  ProgramDriver pgd = new ProgramDriver();
  try {
    pgd.addClass("threadedmapbench", ThreadedMapBenchmark.class, 
                 "A map/reduce benchmark that compares the performance " + 
                 "of maps with multiple spills over maps with 1 spill");
    pgd.addClass("mrbench", MRBench.class, "A map/reduce benchmark that can create many small jobs");
    pgd.addClass("nnbench", NNBench.class, "A benchmark that stresses the namenode.");
    pgd.addClass("mapredtest", TestMapRed.class, "A map/reduce test check.");
    pgd.addClass("testfilesystem", TestFileSystem.class, "A test for FileSystem read/write.");
    pgd.addClass("testsequencefile", TestSequenceFile.class, "A test for flat files of binary key value pairs.");
    pgd.addClass("testsetfile", TestSetFile.class, "A test for flat files of binary key/value pairs.");
    pgd.addClass("testarrayfile", TestArrayFile.class, "A test for flat files of binary key/value pairs.");
    pgd.addClass("testrpc", TestRPC.class, "A test for rpc.");
    pgd.addClass("testipc", TestIPC.class, "A test for ipc.");
    pgd.addClass("testsequencefileinputformat", TestSequenceFileInputFormat.class, "A test for sequence file input format.");
    pgd.addClass("testtextinputformat", TestTextInputFormat.class, "A test for text input format.");
    pgd.addClass("TestDFSIO", TestDFSIO.class, "Distributed i/o benchmark.");
    pgd.addClass("DFSCIOTest", DFSCIOTest.class, "Distributed i/o benchmark of libhdfs.");
    pgd.addClass("DistributedFSCheck", DistributedFSCheck.class, "Distributed checkup of the file system consistency.");
    pgd.addClass("testmapredsort", SortValidator.class, 
                 "A map/reduce program that validates the map-reduce framework's sort.");
    pgd.addClass("testbigmapoutput", BigMapOutput.class, 
                 "A map/reduce program that works on a very big " + 
                 "non-splittable file and does identity map/reduce");
    pgd.addClass("loadgen", GenericMRLoadGenerator.class, "Generic map/reduce load generator");
    pgd.addClass("filebench", FileBench.class, "Benchmark SequenceFile(Input|Output)Format (block,record compressed and uncompressed), Text(Input|Output)Format (compressed and uncompressed)");
    pgd.addClass("dfsthroughput", BenchmarkThroughput.class, 
                 "measure hdfs throughput");
    pgd.addClass("MRReliabilityTest", ReliabilityTest.class,
        "A program that tests the reliability of the MR framework by " +
        "injecting faults/failures");
    pgd.driver(argv);
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
Project: hadoop-gpu    File: AllTestDriver.java
/**
 * A description of the test program for running all the tests using jar file
 */
public static void main(String argv[]){
  ProgramDriver pgd = new ProgramDriver();
  try {
    pgd.addClass("threadedmapbench", ThreadedMapBenchmark.class, 
                 "A map/reduce benchmark that compares the performance " + 
                 "of maps with multiple spills over maps with 1 spill");
    pgd.addClass("mrbench", MRBench.class, "A map/reduce benchmark that can create many small jobs");
    pgd.addClass("nnbench", NNBench.class, "A benchmark that stresses the namenode.");
    pgd.addClass("mapredtest", TestMapRed.class, "A map/reduce test check.");
    pgd.addClass("testfilesystem", TestFileSystem.class, "A test for FileSystem read/write.");
    pgd.addClass("testsequencefile", TestSequenceFile.class, "A test for flat files of binary key value pairs.");
    pgd.addClass("testsetfile", TestSetFile.class, "A test for flat files of binary key/value pairs.");
    pgd.addClass("testarrayfile", TestArrayFile.class, "A test for flat files of binary key/value pairs.");
    pgd.addClass("testrpc", TestRPC.class, "A test for rpc.");
    pgd.addClass("testipc", TestIPC.class, "A test for ipc.");
    pgd.addClass("testsequencefileinputformat", TestSequenceFileInputFormat.class, "A test for sequence file input format.");
    pgd.addClass("testtextinputformat", TestTextInputFormat.class, "A test for text input format.");
    pgd.addClass("TestDFSIO", TestDFSIO.class, "Distributed i/o benchmark.");
    pgd.addClass("DFSCIOTest", DFSCIOTest.class, "Distributed i/o benchmark of libhdfs.");
    pgd.addClass("DistributedFSCheck", DistributedFSCheck.class, "Distributed checkup of the file system consistency.");
    pgd.addClass("testmapredsort", SortValidator.class, 
                 "A map/reduce program that validates the map-reduce framework's sort.");
    pgd.addClass("testbigmapoutput", BigMapOutput.class, 
                 "A map/reduce program that works on a very big " + 
                 "non-splittable file and does identity map/reduce");
    pgd.addClass("loadgen", GenericMRLoadGenerator.class, "Generic map/reduce load generator");
    pgd.addClass("filebench", FileBench.class, "Benchmark SequenceFile(Input|Output)Format (block,record compressed and uncompressed), Text(Input|Output)Format (compressed and uncompressed)");
    pgd.addClass("dfsthroughput", BenchmarkThroughput.class, 
                 "measure hdfs throughput");
    pgd.addClass("MRReliabilityTest", ReliabilityTest.class,
        "A program that tests the reliability of the MR framework by " +
        "injecting faults/failures");
    pgd.driver(argv);
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
Project: hadoop    File: MapredTestDriver.java
public MapredTestDriver(ProgramDriver pgd) {
  this.pgd = pgd;
  try {
    pgd.addClass("testsequencefile", TestSequenceFile.class, 
    "A test for flat files of binary key value pairs.");
    pgd.addClass("threadedmapbench", ThreadedMapBenchmark.class, 
        "A map/reduce benchmark that compares the performance " + 
        "of maps with multiple spills over maps with 1 spill");
    pgd.addClass("mrbench", MRBench.class, 
        "A map/reduce benchmark that can create many small jobs");
    pgd.addClass("mapredtest", TestMapRed.class, "A map/reduce test check.");
    pgd.addClass("testsequencefileinputformat", 
        TestSequenceFileInputFormat.class, 
        "A test for sequence file input format.");
    pgd.addClass("testtextinputformat", TestTextInputFormat.class, 
        "A test for text input format.");
    pgd.addClass("testmapredsort", SortValidator.class, 
        "A map/reduce program that validates the " +
        "map-reduce framework's sort.");
    pgd.addClass("testbigmapoutput", BigMapOutput.class, 
        "A map/reduce program that works on a very big " +
        "non-splittable file and does identity map/reduce");
    pgd.addClass("loadgen", GenericMRLoadGenerator.class, 
        "Generic map/reduce load generator");
    pgd.addClass("MRReliabilityTest", ReliabilityTest.class,
        "A program that tests the reliability of the MR framework by " +
        "injecting faults/failures");
    pgd.addClass("fail", FailJob.class, "a job that always fails");
    pgd.addClass("sleep", SleepJob.class, 
                 "A job that sleeps at each map and reduce task.");
    pgd.addClass("nnbench", NNBench.class, 
        "A benchmark that stresses the namenode.");
    pgd.addClass("testfilesystem", TestFileSystem.class, 
        "A test for FileSystem read/write.");
    pgd.addClass(TestDFSIO.class.getSimpleName(), TestDFSIO.class, 
        "Distributed i/o benchmark.");
    pgd.addClass("DFSCIOTest", DFSCIOTest.class, "" +
        "Distributed i/o benchmark of libhdfs.");
    pgd.addClass("DistributedFSCheck", DistributedFSCheck.class, 
        "Distributed checkup of the file system consistency.");
    pgd.addClass("filebench", FileBench.class, 
        "Benchmark SequenceFile(Input|Output)Format " +
        "(block,record compressed and uncompressed), " +
        "Text(Input|Output)Format (compressed and uncompressed)");
    pgd.addClass(JHLogAnalyzer.class.getSimpleName(), JHLogAnalyzer.class, 
        "Job History Log analyzer.");
    pgd.addClass(SliveTest.class.getSimpleName(), SliveTest.class, 
        "HDFS Stress Test and Live Data Verification.");
    pgd.addClass("minicluster", MiniHadoopClusterManager.class,
    "Single process HDFS and MR cluster.");
    pgd.addClass("largesorter", LargeSorter.class,
        "Large-Sort tester");
    pgd.addClass("NNloadGenerator", LoadGenerator.class,
            "Generate load on Namenode using NN loadgenerator run WITHOUT MR");
    pgd.addClass("NNloadGeneratorMR", LoadGeneratorMR.class,
        "Generate load on Namenode using NN loadgenerator run as MR job");
    pgd.addClass("NNstructureGenerator", StructureGenerator.class,
        "Generate the structure to be used by NNdataGenerator");
    pgd.addClass("NNdataGenerator", DataGenerator.class,
        "Generate the data to be used by NNloadGenerator");
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
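
MapredTestDriver only registers the programs; dispatching still happens through ProgramDriver. The following is a minimal sketch of how such a driver is typically started from a main method, written here as an assumption modeled on the constructor above rather than copied from any of the listed projects.

public static void main(String[] argv) {
  ProgramDriver pgd = new ProgramDriver();
  new MapredTestDriver(pgd);   // registers all the test programs shown above
  try {
    // ProgramDriver looks up argv[0] ("loadgen", "sleep", ...) and invokes
    // that program's main with the remaining arguments.
    pgd.driver(argv);
  } catch (Throwable e) {
    e.printStackTrace();
  }
}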
Project: aliyun-oss-hadoop-fs    File: MapredTestDriver.java
public MapredTestDriver(ProgramDriver pgd) {
  this.pgd = pgd;
  try {
    pgd.addClass("testsequencefile", TestSequenceFile.class,
    "A test for flat files of binary key value pairs.");
    pgd.addClass("threadedmapbench", ThreadedMapBenchmark.class,
        "A map/reduce benchmark that compares the performance " +
        "of maps with multiple spills over maps with 1 spill");
    pgd.addClass("mrbench", MRBench.class,
        "A map/reduce benchmark that can create many small jobs");
    pgd.addClass("mapredtest", TestMapRed.class, "A map/reduce test check.");
    pgd.addClass("testsequencefileinputformat",
        TestSequenceFileInputFormat.class,
        "A test for sequence file input format.");
    pgd.addClass("testtextinputformat", TestTextInputFormat.class,
        "A test for text input format.");
    pgd.addClass("testmapredsort", SortValidator.class,
        "A map/reduce program that validates the " +
        "map-reduce framework's sort.");
    pgd.addClass("testbigmapoutput", BigMapOutput.class,
        "A map/reduce program that works on a very big " +
        "non-splittable file and does identity map/reduce");
    pgd.addClass("loadgen", GenericMRLoadGenerator.class,
        "Generic map/reduce load generator");
    pgd.addClass("MRReliabilityTest", ReliabilityTest.class,
        "A program that tests the reliability of the MR framework by " +
        "injecting faults/failures");
    pgd.addClass("fail", FailJob.class, "a job that always fails");
    pgd.addClass("sleep", SleepJob.class,
                 "A job that sleeps at each map and reduce task.");
    pgd.addClass("timelineperformance", TimelineServicePerformance.class,
                 "A job that launches mappers to test timlineserver performance.");
    pgd.addClass("nnbench", NNBench.class,
        "A benchmark that stresses the namenode w/ MR.");
    pgd.addClass("nnbenchWithoutMR", NNBenchWithoutMR.class,
        "A benchmark that stresses the namenode w/o MR.");
    pgd.addClass("testfilesystem", TestFileSystem.class,
        "A test for FileSystem read/write.");
    pgd.addClass(TestDFSIO.class.getSimpleName(), TestDFSIO.class,
        "Distributed i/o benchmark.");
    pgd.addClass("DFSCIOTest", DFSCIOTest.class, "" +
        "Distributed i/o benchmark of libhdfs.");
    pgd.addClass("DistributedFSCheck", DistributedFSCheck.class,
        "Distributed checkup of the file system consistency.");
    pgd.addClass("filebench", FileBench.class,
        "Benchmark SequenceFile(Input|Output)Format " +
        "(block,record compressed and uncompressed), " +
        "Text(Input|Output)Format (compressed and uncompressed)");
    pgd.addClass(JHLogAnalyzer.class.getSimpleName(), JHLogAnalyzer.class,
        "Job History Log analyzer.");
    pgd.addClass(SliveTest.class.getSimpleName(), SliveTest.class,
        "HDFS Stress Test and Live Data Verification.");
    pgd.addClass("minicluster", MiniHadoopClusterManager.class,
    "Single process HDFS and MR cluster.");
    pgd.addClass("largesorter", LargeSorter.class,
        "Large-Sort tester");
    pgd.addClass("NNloadGenerator", LoadGenerator.class,
            "Generate load on Namenode using NN loadgenerator run WITHOUT MR");
    pgd.addClass("NNloadGeneratorMR", LoadGeneratorMR.class,
        "Generate load on Namenode using NN loadgenerator run as MR job");
    pgd.addClass("NNstructureGenerator", StructureGenerator.class,
        "Generate the structure to be used by NNdataGenerator");
    pgd.addClass("NNdataGenerator", DataGenerator.class,
        "Generate the data to be used by NNloadGenerator");
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
Project: big-c    File: MapredTestDriver.java
public MapredTestDriver(ProgramDriver pgd) {
  this.pgd = pgd;
  try {
    pgd.addClass("testsequencefile", TestSequenceFile.class, 
    "A test for flat files of binary key value pairs.");
    pgd.addClass("threadedmapbench", ThreadedMapBenchmark.class, 
        "A map/reduce benchmark that compares the performance " + 
        "of maps with multiple spills over maps with 1 spill");
    pgd.addClass("mrbench", MRBench.class, 
        "A map/reduce benchmark that can create many small jobs");
    pgd.addClass("mapredtest", TestMapRed.class, "A map/reduce test check.");
    pgd.addClass("testsequencefileinputformat", 
        TestSequenceFileInputFormat.class, 
        "A test for sequence file input format.");
    pgd.addClass("testtextinputformat", TestTextInputFormat.class, 
        "A test for text input format.");
    pgd.addClass("testmapredsort", SortValidator.class, 
        "A map/reduce program that validates the " +
        "map-reduce framework's sort.");
    pgd.addClass("testbigmapoutput", BigMapOutput.class, 
        "A map/reduce program that works on a very big " +
        "non-splittable file and does identity map/reduce");
    pgd.addClass("loadgen", GenericMRLoadGenerator.class, 
        "Generic map/reduce load generator");
    pgd.addClass("MRReliabilityTest", ReliabilityTest.class,
        "A program that tests the reliability of the MR framework by " +
        "injecting faults/failures");
    pgd.addClass("fail", FailJob.class, "a job that always fails");
    pgd.addClass("sleep", SleepJob.class, 
                 "A job that sleeps at each map and reduce task.");
    pgd.addClass("nnbench", NNBench.class, 
        "A benchmark that stresses the namenode.");
    pgd.addClass("testfilesystem", TestFileSystem.class, 
        "A test for FileSystem read/write.");
    pgd.addClass(TestDFSIO.class.getSimpleName(), TestDFSIO.class, 
        "Distributed i/o benchmark.");
    pgd.addClass("DFSCIOTest", DFSCIOTest.class, "" +
        "Distributed i/o benchmark of libhdfs.");
    pgd.addClass("DistributedFSCheck", DistributedFSCheck.class, 
        "Distributed checkup of the file system consistency.");
    pgd.addClass("filebench", FileBench.class, 
        "Benchmark SequenceFile(Input|Output)Format " +
        "(block,record compressed and uncompressed), " +
        "Text(Input|Output)Format (compressed and uncompressed)");
    pgd.addClass(JHLogAnalyzer.class.getSimpleName(), JHLogAnalyzer.class, 
        "Job History Log analyzer.");
    pgd.addClass(SliveTest.class.getSimpleName(), SliveTest.class, 
        "HDFS Stress Test and Live Data Verification.");
    pgd.addClass("minicluster", MiniHadoopClusterManager.class,
    "Single process HDFS and MR cluster.");
    pgd.addClass("largesorter", LargeSorter.class,
        "Large-Sort tester");
    pgd.addClass("NNloadGenerator", LoadGenerator.class,
            "Generate load on Namenode using NN loadgenerator run WITHOUT MR");
    pgd.addClass("NNloadGeneratorMR", LoadGeneratorMR.class,
        "Generate load on Namenode using NN loadgenerator run as MR job");
    pgd.addClass("NNstructureGenerator", StructureGenerator.class,
        "Generate the structure to be used by NNdataGenerator");
    pgd.addClass("NNdataGenerator", DataGenerator.class,
        "Generate the data to be used by NNloadGenerator");
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: MapredTestDriver.java
public MapredTestDriver(ProgramDriver pgd) {
  this.pgd = pgd;
  try {
    pgd.addClass("testsequencefile", TestSequenceFile.class, 
    "A test for flat files of binary key value pairs.");
    pgd.addClass("threadedmapbench", ThreadedMapBenchmark.class, 
        "A map/reduce benchmark that compares the performance " + 
        "of maps with multiple spills over maps with 1 spill");
    pgd.addClass("mrbench", MRBench.class, 
        "A map/reduce benchmark that can create many small jobs");
    pgd.addClass("mapredtest", TestMapRed.class, "A map/reduce test check.");
    pgd.addClass("testsequencefileinputformat", 
        TestSequenceFileInputFormat.class, 
        "A test for sequence file input format.");
    pgd.addClass("testtextinputformat", TestTextInputFormat.class, 
        "A test for text input format.");
    pgd.addClass("testmapredsort", SortValidator.class, 
        "A map/reduce program that validates the " +
        "map-reduce framework's sort.");
    pgd.addClass("testbigmapoutput", BigMapOutput.class, 
        "A map/reduce program that works on a very big " +
        "non-splittable file and does identity map/reduce");
    pgd.addClass("loadgen", GenericMRLoadGenerator.class, 
        "Generic map/reduce load generator");
    pgd.addClass("MRReliabilityTest", ReliabilityTest.class,
        "A program that tests the reliability of the MR framework by " +
        "injecting faults/failures");
    pgd.addClass("fail", FailJob.class, "a job that always fails");
    pgd.addClass("sleep", SleepJob.class, 
                 "A job that sleeps at each map and reduce task.");
    pgd.addClass("nnbench", NNBench.class, 
        "A benchmark that stresses the namenode.");
    pgd.addClass("testfilesystem", TestFileSystem.class, 
        "A test for FileSystem read/write.");
    pgd.addClass(TestDFSIO.class.getSimpleName(), TestDFSIO.class, 
        "Distributed i/o benchmark.");
    pgd.addClass("DFSCIOTest", DFSCIOTest.class, "" +
        "Distributed i/o benchmark of libhdfs.");
    pgd.addClass("DistributedFSCheck", DistributedFSCheck.class, 
        "Distributed checkup of the file system consistency.");
    pgd.addClass("filebench", FileBench.class, 
        "Benchmark SequenceFile(Input|Output)Format " +
        "(block,record compressed and uncompressed), " +
        "Text(Input|Output)Format (compressed and uncompressed)");
    pgd.addClass(JHLogAnalyzer.class.getSimpleName(), JHLogAnalyzer.class, 
        "Job History Log analyzer.");
    pgd.addClass(SliveTest.class.getSimpleName(), SliveTest.class, 
        "HDFS Stress Test and Live Data Verification.");
    pgd.addClass("minicluster", MiniHadoopClusterManager.class,
    "Single process HDFS and MR cluster.");
    pgd.addClass("largesorter", LargeSorter.class,
        "Large-Sort tester");
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: GenericMRLoadJobCreator.java
public static JobConf createJob(String[] argv, boolean mapoutputCompressed,
    boolean outputCompressed) throws Exception {

  JobConf job = new JobConf();
  job.setJarByClass(GenericMRLoadGenerator.class);
  job.setMapperClass(SampleMapper.class);
  job.setReducerClass(SampleReducer.class);
  if (!parseArgs(argv, job)) {
    return null;
  }

  if (null == FileOutputFormat.getOutputPath(job)) {
    // No output dir? No writes
    job.setOutputFormat(NullOutputFormat.class);
  }

  if (0 == FileInputFormat.getInputPaths(job).length) {
    // No input dir? Generate random data
    System.err.println("No input path; ignoring InputFormat");
    confRandom(job);
  } else if (null != job.getClass("mapred.indirect.input.format", null)) {
    // specified IndirectInputFormat? Build src list
    JobClient jClient = new JobClient(job);
    Path sysdir = jClient.getSystemDir();
    Random r = new Random();
    Path indirInputFile = new Path(sysdir, Integer.toString(r
        .nextInt(Integer.MAX_VALUE), 36)
        + "_files");
    job.set("mapred.indirect.input.file", indirInputFile.toString());
    SequenceFile.Writer writer = SequenceFile.createWriter(sysdir
        .getFileSystem(job), job, indirInputFile, LongWritable.class,
        Text.class, SequenceFile.CompressionType.NONE);
    try {
      for (Path p : FileInputFormat.getInputPaths(job)) {
        FileSystem fs = p.getFileSystem(job);
        Stack<Path> pathstack = new Stack<Path>();
        pathstack.push(p);
        while (!pathstack.empty()) {
          for (FileStatus stat : fs.listStatus(pathstack.pop())) {
            if (stat.isDir()) {
              if (!stat.getPath().getName().startsWith("_")) {
                pathstack.push(stat.getPath());
              }
            } else {
              writer.sync();
              writer.append(new LongWritable(stat.getLen()), new Text(stat
                  .getPath().toUri().toString()));
            }
          }
        }
      }
    } finally {
      writer.close();
    }
  }

  job.setCompressMapOutput(mapoutputCompressed);
  job.setBoolean("mapred.output.compress", outputCompressed);
  return job;

}
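
createJob returns a fully configured JobConf (or null when argument parsing fails), so the caller is expected to submit it. The fragment below is a hedged usage sketch: the class name, the argument array, and the compression flags are illustrative assumptions, and GenericMRLoadJobCreator is assumed to be the public class from the org.apache.hadoop.mapred package shown in this listing.

import org.apache.hadoop.mapred.GenericMRLoadJobCreator;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;

public class LoadJobSubmitter {
  public static void main(String[] args) throws Exception {
    // Compress map output, leave final output uncompressed (illustrative choice).
    JobConf job = GenericMRLoadJobCreator.createJob(args, true, false);
    if (job == null) {
      System.err.println("Argument parsing failed; nothing to submit.");
      return;
    }
    RunningJob rj = JobClient.runJob(job);   // submits and waits for completion
    System.out.println("Job successful: " + rj.isSuccessful());
  }
}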
Project: hadoop-EAR    File: GenericMRLoadJobCreator.java
public static JobConf createJob(String[] argv, boolean mapoutputCompressed,
    boolean outputCompressed) throws Exception {

  JobConf job = new JobConf();
  job.setJarByClass(GenericMRLoadGenerator.class);
  job.setMapperClass(SampleMapper.class);
  job.setReducerClass(SampleReducer.class);
  if (!parseArgs(argv, job)) {
    return null;
  }

  if (null == FileOutputFormat.getOutputPath(job)) {
    // No output dir? No writes
    job.setOutputFormat(NullOutputFormat.class);
  }

  if (0 == FileInputFormat.getInputPaths(job).length) {
    // No input dir? Generate random data
    System.err.println("No input path; ignoring InputFormat");
    confRandom(job);
  } else if (null != job.getClass("mapred.indirect.input.format", null)) {
    // specified IndirectInputFormat? Build src list
    JobClient jClient = new JobClient(job);
    Path sysdir = jClient.getSystemDir();
    Random r = new Random();
    Path indirInputFile = new Path(sysdir, Integer.toString(r
        .nextInt(Integer.MAX_VALUE), 36)
        + "_files");
    job.set("mapred.indirect.input.file", indirInputFile.toString());
    SequenceFile.Writer writer = SequenceFile.createWriter(sysdir
        .getFileSystem(job), job, indirInputFile, LongWritable.class,
        Text.class, SequenceFile.CompressionType.NONE);
    try {
      for (Path p : FileInputFormat.getInputPaths(job)) {
        FileSystem fs = p.getFileSystem(job);
        Stack<Path> pathstack = new Stack<Path>();
        pathstack.push(p);
        while (!pathstack.empty()) {
          for (FileStatus stat : fs.listStatus(pathstack.pop())) {
            if (stat.isDir()) {
              if (!stat.getPath().getName().startsWith("_")) {
                pathstack.push(stat.getPath());
              }
            } else {
              writer.sync();
              writer.append(new LongWritable(stat.getLen()), new Text(stat
                  .getPath().toUri().toString()));
            }
          }
        }
      }
    } finally {
      writer.close();
    }
  }

  job.setCompressMapOutput(mapoutputCompressed);
  job.setBoolean("mapred.output.compress", outputCompressed);
  return job;

}
Project: hadoop-EAR    File: AllTestDriver.java
/**
 * A description of the test program for running all the tests using jar file
 */
public static void main(String argv[]){
  ProgramDriver pgd = new ProgramDriver();
  try {
    pgd.addClass("threadedmapbench", ThreadedMapBenchmark.class, 
                 "A map/reduce benchmark that compares the performance " + 
                 "of maps with multiple spills over maps with 1 spill");
    pgd.addClass("mrbench", MRBench.class, "A map/reduce benchmark that can create many small jobs");
    pgd.addClass("nnbench", NNBench.class, "A benchmark that stresses the namenode.");
    pgd.addClass("mapredtest", TestMapRed.class, "A map/reduce test check.");
    pgd.addClass("testfilesystem", TestFileSystem.class, "A test for FileSystem read/write.");
    pgd.addClass("testsequencefile", TestSequenceFile.class, "A test for flat files of binary key value pairs.");
    pgd.addClass("testsetfile", TestSetFile.class, "A test for flat files of binary key/value pairs.");
    pgd.addClass("testarrayfile", TestArrayFile.class, "A test for flat files of binary key/value pairs.");
    pgd.addClass("testrpc", TestRPC.class, "A test for rpc.");
    pgd.addClass("testipc", TestIPC.class, "A test for ipc.");
    pgd.addClass("testsequencefileinputformat", TestSequenceFileInputFormat.class, "A test for sequence file input format.");
    pgd.addClass("testtextinputformat", TestTextInputFormat.class, "A test for text input format.");
    pgd.addClass("TestDFSIO", TestDFSIO.class, "Distributed i/o benchmark.");
    pgd.addClass("TestAppendStress", TestAppendStress.class, "Distributed append stress test.");
    pgd.addClass("DFSCIOTest", DFSCIOTest.class, "Distributed i/o benchmark of libhdfs.");
    pgd.addClass("DistributedFSCheck", DistributedFSCheck.class, "Distributed checkup of the file system consistency.");
    pgd.addClass("testmapredsort", SortValidator.class, 
                 "A map/reduce program that validates the map-reduce framework's sort.");
    pgd.addClass("testbigmapoutput", BigMapOutput.class, 
                 "A map/reduce program that works on a very big " + 
                 "non-splittable file and does identity map/reduce");
    pgd.addClass("loadgen", GenericMRLoadGenerator.class, "Generic map/reduce load generator");
    pgd.addClass("filebench", FileBench.class, "Benchmark SequenceFile(Input|Output)Format (block,record compressed and uncompressed), Text(Input|Output)Format (compressed and uncompressed)");
    pgd.addClass("dfsthroughput", BenchmarkThroughput.class, 
                 "measure hdfs throughput");
    pgd.addClass("MRReliabilityTest", ReliabilityTest.class,
        "A program that tests the reliability of the MR framework by " +
        "injecting faults/failures");
    pgd.addClass("LocalReadWritePerf", LocalReadWritePerf.class, "HDFS local read/write benchmark");
    pgd.addClass("BenchmarkHar", BenchmarkHar.class, "benchmark for different ways of generating har files");
    pgd.driver(argv);
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
Project: hadoop-plus    File: MapredTestDriver.java
public MapredTestDriver(ProgramDriver pgd) {
  this.pgd = pgd;
  try {
    pgd.addClass("testsequencefile", TestSequenceFile.class, 
    "A test for flat files of binary key value pairs.");
    pgd.addClass("threadedmapbench", ThreadedMapBenchmark.class, 
        "A map/reduce benchmark that compares the performance " + 
        "of maps with multiple spills over maps with 1 spill");
    pgd.addClass("mrbench", MRBench.class, 
        "A map/reduce benchmark that can create many small jobs");
    pgd.addClass("mapredtest", TestMapRed.class, "A map/reduce test check.");
    pgd.addClass("testsequencefileinputformat", 
        TestSequenceFileInputFormat.class, 
        "A test for sequence file input format.");
    pgd.addClass("testtextinputformat", TestTextInputFormat.class, 
        "A test for text input format.");
    pgd.addClass("testmapredsort", SortValidator.class, 
        "A map/reduce program that validates the " +
        "map-reduce framework's sort.");
    pgd.addClass("testbigmapoutput", BigMapOutput.class, 
        "A map/reduce program that works on a very big " +
        "non-splittable file and does identity map/reduce");
    pgd.addClass("loadgen", GenericMRLoadGenerator.class, 
        "Generic map/reduce load generator");
    pgd.addClass("MRReliabilityTest", ReliabilityTest.class,
        "A program that tests the reliability of the MR framework by " +
        "injecting faults/failures");
    pgd.addClass("fail", FailJob.class, "a job that always fails");
    pgd.addClass("sleep", SleepJob.class, 
                 "A job that sleeps at each map and reduce task.");
    pgd.addClass("nnbench", NNBench.class, 
        "A benchmark that stresses the namenode.");
    pgd.addClass("testfilesystem", TestFileSystem.class, 
        "A test for FileSystem read/write.");
    pgd.addClass(TestDFSIO.class.getSimpleName(), TestDFSIO.class, 
        "Distributed i/o benchmark.");
    pgd.addClass("DFSCIOTest", DFSCIOTest.class, "" +
        "Distributed i/o benchmark of libhdfs.");
    pgd.addClass("DistributedFSCheck", DistributedFSCheck.class, 
        "Distributed checkup of the file system consistency.");
    pgd.addClass("filebench", FileBench.class, 
        "Benchmark SequenceFile(Input|Output)Format " +
        "(block,record compressed and uncompressed), " +
        "Text(Input|Output)Format (compressed and uncompressed)");
    pgd.addClass(JHLogAnalyzer.class.getSimpleName(), JHLogAnalyzer.class, 
        "Job History Log analyzer.");
    pgd.addClass(SliveTest.class.getSimpleName(), SliveTest.class, 
        "HDFS Stress Test and Live Data Verification.");
    pgd.addClass("minicluster", MiniHadoopClusterManager.class,
    "Single process HDFS and MR cluster.");
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
Project: FlexMap    File: MapredTestDriver.java
public MapredTestDriver(ProgramDriver pgd) {
  this.pgd = pgd;
  try {
    pgd.addClass("testsequencefile", TestSequenceFile.class, 
    "A test for flat files of binary key value pairs.");
    pgd.addClass("threadedmapbench", ThreadedMapBenchmark.class, 
        "A map/reduce benchmark that compares the performance " + 
        "of maps with multiple spills over maps with 1 spill");
    pgd.addClass("mrbench", MRBench.class, 
        "A map/reduce benchmark that can create many small jobs");
    pgd.addClass("mapredtest", TestMapRed.class, "A map/reduce test check.");
    pgd.addClass("testsequencefileinputformat", 
        TestSequenceFileInputFormat.class, 
        "A test for sequence file input format.");
    pgd.addClass("testtextinputformat", TestTextInputFormat.class, 
        "A test for text input format.");
    pgd.addClass("testmapredsort", SortValidator.class, 
        "A map/reduce program that validates the " +
        "map-reduce framework's sort.");
    pgd.addClass("testbigmapoutput", BigMapOutput.class, 
        "A map/reduce program that works on a very big " +
        "non-splittable file and does identity map/reduce");
    pgd.addClass("loadgen", GenericMRLoadGenerator.class, 
        "Generic map/reduce load generator");
    pgd.addClass("MRReliabilityTest", ReliabilityTest.class,
        "A program that tests the reliability of the MR framework by " +
        "injecting faults/failures");
    pgd.addClass("fail", FailJob.class, "a job that always fails");
    pgd.addClass("sleep", SleepJob.class, 
                 "A job that sleeps at each map and reduce task.");
    pgd.addClass("nnbench", NNBench.class, 
        "A benchmark that stresses the namenode.");
    pgd.addClass("testfilesystem", TestFileSystem.class, 
        "A test for FileSystem read/write.");
    pgd.addClass(TestDFSIO.class.getSimpleName(), TestDFSIO.class, 
        "Distributed i/o benchmark.");
    pgd.addClass("DFSCIOTest", DFSCIOTest.class, "" +
        "Distributed i/o benchmark of libhdfs.");
    pgd.addClass("DistributedFSCheck", DistributedFSCheck.class, 
        "Distributed checkup of the file system consistency.");
    pgd.addClass("filebench", FileBench.class, 
        "Benchmark SequenceFile(Input|Output)Format " +
        "(block,record compressed and uncompressed), " +
        "Text(Input|Output)Format (compressed and uncompressed)");
    pgd.addClass(JHLogAnalyzer.class.getSimpleName(), JHLogAnalyzer.class, 
        "Job History Log analyzer.");
    pgd.addClass(SliveTest.class.getSimpleName(), SliveTest.class, 
        "HDFS Stress Test and Live Data Verification.");
    pgd.addClass("minicluster", MiniHadoopClusterManager.class,
    "Single process HDFS and MR cluster.");
    pgd.addClass("largesorter", LargeSorter.class,
        "Large-Sort tester");
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
Project: hops    File: MapredTestDriver.java
public MapredTestDriver(ProgramDriver pgd) {
  this.pgd = pgd;
  try {
    pgd.addClass("testsequencefile", TestSequenceFile.class,
    "A test for flat files of binary key value pairs.");
    pgd.addClass("threadedmapbench", ThreadedMapBenchmark.class,
        "A map/reduce benchmark that compares the performance " +
        "of maps with multiple spills over maps with 1 spill");
    pgd.addClass("mrbench", MRBench.class,
        "A map/reduce benchmark that can create many small jobs");
    pgd.addClass("mapredtest", TestMapRed.class, "A map/reduce test check.");
    pgd.addClass("testsequencefileinputformat",
        TestSequenceFileInputFormat.class,
        "A test for sequence file input format.");
    pgd.addClass("testtextinputformat", TestTextInputFormat.class,
        "A test for text input format.");
    pgd.addClass("testmapredsort", SortValidator.class,
        "A map/reduce program that validates the " +
        "map-reduce framework's sort.");
    pgd.addClass("testbigmapoutput", BigMapOutput.class,
        "A map/reduce program that works on a very big " +
        "non-splittable file and does identity map/reduce");
    pgd.addClass("loadgen", GenericMRLoadGenerator.class,
        "Generic map/reduce load generator");
    pgd.addClass("MRReliabilityTest", ReliabilityTest.class,
        "A program that tests the reliability of the MR framework by " +
        "injecting faults/failures");
    pgd.addClass("fail", FailJob.class, "a job that always fails");
    pgd.addClass("sleep", SleepJob.class,
                 "A job that sleeps at each map and reduce task.");
    pgd.addClass("timelineperformance", TimelineServicePerformance.class,
                 "A job that launches mappers to test timlineserver performance.");
    pgd.addClass("nnbench", NNBench.class,
        "A benchmark that stresses the namenode w/ MR.");
    pgd.addClass("nnbenchWithoutMR", NNBenchWithoutMR.class,
        "A benchmark that stresses the namenode w/o MR.");
    pgd.addClass("testfilesystem", TestFileSystem.class,
        "A test for FileSystem read/write.");
    pgd.addClass(TestDFSIO.class.getSimpleName(), TestDFSIO.class,
        "Distributed i/o benchmark.");
    pgd.addClass("DFSCIOTest", DFSCIOTest.class, "" +
        "Distributed i/o benchmark of libhdfs.");
    pgd.addClass("DistributedFSCheck", DistributedFSCheck.class,
        "Distributed checkup of the file system consistency.");
    pgd.addClass("filebench", FileBench.class,
        "Benchmark SequenceFile(Input|Output)Format " +
        "(block,record compressed and uncompressed), " +
        "Text(Input|Output)Format (compressed and uncompressed)");
    pgd.addClass(JHLogAnalyzer.class.getSimpleName(), JHLogAnalyzer.class,
        "Job History Log analyzer.");
    pgd.addClass(SliveTest.class.getSimpleName(), SliveTest.class,
        "HDFS Stress Test and Live Data Verification.");
    pgd.addClass("minicluster", MiniHadoopClusterManager.class,
    "Single process HDFS and MR cluster.");
    pgd.addClass("largesorter", LargeSorter.class,
        "Large-Sort tester");
    pgd.addClass("NNloadGenerator", LoadGenerator.class,
            "Generate load on Namenode using NN loadgenerator run WITHOUT MR");
    pgd.addClass("NNloadGeneratorMR", LoadGeneratorMR.class,
        "Generate load on Namenode using NN loadgenerator run as MR job");
    pgd.addClass("NNstructureGenerator", StructureGenerator.class,
        "Generate the structure to be used by NNdataGenerator");
    pgd.addClass("NNdataGenerator", DataGenerator.class,
        "Generate the data to be used by NNloadGenerator");
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
Project: hadoop-TCP    File: MapredTestDriver.java
public MapredTestDriver(ProgramDriver pgd) {
  this.pgd = pgd;
  try {
    pgd.addClass("testsequencefile", TestSequenceFile.class, 
    "A test for flat files of binary key value pairs.");
    pgd.addClass("threadedmapbench", ThreadedMapBenchmark.class, 
        "A map/reduce benchmark that compares the performance " + 
        "of maps with multiple spills over maps with 1 spill");
    pgd.addClass("mrbench", MRBench.class, 
        "A map/reduce benchmark that can create many small jobs");
    pgd.addClass("mapredtest", TestMapRed.class, "A map/reduce test check.");
    pgd.addClass("testsequencefileinputformat", 
        TestSequenceFileInputFormat.class, 
        "A test for sequence file input format.");
    pgd.addClass("testtextinputformat", TestTextInputFormat.class, 
        "A test for text input format.");
    pgd.addClass("testmapredsort", SortValidator.class, 
        "A map/reduce program that validates the " +
        "map-reduce framework's sort.");
    pgd.addClass("testbigmapoutput", BigMapOutput.class, 
        "A map/reduce program that works on a very big " +
        "non-splittable file and does identity map/reduce");
    pgd.addClass("loadgen", GenericMRLoadGenerator.class, 
        "Generic map/reduce load generator");
    pgd.addClass("MRReliabilityTest", ReliabilityTest.class,
        "A program that tests the reliability of the MR framework by " +
        "injecting faults/failures");
    pgd.addClass("fail", FailJob.class, "a job that always fails");
    pgd.addClass("sleep", SleepJob.class, 
                 "A job that sleeps at each map and reduce task.");
    pgd.addClass("nnbench", NNBench.class, 
        "A benchmark that stresses the namenode.");
    pgd.addClass("testfilesystem", TestFileSystem.class, 
        "A test for FileSystem read/write.");
    pgd.addClass(TestDFSIO.class.getSimpleName(), TestDFSIO.class, 
        "Distributed i/o benchmark.");
    pgd.addClass("DFSCIOTest", DFSCIOTest.class, "" +
        "Distributed i/o benchmark of libhdfs.");
    pgd.addClass("DistributedFSCheck", DistributedFSCheck.class, 
        "Distributed checkup of the file system consistency.");
    pgd.addClass("filebench", FileBench.class, 
        "Benchmark SequenceFile(Input|Output)Format " +
        "(block,record compressed and uncompressed), " +
        "Text(Input|Output)Format (compressed and uncompressed)");
    pgd.addClass(JHLogAnalyzer.class.getSimpleName(), JHLogAnalyzer.class, 
        "Job History Log analyzer.");
    pgd.addClass(SliveTest.class.getSimpleName(), SliveTest.class, 
        "HDFS Stress Test and Live Data Verification.");
    pgd.addClass("minicluster", MiniHadoopClusterManager.class,
    "Single process HDFS and MR cluster.");
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
Project: hadoop-on-lustre    File: GenericMRLoadJobCreator.java
public static JobConf createJob(String[] argv, boolean mapoutputCompressed,
    boolean outputCompressed) throws Exception {

  JobConf job = new JobConf();
  job.setJarByClass(GenericMRLoadGenerator.class);
  job.setMapperClass(SampleMapper.class);
  job.setReducerClass(SampleReducer.class);
  if (!parseArgs(argv, job)) {
    return null;
  }

  if (null == FileOutputFormat.getOutputPath(job)) {
    // No output dir? No writes
    job.setOutputFormat(NullOutputFormat.class);
  }

  if (0 == FileInputFormat.getInputPaths(job).length) {
    // No input dir? Generate random data
    System.err.println("No input path; ignoring InputFormat");
    confRandom(job);
  } else if (null != job.getClass("mapred.indirect.input.format", null)) {
    // specified IndirectInputFormat? Build src list
    JobClient jClient = new JobClient(job);
    Path sysdir = jClient.getSystemDir();
    Random r = new Random();
    Path indirInputFile = new Path(sysdir, Integer.toString(r
        .nextInt(Integer.MAX_VALUE), 36)
        + "_files");
    job.set("mapred.indirect.input.file", indirInputFile.toString());
    SequenceFile.Writer writer = SequenceFile.createWriter(sysdir
        .getFileSystem(job), job, indirInputFile, LongWritable.class,
        Text.class, SequenceFile.CompressionType.NONE);
    try {
      for (Path p : FileInputFormat.getInputPaths(job)) {
        FileSystem fs = p.getFileSystem(job);
        Stack<Path> pathstack = new Stack<Path>();
        pathstack.push(p);
        while (!pathstack.empty()) {
          for (FileStatus stat : fs.listStatus(pathstack.pop())) {
            if (stat.isDir()) {
              if (!stat.getPath().getName().startsWith("_")) {
                pathstack.push(stat.getPath());
              }
            } else {
              writer.sync();
              writer.append(new LongWritable(stat.getLen()), new Text(stat
                  .getPath().toUri().toString()));
            }
          }
        }
      }
    } finally {
      writer.close();
    }
  }

  job.setCompressMapOutput(mapoutputCompressed);
  job.setBoolean("mapred.output.compress", outputCompressed);
  return job;

}
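A minimal sketch of how a caller might consume the helper above (assumed usage, not code from the hadoop-on-lustre tree, and assuming the caller sits in the same package as GenericMRLoadJobCreator): a null return means argument parsing failed, typically after printing usage; otherwise the JobConf is submitted with the classic mapred JobClient.

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;

public class LoadJobRunner {
  public static void main(String[] argv) throws Exception {
    // The boolean arguments mirror createJob's signature:
    // compress map output, leave the final job output uncompressed.
    JobConf job = GenericMRLoadJobCreator.createJob(argv, true, false);
    if (job == null) {
      return; // parseArgs rejected the arguments
    }
    JobClient.runJob(job); // blocks until the generated load job completes
  }
}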
Project: hardfs    File: MapredTestDriver.java
public MapredTestDriver(ProgramDriver pgd) {
  this.pgd = pgd;
  try {
    pgd.addClass("testsequencefile", TestSequenceFile.class, 
    "A test for flat files of binary key value pairs.");
    pgd.addClass("threadedmapbench", ThreadedMapBenchmark.class, 
        "A map/reduce benchmark that compares the performance " + 
        "of maps with multiple spills over maps with 1 spill");
    pgd.addClass("mrbench", MRBench.class, 
        "A map/reduce benchmark that can create many small jobs");
    pgd.addClass("mapredtest", TestMapRed.class, "A map/reduce test check.");
    pgd.addClass("testsequencefileinputformat", 
        TestSequenceFileInputFormat.class, 
        "A test for sequence file input format.");
    pgd.addClass("testtextinputformat", TestTextInputFormat.class, 
        "A test for text input format.");
    pgd.addClass("testmapredsort", SortValidator.class, 
        "A map/reduce program that validates the " +
        "map-reduce framework's sort.");
    pgd.addClass("testbigmapoutput", BigMapOutput.class, 
        "A map/reduce program that works on a very big " +
        "non-splittable file and does identity map/reduce");
    pgd.addClass("loadgen", GenericMRLoadGenerator.class, 
        "Generic map/reduce load generator");
    pgd.addClass("MRReliabilityTest", ReliabilityTest.class,
        "A program that tests the reliability of the MR framework by " +
        "injecting faults/failures");
    pgd.addClass("fail", FailJob.class, "a job that always fails");
    pgd.addClass("sleep", SleepJob.class, 
                 "A job that sleeps at each map and reduce task.");
    pgd.addClass("nnbench", NNBench.class, 
        "A benchmark that stresses the namenode.");
    pgd.addClass("testfilesystem", TestFileSystem.class, 
        "A test for FileSystem read/write.");
    pgd.addClass(TestDFSIO.class.getSimpleName(), TestDFSIO.class, 
        "Distributed i/o benchmark.");
    pgd.addClass("DFSCIOTest", DFSCIOTest.class, "" +
        "Distributed i/o benchmark of libhdfs.");
    pgd.addClass("DistributedFSCheck", DistributedFSCheck.class, 
        "Distributed checkup of the file system consistency.");
    pgd.addClass("filebench", FileBench.class, 
        "Benchmark SequenceFile(Input|Output)Format " +
        "(block,record compressed and uncompressed), " +
        "Text(Input|Output)Format (compressed and uncompressed)");
    pgd.addClass(JHLogAnalyzer.class.getSimpleName(), JHLogAnalyzer.class, 
        "Job History Log analyzer.");
    pgd.addClass(SliveTest.class.getSimpleName(), SliveTest.class, 
        "HDFS Stress Test and Live Data Verification.");
    pgd.addClass("minicluster", MiniHadoopClusterManager.class,
    "Single process HDFS and MR cluster.");
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
Project: hadoop-on-lustre2    File: MapredTestDriver.java
public MapredTestDriver(ProgramDriver pgd) {
  this.pgd = pgd;
  try {
    pgd.addClass("testsequencefile", TestSequenceFile.class, 
    "A test for flat files of binary key value pairs.");
    pgd.addClass("threadedmapbench", ThreadedMapBenchmark.class, 
        "A map/reduce benchmark that compares the performance " + 
        "of maps with multiple spills over maps with 1 spill");
    pgd.addClass("mrbench", MRBench.class, 
        "A map/reduce benchmark that can create many small jobs");
    pgd.addClass("mapredtest", TestMapRed.class, "A map/reduce test check.");
    pgd.addClass("testsequencefileinputformat", 
        TestSequenceFileInputFormat.class, 
        "A test for sequence file input format.");
    pgd.addClass("testtextinputformat", TestTextInputFormat.class, 
        "A test for text input format.");
    pgd.addClass("testmapredsort", SortValidator.class, 
        "A map/reduce program that validates the " +
        "map-reduce framework's sort.");
    pgd.addClass("testbigmapoutput", BigMapOutput.class, 
        "A map/reduce program that works on a very big " +
        "non-splittable file and does identity map/reduce");
    pgd.addClass("loadgen", GenericMRLoadGenerator.class, 
        "Generic map/reduce load generator");
    pgd.addClass("MRReliabilityTest", ReliabilityTest.class,
        "A program that tests the reliability of the MR framework by " +
        "injecting faults/failures");
    pgd.addClass("fail", FailJob.class, "a job that always fails");
    pgd.addClass("sleep", SleepJob.class, 
                 "A job that sleeps at each map and reduce task.");
    pgd.addClass("nnbench", NNBench.class, 
        "A benchmark that stresses the namenode.");
    pgd.addClass("testfilesystem", TestFileSystem.class, 
        "A test for FileSystem read/write.");
    pgd.addClass(TestDFSIO.class.getSimpleName(), TestDFSIO.class, 
        "Distributed i/o benchmark.");
    pgd.addClass("DFSCIOTest", DFSCIOTest.class, "" +
        "Distributed i/o benchmark of libhdfs.");
    pgd.addClass("DistributedFSCheck", DistributedFSCheck.class, 
        "Distributed checkup of the file system consistency.");
    pgd.addClass("filebench", FileBench.class, 
        "Benchmark SequenceFile(Input|Output)Format " +
        "(block,record compressed and uncompressed), " +
        "Text(Input|Output)Format (compressed and uncompressed)");
    pgd.addClass(JHLogAnalyzer.class.getSimpleName(), JHLogAnalyzer.class, 
        "Job History Log analyzer.");
    pgd.addClass(SliveTest.class.getSimpleName(), SliveTest.class, 
        "HDFS Stress Test and Live Data Verification.");
    pgd.addClass("minicluster", MiniHadoopClusterManager.class,
    "Single process HDFS and MR cluster.");
    pgd.addClass("largesorter", LargeSorter.class,
        "Large-Sort tester");
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
Project: RDFS    File: GenericMRLoadJobCreator.java
public static JobConf createJob(String[] argv, boolean mapoutputCompressed,
    boolean outputCompressed) throws Exception {

  JobConf job = new JobConf();
  job.setJarByClass(GenericMRLoadGenerator.class);
  job.setMapperClass(SampleMapper.class);
  job.setReducerClass(SampleReducer.class);
  if (!parseArgs(argv, job)) {
    return null;
  }

  if (null == FileOutputFormat.getOutputPath(job)) {
    // No output dir? No writes
    job.setOutputFormat(NullOutputFormat.class);
  }

  if (0 == FileInputFormat.getInputPaths(job).length) {
    // No input dir? Generate random data
    System.err.println("No input path; ignoring InputFormat");
    confRandom(job);
  } else if (null != job.getClass("mapred.indirect.input.format", null)) {
    // specified IndirectInputFormat? Build src list
    JobClient jClient = new JobClient(job);
    Path sysdir = jClient.getSystemDir();
    Random r = new Random();
    Path indirInputFile = new Path(sysdir, Integer.toString(r
        .nextInt(Integer.MAX_VALUE), 36)
        + "_files");
    job.set("mapred.indirect.input.file", indirInputFile.toString());
    SequenceFile.Writer writer = SequenceFile.createWriter(sysdir
        .getFileSystem(job), job, indirInputFile, LongWritable.class,
        Text.class, SequenceFile.CompressionType.NONE);
    try {
      for (Path p : FileInputFormat.getInputPaths(job)) {
        FileSystem fs = p.getFileSystem(job);
        Stack<Path> pathstack = new Stack<Path>();
        pathstack.push(p);
        while (!pathstack.empty()) {
          for (FileStatus stat : fs.listStatus(pathstack.pop())) {
            if (stat.isDir()) {
              if (!stat.getPath().getName().startsWith("_")) {
                pathstack.push(stat.getPath());
              }
            } else {
              writer.sync();
              writer.append(new LongWritable(stat.getLen()), new Text(stat
                  .getPath().toUri().toString()));
            }
          }
        }
      }
    } finally {
      writer.close();
    }
  }

  job.setCompressMapOutput(mapoutputCompressed);
  job.setBoolean("mapred.output.compress", outputCompressed);
  return job;

}
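The indirect-input branch above writes a "<random>_files" SequenceFile whose records pair each file's length with its URI. The following is a small hypothetical helper, assuming nothing beyond that record layout, that dumps such a listing for inspection.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class IndirectInputDumper {
  public static void dump(Configuration conf, Path indirInputFile) throws IOException {
    FileSystem fs = indirInputFile.getFileSystem(conf);
    SequenceFile.Reader reader = new SequenceFile.Reader(fs, indirInputFile, conf);
    try {
      LongWritable length = new LongWritable(); // key: file length in bytes
      Text uri = new Text();                    // value: file URI string
      while (reader.next(length, uri)) {
        System.out.println(length.get() + "\t" + uri);
      }
    } finally {
      reader.close();
    }
  }
}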
Project: hadoop-0.20    File: GenericMRLoadJobCreator.java
public static JobConf createJob(String[] argv, boolean mapoutputCompressed,
    boolean outputCompressed) throws Exception {

  JobConf job = new JobConf();
  job.setJarByClass(GenericMRLoadGenerator.class);
  job.setMapperClass(SampleMapper.class);
  job.setReducerClass(SampleReducer.class);
  if (!parseArgs(argv, job)) {
    return null;
  }

  if (null == FileOutputFormat.getOutputPath(job)) {
    // No output dir? No writes
    job.setOutputFormat(NullOutputFormat.class);
  }

  if (0 == FileInputFormat.getInputPaths(job).length) {
    // No input dir? Generate random data
    System.err.println("No input path; ignoring InputFormat");
    confRandom(job);
  } else if (null != job.getClass("mapred.indirect.input.format", null)) {
    // specified IndirectInputFormat? Build src list
    JobClient jClient = new JobClient(job);
    Path sysdir = jClient.getSystemDir();
    Random r = new Random();
    Path indirInputFile = new Path(sysdir, Integer.toString(r
        .nextInt(Integer.MAX_VALUE), 36)
        + "_files");
    job.set("mapred.indirect.input.file", indirInputFile.toString());
    SequenceFile.Writer writer = SequenceFile.createWriter(sysdir
        .getFileSystem(job), job, indirInputFile, LongWritable.class,
        Text.class, SequenceFile.CompressionType.NONE);
    try {
      for (Path p : FileInputFormat.getInputPaths(job)) {
        FileSystem fs = p.getFileSystem(job);
        Stack<Path> pathstack = new Stack<Path>();
        pathstack.push(p);
        while (!pathstack.empty()) {
          for (FileStatus stat : fs.listStatus(pathstack.pop())) {
            if (stat.isDir()) {
              if (!stat.getPath().getName().startsWith("_")) {
                pathstack.push(stat.getPath());
              }
            } else {
              writer.sync();
              writer.append(new LongWritable(stat.getLen()), new Text(stat
                  .getPath().toUri().toString()));
            }
          }
        }
      }
    } finally {
      writer.close();
    }
  }

  job.setCompressMapOutput(mapoutputCompressed);
  job.setBoolean("mapred.output.compress", outputCompressed);
  return job;

}
Project: hanoi-hadoop-2.0.0-cdh    File: GenericMRLoadJobCreator.java
public static JobConf createJob(String[] argv, boolean mapoutputCompressed,
    boolean outputCompressed) throws Exception {

  JobConf job = new JobConf();
  job.setJarByClass(GenericMRLoadGenerator.class);
  job.setMapperClass(SampleMapper.class);
  job.setReducerClass(SampleReducer.class);
  if (!parseArgs(argv, job)) {
    return null;
  }

  if (null == FileOutputFormat.getOutputPath(job)) {
    // No output dir? No writes
    job.setOutputFormat(NullOutputFormat.class);
  }

  if (0 == FileInputFormat.getInputPaths(job).length) {
    // No input dir? Generate random data
    System.err.println("No input path; ignoring InputFormat");
    confRandom(job);
  } else if (null != job.getClass("mapred.indirect.input.format", null)) {
    // specified IndirectInputFormat? Build src list
    JobClient jClient = new JobClient(job);
    Path sysdir = jClient.getSystemDir();
    Random r = new Random();
    Path indirInputFile = new Path(sysdir, Integer.toString(r
        .nextInt(Integer.MAX_VALUE), 36)
        + "_files");
    job.set("mapred.indirect.input.file", indirInputFile.toString());
    SequenceFile.Writer writer = SequenceFile.createWriter(sysdir
        .getFileSystem(job), job, indirInputFile, LongWritable.class,
        Text.class, SequenceFile.CompressionType.NONE);
    try {
      for (Path p : FileInputFormat.getInputPaths(job)) {
        FileSystem fs = p.getFileSystem(job);
        Stack<Path> pathstack = new Stack<Path>();
        pathstack.push(p);
        while (!pathstack.empty()) {
          for (FileStatus stat : fs.listStatus(pathstack.pop())) {
            if (stat.isDir()) {
              if (!stat.getPath().getName().startsWith("_")) {
                pathstack.push(stat.getPath());
              }
            } else {
              writer.sync();
              writer.append(new LongWritable(stat.getLen()), new Text(stat
                  .getPath().toUri().toString()));
            }
          }
        }
      }
    } finally {
      writer.close();
    }
  }

  job.setCompressMapOutput(mapoutputCompressed);
  job.setBoolean("mapred.output.compress", outputCompressed);
  return job;

}
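The last two statements of createJob flip two separate compression switches. As a minimal equivalence sketch (an illustration, not code from this fork): map-output compression is an ordinary JobConf setter, while the raw "mapred.output.compress" boolean is the same key that FileOutputFormat.setCompressOutput writes on these older branches.

import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;

public class CompressionKnobs {
  public static JobConf configure(boolean mapoutputCompressed, boolean outputCompressed) {
    JobConf job = new JobConf();
    // Intermediate (map-side) output compression.
    job.setCompressMapOutput(mapoutputCompressed);
    // Final job output compression; equivalent to
    // job.setBoolean("mapred.output.compress", outputCompressed) on these branches.
    FileOutputFormat.setCompressOutput(job, outputCompressed);
    return job;
  }
}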
Project: mapreduce-fork    File: MapredTestDriver.java
public MapredTestDriver(ProgramDriver pgd) {
  this.pgd = pgd;
  try {
    pgd.addClass("testsequencefile", TestSequenceFile.class, 
    "A test for flat files of binary key value pairs.");
    pgd.addClass("threadedmapbench", ThreadedMapBenchmark.class, 
        "A map/reduce benchmark that compares the performance " + 
        "of maps with multiple spills over maps with 1 spill");
    pgd.addClass("mrbench", MRBench.class, 
        "A map/reduce benchmark that can create many small jobs");
    pgd.addClass("mapredtest", TestMapRed.class, "A map/reduce test check.");
    pgd.addClass("testsequencefileinputformat", 
        TestSequenceFileInputFormat.class, 
        "A test for sequence file input format.");
    pgd.addClass("testtextinputformat", TestTextInputFormat.class, 
        "A test for text input format.");
    pgd.addClass("testmapredsort", SortValidator.class, 
        "A map/reduce program that validates the " +
        "map-reduce framework's sort.");
    pgd.addClass("testbigmapoutput", BigMapOutput.class, 
        "A map/reduce program that works on a very big " +
        "non-splittable file and does identity map/reduce");
    pgd.addClass("loadgen", GenericMRLoadGenerator.class, 
        "Generic map/reduce load generator");
    pgd.addClass("MRReliabilityTest", ReliabilityTest.class,
        "A program that tests the reliability of the MR framework by " +
        "injecting faults/failures");
    pgd.addClass("fail", FailJob.class, "a job that always fails");
    pgd.addClass("sleep", SleepJob.class, 
                 "A job that sleeps at each map and reduce task.");
    pgd.addClass("nnbench", NNBench.class, 
        "A benchmark that stresses the namenode.");
    pgd.addClass("testfilesystem", TestFileSystem.class, 
        "A test for FileSystem read/write.");
    pgd.addClass(TestDFSIO.class.getSimpleName(), TestDFSIO.class, 
        "Distributed i/o benchmark.");
    pgd.addClass("DFSCIOTest", DFSCIOTest.class, "" +
        "Distributed i/o benchmark of libhdfs.");
    pgd.addClass("DistributedFSCheck", DistributedFSCheck.class, 
        "Distributed checkup of the file system consistency.");
    pgd.addClass("filebench", FileBench.class, 
        "Benchmark SequenceFile(Input|Output)Format " +
        "(block,record compressed and uncompressed), " +
        "Text(Input|Output)Format (compressed and uncompressed)");
    pgd.addClass(JHLogAnalyzer.class.getSimpleName(), JHLogAnalyzer.class, 
        "Job History Log analyzer.");
    pgd.addClass(SliveTest.class.getSimpleName(), SliveTest.class, 
        "HDFS Stress Test and Live Data Verification.");
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
Project: hortonworks-extension    File: GenericMRLoadJobCreator.java
public static JobConf createJob(String[] argv, boolean mapoutputCompressed,
    boolean outputCompressed) throws Exception {

  JobConf job = new JobConf();
  job.setJarByClass(GenericMRLoadGenerator.class);
  job.setMapperClass(SampleMapper.class);
  job.setReducerClass(SampleReducer.class);
  if (!parseArgs(argv, job)) {
    return null;
  }

  if (null == FileOutputFormat.getOutputPath(job)) {
    // No output dir? No writes
    job.setOutputFormat(NullOutputFormat.class);
  }

  if (0 == FileInputFormat.getInputPaths(job).length) {
    // No input dir? Generate random data
    System.err.println("No input path; ignoring InputFormat");
    confRandom(job);
  } else if (null != job.getClass("mapred.indirect.input.format", null)) {
    // specified IndirectInputFormat? Build src list
    JobClient jClient = new JobClient(job);
    Path sysdir = jClient.getSystemDir();
    Random r = new Random();
    Path indirInputFile = new Path(sysdir, Integer.toString(r
        .nextInt(Integer.MAX_VALUE), 36)
        + "_files");
    job.set("mapred.indirect.input.file", indirInputFile.toString());
    SequenceFile.Writer writer = SequenceFile.createWriter(sysdir
        .getFileSystem(job), job, indirInputFile, LongWritable.class,
        Text.class, SequenceFile.CompressionType.NONE);
    try {
      for (Path p : FileInputFormat.getInputPaths(job)) {
        FileSystem fs = p.getFileSystem(job);
        Stack<Path> pathstack = new Stack<Path>();
        pathstack.push(p);
        while (!pathstack.empty()) {
          for (FileStatus stat : fs.listStatus(pathstack.pop())) {
            if (stat.isDir()) {
              if (!stat.getPath().getName().startsWith("_")) {
                pathstack.push(stat.getPath());
              }
            } else {
              writer.sync();
              writer.append(new LongWritable(stat.getLen()), new Text(stat
                  .getPath().toUri().toString()));
            }
          }
        }
      }
    } finally {
      writer.close();
    }
  }

  job.setCompressMapOutput(mapoutputCompressed);
  job.setBoolean("mapred.output.compress", outputCompressed);
  return job;

}
Project: hadoop-gpu    File: GenericMRLoadJobCreator.java
public static JobConf createJob(String[] argv, boolean mapoutputCompressed,
    boolean outputCompressed) throws Exception {

  JobConf job = new JobConf();
  job.setJarByClass(GenericMRLoadGenerator.class);
  job.setMapperClass(SampleMapper.class);
  job.setReducerClass(SampleReducer.class);
  if (!parseArgs(argv, job)) {
    return null;
  }

  if (null == FileOutputFormat.getOutputPath(job)) {
    // No output dir? No writes
    job.setOutputFormat(NullOutputFormat.class);
  }

  if (0 == FileInputFormat.getInputPaths(job).length) {
    // No input dir? Generate random data
    System.err.println("No input path; ignoring InputFormat");
    confRandom(job);
  } else if (null != job.getClass("mapred.indirect.input.format", null)) {
    // specified IndirectInputFormat? Build src list
    JobClient jClient = new JobClient(job);
    Path sysdir = jClient.getSystemDir();
    Random r = new Random();
    Path indirInputFile = new Path(sysdir, Integer.toString(r
        .nextInt(Integer.MAX_VALUE), 36)
        + "_files");
    job.set("mapred.indirect.input.file", indirInputFile.toString());
    SequenceFile.Writer writer = SequenceFile.createWriter(sysdir
        .getFileSystem(job), job, indirInputFile, LongWritable.class,
        Text.class, SequenceFile.CompressionType.NONE);
    try {
      for (Path p : FileInputFormat.getInputPaths(job)) {
        FileSystem fs = p.getFileSystem(job);
        Stack<Path> pathstack = new Stack<Path>();
        pathstack.push(p);
        while (!pathstack.empty()) {
          for (FileStatus stat : fs.listStatus(pathstack.pop())) {
            if (stat.isDir()) {
              if (!stat.getPath().getName().startsWith("_")) {
                pathstack.push(stat.getPath());
              }
            } else {
              writer.sync();
              writer.append(new LongWritable(stat.getLen()), new Text(stat
                  .getPath().toUri().toString()));
            }
          }
        }
      }
    } finally {
      writer.close();
    }
  }

  job.setCompressMapOutput(mapoutputCompressed);
  job.setBoolean("mapred.output.compress", outputCompressed);
  return job;

}