Example source code for the Java class org.apache.hadoop.mapred.TestMiniMRWithDFS
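Every snippet on this page funnels job output through the static helper TestMiniMRWithDFS.readOutput(Path, JobConf). For orientation, here is a minimal method-level sketch of what that helper does, assuming the Hadoop 0.20-era test layout these projects share; each project ships its own authoritative copy:

/**
 * Reads every output file under outDir and returns the concatenated text.
 * A minimal sketch, assuming the Hadoop 0.20-era API.
 */
public static String readOutput(Path outDir, JobConf conf) throws IOException {
  FileSystem fs = outDir.getFileSystem(conf);
  StringBuilder result = new StringBuilder();
  // Collect the part files, skipping _logs and other side files.
  Path[] fileList = FileUtil.stat2Paths(fs.listStatus(outDir,
      new Utils.OutputFileUtils.OutputFilesFilter()));
  for (Path outputFile : fileList) {
    BufferedReader file =
        new BufferedReader(new InputStreamReader(fs.open(outputFile)));
    String line = file.readLine();
    while (line != null) {
      result.append(line).append("\n");
      line = file.readLine();
    }
    file.close();
  }
  return result.toString();
}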

Project: hadoop-on-lustre    File: TestStreamingTaskLog.java
/**
 * Runs the streaming job and validates the output.
 * @throws IOException
 */
private void runStreamJobAndValidateEnv() throws IOException {
  int returnStatus = -1;
  boolean mayExit = false;
  StreamJob job = new StreamJob(genArgs(), mayExit);
  returnStatus = job.go();
  assertEquals("StreamJob failed.", 0, returnStatus);

  // validate environment variables set for the child (script) of the Java process
  String env = TestMiniMRWithDFS.readOutput(outputPath, mr.createJobConf());
  long logSize = USERLOG_LIMIT_KB * 1024;
  assertTrue("environment set for child is wrong", env.contains("INFO,TLA")
             && env.contains("-Dhadoop.tasklog.taskid=attempt_")
             && env.contains("-Dhadoop.tasklog.totalLogFileSize=" + logSize)
             && env.contains("-Dhadoop.tasklog.iscleanup=false"));
}
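The assertions above verify that the TaskTracker handed its task-log system properties down to the streaming child. The totalLogFileSize value comes from the same USERLOG_LIMIT_KB field the test pushes into the job configuration, roughly as in this hypothetical sketch of the genArgs() helper (the field value, script name, and exact argument list are assumptions, not copied from the project):

// Hypothetical wiring of USERLOG_LIMIT_KB into the streaming job.
static final long USERLOG_LIMIT_KB = 5;   // task-log cap, in KB

String[] genArgs() {
  return new String[] {
    "-input", inputPath.toString(),
    "-output", outputPath.toString(),
    "-mapper", scriptFile.getAbsolutePath(), // script that echoes its environment
    "-numReduceTasks", "0",
    "-jobconf", "mapred.userlog.limit.kb=" + USERLOG_LIMIT_KB,
    "-jobconf", "mapred.job.tracker=localhost:" + mr.getJobTrackerPort()
  };
}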
Project: hortonworks-extension    File: TestStreamingTaskLog.java
/**
 * Runs the streaming job and validates the output.
 * @throws IOException
 */
private void runStreamJobAndValidateEnv() throws IOException {
  int returnStatus = -1;
  boolean mayExit = false;
  StreamJob job = new StreamJob(genArgs(), mayExit);
  returnStatus = job.go();
  assertEquals("StreamJob failed.", 0, returnStatus);

  // validate environment variables set for the child (script) of the Java process
  String env = TestMiniMRWithDFS.readOutput(outputPath, mr.createJobConf());
  long logSize = USERLOG_LIMIT_KB * 1024;
  assertTrue("environment set for child is wrong", env.contains("INFO,TLA")
             && env.contains("-Dhadoop.tasklog.taskid=attempt_")
             && env.contains("-Dhadoop.tasklog.totalLogFileSize=" + logSize)
             && env.contains("-Dhadoop.tasklog.iscleanup=false"));
}
Project: hadoop-EAR    File: TestUlimit.java
/**
 * Runs the streaming program and asserts its result.
 * @param memLimit memory limit to set for the mapred child.
 * @throws IOException
 */
private void runProgram(String memLimit) throws IOException {
  boolean mayExit = false;
  StreamJob job = new StreamJob(genArgs(memLimit), mayExit);
  job.go();
  String output = TestMiniMRWithDFS.readOutput(outputPath,
                                      mr.createJobConf());
  assertEquals("output is wrong", SET_MEMORY_LIMIT,
                                  output.trim());
}
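Here the streaming mapper simply prints the child process's ulimit, and the test compares that output with the limit it requested; SET_MEMORY_LIMIT holds the expected value. A hypothetical sketch of the genArgs(String) helper follows (the -jobconf keys and mapper command are assumptions, not copied from the project):

// Hypothetical sketch; the real helper also wires the cluster addresses.
String[] genArgs(String memLimit) {
  return new String[] {
    "-input", inputPath.toString(),
    "-output", outputPath.toString(),
    "-mapper", "ulimit -v",            // prints the child's virtual-memory limit
    "-numReduceTasks", "0",
    "-jobconf", "mapred.child.ulimit=" + memLimit,
    "-jobconf", "mapred.map.tasks=1"
  };
}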
Project: hadoop-on-lustre    File: TestUlimit.java
/**
 * Runs the streaming program and asserts its result.
 * @param memLimit memory limit to set for the mapred child.
 * @throws IOException
 */
private void runProgram(String memLimit) throws IOException {
  boolean mayExit = false;
  StreamJob job = new StreamJob(genArgs(memLimit), mayExit);
  job.go();
  String output = TestMiniMRWithDFS.readOutput(outputPath,
                                      mr.createJobConf());
  assertEquals("output is wrong", SET_MEMORY_LIMIT,
                                  output.trim());
}
Project: RDFS    File: TestUlimit.java
/**
 * Runs the streaming program and asserts its result.
 * @param memLimit memory limit to set for the mapred child.
 * @throws IOException
 */
private void runProgram(String memLimit) throws IOException {
  boolean mayExit = false;
  StreamJob job = new StreamJob(genArgs(memLimit), mayExit);
  job.go();
  String output = TestMiniMRWithDFS.readOutput(outputPath,
                                      mr.createJobConf());
  assertEquals("output is wrong", SET_MEMORY_LIMIT,
                                  output.trim());
}
Project: hadoop-0.20    File: TestUlimit.java
/**
 * Runs the streaming program and asserts its result.
 * @param memLimit memory limit to set for the mapred child.
 * @throws IOException
 */
private void runProgram(String memLimit) throws IOException {
  boolean mayExit = false;
  StreamJob job = new StreamJob(genArgs(memLimit), mayExit);
  job.go();
  String output = TestMiniMRWithDFS.readOutput(outputPath,
                                      mr.createJobConf());
  assertEquals("output is wrong", SET_MEMORY_LIMIT,
                                  output.trim());
}
Project: hortonworks-extension    File: TestUlimit.java
/**
 * Runs the streaming program and asserts its result.
 * @param memLimit memory limit to set for the mapred child.
 * @throws IOException
 */
private void runProgram(String memLimit) throws IOException {
  boolean mayExit = false;
  StreamJob job = new StreamJob(genArgs(memLimit), mayExit);
  job.go();
  String output = TestMiniMRWithDFS.readOutput(outputPath,
                                      mr.createJobConf());
  assertEquals("output is wrong", SET_MEMORY_LIMIT,
                                  output.trim());
}
Project: hadoop-gpu    File: TestUlimit.java
/**
 * Runs the streaming program and asserts its result.
 * @param memLimit memory limit to set for the mapred child.
 * @throws IOException
 */
private void runProgram(String memLimit) throws IOException {
  boolean mayExit = false;
  StreamJob job = new StreamJob(genArgs(memLimit), mayExit);
  job.go();
  String output = TestMiniMRWithDFS.readOutput(outputPath,
                                      mr.createJobConf());
  assertEquals("output is wrong", SET_MEMORY_LIMIT,
                                  output.trim());
}
Project: hadoop-EAR    File: TestPipes.java
static void runProgram(MiniMRCluster mr, MiniDFSCluster dfs, 
                        Path program, Path inputPath, Path outputPath,
                        int numMaps, int numReduces, String[] expectedResults,
                        JobConf conf
                       ) throws IOException {
  Path wordExec = new Path("/testing/bin/application");
  JobConf job = null;
  if (conf == null) {
    job = mr.createJobConf();
  } else {
    job = new JobConf(conf);
  }
  job.setNumMapTasks(numMaps);
  job.setNumReduceTasks(numReduces);
  {
    FileSystem fs = dfs.getFileSystem();
    fs.delete(wordExec.getParent(), true);
    fs.copyFromLocalFile(program, wordExec);                                         
    Submitter.setExecutable(job, fs.makeQualified(wordExec).toString());
    Submitter.setIsJavaRecordReader(job, true);
    Submitter.setIsJavaRecordWriter(job, true);
    FileInputFormat.setInputPaths(job, inputPath);
    FileOutputFormat.setOutputPath(job, outputPath);
    RunningJob rJob = null;
    if (numReduces == 0) {
      rJob = Submitter.jobSubmit(job);

      while (!rJob.isComplete()) {
        try {
          Thread.sleep(1000);
        } catch (InterruptedException ie) {
          throw new RuntimeException(ie);
        }
      }
    } else {
      rJob = Submitter.runJob(job);
    }
    assertTrue("pipes job failed", rJob.isSuccessful());

    Counters counters = rJob.getCounters();
    Counters.Group wordCountCounters = counters.getGroup("WORDCOUNT");
    int numCounters = 0;
    for (Counter c : wordCountCounters) {
      System.out.println(c);
      ++numCounters;
    }
    assertTrue("No counters found!", (numCounters > 0));
  }

  List<String> results = new ArrayList<String>();
  for (Path p : FileUtil.stat2Paths(dfs.getFileSystem().listStatus(outputPath,
      new Utils.OutputFileUtils.OutputFilesFilter()))) {
    results.add(TestMiniMRWithDFS.readOutput(p, job));
  }
  assertEquals("number of reduces is wrong",
               expectedResults.length, results.size());
  for (int i = 0; i < results.size(); i++) {
    assertEquals("pipes program " + program + " output " + i + " wrong",
                 expectedResults[i], results.get(i));
  }
}
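Note the two submission paths: map-only jobs go through Submitter.jobSubmit and are polled once a second for completion, while jobs with reduces use the blocking Submitter.runJob. A hypothetical invocation of the helper, with placeholder binary, paths, and expected part-file contents:

// Hypothetical call; all arguments are placeholders for whatever the
// enclosing test prepares.
runProgram(mr, dfs,
           new Path("build/c++-examples/bin/wordcount-simple"), // pipes binary
           inputPath, outputPath,
           2,                                       // map tasks
           2,                                       // reduce tasks => two part files
           new String[] {"a\t2\nb\t1\n", "c\t3\n"}, // one entry per reduce
           null);                                   // null => use mr.createJobConf()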
Project: hadoop-on-lustre    File: TestPipes.java
static void runProgram(MiniMRCluster mr, MiniDFSCluster dfs, 
                        Path program, Path inputPath, Path outputPath,
                        int numMaps, int numReduces, String[] expectedResults,
                        JobConf conf
                       ) throws IOException {
  Path wordExec = new Path("testing/bin/application");
  JobConf job = null;
  if (conf == null) {
    job = mr.createJobConf();
  } else {
    job = new JobConf(conf);
  }
  job.setNumMapTasks(numMaps);
  job.setNumReduceTasks(numReduces);
  {
    FileSystem fs = dfs.getFileSystem();
    fs.delete(wordExec.getParent(), true);
    fs.copyFromLocalFile(program, wordExec);                                         
    Submitter.setExecutable(job, fs.makeQualified(wordExec).toString());
    Submitter.setIsJavaRecordReader(job, true);
    Submitter.setIsJavaRecordWriter(job, true);
    FileInputFormat.setInputPaths(job, inputPath);
    FileOutputFormat.setOutputPath(job, outputPath);
    RunningJob rJob = null;
    if (numReduces == 0) {
      rJob = Submitter.jobSubmit(job);

      while (!rJob.isComplete()) {
        try {
          Thread.sleep(1000);
        } catch (InterruptedException ie) {
          throw new RuntimeException(ie);
        }
      }
    } else {
      rJob = Submitter.runJob(job);
    }
    assertTrue("pipes job failed", rJob.isSuccessful());

    Counters counters = rJob.getCounters();
    Counters.Group wordCountCounters = counters.getGroup("WORDCOUNT");
    int numCounters = 0;
    for (Counter c : wordCountCounters) {
      System.out.println(c);
      ++numCounters;
    }
    assertTrue("No counters found!", (numCounters > 0));
  }

  List<String> results = new ArrayList<String>();
  for (Path p : FileUtil.stat2Paths(dfs.getFileSystem().listStatus(outputPath,
      new Utils.OutputFileUtils.OutputFilesFilter()))) {
    results.add(TestMiniMRWithDFS.readOutput(p, job));
  }
  assertEquals("number of reduces is wrong",
               expectedResults.length, results.size());
  for (int i = 0; i < results.size(); i++) {
    assertEquals("pipes program " + program + " output " + i + " wrong",
                 expectedResults[i], results.get(i));
  }
}
Project: RDFS    File: TestPipes.java
static void runProgram(MiniMRCluster mr, MiniDFSCluster dfs, 
                        Path program, Path inputPath, Path outputPath,
                        int numMaps, int numReduces, String[] expectedResults,
                        JobConf conf
                       ) throws IOException {
  Path wordExec = new Path("/testing/bin/application");
  JobConf job = null;
  if (conf == null) {
    job = mr.createJobConf();
  } else {
    job = new JobConf(conf);
  }
  job.setNumMapTasks(numMaps);
  job.setNumReduceTasks(numReduces);
  {
    FileSystem fs = dfs.getFileSystem();
    fs.delete(wordExec.getParent(), true);
    fs.copyFromLocalFile(program, wordExec);                                         
    Submitter.setExecutable(job, fs.makeQualified(wordExec).toString());
    Submitter.setIsJavaRecordReader(job, true);
    Submitter.setIsJavaRecordWriter(job, true);
    FileInputFormat.setInputPaths(job, inputPath);
    FileOutputFormat.setOutputPath(job, outputPath);
    RunningJob rJob = null;
    if (numReduces == 0) {
      rJob = Submitter.jobSubmit(job);

      while (!rJob.isComplete()) {
        try {
          Thread.sleep(1000);
        } catch (InterruptedException ie) {
          throw new RuntimeException(ie);
        }
      }
    } else {
      rJob = Submitter.runJob(job);
    }
    assertTrue("pipes job failed", rJob.isSuccessful());

    Counters counters = rJob.getCounters();
    Counters.Group wordCountCounters = counters.getGroup("WORDCOUNT");
    int numCounters = 0;
    for (Counter c : wordCountCounters) {
      System.out.println(c);
      ++numCounters;
    }
    assertTrue("No counters found!", (numCounters > 0));
  }

  List<String> results = new ArrayList<String>();
  for (Path p : FileUtil.stat2Paths(dfs.getFileSystem().listStatus(outputPath,
      new Utils.OutputFileUtils.OutputFilesFilter()))) {
    results.add(TestMiniMRWithDFS.readOutput(p, job));
  }
  assertEquals("number of reduces is wrong",
               expectedResults.length, results.size());
  for (int i = 0; i < results.size(); i++) {
    assertEquals("pipes program " + program + " output " + i + " wrong",
                 expectedResults[i], results.get(i));
  }
}
Project: hadoop-0.20    File: TestPipes.java
private void runProgram(MiniMRCluster mr, MiniDFSCluster dfs, 
                        Path program, Path inputPath, Path outputPath,
                        int numMaps, int numReduces, String[] expectedResults
                       ) throws IOException {
  Path wordExec = new Path("/testing/bin/application");
  JobConf job = mr.createJobConf();
  job.setNumMapTasks(numMaps);
  job.setNumReduceTasks(numReduces);
  {
    FileSystem fs = dfs.getFileSystem();
    fs.delete(wordExec.getParent(), true);
    fs.copyFromLocalFile(program, wordExec);                                         
    Submitter.setExecutable(job, fs.makeQualified(wordExec).toString());
    Submitter.setIsJavaRecordReader(job, true);
    Submitter.setIsJavaRecordWriter(job, true);
    FileInputFormat.setInputPaths(job, inputPath);
    FileOutputFormat.setOutputPath(job, outputPath);
    RunningJob rJob = null;
    if (numReduces == 0) {
      rJob = Submitter.jobSubmit(job);

      while (!rJob.isComplete()) {
        try {
          Thread.sleep(1000);
        } catch (InterruptedException ie) {
          throw new RuntimeException(ie);
        }
      }
    } else {
      rJob = Submitter.runJob(job);
    }
    assertTrue("pipes job failed", rJob.isSuccessful());

    Counters counters = rJob.getCounters();
    Counters.Group wordCountCounters = counters.getGroup("WORDCOUNT");
    int numCounters = 0;
    for (Counter c : wordCountCounters) {
      System.out.println(c);
      ++numCounters;
    }
    assertTrue("No counters found!", (numCounters > 0));
  }

  List<String> results = new ArrayList<String>();
  // OutputLogFilter skips the _logs side directory under the output path.
  for (Path p : FileUtil.stat2Paths(dfs.getFileSystem().listStatus(outputPath,
                                new OutputLogFilter()))) {
    results.add(TestMiniMRWithDFS.readOutput(p, job));
  }
  assertEquals("number of reduces is wrong",
               expectedResults.length, results.size());
  for (int i = 0; i < results.size(); i++) {
    assertEquals("pipes program " + program + " output " + i + " wrong",
                 expectedResults[i], results.get(i));
  }
}
Project: hortonworks-extension    File: TestPipes.java
static void runProgram(MiniMRCluster mr, MiniDFSCluster dfs, 
                        Path program, Path inputPath, Path outputPath,
                        int numMaps, int numReduces, String[] expectedResults,
                        JobConf conf
                       ) throws IOException {
  Path wordExec = new Path("testing/bin/application");
  JobConf job = null;
  if (conf == null) {
    job = mr.createJobConf();
  } else {
    job = new JobConf(conf);
  }
  job.setNumMapTasks(numMaps);
  job.setNumReduceTasks(numReduces);
  {
    FileSystem fs = dfs.getFileSystem();
    fs.delete(wordExec.getParent(), true);
    fs.copyFromLocalFile(program, wordExec);                                         
    Submitter.setExecutable(job, fs.makeQualified(wordExec).toString());
    Submitter.setIsJavaRecordReader(job, true);
    Submitter.setIsJavaRecordWriter(job, true);
    FileInputFormat.setInputPaths(job, inputPath);
    FileOutputFormat.setOutputPath(job, outputPath);
    RunningJob rJob = null;
    if (numReduces == 0) {
      rJob = Submitter.jobSubmit(job);

      while (!rJob.isComplete()) {
        try {
          Thread.sleep(1000);
        } catch (InterruptedException ie) {
          throw new RuntimeException(ie);
        }
      }
    } else {
      rJob = Submitter.runJob(job);
    }
    assertTrue("pipes job failed", rJob.isSuccessful());

    Counters counters = rJob.getCounters();
    Counters.Group wordCountCounters = counters.getGroup("WORDCOUNT");
    int numCounters = 0;
    for (Counter c : wordCountCounters) {
      System.out.println(c);
      ++numCounters;
    }
    assertTrue("No counters found!", (numCounters > 0));
  }

  List<String> results = new ArrayList<String>();
  for (Path p : FileUtil.stat2Paths(dfs.getFileSystem().listStatus(outputPath,
      new Utils.OutputFileUtils.OutputFilesFilter()))) {
    results.add(TestMiniMRWithDFS.readOutput(p, job));
  }
  assertEquals("number of reduces is wrong",
               expectedResults.length, results.size());
  for (int i = 0; i < results.size(); i++) {
    assertEquals("pipes program " + program + " output " + i + " wrong",
                 expectedResults[i], results.get(i));
  }
}
Project: hadoop-gpu    File: TestPipes.java
private void runProgram(MiniMRCluster mr, MiniDFSCluster dfs, 
                        Path program, Path inputPath, Path outputPath,
                        int numMaps, int numReduces, String[] expectedResults
                       ) throws IOException {
  Path wordExec = new Path("/testing/bin/application");
  JobConf job = mr.createJobConf();
  job.setNumMapTasks(numMaps);
  job.setNumReduceTasks(numReduces);
  {
    FileSystem fs = dfs.getFileSystem();
    fs.delete(wordExec.getParent(), true);
    fs.copyFromLocalFile(program, wordExec);                                         
    Submitter.setExecutable(job, fs.makeQualified(wordExec).toString());
    Submitter.setIsJavaRecordReader(job, true);
    Submitter.setIsJavaRecordWriter(job, true);
    FileInputFormat.setInputPaths(job, inputPath);
    FileOutputFormat.setOutputPath(job, outputPath);
    RunningJob rJob = null;
    if (numReduces == 0) {
      rJob = Submitter.jobSubmit(job);

      while (!rJob.isComplete()) {
        try {
          Thread.sleep(1000);
        } catch (InterruptedException ie) {
          throw new RuntimeException(ie);
        }
      }
    } else {
      rJob = Submitter.runJob(job);
    }
    assertTrue("pipes job failed", rJob.isSuccessful());

    Counters counters = rJob.getCounters();
    Counters.Group wordCountCounters = counters.getGroup("WORDCOUNT");
    int numCounters = 0;
    for (Counter c : wordCountCounters) {
      System.out.println(c);
      ++numCounters;
    }
    assertTrue("No counters found!", (numCounters > 0));
  }

  List<String> results = new ArrayList<String>();
  for (Path p : FileUtil.stat2Paths(dfs.getFileSystem().listStatus(outputPath,
                                new OutputLogFilter()))) {
    results.add(TestMiniMRWithDFS.readOutput(p, job));
  }
  assertEquals("number of reduces is wrong",
               expectedResults.length, results.size());
  for (int i = 0; i < results.size(); i++) {
    assertEquals("pipes program " + program + " output " + i + " wrong",
                 expectedResults[i], results.get(i));
  }
}