Java 类org.apache.hadoop.mapred.MRCaching.TestResult 实例源码

项目:hadoop    文件:TestMiniMRDFSCaching.java   
/**
 * End-to-end check of the MapReduce distributed cache against HDFS:
 * brings up a MiniDFSCluster and a 2-tracker MiniMRCluster, seeds the
 * cache directory, then runs the word-count cache job twice, asserting
 * after each run that the cached archives were localized intact.
 *
 * @throws IOException if cluster setup or job submission fails
 */
public void testWithDFS() throws IOException {
  MiniMRCluster mr = null;
  MiniDFSCluster dfs = null;
  FileSystem fileSys = null;
  try {
    JobConf conf = new JobConf();
    dfs = new MiniDFSCluster.Builder(conf).build();
    fileSys = dfs.getFileSystem();
    mr = new MiniMRCluster(2, fileSys.getUri().toString(), 4);
    MRCaching.setupCache("/cachedir", fileSys);
    // run the wordcount example with caching
    TestResult ret = MRCaching.launchMRCache("/testing/wc/input",
                                          "/testing/wc/output",
                                          "/cachedir",
                                          mr.createJobConf(),
                                          "The quick brown fox\nhas many silly\n"
                                          + "red fox sox\n");
    assertTrue("Archives not matching", ret.isOutputOk);
    // launch MR cache with symlinks
    // NOTE(review): despite the comment above, this call is identical to the
    // first one -- no symlink flag is passed. Presumably launchMRCache now
    // always creates symlinks internally; confirm against MRCaching.
    ret = MRCaching.launchMRCache("/testing/wc/input",
                                  "/testing/wc/output",
                                  "/cachedir",
                                  mr.createJobConf(),
                                  "The quick brown fox\nhas many silly\n"
                                  + "red fox sox\n");
    assertTrue("Archives not matching", ret.isOutputOk);
  } finally {
    // Each guard tolerates partial setup if an earlier step threw.
    if (fileSys != null) {
      fileSys.close();
    }
    if (dfs != null) {
      dfs.shutdown();
    }
    if (mr != null) {
      mr.shutdown();
    }
  }
}
项目:aliyun-oss-hadoop-fs    文件:TestMiniMRDFSCaching.java   
/**
 * Verifies MapReduce distributed-cache handling on HDFS: the word-count
 * cache job is submitted twice against a mini DFS/MR cluster pair and the
 * localized archives are validated after each submission.
 *
 * @throws IOException if cluster setup or job submission fails
 */
public void testWithDFS() throws IOException {
  MiniMRCluster mrCluster = null;
  MiniDFSCluster dfsCluster = null;
  FileSystem fs = null;
  final String inputText =
      "The quick brown fox\nhas many silly\n" + "red fox sox\n";
  try {
    JobConf conf = new JobConf();
    dfsCluster = new MiniDFSCluster.Builder(conf).build();
    fs = dfsCluster.getFileSystem();
    mrCluster = new MiniMRCluster(2, fs.getUri().toString(), 4);
    MRCaching.setupCache("/cachedir", fs);
    // First submission of the word-count job with caching enabled.
    TestResult result = MRCaching.launchMRCache(
        "/testing/wc/input", "/testing/wc/output", "/cachedir",
        mrCluster.createJobConf(), inputText);
    assertTrue("Archives not matching", result.isOutputOk);
    // Second, identical submission.
    result = MRCaching.launchMRCache(
        "/testing/wc/input", "/testing/wc/output", "/cachedir",
        mrCluster.createJobConf(), inputText);
    assertTrue("Archives not matching", result.isOutputOk);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
    if (mrCluster != null) {
      mrCluster.shutdown();
    }
  }
}
项目:big-c    文件:TestMiniMRDFSCaching.java   
/**
 * Distributed-cache regression test: starts a MiniDFSCluster and a
 * two-tracker MiniMRCluster, populates the cache directory, and runs the
 * word-count cache job twice, checking the localized archives each time.
 *
 * @throws IOException on cluster or job failure
 */
public void testWithDFS() throws IOException {
  MiniMRCluster mr = null;
  MiniDFSCluster dfs = null;
  FileSystem fileSys = null;
  try {
    JobConf conf = new JobConf();
    dfs = new MiniDFSCluster.Builder(conf).build();
    fileSys = dfs.getFileSystem();
    mr = new MiniMRCluster(2, fileSys.getUri().toString(), 4);
    MRCaching.setupCache("/cachedir", fileSys);
    // run the wordcount example with caching
    TestResult ret = MRCaching.launchMRCache("/testing/wc/input",
                                          "/testing/wc/output",
                                          "/cachedir",
                                          mr.createJobConf(),
                                          "The quick brown fox\nhas many silly\n"
                                          + "red fox sox\n");
    assertTrue("Archives not matching", ret.isOutputOk);
    // launch MR cache with symlinks
    // NOTE(review): this second call takes no symlink flag and is identical
    // to the first; the stale comment suggests symlinks are now handled
    // inside launchMRCache -- verify against MRCaching.
    ret = MRCaching.launchMRCache("/testing/wc/input",
                                  "/testing/wc/output",
                                  "/cachedir",
                                  mr.createJobConf(),
                                  "The quick brown fox\nhas many silly\n"
                                  + "red fox sox\n");
    assertTrue("Archives not matching", ret.isOutputOk);
  } finally {
    // Null checks allow cleanup after a partially failed setup.
    if (fileSys != null) {
      fileSys.close();
    }
    if (dfs != null) {
      dfs.shutdown();
    }
    if (mr != null) {
      mr.shutdown();
    }
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestMiniMRDFSCaching.java   
/**
 * Runs the word-count distributed-cache job twice on a freshly started
 * mini HDFS + MR cluster and asserts the cached archives match after
 * each run.
 *
 * @throws IOException on cluster or job failure
 */
public void testWithDFS() throws IOException {
  MiniMRCluster miniMr = null;
  MiniDFSCluster miniDfs = null;
  FileSystem hdfs = null;
  final String corpus =
      "The quick brown fox\nhas many silly\n" + "red fox sox\n";
  try {
    JobConf conf = new JobConf();
    miniDfs = new MiniDFSCluster.Builder(conf).build();
    hdfs = miniDfs.getFileSystem();
    miniMr = new MiniMRCluster(2, hdfs.getUri().toString(), 4);
    MRCaching.setupCache("/cachedir", hdfs);
    // Run #1: word-count with the distributed cache populated.
    TestResult outcome = MRCaching.launchMRCache(
        "/testing/wc/input", "/testing/wc/output", "/cachedir",
        miniMr.createJobConf(), corpus);
    assertTrue("Archives not matching", outcome.isOutputOk);
    // Run #2: same job, same inputs.
    outcome = MRCaching.launchMRCache(
        "/testing/wc/input", "/testing/wc/output", "/cachedir",
        miniMr.createJobConf(), corpus);
    assertTrue("Archives not matching", outcome.isOutputOk);
  } finally {
    if (hdfs != null) {
      hdfs.close();
    }
    if (miniDfs != null) {
      miniDfs.shutdown();
    }
    if (miniMr != null) {
      miniMr.shutdown();
    }
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestMiniMRDFSCaching.java   
/**
 * Distributed-cache test on HDFS using the legacy MiniDFSCluster
 * constructor: the word-count cache job is run once without symlinks and
 * once with symlinks, validating the localized archives after each run.
 *
 * @throws IOException on cluster or job failure
 */
public void testWithDFS() throws IOException {
  MiniMRCluster mr = null;
  MiniDFSCluster dfs = null;
  FileSystem fileSys = null;
  try {
    JobConf conf = new JobConf();
    dfs = new MiniDFSCluster(conf, 1, true, null);
    fileSys = dfs.getFileSystem();
    mr = new MiniMRCluster(2, fileSys.getName(), 4);
    MRCaching.setupCache("/cachedir", fileSys);
    // run the wordcount example with caching
    // (final 'false' argument: symlinks disabled for this run)
    TestResult ret = MRCaching.launchMRCache("/testing/wc/input",
                                          "/testing/wc/output",
                                          "/cachedir",
                                          mr.createJobConf(),
                                          "The quick brown fox\nhas many silly\n"
                                          + "red fox sox\n", false);
    assertTrue("Archives not matching", ret.isOutputOk);
    // launch MR cache with symlinks
    // (final 'true' argument: symlinks enabled for this run)
    ret = MRCaching.launchMRCache("/testing/wc/input",
                                  "/testing/wc/output",
                                  "/cachedir",
                                  mr.createJobConf(),
                                  "The quick brown fox\nhas many silly\n"
                                  + "red fox sox\n", true);
    assertTrue("Archives not matching", ret.isOutputOk);
  } finally {
    // Guards tolerate partial setup if an earlier step threw.
    if (fileSys != null) {
      fileSys.close();
    }
    if (dfs != null) {
      dfs.shutdown();
    }
    if (mr != null) {
      mr.shutdown();
    }
  }
}
项目:hadoop-EAR    文件:TestMiniMRDFSCaching.java   
/**
 * Distributed-cache test against a checksumming HDFS implementation:
 * configures ChecksumDistributedFileSystem, then runs the word-count
 * cache job once without and once with symlinks, validating the cached
 * archives after each run.
 *
 * @throws IOException on cluster or job failure
 */
public void testWithDFS() throws IOException {
  MiniMRCluster mrCluster = null;
  MiniDFSCluster dfsCluster = null;
  FileSystem fs = null;
  final String inputText =
      "The quick brown fox\nhas many silly\n" + "red fox sox\n";
  try {
    JobConf conf = new JobConf();
    // Route hdfs:// URIs through the checksumming filesystem variant.
    conf.set("fs.hdfs.impl",
             "org.apache.hadoop.hdfs.ChecksumDistributedFileSystem");
    dfsCluster = new MiniDFSCluster(conf, 1, true, null);
    fs = dfsCluster.getFileSystem();
    mrCluster = new MiniMRCluster(2, fs.getName(), 4);
    MRCaching.setupCache("/cachedir", fs);
    // Run #1: symlinks disabled (trailing 'false').
    TestResult result = MRCaching.launchMRCache(
        "/testing/wc/input", "/testing/wc/output", "/cachedir",
        mrCluster.createJobConf(), inputText, false);
    assertTrue("Archives not matching", result.isOutputOk);
    // Run #2: symlinks enabled (trailing 'true').
    result = MRCaching.launchMRCache(
        "/testing/wc/input", "/testing/wc/output", "/cachedir",
        mrCluster.createJobConf(), inputText, true);
    assertTrue("Archives not matching", result.isOutputOk);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
    if (mrCluster != null) {
      mrCluster.shutdown();
    }
  }
}
项目:hadoop-plus    文件:TestMiniMRDFSCaching.java   
/**
 * Distributed-cache test using the legacy MiniDFSCluster constructor and
 * a filesystem URI: runs the word-count cache job twice and asserts that
 * the localized archives match on both runs.
 *
 * @throws IOException on cluster or job failure
 */
public void testWithDFS() throws IOException {
  MiniMRCluster mr = null;
  MiniDFSCluster dfs = null;
  FileSystem fileSys = null;
  try {
    JobConf conf = new JobConf();
    dfs = new MiniDFSCluster(conf, 1, true, null);
    fileSys = dfs.getFileSystem();
    mr = new MiniMRCluster(2, fileSys.getUri().toString(), 4);
    MRCaching.setupCache("/cachedir", fileSys);
    // run the wordcount example with caching
    TestResult ret = MRCaching.launchMRCache("/testing/wc/input",
                                          "/testing/wc/output",
                                          "/cachedir",
                                          mr.createJobConf(),
                                          "The quick brown fox\nhas many silly\n"
                                          + "red fox sox\n");
    assertTrue("Archives not matching", ret.isOutputOk);
    // launch MR cache with symlinks
    // NOTE(review): no symlink flag is passed here; the second call is
    // identical to the first. Comment appears stale -- confirm against
    // MRCaching.launchMRCache.
    ret = MRCaching.launchMRCache("/testing/wc/input",
                                  "/testing/wc/output",
                                  "/cachedir",
                                  mr.createJobConf(),
                                  "The quick brown fox\nhas many silly\n"
                                  + "red fox sox\n");
    assertTrue("Archives not matching", ret.isOutputOk);
  } finally {
    // Null checks permit teardown after a partially failed setup.
    if (fileSys != null) {
      fileSys.close();
    }
    if (dfs != null) {
      dfs.shutdown();
    }
    if (mr != null) {
      mr.shutdown();
    }
  }
}
项目:FlexMap    文件:TestMiniMRDFSCaching.java   
/**
 * Exercises the MapReduce distributed cache against HDFS by submitting
 * the word-count cache job twice and verifying the localized archives
 * after each submission.
 *
 * @throws IOException on cluster or job failure
 */
public void testWithDFS() throws IOException {
  MiniMRCluster mrCluster = null;
  MiniDFSCluster dfsCluster = null;
  FileSystem fs = null;
  final String words =
      "The quick brown fox\nhas many silly\n" + "red fox sox\n";
  try {
    JobConf conf = new JobConf();
    dfsCluster = new MiniDFSCluster.Builder(conf).build();
    fs = dfsCluster.getFileSystem();
    mrCluster = new MiniMRCluster(2, fs.getUri().toString(), 4);
    MRCaching.setupCache("/cachedir", fs);
    // First pass: word-count with the distributed cache populated.
    TestResult outcome = MRCaching.launchMRCache(
        "/testing/wc/input", "/testing/wc/output", "/cachedir",
        mrCluster.createJobConf(), words);
    assertTrue("Archives not matching", outcome.isOutputOk);
    // Second, identical pass.
    outcome = MRCaching.launchMRCache(
        "/testing/wc/input", "/testing/wc/output", "/cachedir",
        mrCluster.createJobConf(), words);
    assertTrue("Archives not matching", outcome.isOutputOk);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
    if (mrCluster != null) {
      mrCluster.shutdown();
    }
  }
}
项目:hops    文件:TestMiniMRDFSCaching.java   
/**
 * JUnit 4 variant of the distributed-cache test: starts a
 * MiniDFSCluster and a two-tracker MiniMRCluster, seeds the cache
 * directory, and runs the word-count cache job twice, asserting the
 * localized archives match after each run.
 *
 * @throws IOException on cluster or job failure
 */
@Test
public void testWithDFS() throws IOException {
  MiniMRCluster mr = null;
  MiniDFSCluster dfs = null;
  FileSystem fileSys = null;
  try {
    JobConf conf = new JobConf();
    dfs = new MiniDFSCluster.Builder(conf).build();
    fileSys = dfs.getFileSystem();
    mr = new MiniMRCluster(2, fileSys.getUri().toString(), 4);
    MRCaching.setupCache("/cachedir", fileSys);
    // run the wordcount example with caching
    TestResult ret = MRCaching.launchMRCache("/testing/wc/input",
                                          "/testing/wc/output",
                                          "/cachedir",
                                          mr.createJobConf(),
                                          "The quick brown fox\nhas many silly\n"
                                          + "red fox sox\n");
    assertTrue("Archives not matching", ret.isOutputOk);
    // launch MR cache with symlinks
    // NOTE(review): comment is stale -- no symlink flag is passed and this
    // call is identical to the first; presumably symlinks are handled
    // inside launchMRCache. Confirm against MRCaching.
    ret = MRCaching.launchMRCache("/testing/wc/input",
                                  "/testing/wc/output",
                                  "/cachedir",
                                  mr.createJobConf(),
                                  "The quick brown fox\nhas many silly\n"
                                  + "red fox sox\n");
    assertTrue("Archives not matching", ret.isOutputOk);
  } finally {
    // Each guard tolerates partial setup if an earlier step threw.
    if (fileSys != null) {
      fileSys.close();
    }
    if (dfs != null) {
      dfs.shutdown();
    }
    if (mr != null) {
      mr.shutdown();
    }
  }
}
项目:hadoop-TCP    文件:TestMiniMRDFSCaching.java   
/**
 * Validates distributed-cache localization on HDFS: the word-count cache
 * job is run twice against a one-node DFS and a two-tracker MR cluster,
 * with the archives checked after each run.
 *
 * @throws IOException on cluster or job failure
 */
public void testWithDFS() throws IOException {
  MiniMRCluster mrCluster = null;
  MiniDFSCluster dfsCluster = null;
  FileSystem fs = null;
  final String inputText =
      "The quick brown fox\nhas many silly\n" + "red fox sox\n";
  try {
    JobConf conf = new JobConf();
    dfsCluster = new MiniDFSCluster(conf, 1, true, null);
    fs = dfsCluster.getFileSystem();
    mrCluster = new MiniMRCluster(2, fs.getUri().toString(), 4);
    MRCaching.setupCache("/cachedir", fs);
    // First run of the word-count job with caching.
    TestResult result = MRCaching.launchMRCache(
        "/testing/wc/input", "/testing/wc/output", "/cachedir",
        mrCluster.createJobConf(), inputText);
    assertTrue("Archives not matching", result.isOutputOk);
    // Second, identical run.
    result = MRCaching.launchMRCache(
        "/testing/wc/input", "/testing/wc/output", "/cachedir",
        mrCluster.createJobConf(), inputText);
    assertTrue("Archives not matching", result.isOutputOk);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
    if (mrCluster != null) {
      mrCluster.shutdown();
    }
  }
}
项目:hadoop-on-lustre    文件:TestMiniMRDFSCaching.java   
/**
 * Distributed-cache test over a checksumming HDFS: configures
 * ChecksumDistributedFileSystem, then submits the word-count cache job
 * without symlinks and again with symlinks, checking the localized
 * archives each time.
 *
 * @throws IOException on cluster or job failure
 */
public void testWithDFS() throws IOException {
  MiniMRCluster miniMr = null;
  MiniDFSCluster miniDfs = null;
  FileSystem hdfs = null;
  final String corpus =
      "The quick brown fox\nhas many silly\n" + "red fox sox\n";
  try {
    JobConf conf = new JobConf();
    // Use the checksumming DFS implementation for hdfs:// URIs.
    conf.set("fs.hdfs.impl",
             "org.apache.hadoop.hdfs.ChecksumDistributedFileSystem");
    miniDfs = new MiniDFSCluster(conf, 1, true, null);
    hdfs = miniDfs.getFileSystem();
    miniMr = new MiniMRCluster(2, hdfs.getName(), 4);
    MRCaching.setupCache("/cachedir", hdfs);
    // Run without symlinks.
    TestResult outcome = MRCaching.launchMRCache(
        "/testing/wc/input", "/testing/wc/output", "/cachedir",
        miniMr.createJobConf(), corpus, false);
    assertTrue("Archives not matching", outcome.isOutputOk);
    // Run with symlinks.
    outcome = MRCaching.launchMRCache(
        "/testing/wc/input", "/testing/wc/output", "/cachedir",
        miniMr.createJobConf(), corpus, true);
    assertTrue("Archives not matching", outcome.isOutputOk);
  } finally {
    if (hdfs != null) {
      hdfs.close();
    }
    if (miniDfs != null) {
      miniDfs.shutdown();
    }
    if (miniMr != null) {
      miniMr.shutdown();
    }
  }
}
项目:hardfs    文件:TestMiniMRDFSCaching.java   
/**
 * Distributed-cache test with the legacy MiniDFSCluster constructor:
 * runs the word-count cache job twice on a mini DFS/MR cluster pair and
 * asserts the localized archives match after each run.
 *
 * @throws IOException on cluster or job failure
 */
public void testWithDFS() throws IOException {
  MiniMRCluster mr = null;
  MiniDFSCluster dfs = null;
  FileSystem fileSys = null;
  try {
    JobConf conf = new JobConf();
    dfs = new MiniDFSCluster(conf, 1, true, null);
    fileSys = dfs.getFileSystem();
    mr = new MiniMRCluster(2, fileSys.getUri().toString(), 4);
    MRCaching.setupCache("/cachedir", fileSys);
    // run the wordcount example with caching
    TestResult ret = MRCaching.launchMRCache("/testing/wc/input",
                                          "/testing/wc/output",
                                          "/cachedir",
                                          mr.createJobConf(),
                                          "The quick brown fox\nhas many silly\n"
                                          + "red fox sox\n");
    assertTrue("Archives not matching", ret.isOutputOk);
    // launch MR cache with symlinks
    // NOTE(review): stale comment -- no symlink flag is passed, so this
    // call repeats the first one exactly; verify against MRCaching.
    ret = MRCaching.launchMRCache("/testing/wc/input",
                                  "/testing/wc/output",
                                  "/cachedir",
                                  mr.createJobConf(),
                                  "The quick brown fox\nhas many silly\n"
                                  + "red fox sox\n");
    assertTrue("Archives not matching", ret.isOutputOk);
  } finally {
    // Null checks allow teardown even when setup failed midway.
    if (fileSys != null) {
      fileSys.close();
    }
    if (dfs != null) {
      dfs.shutdown();
    }
    if (mr != null) {
      mr.shutdown();
    }
  }
}
项目:hadoop-on-lustre2    文件:TestMiniMRDFSCaching.java   
/**
 * Submits the word-count distributed-cache job twice against a freshly
 * built mini HDFS + MR cluster and verifies the cached archives after
 * each submission.
 *
 * @throws IOException on cluster or job failure
 */
public void testWithDFS() throws IOException {
  MiniMRCluster mrCluster = null;
  MiniDFSCluster dfsCluster = null;
  FileSystem fs = null;
  final String text =
      "The quick brown fox\nhas many silly\n" + "red fox sox\n";
  try {
    JobConf conf = new JobConf();
    dfsCluster = new MiniDFSCluster.Builder(conf).build();
    fs = dfsCluster.getFileSystem();
    mrCluster = new MiniMRCluster(2, fs.getUri().toString(), 4);
    MRCaching.setupCache("/cachedir", fs);
    // First run of the caching word-count job.
    TestResult result = MRCaching.launchMRCache(
        "/testing/wc/input", "/testing/wc/output", "/cachedir",
        mrCluster.createJobConf(), text);
    assertTrue("Archives not matching", result.isOutputOk);
    // Repeat the identical run.
    result = MRCaching.launchMRCache(
        "/testing/wc/input", "/testing/wc/output", "/cachedir",
        mrCluster.createJobConf(), text);
    assertTrue("Archives not matching", result.isOutputOk);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
    if (mrCluster != null) {
      mrCluster.shutdown();
    }
  }
}
项目:RDFS    文件:TestMiniMRDFSCaching.java   
/**
 * Distributed-cache test over ChecksumDistributedFileSystem: runs the
 * word-count cache job once with symlinks disabled and once enabled,
 * asserting the localized archives match after each run.
 *
 * @throws IOException on cluster or job failure
 */
public void testWithDFS() throws IOException {
  MiniMRCluster mrCluster = null;
  MiniDFSCluster dfsCluster = null;
  FileSystem fs = null;
  final String inputText =
      "The quick brown fox\nhas many silly\n" + "red fox sox\n";
  try {
    JobConf conf = new JobConf();
    // Checksumming DFS implementation for hdfs:// URIs.
    conf.set("fs.hdfs.impl",
             "org.apache.hadoop.hdfs.ChecksumDistributedFileSystem");
    dfsCluster = new MiniDFSCluster(conf, 1, true, null);
    fs = dfsCluster.getFileSystem();
    mrCluster = new MiniMRCluster(2, fs.getName(), 4);
    MRCaching.setupCache("/cachedir", fs);
    // Symlinks off for the first run.
    TestResult result = MRCaching.launchMRCache(
        "/testing/wc/input", "/testing/wc/output", "/cachedir",
        mrCluster.createJobConf(), inputText, false);
    assertTrue("Archives not matching", result.isOutputOk);
    // Symlinks on for the second run.
    result = MRCaching.launchMRCache(
        "/testing/wc/input", "/testing/wc/output", "/cachedir",
        mrCluster.createJobConf(), inputText, true);
    assertTrue("Archives not matching", result.isOutputOk);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
    if (mrCluster != null) {
      mrCluster.shutdown();
    }
  }
}
项目:hadoop-0.20    文件:TestMiniMRDFSCaching.java   
/**
 * Legacy-era distributed-cache test: configures the checksumming DFS,
 * then submits the word-count cache job with symlinks disabled and then
 * enabled, validating the cached archives after each submission.
 *
 * @throws IOException on cluster or job failure
 */
public void testWithDFS() throws IOException {
  MiniMRCluster miniMr = null;
  MiniDFSCluster miniDfs = null;
  FileSystem hdfs = null;
  final String words =
      "The quick brown fox\nhas many silly\n" + "red fox sox\n";
  try {
    JobConf conf = new JobConf();
    // Route hdfs:// through the checksumming filesystem.
    conf.set("fs.hdfs.impl",
             "org.apache.hadoop.hdfs.ChecksumDistributedFileSystem");
    miniDfs = new MiniDFSCluster(conf, 1, true, null);
    hdfs = miniDfs.getFileSystem();
    miniMr = new MiniMRCluster(2, hdfs.getName(), 4);
    MRCaching.setupCache("/cachedir", hdfs);
    // Pass 1: no symlinks.
    TestResult outcome = MRCaching.launchMRCache(
        "/testing/wc/input", "/testing/wc/output", "/cachedir",
        miniMr.createJobConf(), words, false);
    assertTrue("Archives not matching", outcome.isOutputOk);
    // Pass 2: with symlinks.
    outcome = MRCaching.launchMRCache(
        "/testing/wc/input", "/testing/wc/output", "/cachedir",
        miniMr.createJobConf(), words, true);
    assertTrue("Archives not matching", outcome.isOutputOk);
  } finally {
    if (hdfs != null) {
      hdfs.close();
    }
    if (miniDfs != null) {
      miniDfs.shutdown();
    }
    if (miniMr != null) {
      miniMr.shutdown();
    }
  }
}
项目:hanoi-hadoop-2.0.0-cdh    文件:TestMiniMRDFSCaching.java   
/**
 * Distributed-cache test (legacy constructor, default DFS): runs the
 * word-count cache job once without and once with symlinks, asserting
 * the localized archives match after each run.
 *
 * @throws IOException on cluster or job failure
 */
public void testWithDFS() throws IOException {
  MiniMRCluster mr = null;
  MiniDFSCluster dfs = null;
  FileSystem fileSys = null;
  try {
    JobConf conf = new JobConf();
    dfs = new MiniDFSCluster(conf, 1, true, null);
    fileSys = dfs.getFileSystem();
    mr = new MiniMRCluster(2, fileSys.getName(), 4);
    MRCaching.setupCache("/cachedir", fileSys);
    // run the wordcount example with caching
    // (final 'false' argument: symlinks disabled)
    TestResult ret = MRCaching.launchMRCache("/testing/wc/input",
                                          "/testing/wc/output",
                                          "/cachedir",
                                          mr.createJobConf(),
                                          "The quick brown fox\nhas many silly\n"
                                          + "red fox sox\n", false);
    assertTrue("Archives not matching", ret.isOutputOk);
    // launch MR cache with symlinks
    // (final 'true' argument: symlinks enabled)
    ret = MRCaching.launchMRCache("/testing/wc/input",
                                  "/testing/wc/output",
                                  "/cachedir",
                                  mr.createJobConf(),
                                  "The quick brown fox\nhas many silly\n"
                                  + "red fox sox\n", true);
    assertTrue("Archives not matching", ret.isOutputOk);
  } finally {
    // Null checks permit teardown after a partially failed setup.
    if (fileSys != null) {
      fileSys.close();
    }
    if (dfs != null) {
      dfs.shutdown();
    }
    if (mr != null) {
      mr.shutdown();
    }
  }
}
项目:mapreduce-fork    文件:TestMiniMRDFSCaching.java   
/**
 * Runs the word-count distributed-cache job twice on a mini DFS/MR
 * cluster pair -- first with symlinks off, then with symlinks on --
 * asserting the localized archives match each time.
 *
 * @throws IOException on cluster or job failure
 */
public void testWithDFS() throws IOException {
  MiniMRCluster mrCluster = null;
  MiniDFSCluster dfsCluster = null;
  FileSystem fs = null;
  final String inputText =
      "The quick brown fox\nhas many silly\n" + "red fox sox\n";
  try {
    JobConf conf = new JobConf();
    dfsCluster = new MiniDFSCluster(conf, 1, true, null);
    fs = dfsCluster.getFileSystem();
    mrCluster = new MiniMRCluster(2, fs.getUri().toString(), 4);
    MRCaching.setupCache("/cachedir", fs);
    // Symlinks disabled on the first run.
    TestResult result = MRCaching.launchMRCache(
        "/testing/wc/input", "/testing/wc/output", "/cachedir",
        mrCluster.createJobConf(), inputText, false);
    assertTrue("Archives not matching", result.isOutputOk);
    // Symlinks enabled on the second run.
    result = MRCaching.launchMRCache(
        "/testing/wc/input", "/testing/wc/output", "/cachedir",
        mrCluster.createJobConf(), inputText, true);
    assertTrue("Archives not matching", result.isOutputOk);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
    if (mrCluster != null) {
      mrCluster.shutdown();
    }
  }
}
项目:hortonworks-extension    文件:TestMiniMRDFSCaching.java   
/**
 * Checksummed-HDFS distributed-cache test: sets the DFS implementation
 * to ChecksumDistributedFileSystem, then runs the word-count cache job
 * with symlinks off and again with symlinks on, validating the archives
 * after each run.
 *
 * @throws IOException on cluster or job failure
 */
public void testWithDFS() throws IOException {
  MiniMRCluster mrCluster = null;
  MiniDFSCluster dfsCluster = null;
  FileSystem fs = null;
  final String corpus =
      "The quick brown fox\nhas many silly\n" + "red fox sox\n";
  try {
    JobConf conf = new JobConf();
    // Select the checksumming DFS implementation.
    conf.set("fs.hdfs.impl",
             "org.apache.hadoop.hdfs.ChecksumDistributedFileSystem");
    dfsCluster = new MiniDFSCluster(conf, 1, true, null);
    fs = dfsCluster.getFileSystem();
    mrCluster = new MiniMRCluster(2, fs.getName(), 4);
    MRCaching.setupCache("/cachedir", fs);
    // No symlinks on the first run.
    TestResult result = MRCaching.launchMRCache(
        "/testing/wc/input", "/testing/wc/output", "/cachedir",
        mrCluster.createJobConf(), corpus, false);
    assertTrue("Archives not matching", result.isOutputOk);
    // Symlinks on the second run.
    result = MRCaching.launchMRCache(
        "/testing/wc/input", "/testing/wc/output", "/cachedir",
        mrCluster.createJobConf(), corpus, true);
    assertTrue("Archives not matching", result.isOutputOk);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
    if (mrCluster != null) {
      mrCluster.shutdown();
    }
  }
}
项目:hortonworks-extension    文件:TestMiniMRDFSCaching.java   
/**
 * Distributed-cache test over a checksumming HDFS implementation: the
 * word-count cache job is run once with symlinks disabled and once
 * enabled, asserting the localized archives match after each run.
 *
 * @throws IOException on cluster or job failure
 */
public void testWithDFS() throws IOException {
  MiniMRCluster mr = null;
  MiniDFSCluster dfs = null;
  FileSystem fileSys = null;
  try {
    JobConf conf = new JobConf();
    // Use the checksumming DFS implementation for hdfs:// URIs.
    conf.set("fs.hdfs.impl",
             "org.apache.hadoop.hdfs.ChecksumDistributedFileSystem");      
    dfs = new MiniDFSCluster(conf, 1, true, null);
    fileSys = dfs.getFileSystem();
    mr = new MiniMRCluster(2, fileSys.getName(), 4);
    MRCaching.setupCache("/cachedir", fileSys);
    // run the wordcount example with caching
    // (final 'false' argument: symlinks disabled)
    TestResult ret = MRCaching.launchMRCache("/testing/wc/input",
                                          "/testing/wc/output",
                                          "/cachedir",
                                          mr.createJobConf(),
                                          "The quick brown fox\nhas many silly\n"
                                          + "red fox sox\n", false);
    assertTrue("Archives not matching", ret.isOutputOk);
    // launch MR cache with symlinks
    // (final 'true' argument: symlinks enabled)
    ret = MRCaching.launchMRCache("/testing/wc/input",
                                  "/testing/wc/output",
                                  "/cachedir",
                                  mr.createJobConf(),
                                  "The quick brown fox\nhas many silly\n"
                                  + "red fox sox\n", true);
    assertTrue("Archives not matching", ret.isOutputOk);
  } finally {
    // Guards tolerate partial setup if an earlier step threw.
    if (fileSys != null) {
      fileSys.close();
    }
    if (dfs != null) {
      dfs.shutdown();
    }
    if (mr != null) {
      mr.shutdown();
    }
  }
}
项目:hadoop-gpu    文件:TestMiniMRDFSCaching.java   
/**
 * Distributed-cache test against ChecksumDistributedFileSystem: runs
 * the word-count cache job twice -- symlinks off, then symlinks on --
 * and asserts the localized archives are intact after each run.
 *
 * @throws IOException on cluster or job failure
 */
public void testWithDFS() throws IOException {
  MiniMRCluster miniMr = null;
  MiniDFSCluster miniDfs = null;
  FileSystem hdfs = null;
  final String inputText =
      "The quick brown fox\nhas many silly\n" + "red fox sox\n";
  try {
    JobConf conf = new JobConf();
    // Checksumming DFS implementation for hdfs:// URIs.
    conf.set("fs.hdfs.impl",
             "org.apache.hadoop.hdfs.ChecksumDistributedFileSystem");
    miniDfs = new MiniDFSCluster(conf, 1, true, null);
    hdfs = miniDfs.getFileSystem();
    miniMr = new MiniMRCluster(2, hdfs.getName(), 4);
    MRCaching.setupCache("/cachedir", hdfs);
    // First run: symlinks disabled.
    TestResult outcome = MRCaching.launchMRCache(
        "/testing/wc/input", "/testing/wc/output", "/cachedir",
        miniMr.createJobConf(), inputText, false);
    assertTrue("Archives not matching", outcome.isOutputOk);
    // Second run: symlinks enabled.
    outcome = MRCaching.launchMRCache(
        "/testing/wc/input", "/testing/wc/output", "/cachedir",
        miniMr.createJobConf(), inputText, true);
    assertTrue("Archives not matching", outcome.isOutputOk);
  } finally {
    if (hdfs != null) {
      hdfs.close();
    }
    if (miniDfs != null) {
      miniDfs.shutdown();
    }
    if (miniMr != null) {
      miniMr.shutdown();
    }
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestMiniMRLocalFS.java   
public void testWithLocal()
    throws IOException, InterruptedException, ClassNotFoundException {
  MiniMRCluster cluster = null;
  try {
    cluster = new MiniMRCluster(2, "file:///", 3);
    // Run cleanup inline so the existence of the cleanup directories
    // can be validated deterministically after the job finishes.
    cluster.setInlineCleanupThreads();

    TestMiniMRWithDFS.runPI(cluster, cluster.createJobConf());

    // Word-count job that exercises the distributed cache.
    JobConf jobConf = cluster.createJobConf();
    String input = "The quick brown fox\n" 
        + "has many silly\n"
        + "red fox sox\n";
    TestResult result = MRCaching.launchMRCache(TEST_ROOT_DIR + "/wc/input",
                                                TEST_ROOT_DIR + "/wc/output", 
                                                TEST_ROOT_DIR + "/cachedir",
                                                jobConf,
                                                input);
    // The cached archive contents must match what the job read.
    assertTrue("Failed test archives not matching", result.isOutputOk);

    // Exercise each per-phase task-report fetcher against the finished job.
    JobClient client = new JobClient(jobConf);
    JobID jobId = result.job.getID();
    assertEquals("number of setups", 2,
                 client.getSetupTaskReports(jobId).length);
    assertEquals("number of maps", 1,
                 client.getMapTaskReports(jobId).length);
    assertEquals("number of reduces", 1,
                 client.getReduceTaskReports(jobId).length);
    assertEquals("number of cleanups", 2,
                 client.getCleanupTaskReports(jobId).length);

    // Spot-check job counters: 3 input lines, 9 distinct words out.
    Counters counters = result.job.getCounters();
    assertEquals("number of map inputs", 3, 
                 counters.getCounter(Task.Counter.MAP_INPUT_RECORDS));
    assertEquals("number of reduce outputs", 9, 
                 counters.getCounter(Task.Counter.REDUCE_OUTPUT_RECORDS));

    runCustomFormats(cluster);
    runSecondarySort(cluster.createJobConf());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
项目:hadoop-EAR    文件:TestMiniMRLocalFS.java   
public void testWithLocal()
    throws IOException, InterruptedException, ClassNotFoundException {
  MiniMRCluster cluster = null;
  try {
    cluster = new MiniMRCluster(2, "file:///", 3);
    TestMiniMRWithDFS.runPI(cluster, cluster.createJobConf());

    // Word-count job that exercises the distributed cache.
    JobConf jobConf = cluster.createJobConf();
    String input = "The quick brown fox\n" 
        + "has many silly\n"
        + "red fox sox\n";
    TestResult result = MRCaching.launchMRCache(TEST_ROOT_DIR + "/wc/input",
                                                TEST_ROOT_DIR + "/wc/output", 
                                                TEST_ROOT_DIR + "/cachedir",
                                                jobConf,
                                                input);
    // The cached archive contents must match what the job read.
    assertTrue("Failed test archives not matching", result.isOutputOk);

    // Exercise each per-phase task-report fetcher against the finished job.
    JobClient client = new JobClient(jobConf);
    JobID jobId = result.job.getID();
    assertEquals("number of setups", 2,
                 client.getSetupTaskReports(jobId).length);
    assertEquals("number of maps", 1,
                 client.getMapTaskReports(jobId).length);
    assertEquals("number of reduces", 1,
                 client.getReduceTaskReports(jobId).length);
    assertEquals("number of cleanups", 2,
                 client.getCleanupTaskReports(jobId).length);

    // Spot-check job counters: 3 input lines, 9 distinct words out.
    Counters counters = result.job.getCounters();
    assertEquals("number of map inputs", 3, 
                 counters.getCounter(Task.Counter.MAP_INPUT_RECORDS));
    assertEquals("number of reduce outputs", 9, 
                 counters.getCounter(Task.Counter.REDUCE_OUTPUT_RECORDS));

    runCustomFormats(cluster);
    runSecondarySort(cluster.createJobConf());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
项目:hadoop-on-lustre    文件:TestMiniMRLocalFS.java   
public void testWithLocal()
    throws IOException, InterruptedException, ClassNotFoundException {
  MiniMRCluster cluster = null;
  try {
    cluster = new MiniMRCluster(2, "file:///", 3);
    // Run cleanup inline so the existence of the cleanup directories
    // can be validated deterministically after the job finishes.
    cluster.setInlineCleanupThreads();

    TestMiniMRWithDFS.runPI(cluster, cluster.createJobConf());

    // Word-count job that exercises the distributed cache.
    JobConf jobConf = cluster.createJobConf();
    String input = "The quick brown fox\n" 
        + "has many silly\n"
        + "red fox sox\n";
    TestResult result = MRCaching.launchMRCache(TEST_ROOT_DIR + "/wc/input",
                                                TEST_ROOT_DIR + "/wc/output", 
                                                TEST_ROOT_DIR + "/cachedir",
                                                jobConf,
                                                input);
    // The cached archive contents must match what the job read.
    assertTrue("Failed test archives not matching", result.isOutputOk);

    // Exercise each per-phase task-report fetcher against the finished job.
    JobClient client = new JobClient(jobConf);
    JobID jobId = result.job.getID();
    assertEquals("number of setups", 2,
                 client.getSetupTaskReports(jobId).length);
    assertEquals("number of maps", 1,
                 client.getMapTaskReports(jobId).length);
    assertEquals("number of reduces", 1,
                 client.getReduceTaskReports(jobId).length);
    assertEquals("number of cleanups", 2,
                 client.getCleanupTaskReports(jobId).length);

    // Spot-check job counters: 3 input lines, 9 distinct words out.
    Counters counters = result.job.getCounters();
    assertEquals("number of map inputs", 3, 
                 counters.getCounter(Task.Counter.MAP_INPUT_RECORDS));
    assertEquals("number of reduce outputs", 9, 
                 counters.getCounter(Task.Counter.REDUCE_OUTPUT_RECORDS));

    runCustomFormats(cluster);
    runSecondarySort(cluster.createJobConf());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
项目:RDFS    文件:TestMiniMRLocalFS.java   
public void testWithLocal()
    throws IOException, InterruptedException, ClassNotFoundException {
  MiniMRCluster cluster = null;
  try {
    cluster = new MiniMRCluster(2, "file:///", 3);
    TestMiniMRWithDFS.runPI(cluster, cluster.createJobConf());

    // Word-count job that exercises the distributed cache.
    JobConf jobConf = cluster.createJobConf();
    String input = "The quick brown fox\n" 
        + "has many silly\n"
        + "red fox sox\n";
    TestResult result = MRCaching.launchMRCache(TEST_ROOT_DIR + "/wc/input",
                                                TEST_ROOT_DIR + "/wc/output", 
                                                TEST_ROOT_DIR + "/cachedir",
                                                jobConf,
                                                input);
    // The cached archive contents must match what the job read.
    assertTrue("Failed test archives not matching", result.isOutputOk);

    // Exercise each per-phase task-report fetcher against the finished job.
    JobClient client = new JobClient(jobConf);
    JobID jobId = result.job.getID();
    assertEquals("number of setups", 2,
                 client.getSetupTaskReports(jobId).length);
    assertEquals("number of maps", 1,
                 client.getMapTaskReports(jobId).length);
    assertEquals("number of reduces", 1,
                 client.getReduceTaskReports(jobId).length);
    assertEquals("number of cleanups", 2,
                 client.getCleanupTaskReports(jobId).length);

    // Spot-check job counters: 3 input lines, 9 distinct words out.
    Counters counters = result.job.getCounters();
    assertEquals("number of map inputs", 3, 
                 counters.getCounter(Task.Counter.MAP_INPUT_RECORDS));
    assertEquals("number of reduce outputs", 9, 
                 counters.getCounter(Task.Counter.REDUCE_OUTPUT_RECORDS));

    runCustomFormats(cluster);
    runSecondarySort(cluster.createJobConf());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
项目:hadoop-0.20    文件:TestMiniMRLocalFS.java   
public void testWithLocal()
    throws IOException, InterruptedException, ClassNotFoundException {
  MiniMRCluster cluster = null;
  try {
    cluster = new MiniMRCluster(2, "file:///", 3);
    TestMiniMRWithDFS.runPI(cluster, cluster.createJobConf());

    // Word-count job that exercises the distributed cache.
    JobConf jobConf = cluster.createJobConf();
    String input = "The quick brown fox\n" 
        + "has many silly\n"
        + "red fox sox\n";
    TestResult result = MRCaching.launchMRCache(TEST_ROOT_DIR + "/wc/input",
                                                TEST_ROOT_DIR + "/wc/output", 
                                                TEST_ROOT_DIR + "/cachedir",
                                                jobConf,
                                                input);
    // The cached archive contents must match what the job read.
    assertTrue("Failed test archives not matching", result.isOutputOk);

    // Exercise each per-phase task-report fetcher against the finished job.
    JobClient client = new JobClient(jobConf);
    JobID jobId = result.job.getID();
    assertEquals("number of setups", 2,
                 client.getSetupTaskReports(jobId).length);
    assertEquals("number of maps", 1,
                 client.getMapTaskReports(jobId).length);
    assertEquals("number of reduces", 1,
                 client.getReduceTaskReports(jobId).length);
    assertEquals("number of cleanups", 2,
                 client.getCleanupTaskReports(jobId).length);

    // Spot-check job counters: 3 input lines, 9 distinct words out.
    Counters counters = result.job.getCounters();
    assertEquals("number of map inputs", 3, 
                 counters.getCounter(Task.Counter.MAP_INPUT_RECORDS));
    assertEquals("number of reduce outputs", 9, 
                 counters.getCounter(Task.Counter.REDUCE_OUTPUT_RECORDS));

    runCustomFormats(cluster);
    runSecondarySort(cluster.createJobConf());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
项目:hanoi-hadoop-2.0.0-cdh    文件:TestMiniMRLocalFS.java   
public void testWithLocal()
    throws IOException, InterruptedException, ClassNotFoundException {
  MiniMRCluster cluster = null;
  try {
    cluster = new MiniMRCluster(2, "file:///", 3);
    // Run cleanup inline so the existence of the cleanup directories
    // can be validated deterministically after the job finishes.
    cluster.setInlineCleanupThreads();

    TestMiniMRWithDFS.runPI(cluster, cluster.createJobConf());

    // Word-count job that exercises the distributed cache.
    JobConf jobConf = cluster.createJobConf();
    String input = "The quick brown fox\n" 
        + "has many silly\n"
        + "red fox sox\n";
    TestResult result = MRCaching.launchMRCache(TEST_ROOT_DIR + "/wc/input",
                                                TEST_ROOT_DIR + "/wc/output", 
                                                TEST_ROOT_DIR + "/cachedir",
                                                jobConf,
                                                input);
    // The cached archive contents must match what the job read.
    assertTrue("Failed test archives not matching", result.isOutputOk);

    // Exercise each per-phase task-report fetcher against the finished job.
    JobClient client = new JobClient(jobConf);
    JobID jobId = result.job.getID();
    assertEquals("number of setups", 2,
                 client.getSetupTaskReports(jobId).length);
    assertEquals("number of maps", 1,
                 client.getMapTaskReports(jobId).length);
    assertEquals("number of reduces", 1,
                 client.getReduceTaskReports(jobId).length);
    assertEquals("number of cleanups", 2,
                 client.getCleanupTaskReports(jobId).length);

    // Spot-check job counters: 3 input lines, 9 distinct words out.
    Counters counters = result.job.getCounters();
    assertEquals("number of map inputs", 3, 
                 counters.getCounter(Task.Counter.MAP_INPUT_RECORDS));
    assertEquals("number of reduce outputs", 9, 
                 counters.getCounter(Task.Counter.REDUCE_OUTPUT_RECORDS));

    runCustomFormats(cluster);
    runSecondarySort(cluster.createJobConf());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
项目:mapreduce-fork    文件:TestMiniMRLocalFS.java   
public void testWithLocal() 
    throws IOException, InterruptedException, ClassNotFoundException {
  MiniMRCluster cluster = null;
  try {
    cluster = new MiniMRCluster(2, "file:///", 3);
    // Run cleanup inline so the existence of the cleanup directories
    // can be validated deterministically after the job finishes.
    cluster.setInlineCleanupThreads();

    TestMiniMRWithDFS.runPI(cluster, cluster.createJobConf());

    // Word-count job that exercises the distributed cache.
    JobConf jobConf = cluster.createJobConf();
    String input = "The quick brown fox\n" 
        + "has many silly\n"
        + "red fox sox\n";
    TestResult result = MRCaching.launchMRCache(TEST_ROOT_DIR + "/wc/input",
                                                TEST_ROOT_DIR + "/wc/output", 
                                                TEST_ROOT_DIR + "/cachedir",
                                                jobConf,
                                                input);
    // The cached archive contents must match what the job read.
    assertTrue("Failed test archives not matching", result.isOutputOk);

    // Exercise each per-phase task-report fetcher against the finished job.
    JobClient client = new JobClient(jobConf);
    JobID jobId = result.job.getID();
    assertEquals("number of setups", 2,
                 client.getSetupTaskReports(jobId).length);
    assertEquals("number of maps", 1,
                 client.getMapTaskReports(jobId).length);
    assertEquals("number of reduces", 1,
                 client.getReduceTaskReports(jobId).length);
    assertEquals("number of cleanups", 2,
                 client.getCleanupTaskReports(jobId).length);

    // Spot-check job counters: 3 input lines, 9 distinct words out.
    Counters counters = result.job.getCounters();
    assertEquals("number of map inputs", 3, 
                 counters.getCounter(TaskCounter.MAP_INPUT_RECORDS));
    assertEquals("number of reduce outputs", 9, 
                 counters.getCounter(TaskCounter.REDUCE_OUTPUT_RECORDS));

    runCustomFormats(cluster);
    runSecondarySort(cluster.createJobConf());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
项目:hortonworks-extension    文件:TestMiniMRLocalFS.java   
public void testWithLocal()
    throws IOException, InterruptedException, ClassNotFoundException {
  MiniMRCluster cluster = null;
  try {
    cluster = new MiniMRCluster(2, "file:///", 3);
    // Run cleanup inline so the existence of the cleanup directories
    // can be validated deterministically after the job finishes.
    cluster.setInlineCleanupThreads();

    TestMiniMRWithDFS.runPI(cluster, cluster.createJobConf());

    // Word-count job that exercises the distributed cache.
    JobConf jobConf = cluster.createJobConf();
    String input = "The quick brown fox\n" 
        + "has many silly\n"
        + "red fox sox\n";
    TestResult result = MRCaching.launchMRCache(TEST_ROOT_DIR + "/wc/input",
                                                TEST_ROOT_DIR + "/wc/output", 
                                                TEST_ROOT_DIR + "/cachedir",
                                                jobConf,
                                                input);
    // The cached archive contents must match what the job read.
    assertTrue("Failed test archives not matching", result.isOutputOk);

    // Exercise each per-phase task-report fetcher against the finished job.
    JobClient client = new JobClient(jobConf);
    JobID jobId = result.job.getID();
    assertEquals("number of setups", 2,
                 client.getSetupTaskReports(jobId).length);
    assertEquals("number of maps", 1,
                 client.getMapTaskReports(jobId).length);
    assertEquals("number of reduces", 1,
                 client.getReduceTaskReports(jobId).length);
    assertEquals("number of cleanups", 2,
                 client.getCleanupTaskReports(jobId).length);

    // Spot-check job counters: 3 input lines, 9 distinct words out.
    Counters counters = result.job.getCounters();
    assertEquals("number of map inputs", 3, 
                 counters.getCounter(Task.Counter.MAP_INPUT_RECORDS));
    assertEquals("number of reduce outputs", 9, 
                 counters.getCounter(Task.Counter.REDUCE_OUTPUT_RECORDS));

    runCustomFormats(cluster);
    runSecondarySort(cluster.createJobConf());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
项目:hortonworks-extension    文件:TestMiniMRLocalFS.java   
public void testWithLocal()
    throws IOException, InterruptedException, ClassNotFoundException {
  MiniMRCluster cluster = null;
  try {
    cluster = new MiniMRCluster(2, "file:///", 3);
    // Run cleanup inline so the existence of the cleanup directories
    // can be validated deterministically after the job finishes.
    cluster.setInlineCleanupThreads();

    TestMiniMRWithDFS.runPI(cluster, cluster.createJobConf());

    // Word-count job that exercises the distributed cache.
    JobConf jobConf = cluster.createJobConf();
    String input = "The quick brown fox\n" 
        + "has many silly\n"
        + "red fox sox\n";
    TestResult result = MRCaching.launchMRCache(TEST_ROOT_DIR + "/wc/input",
                                                TEST_ROOT_DIR + "/wc/output", 
                                                TEST_ROOT_DIR + "/cachedir",
                                                jobConf,
                                                input);
    // The cached archive contents must match what the job read.
    assertTrue("Failed test archives not matching", result.isOutputOk);

    // Exercise each per-phase task-report fetcher against the finished job.
    JobClient client = new JobClient(jobConf);
    JobID jobId = result.job.getID();
    assertEquals("number of setups", 2,
                 client.getSetupTaskReports(jobId).length);
    assertEquals("number of maps", 1,
                 client.getMapTaskReports(jobId).length);
    assertEquals("number of reduces", 1,
                 client.getReduceTaskReports(jobId).length);
    assertEquals("number of cleanups", 2,
                 client.getCleanupTaskReports(jobId).length);

    // Spot-check job counters: 3 input lines, 9 distinct words out.
    Counters counters = result.job.getCounters();
    assertEquals("number of map inputs", 3, 
                 counters.getCounter(Task.Counter.MAP_INPUT_RECORDS));
    assertEquals("number of reduce outputs", 9, 
                 counters.getCounter(Task.Counter.REDUCE_OUTPUT_RECORDS));

    runCustomFormats(cluster);
    runSecondarySort(cluster.createJobConf());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
项目:hadoop-gpu    文件:TestMiniMRLocalFS.java   
public void testWithLocal()
    throws IOException, InterruptedException, ClassNotFoundException {
  MiniMRCluster cluster = null;
  try {
    cluster = new MiniMRCluster(2, "file:///", 3);
    TestMiniMRWithDFS.runPI(cluster, cluster.createJobConf());

    // Word-count job that exercises the distributed cache.
    JobConf jobConf = cluster.createJobConf();
    String input = "The quick brown fox\n" 
        + "has many silly\n"
        + "red fox sox\n";
    TestResult result = MRCaching.launchMRCache(TEST_ROOT_DIR + "/wc/input",
                                                TEST_ROOT_DIR + "/wc/output", 
                                                TEST_ROOT_DIR + "/cachedir",
                                                jobConf,
                                                input);
    // The cached archive contents must match what the job read.
    assertTrue("Failed test archives not matching", result.isOutputOk);

    // Exercise each per-phase task-report fetcher against the finished job.
    JobClient client = new JobClient(jobConf);
    JobID jobId = result.job.getID();
    assertEquals("number of setups", 2,
                 client.getSetupTaskReports(jobId).length);
    assertEquals("number of maps", 1,
                 client.getMapTaskReports(jobId).length);
    assertEquals("number of reduces", 1,
                 client.getReduceTaskReports(jobId).length);
    assertEquals("number of cleanups", 2,
                 client.getCleanupTaskReports(jobId).length);

    // Spot-check job counters: 3 input lines, 9 distinct words out.
    Counters counters = result.job.getCounters();
    assertEquals("number of map inputs", 3, 
                 counters.getCounter(Task.Counter.MAP_INPUT_RECORDS));
    assertEquals("number of reduce outputs", 9, 
                 counters.getCounter(Task.Counter.REDUCE_OUTPUT_RECORDS));

    runCustomFormats(cluster);
    runSecondarySort(cluster.createJobConf());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}