Java Class org.apache.hadoop.mapred.MiniMRCluster Example Source Code

Project: ditb    File: MapreduceTestingShim.java
public JobConf obtainJobConf(MiniMRCluster cluster) {
  if (cluster == null) return null;
  try {
    Object runner = cluster.getJobTrackerRunner();
    Method meth = runner.getClass().getDeclaredMethod("getJobTracker", emptyParam);
    Object tracker = meth.invoke(runner, new Object []{});
    Method m = tracker.getClass().getDeclaredMethod("getConf", emptyParam);
    return (JobConf) m.invoke(tracker, new Object []{});
  } catch (NoSuchMethodException nsme) {
    return null;
  } catch (InvocationTargetException ite) {
    return null;
  } catch (IllegalAccessException iae) {
    return null;
  }
}
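
The shim reads the JobTracker's live JobConf reflectively because JobTrackerRunner.getJobTracker() and JobTracker.getConf() are not stable public API across Hadoop versions; where the methods are absent it simply returns null. A minimal standalone usage sketch (a hypothetical harness, not taken from any of the listed projects):

import java.lang.reflect.Method;

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;

public class ObtainJobConfDemo {
  public static void main(String[] args) throws Exception {
    // Spin up a local MRv1 mini cluster backed by the local filesystem.
    MiniMRCluster cluster = new MiniMRCluster(2, "file:///", 1);
    try {
      // Same reflective chain as the shim above.
      Object runner = cluster.getJobTrackerRunner();
      Method getTracker = runner.getClass().getDeclaredMethod("getJobTracker");
      Object tracker = getTracker.invoke(runner);
      Method getConf = tracker.getClass().getDeclaredMethod("getConf");
      JobConf jtConf = (JobConf) getConf.invoke(tracker);
      System.out.println("mapred.job.tracker = " + jtConf.get("mapred.job.tracker"));
    } finally {
      cluster.shutdown();
    }
  }
}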
Project: LCIndex-HBase-0.94.16    File: MapreduceTestingShim.java
public JobConf obtainJobConf(MiniMRCluster cluster) {
  if (cluster == null) return null;
  try {
    Object runner = cluster.getJobTrackerRunner();
    Method meth = runner.getClass().getDeclaredMethod("getJobTracker", emptyParam);
    Object tracker = meth.invoke(runner, new Object []{});
    Method m = tracker.getClass().getDeclaredMethod("getConf", emptyParam);
    return (JobConf) m.invoke(tracker, new Object []{});
  } catch (NoSuchMethodException nsme) {
    return null;
  } catch (InvocationTargetException ite) {
    return null;
  } catch (IllegalAccessException iae) {
    return null;
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestTokenCache.java
@BeforeClass
public static void setUp() throws Exception {
  Configuration conf = new Configuration();
  dfsCluster = new MiniDFSCluster(conf, numSlaves, true, null);
  jConf = new JobConf(conf);
  mrCluster = new MiniMRCluster(0, 0, numSlaves, 
      dfsCluster.getFileSystem().getUri().toString(), 1, null, null, null, 
      jConf);

  createTokenFileJson();
  verifySecretKeysInJSONFile();
  createTokenFileBinary();
  verifySecretKeysInBinaryFile();
  NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem()).startThreads();
  FileSystem fs = dfsCluster.getFileSystem();

  p1 = new Path("file1");
  p2 = new Path("file2");
  p1 = fs.makeQualified(p1);
}
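
For reference, the nine-argument constructor used above corresponds to the MRv1 signature; a sketch with each parameter spelled out (parameter roles stated here as an assumption, per MRv1's MiniMRCluster):

mrCluster = new MiniMRCluster(
    0,                                               // jobTrackerPort (0 = any free port)
    0,                                               // taskTrackerPort (0 = any free port)
    numSlaves,                                       // number of TaskTrackers
    dfsCluster.getFileSystem().getUri().toString(),  // namenode URI
    1,                                               // numDir: local dirs per TaskTracker
    null,                                            // racks
    null,                                            // hosts
    null,                                            // UGI for the cluster daemons
    jConf);                                          // base JobConf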
Project: hadoop-2.6.0-cdh5.4.3    File: TestEncryptedShuffle.java
private void startCluster(Configuration conf) throws Exception {
  if (System.getProperty("hadoop.log.dir") == null) {
    System.setProperty("hadoop.log.dir", "build/test-dir");
  }
  conf.set("dfs.block.access.token.enable", "false");
  conf.set("dfs.permissions", "true");
  conf.set("hadoop.security.authentication", "simple");
  dfsCluster = new MiniDFSCluster(conf, 1, true, null);
  FileSystem fileSystem = dfsCluster.getFileSystem();
  fileSystem.mkdirs(new Path("/tmp"));
  fileSystem.mkdirs(new Path("/user"));
  fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
  fileSystem.setPermission(
    new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
    new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
    new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
  FileSystem.setDefaultUri(conf, fileSystem.getUri());
  mrCluster = new MiniMRCluster(1, fileSystem.getUri().toString(), 1, null, null, new JobConf(conf));
}
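
The start/stop pairing implies a matching teardown; a minimal sketch using the same field names (the project's actual stopCluster may differ):

private void stopCluster() throws Exception {
  if (mrCluster != null) {
    mrCluster.shutdown();
  }
  if (dfsCluster != null) {
    dfsCluster.shutdown();
  }
}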
Project: sqoop-on-spark    File: HadoopMiniClusterRunner.java
@SuppressWarnings("deprecation")
@Override
public void start() throws Exception {
  System.setProperty("test.build.data", getDataDir());
  LOG.info("test.build.data set to: " + getDataDir());

  System.setProperty("hadoop.log.dir", getLogDir());
  LOG.info("log dir set to: " + getLogDir());

  // Start DFS server
  LOG.info("Starting DFS cluster...");
  dfsCluster = new MiniDFSCluster(config, 1, true, null);
  if (dfsCluster.isClusterUp()) {
    LOG.info("Started DFS cluster on port: " + dfsCluster.getNameNodePort());
  } else {
    LOG.error("Could not start DFS cluster");
  }

  // Start MR server
  LOG.info("Starting MR cluster");
  mrCluster = new MiniMRCluster(0, 0, 1, dfsCluster.getFileSystem().getUri()
      .toString(), 1, null, null, null, new JobConf(config));
  LOG.info("Started MR cluster");
  config = prepareConfiguration(mrCluster.createJobConf());
}
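
A hypothetical companion stop() for this runner (not shown in the excerpt) would shut the clusters down in reverse start order:

@Override
public void stop() throws Exception {
  LOG.info("Stopping MR cluster");
  if (mrCluster != null) {
    mrCluster.shutdown();
  }
  LOG.info("Stopping DFS cluster");
  if (dfsCluster != null) {
    dfsCluster.shutdown();
  }
}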
Project: pbase    File: MapreduceTestingShim.java
public JobConf obtainJobConf(MiniMRCluster cluster) {
  if (cluster == null) return null;
  try {
    Object runner = cluster.getJobTrackerRunner();
    Method meth = runner.getClass().getDeclaredMethod("getJobTracker", emptyParam);
    Object tracker = meth.invoke(runner, new Object []{});
    Method m = tracker.getClass().getDeclaredMethod("getConf", emptyParam);
    return (JobConf) m.invoke(tracker, new Object []{});
  } catch (NoSuchMethodException nsme) {
    return null;
  } catch (InvocationTargetException ite) {
    return null;
  } catch (IllegalAccessException iae) {
    return null;
  }
}
Project: HIndex    File: MapreduceTestingShim.java
public JobConf obtainJobConf(MiniMRCluster cluster) {
  if (cluster == null) return null;
  try {
    Object runner = cluster.getJobTrackerRunner();
    Method meth = runner.getClass().getDeclaredMethod("getJobTracker", emptyParam);
    Object tracker = meth.invoke(runner, new Object []{});
    Method m = tracker.getClass().getDeclaredMethod("getConf", emptyParam);
    return (JobConf) m.invoke(tracker, new Object []{});
  } catch (NoSuchMethodException nsme) {
    return null;
  } catch (InvocationTargetException ite) {
    return null;
  } catch (IllegalAccessException iae) {
    return null;
  }
}
Project: IRIndex    File: MapreduceTestingShim.java
public JobConf obtainJobConf(MiniMRCluster cluster) {
  if (cluster == null) return null;
  try {
    Object runner = cluster.getJobTrackerRunner();
    Method meth = runner.getClass().getDeclaredMethod("getJobTracker", emptyParam);
    Object tracker = meth.invoke(runner, new Object []{});
    Method m = tracker.getClass().getDeclaredMethod("getConf", emptyParam);
    return (JobConf) m.invoke(tracker, new Object []{});
  } catch (NoSuchMethodException nsme) {
    return null;
  } catch (InvocationTargetException ite) {
    return null;
  } catch (IllegalAccessException iae) {
    return null;
  }
}
Project: hbase    File: MapreduceTestingShim.java
@Override
public JobConf obtainJobConf(MiniMRCluster cluster) {
  if (cluster == null) return null;
  try {
    Object runner = cluster.getJobTrackerRunner();
    Method meth = runner.getClass().getDeclaredMethod("getJobTracker", emptyParam);
    Object tracker = meth.invoke(runner, new Object []{});
    Method m = tracker.getClass().getDeclaredMethod("getConf", emptyParam);
    return (JobConf) m.invoke(tracker, new Object []{});
  } catch (NoSuchMethodException nsme) {
    return null;
  } catch (InvocationTargetException ite) {
    return null;
  } catch (IllegalAccessException iae) {
    return null;
  }
}
Project: PyroDB    File: MapreduceTestingShim.java
public JobConf obtainJobConf(MiniMRCluster cluster) {
  if (cluster == null) return null;
  try {
    Object runner = cluster.getJobTrackerRunner();
    Method meth = runner.getClass().getDeclaredMethod("getJobTracker", emptyParam);
    Object tracker = meth.invoke(runner, new Object []{});
    Method m = tracker.getClass().getDeclaredMethod("getConf", emptyParam);
    return (JobConf) m.invoke(tracker, new Object []{});
  } catch (NoSuchMethodException nsme) {
    return null;
  } catch (InvocationTargetException ite) {
    return null;
  } catch (IllegalAccessException iae) {
    return null;
  }
}
Project: c5    File: MapreduceTestingShim.java
public JobConf obtainJobConf(MiniMRCluster cluster) {
  if (cluster == null) return null;
  try {
    Object runner = cluster.getJobTrackerRunner();
    Method meth = runner.getClass().getDeclaredMethod("getJobTracker", emptyParam);
    Object tracker = meth.invoke(runner, new Object []{});
    Method m = tracker.getClass().getDeclaredMethod("getConf", emptyParam);
    return (JobConf) m.invoke(tracker, new Object []{});
  } catch (NoSuchMethodException nsme) {
    return null;
  } catch (InvocationTargetException ite) {
    return null;
  } catch (IllegalAccessException iae) {
    return null;
  }
}
Project: hadoop-on-lustre    File: TestTokenCache.java
@BeforeClass
public static void setUp() throws Exception {
  Configuration conf = new Configuration();
  dfsCluster = new MiniDFSCluster(conf, numSlaves, true, null);
  jConf = new JobConf(conf);
  mrCluster = new MiniMRCluster(0, 0, numSlaves, 
      dfsCluster.getFileSystem().getUri().toString(), 1, null, null, null, 
      jConf);

  createTokenFileJson();
  verifySecretKeysInJSONFile();
  dfsCluster.getNameNode().getNamesystem()
        .getDelegationTokenSecretManager().startThreads();
  FileSystem fs = dfsCluster.getFileSystem();

  p1 = new Path("file1");
  p2 = new Path("file2");
  p1 = fs.makeQualified(p1);
}
Project: RDFS    File: TestUlimit.java
/**
 * Tests the memory-limit setting for streaming processes. Launches a
 * streaming app that allocates 10MB of memory: first with a sufficient
 * memory limit, expecting success, then with an insufficient limit,
 * expecting failure.
 */
public void testCommandLine() {
  if (StreamUtil.isCygwin()) {
    return;
  }
  try {
    final int numSlaves = 2;
    Configuration conf = new Configuration();
    dfs = new MiniDFSCluster(conf, numSlaves, true, null);
    fs = dfs.getFileSystem();

    mr = new MiniMRCluster(numSlaves, fs.getUri().toString(), 1);
    writeInputFile(fs, inputPath);
    map = StreamUtil.makeJavaCommand(UlimitApp.class, new String[]{});  
    runProgram(SET_MEMORY_LIMIT);
    fs.delete(outputPath, true);
    assertFalse("output not cleaned up", fs.exists(outputPath));
    mr.waitUntilIdle();
  } catch(IOException e) {
    fail(e.toString());
  } finally {
    mr.shutdown();
    dfs.shutdown();
  }
}
Project: hadoop-0.20    File: TestUlimit.java
/**
 * Tests the memory-limit setting for streaming processes. Launches a
 * streaming app that allocates 10MB of memory: first with a sufficient
 * memory limit, expecting success, then with an insufficient limit,
 * expecting failure.
 */
public void testCommandLine() {
  if (StreamUtil.isCygwin()) {
    return;
  }
  try {
    final int numSlaves = 2;
    Configuration conf = new Configuration();
    dfs = new MiniDFSCluster(conf, numSlaves, true, null);
    fs = dfs.getFileSystem();

    mr = new MiniMRCluster(numSlaves, fs.getUri().toString(), 1);
    writeInputFile(fs, inputPath);
    map = StreamUtil.makeJavaCommand(UlimitApp.class, new String[]{});  
    runProgram(SET_MEMORY_LIMIT);
    fs.delete(outputPath, true);
    assertFalse("output not cleaned up", fs.exists(outputPath));
    mr.waitUntilIdle();
  } catch(IOException e) {
    fail(e.toString());
  } finally {
    mr.shutdown();
    dfs.shutdown();
  }
}
Project: hanoi-hadoop-2.0.0-cdh    File: TestTokenCache.java
@BeforeClass
public static void setUp() throws Exception {
  Configuration conf = new Configuration();
  dfsCluster = new MiniDFSCluster(conf, numSlaves, true, null);
  jConf = new JobConf(conf);
  mrCluster = new MiniMRCluster(0, 0, numSlaves, 
      dfsCluster.getFileSystem().getUri().toString(), 1, null, null, null, 
      jConf);

  createTokenFileJson();
  verifySecretKeysInJSONFile();
  NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem()).startThreads();
  FileSystem fs = dfsCluster.getFileSystem();

  p1 = new Path("file1");
  p2 = new Path("file2");
  p1 = fs.makeQualified(p1);
}
Project: hanoi-hadoop-2.0.0-cdh    File: TestEncryptedShuffle.java
private void startCluster(Configuration conf) throws Exception {
  if (System.getProperty("hadoop.log.dir") == null) {
    System.setProperty("hadoop.log.dir", "build/test-dir");
  }
  conf.set("dfs.block.access.token.enable", "false");
  conf.set("dfs.permissions", "true");
  conf.set("hadoop.security.authentication", "simple");
  dfsCluster = new MiniDFSCluster(conf, 1, true, null);
  FileSystem fileSystem = dfsCluster.getFileSystem();
  fileSystem.mkdirs(new Path("/tmp"));
  fileSystem.mkdirs(new Path("/user"));
  fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
  fileSystem.setPermission(
    new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
    new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
    new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
  FileSystem.setDefaultUri(conf, fileSystem.getUri());
  mrCluster = new MiniMRCluster(1, fileSystem.getUri().toString(), 1, null, null, new JobConf(conf));
}
Project: mapreduce-fork    File: TestJobACLs.java
private void startCluster(boolean reStart) throws Exception {

    // Configure job queues
    String[] queueNames = {"default"};
    createQueuesConfigFile(queueNames,
        new String[] { jobSubmitter }, new String[] { qAdmin });

    JobConf conf = new JobConf();

    // Enable queue and job level authorization
    conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);

    // Enable CompletedJobStore
    FileSystem fs = FileSystem.getLocal(conf);
    if (!reStart) {
      fs.delete(TEST_DIR, true);
    }
    conf.set(JTConfig.JT_PERSIST_JOBSTATUS_DIR,
        fs.makeQualified(TEST_DIR).toString());
    conf.setBoolean(JTConfig.JT_PERSIST_JOBSTATUS, true);
    conf.set(JTConfig.JT_PERSIST_JOBSTATUS_HOURS, "1");

    UserGroupInformation MR_UGI = UserGroupInformation.getLoginUser();
    mr = new MiniMRCluster(0, 0, 1, "file:///", 1, null, null, MR_UGI, conf);
  }
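
The reStart flag exists so a test can stop the cluster and start it again, checking that job statuses persisted under JT_PERSIST_JOBSTATUS_DIR survive the restart. A sketch of the implied shutdown helper (assumed, not copied from the project):

private void stopCluster() throws Exception {
  if (mr != null) {
    mr.shutdown();  // persisted job statuses remain in TEST_DIR
  }
}

// Usage sketch: stopCluster(); startCluster(true);  // reStart = true keeps TEST_DIR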
Project: mapreduce-fork    File: TestTokenCache.java
@BeforeClass
public static void setUp() throws Exception {
  Configuration conf = new Configuration();
  dfsCluster = new MiniDFSCluster(conf, numSlaves, true, null);
  jConf = new JobConf(conf);
  mrCluster = new MiniMRCluster(0, 0, numSlaves, 
      dfsCluster.getFileSystem().getUri().toString(), 1, null, null, null, 
      jConf);

  createTokenFileJson();
  verifySecretKeysInJSONFile();
  dfsCluster.getNamesystem().getDelegationTokenSecretManager().startThreads();
  FileSystem fs = dfsCluster.getFileSystem();

  p1 = new Path("file1");
  p2 = new Path("file2");

  p1 = fs.makeQualified(p1);
}
Project: mapreduce-fork    File: TestTokenCacheOldApi.java
@BeforeClass
public static void setUp() throws Exception {
  Configuration conf = new Configuration();
  dfsCluster = new MiniDFSCluster(conf, numSlaves, true, null);
  jConf = new JobConf(conf);
  mrCluster = new MiniMRCluster(0, 0, numSlaves, 
      dfsCluster.getFileSystem().getUri().toString(), 1, null, null, null, 
      jConf);

  createTokenFileJson();
  verifySecretKeysInJSONFile();
  dfsCluster.getNamesystem()
        .getDelegationTokenSecretManager().startThreads();
  FileSystem fs = dfsCluster.getFileSystem();

  p1 = new Path("file1");
  p2 = new Path("file2");
  p1 = fs.makeQualified(p1);
}
Project: DominoHBase    File: MapreduceTestingShim.java
public JobConf obtainJobConf(MiniMRCluster cluster) {
  if (cluster == null) return null;
  try {
    Object runner = cluster.getJobTrackerRunner();
    Method meth = runner.getClass().getDeclaredMethod("getJobTracker", emptyParam);
    Object tracker = meth.invoke(runner, new Object []{});
    Method m = tracker.getClass().getDeclaredMethod("getConf", emptyParam);
    return (JobConf) m.invoke(tracker, new Object []{});
  } catch (NoSuchMethodException nsme) {
    return null;
  } catch (InvocationTargetException ite) {
    return null;
  } catch (IllegalAccessException iae) {
    return null;
  }
}
Project: hortonworks-extension    File: TestTokenCache.java
@BeforeClass
public static void setUp() throws Exception {
  Configuration conf = new Configuration();
  dfsCluster = new MiniDFSCluster(conf, numSlaves, true, null);
  jConf = new JobConf(conf);
  mrCluster = new MiniMRCluster(0, 0, numSlaves, 
      dfsCluster.getFileSystem().getUri().toString(), 1, null, null, null, 
      jConf);

  createTokenFileJson();
  verifySecretKeysInJSONFile();
  dfsCluster.getNameNode().getNamesystem()
        .getDelegationTokenSecretManager().startThreads();
  FileSystem fs = dfsCluster.getFileSystem();

  p1 = new Path("file1");
  p2 = new Path("file2");
  p1 = fs.makeQualified(p1);
}
Project: hadoop-gpu    File: TestUlimit.java
/**
 * Tests the memory-limit setting for streaming processes. Launches a
 * streaming app that allocates 10MB of memory: first with a sufficient
 * memory limit, expecting success, then with an insufficient limit,
 * expecting failure.
 */
public void testCommandLine() {
  if (StreamUtil.isCygwin()) {
    return;
  }
  try {
    final int numSlaves = 2;
    Configuration conf = new Configuration();
    dfs = new MiniDFSCluster(conf, numSlaves, true, null);
    fs = dfs.getFileSystem();

    mr = new MiniMRCluster(numSlaves, fs.getUri().toString(), 1);
    writeInputFile(fs, inputPath);
    map = StreamUtil.makeJavaCommand(UlimitApp.class, new String[]{});  
    runProgram(SET_MEMORY_LIMIT);
    fs.delete(outputPath, true);
    assertFalse("output not cleaned up", fs.exists(outputPath));
    mr.waitUntilIdle();
  } catch(IOException e) {
    fail(e.toString());
  } finally {
    mr.shutdown();
    dfs.shutdown();
  }
}
Project: hadoop    File: TestDelegationToken.java
@Before
public void setup() throws Exception {
  user1 = UserGroupInformation.createUserForTesting("alice", 
                                                    new String[]{"users"});
  user2 = UserGroupInformation.createUserForTesting("bob", 
                                                    new String[]{"users"});
  cluster = new MiniMRCluster(0,0,1,"file:///",1);
}
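
A test built on this setup typically fetches a delegation token as one user and then renews or cancels it as the other. A sketch, assuming MRv1's JobClient.getDelegationToken(Text renewer) plus imports of java.security.PrivilegedExceptionAction, org.apache.hadoop.io.Text, org.apache.hadoop.security.token.Token, and org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier (not copied from the project):

Token<DelegationTokenIdentifier> token = user1.doAs(
    new PrivilegedExceptionAction<Token<DelegationTokenIdentifier>>() {
      @Override
      public Token<DelegationTokenIdentifier> run() throws Exception {
        // Connect to the mini cluster as "alice" and request a token.
        JobClient client = new JobClient(cluster.createJobConf());
        return client.getDelegationToken(new Text(user1.getShortUserName()));
      }
    });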
Project: hadoop    File: TestNonExistentJob.java
protected void setUp() throws Exception {
  super.setUp();
  if (System.getProperty("hadoop.log.dir") == null) {
    System.setProperty("hadoop.log.dir", "/tmp");
  }
  int taskTrackers = 2;
  int dataNodes = 2;
  String proxyUser = System.getProperty("user.name");
  String proxyGroup = "g";
  StringBuilder sb = new StringBuilder();
  sb.append("127.0.0.1,localhost");
  for (InetAddress i : InetAddress.getAllByName(InetAddress.getLocalHost().getHostName())) {
    sb.append(",").append(i.getCanonicalHostName());
  }

  JobConf conf = new JobConf();
  conf.set("dfs.block.access.token.enable", "false");
  conf.set("dfs.permissions", "true");
  conf.set("hadoop.security.authentication", "simple");

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(dataNodes)
      .build();
  FileSystem fileSystem = dfsCluster.getFileSystem();
  fileSystem.mkdirs(new Path("/tmp"));
  fileSystem.mkdirs(new Path("/user"));
  fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
  fileSystem.setPermission(new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
  String nnURI = fileSystem.getUri().toString();
  int numDirs = 1;
  String[] racks = null;
  String[] hosts = null;
  mrCluster = new MiniMRCluster(0, 0, taskTrackers, nnURI, numDirs, racks, hosts, null, conf);
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}
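
proxyUser, proxyGroup, and the host list accumulated in sb go unused in this excerpt; they are evidently meant for the proxy-user settings. The usual wiring, shown here as an assumed sketch rather than the project's exact lines, would be applied to conf before the clusters are created:

conf.set("hadoop.proxyuser." + proxyUser + ".hosts", sb.toString());
conf.set("hadoop.proxyuser." + proxyUser + ".groups", proxyGroup);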
Project: hadoop    File: TestFileArgs.java
public TestFileArgs() throws IOException
{
  // Set up mini cluster
  conf = new Configuration();
  dfs = new MiniDFSCluster.Builder(conf).build();
  fileSys = dfs.getFileSystem();
  namenode = fileSys.getUri().getAuthority();
  mr  = new MiniMRCluster(1, namenode, 1);

  map = LS_PATH;
  FileSystem.setDefaultUri(conf, "hdfs://" + namenode);
  setTestDir(new File("/tmp/TestFileArgs"));
}
Project: hadoop    File: TestStreamingStatus.java
/**
 * Start the cluster and create input file before running the actual test.
 *
 * @throws IOException
 */
@Before
public void setUp() throws IOException {
  conf = new JobConf();
  conf.setBoolean(JTConfig.JT_RETIREJOBS, false);
  conf.setBoolean(JTConfig.JT_PERSIST_JOBSTATUS, false);

  mr = new MiniMRCluster(1, "file:///", 3, null , null, conf);

  Path inFile = new Path(INPUT_FILE);
  fs = inFile.getFileSystem(mr.createJobConf());
  clean(fs);

  buildExpectedJobOutput();
}
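
The matching cleanup (a sketch; the test's real tearDown may differ):

@After
public void tearDown() throws IOException {
  clean(fs);  // remove input/output left behind by the test
  if (mr != null) {
    mr.shutdown();
  }
}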
Project: ditb    File: MapreduceTestingShim.java
public JobConf obtainJobConf(MiniMRCluster cluster) {
  try {
    Method meth = MiniMRCluster.class.getMethod("getJobTrackerConf", emptyParam);
    return (JobConf) meth.invoke(cluster, new Object []{});
  } catch (NoSuchMethodException nsme) {
    return null;
  } catch (InvocationTargetException ite) {
    return null;
  } catch (IllegalAccessException iae) {
    return null;
  }
}
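
This second ditb variant probes for a getJobTrackerConf() accessor instead of reaching into the JobTrackerRunner, which is why the shim carries both implementations. Where only client-side job settings are needed, the stable public route is simpler (a sketch):

JobConf clientConf = cluster.createJobConf();  // public API on MiniMRCluster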
Project: aliyun-oss-hadoop-fs    File: TestDelegationToken.java
@Before
public void setup() throws Exception {
  user1 = UserGroupInformation.createUserForTesting("alice", 
                                                    new String[]{"users"});
  user2 = UserGroupInformation.createUserForTesting("bob", 
                                                    new String[]{"users"});
  cluster = new MiniMRCluster(0,0,1,"file:///",1);
}
Project: aliyun-oss-hadoop-fs    File: TestNonExistentJob.java
protected void setUp() throws Exception {
  super.setUp();
  if (System.getProperty("hadoop.log.dir") == null) {
    System.setProperty("hadoop.log.dir", "/tmp");
  }
  int taskTrackers = 2;
  int dataNodes = 2;
  String proxyUser = System.getProperty("user.name");
  String proxyGroup = "g";
  StringBuilder sb = new StringBuilder();
  sb.append("127.0.0.1,localhost");
  for (InetAddress i : InetAddress.getAllByName(InetAddress.getLocalHost().getHostName())) {
    sb.append(",").append(i.getCanonicalHostName());
  }

  JobConf conf = new JobConf();
  conf.set("dfs.block.access.token.enable", "false");
  conf.set("dfs.permissions", "true");
  conf.set("hadoop.security.authentication", "simple");

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(dataNodes)
      .build();
  FileSystem fileSystem = dfsCluster.getFileSystem();
  fileSystem.mkdirs(new Path("/tmp"));
  fileSystem.mkdirs(new Path("/user"));
  fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
  fileSystem.setPermission(new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
  String nnURI = fileSystem.getUri().toString();
  int numDirs = 1;
  String[] racks = null;
  String[] hosts = null;
  mrCluster = new MiniMRCluster(0, 0, taskTrackers, nnURI, numDirs, racks, hosts, null, conf);
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}
Project: aliyun-oss-hadoop-fs    File: TestFileArgs.java
public TestFileArgs() throws IOException
{
  // Set up mini cluster
  conf = new Configuration();
  dfs = new MiniDFSCluster.Builder(conf).build();
  fileSys = dfs.getFileSystem();
  namenode = fileSys.getUri().getAuthority();
  mr  = new MiniMRCluster(1, namenode, 1);

  map = LS_PATH;
  FileSystem.setDefaultUri(conf, "hdfs://" + namenode);
  setTestDir(new File("/tmp/TestFileArgs"));
}
Project: aliyun-oss-hadoop-fs    File: TestStreamingStatus.java
/**
 * Start the cluster and create input file before running the actual test.
 *
 * @throws IOException
 */
@Before
public void setUp() throws IOException {
  conf = new JobConf();
  conf.setBoolean(JTConfig.JT_RETIREJOBS, false);
  conf.setBoolean(JTConfig.JT_PERSIST_JOBSTATUS, false);

  mr = new MiniMRCluster(1, "file:///", 3, null , null, conf);

  Path inFile = new Path(INPUT_FILE);
  fs = inFile.getFileSystem(mr.createJobConf());
  clean(fs);

  buildExpectedJobOutput();
}
Project: mutation-server    File: TestCluster.java
public void start() throws IOException {

  File testCluster = new File(WORKING_DIRECTORY);
  if (testCluster.exists()) {
    FileUtil.deleteDirectory(testCluster);
  }
  testCluster.mkdirs();

  File testClusterData = new File(WORKING_DIRECTORY + "/data");
  File testClusterLog = new File(WORKING_DIRECTORY + "/logs");

  if (cluster == null) {

    conf = new HdfsConfiguration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,
        testClusterData.getAbsolutePath());
    cluster = new MiniDFSCluster.Builder(conf).build();
    fs = cluster.getFileSystem();

    // set the mini cluster's configuration as the default
    HdfsUtil.setDefaultConfiguration(conf);
    System.setProperty("hadoop.log.dir", testClusterLog.getAbsolutePath());
    MiniMRCluster mrCluster = new MiniMRCluster(1, fs.getUri()
        .toString(), 1, null, null, new JobConf(conf));
    JobConf mrClusterConf = mrCluster.createJobConf();
    HdfsUtil.setDefaultConfiguration(new Configuration(mrClusterConf));

    System.out.println("------");

    JobClient client = new JobClient(mrClusterConf);
    ClusterStatus status = client.getClusterStatus(true);
    System.out.println(status.getActiveTrackerNames());
  }
}
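
Note that mrCluster is method-local above, so only the DFS cluster is reachable for a later shutdown; a hypothetical companion stop() under that constraint:

public void stop() throws IOException {
  if (cluster != null) {
    cluster.shutdown();  // stops the MiniDFSCluster; the method-local mrCluster
    cluster = null;      // above cannot be shut down from here
  }
}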
Project: big-c    File: TestDelegationToken.java
@Before
public void setup() throws Exception {
  user1 = UserGroupInformation.createUserForTesting("alice", 
                                                    new String[]{"users"});
  user2 = UserGroupInformation.createUserForTesting("bob", 
                                                    new String[]{"users"});
  cluster = new MiniMRCluster(0,0,1,"file:///",1);
}
Project: big-c    File: TestNonExistentJob.java
protected void setUp() throws Exception {
  super.setUp();
  if (System.getProperty("hadoop.log.dir") == null) {
    System.setProperty("hadoop.log.dir", "/tmp");
  }
  int taskTrackers = 2;
  int dataNodes = 2;
  String proxyUser = System.getProperty("user.name");
  String proxyGroup = "g";
  StringBuilder sb = new StringBuilder();
  sb.append("127.0.0.1,localhost");
  for (InetAddress i : InetAddress.getAllByName(InetAddress.getLocalHost().getHostName())) {
    sb.append(",").append(i.getCanonicalHostName());
  }

  JobConf conf = new JobConf();
  conf.set("dfs.block.access.token.enable", "false");
  conf.set("dfs.permissions", "true");
  conf.set("hadoop.security.authentication", "simple");

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(dataNodes)
      .build();
  FileSystem fileSystem = dfsCluster.getFileSystem();
  fileSystem.mkdirs(new Path("/tmp"));
  fileSystem.mkdirs(new Path("/user"));
  fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
  fileSystem.setPermission(new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
  String nnURI = fileSystem.getUri().toString();
  int numDirs = 1;
  String[] racks = null;
  String[] hosts = null;
  mrCluster = new MiniMRCluster(0, 0, taskTrackers, nnURI, numDirs, racks, hosts, null, conf);
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}
Project: big-c    File: TestFileArgs.java
public TestFileArgs() throws IOException
{
  // Set up mini cluster
  conf = new Configuration();
  dfs = new MiniDFSCluster.Builder(conf).build();
  fileSys = dfs.getFileSystem();
  namenode = fileSys.getUri().getAuthority();
  mr  = new MiniMRCluster(1, namenode, 1);

  map = LS_PATH;
  FileSystem.setDefaultUri(conf, "hdfs://" + namenode);
  setTestDir(new File("/tmp/TestFileArgs"));
}
Project: big-c    File: TestStreamingStatus.java
/**
 * Start the cluster and create input file before running the actual test.
 *
 * @throws IOException
 */
@Before
public void setUp() throws IOException {
  conf = new JobConf();
  conf.setBoolean(JTConfig.JT_RETIREJOBS, false);
  conf.setBoolean(JTConfig.JT_PERSIST_JOBSTATUS, false);

  mr = new MiniMRCluster(1, "file:///", 3, null , null, conf);

  Path inFile = new Path(INPUT_FILE);
  fs = inFile.getFileSystem(mr.createJobConf());
  clean(fs);

  buildExpectedJobOutput();
}
Project: imputationserver    File: TestCluster.java
public void start() throws IOException {

  File testCluster = new File(WORKING_DIRECTORY);
  if (testCluster.exists()) {
    FileUtil.deleteDirectory(testCluster);
  }
  testCluster.mkdirs();

  File testClusterData = new File(WORKING_DIRECTORY + "/data");
  File testClusterLog = new File(WORKING_DIRECTORY + "/logs");

  if (cluster == null) {

    conf = new HdfsConfiguration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,
        testClusterData.getAbsolutePath());
    cluster = new MiniDFSCluster.Builder(conf).build();
    fs = cluster.getFileSystem();

    // set the mini cluster's configuration as the default
    HdfsUtil.setDefaultConfiguration(conf);
    System.setProperty("hadoop.log.dir", testClusterLog.getAbsolutePath());
    MiniMRCluster mrCluster = new MiniMRCluster(1, fs.getUri()
        .toString(), 1, null, null, new JobConf(conf));
    JobConf mrClusterConf = mrCluster.createJobConf();
    HdfsUtil.setDefaultConfiguration(new Configuration(mrClusterConf));

    System.out.println("------");

    JobClient client = new JobClient(mrClusterConf);
    ClusterStatus status = client.getClusterStatus(true);
    System.out.println(status.getActiveTrackerNames());
  }
}
Project: LCIndex-HBase-0.94.16    File: MapreduceTestingShim.java
public JobConf obtainJobConf(MiniMRCluster cluster) {
  try {
    Method meth = MiniMRCluster.class.getMethod("getJobTrackerConf", emptyParam);
    return (JobConf) meth.invoke(cluster, new Object []{});
  } catch (NoSuchMethodException nsme) {
    return null;
  } catch (InvocationTargetException ite) {
    return null;
  } catch (IllegalAccessException iae) {
    return null;
  }
}