Java class org.apache.hadoop.hbase.IntegrationTestingUtility usage examples (source code)

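All of the snippets below share one bootstrap pattern: mark the Configuration as targeting an already-deployed (distributed) cluster with IntegrationTestingUtility.setUseDistributedCluster(conf), wrap that configuration in an IntegrationTestingUtility, and call initializeCluster(...) before driving the test, restoring the cluster when done. The sketch that follows shows only this pattern; it is not taken from any of the projects listed here, the class name, table name, column family, and region-server count are illustrative placeholders, and it assumes a reasonably recent HBase client API (TableName-based overloads and getAdmin()).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

public class IntegrationTestingUtilitySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Run against an already-deployed cluster instead of an in-process mini cluster.
    IntegrationTestingUtility.setUseDistributedCluster(conf);

    IntegrationTestingUtility util = new IntegrationTestingUtility(conf);
    util.initializeCluster(1); // require at least one live region server

    TableName table = TableName.valueOf("sketch_table"); // hypothetical table name
    try {
      util.createTable(table, Bytes.toBytes("f"));       // hypothetical column family
      // ... drive the actual load/verify or chaos-monkey work here ...
    } finally {
      if (util.getAdmin().tableExists(table)) {
        util.deleteTable(table);
      }
      util.restoreCluster(); // undo cluster-level changes made during initialization
    }
  }
}
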
Project: ditb    File: IntegrationTestWithCellVisibilityLoadAndVerify.java
@Override
public int runTestFromCommandLine() throws Exception {
  IntegrationTestingUtility.setUseDistributedCluster(getConf());
  int numPresplits = getConf().getInt("loadmapper.numPresplits", 5);
  // create HTableDescriptor for specified table
  HTableDescriptor htd = new HTableDescriptor(getTablename());
  htd.addFamily(new HColumnDescriptor(TEST_FAMILY));

  Admin admin = new HBaseAdmin(getConf());
  try {
    admin.createTable(htd, Bytes.toBytes(0L), Bytes.toBytes(-1L), numPresplits);
  } finally {
    admin.close();
  }
  doLoad(getConf(), htd);
  doVerify(getConf(), htd);
  getTestingUtil(getConf()).deleteTable(htd.getName());
  return 0;
}
Project: HIndex    File: IntegrationTestWithCellVisibilityLoadAndVerify.java
public int runTestFromCommandLine() throws Exception {
  IntegrationTestingUtility.setUseDistributedCluster(getConf());
  int numPresplits = getConf().getInt("loadmapper.numPresplits", 5);
  // create HTableDescriptor for specified table
  String table = getTablename();
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
  htd.addFamily(new HColumnDescriptor(TEST_FAMILY));

  HBaseAdmin admin = new HBaseAdmin(getConf());
  try {
    admin.createTable(htd, Bytes.toBytes(0L), Bytes.toBytes(-1L), numPresplits);
  } finally {
    admin.close();
  }
  doLoad(getConf(), htd);
  doVerify(getConf(), htd);
  getTestingUtil(getConf()).deleteTable(htd.getName());
  return 0;
}
Project: hbase    File: IntegrationTestRSGroup.java
@Before
public void beforeMethod() throws Exception {
  if(!initialized) {
    LOG.info("Setting up IntegrationTestRSGroup");
    LOG.info("Initializing cluster with " + NUM_SLAVES_BASE + " servers");
    TEST_UTIL = new IntegrationTestingUtility();
    TEST_UTIL.getConfiguration().set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
      RSGroupBasedLoadBalancer.class.getName());
    TEST_UTIL.getConfiguration().set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
      RSGroupAdminEndpoint.class.getName());
    ((IntegrationTestingUtility)TEST_UTIL).initializeCluster(NUM_SLAVES_BASE);
    //set shared configs
    admin = TEST_UTIL.getAdmin();
    cluster = TEST_UTIL.getHBaseClusterInterface();
    rsGroupAdmin = new VerifyingRSGroupAdminClient(new RSGroupAdminClient(TEST_UTIL.getConnection()),
        TEST_UTIL.getConfiguration());
    LOG.info("Done initializing cluster");
    initialized = true;
    //cluster may not be clean
    //cleanup when initializing
    afterMethod();
  }
}
Project: hbase    File: ChaosMonkeyRunner.java
public static void main(String[] args) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  String[] actualArgs = args;
  if (args.length > 0 && "-c".equals(args[0])) {
    int argCount = args.length - 2;
    if (argCount < 0) {
      throw new IllegalArgumentException("Missing path for -c parameter");
    }
    // load the resource specified by the second parameter
    conf.addResource(args[1]);
    actualArgs = new String[argCount];
    System.arraycopy(args, 2, actualArgs, 0, argCount);
  }
  IntegrationTestingUtility.setUseDistributedCluster(conf);
  int ret = ToolRunner.run(conf, new ChaosMonkeyRunner(), actualArgs);
  System.exit(ret);
}
Project: hbase    File: IntegrationTestWithCellVisibilityLoadAndVerify.java
@Override
public int runTestFromCommandLine() throws Exception {
  IntegrationTestingUtility.setUseDistributedCluster(getConf());
  int numPresplits = getConf().getInt("loadmapper.numPresplits", 5);
  // create HTableDescriptor for specified table
  HTableDescriptor htd = new HTableDescriptor(getTablename());
  htd.addFamily(new HColumnDescriptor(TEST_FAMILY));

  try (Connection conn = ConnectionFactory.createConnection(getConf());
      Admin admin = conn.getAdmin()) {
    admin.createTable(htd, Bytes.toBytes(0L), Bytes.toBytes(-1L), numPresplits);
  }
  doLoad(getConf(), htd);
  doVerify(getConf(), htd);
  getTestingUtil(getConf()).deleteTable(getTablename());
  return 0;
}
Project: PyroDB    File: IntegrationTestWithCellVisibilityLoadAndVerify.java
public int runTestFromCommandLine() throws Exception {
  IntegrationTestingUtility.setUseDistributedCluster(getConf());
  int numPresplits = getConf().getInt("loadmapper.numPresplits", 5);
  // create HTableDescriptor for specified table
  String table = getTablename();
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
  htd.addFamily(new HColumnDescriptor(TEST_FAMILY));

  HBaseAdmin admin = new HBaseAdmin(getConf());
  try {
    admin.createTable(htd, Bytes.toBytes(0L), Bytes.toBytes(-1L), numPresplits);
  } finally {
    admin.close();
  }
  doLoad(getConf(), htd);
  doVerify(getConf(), htd);
  getTestingUtil(getConf()).deleteTable(htd.getName());
  return 0;
}
Project: ditb    File: IntegrationTestImportTsv.java
/**
 * Confirm the absence of the {@link TotalOrderPartitioner} partitions file.
 */
protected static void validateDeletedPartitionsFile(Configuration conf) throws IOException {
  if (!conf.getBoolean(IntegrationTestingUtility.IS_DISTRIBUTED_CLUSTER, false))
    return;

  FileSystem fs = FileSystem.get(conf);
  Path partitionsFile = new Path(TotalOrderPartitioner.getPartitionFile(conf));
  assertFalse("Failed to clean up partitions file.", fs.exists(partitionsFile));
}
Project: ditb    File: IntegrationTestImportTsv.java
public static void main(String[] args) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  IntegrationTestingUtility.setUseDistributedCluster(conf);
  util = new IntegrationTestingUtility(conf);
  // not using ToolRunner to avoid unnecessary call to setConf()
  args = new GenericOptionsParser(conf, args).getRemainingArgs();
  int status = new IntegrationTestImportTsv().run(args);
  System.exit(status);
}
Project: ditb    File: ChaosMonkeyRunner.java
protected IntegrationTestingUtility getTestingUtil(Configuration conf) {
  if (this.util == null) {
    if (conf == null) {
      this.util = new IntegrationTestingUtility();
      this.setConf(util.getConfiguration());
    } else {
      this.util = new IntegrationTestingUtility(conf);
    }
  }
  return util;
}
Project: ditb    File: IntegrationTestZKAndFSPermissions.java
public static void main(String[] args) throws Exception {
  Configuration configuration = HBaseConfiguration.create();
  IntegrationTestingUtility.setUseDistributedCluster(configuration);
  IntegrationTestZKAndFSPermissions tool = new IntegrationTestZKAndFSPermissions();
  int ret = ToolRunner.run(configuration, tool, args);
  System.exit(ret);
}
Project: ditb    File: IntegrationTestTimeBoundedMultiGetRequestsWithRegionReplicas.java
public static void main(String args[]) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  IntegrationTestingUtility.setUseDistributedCluster(conf);
  int ret = ToolRunner.run(conf,
      new IntegrationTestTimeBoundedMultiGetRequestsWithRegionReplicas(), args);
  System.exit(ret);
}
Project: LCIndex-HBase-0.94.16    File: ChaosMonkey.java
public static void main(String[] args) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  IntegrationTestingUtility.setUseDistributedCluster(conf);
  IntegrationTestingUtility util = new IntegrationTestingUtility(conf);
  util.initializeCluster(1);

  ChaosMonkey monkey = new ChaosMonkey(util);
  int ret = ToolRunner.run(conf, monkey, args);
  System.exit(ret);
}
Project: LCIndex-HBase-0.94.16    File: IntegrationTestBigLinkedList.java
private IntegrationTestingUtility getTestingUtil() {
  if (this.util == null) {
    if (getConf() == null) {
      this.util = new IntegrationTestingUtility();
    } else {
      this.util = new IntegrationTestingUtility(getConf());
    }
  }
  return util;
}
Project: LCIndex-HBase-0.94.16    File: IntegrationTestLoadAndVerify.java
private IntegrationTestingUtility getTestingUtil() {
  if (this.util == null) {
    if (getConf() == null) {
      this.util = new IntegrationTestingUtility();
    } else {
      this.util = new IntegrationTestingUtility(getConf());
    }
  }
  return util;
}
Project: HIndex    File: IntegrationTestImportTsv.java
/**
 * Confirm the absence of the {@link TotalOrderPartitioner} partitions file.
 */
protected static void validateDeletedPartitionsFile(Configuration conf) throws IOException {
  if (!conf.getBoolean(IntegrationTestingUtility.IS_DISTRIBUTED_CLUSTER, false))
    return;

  FileSystem fs = FileSystem.get(conf);
  Path partitionsFile = new Path(TotalOrderPartitioner.getPartitionFile(conf));
  assertFalse("Failed to clean up partitions file.", fs.exists(partitionsFile));
}
Project: HIndex    File: IntegrationTestImportTsv.java
@Override
public void commitJob(JobContext context) throws IOException {
  super.commitJob(context);

  // inherit jar dependencies added to distributed cache loaded by parent job
  Configuration conf = HBaseConfiguration.create(context.getConfiguration());
  conf.set("mapred.job.classpath.archives",
    context.getConfiguration().get("mapred.job.classpath.archives", ""));
  conf.set("mapreduce.job.cache.archives.visibilities",
    context.getConfiguration().get("mapreduce.job.cache.archives.visibilities", ""));

  // can't use IntegrationTest instance of util because it hasn't been
  // instantiated on the JVM running this method. Create our own.
  IntegrationTestingUtility util =
      new IntegrationTestingUtility(conf);

  // this is why we're here: launch a child job. The rest of this should
  // look a lot like TestImportTsv#testMROnTable.
  final String table = format("%s-%s-child", NAME, context.getJobID());
  final String cf = "FAM";
  String fileLocation = System.getenv(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION);
  conf.set(ImportTsv.CREDENTIALS_LOCATION, fileLocation);
  String[] args = {
      "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B",
      "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b",
      table
  };

  try {
    util.createTable(table, cf);
    LOG.info("testRunFromOutputCommitter: launching child job.");
    TestImportTsv.doMROnTableTest(util, cf, null, args, 1);
  } catch (Exception e) {
    throw new IOException("Underlying MapReduce job failed. Aborting commit.", e);
  } finally {
    if (util.getHBaseAdmin().tableExists(table)) {
      util.deleteTable(table);
    }
  }
}
Project: HIndex    File: IntegrationTestImportTsv.java
public static void main(String[] args) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  IntegrationTestingUtility.setUseDistributedCluster(conf);
  util = new IntegrationTestingUtility(conf);
  // not using ToolRunner to avoid unnecessary call to setConf()
  args = new GenericOptionsParser(conf, args).getRemainingArgs();
  int status = new IntegrationTestImportTsv().run(args);
  System.exit(status);
}
Project: HIndex    File: IntegrationTestLoadAndVerify.java
public int runTestFromCommandLine() throws Exception {
  IntegrationTestingUtility.setUseDistributedCluster(getConf());
  boolean doLoad = false;
  boolean doVerify = false;
  boolean doDelete = getConf().getBoolean("loadmapper.deleteAfter", true);
  int numPresplits = getConf().getInt("loadmapper.numPresplits", 40);

  if (toRun.equals("load")) {
    doLoad = true;
  } else if (toRun.equals("verify")) {
    doVerify = true;
  } else if (toRun.equals("loadAndVerify")) {
    doLoad = true;
    doVerify = true;
  } else {
    System.err.println("Invalid argument " + toRun);
    usage();
    return 1;
  }

  // create HTableDescriptor for specified table
  String table = getTablename();
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
  htd.addFamily(new HColumnDescriptor(TEST_FAMILY));

  HBaseAdmin admin = new HBaseAdmin(getConf());
  if (doLoad) {
    admin.createTable(htd, Bytes.toBytes(0L), Bytes.toBytes(-1L), numPresplits);
    doLoad(getConf(), htd);
  }
  if (doVerify) {
    doVerify(getConf(), htd);
    if (doDelete) {
      getTestingUtil(getConf()).deleteTable(htd.getName());
    }
  }
  return 0;
}
Project: IRIndex    File: ChaosMonkey.java
public static void main(String[] args) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  IntegrationTestingUtility.setUseDistributedCluster(conf);
  IntegrationTestingUtility util = new IntegrationTestingUtility(conf);
  util.initializeCluster(1);

  ChaosMonkey monkey = new ChaosMonkey(util);
  int ret = ToolRunner.run(conf, monkey, args);
  System.exit(ret);
}
Project: IRIndex    File: IntegrationTestBigLinkedList.java
private IntegrationTestingUtility getTestingUtil() {
  if (this.util == null) {
    if (getConf() == null) {
      this.util = new IntegrationTestingUtility();
    } else {
      this.util = new IntegrationTestingUtility(getConf());
    }
  }
  return util;
}
Project: IRIndex    File: IntegrationTestLoadAndVerify.java
private IntegrationTestingUtility getTestingUtil() {
  if (this.util == null) {
    if (getConf() == null) {
      this.util = new IntegrationTestingUtility();
    } else {
      this.util = new IntegrationTestingUtility(getConf());
    }
  }
  return util;
}
Project: hbase    File: IntegrationTestImportTsv.java
/**
 * Confirm the absence of the {@link TotalOrderPartitioner} partitions file.
 */
protected static void validateDeletedPartitionsFile(Configuration conf) throws IOException {
  if (!conf.getBoolean(IntegrationTestingUtility.IS_DISTRIBUTED_CLUSTER, false))
    return;

  FileSystem fs = FileSystem.get(conf);
  Path partitionsFile = new Path(TotalOrderPartitioner.getPartitionFile(conf));
  assertFalse("Failed to clean up partitions file.", fs.exists(partitionsFile));
}
Project: hbase    File: IntegrationTestImportTsv.java
public static void main(String[] args) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  IntegrationTestingUtility.setUseDistributedCluster(conf);
  util = new IntegrationTestingUtility(conf);
  int status = ToolRunner.run(conf, new IntegrationTestImportTsv(), args);
  System.exit(status);
}
Project: hbase    File: IntegrationTestRSGroup.java
@After
public void afterMethod() throws Exception {
  LOG.info("Cleaning up previous test run");
  //cleanup previous artifacts
  deleteTableIfNecessary();
  deleteNamespaceIfNecessary();
  deleteGroups();
  admin.setBalancerRunning(true, true);

  LOG.info("Restoring the cluster");
  ((IntegrationTestingUtility)TEST_UTIL).restoreCluster();
  LOG.info("Done restoring the cluster");

  TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      LOG.info("Waiting for cleanup to finish "+ rsGroupAdmin.listRSGroups());
      //Might be greater since moving servers back to default
      //is after starting a server
      return rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getServers().size()
          >= NUM_SLAVES_BASE;
    }
  });

  TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      LOG.info("Waiting for regionservers to be registered "+ rsGroupAdmin.listRSGroups());
      //Might be greater since moving servers back to default
      //is after starting a server
      return rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getServers().size()
          == getNumServers();
    }
  });

  LOG.info("Done cleaning up previous test run");
}
Project: hbase    File: Monkeys.java
public Monkeys(Configuration conf) {
  this.conf = Preconditions.checkNotNull(conf, "Should specify a configuration");
  this.monkeyRunner = new ChaosMonkeyRunner();
  this.runner = () -> {
    try {
      monkeyRunner.getAndStartMonkey();
    } catch (Exception e) {
      LOG.error("Exception occured when running chaos monkeys: ", e);
    }
  };
  this.executor = Executors.newSingleThreadExecutor(new ThreadFactoryBuilder()
      .setDaemon(true).setNameFormat("ChaosMonkey").build());
  IntegrationTestingUtility.setUseDistributedCluster(this.conf);
}
Project: hbase    File: ChaosMonkeyRunner.java
protected IntegrationTestingUtility getTestingUtil(Configuration conf) {
  if (this.util == null) {
    if (conf == null) {
      this.util = new IntegrationTestingUtility();
      this.setConf(util.getConfiguration());
    } else {
      this.util = new IntegrationTestingUtility(conf);
    }
  }
  return util;
}
Project: hbase    File: IntegrationTestZKAndFSPermissions.java
public static void main(String[] args) throws Exception {
  Configuration configuration = HBaseConfiguration.create();
  IntegrationTestingUtility.setUseDistributedCluster(configuration);
  IntegrationTestZKAndFSPermissions tool = new IntegrationTestZKAndFSPermissions();
  int ret = ToolRunner.run(configuration, tool, args);
  System.exit(ret);
}
Project: hbase    File: IntegrationTestTimeBoundedMultiGetRequestsWithRegionReplicas.java
public static void main(String args[]) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  IntegrationTestingUtility.setUseDistributedCluster(conf);
  int ret = ToolRunner.run(conf,
      new IntegrationTestTimeBoundedMultiGetRequestsWithRegionReplicas(), args);
  System.exit(ret);
}
Project: hbase    File: IntegrationTestMonkeys.java
public static void main(String[] args) throws Exception {
  // Run chaos monkeys 15 seconds, then stop them.
  // After 10 seconds, run chaos monkeys again.
  Configuration conf = HBaseConfiguration.create();
  IntegrationTestingUtility.setUseDistributedCluster(conf);
  int exitCode = ToolRunner.run(conf, new IntegrationTestMonkeys(), args);
  System.exit(exitCode);
}
Project: PyroDB    File: IntegrationTestImportTsv.java
/**
 * Confirm the absence of the {@link TotalOrderPartitioner} partitions file.
 */
protected static void validateDeletedPartitionsFile(Configuration conf) throws IOException {
  if (!conf.getBoolean(IntegrationTestingUtility.IS_DISTRIBUTED_CLUSTER, false))
    return;

  FileSystem fs = FileSystem.get(conf);
  Path partitionsFile = new Path(TotalOrderPartitioner.getPartitionFile(conf));
  assertFalse("Failed to clean up partitions file.", fs.exists(partitionsFile));
}
Project: PyroDB    File: IntegrationTestImportTsv.java
@Override
public void commitJob(JobContext context) throws IOException {
  super.commitJob(context);

  // inherit jar dependencies added to distributed cache loaded by parent job
  Configuration conf = HBaseConfiguration.create(context.getConfiguration());
  conf.set("mapreduce.job.classpath.archives",
    context.getConfiguration().get("mapreduce.job.classpath.archives", ""));
  conf.set("mapreduce.job.cache.archives.visibilities",
    context.getConfiguration().get("mapreduce.job.cache.archives.visibilities", ""));

  // can't use IntegrationTest instance of util because it hasn't been
  // instantiated on the JVM running this method. Create our own.
  IntegrationTestingUtility util =
      new IntegrationTestingUtility(conf);

  // this is why we're here: launch a child job. The rest of this should
  // look a lot like TestImportTsv#testMROnTable.
  final String table = format("%s-%s-child", NAME, context.getJobID());
  final String cf = "FAM";
  String fileLocation = System.getenv(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION);
  conf.set(ImportTsv.CREDENTIALS_LOCATION, fileLocation);
  String[] args = {
      "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B",
      "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b",
      table
  };

  try {
    util.createTable(table, cf);
    LOG.info("testRunFromOutputCommitter: launching child job.");
    TestImportTsv.doMROnTableTest(util, cf, null, args, 1);
  } catch (Exception e) {
    throw new IOException("Underlying MapReduce job failed. Aborting commit.", e);
  } finally {
    if (util.getHBaseAdmin().tableExists(table)) {
      util.deleteTable(table);
    }
  }
}
Project: PyroDB    File: IntegrationTestImportTsv.java
public static void main(String[] args) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  IntegrationTestingUtility.setUseDistributedCluster(conf);
  util = new IntegrationTestingUtility(conf);
  // not using ToolRunner to avoid unnecessary call to setConf()
  args = new GenericOptionsParser(conf, args).getRemainingArgs();
  int status = new IntegrationTestImportTsv().run(args);
  System.exit(status);
}
Project: PyroDB    File: IntegrationTestLoadAndVerify.java
public int runTestFromCommandLine() throws Exception {
  IntegrationTestingUtility.setUseDistributedCluster(getConf());
  boolean doLoad = false;
  boolean doVerify = false;
  boolean doDelete = getConf().getBoolean("loadmapper.deleteAfter", true);
  int numPresplits = getConf().getInt("loadmapper.numPresplits", 40);

  if (toRun.equals("load")) {
    doLoad = true;
  } else if (toRun.equals("verify")) {
    doVerify = true;
  } else if (toRun.equals("loadAndVerify")) {
    doLoad = true;
    doVerify = true;
  } else {
    System.err.println("Invalid argument " + toRun);
    usage();
    return 1;
  }

  // create HTableDescriptor for specified table
  String table = getTablename();
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
  htd.addFamily(new HColumnDescriptor(TEST_FAMILY));

  HBaseAdmin admin = new HBaseAdmin(getConf());
  if (doLoad) {
    admin.createTable(htd, Bytes.toBytes(0L), Bytes.toBytes(-1L), numPresplits);
    doLoad(getConf(), htd);
  }
  if (doVerify) {
    doVerify(getConf(), htd);
    if (doDelete) {
      getTestingUtil(getConf()).deleteTable(htd.getName());
    }
  }
  return 0;
}
Project: HBase-Research    File: ChaosMonkey.java
public static void main(String[] args) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  IntegrationTestingUtility.setUseDistributedCluster(conf);
  IntegrationTestingUtility util = new IntegrationTestingUtility(conf);
  util.initializeCluster(1);

  ChaosMonkey monkey = new ChaosMonkey(util, EVERY_MINUTE_RANDOM_ACTION_POLICY);
  int ret = ToolRunner.run(conf, monkey, args);
  System.exit(ret);
}
Project: HBase-Research    File: IntegrationTestBigLinkedList.java
private IntegrationTestingUtility getTestingUtil() {
  if (this.util == null) {
    if (getConf() == null) {
      this.util = new IntegrationTestingUtility();
    } else {
      this.util = new IntegrationTestingUtility(getConf());
    }
  }
  return util;
}
Project: HBase-Research    File: IntegrationTestLoadAndVerify.java
private IntegrationTestingUtility getTestingUtil() {
  if (this.util == null) {
    if (getConf() == null) {
      this.util = new IntegrationTestingUtility();
    } else {
      this.util = new IntegrationTestingUtility(getConf());
    }
  }
  return util;
}
Project: hbase-0.94.8-qod    File: ChaosMonkey.java
public static void main(String[] args) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  IntegrationTestingUtility.setUseDistributedCluster(conf);
  IntegrationTestingUtility util = new IntegrationTestingUtility(conf);
  util.initializeCluster(1);

  ChaosMonkey monkey = new ChaosMonkey(util, EVERY_MINUTE_RANDOM_ACTION_POLICY);
  int ret = ToolRunner.run(conf, monkey, args);
  System.exit(ret);
}
Project: hbase-0.94.8-qod    File: IntegrationTestBigLinkedList.java
private IntegrationTestingUtility getTestingUtil() {
  if (this.util == null) {
    if (getConf() == null) {
      this.util = new IntegrationTestingUtility();
    } else {
      this.util = new IntegrationTestingUtility(getConf());
    }
  }
  return util;
}
Project: hbase-0.94.8-qod    File: IntegrationTestLoadAndVerify.java
private IntegrationTestingUtility getTestingUtil() {
  if (this.util == null) {
    if (getConf() == null) {
      this.util = new IntegrationTestingUtility();
    } else {
      this.util = new IntegrationTestingUtility(getConf());
    }
  }
  return util;
}