Java class org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication example source code
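
The snippets below collect real-world usages of this class. The Driver.java examples register VerifyReplication, under the program name "verifyrep", alongside the other runnable HBase MapReduce tools; the test classes construct the verification job directly through createSubmittableJob (or an instance's doCommandLine) and assert on its GOODROWS and BADROWS counters.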

Project: ditb    File: Driver.java
/**
 * @param args
 * @throws Throwable
 */
public static void main(String[] args) throws Throwable {
  ProgramDriver pgd = new ProgramDriver();

  pgd.addClass(RowCounter.NAME, RowCounter.class,
    "Count rows in HBase table.");
  pgd.addClass(CellCounter.NAME, CellCounter.class,
    "Count cells in HBase table.");
  pgd.addClass(Export.NAME, Export.class, "Write table data to HDFS.");
  pgd.addClass(Import.NAME, Import.class, "Import data written by Export.");
  pgd.addClass(ImportTsv.NAME, ImportTsv.class, "Import data in TSV format.");
  pgd.addClass(LoadIncrementalHFiles.NAME, LoadIncrementalHFiles.class,
               "Complete a bulk data load.");
  pgd.addClass(CopyTable.NAME, CopyTable.class,
      "Export a table from local cluster to peer cluster.");
  pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, "Compare" +
      " the data from tables in two different clusters. WARNING: It" +
      " doesn't work for incrementColumnValues'd cells since the" +
      " timestamp is changed after being appended to the log.");
  pgd.addClass(WALPlayer.NAME, WALPlayer.class, "Replay WAL files.");
  pgd.addClass(ExportSnapshot.NAME, ExportSnapshot.class, "Export" +
      " the specific snapshot to a given FileSystem.");

  ProgramDriver.class.getMethod("driver", new Class [] {String[].class}).
    invoke(pgd, new Object[]{args});
}
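
For context, once VerifyReplication is registered with the ProgramDriver it can be dispatched by name. A minimal sketch, assuming the HBase MapReduce classes are on the classpath and using a placeholder peer id "2" and table name "myTable" (both hypothetical):

import org.apache.hadoop.hbase.mapreduce.Driver;

public class RunVerifyRep {
  public static void main(String[] args) throws Throwable {
    // "verifyrep" is the name VerifyReplication registers under (VerifyReplication.NAME);
    // the remaining arguments are the replication peer id and the table to verify.
    Driver.main(new String[] { "verifyrep", "2", "myTable" });
  }
}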
Project: LCIndex-HBase-0.94.16    File: Driver.java
/**
 * @param args
 * @throws Throwable
 */
public static void main(String[] args) throws Throwable {
  ProgramDriver pgd = new ProgramDriver();
  pgd.addClass(RowCounter.NAME, RowCounter.class,
    "Count rows in HBase table");
  pgd.addClass(CellCounter.NAME, CellCounter.class,
    "Count cells in HBase table");
  pgd.addClass(Export.NAME, Export.class, "Write table data to HDFS.");
  pgd.addClass(Import.NAME, Import.class, "Import data written by Export.");
  pgd.addClass(ImportTsv.NAME, ImportTsv.class, "Import data in TSV format.");
  pgd.addClass(LoadIncrementalHFiles.NAME, LoadIncrementalHFiles.class,
               "Complete a bulk data load.");
  pgd.addClass(CopyTable.NAME, CopyTable.class,
      "Export a table from local cluster to peer cluster");
  pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, "Compare" +
      " the data from tables in two different clusters. WARNING: It" +
      " doesn't work for incrementColumnValues'd cells since the" +
      " timestamp is changed after being appended to the log.");
  ProgramDriver.class.getMethod("driver", new Class [] {String[].class}).
    invoke(pgd, new Object[]{args});
}
Project: pbase    File: Driver.java
/**
 * @param args
 * @throws Throwable
 */
public static void main(String[] args) throws Throwable {
  ProgramDriver pgd = new ProgramDriver();
  pgd.addClass(RowCounter.NAME, RowCounter.class,
    "Count rows in HBase table");
  pgd.addClass(CellCounter.NAME, CellCounter.class,
    "Count cells in HBase table");
  pgd.addClass(Export.NAME, Export.class, "Write table data to HDFS.");
  pgd.addClass(Import.NAME, Import.class, "Import data written by Export.");
  pgd.addClass(ImportTsv.NAME, ImportTsv.class, "Import data in TSV format.");
  pgd.addClass(LoadIncrementalHFiles.NAME, LoadIncrementalHFiles.class,
               "Complete a bulk data load.");
  pgd.addClass(CopyTable.NAME, CopyTable.class,
      "Export a table from local cluster to peer cluster");
  pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, "Compare" +
      " the data from tables in two different clusters. WARNING: It" +
      " doesn't work for incrementColumnValues'd cells since the" +
      " timestamp is changed after being appended to the log.");
  ProgramDriver.class.getMethod("driver", new Class [] {String[].class}).
    invoke(pgd, new Object[]{args});
}
Project: HIndex    File: Driver.java
/**
 * @param args
 * @throws Throwable
 */
public static void main(String[] args) throws Throwable {
  ProgramDriver pgd = new ProgramDriver();
  pgd.addClass(RowCounter.NAME, RowCounter.class,
    "Count rows in HBase table");
  pgd.addClass(CellCounter.NAME, CellCounter.class,
    "Count cells in HBase table");
  pgd.addClass(Export.NAME, Export.class, "Write table data to HDFS.");
  pgd.addClass(Import.NAME, Import.class, "Import data written by Export.");
  pgd.addClass(ImportTsv.NAME, ImportTsv.class, "Import data in TSV format.");
  pgd.addClass(LoadIncrementalHFiles.NAME, LoadIncrementalHFiles.class,
               "Complete a bulk data load.");
  pgd.addClass(CopyTable.NAME, CopyTable.class,
      "Export a table from local cluster to peer cluster");
  pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, "Compare" +
      " the data from tables in two different clusters. WARNING: It" +
      " doesn't work for incrementColumnValues'd cells since the" +
      " timestamp is changed after being appended to the log.");
  ProgramDriver.class.getMethod("driver", new Class [] {String[].class}).
    invoke(pgd, new Object[]{args});
}
Project: IRIndex    File: Driver.java
/**
 * @param args
 * @throws Throwable
 */
public static void main(String[] args) throws Throwable {
  ProgramDriver pgd = new ProgramDriver();
  pgd.addClass(RowCounter.NAME, RowCounter.class,
    "Count rows in HBase table");
  pgd.addClass(CellCounter.NAME, CellCounter.class,
    "Count cells in HBase table");
  pgd.addClass(Export.NAME, Export.class, "Write table data to HDFS.");
  pgd.addClass(Import.NAME, Import.class, "Import data written by Export.");
  pgd.addClass(ImportTsv.NAME, ImportTsv.class, "Import data in TSV format.");
  pgd.addClass(LoadIncrementalHFiles.NAME, LoadIncrementalHFiles.class,
               "Complete a bulk data load.");
  pgd.addClass(CopyTable.NAME, CopyTable.class,
      "Export a table from local cluster to peer cluster");
  pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, "Compare" +
      " the data from tables in two different clusters. WARNING: It" +
      " doesn't work for incrementColumnValues'd cells since the" +
      " timestamp is changed after being appended to the log.");
  ProgramDriver.class.getMethod("driver", new Class [] {String[].class}).
    invoke(pgd, new Object[]{args});
}
Project: hbase    File: Driver.java
/**
 * @param args
 * @throws Throwable
 */
public static void main(String[] args) throws Throwable {
  ProgramDriver pgd = new ProgramDriver();

  pgd.addClass(RowCounter.NAME, RowCounter.class,
    "Count rows in HBase table.");
  pgd.addClass(CellCounter.NAME, CellCounter.class,
    "Count cells in HBase table.");
  pgd.addClass(Export.NAME, Export.class, "Write table data to HDFS.");
  pgd.addClass(Import.NAME, Import.class, "Import data written by Export.");
  pgd.addClass(ImportTsv.NAME, ImportTsv.class, "Import data in TSV format.");
  pgd.addClass(LoadIncrementalHFiles.NAME, LoadIncrementalHFiles.class,
               "Complete a bulk data load.");
  pgd.addClass(CopyTable.NAME, CopyTable.class,
      "Export a table from local cluster to peer cluster.");
  pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, "Compare" +
      " data from tables in two different clusters. It" +
      " doesn't work for incrementColumnValues'd cells since" +
      " timestamp is changed after appending to WAL.");
  pgd.addClass(WALPlayer.NAME, WALPlayer.class, "Replay WAL files.");
  pgd.addClass(ExportSnapshot.NAME, ExportSnapshot.class, "Export" +
      " the specific snapshot to a given FileSystem.");

  ProgramDriver.class.getMethod("driver", new Class [] {String[].class}).
    invoke(pgd, new Object[]{args});
}
Project: RStore    File: Driver.java
/**
 * @param args
 * @throws Throwable
 */
public static void main(String[] args) throws Throwable {
  ProgramDriver pgd = new ProgramDriver();
  pgd.addClass(RowCounter.NAME, RowCounter.class,
    "Count rows in HBase table");
  pgd.addClass(CellCounter.NAME, CellCounter.class,
    "Count cells in HBase table");
  pgd.addClass(Export.NAME, Export.class, "Write table data to HDFS.");
  pgd.addClass(Import.NAME, Import.class, "Import data written by Export.");
  pgd.addClass(ImportTsv.NAME, ImportTsv.class, "Import data in TSV format.");
  pgd.addClass(LoadIncrementalHFiles.NAME, LoadIncrementalHFiles.class,
               "Complete a bulk data load.");
  pgd.addClass(CopyTable.NAME, CopyTable.class,
      "Export a table from local cluster to peer cluster");
  pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, "Compare" +
      " the data from tables in two different clusters. WARNING: It" +
      " doesn't work for incrementColumnValues'd cells since the" +
      " timestamp is changed after being appended to the log.");
  ProgramDriver.class.getMethod("driver", new Class [] {String[].class}).
    invoke(pgd, new Object[]{args});
}
Project: PyroDB    File: Driver.java
/**
 * @param args
 * @throws Throwable
 */
public static void main(String[] args) throws Throwable {
  ProgramDriver pgd = new ProgramDriver();
  pgd.addClass(RowCounter.NAME, RowCounter.class,
    "Count rows in HBase table");
  pgd.addClass(CellCounter.NAME, CellCounter.class,
    "Count cells in HBase table");
  pgd.addClass(Export.NAME, Export.class, "Write table data to HDFS.");
  pgd.addClass(Import.NAME, Import.class, "Import data written by Export.");
  pgd.addClass(ImportTsv.NAME, ImportTsv.class, "Import data in TSV format.");
  pgd.addClass(LoadIncrementalHFiles.NAME, LoadIncrementalHFiles.class,
               "Complete a bulk data load.");
  pgd.addClass(CopyTable.NAME, CopyTable.class,
      "Export a table from local cluster to peer cluster");
  pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, "Compare" +
      " the data from tables in two different clusters. WARNING: It" +
      " doesn't work for incrementColumnValues'd cells since the" +
      " timestamp is changed after being appended to the log.");
  ProgramDriver.class.getMethod("driver", new Class [] {String[].class}).
    invoke(pgd, new Object[]{args});
}
Project: c5    File: Driver.java
/**
 * @param args
 * @throws Throwable
 */
public static void main(String[] args) throws Throwable {
  ProgramDriver pgd = new ProgramDriver();
  pgd.addClass(RowCounter.NAME, RowCounter.class,
    "Count rows in HBase table");
  pgd.addClass(CellCounter.NAME, CellCounter.class,
    "Count cells in HBase table");
  pgd.addClass(Export.NAME, Export.class, "Write table data to HDFS.");
  pgd.addClass(Import.NAME, Import.class, "Import data written by Export.");
  pgd.addClass(ImportTsv.NAME, ImportTsv.class, "Import data in TSV format.");
  pgd.addClass(LoadIncrementalHFiles.NAME, LoadIncrementalHFiles.class,
               "Complete a bulk data load.");
  pgd.addClass(CopyTable.NAME, CopyTable.class,
      "Export a table from local cluster to peer cluster");
  pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, "Compare" +
      " the data from tables in two different clusters. WARNING: It" +
      " doesn't work for incrementColumnValues'd cells since the" +
      " timestamp is changed after being appended to the log.");
  ProgramDriver.class.getMethod("driver", new Class [] {String[].class}).
    invoke(pgd, new Object[]{args});
}
Project: HBase-Research    File: Driver.java
/**
 * @param args
 * @throws Throwable
 */
public static void main(String[] args) throws Throwable {
  ProgramDriver pgd = new ProgramDriver();
  pgd.addClass(RowCounter.NAME, RowCounter.class,
    "Count rows in HBase table");
  pgd.addClass(CellCounter.NAME, CellCounter.class,
    "Count cells in HBase table");
  pgd.addClass(Export.NAME, Export.class, "Write table data to HDFS.");
  pgd.addClass(Import.NAME, Import.class, "Import data written by Export.");
  pgd.addClass(ImportTsv.NAME, ImportTsv.class, "Import data in TSV format.");
  pgd.addClass(LoadIncrementalHFiles.NAME, LoadIncrementalHFiles.class,
               "Complete a bulk data load.");
  pgd.addClass(CopyTable.NAME, CopyTable.class,
      "Export a table from local cluster to peer cluster");
  pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, "Compare" +
      " the data from tables in two different clusters. WARNING: It" +
      " doesn't work for incrementColumnValues'd cells since the" +
      " timestamp is changed after being appended to the log.");
  ProgramDriver.class.getMethod("driver", new Class [] {String[].class}).
    invoke(pgd, new Object[]{args});
}
Project: hbase-0.94.8-qod    File: Driver.java
/**
 * @param args
 * @throws Throwable
 */
public static void main(String[] args) throws Throwable {
  ProgramDriver pgd = new ProgramDriver();
  pgd.addClass(RowCounter.NAME, RowCounter.class,
    "Count rows in HBase table");
  pgd.addClass(CellCounter.NAME, CellCounter.class,
    "Count cells in HBase table");
  pgd.addClass(Export.NAME, Export.class, "Write table data to HDFS.");
  pgd.addClass(Import.NAME, Import.class, "Import data written by Export.");
  pgd.addClass(ImportTsv.NAME, ImportTsv.class, "Import data in TSV format.");
  pgd.addClass(LoadIncrementalHFiles.NAME, LoadIncrementalHFiles.class,
               "Complete a bulk data load.");
  pgd.addClass(CopyTable.NAME, CopyTable.class,
      "Export a table from local cluster to peer cluster");
  pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, "Compare" +
      " the data from tables in two different clusters. WARNING: It" +
      " doesn't work for incrementColumnValues'd cells since the" +
      " timestamp is changed after being appended to the log.");
  ProgramDriver.class.getMethod("driver", new Class [] {String[].class}).
    invoke(pgd, new Object[]{args});
}
Project: DominoHBase    File: Driver.java
/**
 * @param args
 * @throws Throwable
 */
public static void main(String[] args) throws Throwable {
  ProgramDriver pgd = new ProgramDriver();
  pgd.addClass(RowCounter.NAME, RowCounter.class,
    "Count rows in HBase table");
  pgd.addClass(CellCounter.NAME, CellCounter.class,
    "Count cells in HBase table");
  pgd.addClass(Export.NAME, Export.class, "Write table data to HDFS.");
  pgd.addClass(Import.NAME, Import.class, "Import data written by Export.");
  pgd.addClass(ImportTsv.NAME, ImportTsv.class, "Import data in TSV format.");
  pgd.addClass(LoadIncrementalHFiles.NAME, LoadIncrementalHFiles.class,
               "Complete a bulk data load.");
  pgd.addClass(CopyTable.NAME, CopyTable.class,
      "Export a table from local cluster to peer cluster");
  pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, "Compare" +
      " the data from tables in two different clusters. WARNING: It" +
      " doesn't work for incrementColumnValues'd cells since the" +
      " timestamp is changed after being appended to the log.");
  ProgramDriver.class.getMethod("driver", new Class [] {String[].class}).
    invoke(pgd, new Object[]{args});
}
Project: hindex    File: Driver.java
/**
 * @param args
 * @throws Throwable
 */
public static void main(String[] args) throws Throwable {
  ProgramDriver pgd = new ProgramDriver();
  pgd.addClass(RowCounter.NAME, RowCounter.class,
    "Count rows in HBase table");
  pgd.addClass(CellCounter.NAME, CellCounter.class,
    "Count cells in HBase table");
  pgd.addClass(Export.NAME, Export.class, "Write table data to HDFS.");
  pgd.addClass(Import.NAME, Import.class, "Import data written by Export.");
  pgd.addClass(ImportTsv.NAME, ImportTsv.class, "Import data in TSV format.");
  pgd.addClass(LoadIncrementalHFiles.NAME, LoadIncrementalHFiles.class,
               "Complete a bulk data load.");
  pgd.addClass(CopyTable.NAME, CopyTable.class,
      "Export a table from local cluster to peer cluster");
  pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, "Compare" +
      " the data from tables in two different clusters. WARNING: It" +
      " doesn't work for incrementColumnValues'd cells since the" +
      " timestamp is changed after being appended to the log.");
  ProgramDriver.class.getMethod("driver", new Class [] {String[].class}).
    invoke(pgd, new Object[]{args});
}
Project: hbase    File: TestVerifyReplication.java
private void runVerifyReplication(String[] args, int expectedGoodRows, int expectedBadRows)
    throws IOException, InterruptedException, ClassNotFoundException {
  Job job = new VerifyReplication().createSubmittableJob(new Configuration(conf1), args);
  if (job == null) {
    fail("Job wasn't created, see the log");
  }
  if (!job.waitForCompletion(true)) {
    fail("Job failed, see the log");
  }
  assertEquals(expectedGoodRows,
    job.getCounters().findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue());
  assertEquals(expectedBadRows,
    job.getCounters().findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue());
}
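
A hypothetical call to this helper, mirroring the argument shape used by the other tests in this section (peer id "2", the table under test, and the row counts from the small-batch load, assuming the NB_ROWS_IN_BATCH constant of the replication test base class):

// All rows identical on both clusters: expect every row GOOD, none BAD.
String[] args = new String[] { "2", tableName.getNameAsString() };
runVerifyReplication(args, NB_ROWS_IN_BATCH, 0);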
Project: hbase    File: TestVerifyReplication.java
@Test
public void testVerifyReplicationSnapshotArguments() {
  String[] args =
      new String[] { "--sourceSnapshotName=snapshot1", "2", tableName.getNameAsString() };
  assertFalse(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args));

  args = new String[] { "--sourceSnapshotTmpDir=tmp", "2", tableName.getNameAsString() };
  assertFalse(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args));

  args = new String[] { "--sourceSnapshotName=snapshot1", "--sourceSnapshotTmpDir=tmp", "2",
      tableName.getNameAsString() };
  assertTrue(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args));

  args = new String[] { "--peerSnapshotName=snapshot1", "2", tableName.getNameAsString() };
  assertFalse(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args));

  args = new String[] { "--peerSnapshotTmpDir=/tmp/", "2", tableName.getNameAsString() };
  assertFalse(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args));

  args = new String[] { "--peerSnapshotName=snapshot1", "--peerSnapshotTmpDir=/tmp/",
      "--peerFSAddress=tempfs", "--peerHBaseRootAddress=hdfs://tempfs:50070/hbase/", "2",
      tableName.getNameAsString() };
  assertTrue(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args));

  args = new String[] { "--sourceSnapshotName=snapshot1", "--sourceSnapshotTmpDir=/tmp/",
      "--peerSnapshotName=snapshot2", "--peerSnapshotTmpDir=/tmp/", "--peerFSAddress=tempfs",
      "--peerHBaseRootAddress=hdfs://tempfs:50070/hbase/", "2", tableName.getNameAsString() };

  assertTrue(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args));
}
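
The assertions above pin down the argument pairing rules: --sourceSnapshotName and --sourceSnapshotTmpDir are only accepted together, and --peerSnapshotName and --peerSnapshotTmpDir must additionally be accompanied by --peerFSAddress and --peerHBaseRootAddress.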
Project: LCIndex-HBase-0.94.16    File: TestReplicationSmallTests.java
/**
 * Do a small loading into a table, make sure the data is really the same,
 * then run the VerifyReplication job to check the results. Do a second
 * comparison where all the cells are different.
 * @throws Exception
 */
@Test(timeout=300000)
public void testVerifyRepJob() throws Exception {
  // Populate the tables, at the same time it guarantees that the tables are
  // identical since it does the check
  testSmallBatch();

  String[] args = new String[] {"2", Bytes.toString(tableName)};
  Job job = VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS, args);
  if (job == null) {
    fail("Job wasn't created, see the log");
  }
  if (!job.waitForCompletion(true)) {
    fail("Job failed, see the log");
  }
  assertEquals(NB_ROWS_IN_BATCH, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue());
  assertEquals(0, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue());

  Scan scan = new Scan();
  ResultScanner rs = htable2.getScanner(scan);
  Put put = null;
  for (Result result : rs) {
    put = new Put(result.getRow());
    KeyValue firstVal = result.raw()[0];
    put.add(firstVal.getFamily(),
        firstVal.getQualifier(), Bytes.toBytes("diff data"));
    htable2.put(put);
  }
  Delete delete = new Delete(put.getRow());
  htable2.delete(delete);
  job = VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS, args);
  if (job == null) {
    fail("Job wasn't created, see the log");
  }
  if (!job.waitForCompletion(true)) {
    fail("Job failed, see the log");
  }
  assertEquals(0, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue());
  assertEquals(NB_ROWS_IN_BATCH, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue());
}
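
The second run expects the counters to flip: the loop rewrites the first cell of every row in htable2 with "diff data" and the trailing Delete removes the last scanned row entirely, so every one of the NB_ROWS_IN_BATCH rows now either differs from or is missing on the peer, and all of them land in BADROWS.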
Project: pbase    File: TestReplicationSmallTests.java
/**
 * Do a small loading into a table, make sure the data is really the same,
 * then run the VerifyReplication job to check the results. Do a second
 * comparison where all the cells are different.
 * @throws Exception
 */
@Test(timeout=300000)
public void testVerifyRepJob() throws Exception {
  // Populate the tables, at the same time it guarantees that the tables are
  // identical since it does the check
  testSmallBatch();

  String[] args = new String[] {"2", tableName.getNameAsString()};
  Job job = VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS, args);
  if (job == null) {
    fail("Job wasn't created, see the log");
  }
  if (!job.waitForCompletion(true)) {
    fail("Job failed, see the log");
  }
  assertEquals(NB_ROWS_IN_BATCH, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue());
  assertEquals(0, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue());

  Scan scan = new Scan();
  ResultScanner rs = htable2.getScanner(scan);
  Put put = null;
  for (Result result : rs) {
    put = new Put(result.getRow());
    Cell firstVal = result.rawCells()[0];
    put.add(CellUtil.cloneFamily(firstVal),
        CellUtil.cloneQualifier(firstVal), Bytes.toBytes("diff data"));
    htable2.put(put);
  }
  Delete delete = new Delete(put.getRow());
  htable2.delete(delete);
  job = VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS, args);
  if (job == null) {
    fail("Job wasn't created, see the log");
  }
  if (!job.waitForCompletion(true)) {
    fail("Job failed, see the log");
  }
  assertEquals(0, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue());
  assertEquals(NB_ROWS_IN_BATCH, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue());
}
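
Note the API drift between branches visible in these copies of the same test: the 0.94-era versions read the first cell as a KeyValue via result.raw() and firstVal.getFamily()/getQualifier(), while the newer versions use the Cell interface with result.rawCells() and CellUtil.cloneFamily()/cloneQualifier(); the test logic is otherwise identical.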
Project: HIndex    File: TestReplicationSmallTests.java
/**
 * Do a small loading into a table, make sure the data is really the same,
 * then run the VerifyReplication job to check the results. Do a second
 * comparison where all the cells are different.
 * @throws Exception
 */
@Test(timeout=300000)
public void testVerifyRepJob() throws Exception {
  // Populate the tables, at the same time it guarantees that the tables are
  // identical since it does the check
  testSmallBatch();

  String[] args = new String[] {"2", Bytes.toString(tableName)};
  Job job = VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS, args);
  if (job == null) {
    fail("Job wasn't created, see the log");
  }
  if (!job.waitForCompletion(true)) {
    fail("Job failed, see the log");
  }
  assertEquals(NB_ROWS_IN_BATCH, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue());
  assertEquals(0, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue());

  Scan scan = new Scan();
  ResultScanner rs = htable2.getScanner(scan);
  Put put = null;
  for (Result result : rs) {
    put = new Put(result.getRow());
    Cell firstVal = result.rawCells()[0];
    put.add(CellUtil.cloneFamily(firstVal),
        CellUtil.cloneQualifier(firstVal), Bytes.toBytes("diff data"));
    htable2.put(put);
  }
  Delete delete = new Delete(put.getRow());
  htable2.delete(delete);
  job = VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS, args);
  if (job == null) {
    fail("Job wasn't created, see the log");
  }
  if (!job.waitForCompletion(true)) {
    fail("Job failed, see the log");
  }
  assertEquals(0, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue());
  assertEquals(NB_ROWS_IN_BATCH, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue());
}
Project: IRIndex    File: TestReplicationSmallTests.java
/**
 * Do a small loading into a table, make sure the data is really the same,
 * then run the VerifyReplication job to check the results. Do a second
 * comparison where all the cells are different.
 * @throws Exception
 */
@Test(timeout=300000)
public void testVerifyRepJob() throws Exception {
  // Populate the tables, at the same time it guarantees that the tables are
  // identical since it does the check
  testSmallBatch();

  String[] args = new String[] {"2", Bytes.toString(tableName)};
  Job job = VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS, args);
  if (job == null) {
    fail("Job wasn't created, see the log");
  }
  if (!job.waitForCompletion(true)) {
    fail("Job failed, see the log");
  }
  assertEquals(NB_ROWS_IN_BATCH, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue());
  assertEquals(0, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue());

  Scan scan = new Scan();
  ResultScanner rs = htable2.getScanner(scan);
  Put put = null;
  for (Result result : rs) {
    put = new Put(result.getRow());
    KeyValue firstVal = result.raw()[0];
    put.add(firstVal.getFamily(),
        firstVal.getQualifier(), Bytes.toBytes("diff data"));
    htable2.put(put);
  }
  Delete delete = new Delete(put.getRow());
  htable2.delete(delete);
  job = VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS, args);
  if (job == null) {
    fail("Job wasn't created, see the log");
  }
  if (!job.waitForCompletion(true)) {
    fail("Job failed, see the log");
  }
  assertEquals(0, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue());
  assertEquals(NB_ROWS_IN_BATCH, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue());
}
Project: PyroDB    File: TestReplicationSmallTests.java
/**
 * Do a small loading into a table, make sure the data is really the same,
 * then run the VerifyReplication job to check the results. Do a second
 * comparison where all the cells are different.
 * @throws Exception
 */
@Test(timeout=300000)
public void testVerifyRepJob() throws Exception {
  // Populate the tables, at the same time it guarantees that the tables are
  // identical since it does the check
  testSmallBatch();

  String[] args = new String[] {"2", Bytes.toString(tableName)};
  Job job = VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS, args);
  if (job == null) {
    fail("Job wasn't created, see the log");
  }
  if (!job.waitForCompletion(true)) {
    fail("Job failed, see the log");
  }
  assertEquals(NB_ROWS_IN_BATCH, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue());
  assertEquals(0, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue());

  Scan scan = new Scan();
  ResultScanner rs = htable2.getScanner(scan);
  Put put = null;
  for (Result result : rs) {
    put = new Put(result.getRow());
    Cell firstVal = result.rawCells()[0];
    put.add(CellUtil.cloneFamily(firstVal),
        CellUtil.cloneQualifier(firstVal), Bytes.toBytes("diff data"));
    htable2.put(put);
  }
  Delete delete = new Delete(put.getRow());
  htable2.delete(delete);
  job = VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS, args);
  if (job == null) {
    fail("Job wasn't created, see the log");
  }
  if (!job.waitForCompletion(true)) {
    fail("Job failed, see the log");
  }
  assertEquals(0, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue());
  assertEquals(NB_ROWS_IN_BATCH, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue());
}
Project: c5    File: TestReplicationSmallTests.java
/**
 * Do a small loading into a table, make sure the data is really the same,
 * then run the VerifyReplication job to check the results. Do a second
 * comparison where all the cells are different.
 * @throws Exception
 */
@Test(timeout=300000)
public void testVerifyRepJob() throws Exception {
  // Populate the tables, at the same time it guarantees that the tables are
  // identical since it does the check
  testSmallBatch();

  String[] args = new String[] {"2", Bytes.toString(tableName)};
  Job job = VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS, args);
  if (job == null) {
    fail("Job wasn't created, see the log");
  }
  if (!job.waitForCompletion(true)) {
    fail("Job failed, see the log");
  }
  assertEquals(NB_ROWS_IN_BATCH, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue());
  assertEquals(0, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue());

  Scan scan = new Scan();
  ResultScanner rs = htable2.getScanner(scan);
  Put put = null;
  for (Result result : rs) {
    put = new Put(result.getRow());
    Cell firstVal = result.rawCells()[0];
    put.add(CellUtil.cloneFamily(firstVal),
        CellUtil.cloneQualifier(firstVal), Bytes.toBytes("diff data"));
    htable2.put(put);
  }
  Delete delete = new Delete(put.getRow());
  htable2.delete(delete);
  job = VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS, args);
  if (job == null) {
    fail("Job wasn't created, see the log");
  }
  if (!job.waitForCompletion(true)) {
    fail("Job failed, see the log");
  }
  assertEquals(0, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue());
  assertEquals(NB_ROWS_IN_BATCH, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue());
}
Project: HBase-Research    File: TestReplicationSmallTests.java
/**
 * Do a small loading into a table, make sure the data is really the same,
 * then run the VerifyReplication job to check the results. Do a second
 * comparison where all the cells are different.
 * @throws Exception
 */
@Test(timeout=300000)
public void testVerifyRepJob() throws Exception {
  // Populate the tables, at the same time it guarantees that the tables are
  // identical since it does the check
  testSmallBatch();

  String[] args = new String[] {"2", Bytes.toString(tableName)};
  Job job = VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS, args);
  if (job == null) {
    fail("Job wasn't created, see the log");
  }
  if (!job.waitForCompletion(true)) {
    fail("Job failed, see the log");
  }
  assertEquals(NB_ROWS_IN_BATCH, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue());
  assertEquals(0, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue());

  Scan scan = new Scan();
  ResultScanner rs = htable2.getScanner(scan);
  Put put = null;
  for (Result result : rs) {
    put = new Put(result.getRow());
    KeyValue firstVal = result.raw()[0];
    put.add(firstVal.getFamily(),
        firstVal.getQualifier(), Bytes.toBytes("diff data"));
    htable2.put(put);
  }
  Delete delete = new Delete(put.getRow());
  htable2.delete(delete);
  job = VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS, args);
  if (job == null) {
    fail("Job wasn't created, see the log");
  }
  if (!job.waitForCompletion(true)) {
    fail("Job failed, see the log");
  }
  assertEquals(0, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue());
  assertEquals(NB_ROWS_IN_BATCH, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue());
}
Project: hbase-0.94.8-qod    File: TestReplicationSmallTests.java
/**
 * Do a small loading into a table, make sure the data is really the same,
 * then run the VerifyReplication job to check the results. Do a second
 * comparison where all the cells are different.
 * @throws Exception
 */
@Test(timeout=300000)
public void testVerifyRepJob() throws Exception {
  // Populate the tables, at the same time it guarantees that the tables are
  // identical since it does the check
  testSmallBatch();

  String[] args = new String[] {"2", Bytes.toString(tableName)};
  Job job = VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS, args);
  if (job == null) {
    fail("Job wasn't created, see the log");
  }
  if (!job.waitForCompletion(true)) {
    fail("Job failed, see the log");
  }
  assertEquals(NB_ROWS_IN_BATCH, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue());
  assertEquals(0, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue());

  Scan scan = new Scan();
  ResultScanner rs = htable2.getScanner(scan);
  Put put = null;
  for (Result result : rs) {
    put = new Put(result.getRow());
    KeyValue firstVal = result.raw()[0];
    put.add(firstVal.getFamily(),
        firstVal.getQualifier(), Bytes.toBytes("diff data"));
    htable2.put(put);
  }
  Delete delete = new Delete(put.getRow());
  htable2.delete(delete);
  job = VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS, args);
  if (job == null) {
    fail("Job wasn't created, see the log");
  }
  if (!job.waitForCompletion(true)) {
    fail("Job failed, see the log");
  }
  assertEquals(0, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue());
  assertEquals(NB_ROWS_IN_BATCH, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue());
}
Project: DominoHBase    File: TestReplicationSmallTests.java
/**
 * Do a small loading into a table, make sure the data is really the same,
 * then run the VerifyReplication job to check the results. Do a second
 * comparison where all the cells are different.
 * @throws Exception
 */
@Test(timeout=300000)
public void testVerifyRepJob() throws Exception {
  // Populate the tables, at the same time it guarantees that the tables are
  // identical since it does the check
  testSmallBatch();

  String[] args = new String[] {"2", Bytes.toString(tableName)};
  Job job = VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS, args);
  if (job == null) {
    fail("Job wasn't created, see the log");
  }
  if (!job.waitForCompletion(true)) {
    fail("Job failed, see the log");
  }
  assertEquals(NB_ROWS_IN_BATCH, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue());
  assertEquals(0, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue());

  Scan scan = new Scan();
  ResultScanner rs = htable2.getScanner(scan);
  Put put = null;
  for (Result result : rs) {
    put = new Put(result.getRow());
    KeyValue firstVal = result.raw()[0];
    put.add(firstVal.getFamily(),
        firstVal.getQualifier(), Bytes.toBytes("diff data"));
    htable2.put(put);
  }
  Delete delete = new Delete(put.getRow());
  htable2.delete(delete);
  job = VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS, args);
  if (job == null) {
    fail("Job wasn't created, see the log");
  }
  if (!job.waitForCompletion(true)) {
    fail("Job failed, see the log");
  }
  assertEquals(0, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue());
  assertEquals(NB_ROWS_IN_BATCH, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue());
}
Project: hindex    File: TestReplicationSmallTests.java
/**
 * Do a small loading into a table, make sure the data is really the same,
 * then run the VerifyReplication job to check the results. Do a second
 * comparison where all the cells are different.
 * @throws Exception
 */
@Test(timeout=300000)
public void testVerifyRepJob() throws Exception {
  // Populate the tables, at the same time it guarantees that the tables are
  // identical since it does the check
  testSmallBatch();

  String[] args = new String[] {"2", Bytes.toString(tableName)};
  Job job = VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS, args);
  if (job == null) {
    fail("Job wasn't created, see the log");
  }
  if (!job.waitForCompletion(true)) {
    fail("Job failed, see the log");
  }
  assertEquals(NB_ROWS_IN_BATCH, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue());
  assertEquals(0, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue());

  Scan scan = new Scan();
  ResultScanner rs = htable2.getScanner(scan);
  Put put = null;
  for (Result result : rs) {
    put = new Put(result.getRow());
    KeyValue firstVal = result.raw()[0];
    put.add(firstVal.getFamily(),
        firstVal.getQualifier(), Bytes.toBytes("diff data"));
    htable2.put(put);
  }
  Delete delete = new Delete(put.getRow());
  htable2.delete(delete);
  job = VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS, args);
  if (job == null) {
    fail("Job wasn't created, see the log");
  }
  if (!job.waitForCompletion(true)) {
    fail("Job failed, see the log");
  }
  assertEquals(0, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue());
  assertEquals(NB_ROWS_IN_BATCH, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue());
}