Java class org.apache.hadoop.hbase.client.HTable: example source code

Project: ditb    File: OfflineMetaRebuildTestCore.java
protected void wipeOutMeta() throws IOException {
  // Mess it up by blowing up meta.
  Admin admin = TEST_UTIL.getHBaseAdmin();
  Scan s = new Scan();
  Table meta = new HTable(conf, TableName.META_TABLE_NAME);
  ResultScanner scanner = meta.getScanner(s);
  List<Delete> dels = new ArrayList<Delete>();
  for (Result r : scanner) {
    HRegionInfo info =
        HRegionInfo.getHRegionInfo(r);
    if(info != null && !info.getTable().getNamespaceAsString()
        .equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) {
      Delete d = new Delete(r.getRow());
      dels.add(d);
      admin.unassign(r.getRow(), true);
    }
  }
  meta.delete(dels);
  scanner.close();
  meta.close();
}
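Note: the new HTable(conf, TableName.META_TABLE_NAME) constructor used above is deprecated in later HBase 1.x releases. A minimal sketch of the same meta wipe with the Connection-based client API (assuming the test's existing conf and TEST_UTIL fields) could look like:

protected void wipeOutMeta() throws IOException {
  Admin admin = TEST_UTIL.getHBaseAdmin();
  try (Connection connection = ConnectionFactory.createConnection(conf);
       Table meta = connection.getTable(TableName.META_TABLE_NAME);
       ResultScanner scanner = meta.getScanner(new Scan())) {
    List<Delete> dels = new ArrayList<Delete>();
    for (Result r : scanner) {
      HRegionInfo info = HRegionInfo.getHRegionInfo(r);
      if (info != null && !info.getTable().getNamespaceAsString()
          .equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) {
        dels.add(new Delete(r.getRow()));
        admin.unassign(r.getRow(), true);
      }
    }
    meta.delete(dels); // scanner and table are closed by try-with-resources
  }
}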
Project: SparkDemo    File: MyClass.java
public static void QueryByCondition2(String tableName) {

        try {
            HTablePool pool = new HTablePool(configuration, 1000);
            HTable table = (HTable) pool.getTable(tableName);
            Filter filter = new SingleColumnValueFilter(Bytes
                    .toBytes("column1"), null, CompareOp.EQUAL, Bytes
                    .toBytes("aaa")); // 当列column1的值为aaa时进行查询
            Scan s = new Scan();
            s.setFilter(filter);
            ResultScanner rs = table.getScanner(s);
            for (Result r : rs) {
                System.out.println("获得到rowkey:" + new String(r.getRow()));
                for (KeyValue keyValue : r.raw()) {
                    System.out.println("列:" + new String(keyValue.getFamily())
                            + "====值:" + new String(keyValue.getValue()));
                }
            }
        } catch (Exception e) {
            e.printStackTrace();
        }

    }
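Note: HTablePool was deprecated and later removed, and the KeyValue accessors getFamily()/getValue() give way to Cell/CellUtil. A hypothetical rewrite of the same query against the newer API, with a caller-supplied Connection replacing the pool:

public static void queryByCondition2(Connection connection, String tableName) {
    Filter filter = new SingleColumnValueFilter(Bytes.toBytes("column1"), null,
            CompareOp.EQUAL, Bytes.toBytes("aaa")); // match rows where column1 equals "aaa"
    Scan s = new Scan();
    s.setFilter(filter);
    try (Table table = connection.getTable(TableName.valueOf(tableName));
         ResultScanner rs = table.getScanner(s)) {
        for (Result r : rs) {
            System.out.println("rowkey: " + Bytes.toString(r.getRow()));
            for (Cell cell : r.rawCells()) {
                System.out.println("column: " + Bytes.toString(CellUtil.cloneFamily(cell))
                        + "====value: " + Bytes.toString(CellUtil.cloneValue(cell)));
            }
        }
    } catch (IOException e) {
        e.printStackTrace();
    }
}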
Project: ditb    File: TestHBaseFsck.java
/**
 * Set up a clean table with the given region_replica count.
 *
 * Sets tbl, which needs to be closed after the test.
 *
 * @param tablename
 * @param replicaCount
 * @throws Exception
 */
void setupTableWithRegionReplica(TableName tablename, int replicaCount) throws Exception {
  HTableDescriptor desc = new HTableDescriptor(tablename);
  desc.setRegionReplication(replicaCount);
  HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toString(FAM));
  desc.addFamily(hcd); // If a table has no CF's it doesn't get checked
  createTable(TEST_UTIL, desc, SPLITS);

  tbl = (HTable) connection.getTable(tablename, tableExecutorService);
  List<Put> puts = new ArrayList<Put>();
  for (byte[] row : ROWKEYS) {
    Put p = new Put(row);
    p.add(FAM, Bytes.toBytes("val"), row);
    puts.add(p);
  }
  tbl.put(puts);
  tbl.flushCommits();
}
Project: HBase-High-Performance-Cookbook    File: HBaseRegularClient.java
/**
 * Gets all records from an existing table.
 * @method getAllRecord
 * @inputParameters hbaseBtable name used
 * @return type: none, as this is a void method
 **/
@SuppressWarnings({ "deprecation", "resource" })
public static void getAllRecord(String myHbaseBtableName) {
  ResultScanner hbaseBSs = null;
  try {
    HTable hbaseBtable = new HTable(hbaseBconf, myHbaseBtableName);
    Scan hbaseBScan = new Scan();
    hbaseBSs = hbaseBtable.getScanner(hbaseBScan);
    for (Result r : hbaseBSs) {
      for (KeyValue hbaseBkv : r.raw()) {
        System.out.print(new String(hbaseBkv.getRow()) + " ");
        System.out.print(new String(hbaseBkv.getFamily()) + ":");
        System.out.print(new String(hbaseBkv.getQualifier()) + " ");
        System.out.print(hbaseBkv.getTimestamp() + " ");
        System.out.println(new String(hbaseBkv.getValue()));
      }
    }
  } catch (IOException eio) {
    eio.printStackTrace();
  } finally {
    if (hbaseBSs != null) hbaseBSs.close();
    // close the scanner
  }
}
Project: ditb    File: IntegrationTestBigLinkedList.java
@Override
public int run(String[] args) throws Exception {
  if (args.length != 1) {
    System.out.println("Usage : " + Delete.class.getSimpleName() + " <node to delete>");
    return 0;
  }
  byte[] val = Bytes.toBytesBinary(args[0]);

  org.apache.hadoop.hbase.client.Delete delete
    = new org.apache.hadoop.hbase.client.Delete(val);

  Table table = new HTable(getConf(), getTableName(getConf()));
  table.delete(delete);
  table.close();

  System.out.println("Delete successful");
  return 0;
}
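Note: new HTable(getConf(), ...) is deprecated; a sketch of the same delete through a Connection (getConf() and getTableName() are the helpers already used above):

try (Connection connection = ConnectionFactory.createConnection(getConf());
     Table table = connection.getTable(getTableName(getConf()))) {
  table.delete(new org.apache.hadoop.hbase.client.Delete(val));
}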
Project: ditb    File: TestMultiTableInputFormat.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // switch TIF to log at DEBUG level
  TEST_UTIL.enableDebug(MultiTableInputFormat.class);
  TEST_UTIL.enableDebug(MultiTableInputFormatBase.class);
  TEST_UTIL.setJobWithoutMRCluster();
  // start mini hbase cluster
  TEST_UTIL.startMiniCluster(3);
  // create and fill table
  for (int i = 0; i < 3; i++) {
    try (HTable table =
        TEST_UTIL.createMultiRegionTable(TableName.valueOf(TABLE_NAME + String.valueOf(i)),
          INPUT_FAMILY, 4)) {
      TEST_UTIL.loadTable(table, INPUT_FAMILY, false);
    }
  }
}
Project: Transwarp-Sample-Code    File: LobUtil.java
/**
 * Upload an object to LOB storage
 * @param tableName Hyperbase table name
 * @param row rowkey as a String (converted to bytes)
 * @param filename file name
 * @param fileData file contents
 */
public void putLob(String tableName, String row, String filename, byte[] fileData){
    byte[] rowkey = Bytes.toBytes(row);
    try {
        HTable htable = new HTable(conf, tableName);
        Put put = new Put(rowkey);
        put.add(Bytes.toBytes(family1), Bytes.toBytes(f1_q1), Bytes.toBytes(filename));
        put.add(Bytes.toBytes(family2), Bytes.toBytes(f2_q1), fileData);
        htable.put(put);
        htable.flushCommits();
        htable.close();
    } catch (IOException e1) {
        // TODO Auto-generated catch block
        e1.printStackTrace();
    }
}
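Note: Put.add(family, qualifier, value) and HTable.flushCommits() are deprecated; Put.addColumn plus a Table obtained from a Connection covers the same ground. A sketch under the same field names (conf, family1, f1_q1, family2, f2_q1):

public void putLob(String tableName, String row, String filename, byte[] fileData) {
    byte[] rowkey = Bytes.toBytes(row);
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table htable = connection.getTable(TableName.valueOf(tableName))) {
        Put put = new Put(rowkey);
        put.addColumn(Bytes.toBytes(family1), Bytes.toBytes(f1_q1), Bytes.toBytes(filename));
        put.addColumn(Bytes.toBytes(family2), Bytes.toBytes(f2_q1), fileData);
        htable.put(put); // a Table from a Connection flushes on put; no flushCommits() needed
    } catch (IOException e) {
        e.printStackTrace();
    }
}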
Project: Transwarp-Sample-Code    File: udtfCheck.java
@Override
public void process(Object[] record) throws HiveException {
    final String document = (String) stringOI.getPrimitiveJavaObject(record[0]);

    if (document == null) {
        return;
    }

    String[] tokens = document.split(",");
    String[] results = tokens[1].split(" ");

    try {
        hTable = new HTable(conf, "bi");
        Get get = new Get(Bytes.toBytes(tokens[0]));
        result = hTable.exists(get);
    } catch (Exception e) {
        e.printStackTrace();
    }

    if (!result) {
        for (String r : results) {
            forward(new Object[]{tokens[0], r});
        }
    }
}
Project: ditb    File: TestScannersWithLabels.java
private static int insertData(TableName tableName, String column, double prob) throws IOException {
  byte[] k = new byte[3];
  byte[][] famAndQf = KeyValue.parseColumn(Bytes.toBytes(column));

  List<Put> puts = new ArrayList<>();
  for (int i = 0; i < 9; i++) {
    Put put = new Put(Bytes.toBytes("row" + i));
    put.setDurability(Durability.SKIP_WAL);
    put.add(famAndQf[0], famAndQf[1], k);
    put.setCellVisibility(new CellVisibility("(" + SECRET + "|" + CONFIDENTIAL + ")" + "&" + "!"
        + TOPSECRET));
    puts.add(put);
  }
  try (Table table = new HTable(TEST_UTIL.getConfiguration(), tableName)) {
    table.put(puts);
  }
  return puts.size();
}
Project: aliyun-maxcompute-data-collectors    File: HBaseBulkImportJob.java
@Override
protected void jobSetup(Job job) throws IOException, ImportException {
  super.jobSetup(job);

  // we shouldn't have gotten here if the bulk load dir is not set,
  // so throw an ImportException
  if(getContext().getDestination() == null){
    throw new ImportException("Can't run HBaseBulkImportJob without a " +
        "valid destination directory.");
  }

  TableMapReduceUtil.addDependencyJars(job.getConfiguration(), Preconditions.class);
  FileOutputFormat.setOutputPath(job, getContext().getDestination());
  HTable hTable = new HTable(job.getConfiguration(), options.getHBaseTable());
  HFileOutputFormat.configureIncrementalLoad(job, hTable);
}
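Note: the configureIncrementalLoad(Job, HTable) overload is deprecated. A hedged sketch of the same setup via HFileOutputFormat2 with a table descriptor and region locator (assuming HFileOutputFormat2 is an acceptable stand-in for the older HFileOutputFormat here):

TableName tableName = TableName.valueOf(options.getHBaseTable());
try (Connection connection = ConnectionFactory.createConnection(job.getConfiguration());
     Table hTable = connection.getTable(tableName);
     RegionLocator locator = connection.getRegionLocator(tableName)) {
  HFileOutputFormat2.configureIncrementalLoad(job, hTable.getTableDescriptor(), locator);
}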
Project: ditb    File: TestHFileOutputFormat.java
private void runIncrementalPELoad(
    Configuration conf, HTable table, Path outDir)
throws Exception {
  Job job = new Job(conf, "testLocalMRIncrementalLoad");
  job.setWorkingDirectory(util.getDataTestDirOnTestFS("runIncrementalPELoad"));
  job.getConfiguration().setStrings("io.serializations", conf.get("io.serializations"),
      MutationSerialization.class.getName(), ResultSerialization.class.getName(),
      KeyValueSerialization.class.getName());
  setupRandomGeneratorMapper(job);
  HFileOutputFormat2.configureIncrementalLoad(job, table.getTableDescriptor(),
      table.getRegionLocator());
  FileOutputFormat.setOutputPath(job, outDir);

  Assert.assertFalse(util.getTestFileSystem().exists(outDir));

  assertEquals(table.getRegionLocator().getAllRegionLocations().size(), job.getNumReduceTasks());

  assertTrue(job.waitForCompletion(true));
}
Project: ditb    File: TestQuotaThrottle.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.getConfiguration().setBoolean(QuotaUtil.QUOTA_CONF_KEY, true);
  TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 10);
  TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
  TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
  TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
  TEST_UTIL.getConfiguration().setBoolean("hbase.master.enabletable.roundrobin", true);
  TEST_UTIL.startMiniCluster(1);
  TEST_UTIL.waitTableAvailable(QuotaTableUtil.QUOTA_TABLE_NAME);
  QuotaCache.setTEST_FORCE_REFRESH(true);

  tables = new HTable[TABLE_NAMES.length];
  for (int i = 0; i < TABLE_NAMES.length; ++i) {
    tables[i] = TEST_UTIL.createTable(TABLE_NAMES[i], FAMILY);
  }
}
Project: ditb    File: TableInputFormatBase.java
/**
 * Allows subclasses to set the {@link HTable}.
 *
 * Will attempt to reuse the underlying Connection for our own needs, including
 * retrieving an Admin interface to the HBase cluster.
 *
 * @param table  The table to get the data from.
 * @throws IOException 
 * @deprecated Use {@link #initializeTable(Connection, TableName)} instead.
 */
@Deprecated
protected void setHTable(HTable table) throws IOException {
  this.table = table;
  this.connection = table.getConnection();
  try {
    this.regionLocator = table.getRegionLocator();
    this.admin = this.connection.getAdmin();
  } catch (NeedUnmanagedConnectionException exception) {
    LOG.warn("You are using an HTable instance that relies on an HBase-managed Connection. " +
        "This is usually due to directly creating an HTable, which is deprecated. Instead, you " +
        "should create a Connection object and then request a Table instance from it. If you " +
        "don't need the Table instance for your own use, you should instead use the " +
        "TableInputFormatBase.initalizeTable method directly.");
    LOG.info("Creating an additional unmanaged connection because user provided one can't be " +
        "used for administrative actions. We'll close it when we close out the table.");
    LOG.debug("Details about our failure to request an administrative interface.", exception);
    // Do we need a "copy the settings from this Connection" method? are things like the User
    // properly maintained by just looking again at the Configuration?
    this.connection = ConnectionFactory.createConnection(this.connection.getConfiguration());
    this.regionLocator = this.connection.getRegionLocator(table.getName());
    this.admin = this.connection.getAdmin();
  }
}
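Note: per the deprecation notice above, callers should hand the input format a Connection and TableName rather than a managed HTable. A minimal hypothetical override ("my_table" is a placeholder table name):

@Override
protected void initialize(JobContext context) throws IOException {
  Connection connection = ConnectionFactory.createConnection(context.getConfiguration());
  initializeTable(connection, TableName.valueOf("my_table"));
}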
Project: ditb    File: TestZKBasedOpenCloseRegion.java
@BeforeClass public static void beforeAllTests() throws Exception {
  Configuration c = TEST_UTIL.getConfiguration();
  c.setBoolean("hbase.assignment.usezk", true);
  c.setBoolean("dfs.support.append", true);
  c.setInt("hbase.regionserver.info.port", 0);
  TEST_UTIL.startMiniCluster(2);
  TEST_UTIL.createMultiRegionTable(TABLENAME, FAMILIES);
  HTable t = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
  countOfRegions = -1;
  try (RegionLocator r = t.getRegionLocator()) {
    countOfRegions = r.getStartKeys().length;
  }
  waitUntilAllRegionsAssigned();
  addToEachStartKey(countOfRegions);
  t.close();
  TEST_UTIL.getHBaseCluster().getMaster().assignmentManager.initializeHandlerTrackers();
}
Project: ditb    File: TestLogRollPeriod.java
/**
 * Tests that the LogRoller performs the roll even if there are no edits
 */
@Test
public void testNoEdits() throws Exception {
  TableName tableName = TableName.valueOf("TestLogRollPeriodNoEdits");
  TEST_UTIL.createTable(tableName, "cf");
  try {
    Table table = new HTable(TEST_UTIL.getConfiguration(), tableName);
    try {
      HRegionServer server = TEST_UTIL.getRSForFirstRegionInTable(tableName);
      WAL log = server.getWAL(null);
      checkMinLogRolls(log, 5);
    } finally {
      table.close();
    }
  } finally {
    TEST_UTIL.deleteTable(tableName);
  }
}
Project: ditb    File: TestCoprocessorEndpoint.java
@BeforeClass
public static void setupBeforeClass() throws Exception {
  // set configuration to indicate which coprocessors should be loaded
  Configuration conf = util.getConfiguration();
  conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 5000);
  conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint.class.getName(),
      ProtobufCoprocessorService.class.getName());
  conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
      ProtobufCoprocessorService.class.getName());
  util.startMiniCluster(2);

  Admin admin = util.getHBaseAdmin();
  HTableDescriptor desc = new HTableDescriptor(TEST_TABLE);
  desc.addFamily(new HColumnDescriptor(TEST_FAMILY));
  admin.createTable(desc, new byte[][]{ROWS[rowSeperator1], ROWS[rowSeperator2]});
  util.waitUntilAllRegionsAssigned(TEST_TABLE);

  Table table = new HTable(conf, TEST_TABLE);
  for (int i = 0; i < ROWSIZE; i++) {
    Put put = new Put(ROWS[i]);
    put.addColumn(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(i));
    table.put(put);
  }
  table.close();
}
Project: ditb    File: TestMultiRowRangeFilter.java
@Test
public void testMultiRowRangeFilterWithoutRangeOverlap() throws IOException {
  tableName = Bytes.toBytes("testMultiRowRangeFilterWithoutRangeOverlap");
  HTable ht = TEST_UTIL.createTable(tableName, family, Integer.MAX_VALUE);
  generateRows(numRows, ht, family, qf, value);

  Scan scan = new Scan();
  scan.setMaxVersions();

  List<RowRange> ranges = new ArrayList<RowRange>();
  ranges.add(new RowRange(Bytes.toBytes(30), true, Bytes.toBytes(40), false));
  ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false));
  ranges.add(new RowRange(Bytes.toBytes(60), true, Bytes.toBytes(70), false));

  MultiRowRangeFilter filter = new MultiRowRangeFilter(ranges);
  scan.setFilter(filter);
  int resultsSize = getResultsSize(ht, scan);
  LOG.info("found " + resultsSize + " results");
  List<Cell> results1 = getScanResult(Bytes.toBytes(10), Bytes.toBytes(20), ht);
  List<Cell> results2 = getScanResult(Bytes.toBytes(30), Bytes.toBytes(40), ht);
  List<Cell> results3 = getScanResult(Bytes.toBytes(60), Bytes.toBytes(70), ht);

  assertEquals(results1.size() + results2.size() + results3.size(), resultsSize);

  ht.close();
}
Project: ditb    File: TestOpenTableInCoprocessor.java
private void runCoprocessorConnectionToRemoteTable(Class<? extends BaseRegionObserver> clazz,
    boolean[] completeCheck) throws Throwable {
  HTableDescriptor primary = new HTableDescriptor(primaryTable);
  primary.addFamily(new HColumnDescriptor(family));
  // add our coprocessor
  primary.addCoprocessor(clazz.getName());

  HTableDescriptor other = new HTableDescriptor(otherTable);
  other.addFamily(new HColumnDescriptor(family));


  Admin admin = UTIL.getHBaseAdmin();
  admin.createTable(primary);
  admin.createTable(other);

  Table table = new HTable(UTIL.getConfiguration(), TableName.valueOf("primary"));
  Put p = new Put(new byte[] { 'a' });
  p.add(family, null, new byte[] { 'a' });
  table.put(p);
  table.close();

  Table target = new HTable(UTIL.getConfiguration(), otherTable);
  assertTrue("Didn't complete update to target table!", completeCheck[0]);
  assertEquals("Didn't find inserted row", 1, getKeyValueCount(target));
  target.close();
}
Project: ditb    File: TestMultiRowRangeFilter.java
@Test
public void testMultiRowRangeFilterWithEmptyStartRow() throws IOException {
  tableName = Bytes.toBytes("testMultiRowRangeFilterWithEmptyStartRow");
  HTable ht = TEST_UTIL.createTable(tableName, family, Integer.MAX_VALUE);
  generateRows(numRows, ht, family, qf, value);
  Scan scan = new Scan();
  scan.setMaxVersions();

  List<RowRange> ranges = new ArrayList<RowRange>();
  ranges.add(new RowRange(Bytes.toBytes(""), true, Bytes.toBytes(10), false));
  ranges.add(new RowRange(Bytes.toBytes(30), true, Bytes.toBytes(40), false));

  MultiRowRangeFilter filter = new MultiRowRangeFilter(ranges);
  scan.setFilter(filter);
  int resultsSize = getResultsSize(ht, scan);
  List<Cell> results1 = getScanResult(Bytes.toBytes(""), Bytes.toBytes(10), ht);
  List<Cell> results2 = getScanResult(Bytes.toBytes(30), Bytes.toBytes(40), ht);
  assertEquals(results1.size() + results2.size(), resultsSize);

  ht.close();
}
Project: ditb    File: TestNamespaceUpgrade.java
@Test (timeout=300000)
public void testSnapshots() throws IOException, InterruptedException {
  String[][] snapshots = {snapshot1Keys, snapshot2Keys};
  for(int i = 1; i <= snapshots.length; i++) {
    for(TableName table: tables) {
      TEST_UTIL.getHBaseAdmin().cloneSnapshot(table+"_snapshot"+i, TableName.valueOf(table+"_clone"+i));
      FSUtils.logFileSystemState(FileSystem.get(TEST_UTIL.getConfiguration()),
          FSUtils.getRootDir(TEST_UTIL.getConfiguration()),
          LOG);
      int count = 0;
      for(Result res: new HTable(TEST_UTIL.getConfiguration(), table+"_clone"+i).getScanner(new
          Scan())) {
        assertEquals(snapshots[i-1][count++], Bytes.toString(res.getRow()));
      }
      Assert.assertEquals(table+"_snapshot"+i, snapshots[i-1].length, count);
    }
  }
}
Project: ditb    File: IndexChooser.java
public IndexChooser(final IndexTable indexTable) throws IOException {
  this.indexTable = indexTable;
  indexRegionMaps = new TreeMap<byte[], List<HRegionInfo>>(Bytes.BYTES_COMPARATOR);

  for (Map.Entry<byte[], Table> entry : indexTable.getIndexTableMaps().entrySet()) {
    if (!(entry.getValue() instanceof HTable)) {
      throw new IOException(
          "table is not an instance of HTable, it is " + entry.getValue().getClass().getName());
    }
    HTable htable = (HTable) entry.getValue();
    ArrayList<HRegionInfo> list =
        new ArrayList<HRegionInfo>(htable.getRegionLocations().keySet());
    indexRegionMaps.put(entry.getKey(), list);
  }

  speedTimes = DEFAULT_SPEED_TIMES;
}
Project: ditb    File: TestHBaseFsck.java
@Test (timeout=180000)
public void testTableWithNoRegions() throws Exception {
  // We might end up with empty regions in a table
  // see also testNoHdfsTable()
  TableName table =
      TableName.valueOf(name.getMethodName());
  try {
    // create table with one region
    HTableDescriptor desc = new HTableDescriptor(table);
    HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toString(FAM));
    desc.addFamily(hcd); // If a table has no CF's it doesn't get checked
    createTable(TEST_UTIL, desc, null);
    tbl = (HTable) connection.getTable(table, tableExecutorService);

    // Mess it up by leaving a hole in the assignment, meta, and hdfs data
    deleteRegion(conf, tbl.getTableDescriptor(), HConstants.EMPTY_START_ROW,
        HConstants.EMPTY_END_ROW, false, false, true);

    HBaseFsck hbck = doFsck(conf, false);
    assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.NOT_IN_HDFS });

    doFsck(conf, true);

    // fix hole
    doFsck(conf, true);

    // check that hole fixed
    assertNoErrors(doFsck(conf, false));
  } finally {
    cleanupTable(table);
  }

}
Project: ditb    File: TestRowCountEndpoint.java
public void testEndpoint() throws Throwable {
  Table table = new HTable(CONF, TEST_TABLE);

  // insert some test rows
  for (int i=0; i<5; i++) {
    byte[] iBytes = Bytes.toBytes(i);
    Put p = new Put(iBytes);
    p.add(TEST_FAMILY, TEST_COLUMN, iBytes);
    table.put(p);
  }

  final ExampleProtos.CountRequest request = ExampleProtos.CountRequest.getDefaultInstance();
  Map<byte[],Long> results = table.coprocessorService(ExampleProtos.RowCountService.class,
      null, null,
      new Batch.Call<ExampleProtos.RowCountService,Long>() {
        public Long call(ExampleProtos.RowCountService counter) throws IOException {
          ServerRpcController controller = new ServerRpcController();
          BlockingRpcCallback<ExampleProtos.CountResponse> rpcCallback =
              new BlockingRpcCallback<ExampleProtos.CountResponse>();
          counter.getRowCount(controller, request, rpcCallback);
          ExampleProtos.CountResponse response = rpcCallback.get();
          if (controller.failedOnException()) {
            throw controller.getFailedOn();
          }
          return (response != null && response.hasCount()) ? response.getCount() : 0;
        }
      });
  // should be one region with results
  assertEquals(1, results.size());
  Iterator<Long> iter = results.values().iterator();
  Long val = iter.next();
  assertNotNull(val);
  assertEquals(5L, val.longValue());
}
Project: flume-release-1.7.0    File: TestAsyncHBaseSink.java
@Test
public void testThreeEvents() throws Exception {
  testUtility.createTable(tableName.getBytes(), columnFamily.getBytes());
  deleteTable = true;
  AsyncHBaseSink sink = new AsyncHBaseSink(testUtility.getConfiguration());
  Configurables.configure(sink, ctx);
  Channel channel = new MemoryChannel();
  Configurables.configure(channel, ctx);
  sink.setChannel(channel);
  sink.start();
  Transaction tx = channel.getTransaction();
  tx.begin();
  for (int i = 0; i < 3; i++) {
    Event e = EventBuilder.withBody(Bytes.toBytes(valBase + "-" + i));
    channel.put(e);
  }
  tx.commit();
  tx.close();
  Assert.assertFalse(sink.isConfNull());
  sink.process();
  sink.stop();
  HTable table = new HTable(testUtility.getConfiguration(), tableName);
  byte[][] results = getResults(table, 3);
  byte[] out;
  int found = 0;
  for (int i = 0; i < 3; i++) {
    for (int j = 0; j < 3; j++) {
      if (Arrays.equals(results[j], Bytes.toBytes(valBase + "-" + i))) {
        found++;
        break;
      }
    }
  }
  Assert.assertEquals(3, found);
  out = results[3];
  Assert.assertArrayEquals(Longs.toByteArray(3), out);
}
Project: ditb    File: TestServerCustomProtocol.java
@Test
public void testNullReturn() throws Throwable {
  try (HTable table = new HTable(util.getConfiguration(), TEST_TABLE)) {
    RegionLocator locator = table.getRegionLocator();
    Map<byte[],String> results = hello(table, "nobody", ROW_A, ROW_C);
    verifyRegionResults(locator, results, null, ROW_A);
    verifyRegionResults(locator, results, null, ROW_B);
    verifyRegionResults(locator, results, null, ROW_C);
  }
}
Project: ditb    File: TestChangingEncoding.java
static void verifyTestDataBatch(Configuration conf, TableName tableName,
    int batchId) throws Exception {
  LOG.debug("Verifying test data batch " + batchId);
  Table table = new HTable(conf, tableName);
  for (int i = 0; i < NUM_ROWS_PER_BATCH; ++i) {
    Get get = new Get(getRowKey(batchId, i));
    Result result = table.get(get);
    for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
      Cell kv = result.getColumnLatestCell(CF_BYTES, getQualifier(j));
      assertTrue(CellUtil.matchingValue(kv, getValue(batchId, i, j)));
    }
  }
  table.close();
}
Project: ditb    File: HBaseTestingUtility.java
/**
 * Truncate a table using the admin command.
 * Effectively disables, deletes, and recreates the table.
 * @param tableName table which must exist.
 * @param preserveRegions keep the existing split points
 * @return HTable for the new table
 */
public HTable truncateTable(final TableName tableName, final boolean preserveRegions)
    throws IOException {
  Admin admin = getHBaseAdmin();
  if (!admin.isTableDisabled(tableName)) {
    admin.disableTable(tableName);
  }
  admin.truncateTable(tableName, preserveRegions);
  return new HTable(getConfiguration(), tableName);
}
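Note: the returned HTable ties the caller to the deprecated self-managed connection. With a shared Connection, an equivalent helper might be sketched as:

public Table truncateTable(final Connection connection, final TableName tableName,
    final boolean preserveRegions) throws IOException {
  try (Admin admin = connection.getAdmin()) {
    if (!admin.isTableDisabled(tableName)) {
      admin.disableTable(tableName);
    }
    admin.truncateTable(tableName, preserveRegions);
  }
  return connection.getTable(tableName); // caller closes the Table, not the Connection
}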
Project: ditb    File: TestServerCustomProtocol.java
@Test
public void testCompoundCall() throws Throwable {
  try (HTable table = new HTable(util.getConfiguration(), TEST_TABLE)) {
    RegionLocator locator = table.getRegionLocator();
    Map<byte [], String> results = compoundOfHelloAndPing(table, ROW_A, ROW_C);
    verifyRegionResults(locator, results, "Hello, pong", ROW_A);
    verifyRegionResults(locator, results, "Hello, pong", ROW_B);
    verifyRegionResults(locator, results, "Hello, pong", ROW_C);
  }
}
Project: flume-release-1.7.0    File: TestHBaseSink.java
@Test
public void testTransactionStateOnSerializationException() throws Exception {
  initContextForSimpleHbaseEventSerializer();
  ctx.put("batchSize", "1");
  ctx.put(HBaseSinkConfigurationConstants.CONFIG_SERIALIZER,
          "org.apache.flume.sink.hbase.MockSimpleHbaseEventSerializer");

  HBaseSink sink = new HBaseSink(conf);
  Configurables.configure(sink, ctx);
  // Reset the context to a higher batchSize
  ctx.put("batchSize", "100");
  Channel channel = new MemoryChannel();
  Configurables.configure(channel, new Context());
  sink.setChannel(channel);
  sink.start();
  Transaction tx = channel.getTransaction();
  tx.begin();
  Event e = EventBuilder.withBody(Bytes.toBytes(valBase + "-" + 0));
  channel.put(e);
  tx.commit();
  tx.close();
  try {
    MockSimpleHbaseEventSerializer.throwException = true;
    sink.process();
    Assert.fail("FlumeException expected from serilazer");
  } catch (FlumeException ex) {
    Assert.assertEquals("Exception for testing", ex.getMessage());
  }
  MockSimpleHbaseEventSerializer.throwException = false;
  sink.process();
  sink.stop();
  HTable table = new HTable(conf, tableName);
  byte[][] results = getResults(table, 1);
  byte[] out = results[0];
  Assert.assertArrayEquals(e.getBody(), out);
  out = results[1];
  Assert.assertArrayEquals(Longs.toByteArray(1), out);
}
Project: ditb    File: TestHFileOutputFormat2.java
/**
 * Test for {@link HFileOutputFormat2#configureDataBlockEncoding(HTableDescriptor, Configuration)}
 * and {@link HFileOutputFormat2#createFamilyDataBlockEncodingMap(Configuration)}.
 * Tests that the data block encoding map is correctly serialized into
 * and deserialized from configuration
 *
 * @throws IOException
 */
@Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test
public void testSerializeDeserializeFamilyDataBlockEncodingMap() throws IOException {
  for (int numCfs = 0; numCfs <= 3; numCfs++) {
    Configuration conf = new Configuration(this.util.getConfiguration());
    Map<String, DataBlockEncoding> familyToDataBlockEncoding =
        getMockColumnFamiliesForDataBlockEncoding(numCfs);
    Table table = Mockito.mock(HTable.class);
    setupMockColumnFamiliesForDataBlockEncoding(table,
        familyToDataBlockEncoding);
    HTableDescriptor tableDescriptor = table.getTableDescriptor();
    HFileOutputFormat2.configureDataBlockEncoding(tableDescriptor, conf);

    // read back family specific data block encoding settings from the
    // configuration
    Map<byte[], DataBlockEncoding> retrievedFamilyToDataBlockEncodingMap =
        HFileOutputFormat2
        .createFamilyDataBlockEncodingMap(conf);

    // test that we have a value for all column families that matches with the
    // used mock values
    for (Entry<String, DataBlockEncoding> entry : familyToDataBlockEncoding.entrySet()) {
      assertEquals("DataBlockEncoding configuration incorrect for column family:"
          + entry.getKey(), entry.getValue(),
          retrievedFamilyToDataBlockEncodingMap.get(entry.getKey().getBytes()));
    }
  }
}
Project: Transwarp-Sample-Code    File: udfCheck.java
public static boolean evaluate(String rowkey) {
    try {
        hTable = new HTable(conf, "bi");
        Get get = new Get(Bytes.toBytes(rowkey));
        result = hTable.exists(get);
        return result;
    } catch (Exception e) {
        e.printStackTrace();
    }
    return false;
}
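Note: building an HTable per UDF call is expensive; a common pattern keeps one Connection per JVM and opens lightweight Table handles from it. A sketch under the same static conf field and "bi" table as above:

private static Connection connection; // shared; real code should synchronize initialization

public static boolean evaluate(String rowkey) {
    try {
        if (connection == null) {
            connection = ConnectionFactory.createConnection(conf);
        }
        try (Table table = connection.getTable(TableName.valueOf("bi"))) {
            return table.exists(new Get(Bytes.toBytes(rowkey)));
        }
    } catch (IOException e) {
        e.printStackTrace();
        return false;
    }
}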
Project: QDrill    File: HBaseGroupScan.java
private void init() {
  logger.debug("Getting region locations");
  try {
    HTable table = new HTable(storagePluginConfig.getHBaseConf(), hbaseScanSpec.getTableName());
    this.hTableDesc = table.getTableDescriptor();
    NavigableMap<HRegionInfo, ServerName> regionsMap = table.getRegionLocations();
    statsCalculator = new TableStatsCalculator(table, hbaseScanSpec, storagePlugin.getContext().getConfig(), storagePluginConfig);

    boolean foundStartRegion = false;
    regionsToScan = new TreeMap<HRegionInfo, ServerName>();
    for (Entry<HRegionInfo, ServerName> mapEntry : regionsMap.entrySet()) {
      HRegionInfo regionInfo = mapEntry.getKey();
      if (!foundStartRegion && hbaseScanSpec.getStartRow() != null && hbaseScanSpec.getStartRow().length != 0 && !regionInfo.containsRow(hbaseScanSpec.getStartRow())) {
        continue;
      }
      foundStartRegion = true;
      regionsToScan.put(regionInfo, mapEntry.getValue());
      scanSizeInBytes += statsCalculator.getRegionSizeInBytes(regionInfo.getRegionName());
      if (hbaseScanSpec.getStopRow() != null && hbaseScanSpec.getStopRow().length != 0 && regionInfo.containsRow(hbaseScanSpec.getStopRow())) {
        break;
      }
    }

    table.close();
  } catch (IOException e) {
    throw new DrillRuntimeException("Error getting region info for table: " + hbaseScanSpec.getTableName(), e);
  }
  verifyColumns();
}
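Note: HTable.getRegionLocations() is deprecated; RegionLocator.getAllRegionLocations() exposes the same data. A sketch of the region walk under that interface (same fields as above; the statsCalculator/scanSizeInBytes accounting is elided):

try (Connection connection = ConnectionFactory.createConnection(storagePluginConfig.getHBaseConf());
     Table table = connection.getTable(TableName.valueOf(hbaseScanSpec.getTableName()));
     RegionLocator locator = connection.getRegionLocator(table.getName())) {
  this.hTableDesc = table.getTableDescriptor();
  boolean foundStartRegion = false;
  regionsToScan = new TreeMap<HRegionInfo, ServerName>();
  for (HRegionLocation location : locator.getAllRegionLocations()) {
    HRegionInfo regionInfo = location.getRegionInfo();
    if (!foundStartRegion && hbaseScanSpec.getStartRow() != null
        && hbaseScanSpec.getStartRow().length != 0
        && !regionInfo.containsRow(hbaseScanSpec.getStartRow())) {
      continue;
    }
    foundStartRegion = true;
    regionsToScan.put(regionInfo, location.getServerName());
    if (hbaseScanSpec.getStopRow() != null && hbaseScanSpec.getStopRow().length != 0
        && regionInfo.containsRow(hbaseScanSpec.getStopRow())) {
      break;
    }
  }
}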
Project: ditb    File: TestServerCustomProtocol.java
@Test
public void testEmptyReturnType() throws Throwable {
  try (HTable table = new HTable(util.getConfiguration(), TEST_TABLE)) {
    Map<byte[],String> results = noop(table, ROW_A, ROW_C);
    assertEquals("Should have results from three regions", 3, results.size());
    // all results should be null
    for (Object v : results.values()) {
      assertNull(v);
    }
  }
}
Project: ditb    File: TestLoadIncrementalHFiles.java
private void runTest(String testName, HTableDescriptor htd, BloomType bloomType,
    boolean preCreateTable, byte[][] tableSplitKeys, byte[][][] hfileRanges) throws Exception {

  for (boolean managed : new boolean[] { true, false }) {
    Path dir = util.getDataTestDirOnTestFS(testName);
    FileSystem fs = util.getTestFileSystem();
    dir = dir.makeQualified(fs);
    Path familyDir = new Path(dir, Bytes.toString(FAMILY));

    int hfileIdx = 0;
    for (byte[][] range : hfileRanges) {
      byte[] from = range[0];
      byte[] to = range[1];
      HFileTestUtil.createHFile(util.getConfiguration(), fs, new Path(familyDir, "hfile_"
          + hfileIdx++), FAMILY, QUALIFIER, from, to, 1000);
    }
    int expectedRows = hfileIdx * 1000;

    if (preCreateTable) {
      util.getHBaseAdmin().createTable(htd, tableSplitKeys);
    }

    final TableName tableName = htd.getTableName();
    if (!util.getHBaseAdmin().tableExists(tableName)) {
      util.getHBaseAdmin().createTable(htd);
    }
    LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration());

    if (managed) {
      try (HTable table = new HTable(util.getConfiguration(), tableName)) {
        loader.doBulkLoad(dir, table);
        assertEquals(expectedRows, util.countRows(table));
      }
    } else {
      try (Connection conn = ConnectionFactory.createConnection(util.getConfiguration());
          HTable table = (HTable) conn.getTable(tableName)) {
        loader.doBulkLoad(dir, table);
      }
    }

    // verify staging folder has been cleaned up
    Path stagingBasePath = SecureBulkLoadUtil.getBaseStagingDir(util.getConfiguration());
    if (fs.exists(stagingBasePath)) {
      FileStatus[] files = fs.listStatus(stagingBasePath);
      for (FileStatus file : files) {
        assertTrue("Folder=" + file.getPath() + " is not cleaned up.",
            file.getPath().getName() != "DONOTERASE");
      }
    }

    util.deleteTable(tableName);
  }
}
Project: QDrill    File: TestTableGenerator.java
public static void generateHBaseDatasetCompositeKeyInt(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
  } else {
    admin.createTable(desc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  int startVal = 0;
  int stopVal = 1000;
  int interval = 47;
  long counter = 0;
  for (int i = startVal; i < stopVal; i += interval, counter ++) {
    byte[] rowKey = ByteBuffer.allocate(12).putInt(i).array();

    for(int j = 0; j < 8; ++j) {
      rowKey[4 + j] = (byte)(counter >> (56 - (j * 8)));
    }

    Put p = new Put(rowKey);
    p.add(FAMILY_F, COLUMN_C, "dummy".getBytes());
    table.put(p);
  }

  table.flushCommits();
  table.close();
}
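Note: for bulk test loads like these generators, a BufferedMutator batches puts client-side and removes the need for the deprecated flushCommits(). A minimal sketch of the write loop (same FAMILY_F/COLUMN_C constants; admin supplies the configuration):

try (Connection connection = ConnectionFactory.createConnection(admin.getConfiguration());
     BufferedMutator mutator = connection.getBufferedMutator(TableName.valueOf(tableName))) {
  long counter = 0;
  for (int i = 0; i < 1000; i += 47, counter++) {
    byte[] rowKey = ByteBuffer.allocate(12).putInt(i).array();
    for (int j = 0; j < 8; ++j) {
      rowKey[4 + j] = (byte) (counter >> (56 - (j * 8)));
    }
    Put p = new Put(rowKey);
    p.addColumn(FAMILY_F, COLUMN_C, "dummy".getBytes());
    mutator.mutate(p); // buffered; flushed automatically when the mutator closes
  }
}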
Project: QDrill    File: TestTableGenerator.java
public static void generateHBaseDatasetDoubleOB(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
  } else {
    admin.createTable(desc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  for (double i = 0.5; i <= 100.00; i += 0.75) {
    byte[] bytes = new byte[9];
    org.apache.hadoop.hbase.util.PositionedByteRange br =
        new org.apache.hadoop.hbase.util.SimplePositionedByteRange(bytes, 0, 9);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeFloat64(br, i,
        org.apache.hadoop.hbase.util.Order.ASCENDING);
    Put p = new Put(bytes);
    p.add(FAMILY_F, COLUMN_C, String.format("value %03f", i).getBytes());
    table.put(p);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
Project: ditb    File: TestGetLastFlushedSequenceId.java
@Test
public void test() throws IOException, InterruptedException {
  testUtil.getHBaseAdmin().createNamespace(
    NamespaceDescriptor.create(tableName.getNamespaceAsString()).build());
  HTable table = testUtil.createTable(tableName, families);
  table.put(new Put(Bytes.toBytes("k")).add(family, Bytes.toBytes("q"), Bytes.toBytes("v")));
  table.flushCommits();
  MiniHBaseCluster cluster = testUtil.getMiniHBaseCluster();
  List<JVMClusterUtil.RegionServerThread> rsts = cluster.getRegionServerThreads();
  Region region = null;
  for (int i = 0; i < cluster.getRegionServerThreads().size(); i++) {
    HRegionServer hrs = rsts.get(i).getRegionServer();
    for (Region r : hrs.getOnlineRegions(tableName)) {
      region = r;
      break;
    }
  }
  assertNotNull(region);
  Thread.sleep(2000);
  RegionStoreSequenceIds ids =
      testUtil.getHBaseCluster().getMaster()
          .getLastSequenceId(region.getRegionInfo().getEncodedNameAsBytes());
  assertEquals(HConstants.NO_SEQNUM, ids.getLastFlushedSequenceId());
  // This will be the sequenceid just before that of the earliest edit in memstore.
  long storeSequenceId = ids.getStoreSequenceId(0).getSequenceId();
  assertTrue(storeSequenceId > 0);
  testUtil.getHBaseAdmin().flush(tableName);
  Thread.sleep(2000);
  ids =
      testUtil.getHBaseCluster().getMaster()
          .getLastSequenceId(region.getRegionInfo().getEncodedNameAsBytes());
  assertTrue(ids.getLastFlushedSequenceId() + " > " + storeSequenceId,
    ids.getLastFlushedSequenceId() > storeSequenceId);
  assertEquals(ids.getLastFlushedSequenceId(), ids.getStoreSequenceId(0).getSequenceId());
  table.close();
}
Project: QDrill    File: TestTableGenerator.java
public static void generateHBaseDatasetIntOB(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
  } else {
    admin.createTable(desc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  for (int i = -49; i <= 100; i ++) {
    byte[] bytes = new byte[5];
    org.apache.hadoop.hbase.util.PositionedByteRange br =
            new org.apache.hadoop.hbase.util.SimplePositionedByteRange(bytes, 0, 5);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeInt32(br, i,
            org.apache.hadoop.hbase.util.Order.ASCENDING);
    Put p = new Put(bytes);
    p.add(FAMILY_F, COLUMN_C, String.format("value %d", i).getBytes());
    table.put(p);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
Project: QDrill    File: TestTableGenerator.java
public static void generateHBaseDatasetDoubleOBDesc(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
  } else {
    admin.createTable(desc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  for (double i = 0.5; i <= 100.00; i += 0.75) {
    byte[] bytes = new byte[9];
    org.apache.hadoop.hbase.util.PositionedByteRange br =
        new org.apache.hadoop.hbase.util.SimplePositionedByteRange(bytes, 0, 9);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeFloat64(br, i,
        org.apache.hadoop.hbase.util.Order.DESCENDING);
    Put p = new Put(bytes);
    p.add(FAMILY_F, COLUMN_C, String.format("value %03f", i).getBytes());
    table.put(p);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
Project: QDrill    File: TestTableGenerator.java
public static void generateHBaseDatasetFloatOBDesc(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
  } else {
    admin.createTable(desc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  for (float i = (float)0.5; i <= 100.00; i += 0.75) {
    byte[] bytes = new byte[5];
    org.apache.hadoop.hbase.util.PositionedByteRange br =
            new org.apache.hadoop.hbase.util.SimplePositionedByteRange(bytes, 0, 5);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeFloat32(br, i,
            org.apache.hadoop.hbase.util.Order.DESCENDING);
    Put p = new Put(bytes);
    p.add(FAMILY_F, COLUMN_C, String.format("value %03f", i).getBytes());
    table.put(p);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}