Example source code for the Java class org.apache.hadoop.hbase.codec.KeyValueCodecWithTags

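In these examples, setting HConstants.RPC_CODEC_CONF_KEY (and, for cross-cluster replication, HConstants.REPLICATION_CODEC_CONF_KEY) to KeyValueCodecWithTags makes HBase carry cell tags over the wire instead of dropping them with the default KeyValue codec. Before the collected snippets, here is a minimal, hypothetical sketch of the codec itself; it is not taken from any of the listed projects and assumes an HBase 2.x classpath (ArrayBackedTag and the tag-aware KeyValue constructor). It round-trips a tagged KeyValue through the codec's encoder and decoder.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.util.Collections;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.codec.Codec;
import org.apache.hadoop.hbase.codec.KeyValueCodecWithTags;
import org.apache.hadoop.hbase.util.Bytes;

public class KeyValueCodecWithTagsSketch {
  public static void main(String[] args) throws Exception {
    // In the tests below this property asks client and server to use the tag-aware
    // codec for normal RPCs; it is shown for context only and is not consumed by
    // the purely local round trip that follows.
    Configuration conf = HBaseConfiguration.create();
    conf.set(HConstants.RPC_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getCanonicalName());

    // Build a KeyValue that carries one tag (tag type 1 is an arbitrary example value).
    Tag tag = new ArrayBackedTag((byte) 1, Bytes.toBytes("tag-value"));
    KeyValue kv = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("f"), Bytes.toBytes("q"),
        HConstants.LATEST_TIMESTAMP, Bytes.toBytes("value"), Collections.singletonList(tag));

    // Encode the cell; KeyValueCodecWithTags serializes the tag block as well.
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    KeyValueCodecWithTags codec = new KeyValueCodecWithTags();
    Codec.Encoder encoder = codec.getEncoder(out);
    encoder.write(kv);
    encoder.flush();

    // Decode it back and confirm the tags survived the round trip.
    Codec.Decoder decoder = codec.getDecoder(new ByteArrayInputStream(out.toByteArray()));
    while (decoder.advance()) {
      Cell cell = decoder.current();
      System.out.println("tags length after round trip: " + cell.getTagsLength());
    }
  }
}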
Project: ditb    File: TestSecureLoadIncrementalHFiles.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // set the always on security provider
  UserProvider.setUserProviderForTesting(util.getConfiguration(),
    HadoopSecurityEnabledUserProviderForTesting.class);
  // setup configuration
  SecureTestUtil.enableSecurity(util.getConfiguration());
  util.getConfiguration().setInt(
      LoadIncrementalHFiles.MAX_FILES_PER_REGION_PER_FAMILY,
      MAX_FILES_PER_REGION_PER_FAMILY);
  // change default behavior so that tag values are returned with normal rpcs
  util.getConfiguration().set(HConstants.RPC_CODEC_CONF_KEY,
      KeyValueCodecWithTags.class.getCanonicalName());

  util.startMiniCluster();

  // Wait for the ACL table to become available
  util.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME);

  setupNamespace();
}
Project: hbase    File: TestSecureLoadIncrementalHFiles.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // set the always on security provider
  UserProvider.setUserProviderForTesting(util.getConfiguration(),
    HadoopSecurityEnabledUserProviderForTesting.class);
  // setup configuration
  SecureTestUtil.enableSecurity(util.getConfiguration());
  util.getConfiguration().setInt(LoadIncrementalHFiles.MAX_FILES_PER_REGION_PER_FAMILY,
    MAX_FILES_PER_REGION_PER_FAMILY);
  // change default behavior so that tag values are returned with normal rpcs
  util.getConfiguration().set(HConstants.RPC_CODEC_CONF_KEY,
    KeyValueCodecWithTags.class.getCanonicalName());

  util.startMiniCluster();

  // Wait for the ACL table to become available
  util.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME);

  setupNamespace();
}
Project: ditb    File: TestLoadIncrementalHFilesUseSecurityEndPoint.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  util.getConfiguration().setInt(LoadIncrementalHFiles.MAX_FILES_PER_REGION_PER_FAMILY,
    MAX_FILES_PER_REGION_PER_FAMILY);
  util.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
    "org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint");
  // change default behavior so that tag values are returned with normal rpcs
  util.getConfiguration().set(HConstants.RPC_CODEC_CONF_KEY,
      KeyValueCodecWithTags.class.getCanonicalName());

  util.startMiniCluster();
  setupNamespace();
}
Project: ditb    File: TestLoadIncrementalHFiles.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  util.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,"");
  util.getConfiguration().setInt(
    LoadIncrementalHFiles.MAX_FILES_PER_REGION_PER_FAMILY,
    MAX_FILES_PER_REGION_PER_FAMILY);
  // change default behavior so that tag values are returned with normal rpcs
  util.getConfiguration().set(HConstants.RPC_CODEC_CONF_KEY,
      KeyValueCodecWithTags.class.getCanonicalName());
  util.startMiniCluster();

  setupNamespace();
}
Project: Gaffer    File: MiniHBaseStore.java
private Configuration setupConf() throws IOException {
    final Configuration conf = HBaseConfiguration.create();
    VisibilityTestUtil.enableVisiblityLabels(conf);
    conf.set(Superusers.SUPERUSER_CONF_KEY, ADMIN_USERNAME);
    conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MIN_FORMAT_VERSION_WITH_TAGS);
    conf.set(JTConfig.JT_IPC_ADDRESS, JTConfig.LOCAL_FRAMEWORK_NAME);
    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, CommonConfigurationKeysPublic.FS_DEFAULT_NAME_DEFAULT);
    conf.setClass(VisibilityUtils.VISIBILITY_LABEL_GENERATOR_CLASS, SimpleScanLabelGenerator.class,
            ScanLabelGenerator.class);
    conf.set(HConstants.REPLICATION_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getName());
    return conf;
}
Project: hbase    File: TestLoadIncrementalHFiles.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  util.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, "");
  util.getConfiguration().setInt(LoadIncrementalHFiles.MAX_FILES_PER_REGION_PER_FAMILY,
    MAX_FILES_PER_REGION_PER_FAMILY);
  // change default behavior so that tag values are returned with normal rpcs
  util.getConfiguration().set(HConstants.RPC_CODEC_CONF_KEY,
    KeyValueCodecWithTags.class.getCanonicalName());
  util.startMiniCluster();

  setupNamespace();
}
Project: ditb    File: WALCellCodec.java
@Override
public Decoder getDecoder(InputStream is) {
  return (compression == null)
      ? new KeyValueCodecWithTags.KeyValueDecoder(is) : new CompressedKvDecoder(is, compression);
}
Project: ditb    File: TestReplicationWithTags.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  conf1.setInt("hfile.format.version", 3);
  conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
  conf1.setInt("replication.source.size.capacity", 10240);
  conf1.setLong("replication.source.sleepforretries", 100);
  conf1.setInt("hbase.regionserver.maxlogs", 10);
  conf1.setLong("hbase.master.logcleaner.ttl", 10);
  conf1.setInt("zookeeper.recovery.retry", 1);
  conf1.setInt("zookeeper.recovery.retry.intervalmill", 10);
  conf1.setBoolean("dfs.support.append", true);
  conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
  conf1.setInt("replication.stats.thread.period.seconds", 5);
  conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false);
  conf1.setStrings(HConstants.REPLICATION_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getName());
  conf1.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
      TestCoprocessorForTagsAtSource.class.getName());

  utility1 = new HBaseTestingUtility(conf1);
  utility1.startMiniZKCluster();
  MiniZooKeeperCluster miniZK = utility1.getZkCluster();
  // Have to reget conf1 in case zk cluster location different
  // than default
  conf1 = utility1.getConfiguration();
  replicationAdmin = new ReplicationAdmin(conf1);
  LOG.info("Setup first Zk");

  // Base conf2 on conf1 so it gets the right zk cluster.
  conf2 = HBaseConfiguration.create(conf1);
  conf2.setInt("hfile.format.version", 3);
  conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
  conf2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
  conf2.setBoolean("dfs.support.append", true);
  conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false);
  conf2.setStrings(HConstants.REPLICATION_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getName());
  conf2.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
      TestCoprocessorForTagsAtSink.class.getName());

  utility2 = new HBaseTestingUtility(conf2);
  utility2.setZkCluster(miniZK);

  replicationAdmin.addPeer("2", utility2.getClusterKey());

  LOG.info("Setup second Zk");
  utility1.startMiniCluster(2);
  utility2.startMiniCluster(2);

  HTableDescriptor table = new HTableDescriptor(TABLE_NAME);
  HColumnDescriptor fam = new HColumnDescriptor(FAMILY);
  fam.setMaxVersions(3);
  fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
  table.addFamily(fam);
  try (Connection conn = ConnectionFactory.createConnection(conf1);
      Admin admin = conn.getAdmin()) {
    admin.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
  }
  try (Connection conn = ConnectionFactory.createConnection(conf2);
      Admin admin = conn.getAdmin()) {
    admin.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
  }
  htable1 = utility1.getConnection().getTable(TABLE_NAME);
  htable2 = utility2.getConnection().getTable(TABLE_NAME);
}
Project: pbase    File: TestReplicationWithTags.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  conf1.setInt("hfile.format.version", 3);
  conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
  conf1.setInt("replication.source.size.capacity", 10240);
  conf1.setLong("replication.source.sleepforretries", 100);
  conf1.setInt("hbase.regionserver.maxlogs", 10);
  conf1.setLong("hbase.master.logcleaner.ttl", 10);
  conf1.setInt("zookeeper.recovery.retry", 1);
  conf1.setInt("zookeeper.recovery.retry.intervalmill", 10);
  conf1.setBoolean("dfs.support.append", true);
  conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
  conf1.setInt("replication.stats.thread.period.seconds", 5);
  conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false);
  conf1.setStrings(HConstants.REPLICATION_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getName());
  conf1.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
      TestCoprocessorForTagsAtSource.class.getName());

  utility1 = new HBaseTestingUtility(conf1);
  utility1.startMiniZKCluster();
  MiniZooKeeperCluster miniZK = utility1.getZkCluster();
  // Have to reget conf1 in case zk cluster location different
  // than default
  conf1 = utility1.getConfiguration();
  replicationAdmin = new ReplicationAdmin(conf1);
  LOG.info("Setup first Zk");

  // Base conf2 on conf1 so it gets the right zk cluster.
  conf2 = HBaseConfiguration.create(conf1);
  conf2.setInt("hfile.format.version", 3);
  conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
  conf2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
  conf2.setBoolean("dfs.support.append", true);
  conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false);
  conf2.setStrings(HConstants.REPLICATION_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getName());
  conf2.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
      TestCoprocessorForTagsAtSink.class.getName());

  utility2 = new HBaseTestingUtility(conf2);
  utility2.setZkCluster(miniZK);

  replicationAdmin.addPeer("2", utility2.getClusterKey());

  LOG.info("Setup second Zk");
  utility1.startMiniCluster(2);
  utility2.startMiniCluster(2);

  HTableDescriptor table = new HTableDescriptor(TABLE_NAME);
  HColumnDescriptor fam = new HColumnDescriptor(FAMILY);
  fam.setMaxVersions(3);
  fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
  table.addFamily(fam);
  try (Connection conn = ConnectionFactory.createConnection(conf1);
      Admin admin = conn.getAdmin()) {
    admin.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
  }
  try (Connection conn = ConnectionFactory.createConnection(conf2);
      Admin admin = conn.getAdmin()) {
    admin.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
  }
  htable1 = utility1.getConnection().getTable(TABLE_NAME);
  htable2 = utility2.getConnection().getTable(TABLE_NAME);
}
Project: hbase    File: WALCellCodec.java
@Override
public Decoder getDecoder(InputStream is) {
  return (compression == null)
      ? new KeyValueCodecWithTags.KeyValueDecoder(is) : new CompressedKvDecoder(is, compression);
}
Project: hbase    File: TestReplicationWithTags.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  conf1.setInt("hfile.format.version", 3);
  conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
  conf1.setInt("replication.source.size.capacity", 10240);
  conf1.setLong("replication.source.sleepforretries", 100);
  conf1.setInt("hbase.regionserver.maxlogs", 10);
  conf1.setLong("hbase.master.logcleaner.ttl", 10);
  conf1.setInt("zookeeper.recovery.retry", 1);
  conf1.setInt("zookeeper.recovery.retry.intervalmill", 10);
  conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
  conf1.setInt("replication.stats.thread.period.seconds", 5);
  conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false);
  conf1.setStrings(HConstants.REPLICATION_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getName());
  conf1.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
      TestCoprocessorForTagsAtSource.class.getName());

  utility1 = new HBaseTestingUtility(conf1);
  utility1.startMiniZKCluster();
  MiniZooKeeperCluster miniZK = utility1.getZkCluster();
  // Have to reget conf1 in case zk cluster location different
  // than default
  conf1 = utility1.getConfiguration();
  replicationAdmin = new ReplicationAdmin(conf1);
  LOG.info("Setup first Zk");

  // Base conf2 on conf1 so it gets the right zk cluster.
  conf2 = HBaseConfiguration.create(conf1);
  conf2.setInt("hfile.format.version", 3);
  conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
  conf2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
  conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false);
  conf2.setStrings(HConstants.REPLICATION_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getName());
  conf2.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
          TestCoprocessorForTagsAtSink.class.getName());

  utility2 = new HBaseTestingUtility(conf2);
  utility2.setZkCluster(miniZK);

  LOG.info("Setup second Zk");
  utility1.startMiniCluster(2);
  utility2.startMiniCluster(2);

  ReplicationPeerConfig rpc = new ReplicationPeerConfig();
  rpc.setClusterKey(utility2.getClusterKey());
  replicationAdmin.addPeer("2", rpc, null);

  HTableDescriptor table = new HTableDescriptor(TABLE_NAME);
  HColumnDescriptor fam = new HColumnDescriptor(FAMILY);
  fam.setMaxVersions(3);
  fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
  table.addFamily(fam);
  try (Connection conn = ConnectionFactory.createConnection(conf1);
      Admin admin = conn.getAdmin()) {
    admin.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
  }
  try (Connection conn = ConnectionFactory.createConnection(conf2);
      Admin admin = conn.getAdmin()) {
    admin.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
  }
  htable1 = utility1.getConnection().getTable(TABLE_NAME);
  htable2 = utility2.getConnection().getTable(TABLE_NAME);
}
Project: hbase    File: TestBufferedDataBlockEncoder.java
@Test
public void testKVCodecWithTagsForDecodedCellsWithNoTags() throws Exception {
  KeyValue kv1 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("1"),
      HConstants.LATEST_TIMESTAMP, Bytes.toBytes("1"));
  // kv1.getKey() return a copy of the Key bytes which starts from RK_length. Means from offsets,
  // we need to reduce the KL and VL parts.
  OnheapDecodedCell c1 = new OnheapDecodedCell(kv1.getKey(), kv1.getRowLength(),
      kv1.getFamilyOffset() - KeyValue.ROW_OFFSET, kv1.getFamilyLength(),
      kv1.getQualifierOffset() - KeyValue.ROW_OFFSET, kv1.getQualifierLength(),
      kv1.getTimestamp(), kv1.getTypeByte(), kv1.getValueArray(), kv1.getValueOffset(),
      kv1.getValueLength(), kv1.getSequenceId(), kv1.getTagsArray(), kv1.getTagsOffset(),
      kv1.getTagsLength());
  KeyValue kv2 = new KeyValue(Bytes.toBytes("r2"), Bytes.toBytes("f"), Bytes.toBytes("2"),
      HConstants.LATEST_TIMESTAMP, Bytes.toBytes("2"));
  OnheapDecodedCell c2 = new OnheapDecodedCell(kv2.getKey(), kv2.getRowLength(),
      kv2.getFamilyOffset() - KeyValue.ROW_OFFSET, kv2.getFamilyLength(),
      kv2.getQualifierOffset() - KeyValue.ROW_OFFSET, kv2.getQualifierLength(),
      kv2.getTimestamp(), kv2.getTypeByte(), kv2.getValueArray(), kv2.getValueOffset(),
      kv2.getValueLength(), kv2.getSequenceId(), kv2.getTagsArray(), kv2.getTagsOffset(),
      kv2.getTagsLength());
  KeyValue kv3 = new KeyValue(Bytes.toBytes("r3"), Bytes.toBytes("cf"), Bytes.toBytes("qual"),
      HConstants.LATEST_TIMESTAMP, Bytes.toBytes("3"));
  BufferedDataBlockEncoder.OffheapDecodedExtendedCell
      c3 = new BufferedDataBlockEncoder.OffheapDecodedExtendedCell(ByteBuffer.wrap(kv2.getKey()),
      kv2.getRowLength(), kv2.getFamilyOffset() - KeyValue.ROW_OFFSET, kv2.getFamilyLength(),
      kv2.getQualifierOffset() - KeyValue.ROW_OFFSET, kv2.getQualifierLength(),
      kv2.getTimestamp(), kv2.getTypeByte(), ByteBuffer.wrap(kv2.getValueArray()),
      kv2.getValueOffset(), kv2.getValueLength(), kv2.getSequenceId(),
      ByteBuffer.wrap(kv2.getTagsArray()), kv2.getTagsOffset(), kv2.getTagsLength());
  ByteArrayOutputStream os = new ByteArrayOutputStream();
  KeyValueCodecWithTags codec = new KeyValueCodecWithTags();
  Encoder encoder = codec.getEncoder(os);
  encoder.write(c1);
  encoder.write(c2);
  encoder.write(c3);
  ByteArrayInputStream is = new ByteArrayInputStream(os.toByteArray());
  Decoder decoder = codec.getDecoder(is);
  assertTrue(decoder.advance());
  assertTrue(CellUtil.equals(c1, decoder.current()));
  assertTrue(decoder.advance());
  assertTrue(CellUtil.equals(c2, decoder.current()));
  assertTrue(decoder.advance());
  assertTrue(CellUtil.equals(c3, decoder.current()));
  assertFalse(decoder.advance());
}
Project: PyroDB    File: TestReplicationWithTags.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  conf1.setInt("hfile.format.version", 3);
  conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
  conf1.setInt("replication.source.size.capacity", 10240);
  conf1.setLong("replication.source.sleepforretries", 100);
  conf1.setInt("hbase.regionserver.maxlogs", 10);
  conf1.setLong("hbase.master.logcleaner.ttl", 10);
  conf1.setInt("zookeeper.recovery.retry", 1);
  conf1.setInt("zookeeper.recovery.retry.intervalmill", 10);
  conf1.setBoolean("dfs.support.append", true);
  conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
  conf1.setInt("replication.stats.thread.period.seconds", 5);
  conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false);
  conf1.setStrings(HConstants.REPLICATION_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getName());
  conf1.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
      TestCoprocessorForTagsAtSource.class.getName());

  utility1 = new HBaseTestingUtility(conf1);
  utility1.startMiniZKCluster();
  MiniZooKeeperCluster miniZK = utility1.getZkCluster();
  // Have to reget conf1 in case zk cluster location different
  // than default
  conf1 = utility1.getConfiguration();
  replicationAdmin = new ReplicationAdmin(conf1);
  LOG.info("Setup first Zk");

  // Base conf2 on conf1 so it gets the right zk cluster.
  conf2 = HBaseConfiguration.create(conf1);
  conf2.setInt("hfile.format.version", 3);
  conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
  conf2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
  conf2.setBoolean("dfs.support.append", true);
  conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false);
  conf2.setStrings(HConstants.REPLICATION_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getName());
  conf2.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
      TestCoprocessorForTagsAtSink.class.getName());

  utility2 = new HBaseTestingUtility(conf2);
  utility2.setZkCluster(miniZK);

  replicationAdmin.addPeer("2", utility2.getClusterKey());

  LOG.info("Setup second Zk");
  utility1.startMiniCluster(2);
  utility2.startMiniCluster(2);

  HTableDescriptor table = new HTableDescriptor(TableName.valueOf(TABLE_NAME));
  HColumnDescriptor fam = new HColumnDescriptor(FAMILY);
  fam.setMaxVersions(3);
  fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
  table.addFamily(fam);
  HBaseAdmin admin = null;
  try {
    admin = new HBaseAdmin(conf1);
    admin.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
  } finally {
    if (admin != null) {
      admin.close();
    }
  }
  try {
    admin = new HBaseAdmin(conf2);
    admin.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
  } finally {
    if (admin != null) {
      admin.close();
    }
  }
  htable1 = new HTable(conf1, TABLE_NAME);
  htable1.setWriteBufferSize(1024);
  htable2 = new HTable(conf2, TABLE_NAME);
}