Example source code for the Java class org.apache.hadoop.hbase.io.hfile.HFile

Project: ditb    File: VisibilityController.java
@Override
public void start(CoprocessorEnvironment env) throws IOException {
  this.conf = env.getConfiguration();

  authorizationEnabled = isAuthorizationSupported(conf);
  if (!authorizationEnabled) {
    LOG.warn("The VisibilityController has been loaded with authorization checks disabled.");
  }

  if (HFile.getFormatVersion(conf) < HFile.MIN_FORMAT_VERSION_WITH_TAGS) {
    throw new RuntimeException("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS
      + " is required to persist visibility labels. Consider setting " + HFile.FORMAT_VERSION_KEY
      + " accordingly.");
  }

  if (env instanceof RegionServerCoprocessorEnvironment) {
    throw new RuntimeException("Visibility controller should not be configured as "
        + "'hbase.coprocessor.regionserver.classes'.");
  }
  // Do not create for master CPs
  if (!(env instanceof MasterCoprocessorEnvironment)) {
    visibilityLabelService = VisibilityLabelServiceManager.getInstance()
        .getVisibilityLabelService(this.conf);
  }
}
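
The check above requires hfile.format.version (HFile.FORMAT_VERSION_KEY) to be at least HFile.MIN_FORMAT_VERSION_WITH_TAGS, i.e. HFile v3, because visibility labels are persisted as cell tags. A minimal configuration sketch, mirroring what the integration-test snippets further down do:

Configuration conf = HBaseConfiguration.create();
// HFile v3 is the first format version that supports cell tags,
// which is where visibility labels are stored.
conf.setInt(HFile.FORMAT_VERSION_KEY, 3);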
Project: ditb    File: CompoundBloomFilter.java
/**
 * De-serialization for compound Bloom filter metadata. Must be consistent
 * with what {@link CompoundBloomFilterWriter} does.
 *
 * @param meta serialized Bloom filter metadata without any magic blocks
 * @throws IOException
 */
public CompoundBloomFilter(DataInput meta, HFile.Reader reader)
    throws IOException {
  this.reader = reader;

  totalByteSize = meta.readLong();
  hashCount = meta.readInt();
  hashType = meta.readInt();
  totalKeyCount = meta.readLong();
  totalMaxKeys = meta.readLong();
  numChunks = meta.readInt();
  comparator = FixedFileTrailer.createComparator(
      Bytes.toString(Bytes.readByteArray(meta)));

  hash = Hash.getInstance(hashType);
  if (hash == null) {
    throw new IllegalArgumentException("Invalid hash type: " + hashType);
  }

  index = new HFileBlockIndex.BlockIndexReader(comparator, 1);
  index.readRootIndex(meta, numChunks);
}
Project: ditb    File: BloomFilterFactory.java
/**
 * Instantiates the correct Bloom filter class based on the version provided
 * in the meta block data.
 *
 * @param meta the byte array holding the Bloom filter's metadata, including
 *          version information
 * @param reader the {@link HFile} reader to use to lazily load Bloom filter
 *          blocks
 * @return an instance of the correct type of Bloom filter
 * @throws IllegalArgumentException
 */
public static BloomFilter
    createFromMeta(DataInput meta, HFile.Reader reader)
    throws IllegalArgumentException, IOException {
  int version = meta.readInt();
  switch (version) {
    case ByteBloomFilter.VERSION:
      // This is only possible in a version 1 HFile. We are ignoring the
      // passed comparator because raw byte comparators are always used
      // in version 1 Bloom filters.
      return new ByteBloomFilter(meta);

    case CompoundBloomFilterBase.VERSION:
      return new CompoundBloomFilter(meta, reader);

    default:
      throw new IllegalArgumentException(
        "Bad bloom filter format version " + version
      );
  }
}
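
A usage sketch (not part of the original sources): the metadata DataInput typically comes from an already-open HFile.Reader; getGeneralBloomFilterMetadata() is assumed here as the way to obtain it, and the resulting filter is tied back to the same reader so its blocks can be loaded lazily.

// Sketch only; assumes `reader` is an open HFile.Reader for a v2+ file.
DataInput bloomMeta = reader.getGeneralBloomFilterMetadata();
if (bloomMeta != null) {
  BloomFilter bloomFilter = BloomFilterFactory.createFromMeta(bloomMeta, reader);
  // bloomFilter can now be used for membership checks against the file's keys.
}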
Project: ditb    File: BloomFilterFactory.java
/**
 * Creates a new Delete Family Bloom filter at the time of
 * {@link org.apache.hadoop.hbase.regionserver.StoreFile} writing.
 * @param conf
 * @param cacheConf
 * @param maxKeys an estimate of the number of keys we expect to insert.
 *        Irrelevant if compound Bloom filters are enabled.
 * @param writer the HFile writer
 * @return the new Bloom filter, or null in case Bloom filters are disabled
 *         or when failed to create one.
 */
public static BloomFilterWriter createDeleteBloomAtWrite(Configuration conf,
    CacheConfig cacheConf, int maxKeys, HFile.Writer writer) {
  if (!isDeleteFamilyBloomEnabled(conf)) {
    LOG.info("Delete Bloom filters are disabled by configuration for "
        + writer.getPath()
        + (conf == null ? " (configuration is null)" : ""));
    return null;
  }

  float err = getErrorRate(conf);

  int maxFold = getMaxFold(conf);
  // In case of compound Bloom filters we ignore the maxKeys hint.
  CompoundBloomFilterWriter bloomWriter = new CompoundBloomFilterWriter(getBloomBlockSize(conf),
      err, Hash.getHashType(conf), maxFold, cacheConf.shouldCacheBloomsOnWrite(),
      KeyValue.RAW_COMPARATOR);
  writer.addInlineBlockWriter(bloomWriter);
  return bloomWriter;
}
Project: ditb    File: HRegion.java
@Override public long getOldestHfileTs(boolean majorCompactioOnly) throws IOException {
  long result = Long.MAX_VALUE;
  for (Store store : getStores()) {
    Collection<StoreFile> storeFiles = store.getStorefiles();
    if (storeFiles == null) continue;
    for (StoreFile file : storeFiles) {
      StoreFile.Reader sfReader = file.getReader();
      if (sfReader == null) continue;
      HFile.Reader reader = sfReader.getHFileReader();
      if (reader == null) continue;
      if (majorCompactioOnly) {
        byte[] val = reader.loadFileInfo().get(StoreFile.MAJOR_COMPACTION_KEY);
        if (val == null || !Bytes.toBoolean(val)) {
          continue;
        }
      }
      result = Math.min(result, reader.getFileContext().getFileCreateTime());
    }
  }
  return result == Long.MAX_VALUE ? 0 : result;
}
Project: ditb    File: TestAccessController.java
private void createHFile(Path path,
    byte[] family, byte[] qualifier,
    byte[] startKey, byte[] endKey, int numRows) throws IOException {

  HFile.Writer writer = null;
  long now = System.currentTimeMillis();
  try {
    HFileContext context = new HFileContextBuilder().build();
    writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
        .withPath(fs, path)
        .withFileContext(context)
        .create();
    // subtract 2 since numRows doesn't include boundary keys
    for (byte[] key : Bytes.iterateOnSplits(startKey, endKey, true, numRows-2)) {
      KeyValue kv = new KeyValue(key, family, qualifier, now, key);
      writer.append(kv);
    }
  } finally {
    if (writer != null) {
      writer.close();
    }
  }
}
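
For completeness, a file written by a helper like the one above can be read back with HFile.createReader and an HFileScanner, as the HadoopFilePerformanceTest snippets further down do. A minimal sketch assuming the same conf, fs, and path:

HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
reader.loadFileInfo();
HFileScanner scanner = reader.getScanner(false, true, false);
if (scanner.seekTo()) {
  do {
    KeyValue kv = scanner.getKeyValue();
    // process kv
  } while (scanner.next());
}
reader.close();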
Project: ditb    File: HFilePerformanceEvaluation.java
@Override
void setUp() throws Exception {

  HFileContextBuilder builder = new HFileContextBuilder()
      .withCompression(AbstractHFileWriter.compressionByName(codec))
      .withBlockSize(RFILE_BLOCKSIZE);

  if ("aes".equals(cipher)) {
    byte[] cipherKey = new byte[AES.KEY_LENGTH];
    new SecureRandom().nextBytes(cipherKey);
    builder.withEncryptionContext(Encryption.newContext(conf)
        .setCipher(Encryption.getCipher(conf, cipher))
        .setKey(cipherKey));
  } else if (!"none".equals(cipher)) {
    throw new IOException("Cipher " + cipher + " not supported.");
  }

  HFileContext hFileContext = builder.build();

  writer = HFile.getWriterFactoryNoCache(conf)
      .withPath(fs, mf)
      .withFileContext(hFileContext)
      .withComparator(new KeyValue.RawBytesComparator())
      .create();
}
Project: ditb    File: TestRegionObserverInterface.java
private static void createHFile(
    Configuration conf,
    FileSystem fs, Path path,
    byte[] family, byte[] qualifier) throws IOException {
  HFileContext context = new HFileContextBuilder().build();
  HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
      .withPath(fs, path)
      .withFileContext(context)
      .create();
  long now = System.currentTimeMillis();
  try {
    for (int i = 1; i <= 9; i++) {
      KeyValue kv = new KeyValue(Bytes.toBytes(i + ""), family, qualifier, now, Bytes.toBytes(i + ""));
      writer.append(kv);
    }
  } finally {
    writer.close();
  }
}
Project: ditb    File: TestHRegionServerBulkLoad.java
/**
 * Create an HFile with the given number of rows with a specified value.
 */
public static void createHFile(FileSystem fs, Path path, byte[] family,
    byte[] qualifier, byte[] value, int numRows) throws IOException {
  HFileContext context = new HFileContextBuilder().withBlockSize(BLOCKSIZE)
                          .withCompression(COMPRESSION)
                          .build();
  HFile.Writer writer = HFile
      .getWriterFactory(conf, new CacheConfig(conf))
      .withPath(fs, path)
      .withFileContext(context)
      .create();
  long now = System.currentTimeMillis();
  try {
    // write numRows rows, all carrying the same value
    for (int i = 0; i < numRows; i++) {
      KeyValue kv = new KeyValue(rowkey(i), family, qualifier, now, value);
      writer.append(kv);
    }
    writer.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY, Bytes.toBytes(now));
  } finally {
    writer.close();
  }
}
Project: ditb    File: TestBulkLoad.java
private String createHFileForFamilies(byte[] family) throws IOException {
  HFile.WriterFactory hFileFactory = HFile.getWriterFactoryNoCache(conf);
  // TODO We need a way to do this without creating files
  File hFileLocation = testFolder.newFile();
  FSDataOutputStream out = new FSDataOutputStream(new FileOutputStream(hFileLocation));
  try {
    hFileFactory.withOutputStream(out);
    hFileFactory.withFileContext(new HFileContext());
    HFile.Writer writer = hFileFactory.create();
    try {
      writer.append(new KeyValue(CellUtil.createCell(randomBytes,
          family,
          randomBytes,
          0L,
          KeyValue.Type.Put.getCode(),
          randomBytes)));
    } finally {
      writer.close();
    }
  } finally {
    out.close();
  }
  return hFileLocation.getAbsoluteFile().getAbsolutePath();
}
Project: ditb    File: TestHRegionReplayEvents.java
private String createHFileForFamilies(Path testPath, byte[] family,
    byte[] valueBytes) throws IOException {
  HFile.WriterFactory hFileFactory = HFile.getWriterFactoryNoCache(TEST_UTIL.getConfiguration());
  // TODO We need a way to do this without creating files
  Path testFile = new Path(testPath, UUID.randomUUID().toString());
  FSDataOutputStream out = TEST_UTIL.getTestFileSystem().create(testFile);
  try {
    hFileFactory.withOutputStream(out);
    hFileFactory.withFileContext(new HFileContext());
    HFile.Writer writer = hFileFactory.create();
    try {
      writer.append(new KeyValue(CellUtil.createCell(valueBytes, family, valueBytes, 0L,
        KeyValue.Type.Put.getCode(), valueBytes)));
    } finally {
      writer.close();
    }
  } finally {
    out.close();
  }
  return testFile.toString();
}
Project: ditb    File: TestStore.java
/**
 * Verify that compression and data block encoding are respected by the
 * Store.createWriterInTmp() method, used on store flush.
 */
@Test
public void testCreateWriter() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  FileSystem fs = FileSystem.get(conf);

  HColumnDescriptor hcd = new HColumnDescriptor(family);
  hcd.setCompressionType(Compression.Algorithm.GZ);
  hcd.setDataBlockEncoding(DataBlockEncoding.DIFF);
  init(name.getMethodName(), conf, hcd);

  // Test createWriterInTmp()
  StoreFile.Writer writer = store.createWriterInTmp(4, hcd.getCompression(), false, true, false);
  Path path = writer.getPath();
  writer.append(new KeyValue(row, family, qf1, Bytes.toBytes(1)));
  writer.append(new KeyValue(row, family, qf2, Bytes.toBytes(2)));
  writer.append(new KeyValue(row2, family, qf1, Bytes.toBytes(3)));
  writer.append(new KeyValue(row2, family, qf2, Bytes.toBytes(4)));
  writer.close();

  // Verify that compression and encoding settings are respected
  HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
  Assert.assertEquals(hcd.getCompressionType(), reader.getCompressionAlgorithm());
  Assert.assertEquals(hcd.getDataBlockEncoding(), reader.getDataBlockEncoding());
  reader.close();
}
Project: ditb    File: TestEncryptionRandomKeying.java
private static byte[] extractHFileKey(Path path) throws Exception {
  HFile.Reader reader = HFile.createReader(TEST_UTIL.getTestFileSystem(), path,
    new CacheConfig(conf), conf);
  try {
    reader.loadFileInfo();
    Encryption.Context cryptoContext = reader.getFileContext().getEncryptionContext();
    assertNotNull("Reader has a null crypto context", cryptoContext);
    Key key = cryptoContext.getKey();
    if (key == null) {
      return null;
    }
    return key.getEncoded();
  } finally {
    reader.close();
  }
}
Project: ditb    File: IntegrationTestIngestWithEncryption.java
@Override
public void setUpCluster() throws Exception {
  util = getTestingUtil(null);
  Configuration conf = util.getConfiguration();
  if (!util.isDistributedCluster()) {
    // Inject required configuration if we are not running in distributed mode
    conf.setInt(HFile.FORMAT_VERSION_KEY, 3);
    conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName());
    conf.set(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "hbase");
    conf.setClass("hbase.regionserver.hlog.reader.impl", SecureProtobufLogReader.class,
      Reader.class);
    conf.setClass("hbase.regionserver.hlog.writer.impl", SecureProtobufLogWriter.class,
      Writer.class);
    conf.setBoolean(HConstants.ENABLE_WAL_ENCRYPTION, true);
  }
  // Check if the cluster configuration can support this test
  try {
    EncryptionTest.testEncryption(conf, "AES", null);
  } catch (Exception e) {
    LOG.warn("Encryption configuration test did not pass, skipping test");
    return;
  }
  super.setUpCluster();
  initialized = true;
}
Project: ditb    File: IntegrationTestWithCellVisibilityLoadAndVerify.java
@Override
public void setUpCluster() throws Exception {
  util = getTestingUtil(null);
  Configuration conf = util.getConfiguration();
  conf.setInt(HFile.FORMAT_VERSION_KEY, 3);
  conf.set("hbase.coprocessor.master.classes", VisibilityController.class.getName());
  conf.set("hbase.coprocessor.region.classes", VisibilityController.class.getName());
  conf.set("hbase.superuser", User.getCurrent().getName());
  conf.setBoolean("dfs.permissions", false);
  super.setUpCluster();
  String[] users = userNames.split(",");
  if (users.length != 2) {
    System.err.println(ERROR_STR);
    throw new IOException(ERROR_STR);
  }
  System.out.println(userNames + " " + users[0] + " " + users[1]);
  USER1 = User.createUserForTesting(conf, users[0], new String[] {});
  USER2 = User.createUserForTesting(conf, users[1], new String[] {});
  addLabelsAndAuths();
}
Project: gemfirexd-oss    File: HFileSortedOplog.java
public HFileSortedOplogWriter() throws IOException {
      writer = HFile.getWriterFactory(hconf, hcache)
          .withPath(fs, path)
          .withBlockSize(sopConfig.getBlockSize())
          .withBytesPerChecksum(sopConfig.getBytesPerChecksum())
          .withChecksumType(HFileSortedOplogFactory.convertChecksum(sopConfig.getChecksum()))
//          .withComparator(sopConfig.getComparator())
          .withCompression(HFileSortedOplogFactory.convertCompression(sopConfig.getCompression()))
          .withDataBlockEncoder(HFileSortedOplogFactory.convertEncoding(sopConfig.getKeyEncoding()))
          .create();

      bfw = sopConfig.isBloomFilterEnabled() ?
//          BloomFilterFactory.createGeneralBloomAtWrite(hconf, hcache, BloomType.ROW,
//              0, writer, sopConfig.getComparator())
          BloomFilterFactory.createGeneralBloomAtWrite(hconf, hcache, BloomType.ROW,
              0, writer)
          : null;
    }
Project: gemfirexd-oss    File: HFileSortedOplog.java
public HFileSortedOplogWriter(int keys) throws IOException {
      try {
        int hfileBlockSize = Integer.getInteger(
            HoplogConfig.HFILE_BLOCK_SIZE_CONF, (1 << 16));

        Algorithm compress = Algorithm.valueOf(System.getProperty(HoplogConfig.COMPRESSION,
            HoplogConfig.COMPRESSION_DEFAULT));

//        ByteComparator bc = new ByteComparator();
        writer = HFile.getWriterFactory(conf, cacheConf)
            .withPath(fsProvider.getFS(), path)
            .withBlockSize(hfileBlockSize)
//            .withComparator(bc)
            .withCompression(compress)
            .create();
        bfw = BloomFilterFactory.createGeneralBloomAtWrite(conf, cacheConf, BloomType.ROW, keys,
            writer);

        logger.fine("Created hoplog writer with compression " + compress);
      } catch (IOException e) {
        logger.fine("IO Error while creating writer");
        throw e;
      }
    }
Project: LCIndex-HBase-0.94.16    File: CompressionTest.java
public static void doSmokeTest(FileSystem fs, Path path, String codec)
throws Exception {
  Configuration conf = HBaseConfiguration.create();
  HFile.Writer writer = HFile.getWriterFactoryNoCache(conf)
      .withPath(fs, path)
      .withCompression(codec)
      .create();
  writer.append(Bytes.toBytes("testkey"), Bytes.toBytes("testval"));
  writer.appendFileInfo(Bytes.toBytes("infokey"), Bytes.toBytes("infoval"));
  writer.close();

  HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf));
  reader.loadFileInfo();
  byte[] key = reader.getFirstKey();
  boolean rc = Bytes.toString(key).equals("testkey");
  reader.close();

  if (!rc) {
    throw new Exception("Read back incorrect result: " +
                        Bytes.toStringBinary(key));
  }
}
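
A usage sketch for the smoke test above: codec names follow Compression.Algorithm ("none", "gz", "lzo", "snappy"); the scratch path used here is an assumption, not from the original source.

// Verify that the gzip codec is usable on this node.
CompressionTest.doSmokeTest(fs, new Path("/tmp/hbase-compression-smoke"), "gz");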
Project: LCIndex-HBase-0.94.16    File: CompoundBloomFilter.java
/**
 * De-serialization for compound Bloom filter metadata. Must be consistent
 * with what {@link CompoundBloomFilterWriter} does.
 *
 * @param meta serialized Bloom filter metadata without any magic blocks
 * @throws IOException
 */
public CompoundBloomFilter(DataInput meta, HFile.Reader reader)
    throws IOException {
  this.reader = reader;

  totalByteSize = meta.readLong();
  hashCount = meta.readInt();
  hashType = meta.readInt();
  totalKeyCount = meta.readLong();
  totalMaxKeys = meta.readLong();
  numChunks = meta.readInt();
  comparator = FixedFileTrailer.createComparator(
      Bytes.toString(Bytes.readByteArray(meta)));

  hash = Hash.getInstance(hashType);
  if (hash == null) {
    throw new IllegalArgumentException("Invalid hash type: " + hashType);
  }

  index = new HFileBlockIndex.BlockIndexReader(comparator, 1);
  index.readRootIndex(meta, numChunks);
}
Project: LCIndex-HBase-0.94.16    File: BloomFilterFactory.java
/**
 * Instantiates the correct Bloom filter class based on the version provided
 * in the meta block data.
 *
 * @param meta the byte array holding the Bloom filter's metadata, including
 *          version information
 * @param reader the {@link HFile} reader to use to lazily load Bloom filter
 *          blocks
 * @return an instance of the correct type of Bloom filter
 * @throws IllegalArgumentException
 */
public static BloomFilter
    createFromMeta(DataInput meta, HFile.Reader reader)
    throws IllegalArgumentException, IOException {
  int version = meta.readInt();
  switch (version) {
    case ByteBloomFilter.VERSION:
      // This is only possible in a version 1 HFile. We are ignoring the
      // passed comparator because raw byte comparators are always used
      // in version 1 Bloom filters.
      return new ByteBloomFilter(meta);

    case CompoundBloomFilterBase.VERSION:
      return new CompoundBloomFilter(meta, reader);

    default:
      throw new IllegalArgumentException(
        "Bad bloom filter format version " + version
      );
  }
}
Project: HIndex    File: TestAccessController.java
private void createHFile(Path path,
    byte[] family, byte[] qualifier,
    byte[] startKey, byte[] endKey, int numRows) throws IOException {

  HFile.Writer writer = null;
  long now = System.currentTimeMillis();
  try {
    HFileContext context = new HFileContextBuilder().build();
    writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
        .withPath(fs, path)
        .withFileContext(context)
        .create();
    // subtract 2 since numRows doesn't include boundary keys
    for (byte[] key : Bytes.iterateOnSplits(startKey, endKey, true, numRows-2)) {
      KeyValue kv = new KeyValue(key, family, qualifier, now, key);
      writer.append(kv);
    }
  } finally {
    if (writer != null) {
      writer.close();
    }
  }
}
Project: HIndex    File: TestHRegionServerBulkLoad.java
/**
 * Create an HFile with the given number of rows with a specified value.
 */
public static void createHFile(FileSystem fs, Path path, byte[] family,
    byte[] qualifier, byte[] value, int numRows) throws IOException {
  HFileContext context = new HFileContextBuilder().withBlockSize(BLOCKSIZE)
                          .withCompression(COMPRESSION)
                          .build();
  HFile.Writer writer = HFile
      .getWriterFactory(conf, new CacheConfig(conf))
      .withPath(fs, path)
      .withFileContext(context)
      .create();
  long now = System.currentTimeMillis();
  try {
    // write numRows rows, all carrying the same value
    for (int i = 0; i < numRows; i++) {
      KeyValue kv = new KeyValue(rowkey(i), family, qualifier, now, value);
      writer.append(kv);
    }
  } finally {
    writer.close();
  }
}
Project: HIndex    File: HFileTestUtil.java
/**
 * Create an HFile with the given number of rows between a given
 * start key and end key.
 */
public static void createHFile(
    Configuration configuration,
    FileSystem fs, Path path,
    byte[] family, byte[] qualifier,
    byte[] startKey, byte[] endKey, int numRows) throws IOException
{
  HFileContext meta = new HFileContextBuilder().build();
  HFile.Writer writer = HFile.getWriterFactory(configuration, new CacheConfig(configuration))
      .withPath(fs, path)
      .withFileContext(meta)
      .create();
  long now = System.currentTimeMillis();
  try {
    // subtract 2 since iterateOnSplits doesn't include boundary keys
    for (byte[] key : Bytes.iterateOnSplits(startKey, endKey, numRows-2)) {
      KeyValue kv = new KeyValue(key, family, qualifier, now, key);
      writer.append(kv);
    }
  } finally {
    writer.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY,
        Bytes.toBytes(System.currentTimeMillis()));
    writer.close();
  }
}
Project: LCIndex-HBase-0.94.16    File: TestLoadIncrementalHFiles.java
/**
 * Create an HFile with the given number of rows between a given
 * start key and end key.
 * TODO put me in an HFileTestUtil or something?
 */
static void createHFile(
    Configuration conf,
    FileSystem fs, Path path,
    byte[] family, byte[] qualifier,
    byte[] startKey, byte[] endKey, int numRows) throws IOException
{
  HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
      .withPath(fs, path)
      .withBlockSize(BLOCKSIZE)
      .withCompression(COMPRESSION)
      .withComparator(KeyValue.KEY_COMPARATOR)
      .create();
  long now = System.currentTimeMillis();
  try {
    // subtract 2 since iterateOnSplits doesn't include boundary keys
    for (byte[] key : Bytes.iterateOnSplits(startKey, endKey, numRows-2)) {
      KeyValue kv = new KeyValue(key, family, qualifier, now, key);
      writer.append(kv);
    }
  } finally {
    writer.close();
  }
}
Project: HIndex    File: TestEncryptionRandomKeying.java
private static byte[] extractHFileKey(Path path) throws Exception {
  HFile.Reader reader = HFile.createReader(TEST_UTIL.getTestFileSystem(), path,
    new CacheConfig(conf), conf);
  try {
    reader.loadFileInfo();
    Encryption.Context cryptoContext = reader.getFileContext().getEncryptionContext();
    assertNotNull("Reader has a null crypto context", cryptoContext);
    Key key = cryptoContext.getKey();
    if (key == null) {
      return null;
    }
    return key.getEncoded();
  } finally {
    reader.close();
  }
}
Project: HIndex    File: SecureTestUtil.java
public static void enableSecurity(Configuration conf) throws IOException {
  conf.set("hadoop.security.authorization", "false");
  conf.set("hadoop.security.authentication", "simple");
  conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, AccessController.class.getName());
  conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, AccessController.class.getName() +
    "," + SecureBulkLoadEndpoint.class.getName());
  conf.set(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, AccessController.class.getName());
  // The secure minicluster creates separate service principals based on the
  // current user's name, one for each slave. We need to add all of these to
  // the superuser list or security won't function properly. We expect the
  // HBase service account(s) to have superuser privilege.
  String currentUser = User.getCurrent().getName();
  StringBuffer sb = new StringBuffer();
  sb.append("admin,");
  sb.append(currentUser);
  // Assumes we won't ever have a minicluster with more than 5 slaves
  for (int i = 0; i < 5; i++) {
    sb.append(',');
    sb.append(currentUser); sb.append(".hfs."); sb.append(i);
  }
  conf.set("hbase.superuser", sb.toString());
  // Need HFile V3 for tags for security features
  conf.setInt(HFile.FORMAT_VERSION_KEY, 3);
}
Project: Megh    File: HadoopFilePerformanceTest.java
private void readHFileSeq(Path file, Compression.Algorithm compression) throws Exception
{

  CacheConfig cacheConf = new CacheConfig(conf);
  HFile.Reader reader = HFile.createReader(hdfs, file, cacheConf, conf);
  HFileScanner scanner = reader.getScanner(true, true, false);

  scanner.seekTo();

  @SuppressWarnings("unused")
  KeyValue kv = null;
  while (scanner.next()) {
    kv = scanner.getKeyValue();
    //logger.debug("key: {} value: {}", new String (kv.getKey()), new String (kv.getValue()));
  }

}
Project: Megh    File: HadoopFilePerformanceTest.java
private void readHFileRandom(Path file, Compression.Algorithm compression) throws Exception
{
  CacheConfig cacheConf = new CacheConfig(conf);
  HFile.Reader reader = HFile.createReader(hdfs, file, cacheConf, conf);
  HFileScanner scanner = reader.getScanner(true, true, false);

  @SuppressWarnings("unused")
  KeyValue kv = null;
  scanner.seekTo();
  Random random = new Random();
  for (int i = 0; i < testSize; i++) {
    scanner.seekTo();
    scanner.seekTo(getKey(random.nextInt(testSize)).getBytes());
    kv = scanner.getKeyValue();
    //logger.debug("key: {} value: {}", new String (kv.getKey()), new String (kv.getValue()));
  }
}
Project: pbase    File: VisibilityController.java
@Override
public void start(CoprocessorEnvironment env) throws IOException {
  this.conf = env.getConfiguration();
  if (HFile.getFormatVersion(conf) < HFile.MIN_FORMAT_VERSION_WITH_TAGS) {
    throw new RuntimeException("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS
      + " is required to persist visibility labels. Consider setting " + HFile.FORMAT_VERSION_KEY
      + " accordingly.");
  }

  if (env instanceof RegionServerCoprocessorEnvironment) {
    throw new RuntimeException("Visibility controller should not be configured as "
        + "'hbase.coprocessor.regionserver.classes'.");
  }
  // Do not create for master CPs
  if (!(env instanceof MasterCoprocessorEnvironment)) {
    visibilityLabelService = VisibilityLabelServiceManager.getInstance()
        .getVisibilityLabelService(this.conf);
  }
  Pair<List<String>, List<String>> superUsersAndGroups =
      VisibilityUtils.getSystemAndSuperUsers(this.conf);
  this.superUsers = superUsersAndGroups.getFirst();
  this.superGroups = superUsersAndGroups.getSecond();
}
Project: HIndex    File: IntegrationTestIngestWithEncryption.java
@Override
public void setUpCluster() throws Exception {
  util = getTestingUtil(null);
  Configuration conf = util.getConfiguration();
  conf.setInt(HFile.FORMAT_VERSION_KEY, 3);
  if (!util.isDistributedCluster()) {
    // Inject the test key provider and WAL alternative if running on a
    // localhost cluster; otherwise, whether or not the schema change below
    // takes effect depends on the distributed cluster site configuration.
    conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName());
    conf.set(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "hbase");
    conf.setClass("hbase.regionserver.hlog.reader.impl", SecureProtobufLogReader.class,
      HLog.Reader.class);
    conf.setClass("hbase.regionserver.hlog.writer.impl", SecureProtobufLogWriter.class,
      HLog.Writer.class);
    conf.setBoolean(HConstants.ENABLE_WAL_ENCRYPTION, true);
  }
  super.setUpCluster();
}
Project: pbase    File: CompoundBloomFilter.java
/**
 * De-serialization for compound Bloom filter metadata. Must be consistent
 * with what {@link CompoundBloomFilterWriter} does.
 *
 * @param meta serialized Bloom filter metadata without any magic blocks
 * @throws IOException
 */
public CompoundBloomFilter(DataInput meta, HFile.Reader reader)
    throws IOException {
  this.reader = reader;

  totalByteSize = meta.readLong();
  hashCount = meta.readInt();
  hashType = meta.readInt();
  totalKeyCount = meta.readLong();
  totalMaxKeys = meta.readLong();
  numChunks = meta.readInt();
  comparator = FixedFileTrailer.createComparator(
      Bytes.toString(Bytes.readByteArray(meta)));

  hash = Hash.getInstance(hashType);
  if (hash == null) {
    throw new IllegalArgumentException("Invalid hash type: " + hashType);
  }

  index = new HFileBlockIndex.BlockIndexReader(comparator, 1);
  index.readRootIndex(meta, numChunks);
}
Project: HIndex    File: BloomFilterFactory.java
/**
 * Instantiates the correct Bloom filter class based on the version provided
 * in the meta block data.
 *
 * @param meta the byte array holding the Bloom filter's metadata, including
 *          version information
 * @param reader the {@link HFile} reader to use to lazily load Bloom filter
 *          blocks
 * @return an instance of the correct type of Bloom filter
 * @throws IllegalArgumentException
 */
public static BloomFilter
    createFromMeta(DataInput meta, HFile.Reader reader)
    throws IllegalArgumentException, IOException {
  int version = meta.readInt();
  switch (version) {
    case ByteBloomFilter.VERSION:
      // This is only possible in a version 1 HFile. We are ignoring the
      // passed comparator because raw byte comparators are always used
      // in version 1 Bloom filters.
      return new ByteBloomFilter(meta);

    case CompoundBloomFilterBase.VERSION:
      return new CompoundBloomFilter(meta, reader);

    default:
      throw new IllegalArgumentException(
        "Bad bloom filter format version " + version
      );
  }
}
Project: pbase    File: BloomFilterFactory.java
/**
 * Creates a new Delete Family Bloom filter at the time of
 * {@link org.apache.hadoop.hbase.regionserver.StoreFile} writing.
 * @param conf
 * @param cacheConf
 * @param maxKeys an estimate of the number of keys we expect to insert.
 *        Irrelevant if compound Bloom filters are enabled.
 * @param writer the HFile writer
 * @return the new Bloom filter, or null in case Bloom filters are disabled
 *         or when failed to create one.
 */
public static BloomFilterWriter createDeleteBloomAtWrite(Configuration conf,
    CacheConfig cacheConf, int maxKeys, HFile.Writer writer) {
  if (!isDeleteFamilyBloomEnabled(conf)) {
    LOG.info("Delete Bloom filters are disabled by configuration for "
        + writer.getPath()
        + (conf == null ? " (configuration is null)" : ""));
    return null;
  }

  float err = getErrorRate(conf);

  int maxFold = getMaxFold(conf);
  // In case of compound Bloom filters we ignore the maxKeys hint.
  CompoundBloomFilterWriter bloomWriter = new CompoundBloomFilterWriter(getBloomBlockSize(conf),
      err, Hash.getHashType(conf), maxFold, cacheConf.shouldCacheBloomsOnWrite(),
      KeyValue.RAW_COMPARATOR);
  writer.addInlineBlockWriter(bloomWriter);
  return bloomWriter;
}
Project: pbase    File: HStore.java
private HFileContext createFileContext(Compression.Algorithm compression,
                                       boolean includeMVCCReadpoint, boolean includesTag, Encryption.Context cryptoContext) {
    if (compression == null) {
        compression = HFile.DEFAULT_COMPRESSION_ALGORITHM;
    }
    HFileContext hFileContext = new HFileContextBuilder()
            .withIncludesMvcc(includeMVCCReadpoint)
            .withIncludesTags(includesTag)
            .withCompression(compression)
            .withCompressTags(family.isCompressTags())
            .withChecksumType(checksumType)
            .withBytesPerCheckSum(bytesPerChecksum)
            .withBlockSize(blocksize)
            .withHBaseCheckSum(true)
            .withDataBlockEncoding(family.getDataBlockEncoding())
            .withEncryptionContext(cryptoContext)
            .build();
    return hFileContext;
}
Project: pbase    File: SecureTestUtil.java
public static void enableSecurity(Configuration conf) throws IOException {
  conf.set("hadoop.security.authorization", "false");
  conf.set("hadoop.security.authentication", "simple");
  conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, AccessController.class.getName());
  conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, AccessController.class.getName() +
    "," + SecureBulkLoadEndpoint.class.getName());
  conf.set(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, AccessController.class.getName());
  // The secure minicluster creates separate service principals based on the
  // current user's name, one for each slave. We need to add all of these to
  // the superuser list or security won't function properly. We expect the
  // HBase service account(s) to have superuser privilege.
  String currentUser = User.getCurrent().getName();
  StringBuffer sb = new StringBuffer();
  sb.append("admin,");
  sb.append(currentUser);
  // Assumes we won't ever have a minicluster with more than 5 slaves
  for (int i = 0; i < 5; i++) {
    sb.append(',');
    sb.append(currentUser); sb.append(".hfs."); sb.append(i);
  }
  conf.set("hbase.superuser", sb.toString());
  // Need HFile V3 for tags for security features
  conf.setInt(HFile.FORMAT_VERSION_KEY, 3);
}
Project: pbase    File: TestAccessController.java
private void createHFile(Path path,
    byte[] family, byte[] qualifier,
    byte[] startKey, byte[] endKey, int numRows) throws IOException {

  HFile.Writer writer = null;
  long now = System.currentTimeMillis();
  try {
    HFileContext context = new HFileContextBuilder().build();
    writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
        .withPath(fs, path)
        .withFileContext(context)
        .create();
    // subtract 2 since numRows doesn't include boundary keys
    for (byte[] key : Bytes.iterateOnSplits(startKey, endKey, true, numRows-2)) {
      KeyValue kv = new KeyValue(key, family, qualifier, now, key);
      writer.append(kv);
    }
  } finally {
    if (writer != null) {
      writer.close();
    }
  }
}
Project: pbase    File: TestRegionObserverInterface.java
private static void createHFile(
    Configuration conf,
    FileSystem fs, Path path,
    byte[] family, byte[] qualifier) throws IOException {
  HFileContext context = new HFileContextBuilder().build();
  HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
      .withPath(fs, path)
      .withFileContext(context)
      .create();
  long now = System.currentTimeMillis();
  try {
    for (int i = 1; i <= 9; i++) {
      KeyValue kv = new KeyValue(Bytes.toBytes(i + ""), family, qualifier, now, Bytes.toBytes(i + ""));
      writer.append(kv);
    }
  } finally {
    writer.close();
  }
}
Project: pbase    File: HFileTestUtil.java
/**
 * Create an HFile with the given number of rows between a given
 * start key and end key.
 */
public static void createHFile(
    Configuration configuration,
    FileSystem fs, Path path,
    byte[] family, byte[] qualifier,
    byte[] startKey, byte[] endKey, int numRows) throws IOException
{
  HFileContext meta = new HFileContextBuilder().build();
  HFile.Writer writer = HFile.getWriterFactory(configuration, new CacheConfig(configuration))
      .withPath(fs, path)
      .withFileContext(meta)
      .create();
  long now = System.currentTimeMillis();
  try {
    // subtract 2 since iterateOnSplits doesn't include boundary keys
    for (byte[] key : Bytes.iterateOnSplits(startKey, endKey, numRows-2)) {
      KeyValue kv = new KeyValue(key, family, qualifier, now, key);
      writer.append(kv);
    }
  } finally {
    writer.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY,
        Bytes.toBytes(System.currentTimeMillis()));
    writer.close();
  }
}
Project: HIndex    File: HStore.java
private HFileContext createFileContext(Compression.Algorithm compression,
    boolean includeMVCCReadpoint, boolean includesTag, Encryption.Context cryptoContext) {
  if (compression == null) {
    compression = HFile.DEFAULT_COMPRESSION_ALGORITHM;
  }
  HFileContext hFileContext = new HFileContextBuilder()
                              .withIncludesMvcc(includeMVCCReadpoint)
                              .withIncludesTags(includesTag)
                              .withCompression(compression)
                              .withCompressTags(family.shouldCompressTags())
                              .withChecksumType(checksumType)
                              .withBytesPerCheckSum(bytesPerChecksum)
                              .withBlockSize(blocksize)
                              .withHBaseCheckSum(true)
                              .withDataBlockEncoding(family.getDataBlockEncoding())
                              .withEncryptionContext(cryptoContext)
                              .build();
  return hFileContext;
}