Java code examples for class org.apache.hadoop.hbase.regionserver.BloomType

Project: ditb    File: VisibilityController.java
/********************************* Master related hooks **********************************/

  @Override
  public void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
    // Need to create the new system table for labels here
    MasterServices master = ctx.getEnvironment().getMasterServices();
    if (!MetaTableAccessor.tableExists(master.getConnection(), LABELS_TABLE_NAME)) {
      HTableDescriptor labelsTable = new HTableDescriptor(LABELS_TABLE_NAME);
      HColumnDescriptor labelsColumn = new HColumnDescriptor(LABELS_TABLE_FAMILY);
      labelsColumn.setBloomFilterType(BloomType.NONE);
      labelsColumn.setBlockCacheEnabled(false); // We will cache all the labels; no need for
                                                 // the normal table block cache.
      labelsTable.addFamily(labelsColumn);
      // Keep the "labels" table to a single region always; we do not expect too many
      // labels in the system.
      labelsTable.setValue(HTableDescriptor.SPLIT_POLICY,
          DisabledRegionSplitPolicy.class.getName());
      labelsTable.setValue(Bytes.toBytes(HConstants.DISALLOW_WRITES_IN_RECOVERING),
          Bytes.toBytes(true));
      master.createTable(labelsTable, null, HConstants.NO_NONCE, HConstants.NO_NONCE);
    }
  }
Project: ditb    File: ThriftUtilities.java
/**
 * This utility method creates a new HBase HColumnDescriptor object based on a
 * Thrift ColumnDescriptor "struct".
 *
 * @param in
 *          Thrift ColumnDescriptor object
 * @return HColumnDescriptor
 * @throws IllegalArgument if the column name is empty
 */
public static HColumnDescriptor colDescFromThrift(ColumnDescriptor in)
    throws IllegalArgument {
  Compression.Algorithm comp =
    Compression.getCompressionAlgorithmByName(in.compression.toLowerCase());
  BloomType bt =
    BloomType.valueOf(in.bloomFilterType);

  if (in.name == null || !in.name.hasRemaining()) {
    throw new IllegalArgument("column name is empty");
  }
  byte [] parsedName = KeyValue.parseColumn(Bytes.getBytes(in.name))[0];
  HColumnDescriptor col = new HColumnDescriptor(parsedName)
      .setMaxVersions(in.maxVersions)
      .setCompressionType(comp)
      .setInMemory(in.inMemory)
      .setBlockCacheEnabled(in.blockCacheEnabled)
      .setTimeToLive(in.timeToLive > 0 ? in.timeToLive : Integer.MAX_VALUE)
      .setBloomFilterType(bt);
  return col;
}
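A minimal usage sketch for the conversion above, assuming the Thrift-generated ColumnDescriptor and IllegalArgument classes from org.apache.hadoop.hbase.thrift.generated and that ThriftUtilities is visible to the caller; all field values here are illustrative:

import java.nio.ByteBuffer;

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor;
import org.apache.hadoop.hbase.thrift.generated.IllegalArgument;

public class ColDescFromThriftExample {
  public static void main(String[] args) throws IllegalArgument {
    // Build the Thrift "struct" the way a Thrift client would send it.
    ColumnDescriptor in = new ColumnDescriptor();
    in.setName(ByteBuffer.wrap("cf1:".getBytes())); // trailing ':' is stripped by parseColumn
    in.setMaxVersions(3);
    in.setCompression("NONE");    // matched case-insensitively by getCompressionAlgorithmByName
    in.setBloomFilterType("ROW"); // must be an exact BloomType constant: NONE, ROW, or ROWCOL
    in.setBlockCacheEnabled(true);
    in.setInMemory(false);
    in.setTimeToLive(86400);      // seconds; non-positive values fall back to Integer.MAX_VALUE

    HColumnDescriptor col = ThriftUtilities.colDescFromThrift(in);
    System.out.println(col);
  }
}

Note that this ditb version guards the TTL with in.timeToLive > 0; the HIndex variant later on this page passes in.timeToLive through unguarded.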
Project: ditb    File: AccessControlLists.java
/**
 * Create the ACL table.
 * @param master the master services used to create the table
 * @throws IOException
 */
static void createACLTable(MasterServices master) throws IOException {
  master.createTable(new HTableDescriptor(ACL_TABLE_NAME)
    .addFamily(new HColumnDescriptor(ACL_LIST_FAMILY)
      .setMaxVersions(1)
      .setInMemory(true)
      .setBlockCacheEnabled(true)
      .setBlocksize(8 * 1024)
      .setBloomFilterType(BloomType.NONE)
      .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
      // Set cache data blocks in L1 if more than one cache tier is deployed; e.g. this will
      // be the case if we are using CombinedBlockCache (Bucket Cache).
      .setCacheDataInL1(true)),
  null,
  HConstants.NO_NONCE,
  HConstants.NO_NONCE);
}
Project: ditb    File: LMDIndexDirectStoreFileScanner.java
private List<byte[]> initRowKeyList(FileSystem fileSystem, CacheConfig cacheConf,
    Configuration conf, TreeMap<byte[], TreeSet<byte[]>> indexFamilyMap,
    ScanRange.ScanRangeList rangeList) throws IOException {
  // init
  StoreFile bucketStoreFile =
      new StoreFile(fileSystem, LMDIndexParameters.getTmpBucketFilePath(file.getPath()), conf,
          cacheConf, BloomType.NONE);
  StoreFile secondaryStoreFile =
      new StoreFile(fileSystem, LMDIndexParameters.getTmpSecondaryFilePath(file.getPath()), conf,
          cacheConf, BloomType.NONE);
  StoreFileScanner bucketScanner = getStoreFileScanner(bucketStoreFile);
  StoreFileScanner secondaryScanner = getStoreFileScanner(secondaryStoreFile);
  // get hit buckets
  MDRange[] ranges = getRanges(indexFamilyMap, rangeList);
  List<LMDBucket> bucketList = getBucketRanges(bucketScanner, ranges);
  // scan rowkeys based on the buckets
  List<byte[]> rowkeyList = getRawRowkeyList(secondaryScanner, bucketList, ranges);
  // deinit
  bucketScanner.close();
  bucketStoreFile.closeReader(true);
  secondaryScanner.close();
  secondaryStoreFile.closeReader(true);
  return rowkeyList;
}
Project: ditb    File: HBaseTestingUtility.java
/**
 * Create a table.
 * @param htd the table descriptor
 * @param families column families to create
 * @param splitKeys initial region split keys
 * @param c Configuration to use
 * @return An HTable instance for the created table.
 * @throws IOException
 */
public HTable createTable(HTableDescriptor htd, byte[][] families, byte[][] splitKeys,
    Configuration c) throws IOException {
  for (byte[] family : families) {
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    // Disable blooms (they are on by default as of 0.95) because tests have hard-coded
    // counts of what to expect in the block cache, etc., and blooms being on interferes
    // with those counts.
    hcd.setBloomFilterType(BloomType.NONE);
    htd.addFamily(hcd);
  }
  getHBaseAdmin().createTable(htd, splitKeys);
  // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until
  // they are assigned.
  waitUntilAllRegionsAssigned(htd.getTableName());
  return (HTable) getConnection().getTable(htd.getTableName());
}
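A hedged sketch of how this helper is typically driven from a test; HBaseTestingUtility, startMiniCluster, and getConfiguration are standard HBase test APIs, while the table name, families, and split key below are illustrative:

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableExample {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();
    try {
      HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t1"));
      byte[][] families = { Bytes.toBytes("f1"), Bytes.toBytes("f2") };
      byte[][] splitKeys = { Bytes.toBytes("m") }; // one split key -> two regions
      // Bloom filters get disabled per family inside createTable, as shown above.
      HTable table = util.createTable(htd, families, splitKeys, util.getConfiguration());
      table.put(new Put(Bytes.toBytes("row1"))
          .addColumn(Bytes.toBytes("f1"), Bytes.toBytes("q"), Bytes.toBytes("v")));
      table.close();
    } finally {
      util.shutdownMiniCluster();
    }
  }
}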
Project: ditb    File: HBaseTestingUtility.java
/**
 * Create a table.
 * @param tableName name of the table
 * @param families column families to create
 * @param c Configuration to use
 * @return An HTable instance for the created table.
 * @throws IOException
 */
public HTable createTable(byte[] tableName, byte[][] families,
    final Configuration c)
throws IOException {
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
  for(byte[] family : families) {
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    // Disable blooms (they are on by default as of 0.95) because tests have hard-coded
    // counts of what to expect in the block cache, etc., and blooms being on interferes
    // with those counts.
    hcd.setBloomFilterType(BloomType.NONE);
    desc.addFamily(hcd);
  }
  getHBaseAdmin().createTable(desc);
  return new HTable(c, desc.getTableName());
}
Project: ditb    File: TestLoadIncrementalHFiles.java
@Test
public void testSplitALot() throws Exception {
  runTest("testSplitALot", BloomType.NONE,
    new byte[][] {
      Bytes.toBytes("aaaa"), Bytes.toBytes("bbb"),
      Bytes.toBytes("ccc"), Bytes.toBytes("ddd"),
      Bytes.toBytes("eee"), Bytes.toBytes("fff"),
      Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
      Bytes.toBytes("iii"), Bytes.toBytes("lll"),
      Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
      Bytes.toBytes("ooo"), Bytes.toBytes("ppp"),
      Bytes.toBytes("qqq"), Bytes.toBytes("rrr"),
      Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
      Bytes.toBytes("uuu"), Bytes.toBytes("vvv"),
      Bytes.toBytes("zzz"),
    },
    new byte[][][] {
      new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("zzz") },
    }
  );
}
Project: ditb    File: LoadTestTool.java
private void parseColumnFamilyOptions(CommandLine cmd) {
  String dataBlockEncodingStr = cmd.getOptionValue(OPT_DATA_BLOCK_ENCODING);
  dataBlockEncodingAlgo = dataBlockEncodingStr == null ? null :
      DataBlockEncoding.valueOf(dataBlockEncodingStr);

  String compressStr = cmd.getOptionValue(OPT_COMPRESSION);
  compressAlgo = compressStr == null ? Compression.Algorithm.NONE :
      Compression.Algorithm.valueOf(compressStr);

  String bloomStr = cmd.getOptionValue(OPT_BLOOM);
  bloomType = bloomStr == null ? BloomType.ROW :
      BloomType.valueOf(bloomStr);

  inMemoryCF = cmd.hasOption(OPT_INMEMORY);
  if (cmd.hasOption(OPT_ENCRYPTION)) {
    cipher = Encryption.getCipher(conf, cmd.getOptionValue(OPT_ENCRYPTION));
  }

}
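Since BloomType.valueOf requires an exact enum constant name and throws IllegalArgumentException otherwise, the defaulting pattern above can be isolated as a small sketch; parseBloom is a hypothetical helper, not part of LoadTestTool:

import org.apache.hadoop.hbase.regionserver.BloomType;

public class BloomOptionSketch {
  // Mirrors the ditb default: a missing option means BloomType.ROW.
  static BloomType parseBloom(String bloomStr) {
    return bloomStr == null ? BloomType.ROW : BloomType.valueOf(bloomStr);
  }

  public static void main(String[] args) {
    System.out.println(parseBloom(null));     // ROW
    System.out.println(parseBloom("ROWCOL")); // ROWCOL
    // parseBloom("row") would throw IllegalArgumentException: valueOf is case-sensitive.
  }
}

The HIndex copy of parseColumnFamilyOptions further down this page defaults bloomType to null instead of ROW, leaving the column family's own default in force.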
Project: ditb    File: HTableDescriptor.java
public static HTableDescriptor metaTableDescriptor(final Configuration conf)
    throws IOException {
  HTableDescriptor metaDescriptor = new HTableDescriptor(
    TableName.META_TABLE_NAME,
    new HColumnDescriptor[] {
      new HColumnDescriptor(HConstants.CATALOG_FAMILY)
        .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
          HConstants.DEFAULT_HBASE_META_VERSIONS))
        .setInMemory(true)
        .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
          HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
        .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
        // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
        .setBloomFilterType(BloomType.NONE)
       });
  metaDescriptor.addCoprocessor(
    "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
    null, Coprocessor.PRIORITY_SYSTEM, null);
  return metaDescriptor;
}
Project: incubator-atlas    File: HBaseBasedAuditRepository.java
private void createTableIfNotExists() throws AtlasException {
    Admin admin = null;
    try {
        admin = connection.getAdmin();
        LOG.info("Checking if table {} exists", tableName.getNameAsString());
        if (!admin.tableExists(tableName)) {
            LOG.info("Creating table {}", tableName.getNameAsString());
            HTableDescriptor tableDescriptor = new HTableDescriptor(tableName);
            HColumnDescriptor columnFamily = new HColumnDescriptor(COLUMN_FAMILY);
            columnFamily.setMaxVersions(1);
            columnFamily.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
            columnFamily.setCompressionType(Compression.Algorithm.GZ);
            columnFamily.setBloomFilterType(BloomType.ROW);
            tableDescriptor.addFamily(columnFamily);
            admin.createTable(tableDescriptor);
        } else {
            LOG.info("Table {} exists", tableName.getNameAsString());
        }
    } catch (IOException e) {
        throw new AtlasException(e);
    } finally {
        close(admin);
    }
}
Project: cloud-bigtable-client    File: TestColumnDescriptorAdapter.java
@Test
public void ignoredOptionsAreIgnored() {
  // We're really checking to make certain we don't trigger an exception for an ignored option:
  descriptor.setCompressionType(Compression.Algorithm.LZ4);
  descriptor.setCompactionCompressionType(Compression.Algorithm.LZ4);
  descriptor.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
  descriptor.setBlockCacheEnabled(false);
  descriptor.setCacheDataOnWrite(true);
  descriptor.setCacheDataInL1(true);
  descriptor.setEvictBlocksOnClose(false);
  descriptor.setBloomFilterType(BloomType.ROW);
  descriptor.setPrefetchBlocksOnOpen(true);
  descriptor.setBlocksize(16 * 1024);
  descriptor.setScope(1); // REPLICATION_SCOPE
  descriptor.setInMemory(true);

  ColumnFamily.Builder result = adapter.adapt(descriptor)
      .clearName()
      .clearGcExpression();

  Assert.assertArrayEquals(
      new byte[0],
      result.build().toByteArray());
}
Project: pbase    File: VisibilityController.java
/********************************* Master related hooks **********************************/

  @Override
  public void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
    // Need to create the new system table for labels here
    MasterServices master = ctx.getEnvironment().getMasterServices();
    if (!MetaTableAccessor.tableExists(master.getConnection(), LABELS_TABLE_NAME)) {
      HTableDescriptor labelsTable = new HTableDescriptor(LABELS_TABLE_NAME);
      HColumnDescriptor labelsColumn = new HColumnDescriptor(LABELS_TABLE_FAMILY);
      labelsColumn.setBloomFilterType(BloomType.NONE);
      labelsColumn.setBlockCacheEnabled(false); // We will cache all the labels; no need for
                                                 // the normal table block cache.
      labelsTable.addFamily(labelsColumn);
      // Keep the "labels" table to a single region always; we do not expect too many
      // labels in the system.
      labelsTable.setValue(HTableDescriptor.SPLIT_POLICY,
          DisabledRegionSplitPolicy.class.getName());
      labelsTable.setValue(Bytes.toBytes(HConstants.DISALLOW_WRITES_IN_RECOVERING),
          Bytes.toBytes(true));
      master.createTable(labelsTable, null);
    }
  }
Project: pbase    File: HBaseTestingUtility.java
/**
 * Create a table.
 * @param htd the table descriptor
 * @param families column families to create
 * @param c Configuration to use
 * @return An HTable instance for the created table.
 * @throws IOException
 */
public HTable createTable(HTableDescriptor htd, byte[][] families, Configuration c)
throws IOException {
  for(byte[] family : families) {
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    // Disable blooms (they are on by default as of 0.95) because tests have hard-coded
    // counts of what to expect in the block cache, etc., and blooms being on interferes
    // with those counts.
    hcd.setBloomFilterType(BloomType.NONE);
    htd.addFamily(hcd);
  }
  getHBaseAdmin().createTable(htd);
  // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned.
  waitUntilAllRegionsAssigned(htd.getTableName());
  return (HTable)getConnection().getTable(htd.getTableName());
}
Project: pbase    File: HBaseTestingUtility.java
/**
 * Create a table.
 * @param tableName name of the table
 * @param families column families to create
 * @param c Configuration to use
 * @return An HTable instance for the created table.
 * @throws IOException
 */
public HTable createTable(byte[] tableName, byte[][] families,
    final Configuration c)
throws IOException {
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
  for(byte[] family : families) {
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    // Disable blooms (they are on by default as of 0.95) because tests have hard-coded
    // counts of what to expect in the block cache, etc., and blooms being on interferes
    // with those counts.
    hcd.setBloomFilterType(BloomType.NONE);
    desc.addFamily(hcd);
  }
  getHBaseAdmin().createTable(desc);
  return new HTable(c, desc.getTableName());
}
Project: pbase    File: LoadTestTool.java
private void parseColumnFamilyOptions(CommandLine cmd) {
  String dataBlockEncodingStr = cmd.getOptionValue(OPT_DATA_BLOCK_ENCODING);
  dataBlockEncodingAlgo = dataBlockEncodingStr == null ? null :
      DataBlockEncoding.valueOf(dataBlockEncodingStr);

  String compressStr = cmd.getOptionValue(OPT_COMPRESSION);
  compressAlgo = compressStr == null ? Compression.Algorithm.NONE :
      Compression.Algorithm.valueOf(compressStr);

  String bloomStr = cmd.getOptionValue(OPT_BLOOM);
  bloomType = bloomStr == null ? BloomType.ROW :
      BloomType.valueOf(bloomStr);

  inMemoryCF = cmd.hasOption(OPT_INMEMORY);
  if (cmd.hasOption(OPT_ENCRYPTION)) {
    cipher = Encryption.getCipher(conf, cmd.getOptionValue(OPT_ENCRYPTION));
  }

}
Project: pbase    File: HTableDescriptor.java
public static HTableDescriptor metaTableDescriptor(final Configuration conf)
    throws IOException {
  HTableDescriptor metaDescriptor = new HTableDescriptor(
    TableName.META_TABLE_NAME,
    new HColumnDescriptor[] {
      new HColumnDescriptor(HConstants.CATALOG_FAMILY)
        .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
          HConstants.DEFAULT_HBASE_META_VERSIONS))
        .setInMemory(true)
        .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
          HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
        .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
        // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
        .setBloomFilterType(BloomType.NONE)
       });
  metaDescriptor.addCoprocessor(
    "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
    null, Coprocessor.PRIORITY_SYSTEM, null);
  return metaDescriptor;
}
Project: HIndex    File: ThriftUtilities.java
/**
 * This utility method creates a new HBase HColumnDescriptor object based on a
 * Thrift ColumnDescriptor "struct".
 *
 * @param in
 *          Thrift ColumnDescriptor object
 * @return HColumnDescriptor
 * @throws IllegalArgument if the column name is empty
 */
public static HColumnDescriptor colDescFromThrift(ColumnDescriptor in)
    throws IllegalArgument {
  Compression.Algorithm comp =
    Compression.getCompressionAlgorithmByName(in.compression.toLowerCase());
  BloomType bt =
    BloomType.valueOf(in.bloomFilterType);

  if (in.name == null || !in.name.hasRemaining()) {
    throw new IllegalArgument("column name is empty");
  }
  byte [] parsedName = KeyValue.parseColumn(Bytes.getBytes(in.name))[0];
  HColumnDescriptor col = new HColumnDescriptor(parsedName)
      .setMaxVersions(in.maxVersions)
      .setCompressionType(comp)
      .setInMemory(in.inMemory)
      .setBlockCacheEnabled(in.blockCacheEnabled)
      .setTimeToLive(in.timeToLive)
      .setBloomFilterType(bt);
  return col;
}
Project: HIndex    File: VisibilityController.java
/********************************* Master related hooks **********************************/

  @Override
  public void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
    // Need to create the new system table for labels here
    MasterServices master = ctx.getEnvironment().getMasterServices();
    if (!MetaReader.tableExists(master.getCatalogTracker(), LABELS_TABLE_NAME)) {
      HTableDescriptor labelsTable = new HTableDescriptor(LABELS_TABLE_NAME);
      HColumnDescriptor labelsColumn = new HColumnDescriptor(LABELS_TABLE_FAMILY);
      labelsColumn.setBloomFilterType(BloomType.NONE);
      labelsColumn.setBlockCacheEnabled(false); // We will cache all the labels; no need for
                                                 // the normal table block cache.
      labelsTable.addFamily(labelsColumn);
      // Keep the "labels" table to a single region always; we do not expect too many
      // labels in the system.
      labelsTable.setValue(HTableDescriptor.SPLIT_POLICY,
          DisabledRegionSplitPolicy.class.getName());
      labelsTable.setValue(Bytes.toBytes(HConstants.DISALLOW_WRITES_IN_RECOVERING),
          Bytes.toBytes(true));
      master.createTable(labelsTable, null);
    }
  }
Project: HIndex    File: HBaseTestingUtility.java
/**
 * Create a table.
 * @param htd the table descriptor
 * @param families column families to create
 * @param c Configuration to use
 * @return An HTable instance for the created table.
 * @throws IOException
 */
public HTable createTable(HTableDescriptor htd, byte[][] families, Configuration c)
throws IOException {
  for(byte[] family : families) {
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    // Disable blooms (they are on by default as of 0.95) because tests have hard-coded
    // counts of what to expect in the block cache, etc., and blooms being on interferes
    // with those counts.
    hcd.setBloomFilterType(BloomType.NONE);
    htd.addFamily(hcd);
  }
  getHBaseAdmin().createTable(htd);
  // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned.
  waitUntilAllRegionsAssigned(htd.getTableName());
  return new HTable(c, htd.getTableName());
}
Project: HIndex    File: HBaseTestingUtility.java
/**
 * Create a table.
 * @param tableName name of the table
 * @param families column families to create
 * @param c Configuration to use
 * @return An HTable instance for the created table.
 * @throws IOException
 */
public HTable createTable(byte[] tableName, byte[][] families,
    final Configuration c)
throws IOException {
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
  for(byte[] family : families) {
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    // Disable blooms (they are on by default as of 0.95) because tests have hard-coded
    // counts of what to expect in the block cache, etc., and blooms being on interferes
    // with those counts.
    hcd.setBloomFilterType(BloomType.NONE);
    desc.addFamily(hcd);
  }
  getHBaseAdmin().createTable(desc);
  return new HTable(c, tableName);
}
Project: HIndex    File: LoadTestTool.java
private void parseColumnFamilyOptions(CommandLine cmd) {
  String dataBlockEncodingStr = cmd.getOptionValue(OPT_DATA_BLOCK_ENCODING);
  dataBlockEncodingAlgo = dataBlockEncodingStr == null ? null :
      DataBlockEncoding.valueOf(dataBlockEncodingStr);

  String compressStr = cmd.getOptionValue(OPT_COMPRESSION);
  compressAlgo = compressStr == null ? Compression.Algorithm.NONE :
      Compression.Algorithm.valueOf(compressStr);

  String bloomStr = cmd.getOptionValue(OPT_BLOOM);
  bloomType = bloomStr == null ? null :
      BloomType.valueOf(bloomStr);

  inMemoryCF = cmd.hasOption(OPT_INMEMORY);
  if (cmd.hasOption(OPT_ENCRYPTION)) {
    cipher = Encryption.getCipher(conf, cmd.getOptionValue(OPT_ENCRYPTION));
  }
}
Project: hbase-in-action    File: HColumnDescriptorExample.java
public static void main(String[] args) throws IOException, InterruptedException {
    HColumnDescriptor desc = new HColumnDescriptor("colfam1").setValue("test-key", "test-value")
            .setBloomFilterType(BloomType.ROWCOL);

    log.info("Column Descriptor: " + desc);

    for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> entry : desc.getValues().entrySet()) {
        log.info(Bytes.toString(entry.getKey().get()) + " -> " + Bytes.toString(entry.getValue().get()) + ", ");
    }

    log.info("Defaults: " + HColumnDescriptor.getDefaultValues());

    log.info("Custom: " + desc.toStringCustomizedValues());

    log.info("Units:");
    log.info(HColumnDescriptor.TTL + " -> " + desc.getUnit(HColumnDescriptor.TTL));
    log.info(HColumnDescriptor.BLOCKSIZE + " -> " + desc.getUnit(HColumnDescriptor.BLOCKSIZE));
}
Project: hbase    File: ThriftUtilities.java
/**
 * This utility method creates a new HBase HColumnDescriptor object based on a
 * Thrift ColumnDescriptor "struct".
 *
 * @param in Thrift ColumnDescriptor object
 * @return HColumnDescriptor
 * @throws IllegalArgument if the column name is empty
 */
public static HColumnDescriptor colDescFromThrift(ColumnDescriptor in)
    throws IllegalArgument {
  Compression.Algorithm comp =
    Compression.getCompressionAlgorithmByName(in.compression.toLowerCase(Locale.ROOT));
  BloomType bt =
    BloomType.valueOf(in.bloomFilterType);

  if (in.name == null || !in.name.hasRemaining()) {
    throw new IllegalArgument("column name is empty");
  }
  byte [] parsedName = CellUtil.parseColumn(Bytes.getBytes(in.name))[0];
  HColumnDescriptor col = new HColumnDescriptor(parsedName)
      .setMaxVersions(in.maxVersions)
      .setCompressionType(comp)
      .setInMemory(in.inMemory)
      .setBlockCacheEnabled(in.blockCacheEnabled)
      .setTimeToLive(in.timeToLive > 0 ? in.timeToLive : Integer.MAX_VALUE)
      .setBloomFilterType(bt);
  return col;
}
Project: hbase    File: VisibilityController.java
/********************************* Master related hooks **********************************/

  @Override
  public void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
    // Need to create the new system table for labels here
    if (!MetaTableAccessor.tableExists(ctx.getEnvironment().getConnection(), LABELS_TABLE_NAME)) {
      HTableDescriptor labelsTable = new HTableDescriptor(LABELS_TABLE_NAME);
      HColumnDescriptor labelsColumn = new HColumnDescriptor(LABELS_TABLE_FAMILY);
      labelsColumn.setBloomFilterType(BloomType.NONE);
      labelsColumn.setBlockCacheEnabled(false); // We will cache all the labels; no need for
                                                 // the normal table block cache.
      labelsTable.addFamily(labelsColumn);
      // Keep the "labels" table to a single region always; we do not expect too many
      // labels in the system.
      labelsTable.setValue(HTableDescriptor.SPLIT_POLICY,
          DisabledRegionSplitPolicy.class.getName());
      try (Admin admin = ctx.getEnvironment().getConnection().getAdmin()) {
        admin.createTable(labelsTable);
      }
    }
  }
Project: hbase    File: MobUtils.java
/**
 * Validates a mob file by opening and closing it.
 * @param conf The current configuration.
 * @param fs The current file system.
 * @param path The path where the mob file is saved.
 * @param cacheConfig The current cache config.
 * @param primaryReplica Whether the store file is for a primary replica.
 */
private static void validateMobFile(Configuration conf, FileSystem fs, Path path,
    CacheConfig cacheConfig, boolean primaryReplica) throws IOException {
  HStoreFile storeFile = null;
  try {
    storeFile = new HStoreFile(fs, path, conf, cacheConfig, BloomType.NONE, primaryReplica);
    storeFile.initReader();
  } catch (IOException e) {
    LOG.error("Failed to open mob file[" + path + "], keep it in temp directory.", e);
    throw e;
  } finally {
    if (storeFile != null) {
      storeFile.closeStoreFile(false);
    }
  }
}
Project: hbase    File: BloomFilterUtil.java
/**
 * Creates a Bloom filter chunk of the given size.
 *
 * @param byteSizeHint the desired number of bytes for the Bloom filter bit
 *          array. Will be increased so that folding is possible.
 * @param errorRate target false positive rate of the Bloom filter
 * @param hashType Bloom filter hash function type
 * @param foldFactor fold factor; the byte size is rounded so the bit array
 *          can be folded in half up to this many times
 * @param bloomType the Bloom filter type, e.g. ROW or ROWCOL
 * @return the new Bloom filter of the desired size
 */
public static BloomFilterChunk createBySize(int byteSizeHint,
    double errorRate, int hashType, int foldFactor, BloomType bloomType) {
  BloomFilterChunk bbf = new BloomFilterChunk(hashType, bloomType);

  bbf.byteSize = computeFoldableByteSize(byteSizeHint * 8L, foldFactor);
  long bitSize = bbf.byteSize * 8;
  bbf.maxKeys = (int) idealMaxKeys(bitSize, errorRate);
  bbf.hashCount = optimalFunctionCount(bbf.maxKeys, bitSize);

  // Adjust max keys to bring error rate closer to what was requested,
  // because byteSize was adjusted to allow for folding, and hashCount was
  // rounded.
  bbf.maxKeys = (int) computeMaxKeys(bitSize, errorRate, bbf.hashCount);

  return bbf;
}
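A hedged usage sketch of createBySize; the size hint, error rate, and fold factor are illustrative (7 matches HBase's usual io.storefile.bloom.max.fold default), and Hash.MURMUR_HASH is one of the hash types defined by org.apache.hadoop.hbase.util.Hash. For a bit array of m bits, the usual Bloom filter sizing formulas behind the idealMaxKeys and optimalFunctionCount calls above work out to maxKeys ~= m * (ln 2)^2 / -ln(errorRate) and hashCount ~= (m / maxKeys) * ln 2:

import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.BloomFilterChunk;
import org.apache.hadoop.hbase.util.BloomFilterUtil;
import org.apache.hadoop.hbase.util.Hash;

public class CreateBySizeExample {
  public static void main(String[] args) {
    int byteSizeHint = 128 * 1024; // ~1M bits; rounded up so folding is possible
    double errorRate = 0.01;       // 1% target false-positive rate
    int foldFactor = 7;

    BloomFilterChunk chunk = BloomFilterUtil.createBySize(
        byteSizeHint, errorRate, Hash.MURMUR_HASH, foldFactor, BloomType.ROW);
    System.out.println("byteSize=" + chunk.getByteSize()
        + " maxKeys=" + chunk.getMaxKeys());
  }
}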
Project: hbase    File: BloomFilterFactory.java
/**
 * Creates a new Delete Family Bloom filter at the time of
 * {@link org.apache.hadoop.hbase.regionserver.HStoreFile} writing.
 * @param conf the current configuration
 * @param cacheConf the cache configuration
 * @param maxKeys an estimate of the number of keys we expect to insert.
 *        Irrelevant if compound Bloom filters are enabled.
 * @param writer the HFile writer
 * @return the new Bloom filter, or null in case Bloom filters are disabled
 *         or when one could not be created.
 */
public static BloomFilterWriter createDeleteBloomAtWrite(Configuration conf,
    CacheConfig cacheConf, int maxKeys, HFile.Writer writer) {
  if (!isDeleteFamilyBloomEnabled(conf)) {
    LOG.info("Delete Bloom filters are disabled by configuration for "
        + writer.getPath()
        + (conf == null ? " (configuration is null)" : ""));
    return null;
  }

  float err = getErrorRate(conf);

  int maxFold = getMaxFold(conf);
  // In case of compound Bloom filters we ignore the maxKeys hint.
  CompoundBloomFilterWriter bloomWriter = new CompoundBloomFilterWriter(getBloomBlockSize(conf),
      err, Hash.getHashType(conf), maxFold, cacheConf.shouldCacheBloomsOnWrite(),
      null, BloomType.ROW);
  writer.addInlineBlockWriter(bloomWriter);
  return bloomWriter;
}
Project: hbase    File: BloomFilterChunk.java
public void add(Cell cell) {
  /*
   * For faster hashing, use combinatorial generation
   * http://www.eecs.harvard.edu/~kirsch/pubs/bbbf/esa06.pdf
   */
  int hash1;
  int hash2;
  HashKey<Cell> hashKey;
  if (this.bloomType == BloomType.ROW) {
    hashKey = new RowBloomHashKey(cell);
    hash1 = this.hash.hash(hashKey, 0);
    hash2 = this.hash.hash(hashKey, hash1);
  } else {
    hashKey = new RowColBloomHashKey(cell);
    hash1 = this.hash.hash(hashKey, 0);
    hash2 = this.hash.hash(hashKey, hash1);
  }
  setHashLoc(hash1, hash2);
}
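The branch above only picks the hash key type; both arms then derive all probe positions from the same two hash values. That is the combinatorial (Kirsch-Mitzenmacher) scheme cited in the comment: the i-th probe is hash1 + i * hash2 (mod m). A standalone sketch of the idea, simplified from what HBase's setHashLoc does, with illustrative sizes and stand-in hash values:

import java.util.BitSet;

public class DoubleHashingSketch {
  public static void main(String[] args) {
    long bitSize = 1L << 20; // m = 1Mi bits, illustrative
    int hashCount = 7;       // k probes
    int hash1 = 0x2f1a9c3b;  // stand-in for hash(key, 0)
    int hash2 = 0x51ed2705;  // stand-in for hash(key, hash1)

    BitSet bits = new BitSet((int) bitSize);
    for (int i = 0; i < hashCount; i++) {
      // g_i(key) = hash1 + i * hash2 (mod m): k positions from just two hashes
      long loc = Math.abs((hash1 + (long) i * hash2) % bitSize);
      bits.set((int) loc);
    }
    System.out.println("distinct bits set: " + bits.cardinality());
  }
}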
Project: hbase    File: CompoundBloomFilter.java
@Override
public boolean contains(Cell keyCell, ByteBuff bloom, BloomType type) {
  int block = index.rootBlockContainingKey(keyCell);
  if (block < 0) {
    return false; // This key is not in the file.
  }
  boolean result;
  HFileBlock bloomBlock = getBloomBlock(block);
  try {
    ByteBuff bloomBuf = bloomBlock.getBufferReadOnly();
    result = BloomFilterUtil.contains(keyCell, bloomBuf, bloomBlock.headerSize(),
        bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount, type);
  } finally {
    // After the use return back the block if it was served from a cache.
    reader.returnBlock(bloomBlock);
  }
  if (numPositivesPerChunk != null && result) {
    // Update statistics. Only used in unit tests.
    ++numPositivesPerChunk[block];
  }
  return result;
}
Project: HBase-LOB    File: TestMobFile.java
@Test
public void testGetScanner() throws Exception {
  FileSystem fs = FileSystem.get(conf);
  Path testDir = FSUtils.getRootDir(conf);
  Path outputDir = new Path(new Path(testDir, TABLE), FAMILY);
  HFileContext meta = new HFileContextBuilder().withBlockSize(8*1024).build();
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs)
          .withOutputDir(outputDir)
          .withFileContext(meta)
          .build();
  MobTestUtil.writeStoreFile(writer, getName());

  MobFile mobFile = new MobFile(new StoreFile(fs, writer.getPath(),
      conf, cacheConf, BloomType.NONE));
  assertNotNull(mobFile.getScanner());
  assertTrue(mobFile.getScanner() instanceof StoreFileScanner);
}
Project: hbase    File: TestPartitionedMobCompactor.java
/**
 * Gets the number of delete cells in the del files.
 * @param paths the del file paths
 * @return the number of delete cells
 */
private int countDelCellsInDelFiles(List<Path> paths) throws IOException {
  List<HStoreFile> sfs = new ArrayList<>();
  int size = 0;
  for (Path path : paths) {
    HStoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.NONE, true);
    sfs.add(sf);
  }
  List<KeyValueScanner> scanners = new ArrayList<>(StoreFileScanner.getScannersForStoreFiles(sfs,
    false, true, false, false, HConstants.LATEST_TIMESTAMP));
  long timeToPurgeDeletes = Math.max(conf.getLong("hbase.hstore.time.to.purge.deletes", 0), 0);
  long ttl = HStore.determineTTLFromFamily(hcd);
  ScanInfo scanInfo = new ScanInfo(conf, hcd, ttl, timeToPurgeDeletes, CellComparatorImpl.COMPARATOR);
  StoreScanner scanner = new StoreScanner(scanInfo, ScanType.COMPACT_RETAIN_DELETES, scanners);
  List<Cell> results = new ArrayList<>();
  boolean hasMore = true;

  while (hasMore) {
    hasMore = scanner.next(results);
    size += results.size();
    results.clear();
  }
  scanner.close();
  return size;
}