Java class org.apache.hadoop.hbase.regionserver.StoreFile.BloomType: example usages from open-source projects

Project: gemfirexd-oss    File: HFileSortedOplog.java
public HFileSortedOplogWriter() throws IOException {
      writer = HFile.getWriterFactory(hconf, hcache)
          .withPath(fs, path)
          .withBlockSize(sopConfig.getBlockSize())
          .withBytesPerChecksum(sopConfig.getBytesPerChecksum())
          .withChecksumType(HFileSortedOplogFactory.convertChecksum(sopConfig.getChecksum()))
//          .withComparator(sopConfig.getComparator())
          .withCompression(HFileSortedOplogFactory.convertCompression(sopConfig.getCompression()))
          .withDataBlockEncoder(HFileSortedOplogFactory.convertEncoding(sopConfig.getKeyEncoding()))
          .create();

      bfw = sopConfig.isBloomFilterEnabled() ?
//          BloomFilterFactory.createGeneralBloomAtWrite(hconf, hcache, BloomType.ROW,
//              0, writer, sopConfig.getComparator())
          BloomFilterFactory.createGeneralBloomAtWrite(hconf, hcache, BloomType.ROW,
              0, writer)
          : null;
    }
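For orientation, here is a minimal usage sketch of how this writer/bloom pair would typically be driven, assuming the HBase 0.94 API: createGeneralBloomAtWrite already registers bfw as an inline block writer on the HFile writer, the cell values are hypothetical, and addGeneralBloomFilter is assumed to be the 0.94 close-path method rather than confirmed from this project's code.

      // Minimal usage sketch (HBase 0.94 API assumed; values are hypothetical).
      KeyValue kv = new KeyValue(Bytes.toBytes("row-1"), Bytes.toBytes("f"),
          Bytes.toBytes("q"), Bytes.toBytes("v"));
      writer.append(kv);                                   // data cell into the HFile
      if (bfw != null) {
        // BloomType.ROW: only the row-key bytes feed the bloom filter
        bfw.add(kv.getBuffer(), kv.getRowOffset(), kv.getRowLength());
      }
      // close path: attach bloom metadata before closing the HFile
      if (bfw != null) {
        writer.addGeneralBloomFilter(bfw);                 // assumed 0.94 method name
      }
      writer.close();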
Project: gemfirexd-oss    File: HFileSortedOplog.java
public HFileSortedOplogWriter(int keys) throws IOException {
      try {
        int hfileBlockSize = Integer.getInteger(
            HoplogConfig.HFILE_BLOCK_SIZE_CONF, (1 << 16));

        Algorithm compress = Algorithm.valueOf(System.getProperty(HoplogConfig.COMPRESSION,
            HoplogConfig.COMPRESSION_DEFAULT));

//        ByteComparator bc = new ByteComparator();
        writer = HFile.getWriterFactory(conf, cacheConf)
            .withPath(fsProvider.getFS(), path)
            .withBlockSize(hfileBlockSize)
//            .withComparator(bc)
            .withCompression(compress)
            .create();
        bfw = BloomFilterFactory.createGeneralBloomAtWrite(conf, cacheConf, BloomType.ROW, keys,
            writer);

        logger.fine("Created hoplog writer with compression " + compress);
      } catch (IOException e) {
        logger.fine("IO Error while creating writer");
        throw e;
      }
    }
Project: LCIndex-HBase-0.94.16    File: ThriftUtilities.java
/**
 * This utility method creates a new HBase HColumnDescriptor object based on a
 * Thrift ColumnDescriptor "struct".
 *
 * @param in
 *          Thrift ColumnDescriptor object
 * @return HColumnDescriptor
 * @throws IllegalArgument if the column name is missing or empty
 */
public static HColumnDescriptor colDescFromThrift(ColumnDescriptor in)
    throws IllegalArgument {
  Compression.Algorithm comp =
    Compression.getCompressionAlgorithmByName(in.compression.toLowerCase());
  StoreFile.BloomType bt =
    BloomType.valueOf(in.bloomFilterType);

  if (in.name == null || !in.name.hasRemaining()) {
    throw new IllegalArgument("column name is empty");
  }
  byte [] parsedName = KeyValue.parseColumn(Bytes.getBytes(in.name))[0];
  HColumnDescriptor col = new HColumnDescriptor(parsedName)
      .setMaxVersions(in.maxVersions)
      .setCompressionType(comp)
      .setInMemory(in.inMemory)
      .setBlockCacheEnabled(in.blockCacheEnabled)
      .setTimeToLive(in.timeToLive)
      .setBloomFilterType(bt);
  return col;
}
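A usage sketch for this converter, assuming the Thrift-generated ColumnDescriptor from org.apache.hadoop.hbase.thrift.generated; the field values here are illustrative, not taken from the project:

  ColumnDescriptor in = new ColumnDescriptor();             // Thrift struct with public fields
  in.name = java.nio.ByteBuffer.wrap(Bytes.toBytes("cf:")); // family name plus the ':' separator
  in.maxVersions = 3;
  in.compression = "NONE";
  in.inMemory = false;
  in.blockCacheEnabled = true;
  in.timeToLive = Integer.MAX_VALUE;
  in.bloomFilterType = "ROW";                               // must name a StoreFile.BloomType constant
  HColumnDescriptor col = ThriftUtilities.colDescFromThrift(in);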
Project: LCIndex-HBase-0.94.16    File: TestBlocksRead.java
/**
 * Callers must afterward call {@link HRegion#closeHRegion(HRegion)}
 * @param tableName
 * @param callingMethod
 * @param conf
 * @param family
 * @throws IOException
 * @return created and initialized region.
 */
private HRegion initHRegion(byte[] tableName, String callingMethod,
    HBaseConfiguration conf, String family) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(tableName);
  HColumnDescriptor familyDesc;
  for (int i = 0; i < BLOOM_TYPE.length; i++) {
    BloomType bloomType = BLOOM_TYPE[i];
    familyDesc = new HColumnDescriptor(family + "_" + bloomType)
        .setBlocksize(1)
        .setBloomFilterType(BLOOM_TYPE[i]);
    htd.addFamily(familyDesc);
  }

  HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
  Path path = new Path(DIR + callingMethod);
  HRegion r = HRegion.createHRegion(info, path, conf, htd);
  blockCache = new CacheConfig(conf).getBlockCache();
  return r;
}
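BLOOM_TYPE is a fixture of the test class; a plausible declaration (one column family per bloom type, matching how HBase's TestBlocksRead exercises all three types; treat the exact values as an assumption) is:

  private static BloomType[] BLOOM_TYPE = new BloomType[] {
      BloomType.ROWCOL, BloomType.ROW, BloomType.NONE };  // assumed fixture, one family per type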
Project: LCIndex-HBase-0.94.16    File: TestStoreFile.java
public void testBloomFilter() throws Exception {
  FileSystem fs = FileSystem.getLocal(conf);
  conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE,
      (float) 0.01);
  conf.setBoolean(BloomFilterFactory.IO_STOREFILE_BLOOM_ENABLED, true);

  // write the file
  Path f = new Path(ROOT_DIR, getName());
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
      StoreFile.DEFAULT_BLOCKSIZE_SMALL)
          .withFilePath(f)
          .withBloomType(StoreFile.BloomType.ROW)
          .withMaxKeyCount(2000)
          .withChecksumType(CKTYPE)
          .withBytesPerChecksum(CKBYTES)
          .build();
  bloomWriteRead(writer, fs);
}
Project: IRIndex    File: ThriftUtilities.java
/**
 * This utility method creates a new HBase HColumnDescriptor object based on a
 * Thrift ColumnDescriptor "struct".
 *
 * @param in
 *          Thrift ColumnDescriptor object
 * @return HColumnDescriptor
 * @throws IllegalArgument if the column name is missing or empty
 */
public static HColumnDescriptor colDescFromThrift(ColumnDescriptor in)
    throws IllegalArgument {
  Compression.Algorithm comp =
    Compression.getCompressionAlgorithmByName(in.compression.toLowerCase());
  StoreFile.BloomType bt =
    BloomType.valueOf(in.bloomFilterType);

  if (in.name == null || !in.name.hasRemaining()) {
    throw new IllegalArgument("column name is empty");
  }
  byte [] parsedName = KeyValue.parseColumn(Bytes.getBytes(in.name))[0];
  HColumnDescriptor col = new HColumnDescriptor(parsedName)
      .setMaxVersions(in.maxVersions)
      .setCompressionType(comp)
      .setInMemory(in.inMemory)
      .setBlockCacheEnabled(in.blockCacheEnabled)
      .setTimeToLive(in.timeToLive)
      .setBloomFilterType(bt);
  return col;
}
Project: IRIndex    File: TestBlocksRead.java
/**
 * Callers must afterward call {@link HRegion#closeHRegion(HRegion)}
 * @param tableName
 * @param callingMethod
 * @param conf
 * @param family
 * @throws IOException
 * @return created and initialized region.
 */
private HRegion initHRegion(byte[] tableName, String callingMethod,
    HBaseConfiguration conf, String family) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(tableName);
  HColumnDescriptor familyDesc;
  for (int i = 0; i < BLOOM_TYPE.length; i++) {
    BloomType bloomType = BLOOM_TYPE[i];
    familyDesc = new HColumnDescriptor(family + "_" + bloomType)
        .setBlocksize(1)
        .setBloomFilterType(BLOOM_TYPE[i]);
    htd.addFamily(familyDesc);
  }

  HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
  Path path = new Path(DIR + callingMethod);
  HRegion r = HRegion.createHRegion(info, path, conf, htd);
  blockCache = new CacheConfig(conf).getBlockCache();
  return r;
}
Project: IRIndex    File: TestStoreFile.java
public void testBloomFilter() throws Exception {
  FileSystem fs = FileSystem.getLocal(conf);
  conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE,
      (float) 0.01);
  conf.setBoolean(BloomFilterFactory.IO_STOREFILE_BLOOM_ENABLED, true);

  // write the file
  Path f = new Path(ROOT_DIR, getName());
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
      StoreFile.DEFAULT_BLOCKSIZE_SMALL)
          .withFilePath(f)
          .withBloomType(StoreFile.BloomType.ROW)
          .withMaxKeyCount(2000)
          .withChecksumType(CKTYPE)
          .withBytesPerChecksum(CKBYTES)
          .build();
  bloomWriteRead(writer, fs);
}
Project: RStore    File: ThriftUtilities.java
/**
 * This utility method creates a new HBase HColumnDescriptor object based on a
 * Thrift ColumnDescriptor "struct".
 *
 * @param in
 *          Thrift ColumnDescriptor object
 * @return HColumnDescriptor
 * @throws IllegalArgument if the column name is missing or empty
 */
public static HColumnDescriptor colDescFromThrift(ColumnDescriptor in)
    throws IllegalArgument {
  Compression.Algorithm comp =
    Compression.getCompressionAlgorithmByName(in.compression.toLowerCase());
  StoreFile.BloomType bt =
    BloomType.valueOf(in.bloomFilterType);

  if (in.name == null || !in.name.hasRemaining()) {
    throw new IllegalArgument("column name is empty");
  }
  byte [] parsedName = KeyValue.parseColumn(Bytes.getBytes(in.name))[0];
  HColumnDescriptor col = new HColumnDescriptor(parsedName,
      in.maxVersions, comp.getName(), in.inMemory, in.blockCacheEnabled,
      in.timeToLive, bt.toString());
  return col;
}
Project: hbase-benchmark    File: BenchmarkDriver.java
/**
 * Attempts to create the table used by this tool with the fixed configuration details
 *
 * @param admin The configured administration used to perform this operation
 */
private void createTable(final HBaseAdmin admin) {
    final String tableName = appConfig.getToolTable();

    try {
        if( !admin.tableExists(tableName) ) {
            HTableDescriptor tableDesc = new HTableDescriptor(tableName.getBytes(Charsets.UTF_8));

            HColumnDescriptor colDesc = new HColumnDescriptor(ConfigConstants.COLUMN_FAMILY);
            colDesc.setBlockCacheEnabled(true).setBlocksize(65536)
                    .setBloomFilterType(BloomType.ROW)
                    .setCompressionType(Algorithm.SNAPPY)
                    .setDataBlockEncoding(DataBlockEncoding.PREFIX)
                    .setMaxVersions(1);

            tableDesc.addFamily(colDesc);

            admin.createTable(tableDesc);
            log.info("Created table: " + tableName);
        } else {
            log.debug("Table already exists, creation skipped");
        }
    } catch (IOException e) {
        log.error("Error occurred during table creation", e);
    }
}
Project: HBase-Research    File: TestBlocksRead.java
/**
 * Callers must afterward call {@link HRegion#closeHRegion(HRegion)}
 * @param tableName
 * @param callingMethod
 * @param conf
 * @param family
 * @throws IOException
 * @return created and initialized region.
 */
private HRegion initHRegion(byte[] tableName, String callingMethod,
    HBaseConfiguration conf, String family) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(tableName);
  HColumnDescriptor familyDesc;
  for (int i = 0; i < BLOOM_TYPE.length; i++) {
    BloomType bloomType = BLOOM_TYPE[i];
    familyDesc = new HColumnDescriptor(family + "_" + bloomType)
        .setBlocksize(1)
        .setBloomFilterType(BLOOM_TYPE[i]);
    htd.addFamily(familyDesc);
  }

  HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
  Path path = new Path(DIR + callingMethod);
  HRegion r = HRegion.createHRegion(info, path, conf, htd);
  blockCache = new CacheConfig(conf).getBlockCache();
  return r;
}
Project: hindex    File: ThriftUtilities.java
/**
 * This utility method creates a new HBase HColumnDescriptor object based on a
 * Thrift ColumnDescriptor "struct".
 *
 * @param in
 *          Thrift ColumnDescriptor object
 * @return HColumnDescriptor
 * @throws IllegalArgument if the column name is missing or empty
 */
public static HColumnDescriptor colDescFromThrift(ColumnDescriptor in)
    throws IllegalArgument {
  Compression.Algorithm comp =
    Compression.getCompressionAlgorithmByName(in.compression.toLowerCase());
  StoreFile.BloomType bt =
    BloomType.valueOf(in.bloomFilterType);

  if (in.name == null || !in.name.hasRemaining()) {
    throw new IllegalArgument("column name is empty");
  }
  byte [] parsedName = KeyValue.parseColumn(Bytes.getBytes(in.name))[0];
  HColumnDescriptor col = new HColumnDescriptor(parsedName)
      .setMaxVersions(in.maxVersions)
      .setCompressionType(comp)
      .setInMemory(in.inMemory)
      .setBlockCacheEnabled(in.blockCacheEnabled)
      .setTimeToLive(in.timeToLive)
      .setBloomFilterType(bt);
  return col;
}
Project: HBase-Research    File: TestStoreFile.java
public void testBloomFilter() throws Exception {
  FileSystem fs = FileSystem.getLocal(conf);
  conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE,
      (float) 0.01);
  conf.setBoolean(BloomFilterFactory.IO_STOREFILE_BLOOM_ENABLED, true);

  // write the file
  Path f = new Path(ROOT_DIR, getName());
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
      StoreFile.DEFAULT_BLOCKSIZE_SMALL)
          .withFilePath(f)
          .withBloomType(StoreFile.BloomType.ROW)
          .withMaxKeyCount(2000)
          .withChecksumType(CKTYPE)
          .withBytesPerChecksum(CKBYTES)
          .build();
  bloomWriteRead(writer, fs);
}
Project: hbase-0.94.8-qod    File: ThriftUtilities.java
/**
 * This utility method creates a new HBase HColumnDescriptor object based on a
 * Thrift ColumnDescriptor "struct".
 *
 * @param in
 *          Thrift ColumnDescriptor object
 * @return HColumnDescriptor
 * @throws IllegalArgument if the column name is missing or empty
 */
public static HColumnDescriptor colDescFromThrift(ColumnDescriptor in)
    throws IllegalArgument {
  Compression.Algorithm comp =
    Compression.getCompressionAlgorithmByName(in.compression.toLowerCase());
  StoreFile.BloomType bt =
    BloomType.valueOf(in.bloomFilterType);

  if (in.name == null || !in.name.hasRemaining()) {
    throw new IllegalArgument("column name is empty");
  }
  byte [] parsedName = KeyValue.parseColumn(Bytes.getBytes(in.name))[0];
  HColumnDescriptor col = new HColumnDescriptor(parsedName)
      .setMaxVersions(in.maxVersions)
      .setCompressionType(comp)
      .setInMemory(in.inMemory)
      .setBlockCacheEnabled(in.blockCacheEnabled)
      .setTimeToLive(in.timeToLive)
      .setBloomFilterType(bt);
  return col;
}
Project: hindex    File: TestStoreFile.java
public void testBloomFilter() throws Exception {
  FileSystem fs = FileSystem.getLocal(conf);
  conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE,
      (float) 0.01);
  conf.setBoolean(BloomFilterFactory.IO_STOREFILE_BLOOM_ENABLED, true);

  // write the file
  Path f = new Path(ROOT_DIR, getName());
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
      StoreFile.DEFAULT_BLOCKSIZE_SMALL)
          .withFilePath(f)
          .withBloomType(StoreFile.BloomType.ROW)
          .withMaxKeyCount(2000)
          .withChecksumType(CKTYPE)
          .withBytesPerChecksum(CKBYTES)
          .build();
  bloomWriteRead(writer, fs);
}
Project: hbase-0.94.8-qod    File: TestStoreFile.java
public void testBloomFilter() throws Exception {
  FileSystem fs = FileSystem.getLocal(conf);
  conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE,
      (float) 0.01);
  conf.setBoolean(BloomFilterFactory.IO_STOREFILE_BLOOM_ENABLED, true);

  // write the file
  Path f = new Path(ROOT_DIR, getName());
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
      StoreFile.DEFAULT_BLOCKSIZE_SMALL)
          .withFilePath(f)
          .withBloomType(StoreFile.BloomType.ROW)
          .withMaxKeyCount(2000)
          .withChecksumType(CKTYPE)
          .withBytesPerChecksum(CKBYTES)
          .build();
  bloomWriteRead(writer, fs);
}
Project: hbase-0.94.8-qod    File: TestBlocksRead.java
/**
 * Callers must afterward call {@link HRegion#closeHRegion(HRegion)}
 * @param tableName
 * @param callingMethod
 * @param conf
 * @param family
 * @throws IOException
 * @return created and initialized region.
 */
private HRegion initHRegion(byte[] tableName, String callingMethod,
    HBaseConfiguration conf, String family) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(tableName);
  HColumnDescriptor familyDesc;
  for (int i = 0; i < BLOOM_TYPE.length; i++) {
    BloomType bloomType = BLOOM_TYPE[i];
    familyDesc = new HColumnDescriptor(family + "_" + bloomType)
        .setBlocksize(1)
        .setBloomFilterType(BLOOM_TYPE[i]);
    htd.addFamily(familyDesc);
  }

  HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
  Path path = new Path(DIR + callingMethod);
  HRegion r = HRegion.createHRegion(info, path, conf, htd);
  blockCache = new CacheConfig(conf).getBlockCache();
  return r;
}
Project: hindex    File: TestBlocksRead.java
/**
 * Callers must afterward call {@link HRegion#closeHRegion(HRegion)}
 * @param tableName
 * @param callingMethod
 * @param conf
 * @param family
 * @throws IOException
 * @return created and initialized region.
 */
private HRegion initHRegion(byte[] tableName, String callingMethod,
    HBaseConfiguration conf, String family) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(tableName);
  HColumnDescriptor familyDesc;
  for (int i = 0; i < BLOOM_TYPE.length; i++) {
    BloomType bloomType = BLOOM_TYPE[i];
    familyDesc = new HColumnDescriptor(family + "_" + bloomType)
        .setBlocksize(1)
        .setBloomFilterType(BLOOM_TYPE[i]);
    htd.addFamily(familyDesc);
  }

  HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
  Path path = new Path(DIR + callingMethod);
  HRegion r = HRegion.createHRegion(info, path, conf, htd);
  blockCache = new CacheConfig(conf).getBlockCache();
  return r;
}
Project: hadooparchitecturebook    File: App.java
private static boolean createTable(byte[] tableName, byte[] columnFamilyName,
    short regionCount, long regionMaxSize, HBaseAdmin admin)
    throws IOException {

  if (admin.tableExists(tableName)) {
    return false;
  }

  HTableDescriptor tableDescriptor = new HTableDescriptor();
  tableDescriptor.setName(tableName);

  HColumnDescriptor columnDescriptor = new HColumnDescriptor(columnFamilyName);

  columnDescriptor.setCompressionType(Compression.Algorithm.SNAPPY);
  columnDescriptor.setBlocksize(64 * 1024);
  columnDescriptor.setBloomFilterType(BloomType.ROW);
  columnDescriptor.setMaxVersions(10);
  tableDescriptor.addFamily(columnDescriptor);

  tableDescriptor.setMaxFileSize(regionMaxSize);
  tableDescriptor.setValue(HTableDescriptor.SPLIT_POLICY,
      ConstantSizeRegionSplitPolicy.class.getName());

  tableDescriptor.setDeferredLogFlush(true);

  regionCount = (short) Math.abs(regionCount);

  int regionRange = Short.MAX_VALUE / regionCount;
  int counter = 0;

  byte[][] splitKeys = new byte[regionCount][];
  for (int i = 0; i < splitKeys.length; i++) {
    counter = counter + regionRange;
    String key = StringUtils.leftPad(Integer.toString(counter), 5, '0');
    splitKeys[i] = Bytes.toBytes(key); // store into the array so createTable receives real split points
    System.out.println(" - Split: " + key);
  }

  admin.createTable(tableDescriptor, splitKeys);
  return true;
}
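A hypothetical invocation of this helper; the table name, family name, and sizing below are illustrative only:

  HBaseAdmin admin = new HBaseAdmin(HBaseConfiguration.create());
  boolean created = createTable(Bytes.toBytes("access_log"), Bytes.toBytes("d"),
      (short) 8, 10L * 1024 * 1024 * 1024, admin);  // 8 pre-split regions, 10 GB max region size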
Project: LCIndex-HBase-0.94.16    File: HColumnDescriptor.java
/**
 * @return bloom filter type used for new StoreFiles in ColumnFamily
 */
public StoreFile.BloomType getBloomFilterType() {
  String n = getValue(BLOOMFILTER);
  if (n == null) {
    n = DEFAULT_BLOOMFILTER;
  }
  return StoreFile.BloomType.valueOf(n.toUpperCase());
}
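The setter counterpart stores the enum name under the same BLOOMFILTER key, so the two round-trip; a minimal sketch:

  HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("cf"));
  hcd.setBloomFilterType(StoreFile.BloomType.ROWCOL);  // persisted as the string "ROWCOL"
  assert hcd.getBloomFilterType() == StoreFile.BloomType.ROWCOL;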
Project: LCIndex-HBase-0.94.16    File: TestLoadIncrementalHFiles.java
/**
 * Test case that creates some regions and loads
 * HFiles that fit snugly inside those regions
 */
@Test
public void testSimpleLoad() throws Exception {
  runTest("testSimpleLoad", BloomType.NONE,
      new byte[][][] {
        new byte[][]{ Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") },
        new byte[][]{ Bytes.toBytes("ddd"), Bytes.toBytes("ooo") },
  });
}
Project: LCIndex-HBase-0.94.16    File: TestLoadIncrementalHFiles.java
/**
 * Test case that creates some regions and loads
 * HFiles that cross the boundaries of those regions
 */
@Test
public void testRegionCrossingLoad() throws Exception {
  runTest("testRegionCrossingLoad", BloomType.NONE,
      new byte[][][] {
        new byte[][]{ Bytes.toBytes("aaaa"), Bytes.toBytes("eee") },
        new byte[][]{ Bytes.toBytes("fff"), Bytes.toBytes("zzz") },
  });
}
Project: LCIndex-HBase-0.94.16    File: TestLoadIncrementalHFiles.java
/**
 * Test loading into a column family that has a ROW bloom filter.
 */
@Test
public void testRegionCrossingRowBloom() throws Exception {
  runTest("testRegionCrossingLoadRowBloom", BloomType.ROW,
      new byte[][][] {
        new byte[][]{ Bytes.toBytes("aaaa"), Bytes.toBytes("eee") },
        new byte[][]{ Bytes.toBytes("fff"), Bytes.toBytes("zzz") },
  });
}
Project: LCIndex-HBase-0.94.16    File: TestLoadIncrementalHFiles.java
/**
 * Test loading into a column family that has a ROWCOL bloom filter.
 */
@Test
public void testRegionCrossingRowColBloom() throws Exception {
  runTest("testRegionCrossingLoadRowColBloom", BloomType.ROWCOL,
      new byte[][][] {
        new byte[][]{ Bytes.toBytes("aaaa"), Bytes.toBytes("eee") },
        new byte[][]{ Bytes.toBytes("fff"), Bytes.toBytes("zzz") },
  });
}
Project: LCIndex-HBase-0.94.16    File: TestLoadIncrementalHFiles.java
private void runTest(String testName, BloomType bloomType, 
        byte[][][] hfileRanges) throws Exception {
  Path dir = util.getDataTestDir(testName);
  FileSystem fs = util.getTestFileSystem();
  dir = dir.makeQualified(fs);
  Path familyDir = new Path(dir, Bytes.toString(FAMILY));

  int hfileIdx = 0;
  for (byte[][] range : hfileRanges) {
    byte[] from = range[0];
    byte[] to = range[1];
    createHFile(util.getConfiguration(), fs, new Path(familyDir, "hfile_"
        + hfileIdx++), FAMILY, QUALIFIER, from, to, 1000);
  }
  int expectedRows = hfileIdx * 1000;

  final byte[] TABLE = Bytes.toBytes("mytable_"+testName);

  HBaseAdmin admin = new HBaseAdmin(util.getConfiguration());
  HTableDescriptor htd = new HTableDescriptor(TABLE);
  HColumnDescriptor familyDesc = new HColumnDescriptor(FAMILY);
  familyDesc.setBloomFilterType(bloomType);
  htd.addFamily(familyDesc);
  admin.createTable(htd, SPLIT_KEYS);

  HTable table = new HTable(util.getConfiguration(), TABLE);
  util.waitTableAvailable(TABLE, 30000);

  LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration());
  loader.doBulkLoad(dir, table);

  assertEquals(expectedRows, util.countRows(table));
}
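The FAMILY, QUALIFIER, and SPLIT_KEYS constants referenced above are fixtures of the test class; declarations along these lines (values assumed, mirroring HBase's own TestLoadIncrementalHFiles) make the snippet self-contained:

  private static final byte[] FAMILY = Bytes.toBytes("myfam");     // assumed fixture
  private static final byte[] QUALIFIER = Bytes.toBytes("myqual"); // assumed fixture
  private static final byte[][] SPLIT_KEYS = new byte[][] {        // region boundaries for createTable
      Bytes.toBytes("ddd"),
      Bytes.toBytes("ppp")
  };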
Project: LCIndex-HBase-0.94.16    File: TestHRegion.java
/**
 * Test case covering the bug fix for HBASE-2823: ensures a correct delete
 * when issuing a row delete on a column family whose bloom filter is set
 * to row+col (BloomType.ROWCOL).
 */
public void testDeleteRowWithBloomFilter() throws IOException {
  byte [] tableName = Bytes.toBytes("testDeleteRowWithBloomFilter");
  byte [] familyName = Bytes.toBytes("familyName");

  // Create Table
  HColumnDescriptor hcd = new HColumnDescriptor(familyName)
      .setMaxVersions(Integer.MAX_VALUE)
      .setBloomFilterType(BloomType.ROWCOL);

  HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
  Path path = new Path(DIR + "TestDeleteRowWithBloomFilter");
  this.region = HRegion.createHRegion(info, path, conf, htd);
  try {
    // Insert some data
    byte row[] = Bytes.toBytes("row1");
    byte col[] = Bytes.toBytes("col1");

    Put put = new Put(row);
    put.add(familyName, col, 1, Bytes.toBytes("SomeRandomValue"));
    region.put(put);
    region.flushcache();

    Delete del = new Delete(row);
    region.delete(del, null, true);
    region.flushcache();

    // Get remaining rows (should have none)
    Get get = new Get(row);
    get.addColumn(familyName, col);

    KeyValue[] keyValues = region.get(get, null).raw();
    assertTrue(keyValues.length == 0);
  } finally {
    HRegion.closeHRegion(this.region);
    this.region = null;
  }
}
Project: LCIndex-HBase-0.94.16    File: EncodedSeekPerformanceTest.java
private List<KeyValue> prepareListOfTestSeeks(Path path) throws IOException {
  List<KeyValue> allKeyValues = new ArrayList<KeyValue>();

  // read all of the key values
  StoreFile storeFile = new StoreFile(testingUtility.getTestFileSystem(),
      path, configuration, cacheConf, BloomType.NONE,
      NoOpDataBlockEncoder.INSTANCE);

  StoreFile.Reader reader = storeFile.createReader();
  StoreFileScanner scanner = reader.getStoreFileScanner(true, false);
  KeyValue current;

  scanner.seek(KeyValue.LOWESTKEY);
  while (null != (current = scanner.next())) {
    allKeyValues.add(current);
  }

  storeFile.closeReader(cacheConf.shouldEvictOnClose());

  // pick seeks at random
  List<KeyValue> seeks = new ArrayList<KeyValue>();
  for (int i = 0; i < numberOfSeeks; ++i) {
    KeyValue keyValue = allKeyValues.get(
        randomizer.nextInt(allKeyValues.size()));
    seeks.add(keyValue);
  }

  clearBlockCache();

  return seeks;
}
Project: LCIndex-HBase-0.94.16    File: TestBlocksRead.java
private KeyValue[] getData(String family, String row, List<String> columns,
    int expBlocksRowCol, int expBlocksRow, int expBlocksNone)
    throws IOException {
  int[] expBlocks = new int[] { expBlocksRowCol, expBlocksRow, expBlocksNone };
  KeyValue[] kvs = null;

  for (int i = 0; i < BLOOM_TYPE.length; i++) {
    BloomType bloomType = BLOOM_TYPE[i];
    byte[] cf = Bytes.toBytes(family + "_" + bloomType);
    long blocksStart = getBlkAccessCount(cf);
    Get get = new Get(Bytes.toBytes(row));

    for (String column : columns) {
      get.addColumn(cf, Bytes.toBytes(column));
    }

    kvs = region.get(get, null).raw();
    long blocksEnd = getBlkAccessCount(cf);
    if (expBlocks[i] != -1) {
      assertEquals("Blocks Read Check for Bloom: " + bloomType, expBlocks[i],
          blocksEnd - blocksStart);
    }
    System.out.println("Blocks Read for Bloom: " + bloomType + " = "
        + (blocksEnd - blocksStart) + "Expected = " + expBlocks[i]);
  }
  return kvs;
}
Project: LCIndex-HBase-0.94.16    File: TestCompoundBloomFilter.java
@Test
public void testCompoundBloomFilter() throws IOException {
  conf.setBoolean(BloomFilterFactory.IO_STOREFILE_BLOOM_ENABLED, true);
  for (int t = 0; t < NUM_TESTS; ++t) {
    conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE,
        (float) TARGET_ERROR_RATES[t]);

    testIdMsg = "in test #" + t + ":";
    Random generationRand = new Random(GENERATION_SEED);
    List<KeyValue> kvs = createSortedKeyValues(generationRand, NUM_KV[t]);
    BloomType bt = BLOOM_TYPES[t];
    Path sfPath = writeStoreFile(t, bt, kvs);
    readStoreFile(t, bt, kvs, sfPath);
  }
}
Project: LCIndex-HBase-0.94.16    File: TestCacheOnWriteInSchema.java
@Before
public void setUp() throws IOException {
  conf = TEST_UTIL.getConfiguration();
  conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);
  conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, false);
  conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, false);
  conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, false);

  fs = HFileSystem.get(conf);

  // Create the schema
  HColumnDescriptor hcd = new HColumnDescriptor(family);
  hcd.setBloomFilterType(BloomType.ROWCOL);
  cowType.modifyFamilySchema(hcd);
  HTableDescriptor htd = new HTableDescriptor(table);
  htd.addFamily(hcd);

  // Create a store based on the schema
  Path basedir = new Path(DIR);
  Path logdir = new Path(DIR+"/logs");
  Path oldLogDir = new Path(basedir, HConstants.HREGION_OLDLOGDIR_NAME);
  fs.delete(logdir, true);
  HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
  HLog hlog = new HLog(fs, logdir, oldLogDir, conf);
  HRegion region = new HRegion(basedir, hlog, fs, conf, info, htd, null);
  store = new Store(basedir, region, hcd, fs, conf);
}
Project: LCIndex-HBase-0.94.16    File: TestStoreFile.java
/**
 * Write a file and then assert that we can read from top and bottom halves
 * using two HalfMapFiles.
 * @throws Exception
 */
public void testBasicHalfMapFile() throws Exception {
  // Make up a directory hierarchy that has a regiondir ("7e0102") and familyname.
  Path outputDir = new Path(new Path(this.testDir, "7e0102"),
      "familyname");
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf,
      this.fs, 2 * 1024)
          .withOutputDir(outputDir)
          .build();
  writeStoreFile(writer);
  checkHalfHFile(new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
      StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE));
}
Project: LCIndex-HBase-0.94.16    File: TestStoreFile.java
/**
 * Test that our mechanism of writing store files in one region to reference
 * store files in other regions works.
 * @throws IOException
 */
public void testReference()
throws IOException {
  // Make up a directory hierarchy that has a regiondir ("7e0102") and familyname.
  Path storedir = new Path(new Path(this.testDir, "7e0102"), "familyname");
  // Make a store file and write data to it.
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf,
      this.fs, 8 * 1024)
          .withOutputDir(storedir)
          .build();
  writeStoreFile(writer);
  StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
      StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
  StoreFile.Reader reader = hsf.createReader();
  // Split on a row, not in middle of row.  Midkey returned by reader
  // may be in middle of row.  Create new one with empty column and
  // timestamp.
  KeyValue kv = KeyValue.createKeyValueFromKey(reader.midkey());
  byte [] midRow = kv.getRow();
  kv = KeyValue.createKeyValueFromKey(reader.getLastKey());
  byte [] finalRow = kv.getRow();
  // Make a reference
  Path refPath = StoreFile.split(fs, storedir, hsf, midRow, Range.top);
  StoreFile refHsf = new StoreFile(this.fs, refPath, conf, cacheConf,
      StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
  // Now confirm that I can read from the reference and that it only gets
  // keys from top half of the file.
  HFileScanner s = refHsf.createReader().getScanner(false, false);
  for(boolean first = true; (!s.isSeeked() && s.seekTo()) || s.next();) {
    ByteBuffer bb = s.getKey();
    kv = KeyValue.createKeyValueFromKey(bb);
    if (first) {
      assertTrue(Bytes.equals(kv.getRow(), midRow));
      first = false;
    }
  }
  assertTrue(Bytes.equals(kv.getRow(), finalRow));
}
Project: LCIndex-HBase-0.94.16    File: TestStoreFile.java
public void testHFileLink() throws IOException {
  final String columnFamily = "f";

  Configuration testConf = new Configuration(this.conf);
  FSUtils.setRootDir(testConf, this.testDir);

  HRegionInfo hri = new HRegionInfo(Bytes.toBytes("table-link"));
  Path storedir = new Path(new Path(this.testDir,
    new Path(hri.getTableNameAsString(), hri.getEncodedName())), columnFamily);

  // Make a store file and write data to it.
  StoreFile.Writer writer = new StoreFile.WriterBuilder(testConf, cacheConf,
       this.fs, 8 * 1024)
          .withOutputDir(storedir)
          .build();
  Path storeFilePath = writer.getPath();
  writeStoreFile(writer);
  writer.close();

  Path dstPath = new Path(this.testDir, new Path("test-region", columnFamily));
  HFileLink.create(testConf, this.fs, dstPath, hri, storeFilePath.getName());
  Path linkFilePath = new Path(dstPath,
                HFileLink.createHFileLinkName(hri, storeFilePath.getName()));

  // Try to open store file from link
  StoreFile hsf = new StoreFile(this.fs, linkFilePath, testConf, cacheConf,
      StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
  assertTrue(hsf.isLink());

  // Now confirm that I can read from the link
  int count = 1;
  HFileScanner s = hsf.createReader().getScanner(false, false);
  s.seekTo();
  while (s.next()) {
    count++;
  }
  assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), count);
}
Project: LCIndex-HBase-0.94.16    File: TestStoreFile.java
/**
 * Check if data block encoding information is saved correctly in HFile's
 * file info.
 */
public void testDataBlockEncodingMetaData() throws IOException {
  // Make up a directory hierarchy that has a regiondir ("7e0102") and familyname.
  Path dir = new Path(new Path(this.testDir, "7e0102"), "familyname");
  Path path = new Path(dir, "1234567890");

  DataBlockEncoding dataBlockEncoderAlgo =
      DataBlockEncoding.FAST_DIFF;
  HFileDataBlockEncoder dataBlockEncoder =
      new HFileDataBlockEncoderImpl(
          dataBlockEncoderAlgo,
          dataBlockEncoderAlgo);
  cacheConf = new CacheConfig(conf);
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
      HFile.DEFAULT_BLOCKSIZE)
          .withFilePath(path)
          .withDataBlockEncoder(dataBlockEncoder)
          .withMaxKeyCount(2000)
          .withChecksumType(CKTYPE)
          .withBytesPerChecksum(CKBYTES)
          .build();
  writer.close();

  StoreFile storeFile = new StoreFile(fs, writer.getPath(), conf,
      cacheConf, BloomType.NONE, dataBlockEncoder);
  StoreFile.Reader reader = storeFile.createReader();

  Map<byte[], byte[]> fileInfo = reader.loadFileInfo();
  byte[] value = fileInfo.get(HFileDataBlockEncoder.DATA_BLOCK_ENCODING);

  assertEquals(dataBlockEncoderAlgo.getNameInBytes(), value);
}
Project: LCIndex-HBase-0.94.16    File: ReadHFile.java
private void readHFile(Configuration hadoopConf, Configuration hbaseConf, String fsStr,
    String fileName) throws IOException {
  CacheConfig tmpCacheConfig = new CacheConfig(hbaseConf);
  FileSystem fs = null;
  if (fsStr.equalsIgnoreCase("local")) {
    fs = LocalFileSystem.getLocal(hadoopConf);
  } else {
    fs = FileSystem.get(hadoopConf);
  }
  Path path = new Path(fileName);
  if (!fs.exists(path)) {
    System.out.println("WinterTestAID file not exists: " + path);
  } else {
    System.out.println("WinterTestAID reading lccindex hfile: " + path);
    StoreFile sf = new StoreFile(fs, path, hbaseConf, tmpCacheConfig, BloomType.NONE, null);
    Reader reader = sf.createReader();
    System.out.println("WinterTestAID store file attr: " + sf.mWinterGetAttribute());
    StoreFileScanner sss = reader.getStoreFileScanner(false, false);
    sss.seek(KeyValue.LOWESTKEY);
    System.out.println("WinterTestAID store peek value: "
        + LCCIndexConstant.mWinterToPrint(sss.peek()));
    KeyValue kv;
    int counter = 0, printInterval = 1, totalSize = 0;
    while ((kv = sss.next()) != null) {
      if (counter == 0) {
        counter = printInterval;
        System.out
            .println("WinterTestAID hfile keyvalue: " + LCCIndexConstant.mWinterToPrint(kv));
      }
      --counter;
      ++totalSize;
    }
    sss.close();
    reader.close(false);
    System.out.println("WinterTestAID total size: " + totalSize);
    System.out.println("WinterTestAID winter inner mWinterGetScannersForStoreFiles start: "
        + LCCIndexConstant.convertUnknownBytes(reader.getFirstKey()));
  }
}
Project: LCIndex-HBase-0.94.16    File: WinterTestAID.java
public static void readHFile(Configuration hbaseConf, Path hfilePath) throws IOException {
  CacheConfig tmpCacheConfig = new CacheConfig(hbaseConf);
  FileSystem hdfs = getHDFS();
  if (!hdfs.exists(hfilePath)) {
    System.out.println("WinterTestAID file not exists: " + hfilePath);
  } else {
    System.out.println("WinterTestAID reading lccindex hfile: " + hfilePath);
    StoreFile sf = new StoreFile(hdfs, hfilePath, hbaseConf, tmpCacheConfig, BloomType.NONE, null);
    Reader reader = sf.createReader();
    System.out.println("WinterTestAID store file attr: " + sf.mWinterGetAttribute());
    StoreFileScanner sss = reader.getStoreFileScanner(false, false);
    sss.seek(KeyValue.LOWESTKEY);
    System.out.println("WinterTestAID store peek value: "
        + LCCIndexConstant.mWinterToPrint(sss.peek()));
    KeyValue kv;
    int counter = 0, printInterval = 1, totalSize = 0;
    while ((kv = sss.next()) != null) {
      if (counter == 0) {
        counter = printInterval;
        System.out
            .println("WinterTestAID hfile keyvalue: " + LCCIndexConstant.mWinterToPrint(kv));
      }
      --counter;
      ++totalSize;
    }
    sss.close();
    reader.close(false);
    System.out.println("WinterTestAID total size: " + totalSize);
    System.out.println("WinterTestAID winter inner mWinterGetScannersForStoreFiles start: "
        + LCCIndexConstant.convertUnknownBytes(reader.getFirstKey()));
  }
}
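getHDFS() is a helper of the same class and is not shown here; a minimal stand-in, assuming the default Hadoop configuration resolves to the intended HDFS, would be:

  private static FileSystem getHDFS() throws IOException {
    // assumes fs.defaultFS in the ambient Hadoop configuration points at the target cluster
    return FileSystem.get(new org.apache.hadoop.conf.Configuration());
  }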
Project: IRIndex    File: HColumnDescriptor.java
/**
 * @return bloom filter type used for new StoreFiles in ColumnFamily
 */
public StoreFile.BloomType getBloomFilterType() {
  String n = getValue(BLOOMFILTER);
  if (n == null) {
    n = DEFAULT_BLOOMFILTER;
  }
  return StoreFile.BloomType.valueOf(n.toUpperCase());
}