Example source code for the Java class org.apache.hadoop.hbase.io.hfile.CacheConfig
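
CacheConfig carries the block-cache settings that HBase applies when a single HFile or StoreFile is read or written. Nearly every snippet below simply constructs new CacheConfig(conf) (or the per-family variant new CacheConfig(conf, family)) and passes it to an HFile or StoreFile reader/writer factory. The following is a minimal sketch of the most common read pattern, distilled from the examples on this page and assuming the HBase 1.x-era API used by the ditb snippets; the path is illustrative and imports are omitted to match the style of the snippets below.

Configuration conf = HBaseConfiguration.create();
FileSystem fs = FileSystem.get(conf);
Path path = new Path("/tmp/example.hfile"); // illustrative path
HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
try {
  reader.loadFileInfo();
  HFileScanner scanner = reader.getScanner(false, false);
  if (scanner.seekTo()) {
    do {
      Cell cell = scanner.getKeyValue();
      // process cell
    } while (scanner.next());
  }
} finally {
  reader.close();
}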

Project: ditb    File: BloomFilterFactory.java
/**
 * Creates a new Delete Family Bloom filter at the time of
 * {@link org.apache.hadoop.hbase.regionserver.StoreFile} writing.
 * @param conf
 * @param cacheConf
 * @param maxKeys an estimate of the number of keys we expect to insert.
 *        Irrelevant if compound Bloom filters are enabled.
 * @param writer the HFile writer
 * @return the new Bloom filter, or null if Bloom filters are disabled
 *         or the filter could not be created.
 */
public static BloomFilterWriter createDeleteBloomAtWrite(Configuration conf,
    CacheConfig cacheConf, int maxKeys, HFile.Writer writer) {
  if (!isDeleteFamilyBloomEnabled(conf)) {
    LOG.info("Delete Bloom filters are disabled by configuration for "
        + writer.getPath()
        + (conf == null ? " (configuration is null)" : ""));
    return null;
  }

  float err = getErrorRate(conf);

  int maxFold = getMaxFold(conf);
  // In case of compound Bloom filters we ignore the maxKeys hint.
  CompoundBloomFilterWriter bloomWriter = new CompoundBloomFilterWriter(getBloomBlockSize(conf),
      err, Hash.getHashType(conf), maxFold, cacheConf.shouldCacheBloomsOnWrite(),
      KeyValue.RAW_COMPARATOR);
  writer.addInlineBlockWriter(bloomWriter);
  return bloomWriter;
}
Project: ditb    File: LMDIndexDirectStoreFileScanner.java
private List<byte[]> initRowKeyList(FileSystem fileSystem, CacheConfig cacheConf,
    Configuration conf, TreeMap<byte[], TreeSet<byte[]>> indexFamilyMap,
    ScanRange.ScanRangeList rangeList) throws IOException {
  // init
  StoreFile bucketStoreFile =
      new StoreFile(fileSystem, LMDIndexParameters.getTmpBucketFilePath(file.getPath()), conf,
          cacheConf, BloomType.NONE);
  StoreFile secondaryStoreFile =
      new StoreFile(fileSystem, LMDIndexParameters.getTmpSecondaryFilePath(file.getPath()), conf,
          cacheConf, BloomType.NONE);
  StoreFileScanner bucketScanner = getStoreFileScanner(bucketStoreFile);
  StoreFileScanner secondaryScanner = getStoreFileScanner(secondaryStoreFile);
  // get hit buckets
  MDRange[] ranges = getRanges(indexFamilyMap, rangeList);
  List<LMDBucket> bucketList = getBucketRanges(bucketScanner, ranges);
  // scan rowkeys based on the buckets
  List<byte[]> rowkeyList = getRawRowkeyList(secondaryScanner, bucketList, ranges);
  // deinit
  bucketScanner.close();
  bucketStoreFile.closeReader(true);
  secondaryScanner.close();
  secondaryStoreFile.closeReader(true);
  return rowkeyList;
}
Project: ditb    File: TestAccessController.java
private void createHFile(Path path,
    byte[] family, byte[] qualifier,
    byte[] startKey, byte[] endKey, int numRows) throws IOException {

  HFile.Writer writer = null;
  long now = System.currentTimeMillis();
  try {
    HFileContext context = new HFileContextBuilder().build();
    writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
        .withPath(fs, path)
        .withFileContext(context)
        .create();
    // ask for numRows-2 split keys; with inclusive=true the two boundary
    // keys are also returned, so numRows keys are written in total
    for (byte[] key : Bytes.iterateOnSplits(startKey, endKey, true, numRows-2)) {
      KeyValue kv = new KeyValue(key, family, qualifier, now, key);
      writer.append(kv);
    }
  } finally {
    if (writer != null) {
      writer.close();
    }
  }
}
Project: ditb    File: TestRegionObserverInterface.java
private static void createHFile(
    Configuration conf,
    FileSystem fs, Path path,
    byte[] family, byte[] qualifier) throws IOException {
  HFileContext context = new HFileContextBuilder().build();
  HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
      .withPath(fs, path)
      .withFileContext(context)
      .create();
  long now = System.currentTimeMillis();
  try {
    for (int i = 1; i <= 9; i++) {
      KeyValue kv = new KeyValue(Bytes.toBytes(i + ""), family, qualifier, now, Bytes.toBytes(i + ""));
      writer.append(kv);
    }
  } finally {
    writer.close();
  }
}
Project: ditb    File: TestHRegionServerBulkLoad.java
/**
 * Create an HFile with the given number of rows with a specified value.
 */
public static void createHFile(FileSystem fs, Path path, byte[] family,
    byte[] qualifier, byte[] value, int numRows) throws IOException {
  HFileContext context = new HFileContextBuilder().withBlockSize(BLOCKSIZE)
                          .withCompression(COMPRESSION)
                          .build();
  HFile.Writer writer = HFile
      .getWriterFactory(conf, new CacheConfig(conf))
      .withPath(fs, path)
      .withFileContext(context)
      .create();
  long now = System.currentTimeMillis();
  try {
    // write numRows cells, all sharing the same value and timestamp
    for (int i = 0; i < numRows; i++) {
      KeyValue kv = new KeyValue(rowkey(i), family, qualifier, now, value);
      writer.append(kv);
    }
    writer.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY, Bytes.toBytes(now));
  } finally {
    writer.close();
  }
}
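
A possible invocation of the helper above; the output path, family, qualifier, value, and row count are illustrative assumptions rather than values taken from the test:

FileSystem fs = FileSystem.get(conf);
Path hfile = new Path("/tmp/bulkload/f/hfile_0"); // illustrative output location
createHFile(fs, hfile, Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("value"), 1000);
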
Project: ditb    File: DataBlockEncodingTool.java
/**
 * Test a data block encoder on the given HFile. Output results to console.
 * @param kvLimit The maximum number of KeyValues to analyze.
 * @param hfilePath an HFile path on the file system.
 * @param compressionName Compression algorithm used for comparison.
 * @param doBenchmark Run performance benchmarks.
 * @param doVerify Verify correctness.
 * @throws IOException When hfilePath is incorrect.
 */
public static void testCodecs(Configuration conf, int kvLimit,
    String hfilePath, String compressionName, boolean doBenchmark,
    boolean doVerify) throws IOException {
  // create environment
  Path path = new Path(hfilePath);
  CacheConfig cacheConf = new CacheConfig(conf);
  FileSystem fs = FileSystem.get(conf);
  StoreFile hsf = new StoreFile(fs, path, conf, cacheConf,
    BloomType.NONE);

  StoreFile.Reader reader = hsf.createReader();
  reader.loadFileInfo();
  KeyValueScanner scanner = reader.getStoreFileScanner(true, true);

  // run the utilities
  DataBlockEncodingTool comp = new DataBlockEncodingTool(compressionName);
  int majorVersion = reader.getHFileVersion();
  comp.useHBaseChecksum = majorVersion > 2
      || (majorVersion == 2 && reader.getHFileMinorVersion() >= HFileReaderV2.MINOR_VERSION_WITH_CHECKSUM);
  comp.checkStatistics(scanner, kvLimit);
  if (doVerify) {
    comp.verifyCodecs(scanner, kvLimit);
  }
  if (doBenchmark) {
    comp.benchmarkCodecs();
  }
  comp.displayStatistics();

  // cleanup
  scanner.close();
  reader.close(cacheConf.shouldEvictOnClose());
}
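
A possible invocation of the utility above; the KeyValue limit, HFile path, and codec name are illustrative:

Configuration conf = HBaseConfiguration.create();
// Analyze up to one million KeyValues from an existing store file, compare against
// GZ compression, and run both the correctness check and the benchmarks.
DataBlockEncodingTool.testCodecs(conf, 1000000,
    "/hbase/data/default/t1/region/f/hfile", "GZ", true, true);
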
Project: ditb    File: TestBlocksRead.java
/**
 * Callers must afterward call {@link HRegion#closeHRegion(HRegion)}
 * @param tableName
 * @param callingMethod
 * @param conf
 * @param family
 * @throws IOException
 * @return created and initialized region.
 */
private HRegion initHRegion(byte[] tableName, String callingMethod,
    HBaseConfiguration conf, String family) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  HColumnDescriptor familyDesc;
  for (int i = 0; i < BLOOM_TYPE.length; i++) {
    BloomType bloomType = BLOOM_TYPE[i];
    familyDesc = new HColumnDescriptor(family + "_" + bloomType)
        .setBlocksize(1)
        .setBloomFilterType(BLOOM_TYPE[i]);
    htd.addFamily(familyDesc);
  }

  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  Path path = new Path(DIR + callingMethod);
  HRegion r = HRegion.createHRegion(info, path, conf, htd);
  blockCache = new CacheConfig(conf).getBlockCache();
  return r;
}
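
The javadoc above requires callers to close the region afterwards; a minimal sketch of that pattern, assuming conf is the test's HBaseConfiguration instance and using placeholder table and family names:

HRegion region = initHRegion(Bytes.toBytes("testBlocksRead"), "testBlocksRead", conf, "cf1");
try {
  // issue Puts/Gets against the region and inspect blockCache here
} finally {
  HRegion.closeHRegion(region);
}
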
Project: ditb    File: TestStore.java
/**
 * Verify that compression and data block encoding are respected by the
 * Store.createWriterInTmp() method, used on store flush.
 */
@Test
public void testCreateWriter() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  FileSystem fs = FileSystem.get(conf);

  HColumnDescriptor hcd = new HColumnDescriptor(family);
  hcd.setCompressionType(Compression.Algorithm.GZ);
  hcd.setDataBlockEncoding(DataBlockEncoding.DIFF);
  init(name.getMethodName(), conf, hcd);

  // Test createWriterInTmp()
  StoreFile.Writer writer = store.createWriterInTmp(4, hcd.getCompression(), false, true, false);
  Path path = writer.getPath();
  writer.append(new KeyValue(row, family, qf1, Bytes.toBytes(1)));
  writer.append(new KeyValue(row, family, qf2, Bytes.toBytes(2)));
  writer.append(new KeyValue(row2, family, qf1, Bytes.toBytes(3)));
  writer.append(new KeyValue(row2, family, qf2, Bytes.toBytes(4)));
  writer.close();

  // Verify that compression and encoding settings are respected
  HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
  Assert.assertEquals(hcd.getCompressionType(), reader.getCompressionAlgorithm());
  Assert.assertEquals(hcd.getDataBlockEncoding(), reader.getDataBlockEncoding());
  reader.close();
}
Project: ditb    File: TestStore.java
private void addStoreFile() throws IOException {
  StoreFile f = this.store.getStorefiles().iterator().next();
  Path storedir = f.getPath().getParent();
  long seqid = this.store.getMaxSequenceId();
  Configuration c = TEST_UTIL.getConfiguration();
  FileSystem fs = FileSystem.get(c);
  HFileContext fileContext = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL).build();
  StoreFile.Writer w = new StoreFile.WriterBuilder(c, new CacheConfig(c),
      fs)
          .withOutputDir(storedir)
          .withFileContext(fileContext)
          .build();
  w.appendMetadata(seqid + 1, false);
  w.close();
  LOG.info("Added store file:" + w.getPath());
}
Project: ditb    File: TestEncryptionRandomKeying.java
private static byte[] extractHFileKey(Path path) throws Exception {
  HFile.Reader reader = HFile.createReader(TEST_UTIL.getTestFileSystem(), path,
    new CacheConfig(conf), conf);
  try {
    reader.loadFileInfo();
    Encryption.Context cryptoContext = reader.getFileContext().getEncryptionContext();
    assertNotNull("Reader has a null crypto context", cryptoContext);
    Key key = cryptoContext.getKey();
    if (key == null) {
      return null;
    }
    return key.getEncoded();
  } finally {
    reader.close();
  }
}
Project: pbase    File: TestHRegionServerBulkLoad.java
/**
 * Create an HFile with the given number of rows with a specified value.
 */
public static void createHFile(FileSystem fs, Path path, byte[] family,
    byte[] qualifier, byte[] value, int numRows) throws IOException {
  HFileContext context = new HFileContextBuilder().withBlockSize(BLOCKSIZE)
                          .withCompression(COMPRESSION)
                          .build();
  HFile.Writer writer = HFile
      .getWriterFactory(conf, new CacheConfig(conf))
      .withPath(fs, path)
      .withFileContext(context)
      .create();
  long now = System.currentTimeMillis();
  try {
    // write numRows cells, all sharing the same value and timestamp
    for (int i = 0; i < numRows; i++) {
      KeyValue kv = new KeyValue(rowkey(i), family, qualifier, now, value);
      writer.append(kv);
    }
    writer.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY, Bytes.toBytes(now));
  } finally {
    writer.close();
  }
}
Project: pbase    File: TestEncryptionRandomKeying.java
private static byte[] extractHFileKey(Path path) throws Exception {
  HFile.Reader reader = HFile.createReader(TEST_UTIL.getTestFileSystem(), path,
    new CacheConfig(conf), conf);
  try {
    reader.loadFileInfo();
    Encryption.Context cryptoContext = reader.getFileContext().getEncryptionContext();
    assertNotNull("Reader has a null crypto context", cryptoContext);
    Key key = cryptoContext.getKey();
    if (key == null) {
      return null;
    }
    return key.getEncoded();
  } finally {
    reader.close();
  }
}
Project: LCIndex-HBase-0.94.16    File: TestLoadIncrementalHFiles.java
/**
 * Create an HFile with the given number of rows between a given
 * start key and end key.
 * TODO put me in an HFileTestUtil or something?
 */
static void createHFile(
    Configuration conf,
    FileSystem fs, Path path,
    byte[] family, byte[] qualifier,
    byte[] startKey, byte[] endKey, int numRows) throws IOException
{
  HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
      .withPath(fs, path)
      .withBlockSize(BLOCKSIZE)
      .withCompression(COMPRESSION)
      .withComparator(KeyValue.KEY_COMPARATOR)
      .create();
  long now = System.currentTimeMillis();
  try {
    // subtract 2 since iterateOnSplits doesn't include boundary keys
    for (byte[] key : Bytes.iterateOnSplits(startKey, endKey, numRows-2)) {
      KeyValue kv = new KeyValue(key, family, qualifier, now, key);
      writer.append(kv);
    }
  } finally {
    writer.close();
  }
}
Project: HIndex    File: TestEncryptionRandomKeying.java
private static byte[] extractHFileKey(Path path) throws Exception {
  HFile.Reader reader = HFile.createReader(TEST_UTIL.getTestFileSystem(), path,
    new CacheConfig(conf), conf);
  try {
    reader.loadFileInfo();
    Encryption.Context cryptoContext = reader.getFileContext().getEncryptionContext();
    assertNotNull("Reader has a null crypto context", cryptoContext);
    Key key = cryptoContext.getKey();
    if (key == null) {
      return null;
    }
    return key.getEncoded();
  } finally {
    reader.close();
  }
}
Project: HIndex    File: TestRegionObserverInterface.java
private static void createHFile(
    Configuration conf,
    FileSystem fs, Path path,
    byte[] family, byte[] qualifier) throws IOException {
  HFileContext context = new HFileContextBuilder().build();
  HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
      .withPath(fs, path)
      .withFileContext(context)
      .create();
  long now = System.currentTimeMillis();
  try {
    for (int i = 1; i <= 9; i++) {
      KeyValue kv = new KeyValue(Bytes.toBytes(i + ""), family, qualifier, now, Bytes.toBytes(i + ""));
      writer.append(kv);
    }
  } finally {
    writer.close();
  }
}
Project: HIndex    File: HFileTestUtil.java
/**
 * Create an HFile with the given number of rows between a given
 * start key and end key.
 */
public static void createHFile(
    Configuration configuration,
    FileSystem fs, Path path,
    byte[] family, byte[] qualifier,
    byte[] startKey, byte[] endKey, int numRows) throws IOException
{
  HFileContext meta = new HFileContextBuilder().build();
  HFile.Writer writer = HFile.getWriterFactory(configuration, new CacheConfig(configuration))
      .withPath(fs, path)
      .withFileContext(meta)
      .create();
  long now = System.currentTimeMillis();
  try {
    // subtract 2 since iterateOnSplits doesn't include boundary keys
    for (byte[] key : Bytes.iterateOnSplits(startKey, endKey, numRows-2)) {
      KeyValue kv = new KeyValue(key, family, qualifier, now, key);
      writer.append(kv);
    }
  } finally {
    writer.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY,
        Bytes.toBytes(System.currentTimeMillis()));
    writer.close();
  }
}
Project: Megh    File: HadoopFilePerformanceTest.java
private void readHFileSeq(Path file, Compression.Algorithm compression) throws Exception
{

  CacheConfig cacheConf = new CacheConfig(conf);
  HFile.Reader reader = HFile.createReader(hdfs, file, cacheConf, conf);
  HFileScanner scanner = reader.getScanner(true, true, false);

  scanner.seekTo();

  @SuppressWarnings("unused")
  KeyValue kv = null;
  while (scanner.next()) {
    kv = scanner.getKeyValue();
    //logger.debug("key: {} value: {}", new String (kv.getKey()), new String (kv.getValue()));
  }

}
Project: Megh    File: HadoopFilePerformanceTest.java
private void readHFileSeqId(Path file, Compression.Algorithm compression) throws Exception
{
  CacheConfig cacheConf = new CacheConfig(conf);
  HFile.Reader reader = HFile.createReader(hdfs, file, cacheConf, conf);
  HFileScanner scanner = reader.getScanner(true, true, false);

  @SuppressWarnings("unused")
  KeyValue kv = null;
  scanner.seekTo();

  for (int i = 0; i < testSize; i++) {
    scanner.seekTo(getKey(i).getBytes());
    kv = scanner.getKeyValue();
    //logger.debug("key: {} value: {}", new String (kv.getKey()), new String (kv.getValue()));
  }
}
Project: terrapin    File: HFileOutputFormat.java
public RecordWriter<BytesWritable, BytesWritable> getRecordWriter(
        TaskAttemptContext context) throws IOException {
  // Get the path of the temporary output file
  final Path outputPath = FileOutputFormat.getOutputPath(context);
  final Path outputDir = new FileOutputCommitter(outputPath, context).getWorkPath();
  final Configuration conf = context.getConfiguration();
  final FileSystem fs = outputDir.getFileSystem(conf);

  int blockSize = conf.getInt(Constants.HFILE_BLOCKSIZE, 16384);
  // Default to snappy.
  Compression.Algorithm compressionAlgorithm = getAlgorithm(
      conf.get(Constants.HFILE_COMPRESSION));
  final StoreFile.Writer writer =
      new StoreFile.WriterBuilder(conf, new CacheConfig(conf), fs, blockSize)
          .withFilePath(hfilePath(outputPath, context.getTaskAttemptID().getTaskID().getId()))
          .withCompression(compressionAlgorithm)
          .build();
  return new HFileRecordWriter(writer);
}
Project: HIndex    File: TestIndexHalfStoreFileReader.java
private void doTestOfScanAndReseek(Path p, FileSystem fs, Reference bottom,
    CacheConfig cacheConf, Configuration conf) throws IOException {
  final IndexHalfStoreFileReader halfreader =
      new IndexHalfStoreFileReader(fs, p, cacheConf, bottom, conf);
  halfreader.loadFileInfo();
  final HFileScanner scanner = halfreader.getScanner(false, false);
  KeyValue getseekTorowKey3 = getSeekToRowKey();
  scanner.seekTo(getseekTorowKey3.getBuffer(), 8, 17);
  boolean next = scanner.next();
  KeyValue keyValue = null;
  if (next) {
    keyValue = scanner.getKeyValue();
  }
  byte[] expectedRow = getExpected();
  byte[] actualRow = keyValue.getRow();
  Assert.assertArrayEquals(expectedRow, actualRow);
  halfreader.close(true);
}
Project: pbase    File: BloomFilterFactory.java
/**
 * Creates a new Delete Family Bloom filter at the time of
 * {@link org.apache.hadoop.hbase.regionserver.StoreFile} writing.
 * @param conf
 * @param cacheConf
 * @param maxKeys an estimate of the number of keys we expect to insert.
 *        Irrelevant if compound Bloom filters are enabled.
 * @param writer the HFile writer
 * @return the new Bloom filter, or null if Bloom filters are disabled
 *         or the filter could not be created.
 */
public static BloomFilterWriter createDeleteBloomAtWrite(Configuration conf,
    CacheConfig cacheConf, int maxKeys, HFile.Writer writer) {
  if (!isDeleteFamilyBloomEnabled(conf)) {
    LOG.info("Delete Bloom filters are disabled by configuration for "
        + writer.getPath()
        + (conf == null ? " (configuration is null)" : ""));
    return null;
  }

  float err = getErrorRate(conf);

  int maxFold = getMaxFold(conf);
  // In case of compound Bloom filters we ignore the maxKeys hint.
  CompoundBloomFilterWriter bloomWriter = new CompoundBloomFilterWriter(getBloomBlockSize(conf),
      err, Hash.getHashType(conf), maxFold, cacheConf.shouldCacheBloomsOnWrite(),
      KeyValue.RAW_COMPARATOR);
  writer.addInlineBlockWriter(bloomWriter);
  return bloomWriter;
}
Project: HIndex    File: TestIndexHalfStoreFileReader.java
@Test
public void testIndexHalfStoreFileReaderWithSeekTo() throws Exception {
  HBaseTestingUtility test_util = new HBaseTestingUtility();
  String root_dir = test_util.getDataTestDir("TestIndexHalfStoreFile").toString();
  Path p = new Path(root_dir, "test");
  Configuration conf = test_util.getConfiguration();
  FileSystem fs = FileSystem.get(conf);
  CacheConfig cacheConf = new CacheConfig(conf);
  HFileContext meta = new HFileContextBuilder().withBlockSize(1024).build();

  HFile.Writer w =
      HFile.getWriterFactory(conf, cacheConf).withPath(fs, p).withFileContext(meta)
          .withComparator(KeyValue.COMPARATOR).create();
  String usertableName = "testIndexHalfStore";
  List<KeyValue> items = genSomeKeys(usertableName);
  for (KeyValue kv : items) {
    w.append(kv);
  }
  w.close();
  HFile.Reader r = HFile.createReader(fs, p, cacheConf, conf);
  r.loadFileInfo();
  byte[] midkey = "005".getBytes();
  Reference top = new Reference(midkey, Reference.Range.top);
  doTestOfScanAndReseek(p, fs, top, cacheConf, conf);
  r.close();
}
Project: HIndex    File: CompressionTest.java
public static void doSmokeTest(FileSystem fs, Path path, String codec)
throws Exception {
  Configuration conf = HBaseConfiguration.create();
  HFileContext context = new HFileContextBuilder()
                         .withCompression(AbstractHFileWriter.compressionByName(codec)).build();
  HFile.Writer writer = HFile.getWriterFactoryNoCache(conf)
      .withPath(fs, path)
      .withFileContext(context)
      .create();
  writer.append(Bytes.toBytes("testkey"), Bytes.toBytes("testval"));
  writer.appendFileInfo(Bytes.toBytes("infokey"), Bytes.toBytes("infoval"));
  writer.close();

  HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
  reader.loadFileInfo();
  byte[] key = reader.getFirstKey();
  boolean rc = Bytes.toString(key).equals("testkey");
  reader.close();

  if (!rc) {
    throw new Exception("Read back incorrect result: " +
                        Bytes.toStringBinary(key));
  }
}
Project: ditb    File: HFileTest.java
private void scanHFileOnLocalFS(Path file) throws IOException {
  HColumnDescriptor family = desc.getFamily(Bytes.toBytes("f"));
  CacheConfig cacheConf = new CacheConfig(conf, family);
  HFile.Reader reader = HFile.createReader(LocalFileSystem.getLocal(conf), file, cacheConf, conf);
  HFileScanner scanner = reader.getScanner(false, false, false);
  scanner.seekTo();
  int n = 0;
  do {
    Cell cell = scanner.getKeyValue();
    printKV(cell);
    ++n;
  } while (scanner.next());
  reader.close();
}
Project: ditb    File: CompressionTest.java
public static void doSmokeTest(FileSystem fs, Path path, String codec)
throws Exception {
  Configuration conf = HBaseConfiguration.create();
  HFileContext context = new HFileContextBuilder()
                         .withCompression(AbstractHFileWriter.compressionByName(codec)).build();
  HFile.Writer writer = HFile.getWriterFactoryNoCache(conf)
      .withPath(fs, path)
      .withFileContext(context)
      .create();
  // Write any-old Cell...
  final byte [] rowKey = Bytes.toBytes("compressiontestkey");
  Cell c = CellUtil.createCell(rowKey, Bytes.toBytes("compressiontestval"));
  writer.append(c);
  writer.appendFileInfo(Bytes.toBytes("compressioninfokey"), Bytes.toBytes("compressioninfoval"));
  writer.close();
  Cell cc = null;
  HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
  try {
    reader.loadFileInfo();
    HFileScanner scanner = reader.getScanner(false, true);
    scanner.seekTo(); // position to the start of file
    // Scanner does not do Cells yet. Do below for now till fixed.
    cc = scanner.getKeyValue();
    if (CellComparator.compareRows(c, cc) != 0) {
      throw new Exception("Read back incorrect result: " + c.toString() + " vs " + cc.toString());
    }
  } finally {
    reader.close();
  }
}
Project: ditb    File: BloomFilterFactory.java
/**
 * Creates a new general (Row or RowCol) Bloom filter at the time of
 * {@link org.apache.hadoop.hbase.regionserver.StoreFile} writing.
 *
 * @param conf
 * @param cacheConf
 * @param bloomType
 * @param maxKeys an estimate of the number of keys we expect to insert.
 *        Irrelevant if compound Bloom filters are enabled.
 * @param writer the HFile writer
 * @return the new Bloom filter, or null if Bloom filters are disabled
 *         or the filter could not be created.
 */
public static BloomFilterWriter createGeneralBloomAtWrite(Configuration conf,
    CacheConfig cacheConf, BloomType bloomType, int maxKeys,
    HFile.Writer writer) {
  if (!isGeneralBloomEnabled(conf)) {
    LOG.trace("Bloom filters are disabled by configuration for "
        + writer.getPath()
        + (conf == null ? " (configuration is null)" : ""));
    return null;
  } else if (bloomType == BloomType.NONE) {
    LOG.trace("Bloom filter is turned off for the column family");
    return null;
  }

  float err = getErrorRate(conf);

  // In case of row/column Bloom filter lookups, each lookup is an OR of two
  // separate lookups. Therefore, if each lookup's false positive rate is p,
  // the resulting false positive rate is err = 1 - (1 - p)^2, and
  // p = 1 - sqrt(1 - err).
  if (bloomType == BloomType.ROWCOL) {
    err = (float) (1 - Math.sqrt(1 - err));
  }

  int maxFold = conf.getInt(IO_STOREFILE_BLOOM_MAX_FOLD,
      MAX_ALLOWED_FOLD_FACTOR);

  // Do we support compound bloom filters?
  // In case of compound Bloom filters we ignore the maxKeys hint.
  CompoundBloomFilterWriter bloomWriter = new CompoundBloomFilterWriter(getBloomBlockSize(conf),
      err, Hash.getHashType(conf), maxFold, cacheConf.shouldCacheBloomsOnWrite(),
      bloomType == BloomType.ROWCOL ? KeyValue.COMPARATOR : KeyValue.RAW_COMPARATOR);
  writer.addInlineBlockWriter(bloomWriter);
  return bloomWriter;
}
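
To make the ROWCOL adjustment above concrete: with a configured error rate of 1%, the per-lookup rate becomes roughly 0.5%, since 1 - sqrt(1 - 0.01) is about 0.005. The lines below only illustrate that arithmetic and are not part of BloomFilterFactory:

float err = 0.01f;                          // configured false positive rate
float p = (float) (1 - Math.sqrt(1 - err)); // per-lookup rate for ROWCOL, about 0.00501
// and 1 - (1 - p)^2 recovers the configured rate of about 0.01
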
Project: ditb    File: HFileCorruptionChecker.java
public HFileCorruptionChecker(Configuration conf, ExecutorService executor,
    boolean quarantine) throws IOException {
  this.conf = conf;
  this.fs = FileSystem.get(conf);
  this.cacheConf = new CacheConfig(conf);
  this.executor = executor;
  this.inQuarantineMode = quarantine;
}
Project: ditb    File: StoreFile.java
/**
 * Constructor, loads a reader and its indices, etc. May allocate a substantial amount of RAM
 * depending on the underlying files (10-20MB?).
 *
 * @param fs          The current file system to use.
 * @param fileInfo    The store file information.
 * @param conf        The current configuration.
 * @param cacheConf   The cache configuration and block cache reference.
 * @param cfBloomType The bloom type to use for this store file as specified by column family
 *                    configuration. This may or may not be the same as the Bloom filter type actually
 *                    present in the HFile, because column family configuration might change. If this is
 *                    {@link BloomType#NONE}, the existing Bloom filter is ignored.
 * @throws IOException When opening the reader fails.
 */
public StoreFile(final FileSystem fs, final StoreFileInfo fileInfo, final Configuration conf,
    final CacheConfig cacheConf, final BloomType cfBloomType) throws IOException {
  this.fs = fs;
  this.fileInfo = fileInfo;
  this.cacheConf = cacheConf;

  if (BloomFilterFactory.isGeneralBloomEnabled(conf)) {
    this.cfBloomType = cfBloomType;
  } else {
    LOG.info("Ignoring bloom filter check for file " + this.getPath() + ": " + "cfBloomType="
        + cfBloomType + " (disabled in config)");
    this.cfBloomType = BloomType.NONE;
  }
}
Project: ditb    File: StoreFile.java
/**
 * Creates an HFile.Writer that also write helpful meta data.
 *
 * @param fs           file system to write to
 * @param path         file name to create
 * @param conf         user configuration
 * @param comparator   key comparator
 * @param bloomType    bloom filter setting
 * @param maxKeys      the expected maximum number of keys to be added. Was used for Bloom filter
 *                     size in {@link HFile} format version 1.
 * @param favoredNodes
 * @param fileContext  - The HFile context
 * @throws IOException problem writing to FS
 */
private Writer(FileSystem fs, Path path, final Configuration conf, CacheConfig cacheConf,
    final KVComparator comparator, BloomType bloomType, long maxKeys,
    InetSocketAddress[] favoredNodes, HFileContext fileContext) throws IOException {
  writer = HFile.getWriterFactory(conf, cacheConf).withPath(fs, path).withComparator(comparator)
      .withFavoredNodes(favoredNodes).withFileContext(fileContext).create();

  this.kvComparator = comparator;

  generalBloomFilterWriter = BloomFilterFactory
      .createGeneralBloomAtWrite(conf, cacheConf, bloomType,
          (int) Math.min(maxKeys, Integer.MAX_VALUE), writer);

  if (generalBloomFilterWriter != null) {
    this.bloomType = bloomType;
    if (LOG.isTraceEnabled()) LOG.trace(
        "Bloom filter type for " + path + ": " + this.bloomType + ", "
            + generalBloomFilterWriter.getClass().getSimpleName());
  } else {
    // Not using Bloom filters.
    this.bloomType = BloomType.NONE;
  }

  // initialize delete family Bloom filter when there is NO RowCol Bloom
  // filter
  if (this.bloomType != BloomType.ROWCOL) {
    this.deleteFamilyBloomFilterWriter = BloomFilterFactory
        .createDeleteBloomAtWrite(conf, cacheConf, (int) Math.min(maxKeys, Integer.MAX_VALUE),
            writer);
  } else {
    deleteFamilyBloomFilterWriter = null;
  }
  if (deleteFamilyBloomFilterWriter != null) {
    if (LOG.isTraceEnabled()) LOG.trace(
        "Delete Family Bloom filter type for " + path + ": " + deleteFamilyBloomFilterWriter
            .getClass().getSimpleName());
  }
}
Project: ditb    File: MetricsRegionServerWrapperImpl.java
/**
 * Due to threading, the block cache may not be initialized yet (for example when
 * testing multiple region servers in one JVM), so we retry initializing the
 * blockCache and cacheStats references until we succeed.
 */
private synchronized  void initBlockCache() {
  CacheConfig cacheConfig = this.regionServer.cacheConfig;
  if (cacheConfig != null && this.blockCache == null) {
    this.blockCache = cacheConfig.getBlockCache();
  }

  if (this.blockCache != null && this.cacheStats == null) {
    this.cacheStats = blockCache.getStats();
  }
}
Project: ditb    File: HeapMemoryManager.java
public static HeapMemoryManager create(Configuration conf, FlushRequester memStoreFlusher,
              Server server, RegionServerAccounting regionServerAccounting) {
  BlockCache blockCache = CacheConfig.instantiateBlockCache(conf);
  if (blockCache instanceof ResizableBlockCache) {
    return new HeapMemoryManager((ResizableBlockCache) blockCache, memStoreFlusher, server,
               regionServerAccounting);
  }
  return null;
}
Project: ditb    File: LMDIndexDirectStoreFileScanner.java
public LMDIndexDirectStoreFileScanner(StoreFile file, boolean canUseDrop, boolean cacheBlocks,
    boolean usePread, boolean isCompaction, ScanQueryMatcher matcher, long readPt,
    boolean isPrimaryReplica, TreeMap<byte[], TreeSet<byte[]>> indexFamilyMap,
    ScanRange.ScanRangeList rangeList, FileSystem fileSystem, CacheConfig cacheConf,
    Configuration conf, boolean rowkeyAsResult) throws IOException {
  this.canUseDrop = canUseDrop;
  this.cacheBlocks = cacheBlocks;
  this.usePread = usePread;
  this.isCompaction = isCompaction;
  this.matcher = matcher;
  this.readPt = readPt;
  this.isPrimaryReplica = isPrimaryReplica;
  this.file = file;
  this.rowkeyAsResult = rowkeyAsResult;
  try {
    rawRowkeyList = initRowKeyList(fileSystem, cacheConf, conf, indexFamilyMap, rangeList);
    System.out.println("get " + rawRowkeyList.size() + " secondary rowkeys from " + this.file
        + ", now rowkeyAsResult=" + rowkeyAsResult);
    if (!rowkeyAsResult) {
      Collections.sort(rawRowkeyList, Bytes.BYTES_COMPARATOR);
      rawDataScanner = getStoreFileScanner(file);
    }
  } catch (IOException e) {
    System.out.println("error in LMDIndexStoreFileScanner, " + e);
    throw e;
  }
}
Project: ditb    File: LMDIndexDirectStoreFileScanner.java
public static List<KeyValueScanner> getLMDIndexDirectScannersForStoreFiles(
    Collection<StoreFile> files, boolean cacheBlocks, boolean usePread, boolean isCompaction,
    boolean canUseDrop, ScanQueryMatcher matcher, long readPt, boolean isPrimaryReplica,
    TreeMap<byte[], TreeSet<byte[]>> indexFamilyMap, ScanRange.ScanRangeList rangeList,
    FileSystem fileSystem, CacheConfig cacheConf, Configuration conf, boolean rowkeyAsResult)
    throws IOException {
  List<KeyValueScanner> scanners = new ArrayList<>(files.size());
  for (StoreFile file : files) {
    scanners.add(
        new LMDIndexDirectStoreFileScanner(file, canUseDrop, cacheBlocks, usePread, isCompaction,
            matcher, readPt, isPrimaryReplica, indexFamilyMap, rangeList, fileSystem, cacheConf,
            conf, rowkeyAsResult));
  }
  return scanners;
}
Project: ditb    File: HRegion.java
/**
 * Facility for dumping and compacting catalog tables. Only handles catalog tables, since those
 * are the only tables whose schema we know for sure. For usage run:
 * <pre>
 *   ./bin/hbase org.apache.hadoop.hbase.regionserver.HRegion
 * </pre>
 *
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
  if (args.length < 1) {
    printUsageAndExit(null);
  }
  boolean majorCompact = false;
  if (args.length > 1) {
    if (!args[1].toLowerCase().startsWith("major")) {
      printUsageAndExit("ERROR: Unrecognized option <" + args[1] + ">");
    }
    majorCompact = true;
  }
  final Path tableDir = new Path(args[0]);
  final Configuration c = HBaseConfiguration.create();
  final FileSystem fs = FileSystem.get(c);
  final Path logdir = new Path(c.get("hbase.tmp.dir"));
  final String logname = "wal" + FSUtils.getTableName(tableDir) + System.currentTimeMillis();

  final Configuration walConf = new Configuration(c);
  FSUtils.setRootDir(walConf, logdir);
  final WALFactory wals = new WALFactory(walConf, null, logname);
  try {
    processTable(fs, tableDir, wals, c, majorCompact);
  } finally {
    wals.close();
    // TODO: is this still right?
    BlockCache bc = new CacheConfig(c).getBlockCache();
    if (bc != null) bc.shutdown();
  }
}
Project: ditb    File: RegionCoprocessorHost.java
/**
 * @param fs filesystem to read from
 * @param p path to the file
 * @param in {@link FSDataInputStreamWrapper}
 * @param size Full size of the file
 * @param cacheConf
 * @param r original reference file. This will be not null only when reading a split file.
 * @return a Reader instance to use instead of the base reader if overriding
 * default behavior, null otherwise
 * @throws IOException
 */
public StoreFile.Reader preStoreFileReaderOpen(final FileSystem fs, final Path p,
    final FSDataInputStreamWrapper in, final long size, final CacheConfig cacheConf,
    final Reference r) throws IOException {
  return execOperationWithResult(null,
      coprocessors.isEmpty() ? null : new RegionOperationWithResult<StoreFile.Reader>() {
    @Override
    public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
        throws IOException {
      setResult(oserver.preStoreFileReaderOpen(ctx, fs, p, in, size, cacheConf, r, getResult()));
    }
  });
}
Project: ditb    File: RegionCoprocessorHost.java
/**
 * @param fs filesystem to read from
 * @param p path to the file
 * @param in {@link FSDataInputStreamWrapper}
 * @param size Full size of the file
 * @param cacheConf
 * @param r original reference file. This will be not null only when reading a split file.
 * @param reader the base reader instance
 * @return The reader to use
 * @throws IOException
 */
public StoreFile.Reader postStoreFileReaderOpen(final FileSystem fs, final Path p,
    final FSDataInputStreamWrapper in, final long size, final CacheConfig cacheConf,
    final Reference r, final StoreFile.Reader reader) throws IOException {
  return execOperationWithResult(reader,
      coprocessors.isEmpty() ? null : new RegionOperationWithResult<StoreFile.Reader>() {
    @Override
    public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
        throws IOException {
      setResult(oserver.postStoreFileReaderOpen(ctx, fs, p, in, size, cacheConf, r, getResult()));
    }
  });
}
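
A minimal sketch of a RegionObserver that participates in the two hooks invoked above, assuming the HBase 1.x-era BaseRegionObserver base class; the class name is hypothetical, and returning the passed-in reader unchanged keeps the default behavior:

public class ReaderOpenLoggingObserver extends BaseRegionObserver {
  @Override
  public StoreFile.Reader preStoreFileReaderOpen(
      ObserverContext<RegionCoprocessorEnvironment> ctx, FileSystem fs, Path p,
      FSDataInputStreamWrapper in, long size, CacheConfig cacheConf, Reference r,
      StoreFile.Reader reader) throws IOException {
    // The incoming reader is the current result (initially null); returning it
    // unchanged keeps the default open path, while returning a custom
    // StoreFile.Reader would replace it.
    return reader;
  }

  @Override
  public StoreFile.Reader postStoreFileReaderOpen(
      ObserverContext<RegionCoprocessorEnvironment> ctx, FileSystem fs, Path p,
      FSDataInputStreamWrapper in, long size, CacheConfig cacheConf, Reference r,
      StoreFile.Reader reader) throws IOException {
    // The reader passed in here is the one the region will actually use.
    return reader;
  }
}
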
Project: ditb    File: HalfStoreFileReader.java
/**
 * Creates a half file reader for a normal hfile.
 * @param fs filesystem to read from
 * @param p path to hfile
 * @param cacheConf
 * @param r original reference file (contains top or bottom)
 * @param conf Configuration
 * @throws IOException
 */
public HalfStoreFileReader(final FileSystem fs, final Path p,
    final CacheConfig cacheConf, final Reference r, final Configuration conf)
    throws IOException {
  super(fs, p, cacheConf, conf);
  // This is not the actual midkey for this half-file; it's just the border
  // around which we split top and bottom. We have to look in the files to find
  // the actual last and first keys for the bottom and top halves. Half-files
  // don't have an actual midkey themselves; no midkey is how we indicate a file
  // is not splittable.
  this.splitkey = r.getSplitKey();
  this.splitCell = new KeyValue.KeyOnlyKeyValue(this.splitkey, 0, this.splitkey.length);
  // Is it top or bottom half?
  this.top = Reference.isTopFileRegion(r.getFileRegion());
}
Project: ditb    File: HalfStoreFileReader.java
/**
 * Creates a half file reader for an hfile referred to by an hfilelink.
 * @param fs filesystem to read from
 * @param p path to hfile
 * @param in {@link FSDataInputStreamWrapper}
 * @param size Full size of the hfile file
 * @param cacheConf
 * @param r original reference file (contains top or bottom)
 * @param conf Configuration
 * @throws IOException
 */
public HalfStoreFileReader(final FileSystem fs, final Path p, final FSDataInputStreamWrapper in,
    long size, final CacheConfig cacheConf,  final Reference r, final Configuration conf)
    throws IOException {
  super(fs, p, in, size, cacheConf, conf);
  // This is not the actual midkey for this half-file; it's just the border
  // around which we split top and bottom. We have to look in the files to find
  // the actual last and first keys for the bottom and top halves. Half-files
  // don't have an actual midkey themselves; no midkey is how we indicate a file
  // is not splittable.
  this.splitkey = r.getSplitKey();
  this.splitCell = new KeyValue.KeyOnlyKeyValue(this.splitkey, 0, this.splitkey.length);
  // Is it top or bottom half?
  this.top = Reference.isTopFileRegion(r.getFileRegion());
}
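
A hedged usage sketch combining the constructor above with the Reference pattern shown in TestIndexHalfStoreFileReader earlier on this page; the midkey and the fs/p/conf variables are illustrative, and newer HBase versions would obtain the Reference via Reference.createTopReference instead:

Reference top = new Reference(Bytes.toBytes("005"), Reference.Range.top); // illustrative midkey
HalfStoreFileReader halfReader =
    new HalfStoreFileReader(fs, p, new CacheConfig(conf), top, conf);
halfReader.loadFileInfo();
HFileScanner scanner = halfReader.getScanner(false, false);
if (scanner.seekTo()) {
  do {
    Cell cell = scanner.getKeyValue();
    // for a top reference, only cells at or after the split key are returned
  } while (scanner.next());
}
halfReader.close(true);
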
Project: ditb    File: TestImportTsv.java
/**
 * Method returns the total KVs in given hfile
 * @param fs File System
 * @param p HFile path
 * @return KV count in the given hfile
 * @throws IOException
 */
private static int getKVCountFromHfile(FileSystem fs, Path p) throws IOException {
  Configuration conf = util.getConfiguration();
  HFile.Reader reader = HFile.createReader(fs, p, new CacheConfig(conf), conf);
  reader.loadFileInfo();
  HFileScanner scanner = reader.getScanner(false, false);
  scanner.seekTo();
  int count = 0;
  do {
    count++;
  } while (scanner.next());
  reader.close();
  return count;
}
Project: ditb    File: TestImportTSVWithVisibilityLabels.java
/**
 * Method returns the total KVs in given hfile
 * @param fs File System
 * @param p HFile path
 * @return KV count in the given hfile
 * @throws IOException
 */
private static int getKVCountFromHfile(FileSystem fs, Path p) throws IOException {
  Configuration conf = util.getConfiguration();
  HFile.Reader reader = HFile.createReader(fs, p, new CacheConfig(conf), conf);
  reader.loadFileInfo();
  HFileScanner scanner = reader.getScanner(false, false);
  scanner.seekTo();
  int count = 0;
  do {
    count++;
  } while (scanner.next());
  reader.close();
  return count;
}