Example source code for the Java class org.apache.hadoop.hbase.io.compress.Compression
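
The snippets collected below all follow the same pattern: obtain a Compression.Algorithm value (directly from the enum or by its lower-case codec name) and hand it to a column-family descriptor, an HFileContext, or a test fixture. As a quick orientation, here is a minimal sketch of that pattern; it assumes the classic HColumnDescriptor API used throughout the examples, and CompressionUsageSketch/gzipFamily are illustrative names, not part of any project listed here.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.io.compress.Compression;

public class CompressionUsageSketch {

    /**
     * Build a column-family descriptor whose HFiles are compressed with GZ,
     * both for regular flushes and for compactions.
     */
    public static HColumnDescriptor gzipFamily(String familyName) {
        // getCompressionAlgorithmByName matches the lower-case codec name of the
        // Algorithm enum ("none", "gz", "lzo", "snappy", "lz4"), which is why the
        // callers in the snippets below lowercase their input before calling it.
        Compression.Algorithm algo = Compression.getCompressionAlgorithmByName("gz");
        return new HColumnDescriptor(familyName)
                .setCompressionType(algo)               // compression applied when writing HFiles
                .setCompactionCompressionType(algo);    // compression applied during compactions
    }
}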

Project: mumu-hbase    File: HBaseTableOperation.java
/**
 * Add a column family to an existing table.
 *
 * @param tableName  table name
 * @param familyName column family name
 */
public void addColumn(String tableName, String familyName) {
    HBaseConfiguration hBaseConfiguration = new HBaseConfiguration();
    Admin admin = hBaseConfiguration.admin();
    TableName tb = TableName.valueOf(tableName);
    try {
        if (admin.tableExists(tb)) {
            HColumnDescriptor columnDescriptor = new HColumnDescriptor(familyName);

            columnDescriptor.setMaxVersions(1); // maximum number of versions kept for this family
            columnDescriptor.setCompressionType(Compression.Algorithm.GZ); // compression used when writing HFiles
            columnDescriptor.setCompactionCompressionType(Compression.Algorithm.GZ); // compression used during compactions

            admin.addColumn(tb, columnDescriptor);
        } else {
            log.info("表名【" + tableName + "】不存在");
        }
    } catch (IOException e) {
        log.error(e);
    } finally {
        hBaseConfiguration.close();
    }
}
Project: ditb    File: ThriftUtilities.java
/**
 * This utility method creates a new HBase HColumnDescriptor object based on a
 * Thrift ColumnDescriptor "struct".
 *
 * @param in
 *          Thrift ColumnDescriptor object
 * @return HColumnDescriptor
 * @throws IllegalArgument
 */
static public HColumnDescriptor colDescFromThrift(ColumnDescriptor in)
    throws IllegalArgument {
  Compression.Algorithm comp =
    Compression.getCompressionAlgorithmByName(in.compression.toLowerCase());
  BloomType bt =
    BloomType.valueOf(in.bloomFilterType);

  if (in.name == null || !in.name.hasRemaining()) {
    throw new IllegalArgument("column name is empty");
  }
  byte [] parsedName = KeyValue.parseColumn(Bytes.getBytes(in.name))[0];
  HColumnDescriptor col = new HColumnDescriptor(parsedName)
      .setMaxVersions(in.maxVersions)
      .setCompressionType(comp)
      .setInMemory(in.inMemory)
      .setBlockCacheEnabled(in.blockCacheEnabled)
      .setTimeToLive(in.timeToLive > 0 ? in.timeToLive : Integer.MAX_VALUE)
      .setBloomFilterType(bt);
  return col;
}
Project: ditb    File: CompressionTest.java
public static boolean testCompression(String codec) {
  codec = codec.toLowerCase();

  Compression.Algorithm a;

  try {
    a = Compression.getCompressionAlgorithmByName(codec);
  } catch (IllegalArgumentException e) {
    LOG.warn("Codec type: " + codec + " is not known");
    return false;
  }

  try {
    testCompression(a);
    return true;
  } catch (IOException ignored) {
    LOG.warn("Can't instantiate codec: " + codec, ignored);
    return false;
  }
}
Project: ditb    File: CompressionTest.java
public static void testCompression(Compression.Algorithm algo)
    throws IOException {
  if (compressionTestResults[algo.ordinal()] != null) {
    if (compressionTestResults[algo.ordinal()]) {
      return ; // already passed test, don't do it again.
    } else {
      // failed.
      throw new DoNotRetryIOException("Compression algorithm '" + algo.getName() + "'" +
      " previously failed test.");
    }
  }

  try {
    Compressor c = algo.getCompressor();
    algo.returnCompressor(c);
    compressionTestResults[algo.ordinal()] = true; // passes
  } catch (Throwable t) {
    compressionTestResults[algo.ordinal()] = false; // failure
    throw new DoNotRetryIOException(t);
  }
}
Project: ditb    File: FixedFileTrailer.java
/**
 * Deserialize the file trailer as writable data
 * @param input
 * @throws IOException
 */
void deserializeFromWritable(DataInput input) throws IOException {
  fileInfoOffset = input.readLong();
  loadOnOpenDataOffset = input.readLong();
  dataIndexCount = input.readInt();
  uncompressedDataIndexSize = input.readLong();
  metaIndexCount = input.readInt();

  totalUncompressedBytes = input.readLong();
  entryCount = input.readLong();
  compressionCodec = Compression.Algorithm.values()[input.readInt()];
  numDataIndexLevels = input.readInt();
  firstDataBlockOffset = input.readLong();
  lastDataBlockOffset = input.readLong();
  // TODO this is a classname encoded into an  HFile's trailer. We are going to need to have 
  // some compat code here.
  setComparatorClass(getComparatorClass(Bytes.readStringFixedSize(input,
      MAX_COMPARATOR_NAME_LENGTH)));
}
Project: ditb    File: TestHFileOutputFormat2.java
/**
 * Test for {@link HFileOutputFormat2#configureCompression(org.apache.hadoop.hbase.client.Table,
 * Configuration)} and {@link HFileOutputFormat2#createFamilyCompressionMap
 * (Configuration)}.
 * Tests that the compression map is correctly serialized into
 * and deserialized from configuration
 *
 * @throws IOException
 */
@Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test
public void testSerializeDeserializeFamilyCompressionMap() throws IOException {
  for (int numCfs = 0; numCfs <= 3; numCfs++) {
    Configuration conf = new Configuration(this.util.getConfiguration());
    Map<String, Compression.Algorithm> familyToCompression =
        getMockColumnFamiliesForCompression(numCfs);
    Table table = Mockito.mock(HTable.class);
    setupMockColumnFamiliesForCompression(table, familyToCompression);
    HFileOutputFormat2.configureCompression(conf, table.getTableDescriptor());

    // read back family specific compression setting from the configuration
    Map<byte[], Algorithm> retrievedFamilyToCompressionMap = HFileOutputFormat2
        .createFamilyCompressionMap(conf);

    // test that we have a value for all column families that matches with the
    // used mock values
    for (Entry<String, Algorithm> entry : familyToCompression.entrySet()) {
      assertEquals("Compression configuration incorrect for column family:"
          + entry.getKey(), entry.getValue(),
          retrievedFamilyToCompressionMap.get(entry.getKey().getBytes()));
    }
  }
}
Project: ditb    File: TestHFileOutputFormat.java
/**
 * Test for {@link HFileOutputFormat#configureCompression(org.apache.hadoop.hbase.client.Table,
 * Configuration)} and {@link HFileOutputFormat#createFamilyCompressionMap
 * (Configuration)}.
 * Tests that the compression map is correctly serialized into
 * and deserialized from configuration
 *
 * @throws IOException
 */
@Test
public void testSerializeDeserializeFamilyCompressionMap() throws IOException {
  for (int numCfs = 0; numCfs <= 3; numCfs++) {
    Configuration conf = new Configuration(this.util.getConfiguration());
    Map<String, Compression.Algorithm> familyToCompression =
        getMockColumnFamiliesForCompression(numCfs);
    Table table = Mockito.mock(HTable.class);
    setupMockColumnFamiliesForCompression(table, familyToCompression);
    HFileOutputFormat.configureCompression(table, conf);

    // read back family specific compression setting from the configuration
    Map<byte[], Algorithm> retrievedFamilyToCompressionMap = HFileOutputFormat
        .createFamilyCompressionMap(conf);

    // test that we have a value for all column families that matches with the
    // used mock values
    for (Entry<String, Algorithm> entry : familyToCompression.entrySet()) {
      assertEquals("Compression configuration incorrect for column family:"
          + entry.getKey(), entry.getValue(),
          retrievedFamilyToCompressionMap.get(entry.getKey().getBytes()));
    }
  }
}
Project: ditb    File: LoadTestTool.java
private void parseColumnFamilyOptions(CommandLine cmd) {
  String dataBlockEncodingStr = cmd.getOptionValue(OPT_DATA_BLOCK_ENCODING);
  dataBlockEncodingAlgo = dataBlockEncodingStr == null ? null :
      DataBlockEncoding.valueOf(dataBlockEncodingStr);

  String compressStr = cmd.getOptionValue(OPT_COMPRESSION);
  compressAlgo = compressStr == null ? Compression.Algorithm.NONE :
      Compression.Algorithm.valueOf(compressStr);

  String bloomStr = cmd.getOptionValue(OPT_BLOOM);
  bloomType = bloomStr == null ? BloomType.ROW :
      BloomType.valueOf(bloomStr);

  inMemoryCF = cmd.hasOption(OPT_INMEMORY);
  if (cmd.hasOption(OPT_ENCRYPTION)) {
    cipher = Encryption.getCipher(conf, cmd.getOptionValue(OPT_ENCRYPTION));
  }

}
Project: ditb    File: TestStore.java
/**
 * Verify that compression and data block encoding are respected by the
 * Store.createWriterInTmp() method, used on store flush.
 */
@Test
public void testCreateWriter() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  FileSystem fs = FileSystem.get(conf);

  HColumnDescriptor hcd = new HColumnDescriptor(family);
  hcd.setCompressionType(Compression.Algorithm.GZ);
  hcd.setDataBlockEncoding(DataBlockEncoding.DIFF);
  init(name.getMethodName(), conf, hcd);

  // Test createWriterInTmp()
  StoreFile.Writer writer = store.createWriterInTmp(4, hcd.getCompression(), false, true, false);
  Path path = writer.getPath();
  writer.append(new KeyValue(row, family, qf1, Bytes.toBytes(1)));
  writer.append(new KeyValue(row, family, qf2, Bytes.toBytes(2)));
  writer.append(new KeyValue(row2, family, qf1, Bytes.toBytes(3)));
  writer.append(new KeyValue(row2, family, qf2, Bytes.toBytes(4)));
  writer.close();

  // Verify that compression and encoding settings are respected
  HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
  Assert.assertEquals(hcd.getCompressionType(), reader.getCompressionAlgorithm());
  Assert.assertEquals(hcd.getDataBlockEncoding(), reader.getDataBlockEncoding());
  reader.close();
}
Project: ditb    File: TestHFileBlock.java
static HFileBlock.Writer createTestV2Block(Compression.Algorithm algo,
    boolean includesMemstoreTS, boolean includesTag) throws IOException {
  final BlockType blockType = BlockType.DATA;
  HFileContext meta = new HFileContextBuilder()
                      .withCompression(algo)
                      .withIncludesMvcc(includesMemstoreTS)
                      .withIncludesTags(includesTag)
                      .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
                      .build();
  HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
  DataOutputStream dos = hbw.startWriting(blockType);
  writeTestBlockContents(dos);
  dos.flush();
  hbw.ensureBlockReady();
  assertEquals(1000 * 4, hbw.getUncompressedSizeWithoutHeader());
  hbw.release();
  return hbw;
}
Project: ditb    File: TestHFileBlock.java
static void assertBuffersEqual(ByteBuffer expectedBuffer,
    ByteBuffer actualBuffer, Compression.Algorithm compression,
    DataBlockEncoding encoding, boolean pread) {
  if (!actualBuffer.equals(expectedBuffer)) {
    int prefix = 0;
    int minLimit = Math.min(expectedBuffer.limit(), actualBuffer.limit());
    while (prefix < minLimit &&
        expectedBuffer.get(prefix) == actualBuffer.get(prefix)) {
      prefix++;
    }

    fail(String.format(
        "Content mismatch for %s, commonPrefix %d, expected %s, got %s",
        buildMessageDetails(compression, encoding, pread), prefix,
        nextBytesToStr(expectedBuffer, prefix),
        nextBytesToStr(actualBuffer, prefix)));
  }
}
Project: ditb    File: TestHFile.java
public void testNullMetaBlocks() throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  for (Compression.Algorithm compressAlgo : 
      HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
    Path mFile = new Path(ROOT_DIR, "nometa_" + compressAlgo + ".hfile");
    FSDataOutputStream fout = createFSOutput(mFile);
    HFileContext meta = new HFileContextBuilder().withCompression(compressAlgo)
                        .withBlockSize(minBlockSize).build();
    Writer writer = HFile.getWriterFactory(conf, cacheConf)
        .withOutputStream(fout)
        .withFileContext(meta)
        .create();
    KeyValue kv = new KeyValue("foo".getBytes(), "f1".getBytes(), null, "value".getBytes());
    writer.append(kv);
    writer.close();
    fout.close();
    Reader reader = HFile.createReader(fs, mFile, cacheConf, conf);
    reader.loadFileInfo();
    assertNull(reader.getMetaBlock("non-existant", false));
  }
}
Project: ditb    File: TestSeekToBlockWithEncoders.java
private void seekToTheKey(KeyValue expected, List<KeyValue> kvs, KeyValue toSeek)
    throws IOException {
  // create all seekers
  List<DataBlockEncoder.EncodedSeeker> encodedSeekers = new ArrayList<DataBlockEncoder.EncodedSeeker>();
  for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
    if (encoding.getEncoder() == null || encoding == DataBlockEncoding.PREFIX_TREE) {
      continue;
    }

    DataBlockEncoder encoder = encoding.getEncoder();
    HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(false)
        .withIncludesMvcc(false).withIncludesTags(false)
        .withCompression(Compression.Algorithm.NONE).build();
    HFileBlockEncodingContext encodingContext = encoder.newDataBlockEncodingContext(encoding,
        HConstants.HFILEBLOCK_DUMMY_HEADER, meta);
    ByteBuffer encodedBuffer = TestDataBlockEncoders.encodeKeyValues(encoding, kvs,
        encodingContext);
    DataBlockEncoder.EncodedSeeker seeker = encoder.createSeeker(KeyValue.COMPARATOR,
        encoder.newDataBlockDecodingContext(meta));
    seeker.setCurrentBuffer(encodedBuffer);
    encodedSeekers.add(seeker);
  }
  // test it!
  // try a few random seeks
  checkSeekingConsistency(encodedSeekers, toSeek, expected);
}
Project: ditb    File: TestDataBlockEncoders.java
private HFileBlockEncodingContext getEncodingContext(Compression.Algorithm algo,
    DataBlockEncoding encoding) {
  DataBlockEncoder encoder = encoding.getEncoder();
  HFileContext meta = new HFileContextBuilder()
                      .withHBaseCheckSum(false)
                      .withIncludesMvcc(includesMemstoreTS)
                      .withIncludesTags(includesTags)
                      .withCompression(algo).build();
  if (encoder != null) {
    return encoder.newDataBlockEncodingContext(encoding,
        HConstants.HFILEBLOCK_DUMMY_HEADER, meta);
  } else {
    return new HFileBlockDefaultEncodingContext(encoding,
        HConstants.HFILEBLOCK_DUMMY_HEADER, meta);
  }
}
Project: ditb    File: TestDataBlockEncoders.java
private void testAlgorithm(byte[] encodedData, ByteBuffer unencodedDataBuf,
    DataBlockEncoder encoder) throws IOException {
  // decode
  ByteArrayInputStream bais = new ByteArrayInputStream(encodedData, ENCODED_DATA_OFFSET,
      encodedData.length - ENCODED_DATA_OFFSET);
  DataInputStream dis = new DataInputStream(bais);
  ByteBuffer actualDataset;
  HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(false)
      .withIncludesMvcc(includesMemstoreTS).withIncludesTags(includesTags)
      .withCompression(Compression.Algorithm.NONE).build();
  actualDataset = encoder.decodeKeyValues(dis, encoder.newDataBlockDecodingContext(meta));
  actualDataset.rewind();

  // This is because, in the case of the prefix tree encoding, the decoded
  // stream will not have the mvcc in it.
  assertEquals("Encoding -> decoding gives different results for " + encoder,
      Bytes.toStringBinary(unencodedDataBuf), Bytes.toStringBinary(actualDataset));
}
Project: ditb    File: HFileContext.java
public HFileContext(boolean useHBaseChecksum, boolean includesMvcc, boolean includesTags,
    Compression.Algorithm compressAlgo, boolean compressTags, ChecksumType checksumType,
    int bytesPerChecksum, int blockSize, DataBlockEncoding encoding,
    Encryption.Context cryptoContext, long fileCreateTime) {
  this.usesHBaseChecksum = useHBaseChecksum;
  this.includesMvcc =  includesMvcc;
  this.includesTags = includesTags;
  this.compressAlgo = compressAlgo;
  this.compressTags = compressTags;
  this.checksumType = checksumType;
  this.bytesPerChecksum = bytesPerChecksum;
  this.blocksize = blockSize;
  if (encoding != null) {
    this.encoding = encoding;
  }
  this.cryptoContext = cryptoContext;
  this.fileCreateTime = fileCreateTime;
}
Project: stroom-timeline    File: TimelineTable.java
@Override
HTableDescriptor getTableDesctptor() {
    HColumnDescriptor metaFamily = new HColumnDescriptor(COL_FAMILY_META);

    //compress the content family as each cell value can be quite large
    //and should compress well
    //TODO currently set to GZ compression as SNAPPY does not seem to be supported
    //by the HBaseTestingUtility. Need to see if we want to change to SNAPPY (quicker but
    //doesn't compress as well as GZ) and, if so, how we can get it working in JUnit tests
    //or test for its presence.
    HColumnDescriptor contentFamily = new HColumnDescriptor(COL_FAMILY_CONTENT)
            .setCompressionType(Compression.Algorithm.GZ);

    timeline.getRetention().ifPresent(retention -> {
        metaFamily.setTimeToLive(Math.toIntExact(retention.getSeconds()));
        contentFamily.setTimeToLive(Math.toIntExact(retention.getSeconds()));
    });

    HTableDescriptor tableDescriptor = new HTableDescriptor(tableName)
            .addFamily(metaFamily)
            .addFamily(contentFamily);

    return tableDescriptor;
}
Project: Megh    File: HadoopFilePerformanceTest.java
@Test
public void testHFileWriteGZ() throws Exception
{
  Path file = Testfile.HFILE_GZ.filepath();
  logger.info("Writing {} with {} key/value pairs", file, String.format("%,d", testSize));

  startTimer();
  writeHFile(file, Compression.Algorithm.GZ);
  logger.info("Duration: {}",  stopTimer(Testfile.HFILE_GZ, "WRITE"));

  Assert.assertTrue(hdfs.exists(file));
  ContentSummary fileInfo = hdfs.getContentSummary(file);
  logger.debug("Space consumed: {} bytes in {} files",
      String.format("%,d", fileInfo.getSpaceConsumed()),
      String.format("%,d", fileInfo.getFileCount()));
}
Project: Megh    File: HadoopFilePerformanceTest.java
private void readHFileSeq(Path file, Compression.Algorithm compression) throws Exception
{

  CacheConfig cacheConf = new CacheConfig(conf);
  HFile.Reader reader = HFile.createReader(hdfs, file, cacheConf, conf);
  HFileScanner scanner = reader.getScanner(true, true, false);

  scanner.seekTo();

  @SuppressWarnings("unused")
  KeyValue kv = null;
  while (scanner.next()) {
    kv = scanner.getKeyValue();
    //logger.debug("key: {} value: {}", new String (kv.getKey()), new String (kv.getValue()));
  }

}
Project: Megh    File: HadoopFilePerformanceTest.java
private void readHFileSeqId(Path file, Compression.Algorithm compression) throws Exception
{
  CacheConfig cacheConf = new CacheConfig(conf);
  HFile.Reader reader = HFile.createReader(hdfs, file, cacheConf, conf);
  HFileScanner scanner = reader.getScanner(true, true, false);

  @SuppressWarnings("unused")
  KeyValue kv = null;
  scanner.seekTo();

  for (int i = 0; i < testSize; i++) {
    scanner.seekTo(getKey(i).getBytes());
    kv = scanner.getKeyValue();
    //logger.debug("key: {} value: {}", new String (kv.getKey()), new String (kv.getValue()));
  }
}
Project: Megh    File: HadoopFilePerformanceTest.java
private void readHFileRandom(Path file, Compression.Algorithm compression) throws Exception
{
  CacheConfig cacheConf = new CacheConfig(conf);
  HFile.Reader reader = HFile.createReader(hdfs, file, cacheConf, conf);
  HFileScanner scanner = reader.getScanner(true, true, false);

  @SuppressWarnings("unused")
  KeyValue kv = null;
  scanner.seekTo();
  Random random = new Random();
  for (int i = 0; i < testSize; i++) {
    scanner.seekTo();
    scanner.seekTo(getKey(random.nextInt(testSize)).getBytes());
    kv = scanner.getKeyValue();
    //logger.debug("key: {} value: {}", new String (kv.getKey()), new String (kv.getValue()));
  }
}
Project: Megh    File: HadoopFilePerformanceTest.java
@Test
public void testHFileRead() throws Exception
{

  Path file = Testfile.HFILE.filepath();
  Compression.Algorithm compression = Compression.Algorithm.NONE;
  logger.info("Reading {} with {} key/value pairs", file, String.format("%,d", testSize));
  writeHFile(file, compression);

  startTimer();
  readHFileSeq(file, compression);
  logger.info("Duration for scanner.next() SEQUENTIAL keys: {}", stopTimer(Testfile.HFILE, "READ-SEQ"));

  startTimer();
  readHFileSeqId(file, compression);
  logger.info("Duration for scanner.seekTo(key) SEQUENTIAL keys: {}", stopTimer(Testfile.HFILE, "READ-SEQ-ID"));

  startTimer();
  readHFileRandom(file, compression);
  logger.info("Duration for scanner.seekTo(key) RANDOM keys: {}", stopTimer(Testfile.HFILE, "READ-RAND"));

}
Project: cloud-bigtable-client    File: TestColumnDescriptorAdapter.java
@Test
public void ignoredOptionsAreIgnored() {
  // We're really checking to make certain we don't trigger an exception for an ignored option:
  descriptor.setCompressionType(Compression.Algorithm.LZ4);
  descriptor.setCompactionCompressionType(Compression.Algorithm.LZ4);
  descriptor.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
  descriptor.setBlockCacheEnabled(false);
  descriptor.setCacheDataOnWrite(true);
  descriptor.setCacheDataInL1(true);
  descriptor.setEvictBlocksOnClose(false);
  descriptor.setBloomFilterType(BloomType.ROW);
  descriptor.setPrefetchBlocksOnOpen(true);
  descriptor.setBlocksize(16 * 1024);
  descriptor.setScope(1); // REPLICATION_SCOPE
  descriptor.setInMemory(true);

  ColumnFamily.Builder result = adapter.adapt(descriptor)
      .clearName()
      .clearGcExpression();

  Assert.assertArrayEquals(
      new byte[0],
      result.build().toByteArray());
}
Project: pbase    File: CompressionTest.java
public static boolean testCompression(String codec) {
  codec = codec.toLowerCase();

  Compression.Algorithm a;

  try {
    a = Compression.getCompressionAlgorithmByName(codec);
  } catch (IllegalArgumentException e) {
    LOG.warn("Codec type: " + codec + " is not known");
    return false;
  }

  try {
    testCompression(a);
    return true;
  } catch (IOException ignored) {
    LOG.warn("Can't instantiate codec: " + codec, ignored);
    return false;
  }
}
Project: pbase    File: CompressionTest.java
public static void testCompression(Compression.Algorithm algo)
    throws IOException {
  if (compressionTestResults[algo.ordinal()] != null) {
    if (compressionTestResults[algo.ordinal()]) {
      return ; // already passed test, don't do it again.
    } else {
      // failed.
      throw new IOException("Compression algorithm '" + algo.getName() + "'" +
      " previously failed test.");
    }
  }

  try {
    Compressor c = algo.getCompressor();
    algo.returnCompressor(c);
    compressionTestResults[algo.ordinal()] = true; // passes
  } catch (Throwable t) {
    compressionTestResults[algo.ordinal()] = false; // failure
    throw new IOException(t);
  }
}
Project: HIndex    File: TestHFileOutputFormat2.java
/**
 * Test for {@link HFileOutputFormat2#configureCompression(HTable,
 * Configuration)} and {@link HFileOutputFormat2#createFamilyCompressionMap
 * (Configuration)}.
 * Tests that the compression map is correctly serialized into
 * and deserialized from configuration
 *
 * @throws IOException
 */
@Test
public void testSerializeDeserializeFamilyCompressionMap() throws IOException {
  for (int numCfs = 0; numCfs <= 3; numCfs++) {
    Configuration conf = new Configuration(this.util.getConfiguration());
    Map<String, Compression.Algorithm> familyToCompression =
        getMockColumnFamiliesForCompression(numCfs);
    HTable table = Mockito.mock(HTable.class);
    setupMockColumnFamiliesForCompression(table, familyToCompression);
    HFileOutputFormat2.configureCompression(table, conf);

    // read back family specific compression setting from the configuration
    Map<byte[], Algorithm> retrievedFamilyToCompressionMap = HFileOutputFormat2
        .createFamilyCompressionMap(conf);

    // test that we have a value for all column families that matches with the
    // used mock values
    for (Entry<String, Algorithm> entry : familyToCompression.entrySet()) {
      assertEquals("Compression configuration incorrect for column family:"
          + entry.getKey(), entry.getValue(),
          retrievedFamilyToCompressionMap.get(entry.getKey().getBytes()));
    }
  }
}
Project: pbase    File: HStore.java
private HFileContext createFileContext(Compression.Algorithm compression,
                                       boolean includeMVCCReadpoint, boolean includesTag, Encryption.Context cryptoContext) {
    if (compression == null) {
        compression = HFile.DEFAULT_COMPRESSION_ALGORITHM;
    }
    HFileContext hFileContext = new HFileContextBuilder()
            .withIncludesMvcc(includeMVCCReadpoint)
            .withIncludesTags(includesTag)
            .withCompression(compression)
            .withCompressTags(family.isCompressTags())
            .withChecksumType(checksumType)
            .withBytesPerCheckSum(bytesPerChecksum)
            .withBlockSize(blocksize)
            .withHBaseCheckSum(true)
            .withDataBlockEncoding(family.getDataBlockEncoding())
            .withEncryptionContext(cryptoContext)
            .build();
    return hFileContext;
}
Project: pbase    File: FixedFileTrailer.java
/**
 * Deserialize the file trailer as writable data
 * @param input
 * @throws IOException
 */
void deserializeFromWritable(DataInput input) throws IOException {
  fileInfoOffset = input.readLong();
  loadOnOpenDataOffset = input.readLong();
  dataIndexCount = input.readInt();
  uncompressedDataIndexSize = input.readLong();
  metaIndexCount = input.readInt();

  totalUncompressedBytes = input.readLong();
  entryCount = input.readLong();
  compressionCodec = Compression.Algorithm.values()[input.readInt()];
  numDataIndexLevels = input.readInt();
  firstDataBlockOffset = input.readLong();
  lastDataBlockOffset = input.readLong();
  // TODO this is a classname encoded into an  HFile's trailer. We are going to need to have 
  // some compat code here.
  setComparatorClass(getComparatorClass(Bytes.readStringFixedSize(input,
      MAX_COMPARATOR_NAME_LENGTH)));
}
Project: HIndex    File: TestHFileBlock.java
static void assertBuffersEqual(ByteBuffer expectedBuffer,
    ByteBuffer actualBuffer, Compression.Algorithm compression,
    DataBlockEncoding encoding, boolean pread) {
  if (!actualBuffer.equals(expectedBuffer)) {
    int prefix = 0;
    int minLimit = Math.min(expectedBuffer.limit(), actualBuffer.limit());
    while (prefix < minLimit &&
        expectedBuffer.get(prefix) == actualBuffer.get(prefix)) {
      prefix++;
    }

    fail(String.format(
        "Content mismath for compression %s, encoding %s, " +
        "pread %s, commonPrefix %d, expected %s, got %s",
        compression, encoding, pread, prefix,
        nextBytesToStr(expectedBuffer, prefix),
        nextBytesToStr(actualBuffer, prefix)));
  }
}
Project: pbase    File: TestHFileOutputFormat2.java
/**
 * Test for {@link HFileOutputFormat2#configureCompression(org.apache.hadoop.hbase.client.Table,
 * Configuration)} and {@link HFileOutputFormat2#createFamilyCompressionMap
 * (Configuration)}.
 * Tests that the compression map is correctly serialized into
 * and deserialized from configuration
 *
 * @throws IOException
 */
@Test
public void testSerializeDeserializeFamilyCompressionMap() throws IOException {
  for (int numCfs = 0; numCfs <= 3; numCfs++) {
    Configuration conf = new Configuration(this.util.getConfiguration());
    Map<String, Compression.Algorithm> familyToCompression =
        getMockColumnFamiliesForCompression(numCfs);
    Table table = Mockito.mock(HTable.class);
    setupMockColumnFamiliesForCompression(table, familyToCompression);
    HFileOutputFormat2.configureCompression(table, conf);

    // read back family specific compression setting from the configuration
    Map<byte[], Algorithm> retrievedFamilyToCompressionMap = HFileOutputFormat2
        .createFamilyCompressionMap(conf);

    // test that we have a value for all column families that matches with the
    // used mock values
    for (Entry<String, Algorithm> entry : familyToCompression.entrySet()) {
      assertEquals("Compression configuration incorrect for column family:"
          + entry.getKey(), entry.getValue(),
          retrievedFamilyToCompressionMap.get(entry.getKey().getBytes()));
    }
  }
}
Project: pbase    File: TestHFileOutputFormat.java
/**
 * Test for {@link HFileOutputFormat#configureCompression(org.apache.hadoop.hbase.client.Table,
 * Configuration)} and {@link HFileOutputFormat#createFamilyCompressionMap
 * (Configuration)}.
 * Tests that the compression map is correctly serialized into
 * and deserialized from configuration
 *
 * @throws IOException
 */
@Test
public void testSerializeDeserializeFamilyCompressionMap() throws IOException {
  for (int numCfs = 0; numCfs <= 3; numCfs++) {
    Configuration conf = new Configuration(this.util.getConfiguration());
    Map<String, Compression.Algorithm> familyToCompression =
        getMockColumnFamiliesForCompression(numCfs);
    Table table = Mockito.mock(HTable.class);
    setupMockColumnFamiliesForCompression(table, familyToCompression);
    HFileOutputFormat.configureCompression(table, conf);

    // read back family specific compression setting from the configuration
    Map<byte[], Algorithm> retrievedFamilyToCompressionMap = HFileOutputFormat
        .createFamilyCompressionMap(conf);

    // test that we have a value for all column families that matches with the
    // used mock values
    for (Entry<String, Algorithm> entry : familyToCompression.entrySet()) {
      assertEquals("Compression configuration incorrect for column family:"
          + entry.getKey(), entry.getValue(),
          retrievedFamilyToCompressionMap.get(entry.getKey().getBytes()));
    }
  }
}
Project: HIndex    File: FixedFileTrailer.java
/**
 * Deserialize the file trailer as writable data
 * @param input
 * @throws IOException
 */
void deserializeFromWritable(DataInput input) throws IOException {
  fileInfoOffset = input.readLong();
  loadOnOpenDataOffset = input.readLong();
  dataIndexCount = input.readInt();
  uncompressedDataIndexSize = input.readLong();
  metaIndexCount = input.readInt();

  totalUncompressedBytes = input.readLong();
  entryCount = input.readLong();
  compressionCodec = Compression.Algorithm.values()[input.readInt()];
  numDataIndexLevels = input.readInt();
  firstDataBlockOffset = input.readLong();
  lastDataBlockOffset = input.readLong();
  // TODO this is a classname encoded into an  HFile's trailer. We are going to need to have 
  // some compat code here.
  setComparatorClass(getComparatorClass(Bytes.readStringFixedSize(input,
      MAX_COMPARATOR_NAME_LENGTH)));
}
Project: pbase    File: TestHFileBlock.java
static void assertBuffersEqual(ByteBuffer expectedBuffer,
    ByteBuffer actualBuffer, Compression.Algorithm compression,
    DataBlockEncoding encoding, boolean pread) {
  if (!actualBuffer.equals(expectedBuffer)) {
    int prefix = 0;
    int minLimit = Math.min(expectedBuffer.limit(), actualBuffer.limit());
    while (prefix < minLimit &&
        expectedBuffer.get(prefix) == actualBuffer.get(prefix)) {
      prefix++;
    }

    fail(String.format(
        "Content mismatch for %s, commonPrefix %d, expected %s, got %s",
        buildMessageDetails(compression, encoding, pread), prefix,
        nextBytesToStr(expectedBuffer, prefix),
        nextBytesToStr(actualBuffer, prefix)));
  }
}
Project: pbase    File: TestCacheOnWrite.java
@Parameters
public static Collection<Object[]> getParameters() throws IOException {
  List<Object[]> cowTypes = new ArrayList<Object[]>();
  for (BlockCache blockache : getBlockCaches()) {
    for (CacheOnWriteType cowType : CacheOnWriteType.values()) {
      for (Compression.Algorithm compress : HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
        for (BlockEncoderTestType encoderType : BlockEncoderTestType.values()) {
          for (boolean cacheCompressedData : new boolean[] { false, true }) {
            cowTypes.add(new Object[] { cowType, compress, encoderType, cacheCompressedData, blockache});
          }
        }
      }
    }
  }
  return cowTypes;
}