Usage examples for the Java class org.apache.hadoop.hbase.io.hfile.Compression.Algorithm
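The snippets below are collected from open-source projects and show how the class is used in practice. As a quick orientation before the project code, here is a minimal sketch (not taken from any of the projects below) of the two name-based lookups these snippets rely on, assuming the HBase 0.94-era API they all target:

import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm;

public class CompressionLookupSketch {
  public static void main(String[] args) {
    // Enum-constant lookup: valueOf takes the constant name ("NONE", "GZ",
    // "LZO", "SNAPPY"), as the HFileSortedOplogWriter snippet below does.
    Algorithm byConstant = Algorithm.valueOf("GZ");

    // Codec-name lookup: takes the lower-case name returned by getName(),
    // as getSupportedCompressionAlgorithms() further down this page does.
    Algorithm byCodecName = Compression.getCompressionAlgorithmByName("gz");

    System.out.println(byConstant.getName() + " == " + byCodecName.getName());
  }
}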

Project: gemfirexd-oss    File: HFileSortedOplog.java
public HFileSortedOplogWriter(int keys) throws IOException {
  try {
    int hfileBlockSize = Integer.getInteger(
        HoplogConfig.HFILE_BLOCK_SIZE_CONF, (1 << 16));

    Algorithm compress = Algorithm.valueOf(System.getProperty(HoplogConfig.COMPRESSION,
        HoplogConfig.COMPRESSION_DEFAULT));

//    ByteComparator bc = new ByteComparator();
    writer = HFile.getWriterFactory(conf, cacheConf)
        .withPath(fsProvider.getFS(), path)
        .withBlockSize(hfileBlockSize)
//        .withComparator(bc)
        .withCompression(compress)
        .create();
    bfw = BloomFilterFactory.createGeneralBloomAtWrite(conf, cacheConf, BloomType.ROW, keys,
        writer);

    logger.fine("Created hoplog writer with compression " + compress);
  } catch (IOException e) {
    logger.fine("IO Error while creating writer");
    throw e;
  }
}
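Note that Algorithm.valueOf throws IllegalArgumentException when the HoplogConfig.COMPRESSION system property holds anything but an exact enum constant name. A more defensive lookup might look like the sketch below (resolveCompression is a hypothetical helper, not part of gemfirexd-oss):

// Hypothetical helper: normalize the configured name and fall back to NONE.
static Algorithm resolveCompression(String configuredName) {
  if (configuredName == null) {
    return Algorithm.NONE;
  }
  try {
    // Constant names are upper-case: NONE, GZ, LZO, SNAPPY.
    return Algorithm.valueOf(configuredName.trim().toUpperCase());
  } catch (IllegalArgumentException e) {
    // Unknown or misspelled name: write uncompressed.
    return Algorithm.NONE;
  }
}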
Project: LCIndex-HBase-0.94.16    File: HFileBlock.java
public FSReaderV2(FSDataInputStream istream, FSDataInputStream istreamNoFsChecksum,
    Algorithm compressAlgo, long fileSize, int minorVersion, HFileSystem hfs, Path path)
    throws IOException {
  super(istream, istreamNoFsChecksum, compressAlgo, fileSize, minorVersion, hfs, path);

  if (hfs != null) {
    // Check the configuration to determine whether hbase-level
    // checksum verification is needed or not.
    useHBaseChecksum = hfs.useHBaseChecksum();
  } else {
    // The configuration does not specify anything about hbase checksum
    // validations. Set it to true here assuming that we will verify
    // hbase checksums for all reads. For older files that do not have
    // stored checksums, this flag will be reset later.
    useHBaseChecksum = true;
  }

  // for older versions, hbase did not store checksums.
  if (getMinorVersion() < MINOR_VERSION_WITH_CHECKSUM) {
    useHBaseChecksum = false;
  }
  this.useHBaseChecksumConfigured = useHBaseChecksum;
}
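The checksum handling in this constructor reduces to a single decision, restated here as a hypothetical standalone helper (not part of the HBase source) for readability:

// Use HBase-level checksums only if the file is new enough to store them
// and the filesystem (or the unconfigured default) asks for verification.
static boolean shouldUseHBaseChecksum(HFileSystem hfs, int minorVersion) {
  boolean use = (hfs != null) ? hfs.useHBaseChecksum() : true;
  if (minorVersion < MINOR_VERSION_WITH_CHECKSUM) {
    use = false; // pre-checksum files have nothing to verify against
  }
  return use;
}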
Project: LCIndex-HBase-0.94.16    File: TestHFileOutputFormat.java
/**
 * Test for
 * {@link HFileOutputFormat#createFamilyCompressionMap(Configuration)}. Tests
 * that the compression map is correctly deserialized from configuration
 *
 * @throws IOException
 */
@Test
public void testCreateFamilyCompressionMap() throws IOException {
  for (int numCfs = 0; numCfs <= 3; numCfs++) {
    Configuration conf = new Configuration(this.util.getConfiguration());
    Map<String, Compression.Algorithm> familyToCompression = getMockColumnFamilies(numCfs);
    HTable table = Mockito.mock(HTable.class);
    setupMockColumnFamilies(table, familyToCompression);
    HFileOutputFormat.configureCompression(table, conf);

    // read back family specific compression setting from the configuration
    Map<byte[], String> retrievedFamilyToCompressionMap = HFileOutputFormat.createFamilyCompressionMap(conf);

    // test that we have a value for all column families that matches with the
    // used mock values
    for (Entry<String, Algorithm> entry : familyToCompression.entrySet()) {
      assertEquals("Compression configuration incorrect for column family:" + entry.getKey(), entry.getValue()
                   .getName(), retrievedFamilyToCompressionMap.get(entry.getKey().getBytes()));
    }
  }
}
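Outside the test, the same two static calls form the round trip for per-family compression settings. A minimal usage sketch, assuming a live HTable named table and a hypothetical column family "cf":

// Serialize each family's compression algorithm into the job configuration...
HFileOutputFormat.configureCompression(table, conf);
// ...then, typically on the task side, read it back as a
// (family bytes -> codec name) map.
Map<byte[], String> perFamily = HFileOutputFormat.createFamilyCompressionMap(conf);
String codecName = perFamily.get(Bytes.toBytes("cf")); // e.g. "gz" or "none"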
Project: IRIndex    File: HFileBlock.java
public FSReaderV2(FSDataInputStream istream, 
    FSDataInputStream istreamNoFsChecksum, Algorithm compressAlgo,
    long fileSize, int minorVersion, HFileSystem hfs, Path path) 
  throws IOException {
  super(istream, istreamNoFsChecksum, compressAlgo, fileSize, 
        minorVersion, hfs, path);

  if (hfs != null) {
    // Check the configuration to determine whether hbase-level
    // checksum verification is needed or not.
    useHBaseChecksum = hfs.useHBaseChecksum();
  } else {
    // The configuration does not specify anything about hbase checksum
    // validations. Set it to true here assuming that we will verify
    // hbase checksums for all reads. For older files that do not have 
    // stored checksums, this flag will be reset later.
    useHBaseChecksum = true;
  }

  // for older versions, hbase did not store checksums.
  if (getMinorVersion() < MINOR_VERSION_WITH_CHECKSUM) {
    useHBaseChecksum = false;
  }
  this.useHBaseChecksumConfigured = useHBaseChecksum;
}
Project: IRIndex    File: TestHFileOutputFormat.java
/**
 * Test for
 * {@link HFileOutputFormat#createFamilyCompressionMap(Configuration)}. Tests
 * that the compression map is correctly deserialized from configuration
 *
 * @throws IOException
 */
@Test
public void testCreateFamilyCompressionMap() throws IOException {
  for (int numCfs = 0; numCfs <= 3; numCfs++) {
    Configuration conf = new Configuration(this.util.getConfiguration());
    Map<String, Compression.Algorithm> familyToCompression = getMockColumnFamilies(numCfs);
    HTable table = Mockito.mock(HTable.class);
    setupMockColumnFamilies(table, familyToCompression);
    HFileOutputFormat.configureCompression(table, conf);

    // read back family specific compression setting from the configuration
    Map<byte[], String> retrievedFamilyToCompressionMap = HFileOutputFormat.createFamilyCompressionMap(conf);

    // test that we have a value for all column families that matches with the
    // used mock values
    for (Entry<String, Algorithm> entry : familyToCompression.entrySet()) {
      assertEquals("Compression configuration incorrect for column family:" + entry.getKey(), entry.getValue()
                   .getName(), retrievedFamilyToCompressionMap.get(entry.getKey().getBytes()));
    }
  }
}
Project: hbase-benchmark    File: BenchmarkDriver.java
/**
 * Attempts to create the table used by this tool with the fixed configuration details
 *
 * @param admin The configured administration used to perform this operation
 */
private void createTable(final HBaseAdmin admin) {
    final String tableName = appConfig.getToolTable();

    try {
        if( !admin.tableExists(tableName) ) {
            HTableDescriptor tableDesc = new HTableDescriptor(tableName.getBytes(Charsets.UTF_8));

            HColumnDescriptor colDesc = new HColumnDescriptor(ConfigConstants.COLUMN_FAMILY);
            colDesc.setBlockCacheEnabled(true).setBlocksize(65536)
                    .setBloomFilterType(BloomType.ROW)
                    .setCompressionType(Algorithm.SNAPPY)
                    .setDataBlockEncoding(DataBlockEncoding.PREFIX)
                    .setMaxVersions(1);

            tableDesc.addFamily(colDesc);

            admin.createTable(tableDesc);
            log.info("Created table: " + tableName);
        } else {
            log.debug("Table already exists, creation skipped");
        }
    } catch (IOException e) {
        log.error("Error occurred during table creation", e);
    }
}
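Algorithm.SNAPPY depends on the native Snappy codec being present on every region server; if it is missing, writes to this table can fail at flush or compaction time. A hedged pre-flight check, reusing the getSupportedCompressionAlgorithms helper shown further down this page:

// Sketch: downgrade to NONE when Snappy is not loadable in this JVM.
Algorithm chosen = Algorithm.NONE;
for (Algorithm algo : HBaseTestingUtility.getSupportedCompressionAlgorithms()) {
  if (algo == Algorithm.SNAPPY) {
    chosen = Algorithm.SNAPPY;
  }
}
colDesc.setCompressionType(chosen);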
Project: HBase-Research    File: HFileBlock.java
public FSReaderV2(FSDataInputStream istream, 
    FSDataInputStream istreamNoFsChecksum, Algorithm compressAlgo,
    long fileSize, int minorVersion, HFileSystem hfs, Path path) 
  throws IOException {
  super(istream, istreamNoFsChecksum, compressAlgo, fileSize, 
        minorVersion, hfs, path);

  if (hfs != null) {
    // Check the configuration to determine whether hbase-level
    // checksum verification is needed or not.
    useHBaseChecksum = hfs.useHBaseChecksum();
  } else {
    // The configuration does not specify anything about hbase checksum
    // validations. Set it to true here assuming that we will verify
    // hbase checksums for all reads. For older files that do not have 
    // stored checksums, this flag will be reset later.
    useHBaseChecksum = true;
  }

  // for older versions, hbase did not store checksums.
  if (getMinorVersion() < MINOR_VERSION_WITH_CHECKSUM) {
    useHBaseChecksum = false;
  }
  this.useHBaseChecksumConfigured = useHBaseChecksum;
}
Project: HBase-Research    File: TestHFileOutputFormat.java
/**
 * Test for
 * {@link HFileOutputFormat#createFamilyCompressionMap(Configuration)}. Tests
 * that the compression map is correctly deserialized from configuration
 *
 * @throws IOException
 */
@Test
public void testCreateFamilyCompressionMap() throws IOException {
  for (int numCfs = 0; numCfs <= 3; numCfs++) {
    Configuration conf = new Configuration(this.util.getConfiguration());
    Map<String, Compression.Algorithm> familyToCompression = getMockColumnFamilies(numCfs);
    HTable table = Mockito.mock(HTable.class);
    setupMockColumnFamilies(table, familyToCompression);
    HFileOutputFormat.configureCompression(table, conf);

    // read back family specific compression setting from the configuration
    Map<byte[], String> retrievedFamilyToCompressionMap = HFileOutputFormat.createFamilyCompressionMap(conf);

    // test that we have a value for all column families that matches with the
    // used mock values
    for (Entry<String, Algorithm> entry : familyToCompression.entrySet()) {
      assertEquals("Compression configuration incorrect for column family:" + entry.getKey(), entry.getValue()
                   .getName(), retrievedFamilyToCompressionMap.get(entry.getKey().getBytes()));
    }
  }
}
Project: hbase-0.94.8-qod    File: HFileBlock.java
public FSReaderV2(FSDataInputStream istream, 
    FSDataInputStream istreamNoFsChecksum, Algorithm compressAlgo,
    long fileSize, int minorVersion, HFileSystem hfs, Path path) 
  throws IOException {
  super(istream, istreamNoFsChecksum, compressAlgo, fileSize, 
        minorVersion, hfs, path);

  if (hfs != null) {
    // Check the configuration to determine whether hbase-level
    // checksum verification is needed or not.
    useHBaseChecksum = hfs.useHBaseChecksum();
  } else {
    // The configuration does not specify anything about hbase checksum
    // validations. Set it to true here assuming that we will verify
    // hbase checksums for all reads. For older files that do not have 
    // stored checksums, this flag will be reset later.
    useHBaseChecksum = true;
  }

  // for older versions, hbase did not store checksums.
  if (getMinorVersion() < MINOR_VERSION_WITH_CHECKSUM) {
    useHBaseChecksum = false;
  }
  this.useHBaseChecksumConfigured = useHBaseChecksum;
}
Project: hbase-0.94.8-qod    File: TestHFileOutputFormat.java
/**
 * Test for
 * {@link HFileOutputFormat#createFamilyCompressionMap(Configuration)}. Tests
 * that the compression map is correctly deserialized from configuration
 *
 * @throws IOException
 */
@Test
public void testCreateFamilyCompressionMap() throws IOException {
  for (int numCfs = 0; numCfs <= 3; numCfs++) {
    Configuration conf = new Configuration(this.util.getConfiguration());
    Map<String, Compression.Algorithm> familyToCompression = getMockColumnFamilies(numCfs);
    HTable table = Mockito.mock(HTable.class);
    setupMockColumnFamilies(table, familyToCompression);
    HFileOutputFormat.configureCompression(table, conf);

    // read back family specific compression setting from the configuration
    Map<byte[], String> retrievedFamilyToCompressionMap = HFileOutputFormat.createFamilyCompressionMap(conf);

    // test that we have a value for all column families that matches with the
    // used mock values
    for (Entry<String, Algorithm> entry : familyToCompression.entrySet()) {
      assertEquals("Compression configuration incorrect for column family:" + entry.getKey(), entry.getValue()
                   .getName(), retrievedFamilyToCompressionMap.get(entry.getKey().getBytes()));
    }
  }
}
Project: hindex    File: HFileBlock.java
public FSReaderV2(FSDataInputStream istream, 
    FSDataInputStream istreamNoFsChecksum, Algorithm compressAlgo,
    long fileSize, int minorVersion, HFileSystem hfs, Path path) 
  throws IOException {
  super(istream, istreamNoFsChecksum, compressAlgo, fileSize, 
        minorVersion, hfs, path);

  if (hfs != null) {
    // Check the configuration to determine whether hbase-level
    // checksum verification is needed or not.
    useHBaseChecksum = hfs.useHBaseChecksum();
  } else {
    // The configuration does not specify anything about hbase checksum
    // validations. Set it to true here assuming that we will verify
    // hbase checksums for all reads. For older files that do not have 
    // stored checksums, this flag will be reset later.
    useHBaseChecksum = true;
  }

  // for older versions, hbase did not store checksums.
  if (getMinorVersion() < MINOR_VERSION_WITH_CHECKSUM) {
    useHBaseChecksum = false;
  }
  this.useHBaseChecksumConfigured = useHBaseChecksum;
}
Project: hindex    File: TestHFileOutputFormat.java
/**
 * Test for
 * {@link HFileOutputFormat#createFamilyCompressionMap(Configuration)}. Tests
 * that the compression map is correctly deserialized from configuration
 *
 * @throws IOException
 */
@Test
public void testCreateFamilyCompressionMap() throws IOException {
  for (int numCfs = 0; numCfs <= 3; numCfs++) {
    Configuration conf = new Configuration(this.util.getConfiguration());
    Map<String, Compression.Algorithm> familyToCompression = getMockColumnFamilies(numCfs);
    HTable table = Mockito.mock(HTable.class);
    setupMockColumnFamilies(table, familyToCompression);
    HFileOutputFormat.configureCompression(table, conf);

    // read back family specific compression setting from the configuration
    Map<byte[], String> retrievedFamilyToCompressionMap = HFileOutputFormat.createFamilyCompressionMap(conf);

    // test that we have a value for all column families that matches with the
    // used mock values
    for (Entry<String, Algorithm> entry : familyToCompression.entrySet()) {
      assertEquals("Compression configuration incorrect for column family:" + entry.getKey(), entry.getValue()
                   .getName(), retrievedFamilyToCompressionMap.get(entry.getKey().getBytes()));
    }
  }
}
Project: LCIndex-HBase-0.94.16    File: HFileWriterV1.java
@Override
public Writer createWriter(FileSystem fs, Path path,
    FSDataOutputStream ostream, int blockSize,
    Algorithm compressAlgo, HFileDataBlockEncoder dataBlockEncoder,
    KeyComparator comparator, final ChecksumType checksumType,
    final int bytesPerChecksum, boolean includeMVCCReadpoint) throws IOException {
  // version 1 does not implement checksums
  return new HFileWriterV1(conf, cacheConf, fs, path, ostream, blockSize,
      compressAlgo, dataBlockEncoder, comparator);
}
Project: LCIndex-HBase-0.94.16    File: HFileWriterV1.java
/** Constructor that takes a path, creates and closes the output stream. */
public HFileWriterV1(Configuration conf, CacheConfig cacheConf,
    FileSystem fs, Path path, FSDataOutputStream ostream,
    int blockSize, Compression.Algorithm compress,
    HFileDataBlockEncoder blockEncoder,
    final KeyComparator comparator) throws IOException {
  super(cacheConf, ostream == null ? createOutputStream(conf, fs, path) : ostream, path,
      blockSize, compress, blockEncoder, comparator);
  SchemaMetrics.configureGlobally(conf);
}
Project: LCIndex-HBase-0.94.16    File: HFileBlock.java
/**
 * @param compressionAlgorithm compression algorithm to use
 * @param dataBlockEncoder data block encoding strategy to use
 * @param includesMemstoreTS whether blocks include memstore timestamps
 * @param minorVersion HFile minor version being written
 * @param checksumType type of checksum
 * @param bytesPerChecksum bytes per checksum
 */
public Writer(Compression.Algorithm compressionAlgorithm,
    HFileDataBlockEncoder dataBlockEncoder, boolean includesMemstoreTS, int minorVersion,
    ChecksumType checksumType, int bytesPerChecksum) {
  this.minorVersion = minorVersion;
  compressAlgo = compressionAlgorithm == null ? NONE : compressionAlgorithm;
  this.dataBlockEncoder =
      dataBlockEncoder != null ? dataBlockEncoder : NoOpDataBlockEncoder.INSTANCE;

  baosInMemory = new ByteArrayOutputStream();
  if (compressAlgo != NONE) {
    compressor = compressionAlgorithm.getCompressor();
    compressedByteStream = new ByteArrayOutputStream();
    try {
      compressionStream =
          compressionAlgorithm.createPlainCompressionStream(compressedByteStream, compressor);
    } catch (IOException e) {
      throw new RuntimeException("Could not create compression stream " + "for algorithm "
          + compressionAlgorithm, e);
    }
  }
  if (minorVersion > MINOR_VERSION_NO_CHECKSUM && bytesPerChecksum < HEADER_SIZE_WITH_CHECKSUMS) {
    throw new RuntimeException("Unsupported value of bytesPerChecksum. " + " Minimum is "
        + HEADER_SIZE_WITH_CHECKSUMS + " but the configured value is " + bytesPerChecksum);
  }

  prevOffsetByType = new long[BlockType.values().length];
  for (int i = 0; i < prevOffsetByType.length; ++i)
    prevOffsetByType[i] = -1;

  this.includesMemstoreTS = includesMemstoreTS;
  this.checksumType = checksumType;
  this.bytesPerChecksum = bytesPerChecksum;
}
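The compressor/stream pairing set up by this constructor is the general recipe for compressing bytes with a Compression.Algorithm. A self-contained sketch of that recipe (the compress helper is hypothetical; it needs java.io.* and org.apache.hadoop.io.compress.Compressor):

static byte[] compress(Algorithm algo, byte[] raw) throws IOException {
  Compressor compressor = algo.getCompressor();
  try {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    // "Plain" stream: no extra Hadoop codec headers, matching the usage above.
    OutputStream os = algo.createPlainCompressionStream(baos, compressor);
    os.write(raw);
    os.close(); // finishes the compression stream
    return baos.toByteArray();
  } finally {
    algo.returnCompressor(compressor); // compressors are pooled; always return
  }
}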
Project: LCIndex-HBase-0.94.16    File: HFileBlock.java
public AbstractFSReader(FSDataInputStream istream, FSDataInputStream istreamNoFsChecksum,
    Algorithm compressAlgo, long fileSize, int minorVersion, HFileSystem hfs, Path path)
    throws IOException {
  this.istream = istream;
  this.compressAlgo = compressAlgo;
  this.fileSize = fileSize;
  this.minorVersion = minorVersion;
  this.hfs = hfs;
  this.path = path;
  this.hdrSize = headerSize(minorVersion);
  this.istreamNoFsChecksum = istreamNoFsChecksum;
}
Project: LCIndex-HBase-0.94.16    File: HBaseTestingUtility.java
/**
 * Create all combinations of Bloom filters and compression algorithms for
 * testing.
 */
private static List<Object[]> bloomAndCompressionCombinations() {
  List<Object[]> configurations = new ArrayList<Object[]>();
  for (Compression.Algorithm comprAlgo :
       HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
    for (StoreFile.BloomType bloomType : StoreFile.BloomType.values()) {
      configurations.add(new Object[] { comprAlgo, bloomType });
    }
  }
  return Collections.unmodifiableList(configurations);
}
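In HBase 0.94 this private method feeds the public HBaseTestingUtility.BLOOM_AND_COMPRESSION_COMBINATIONS constant, which JUnit parameterized tests can consume. A sketch of that consumption (the test class name is hypothetical):

@RunWith(Parameterized.class)
public class TestAcrossCodecs {
  private final Compression.Algorithm comprAlgo;
  private final StoreFile.BloomType bloomType;

  public TestAcrossCodecs(Compression.Algorithm comprAlgo,
      StoreFile.BloomType bloomType) {
    this.comprAlgo = comprAlgo;
    this.bloomType = bloomType;
  }

  @Parameters
  public static List<Object[]> parameters() {
    // One test instance per (compression, bloom filter) pair.
    return HBaseTestingUtility.BLOOM_AND_COMPRESSION_COMBINATIONS;
  }
}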
Project: LCIndex-HBase-0.94.16    File: HBaseTestingUtility.java
/**
 * Creates a pre-split table for load testing. If the table already exists,
 * logs a warning and continues.
 * @return the number of regions the table was split into
 */
public static int createPreSplitLoadTestTable(Configuration conf,
    byte[] tableName, byte[] columnFamily, Algorithm compression,
    DataBlockEncoding dataBlockEncoding) throws IOException {
  HTableDescriptor desc = new HTableDescriptor(tableName);
  HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
  hcd.setDataBlockEncoding(dataBlockEncoding);
  hcd.setCompressionType(compression);
  return createPreSplitLoadTestTable(conf, desc, hcd);
}
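A typical invocation, with hypothetical table and family names:

// Creates a pre-split table sized to the cluster and returns the region count.
int numRegions = HBaseTestingUtility.createPreSplitLoadTestTable(conf,
    Bytes.toBytes("load_test"), Bytes.toBytes("cf"),
    Compression.Algorithm.GZ, DataBlockEncoding.PREFIX);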
Project: LCIndex-HBase-0.94.16    File: HBaseTestingUtility.java
/**
 * Get supported compression algorithms.
 * @return supported compression algorithms.
 */
public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
  String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
  List<Compression.Algorithm> supportedAlgos = new ArrayList<Compression.Algorithm>();
  for (String algoName : allAlgos) {
    try {
      Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
      algo.getCompressor();
      supportedAlgos.add(algo);
    } catch (Throwable t) {
      // this algo is not available
    }
  }
  return supportedAlgos.toArray(new Compression.Algorithm[0]);
}
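The algo.getCompressor() call is what actually probes codec availability, so this helper silently drops algorithms whose native libraries are not loadable. Listing what survives in the current JVM:

for (Compression.Algorithm algo : HBaseTestingUtility.getSupportedCompressionAlgorithms()) {
  // "none" and "gz" are always available; "snappy" and "lzo" need native libs.
  System.out.println(algo.getName());
}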
Project: LCIndex-HBase-0.94.16    File: TestHFileOutputFormat.java
private void setupMockColumnFamilies(HTable table,
  Map<String, Compression.Algorithm> familyToCompression) throws IOException
{
  HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAME);
  for (Entry<String, Compression.Algorithm> entry : familyToCompression.entrySet()) {
    mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey())
        .setMaxVersions(1)
        .setCompressionType(entry.getValue())
        .setBlockCacheEnabled(false)
        .setTimeToLive(0));
  }
  Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
}
Project: LCIndex-HBase-0.94.16    File: TestHFileWriterV2.java
@Test
public void testHFileFormatV2() throws IOException {
  Path hfilePath = new Path(TEST_UTIL.getDataTestDir(), "testHFileFormatV2");
  final Compression.Algorithm compressAlgo = Compression.Algorithm.GZ;
  final int entryCount = 10000;
  writeDataAndReadFromHFile(hfilePath, compressAlgo, entryCount, false);
}
Project: LCIndex-HBase-0.94.16    File: TestHFileWriterV2.java
@Test
public void testMidKeyInHFile() throws IOException{
  Path hfilePath = new Path(TEST_UTIL.getDataTestDir(), "testMidKeyInHFile");
  Compression.Algorithm compressAlgo = Compression.Algorithm.NONE;
  int entryCount = 50000;
  writeDataAndReadFromHFile(hfilePath, compressAlgo, entryCount, true);
}
Project: IRIndex    File: HFileWriterV1.java
@Override
public Writer createWriter(FileSystem fs, Path path,
    FSDataOutputStream ostream, int blockSize,
    Algorithm compressAlgo, HFileDataBlockEncoder dataBlockEncoder,
    KeyComparator comparator, final ChecksumType checksumType,
    final int bytesPerChecksum, boolean includeMVCCReadpoint) throws IOException {
  // version 1 does not implement checksums
  return new HFileWriterV1(conf, cacheConf, fs, path, ostream, blockSize,
      compressAlgo, dataBlockEncoder, comparator);
}
Project: IRIndex    File: HFileWriterV1.java
/** Constructor that takes a path, creates and closes the output stream. */
public HFileWriterV1(Configuration conf, CacheConfig cacheConf,
    FileSystem fs, Path path, FSDataOutputStream ostream,
    int blockSize, Compression.Algorithm compress,
    HFileDataBlockEncoder blockEncoder,
    final KeyComparator comparator) throws IOException {
  super(cacheConf, ostream == null ? createOutputStream(conf, fs, path) : ostream, path,
      blockSize, compress, blockEncoder, comparator);
  SchemaMetrics.configureGlobally(conf);
}
Project: IRIndex    File: HFileBlock.java
/**
 * @param compressionAlgorithm compression algorithm to use
 * @param dataBlockEncoder data block encoding strategy to use
 * @param includesMemstoreTS whether blocks include memstore timestamps
 * @param minorVersion HFile minor version being written
 * @param checksumType type of checksum
 * @param bytesPerChecksum bytes per checksum
 */
public Writer(Compression.Algorithm compressionAlgorithm,
      HFileDataBlockEncoder dataBlockEncoder, boolean includesMemstoreTS,
      int minorVersion,
      ChecksumType checksumType, int bytesPerChecksum) {
  this.minorVersion = minorVersion;
  compressAlgo = compressionAlgorithm == null ? NONE : compressionAlgorithm;
  this.dataBlockEncoder = dataBlockEncoder != null
      ? dataBlockEncoder : NoOpDataBlockEncoder.INSTANCE;

  baosInMemory = new ByteArrayOutputStream();
  if (compressAlgo != NONE) {
    compressor = compressionAlgorithm.getCompressor();
    compressedByteStream = new ByteArrayOutputStream();
    try {
      compressionStream =
          compressionAlgorithm.createPlainCompressionStream(
              compressedByteStream, compressor);
    } catch (IOException e) {
      throw new RuntimeException("Could not create compression stream " + 
          "for algorithm " + compressionAlgorithm, e);
    }
  }
  if (minorVersion > MINOR_VERSION_NO_CHECKSUM
      && bytesPerChecksum < HEADER_SIZE_WITH_CHECKSUMS) {
    throw new RuntimeException("Unsupported value of bytesPerChecksum. " +
        " Minimum is " + HEADER_SIZE_WITH_CHECKSUMS + " but the configured value is " +
        bytesPerChecksum);
  }

  prevOffsetByType = new long[BlockType.values().length];
  for (int i = 0; i < prevOffsetByType.length; ++i)
    prevOffsetByType[i] = -1;

  this.includesMemstoreTS = includesMemstoreTS;
  this.checksumType = checksumType;
  this.bytesPerChecksum = bytesPerChecksum;
}