Java Class org.apache.hadoop.hbase.io.hfile.HFile.Writer Example Source Code
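
The snippets below are collected from a number of HBase forks (ditb, LCIndex-HBase-0.94.16, pbase, HIndex, IRIndex, hbase, RStore, PyroDB, c5, HBase-Research). They show the two common ways to obtain a Writer: binding it to a Path (withPath) or to an already-open FSDataOutputStream (withOutputStream). Newer branches configure block size and compression through an HFileContext, while the 0.94-era branches pass them to the writer factory directly via withBlockSize and withCompression. As orientation, here is a minimal, self-contained sketch of the writer lifecycle against the HFileContext-era API; the class name, the /tmp output path, and the cell contents are illustrative assumptions, not taken from any snippet below.

// A minimal sketch of the HFile.Writer lifecycle, modeled on the
// HFileContext-era snippets below. The output path and cell values are
// illustrative assumptions.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class HFileWriterLifecycleSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/example.hfile"); // illustrative path
    HFileContext context = new HFileContextBuilder().build();
    HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
        .withPath(fs, path)
        .withFileContext(context)
        .create();
    try {
      // Cells must be appended in ascending key order.
      writer.append(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("f"),
          Bytes.toBytes("q"), Bytes.toBytes("v1")));
      writer.append(new KeyValue(Bytes.toBytes("row2"), Bytes.toBytes("f"),
          Bytes.toBytes("q"), Bytes.toBytes("v2")));
    } finally {
      writer.close(); // flushes data blocks and writes the indexes and trailer
    }
  }
}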

Project: ditb    File: TestHFile.java
/**
 * Create a truncated hfile and verify that an exception is thrown.
 */
public void testCorruptTruncatedHFile() throws IOException {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path f = new Path(ROOT_DIR, getName());
  HFileContext  context = new HFileContextBuilder().build();
  Writer w = HFile.getWriterFactory(conf, cacheConf).withPath(this.fs, f)
      .withFileContext(context).create();
  writeSomeRecords(w, 0, 100, false);
  w.close();

  Path trunc = new Path(f.getParent(), "trucated");
  truncateFile(fs, w.getPath(), trunc);

  try {
    Reader r = HFile.createReader(fs, trunc, cacheConf, conf);
  } catch (CorruptHFileException che) {
    // Expected failure
    return;
  }
  fail("Should have thrown exception");
}
Project: ditb    File: HFileWriterV2.java
/** Additional initialization steps */
protected void finishInit(final Configuration conf) {
  if (fsBlockWriter != null)
    throw new IllegalStateException("finishInit called twice");

  fsBlockWriter = new HFileBlock.Writer(blockEncoder, hFileContext);

  // Data block index writer
  boolean cacheIndexesOnWrite = cacheConf.shouldCacheIndexesOnWrite();
  dataBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter(fsBlockWriter,
      cacheIndexesOnWrite ? cacheConf : null,
      cacheIndexesOnWrite ? name : null);
  dataBlockIndexWriter.setMaxChunkSize(
      HFileBlockIndex.getMaxChunkSize(conf));
  inlineBlockWriters.add(dataBlockIndexWriter);

  // Meta data block index writer
  metaBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter();
  if (LOG.isTraceEnabled()) LOG.trace("Initialized with " + cacheConf);
}
Project: ditb    File: TestHFile.java
private void metablocks(final String compress) throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path mFile = new Path(ROOT_DIR, "meta.hfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  HFileContext meta = new HFileContextBuilder()
                      .withCompression(AbstractHFileWriter.compressionByName(compress))
                      .withBlockSize(minBlockSize).build();
  Writer writer = HFile.getWriterFactory(conf, cacheConf)
      .withOutputStream(fout)
      .withFileContext(meta)
      .create();
  someTestingWithMetaBlock(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(mFile);
  Reader reader = HFile.createReaderFromStream(mFile, fs.open(mFile),
      this.fs.getFileStatus(mFile).getLen(), cacheConf, conf);
  reader.loadFileInfo();
  // No data -- this should return false.
  assertFalse(reader.getScanner(false, false).seekTo());
  someReadingWithMetaBlock(reader);
  fs.delete(mFile, true);
  reader.close();
  fin.close();
}
Project: ditb    File: TestHFile.java
public void testNullMetaBlocks() throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  for (Compression.Algorithm compressAlgo : 
      HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
    Path mFile = new Path(ROOT_DIR, "nometa_" + compressAlgo + ".hfile");
    FSDataOutputStream fout = createFSOutput(mFile);
    HFileContext meta = new HFileContextBuilder().withCompression(compressAlgo)
                        .withBlockSize(minBlockSize).build();
    Writer writer = HFile.getWriterFactory(conf, cacheConf)
        .withOutputStream(fout)
        .withFileContext(meta)
        .create();
    KeyValue kv = new KeyValue("foo".getBytes(), "f1".getBytes(), null, "value".getBytes());
    writer.append(kv);
    writer.close();
    fout.close();
    Reader reader = HFile.createReader(fs, mFile, cacheConf, conf);
    reader.loadFileInfo();
    assertNull(reader.getMetaBlock("non-existant", false));
  }
}
Project: LCIndex-HBase-0.94.16    File: HFileWriterV2.java
/** Additional initialization steps */
private void finishInit(final Configuration conf) {
  if (fsBlockWriter != null)
    throw new IllegalStateException("finishInit called twice");

  // HFile filesystem-level (non-caching) block writer
  fsBlockWriter = new HFileBlock.Writer(compressAlgo, blockEncoder,
      includeMemstoreTS, minorVersion, checksumType, bytesPerChecksum);

  // Data block index writer
  boolean cacheIndexesOnWrite = cacheConf.shouldCacheIndexesOnWrite();
  dataBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter(fsBlockWriter,
      cacheIndexesOnWrite ? cacheConf.getBlockCache() : null,
      cacheIndexesOnWrite ? name : null);
  dataBlockIndexWriter.setMaxChunkSize(
      HFileBlockIndex.getMaxChunkSize(conf));
  inlineBlockWriters.add(dataBlockIndexWriter);

  // Meta data block index writer
  metaBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter();
  LOG.debug("Initialized with " + cacheConf);

  if (isSchemaConfigured()) {
    schemaConfigurationChanged();
  }
}
Project: LCIndex-HBase-0.94.16    File: TestHFile.java
/**
 * Create a truncated hfile and verify that an exception is thrown.
 */
public void testCorruptTruncatedHFile() throws IOException {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path f = new Path(ROOT_DIR, getName());
  Writer w = HFile.getWriterFactory(conf, cacheConf).withPath(this.fs, f).create();
  writeSomeRecords(w, 0, 100);
  w.close();

  Path trunc = new Path(f.getParent(), "trucated");
  truncateFile(fs, w.getPath(), trunc);

  try {
    Reader r = HFile.createReader(fs, trunc, cacheConf);
  } catch (CorruptHFileException che) {
    // Expected failure
    return;
  }
  fail("Should have thrown exception");
}
Project: LCIndex-HBase-0.94.16    File: TestHFile.java
private void metablocks(final String compress) throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path mFile = new Path(ROOT_DIR, "meta.hfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  Writer writer = HFile.getWriterFactory(conf, cacheConf)
      .withOutputStream(fout)
      .withBlockSize(minBlockSize)
      .withCompression(compress)
      .create();
  someTestingWithMetaBlock(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(mFile);
  Reader reader = HFile.createReaderFromStream(mFile, fs.open(mFile),
      this.fs.getFileStatus(mFile).getLen(), cacheConf);
  reader.loadFileInfo();
  // No data -- this should return false.
  assertFalse(reader.getScanner(false, false).seekTo());
  someReadingWithMetaBlock(reader);
  fs.delete(mFile, true);
  reader.close();
  fin.close();
}
Project: LCIndex-HBase-0.94.16    File: TestHFile.java
public void testNullMetaBlocks() throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  for (Compression.Algorithm compressAlgo : 
      HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
    Path mFile = new Path(ROOT_DIR, "nometa_" + compressAlgo + ".hfile");
    FSDataOutputStream fout = createFSOutput(mFile);
    Writer writer = HFile.getWriterFactory(conf, cacheConf)
        .withOutputStream(fout)
        .withBlockSize(minBlockSize)
        .withCompression(compressAlgo)
        .create();
    writer.append("foo".getBytes(), "value".getBytes());
    writer.close();
    fout.close();
    Reader reader = HFile.createReader(fs, mFile, cacheConf);
    reader.loadFileInfo();
    assertNull(reader.getMetaBlock("non-existant", false));
  }
}
Project: LCIndex-HBase-0.94.16    File: TestHFile.java
public void testComparator() throws IOException {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path mFile = new Path(ROOT_DIR, "meta.tfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  KeyComparator comparator = new KeyComparator() {
    @Override
    public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2,
        int l2) {
      return -Bytes.compareTo(b1, s1, l1, b2, s2, l2);
    }
    @Override
    public int compare(byte[] o1, byte[] o2) {
      return compare(o1, 0, o1.length, o2, 0, o2.length);
    }
  };
  Writer writer = HFile.getWriterFactory(conf, cacheConf)
      .withOutputStream(fout)
      .withBlockSize(minBlockSize)
      .withComparator(comparator)
      .create();
  writer.append("3".getBytes(), "0".getBytes());
  writer.append("2".getBytes(), "0".getBytes());
  writer.append("1".getBytes(), "0".getBytes());
  writer.close();
}
Project: pbase    File: HFileWriterV2.java
/** Additional initialization steps */
protected void finishInit(final Configuration conf) {
  if (fsBlockWriter != null)
    throw new IllegalStateException("finishInit called twice");

  fsBlockWriter = new HFileBlock.Writer(blockEncoder, hFileContext);

  // Data block index writer
  boolean cacheIndexesOnWrite = cacheConf.shouldCacheIndexesOnWrite();
  dataBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter(fsBlockWriter,
      cacheIndexesOnWrite ? cacheConf : null,
      cacheIndexesOnWrite ? name : null);
  dataBlockIndexWriter.setMaxChunkSize(
      HFileBlockIndex.getMaxChunkSize(conf));
  inlineBlockWriters.add(dataBlockIndexWriter);

  // Meta data block index writer
  metaBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter();
  if (LOG.isTraceEnabled()) LOG.trace("Initialized with " + cacheConf);
}
Project: pbase    File: TestHFile.java
/**
 * Create a truncated hfile and verify that an exception is thrown.
 */
public void testCorruptTruncatedHFile() throws IOException {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path f = new Path(ROOT_DIR, getName());
  HFileContext  context = new HFileContextBuilder().build();
  Writer w = HFile.getWriterFactory(conf, cacheConf).withPath(this.fs, f)
      .withFileContext(context).create();
  writeSomeRecords(w, 0, 100, false);
  w.close();

  Path trunc = new Path(f.getParent(), "trucated");
  truncateFile(fs, w.getPath(), trunc);

  try {
    Reader r = HFile.createReader(fs, trunc, cacheConf, conf);
  } catch (CorruptHFileException che) {
    // Expected failure
    return;
  }
  fail("Should have thrown exception");
}
Project: pbase    File: TestHFile.java
private int writeSomeRecords(Writer writer, int start, int n, boolean useTags)
    throws IOException {
  String value = "value";
  KeyValue kv;
  for (int i = start; i < (start + n); i++) {
    String key = String.format(localFormatter, Integer.valueOf(i));
    if (useTags) {
      Tag t = new Tag((byte) 1, "myTag1");
      Tag[] tags = new Tag[1];
      tags[0] = t;
      kv = new KeyValue(Bytes.toBytes(key), Bytes.toBytes("family"), Bytes.toBytes("qual"),
          HConstants.LATEST_TIMESTAMP, Bytes.toBytes(value + key), tags);
      writer.append(kv);
    } else {
      kv = new KeyValue(Bytes.toBytes(key), Bytes.toBytes("family"), Bytes.toBytes("qual"),
          Bytes.toBytes(value + key));
      writer.append(kv);
    }
  }
  return (start + n);
}
Project: pbase    File: TestHFile.java
private void metablocks(final String compress) throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path mFile = new Path(ROOT_DIR, "meta.hfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  HFileContext meta = new HFileContextBuilder()
                      .withCompression(AbstractHFileWriter.compressionByName(compress))
                      .withBlockSize(minBlockSize).build();
  Writer writer = HFile.getWriterFactory(conf, cacheConf)
      .withOutputStream(fout)
      .withFileContext(meta)
      .create();
  someTestingWithMetaBlock(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(mFile);
  Reader reader = HFile.createReaderFromStream(mFile, fs.open(mFile),
      this.fs.getFileStatus(mFile).getLen(), cacheConf, conf);
  reader.loadFileInfo();
  // No data -- this should return false.
  assertFalse(reader.getScanner(false, false).seekTo());
  someReadingWithMetaBlock(reader);
  fs.delete(mFile, true);
  reader.close();
  fin.close();
}
Project: pbase    File: TestHFile.java
public void testNullMetaBlocks() throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  for (Compression.Algorithm compressAlgo : 
      HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
    Path mFile = new Path(ROOT_DIR, "nometa_" + compressAlgo + ".hfile");
    FSDataOutputStream fout = createFSOutput(mFile);
    HFileContext meta = new HFileContextBuilder().withCompression(compressAlgo)
                        .withBlockSize(minBlockSize).build();
    Writer writer = HFile.getWriterFactory(conf, cacheConf)
        .withOutputStream(fout)
        .withFileContext(meta)
        .create();
    KeyValue kv = new KeyValue("foo".getBytes(), "f1".getBytes(), null, "value".getBytes());
    writer.append(kv);
    writer.close();
    fout.close();
    Reader reader = HFile.createReader(fs, mFile, cacheConf, conf);
    reader.loadFileInfo();
    assertNull(reader.getMetaBlock("non-existant", false));
  }
}
Project: HIndex    File: HFileWriterV2.java
/** Additional initialization steps */
protected void finishInit(final Configuration conf) {
  if (fsBlockWriter != null)
    throw new IllegalStateException("finishInit called twice");

  fsBlockWriter = new HFileBlock.Writer(blockEncoder, hFileContext);

  // Data block index writer
  boolean cacheIndexesOnWrite = cacheConf.shouldCacheIndexesOnWrite();
  dataBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter(fsBlockWriter,
      cacheIndexesOnWrite ? cacheConf.getBlockCache() : null,
      cacheIndexesOnWrite ? name : null);
  dataBlockIndexWriter.setMaxChunkSize(
      HFileBlockIndex.getMaxChunkSize(conf));
  inlineBlockWriters.add(dataBlockIndexWriter);

  // Meta data block index writer
  metaBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter();
  if (LOG.isTraceEnabled()) LOG.trace("Initialized with " + cacheConf);
}
Project: HIndex    File: TestHFile.java
/**
 * Create a truncated hfile and verify that an exception is thrown.
 */
public void testCorruptTruncatedHFile() throws IOException {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path f = new Path(ROOT_DIR, getName());
  HFileContext  context = new HFileContextBuilder().build();
  Writer w = HFile.getWriterFactory(conf, cacheConf).withPath(this.fs, f)
      .withFileContext(context).create();
  writeSomeRecords(w, 0, 100, false);
  w.close();

  Path trunc = new Path(f.getParent(), "trucated");
  truncateFile(fs, w.getPath(), trunc);

  try {
    Reader r = HFile.createReader(fs, trunc, cacheConf, conf);
  } catch (CorruptHFileException che) {
    // Expected failure
    return;
  }
  fail("Should have thrown exception");
}
Project: HIndex    File: TestHFile.java
private void metablocks(final String compress) throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path mFile = new Path(ROOT_DIR, "meta.hfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  HFileContext meta = new HFileContextBuilder()
                      .withCompression(AbstractHFileWriter.compressionByName(compress))
                      .withBlockSize(minBlockSize).build();
  Writer writer = HFile.getWriterFactory(conf, cacheConf)
      .withOutputStream(fout)
      .withFileContext(meta)
      .create();
  someTestingWithMetaBlock(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(mFile);
  Reader reader = HFile.createReaderFromStream(mFile, fs.open(mFile),
      this.fs.getFileStatus(mFile).getLen(), cacheConf, conf);
  reader.loadFileInfo();
  // No data -- this should return false.
  assertFalse(reader.getScanner(false, false).seekTo());
  someReadingWithMetaBlock(reader);
  fs.delete(mFile, true);
  reader.close();
  fin.close();
}
Project: HIndex    File: TestHFile.java
public void testNullMetaBlocks() throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  for (Compression.Algorithm compressAlgo : 
      HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
    Path mFile = new Path(ROOT_DIR, "nometa_" + compressAlgo + ".hfile");
    FSDataOutputStream fout = createFSOutput(mFile);
    HFileContext meta = new HFileContextBuilder().withCompression(compressAlgo)
                        .withBlockSize(minBlockSize).build();
    Writer writer = HFile.getWriterFactory(conf, cacheConf)
        .withOutputStream(fout)
        .withFileContext(meta)
        .create();
    writer.append("foo".getBytes(), "value".getBytes());
    writer.close();
    fout.close();
    Reader reader = HFile.createReader(fs, mFile, cacheConf, conf);
    reader.loadFileInfo();
    assertNull(reader.getMetaBlock("non-existant", false));
  }
}
Project: IRIndex    File: HFileWriterV2.java
/** Additional initialization steps */
private void finishInit(final Configuration conf) {
  if (fsBlockWriter != null)
    throw new IllegalStateException("finishInit called twice");

  // HFile filesystem-level (non-caching) block writer
  fsBlockWriter = new HFileBlock.Writer(compressAlgo, blockEncoder,
      includeMemstoreTS, minorVersion, checksumType, bytesPerChecksum);

  // Data block index writer
  boolean cacheIndexesOnWrite = cacheConf.shouldCacheIndexesOnWrite();
  dataBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter(fsBlockWriter,
      cacheIndexesOnWrite ? cacheConf.getBlockCache() : null,
      cacheIndexesOnWrite ? name : null);
  dataBlockIndexWriter.setMaxChunkSize(
      HFileBlockIndex.getMaxChunkSize(conf));
  inlineBlockWriters.add(dataBlockIndexWriter);

  // Meta data block index writer
  metaBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter();
  LOG.debug("Initialized with " + cacheConf);

  if (isSchemaConfigured()) {
    schemaConfigurationChanged();
  }
}
Project: IRIndex    File: TestHFile.java
/**
 * Create a truncated hfile and verify that an exception is thrown.
 */
public void testCorruptTruncatedHFile() throws IOException {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path f = new Path(ROOT_DIR, getName());
  Writer w = HFile.getWriterFactory(conf, cacheConf).withPath(this.fs, f).create();
  writeSomeRecords(w, 0, 100);
  w.close();

  Path trunc = new Path(f.getParent(), "trucated");
  truncateFile(fs, w.getPath(), trunc);

  try {
    Reader r = HFile.createReader(fs, trunc, cacheConf);
  } catch (CorruptHFileException che) {
    // Expected failure
    return;
  }
  fail("Should have thrown exception");
}
Project: IRIndex    File: TestHFile.java
private void metablocks(final String compress) throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path mFile = new Path(ROOT_DIR, "meta.hfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  Writer writer = HFile.getWriterFactory(conf, cacheConf)
      .withOutputStream(fout)
      .withBlockSize(minBlockSize)
      .withCompression(compress)
      .create();
  someTestingWithMetaBlock(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(mFile);
  Reader reader = HFile.createReaderFromStream(mFile, fs.open(mFile),
      this.fs.getFileStatus(mFile).getLen(), cacheConf);
  reader.loadFileInfo();
  // No data -- this should return false.
  assertFalse(reader.getScanner(false, false).seekTo());
  someReadingWithMetaBlock(reader);
  fs.delete(mFile, true);
  reader.close();
  fin.close();
}
Project: IRIndex    File: TestHFile.java
public void testNullMetaBlocks() throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  for (Compression.Algorithm compressAlgo : 
      HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
    Path mFile = new Path(ROOT_DIR, "nometa_" + compressAlgo + ".hfile");
    FSDataOutputStream fout = createFSOutput(mFile);
    Writer writer = HFile.getWriterFactory(conf, cacheConf)
        .withOutputStream(fout)
        .withBlockSize(minBlockSize)
        .withCompression(compressAlgo)
        .create();
    writer.append("foo".getBytes(), "value".getBytes());
    writer.close();
    fout.close();
    Reader reader = HFile.createReader(fs, mFile, cacheConf);
    reader.loadFileInfo();
    assertNull(reader.getMetaBlock("non-existant", false));
  }
}
Project: IRIndex    File: TestHFile.java
public void testComparator() throws IOException {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path mFile = new Path(ROOT_DIR, "meta.tfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  KeyComparator comparator = new KeyComparator() {
    @Override
    public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2,
        int l2) {
      return -Bytes.compareTo(b1, s1, l1, b2, s2, l2);
    }
    @Override
    public int compare(byte[] o1, byte[] o2) {
      return compare(o1, 0, o1.length, o2, 0, o2.length);
    }
  };
  Writer writer = HFile.getWriterFactory(conf, cacheConf)
      .withOutputStream(fout)
      .withBlockSize(minBlockSize)
      .withComparator(comparator)
      .create();
  writer.append("3".getBytes(), "0".getBytes());
  writer.append("2".getBytes(), "0".getBytes());
  writer.append("1".getBytes(), "0".getBytes());
  writer.close();
}
Project: hbase    File: TestHFile.java
/**
 * Create a truncated hfile and verify that an exception is thrown.
 */
@Test
public void testCorruptTruncatedHFile() throws IOException {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path f = new Path(ROOT_DIR, testName.getMethodName());
  HFileContext  context = new HFileContextBuilder().build();
  Writer w = HFile.getWriterFactory(conf, cacheConf).withPath(this.fs, f)
      .withFileContext(context).create();
  writeSomeRecords(w, 0, 100, false);
  w.close();

  Path trunc = new Path(f.getParent(), "trucated");
  truncateFile(fs, w.getPath(), trunc);

  try {
    Reader r = HFile.createReader(fs, trunc, cacheConf, true, conf);
  } catch (CorruptHFileException che) {
    // Expected failure
    return;
  }
  fail("Should have thrown exception");
}
Project: hbase    File: TestHFile.java
private int writeSomeRecords(Writer writer, int start, int n, boolean useTags)
    throws IOException {
  String value = "value";
  KeyValue kv;
  for (int i = start; i < (start + n); i++) {
    String key = String.format(localFormatter, Integer.valueOf(i));
    if (useTags) {
      Tag t = new ArrayBackedTag((byte) 1, "myTag1");
      Tag[] tags = new Tag[1];
      tags[0] = t;
      kv = new KeyValue(Bytes.toBytes(key), Bytes.toBytes("family"), Bytes.toBytes("qual"),
          HConstants.LATEST_TIMESTAMP, Bytes.toBytes(value + key), tags);
      writer.append(kv);
    } else {
      kv = new KeyValue(Bytes.toBytes(key), Bytes.toBytes("family"), Bytes.toBytes("qual"),
          Bytes.toBytes(value + key));
      writer.append(kv);
    }
  }
  return (start + n);
}
Project: hbase    File: TestHFile.java
private void metablocks(final String compress) throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path mFile = new Path(ROOT_DIR, "meta.hfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  HFileContext meta = new HFileContextBuilder()
                      .withCompression(HFileWriterImpl.compressionByName(compress))
                      .withBlockSize(minBlockSize).build();
  Writer writer = HFile.getWriterFactory(conf, cacheConf)
      .withOutputStream(fout)
      .withFileContext(meta)
      .create();
  someTestingWithMetaBlock(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(mFile);
  Reader reader = HFile.createReaderFromStream(mFile, fs.open(mFile),
      this.fs.getFileStatus(mFile).getLen(), cacheConf, conf);
  reader.loadFileInfo();
  // No data -- this should return false.
  assertFalse(reader.getScanner(false, false).seekTo());
  someReadingWithMetaBlock(reader);
  fs.delete(mFile, true);
  reader.close();
  fin.close();
}
Project: hbase    File: TestHFile.java
@Test
public void testNullMetaBlocks() throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  for (Compression.Algorithm compressAlgo :
      HBaseCommonTestingUtility.COMPRESSION_ALGORITHMS) {
    Path mFile = new Path(ROOT_DIR, "nometa_" + compressAlgo + ".hfile");
    FSDataOutputStream fout = createFSOutput(mFile);
    HFileContext meta = new HFileContextBuilder().withCompression(compressAlgo)
                        .withBlockSize(minBlockSize).build();
    Writer writer = HFile.getWriterFactory(conf, cacheConf)
        .withOutputStream(fout)
        .withFileContext(meta)
        .create();
    KeyValue kv = new KeyValue("foo".getBytes(), "f1".getBytes(), null, "value".getBytes());
    writer.append(kv);
    writer.close();
    fout.close();
    Reader reader = HFile.createReader(fs, mFile, cacheConf, true, conf);
    reader.loadFileInfo();
    assertNull(reader.getMetaBlock("non-existant", false));
  }
}
Project: RStore    File: HFileWriterV2.java
/** Additional initialization steps */
private void finishInit(final Configuration conf) {
  if (fsBlockWriter != null)
    throw new IllegalStateException("finishInit called twice");

  // HFile filesystem-level (non-caching) block writer
  fsBlockWriter = new HFileBlock.Writer(compressAlgo);

  // Data block index writer
  boolean cacheIndexesOnWrite = cacheConf.shouldCacheIndexesOnWrite();
  dataBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter(fsBlockWriter,
      cacheIndexesOnWrite ? cacheConf.getBlockCache() : null,
      cacheIndexesOnWrite ? name : null);
  dataBlockIndexWriter.setMaxChunkSize(
      HFileBlockIndex.getMaxChunkSize(conf));
  inlineBlockWriters.add(dataBlockIndexWriter);

  // Meta data block index writer
  metaBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter();

  LOG.debug("Initialized with " + cacheConf);
}
Project: PyroDB    File: HFileWriterV2.java
/** Additional initialization steps */
protected void finishInit(final Configuration conf) {
  if (fsBlockWriter != null)
    throw new IllegalStateException("finishInit called twice");

  fsBlockWriter = new HFileBlock.Writer(blockEncoder, hFileContext);

  // Data block index writer
  boolean cacheIndexesOnWrite = cacheConf.shouldCacheIndexesOnWrite();
  dataBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter(fsBlockWriter,
      cacheIndexesOnWrite ? cacheConf.getBlockCache() : null,
      cacheIndexesOnWrite ? name : null);
  dataBlockIndexWriter.setMaxChunkSize(
      HFileBlockIndex.getMaxChunkSize(conf));
  inlineBlockWriters.add(dataBlockIndexWriter);

  // Meta data block index writer
  metaBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter();
  if (LOG.isTraceEnabled()) LOG.trace("Initialized with " + cacheConf);
}
Project: PyroDB    File: TestHFile.java
/**
 * Create a truncated hfile and verify that an exception is thrown.
 */
public void testCorruptTruncatedHFile() throws IOException {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path f = new Path(ROOT_DIR, getName());
  HFileContext  context = new HFileContextBuilder().build();
  Writer w = HFile.getWriterFactory(conf, cacheConf).withPath(this.fs, f)
      .withFileContext(context).create();
  writeSomeRecords(w, 0, 100, false);
  w.close();

  Path trunc = new Path(f.getParent(), "trucated");
  truncateFile(fs, w.getPath(), trunc);

  try {
    Reader r = HFile.createReader(fs, trunc, cacheConf, conf);
  } catch (CorruptHFileException che) {
    // Expected failure
    return;
  }
  fail("Should have thrown exception");
}
Project: PyroDB    File: TestHFile.java
private int writeSomeRecords(Writer writer, int start, int n, boolean useTags)
    throws IOException {
  String value = "value";
  KeyValue kv;
  for (int i = start; i < (start + n); i++) {
    String key = String.format(localFormatter, Integer.valueOf(i));
    if (useTags) {
      Tag t = new Tag((byte) 1, "myTag1");
      Tag[] tags = new Tag[1];
      tags[0] = t;
      kv = new KeyValue(Bytes.toBytes(key), Bytes.toBytes("family"), Bytes.toBytes("qual"),
          HConstants.LATEST_TIMESTAMP, Bytes.toBytes(value + key), tags);
      writer.append(kv);
    } else {
      kv = new KeyValue(Bytes.toBytes(key), Bytes.toBytes("family"), Bytes.toBytes("qual"),
          Bytes.toBytes(value + key));
      writer.append(kv);
    }
  }
  return (start + n);
}
Project: PyroDB    File: TestHFile.java
private void metablocks(final String compress) throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path mFile = new Path(ROOT_DIR, "meta.hfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  HFileContext meta = new HFileContextBuilder()
                      .withCompression(AbstractHFileWriter.compressionByName(compress))
                      .withBlockSize(minBlockSize).build();
  Writer writer = HFile.getWriterFactory(conf, cacheConf)
      .withOutputStream(fout)
      .withFileContext(meta)
      .create();
  someTestingWithMetaBlock(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(mFile);
  Reader reader = HFile.createReaderFromStream(mFile, fs.open(mFile),
      this.fs.getFileStatus(mFile).getLen(), cacheConf, conf);
  reader.loadFileInfo();
  // No data -- this should return false.
  assertFalse(reader.getScanner(false, false).seekTo());
  someReadingWithMetaBlock(reader);
  fs.delete(mFile, true);
  reader.close();
  fin.close();
}
Project: PyroDB    File: TestHFile.java
public void testNullMetaBlocks() throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  for (Compression.Algorithm compressAlgo : 
      HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
    Path mFile = new Path(ROOT_DIR, "nometa_" + compressAlgo + ".hfile");
    FSDataOutputStream fout = createFSOutput(mFile);
    HFileContext meta = new HFileContextBuilder().withCompression(compressAlgo)
                        .withBlockSize(minBlockSize).build();
    Writer writer = HFile.getWriterFactory(conf, cacheConf)
        .withOutputStream(fout)
        .withFileContext(meta)
        .create();
    writer.append("foo".getBytes(), "value".getBytes());
    writer.close();
    fout.close();
    Reader reader = HFile.createReader(fs, mFile, cacheConf, conf);
    reader.loadFileInfo();
    assertNull(reader.getMetaBlock("non-existant", false));
  }
}
Project: c5    File: HFileWriterV2.java
/** Additional initialization steps */
private void finishInit(final Configuration conf) {
  if (fsBlockWriter != null)
    throw new IllegalStateException("finishInit called twice");

  // HFile filesystem-level (non-caching) block writer
  fsBlockWriter = new HFileBlock.Writer(compressAlgo, blockEncoder,
      includeMemstoreTS, checksumType, bytesPerChecksum);

  // Data block index writer
  boolean cacheIndexesOnWrite = cacheConf.shouldCacheIndexesOnWrite();
  dataBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter(fsBlockWriter,
      cacheIndexesOnWrite ? cacheConf.getBlockCache() : null,
      cacheIndexesOnWrite ? name : null);
  dataBlockIndexWriter.setMaxChunkSize(
      HFileBlockIndex.getMaxChunkSize(conf));
  inlineBlockWriters.add(dataBlockIndexWriter);

  // Meta data block index writer
  metaBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter();
  if (LOG.isTraceEnabled()) LOG.trace("Initialized with " + cacheConf);
}
Project: c5    File: TestHFile.java
/**
 * Create a truncated hfile and verify that an exception is thrown.
 */
public void testCorruptTruncatedHFile() throws IOException {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path f = new Path(ROOT_DIR, getName());
  Writer w = HFile.getWriterFactory(conf, cacheConf).withPath(this.fs, f).create();
  writeSomeRecords(w, 0, 100);
  w.close();

  Path trunc = new Path(f.getParent(), "trucated");
  truncateFile(fs, w.getPath(), trunc);

  try {
    Reader r = HFile.createReader(fs, trunc, cacheConf);
  } catch (CorruptHFileException che) {
    // Expected failure
    return;
  }
  fail("Should have thrown exception");
}
Project: c5    File: TestHFile.java
private void metablocks(final String compress) throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path mFile = new Path(ROOT_DIR, "meta.hfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  Writer writer = HFile.getWriterFactory(conf, cacheConf)
      .withOutputStream(fout)
      .withBlockSize(minBlockSize)
      .withCompression(compress)
      .create();
  someTestingWithMetaBlock(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(mFile);
  Reader reader = HFile.createReaderFromStream(mFile, fs.open(mFile),
      this.fs.getFileStatus(mFile).getLen(), cacheConf);
  reader.loadFileInfo();
  // No data -- this should return false.
  assertFalse(reader.getScanner(false, false).seekTo());
  someReadingWithMetaBlock(reader);
  fs.delete(mFile, true);
  reader.close();
  fin.close();
}
Project: c5    File: TestHFile.java
public void testNullMetaBlocks() throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  for (Compression.Algorithm compressAlgo : 
      HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
    Path mFile = new Path(ROOT_DIR, "nometa_" + compressAlgo + ".hfile");
    FSDataOutputStream fout = createFSOutput(mFile);
    Writer writer = HFile.getWriterFactory(conf, cacheConf)
        .withOutputStream(fout)
        .withBlockSize(minBlockSize)
        .withCompression(compressAlgo)
        .create();
    writer.append("foo".getBytes(), "value".getBytes());
    writer.close();
    fout.close();
    Reader reader = HFile.createReader(fs, mFile, cacheConf);
    reader.loadFileInfo();
    assertNull(reader.getMetaBlock("non-existant", false));
  }
}
Project: HBase-Research    File: HFileWriterV2.java
/** Additional initialization steps */
private void finishInit(final Configuration conf) {
  if (fsBlockWriter != null)
    throw new IllegalStateException("finishInit called twice");

  // HFile filesystem-level (non-caching) block writer
  fsBlockWriter = new HFileBlock.Writer(compressAlgo, blockEncoder,
      includeMemstoreTS, minorVersion, checksumType, bytesPerChecksum);

  // Data block index writer
  boolean cacheIndexesOnWrite = cacheConf.shouldCacheIndexesOnWrite();
  dataBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter(fsBlockWriter,
      cacheIndexesOnWrite ? cacheConf.getBlockCache() : null,
      cacheIndexesOnWrite ? name : null);
  dataBlockIndexWriter.setMaxChunkSize(
      HFileBlockIndex.getMaxChunkSize(conf));
  inlineBlockWriters.add(dataBlockIndexWriter);

  // Meta data block index writer
  metaBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter();
  LOG.debug("Initialized with " + cacheConf);

  if (isSchemaConfigured()) {
    schemaConfigurationChanged();
  }
}
Project: HBase-Research    File: TestHFile.java
/**
 * Create a truncated hfile and verify that an exception is thrown.
 */
public void testCorruptTruncatedHFile() throws IOException {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path f = new Path(ROOT_DIR, getName());
  Writer w = HFile.getWriterFactory(conf, cacheConf).withPath(this.fs, f).create();
  writeSomeRecords(w, 0, 100);
  w.close();

  Path trunc = new Path(f.getParent(), "trucated");
  truncateFile(fs, w.getPath(), trunc);

  try {
    Reader r = HFile.createReader(fs, trunc, cacheConf);
  } catch (CorruptHFileException che) {
    // Expected failure
    return;
  }
  fail("Should have thrown exception");
}
Project: HBase-Research    File: TestHFile.java
private void metablocks(final String compress) throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path mFile = new Path(ROOT_DIR, "meta.hfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  Writer writer = HFile.getWriterFactory(conf, cacheConf)
      .withOutputStream(fout)
      .withBlockSize(minBlockSize)
      .withCompression(compress)
      .create();
  someTestingWithMetaBlock(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(mFile);
  Reader reader = HFile.createReaderFromStream(mFile, fs.open(mFile),
      this.fs.getFileStatus(mFile).getLen(), cacheConf);
  reader.loadFileInfo();
  // No data -- this should return false.
  assertFalse(reader.getScanner(false, false).seekTo());
  someReadingWithMetaBlock(reader);
  fs.delete(mFile, true);
  reader.close();
  fin.close();
}