Java 类org.apache.hadoop.hbase.regionserver.wal.HLogFactory 实例源码

项目:incubator-tephra    文件:TransactionProcessorTest.java   
/**
 * Creates a standalone HRegion backed by its own HLog, with the
 * TransactionProcessor coprocessor attached and an optional TTL on the family.
 */
private HRegion createRegion(String tableName, byte[] family, long ttl) throws IOException {
  HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(tableName));
  HColumnDescriptor familyDesc = new HColumnDescriptor(family);
  if (ttl > 0) {
    familyDesc.setValue(TxConstants.PROPERTY_TTL, String.valueOf(ttl));
  }
  familyDesc.setMaxVersions(10);
  tableDesc.addFamily(familyDesc);
  tableDesc.addCoprocessor(TransactionProcessor.class.getName());
  Path rootDir = FSUtils.getRootDir(conf);
  Path tablePath = FSUtils.getTableDir(rootDir, tableDesc.getTableName());
  Path hlogPath = new Path(rootDir + "/hlog");
  FileSystem fs = FileSystem.get(conf);
  assertTrue(fs.mkdirs(tablePath));
  HLog hLog = HLogFactory.createHLog(fs, hlogPath, tableName, conf);
  HRegionInfo regionInfo = new HRegionInfo(TableName.valueOf(tableName));
  HRegionFileSystem regionFS =
      HRegionFileSystem.createRegionOnFileSystem(conf, fs, tablePath, regionInfo);
  return new HRegion(regionFS, hLog, conf, tableDesc, new MockRegionServerServices(conf, null));
}
项目:incubator-tephra    文件:TransactionProcessorTest.java   
/**
 * Creates a standalone HRegion backed by its own HLog, with the
 * TransactionProcessor coprocessor attached and an optional TTL on the family.
 */
private HRegion createRegion(String tableName, byte[] family, long ttl) throws IOException {
  HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(tableName));
  HColumnDescriptor familyDesc = new HColumnDescriptor(family);
  if (ttl > 0) {
    familyDesc.setValue(TxConstants.PROPERTY_TTL, String.valueOf(ttl));
  }
  familyDesc.setMaxVersions(10);
  tableDesc.addFamily(familyDesc);
  tableDesc.addCoprocessor(TransactionProcessor.class.getName());
  Path rootDir = FSUtils.getRootDir(conf);
  Path tablePath = FSUtils.getTableDir(rootDir, tableDesc.getTableName());
  Path hlogPath = new Path(rootDir + "/hlog");
  FileSystem fs = FileSystem.get(conf);
  assertTrue(fs.mkdirs(tablePath));
  HLog hLog = HLogFactory.createHLog(fs, hlogPath, tableName, conf);
  HRegionInfo regionInfo = new HRegionInfo(TableName.valueOf(tableName));
  HRegionFileSystem regionFS =
      HRegionFileSystem.createRegionOnFileSystem(conf, fs, tablePath, regionInfo);
  return new HRegion(regionFS, hLog, conf, tableDesc, new MockRegionServerServices(conf, null));
}
项目:HIndex    文件:ReplicationHLogReaderManager.java   
/**
 * Opens the WAL file at {@code path}, reusing the current reader when the
 * path has not changed since the last call.
 * @param path the log file to read
 * @return an HLog reader positioned on the file
 * @throws IOException if the reader cannot be opened or reset
 */
public HLog.Reader openReader(Path path) throws IOException {
  boolean sameFile = this.reader != null && this.lastPath.equals(path);
  if (!sameFile) {
    // New file: drop the old reader and open a fresh one.
    this.closeReader();
    this.reader = HLogFactory.createReader(this.fs, path, this.conf);
    this.lastPath = path;
    return this.reader;
  }
  // Same file: reset so newly appended data becomes visible.
  try {
    this.reader.reset();
  } catch (NullPointerException npe) {
    // NOTE: reset() can NPE when the underlying stream vanished (HDFS-4380).
    throw new IOException("NPE resetting reader, likely HDFS-4380", npe);
  }
  return this.reader;
}
项目:HIndex    文件:HLogInputFormat.java   
/** Opens an HLog reader for the given split and records its time range. */
@Override
public void initialize(InputSplit split, TaskAttemptContext context)
    throws IOException, InterruptedException {
  HLogSplit logSplit = (HLogSplit) split;
  Path logFile = new Path(logSplit.getLogFileName());
  Configuration conf = context.getConfiguration();
  LOG.info("Opening reader for " + split);
  try {
    FileSystem fs = logFile.getFileSystem(conf);
    this.reader = HLogFactory.createReader(fs, logFile, conf);
  } catch (EOFException x) {
    // A truncated WAL is expected after a RegionServer crash; skip it.
    LOG.info("Ignoring corrupted HLog file: " + logFile
        + " (This is normal when a RegionServer crashed.)");
  }
  this.startTime = logSplit.getStartTime();
  this.endTime = logSplit.getEndTime();
}
项目:HIndex    文件:HRegion.java   
/**
 * Convenience method for creating new HRegions; used by createTable.
 * The {@link HLog} of the created region must be closed explicitly by the
 * caller when it is not null; use {@link HRegion#getLog()} to obtain it.
 *
 * @param info region to create
 * @param rootDir root directory of the HBase instance
 * @param tableDir table directory
 * @param conf configuration
 * @param hTableDescriptor table descriptor
 * @param hlog shared HLog, or null
 * @param initialize true to initialize the region
 * @param ignoreHLog true to skip creating a new hlog when {@code hlog} is
 *        null, mostly for createTable
 * @return the new HRegion
 * @throws IOException on filesystem errors
 */
public static HRegion createHRegion(final HRegionInfo info, final Path rootDir, final Path tableDir,
                                    final Configuration conf,
                                    final HTableDescriptor hTableDescriptor,
                                    final HLog hlog,
                                    final boolean initialize, final boolean ignoreHLog)
    throws IOException {
  String tableNameStr = info.getTable().getNameAsString();
  LOG.info("creating HRegion " + tableNameStr
      + " HTD == " + hTableDescriptor + " RootDir = " + rootDir +
      " Table name == " + tableNameStr);
  FileSystem fs = FileSystem.get(conf);
  HRegionFileSystem rfs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, info);
  // When no WAL is supplied, create one under the region dir unless told not to.
  HLog effectiveHLog = hlog;
  if (effectiveHLog == null && !ignoreHLog) {
    effectiveHLog = HLogFactory.createHLog(fs, rfs.getRegionDir(),
                                           HConstants.HREGION_LOGDIR_NAME, conf);
  }
  HRegion region = HRegion.newHRegion(tableDir,
      effectiveHLog, fs, conf, info, hTableDescriptor, null);
  if (initialize) {
    // If initializing, set the sequenceId. It is also required by
    // HLogPerformanceEvaluation when verifying the WALEdits.
    region.setSequenceId(region.initialize());
  }
  return region;
}
项目:HIndex    文件:TestSnapshotLogSplitter.java   
/**
 * Walks every region dir under tableDir and checks that each recovered-edits
 * log contains only entries for that table and region.
 */
private void verifyRecoverEdits(final Path tableDir, final TableName tableName,
    final Map<byte[], byte[]> regionsMap) throws IOException {
  for (FileStatus regionStatus: FSUtils.listStatus(fs, tableDir)) {
    Path regionDir = regionStatus.getPath();
    assertTrue(regionDir.getName().startsWith(tableName.getNameAsString()));
    Path regionEdits = HLogUtil.getRegionDirRecoveredEditsDir(regionDir);
    byte[] regionName = Bytes.toBytes(regionDir.getName());
    assertFalse(regionsMap.containsKey(regionName));
    for (FileStatus logStatus: FSUtils.listStatus(fs, regionEdits)) {
      HLog.Reader reader = HLogFactory.createReader(fs, logStatus.getPath(), conf);
      try {
        // Every entry must belong to this table and this region.
        for (HLog.Entry entry = reader.next(); entry != null; entry = reader.next()) {
          HLogKey key = entry.getKey();
          assertEquals(tableName, key.getTablename());
          assertArrayEquals(regionName, key.getEncodedRegionName());
        }
      } finally {
        reader.close();
      }
    }
  }
}
项目:HIndex    文件:TestSnapshotLogSplitter.java   
/**
 * Writes a synthetic WAL: 7 tables x 10 regions x 50 edits, one row per edit.
 */
private void writeTestLog(final Path logFile) throws IOException {
  fs.mkdirs(logFile.getParent());
  HLog.Writer writer = HLogFactory.createWALWriter(fs, logFile, conf);
  try {
    for (int table = 0; table < 7; ++table) {
      TableName tableName = getTableName(table);
      for (int region = 0; region < 10; ++region) {
        byte[] regionName = getRegionName(tableName, region);
        for (int row = 0; row < 50; ++row) {
          byte[] rowkey = Bytes.toBytes("row-" + row);
          // The row index doubles as the sequence number of the entry.
          HLogKey key = new HLogKey(regionName, tableName, (long) row,
            System.currentTimeMillis(), HConstants.DEFAULT_CLUSTER_ID);
          WALEdit edit = new WALEdit();
          edit.add(new KeyValue(rowkey, TEST_FAMILY, TEST_QUALIFIER, rowkey));
          writer.append(new HLog.Entry(key, edit));
        }
      }
    }
  } finally {
    writer.close();
  }
}
项目:HIndex    文件:TestStore.java   
/**
 * Sets up a fresh HStore rooted at DIR/methodName for the given schema,
 * assigns it to the {@code store} field and returns it.
 */
@SuppressWarnings("deprecation")
private Store init(String methodName, Configuration conf, HTableDescriptor htd,
    HColumnDescriptor hcd) throws IOException {
  // Lay out the on-disk directories for the store.
  Path basedir = new Path(DIR + methodName);
  Path tableDir = FSUtils.getTableDir(basedir, htd.getTableName());
  String logName = "logs";
  Path logdir = new Path(basedir, logName);
  FileSystem fs = FileSystem.get(conf);
  // Start from a clean WAL directory.
  fs.delete(logdir, true);

  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  HLog hlog = HLogFactory.createHLog(fs, basedir, logName, conf);
  HRegion region = new HRegion(tableDir, hlog, fs, conf, info, htd, null);

  store = new HStore(region, hcd, conf);
  return store;
}
项目:PyroDB    文件:ReplicationHLogReaderManager.java   
/**
 * Opens the WAL file at {@code path}, reusing the current reader when the
 * path has not changed since the last call.
 * @param path the log file to read
 * @return an HLog reader positioned on the file
 * @throws IOException if the reader cannot be opened or reset
 */
public HLog.Reader openReader(Path path) throws IOException {
  boolean sameFile = this.reader != null && this.lastPath.equals(path);
  if (!sameFile) {
    // New file: drop the old reader and open a fresh one.
    this.closeReader();
    this.reader = HLogFactory.createReader(this.fs, path, this.conf);
    this.lastPath = path;
    return this.reader;
  }
  // Same file: reset so newly appended data becomes visible.
  try {
    this.reader.reset();
  } catch (NullPointerException npe) {
    // NOTE: reset() can NPE when the underlying stream vanished (HDFS-4380).
    throw new IOException("NPE resetting reader, likely HDFS-4380", npe);
  }
  return this.reader;
}
项目:PyroDB    文件:HLogInputFormat.java   
/** Opens an HLog reader for the given split and records its time range. */
@Override
public void initialize(InputSplit split, TaskAttemptContext context)
    throws IOException, InterruptedException {
  HLogSplit logSplit = (HLogSplit) split;
  Path logFile = new Path(logSplit.getLogFileName());
  Configuration conf = context.getConfiguration();
  LOG.info("Opening reader for " + split);
  try {
    FileSystem fs = logFile.getFileSystem(conf);
    this.reader = HLogFactory.createReader(fs, logFile, conf);
  } catch (EOFException x) {
    // A truncated WAL is expected after a RegionServer crash; skip it.
    LOG.info("Ignoring corrupted HLog file: " + logFile
        + " (This is normal when a RegionServer crashed.)");
  }
  this.startTime = logSplit.getStartTime();
  this.endTime = logSplit.getEndTime();
}
项目:PyroDB    文件:HRegion.java   
/**
 * Convenience method for creating new HRegions; used by createTable.
 * The {@link HLog} of the created region must be closed explicitly by the
 * caller when it is not null; use {@link HRegion#getLog()} to obtain it.
 *
 * @param info region to create
 * @param rootDir root directory of the HBase instance
 * @param tableDir table directory
 * @param conf configuration
 * @param hTableDescriptor table descriptor
 * @param hlog shared HLog, or null
 * @param initialize true to initialize the region
 * @param ignoreHLog true to skip creating a new hlog when {@code hlog} is
 *        null, mostly for createTable
 * @return the new HRegion
 * @throws IOException on filesystem errors
 */
public static HRegion createHRegion(final HRegionInfo info, final Path rootDir, final Path tableDir,
                                    final Configuration conf,
                                    final HTableDescriptor hTableDescriptor,
                                    final HLog hlog,
                                    final boolean initialize, final boolean ignoreHLog)
    throws IOException {
  String tableNameStr = info.getTable().getNameAsString();
  LOG.info("creating HRegion " + tableNameStr
      + " HTD == " + hTableDescriptor + " RootDir = " + rootDir +
      " Table name == " + tableNameStr);
  FileSystem fs = FileSystem.get(conf);
  HRegionFileSystem rfs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, info);
  // When no WAL is supplied, create one under the region dir unless told not to.
  HLog effectiveHLog = hlog;
  if (effectiveHLog == null && !ignoreHLog) {
    effectiveHLog = HLogFactory.createHLog(fs, rfs.getRegionDir(),
                                           HConstants.HREGION_LOGDIR_NAME, conf);
  }
  HRegion region = HRegion.newHRegion(tableDir,
      effectiveHLog, fs, conf, info, hTableDescriptor, null);
  if (initialize) {
    // If initializing, set the sequenceId. It is also required by
    // HLogPerformanceEvaluation when verifying the WALEdits.
    region.setSequenceId(region.initialize());
  }
  return region;
}
项目:PyroDB    文件:TestStore.java   
/**
 * Sets up a fresh HStore rooted at DIR/methodName for the given schema,
 * assigns it to the {@code store} field and returns it.
 */
@SuppressWarnings("deprecation")
private Store init(String methodName, Configuration conf, HTableDescriptor htd,
    HColumnDescriptor hcd) throws IOException {
  // Lay out the on-disk directories for the store.
  Path basedir = new Path(DIR + methodName);
  Path tableDir = FSUtils.getTableDir(basedir, htd.getTableName());
  String logName = "logs";
  Path logdir = new Path(basedir, logName);
  FileSystem fs = FileSystem.get(conf);
  // Start from a clean WAL directory.
  fs.delete(logdir, true);

  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  HLog hlog = HLogFactory.createHLog(fs, basedir, logName, conf);
  HRegion region = new HRegion(tableDir, hlog, fs, conf, info, htd, null);

  store = new HStore(region, hcd, conf);
  return store;
}
项目:c5    文件:ReplicationHLogReaderManager.java   
/**
 * Opens the WAL file at {@code path}, reusing the current reader when the
 * path has not changed since the last call.
 * @param path the log file to read
 * @return an HLog reader positioned on the file
 * @throws IOException if the reader cannot be opened or reset
 */
public HLog.Reader openReader(Path path) throws IOException {
  boolean sameFile = this.reader != null && this.lastPath.equals(path);
  if (!sameFile) {
    // New file: drop the old reader and open a fresh one.
    this.closeReader();
    this.reader = HLogFactory.createReader(this.fs, path, this.conf);
    this.lastPath = path;
    return this.reader;
  }
  // Same file: reset so newly appended data becomes visible.
  try {
    this.reader.reset();
  } catch (NullPointerException npe) {
    // NOTE: reset() can NPE when the underlying stream vanished (HDFS-4380).
    throw new IOException("NPE resetting reader, likely HDFS-4380", npe);
  }
  return this.reader;
}
项目:c5    文件:HLogInputFormat.java   
/** Opens an HLog reader for the given split and records its time range. */
@Override
public void initialize(InputSplit split, TaskAttemptContext context)
    throws IOException, InterruptedException {
  HLogSplit logSplit = (HLogSplit) split;
  Path logFile = new Path(logSplit.getLogFileName());
  Configuration conf = context.getConfiguration();
  LOG.info("Opening reader for " + split);
  try {
    FileSystem fs = logFile.getFileSystem(conf);
    this.reader = HLogFactory.createReader(fs, logFile, conf);
  } catch (EOFException x) {
    // A truncated WAL is expected after a RegionServer crash; skip it.
    LOG.info("Ignoring corrupted HLog file: " + logFile
        + " (This is normal when a RegionServer crashed.)");
  }
  this.startTime = logSplit.getStartTime();
  this.endTime = logSplit.getEndTime();
}
项目:c5    文件:HRegion.java   
/**
 * Convenience method for creating new HRegions; used by createTable.
 * The {@link HLog} of the created region must be closed explicitly by the
 * caller when it is not null; use {@link HRegion#getLog()} to obtain it.
 *
 * @param info region to create
 * @param rootDir root directory of the HBase instance
 * @param conf configuration
 * @param hTableDescriptor table descriptor
 * @param hlog shared HLog, or null
 * @param initialize true to initialize the region
 * @param ignoreHLog true to skip creating a new hlog when {@code hlog} is
 *        null, mostly for createTable
 * @return the new HRegion
 * @throws IOException on filesystem errors
 */
public static HRegion createHRegion(final HRegionInfo info, final Path rootDir,
                                    final Configuration conf,
                                    final HTableDescriptor hTableDescriptor,
                                    final HLog hlog,
                                    final boolean initialize, final boolean ignoreHLog)
    throws IOException {
  String tableNameStr = info.getTable().getNameAsString();
  LOG.info("creating HRegion " + tableNameStr
      + " HTD == " + hTableDescriptor + " RootDir = " + rootDir +
      " Table name == " + tableNameStr);

  Path tableDir = FSUtils.getTableDir(rootDir, info.getTable());
  FileSystem fs = FileSystem.get(conf);
  HRegionFileSystem rfs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, info);
  // When no WAL is supplied, create one under the region dir unless told not to.
  HLog effectiveHLog = hlog;
  if (effectiveHLog == null && !ignoreHLog) {
    effectiveHLog = HLogFactory.createHLog(fs, rfs.getRegionDir(),
                                           HConstants.HREGION_LOGDIR_NAME, conf);
  }
  HRegion region = HRegion.newHRegion(tableDir,
      effectiveHLog, fs, conf, info, hTableDescriptor, null);
  if (initialize) {
    region.initialize();
  }
  return region;
}
项目:c5    文件:TestSnapshotLogSplitter.java   
/**
 * Walks every region dir under tableDir and checks that each recovered-edits
 * log contains only entries for that table and region.
 */
private void verifyRecoverEdits(final Path tableDir, final TableName tableName,
    final Map<byte[], byte[]> regionsMap) throws IOException {
  for (FileStatus regionStatus: FSUtils.listStatus(fs, tableDir)) {
    Path regionDir = regionStatus.getPath();
    assertTrue(regionDir.getName().startsWith(tableName.getNameAsString()));
    Path regionEdits = HLogUtil.getRegionDirRecoveredEditsDir(regionDir);
    byte[] regionName = Bytes.toBytes(regionDir.getName());
    assertFalse(regionsMap.containsKey(regionName));
    for (FileStatus logStatus: FSUtils.listStatus(fs, regionEdits)) {
      HLog.Reader reader = HLogFactory.createReader(fs, logStatus.getPath(), conf);
      try {
        // Every entry must belong to this table and this region.
        for (HLog.Entry entry = reader.next(); entry != null; entry = reader.next()) {
          HLogKey key = entry.getKey();
          assertEquals(tableName, key.getTablename());
          assertArrayEquals(regionName, key.getEncodedRegionName());
        }
      } finally {
        reader.close();
      }
    }
  }
}
项目:c5    文件:TestSnapshotLogSplitter.java   
/**
 * Writes a synthetic WAL: 7 tables x 10 regions x 50 edits, one row per edit.
 */
private void writeTestLog(final Path logFile) throws IOException {
  fs.mkdirs(logFile.getParent());
  HLog.Writer writer = HLogFactory.createWALWriter(fs, logFile, conf);
  try {
    for (int table = 0; table < 7; ++table) {
      TableName tableName = getTableName(table);
      for (int region = 0; region < 10; ++region) {
        byte[] regionName = getRegionName(tableName, region);
        for (int row = 0; row < 50; ++row) {
          byte[] rowkey = Bytes.toBytes("row-" + row);
          // The row index doubles as the sequence number of the entry.
          HLogKey key = new HLogKey(regionName, tableName, (long) row,
            System.currentTimeMillis(), HConstants.DEFAULT_CLUSTER_ID);
          WALEdit edit = new WALEdit();
          edit.add(new KeyValue(rowkey, TEST_FAMILY, TEST_QUALIFIER, rowkey));
          writer.append(new HLog.Entry(key, edit));
        }
      }
    }
  } finally {
    writer.close();
  }
}
项目:c5    文件:TestStore.java   
/**
 * Sets up a fresh HStore rooted at DIR/methodName for the given schema and
 * assigns it to the {@code store} field.
 */
@SuppressWarnings("deprecation")
private void init(String methodName, Configuration conf, HTableDescriptor htd,
    HColumnDescriptor hcd) throws IOException {
  // Lay out the on-disk directories for the store.
  Path basedir = new Path(DIR + methodName);
  Path tableDir = FSUtils.getTableDir(basedir, htd.getTableName());
  String logName = "logs";
  Path logdir = new Path(basedir, logName);
  FileSystem fs = FileSystem.get(conf);
  // Start from a clean WAL directory.
  fs.delete(logdir, true);

  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  HLog hlog = HLogFactory.createHLog(fs, basedir, logName, conf);
  HRegion region = new HRegion(tableDir, hlog, fs, conf, info, htd, null);

  store = new HStore(region, hcd, conf);
}
项目:HBase-LOB    文件:TestHMobStore.java   
/**
 * Sets up the Region and a mob-aware HMobStore; when {@code testStore} is
 * true the extra store-specific init(conf, hcd) is run afterwards.
 */
private void init(String methodName, Configuration conf, HTableDescriptor htd,
    HColumnDescriptor hcd, boolean testStore) throws IOException {
  // Setting up the Region and Store.
  Path basedir = new Path(DIR + methodName);
  Path tableDir = FSUtils.getTableDir(basedir, htd.getTableName());
  String logName = "logs";
  Path logdir = new Path(basedir, logName);
  FileSystem fs = FileSystem.get(conf);
  // Start from a clean WAL directory.
  fs.delete(logdir, true);

  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  HLog hlog = HLogFactory.createHLog(fs, basedir, logName, conf);
  region = new HRegion(tableDir, hlog, fs, conf, info, htd, null);
  store = new HMobStore(region, hcd, conf);
  if (testStore) {
    init(conf, hcd);
  }
}
项目:DominoHBase    文件:HLogInputFormat.java   
/** Opens an HLog reader for the given split and records its time range. */
@Override
public void initialize(InputSplit split, TaskAttemptContext context)
    throws IOException, InterruptedException {
  HLogSplit logSplit = (HLogSplit) split;
  Path logFile = new Path(logSplit.getLogFileName());
  Configuration conf = context.getConfiguration();
  LOG.info("Opening reader for " + split);
  try {
    FileSystem fs = logFile.getFileSystem(conf);
    this.reader = HLogFactory.createReader(fs, logFile, conf);
  } catch (EOFException x) {
    // A truncated WAL is expected after a RegionServer crash; skip it.
    LOG.info("Ignoring corrupted HLog file: " + logFile
        + " (This is normal when a RegionServer crashed.)");
  }
  this.startTime = logSplit.getStartTime();
  this.endTime = logSplit.getEndTime();
}
项目:DominoHBase    文件:HMerge.java   
/**
 * Builds a Merger for the given table: resolves the table dir under the
 * HBase root, loads its descriptor and opens a dedicated merge WAL.
 */
protected Merger(Configuration conf, FileSystem fs, final byte [] tableName)
throws IOException {
  this.conf = conf;
  this.fs = fs;
  this.maxFilesize = conf.getLong(HConstants.HREGION_MAX_FILESIZE,
      HConstants.DEFAULT_MAX_FILE_SIZE);

  Path hbaseDir = fs.makeQualified(new Path(conf.get(HConstants.HBASE_DIR)));
  this.tabledir = new Path(hbaseDir, Bytes.toString(tableName));
  this.htd = FSTableDescriptors.getTableDescriptor(this.fs, this.tabledir);
  // Time-stamped log name keeps concurrent merges from colliding.
  String logname = "merge_" + System.currentTimeMillis() + HConstants.HREGION_LOGDIR_NAME;

  this.hlog = HLogFactory.createHLog(fs, tabledir, logname, conf);
}
项目:DominoHBase    文件:HLogPerformanceEvaluation.java   
/**
 * Verifies the content of a WAL file: sequence ids must be strictly
 * ascending from entry to entry.
 *
 * @param wal path of the WAL file to check
 * @param verbose log each seqid when true
 * @return count of edits read
 * @throws IOException if the file cannot be read
 */
private long verify(final Path wal, final boolean verbose) throws IOException {
  HLog.Reader reader = HLogFactory.createReader(wal.getFileSystem(getConf()),
      wal, getConf());
  long previousSeqid = -1;
  long count = 0;
  try {
    for (Entry e = reader.next(); e != null; e = reader.next()) {
      count++;
      long seqid = e.getKey().getLogSeqNum();
      if (verbose) LOG.info("seqid=" + seqid);
      if (previousSeqid >= seqid) {
        // Non-increasing ids mean the WAL was written incorrectly.
        throw new IllegalStateException("wal=" + wal.getName() +
          ", previousSeqid=" + previousSeqid + ", seqid=" + seqid);
      }
      previousSeqid = seqid;
    }
  } finally {
    reader.close();
  }
  return count;
}
项目:DominoHBase    文件:TestStore.java   
/**
 * Sets up a fresh HStore rooted at DIR/methodName and assigns it to the
 * {@code store} field.
 */
private void init(String methodName, Configuration conf,
    HColumnDescriptor hcd) throws IOException {
  // Lay out the on-disk directories for the store.
  Path basedir = new Path(DIR + methodName);
  String logName = "logs";
  Path logdir = new Path(basedir, logName);
  FileSystem fs = FileSystem.get(conf);
  // Start from a clean WAL directory.
  fs.delete(logdir, true);

  HTableDescriptor htd = new HTableDescriptor(table);
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
  HLog hlog = HLogFactory.createHLog(fs, basedir, logName, conf);
  HRegion region = new HRegion(basedir, hlog, fs, conf, info, htd, null);

  store = new HStore(basedir, region, hcd, fs, conf);
}
项目:HIndex    文件:MetaUtils.java   
/**
 * Lazily creates and caches an HLog under the filesystem home directory.
 *
 * @return the HLog
 * @throws IOException e
 */
public synchronized HLog getLog() throws IOException {
  if (this.log == null) {
    // Time-stamped name avoids clashing with a previous run's log dir.
    String logName =
        HConstants.HREGION_LOGDIR_NAME + "_" + System.currentTimeMillis();
    this.log = HLogFactory.createHLog(this.fs, this.fs.getHomeDirectory(),
                                      logName, this.conf);
  }
  return this.log;
}
项目:HIndex    文件:HMerge.java   
/**
 * Builds a Merger for the given table: resolves the table dir under the
 * HBase root, loads its descriptor and opens a dedicated merge WAL.
 */
protected Merger(Configuration conf, FileSystem fs, final TableName tableName)
throws IOException {
  this.conf = conf;
  this.fs = fs;
  this.maxFilesize = conf.getLong(HConstants.HREGION_MAX_FILESIZE,
      HConstants.DEFAULT_MAX_FILE_SIZE);

  this.rootDir = FSUtils.getRootDir(conf);
  Path tabledir = FSUtils.getTableDir(this.rootDir, tableName);
  this.htd = FSTableDescriptors.getTableDescriptorFromFs(this.fs, tabledir);
  // Time-stamped log name keeps concurrent merges from colliding.
  String logname = "merge_" + System.currentTimeMillis() + HConstants.HREGION_LOGDIR_NAME;

  this.hlog = HLogFactory.createHLog(fs, tabledir, logname, conf);
}
项目:HIndex    文件:HRegionServer.java   
/** Lazily creates and caches the dedicated WAL used for meta edits. */
private HLog getMetaWAL() throws IOException {
  if (this.hlogForMeta != null) return this.hlogForMeta;
  String server = this.serverNameFromMasterPOV.toString();
  final String logName = HLogUtil.getHLogDirectoryName(server);
  Path logdir = new Path(rootDir, logName);
  if (LOG.isDebugEnabled()) LOG.debug("logdir=" + logdir);
  this.hlogForMeta = HLogFactory.createMetaHLog(this.fs.getBackingFs(), rootDir, logName,
    this.conf, getMetaWALActionListeners(), server);
  return this.hlogForMeta;
}
项目:HIndex    文件:HRegion.java   
/**
 * Facility for dumping and compacting catalog tables.
 * Only does catalog tables since these are the only tables whose schema we
 * know for sure. For usage run:
 * <pre>
 *   ./bin/hbase org.apache.hadoop.hbase.regionserver.HRegion
 * </pre>
 * @param args table dir, optionally followed by an option starting with "major"
 * @throws IOException on read/write failure
 */
public static void main(String[] args) throws IOException {
  if (args.length < 1) {
    printUsageAndExit(null);
  }
  // An optional second arg starting with "major" requests a major compaction.
  boolean majorCompact = false;
  if (args.length > 1) {
    if (!args[1].toLowerCase().startsWith("major")) {
      printUsageAndExit("ERROR: Unrecognized option <" + args[1] + ">");
    }
    majorCompact = true;
  }
  final Path tableDir = new Path(args[0]);
  final Configuration c = HBaseConfiguration.create();
  final FileSystem fs = FileSystem.get(c);
  // Throwaway WAL under the tmp dir, uniquely named per run.
  final Path logdir = new Path(c.get("hbase.tmp.dir"));
  final String logname = "hlog" + FSUtils.getTableName(tableDir) + System.currentTimeMillis();

  final HLog log = HLogFactory.createHLog(fs, logdir, logname, c);
  try {
    processTable(fs, tableDir, log, c, majorCompact);
  } finally {
     log.close();
     // TODO: is this still right?
     BlockCache bc = new CacheConfig(c).getBlockCache();
     if (bc != null) bc.shutdown();
  }
}
项目:HIndex    文件:TestReplicationSource.java   
/**
 * Sanity check that we can move logs around while we are reading
 * from them. Should this test fail, ReplicationSource would have a hard
 * time reading logs that are being archived.
 * @throws Exception
 */
@Test
public void testLogMoving() throws Exception{
  Path logPath = new Path(logDir, "log");
  if (!FS.exists(logDir)) FS.mkdirs(logDir);
  if (!FS.exists(oldLogDir)) FS.mkdirs(oldLogDir);
  // Write a few edits; close the writer even if an append/sync fails.
  HLog.Writer writer = HLogFactory.createWALWriter(FS,
    logPath, conf);
  try {
    for(int i = 0; i < 3; i++) {
      byte[] b = Bytes.toBytes(Integer.toString(i));
      KeyValue kv = new KeyValue(b,b,b);
      WALEdit edit = new WALEdit();
      edit.add(kv);
      HLogKey key = new HLogKey(b, TableName.valueOf(b), 0, 0,
          HConstants.DEFAULT_CLUSTER_ID);
      writer.append(new HLog.Entry(key, edit));
      writer.sync();
    }
  } finally {
    writer.close();
  }

  // The reader must keep returning entries even after the log is renamed away.
  HLog.Reader reader = HLogFactory.createReader(FS,
      logPath, conf);
  try {
    HLog.Entry entry = reader.next();
    assertNotNull(entry);

    Path oldLogPath = new Path(oldLogDir, "log");
    FS.rename(logPath, oldLogPath);

    entry = reader.next();
    assertNotNull(entry);

    entry = reader.next();
    entry = reader.next();

    assertNull(entry);
  } finally {
    // BUGFIX: the reader was previously never closed, leaking the stream.
    reader.close();
  }
}
项目:HIndex    文件:TestReplicationHLogReaderManager.java   
/**
 * Creates the reader manager and a fresh HLog with a PathWatcher listener
 * so the tests can observe every new log path.
 */
@Before
public void setUp() throws Exception {
  logManager = new ReplicationHLogReaderManager(fs, conf);
  pathWatcher = new PathWatcher();
  List<WALActionsListener> listeners = new ArrayList<WALActionsListener>();
  listeners.add(pathWatcher);
  log = HLogFactory.createHLog(fs, hbaseDir, "test", conf, listeners, "some server");
}
项目:HIndex    文件:TestDistributedLogSplitting.java   
/**
 * Counts the entries in the given WAL file.
 *
 * @param log  path of the WAL file
 * @param fs   filesystem holding the file
 * @param conf configuration used to open the reader
 * @return number of entries read
 * @throws IOException if the file cannot be read
 */
private int countHLog(Path log, FileSystem fs, Configuration conf)
throws IOException {
  int count = 0;
  HLog.Reader in = HLogFactory.createReader(fs, log, conf);
  try {
    while (in.next() != null) {
      count++;
    }
  } finally {
    // BUGFIX: the reader was previously never closed, leaking the stream.
    in.close();
  }
  return count;
}
项目:HIndex    文件:TestWALObserver.java   
/**
 * Test to see CP loaded successfully or not. There is a duplication at
 * TestHLog, but the purpose of that one is to see whether the loaded CP will
 * impact existing HLog tests or not.
 */
@Test
public void testWALObserverLoaded() throws Exception {
  HLog log = HLogFactory.createHLog(fs, hbaseRootDir,
      TestWALObserver.class.getName(), conf);
  try {
    assertNotNull(getCoprocessor(log));
  } finally {
    // BUGFIX: close the log so its writer and background resources are released.
    log.close();
  }
}
项目:HIndex    文件:TestDefaultCompactSelection.java   
/**
 * Builds a standalone HStore with the compaction-selection configuration
 * pinned to fixed values so selection tests behave deterministically.
 */
@Override
public void setUp() throws Exception {
  // setup config values necessary for store
  this.conf = TEST_UTIL.getConfiguration();
  // A period of 0 disables time-triggered major compactions.
  this.conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 0);
  this.conf.setInt("hbase.hstore.compaction.min", minFiles);
  this.conf.setInt("hbase.hstore.compaction.max", maxFiles);
  this.conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, minSize);
  this.conf.setLong("hbase.hstore.compaction.max.size", maxSize);
  this.conf.setFloat("hbase.hstore.compaction.ratio", 1.0F);

  //Setting up a Store
  Path basedir = new Path(DIR);
  String logName = "logs";
  Path logdir = new Path(DIR, logName);
  HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("family"));
  FileSystem fs = FileSystem.get(conf);

  // Remove any WAL directory left over from a previous run.
  fs.delete(logdir, true);

  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(Bytes.toBytes("table")));
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);

  hlog = HLogFactory.createHLog(fs, basedir, logName, conf);
  // Create the region on disk first, then close it and reopen it wired to
  // this test's own hlog so the store below shares that WAL.
  region = HRegion.createHRegion(info, basedir, conf, htd);
  HRegion.closeHRegion(region);
  Path tableDir = FSUtils.getTableDir(basedir, htd.getTableName());
  region = new HRegion(tableDir, hlog, fs, conf, info, htd, null);

  store = new HStore(region, hcd, conf);

  // Placeholder store file used by the tests.
  TEST_FILE = region.getRegionFileSystem().createTempName();
  fs.createNewFile(TEST_FILE);
}
项目:HIndex    文件:TestSplitTransaction.java   
/**
 * Creates a clean test dir and WAL, then builds the parent region with the
 * CustomObserver coprocessor installed, ready for split-transaction tests.
 */
@Before public void setup() throws IOException {
  this.fs = FileSystem.get(TEST_UTIL.getConfiguration());
  TEST_UTIL.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, CustomObserver.class.getName());
  // Start from a clean slate: wipe anything left from a previous run.
  this.fs.delete(this.testdir, true);
  this.wal = HLogFactory.createHLog(fs, this.testdir, "logs",
    TEST_UTIL.getConfiguration());

  this.parent = createRegion(this.testdir, this.wal);
  RegionCoprocessorHost host = new RegionCoprocessorHost(this.parent, null, TEST_UTIL.getConfiguration());
  this.parent.setCoprocessorHost(host);
  TEST_UTIL.getConfiguration().setBoolean("hbase.testing.nocluster", true);
}
项目:HIndex    文件:TestCacheOnWriteInSchema.java   
/**
 * Builds a local region + HStore over a schema whose cache-on-write flags
 * are set by {@code cowType}, with the global cache-on-write switches
 * disabled so only the per-family schema settings apply.
 */
@Before
public void setUp() throws IOException {
  // parameterized tests add [#] suffix get rid of [ and ].
  table = Bytes.toBytes(name.getMethodName().replaceAll("[\\[\\]]", "_"));

  conf = TEST_UTIL.getConfiguration();
  conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);
  // Disable the global cache-on-write switches; cowType re-enables them
  // per column family in the schema below.
  conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, false);
  conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, false);
  conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, false);

  fs = HFileSystem.get(conf);

  // Create the schema
  HColumnDescriptor hcd = new HColumnDescriptor(family);
  hcd.setBloomFilterType(BloomType.ROWCOL);
  cowType.modifyFamilySchema(hcd);
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
  htd.addFamily(hcd);

  // Create a store based on the schema
  Path basedir = new Path(DIR);
  String logName = "logs";
  Path logdir = new Path(DIR, logName);
  // Remove any WAL directory left over from a previous run.
  fs.delete(logdir, true);

  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  hlog = HLogFactory.createHLog(fs, basedir, logName, conf);

  region = TEST_UTIL.createLocalHRegion(info, htd, hlog);
  store = new HStore(region, hcd, conf);
}
项目:PyroDB    文件:MetaUtils.java   
/**
 * Lazily creates and caches an HLog under the filesystem home directory.
 *
 * @return the HLog
 * @throws IOException e
 */
public synchronized HLog getLog() throws IOException {
  if (this.log == null) {
    // Time-stamped name avoids clashing with a previous run's log dir.
    String logName =
        HConstants.HREGION_LOGDIR_NAME + "_" + System.currentTimeMillis();
    this.log = HLogFactory.createHLog(this.fs, this.fs.getHomeDirectory(),
                                      logName, this.conf);
  }
  return this.log;
}
项目:PyroDB    文件:HMerge.java   
/**
 * Builds a Merger for the given table: resolves the table dir under the
 * HBase root, loads its descriptor and opens a dedicated merge WAL.
 */
protected Merger(Configuration conf, FileSystem fs, final TableName tableName)
throws IOException {
  this.conf = conf;
  this.fs = fs;
  this.maxFilesize = conf.getLong(HConstants.HREGION_MAX_FILESIZE,
      HConstants.DEFAULT_MAX_FILE_SIZE);

  this.rootDir = FSUtils.getRootDir(conf);
  Path tabledir = FSUtils.getTableDir(this.rootDir, tableName);
  this.htd = FSTableDescriptors.getTableDescriptorFromFs(this.fs, tabledir);
  // Time-stamped log name keeps concurrent merges from colliding.
  String logname = "merge_" + System.currentTimeMillis() + HConstants.HREGION_LOGDIR_NAME;

  this.hlog = HLogFactory.createHLog(fs, tabledir, logname, conf);
}
项目:PyroDB    文件:HRegionServer.java   
/** Lazily creates and caches the dedicated WAL used for meta edits. */
private HLog getMetaWAL() throws IOException {
  if (this.hlogForMeta != null) return this.hlogForMeta;
  String server = this.serverName.toString();
  final String logName = HLogUtil.getHLogDirectoryName(server);
  Path logdir = new Path(rootDir, logName);
  if (LOG.isDebugEnabled()) LOG.debug("logdir=" + logdir);
  this.hlogForMeta = HLogFactory.createMetaHLog(this.fs.getBackingFs(), rootDir, logName,
    this.conf, getMetaWALActionListeners(), server);
  return this.hlogForMeta;
}
项目:PyroDB    文件:HRegion.java   
/**
 * Facility for dumping and compacting catalog tables.
 * Only does catalog tables since these are the only tables whose schema we
 * know for sure. For usage run:
 * <pre>
 *   ./bin/hbase org.apache.hadoop.hbase.regionserver.HRegion
 * </pre>
 * @param args table dir, optionally followed by an option starting with "major"
 * @throws IOException on read/write failure
 */
public static void main(String[] args) throws IOException {
  if (args.length < 1) {
    printUsageAndExit(null);
  }
  // An optional second arg starting with "major" requests a major compaction.
  boolean majorCompact = false;
  if (args.length > 1) {
    if (!args[1].toLowerCase().startsWith("major")) {
      printUsageAndExit("ERROR: Unrecognized option <" + args[1] + ">");
    }
    majorCompact = true;
  }
  final Path tableDir = new Path(args[0]);
  final Configuration c = HBaseConfiguration.create();
  final FileSystem fs = FileSystem.get(c);
  // Throwaway WAL under the tmp dir, uniquely named per run.
  final Path logdir = new Path(c.get("hbase.tmp.dir"));
  final String logname = "hlog" + FSUtils.getTableName(tableDir) + System.currentTimeMillis();

  final HLog log = HLogFactory.createHLog(fs, logdir, logname, c);
  try {
    processTable(fs, tableDir, log, c, majorCompact);
  } finally {
     log.close();
     // TODO: is this still right?
     BlockCache bc = new CacheConfig(c).getBlockCache();
     if (bc != null) bc.shutdown();
  }
}
项目:PyroDB    文件:TestReplicationSource.java   
/**
 * Sanity check that we can move logs around while we are reading
 * from them. Should this test fail, ReplicationSource would have a hard
 * time reading logs that are being archived.
 * @throws Exception
 */
@Test
public void testLogMoving() throws Exception{
  Path logPath = new Path(logDir, "log");
  if (!FS.exists(logDir)) FS.mkdirs(logDir);
  if (!FS.exists(oldLogDir)) FS.mkdirs(oldLogDir);
  // Write a few edits; close the writer even if an append/sync fails.
  HLog.Writer writer = HLogFactory.createWALWriter(FS,
    logPath, conf);
  try {
    for(int i = 0; i < 3; i++) {
      byte[] b = Bytes.toBytes(Integer.toString(i));
      KeyValue kv = new KeyValue(b,b,b);
      WALEdit edit = new WALEdit();
      edit.add(kv);
      HLogKey key = new HLogKey(b, TableName.valueOf(b), 0, 0,
          HConstants.DEFAULT_CLUSTER_ID);
      writer.append(new HLog.Entry(key, edit));
      writer.sync();
    }
  } finally {
    writer.close();
  }

  // The reader must keep returning entries even after the log is renamed away.
  HLog.Reader reader = HLogFactory.createReader(FS,
      logPath, conf);
  try {
    HLog.Entry entry = reader.next();
    assertNotNull(entry);

    Path oldLogPath = new Path(oldLogDir, "log");
    FS.rename(logPath, oldLogPath);

    entry = reader.next();
    assertNotNull(entry);

    entry = reader.next();
    entry = reader.next();

    assertNull(entry);
  } finally {
    // BUGFIX: the reader was previously never closed, leaking the stream.
    reader.close();
  }
}
项目:PyroDB    文件:TestReplicationHLogReaderManager.java   
/**
 * Creates the reader manager and a fresh HLog with a PathWatcher listener
 * so the tests can observe every new log path.
 */
@Before
public void setUp() throws Exception {
  logManager = new ReplicationHLogReaderManager(fs, conf);
  pathWatcher = new PathWatcher();
  List<WALActionsListener> listeners = new ArrayList<WALActionsListener>();
  listeners.add(pathWatcher);
  log = HLogFactory.createHLog(fs, hbaseDir, "test", conf, listeners, "some server");
}