Example source code for the Java class org.apache.hadoop.hbase.regionserver.LastSequenceId

Project: ditb    File: WALSplitter.java
WALSplitter(final WALFactory factory, Configuration conf, Path rootDir,
    FileSystem fs, LastSequenceId idChecker,
    CoordinatedStateManager csm, RecoveryMode mode) {
  this.conf = HBaseConfiguration.create(conf);
  String codecClassName = conf
      .get(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, WALCellCodec.class.getName());
  this.conf.set(HConstants.RPC_CODEC_CONF_KEY, codecClassName);
  this.rootDir = rootDir;
  this.fs = fs;
  this.sequenceIdChecker = idChecker;
  this.csm = (BaseCoordinatedStateManager)csm;
  this.walFactory = factory;
  this.controller = new PipelineController();

  entryBuffers = new EntryBuffers(controller,
      this.conf.getInt("hbase.regionserver.hlog.splitlog.buffersize",
          128*1024*1024));

  // a larger minBatchSize may slow down recovery because replay writer has to wait for
  // enough edits before replaying them
  this.minBatchSize = this.conf.getInt("hbase.regionserver.wal.logreplay.batch.size", 64);
  this.distributedLogReplay = (RecoveryMode.LOG_REPLAY == mode);

  this.numWriterThreads = this.conf.getInt("hbase.regionserver.hlog.splitlog.writer.threads", 3);
  if (csm != null && this.distributedLogReplay) {
    outputSink = new LogReplayOutputSink(controller, entryBuffers, numWriterThreads);
  } else {
    if (this.distributedLogReplay) {
      LOG.info("ZooKeeperWatcher is passed in as NULL so disable distrubitedLogRepaly.");
    }
    this.distributedLogReplay = false;
    outputSink = new LogRecoveredEditsOutputSink(controller, entryBuffers, numWriterThreads);
  }

}
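A note on the idChecker parameter threaded through all of the constructors on this page: it is the org.apache.hadoop.hbase.regionserver.LastSequenceId callback this page is about. In 1.x-era branches such as the ditb fork above, the interface declares a single getLastSequenceId(byte[] encodedRegionName) method returning ClusterStatusProtos.RegionStoreSequenceIds (HRegionServer is the usual implementor), and the splitter also accepts null, which disables sequence-id filtering entirely. Below is a minimal stand-alone stub under that 1.x assumption; the class name NoopLastSequenceId and its always-unknown answer are illustrative only and do not come from any of the listed projects.

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
import org.apache.hadoop.hbase.regionserver.LastSequenceId;

// Hypothetical stub: reports "no flushed sequence id known" for every region,
// so the splitter keeps all edits. Assumes the 1.x interface shape; HBase 2.x
// moves RegionStoreSequenceIds to the shaded protobuf package.
public class NoopLastSequenceId implements LastSequenceId {
  @Override
  public RegionStoreSequenceIds getLastSequenceId(byte[] encodedRegionName) {
    // HConstants.NO_SEQNUM (-1) means nothing is known to have been flushed.
    return RegionStoreSequenceIds.newBuilder()
        .setLastFlushedSequenceId(HConstants.NO_SEQNUM)
        .build();
  }
}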
Project: pbase    File: WALSplitter.java
WALSplitter(final WALFactory factory, Configuration conf, Path rootDir,
    FileSystem fs, LastSequenceId idChecker,
    CoordinatedStateManager csm, RecoveryMode mode) {
  this.conf = HBaseConfiguration.create(conf);
  String codecClassName = conf
      .get(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, WALCellCodec.class.getName());
  this.conf.set(HConstants.RPC_CODEC_CONF_KEY, codecClassName);
  this.rootDir = rootDir;
  this.fs = fs;
  this.sequenceIdChecker = idChecker;
  this.csm = (BaseCoordinatedStateManager)csm;
  this.walFactory = factory;

  entryBuffers = new EntryBuffers(
      this.conf.getInt("hbase.regionserver.hlog.splitlog.buffersize",
          128*1024*1024));

  // a larger minBatchSize may slow down recovery because replay writer has to wait for
  // enough edits before replaying them
  this.minBatchSize = this.conf.getInt("hbase.regionserver.wal.logreplay.batch.size", 64);
  this.distributedLogReplay = (RecoveryMode.LOG_REPLAY == mode);

  this.numWriterThreads = this.conf.getInt("hbase.regionserver.hlog.splitlog.writer.threads", 3);
  if (csm != null && this.distributedLogReplay) {
    outputSink = new LogReplayOutputSink(numWriterThreads);
  } else {
    if (this.distributedLogReplay) {
      LOG.info("ZooKeeperWatcher is passed in as NULL so disable distrubitedLogRepaly.");
    }
    this.distributedLogReplay = false;
    outputSink = new LogRecoveredEditsOutputSink(numWriterThreads);
  }

}
Project: HIndex    File: HLogSplitter.java
HLogSplitter(Configuration conf, Path rootDir,
    FileSystem fs, LastSequenceId idChecker, ZooKeeperWatcher zkw) {
  this.conf = HBaseConfiguration.create(conf);
  String codecClassName = conf
      .get(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, WALCellCodec.class.getName());
  this.conf.set(HConstants.RPC_CODEC_CONF_KEY, codecClassName);
  this.rootDir = rootDir;
  this.fs = fs;
  this.sequenceIdChecker = idChecker;
  this.watcher = zkw;

  entryBuffers = new EntryBuffers(
      this.conf.getInt("hbase.regionserver.hlog.splitlog.buffersize",
          128*1024*1024));

  // a larger minBatchSize may slow down recovery because replay writer has to wait for
  // enough edits before replaying them
  this.minBatchSize = this.conf.getInt("hbase.regionserver.wal.logreplay.batch.size", 64);
  this.distributedLogReplay = HLogSplitter.isDistributedLogReplay(this.conf);

  this.numWriterThreads = this.conf.getInt("hbase.regionserver.hlog.splitlog.writer.threads", 3);
  if (zkw != null && this.distributedLogReplay) {
    outputSink = new LogReplayOutputSink(numWriterThreads);
  } else {
    if (this.distributedLogReplay) {
      LOG.info("ZooKeeperWatcher is passed in as NULL so disable distrubitedLogRepaly.");
    }
    this.distributedLogReplay = false;
    outputSink = new LogRecoveredEditsOutputSink(numWriterThreads);
  }

}
Project: hbase    File: WALSplitter.java
@VisibleForTesting
WALSplitter(final WALFactory factory, Configuration conf, Path rootDir,
    FileSystem fs, LastSequenceId idChecker,
    SplitLogWorkerCoordination splitLogWorkerCoordination) {
  this.conf = HBaseConfiguration.create(conf);
  String codecClassName = conf
      .get(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, WALCellCodec.class.getName());
  this.conf.set(HConstants.RPC_CODEC_CONF_KEY, codecClassName);
  this.rootDir = rootDir;
  this.fs = fs;
  this.sequenceIdChecker = idChecker;
  this.splitLogWorkerCoordination = splitLogWorkerCoordination;

  this.walFactory = factory;
  PipelineController controller = new PipelineController();

  this.splitWriterCreationBounded = conf.getBoolean(SPLIT_WRITER_CREATION_BOUNDED, false);

  entryBuffers = new EntryBuffers(controller,
      this.conf.getInt("hbase.regionserver.hlog.splitlog.buffersize", 128 * 1024 * 1024),
      splitWriterCreationBounded);

  int numWriterThreads = this.conf.getInt("hbase.regionserver.hlog.splitlog.writer.threads", 3);
  if (splitWriterCreationBounded) {
    outputSink = new BoundedLogWriterCreationOutputSink(
        controller, entryBuffers, numWriterThreads);
  } else {
    outputSink = new LogRecoveredEditsOutputSink(controller, entryBuffers, numWriterThreads);
  }
}
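The constructors above read a handful of split-time knobs from the Configuration. The sketch below only sets keys that appear in the snippets on this page; the values are illustrative, not tuning advice, and it assumes the SPLIT_WRITER_CREATION_BOUNDED constant referenced in the hbase 2.x constructor is accessible to callers.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.wal.WALSplitter;

public class SplitTuningSketch {
  // Returns a Configuration with the WAL-split knobs used above set explicitly.
  public static Configuration tunedConf() {
    Configuration conf = HBaseConfiguration.create();
    // Buffer of queued edits per splitter; the constructors default this to 128 MB.
    conf.setInt("hbase.regionserver.hlog.splitlog.buffersize", 64 * 1024 * 1024);
    // Writer threads draining the entry buffers; the default above is 3.
    conf.setInt("hbase.regionserver.hlog.splitlog.writer.threads", 4);
    // 2.x only: bound writer creation (BoundedLogWriterCreationOutputSink path).
    conf.setBoolean(WALSplitter.SPLIT_WRITER_CREATION_BOUNDED, true);
    return conf;
  }
}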
Project: hbase    File: WALSplitter.java
/**
 * Splits a WAL file into each region's recovered-edits directory.
 * This is the main entry point for distributed log splitting from SplitLogWorker.
 * <p>
 * If the log file has N regions then N recovered.edits files will be produced.
 * <p>
 * @return false if it is interrupted by the progress-able.
 */
public static boolean splitLogFile(Path rootDir, FileStatus logfile, FileSystem fs,
    Configuration conf, CancelableProgressable reporter, LastSequenceId idChecker,
    SplitLogWorkerCoordination splitLogWorkerCoordination, final WALFactory factory)
    throws IOException {
  WALSplitter s = new WALSplitter(factory, conf, rootDir, fs, idChecker,
      splitLogWorkerCoordination);
  return s.splitLogFile(logfile, reporter);
}
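A rough usage sketch for this 2.x entry point (not taken from any of the projects above): passing null for the reporter, the LastSequenceId checker, and the SplitLogWorkerCoordination mirrors the master-side split path, meaning no progress callbacks, no sequence-id filtering, and no distributed-split coordination. The splitOne helper and its parameters are hypothetical; the WALFactory is assumed to be created by the caller against the same Configuration.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hbase.wal.WALSplitter;

public class SplitOneWalSketch {
  // Splits a single WAL file into per-region recovered.edits files.
  public static boolean splitOne(Configuration conf, Path rootDir, Path walPath,
      WALFactory factory) throws IOException {
    FileSystem fs = rootDir.getFileSystem(conf);
    FileStatus walStatus = fs.getFileStatus(walPath);
    // null reporter: no progress callbacks; null idChecker: keep every edit;
    // null coordination: not running inside a SplitLogWorker.
    return WALSplitter.splitLogFile(rootDir, walStatus, fs, conf,
        null, null, null, factory);
  }
}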
Project: PyroDB    File: HLogSplitter.java
HLogSplitter(Configuration conf, Path rootDir,
    FileSystem fs, LastSequenceId idChecker, ZooKeeperWatcher zkw,
    CoordinatedStateManager csm) {
  this.conf = HBaseConfiguration.create(conf);
  String codecClassName = conf
      .get(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, WALCellCodec.class.getName());
  this.conf.set(HConstants.RPC_CODEC_CONF_KEY, codecClassName);
  this.rootDir = rootDir;
  this.fs = fs;
  this.sequenceIdChecker = idChecker;
  this.watcher = zkw;
  this.csm = csm;

  entryBuffers = new EntryBuffers(
      this.conf.getInt("hbase.regionserver.hlog.splitlog.buffersize",
          128*1024*1024));

  // a larger minBatchSize may slow down recovery because replay writer has to wait for
  // enough edits before replaying them
  this.minBatchSize = this.conf.getInt("hbase.regionserver.wal.logreplay.batch.size", 64);
  this.distributedLogReplay = HLogSplitter.isDistributedLogReplay(this.conf);

  this.numWriterThreads = this.conf.getInt("hbase.regionserver.hlog.splitlog.writer.threads", 3);
  if (zkw != null && csm != null && this.distributedLogReplay) {
    outputSink = new LogReplayOutputSink(numWriterThreads);
  } else {
    if (this.distributedLogReplay) {
      LOG.info("ZooKeeperWatcher is passed in as NULL so disable distrubitedLogRepaly.");
    }
    this.distributedLogReplay = false;
    outputSink = new LogRecoveredEditsOutputSink(numWriterThreads);
  }

}
Project: c5    File: HLogSplitter.java
HLogSplitter(Configuration conf, Path rootDir,
    FileSystem fs, LastSequenceId idChecker, ZooKeeperWatcher zkw) {
  this.conf = conf;
  this.rootDir = rootDir;
  this.fs = fs;
  this.sequenceIdChecker = idChecker;
  this.watcher = zkw;

  entryBuffers = new EntryBuffers(
      conf.getInt("hbase.regionserver.hlog.splitlog.buffersize",
          128*1024*1024));

  // a larger minBatchSize may slow down recovery because replay writer has to wait for
  // enough edits before replaying them
  this.minBatchSize = conf.getInt("hbase.regionserver.wal.logreplay.batch.size", 64);
  this.distributedLogReplay = this.conf.getBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY,
    HConstants.DEFAULT_DISTRIBUTED_LOG_REPLAY_CONFIG);

  this.numWriterThreads = conf.getInt("hbase.regionserver.hlog.splitlog.writer.threads", 3);
  if (zkw != null && this.distributedLogReplay) {
    outputSink = new LogReplayOutputSink(numWriterThreads);
  } else {
    if (this.distributedLogReplay) {
      LOG.info("ZooKeeperWatcher is passed in as NULL so disable distrubitedLogRepaly.");
    }
    this.distributedLogReplay = false;
    outputSink = new LogRecoveredEditsOutputSink(numWriterThreads);
  }
}
Project: DominoHBase    File: HLogSplitter.java
public HLogSplitter(Configuration conf, Path rootDir, Path srcDir,
    Path oldLogDir, FileSystem fs, LastSequenceId idChecker) {
  this.conf = conf;
  this.rootDir = rootDir;
  this.srcDir = srcDir;
  this.oldLogDir = oldLogDir;
  this.fs = fs;
  this.sequenceIdChecker = idChecker;

  entryBuffers = new EntryBuffers(
      conf.getInt("hbase.regionserver.hlog.splitlog.buffersize",
          128*1024*1024));
  outputSink = new OutputSink();
}
Project: ditb    File: WALSplitter.java
/**
 * Splits a WAL file into each region's recovered-edits directory.
 * This is the main entry point for distributed log splitting from SplitLogWorker.
 * <p>
 * If the log file has N regions then N recovered.edits files will be produced.
 * <p>
 * @param rootDir
 * @param logfile
 * @param fs
 * @param conf
 * @param reporter
 * @param idChecker
 * @param cp coordination state manager
 * @return false if it is interrupted by the progress-able.
 * @throws IOException
 */
public static boolean splitLogFile(Path rootDir, FileStatus logfile, FileSystem fs,
    Configuration conf, CancelableProgressable reporter, LastSequenceId idChecker,
    CoordinatedStateManager cp, RecoveryMode mode, final WALFactory factory) throws IOException {
  WALSplitter s = new WALSplitter(factory, conf, rootDir, fs, idChecker, cp, mode);
  return s.splitLogFile(logfile, reporter);
}
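For comparison, a similar hypothetical helper for this 1.x-era signature: with a null CoordinatedStateManager the constructor shown earlier falls back to writing recovered.edits files regardless of the requested mode, so LOG_SPLITTING is the natural choice here. The RecoveryMode import path below is an assumption based on the usual 1.x layout (ZooKeeperProtos.SplitLogTask.RecoveryMode); verify it against the branch you build against.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hbase.wal.WALSplitter;

public class SplitOneWal1xSketch {
  // Hypothetical helper around the 1.x-era static splitLogFile shown above.
  public static boolean splitOne(Configuration conf, Path rootDir, Path walPath,
      WALFactory factory) throws IOException {
    FileSystem fs = rootDir.getFileSystem(conf);
    FileStatus walStatus = fs.getFileStatus(walPath);
    // null reporter, null idChecker, null CoordinatedStateManager:
    // plain log splitting into recovered.edits, no distributed log replay.
    return WALSplitter.splitLogFile(rootDir, walStatus, fs, conf,
        null, null, null, RecoveryMode.LOG_SPLITTING, factory);
  }
}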
Project: pbase    File: WALSplitter.java
/**
 * Splits a WAL file into each region's recovered-edits directory.
 * This is the main entry point for distributed log splitting from SplitLogWorker.
 * <p>
 * If the log file has N regions then N recovered.edits files will be produced.
 * <p>
 * @param rootDir
 * @param logfile
 * @param fs
 * @param conf
 * @param reporter
 * @param idChecker
 * @param cp coordination state manager
 * @return false if it is interrupted by the progress-able.
 * @throws IOException
 */
public static boolean splitLogFile(Path rootDir, FileStatus logfile, FileSystem fs,
    Configuration conf, CancelableProgressable reporter, LastSequenceId idChecker,
    CoordinatedStateManager cp, RecoveryMode mode, final WALFactory factory) throws IOException {
  WALSplitter s = new WALSplitter(factory, conf, rootDir, fs, idChecker, cp, mode);
  return s.splitLogFile(logfile, reporter);
}
Project: HIndex    File: HLogSplitter.java
/**
 * Splits an HLog file into each region's recovered-edits directory.
 * This is the main entry point for distributed log splitting from SplitLogWorker.
 * <p>
 * If the log file has N regions then N recovered.edits files will be produced.
 * <p>
 * @param rootDir
 * @param logfile
 * @param fs
 * @param conf
 * @param reporter
 * @param idChecker
 * @param zkw ZooKeeperWatcher; if it is null, we fall back to the old-style log splitting where we
 *          dump out recovered.edits files for regions to replay.
 * @return false if it is interrupted by the progress-able.
 * @throws IOException
 */
public static boolean splitLogFile(Path rootDir, FileStatus logfile, FileSystem fs,
    Configuration conf, CancelableProgressable reporter, LastSequenceId idChecker,
    ZooKeeperWatcher zkw) throws IOException {
  HLogSplitter s = new HLogSplitter(conf, rootDir, fs, idChecker, zkw);
  return s.splitLogFile(logfile, reporter);
}
Project: PyroDB    File: HLogSplitter.java
/**
 * Splits an HLog file into each region's recovered-edits directory.
 * This is the main entry point for distributed log splitting from SplitLogWorker.
 * <p>
 * If the log file has N regions then N recovered.edits files will be produced.
 * <p>
 * @param rootDir
 * @param logfile
 * @param fs
 * @param conf
 * @param reporter
 * @param idChecker
 * @param zkw ZooKeeperWatcher; if it is null, we fall back to the old-style log splitting where we
 *          dump out recovered.edits files for regions to replay.
 * @return false if it is interrupted by the progress-able.
 * @throws IOException
 */
public static boolean splitLogFile(Path rootDir, FileStatus logfile, FileSystem fs,
    Configuration conf, CancelableProgressable reporter, LastSequenceId idChecker,
    ZooKeeperWatcher zkw, CoordinatedStateManager cp) throws IOException {
  HLogSplitter s = new HLogSplitter(conf, rootDir, fs, idChecker, zkw,
    cp);
  return s.splitLogFile(logfile, reporter);
}
Project: c5    File: HLogSplitter.java
/**
 * Splits an HLog file into each region's recovered-edits directory.
 * This is the main entry point for distributed log splitting from SplitLogWorker.
 * <p>
 * If the log file has N regions then N recovered.edits files will be produced.
 * <p>
 * @param rootDir
 * @param logfile
 * @param fs
 * @param conf
 * @param reporter
 * @param idChecker
 * @param zkw ZooKeeperWatcher; if it is null, we fall back to the old-style log splitting where we
 *          dump out recovered.edits files for regions to replay.
 * @return false if it is interrupted by the progress-able.
 * @throws IOException
 */
public static boolean splitLogFile(Path rootDir, FileStatus logfile, FileSystem fs,
    Configuration conf, CancelableProgressable reporter, LastSequenceId idChecker,
    ZooKeeperWatcher zkw) throws IOException {
  HLogSplitter s = new HLogSplitter(conf, rootDir, fs, idChecker, zkw);
  return s.splitLogFile(logfile, reporter);
}
Project: DominoHBase    File: HLogSplitter.java
/**
 * Splits an HLog file into each region's recovered-edits directory
 * <p>
 * If the log file has N regions then N recovered.edits files will be
 * produced.
 * <p>
 * @param rootDir
 * @param logfile
 * @param fs
 * @param conf
 * @param reporter
 * @param idChecker
 * @return false if it is interrupted by the progress-able.
 * @throws IOException
 */
static public boolean splitLogFile(Path rootDir, FileStatus logfile,
    FileSystem fs, Configuration conf, CancelableProgressable reporter,
    LastSequenceId idChecker)
    throws IOException {
  HLogSplitter s = new HLogSplitter(conf, rootDir, null, null /* oldLogDir */, fs, idChecker);
  return s.splitLogFile(logfile, reporter);
}
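The DominoHBase snippet above predates per-store flush tracking, and in branches of that vintage LastSequenceId is commonly declared with a plain long getLastSequenceId(byte[] region) method rather than returning RegionStoreSequenceIds. A stub under that assumption (class name and behaviour illustrative only, returning -1 to mean that no flushed sequence id is known) would look like this:

import org.apache.hadoop.hbase.regionserver.LastSequenceId;

// Hypothetical stub for the older, long-returning interface shape assumed above.
public class LegacyNoopLastSequenceId implements LastSequenceId {
  @Override
  public long getLastSequenceId(byte[] region) {
    return -1; // no flushed sequence id known, so the splitter keeps every edit
  }
}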