Usage examples for the Java class org.apache.hadoop.hbase.replication.regionserver.Replication

Project: hbase    File: TestReplicationEmptyWALRecovery.java
/**
 * Waits until there is only one log (the one currently being written) in the replication queue
 * @param numRs number of regionservers
 */
private void waitForLogAdvance(int numRs) throws Exception {
  Waiter.waitFor(conf1, 10000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      for (int i = 0; i < numRs; i++) {
        HRegionServer hrs = utility1.getHBaseCluster().getRegionServer(i);
        RegionInfo regionInfo =
            utility1.getHBaseCluster().getRegions(htable1.getName()).get(0).getRegionInfo();
        WAL wal = hrs.getWAL(regionInfo);
        Path currentFile = ((AbstractFSWAL<?>) wal).getCurrentFileName();
        Replication replicationService = (Replication) utility1.getHBaseCluster()
            .getRegionServer(i).getReplicationSourceService();
        for (ReplicationSourceInterface rsi : replicationService.getReplicationManager()
            .getSources()) {
          ReplicationSource source = (ReplicationSource) rsi;
          if (!currentFile.equals(source.getCurrentPath())) {
            return false;
          }
        }
      }
      return true;
    }
  });
}
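
For context, a caller typically rolls the WAL on every region server first and then blocks on this helper until each replication source has moved on to the freshly created file. A minimal caller sketch (the rollWALWriter call and cluster-size lookup are illustrative assumptions, not part of the listing above):

// Hypothetical caller: roll every region server's WAL, then wait until all
// replication sources point at the newly created (current) WAL file.
int numRs = utility1.getHBaseCluster().getRegionServerThreads().size();
for (int i = 0; i < numRs; i++) {
  HRegionServer hrs = utility1.getHBaseCluster().getRegionServer(i);
  utility1.getAdmin().rollWALWriter(hrs.getServerName());
}
waitForLogAdvance(numRs);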
Project: RStore    File: HRegionServer.java
/**
 * Sets up the WAL and replication if enabled.
 * Replication setup is done here because it needs to be hooked up to the WAL.
 * @return A WAL instance.
 * @throws IOException
 */
private HLog setupWALAndReplication() throws IOException {
  final Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
  Path logdir = new Path(rootDir,
    HLog.getHLogDirectoryName(this.serverNameFromMasterPOV.toString()));
  if (LOG.isDebugEnabled()) LOG.debug("logdir=" + logdir);
  if (this.fs.exists(logdir)) {
    throw new RegionServerRunningException("Region server has already " +
      "created directory at " + this.serverNameFromMasterPOV.toString());
  }

  // Instantiate replication manager if replication enabled.  Pass it the
  // log directories.
  try {
    this.replicationHandler = Replication.isReplication(this.conf)?
      new Replication(this, this.fs, logdir, oldLogDir): null;
  } catch (KeeperException e) {
    throw new IOException("Failed replication handler create", e);
  }
  return instantiateHLog(logdir, oldLogDir);
}
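
Replication.isReplication(conf) gates the whole setup on a single configuration flag, so the handler is only created when replication is switched on. A minimal sketch of enabling it, assuming the pre-1.0 HConstants.REPLICATION_ENABLE_KEY that the TestLogsCleaner snippets below also use:

// Sketch: enable replication before the region server starts, so that
// setupWALAndReplication() instantiates a Replication handler instead of null.
Configuration conf = HBaseConfiguration.create();
conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);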
Project: ditb    File: TestReplicationSmallTests.java
/**
 * Test for HBASE-9038: Replication.scopeWALEdits would NPE if it did not filter out
 * the compaction WALEdit.
 * @throws Exception
 */
@Test(timeout=300000)
public void testCompactionWALEdits() throws Exception {
  WALProtos.CompactionDescriptor compactionDescriptor =
      WALProtos.CompactionDescriptor.getDefaultInstance();
  HRegionInfo hri = new HRegionInfo(htable1.getName(),
    HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
  WALEdit edit = WALEdit.createCompaction(hri, compactionDescriptor);
  Replication.scopeWALEdits(htable1.getTableDescriptor(), new WALKey(), edit);
}
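
scopeWALEdits only assigns replication scopes to column families that declare one in the table descriptor, which is why the compaction marker (which belongs to no user-visible family) has to be filtered out rather than looked up. A setup sketch, assuming the HColumnDescriptor.setScope API of this era and a hypothetical table name:

// Sketch: opt a family into replication so scopeWALEdits has a scope to assign.
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("replicated_table"));
HColumnDescriptor fam = new HColumnDescriptor("f");
fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL); // non-zero scope: replicate this family
htd.addFamily(fam);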
Project: pbase    File: TestReplicationSmallTests.java
/**
 * Test for HBASE-9038: Replication.scopeWALEdits would NPE if it did not filter out
 * the compaction WALEdit.
 * @throws Exception
 */
@Test(timeout=300000)
public void testCompactionWALEdits() throws Exception {
  WALProtos.CompactionDescriptor compactionDescriptor =
      WALProtos.CompactionDescriptor.getDefaultInstance();
  HRegionInfo hri = new HRegionInfo(htable1.getName(),
    HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
  WALEdit edit = WALEdit.createCompaction(hri, compactionDescriptor);
  Replication.scopeWALEdits(htable1.getTableDescriptor(), new WALKey(), edit);
}
Project: HIndex    File: TestReplicationSmallTests.java
/**
 * Test for HBASE-9038: Replication.scopeWALEdits would NPE if it did not filter out
 * the compaction WALEdit.
 * @throws Exception
 */
@Test(timeout=300000)
public void testCompactionWALEdits() throws Exception {
  WALProtos.CompactionDescriptor compactionDescriptor =
      WALProtos.CompactionDescriptor.getDefaultInstance();
  WALEdit edit = WALEdit.createCompaction(compactionDescriptor);
  Replication.scopeWALEdits(htable1.getTableDescriptor(), new HLogKey(), edit);
}
Project: PyroDB    File: TestReplicationSmallTests.java
/**
 * Test for HBASE-9038: Replication.scopeWALEdits would NPE if it did not filter out
 * the compaction WALEdit.
 * @throws Exception
 */
@Test(timeout=300000)
public void testCompactionWALEdits() throws Exception {
  WALProtos.CompactionDescriptor compactionDescriptor =
      WALProtos.CompactionDescriptor.getDefaultInstance();
  WALEdit edit = WALEdit.createCompaction(compactionDescriptor);
  Replication.scopeWALEdits(htable1.getTableDescriptor(), new HLogKey(), edit);
}
Project: c5    File: TestReplicationSmallTests.java
/**
 * Test for HBASE-9038: Replication.scopeWALEdits would NPE if it did not filter out
 * the compaction WALEdit.
 * @throws Exception
 */
@Test(timeout=300000)
public void testCompactionWALEdits() throws Exception {
  WALProtos.CompactionDescriptor compactionDescriptor =
      WALProtos.CompactionDescriptor.getDefaultInstance();
  WALEdit edit = WALEdit.createCompaction(compactionDescriptor);
  Replication.scopeWALEdits(htable1.getTableDescriptor(), new HLogKey(), edit);
}
Project: ditb    File: HMaster.java
/**
 * Initializes the HMaster. The steps are as follows:
 * <p>
 * <ol>
 * <li>Initialize the local HRegionServer
 * <li>Start the ActiveMasterManager.
 * </ol>
 * <p>
 * Remaining steps of initialization occur in
 * #finishActiveMasterInitialization(MonitoredTask) after
 * the master becomes the active one.
 *
 * @throws InterruptedException
 * @throws KeeperException
 * @throws IOException
 */
public HMaster(final Configuration conf, CoordinatedStateManager csm)
    throws IOException, KeeperException, InterruptedException {
  super(conf, csm);
  this.rsFatals = new MemoryBoundedLogMessageBuffer(
    conf.getLong("hbase.master.buffer.for.rs.fatals", 1*1024*1024));

  LOG.info("hbase.rootdir=" + FSUtils.getRootDir(this.conf) +
    ", hbase.cluster.distributed=" + this.conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED, false));

  // Disable usage of meta replicas in the master
  this.conf.setBoolean(HConstants.USE_META_REPLICAS, false);

  Replication.decorateMasterConfiguration(this.conf);

  // Hack! Maps DFSClient => Master for logs.  HDFS made this
  // config param for task trackers, but we can piggyback off of it.
  if (this.conf.get("mapreduce.task.attempt.id") == null) {
    this.conf.set("mapreduce.task.attempt.id", "hb_m_" + this.serverName.toString());
  }

  // should we check the compression codec type at master side, default true, HBASE-6370
  this.masterCheckCompression = conf.getBoolean("hbase.master.check.compression", true);

  // should we check encryption settings at master side, default true
  this.masterCheckEncryption = conf.getBoolean("hbase.master.check.encryption", true);

  this.metricsMaster = new MetricsMaster(new MetricsMasterWrapperImpl(this));

  // preload table descriptor at startup
  this.preLoadTableDescriptors = conf.getBoolean("hbase.master.preload.tabledescriptors", true);

  // Do we publish the status?

  boolean shouldPublish = conf.getBoolean(HConstants.STATUS_PUBLISHED,
      HConstants.STATUS_PUBLISHED_DEFAULT);
  Class<? extends ClusterStatusPublisher.Publisher> publisherClass =
      conf.getClass(ClusterStatusPublisher.STATUS_PUBLISHER_CLASS,
          ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS,
          ClusterStatusPublisher.Publisher.class);

  if (shouldPublish) {
    if (publisherClass == null) {
      LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but " +
          ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS +
          " is not set - not publishing status");
    } else {
      clusterStatusPublisherChore = new ClusterStatusPublisher(this, conf, publisherClass);
      getChoreService().scheduleChore(clusterStatusPublisherChore);
    }
  }

  // Some unit tests don't need a cluster, so no zookeeper at all
  if (!conf.getBoolean("hbase.testing.nocluster", false)) {
    activeMasterManager = new ActiveMasterManager(zooKeeper, this.serverName, this);
    int infoPort = putUpJettyServer();
    startActiveMasterManager(infoPort);
  } else {
    activeMasterManager = null;
  }
}
Project: ditb    File: TestLogsCleaner.java
@Test
public void testLogCleaning() throws Exception{
  Configuration conf = TEST_UTIL.getConfiguration();
  // set TTL
  long ttl = 10000;
  conf.setLong("hbase.master.logcleaner.ttl", ttl);
  conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
  Replication.decorateMasterConfiguration(conf);
  Server server = new DummyServer();
  ReplicationQueues repQueues =
      ReplicationFactory.getReplicationQueues(server.getZooKeeper(), conf, server);
  repQueues.init(server.getServerName().toString());
  final Path oldLogDir = new Path(TEST_UTIL.getDataTestDir(),
      HConstants.HREGION_OLDLOGDIR_NAME);
  String fakeMachineName =
    URLEncoder.encode(server.getServerName().toString(), "UTF8");

  final FileSystem fs = FileSystem.get(conf);

  // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
  long now = System.currentTimeMillis();
  fs.delete(oldLogDir, true);
  fs.mkdirs(oldLogDir);
  // Case 1: 2 invalid files, which would be deleted directly
  fs.createNewFile(new Path(oldLogDir, "a"));
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + "a"));
  // Case 2: 1 "recent" file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  System.out.println("Now is: " + now);
  for (int i = 1; i < 31; i++) {
    // Case 3: old files which would be deletable for the first log cleaner
    // (TimeToLiveLogCleaner), and also for the second (ReplicationLogCleaner)
    Path fileName = new Path(oldLogDir, fakeMachineName + "." + (now - i) );
    fs.createNewFile(fileName);
    // Case 4: put 3 old log files in ZK indicating that they are scheduled
    // for replication so these files would pass the first log cleaner
    // (TimeToLiveLogCleaner) but would be rejected by the second
    // (ReplicationLogCleaner)
    if (i % (30/3) == 1) {
      repQueues.addLog(fakeMachineName, fileName.getName());
      System.out.println("Replication log file: " + fileName);
    }
  }

  // sleep for some time to get a newer modification time
  Thread.sleep(ttl);
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + now));

  // Case 2: 1 newer file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + (now + 10000) ));

  for (FileStatus stat : fs.listStatus(oldLogDir)) {
    System.out.println(stat.getPath().toString());
  }

  assertEquals(34, fs.listStatus(oldLogDir).length);

  LogCleaner cleaner  = new LogCleaner(1000, server, conf, fs, oldLogDir);

  cleaner.chore();

  // We end up with the current log file, a newer one and the 3 old log
  // files which are scheduled for replication
  TEST_UTIL.waitFor(1000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return 5 == fs.listStatus(oldLogDir).length;
    }
  });

  for (FileStatus file : fs.listStatus(oldLogDir)) {
    System.out.println("Kept log files: " + file.getPath().getName());
  }
}
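
The files registered with repQueues survive cleaner.chore() because Replication.decorateMasterConfiguration(conf), called at the top of this test (and in the HMaster constructors above), appends a replication-aware cleaner to the master's log-cleaner plugin chain. A rough sketch of that effect, with the plugin key and class name as found in mainline HBase, shown as an illustration rather than the exact implementation:

// Illustrative equivalent of Replication.decorateMasterConfiguration():
// append ReplicationLogCleaner to the master's log-cleaner plugin chain so
// WALs still queued for replication are not deleted by the TTL cleaner.
String plugins = conf.get("hbase.master.logcleaner.plugins", "");
String repCleaner = "org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner";
if (!plugins.contains(repCleaner)) {
  conf.set("hbase.master.logcleaner.plugins",
      plugins.isEmpty() ? repCleaner : plugins + "," + repCleaner);
}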
Project: LCIndex-HBase-0.94.16    File: TestLogsCleaner.java
@Test
public void testLogCleaning() throws Exception{
  Configuration conf = TEST_UTIL.getConfiguration();
  // set TTL
  long ttl = 2000;
  conf.setLong("hbase.master.logcleaner.ttl", ttl);
  conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
  Replication.decorateMasterConfiguration(conf);
  Server server = new DummyServer();
  ReplicationZookeeper zkHelper =
      new ReplicationZookeeper(server, new AtomicBoolean(true));

  Path oldLogDir = new Path(TEST_UTIL.getDataTestDir(),
      HConstants.HREGION_OLDLOGDIR_NAME);
  String fakeMachineName =
    URLEncoder.encode(server.getServerName().toString(), "UTF8");

  FileSystem fs = FileSystem.get(conf);
  LogCleaner cleaner  = new LogCleaner(1000, server, conf, fs, oldLogDir);

  // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
  long now = System.currentTimeMillis();
  fs.delete(oldLogDir, true);
  fs.mkdirs(oldLogDir);
  // Case 1: 2 invalid files, which would be deleted directly
  fs.createNewFile(new Path(oldLogDir, "a"));
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + "a"));
  // Case 2: 1 "recent" file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  System.out.println("Now is: " + now);
  for (int i = 1; i < 31; i++) {
    // Case 3: old files which would be deletable for the first log cleaner
    // (TimeToLiveLogCleaner), and also for the second (ReplicationLogCleaner)
    Path fileName = new Path(oldLogDir, fakeMachineName + "." + (now - i) );
    fs.createNewFile(fileName);
    // Case 4: put 3 old log files in ZK indicating that they are scheduled
    // for replication so these files would pass the first log cleaner
    // (TimeToLiveLogCleaner) but would be rejected by the second
    // (ReplicationLogCleaner)
    if (i % (30/3) == 1) {
      zkHelper.addLogToList(fileName.getName(), fakeMachineName);
      System.out.println("Replication log file: " + fileName);
    }
  }

  // sleep for some time to get a newer modification time
  Thread.sleep(ttl);
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + now));

  // Case 2: 1 newer file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + (now + 10000) ));

  for (FileStatus stat : fs.listStatus(oldLogDir)) {
    System.out.println(stat.getPath().toString());
  }

  assertEquals(34, fs.listStatus(oldLogDir).length);

  cleaner.chore();

  // We end up with the current log file, a newer one and the 3 old log
  // files which are scheduled for replication
  assertEquals(5, fs.listStatus(oldLogDir).length);

  for (FileStatus file : fs.listStatus(oldLogDir)) {
    System.out.println("Kept log files: " + file.getPath().getName());
  }
}
Project: pbase    File: HMaster.java
/**
 * Initializes the HMaster. The steps are as follows:
 * <p/>
 * <ol>
 * <li>Initialize the local HRegionServer
 * <li>Start the ActiveMasterManager.
 * </ol>
 * <p/>
 * Remaining steps of initialization occur in
 * #finishActiveMasterInitialization(MonitoredTask) after
 * the master becomes the active one.
 *
 * @throws InterruptedException
 * @throws KeeperException
 * @throws IOException
 */
public HMaster(final Configuration conf, CoordinatedStateManager csm)
        throws IOException, KeeperException, InterruptedException {
    super(conf, csm);
    this.rsFatals = new MemoryBoundedLogMessageBuffer(
            conf.getLong("hbase.master.buffer.for.rs.fatals", 1 * 1024 * 1024));

    LOG.info("hbase.rootdir=" + FSUtils.getRootDir(this.conf) +
            ", hbase.cluster.distributed=" + this.conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED, false));

    Replication.decorateMasterConfiguration(this.conf);

    // Hack! Maps DFSClient => Master for logs.  HDFS made this
    // config param for task trackers, but we can piggyback off of it.
    if (this.conf.get("mapreduce.task.attempt.id") == null) {
        this.conf.set("mapreduce.task.attempt.id", "hb_m_" + this.serverName.toString());
    }

    // should we check the compression codec type at master side, default true, HBASE-6370
    this.masterCheckCompression = conf.getBoolean("hbase.master.check.compression", true);

    // should we check encryption settings at master side, default true
    this.masterCheckEncryption = conf.getBoolean("hbase.master.check.encryption", true);

    this.metricsMaster = new MetricsMaster(new MetricsMasterWrapperImpl(this));
    // interface exposed for Hadoop's use

    // preload table descriptor at startup
    this.preLoadTableDescriptors = conf.getBoolean("hbase.master.preload.tabledescriptors", true);

    // Do we publish the status?

    boolean shouldPublish = conf.getBoolean(HConstants.STATUS_PUBLISHED,
            HConstants.STATUS_PUBLISHED_DEFAULT);
    Class<? extends ClusterStatusPublisher.Publisher> publisherClass =
            conf.getClass(ClusterStatusPublisher.STATUS_PUBLISHER_CLASS,
                    ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS,
                    ClusterStatusPublisher.Publisher.class);

    if (shouldPublish) {
        if (publisherClass == null) {
            LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but " +
                    ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS +
                    " is not set - not publishing status");
        } else {
            clusterStatusPublisherChore = new ClusterStatusPublisher(this, conf, publisherClass);
            Threads.setDaemonThreadRunning(clusterStatusPublisherChore.getThread());
        }
    }
    activeMasterManager = new ActiveMasterManager(zooKeeper, this.serverName, this);
    int infoPort = putUpJettyServer();
    startActiveMasterManager(infoPort);
}
Project: pbase    File: TestLogsCleaner.java
@Test
public void testLogCleaning() throws Exception{
  Configuration conf = TEST_UTIL.getConfiguration();
  // set TTL
  long ttl = 10000;
  conf.setLong("hbase.master.logcleaner.ttl", ttl);
  conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
  Replication.decorateMasterConfiguration(conf);
  Server server = new DummyServer();
  ReplicationQueues repQueues =
      ReplicationFactory.getReplicationQueues(server.getZooKeeper(), conf, server);
  repQueues.init(server.getServerName().toString());
  final Path oldLogDir = new Path(TEST_UTIL.getDataTestDir(),
      HConstants.HREGION_OLDLOGDIR_NAME);
  String fakeMachineName =
    URLEncoder.encode(server.getServerName().toString(), "UTF8");

  final FileSystem fs = FileSystem.get(conf);

  // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
  long now = System.currentTimeMillis();
  fs.delete(oldLogDir, true);
  fs.mkdirs(oldLogDir);
  // Case 1: 2 invalid files, which would be deleted directly
  fs.createNewFile(new Path(oldLogDir, "a"));
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + "a"));
  // Case 2: 1 "recent" file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  System.out.println("Now is: " + now);
  for (int i = 1; i < 31; i++) {
    // Case 3: old files which would be deletable for the first log cleaner
    // (TimeToLiveLogCleaner), and also for the second (ReplicationLogCleaner)
    Path fileName = new Path(oldLogDir, fakeMachineName + "." + (now - i) );
    fs.createNewFile(fileName);
    // Case 4: put 3 old log files in ZK indicating that they are scheduled
    // for replication so these files would pass the first log cleaner
    // (TimeToLiveLogCleaner) but would be rejected by the second
    // (ReplicationLogCleaner)
    if (i % (30/3) == 1) {
      repQueues.addLog(fakeMachineName, fileName.getName());
      System.out.println("Replication log file: " + fileName);
    }
  }

  // sleep for some time to get a newer modification time
  Thread.sleep(ttl);
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + now));

  // Case 2: 1 newer file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + (now + 10000) ));

  for (FileStatus stat : fs.listStatus(oldLogDir)) {
    System.out.println(stat.getPath().toString());
  }

  assertEquals(34, fs.listStatus(oldLogDir).length);

  LogCleaner cleaner  = new LogCleaner(1000, server, conf, fs, oldLogDir);
  cleaner.chore();

  // We end up with the current log file, a newer one and the 3 old log
  // files which are scheduled for replication
  TEST_UTIL.waitFor(1000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return 5 == fs.listStatus(oldLogDir).length;
    }
  });

  for (FileStatus file : fs.listStatus(oldLogDir)) {
    System.out.println("Kept log files: " + file.getPath().getName());
  }
}
Project: HIndex    File: TestLogsCleaner.java
@Test
public void testLogCleaning() throws Exception{
  Configuration conf = TEST_UTIL.getConfiguration();
  // set TTL
  long ttl = 10000;
  conf.setLong("hbase.master.logcleaner.ttl", ttl);
  conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
  Replication.decorateMasterConfiguration(conf);
  Server server = new DummyServer();
  ReplicationQueues repQueues =
      ReplicationFactory.getReplicationQueues(server.getZooKeeper(), conf, server);
  repQueues.init(server.getServerName().toString());
  final Path oldLogDir = new Path(TEST_UTIL.getDataTestDir(),
      HConstants.HREGION_OLDLOGDIR_NAME);
  String fakeMachineName =
    URLEncoder.encode(server.getServerName().toString(), "UTF8");

  final FileSystem fs = FileSystem.get(conf);

  // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
  long now = System.currentTimeMillis();
  fs.delete(oldLogDir, true);
  fs.mkdirs(oldLogDir);
  // Case 1: 2 invalid files, which would be deleted directly
  fs.createNewFile(new Path(oldLogDir, "a"));
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + "a"));
  // Case 2: 1 "recent" file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  System.out.println("Now is: " + now);
  for (int i = 1; i < 31; i++) {
    // Case 3: old files which would be deletable for the first log cleaner
    // (TimeToLiveLogCleaner), and also for the second (ReplicationLogCleaner)
    Path fileName = new Path(oldLogDir, fakeMachineName + "." + (now - i) );
    fs.createNewFile(fileName);
    // Case 4: put 3 old log files in ZK indicating that they are scheduled
    // for replication so these files would pass the first log cleaner
    // (TimeToLiveLogCleaner) but would be rejected by the second
    // (ReplicationLogCleaner)
    if (i % (30/3) == 1) {
      repQueues.addLog(fakeMachineName, fileName.getName());
      System.out.println("Replication log file: " + fileName);
    }
  }

  // sleep for some time to get a newer modification time
  Thread.sleep(ttl);
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + now));

  // Case 2: 1 newer file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + (now + 10000) ));

  for (FileStatus stat : fs.listStatus(oldLogDir)) {
    System.out.println(stat.getPath().toString());
  }

  assertEquals(34, fs.listStatus(oldLogDir).length);

  LogCleaner cleaner  = new LogCleaner(1000, server, conf, fs, oldLogDir);
  cleaner.chore();

  // We end up with the current log file, a newer one and the 3 old log
  // files which are scheduled for replication
  TEST_UTIL.waitFor(1000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return 5 == fs.listStatus(oldLogDir).length;
    }
  });

  for (FileStatus file : fs.listStatus(oldLogDir)) {
    System.out.println("Kept log files: " + file.getPath().getName());
  }
}
Project: IRIndex    File: TestLogsCleaner.java
@Test
public void testLogCleaning() throws Exception{
  Configuration conf = TEST_UTIL.getConfiguration();
  // set TTL
  long ttl = 2000;
  conf.setLong("hbase.master.logcleaner.ttl", ttl);
  conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
  Replication.decorateMasterConfiguration(conf);
  Server server = new DummyServer();
  ReplicationZookeeper zkHelper =
      new ReplicationZookeeper(server, new AtomicBoolean(true));

  Path oldLogDir = new Path(TEST_UTIL.getDataTestDir(),
      HConstants.HREGION_OLDLOGDIR_NAME);
  String fakeMachineName =
    URLEncoder.encode(server.getServerName().toString(), "UTF8");

  FileSystem fs = FileSystem.get(conf);
  LogCleaner cleaner  = new LogCleaner(1000, server, conf, fs, oldLogDir);

  // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
  long now = System.currentTimeMillis();
  fs.delete(oldLogDir, true);
  fs.mkdirs(oldLogDir);
  // Case 1: 2 invalid files, which would be deleted directly
  fs.createNewFile(new Path(oldLogDir, "a"));
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + "a"));
  // Case 2: 1 "recent" file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  System.out.println("Now is: " + now);
  for (int i = 1; i < 31; i++) {
    // Case 3: old files which would be deletable for the first log cleaner
    // (TimeToLiveLogCleaner), and also for the second (ReplicationLogCleaner)
    Path fileName = new Path(oldLogDir, fakeMachineName + "." + (now - i) );
    fs.createNewFile(fileName);
    // Case 4: put 3 old log files in ZK indicating that they are scheduled
    // for replication so these files would pass the first log cleaner
    // (TimeToLiveLogCleaner) but would be rejected by the second
    // (ReplicationLogCleaner)
    if (i % (30/3) == 1) {
      zkHelper.addLogToList(fileName.getName(), fakeMachineName);
      System.out.println("Replication log file: " + fileName);
    }
  }

  // sleep for some time to get a newer modification time
  Thread.sleep(ttl);
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + now));

  // Case 2: 1 newer file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + (now + 10000) ));

  for (FileStatus stat : fs.listStatus(oldLogDir)) {
    System.out.println(stat.getPath().toString());
  }

  assertEquals(34, fs.listStatus(oldLogDir).length);

  cleaner.chore();

  // We end up with the current log file, a newer one and the 3 old log
  // files which are scheduled for replication
  assertEquals(5, fs.listStatus(oldLogDir).length);

  for (FileStatus file : fs.listStatus(oldLogDir)) {
    System.out.println("Kept log files: " + file.getPath().getName());
  }
}
Project: hbase    File: TestReplicationSource.java
/**
 * Tests that recovered queues are preserved on a regionserver shutdown.
 * See HBASE-18192
 * @throws Exception
 */
@Test
public void testServerShutdownRecoveredQueue() throws Exception {
  try {
    // Ensure single-threaded WAL
    conf.set("hbase.wal.provider", "defaultProvider");
    conf.setInt("replication.sleep.before.failover", 2000);
    // Introduces a delay in regionserver shutdown to give the race condition a chance to kick in.
    conf.set(HConstants.REGION_SERVER_IMPL, ShutdownDelayRegionServer.class.getName());
    MiniHBaseCluster cluster = TEST_UTIL.startMiniCluster(2);
    TEST_UTIL_PEER.startMiniCluster(1);

    HRegionServer serverA = cluster.getRegionServer(0);
    final ReplicationSourceManager managerA =
        ((Replication) serverA.getReplicationSourceService()).getReplicationManager();
    HRegionServer serverB = cluster.getRegionServer(1);
    final ReplicationSourceManager managerB =
        ((Replication) serverB.getReplicationSourceService()).getReplicationManager();
    final Admin admin = TEST_UTIL.getAdmin();

    final String peerId = "TestPeer";
    admin.addReplicationPeer(peerId,
        new ReplicationPeerConfig().setClusterKey(TEST_UTIL_PEER.getClusterKey()));
    // Wait for replication sources to come up
    Waiter.waitFor(conf, 20000, new Waiter.Predicate<Exception>() {
      @Override public boolean evaluate() throws Exception {
        return !(managerA.getSources().isEmpty() || managerB.getSources().isEmpty());
      }
    });
    // Disabling peer makes sure there is at least one log to claim when the server dies
    // The recovered queue will also stay there until the peer is disabled even if the
    // WALs it contains have no data.
    admin.disableReplicationPeer(peerId);

    // Stopping serverA
    // Its queues should be claimed by the only other live server, i.e. serverB
    cluster.stopRegionServer(serverA.getServerName());
    Waiter.waitFor(conf, 20000, new Waiter.Predicate<Exception>() {
      @Override public boolean evaluate() throws Exception {
        return managerB.getOldSources().size() == 1;
      }
    });

    final HRegionServer serverC = cluster.startRegionServer().getRegionServer();
    serverC.waitForServerOnline();
    Waiter.waitFor(conf, 20000, new Waiter.Predicate<Exception>() {
      @Override public boolean evaluate() throws Exception {
        return serverC.getReplicationSourceService() != null;
      }
    });
    final ReplicationSourceManager managerC =
        ((Replication) serverC.getReplicationSourceService()).getReplicationManager();
    // Sanity check
    assertEquals(0, managerC.getOldSources().size());

    // Stopping serverB
    // Now serverC should have two recovered queues:
    // 1. The serverB's normal queue
    // 2. serverA's recovered queue on serverB
    cluster.stopRegionServer(serverB.getServerName());
    Waiter.waitFor(conf, 20000, new Waiter.Predicate<Exception>() {
      @Override public boolean evaluate() throws Exception {
        return managerC.getOldSources().size() == 2;
      }
    });
    admin.enableReplicationPeer(peerId);
    Waiter.waitFor(conf, 20000, new Waiter.Predicate<Exception>() {
      @Override public boolean evaluate() throws Exception {
        return managerC.getOldSources().size() == 0;
      }
    });
  } finally {
    conf.set(HConstants.REGION_SERVER_IMPL, HRegionServer.class.getName());
  }
}
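
ShutdownDelayRegionServer is referenced above but not shown in this listing; per the test's comment it only needs to stall shutdown long enough for the queue-claim race to open up. A sketch of such a subclass, with the exact hook and delay assumed rather than copied from the original test:

// Assumed sketch: a region server that pauses during shutdown so another
// server has time to claim its replication queues while it is still "alive".
public static class ShutdownDelayRegionServer extends HRegionServer {
  public ShutdownDelayRegionServer(Configuration conf) throws IOException {
    super(conf);
  }

  @Override
  protected void stopServiceThreads() {
    try {
      Thread.sleep(2000); // keep the server's ZK session up a little longer
    } catch (InterruptedException ex) {
      Thread.currentThread().interrupt();
    }
    super.stopServiceThreads();
  }
}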
Project: RStore    File: HMaster.java
/**
 * Initializes the HMaster. The steps are as follows:
 * <p>
 * <ol>
 * <li>Initialize HMaster RPC and address
 * <li>Connect to ZooKeeper.
 * </ol>
 * <p>
 * Remaining steps of initialization occur in {@link #run()} so that they
 * run in their own thread rather than within the context of the constructor.
 * @throws InterruptedException
 */
public HMaster(final Configuration conf)
throws IOException, KeeperException, InterruptedException {
  this.conf = new Configuration(conf);
  // Disable the block cache on the master
  this.conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
  // Set how many times to retry talking to another server over HConnection.
  HConnectionManager.setServerSideHConnectionRetries(this.conf, LOG);
  // Server to handle client requests.
  String hostname = DNS.getDefaultHost(
    conf.get("hbase.master.dns.interface", "default"),
    conf.get("hbase.master.dns.nameserver", "default"));
  int port = conf.getInt(HConstants.MASTER_PORT, HConstants.DEFAULT_MASTER_PORT);
  // Creation of a HSA will force a resolve.
  InetSocketAddress initialIsa = new InetSocketAddress(hostname, port);
  if (initialIsa.getAddress() == null) {
    throw new IllegalArgumentException("Failed resolve of " + this.isa);
  }
  int numHandlers = conf.getInt("hbase.master.handler.count",
    conf.getInt("hbase.regionserver.handler.count", 25));
  this.rpcServer = HBaseRPC.getServer(this,
    new Class<?>[]{HMasterInterface.class, HMasterRegionInterface.class},
      initialIsa.getHostName(), // BindAddress is IP we got for this server.
      initialIsa.getPort(),
      numHandlers,
      0, // we don't use high priority handlers in master
      conf.getBoolean("hbase.rpc.verbose", false), conf,
      0); // this is a DNC w/o high priority handlers
  // Set our address.
  this.isa = this.rpcServer.getListenerAddress();
  this.serverName = new ServerName(this.isa.getHostName(),
    this.isa.getPort(), System.currentTimeMillis());
  this.rsFatals = new MemoryBoundedLogMessageBuffer(
      conf.getLong("hbase.master.buffer.for.rs.fatals", 1*1024*1024));

  // initialize server principal (if using secure Hadoop)
  User.login(conf, "hbase.master.keytab.file",
    "hbase.master.kerberos.principal", this.isa.getHostName());

  // set the thread name now we have an address
  setName(MASTER + "-" + this.serverName.toString());

  Replication.decorateMasterConfiguration(this.conf);

  // Hack! Maps DFSClient => Master for logs.  HDFS made this
  // config param for task trackers, but we can piggyback off of it.
  if (this.conf.get("mapred.task.id") == null) {
    this.conf.set("mapred.task.id", "hb_m_" + this.serverName.toString());
  }

  this.zooKeeper = new ZooKeeperWatcher(conf, MASTER + ":" + isa.getPort(), this, true);
  this.rpcServer.startThreads();
  this.metrics = new MasterMetrics(getServerName().toString());
}
Project: PyroDB    File: HMaster.java
/**
 * Initializes the HMaster. The steps are as follows:
 * <p>
 * <ol>
 * <li>Initialize the local HRegionServer
 * <li>Start the ActiveMasterManager.
 * </ol>
 * <p>
 * Remaining steps of initialization occur in
 * {@link #finishActiveMasterInitialization(MonitoredTask)} after
 * the master becomes the active one.
 *
 * @throws InterruptedException
 * @throws KeeperException
 * @throws IOException
 */
public HMaster(final Configuration conf, CoordinatedStateManager csm)
    throws IOException, KeeperException, InterruptedException {
  super(conf, csm);
  this.rsFatals = new MemoryBoundedLogMessageBuffer(
    conf.getLong("hbase.master.buffer.for.rs.fatals", 1*1024*1024));

  LOG.info("hbase.rootdir=" + FSUtils.getRootDir(this.conf) +
      ", hbase.cluster.distributed=" + this.conf.getBoolean("hbase.cluster.distributed", false));

  Replication.decorateMasterConfiguration(this.conf);

  // Hack! Maps DFSClient => Master for logs.  HDFS made this
  // config param for task trackers, but we can piggyback off of it.
  if (this.conf.get("mapreduce.task.attempt.id") == null) {
    this.conf.set("mapreduce.task.attempt.id", "hb_m_" + this.serverName.toString());
  }

  //should we check the compression codec type at master side, default true, HBASE-6370
  this.masterCheckCompression = conf.getBoolean("hbase.master.check.compression", true);

  this.metricsMaster = new MetricsMaster( new MetricsMasterWrapperImpl(this));

  // Do we publish the status?
  boolean shouldPublish = conf.getBoolean(HConstants.STATUS_PUBLISHED,
      HConstants.STATUS_PUBLISHED_DEFAULT);
  Class<? extends ClusterStatusPublisher.Publisher> publisherClass =
      conf.getClass(ClusterStatusPublisher.STATUS_PUBLISHER_CLASS,
          ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS,
          ClusterStatusPublisher.Publisher.class);

  if (shouldPublish) {
    if (publisherClass == null) {
      LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but " +
          ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS +
          " is not set - not publishing status");
    } else {
      clusterStatusPublisherChore = new ClusterStatusPublisher(this, conf, publisherClass);
      Threads.setDaemonThreadRunning(clusterStatusPublisherChore.getThread());
    }
  }
  startActiveMasterManager();
  putUpJettyServer();
  LOG.info("Shen Li: HMaster hostname " + getServerName().getHostname());
}
Project: PyroDB    File: TestLogsCleaner.java
@Test
public void testLogCleaning() throws Exception{
  Configuration conf = TEST_UTIL.getConfiguration();
  // set TTL
  long ttl = 10000;
  conf.setLong("hbase.master.logcleaner.ttl", ttl);
  conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
  Replication.decorateMasterConfiguration(conf);
  Server server = new DummyServer();
  ReplicationQueues repQueues =
      ReplicationFactory.getReplicationQueues(server.getZooKeeper(), conf, server);
  repQueues.init(server.getServerName().toString());
  final Path oldLogDir = new Path(TEST_UTIL.getDataTestDir(),
      HConstants.HREGION_OLDLOGDIR_NAME);
  String fakeMachineName =
    URLEncoder.encode(server.getServerName().toString(), "UTF8");

  final FileSystem fs = FileSystem.get(conf);

  // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
  long now = System.currentTimeMillis();
  fs.delete(oldLogDir, true);
  fs.mkdirs(oldLogDir);
  // Case 1: 2 invalid files, which would be deleted directly
  fs.createNewFile(new Path(oldLogDir, "a"));
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + "a"));
  // Case 2: 1 "recent" file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  System.out.println("Now is: " + now);
  for (int i = 1; i < 31; i++) {
    // Case 3: old files which would be deletable for the first log cleaner
    // (TimeToLiveLogCleaner), and also for the second (ReplicationLogCleaner)
    Path fileName = new Path(oldLogDir, fakeMachineName + "." + (now - i) );
    fs.createNewFile(fileName);
    // Case 4: put 3 old log files in ZK indicating that they are scheduled
    // for replication so these files would pass the first log cleaner
    // (TimeToLiveLogCleaner) but would be rejected by the second
    // (ReplicationLogCleaner)
    if (i % (30/3) == 1) {
      repQueues.addLog(fakeMachineName, fileName.getName());
      System.out.println("Replication log file: " + fileName);
    }
  }

  // sleep for some time to get a newer modification time
  Thread.sleep(ttl);
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + now));

  // Case 2: 1 newer file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + (now + 10000) ));

  for (FileStatus stat : fs.listStatus(oldLogDir)) {
    System.out.println(stat.getPath().toString());
  }

  assertEquals(34, fs.listStatus(oldLogDir).length);

  LogCleaner cleaner  = new LogCleaner(1000, server, conf, fs, oldLogDir);
  cleaner.chore();

  // We end up with the current log file, a newer one and the 3 old log
  // files which are scheduled for replication
  TEST_UTIL.waitFor(1000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return 5 == fs.listStatus(oldLogDir).length;
    }
  });

  for (FileStatus file : fs.listStatus(oldLogDir)) {
    System.out.println("Kept log files: " + file.getPath().getName());
  }
}
Project: c5    File: TestLogsCleaner.java
@Test
public void testLogCleaning() throws Exception{
  Configuration conf = TEST_UTIL.getConfiguration();
  // set TTL
  long ttl = 10000;
  conf.setLong("hbase.master.logcleaner.ttl", ttl);
  conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
  Replication.decorateMasterConfiguration(conf);
  Server server = new DummyServer();
  ReplicationQueues repQueues =
      ReplicationFactory.getReplicationQueues(server.getZooKeeper(), conf, server);
  repQueues.init(server.getServerName().toString());
  Path oldLogDir = new Path(TEST_UTIL.getDataTestDir(),
      HConstants.HREGION_OLDLOGDIR_NAME);
  String fakeMachineName =
    URLEncoder.encode(server.getServerName().toString(), "UTF8");

  FileSystem fs = FileSystem.get(conf);
  LogCleaner cleaner  = new LogCleaner(1000, server, conf, fs, oldLogDir);

  // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
  long now = System.currentTimeMillis();
  fs.delete(oldLogDir, true);
  fs.mkdirs(oldLogDir);
  // Case 1: 2 invalid files, which would be deleted directly
  fs.createNewFile(new Path(oldLogDir, "a"));
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + "a"));
  // Case 2: 1 "recent" file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  System.out.println("Now is: " + now);
  for (int i = 1; i < 31; i++) {
    // Case 3: old files which would be deletable for the first log cleaner
    // (TimeToLiveLogCleaner), and also for the second (ReplicationLogCleaner)
    Path fileName = new Path(oldLogDir, fakeMachineName + "." + (now - i) );
    fs.createNewFile(fileName);
    // Case 4: put 3 old log files in ZK indicating that they are scheduled
    // for replication so these files would pass the first log cleaner
    // (TimeToLiveLogCleaner) but would be rejected by the second
    // (ReplicationLogCleaner)
    if (i % (30/3) == 1) {
      repQueues.addLog(fakeMachineName, fileName.getName());
      System.out.println("Replication log file: " + fileName);
    }
  }

  // sleep for some time to get a newer modification time
  Thread.sleep(ttl);
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + now));

  // Case 2: 1 newer file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + (now + 10000) ));

  for (FileStatus stat : fs.listStatus(oldLogDir)) {
    System.out.println(stat.getPath().toString());
  }

  assertEquals(34, fs.listStatus(oldLogDir).length);

  cleaner.chore();

  // We end up with the current log file, a newer one and the 3 old log
  // files which are scheduled for replication
  assertEquals(5, fs.listStatus(oldLogDir).length);

  for (FileStatus file : fs.listStatus(oldLogDir)) {
    System.out.println("Kept log files: " + file.getPath().getName());
  }
}
Project: HBase-Research    File: HMaster.java
/**
 * Initializes the HMaster. The steps are as follows:
 * <p>
 * <ol>
 * <li>Initialize HMaster RPC and address
 * <li>Connect to ZooKeeper.
 * </ol>
 * <p>
 * Remaining steps of initialization occur in {@link #run()} so that they
 * run in their own thread rather than within the context of the constructor.
 * @throws InterruptedException
 */
public HMaster(final Configuration conf)
throws IOException, KeeperException, InterruptedException {
  this.conf = new Configuration(conf);
  // Disable the block cache on the master
  this.conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
  // Set how many times to retry talking to another server over HConnection.
  HConnectionManager.setServerSideHConnectionRetries(this.conf, LOG);
  // Server to handle client requests.
  String hostname = conf.get("hbase.master.ipc.address",
    Strings.domainNamePointerToHostName(DNS.getDefaultHost(
      conf.get("hbase.master.dns.interface", "default"),
      conf.get("hbase.master.dns.nameserver", "default"))));
  int port = conf.getInt(HConstants.MASTER_PORT, HConstants.DEFAULT_MASTER_PORT);
  // Test that the hostname is reachable
  InetSocketAddress initialIsa = new InetSocketAddress(hostname, port);
  if (initialIsa.getAddress() == null) {
    throw new IllegalArgumentException("Failed resolve of hostname " + initialIsa);
  }
  int numHandlers = conf.getInt("hbase.master.handler.count",
    conf.getInt("hbase.regionserver.handler.count", 25));
  this.rpcServer = HBaseRPC.getServer(this,
    new Class<?>[]{HMasterInterface.class, HMasterRegionInterface.class},
      initialIsa.getHostName(), // This is bindAddress if set else it's hostname
      initialIsa.getPort(),
      numHandlers,
      0, // we don't use high priority handlers in master
      conf.getBoolean("hbase.rpc.verbose", false), conf,
      0); // this is a DNC w/o high priority handlers
  // Set our address.
  this.isa = this.rpcServer.getListenerAddress();
  this.serverName = new ServerName(this.isa.getHostName(),
    this.isa.getPort(), System.currentTimeMillis());
  this.rsFatals = new MemoryBoundedLogMessageBuffer(
      conf.getLong("hbase.master.buffer.for.rs.fatals", 1*1024*1024));

  // login the zookeeper client principal (if using security)
  ZKUtil.loginClient(this.conf, "hbase.zookeeper.client.keytab.file",
    "hbase.zookeeper.client.kerberos.principal", this.isa.getHostName());

  // initialize server principal (if using secure Hadoop)
  User.login(conf, "hbase.master.keytab.file",
    "hbase.master.kerberos.principal", this.isa.getHostName());

  // set the thread name now we have an address
  setName(MASTER + "-" + this.serverName.toString());

  Replication.decorateMasterConfiguration(this.conf);

  // Hack! Maps DFSClient => Master for logs.  HDFS made this
  // config param for task trackers, but we can piggyback off of it.
  if (this.conf.get("mapred.task.id") == null) {
    this.conf.set("mapred.task.id", "hb_m_" + this.serverName.toString());
  }

  this.zooKeeper = new ZooKeeperWatcher(conf, MASTER + ":" + isa.getPort(), this, true);
  this.rpcServer.startThreads();
  this.metrics = new MasterMetrics(getServerName().toString());

  // Health checker thread.
  int sleepTime = this.conf.getInt(HConstants.HEALTH_CHORE_WAKE_FREQ,
    HConstants.DEFAULT_THREAD_WAKE_FREQUENCY);
  if (isHealthCheckerConfigured()) {
    healthCheckChore = new HealthCheckChore(sleepTime, this, getConfiguration());
  }

  this.shouldSplitMetaSeparately = conf.getBoolean(HLog.SEPARATE_HLOG_FOR_META, false);
  waitingOnLogSplitting = this.conf.getBoolean("hbase.master.wait.for.log.splitting", false);
}
Project: HBase-Research    File: TestLogsCleaner.java
@Test
public void testLogCleaning() throws Exception{
  Configuration conf = TEST_UTIL.getConfiguration();
  // set TTL
  long ttl = 2000;
  conf.setLong("hbase.master.logcleaner.ttl", ttl);
  conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
  Replication.decorateMasterConfiguration(conf);
  Server server = new DummyServer();
  ReplicationZookeeper zkHelper =
      new ReplicationZookeeper(server, new AtomicBoolean(true));

  Path oldLogDir = new Path(TEST_UTIL.getDataTestDir(),
      HConstants.HREGION_OLDLOGDIR_NAME);
  String fakeMachineName =
    URLEncoder.encode(server.getServerName().toString(), "UTF8");

  FileSystem fs = FileSystem.get(conf);
  LogCleaner cleaner  = new LogCleaner(1000, server, conf, fs, oldLogDir);

  // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
  long now = System.currentTimeMillis();
  fs.delete(oldLogDir, true);
  fs.mkdirs(oldLogDir);
  // Case 1: 2 invalid files, which would be deleted directly
  fs.createNewFile(new Path(oldLogDir, "a"));
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + "a"));
  // Case 2: 1 "recent" file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  System.out.println("Now is: " + now);
  for (int i = 1; i < 31; i++) {
    // Case 3: old files which would be deletable for the first log cleaner
    // (TimeToLiveLogCleaner), and also for the second (ReplicationLogCleaner)
    Path fileName = new Path(oldLogDir, fakeMachineName + "." + (now - i) );
    fs.createNewFile(fileName);
    // Case 4: put 3 old log files in ZK indicating that they are scheduled
    // for replication so these files would pass the first log cleaner
    // (TimeToLiveLogCleaner) but would be rejected by the second
    // (ReplicationLogCleaner)
    if (i % (30/3) == 1) {
      zkHelper.addLogToList(fileName.getName(), fakeMachineName);
      System.out.println("Replication log file: " + fileName);
    }
  }

  // sleep for some time to get a newer modification time
  Thread.sleep(ttl);
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + now));

  // Case 2: 1 newer file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + (now + 10000) ));

  for (FileStatus stat : fs.listStatus(oldLogDir)) {
    System.out.println(stat.getPath().toString());
  }

  assertEquals(34, fs.listStatus(oldLogDir).length);

  cleaner.chore();

  // We end up with the current log file, a newer one and the 3 old log
  // files which are scheduled for replication
  assertEquals(5, fs.listStatus(oldLogDir).length);

  for (FileStatus file : fs.listStatus(oldLogDir)) {
    System.out.println("Kept log files: " + file.getPath().getName());
  }
}
Project: hbase-0.94.8-qod    File: TestLogsCleaner.java
@Test
public void testLogCleaning() throws Exception{
  Configuration conf = TEST_UTIL.getConfiguration();
  // set TTL
  long ttl = 2000;
  conf.setLong("hbase.master.logcleaner.ttl", ttl);
  conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
  Replication.decorateMasterConfiguration(conf);
  Server server = new DummyServer();
  ReplicationZookeeper zkHelper =
      new ReplicationZookeeper(server, new AtomicBoolean(true));

  Path oldLogDir = new Path(TEST_UTIL.getDataTestDir(),
      HConstants.HREGION_OLDLOGDIR_NAME);
  String fakeMachineName =
    URLEncoder.encode(server.getServerName().toString(), "UTF8");

  FileSystem fs = FileSystem.get(conf);
  LogCleaner cleaner  = new LogCleaner(1000, server, conf, fs, oldLogDir);

  // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
  long now = System.currentTimeMillis();
  fs.delete(oldLogDir, true);
  fs.mkdirs(oldLogDir);
  // Case 1: 2 invalid files, which would be deleted directly
  fs.createNewFile(new Path(oldLogDir, "a"));
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + "a"));
  // Case 2: 1 "recent" file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  System.out.println("Now is: " + now);
  for (int i = 1; i < 31; i++) {
    // Case 3: old files which would be deletable for the first log cleaner
    // (TimeToLiveLogCleaner), and also for the second (ReplicationLogCleaner)
    Path fileName = new Path(oldLogDir, fakeMachineName + "." + (now - i) );
    fs.createNewFile(fileName);
    // Case 4: put 3 old log files in ZK indicating that they are scheduled
    // for replication so these files would pass the first log cleaner
    // (TimeToLiveLogCleaner) but would be rejected by the second
    // (ReplicationLogCleaner)
    if (i % (30/3) == 1) {
      zkHelper.addLogToList(fileName.getName(), fakeMachineName);
      System.out.println("Replication log file: " + fileName);
    }
  }

  // sleep for sometime to get newer modifcation time 
  Thread.sleep(ttl);
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + now));

  // Case 2: 1 newer file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + (now + 10000) ));

  for (FileStatus stat : fs.listStatus(oldLogDir)) {
    System.out.println(stat.getPath().toString());
  }

  assertEquals(34, fs.listStatus(oldLogDir).length);

  cleaner.chore();

  // We end up with the current log file, a newer one and the 3 old log
  // files which are scheduled for replication
  assertEquals(5, fs.listStatus(oldLogDir).length);

  for (FileStatus file : fs.listStatus(oldLogDir)) {
    System.out.println("Kept log files: " + file.getPath().getName());
  }
}
Project: DominoHBase    File: TestLogsCleaner.java
@Test
public void testLogCleaning() throws Exception{
  Configuration conf = TEST_UTIL.getConfiguration();
  // set TTL
  long ttl = 2000;
  conf.setLong("hbase.master.logcleaner.ttl", ttl);
  conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
  Replication.decorateMasterConfiguration(conf);
  Server server = new DummyServer();
  ReplicationZookeeper zkHelper =
      new ReplicationZookeeper(server, new AtomicBoolean(true));

  Path oldLogDir = new Path(TEST_UTIL.getDataTestDir(),
      HConstants.HREGION_OLDLOGDIR_NAME);
  String fakeMachineName =
    URLEncoder.encode(server.getServerName().toString(), "UTF8");

  FileSystem fs = FileSystem.get(conf);
  LogCleaner cleaner  = new LogCleaner(1000, server, conf, fs, oldLogDir);

  // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
  long now = System.currentTimeMillis();
  fs.delete(oldLogDir, true);
  fs.mkdirs(oldLogDir);
  // Case 1: 2 invalid files, which would be deleted directly
  fs.createNewFile(new Path(oldLogDir, "a"));
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + "a"));
  // Case 2: 1 "recent" file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  System.out.println("Now is: " + now);
  for (int i = 1; i < 31; i++) {
    // Case 3: old files which would be deletable for the first log cleaner
    // (TimeToLiveLogCleaner), and also for the second (ReplicationLogCleaner)
    Path fileName = new Path(oldLogDir, fakeMachineName + "." + (now - i) );
    fs.createNewFile(fileName);
    // Case 4: put 3 old log files in ZK indicating that they are scheduled
    // for replication so these files would pass the first log cleaner
    // (TimeToLiveLogCleaner) but would be rejected by the second
    // (ReplicationLogCleaner)
    if (i % (30/3) == 1) {
      zkHelper.addLogToList(fileName.getName(), fakeMachineName);
      System.out.println("Replication log file: " + fileName);
    }
  }

  // sleep for some time to get a newer modification time
  Thread.sleep(ttl);
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + now));

  // Case 2: 1 newer file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + (now + 10000) ));

  for (FileStatus stat : fs.listStatus(oldLogDir)) {
    System.out.println(stat.getPath().toString());
  }

  assertEquals(34, fs.listStatus(oldLogDir).length);

  cleaner.chore();

  // We end up with the current log file, a newer one and the 3 old log
  // files which are scheduled for replication
  assertEquals(5, fs.listStatus(oldLogDir).length);

  for (FileStatus file : fs.listStatus(oldLogDir)) {
    System.out.println("Kept log files: " + file.getPath().getName());
  }
}
Project: hindex    File: TestLogsCleaner.java
@Test
public void testLogCleaning() throws Exception{
  Configuration conf = TEST_UTIL.getConfiguration();
  // set TTL
  long ttl = 2000;
  conf.setLong("hbase.master.logcleaner.ttl", ttl);
  conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
  Replication.decorateMasterConfiguration(conf);
  Server server = new DummyServer();
  ReplicationZookeeper zkHelper =
      new ReplicationZookeeper(server, new AtomicBoolean(true));

  Path oldLogDir = new Path(TEST_UTIL.getDataTestDir(),
      HConstants.HREGION_OLDLOGDIR_NAME);
  String fakeMachineName =
    URLEncoder.encode(server.getServerName().toString(), "UTF8");

  FileSystem fs = FileSystem.get(conf);
  LogCleaner cleaner  = new LogCleaner(1000, server, conf, fs, oldLogDir);

  // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
  long now = System.currentTimeMillis();
  fs.delete(oldLogDir, true);
  fs.mkdirs(oldLogDir);
  // Case 1: 2 invalid files, which would be deleted directly
  fs.createNewFile(new Path(oldLogDir, "a"));
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + "a"));
  // Case 2: 1 "recent" file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  System.out.println("Now is: " + now);
  for (int i = 1; i < 31; i++) {
    // Case 3: old files which would be deletable for the first log cleaner
    // (TimeToLiveLogCleaner), and also for the second (ReplicationLogCleaner)
    Path fileName = new Path(oldLogDir, fakeMachineName + "." + (now - i) );
    fs.createNewFile(fileName);
    // Case 4: put 3 old log files in ZK indicating that they are scheduled
    // for replication so these files would pass the first log cleaner
    // (TimeToLiveLogCleaner) but would be rejected by the second
    // (ReplicationLogCleaner)
    if (i % (30/3) == 1) {
      zkHelper.addLogToList(fileName.getName(), fakeMachineName);
      System.out.println("Replication log file: " + fileName);
    }
  }

  // sleep for some time to get a newer modification time
  Thread.sleep(ttl);
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + now));

  // Case 2: 1 newer file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + (now + 10000) ));

  for (FileStatus stat : fs.listStatus(oldLogDir)) {
    System.out.println(stat.getPath().toString());
  }

  assertEquals(34, fs.listStatus(oldLogDir).length);

  cleaner.chore();

  // We end up with the current log file, a newer one and the 3 old log
  // files which are scheduled for replication
  assertEquals(5, fs.listStatus(oldLogDir).length);

  for (FileStatus file : fs.listStatus(oldLogDir)) {
    System.out.println("Kept log files: " + file.getPath().getName());
  }
}