Java example source code for class org.apache.hadoop.hbase.Waiter
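
The examples below are collected from HBase test code and show how org.apache.hadoop.hbase.Waiter is used to poll a condition until it becomes true or a timeout expires. As a quick orientation, here is a minimal, self-contained sketch (not taken from any of the projects below; the conditionHolds() method is hypothetical) of the two common call forms, the anonymous Waiter.Predicate and, on Java 8+, a lambda:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.Waiter;

public class WaiterUsageSketch {

  public static void main(String[] args) throws Exception {
    // conf supplies test-environment settings that Waiter uses to scale its timeouts.
    Configuration conf = HBaseConfiguration.create();

    // Poll until the predicate returns true or roughly 30 seconds elapse.
    Waiter.waitFor(conf, 30000, new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return conditionHolds();
      }
    });

    // Waiter.Predicate has a single abstract method, so a lambda works as well.
    Waiter.waitFor(conf, 30000, () -> conditionHolds());
  }

  // Hypothetical condition, used only for illustration.
  private static boolean conditionHolds() {
    return true;
  }
}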

Project: ditb    File: TestReplicationEndpoint.java
@Test (timeout=120000)
public void testWALEntryFilterFromReplicationEndpoint() throws Exception {
  admin.addPeer("testWALEntryFilterFromReplicationEndpoint",
    new ReplicationPeerConfig().setClusterKey(ZKConfig.getZooKeeperClusterKey(conf1))
      .setReplicationEndpointImpl(ReplicationEndpointWithWALEntryFilter.class.getName()), null);
  // now replicate some data.
  try (Connection connection = ConnectionFactory.createConnection(conf1)) {
    doPut(connection, Bytes.toBytes("row1"));
    doPut(connection, row);
    doPut(connection, Bytes.toBytes("row2"));
  }

  Waiter.waitFor(conf1, 60000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return ReplicationEndpointForTest.replicateCount.get() >= 1;
    }
  });

  Assert.assertNull(ReplicationEndpointWithWALEntryFilter.ex.get());
  admin.removePeer("testWALEntryFilterFromReplicationEndpoint");
}
Project: ditb    File: TestSplitLogWorker.java
private boolean waitForCounterBoolean(final AtomicLong ctr, final long oldval, final long newval,
    long timems, boolean failIfTimeout) throws Exception {

  long timeWaited = TEST_UTIL.waitFor(timems, 10, failIfTimeout,
    new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return ctr.get() >= newval;
      }
    });

  if (timeWaited > 0) {
    // when not timed out
    assertEquals(newval, ctr.get());
  }
  return true;
}
Project: pbase    File: TestReplicationEndpoint.java
@Test (timeout=120000)
public void testWALEntryFilterFromReplicationEndpoint() throws Exception {
  admin.addPeer("testWALEntryFilterFromReplicationEndpoint",
    new ReplicationPeerConfig().setClusterKey(ZKUtil.getZooKeeperClusterKey(conf1))
      .setReplicationEndpointImpl(ReplicationEndpointWithWALEntryFilter.class.getName()), null);
  // now replicate some data.
  try (Connection connection = ConnectionFactory.createConnection(conf1)) {
    doPut(connection, Bytes.toBytes("row1"));
    doPut(connection, row);
    doPut(connection, Bytes.toBytes("row2"));
  }

  Waiter.waitFor(conf1, 60000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return ReplicationEndpointForTest.replicateCount.get() >= 1;
    }
  });

  Assert.assertNull(ReplicationEndpointWithWALEntryFilter.ex.get());
  admin.removePeer("testWALEntryFilterFromReplicationEndpoint");
}
Project: pbase    File: SnapshotTestingUtils.java
public static void waitForTableToBeOnline(final HBaseTestingUtility util,
                                          final TableName tableName)
    throws IOException, InterruptedException {
  HRegionServer rs = util.getRSForFirstRegionInTable(tableName);
  List<HRegion> onlineRegions = rs.getOnlineRegions(tableName);
  for (HRegion region : onlineRegions) {
    region.waitForFlushesAndCompactions();
  }
  // Wait up to 60 seconds for a table to be available.
  final HBaseAdmin hBaseAdmin = util.getHBaseAdmin();
  util.waitFor(60000, new Waiter.Predicate<IOException>() {
    @Override
    public boolean evaluate() throws IOException {
      return hBaseAdmin.isTableAvailable(tableName);
    }
  });
}
Project: pbase    File: TestSplitLogWorker.java
private boolean waitForCounterBoolean(final AtomicLong ctr, final long oldval, final long newval,
    long timems, boolean failIfTimeout) throws Exception {

  long timeWaited = TEST_UTIL.waitFor(timems, 10, failIfTimeout,
    new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return ctr.get() >= newval;
      }
    });

  if (timeWaited > 0) {
    // when not timed out
    assertEquals(newval, ctr.get());
  }
  return true;
}
Project: HIndex    File: TestSplitLogWorker.java
private boolean waitForCounterBoolean(final AtomicLong ctr, final long oldval, final long newval,
    long timems, boolean failIfTimeout) throws Exception {

  long timeWaited = TEST_UTIL.waitFor(timems, 10, failIfTimeout,
    new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return ctr.get() >= newval;
      }
    });

  if (timeWaited > 0) {
    // when not timed out
    assertEquals(newval, ctr.get());
  }
  return true;
}
Project: hbase    File: TestReplicationEmptyWALRecovery.java
/**
 * Waits until there is only one log (the one currently being written) in the replication queue
 * @param numRs number of regionservers
 */
private void waitForLogAdvance(int numRs) throws Exception {
  Waiter.waitFor(conf1, 10000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      for (int i = 0; i < numRs; i++) {
        HRegionServer hrs = utility1.getHBaseCluster().getRegionServer(i);
        RegionInfo regionInfo =
            utility1.getHBaseCluster().getRegions(htable1.getName()).get(0).getRegionInfo();
        WAL wal = hrs.getWAL(regionInfo);
        Path currentFile = ((AbstractFSWAL<?>) wal).getCurrentFileName();
        Replication replicationService = (Replication) utility1.getHBaseCluster()
            .getRegionServer(i).getReplicationSourceService();
        for (ReplicationSourceInterface rsi : replicationService.getReplicationManager()
            .getSources()) {
          ReplicationSource source = (ReplicationSource) rsi;
          if (!currentFile.equals(source.getCurrentPath())) {
            return false;
          }
        }
      }
      return true;
    }
  });
}
Project: hbase    File: TestReplicationSourceManager.java
private static void waitPeer(final String peerId,
    ReplicationSourceManager manager, final boolean waitForSource) {
  ReplicationPeers rp = manager.getReplicationPeers();
  Waiter.waitFor(conf, 20000, () -> {
    if (waitForSource) {
      ReplicationSourceInterface rs = manager.getSource(peerId);
      if (rs == null) {
        return false;
      }
      if (rs instanceof ReplicationSourceDummy) {
        return ((ReplicationSourceDummy)rs).isStartup();
      }
      return true;
    } else {
      return (rp.getPeer(peerId) != null);
    }
  });
}
Project: hbase    File: TestReplicationSourceManager.java
/**
 * Remove a peer and wait for it to get cleaned up
 * @param peerId
 * @throws Exception
 */
private void removePeerAndWait(final String peerId) throws Exception {
  final ReplicationPeers rp = manager.getReplicationPeers();
  if (rp.getPeerStorage().listPeerIds().contains(peerId)) {
    rp.getPeerStorage().removePeer(peerId);
    try {
      manager.removePeer(peerId);
    } catch (Exception e) {
      // ignore the failed exception and continue.
    }
  }
  Waiter.waitFor(conf, 20000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      Collection<String> peers = rp.getPeerStorage().listPeerIds();
      return (!manager.getAllQueues().contains(peerId)) && (rp.getPeer(peerId) == null)
          && (!peers.contains(peerId)) && manager.getSource(peerId) == null;
    }
  });
}
Project: hbase    File: TestGetProcedureResult.java
@Test
public void testRace() throws Exception {
  ProcedureExecutor<?> executor =
    UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor();
  DummyProcedure p = new DummyProcedure();
  long procId = executor.submitProcedure(p);
  p.failureSet.await();
  assertEquals(GetProcedureResultResponse.State.RUNNING, getState(procId));
  p.canRollback.countDown();
  UTIL.waitFor(30000, new Waiter.ExplainingPredicate<Exception>() {

    @Override
    public boolean evaluate() throws Exception {
      return getState(procId) == GetProcedureResultResponse.State.FINISHED;
    }

    @Override
    public String explainFailure() throws Exception {
      return "Procedure pid=" + procId + " is still in " + getState(procId) +
        " state, expected " + GetProcedureResultResponse.State.FINISHED;
    }
  });
}
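Note that TestGetProcedureResult above uses Waiter.ExplainingPredicate rather than the plain Predicate: in addition to evaluate(), it supplies explainFailure(), and that explanation is included in the failure message when the wait times out, which makes timed-out waits easier to diagnose.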
Project: hbase    File: TestHTableMultiplexerFlushCache.java
private static void checkExistence(final Table htable, final byte[] row, final byte[] family,
    final byte[] quality,
    final byte[] value) throws Exception {
  // verify that the Get returns the correct result
  TEST_UTIL.waitFor(30000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      Result r;
      Get get = new Get(row);
      get.addColumn(family, quality);
      r = htable.get(get);
      return r != null && r.getValue(family, quality) != null
          && Bytes.toStringBinary(value).equals(
          Bytes.toStringBinary(r.getValue(family, quality)));
    }
  });
}
Project: hbase    File: TestAsyncNamespaceAdminApi.java
@Test(timeout = 60000)
public void testCreateAndDelete() throws Exception {
  String testName = "testCreateAndDelete";
  String nsName = prefix + "_" + testName;

  // create namespace and verify
  admin.createNamespace(NamespaceDescriptor.create(nsName).build()).join();
  assertEquals(3, admin.listNamespaceDescriptors().get().size());
  TEST_UTIL.waitFor(60000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return zkNamespaceManager.list().size() == 3;
    }
  });
  assertNotNull(zkNamespaceManager.get(nsName));
  // delete namespace and verify
  admin.deleteNamespace(nsName).join();
  assertEquals(2, admin.listNamespaceDescriptors().get().size());
  assertEquals(2, zkNamespaceManager.list().size());
  assertNull(zkNamespaceManager.get(nsName));
}
Project: hbase    File: TestSuperUserQuotaPermissions.java
private void waitForTableToEnterQuotaViolation(TableName tn) throws Exception {
  // Verify that the RegionServer has the quota in violation
  final HRegionServer rs = TEST_UTIL.getHBaseCluster().getRegionServer(0);
  Waiter.waitFor(TEST_UTIL.getConfiguration(), 30 * 1000, 1000, new Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      Map<TableName,SpaceQuotaSnapshot> snapshots =
          rs.getRegionServerSpaceQuotaManager().copyQuotaSnapshots();
      SpaceQuotaSnapshot snapshot = snapshots.get(tn);
      if (snapshot == null) {
        LOG.info("Found no snapshot for " + tn);
        return false;
      }
      LOG.info("Found snapshot " + snapshot);
      return snapshot.getQuotaStatus().isInViolation();
    }
  });
}
Project: hbase    File: TestQuotaStatusRPCs.java
@Test
public void testRegionSizesFromMaster() throws Exception {
  final long tableSize = 1024L * 10L; // 10KB
  final int numRegions = 10;
  final TableName tn = helper.createTableWithRegions(numRegions);
  // Will write at least `tableSize` data
  helper.writeData(tn, tableSize);

  final HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster();
  final MasterQuotaManager quotaManager = master.getMasterQuotaManager();
  // Make sure the master has all of the reports
  Waiter.waitFor(TEST_UTIL.getConfiguration(), 30 * 1000, new Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      Map<RegionInfo,Long> regionSizes = quotaManager.snapshotRegionSizes();
      LOG.trace("Region sizes=" + regionSizes);
      return numRegions == countRegionsForTable(tn, regionSizes) &&
          tableSize <= getTableSize(tn, regionSizes);
    }
  });

  Map<TableName,Long> sizes = QuotaTableUtil.getMasterReportedTableSizes(TEST_UTIL.getConnection());
  Long size = sizes.get(tn);
  assertNotNull("No reported size for " + tn, size);
  assertTrue("Reported table size was " + size, size.longValue() >= tableSize);
}
Project: hbase    File: TestFavoredStochasticLoadBalancer.java
private void stopServersAndWaitUntilProcessed(List<ServerName> currentFN) throws Exception {
  for (ServerName sn : currentFN) {
    for (JVMClusterUtil.RegionServerThread rst : cluster.getLiveRegionServerThreads()) {
      if (ServerName.isSameAddress(sn, rst.getRegionServer().getServerName())) {
        LOG.info("Shutting down server: " + sn);
        cluster.stopRegionServer(rst.getRegionServer().getServerName());
        cluster.waitForRegionServerToStop(rst.getRegionServer().getServerName(), 60000);
      }
    }
  }

  // Wait until dead servers are processed.
  TEST_UTIL.waitFor(60000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return !master.getServerManager().areDeadServersInProgress();
    }
  });

  assertEquals("Not all servers killed",
      SLAVES - currentFN.size(), cluster.getLiveRegionServerThreads().size());
}
Project: hbase    File: AbstractTestDLS.java
private void startCluster(int numRS) throws Exception {
  SplitLogCounters.resetCounters();
  LOG.info("Starting cluster");
  conf.setLong("hbase.splitlog.max.resubmit", 0);
  // Make the failure test faster
  conf.setInt("zookeeper.recovery.retry", 0);
  conf.setInt(HConstants.REGIONSERVER_INFO_PORT, -1);
  conf.setFloat(HConstants.LOAD_BALANCER_SLOP_KEY, (float) 100.0); // no load balancing
  conf.setInt("hbase.regionserver.wal.max.splitters", 3);
  conf.setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10);
  conf.set("hbase.wal.provider", getWalProvider());
  TEST_UTIL.startMiniHBaseCluster(NUM_MASTERS, numRS);
  cluster = TEST_UTIL.getHBaseCluster();
  LOG.info("Waiting for active/ready master");
  cluster.waitForActiveAndReadyMaster();
  master = cluster.getMaster();
  TEST_UTIL.waitFor(120000, 200, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return cluster.getLiveRegionServerThreads().size() >= numRS;
    }
  });
}
Project: hbase    File: TestSplitLogWorker.java
private boolean waitForCounterBoolean(final LongAdder ctr, final long oldval, final long newval,
    long timems, boolean failIfTimeout) throws Exception {

  long timeWaited = TEST_UTIL.waitFor(timems, 10, failIfTimeout,
    new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return ctr.sum() >= newval;
      }
    });

  if (timeWaited > 0) {
    // when not timed out
    assertEquals(newval, ctr.sum());
  }
  return true;
}
Project: hbase    File: TestRSGroupsOfflineMode.java
@BeforeClass
public static void setUp() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  TEST_UTIL.getConfiguration().set(
      HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
      RSGroupBasedLoadBalancer.class.getName());
  TEST_UTIL.getConfiguration().set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
      RSGroupAdminEndpoint.class.getName());
  TEST_UTIL.getConfiguration().set(
      ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
      "1");
  TEST_UTIL.startMiniCluster(2, 3);
  cluster = TEST_UTIL.getHBaseCluster();
  master = ((MiniHBaseCluster)cluster).getMaster();
  master.balanceSwitch(false);
  hbaseAdmin = TEST_UTIL.getAdmin();
  //wait till the balancer is in online mode
  TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return master.isInitialized() &&
          ((RSGroupBasedLoadBalancer) master.getLoadBalancer()).isOnline() &&
          master.getServerManager().getOnlineServersList().size() >= 3;
    }
  });
}
Project: hbase    File: TestRSGroups.java
@Test
public void testNamespaceCreateAndAssign() throws Exception {
  LOG.info("testNamespaceCreateAndAssign");
  String nsName = tablePrefix+"_foo";
  final TableName tableName = TableName.valueOf(nsName, tablePrefix + "_testCreateAndAssign");
  RSGroupInfo appInfo = addGroup("appInfo", 1);
  admin.createNamespace(NamespaceDescriptor.create(nsName)
      .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, "appInfo").build());
  final HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor("f"));
  admin.createTable(desc);
  //wait for created table to be assigned
  TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return getTableRegionMap().get(desc.getTableName()) != null;
    }
  });
  ServerName targetServer =
      ServerName.parseServerName(appInfo.getServers().iterator().next().toString());
  AdminProtos.AdminService.BlockingInterface rs =
    ((ClusterConnection) admin.getConnection()).getAdmin(targetServer);
  //verify it was assigned to the right group
  Assert.assertEquals(1, ProtobufUtil.getOnlineRegions(rs).size());
}
Project: hbase    File: TestRSGroups.java
@Test
public void testDefaultNamespaceCreateAndAssign() throws Exception {
  LOG.info("testDefaultNamespaceCreateAndAssign");
  String tableName = tablePrefix + "_testCreateAndAssign";
  admin.modifyNamespace(NamespaceDescriptor.create("default")
      .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, "default").build());
  final HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
  desc.addFamily(new HColumnDescriptor("f"));
  admin.createTable(desc);
  //wait for created table to be assigned
  TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return getTableRegionMap().get(desc.getTableName()) != null;
    }
  });
}
Project: hbase    File: TestStatusResource.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  conf = TEST_UTIL.getConfiguration();
  TEST_UTIL.startMiniCluster(1, 1);
  TEST_UTIL.createTable(TableName.valueOf("TestStatusResource"), Bytes.toBytes("D"));
  TEST_UTIL.createTable(TableName.valueOf("TestStatusResource2"), Bytes.toBytes("D"));
  REST_TEST_UTIL.startServletContainer(conf);
  Cluster cluster = new Cluster();
  cluster.add("localhost", REST_TEST_UTIL.getServletPort());
  client = new Client(cluster);
  context = JAXBContext.newInstance(StorageClusterStatusModel.class);
  TEST_UTIL.waitFor(6000, new Waiter.Predicate<IOException>() {
    @Override
    public boolean evaluate() throws IOException {
      return TEST_UTIL.getMiniHBaseCluster().getClusterStatus().getAverageLoad() > 0;
    }
  });
}
Project: PyroDB    File: TestSplitLogWorker.java
private boolean waitForCounterBoolean(final AtomicLong ctr, final long oldval, final long newval,
    long timems, boolean failIfTimeout) throws Exception {

  long timeWaited = TEST_UTIL.waitFor(timems, 10, failIfTimeout,
    new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return ctr.get() >= newval;
      }
    });

  if (timeWaited > 0) {
    // when not timed out
    assertEquals(newval, ctr.get());
  }
  return true;
}
Project: c5    File: TestSplitLogWorker.java
private boolean waitForCounterBoolean(final AtomicLong ctr, final long oldval, final long newval,
    long timems, boolean failIfTimeout) throws Exception {

  long timeWaited = TEST_UTIL.waitFor(timems, 10, failIfTimeout,
    new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return ctr.get() >= newval;
      }
    });

  if (timeWaited > 0) {
    // when not timed out
    assertEquals(newval, ctr.get());
  }
  return true;
}
Project: DominoHBase    File: TestSplitLogWorker.java
private boolean waitForCounterBoolean(final AtomicLong ctr, final long oldval, long newval,
    long timems, boolean failIfTimeout) throws Exception {

  long timeWaited = TEST_UTIL.waitFor(timems, 10, failIfTimeout,
    new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return ctr.get() != oldval;
      }
    });

  if (timeWaited > 0) {
    // when not timed out
    assertEquals(newval, ctr.get());
  }
  return true;
}
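Note that this older DominoHBase variant differs from the waitForCounterBoolean copies above: its predicate only waits for the counter to move away from oldval and then asserts that the counter equals newval, whereas the later versions wait directly for the counter to reach newval.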
Project: ditb    File: TestReplicationEndpoint.java
@Test (timeout=120000)
public void testCustomReplicationEndpoint() throws Exception {
  // test installing a custom replication endpoint other than the default one.
  admin.addPeer("testCustomReplicationEndpoint",
    new ReplicationPeerConfig().setClusterKey(ZKConfig.getZooKeeperClusterKey(conf1))
      .setReplicationEndpointImpl(ReplicationEndpointForTest.class.getName()), null);

  // check whether the class has been constructed and started
  Waiter.waitFor(conf1, 60000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return ReplicationEndpointForTest.contructedCount.get() >= numRegionServers;
    }
  });

  Waiter.waitFor(conf1, 60000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return ReplicationEndpointForTest.startedCount.get() >= numRegionServers;
    }
  });

  Assert.assertEquals(0, ReplicationEndpointForTest.replicateCount.get());

  // now replicate some data.
  doPut(Bytes.toBytes("row42"));

  Waiter.waitFor(conf1, 60000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return ReplicationEndpointForTest.replicateCount.get() >= 1;
    }
  });

  doAssert(Bytes.toBytes("row42"));

  admin.removePeer("testCustomReplicationEndpoint");
}
Project: ditb    File: TestReplicationEndpoint.java
@Test (timeout=120000)
public void testReplicationEndpointReturnsFalseOnReplicate() throws Exception {
  Assert.assertEquals(0, ReplicationEndpointForTest.replicateCount.get());
  Assert.assertTrue(!ReplicationEndpointReturningFalse.replicated.get());
  int peerCount = admin.getPeersCount();
  final String id = "testReplicationEndpointReturnsFalseOnReplicate";
  admin.addPeer(id,
    new ReplicationPeerConfig().setClusterKey(ZKConfig.getZooKeeperClusterKey(conf1))
      .setReplicationEndpointImpl(ReplicationEndpointReturningFalse.class.getName()), null);
  // This test is flaky and there is so much stuff flying around in here that it is hard to
  // debug. The peer needs to be up for the edit to make it across. This wait on the
  // peer count seems to be a hack that keeps us from progressing until the peer is up.
  if (admin.getPeersCount() <= peerCount) {
    LOG.info("Waiting on peercount to go up from " + peerCount);
    Threads.sleep(100);
  }
  // now replicate some data
  doPut(row);

  Waiter.waitFor(conf1, 60000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      // Looks like replication endpoint returns false unless we put more than 10 edits. We
      // only send over one edit.
      int count = ReplicationEndpointForTest.replicateCount.get();
      LOG.info("count=" + count);
      return ReplicationEndpointReturningFalse.replicated.get();
    }
  });
  if (ReplicationEndpointReturningFalse.ex.get() != null) {
    throw ReplicationEndpointReturningFalse.ex.get();
  }

  admin.removePeer("testReplicationEndpointReturnsFalseOnReplicate");
}
Project: ditb    File: TestMultiRespectsLimits.java
@Test
public void testMultiLimits() throws Exception {
  final TableName name = TableName.valueOf("testMultiLimits");
  Table t = TEST_UTIL.createTable(name, FAMILY);
  TEST_UTIL.loadTable(t, FAMILY, false);

  // Split the table to make sure that the chunking happens across regions.
  try (final Admin admin = TEST_UTIL.getHBaseAdmin()) {
    admin.split(name);
    TEST_UTIL.waitFor(60000, new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return admin.getTableRegions(name).size() > 1;
      }
    });
  }
  List<Get> gets = new ArrayList<>(MAX_SIZE);

  for (int i = 0; i < MAX_SIZE; i++) {
    gets.add(new Get(HBaseTestingUtility.ROWS[i]));
  }

  RpcServerInterface rpcServer = TEST_UTIL.getHBaseCluster().getRegionServer(0).getRpcServer();
  BaseSource s = rpcServer.getMetrics().getMetricsSource();
  long startingExceptions = METRICS_ASSERT.getCounter("exceptions", s);
  long startingMultiExceptions = METRICS_ASSERT.getCounter("exceptions.multiResponseTooLarge", s);

  Result[] results = t.get(gets);
  assertEquals(MAX_SIZE, results.length);

  // Cells from TEST_UTIL.loadTable have a length of 27.
  // Multiplying by less than that gives an easy lower bound on size.
  // However in reality each kv is being reported as much higher than that.
  METRICS_ASSERT.assertCounterGt("exceptions",
      startingExceptions + ((MAX_SIZE * 25) / MAX_SIZE), s);
  METRICS_ASSERT.assertCounterGt("exceptions.multiResponseTooLarge",
      startingMultiExceptions + ((MAX_SIZE * 25) / MAX_SIZE), s);
}
Project: ditb    File: TestWarmupRegion.java
/**
 * @throws java.lang.Exception
 */
@Before
public void setUp() throws Exception {
  table = TEST_UTIL.createTable(TABLENAME, FAMILY);

  // future timestamp
  for (int i = 0; i < numRows; i++) {
    long ts = System.currentTimeMillis() * 2;
    Put put = new Put(ROW, ts);
    put.add(FAMILY, COLUMN, VALUE);
    table.put(put);
  }

  // major compaction, purge future deletes
  TEST_UTIL.getHBaseAdmin().flush(TABLENAME);
  TEST_UTIL.getHBaseAdmin().majorCompact(TABLENAME);

  // waiting for the major compaction to complete
  TEST_UTIL.waitFor(6000, new Waiter.Predicate<IOException>() {
    @Override
    public boolean evaluate() throws IOException {
      return TEST_UTIL.getHBaseAdmin().getCompactionState(TABLENAME) ==
          AdminProtos.GetRegionInfoResponse.CompactionState.NONE;
    }
  });

  table.close();
}
Project: ditb    File: TestSplitLogManager.java
private void waitForCounter(final Expr e, final long oldval, long newval, long timems)
    throws Exception {

  TEST_UTIL.waitFor(timems, 10, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return (e.eval() != oldval);
    }
  });

  assertEquals(newval, e.eval());
}
Project: ditb    File: TestHeapMemoryManager.java
private void waitForTune(final MemstoreFlusherStub memStoreFlusher,
                         final long oldMemstoreHeapSize) throws Exception {
  // Allow the tuner to run once and make the necessary memory adjustment
  UTIL.waitFor(10000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return oldMemstoreHeapSize != memStoreFlusher.memstoreSize;
    }
  });
}
Project: ditb    File: TestAsyncIPC.java
@Test
public void testAsyncConnectionSetup() throws Exception {
  TestRpcServer rpcServer = new TestRpcServer();
  AsyncRpcClient client = createRpcClient(CONF);
  try {
    rpcServer.start();
    InetSocketAddress address = rpcServer.getListenerAddress();
    if (address == null) {
      throw new IOException("Listener channel is closed");
    }
    MethodDescriptor md = SERVICE.getDescriptorForType().findMethodByName("echo");
    EchoRequestProto param = EchoRequestProto.newBuilder().setMessage("hello").build();

    RpcChannel channel =
        client.createRpcChannel(ServerName.valueOf(address.getHostName(), address.getPort(),
          System.currentTimeMillis()), User.getCurrent(), 0);

    final AtomicBoolean done = new AtomicBoolean(false);

    channel.callMethod(md, new PayloadCarryingRpcController(), param, md.getOutputType()
        .toProto(), new RpcCallback<Message>() {
      @Override
      public void run(Message parameter) {
        done.set(true);
      }
    });

    TEST_UTIL.waitFor(1000, new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return done.get();
      }
    });
  } finally {
    client.close();
    rpcServer.stop();
  }
}
Project: ditb    File: TestNamespaceAuditor.java
@Test
public void testStatePreserve() throws Exception {
  final String nsp1 = prefix + "_testStatePreserve";
  NamespaceDescriptor nspDesc =
      NamespaceDescriptor.create(nsp1)
          .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "20")
          .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "10").build();
  ADMIN.createNamespace(nspDesc);
  TableName tableOne = TableName.valueOf(nsp1 + TableName.NAMESPACE_DELIM + "table1");
  TableName tableTwo = TableName.valueOf(nsp1 + TableName.NAMESPACE_DELIM + "table2");
  TableName tableThree = TableName.valueOf(nsp1 + TableName.NAMESPACE_DELIM + "table3");
  HTableDescriptor tableDescOne = new HTableDescriptor(tableOne);
  HTableDescriptor tableDescTwo = new HTableDescriptor(tableTwo);
  HTableDescriptor tableDescThree = new HTableDescriptor(tableThree);
  ADMIN.createTable(tableDescOne, Bytes.toBytes("1"), Bytes.toBytes("1000"), 3);
  ADMIN.createTable(tableDescTwo, Bytes.toBytes("1"), Bytes.toBytes("1000"), 3);
  ADMIN.createTable(tableDescThree, Bytes.toBytes("1"), Bytes.toBytes("1000"), 4);
  ADMIN.disableTable(tableThree);
  deleteTable(tableThree);
  // wait for chore to complete
  UTIL.waitFor(1000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return (getNamespaceState(nsp1).getTables().size() == 2);
    }
  });
  NamespaceTableAndRegionInfo before = getNamespaceState(nsp1);
  restartMaster();
  NamespaceTableAndRegionInfo after = getNamespaceState(nsp1);
  assertEquals("Expected: " + before.getTables() + " Found: " + after.getTables(), before
      .getTables().size(), after.getTables().size());
}
Project: ditb    File: TestNamespaceAuditor.java
private static void waitForQuotaEnabled() throws Exception {
  UTIL.waitFor(60000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      HMaster master = UTIL.getHBaseCluster().getMaster();
      if (master == null) {
        return false;
      }
      MasterQuotaManager quotaManager = master.getMasterQuotaManager();
      return quotaManager != null && quotaManager.isQuotaEnabled();
    }
  });
}
Project: pbase    File: TestReplicationEndpoint.java
@Test (timeout=120000)
public void testCustomReplicationEndpoint() throws Exception {
  // test installing a custom replication endpoint other than the default one.
  admin.addPeer("testCustomReplicationEndpoint",
    new ReplicationPeerConfig().setClusterKey(ZKUtil.getZooKeeperClusterKey(conf1))
      .setReplicationEndpointImpl(ReplicationEndpointForTest.class.getName()), null);

  // check whether the class has been constructed and started
  Waiter.waitFor(conf1, 60000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return ReplicationEndpointForTest.contructedCount.get() >= numRegionServers;
    }
  });

  Waiter.waitFor(conf1, 60000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return ReplicationEndpointForTest.startedCount.get() >= numRegionServers;
    }
  });

  Assert.assertEquals(0, ReplicationEndpointForTest.replicateCount.get());

  // now replicate some data.
  doPut(Bytes.toBytes("row42"));

  Waiter.waitFor(conf1, 60000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return ReplicationEndpointForTest.replicateCount.get() >= 1;
    }
  });

  doAssert(Bytes.toBytes("row42"));

  admin.removePeer("testCustomReplicationEndpoint");
}
Project: pbase    File: TestReplicationEndpoint.java
@Test (timeout=120000)
public void testReplicationEndpointReturnsFalseOnReplicate() throws Exception {
  Assert.assertEquals(0, ReplicationEndpointForTest.replicateCount.get());
  Assert.assertTrue(!ReplicationEndpointReturningFalse.replicated.get());
  int peerCount = admin.getPeersCount();
  final String id = "testReplicationEndpointReturnsFalseOnReplicate";
  admin.addPeer(id,
    new ReplicationPeerConfig().setClusterKey(ZKUtil.getZooKeeperClusterKey(conf1))
      .setReplicationEndpointImpl(ReplicationEndpointReturningFalse.class.getName()), null);
  // This test is flaky and there is so much stuff flying around in here that it is hard to
  // debug. The peer needs to be up for the edit to make it across. This wait on the
  // peer count seems to be a hack that keeps us from progressing until the peer is up.
  if (admin.getPeersCount() <= peerCount) {
    LOG.info("Waiting on peercount to go up from " + peerCount);
    Threads.sleep(100);
  }
  // now replicate some data
  doPut(row);

  Waiter.waitFor(conf1, 60000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      // Looks like replication endpoint returns false unless we put more than 10 edits. We
      // only send over one edit.
      int count = ReplicationEndpointForTest.replicateCount.get();
      LOG.info("count=" + count);
      return ReplicationEndpointReturningFalse.replicated.get();
    }
  });
  if (ReplicationEndpointReturningFalse.ex.get() != null) {
    throw ReplicationEndpointReturningFalse.ex.get();
  }

  admin.removePeer("testReplicationEndpointReturnsFalseOnReplicate");
}
Project: pbase    File: TestSplitLogManager.java
private void waitForCounter(final Expr e, final long oldval, long newval, long timems)
    throws Exception {

  TEST_UTIL.waitFor(timems, 10, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return (e.eval() != oldval);
    }
  });

  assertEquals(newval, e.eval());
}
Project: pbase    File: TestDistributedLogSplitting.java
private void abortRSAndWaitForRecovery(HRegionServer hrs, final ZooKeeperWatcher zkw,
    final int numRegions) throws Exception {
  final MiniHBaseCluster tmpCluster = this.cluster;

  // abort RS
  LOG.info("Aborting region server: " + hrs.getServerName());
  hrs.abort("testing");

  // wait for the abort to complete
  TEST_UTIL.waitFor(120000, 200, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return (tmpCluster.getLiveRegionServerThreads().size() <= (NUM_RS - 1));
    }
  });

  // wait for regions to come online
  TEST_UTIL.waitFor(180000, 200, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return (HBaseTestingUtility.getAllOnlineRegions(tmpCluster).size()
          >= (numRegions + 1));
    }
  });

  // wait until all regions are fully recovered
  TEST_UTIL.waitFor(180000, 200, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      List<String> recoveringRegions = zkw.getRecoverableZooKeeper().getChildren(
        zkw.recoveringRegionsZNode, false);
      return (recoveringRegions != null && recoveringRegions.size() == 0);
    }
  });
}
Project: pbase    File: TestSplitTransactionOnCluster.java
@Test(timeout = 120000)
public void testFailedSplit() throws Exception {
  TableName tableName = TableName.valueOf("testFailedSplit");
  byte[] colFamily = Bytes.toBytes("info");
  TESTING_UTIL.createTable(tableName, colFamily);
  Connection connection = ConnectionFactory.createConnection(TESTING_UTIL.getConfiguration());
  HTable table = (HTable) connection.getTable(tableName);
  try {
    TESTING_UTIL.loadTable(table, colFamily);
    List<HRegionInfo> regions = TESTING_UTIL.getHBaseAdmin().getTableRegions(tableName);
    assertTrue(regions.size() == 1);
    final HRegion actualRegion = cluster.getRegions(tableName).get(0);
    actualRegion.getCoprocessorHost().load(FailingSplitRegionObserver.class,
      Coprocessor.PRIORITY_USER, actualRegion.getBaseConf());

    // The following split would fail.
    admin.split(tableName);
    FailingSplitRegionObserver observer = (FailingSplitRegionObserver) actualRegion
        .getCoprocessorHost().findCoprocessor(FailingSplitRegionObserver.class.getName());
    assertNotNull(observer);
    observer.latch.await();
    observer.postSplit.await();
    LOG.info("Waiting for region to come out of RIT");
    TESTING_UTIL.waitFor(60000, 1000, new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        RegionStates regionStates = cluster.getMaster().getAssignmentManager().getRegionStates();
        Map<String, RegionState> rit = regionStates.getRegionsInTransition();
        return (rit.size() == 0);
      }
    });
    regions = TESTING_UTIL.getHBaseAdmin().getTableRegions(tableName);
    assertTrue(regions.size() == 1);
    assertTrue(admin.balancer());
  } finally {
    table.close();
    connection.close();
    TESTING_UTIL.deleteTable(tableName);
  }
}
Project: HIndex    File: TestSplitLogManager.java
private void waitForCounter(final Expr e, final long oldval, long newval, long timems)
    throws Exception {

  TEST_UTIL.waitFor(timems, 10, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return (e.eval() != oldval);
    }
  });

  assertEquals(newval, e.eval());
}
Project: HIndex    File: TestDistributedLogSplitting.java
private void abortRSAndWaitForRecovery(HRegionServer hrs, final ZooKeeperWatcher zkw,
    final int numRegions) throws Exception {
  final MiniHBaseCluster tmpCluster = this.cluster;

  // abort RS
  LOG.info("Aborting region server: " + hrs.getServerName());
  hrs.abort("testing");

  // wait for the abort to complete
  TEST_UTIL.waitFor(120000, 200, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return (tmpCluster.getLiveRegionServerThreads().size() <= (NUM_RS - 1));
    }
  });

  // wait for regions to come online
  TEST_UTIL.waitFor(180000, 200, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return (getAllOnlineRegions(tmpCluster).size() >= (numRegions + 1));
    }
  });

  // wait for all regions are fully recovered
  TEST_UTIL.waitFor(180000, 200, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      List<String> recoveringRegions = zkw.getRecoverableZooKeeper().getChildren(
        zkw.recoveringRegionsZNode, false);
      return (recoveringRegions != null && recoveringRegions.size() == 0);
    }
  });
}