Java class org.apache.hadoop.hbase.ServerName source code examples

Project: ditb    File: RegionStateStore.java
/**
 * Returns the {@link ServerName} from catalog table {@link Result}
 * where the region is transitioning. It should be the same as
 * {@link HRegionInfo#getServerName(Result)} if the region is in the OPEN state.
 * @param r Result to pull the transitioning server name from
 * @return A ServerName instance, or {@link HRegionInfo#getServerName(Result)}
 * if the necessary fields are not found or are empty.
 */
static ServerName getRegionServer(final Result r, int replicaId) {
  Cell cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY, getServerNameColumn(replicaId));
  if (cell == null || cell.getValueLength() == 0) {
    RegionLocations locations = MetaTableAccessor.getRegionLocations(r);
    if (locations != null) {
      HRegionLocation location = locations.getRegionLocation(replicaId);
      if (location != null) {
        return location.getServerName();
      }
    }
    return null;
  }
  return ServerName.parseServerName(Bytes.toString(cell.getValueArray(),
    cell.getValueOffset(), cell.getValueLength()));
}
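As a side note on the ServerName forms handled above, here is a minimal self-contained sketch (the host, port and start code are hypothetical; HBase 1.x client API assumed):

import org.apache.hadoop.hbase.ServerName;

public class ServerNameFormsSketch {
  public static void main(String[] args) {
    // Hypothetical values for illustration only.
    ServerName sn = ServerName.valueOf("rs1.example.org", 16020, 1423987654321L);
    // The catalog cell stores the same information in "host,port,startcode" form.
    ServerName parsed = ServerName.parseServerName("rs1.example.org,16020,1423987654321");
    System.out.println(sn.equals(parsed));    // true: host, port and start code all match
    System.out.println(sn.getHostAndPort());  // rs1.example.org:16020
    System.out.println(sn.getStartcode());    // 1423987654321
  }
}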
Project: ditb    File: RpcRetryingCallerWithReadReplicas.java
/**
 * Two responsibilities:
 * - if the call has already been completed (by another replica), stop the retries;
 * - set the location to the right region, depending on the replica.
 */
@Override
public void prepare(final boolean reload) throws IOException {
  if (controller.isCanceled()) return;

  if (Thread.interrupted()) {
    throw new InterruptedIOException();
  }

  if (reload || location == null) {
    RegionLocations rl = getRegionLocations(false, id, cConnection, tableName, get.getRow());
    location = id < rl.size() ? rl.getRegionLocation(id) : null;
  }

  if (location == null || location.getServerName() == null) {
    // With this exception, there will be a retry. The location can be null for a replica
    //  when the table is created or after a split.
    throw new HBaseIOException("There is no location for replica id #" + id);
  }

  ServerName dest = location.getServerName();

  setStub(cConnection.getClient(dest));
}
Project: dremio-oss    File: HBaseGroupScan.java
@Override
public Iterator<RegionWork> getSplits(ExecutionNodeMap executionNodes) {
  List<RegionWork> work = new ArrayList<>();

  for (Entry<HRegionInfo, ServerName> entry : regionsToScan.entrySet()) {
    long bytes = statsCalculator.getRegionSizeInBytes(entry.getKey().getRegionName());
    String name = entry.getValue().getHostname();
    NodeEndpoint endpoint = executionNodes.getEndpoint(name);
    if(endpoint != null){
      work.add(new RegionWork(entry.getKey(), bytes, new EndpointAffinity(endpoint, bytes)));
    } else {
      work.add(new RegionWork(entry.getKey(), bytes));
    }
  }
  return work.iterator();
}
Project: ditb    File: TestSplitLogManager.java
@Ignore("DLR is broken by HBASE-12751") @Test(timeout=60000)
public void testGetPreviousRecoveryMode() throws Exception {
  LOG.info("testGetPreviousRecoveryMode");
  SplitLogCounters.resetCounters();
  // Not actually enabling DLR for the cluster, just for the ZkCoordinatedStateManager to use.
  // The test is just manipulating ZK manually anyway.
  conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, true);

  zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw, "testRecovery"),
    new SplitLogTask.Unassigned(
      ServerName.valueOf("mgr,1,1"), RecoveryMode.LOG_SPLITTING).toByteArray(),
      Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);

  slm = new SplitLogManager(ds, conf, stopper, master, DUMMY_MASTER);
  LOG.info("Mode1=" + slm.getRecoveryMode());
  assertTrue(slm.isLogSplitting());
  zkw.getRecoverableZooKeeper().delete(ZKSplitLog.getEncodedNodeName(zkw, "testRecovery"), -1);
  LOG.info("Mode2=" + slm.getRecoveryMode());
  slm.setRecoveryMode(false);
  LOG.info("Mode3=" + slm.getRecoveryMode());
  assertTrue("Mode4=" + slm.getRecoveryMode(), slm.isLogReplaying());
}
Project: ditb    File: TestHBaseFsck.java
/**
 * Get region info from local cluster.
 */
Map<ServerName, List<String>> getDeployedHRIs(final HBaseAdmin admin) throws IOException {
  ClusterStatus status = admin.getClusterStatus();
  Collection<ServerName> regionServers = status.getServers();
  Map<ServerName, List<String>> mm =
      new HashMap<ServerName, List<String>>();
  for (ServerName hsi : regionServers) {
    AdminProtos.AdminService.BlockingInterface server = ((HConnection) connection).getAdmin(hsi);

    // list all online regions from this region server
    List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(server);
    List<String> regionNames = new ArrayList<String>();
    for (HRegionInfo hri : regions) {
      regionNames.add(hri.getRegionNameAsString());
    }
    mm.put(hsi, regionNames);
  }
  return mm;
}
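A hedged usage sketch for the helper above, iterating the returned map (the admin handle is the one the surrounding test already holds):

// Sketch only: print how many regions each region server currently hosts.
Map<ServerName, List<String>> deployed = getDeployedHRIs(admin);
for (Map.Entry<ServerName, List<String>> entry : deployed.entrySet()) {
  ServerName rs = entry.getKey();
  System.out.println(rs.getHostname() + ":" + rs.getPort()
      + " hosts " + entry.getValue().size() + " regions");
}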
Project: ditb    File: ProtobufUtil.java
/**
 * A helper to close a region given a region name
 * using admin protocol.
 *
 * @param controller the RPC controller
 * @param admin the admin service stub of the region server hosting the region
 * @param server the region server the request is targeted at (its start code is included)
 * @param regionName the name of the region to close
 * @param versionOfClosingNode the expected version of the closing znode
 * @param destinationServer the server the region is being moved to, if any
 * @param transitionInZK whether to transition the region state in ZK
 * @return true if the region is closed
 * @throws IOException
 */
public static boolean closeRegion(final RpcController controller,
    final AdminService.BlockingInterface admin,
    final ServerName server,
    final byte[] regionName,
    final int versionOfClosingNode, final ServerName destinationServer,
    final boolean transitionInZK) throws IOException {
  CloseRegionRequest closeRegionRequest =
    RequestConverter.buildCloseRegionRequest(server,
      regionName, versionOfClosingNode, destinationServer, transitionInZK);
  try {
    CloseRegionResponse response = admin.closeRegion(controller, closeRegionRequest);
    return ResponseConverter.isClosed(response);
  } catch (ServiceException se) {
    throw getRemoteException(se);
  }
}
Project: ditb    File: Action.java
/** Returns the current region servers, excluding the active master. */
protected ServerName[] getCurrentServers() throws IOException {
  ClusterStatus clusterStatus = cluster.getClusterStatus();
  Collection<ServerName> regionServers = clusterStatus.getServers();
  int count = regionServers == null ? 0 : regionServers.size();
  if (count <= 0) {
    return new ServerName [] {};
  }
  ServerName master = clusterStatus.getMaster();
  if (master == null || !regionServers.contains(master)) {
    return regionServers.toArray(new ServerName[count]);
  }
  if (count == 1) {
    return new ServerName [] {};
  }
  ArrayList<ServerName> tmp = new ArrayList<ServerName>(count);
  tmp.addAll(regionServers);
  tmp.remove(master);
  return tmp.toArray(new ServerName[count-1]);
}
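Roughly the same list can be obtained through the public Admin API. A minimal sketch under the assumption of an HBase 1.x Admin handle (resource handling left to the caller):

// Sketch only: list region servers, excluding the active master as the Action above does.
static List<ServerName> regionServersWithoutMaster(Admin admin) throws IOException {
  ClusterStatus status = admin.getClusterStatus();
  Collection<ServerName> servers = status.getServers();
  ServerName master = status.getMaster();
  List<ServerName> result = new ArrayList<ServerName>(servers);
  result.remove(master); // a no-op when the master is not also listed as a region server
  return result;
}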
Project: ditb    File: EnableTableHandler.java
/**
 * @param regionsInMeta
 * @return Map of regions (with their last known server) that are neither in transition nor assigned.
 * @throws IOException
 */
private Map<HRegionInfo, ServerName> regionsToAssignWithServerName(
    final List<Pair<HRegionInfo, ServerName>> regionsInMeta) throws IOException {
  Map<HRegionInfo, ServerName> regionsToAssign =
      new HashMap<HRegionInfo, ServerName>(regionsInMeta.size());
  RegionStates regionStates = this.assignmentManager.getRegionStates();
  for (Pair<HRegionInfo, ServerName> regionLocation : regionsInMeta) {
    HRegionInfo hri = regionLocation.getFirst();
    ServerName sn = regionLocation.getSecond();
    if (regionStates.isRegionOffline(hri)) {
      regionsToAssign.put(hri, sn);
    } else {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Skipping assign for the region " + hri + " during enable table "
            + hri.getTable() + " because its already in tranition or assigned.");
      }
    }
  }
  return regionsToAssign;
}
Project: ditb    File: ZKSplitTransactionCoordination.java
/**
 * Creates a new ephemeral node in the PENDING_SPLIT state for the specified region. The node is
 * created as ephemeral in case the regionserver dies mid-split.
 * <p>
 * Does not transition nodes from other states. If a node already exists for this region, an
 * Exception will be thrown.
 * @param parent region to be created as offline
 * @param serverName server event originates from
 * @param hri_a daughter region
 * @param hri_b daughter region
 * @throws IOException
 */

@Override
public void startSplitTransaction(HRegion parent, ServerName serverName, HRegionInfo hri_a,
    HRegionInfo hri_b) throws IOException {

  HRegionInfo region = parent.getRegionInfo();
  try {

    LOG.debug(watcher.prefix("Creating ephemeral node for " + region.getEncodedName()
        + " in PENDING_SPLIT state"));
    byte[] payload = HRegionInfo.toDelimitedByteArray(hri_a, hri_b);
    RegionTransition rt =
        RegionTransition.createRegionTransition(RS_ZK_REQUEST_REGION_SPLIT,
          region.getRegionName(), serverName, payload);
    String node = ZKAssign.getNodeName(watcher, region.getEncodedName());
    if (!ZKUtil.createEphemeralNodeAndWatch(watcher, node, rt.toByteArray())) {
      throw new IOException("Failed create of ephemeral " + node);
    }

  } catch (KeeperException e) {
    throw new IOException("Failed creating PENDING_SPLIT znode on "
        + parent.getRegionInfo().getRegionNameAsString(), e);
  }

}
Project: ditb    File: SplitLogManager.java
/**
 * It's OK to construct this object even when region servers are not online. It looks up
 * orphan tasks in the coordination engine but does not block waiting for them to be done.
 * @param server the server instance
 * @param conf the HBase configuration
 * @param stopper the stoppable in case anything is wrong
 * @param master the master services
 * @param serverName the master server name
 * @throws IOException
 */
public SplitLogManager(Server server, Configuration conf, Stoppable stopper,
    MasterServices master, ServerName serverName) throws IOException {
  this.server = server;
  this.conf = conf;
  this.stopper = stopper;
  this.choreService = new ChoreService(serverName.toString() + "_splitLogManager_");
  if (server.getCoordinatedStateManager() != null) {
    SplitLogManagerCoordination coordination =
        ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
            .getSplitLogManagerCoordination();
    Set<String> failedDeletions = Collections.synchronizedSet(new HashSet<String>());
    SplitLogManagerDetails details =
        new SplitLogManagerDetails(tasks, master, failedDeletions, serverName);
    coordination.setDetails(details);
    coordination.init();
    // Determine recovery mode
  }
  this.unassignedTimeout =
      conf.getInt("hbase.splitlog.manager.unassigned.timeout", DEFAULT_UNASSIGNED_TIMEOUT);
  this.timeoutMonitor =
      new TimeoutMonitor(conf.getInt("hbase.splitlog.manager.timeoutmonitor.period", 1000),
          stopper);
  choreService.scheduleChore(timeoutMonitor);
}
Project: ditb    File: FavoredNodeAssignmentHelper.java
/**
 * Update meta table with favored nodes info
 * @param regionToFavoredNodes
 * @param conf
 * @throws IOException
 */
public static void updateMetaWithFavoredNodesInfo(
    Map<HRegionInfo, List<ServerName>> regionToFavoredNodes,
    Configuration conf) throws IOException {
  List<Put> puts = new ArrayList<Put>();
  for (Map.Entry<HRegionInfo, List<ServerName>> entry : regionToFavoredNodes.entrySet()) {
    Put put = makePutFromRegionInfo(entry.getKey(), entry.getValue());
    if (put != null) {
      puts.add(put);
    }
  }
  // Write the region assignments to the meta table.
  // TODO: The overrides above take a Connection rather than a Configuration only because the
  // Connection is a short-circuit connection. That is not going to work in all cases, e.g. when
  // master and meta are not colocated. Fix when this favored nodes feature is actually used
  // someday.
  try (Connection connection = ConnectionFactory.createConnection(conf)) {
    try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME)) {
      metaTable.put(puts);
    }
  }
  LOG.info("Added " + puts.size() + " regions in META");
}
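A hedged sketch of feeding the helper above; the table, region and server names are made up, and conf is an existing Configuration:

// Sketch only: push favored nodes for one hypothetical region into meta.
Map<HRegionInfo, List<ServerName>> favored = new HashMap<HRegionInfo, List<ServerName>>();
HRegionInfo region = new HRegionInfo(TableName.valueOf("example_table"));
favored.put(region, Arrays.asList(
    ServerName.valueOf("rs1.example.org", 16020, ServerName.NON_STARTCODE),
    ServerName.valueOf("rs2.example.org", 16020, ServerName.NON_STARTCODE),
    ServerName.valueOf("rs3.example.org", 16020, ServerName.NON_STARTCODE)));
FavoredNodeAssignmentHelper.updateMetaWithFavoredNodesInfo(favored, conf);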
Project: ditb    File: TestFavoredNodeAssignmentHelper.java
@Ignore("Disabled for now until FavoredNodes gets finished as a feature") @Test
public void testSecondaryAndTertiaryPlacementWithLessThanTwoServersInRacks() {
  // Test the case where we have two racks but with less than two servers in each
  // We will not have enough machines to select secondary/tertiary
  Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
  rackToServerCount.put("rack1", 1);
  rackToServerCount.put("rack2", 1);
  Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
    primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount);
  FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
  Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
  List<HRegionInfo> regions = primaryRSMapAndHelper.getThird();
  assertTrue(primaryRSMap.size() == 6);
  Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
        helper.placeSecondaryAndTertiaryRS(primaryRSMap);
  for (HRegionInfo region : regions) {
    // not enough secondary/tertiary room to place the regions
    assertTrue(secondaryAndTertiaryMap.get(region) == null);
  }
}
Project: ditb    File: BaseLoadBalancer.java
int getLeastLoadedTopServerForRegion(int region) {
  if (regionFinder != null) {
    List<ServerName> topLocalServers = regionFinder.getTopBlockLocations(regions[region]);
    int leastLoadedServerIndex = -1;
    int load = Integer.MAX_VALUE;
    for (ServerName sn : topLocalServers) {
      if (!serversToIndex.containsKey(sn.getHostAndPort())) {
        continue;
      }
      int index = serversToIndex.get(sn.getHostAndPort());
      if (regionsPerServer[index] == null) {
        continue;
      }
      int tempLoad = regionsPerServer[index].length;
      if (tempLoad <= load) {
        leastLoadedServerIndex = index;
        load = tempLoad;
      }
    }
    return leastLoadedServerIndex;
  } else {
    return -1;
  }
}
Project: ditb    File: ServerManager.java
/**
 * If this server is on the dead list, reject it with a YouAreDeadException.
 * If it was dead but came back with a new start code, remove the old entry
 * from the dead list.
 * @param serverName
 * @param what START or REPORT
 * @throws org.apache.hadoop.hbase.YouAreDeadException
 */
private void checkIsDead(final ServerName serverName, final String what)
    throws YouAreDeadException {
  if (this.deadservers.isDeadServer(serverName)) {
    // host name, port and start code all match with existing one of the
    // dead servers. So, this server must be dead.
    String message = "Server " + what + " rejected; currently processing " +
        serverName + " as dead server";
    LOG.debug(message);
    throw new YouAreDeadException(message);
  }
  // Remove a dead server with the same hostname and port as the newly checking-in RS after
  // master initialization. See HBASE-5916 for more information.
  if ((this.services == null || ((HMaster) this.services).isInitialized())
      && this.deadservers.cleanPreviousInstance(serverName)) {
    // This server has now become alive after we marked it as dead.
    // We removed its previous entry from the dead list to reflect it.
    LOG.debug(what + ":" + " Server " + serverName + " came back up," +
        " removed it from the dead servers list");
  }
}
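The "came back with a new start code" case above boils down to two ServerName instances that share host and port but differ in start code. A minimal sketch, assuming ServerName.isSameHostnameAndPort is available in this HBase version:

// Sketch only: equality includes the start code, the address comparison does not.
ServerName died = ServerName.valueOf("rs1.example.org", 16020, 1000L);
ServerName restarted = ServerName.valueOf("rs1.example.org", 16020, 2000L);
System.out.println(died.equals(restarted));                            // false
System.out.println(ServerName.isSameHostnameAndPort(died, restarted)); // true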
Project: ditb    File: MetaScanner.java
/**
 * Lists all of the table regions currently in META.
 * @param connection
 * @param tableName
 * @return Map of all user-space regions to servers
 * @throws IOException
 */
public static NavigableMap<HRegionInfo, ServerName> allTableRegions(
    Connection connection, final TableName tableName) throws IOException {
  final NavigableMap<HRegionInfo, ServerName> regions =
    new TreeMap<HRegionInfo, ServerName>();
  MetaScannerVisitor visitor = new TableMetaScannerVisitor(tableName) {
    @Override
    public boolean processRowInternal(Result result) throws IOException {
      RegionLocations locations = MetaTableAccessor.getRegionLocations(result);
      if (locations == null) return true;
      for (HRegionLocation loc : locations.getRegionLocations()) {
        if (loc != null) {
          HRegionInfo regionInfo = loc.getRegionInfo();
          regions.put(new UnmodifyableHRegionInfo(regionInfo), loc.getServerName());
        }
      }
      return true;
    }
  };
  metaScan(connection, visitor, tableName);
  return regions;
}
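A hedged usage sketch of the scan above, assuming an open connection and a made-up table name:

// Sketch only: print where each region of the table is currently hosted.
NavigableMap<HRegionInfo, ServerName> regions =
    MetaScanner.allTableRegions(connection, TableName.valueOf("example_table"));
for (Map.Entry<HRegionInfo, ServerName> entry : regions.entrySet()) {
  ServerName sn = entry.getValue();
  System.out.println(entry.getKey().getRegionNameAsString() + " -> "
      + (sn == null ? "unassigned" : sn.getHostAndPort()));
}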
Project: ditb    File: RequestConverter.java
/**
 * Create a protocol buffer OpenRegionRequest to open a list of regions
 *
 * @param server the serverName for the RPC
 * @param regionOpenInfos info of a list of regions to open
 * @param openForReplay
 * @return a protocol buffer OpenRegionRequest
 */
public static OpenRegionRequest
    buildOpenRegionRequest(ServerName server, final List<Triple<HRegionInfo, Integer,
        List<ServerName>>> regionOpenInfos, Boolean openForReplay) {
  OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder();
  for (Triple<HRegionInfo, Integer, List<ServerName>> regionOpenInfo: regionOpenInfos) {
    Integer second = regionOpenInfo.getSecond();
    int versionOfOfflineNode = second == null ? -1 : second.intValue();
    builder.addOpenInfo(buildRegionOpenInfo(regionOpenInfo.getFirst(), versionOfOfflineNode,
      regionOpenInfo.getThird(), openForReplay));
  }
  if (server != null) {
    builder.setServerStartCode(server.getStartcode());
  }
  // send the master's wall clock time as well, so that the RS can refer to it
  builder.setMasterSystemTime(EnvironmentEdgeManager.currentTime());
  return builder.build();
}
Project: ditb    File: TestClientNoCluster.java
static MultiResponse doMultiResponse(final SortedMap<byte [], Pair<HRegionInfo, ServerName>> meta,
    final AtomicLong sequenceids, final MultiRequest request) {
  // Make a response to match the request.  Act like there were no failures.
  ClientProtos.MultiResponse.Builder builder = ClientProtos.MultiResponse.newBuilder();
  // Per Region.
  RegionActionResult.Builder regionActionResultBuilder =
      RegionActionResult.newBuilder();
  ResultOrException.Builder roeBuilder = ResultOrException.newBuilder();
  for (RegionAction regionAction: request.getRegionActionList()) {
    regionActionResultBuilder.clear();
    // Per Action in a Region.
    for (ClientProtos.Action action: regionAction.getActionList()) {
      roeBuilder.clear();
      // Return empty Result and proper index as result.
      roeBuilder.setResult(ClientProtos.Result.getDefaultInstance());
      roeBuilder.setIndex(action.getIndex());
      regionActionResultBuilder.addResultOrException(roeBuilder.build());
    }
    builder.addRegionActionResult(regionActionResultBuilder.build());
  }
  return builder.build();
}
Project: ditb    File: RegionStates.java
synchronized boolean isServerDeadAndNotProcessed(ServerName server) {
  if (server == null) return false;
  if (serverManager.isServerOnline(server)) {
    String hostAndPort = server.getHostAndPort();
    long startCode = server.getStartcode();
    Long deadCode = deadServers.get(hostAndPort);
    if (deadCode == null || startCode > deadCode.longValue()) {
      if (serverManager.isServerReachable(server)) {
        return false;
      }
      // The size of deadServers won't grow unbounded.
      deadServers.put(hostAndPort, Long.valueOf(startCode));
    }
    // Watch out! If the server is not dead, the region could
    // remain unassigned. That's why ServerManager#isServerReachable
    // should use some retry.
    //
    // We cache this info since it is very unlikely for that
    // instance to come back up later on. We don't want to expire
    // the server since we prefer to let it die naturally.
    LOG.warn("Couldn't reach online server " + server);
  }
  // Now, we know it's dead. Check if it's processed
  return !processedServers.containsKey(server);
}
Project: ditb    File: PreemptiveFastFailInterceptor.java
/**
 * Handles failures encountered when communicating with a server.
 *
 * Updates the FailureInfo in repeatedFailuresMap to reflect the failure.
 * Throws RepeatedConnectException if the client is in Fast fail mode.
 *
 * @param serverName
 * @param t
 *          - the throwable to be handled.
 * @throws PreemptiveFastFailException
 */
private void handleFailureToServer(ServerName serverName, Throwable t) {
  if (serverName == null || t == null) {
    return;
  }
  long currentTime = EnvironmentEdgeManager.currentTime();
  FailureInfo fInfo = repeatedFailuresMap.get(serverName);
  if (fInfo == null) {
    fInfo = new FailureInfo(currentTime);
    FailureInfo oldfInfo = repeatedFailuresMap.putIfAbsent(serverName, fInfo);

    if (oldfInfo != null) {
      fInfo = oldfInfo;
    }
  }
  fInfo.timeOfLatestAttemptMilliSec = currentTime;
  fInfo.numConsecutiveFailures.incrementAndGet();
}
Project: ditb    File: TestRegionLocationFinder.java
@Test
public void testGetTopBlockLocations() throws Exception {
  for (int i = 0; i < ServerNum; i++) {
    HRegionServer server = cluster.getRegionServer(i);
    for (Region region : server.getOnlineRegions(tableName)) {
      List<ServerName> servers = finder.getTopBlockLocations(region.getRegionInfo());
      // test table may have empty region
      if (region.getHDFSBlocksDistribution().getUniqueBlocksTotalWeight() == 0) {
        continue;
      }
      List<String> topHosts = region.getHDFSBlocksDistribution().getTopHosts();
      // the RS and the DataNode may be on different hosts in a local-machine test
      if (!topHosts.contains(server.getServerName().getHostname())) {
        continue;
      }
      for (int j = 0; j < ServerNum; j++) {
        ServerName serverName = cluster.getRegionServer(j).getServerName();
        assertTrue(servers.contains(serverName));
      }
    }
  }
}
Project: ditb    File: HRegionServer.java
private RegionServerStartupResponse reportForDuty() throws IOException {
  ServerName masterServerName = createRegionServerStatusStub();
  if (masterServerName == null) return null;
  RegionServerStartupResponse result = null;
  try {
    rpcServices.requestCount.set(0);
    LOG.info(
        "reportForDuty to master=" + masterServerName + " with port=" + rpcServices.isa.getPort()
            + ", startcode=" + this.startcode);
    long now = EnvironmentEdgeManager.currentTime();
    int port = rpcServices.isa.getPort();
    RegionServerStartupRequest.Builder request = RegionServerStartupRequest.newBuilder();
    if (shouldUseThisHostnameInstead()) {
      request.setUseThisHostnameInstead(useThisHostnameInstead);
    }
    request.setPort(port);
    request.setServerStartCode(this.startcode);
    request.setServerCurrentTime(now);
    result = this.rssStub.regionServerStartup(null, request.build());
  } catch (ServiceException se) {
    IOException ioe = ProtobufUtil.getRemoteException(se);
    if (ioe instanceof ClockOutOfSyncException) {
      LOG.fatal("Master rejected startup because clock is out of sync", ioe);
      // Re-throw IOE will cause RS to abort
      throw ioe;
    } else if (ioe instanceof ServerNotRunningYetException) {
      LOG.debug("Master is not running yet");
    } else {
      LOG.warn("error telling master we are up", se);
    }
    rssStub = null;
  }
  return result;
}
Project: QDrill    File: TestHBaseRegionScanAssignments.java
@Test
public void testHBaseGroupScanAssignmentMix() throws Exception {
  NavigableMap<HRegionInfo,ServerName> regionsToScan = Maps.newTreeMap();
  regionsToScan.put(new HRegionInfo(TABLE_NAME, splits[0], splits[1]), SERVER_A);
  regionsToScan.put(new HRegionInfo(TABLE_NAME, splits[1], splits[2]), SERVER_B);
  regionsToScan.put(new HRegionInfo(TABLE_NAME, splits[2], splits[3]), SERVER_B);
  regionsToScan.put(new HRegionInfo(TABLE_NAME, splits[3], splits[4]), SERVER_A);
  regionsToScan.put(new HRegionInfo(TABLE_NAME, splits[4], splits[5]), SERVER_A);
  regionsToScan.put(new HRegionInfo(TABLE_NAME, splits[5], splits[6]), SERVER_D);
  regionsToScan.put(new HRegionInfo(TABLE_NAME, splits[6], splits[7]), SERVER_C);
  regionsToScan.put(new HRegionInfo(TABLE_NAME, splits[7], splits[0]), SERVER_D);

  final List<DrillbitEndpoint> endpoints = Lists.newArrayList();
  final DrillbitEndpoint DB_A = DrillbitEndpoint.newBuilder().setAddress(HOST_A).setControlPort(1234).build();
  endpoints.add(DB_A);
  endpoints.add(DB_A);
  final DrillbitEndpoint DB_B = DrillbitEndpoint.newBuilder().setAddress(HOST_B).setControlPort(1234).build();
  endpoints.add(DB_B);
  final DrillbitEndpoint DB_D = DrillbitEndpoint.newBuilder().setAddress(HOST_D).setControlPort(1234).build();
  endpoints.add(DB_D);
  final DrillbitEndpoint DB_X = DrillbitEndpoint.newBuilder().setAddress(HOST_X).setControlPort(1234).build();
  endpoints.add(DB_X);

  HBaseGroupScan scan = new HBaseGroupScan();
  scan.setRegionsToScan(regionsToScan);
  scan.setHBaseScanSpec(new HBaseScanSpec(TABLE_NAME_STR, splits[0], splits[0], null));
  scan.applyAssignments(endpoints);

  int i = 0;
  assertEquals(2, scan.getSpecificScan(i++).getRegionScanSpecList().size()); // 'A'
  assertEquals(1, scan.getSpecificScan(i++).getRegionScanSpecList().size()); // 'A'
  assertEquals(2, scan.getSpecificScan(i++).getRegionScanSpecList().size()); // 'B'
  assertEquals(2, scan.getSpecificScan(i++).getRegionScanSpecList().size()); // 'D'
  assertEquals(1, scan.getSpecificScan(i++).getRegionScanSpecList().size()); // 'X'
  testParallelizationWidth(scan, i);
}
Project: ditb    File: SplitLogManager.java
public void markRegionsRecovering(ServerName server, Set<HRegionInfo> userRegions)
    throws InterruptedIOException, IOException {
  if (userRegions == null || (!isLogReplaying())) {
    return;
  }
  try {
    this.recoveringRegionLock.lock();
    // mark that we're creating recovering regions
    ((BaseCoordinatedStateManager) this.server.getCoordinatedStateManager())
        .getSplitLogManagerCoordination().markRegionsRecovering(server, userRegions);
  } finally {
    this.recoveringRegionLock.unlock();
  }

}
Project: ditb    File: TestReplicationSinkManager.java
@Test
public void testChooseSinks_LessThanRatioAvailable() {
  List<ServerName> serverNames = Lists.newArrayList(mock(ServerName.class),
    mock(ServerName.class));

  when(replicationEndpoint.getRegionServers())
        .thenReturn(serverNames);

  sinkManager.chooseSinks();

  assertEquals(1, sinkManager.getNumSinks());
}
Project: ditb    File: MetaTableLocator.java
/**
 * Gets the meta region location, if available.  Does not block.
 * @param zkw ZooKeeper watcher to use
 * @param replicaId the replica id of the meta region
 * @return server name of the meta region location, or null if not currently available
 */
public ServerName getMetaRegionLocation(final ZooKeeperWatcher zkw, int replicaId) {
  try {
    RegionState state = getMetaRegionState(zkw, replicaId);
    return state.isOpened() ? state.getServerName() : null;
  } catch (KeeperException ke) {
    return null;
  }
}
Project: ditb    File: HBaseFsckRepair.java
/**
 * Contacts a region server and waits up to hbase.hbck.close.timeout ms
 * (default 120s) to close the region.  This bypasses the active hmaster.
 */
@SuppressWarnings("deprecation")
public static void closeRegionSilentlyAndWait(HConnection connection,
    ServerName server, HRegionInfo region) throws IOException, InterruptedException {
  long timeout = connection.getConfiguration()
    .getLong("hbase.hbck.close.timeout", 120000);
  ServerManager.closeRegionSilentlyAndWait((ClusterConnection)connection, server,
      region, timeout);
}
Project: ditb    File: HBaseFsckRepair.java
/**
 * Fix multiple assignment by doing silent closes on each RS hosting the region
 * and then force ZK unassigned node to OFFLINE to trigger assignment by
 * master.
 *
 * @param connection HBase connection to the cluster
 * @param region Region to undeploy
 * @param servers list of Servers to undeploy from
 */
public static void fixMultiAssignment(HConnection connection, HRegionInfo region,
    List<ServerName> servers)
throws IOException, KeeperException, InterruptedException {
  HRegionInfo actualRegion = new HRegionInfo(region);

  // Close region on the servers silently
  for(ServerName server : servers) {
    closeRegionSilentlyAndWait(connection, server, actualRegion);
  }

  // Force ZK node to OFFLINE so master assigns
  forceOfflineInZK(connection.getAdmin(), actualRegion);
}
Project: ditb    File: DeadServer.java
public synchronized void finish(ServerName sn) {
  numProcessing--;
  if (LOG.isDebugEnabled()) LOG.debug("Finished " + sn + "; numProcessing=" + numProcessing);

  assert numProcessing >= 0: "Number of dead servers in processing should always be non-negative";

  if (numProcessing < 0) {
    LOG.error("Number of dead servers in processing = " + numProcessing
        + ". Something went wrong, this should always be non-negative.");
    numProcessing = 0;
  }
  if (numProcessing == 0) { processing = false; }
}
Project: ditb    File: HMaster.java
public String getRegionServerVersion(final ServerName sn) {
  RegionServerInfo info = this.regionServerTracker.getRegionServerInfo(sn);
  if (info != null && info.hasVersionInfo()) {
    return info.getVersionInfo().getVersion();
  }
  return "Unknown";
}
Project: ditb    File: RestartRandomZKNodeAction.java
@Override
public void perform() throws Exception {
  LOG.info("Performing action: Restart random zookeeper node");
  ServerName server = PolicyBasedChaosMonkey.selectRandomItem(
      ZKServerTool.readZKNodes(getConf()));
  restartZKNode(server, sleepTime);
}
Project: ditb    File: RegionMergeTransactionImpl.java
private void mergeRegionsAndPutMetaEntries(HConnection hConnection,
    HRegionInfo mergedRegion, HRegionInfo regionA, HRegionInfo regionB,
    ServerName serverName, List<Mutation> metaEntries,
    int regionReplication) throws IOException {
  prepareMutationsForMerge(mergedRegion, regionA, regionB, serverName, metaEntries,
    regionReplication);
  MetaTableAccessor.mutateMetaTable(hConnection, metaEntries);
}
Project: ditb    File: RequestConverter.java
/**
 * Create a CloseRegionRequest for a given region name
 *
 * @param server the region server the request is targeted at (its start code is included)
 * @param regionName the name of the region to close
 * @param transitionInZK whether to transition the region state in ZK
 * @return a CloseRegionRequest
 */
public static CloseRegionRequest buildCloseRegionRequest(ServerName server,
    final byte[] regionName, final boolean transitionInZK) {
  CloseRegionRequest.Builder builder = CloseRegionRequest.newBuilder();
  RegionSpecifier region = buildRegionSpecifier(
    RegionSpecifierType.REGION_NAME, regionName);
  builder.setRegion(region);
  builder.setTransitionInZK(transitionInZK);
  if (server != null) {
    builder.setServerStartCode(server.getStartcode());
  }
  return builder.build();
}
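A hedged sketch of calling the converter above with a made-up server and region name; the start code presumably lets the region server reject requests meant for an earlier instance:

// Sketch only: the request carries the target server's start code.
ServerName rs = ServerName.valueOf("rs1.example.org", 16020, 1423987654321L);
byte[] regionName = Bytes.toBytes("hypothetical-region-name");
CloseRegionRequest request = RequestConverter.buildCloseRegionRequest(rs, regionName, true);
System.out.println(request.getServerStartCode()); // 1423987654321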
Project: ditb    File: BaseLoadBalancer.java
/**
 * Round-robin a list of regions across a list of servers.
 */
private void roundRobinAssignment(Cluster cluster, List<HRegionInfo> regions,
    List<HRegionInfo> unassignedRegions, List<ServerName> servers,
    Map<ServerName, List<HRegionInfo>> assignments) {

  int numServers = servers.size();
  int numRegions = regions.size();
  int max = (int) Math.ceil((float) numRegions / numServers);
  int serverIdx = 0;
  if (numServers > 1) {
    serverIdx = RANDOM.nextInt(numServers);
  }
  int regionIdx = 0;

  for (int j = 0; j < numServers; j++) {
    ServerName server = servers.get((j + serverIdx) % numServers);
    List<HRegionInfo> serverRegions = new ArrayList<HRegionInfo>(max);
    for (int i = regionIdx; i < numRegions; i += numServers) {
      HRegionInfo region = regions.get(i % numRegions);
      if (cluster.wouldLowerAvailability(region, server)) {
        unassignedRegions.add(region);
      } else {
        serverRegions.add(region);
        cluster.doAssignRegion(region, server);
      }
    }
    assignments.put(server, serverRegions);
    regionIdx++;
  }
}
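For intuition on the arithmetic above: with numRegions = 10 and numServers = 3, max = ceil(10/3) = 4, and the stride-by-numServers loop hands out region indexes 0,3,6,9 / 1,4,7 / 2,5,8, rotated by the random serverIdx. A standalone sketch of just the index arithmetic, with serverIdx fixed at 0:

// Sketch only: reproduce the round-robin stride pattern.
int numRegions = 10, numServers = 3;
int max = (int) Math.ceil((float) numRegions / numServers);
System.out.println("per-server capacity hint max=" + max); // 4
for (int j = 0; j < numServers; j++) {
  StringBuilder sb = new StringBuilder("server " + j + " gets regions:");
  for (int i = j; i < numRegions; i += numServers) {
    sb.append(' ').append(i % numRegions);
  }
  System.out.println(sb); // 0 3 6 9 / 1 4 7 / 2 5 8
}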
Project: ditb    File: TestActiveMasterManager.java
public DummyMaster(ZooKeeperWatcher zk, ServerName master) {
  this.clusterStatusTracker =
    new ClusterStatusTracker(zk, this);
  clusterStatusTracker.start();

  this.activeMasterManager =
    new ActiveMasterManager(zk, master, this);
  zk.registerListener(activeMasterManager);
}
Project: ditb    File: ZkSplitLogWorkerCoordination.java
/**
 * Try to own the task by transitioning the zk node data from UNASSIGNED to OWNED.
 * <p>
 * This method is also used to periodically heartbeat the task progress by transitioning the node
 * from OWNED to OWNED.
 * <p>
 * @param isFirstTime shows whether it's the first attempt.
 * @param zkw zk watcher
 * @param server the current region server's name
 * @param task the task to own
 * @param mode the recovery mode of the task
 * @param taskZKVersion version of the task in zk
 * @return non-negative integer value when task can be owned by current region server otherwise -1
 */
protected static int attemptToOwnTask(boolean isFirstTime, ZooKeeperWatcher zkw,
    ServerName server, String task, RecoveryMode mode, int taskZKVersion) {
  int latestZKVersion = FAILED_TO_OWN_TASK;
  try {
    SplitLogTask slt = new SplitLogTask.Owned(server, mode);
    Stat stat = zkw.getRecoverableZooKeeper().setData(task, slt.toByteArray(), taskZKVersion);
    if (stat == null) {
      LOG.warn("zk.setData() returned null for path " + task);
      SplitLogCounters.tot_wkr_task_heartbeat_failed.incrementAndGet();
      return FAILED_TO_OWN_TASK;
    }
    latestZKVersion = stat.getVersion();
    SplitLogCounters.tot_wkr_task_heartbeat.incrementAndGet();
    return latestZKVersion;
  } catch (KeeperException e) {
    if (!isFirstTime) {
      if (e.code().equals(KeeperException.Code.NONODE)) {
        LOG.warn("NONODE failed to assert ownership for " + task, e);
      } else if (e.code().equals(KeeperException.Code.BADVERSION)) {
        LOG.warn("BADVERSION failed to assert ownership for " + task, e);
      } else {
        LOG.warn("failed to assert ownership for " + task, e);
      }
    }
  } catch (InterruptedException e1) {
    LOG.warn("Interrupted while trying to assert ownership of " + task + " "
        + StringUtils.stringifyException(e1));
    Thread.currentThread().interrupt();
  }
  SplitLogCounters.tot_wkr_task_heartbeat_failed.incrementAndGet();
  return FAILED_TO_OWN_TASK;
}
Project: ditb    File: TestRegionStates.java
@Test
public void testWeDontReturnDrainingServersForOurBalancePlans() throws Exception {
  Server server = mock(Server.class);
  when(server.getServerName()).thenReturn(ServerName.valueOf("master,1,1"));
  Configuration configuration = mock(Configuration.class);
  when(server.getConfiguration()).thenReturn(configuration);
  TableStateManager tsm = mock(TableStateManager.class);
  ServerManager sm = mock(ServerManager.class);
  when(sm.isServerOnline(isA(ServerName.class))).thenReturn(true);

  RegionStateStore rss = mock(RegionStateStore.class);
  RegionStates regionStates = new RegionStates(server, tsm, sm, rss);

  ServerName one = mockServer("one", 1);
  ServerName two = mockServer("two", 1);
  ServerName three = mockServer("three", 1);

  when(sm.getDrainingServersList()).thenReturn(Arrays.asList(three));

  regionStates.regionOnline(createFakeRegion(), one);
  regionStates.regionOnline(createFakeRegion(), two);
  regionStates.regionOnline(createFakeRegion(), three);


  Map<TableName, Map<ServerName, List<HRegionInfo>>> result =
      regionStates.getAssignmentsByTable();
  for (Map<ServerName, List<HRegionInfo>> map : result.values()) {
    assertFalse(map.keySet().contains(three));
  }
}
Project: ditb    File: RegionPlacementMaintainer.java
/**
 * @param favoredNodesStr The String of favored nodes
 * @return the list of ServerName parsed from the favored nodes String.
 */
public static List<ServerName> getFavoredNodeList(String favoredNodesStr) {
  String[] favoredNodesArray = StringUtils.split(favoredNodesStr, ",");
  if (favoredNodesArray == null)
    return null;

  List<ServerName> serverList = new ArrayList<ServerName>();
  for (String hostNameAndPort : favoredNodesArray) {
    serverList.add(ServerName.valueOf(hostNameAndPort, ServerName.NON_STARTCODE));
  }
  return serverList;
}
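A hedged sketch of the round trip, with made-up host:port pairs; the missing start codes come back as ServerName.NON_STARTCODE, exactly as in the helper above:

// Sketch only: parse a comma-separated favored-nodes string.
String favoredNodesStr = "rs1.example.org:16020,rs2.example.org:16020,rs3.example.org:16020";
List<ServerName> favored = RegionPlacementMaintainer.getFavoredNodeList(favoredNodesStr);
for (ServerName sn : favored) {
  System.out.println(sn.getHostAndPort() + " startcode=" + sn.getStartcode()); // startcode=-1
}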
Project: ditb    File: TestAssignmentManagerOnCluster.java
@Override
public ServerName randomAssignment(HRegionInfo regionInfo,
    List<ServerName> servers) {
  if (regionInfo.getEncodedName().equals(controledRegion)) {
    return null;
  }
  return super.randomAssignment(regionInfo, servers);
}
Project: ditb    File: BalancerTestBase.java
protected TreeMap<ServerName, List<HRegionInfo>> mockClusterServers(int[] mockCluster, int numTables) {
  int numServers = mockCluster.length;
  TreeMap<ServerName, List<HRegionInfo>> servers = new TreeMap<ServerName, List<HRegionInfo>>();
  for (int i = 0; i < numServers; i++) {
    int numRegions = mockCluster[i];
    ServerAndLoad sal = randomServer(0);
    List<HRegionInfo> regions = randomRegions(numRegions, numTables);
    servers.put(sal.getServerName(), regions);
  }
  return servers;
}
Project: ditb    File: TestSplitLogManager.java
@Before
public void setup() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  TEST_UTIL.startMiniZKCluster();
  conf = TEST_UTIL.getConfiguration();
  // Use a different ZK wrapper instance for each test.
  zkw =
      new ZooKeeperWatcher(conf, "split-log-manager-tests" + UUID.randomUUID().toString(), null);
  ds = new DummyServer(zkw, conf);

  ZKUtil.deleteChildrenRecursively(zkw, zkw.baseZNode);
  ZKUtil.createAndFailSilent(zkw, zkw.baseZNode);
  assertTrue(ZKUtil.checkExists(zkw, zkw.baseZNode) != -1);
  LOG.debug(zkw.baseZNode + " created");
  ZKUtil.createAndFailSilent(zkw, zkw.splitLogZNode);
  assertTrue(ZKUtil.checkExists(zkw, zkw.splitLogZNode) != -1);
  LOG.debug(zkw.splitLogZNode + " created");

  stopped = false;
  resetCounters();

  // By default, we let the test manage the error as before, so the server
  // does not appear as dead from the master's point of view, only from the split log's.
  Mockito.when(sm.isServerOnline(Mockito.any(ServerName.class))).thenReturn(true);
  Mockito.when(master.getServerManager()).thenReturn(sm);

  to = 12000;
  conf.setInt(HConstants.HBASE_SPLITLOG_MANAGER_TIMEOUT, to);
  conf.setInt("hbase.splitlog.manager.unassigned.timeout", 2 * to);

  conf.setInt("hbase.splitlog.manager.timeoutmonitor.period", 100);
  to = to + 4 * 100;

  this.mode =
      (conf.getBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false) ? RecoveryMode.LOG_REPLAY
          : RecoveryMode.LOG_SPLITTING);
}