Java class org.apache.hadoop.hbase.master.handler.ServerShutdownHandler: example source code
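The examples below show how org.apache.hadoop.hbase.master.handler.ServerShutdownHandler is used in a number of open-source HBase-derived projects. Each snippet is labeled with the project and source file it was extracted from.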

Project: pbase    File: ServerManager.java
public synchronized void processDeadServer(final ServerName serverName, boolean shouldSplitWal) {
  // When assignment manager is cleaning up the zookeeper nodes and rebuilding the
  // in-memory region states, region servers could be down. Meta table can and
  // should be re-assigned, log splitting can be done too. However, it is better to
  // wait till the cleanup is done before re-assigning user regions.
  //
  // We should not wait in the server shutdown handler thread since it can clog
  // the handler threads and meta table could not be re-assigned in case
  // the corresponding server is down. So we queue them up here instead.
  if (!services.getAssignmentManager().isFailoverCleanupDone()) {
    requeuedDeadServers.put(serverName, shouldSplitWal);
    return;
  }

  this.deadservers.add(serverName);
  this.services.getExecutorService().submit(
    new ServerShutdownHandler(this.master, this.services, this.deadservers, serverName,
        shouldSplitWal));
}
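Every processDeadServer variant in this collection follows the same queue-or-submit pattern: while failover cleanup is still running, the server is parked in requeuedDeadServers; otherwise it is recorded in the dead-server list and the real work is handed to a ServerShutdownHandler on the master's executor. The following is a minimal, self-contained sketch of that pattern in plain Java; the class and member names (DeadServerProcessor, finishFailoverCleanup, and so on) are illustrative stand-ins, not HBase APIs.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicBoolean;

// Hypothetical analogue of ServerManager.processDeadServer: defer work while
// failover cleanup is running, otherwise submit a shutdown task asynchronously.
public class DeadServerProcessor {
  private final AtomicBoolean failoverCleanupDone = new AtomicBoolean(false);
  // Parallels requeuedDeadServers: server name -> shouldSplitWal flag.
  private final Map<String, Boolean> requeued = new ConcurrentHashMap<>();
  private final ExecutorService executor = Executors.newSingleThreadExecutor();

  public synchronized void processDeadServer(String serverName, boolean shouldSplitWal) {
    if (!failoverCleanupDone.get()) {
      // Cleanup still in progress: queue instead of blocking a handler thread.
      requeued.put(serverName, shouldSplitWal);
      return;
    }
    executor.submit(() -> shutdown(serverName, shouldSplitWal));
  }

  public synchronized void finishFailoverCleanup() {
    failoverCleanupDone.set(true);
    // Drain everything that arrived while cleanup was running.
    requeued.forEach(this::processDeadServer);
    requeued.clear();
  }

  private void shutdown(String serverName, boolean shouldSplitWal) {
    System.out.println("Processing dead server " + serverName
        + " (splitWal=" + shouldSplitWal + ")");
  }

  public static void main(String[] args) throws InterruptedException {
    DeadServerProcessor p = new DeadServerProcessor();
    p.processDeadServer("rs1,16020,1", true);   // queued: cleanup not done yet
    p.finishFailoverCleanup();                  // drains the queue
    p.processDeadServer("rs2,16020,1", false);  // submitted immediately
    Thread.sleep(100);
    p.executor.shutdown();
  }
}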
Project: HIndex    File: ServerManager.java
public synchronized void processDeadServer(final ServerName serverName, boolean shouldSplitHlog) {
  // When assignment manager is cleaning up the zookeeper nodes and rebuilding the
  // in-memory region states, region servers could be down. Meta table can and
  // should be re-assigned, log splitting can be done too. However, it is better to
  // wait till the cleanup is done before re-assigning user regions.
  //
  // We should not wait in the server shutdown handler thread since it can clog
  // the handler threads and meta table could not be re-assigned in case
  // the corresponding server is down. So we queue them up here instead.
  if (!services.getAssignmentManager().isFailoverCleanupDone()) {
    requeuedDeadServers.put(serverName, shouldSplitHlog);
    return;
  }

  this.deadservers.add(serverName);
  this.services.getExecutorService().submit(
    new ServerShutdownHandler(this.master, this.services, this.deadservers, serverName,
        shouldSplitHlog));
}
Project: PyroDB    File: ServerManager.java
public synchronized void processDeadServer(final ServerName serverName, boolean shouldSplitHlog) {
  // When assignment manager is cleaning up the zookeeper nodes and rebuilding the
  // in-memory region states, region servers could be down. Meta table can and
  // should be re-assigned, log splitting can be done too. However, it is better to
  // wait till the cleanup is done before re-assigning user regions.
  //
  // We should not wait in the server shutdown handler thread since it can clog
  // the handler threads and meta table could not be re-assigned in case
  // the corresponding server is down. So we queue them up here instead.
  if (!services.getAssignmentManager().isFailoverCleanupDone()) {
    requeuedDeadServers.put(serverName, shouldSplitHlog);
    return;
  }

  this.deadservers.add(serverName);
  this.services.getExecutorService().submit(
    new ServerShutdownHandler(this.master, this.services, this.deadservers, serverName,
        shouldSplitHlog));
}
Project: c5    File: ServerManager.java
public synchronized void processDeadServer(final ServerName serverName, boolean shouldSplitHlog) {
  // When assignment manager is cleaning up the zookeeper nodes and rebuilding the
  // in-memory region states, region servers could be down. Meta table can and
  // should be re-assigned, log splitting can be done too. However, it is better to
  // wait till the cleanup is done before re-assigning user regions.
  //
  // We should not wait in the server shutdown handler thread since it can clog
  // the handler threads and meta table could not be re-assigned in case
  // the corresponding server is down. So we queue them up here instead.
  if (!services.getAssignmentManager().isFailoverCleanupDone()) {
    requeuedDeadServers.put(serverName, shouldSplitHlog);
    return;
  }

  this.deadservers.add(serverName);
  this.services.getExecutorService().submit(
    new ServerShutdownHandler(this.master, this.services, this.deadservers, serverName,
        shouldSplitHlog));
}
Project: DominoHBase    File: ServerManager.java
public synchronized void processDeadServer(final ServerName serverName) {
  // When assignment manager is cleaning up the zookeeper nodes and rebuilding the
  // in-memory region states, region servers could be down. Root/meta table can and
  // should be re-assigned, log splitting can be done too. However, it is better to
  // wait till the cleanup is done before re-assigning user regions.
  //
  // We should not wait in the server shutdown handler thread since it can clog
  // the handler threads and root/meta table could not be re-assigned in case
  // the corresponding server is down. So we queue them up here instead.
  if (!services.getAssignmentManager().isFailoverCleanupDone()) {
    requeuedDeadServers.add(serverName);
    return;
  }

  this.deadservers.add(serverName);
  this.services.getExecutorService().submit(new ServerShutdownHandler(
    this.master, this.services, this.deadservers, serverName, false));
}
Project: LCIndex-HBase-0.94.16    File: HMaster.java
void fixupDaughters(final MonitoredTask status) throws IOException, KeeperException {
  final Map<HRegionInfo, Result> offlineSplitParents =
    new HashMap<HRegionInfo, Result>();
  // This visitor collects offline split parents in the .META. table
  MetaReader.Visitor visitor = new MetaReader.Visitor() {
    @Override
    public boolean visit(Result r) throws IOException {
      if (r == null || r.isEmpty()) return true;
      HRegionInfo info =
        MetaReader.parseHRegionInfoFromCatalogResult(
          r, HConstants.REGIONINFO_QUALIFIER);
      if (info == null) return true; // Keep scanning
      if (info.isOffline() && info.isSplit()) {
        offlineSplitParents.put(info, r);
      }
      // Returning true means "keep scanning"
      return true;
    }
  };
  // Run full scan of .META. catalog table passing in our custom visitor
  MetaReader.fullScan(this.catalogTracker, visitor);
  // Now work on our list of found parents. See if any we can clean up.
  int fixups = 0;
  for (Map.Entry<HRegionInfo, Result> e : offlineSplitParents.entrySet()) {
    String node = ZKAssign.getNodeName(zooKeeper, e.getKey().getEncodedName());
    byte[] data = ZKUtil.getData(zooKeeper, node);
    if (data == null) { // otherwise, splitting is still going on, skip it
      fixups += ServerShutdownHandler.fixupDaughters(
        e.getValue(), assignmentManager, catalogTracker);
    }
  }
  if (fixups != 0) {
    LOG.info("Scanned the catalog and fixed up " + fixups +
      " missing daughter region(s)");
  }
}
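The fixupDaughters implementations above and below all drive a MetaReader.fullScan with a Visitor whose boolean return value means "keep scanning". Here is a stripped-down, self-contained sketch of that collect-with-a-visitor idiom; the Visitor interface and fullScan loop are simplified stand-ins for the HBase types, not the real API.

import java.util.ArrayList;
import java.util.List;

public class VisitorScanDemo {
  // Simplified stand-in for MetaReader.Visitor: return true to keep scanning.
  interface Visitor<T> {
    boolean visit(T row);
  }

  // Simplified stand-in for MetaReader.fullScan: feed every row to the
  // visitor until it asks to stop.
  static <T> void fullScan(List<T> table, Visitor<T> visitor) {
    for (T row : table) {
      if (!visitor.visit(row)) {
        break;
      }
    }
  }

  public static void main(String[] args) {
    List<String> rows = List.of("region-a:offline+split", "region-b:online",
        "region-c:offline+split");
    List<String> offlineSplitParents = new ArrayList<>();
    // Collect matching rows, just as fixupDaughters collects offline split
    // parents, and always return true so the whole table is scanned.
    fullScan(rows, row -> {
      if (row.endsWith("offline+split")) {
        offlineSplitParents.add(row);
      }
      return true; // keep scanning
    });
    System.out.println(offlineSplitParents);
  }
}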
Project: IRIndex    File: HMaster.java
void fixupDaughters(final MonitoredTask status) throws IOException, KeeperException {
  final Map<HRegionInfo, Result> offlineSplitParents =
    new HashMap<HRegionInfo, Result>();
  // This visitor collects offline split parents in the .META. table
  MetaReader.Visitor visitor = new MetaReader.Visitor() {
    @Override
    public boolean visit(Result r) throws IOException {
      if (r == null || r.isEmpty()) return true;
      HRegionInfo info =
        MetaReader.parseHRegionInfoFromCatalogResult(
          r, HConstants.REGIONINFO_QUALIFIER);
      if (info == null) return true; // Keep scanning
      if (info.isOffline() && info.isSplit()) {
        offlineSplitParents.put(info, r);
      }
      // Returning true means "keep scanning"
      return true;
    }
  };
  // Run full scan of .META. catalog table passing in our custom visitor
  MetaReader.fullScan(this.catalogTracker, visitor);
  // Now work on our list of found parents. See if any we can clean up.
  int fixups = 0;
  for (Map.Entry<HRegionInfo, Result> e : offlineSplitParents.entrySet()) {
    String node = ZKAssign.getNodeName(zooKeeper, e.getKey().getEncodedName());
    byte[] data = ZKUtil.getData(zooKeeper, node);
    if (data == null) { // otherwise, splitting is still going on, skip it
      fixups += ServerShutdownHandler.fixupDaughters(
        e.getValue(), assignmentManager, catalogTracker);
    }
  }
  if (fixups != 0) {
    LOG.info("Scanned the catalog and fixed up " + fixups +
      " missing daughter region(s)");
  }
}
Project: RStore    File: HMaster.java
void fixupDaughters(final MonitoredTask status) throws IOException {
  final Map<HRegionInfo, Result> offlineSplitParents =
    new HashMap<HRegionInfo, Result>();
  // This visitor collects offline split parents in the .META. table
  MetaReader.Visitor visitor = new MetaReader.Visitor() {
    @Override
    public boolean visit(Result r) throws IOException {
      if (r == null || r.isEmpty()) return true;
      HRegionInfo info =
        MetaReader.parseHRegionInfoFromCatalogResult(
          r, HConstants.REGIONINFO_QUALIFIER);
      if (info == null) return true; // Keep scanning
      if (info.isOffline() && info.isSplit()) {
        offlineSplitParents.put(info, r);
      }
      // Returning true means "keep scanning"
      return true;
    }
  };
  // Run full scan of .META. catalog table passing in our custom visitor
  MetaReader.fullScan(this.catalogTracker, visitor);
  // Now work on our list of found parents. See if any we can clean up.
  int fixups = 0;
  for (Map.Entry<HRegionInfo, Result> e : offlineSplitParents.entrySet()) {
    fixups += ServerShutdownHandler.fixupDaughters(
        e.getValue(), assignmentManager, catalogTracker);
  }
  if (fixups != 0) {
    LOG.info("Scanned the catalog and fixed up " + fixups +
      " missing daughter region(s)");
  }
}
Project: HBase-Research    File: HMaster.java
void fixupDaughters(final MonitoredTask status) throws IOException {
  final Map<HRegionInfo, Result> offlineSplitParents =
    new HashMap<HRegionInfo, Result>();
  // This visitor collects offline split parents in the .META. table
  MetaReader.Visitor visitor = new MetaReader.Visitor() {
    @Override
    public boolean visit(Result r) throws IOException {
      if (r == null || r.isEmpty()) return true;
      HRegionInfo info =
        MetaReader.parseHRegionInfoFromCatalogResult(
          r, HConstants.REGIONINFO_QUALIFIER);
      if (info == null) return true; // Keep scanning
      if (info.isOffline() && info.isSplit()) {
        offlineSplitParents.put(info, r);
      }
      // Returning true means "keep scanning"
      return true;
    }
  };
  // Run full scan of .META. catalog table passing in our custom visitor
  MetaReader.fullScan(this.catalogTracker, visitor);
  // Now work on our list of found parents. See if any we can clean up.
  int fixups = 0;
  for (Map.Entry<HRegionInfo, Result> e : offlineSplitParents.entrySet()) {
    fixups += ServerShutdownHandler.fixupDaughters(
        e.getValue(), assignmentManager, catalogTracker);
  }
  if (fixups != 0) {
    LOG.info("Scanned the catalog and fixed up " + fixups +
      " missing daughter region(s)");
  }
}
Project: hbase-0.94.8-qod    File: HMaster.java
void fixupDaughters(final MonitoredTask status) throws IOException {
  final Map<HRegionInfo, Result> offlineSplitParents =
    new HashMap<HRegionInfo, Result>();
  // This visitor collects offline split parents in the .META. table
  MetaReader.Visitor visitor = new MetaReader.Visitor() {
    @Override
    public boolean visit(Result r) throws IOException {
      if (r == null || r.isEmpty()) return true;
      HRegionInfo info =
        MetaReader.parseHRegionInfoFromCatalogResult(
          r, HConstants.REGIONINFO_QUALIFIER);
      if (info == null) return true; // Keep scanning
      if (info.isOffline() && info.isSplit()) {
        offlineSplitParents.put(info, r);
      }
      // Returning true means "keep scanning"
      return true;
    }
  };
  // Run full scan of .META. catalog table passing in our custom visitor
  MetaReader.fullScan(this.catalogTracker, visitor);
  // Now work on our list of found parents. See if any we can clean up.
  int fixups = 0;
  for (Map.Entry<HRegionInfo, Result> e : offlineSplitParents.entrySet()) {
    fixups += ServerShutdownHandler.fixupDaughters(
        e.getValue(), assignmentManager, catalogTracker);
  }
  if (fixups != 0) {
    LOG.info("Scanned the catalog and fixed up " + fixups +
      " missing daughter region(s)");
  }
}
Project: DominoHBase    File: HMaster.java
void fixupDaughters(final MonitoredTask status) throws IOException {
  final Map<HRegionInfo, Result> offlineSplitParents =
    new HashMap<HRegionInfo, Result>();
  // This visitor collects offline split parents in the .META. table
  MetaReader.Visitor visitor = new MetaReader.Visitor() {
    @Override
    public boolean visit(Result r) throws IOException {
      if (r == null || r.isEmpty()) return true;
      HRegionInfo info =
        HRegionInfo.getHRegionInfo(r);
      if (info == null) return true; // Keep scanning
      if (info.isOffline() && info.isSplit()) {
        offlineSplitParents.put(info, r);
      }
      // Returning true means "keep scanning"
      return true;
    }
  };
  // Run full scan of .META. catalog table passing in our custom visitor
  MetaReader.fullScan(this.catalogTracker, visitor);
  // Now work on our list of found parents. See if any we can clean up.
  int fixups = 0;
  for (Map.Entry<HRegionInfo, Result> e : offlineSplitParents.entrySet()) {
    ServerName sn = HRegionInfo.getServerName(e.getValue());
    if (!serverManager.isServerDead(sn)) { // Otherwise, let SSH take care of it
      fixups += ServerShutdownHandler.fixupDaughters(
        e.getValue(), assignmentManager, catalogTracker);
    }
  }
  if (fixups != 0) {
    LOG.info("Scanned the catalog and fixed up " + fixups +
      " missing daughter region(s)");
  }
}
Project: hindex    File: HMaster.java
void fixupDaughters(final MonitoredTask status) throws IOException {
  final Map<HRegionInfo, Result> offlineSplitParents =
    new HashMap<HRegionInfo, Result>();
  // This visitor collects offline split parents in the .META. table
  MetaReader.Visitor visitor = new MetaReader.Visitor() {
    @Override
    public boolean visit(Result r) throws IOException {
      if (r == null || r.isEmpty()) return true;
      HRegionInfo info =
        MetaReader.parseHRegionInfoFromCatalogResult(
          r, HConstants.REGIONINFO_QUALIFIER);
      if (info == null) return true; // Keep scanning
      if (info.isOffline() && info.isSplit()) {
        offlineSplitParents.put(info, r);
      }
      // Returning true means "keep scanning"
      return true;
    }
  };
  // Run full scan of .META. catalog table passing in our custom visitor
  MetaReader.fullScan(this.catalogTracker, visitor);
  // Now work on our list of found parents. See if any we can clean up.
  int fixups = 0;
  for (Map.Entry<HRegionInfo, Result> e : offlineSplitParents.entrySet()) {
    fixups += ServerShutdownHandler.fixupDaughters(
        e.getValue(), assignmentManager, catalogTracker);
  }
  if (fixups != 0) {
    LOG.info("Scanned the catalog and fixed up " + fixups +
      " missing daughter region(s)");
  }
}
Project: LCIndex-HBase-0.94.16    File: ServerManager.java
public synchronized void expireServer(final ServerName serverName) {
  boolean carryingRoot = services.getAssignmentManager().isCarryingRoot(serverName);
  if (!services.isServerShutdownHandlerEnabled() && (!carryingRoot || !this.isSSHForRootEnabled)) {
    LOG.info("Master doesn't enable ServerShutdownHandler during initialization, "
        + "delay expiring server " + serverName);
    this.deadNotExpiredServers.add(serverName);
    return;
  }
  if (!this.onlineServers.containsKey(serverName)) {
    LOG.warn("Received expiration of " + serverName +
      " but server is not currently online");
  }
  if (this.deadservers.contains(serverName)) {
    // TODO: Can this happen?  It shouldn't be online in this case?
    LOG.warn("Received expiration of " + serverName +
        " but server shutdown is already in progress");
    return;
  }
  // Remove the server from the known servers lists and update load info BUT
  // add to deadservers first; do this so it'll show in dead servers list if
  // not in online servers list.
  this.deadservers.add(serverName);
  this.onlineServers.remove(serverName);
  synchronized (onlineServers) {
    onlineServers.notifyAll();
  }
  this.serverConnections.remove(serverName);
  // If cluster is going down, yes, servers are going to be expiring; don't
  // process as a dead server
  if (this.clusterShutdown) {
    LOG.info("Cluster shutdown set; " + serverName +
      " expired; onlineServers=" + this.onlineServers.size());
    if (this.onlineServers.isEmpty()) {
      master.stop("Cluster shutdown set; onlineServer=0");
    }
    return;
  }

  boolean carryingMeta = services.getAssignmentManager().isCarryingMeta(serverName);
  if (carryingRoot || carryingMeta) {
    this.services.getExecutorService().submit(new MetaServerShutdownHandler(this.master,
      this.services, this.deadservers, serverName, carryingRoot, carryingMeta));
  } else {
    this.services.getExecutorService().submit(new ServerShutdownHandler(this.master,
      this.services, this.deadservers, serverName, true));
  }
  LOG.debug("Added=" + serverName +
    " to dead servers, submitted shutdown handler to be executed, root=" +
      carryingRoot + ", meta=" + carryingMeta);
}
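Note the ordering in expireServer: the server is added to deadservers before it is removed from onlineServers, so concurrent readers always find it in at least one of the two lists, and a server carrying root or meta is routed to MetaServerShutdownHandler so catalog recovery is not queued behind user-region work. A compact sketch of the dead-before-online ordering follows; ServerRegistry is a hypothetical class, not an HBase type.

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class ServerRegistry {
  private final Set<String> onlineServers = ConcurrentHashMap.newKeySet();
  private final Set<String> deadServers = ConcurrentHashMap.newKeySet();

  public void markOnline(String server) {
    onlineServers.add(server);
  }

  // Mirrors the ordering in expireServer: add to the dead list first, then
  // remove from the online list, so the server is always visible in at least
  // one of the two lists to concurrent readers.
  public void expire(String server) {
    deadServers.add(server);
    onlineServers.remove(server);
  }

  public boolean isKnown(String server) {
    return onlineServers.contains(server) || deadServers.contains(server);
  }

  public static void main(String[] args) {
    ServerRegistry registry = new ServerRegistry();
    registry.markOnline("rs1,16020,1");
    registry.expire("rs1,16020,1");
    System.out.println(registry.isKnown("rs1,16020,1")); // true: now in deadServers
  }
}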
Project: LCIndex-HBase-0.94.16    File: AssignmentManager.java
/**
 * Processes the list of dead servers from the result of the META scan, along
 * with the regions in RIT.
 * <p>
 * This is used during failover to recover regions that were lost because they
 * belonged to RegionServers which failed while there was no active master, or
 * because they were in RIT.
 * <p>
 *
 * @param deadServers
 *          The list of dead servers which failed while there was no active
 *          master. Can be null.
 * @param nodes
 *          The regions in RIT
 * @throws IOException
 * @throws KeeperException
 */
private void processDeadServersAndRecoverLostRegions(
    Map<ServerName, List<Pair<HRegionInfo, Result>>> deadServers,
    List<String> nodes) throws IOException, KeeperException {
  if (null != deadServers) {
    Set<ServerName> actualDeadServers = this.serverManager.getDeadServers();
    for (Map.Entry<ServerName, List<Pair<HRegionInfo, Result>>> deadServer : 
      deadServers.entrySet()) {
      // skip regions of dead servers because SSH will process regions during rs expiration.
      // see HBASE-5916
      if (actualDeadServers.contains(deadServer.getKey())) {
        for (Pair<HRegionInfo, Result> deadRegion : deadServer.getValue()) {
          HRegionInfo hri = deadRegion.getFirst();
          // Delete znode of region in transition if table is disabled or disabling. If a region
          // server went down during master initialization then SSH cannot handle the regions of
          // partially disabled tables because in memory region state information may not be
          // available with master.
          deleteNodeAndOfflineRegion(hri);
          nodes.remove(deadRegion.getFirst().getEncodedName());
        }
        continue;
      }
      List<Pair<HRegionInfo, Result>> regions = deadServer.getValue();
      for (Pair<HRegionInfo, Result> region : regions) {
        HRegionInfo regionInfo = region.getFirst();
        Result result = region.getSecond();
        // If region was in transition (was in zk) force it offline for
        // reassign
        try {
          RegionTransitionData data = ZKAssign.getData(watcher,
              regionInfo.getEncodedName());

          // If zk node of this region has been updated by a live server,
          // we consider that this region is being handled.
          // So we should skip it and process it in
          // processRegionsInTransition.
          if (data != null && data.getOrigin() != null && 
              serverManager.isServerOnline(data.getOrigin())) {
            LOG.info("The region " + regionInfo.getEncodedName()
                + "is being handled on " + data.getOrigin());
            continue;
          }
          // Process with existing RS shutdown code
          boolean assign = ServerShutdownHandler.processDeadRegion(
              regionInfo, result, this, this.catalogTracker);
          if (assign) {
            ZKAssign.createOrForceNodeOffline(watcher, regionInfo,
                master.getServerName());
            if (!nodes.contains(regionInfo.getEncodedName())) {
              nodes.add(regionInfo.getEncodedName());
            }
          }
        } catch (KeeperException.NoNodeException nne) {
          // This is fine
        }
      }
    }
  }

  if (!nodes.isEmpty()) {
    for (String encodedRegionName : nodes) {
      processRegionInTransition(encodedRegionName, null, deadServers);
    }
  }
}
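A key check in the loop above: if the region's znode was last updated by a server that is still online, the region is already being handled and is skipped rather than forced offline. A simplified stand-alone sketch of that ownership test, with the ZK lookup and online-server list stubbed out (all names here are invented for illustration):

import java.util.Map;
import java.util.Set;

public class OwnershipCheckDemo {
  // Stub for ZKAssign.getData: region -> server that last touched its znode.
  static final Map<String, String> znodeOrigin = Map.of("region-1", "rs-live");
  // Stub for serverManager.isServerOnline.
  static final Set<String> onlineServers = Set.of("rs-live");

  static boolean shouldForceOffline(String region) {
    String origin = znodeOrigin.get(region);
    // Skip regions whose znode was updated by a live server: they are already
    // being handled and will be picked up by processRegionsInTransition.
    return origin == null || !onlineServers.contains(origin);
  }

  public static void main(String[] args) {
    System.out.println(shouldForceOffline("region-1")); // false: a live server owns it
    System.out.println(shouldForceOffline("region-2")); // true: force offline for reassign
  }
}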
Project: pbase    File: ServerManager.java
public synchronized void expireServer(final ServerName serverName) {
  if (serverName.equals(master.getServerName())) {
    if (!(master.isAborted() || master.isStopped())) {
      master.stop("We lost our znode?");
    }
    return;
  }
  if (!services.isServerShutdownHandlerEnabled()) {
    LOG.info("Master doesn't enable ServerShutdownHandler during initialization, "
        + "delay expiring server " + serverName);
    this.queuedDeadServers.add(serverName);
    return;
  }
  if (this.deadservers.isDeadServer(serverName)) {
    // TODO: Can this happen?  It shouldn't be online in this case?
    LOG.warn("Expiration of " + serverName +
        " but server shutdown already in progress");
    return;
  }
  synchronized (onlineServers) {
    if (!this.onlineServers.containsKey(serverName)) {
      LOG.warn("Expiration of " + serverName + " but server not online");
    }
    // Remove the server from the known servers lists and update load info BUT
    // add to deadservers first; do this so it'll show in dead servers list if
    // not in online servers list.
    this.deadservers.add(serverName);
    this.onlineServers.remove(serverName);
    onlineServers.notifyAll();
  }
  this.rsAdmins.remove(serverName);
  // If cluster is going down, yes, servers are going to be expiring; don't
  // process as a dead server
  if (this.clusterShutdown) {
    LOG.info("Cluster shutdown set; " + serverName +
      " expired; onlineServers=" + this.onlineServers.size());
    if (this.onlineServers.isEmpty()) {
      master.stop("Cluster shutdown set; onlineServer=0");
    }
    return;
  }

  boolean carryingMeta = services.getAssignmentManager().isCarryingMeta(serverName);
  if (carryingMeta) {
    this.services.getExecutorService().submit(new MetaServerShutdownHandler(this.master,
      this.services, this.deadservers, serverName));
  } else {
    this.services.getExecutorService().submit(new ServerShutdownHandler(this.master,
      this.services, this.deadservers, serverName, true));
  }
  LOG.debug("Added=" + serverName +
    " to dead servers, submitted shutdown handler to be executed meta=" + carryingMeta);

  // Tell our listeners that a server was removed
  if (!this.listeners.isEmpty()) {
    for (ServerListener listener : this.listeners) {
      listener.serverRemoved(serverName);
    }
  }
}
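Unlike the 0.94-era versions, the pbase variant above finishes by notifying registered ServerListener callbacks once the shutdown handler has been submitted. A minimal observer sketch of that final step follows; the nested listener interface is a simplified stand-in for HBase's ServerListener.

import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;

public class ListenerDemo {
  // Simplified stand-in for org.apache.hadoop.hbase.master.ServerListener.
  interface ServerListener {
    void serverRemoved(String serverName);
  }

  private final List<ServerListener> listeners = new CopyOnWriteArrayList<>();

  void registerListener(ServerListener listener) {
    listeners.add(listener);
  }

  // Called at the end of expireServer, after the shutdown handler is submitted.
  void notifyServerRemoved(String serverName) {
    for (ServerListener listener : listeners) {
      listener.serverRemoved(serverName);
    }
  }

  public static void main(String[] args) {
    ListenerDemo demo = new ListenerDemo();
    demo.registerListener(sn -> System.out.println("removed: " + sn));
    demo.notifyServerRemoved("rs1,16020,1");
  }
}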
Project: pbase    File: TestAssignmentManager.java
private void processServerShutdownHandler(AssignmentManager am, boolean splitRegion)
    throws IOException, ServiceException {
  // Make sure our new AM gets callbacks; once registered, can't unregister.
  // That's ok because we make a new zk watcher for each test.
  this.watcher.registerListenerFirst(am);

  // Need to set up a fake scan of meta for the servershutdown handler
  // Make an RS Interface implementation.  Make it so a scanner can go against it.
  ClientProtos.ClientService.BlockingInterface implementation =
    Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
  // Get a meta row result that has region up on SERVERNAME_A

  Result r;
  if (splitRegion) {
    r = MetaMockingUtil.getMetaTableRowResultAsSplitRegion(REGIONINFO, SERVERNAME_A);
  } else {
    r = MetaMockingUtil.getMetaTableRowResult(REGIONINFO, SERVERNAME_A);
  }

  final ScanResponse.Builder builder = ScanResponse.newBuilder();
  builder.setMoreResults(true);
  builder.addCellsPerResult(r.size());
  final List<CellScannable> cellScannables = new ArrayList<CellScannable>(1);
  cellScannables.add(r);
  Mockito.when(implementation.scan(
    (RpcController)Mockito.any(), (ScanRequest)Mockito.any())).
    thenAnswer(new Answer<ScanResponse>() {
        @Override
        public ScanResponse answer(InvocationOnMock invocation) throws Throwable {
          PayloadCarryingRpcController controller = (PayloadCarryingRpcController) invocation
              .getArguments()[0];
          if (controller != null) {
            controller.setCellScanner(CellUtil.createCellScanner(cellScannables));
          }
          return builder.build();
        }
    });

  // Get a connection w/ mocked up common methods.
  ClusterConnection connection =
    HConnectionTestingUtility.getMockedConnectionAndDecorate(HTU.getConfiguration(),
      null, implementation, SERVERNAME_B, REGIONINFO);
  // These mocks were done up when all connections were managed.  The world is different now
  // that we have moved to unmanaged connections, which messes up the intercepts done in these
  // tests.  Just mark connections as managed; then, down in MetaTableAccessor, scans will take
  // the path that picks up the above mocked up 'implementation', so 'scans' of meta return the
  // expected result.  Redo in the new realm of unmanaged connections.
  Mockito.when(connection.isManaged()).thenReturn(true);
  try {
    // Make it so we can get a catalog tracker from the server manager; it's needed
    // down in the guts of the server shutdown handler.
    Mockito.when(this.server.getConnection()).thenReturn(connection);

    // Now make a server shutdown handler instance and invoke process.
    // Have it that SERVERNAME_A died.
    DeadServer deadServers = new DeadServer();
    deadServers.add(SERVERNAME_A);
    // I need a services instance that will return the AM
    MasterFileSystem fs = Mockito.mock(MasterFileSystem.class);
    Mockito.doNothing().when(fs).setLogRecoveryMode();
    Mockito.when(fs.getLogRecoveryMode()).thenReturn(RecoveryMode.LOG_REPLAY);
    MasterServices services = Mockito.mock(MasterServices.class);
    Mockito.when(services.getAssignmentManager()).thenReturn(am);
    Mockito.when(services.getServerManager()).thenReturn(this.serverManager);
    Mockito.when(services.getZooKeeper()).thenReturn(this.watcher);
    Mockito.when(services.getMasterFileSystem()).thenReturn(fs);
    Mockito.when(services.getConnection()).thenReturn(connection);
    ServerShutdownHandler handler = new ServerShutdownHandler(this.server,
        services, deadServers, SERVERNAME_A, false);
    am.failoverCleanupDone.set(true);
    handler.process();
    // The region in r will have been assigned.  It'll be up in zk as unassigned.
  } finally {
    if (connection != null) connection.close();
  }
}
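The test above stubs the scan RPC with Mockito's thenAnswer so the stub can inspect the invocation's RpcController argument before building a response. Below is a stripped-down, self-contained example of the same thenAnswer idiom; the ScanService interface is invented for illustration, while the Mockito calls themselves are the standard API.

import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

public class AnswerDemo {
  // Invented interface standing in for the mocked scan RPC.
  interface ScanService {
    String scan(String request);
  }

  public static void main(String[] args) {
    ScanService service = mock(ScanService.class);
    // thenAnswer lets the stub inspect the invocation's arguments, just as
    // the test inspects the PayloadCarryingRpcController before building
    // a ScanResponse.
    when(service.scan(anyString())).thenAnswer(new Answer<String>() {
      @Override
      public String answer(InvocationOnMock invocation) {
        String request = invocation.getArgument(0);
        return "response-for-" + request;
      }
    });
    System.out.println(service.scan("meta"));  // prints: response-for-meta
  }
}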
Project: HIndex    File: ServerManager.java
public synchronized void expireServer(final ServerName serverName) {
  if (!services.isServerShutdownHandlerEnabled()) {
    LOG.info("Master doesn't enable ServerShutdownHandler during initialization, "
        + "delay expiring server " + serverName);
    this.queuedDeadServers.add(serverName);
    return;
  }
  if (this.deadservers.isDeadServer(serverName)) {
    // TODO: Can this happen?  It shouldn't be online in this case?
    LOG.warn("Expiration of " + serverName +
        " but server shutdown already in progress");
    return;
  }
  synchronized (onlineServers) {
    if (!this.onlineServers.containsKey(serverName)) {
      LOG.warn("Expiration of " + serverName + " but server not online");
    }
    // Remove the server from the known servers lists and update load info BUT
    // add to deadservers first; do this so it'll show in dead servers list if
    // not in online servers list.
    this.deadservers.add(serverName);
    this.onlineServers.remove(serverName);
    onlineServers.notifyAll();
  }
  this.rsAdmins.remove(serverName);
  // If cluster is going down, yes, servers are going to be expiring; don't
  // process as a dead server
  if (this.clusterShutdown) {
    LOG.info("Cluster shutdown set; " + serverName +
      " expired; onlineServers=" + this.onlineServers.size());
    if (this.onlineServers.isEmpty()) {
      master.stop("Cluster shutdown set; onlineServer=0");
    }
    return;
  }

  boolean carryingMeta = services.getAssignmentManager().isCarryingMeta(serverName);
  if (carryingMeta) {
    this.services.getExecutorService().submit(new MetaServerShutdownHandler(this.master,
      this.services, this.deadservers, serverName));
  } else {
    this.services.getExecutorService().submit(new ServerShutdownHandler(this.master,
      this.services, this.deadservers, serverName, true));
  }
  LOG.debug("Added=" + serverName +
    " to dead servers, submitted shutdown handler to be executed meta=" + carryingMeta);

  // Tell our listeners that a server was removed
  if (!this.listeners.isEmpty()) {
    for (ServerListener listener : this.listeners) {
      listener.serverRemoved(serverName);
    }
  }
}
Project: HIndex    File: TestAssignmentManager.java
private void processServerShutdownHandler(CatalogTracker ct, AssignmentManager am, boolean splitRegion)
    throws IOException, ServiceException {
  // Make sure our new AM gets callbacks; once registered, can't unregister.
  // That's ok because we make a new zk watcher for each test.
  this.watcher.registerListenerFirst(am);

  // Need to set up a fake scan of meta for the servershutdown handler
  // Make an RS Interface implementation.  Make it so a scanner can go against it.
  ClientProtos.ClientService.BlockingInterface implementation =
    Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
  // Get a meta row result that has region up on SERVERNAME_A

  Result r;
  if (splitRegion) {
    r = MetaMockingUtil.getMetaTableRowResultAsSplitRegion(REGIONINFO, SERVERNAME_A);
  } else {
    r = MetaMockingUtil.getMetaTableRowResult(REGIONINFO, SERVERNAME_A);
  }

  final ScanResponse.Builder builder = ScanResponse.newBuilder();
  builder.setMoreResults(true);
  builder.addCellsPerResult(r.size());
  final List<CellScannable> cellScannables = new ArrayList<CellScannable>(1);
  cellScannables.add(r);
  Mockito.when(implementation.scan(
    (RpcController)Mockito.any(), (ScanRequest)Mockito.any())).
    thenAnswer(new Answer<ScanResponse>() {
        @Override
        public ScanResponse answer(InvocationOnMock invocation) throws Throwable {
          PayloadCarryingRpcController controller = (PayloadCarryingRpcController) invocation
              .getArguments()[0];
          if (controller != null) {
            controller.setCellScanner(CellUtil.createCellScanner(cellScannables));
          }
          return builder.build();
        }
    });

  // Get a connection w/ mocked up common methods.
  HConnection connection =
    HConnectionTestingUtility.getMockedConnectionAndDecorate(HTU.getConfiguration(),
      null, implementation, SERVERNAME_B, REGIONINFO);

  // Make it so we can get a catalog tracker from the server manager; it's needed
  // down in the guts of the server shutdown handler.
  Mockito.when(ct.getConnection()).thenReturn(connection);
  Mockito.when(this.server.getCatalogTracker()).thenReturn(ct);

  // Now make a server shutdown handler instance and invoke process.
  // Have it that SERVERNAME_A died.
  DeadServer deadServers = new DeadServer();
  deadServers.add(SERVERNAME_A);
  // I need a services instance that will return the AM
  MasterServices services = Mockito.mock(MasterServices.class);
  Mockito.when(services.getAssignmentManager()).thenReturn(am);
  Mockito.when(services.getServerManager()).thenReturn(this.serverManager);
  Mockito.when(services.getZooKeeper()).thenReturn(this.watcher);
  ServerShutdownHandler handler = new ServerShutdownHandler(this.server,
    services, deadServers, SERVERNAME_A, false);
  am.failoverCleanupDone.set(true);
  handler.process();
  // The region in r will have been assigned.  It'll be up in zk as unassigned.
}
Project: IRIndex    File: ServerManager.java
public synchronized void expireServer(final ServerName serverName) {
  boolean carryingRoot = services.getAssignmentManager().isCarryingRoot(serverName);
  if (!services.isServerShutdownHandlerEnabled() && (!carryingRoot || !this.isSSHForRootEnabled)) {
    LOG.info("Master doesn't enable ServerShutdownHandler during initialization, "
        + "delay expiring server " + serverName);
    this.deadNotExpiredServers.add(serverName);
    return;
  }
  if (!this.onlineServers.containsKey(serverName)) {
    LOG.warn("Received expiration of " + serverName +
      " but server is not currently online");
  }
  if (this.deadservers.contains(serverName)) {
    // TODO: Can this happen?  It shouldn't be online in this case?
    LOG.warn("Received expiration of " + serverName +
        " but server shutdown is already in progress");
    return;
  }
  // Remove the server from the known servers lists and update load info BUT
  // add to deadservers first; do this so it'll show in dead servers list if
  // not in online servers list.
  this.deadservers.add(serverName);
  this.onlineServers.remove(serverName);
  synchronized (onlineServers) {
    onlineServers.notifyAll();
  }
  this.serverConnections.remove(serverName);
  // If cluster is going down, yes, servers are going to be expiring; don't
  // process as a dead server
  if (this.clusterShutdown) {
    LOG.info("Cluster shutdown set; " + serverName +
      " expired; onlineServers=" + this.onlineServers.size());
    if (this.onlineServers.isEmpty()) {
      master.stop("Cluster shutdown set; onlineServer=0");
    }
    return;
  }

  boolean carryingMeta = services.getAssignmentManager().isCarryingMeta(serverName);
  if (carryingRoot || carryingMeta) {
    this.services.getExecutorService().submit(new MetaServerShutdownHandler(this.master,
      this.services, this.deadservers, serverName, carryingRoot, carryingMeta));
  } else {
    this.services.getExecutorService().submit(new ServerShutdownHandler(this.master,
      this.services, this.deadservers, serverName, true));
  }
  LOG.debug("Added=" + serverName +
    " to dead servers, submitted shutdown handler to be executed, root=" +
      carryingRoot + ", meta=" + carryingMeta);
}
Project: IRIndex    File: AssignmentManager.java
/**
 * Processes the list of dead servers from the result of the META scan, along
 * with the regions in RIT.
 * <p>
 * This is used during failover to recover regions that were lost because they
 * belonged to RegionServers which failed while there was no active master, or
 * because they were in RIT.
 * <p>
 *
 * @param deadServers
 *          The list of dead servers which failed while there was no active
 *          master. Can be null.
 * @param nodes
 *          The regions in RIT
 * @throws IOException
 * @throws KeeperException
 */
private void processDeadServersAndRecoverLostRegions(
    Map<ServerName, List<Pair<HRegionInfo, Result>>> deadServers,
    List<String> nodes) throws IOException, KeeperException {
  if (null != deadServers) {
    Set<ServerName> actualDeadServers = this.serverManager.getDeadServers();
    for (Map.Entry<ServerName, List<Pair<HRegionInfo, Result>>> deadServer : 
      deadServers.entrySet()) {
      // skip regions of dead servers because SSH will process regions during rs expiration.
      // see HBASE-5916
      if (actualDeadServers.contains(deadServer.getKey())) {
        for (Pair<HRegionInfo, Result> deadRegion : deadServer.getValue()) {
          HRegionInfo hri = deadRegion.getFirst();
          // Delete znode of region in transition if table is disabled or disabling. If a region
          // server went down during master initialization then SSH cannot handle the regions of
          // partially disabled tables because in memory region state information may not be
          // available with master.
          deleteNodeAndOfflineRegion(hri);
          nodes.remove(deadRegion.getFirst().getEncodedName());
        }
        continue;
      }
      List<Pair<HRegionInfo, Result>> regions = deadServer.getValue();
      for (Pair<HRegionInfo, Result> region : regions) {
        HRegionInfo regionInfo = region.getFirst();
        Result result = region.getSecond();
        // If region was in transition (was in zk) force it offline for
        // reassign
        try {
          RegionTransitionData data = ZKAssign.getData(watcher,
              regionInfo.getEncodedName());

          // If zk node of this region has been updated by a live server,
          // we consider that this region is being handled.
          // So we should skip it and process it in
          // processRegionsInTransition.
          if (data != null && data.getOrigin() != null && 
              serverManager.isServerOnline(data.getOrigin())) {
            LOG.info("The region " + regionInfo.getEncodedName()
                + "is being handled on " + data.getOrigin());
            continue;
          }
          // Process with existing RS shutdown code
          boolean assign = ServerShutdownHandler.processDeadRegion(
              regionInfo, result, this, this.catalogTracker);
          if (assign) {
            ZKAssign.createOrForceNodeOffline(watcher, regionInfo,
                master.getServerName());
            if (!nodes.contains(regionInfo.getEncodedName())) {
              nodes.add(regionInfo.getEncodedName());
            }
          }
        } catch (KeeperException.NoNodeException nne) {
          // This is fine
        }
      }
    }
  }

  if (!nodes.isEmpty()) {
    for (String encodedRegionName : nodes) {
      processRegionInTransition(encodedRegionName, null, deadServers);
    }
  }
}
Project: RStore    File: ServerManager.java
public synchronized void expireServer(final ServerName serverName) {
  if (!this.onlineServers.containsKey(serverName)) {
    LOG.warn("Received expiration of " + serverName +
      " but server is not currently online");
    return;
  }
  if (this.deadservers.contains(serverName)) {
    // TODO: Can this happen?  It shouldn't be online in this case?
    LOG.warn("Received expiration of " + serverName +
        " but server shutdown is already in progress");
    return;
  }
  // Remove the server from the known servers lists and update load info BUT
  // add to deadservers first; do this so it'll show in dead servers list if
  // not in online servers list.
  this.deadservers.add(serverName);
  this.onlineServers.remove(serverName);
  this.serverConnections.remove(serverName);
  // If cluster is going down, yes, servers are going to be expiring; don't
  // process as a dead server
  if (this.clusterShutdown) {
    LOG.info("Cluster shutdown set; " + serverName +
      " expired; onlineServers=" + this.onlineServers.size());
    if (this.onlineServers.isEmpty()) {
      master.stop("Cluster shutdown set; onlineServer=0");
    }
    return;
  }

  boolean carryingRoot = services.getAssignmentManager().isCarryingRoot(serverName);
  boolean carryingMeta = services.getAssignmentManager().isCarryingMeta(serverName);
  if (carryingRoot || carryingMeta) {
    this.services.getExecutorService().submit(new MetaServerShutdownHandler(this.master,
      this.services, this.deadservers, serverName, carryingRoot, carryingMeta));
  } else {
    this.services.getExecutorService().submit(new ServerShutdownHandler(this.master,
      this.services, this.deadservers, serverName, true));
  }
  LOG.debug("Added=" + serverName +
    " to dead servers, submitted shutdown handler to be executed, root=" +
      carryingRoot + ", meta=" + carryingMeta);
}
Project: RStore    File: AssignmentManager.java
/**
 * Processes the list of dead servers from the result of the META scan, along
 * with the regions in RIT.
 * <p>
 * This is used during failover to recover regions that were lost because they
 * belonged to RegionServers which failed while there was no active master, or
 * because they were in RIT.
 * <p>
 *
 * @param deadServers
 *          The list of dead servers which failed while there was no active
 *          master. Can be null.
 * @param nodes
 *          The regions in RIT
 * @throws IOException
 * @throws KeeperException
 */
private void processDeadServersAndRecoverLostRegions(
    Map<ServerName, List<Pair<HRegionInfo, Result>>> deadServers,
    List<String> nodes) throws IOException, KeeperException {
  if (null != deadServers) {
    for (Map.Entry<ServerName, List<Pair<HRegionInfo, Result>>> deadServer : 
      deadServers.entrySet()) {
      List<Pair<HRegionInfo, Result>> regions = deadServer.getValue();
      for (Pair<HRegionInfo, Result> region : regions) {
        HRegionInfo regionInfo = region.getFirst();
        Result result = region.getSecond();
        // If region was in transition (was in zk) force it offline for
        // reassign
        try {
          RegionTransitionData data = ZKAssign.getData(watcher,
              regionInfo.getEncodedName());

          // If zk node of this region has been updated by a live server,
          // we consider that this region is being handled.
          // So we should skip it and process it in
          // processRegionsInTransition.
          if (data != null && data.getOrigin() != null && 
              serverManager.isServerOnline(data.getOrigin())) {
            LOG.info("The region " + regionInfo.getEncodedName()
                + "is being handled on " + data.getOrigin());
            continue;
          }
          // Process with existing RS shutdown code
          boolean assign = ServerShutdownHandler.processDeadRegion(
              regionInfo, result, this, this.catalogTracker);
          if (assign) {
            ZKAssign.createOrForceNodeOffline(watcher, regionInfo,
                master.getServerName());
            if (!nodes.contains(regionInfo.getEncodedName())) {
              nodes.add(regionInfo.getEncodedName());
            }
          }
        } catch (KeeperException.NoNodeException nne) {
          // This is fine
        }
      }
    }
  }

  if (!nodes.isEmpty()) {
    for (String encodedRegionName : nodes) {
      processRegionInTransition(encodedRegionName, null, deadServers);
    }
  }
}
Project: PyroDB    File: ServerManager.java
public synchronized void expireServer(final ServerName serverName) {
  if (serverName.equals(master.getServerName())) {
    if (!(master.isAborted() || master.isStopped())) {
      master.stop("We lost our znode?");
    }
    return;
  }
  if (!services.isServerShutdownHandlerEnabled()) {
    LOG.info("Master doesn't enable ServerShutdownHandler during initialization, "
        + "delay expiring server " + serverName);
    this.queuedDeadServers.add(serverName);
    return;
  }
  if (this.deadservers.isDeadServer(serverName)) {
    // TODO: Can this happen?  It shouldn't be online in this case?
    LOG.warn("Expiration of " + serverName +
        " but server shutdown already in progress");
    return;
  }
  synchronized (onlineServers) {
    if (!this.onlineServers.containsKey(serverName)) {
      LOG.warn("Expiration of " + serverName + " but server not online");
    }
    // Remove the server from the known servers lists and update load info BUT
    // add to deadservers first; do this so it'll show in dead servers list if
    // not in online servers list.
    this.deadservers.add(serverName);
    this.onlineServers.remove(serverName);
    onlineServers.notifyAll();
  }
  this.rsAdmins.remove(serverName);
  // If cluster is going down, yes, servers are going to be expiring; don't
  // process as a dead server
  if (this.clusterShutdown) {
    LOG.info("Cluster shutdown set; " + serverName +
      " expired; onlineServers=" + this.onlineServers.size());
    if (this.onlineServers.isEmpty()) {
      master.stop("Cluster shutdown set; onlineServer=0");
    }
    return;
  }

  boolean carryingMeta = services.getAssignmentManager().isCarryingMeta(serverName);
  if (carryingMeta) {
    this.services.getExecutorService().submit(new MetaServerShutdownHandler(this.master,
      this.services, this.deadservers, serverName));
  } else {
    this.services.getExecutorService().submit(new ServerShutdownHandler(this.master,
      this.services, this.deadservers, serverName, true));
  }
  LOG.debug("Added=" + serverName +
    " to dead servers, submitted shutdown handler to be executed meta=" + carryingMeta);
}
Project: PyroDB    File: TestAssignmentManager.java
private void processServerShutdownHandler(CatalogTracker ct, AssignmentManager am, boolean splitRegion)
    throws IOException, ServiceException {
  // Make sure our new AM gets callbacks; once registered, can't unregister.
  // That's ok because we make a new zk watcher for each test.
  this.watcher.registerListenerFirst(am);

  // Need to set up a fake scan of meta for the servershutdown handler
  // Make an RS Interface implementation.  Make it so a scanner can go against it.
  ClientProtos.ClientService.BlockingInterface implementation =
    Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
  // Get a meta row result that has region up on SERVERNAME_A

  Result r;
  if (splitRegion) {
    r = MetaMockingUtil.getMetaTableRowResultAsSplitRegion(REGIONINFO, SERVERNAME_A);
  } else {
    r = MetaMockingUtil.getMetaTableRowResult(REGIONINFO, SERVERNAME_A);
  }

  final ScanResponse.Builder builder = ScanResponse.newBuilder();
  builder.setMoreResults(true);
  builder.addCellsPerResult(r.size());
  final List<CellScannable> cellScannables = new ArrayList<CellScannable>(1);
  cellScannables.add(r);
  Mockito.when(implementation.scan(
    (RpcController)Mockito.any(), (ScanRequest)Mockito.any())).
    thenAnswer(new Answer<ScanResponse>() {
        @Override
        public ScanResponse answer(InvocationOnMock invocation) throws Throwable {
          PayloadCarryingRpcController controller = (PayloadCarryingRpcController) invocation
              .getArguments()[0];
          if (controller != null) {
            controller.setCellScanner(CellUtil.createCellScanner(cellScannables));
          }
          return builder.build();
        }
    });

  // Get a connection w/ mocked up common methods.
  HConnection connection =
    HConnectionTestingUtility.getMockedConnectionAndDecorate(HTU.getConfiguration(),
      null, implementation, SERVERNAME_B, REGIONINFO);

  // Make it so we can get a catalog tracker from the server manager; it's needed
  // down in the guts of the server shutdown handler.
  Mockito.when(ct.getConnection()).thenReturn(connection);
  Mockito.when(this.server.getCatalogTracker()).thenReturn(ct);

  // Now make a server shutdown handler instance and invoke process.
  // Have it that SERVERNAME_A died.
  DeadServer deadServers = new DeadServer();
  deadServers.add(SERVERNAME_A);
  // I need a services instance that will return the AM
  MasterServices services = Mockito.mock(MasterServices.class);
  Mockito.when(services.getAssignmentManager()).thenReturn(am);
  Mockito.when(services.getServerManager()).thenReturn(this.serverManager);
  Mockito.when(services.getZooKeeper()).thenReturn(this.watcher);
  ServerShutdownHandler handler = new ServerShutdownHandler(this.server,
    services, deadServers, SERVERNAME_A, false);
  am.failoverCleanupDone.set(true);
  handler.process();
  // The region in r will have been assigned.  It'll be up in zk as unassigned.
}
Project: c5    File: ServerManager.java
public synchronized void expireServer(final ServerName serverName) {
  if (!services.isServerShutdownHandlerEnabled()) {
    LOG.info("Master doesn't enable ServerShutdownHandler during initialization, "
        + "delay expiring server " + serverName);
    this.queuedDeadServers.add(serverName);
    return;
  }
  if (!this.onlineServers.containsKey(serverName)) {
    LOG.warn("Expiration of " + serverName +
      " but server not online");
  }
  if (this.deadservers.isDeadServer(serverName)) {
    // TODO: Can this happen?  It shouldn't be online in this case?
    LOG.warn("Expiration of " + serverName +
        " but server shutdown already in progress");
    return;
  }
  // Remove the server from the known servers lists and update load info BUT
  // add to deadservers first; do this so it'll show in dead servers list if
  // not in online servers list.
  this.deadservers.add(serverName);
  this.onlineServers.remove(serverName);
  synchronized (onlineServers) {
    onlineServers.notifyAll();
  }
  this.rsAdmins.remove(serverName);
  // If cluster is going down, yes, servers are going to be expiring; don't
  // process as a dead server
  if (this.clusterShutdown) {
    LOG.info("Cluster shutdown set; " + serverName +
      " expired; onlineServers=" + this.onlineServers.size());
    if (this.onlineServers.isEmpty()) {
      master.stop("Cluster shutdown set; onlineServer=0");
    }
    return;
  }

  boolean carryingMeta = services.getAssignmentManager().isCarryingMeta(serverName);
  if (carryingMeta) {
    this.services.getExecutorService().submit(new MetaServerShutdownHandler(this.master,
      this.services, this.deadservers, serverName));
  } else {
    this.services.getExecutorService().submit(new ServerShutdownHandler(this.master,
      this.services, this.deadservers, serverName, true));
  }
  LOG.debug("Added=" + serverName +
    " to dead servers, submitted shutdown handler to be executed meta=" + carryingMeta);
}
Project: c5    File: TestAssignmentManager.java
private void processServerShutdownHandler(CatalogTracker ct, AssignmentManager am, boolean splitRegion)
    throws IOException, ServiceException {
  // Make sure our new AM gets callbacks; once registered, can't unregister.
  // That's ok because we make a new zk watcher for each test.
  this.watcher.registerListenerFirst(am);

  // Need to set up a fake scan of meta for the servershutdown handler
  // Make an RS Interface implementation.  Make it so a scanner can go against it.
  ClientProtos.ClientService.BlockingInterface implementation =
    Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
  // Get a meta row result that has region up on SERVERNAME_A

  Result r;
  if (splitRegion) {
    r = MetaMockingUtil.getMetaTableRowResultAsSplitRegion(REGIONINFO, SERVERNAME_A);
  } else {
    r = MetaMockingUtil.getMetaTableRowResult(REGIONINFO, SERVERNAME_A);
  }

  final ScanResponse.Builder builder = ScanResponse.newBuilder();
  builder.setMoreResults(true);
  builder.addCellsPerResult(r.size());
  final List<CellScannable> cellScannables = new ArrayList<CellScannable>(1);
  cellScannables.add(r);
  Mockito.when(implementation.scan(
    (RpcController)Mockito.any(), (ScanRequest)Mockito.any())).
    thenAnswer(new Answer<ScanResponse>() {
        public ScanResponse answer(InvocationOnMock invocation) throws Throwable {
          PayloadCarryingRpcController controller = (PayloadCarryingRpcController) invocation
              .getArguments()[0];
          if (controller != null) {
            controller.setCellScanner(CellUtil.createCellScanner(cellScannables));
          }
          return builder.build();
        }
    });

  // Get a connection w/ mocked up common methods.
  HConnection connection =
    HConnectionTestingUtility.getMockedConnectionAndDecorate(HTU.getConfiguration(),
      null, implementation, SERVERNAME_B, REGIONINFO);

  // Make it so we can get a catalog tracker from the server manager; it's needed
  // down in the guts of the server shutdown handler.
  Mockito.when(ct.getConnection()).thenReturn(connection);
  Mockito.when(this.server.getCatalogTracker()).thenReturn(ct);

  // Now make a server shutdown handler instance and invoke process.
  // Have it that SERVERNAME_A died.
  DeadServer deadServers = new DeadServer();
  deadServers.add(SERVERNAME_A);
  // I need a services instance that will return the AM
  MasterServices services = Mockito.mock(MasterServices.class);
  Mockito.when(services.getAssignmentManager()).thenReturn(am);
  Mockito.when(services.getServerManager()).thenReturn(this.serverManager);
  Mockito.when(services.getZooKeeper()).thenReturn(this.watcher);
  ServerShutdownHandler handler = new ServerShutdownHandler(this.server,
    services, deadServers, SERVERNAME_A, false);
  am.failoverCleanupDone.set(true);
  handler.process();
  // The region in r will have been assigned.  It'll be up in zk as unassigned.
}
Project: HBase-Research    File: ServerManager.java
public synchronized void expireServer(final ServerName serverName) {
  boolean carryingRoot = services.getAssignmentManager().isCarryingRoot(serverName);
  if (!services.isServerShutdownHandlerEnabled() && (!carryingRoot || !this.isSSHForRootEnabled)) {
    LOG.info("Master doesn't enable ServerShutdownHandler during initialization, "
        + "delay expiring server " + serverName);
    this.deadNotExpiredServers.add(serverName);
    return;
  }
  if (!this.onlineServers.containsKey(serverName)) {
    LOG.warn("Received expiration of " + serverName +
      " but server is not currently online");
  }
  if (this.deadservers.contains(serverName)) {
    // TODO: Can this happen?  It shouldn't be online in this case?
    LOG.warn("Received expiration of " + serverName +
        " but server shutdown is already in progress");
    return;
  }
  // Remove the server from the known servers lists and update load info BUT
  // add to deadservers first; do this so it'll show in dead servers list if
  // not in online servers list.
  this.deadservers.add(serverName);
  this.onlineServers.remove(serverName);
  synchronized (onlineServers) {
    onlineServers.notifyAll();
  }
  this.serverConnections.remove(serverName);
  // If cluster is going down, yes, servers are going to be expiring; don't
  // process as a dead server
  if (this.clusterShutdown) {
    LOG.info("Cluster shutdown set; " + serverName +
      " expired; onlineServers=" + this.onlineServers.size());
    if (this.onlineServers.isEmpty()) {
      master.stop("Cluster shutdown set; onlineServer=0");
    }
    return;
  }

  boolean carryingMeta = services.getAssignmentManager().isCarryingMeta(serverName);
  if (carryingRoot || carryingMeta) {
    this.services.getExecutorService().submit(new MetaServerShutdownHandler(this.master,
      this.services, this.deadservers, serverName, carryingRoot, carryingMeta));
  } else {
    this.services.getExecutorService().submit(new ServerShutdownHandler(this.master,
      this.services, this.deadservers, serverName, true));
  }
  LOG.debug("Added=" + serverName +
    " to dead servers, submitted shutdown handler to be executed, root=" +
      carryingRoot + ", meta=" + carryingMeta);
}
Project: HBase-Research    File: AssignmentManager.java
/**
 * Processes the list of dead servers from the result of the META scan, along
 * with the regions in RIT.
 * <p>
 * This is used during failover to recover regions that were lost because they
 * belonged to RegionServers which failed while there was no active master, or
 * because they were in RIT.
 * <p>
 *
 * @param deadServers
 *          The list of dead servers which failed while there was no active
 *          master. Can be null.
 * @param nodes
 *          The regions in RIT
 * @throws IOException
 * @throws KeeperException
 */
private void processDeadServersAndRecoverLostRegions(
    Map<ServerName, List<Pair<HRegionInfo, Result>>> deadServers,
    List<String> nodes) throws IOException, KeeperException {
  if (null != deadServers) {
    Set<ServerName> actualDeadServers = this.serverManager.getDeadServers();
    for (Map.Entry<ServerName, List<Pair<HRegionInfo, Result>>> deadServer : 
      deadServers.entrySet()) {
      // skip regions of dead servers because SSH will process regions during rs expiration.
      // see HBASE-5916
      if (actualDeadServers.contains(deadServer.getKey())) {
        for (Pair<HRegionInfo, Result> deadRegion : deadServer.getValue()) {
          HRegionInfo hri = deadRegion.getFirst();
          // Delete znode of region in transition if table is disabled or disabling. If a region
          // server went down during master initialization then SSH cannot handle the regions of
          // partially disabled tables because in memory region state information may not be
          // available with master.
          deleteNodeAndOfflineRegion(hri);
          nodes.remove(deadRegion.getFirst().getEncodedName());
        }
        continue;
      }
      List<Pair<HRegionInfo, Result>> regions = deadServer.getValue();
      for (Pair<HRegionInfo, Result> region : regions) {
        HRegionInfo regionInfo = region.getFirst();
        Result result = region.getSecond();
        // If region was in transition (was in zk) force it offline for
        // reassign
        try {
          RegionTransitionData data = ZKAssign.getData(watcher,
              regionInfo.getEncodedName());

          // If zk node of this region has been updated by a live server,
          // we consider that this region is being handled.
          // So we should skip it and process it in
          // processRegionsInTransition.
          if (data != null && data.getOrigin() != null && 
              serverManager.isServerOnline(data.getOrigin())) {
            LOG.info("The region " + regionInfo.getEncodedName()
                + "is being handled on " + data.getOrigin());
            continue;
          }
          // Process with existing RS shutdown code
          boolean assign = ServerShutdownHandler.processDeadRegion(
              regionInfo, result, this, this.catalogTracker);
          if (assign) {
            ZKAssign.createOrForceNodeOffline(watcher, regionInfo,
                master.getServerName());
            if (!nodes.contains(regionInfo.getEncodedName())) {
              nodes.add(regionInfo.getEncodedName());
            }
          }
        } catch (KeeperException.NoNodeException nne) {
          // This is fine
        }
      }
    }
  }

  if (!nodes.isEmpty()) {
    for (String encodedRegionName : nodes) {
      processRegionInTransition(encodedRegionName, null, deadServers);
    }
  }
}
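To make the parameter shapes concrete, a hedged sketch of assembling the inputs (hri, metaRow and deadServerName are placeholders; in the real failover flow the map is built from the master's META scan and nodes from the assignment znode's children):

// Illustrative input construction only.
Map<ServerName, List<Pair<HRegionInfo, Result>>> deadServers =
    new HashMap<ServerName, List<Pair<HRegionInfo, Result>>>();
List<Pair<HRegionInfo, Result>> carried = new ArrayList<Pair<HRegionInfo, Result>>();
carried.add(new Pair<HRegionInfo, Result>(hri, metaRow));
deadServers.put(deadServerName, carried);
List<String> nodes = ZKUtil.listChildrenNoWatch(watcher, watcher.assignmentZNode);
processDeadServersAndRecoverLostRegions(deadServers, nodes);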
Project: hbase-0.94.8-qod    File: ServerManager.java
public synchronized void expireServer(final ServerName serverName) {
  boolean carryingRoot = services.getAssignmentManager().isCarryingRoot(serverName);
  if (!services.isServerShutdownHandlerEnabled() && (!carryingRoot || !this.isSSHForRootEnabled)) {
    LOG.info("Master doesn't enable ServerShutdownHandler during initialization, "
        + "delay expiring server " + serverName);
    this.deadNotExpiredServers.add(serverName);
    return;
  }
  if (!this.onlineServers.containsKey(serverName)) {
    LOG.warn("Received expiration of " + serverName +
      " but server is not currently online");
  }
  if (this.deadservers.contains(serverName)) {
    // TODO: Can this happen?  It shouldn't be online in this case?
    LOG.warn("Received expiration of " + serverName +
        " but server shutdown is already in progress");
    return;
  }
  // Remove the server from the known servers lists and update load info BUT
  // add to deadservers first; do this so it'll show in dead servers list if
  // not in online servers list.
  this.deadservers.add(serverName);
  this.onlineServers.remove(serverName);
  synchronized (onlineServers) {
    onlineServers.notifyAll();
  }
  this.serverConnections.remove(serverName);
  // If cluster is going down, yes, servers are going to be expiring; don't
  // process as a dead server
  if (this.clusterShutdown) {
    LOG.info("Cluster shutdown set; " + serverName +
      " expired; onlineServers=" + this.onlineServers.size());
    if (this.onlineServers.isEmpty()) {
      master.stop("Cluster shutdown set; onlineServer=0");
    }
    return;
  }

  boolean carryingMeta = services.getAssignmentManager().isCarryingMeta(serverName);
  if (carryingRoot || carryingMeta) {
    this.services.getExecutorService().submit(new MetaServerShutdownHandler(this.master,
      this.services, this.deadservers, serverName, carryingRoot, carryingMeta));
  } else {
    this.services.getExecutorService().submit(new ServerShutdownHandler(this.master,
      this.services, this.deadservers, serverName, true));
  }
  LOG.debug("Added=" + serverName +
    " to dead servers, submitted shutdown handler to be executed, root=" +
      carryingRoot + ", meta=" + carryingMeta);
}
Project: hbase-0.94.8-qod    File: AssignmentManager.java
/**
 * Processes list of dead servers from result of META scan and regions in RIT
 * <p>
 * This is used for failover to recover the lost regions that belonged to
 * RegionServers which failed while there was no active master or regions 
 * that were in RIT.
 * <p>
 * 
 * @param deadServers
 *          The list of dead servers which failed while there was no active
 *          master. Can be null.
 * @param nodes
 *          The regions in RIT
 * @throws IOException
 * @throws KeeperException
 */
private void processDeadServersAndRecoverLostRegions(
    Map<ServerName, List<Pair<HRegionInfo, Result>>> deadServers,
    List<String> nodes) throws IOException, KeeperException {
  if (null != deadServers) {
    Set<ServerName> actualDeadServers = this.serverManager.getDeadServers();
    for (Map.Entry<ServerName, List<Pair<HRegionInfo, Result>>> deadServer : 
      deadServers.entrySet()) {
      // skip regions of dead servers because SSH will process regions during rs expiration.
      // see HBASE-5916
      if (actualDeadServers.contains(deadServer.getKey())) {
        for (Pair<HRegionInfo, Result> deadRegion : deadServer.getValue()) {
          HRegionInfo hri = deadRegion.getFirst();
          // Delete znode of region in transition if table is disabled or disabling. If a region
          // server went down during master initialization then SSH cannot handle the regions of
          // partially disabled tables because in memory region state information may not be
          // available with master.
          deleteNodeAndOfflineRegion(hri);
          nodes.remove(deadRegion.getFirst().getEncodedName());
        }
        continue;
      }
      List<Pair<HRegionInfo, Result>> regions = deadServer.getValue();
      for (Pair<HRegionInfo, Result> region : regions) {
        HRegionInfo regionInfo = region.getFirst();
        Result result = region.getSecond();
        // If region was in transition (was in zk) force it offline for
        // reassign
        try {
          RegionTransitionData data = ZKAssign.getData(watcher,
              regionInfo.getEncodedName());

          // If zk node of this region has been updated by a live server,
          // we consider that this region is being handled.
          // So we should skip it and process it in
          // processRegionsInTransition.
          if (data != null && data.getOrigin() != null && 
              serverManager.isServerOnline(data.getOrigin())) {
            LOG.info("The region " + regionInfo.getEncodedName()
                + "is being handled on " + data.getOrigin());
            continue;
          }
          // Process with existing RS shutdown code
          boolean assign = ServerShutdownHandler.processDeadRegion(
              regionInfo, result, this, this.catalogTracker);
          if (assign) {
            ZKAssign.createOrForceNodeOffline(watcher, regionInfo,
                master.getServerName());
            if (!nodes.contains(regionInfo.getEncodedName())) {
              nodes.add(regionInfo.getEncodedName());
            }
          }
        } catch (KeeperException.NoNodeException nne) {
          // This is fine
        }
      }
    }
  }

  if (!nodes.isEmpty()) {
    for (String encodedRegionName : nodes) {
      processRegionInTransition(encodedRegionName, null, deadServers);
    }
  }
}
Project: DominoHBase    File: ServerManager.java
public synchronized void expireServer(final ServerName serverName) {
  if (!services.isServerShutdownHandlerEnabled()) {
    LOG.info("Master doesn't enable ServerShutdownHandler during initialization, "
        + "delay expiring server " + serverName);
    this.queuedDeadServers.add(serverName);
    return;
  }
  if (!this.onlineServers.containsKey(serverName)) {
    LOG.warn("Received expiration of " + serverName +
      " but server is not currently online");
  }
  if (this.deadservers.contains(serverName)) {
    // TODO: Can this happen?  It shouldn't be online in this case?
    LOG.warn("Received expiration of " + serverName +
        " but server shutdown is already in progress");
    return;
  }
  // Remove the server from the known servers lists and update load info BUT
  // add to deadservers first; do this so it'll show in dead servers list if
  // not in online servers list.
  this.deadservers.add(serverName);
  this.onlineServers.remove(serverName);
  synchronized (onlineServers) {
    onlineServers.notifyAll();
  }
  this.serverConnections.remove(serverName);
  // If cluster is going down, yes, servers are going to be expiring; don't
  // process as a dead server
  if (this.clusterShutdown) {
    LOG.info("Cluster shutdown set; " + serverName +
      " expired; onlineServers=" + this.onlineServers.size());
    if (this.onlineServers.isEmpty()) {
      master.stop("Cluster shutdown set; onlineServer=0");
    }
    return;
  }

  boolean carryingRoot = services.getAssignmentManager().isCarryingRoot(serverName);
  boolean carryingMeta = services.getAssignmentManager().isCarryingMeta(serverName);
  if (carryingRoot || carryingMeta) {
    this.services.getExecutorService().submit(new MetaServerShutdownHandler(this.master,
      this.services, this.deadservers, serverName, carryingRoot, carryingMeta));
  } else {
    this.services.getExecutorService().submit(new ServerShutdownHandler(this.master,
      this.services, this.deadservers, serverName, true));
  }
  LOG.debug("Added=" + serverName +
    " to dead servers, submitted shutdown handler to be executed, root=" +
      carryingRoot + ", meta=" + carryingMeta);
}
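Unlike the forks above, this variant has no ROOT special case: every early expiration lands in queuedDeadServers. A hedged sketch (the drain method is an assumption, not DominoHBase code) of flushing that queue once initialization completes and the shutdown handler is enabled:

// Hypothetical drain step, run after services.isServerShutdownHandlerEnabled()
// becomes true; iterates a copy so removal during the loop is safe.
synchronized void drainQueuedDeadServersSketch() {
  for (ServerName sn : new ArrayList<ServerName>(this.queuedDeadServers)) {
    this.queuedDeadServers.remove(sn);
    expireServer(sn); // re-enters the normal path above, now enabled
  }
}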
Project: DominoHBase    File: TestAssignmentManager.java
private void processServerShutdownHandler(CatalogTracker ct, AssignmentManager am, boolean splitRegion)
    throws IOException, ServiceException {
  // Make sure our new AM gets callbacks; once registered, can't unregister.
  // Thats ok because we make a new zk watcher for each test.
  this.watcher.registerListenerFirst(am);

  // Need to set up a fake scan of meta for the servershutdown handler
  // Make an RS Interface implementation.  Make it so a scanner can go against it.
  ClientProtocol implementation = Mockito.mock(ClientProtocol.class);
  // Get a meta row result that has region up on SERVERNAME_A

  Result r = null;
  if (splitRegion) {
    r = MetaMockingUtil.getMetaTableRowResultAsSplitRegion(REGIONINFO, SERVERNAME_A);
  } else {
    r = MetaMockingUtil.getMetaTableRowResult(REGIONINFO, SERVERNAME_A);
  }

  ScanResponse.Builder builder = ScanResponse.newBuilder();
  builder.setMoreResults(true);
  builder.addResult(ProtobufUtil.toResult(r));
  Mockito.when(implementation.scan(
    (RpcController)Mockito.any(), (ScanRequest)Mockito.any())).
      thenReturn(builder.build());

  // Get a connection w/ mocked up common methods.
  HConnection connection =
    HConnectionTestingUtility.getMockedConnectionAndDecorate(HTU.getConfiguration(),
      null, implementation, SERVERNAME_B, REGIONINFO);

  // Make it so we can get a catalogtracker from servermanager.. .needed
  // down in guts of server shutdown handler.
  Mockito.when(ct.getConnection()).thenReturn(connection);
  Mockito.when(this.server.getCatalogTracker()).thenReturn(ct);

  // Now make a server shutdown handler instance and invoke process.
  // Have it that SERVERNAME_A died.
  DeadServer deadServers = new DeadServer();
  deadServers.add(SERVERNAME_A);
  // I need a services instance that will return the AM
  MasterServices services = Mockito.mock(MasterServices.class);
  Mockito.when(services.getAssignmentManager()).thenReturn(am);
  Mockito.when(services.getServerManager()).thenReturn(this.serverManager);
  Mockito.when(services.getZooKeeper()).thenReturn(this.watcher);
  ServerShutdownHandler handler = new ServerShutdownHandler(this.server,
    services, deadServers, SERVERNAME_A, false);
  am.failoverCleanupDone.set(true);
  handler.process();
  // The region in r will have been assigned.  It'll be up in zk as unassigned.
}
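A hedged usage example for the helper above (the @Test method and the createAssignmentManager() builder are assumptions for illustration; only the helper itself appears in the source):

@Test
public void testServerShutdownHandlerAssignsRegion() throws Exception {
  CatalogTracker ct = Mockito.mock(CatalogTracker.class);
  AssignmentManager am = createAssignmentManager(); // hypothetical builder
  // Exercise the plain-region path; pass true instead to cover the
  // split-region row from MetaMockingUtil.
  processServerShutdownHandler(ct, am, false);
}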
Project: hindex    File: ServerManager.java
public synchronized void expireServer(final ServerName serverName) {
  boolean carryingRoot = services.getAssignmentManager().isCarryingRoot(serverName);
  if (!services.isServerShutdownHandlerEnabled() && (!carryingRoot || !this.isSSHForRootEnabled)) {
    LOG.info("Master doesn't enable ServerShutdownHandler during initialization, "
        + "delay expiring server " + serverName);
    this.deadNotExpiredServers.add(serverName);
    return;
  }
  if (!this.onlineServers.containsKey(serverName)) {
    LOG.warn("Received expiration of " + serverName +
      " but server is not currently online");
  }
  if (this.deadservers.contains(serverName)) {
    // TODO: Can this happen?  It shouldn't be online in this case?
    LOG.warn("Received expiration of " + serverName +
        " but server shutdown is already in progress");
    return;
  }
  // Remove the server from the known servers lists and update load info BUT
  // add to deadservers first; do this so it'll show in dead servers list if
  // not in online servers list.
  this.deadservers.add(serverName);
  this.onlineServers.remove(serverName);
  synchronized (onlineServers) {
    onlineServers.notifyAll();
  }
  this.serverConnections.remove(serverName);
  // If cluster is going down, yes, servers are going to be expiring; don't
  // process as a dead server
  if (this.clusterShutdown) {
    LOG.info("Cluster shutdown set; " + serverName +
      " expired; onlineServers=" + this.onlineServers.size());
    if (this.onlineServers.isEmpty()) {
      master.stop("Cluster shutdown set; onlineServer=0");
    }
    return;
  }

  boolean carryingMeta = services.getAssignmentManager().isCarryingMeta(serverName);
  if (carryingRoot || carryingMeta) {
    this.services.getExecutorService().submit(new MetaServerShutdownHandler(this.master,
      this.services, this.deadservers, serverName, carryingRoot, carryingMeta));
  } else {
    this.services.getExecutorService().submit(new ServerShutdownHandler(this.master,
      this.services, this.deadservers, serverName, true));
  }
  LOG.debug("Added=" + serverName +
    " to dead servers, submitted shutdown handler to be executed, root=" +
      carryingRoot + ", meta=" + carryingMeta);
}
Project: hindex    File: AssignmentManager.java
/**
 * Processes list of dead servers from result of META scan and regions in RIT
 * <p>
 * This is used for failover to recover the lost regions that belonged to
 * RegionServers which failed while there was no active master or regions 
 * that were in RIT.
 * <p>
 * 
 * @param deadServers
 *          The list of dead servers which failed while there was no active
 *          master. Can be null.
 * @param nodes
 *          The regions in RIT
 * @throws IOException
 * @throws KeeperException
 */
private void processDeadServersAndRecoverLostRegions(
    Map<ServerName, List<Pair<HRegionInfo, Result>>> deadServers,
    List<String> nodes) throws IOException, KeeperException {
  if (null != deadServers) {
    Set<ServerName> actualDeadServers = this.serverManager.getDeadServers();
    for (Map.Entry<ServerName, List<Pair<HRegionInfo, Result>>> deadServer : 
      deadServers.entrySet()) {
      // skip regions of dead servers because SSH will process regions during rs expiration.
      // see HBASE-5916
      if (actualDeadServers.contains(deadServer.getKey())) {
        for (Pair<HRegionInfo, Result> deadRegion : deadServer.getValue()) {
          HRegionInfo hri = deadRegion.getFirst();
          // Delete znode of region in transition if table is disabled or disabling. If a region
          // server went down during master initialization then SSH cannot handle the regions of
          // partially disabled tables because in memory region state information may not be
          // available with master.
          deleteNodeAndOfflineRegion(hri);
          nodes.remove(deadRegion.getFirst().getEncodedName());
        }
        continue;
      }
      List<Pair<HRegionInfo, Result>> regions = deadServer.getValue();
      for (Pair<HRegionInfo, Result> region : regions) {
        HRegionInfo regionInfo = region.getFirst();
        Result result = region.getSecond();
        // If region was in transition (was in zk) force it offline for
        // reassign
        try {
          RegionTransitionData data = ZKAssign.getData(watcher,
              regionInfo.getEncodedName());

          // If zk node of this region has been updated by a live server,
          // we consider that this region is being handled.
          // So we should skip it and process it in
          // processRegionsInTransition.
          if (data != null && data.getOrigin() != null && 
              serverManager.isServerOnline(data.getOrigin())) {
            LOG.info("The region " + regionInfo.getEncodedName()
                + "is being handled on " + data.getOrigin());
            continue;
          }
          // Process with existing RS shutdown code
          boolean assign = ServerShutdownHandler.processDeadRegion(
              regionInfo, result, this, this.catalogTracker);
          if (assign) {
            ZKAssign.createOrForceNodeOffline(watcher, regionInfo,
                master.getServerName());
            if (!nodes.contains(regionInfo.getEncodedName())) {
              nodes.add(regionInfo.getEncodedName());
            }
          }
        } catch (KeeperException.NoNodeException nne) {
          // This is fine
        }
      }
    }
  }

  if (!nodes.isEmpty()) {
    for (String encodedRegionName : nodes) {
      processRegionInTransition(encodedRegionName, null, deadServers);
    }
  }
}
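For reference, the contract of the processDeadRegion() call that every fork above relies on, restated as a hedged sketch (semantics inferred from the call sites here; variable names assumed):

// true  -> the region still needs assignment (e.g. its table is enabled and
//          it is not an already-handled split parent), so its znode is forced
//          OFFLINE and it is queued for reassignment;
// false -> the region can be skipped entirely.
boolean assign = ServerShutdownHandler.processDeadRegion(
    regionInfo, metaResult, assignmentManager, catalogTracker);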