/**
 * Update a region state. It will be put in transition if not already there.
 *
 * If we can't find the region info based on the region name in
 * the transition, log a warning and return null.
 */
public RegionState updateRegionState(
    final RegionTransition transition, final State state) {
  byte[] regionName = transition.getRegionName();
  HRegionInfo regionInfo = getRegionInfo(regionName);
  if (regionInfo == null) {
    String prettyRegionName = HRegionInfo.prettyPrint(
      HRegionInfo.encodeRegionName(regionName));
    LOG.warn("Failed to find region " + prettyRegionName
      + " in updating its state to " + state
      + " based on region transition " + transition);
    return null;
  }
  return updateRegionState(regionInfo, state, transition.getServerName());
}
/**
 * Gets the current state of all regions of the table.
 * This method looks at the in-memory state. It does not go to <code>hbase:meta</code>.
 * Guaranteed to return a key for every state in
 * {@link org.apache.hadoop.hbase.master.RegionState.State}.
 *
 * @param tableName the table to look up
 * @return a map from each region state to the regions of <code>tableName</code> in that state
 */
public synchronized Map<RegionState.State, List<HRegionInfo>>
    getRegionByStateOfTable(TableName tableName) {
  Map<RegionState.State, List<HRegionInfo>> tableRegions =
    new HashMap<State, List<HRegionInfo>>();
  for (State state : State.values()) {
    tableRegions.put(state, new ArrayList<HRegionInfo>());
  }
  Map<String, RegionState> indexMap = regionStatesTableIndex.get(tableName);
  if (indexMap == null) return tableRegions;
  for (RegionState regionState : indexMap.values()) {
    tableRegions.get(regionState.getState()).add(regionState.getRegion());
  }
  return tableRegions;
}
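// Illustrative only (not in the original source): a minimal sketch of how code in
// this class might consume the per-state map returned by getRegionByStateOfTable().
// The helper method name is an assumption added for illustration.
private void logRegionCountsByState(final TableName tableName) {
  Map<RegionState.State, List<HRegionInfo>> byState = getRegionByStateOfTable(tableName);
  for (Map.Entry<RegionState.State, List<HRegionInfo>> entry : byState.entrySet()) {
    if (!entry.getValue().isEmpty()) {
      LOG.debug(tableName + ": " + entry.getValue().size()
        + " region(s) in state " + entry.getKey());
    }
  }
}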
@Override
protected void populatePool(ExecutorService pool) {
  RegionStates regionStates = assignmentManager.getRegionStates();
  for (HRegionInfo region : regions) {
    if (regionStates.isRegionInTransition(region)
        && !regionStates.isRegionInState(region, State.FAILED_CLOSE)) {
      continue;
    }
    final HRegionInfo hri = region;
    pool.execute(Trace.wrap("DisableTableHandler.BulkDisabler", new Runnable() {
      public void run() {
        assignmentManager.unassign(hri, true);
      }
    }));
  }
}
private void assignMetaZkLess(RegionStates regionStates, RegionState regionState,
    long timeout, Set<ServerName> previouslyFailedRs) throws IOException, KeeperException {
  ServerName currentServer = regionState.getServerName();
  if (serverManager.isServerOnline(currentServer)) {
    LOG.info("Meta was in transition on " + currentServer);
    assignmentManager.processRegionInTransitionZkLess();
  } else {
    if (currentServer != null) {
      if (regionState.getRegion().getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
        splitMetaLogBeforeAssignment(currentServer);
        regionStates.logSplit(HRegionInfo.FIRST_META_REGIONINFO);
        previouslyFailedRs.add(currentServer);
      }
    }
    LOG.info("Re-assigning hbase:meta, it was on " + currentServer);
    regionStates.updateRegionState(regionState.getRegion(), State.OFFLINE);
    assignmentManager.assignMeta(regionState.getRegion());
  }
}
protected static void waitRegionInTransition(final MasterProcedureEnv env,
    final List<HRegionInfo> regions) throws IOException, CoordinatedStateException {
  final AssignmentManager am = env.getMasterServices().getAssignmentManager();
  final RegionStates states = am.getRegionStates();
  for (final HRegionInfo region : regions) {
    ProcedureSyncWait.waitFor(env, "region " + region.getRegionNameAsString()
        + " in transition", new ProcedureSyncWait.Predicate<Boolean>() {
      @Override
      public Boolean evaluate() throws IOException {
        if (states.isRegionInState(region, State.FAILED_OPEN)) {
          am.regionOffline(region);
        }
        return !states.isRegionInTransition(region);
      }
    });
  }
}
/**
 * Set the region as OFFLINE up in ZooKeeper.
 *
 * @param state the current state of the region
 * @param destination the server the region is being assigned to
 * @return the version of the offline node if setting the OFFLINE node was
 *         successful, -1 otherwise.
 */
private int setOfflineInZooKeeper(final RegionState state, final ServerName destination) {
  if (!state.isClosed() && !state.isOffline()) {
    String msg = "Unexpected state : " + state + " .. Cannot transition it to OFFLINE.";
    this.server.abort(msg, new IllegalStateException(msg));
    return -1;
  }
  regionStates.updateRegionState(state.getRegion(), State.OFFLINE);
  int versionOfOfflineNode;
  try {
    // get the version after setting the znode to OFFLINE
    versionOfOfflineNode = ZKAssign.createOrForceNodeOffline(watcher,
      state.getRegion(), destination);
    if (versionOfOfflineNode == -1) {
      LOG.warn("Attempted to create/force node into OFFLINE state before "
        + "completing assignment but failed to do so for " + state);
      return -1;
    }
  } catch (KeeperException e) {
    server.abort("Unexpected ZK exception creating/setting node OFFLINE", e);
    return -1;
  }
  return versionOfOfflineNode;
}
/**
 * Recover the tables that were not fully moved to DISABLED state. These
 * tables are in DISABLING state when the master restarted/switched.
 *
 * @throws KeeperException
 * @throws IOException
 * @throws CoordinatedStateException
 */
private void recoverTableInDisablingState()
    throws KeeperException, IOException, CoordinatedStateException {
  Set<TableName> disablingTables =
    tableStateManager.getTablesInStates(ZooKeeperProtos.Table.State.DISABLING);
  if (disablingTables.size() != 0) {
    for (TableName tableName : disablingTables) {
      // Recover by calling DisableTableHandler
      LOG.info("The table " + tableName
        + " is in DISABLING state. Hence recovering by moving the table"
        + " to DISABLED state.");
      new DisableTableHandler(this.server, tableName,
        this, tableLockManager, true).prepare().process();
    }
  }
}
/**
 * Recover the tables that are not fully moved to ENABLED state. These tables
 * are in ENABLING state when the master restarted/switched.
 *
 * @throws KeeperException
 * @throws IOException
 * @throws CoordinatedStateException
 */
private void recoverTableInEnablingState()
    throws KeeperException, IOException, CoordinatedStateException {
  Set<TableName> enablingTables = tableStateManager.
    getTablesInStates(ZooKeeperProtos.Table.State.ENABLING);
  if (enablingTables.size() != 0) {
    for (TableName tableName : enablingTables) {
      // Recover by calling EnableTableHandler
      LOG.info("The table " + tableName
        + " is in ENABLING state. Hence recovering by moving the table"
        + " to ENABLED state.");
      // enableTable in sync way during master startup,
      // no need to invoke coprocessor
      EnableTableHandler eth = new EnableTableHandler(this.server, tableName,
        this, tableLockManager, true);
      try {
        eth.prepare();
      } catch (TableNotFoundException e) {
        LOG.warn("Table " + tableName + " not found in hbase:meta to recover.");
        continue;
      }
      eth.process();
    }
  }
}
/**
 * Set the region as OFFLINE up in ZooKeeper asynchronously.
 *
 * @param state the current state of the region
 * @return true if we succeeded, false otherwise (the state was incorrect or
 *         updating zk failed).
 */
private boolean asyncSetOfflineInZooKeeper(final RegionState state,
    final AsyncCallback.StringCallback cb, final ServerName destination) {
  if (!state.isClosed() && !state.isOffline()) {
    this.server.abort("Unexpected state trying to OFFLINE; " + state,
      new IllegalStateException());
    return false;
  }
  regionStates.updateRegionState(state.getRegion(), State.OFFLINE);
  try {
    ZKAssign.asyncCreateNodeOffline(watcher, state.getRegion(), destination, cb, state);
  } catch (KeeperException e) {
    if (e instanceof NodeExistsException) {
      LOG.warn("Node for " + state.getRegion() + " already exists");
    } else {
      server.abort("Unexpected ZK exception creating/setting node OFFLINE", e);
    }
    return false;
  }
  return true;
}
private void onRegionOpen(final HRegionInfo hri, final ServerName sn, long openSeqNum) {
  regionOnline(hri, sn, openSeqNum);
  if (useZKForAssignment) {
    try {
      // Delete the ZNode if exists
      ZKAssign.deleteNodeFailSilent(watcher, hri);
    } catch (KeeperException ke) {
      server.abort("Unexpected ZK exception deleting node " + hri, ke);
    }
  }

  // reset the count, if any
  failedOpenTracker.remove(hri.getEncodedName());
  if (getTableStateManager().isTableState(hri.getTable(),
      ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
    invokeUnAssign(hri);
  }
}
private String onRegionSplitReverted(ServerName sn,
    final HRegionInfo p, final HRegionInfo a, final HRegionInfo b) {
  String s = checkInStateForSplit(sn, p, a, b);
  if (!org.apache.commons.lang.StringUtils.isEmpty(s)) {
    return s;
  }
  regionOnline(p, sn);
  regionOffline(a);
  regionOffline(b);

  if (getTableStateManager().isTableState(p.getTable(),
      ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
    invokeUnAssign(p);
  }
  return null;
}
private void assignMetaZkLess(RegionStates regionStates, RegionState regionState,
    long timeout, Set<ServerName> previouslyFailedRs) throws IOException, KeeperException {
  ServerName currentServer = regionState.getServerName();
  if (serverManager.isServerOnline(currentServer)) {
    LOG.info("Meta was in transition on " + currentServer);
    assignmentManager.processRegionInTransitionZkLess();
  } else {
    if (currentServer != null) {
      splitMetaLogBeforeAssignment(currentServer);
      regionStates.logSplit(HRegionInfo.FIRST_META_REGIONINFO);
      previouslyFailedRs.add(currentServer);
    }
    LOG.info("Re-assigning hbase:meta, it was on " + currentServer);
    regionStates.updateRegionState(HRegionInfo.FIRST_META_REGIONINFO, State.OFFLINE);
    assignmentManager.assignMeta();
  }
}
/**
 * Waits until the specified region has completed assignment.
 * <p>
 * If the region is already assigned, returns immediately. Otherwise, the method
 * blocks until the region is assigned.
 * @param regionInfo region to wait on assignment for
 * @return true once the region is assigned; false if the region failed to open
 *         or the server is stopping.
 * @throws InterruptedException
 */
public boolean waitForAssignment(HRegionInfo regionInfo)
    throws InterruptedException {
  while (!regionStates.isRegionOnline(regionInfo)) {
    if (regionStates.isRegionInState(regionInfo, State.FAILED_OPEN)
        || this.server.isStopped()) {
      return false;
    }

    // We should receive a notification, but it's
    // better to have a timeout to recheck the condition here:
    // it lowers the impact of a race condition if any
    regionStates.waitForUpdate(100);
  }
  return true;
}
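// Illustrative only (not in the original source): a minimal sketch of how a caller
// holding an AssignmentManager field (here assumed to be named `assignmentManager`)
// might pair an assign request with waitForAssignment(...) to block until the region
// is online. The flags passed to assign(...) mirror the test usage shown elsewhere
// in this section (offline in ZK, force a new plan).
private void assignAndWait(final HRegionInfo hri)
    throws IOException, InterruptedException {
  assignmentManager.assign(hri, true, true);
  if (!assignmentManager.waitForAssignment(hri)) {
    throw new IOException("Assignment of " + hri.getRegionNameAsString()
      + " did not complete");
  }
}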
@Test (timeout=180000)
public void testForceAssignMergingRegion() throws Exception {
  // Region to use in test.
  final HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO;
  LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(
    server.getConfiguration());
  // Create an AM.
  AssignmentManager am = new AssignmentManager(this.server,
    this.serverManager, balancer, null, null, master.getTableLockManager());
  RegionStates regionStates = am.getRegionStates();
  try {
    // First set the state of the region to merging
    regionStates.updateRegionState(hri, RegionState.State.MERGING);
    // Now, try to assign it with force new plan
    am.assign(hri, true, true);
    assertEquals("The region should be still in merging state",
      RegionState.State.MERGING, regionStates.getRegionState(hri).getState());
  } finally {
    am.shutdown();
  }
}
/**
 * Update a region state. It will be put in transition if not already there.
 *
 * If we can't find the region info based on the region name in
 * the transition, log a warning and return null.
 */
public synchronized RegionState updateRegionState(
    final RegionTransition transition, final State state) {
  byte[] regionName = transition.getRegionName();
  HRegionInfo regionInfo = getRegionInfo(regionName);
  if (regionInfo == null) {
    String prettyRegionName = HRegionInfo.prettyPrint(
      HRegionInfo.encodeRegionName(regionName));
    LOG.warn("Failed to find region " + prettyRegionName
      + " in updating its state to " + state
      + " based on region transition " + transition);
    return null;
  }
  return updateRegionState(regionInfo, state, transition.getServerName());
}
/**
 * A region is offline, won't be in transition any more. Its state
 * should be the specified expected state, which can only be
 * Split/Merged/Offline/null(=Offline)/SplittingNew/MergingNew.
 */
public synchronized void regionOffline(
    final HRegionInfo hri, final State expectedState) {
  Preconditions.checkArgument(expectedState == null
    || RegionState.isUnassignable(expectedState),
      "Offlined region should not be " + expectedState);
  String encodedName = hri.getEncodedName();
  State newState = expectedState == null ? State.OFFLINE : expectedState;
  updateRegionState(hri, newState);
  regionsInTransition.remove(encodedName);

  ServerName oldServerName = regionAssignments.remove(hri);
  if (oldServerName != null) {
    LOG.info("Offlined " + hri.getShortNameToLog() + " from " + oldServerName);
    Set<HRegionInfo> oldRegions = serverHoldings.get(oldServerName);
    oldRegions.remove(hri);
    if (oldRegions.isEmpty()) {
      serverHoldings.remove(oldServerName);
    }
  }
}
@Test
public void testForceAssignMergingRegion() throws Exception {
  // Region to use in test.
  final HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO;
  // Need a mocked catalog tracker.
  CatalogTracker ct = Mockito.mock(CatalogTracker.class);
  LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(
    server.getConfiguration());
  // Create an AM.
  AssignmentManager am = new AssignmentManager(this.server,
    this.serverManager, ct, balancer, null, null, master.getTableLockManager());
  RegionStates regionStates = am.getRegionStates();
  try {
    // First set the state of the region to merging
    regionStates.updateRegionState(hri, RegionState.State.MERGING);
    // Now, try to assign it with force new plan
    am.assign(hri, true, true);
    assertEquals("The region should be still in merging state",
      RegionState.State.MERGING, regionStates.getRegionState(hri).getState());
  } finally {
    am.shutdown();
  }
}
/**
 * Add a region to RegionStates with the specified state.
 * If the region is already in RegionStates, this call has
 * no effect, and the original state is returned.
 *
 * @param hri the region info to create a state for
 * @param newState the state to set the region to
 * @param serverName the server the region is transitioning on
 * @param lastHost the last server that hosts the region
 * @return the current state
 */
public synchronized RegionState createRegionState(final HRegionInfo hri,
    State newState, ServerName serverName, ServerName lastHost) {
  if (newState == null || (newState == State.OPEN && serverName == null)) {
    newState = State.OFFLINE;
  }
  if (hri.isOffline() && hri.isSplit()) {
    newState = State.SPLIT;
    serverName = null;
  }
  String encodedName = hri.getEncodedName();
  RegionState regionState = regionStates.get(encodedName);
  if (regionState != null) {
    LOG.warn("Tried to create a state for a region already in RegionStates, "
      + "used existing: " + regionState + ", ignored new: " + newState);
  } else {
    regionState = new RegionState(hri, newState, serverName);
    putRegionState(regionState);
    if (newState == State.OPEN) {
      if (!serverName.equals(lastHost)) {
        LOG.warn("Open region's last host " + lastHost
          + " should be the same as the current one " + serverName
          + ", ignored the last and used the current one");
        lastHost = serverName;
      }
      lastAssignments.put(encodedName, lastHost);
      regionAssignments.put(hri, lastHost);
    } else if (!regionState.isUnassignable()) {
      regionsInTransition.put(encodedName, regionState);
    }
    if (lastHost != null && newState != State.SPLIT) {
      addToServerHoldings(lastHost, hri);
      if (newState != State.OPEN) {
        oldAssignments.put(encodedName, lastHost);
      }
    }
  }
  return regionState;
}
/**
 * Update a region state. It will be put in transition if not already there.
 */
public RegionState updateRegionState(
    final HRegionInfo hri, final State state) {
  RegionState regionState = getRegionState(hri.getEncodedName());
  return updateRegionState(hri, state,
    regionState == null ? null : regionState.getServerName());
}
/**
 * Transition a region state to OPEN from OPENING/PENDING_OPEN.
 */
public synchronized RegionState transitionOpenFromPendingOpenOrOpeningOnServer(
    final RegionTransition transition, final RegionState fromState, final ServerName sn) {
  if (fromState.isPendingOpenOrOpeningOnServer(sn)) {
    return updateRegionState(transition, State.OPEN);
  }
  return null;
}
/**
 * A region is online, won't be in transition any more.
 * We can't confirm it is really online on specified region server
 * because it hasn't been put in region server's online region list yet.
 */
public void regionOnline(final HRegionInfo hri, final ServerName serverName, long openSeqNum) {
  String encodedName = hri.getEncodedName();
  if (!serverManager.isServerOnline(serverName)) {
    // This is possible if the region server dies before master gets a
    // chance to handle ZK event in time. At this time, if the dead server
    // is already processed by SSH, we should ignore this event.
    // If not processed yet, ignore and let SSH deal with it.
    LOG.warn("Ignored, " + encodedName + " was opened on a dead server: " + serverName);
    return;
  }

  updateRegionState(hri, State.OPEN, serverName, openSeqNum);

  synchronized (this) {
    regionsInTransition.remove(encodedName);
    ServerName oldServerName = regionAssignments.put(hri, serverName);
    if (!serverName.equals(oldServerName)) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Onlined " + hri.getShortNameToLog() + " on " + serverName);
      }
      addToServerHoldings(serverName, hri);
      addToReplicaMapping(hri);
      if (oldServerName == null) {
        oldServerName = oldAssignments.remove(encodedName);
      }
      if (oldServerName != null
          && !oldServerName.equals(serverName)
          && serverHoldings.containsKey(oldServerName)) {
        LOG.info("Offlined " + hri.getShortNameToLog() + " from " + oldServerName);
        removeFromServerHoldings(oldServerName, hri);
      }
    }
  }
}
/**
 * A region is offline, won't be in transition any more. Its state
 * should be the specified expected state, which can only be
 * Split/Merged/Offline/null(=Offline)/SplittingNew/MergingNew.
 */
public void regionOffline(
    final HRegionInfo hri, final State expectedState) {
  Preconditions.checkArgument(expectedState == null
    || RegionState.isUnassignable(expectedState),
      "Offlined region should not be " + expectedState);
  if (isRegionInState(hri, State.SPLITTING_NEW, State.MERGING_NEW)) {
    // Remove it from all region maps
    deleteRegion(hri);
    return;
  }
  State newState = expectedState == null ? State.OFFLINE : expectedState;
  updateRegionState(hri, newState);
  String encodedName = hri.getEncodedName();
  synchronized (this) {
    regionsInTransition.remove(encodedName);
    ServerName oldServerName = regionAssignments.remove(hri);
    if (oldServerName != null && serverHoldings.containsKey(oldServerName)) {
      if (newState == State.MERGED || newState == State.SPLIT
          || hri.isMetaRegion() || tableStateManager.isTableState(hri.getTable(),
            ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
        // Offline the region only if it's merged/split, or the table is disabled/disabling.
        // Otherwise, offline it from this server only when it is online on a different server.
        LOG.info("Offlined " + hri.getShortNameToLog() + " from " + oldServerName);
        removeFromServerHoldings(oldServerName, hri);
        removeFromReplicaMapping(hri);
      } else {
        // Need to remember it so that we can offline it from this
        // server when it is online on a different server.
        oldAssignments.put(encodedName, oldServerName);
      }
    }
  }
}
static boolean isOneOfStates(RegionState regionState, State... states) {
  State s = regionState != null ? regionState.getState() : null;
  for (State state : states) {
    if (s == state) return true;
  }
  return false;
}
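// Illustrative only (not in the original source): a minimal sketch of how code in
// this class might use isOneOfStates to short-circuit work for regions that are
// already split or merged. The helper method name is an assumption; the
// `regionStates` map is the same field used by createRegionState above.
boolean isSplitOrMerged(final String encodedName) {
  RegionState regionState = regionStates.get(encodedName);
  return isOneOfStates(regionState, State.SPLIT, State.MERGED);
}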
/**
 * Pull the region state from a catalog table {@link Result}.
 * @param r Result to pull the region state from
 * @param replicaId the replica id whose state column should be read
 * @return the region state, or OPEN if there's no value written.
 */
static State getRegionState(final Result r, int replicaId) {
  Cell cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY, getStateColumn(replicaId));
  if (cell == null || cell.getValueLength() == 0) return State.OPEN;
  return State.valueOf(Bytes.toString(cell.getValueArray(),
    cell.getValueOffset(), cell.getValueLength()));
}
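// Illustrative only (not in the original source): given the decoding above, the state
// column evidently stores the State enum name as a UTF-8 string. This minimal sketch
// shows the matching encoding; it is an assumption for illustration, not the writer
// HBase actually uses.
static byte[] encodeRegionState(final State state) {
  return Bytes.toBytes(state.name());
}
// Round-trip check: State.valueOf(Bytes.toString(encodeRegionState(State.SPLITTING)))
// yields State.SPLITTING, matching what getRegionState(...) would decode.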
private void handleDisableTable() throws IOException, CoordinatedStateException {
  // Set table disabling flag up in zk.
  this.assignmentManager.getTableStateManager().setTableState(this.tableName,
    ZooKeeperProtos.Table.State.DISABLING);
  boolean done = false;
  while (true) {
    // Get list of online regions that are of this table. Regions that are
    // already closed will not be included in this list; i.e. the returned
    // list is not ALL regions in a table, it's all online regions according
    // to the in-memory state on this master.
    final List<HRegionInfo> regions = this.assignmentManager
      .getRegionStates().getRegionsOfTable(tableName);
    if (regions.size() == 0) {
      done = true;
      break;
    }
    LOG.info("Offlining " + regions.size() + " regions.");
    BulkDisabler bd = new BulkDisabler(this.server, regions);
    try {
      if (bd.bulkAssign()) {
        done = true;
        break;
      }
    } catch (InterruptedException e) {
      LOG.warn("Disable was interrupted");
      // Preserve the interrupt.
      Thread.currentThread().interrupt();
      break;
    }
  }
  // Flip the table to disabled if successful.
  if (done) this.assignmentManager.getTableStateManager().setTableState(this.tableName,
    ZooKeeperProtos.Table.State.DISABLED);
  LOG.info("Disabled table, " + this.tableName + ", is done=" + done);
}
@Override
public void checkTableModifiable(final TableName tableName)
    throws IOException, TableNotFoundException, TableNotDisabledException {
  if (isCatalogTable(tableName)) {
    throw new IOException("Can't modify catalog tables");
  }
  if (!MetaTableAccessor.tableExists(getConnection(), tableName)) {
    throw new TableNotFoundException(tableName);
  }
  if (!getAssignmentManager().getTableStateManager().
      isTableState(tableName, ZooKeeperProtos.Table.State.DISABLED)) {
    throw new TableNotDisabledException(tableName);
  }
}
private boolean isDisabledorDisablingRegionInRIT(final HRegionInfo region) {
  if (this.tableStateManager.isTableState(region.getTable(),
      ZooKeeperProtos.Table.State.DISABLED,
      ZooKeeperProtos.Table.State.DISABLING) || replicasToClose.contains(region)) {
    LOG.info("Table " + region.getTable() + " is disabled or disabling;"
      + " skipping assign of " + region.getRegionNameAsString());
    offlineDisabledRegion(region);
    return true;
  }
  return false;
}
/**
 * Waits until the specified regions have completed assignment, or the deadline is reached.
 * @param regionSet set of regions to wait on; the set is modified and assigned regions
 *          are removed from it
 * @param waitTillAllAssigned true if we should wait for all the regions to be assigned
 * @param deadline the timestamp after which the wait is aborted
 * @return true if all the regions are assigned, false otherwise.
 * @throws InterruptedException
 */
protected boolean waitForAssignment(final Collection<HRegionInfo> regionSet,
    final boolean waitTillAllAssigned, final long deadline) throws InterruptedException {
  // We're not synchronizing on regionsInTransition now because we don't use any iterator.
  while (!regionSet.isEmpty() && !server.isStopped() && deadline > System.currentTimeMillis()) {
    int failedOpenCount = 0;
    Iterator<HRegionInfo> regionInfoIterator = regionSet.iterator();
    while (regionInfoIterator.hasNext()) {
      HRegionInfo hri = regionInfoIterator.next();
      if (regionStates.isRegionOnline(hri) || regionStates.isRegionInState(hri,
          State.SPLITTING, State.SPLIT, State.MERGING, State.MERGED)) {
        regionInfoIterator.remove();
      } else if (regionStates.isRegionInState(hri, State.FAILED_OPEN)) {
        failedOpenCount++;
      }
    }
    if (!waitTillAllAssigned) {
      // No need to wait, let assignment go on asynchronously
      break;
    }
    if (!regionSet.isEmpty()) {
      if (failedOpenCount == regionSet.size()) {
        // all the regions we are waiting on had an error on open.
        break;
      }
      regionStates.waitForUpdate(100);
    }
  }
  return regionSet.isEmpty();
}
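// Illustrative only (not in the original source): a sketch of a convenience overload
// that derives the deadline from a configured timeout before delegating to the bulk
// wait above. Both the overload and the configuration key are assumptions added for
// illustration.
protected boolean waitForAssignment(final Collection<HRegionInfo> regionSet,
    final boolean waitTillAllAssigned) throws InterruptedException {
  long timeout = server.getConfiguration().getLong(
    "hbase.master.assignment.wait.timeout", 60000); // assumed key, not from the source
  return waitForAssignment(regionSet, waitTillAllAssigned,
    System.currentTimeMillis() + timeout);
}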
/**
 * Assigns all user regions, if any exist. Used during cluster startup.
 * <p>
 * This is a synchronous call and will return once every region has been
 * assigned. If anything fails, an exception is thrown and the cluster
 * should be shutdown.
 * @throws InterruptedException
 * @throws IOException
 */
private void assignAllUserRegions(Map<HRegionInfo, ServerName> allRegions)
    throws IOException, InterruptedException {
  if (allRegions == null || allRegions.isEmpty()) return;

  // Determine what type of assignment to do on startup
  boolean retainAssignment = server.getConfiguration().
    getBoolean("hbase.master.startup.retainassign", true);

  Set<HRegionInfo> regionsFromMetaScan = allRegions.keySet();
  if (retainAssignment) {
    assign(allRegions);
  } else {
    List<HRegionInfo> regions = new ArrayList<HRegionInfo>(regionsFromMetaScan);
    assign(regions);
  }

  for (HRegionInfo hri : regionsFromMetaScan) {
    TableName tableName = hri.getTable();
    if (!tableStateManager.isTableState(tableName,
        ZooKeeperProtos.Table.State.ENABLED)) {
      setEnabledTable(tableName);
    }
  }
  // assign all the replicas that were not recorded in the meta
  assign(replicaRegionsNotRecordedInMeta(regionsFromMetaScan, server));
}
void processRegionInTransitionZkLess() {
  // We need to send the RPC call again for PENDING_OPEN/PENDING_CLOSE regions
  // in case the RPC call was not sent out yet before the master was shut down,
  // since we update the state before we send the RPC call. We can't update
  // the state after the RPC call. Otherwise, we don't know what's happened
  // to the region if the master dies right after the RPC call is out.
  Map<String, RegionState> rits = regionStates.getRegionsInTransition();
  for (RegionState regionState : rits.values()) {
    LOG.info("Processing " + regionState);
    ServerName serverName = regionState.getServerName();
    // Server could be null in case of FAILED_OPEN when master cannot find a region plan.
    // In that case, try assigning it here.
    if (serverName != null
        && !serverManager.getOnlineServers().containsKey(serverName)) {
      LOG.info("Server " + serverName + " isn't online. SSH will handle this");
      continue;
    }
    HRegionInfo regionInfo = regionState.getRegion();
    State state = regionState.getState();
    switch (state) {
      case CLOSED:
        invokeAssign(regionInfo);
        break;
      case PENDING_OPEN:
        retrySendRegionOpen(regionState);
        break;
      case PENDING_CLOSE:
        retrySendRegionClose(regionState);
        break;
      case FAILED_CLOSE:
      case FAILED_OPEN:
        invokeUnAssign(regionInfo);
        break;
      default:
        // No process for other states
    }
  }
}