Java 类org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory 实例源码

项目:pbase    文件:TestAssignmentManager.java   
@Test (timeout=180000)
public void testForceAssignMergingRegion() throws Exception {
  // Exercise the assign path against the first meta region.
  final HRegionInfo mergingRegion = HRegionInfo.FIRST_META_REGIONINFO;
  // Build a real load balancer from the mocked server's configuration.
  LoadBalancer loadBalancer =
      LoadBalancerFactory.getLoadBalancer(server.getConfiguration());
  // Stand up the AssignmentManager under test.
  AssignmentManager assignmentManager = new AssignmentManager(this.server,
    this.serverManager, loadBalancer, null, null, master.getTableLockManager());
  RegionStates states = assignmentManager.getRegionStates();
  try {
    // Put the region into MERGING first...
    states.updateRegionState(mergingRegion, RegionState.State.MERGING);
    // ...then try to assign it with a forced new plan.
    assignmentManager.assign(mergingRegion, true, true);
    // A forced assign must leave a merging region untouched.
    assertEquals("The region should be still in merging state",
      RegionState.State.MERGING, states.getRegionState(mergingRegion).getState());
  } finally {
    assignmentManager.shutdown();
  }
}
项目:HIndex    文件:TestAssignmentManager.java   
@Test
public void testForceAssignMergingRegion() throws Exception {
  // Exercise the assign path against the first meta region.
  final HRegionInfo mergingRegion = HRegionInfo.FIRST_META_REGIONINFO;
  // The AM constructor on this branch takes a (mocked) catalog tracker.
  CatalogTracker catalogTracker = Mockito.mock(CatalogTracker.class);
  // Build a real load balancer from the mocked server's configuration.
  LoadBalancer loadBalancer =
      LoadBalancerFactory.getLoadBalancer(server.getConfiguration());
  // Stand up the AssignmentManager under test.
  AssignmentManager assignmentManager = new AssignmentManager(this.server,
    this.serverManager, catalogTracker, loadBalancer, null, null,
    master.getTableLockManager());
  RegionStates states = assignmentManager.getRegionStates();
  try {
    // Put the region into MERGING first...
    states.updateRegionState(mergingRegion, RegionState.State.MERGING);
    // ...then try to assign it with a forced new plan.
    assignmentManager.assign(mergingRegion, true, true);
    // A forced assign must leave a merging region untouched.
    assertEquals("The region should be still in merging state",
      RegionState.State.MERGING, states.getRegionState(mergingRegion).getState());
  } finally {
    assignmentManager.shutdown();
  }
}
项目:PyroDB    文件:TestAssignmentManager.java   
@Test
public void testForceAssignMergingRegion() throws Exception {
  // The test subject: the first meta region.
  final HRegionInfo subject = HRegionInfo.FIRST_META_REGIONINFO;
  // This AM constructor variant requires a (mocked) catalog tracker.
  CatalogTracker tracker = Mockito.mock(CatalogTracker.class);
  // Real balancer built from the mocked server's configuration.
  LoadBalancer lb = LoadBalancerFactory.getLoadBalancer(server.getConfiguration());
  // Create the AssignmentManager under test.
  AssignmentManager manager = new AssignmentManager(this.server,
    this.serverManager, tracker, lb, null, null, master.getTableLockManager());
  RegionStates states = manager.getRegionStates();
  try {
    // Move the region into MERGING before the assign attempt.
    states.updateRegionState(subject, RegionState.State.MERGING);
    // Force-assign with a new plan; this must not disturb the merge.
    manager.assign(subject, true, true);
    assertEquals("The region should be still in merging state",
      RegionState.State.MERGING, states.getRegionState(subject).getState());
  } finally {
    manager.shutdown();
  }
}
项目:c5    文件:TestAssignmentManager.java   
@Test
public void testForceAssignMergingRegion() throws Exception {
  // The test subject: the first meta region.
  final HRegionInfo subject = HRegionInfo.FIRST_META_REGIONINFO;
  // This AM constructor variant requires a (mocked) catalog tracker.
  CatalogTracker tracker = Mockito.mock(CatalogTracker.class);
  // Real balancer built from the mocked server's configuration.
  LoadBalancer lb = LoadBalancerFactory.getLoadBalancer(server.getConfiguration());
  // Create the AssignmentManager under test.
  AssignmentManager manager = new AssignmentManager(this.server,
    this.serverManager, tracker, lb, null, null, master.getTableLockManager());
  RegionStates states = manager.getRegionStates();
  try {
    // Move the region into MERGING before the assign attempt.
    states.updateRegionState(subject, RegionState.State.MERGING);
    // Force-assign with a new plan; this must not disturb the merge.
    manager.assign(subject, true, true);
    assertEquals("The region should be still in merging state",
      RegionState.State.MERGING, states.getRegionState(subject).getState());
  } finally {
    manager.shutdown();
  }
}
项目:pbase    文件:HMaster.java   
/**
 * Initialize all ZK based system trackers: the load balancer and its
 * tracker, the assignment manager, the region server and draining server
 * trackers, and the snapshot/procedure managers. Also flips the
 * cluster-up flag in ZK so new region servers can proceed with startup.
 *
 * @throws IOException
 * @throws InterruptedException
 * @throws KeeperException
 * @throws CoordinatedStateException
 */
void initializeZKBasedSystemTrackers() throws IOException,
        InterruptedException, KeeperException, CoordinatedStateException {
    // The balancer must exist before the AssignmentManager that uses it.
    this.balancer = LoadBalancerFactory.getLoadBalancer(conf);
    this.loadBalancerTracker = new LoadBalancerTracker(zooKeeper, this);
    this.loadBalancerTracker.start();
    this.assignmentManager = new AssignmentManager(this, serverManager,
            this.balancer, this.service, this.metricsMaster,
            this.tableLockManager);
    // Register the AM first so it sees ZK events ahead of other listeners.
    zooKeeper.registerListenerFirst(assignmentManager);

    // Track live region servers.
    this.regionServerTracker = new RegionServerTracker(zooKeeper, this,
            this.serverManager);
    this.regionServerTracker.start();

    // Track servers being drained of regions.
    this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this,
            this.serverManager);
    this.drainingServerTracker.start();

    // Set the cluster as up.  If new RSs, they'll be waiting on this before
    // going ahead with their startup.
    boolean wasUp = this.clusterStatusTracker.isClusterUp();
    if (!wasUp) this.clusterStatusTracker.setClusterUp();

    LOG.info("Server active/primary master=" + this.serverName +
            ", sessionid=0x" +
            Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) +
            ", setting cluster-up flag (Was=" + wasUp + ")");

    // create/initialize the snapshot manager and other procedure managers
    this.snapshotManager = new SnapshotManager();
    this.mpmHost = new MasterProcedureManagerHost();
    this.mpmHost.register(this.snapshotManager);
    this.mpmHost.register(new MasterFlushTableProcedureManager());
    this.mpmHost.loadProcedures(conf);
    this.mpmHost.initialize(this, this.metricsMaster);
}
项目:pbase    文件:TestAssignmentManager.java   
@Test (timeout=180000)
public void testUnassignWithSplitAtSameTime() throws KeeperException,
    IOException, CoordinatedStateException {
  // The test subject: the first meta region.
  final HRegionInfo subject = HRegionInfo.FIRST_META_REGIONINFO;
  // Teach the ServerManager mock to acknowledge the close of the first
  // meta region on SERVERNAME_A (the default mock answer would be null).
  Mockito.when(this.serverManager.sendRegionClose(SERVERNAME_A, subject, -1)).thenReturn(true);
  // Real balancer built from the mocked server's configuration.
  LoadBalancer loadBalancer =
      LoadBalancerFactory.getLoadBalancer(server.getConfiguration());
  // Stand up the AssignmentManager under test.
  AssignmentManager assignmentManager = new AssignmentManager(this.server,
    this.serverManager, loadBalancer, null, null, master.getTableLockManager());
  try {
    // Sanity-check the mock wiring: a plain unassign should work.
    unassign(assignmentManager, SERVERNAME_A, subject);
    // Deleting the closing node fails if the unassign misbehaved.
    ZKAssign.deleteClosingNode(this.watcher, subject, SERVERNAME_A);
    // Install a SPLITTING znode in the way; keep its version so we can
    // later prove the znode was left untouched (a SPLITTING->SPLITTING
    // transition asserts continued ownership).
    int version = createNodeSplitting(this.watcher, subject, SERVERNAME_A);
    // Retry the unassign with SPLITTING in place. It should complete
    // silently: the region is being split and no longer exists.
    // TODO: what if the split fails and the parent comes back to life?
    unassign(assignmentManager, SERVERNAME_A, subject);
    // This transition fails if the znode was messed with meanwhile.
    ZKAssign.transitionNode(this.watcher, subject, SERVERNAME_A,
      EventType.RS_ZK_REGION_SPLITTING, EventType.RS_ZK_REGION_SPLITTING, version);
    assertFalse(assignmentManager.getRegionStates().isRegionInTransition(subject));
  } finally {
    assignmentManager.shutdown();
  }
}
项目:pbase    文件:TestAssignmentManager.java   
/**
 * Tests an on-the-fly RPC that was scheduled for the earlier RS on the same port
 * for openRegion. AM should assign this somewhere else. (HBASE-9721)
 */
@SuppressWarnings("unchecked")
@Test (timeout=180000)
public void testOpenCloseRegionRPCIntendedForPreviousServer() throws Exception {
  // Make every open attempt against SERVERNAME_B fail hard so the AM is
  // forced to pick a different server.
  Mockito.when(this.serverManager.sendRegionOpen(Mockito.eq(SERVERNAME_B), Mockito.eq(REGIONINFO),
    Mockito.anyInt(), (List<ServerName>)Mockito.any()))
    .thenThrow(new DoNotRetryIOException());
  // Give the AM plenty of retries.
  this.server.getConfiguration().setInt("hbase.assignment.maximum.attempts", 100);

  HRegionInfo subject = REGIONINFO;
  // Real balancer built from the mocked server's configuration.
  LoadBalancer loadBalancer = LoadBalancerFactory.getLoadBalancer(
    server.getConfiguration());
  // Stand up the AssignmentManager under test.
  AssignmentManager assignmentManager = new AssignmentManager(this.server,
    this.serverManager, loadBalancer, null, null, master.getTableLockManager());
  RegionStates states = assignmentManager.getRegionStates();
  try {
    // Seed a plan that targets the failing server.
    assignmentManager.regionPlans.put(REGIONINFO.getEncodedName(),
      new RegionPlan(REGIONINFO, null, SERVERNAME_B));

    // Should fail once, but succeed on the second attempt for the SERVERNAME_A
    assignmentManager.assign(subject, true, false);
  } finally {
    assertEquals(SERVERNAME_A, states.getRegionState(REGIONINFO).getServerName());
    assignmentManager.shutdown();
  }
}
项目:HIndex    文件:HMaster.java   
/**
 * Initialize all ZK based system trackers: catalog tracker, load balancer
 * and its tracker, assignment manager, region server and draining server
 * trackers, and the snapshot/procedure managers. Also flips the
 * cluster-up flag in ZK so new region servers can proceed with startup.
 * @throws IOException
 * @throws InterruptedException
 */
void initializeZKBasedSystemTrackers() throws IOException,
    InterruptedException, KeeperException {
  // Catalog tracker first: other components look up meta through it.
  this.catalogTracker = createCatalogTracker(this.zooKeeper, this.conf, this);
  this.catalogTracker.start();

  // The balancer must exist before the AssignmentManager that uses it.
  this.balancer = LoadBalancerFactory.getLoadBalancer(conf);
  this.loadBalancerTracker = new LoadBalancerTracker(zooKeeper, this);
  this.loadBalancerTracker.start();
  this.assignmentManager = new AssignmentManager(this, serverManager,
    this.catalogTracker, this.balancer, this.executorService, this.metricsMaster,
    this.tableLockManager);
  // Register the AM first so it sees ZK events ahead of other listeners.
  zooKeeper.registerListenerFirst(assignmentManager);

  // Track live region servers.
  this.regionServerTracker = new RegionServerTracker(zooKeeper, this,
      this.serverManager);
  this.regionServerTracker.start();

  // Track servers being drained of regions.
  this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this,
    this.serverManager);
  this.drainingServerTracker.start();

  // Set the cluster as up.  If new RSs, they'll be waiting on this before
  // going ahead with their startup.
  boolean wasUp = this.clusterStatusTracker.isClusterUp();
  if (!wasUp) this.clusterStatusTracker.setClusterUp();

  LOG.info("Server active/primary master=" + this.serverName +
      ", sessionid=0x" +
      Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) +
      ", setting cluster-up flag (Was=" + wasUp + ")");

  // create/initialize the snapshot manager and other procedure managers
  this.snapshotManager = new SnapshotManager();
  this.mpmHost = new MasterProcedureManagerHost();
  this.mpmHost.register(this.snapshotManager);
  this.mpmHost.loadProcedures(conf);
  this.mpmHost.initialize(this, this.metricsMaster);
}
项目:HIndex    文件:TestAssignmentManager.java   
@Test
public void testUnassignWithSplitAtSameTime() throws KeeperException, IOException {
  // The test subject: the first meta region.
  final HRegionInfo subject = HRegionInfo.FIRST_META_REGIONINFO;
  // Teach the ServerManager mock to acknowledge the close of the first
  // meta region on SERVERNAME_A (the default mock answer would be null).
  Mockito.when(this.serverManager.sendRegionClose(SERVERNAME_A, subject, -1)).thenReturn(true);
  // This AM constructor variant requires a (mocked) catalog tracker.
  CatalogTracker catalogTracker = Mockito.mock(CatalogTracker.class);
  LoadBalancer loadBalancer =
      LoadBalancerFactory.getLoadBalancer(server.getConfiguration());
  // Stand up the AssignmentManager under test.
  AssignmentManager assignmentManager = new AssignmentManager(this.server,
    this.serverManager, catalogTracker, loadBalancer, null, null,
    master.getTableLockManager());
  try {
    // Sanity-check the mock wiring: a plain unassign should work.
    unassign(assignmentManager, SERVERNAME_A, subject);
    // Deleting the closing node fails if the unassign misbehaved.
    ZKAssign.deleteClosingNode(this.watcher, subject, SERVERNAME_A);
    // Install a SPLITTING znode in the way; keep its version so we can
    // later prove the znode was left untouched.
    int version = createNodeSplitting(this.watcher, subject, SERVERNAME_A);
    // Retry the unassign with SPLITTING in place. It should complete
    // silently: the region is being split and no longer exists.
    // TODO: what if the split fails and the parent comes back to life?
    unassign(assignmentManager, SERVERNAME_A, subject);
    // This transition fails if the znode was messed with meanwhile.
    ZKAssign.transitionNode(this.watcher, subject, SERVERNAME_A,
      EventType.RS_ZK_REGION_SPLITTING, EventType.RS_ZK_REGION_SPLITTING, version);
    assertFalse(assignmentManager.getRegionStates().isRegionInTransition(subject));
  } finally {
    assignmentManager.shutdown();
  }
}
项目:HIndex    文件:TestAssignmentManager.java   
/**
 * Tests an on-the-fly RPC that was scheduled for the earlier RS on the same port
 * for openRegion. AM should assign this somewhere else. (HBASE-9721)
 */
@SuppressWarnings("unchecked")
@Test
public void testOpenCloseRegionRPCIntendedForPreviousServer() throws Exception {
  // Every open attempt against SERVERNAME_B fails hard, forcing the AM to
  // retry against a different server.
  Mockito.when(this.serverManager.sendRegionOpen(Mockito.eq(SERVERNAME_B), Mockito.eq(REGIONINFO),
    Mockito.anyInt(), (List<ServerName>)Mockito.any()))
    .thenThrow(new DoNotRetryIOException());
  this.server.getConfiguration().setInt("hbase.assignment.maximum.attempts", 100);

  HRegionInfo hri = REGIONINFO;
  // Need a mocked catalog tracker.
  CatalogTracker ct = Mockito.mock(CatalogTracker.class);
  LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(
    server.getConfiguration());
  // Create an AM.
  AssignmentManager am = new AssignmentManager(this.server,
    this.serverManager, ct, balancer, null, null, master.getTableLockManager());
  RegionStates regionStates = am.getRegionStates();
  try {
    // Seed a plan that targets the failing server.
    am.regionPlans.put(REGIONINFO.getEncodedName(),
      new RegionPlan(REGIONINFO, null, SERVERNAME_B));

    // Should fail once, but succeed on the second attempt for the SERVERNAME_A
    am.assign(hri, true, false);
  } finally {
    assertEquals(SERVERNAME_A, regionStates.getRegionState(REGIONINFO).getServerName());
    // FIX: shut the AssignmentManager down so its worker threads and
    // executors do not leak into subsequent tests (the sibling variants of
    // this test already do this).
    am.shutdown();
  }
}
项目:PyroDB    文件:HMaster.java   
/**
 * Initialize all ZK based system trackers: load balancer and its tracker,
 * assignment manager, region server and draining server trackers, and the
 * snapshot/procedure managers. Also flips the cluster-up flag in ZK so new
 * region servers can proceed with startup.
 * @throws IOException
 * @throws InterruptedException
 * @throws KeeperException
 * @throws CoordinatedStateException
 */
void initializeZKBasedSystemTrackers() throws IOException,
    InterruptedException, KeeperException, CoordinatedStateException {
  // The balancer must exist before the AssignmentManager that uses it.
  this.balancer = LoadBalancerFactory.getLoadBalancer(conf);
  this.loadBalancerTracker = new LoadBalancerTracker(zooKeeper, this);
  this.loadBalancerTracker.start();
  this.assignmentManager = new AssignmentManager(this, serverManager,
    this.catalogTracker, this.balancer, this.service, this.metricsMaster,
    this.tableLockManager);
  // Register the AM first so it sees ZK events ahead of other listeners.
  zooKeeper.registerListenerFirst(assignmentManager);

  // Track live region servers.
  this.regionServerTracker = new RegionServerTracker(zooKeeper, this,
      this.serverManager);
  this.regionServerTracker.start();

  // Track servers being drained of regions.
  this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this,
    this.serverManager);
  this.drainingServerTracker.start();

  // Set the cluster as up.  If new RSs, they'll be waiting on this before
  // going ahead with their startup.
  boolean wasUp = this.clusterStatusTracker.isClusterUp();
  if (!wasUp) this.clusterStatusTracker.setClusterUp();

  LOG.info("Server active/primary master=" + this.serverName +
      ", sessionid=0x" +
      Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) +
      ", setting cluster-up flag (Was=" + wasUp + ")");

  // create/initialize the snapshot manager and other procedure managers
  this.snapshotManager = new SnapshotManager();
  this.mpmHost = new MasterProcedureManagerHost();
  this.mpmHost.register(this.snapshotManager);
  this.mpmHost.register(new MasterFlushTableProcedureManager());
  this.mpmHost.loadProcedures(conf);
  this.mpmHost.initialize(this, this.metricsMaster);
}
项目:PyroDB    文件:TestAssignmentManager.java   
@Test
public void testUnassignWithSplitAtSameTime() throws KeeperException,
    IOException, CoordinatedStateException {
  // The test subject: the first meta region.
  final HRegionInfo subject = HRegionInfo.FIRST_META_REGIONINFO;
  // Teach the ServerManager mock to acknowledge the close of the first
  // meta region on SERVERNAME_A (the default mock answer would be null).
  Mockito.when(this.serverManager.sendRegionClose(SERVERNAME_A, subject, -1)).thenReturn(true);
  // This AM constructor variant requires a (mocked) catalog tracker.
  CatalogTracker catalogTracker = Mockito.mock(CatalogTracker.class);
  LoadBalancer loadBalancer =
      LoadBalancerFactory.getLoadBalancer(server.getConfiguration());
  // Stand up the AssignmentManager under test.
  AssignmentManager assignmentManager = new AssignmentManager(this.server,
    this.serverManager, catalogTracker, loadBalancer, null, null,
    master.getTableLockManager());
  try {
    // Sanity-check the mock wiring: a plain unassign should work.
    unassign(assignmentManager, SERVERNAME_A, subject);
    // Deleting the closing node fails if the unassign misbehaved.
    ZKAssign.deleteClosingNode(this.watcher, subject, SERVERNAME_A);
    // Install a SPLITTING znode in the way; keep its version so we can
    // later prove the znode was left untouched.
    int version = createNodeSplitting(this.watcher, subject, SERVERNAME_A);
    // Retry the unassign with SPLITTING in place. It should complete
    // silently: the region is being split and no longer exists.
    // TODO: what if the split fails and the parent comes back to life?
    unassign(assignmentManager, SERVERNAME_A, subject);
    // This transition fails if the znode was messed with meanwhile.
    ZKAssign.transitionNode(this.watcher, subject, SERVERNAME_A,
      EventType.RS_ZK_REGION_SPLITTING, EventType.RS_ZK_REGION_SPLITTING, version);
    assertFalse(assignmentManager.getRegionStates().isRegionInTransition(subject));
  } finally {
    assignmentManager.shutdown();
  }
}
项目:PyroDB    文件:TestAssignmentManager.java   
/**
 * Tests an on-the-fly RPC that was scheduled for the earlier RS on the same port
 * for openRegion. AM should assign this somewhere else. (HBASE-9721)
 */
@SuppressWarnings("unchecked")
@Test
public void testOpenCloseRegionRPCIntendedForPreviousServer() throws Exception {
  // Every open attempt against SERVERNAME_B fails hard, forcing the AM to
  // retry against a different server.
  Mockito.when(this.serverManager.sendRegionOpen(Mockito.eq(SERVERNAME_B), Mockito.eq(REGIONINFO),
    Mockito.anyInt(), (List<ServerName>)Mockito.any()))
    .thenThrow(new DoNotRetryIOException());
  this.server.getConfiguration().setInt("hbase.assignment.maximum.attempts", 100);

  HRegionInfo hri = REGIONINFO;
  // Need a mocked catalog tracker.
  CatalogTracker ct = Mockito.mock(CatalogTracker.class);
  LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(
    server.getConfiguration());
  // Create an AM.
  AssignmentManager am = new AssignmentManager(this.server,
    this.serverManager, ct, balancer, null, null, master.getTableLockManager());
  RegionStates regionStates = am.getRegionStates();
  try {
    // Seed a plan that targets the failing server.
    am.regionPlans.put(REGIONINFO.getEncodedName(),
      new RegionPlan(REGIONINFO, null, SERVERNAME_B));

    // Should fail once, but succeed on the second attempt for the SERVERNAME_A
    am.assign(hri, true, false);
  } finally {
    assertEquals(SERVERNAME_A, regionStates.getRegionState(REGIONINFO).getServerName());
    // FIX: shut the AssignmentManager down so its worker threads and
    // executors do not leak into subsequent tests (the sibling variants of
    // this test already do this).
    am.shutdown();
  }
}
项目:c5    文件:HMaster.java   
/**
 * Initialize all ZK based system trackers: catalog tracker, load balancer
 * and its tracker, assignment manager, region server and draining server
 * trackers, and the snapshot manager. Also flips the cluster-up flag in ZK
 * so new region servers can proceed with startup.
 * @throws IOException
 * @throws InterruptedException
 */
void initializeZKBasedSystemTrackers() throws IOException,
    InterruptedException, KeeperException {
  // Catalog tracker first: other components look up meta through it.
  this.catalogTracker = createCatalogTracker(this.zooKeeper, this.conf, this);
  this.catalogTracker.start();

  // The balancer must exist before the AssignmentManager that uses it.
  this.balancer = LoadBalancerFactory.getLoadBalancer(conf);
  this.loadBalancerTracker = new LoadBalancerTracker(zooKeeper, this);
  this.loadBalancerTracker.start();
  this.assignmentManager = new AssignmentManager(this, serverManager,
    this.catalogTracker, this.balancer, this.executorService, this.metricsMaster,
    this.tableLockManager);
  // Register the AM first so it sees ZK events ahead of other listeners.
  zooKeeper.registerListenerFirst(assignmentManager);

  // Track live region servers.
  this.regionServerTracker = new RegionServerTracker(zooKeeper, this,
      this.serverManager);
  this.regionServerTracker.start();

  // Track servers being drained of regions.
  this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this,
    this.serverManager);
  this.drainingServerTracker.start();

  // Set the cluster as up.  If new RSs, they'll be waiting on this before
  // going ahead with their startup.
  boolean wasUp = this.clusterStatusTracker.isClusterUp();
  if (!wasUp) this.clusterStatusTracker.setClusterUp();

  LOG.info("Server active/primary master=" + this.serverName +
      ", sessionid=0x" +
      Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) +
      ", setting cluster-up flag (Was=" + wasUp + ")");

  // create the snapshot manager
  this.snapshotManager = new SnapshotManager(this, this.metricsMaster);
}
项目:c5    文件:TestAssignmentManager.java   
@Test
public void testUnassignWithSplitAtSameTime() throws KeeperException, IOException {
  // The test subject: the first meta region.
  final HRegionInfo subject = HRegionInfo.FIRST_META_REGIONINFO;
  // Teach the ServerManager mock to acknowledge the close of the first
  // meta region on SERVERNAME_A (the default mock answer would be null).
  Mockito.when(this.serverManager.sendRegionClose(SERVERNAME_A, subject, -1)).thenReturn(true);
  // This AM constructor variant requires a (mocked) catalog tracker.
  CatalogTracker catalogTracker = Mockito.mock(CatalogTracker.class);
  LoadBalancer loadBalancer =
      LoadBalancerFactory.getLoadBalancer(server.getConfiguration());
  // Stand up the AssignmentManager under test.
  AssignmentManager assignmentManager = new AssignmentManager(this.server,
    this.serverManager, catalogTracker, loadBalancer, null, null,
    master.getTableLockManager());
  try {
    // Sanity-check the mock wiring: a plain unassign should work.
    unassign(assignmentManager, SERVERNAME_A, subject);
    // Deleting the closing node fails if the unassign misbehaved.
    ZKAssign.deleteClosingNode(this.watcher, subject, SERVERNAME_A);
    // Install a SPLITTING znode in the way; keep its version so we can
    // later prove the znode was left untouched.
    int version = createNodeSplitting(this.watcher, subject, SERVERNAME_A);
    // Retry the unassign with SPLITTING in place. It should complete
    // silently: the region is being split and no longer exists.
    // TODO: what if the split fails and the parent comes back to life?
    unassign(assignmentManager, SERVERNAME_A, subject);
    // This transition fails if the znode was messed with meanwhile.
    ZKAssign.transitionNode(this.watcher, subject, SERVERNAME_A,
      EventType.RS_ZK_REGION_SPLITTING, EventType.RS_ZK_REGION_SPLITTING, version);
    assertFalse(assignmentManager.getRegionStates().isRegionInTransition(subject));
  } finally {
    assignmentManager.shutdown();
  }
}
项目:DominoHBase    文件:HMaster.java   
/**
 * Initialize all ZK based system trackers: catalog tracker, load balancer
 * and its tracker, assignment manager, region server and draining server
 * trackers. Also flips the cluster-up flag in ZK so new region servers can
 * proceed with startup.
 * @throws IOException
 * @throws InterruptedException
 */
private void initializeZKBasedSystemTrackers() throws IOException,
    InterruptedException, KeeperException {
  // Catalog tracker first: other components look up meta through it.
  // "hbase.master.catalog.timeout" defaults to 600000 ms here.
  this.catalogTracker = createCatalogTracker(this.zooKeeper, this.conf,
      this, conf.getInt("hbase.master.catalog.timeout", 600000));
  this.catalogTracker.start();

  // The balancer must exist before the AssignmentManager that uses it.
  this.balancer = LoadBalancerFactory.getLoadBalancer(conf);
  this.loadBalancerTracker = new LoadBalancerTracker(zooKeeper, this);
  this.loadBalancerTracker.start();
  this.assignmentManager = new AssignmentManager(this, serverManager,
    this.catalogTracker, this.balancer, this.executorService, this.metricsMaster);
  // Register the AM first so it sees ZK events ahead of other listeners.
  zooKeeper.registerListenerFirst(assignmentManager);

  // Track live region servers.
  this.regionServerTracker = new RegionServerTracker(zooKeeper, this,
      this.serverManager);
  this.regionServerTracker.start();

  // Track servers being drained of regions.
  this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this,
    this.serverManager);
  this.drainingServerTracker.start();

  // Set the cluster as up.  If new RSs, they'll be waiting on this before
  // going ahead with their startup.
  boolean wasUp = this.clusterStatusTracker.isClusterUp();
  if (!wasUp) this.clusterStatusTracker.setClusterUp();

  LOG.info("Server active/primary master; " + this.serverName +
      ", sessionid=0x" +
      Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) +
      ", cluster-up flag was=" + wasUp);
}
项目:DominoHBase    文件:TestAssignmentManager.java   
@Test
public void testUnassignWithSplitAtSameTime() throws KeeperException, IOException {
  // The test subject: the first meta region.
  final HRegionInfo subject = HRegionInfo.FIRST_META_REGIONINFO;
  // Teach the ServerManager mock to acknowledge the close of the first
  // meta region on SERVERNAME_A (the default mock answer would be null).
  Mockito.when(this.serverManager.sendRegionClose(SERVERNAME_A, subject, -1)).thenReturn(true);
  // This AM constructor variant requires a (mocked) catalog tracker.
  CatalogTracker catalogTracker = Mockito.mock(CatalogTracker.class);
  LoadBalancer loadBalancer =
      LoadBalancerFactory.getLoadBalancer(server.getConfiguration());
  // Stand up the AssignmentManager under test.
  AssignmentManager assignmentManager = new AssignmentManager(this.server,
    this.serverManager, catalogTracker, loadBalancer, null, null);
  try {
    // Sanity-check the mock wiring: a plain unassign should work.
    unassign(assignmentManager, SERVERNAME_A, subject);
    // Deleting the closing node fails if the unassign misbehaved.
    ZKAssign.deleteClosingNode(this.watcher, subject);
    // Install a SPLITTING znode in the way; keep its version so we can
    // later prove the znode was left untouched.
    int version = createNodeSplitting(this.watcher, subject, SERVERNAME_A);
    // Retry the unassign with SPLITTING in place. It should complete
    // silently: the region is being split and no longer exists.
    // TODO: what if the split fails and the parent comes back to life?
    unassign(assignmentManager, SERVERNAME_A, subject);
    // This transition fails if the znode was messed with meanwhile.
    ZKAssign.transitionNode(this.watcher, subject, SERVERNAME_A,
      EventType.RS_ZK_REGION_SPLITTING, EventType.RS_ZK_REGION_SPLITTING, version);
    assertFalse(assignmentManager.getRegionStates().isRegionInTransition(subject));
  } finally {
    assignmentManager.shutdown();
  }
}
项目:ditb    文件:HMaster.java   
/**
 * Initialize all ZK based system trackers: load balancer, region
 * normalizer, their trackers, the assignment manager, region server and
 * draining server trackers, and the snapshot/procedure managers. Also
 * flips the cluster-up flag in ZK so new region servers can proceed with
 * startup.
 * @throws IOException
 * @throws InterruptedException
 * @throws KeeperException
 * @throws CoordinatedStateException
 */
void initializeZKBasedSystemTrackers() throws IOException,
    InterruptedException, KeeperException, CoordinatedStateException {
  // Balancer and normalizer must exist before the trackers that toggle them.
  this.balancer = LoadBalancerFactory.getLoadBalancer(conf);
  this.normalizer = RegionNormalizerFactory.getRegionNormalizer(conf);
  this.normalizer.setMasterServices(this);
  this.loadBalancerTracker = new LoadBalancerTracker(zooKeeper, this);
  this.loadBalancerTracker.start();
  this.regionNormalizerTracker = new RegionNormalizerTracker(zooKeeper, this);
  this.regionNormalizerTracker.start();
  this.assignmentManager = new AssignmentManager(this, serverManager,
    this.balancer, this.service, this.metricsMaster,
    this.tableLockManager);
  // Register the AM first so it sees ZK events ahead of other listeners.
  zooKeeper.registerListenerFirst(assignmentManager);

  // Track live region servers.
  this.regionServerTracker = new RegionServerTracker(zooKeeper, this,
      this.serverManager);
  this.regionServerTracker.start();

  // Track servers being drained of regions.
  this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this,
    this.serverManager);
  this.drainingServerTracker.start();

  // Set the cluster as up.  If new RSs, they'll be waiting on this before
  // going ahead with their startup.
  boolean wasUp = this.clusterStatusTracker.isClusterUp();
  if (!wasUp) this.clusterStatusTracker.setClusterUp();

  LOG.info("Server active/primary master=" + this.serverName +
      ", sessionid=0x" +
      Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) +
      ", setting cluster-up flag (Was=" + wasUp + ")");

  // create/initialize the snapshot manager and other procedure managers
  this.snapshotManager = new SnapshotManager();
  this.mpmHost = new MasterProcedureManagerHost();
  this.mpmHost.register(this.snapshotManager);
  this.mpmHost.register(new MasterFlushTableProcedureManager());
  this.mpmHost.loadProcedures(conf);
  this.mpmHost.initialize(this, this.metricsMaster);
}
项目:ditb    文件:TestRegionPlacement2.java   
@Test
public void testFavoredNodesPresentForRoundRobinAssignment() throws HBaseIOException {
  // Verify favored-node bookkeeping of the FavoredNodeLoadBalancer across
  // repeated roundRobinAssignment calls as servers come and go.
  LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(TEST_UTIL.getConfiguration());
  balancer.setMasterServices(TEST_UTIL.getMiniHBaseCluster().getMaster());
  // Collect the server names of all SLAVES region servers in the mini cluster.
  List<ServerName> servers = new ArrayList<ServerName>();
  for (int i = 0; i < SLAVES; i++) {
    ServerName server = TEST_UTIL.getMiniHBaseCluster().getRegionServer(i).getServerName();
    servers.add(server);
  }
  // A single fresh region is enough to exercise favored-node selection.
  List<HRegionInfo> regions = new ArrayList<HRegionInfo>(1);
  HRegionInfo region = new HRegionInfo(TableName.valueOf("foobar"));
  regions.add(region);
  Map<ServerName,List<HRegionInfo>> assignmentMap = balancer.roundRobinAssignment(regions,
      servers);
  Set<ServerName> serverBefore = assignmentMap.keySet();
  List<ServerName> favoredNodesBefore =
      ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
  // Exactly three favored nodes: primary, secondary, tertiary.
  assertTrue(favoredNodesBefore.size() == 3);
  // the primary RS should be the one that the balancer's assignment returns
  assertTrue(ServerName.isSameHostnameAndPort(serverBefore.iterator().next(),
      favoredNodesBefore.get(PRIMARY)));
  // now remove the primary from the list of available servers
  List<ServerName> removedServers = removeMatchingServers(serverBefore, servers);
  // call roundRobinAssignment with the modified servers list
  assignmentMap = balancer.roundRobinAssignment(regions, servers);
  List<ServerName> favoredNodesAfter =
      ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
  assertTrue(favoredNodesAfter.size() == 3);
  // We don't expect the favored nodes assignments to change in multiple calls
  // to the roundRobinAssignment method in the balancer (relevant for AssignmentManager.assign
  // failures)
  assertTrue(favoredNodesAfter.containsAll(favoredNodesBefore));
  Set<ServerName> serverAfter = assignmentMap.keySet();
  // We expect the new RegionServer assignee to be one of the favored nodes
  // chosen earlier.
  assertTrue(ServerName.isSameHostnameAndPort(serverAfter.iterator().next(),
               favoredNodesBefore.get(SECONDARY)) ||
             ServerName.isSameHostnameAndPort(serverAfter.iterator().next(),
               favoredNodesBefore.get(TERTIARY)));

  // put back the primary in the list of available servers
  servers.addAll(removedServers);
  // now roundRobinAssignment with the modified servers list should return the primary
  // as the regionserver assignee
  assignmentMap = balancer.roundRobinAssignment(regions, servers);
  Set<ServerName> serverWithPrimary = assignmentMap.keySet();
  assertTrue(serverBefore.containsAll(serverWithPrimary));

  // Make all the favored nodes unavailable for assignment
  // (return value deliberately ignored; we only need 'servers' mutated)
  removeMatchingServers(favoredNodesAfter, servers);
  // call roundRobinAssignment with the modified servers list
  assignmentMap = balancer.roundRobinAssignment(regions, servers);
  List<ServerName> favoredNodesNow =
      ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
  assertTrue(favoredNodesNow.size() == 3);
  // With all old favored nodes gone, a completely new trio must be picked.
  assertTrue(!favoredNodesNow.contains(favoredNodesAfter.get(PRIMARY)) &&
      !favoredNodesNow.contains(favoredNodesAfter.get(SECONDARY)) &&
      !favoredNodesNow.contains(favoredNodesAfter.get(TERTIARY)));
}
项目:ditb    文件:TestRegionPlacement2.java   
/**
 * Verify that randomAssignment hands out the primary favored node first, falls
 * back to the secondary/tertiary when the primary is removed, and recomputes a
 * fresh favored-node set once all three nodes are unavailable.
 */
@Test
public void testFavoredNodesPresentForRandomAssignment() throws HBaseIOException {
  LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(TEST_UTIL.getConfiguration());
  balancer.setMasterServices(TEST_UTIL.getMiniHBaseCluster().getMaster());
  List<ServerName> servers = new ArrayList<ServerName>();
  for (int i = 0; i < SLAVES; i++) {
    ServerName server = TEST_UTIL.getMiniHBaseCluster().getRegionServer(i).getServerName();
    servers.add(server);
  }
  List<HRegionInfo> regions = new ArrayList<HRegionInfo>(1);
  HRegionInfo region = new HRegionInfo(TableName.valueOf("foobar"));
  regions.add(region);
  ServerName serverBefore = balancer.randomAssignment(region, servers);
  List<ServerName> favoredNodesBefore =
      ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
  // The region must get a full favored-node set: primary, secondary, tertiary.
  assertEquals(3, favoredNodesBefore.size());
  // the primary RS should be the one that the balancer's assignment returns
  assertTrue(ServerName.isSameHostnameAndPort(serverBefore, favoredNodesBefore.get(PRIMARY)));
  // now remove the primary from the list of servers
  removeMatchingServers(serverBefore, servers);
  // call randomAssignment with the modified servers list
  ServerName serverAfter = balancer.randomAssignment(region, servers);
  List<ServerName> favoredNodesAfter =
      ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
  assertEquals(3, favoredNodesAfter.size());
  // We don't expect the favored nodes assignments to change in multiple calls
  // to the randomAssignment method in the balancer (relevant for AssignmentManager.assign
  // failures)
  assertTrue(favoredNodesAfter.containsAll(favoredNodesBefore));
  // We expect the new RegionServer assignee to be one of the favored nodes
  // chosen earlier.
  assertTrue(ServerName.isSameHostnameAndPort(serverAfter, favoredNodesBefore.get(SECONDARY)) ||
             ServerName.isSameHostnameAndPort(serverAfter, favoredNodesBefore.get(TERTIARY)));
  // Make all the favored nodes unavailable for assignment
  removeMatchingServers(favoredNodesAfter, servers);
  // call randomAssignment with the modified servers list
  balancer.randomAssignment(region, servers);
  List<ServerName> favoredNodesNow =
      ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
  assertEquals(3, favoredNodesNow.size());
  // With all previous favored nodes gone, a brand-new set must be chosen.
  assertFalse(favoredNodesNow.contains(favoredNodesAfter.get(PRIMARY)));
  assertFalse(favoredNodesNow.contains(favoredNodesAfter.get(SECONDARY)));
  assertFalse(favoredNodesNow.contains(favoredNodesAfter.get(TERTIARY)));
}
项目:pbase    文件:TestRegionPlacement2.java   
/**
 * Verify that roundRobinAssignment assigns the region to its primary favored
 * node, falls back to the secondary/tertiary when the primary is removed,
 * returns to the primary when it comes back, and recomputes the favored-node
 * set once all three nodes are unavailable.
 */
@Test
public void testFavoredNodesPresentForRoundRobinAssignment() throws HBaseIOException {
  LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(TEST_UTIL.getConfiguration());
  balancer.setMasterServices(TEST_UTIL.getMiniHBaseCluster().getMaster());
  List<ServerName> servers = new ArrayList<ServerName>();
  for (int i = 0; i < SLAVES; i++) {
    ServerName server = TEST_UTIL.getMiniHBaseCluster().getRegionServer(i).getServerName();
    servers.add(server);
  }
  List<HRegionInfo> regions = new ArrayList<HRegionInfo>(1);
  HRegionInfo region = new HRegionInfo(TableName.valueOf("foobar"));
  regions.add(region);
  Map<ServerName,List<HRegionInfo>> assignmentMap = balancer.roundRobinAssignment(regions,
      servers);
  Set<ServerName> serverBefore = assignmentMap.keySet();
  List<ServerName> favoredNodesBefore =
      ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
  // The region must get a full favored-node set: primary, secondary, tertiary.
  assertEquals(3, favoredNodesBefore.size());
  // the primary RS should be the one that the balancer's assignment returns
  assertTrue(ServerName.isSameHostnameAndPort(serverBefore.iterator().next(),
      favoredNodesBefore.get(PRIMARY)));
  // now remove the primary from the list of available servers
  List<ServerName> removedServers = removeMatchingServers(serverBefore, servers);
  // call roundRobinAssignment with the modified servers list
  assignmentMap = balancer.roundRobinAssignment(regions, servers);
  List<ServerName> favoredNodesAfter =
      ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
  assertEquals(3, favoredNodesAfter.size());
  // We don't expect the favored nodes assignments to change in multiple calls
  // to the roundRobinAssignment method in the balancer (relevant for AssignmentManager.assign
  // failures)
  assertTrue(favoredNodesAfter.containsAll(favoredNodesBefore));
  Set<ServerName> serverAfter = assignmentMap.keySet();
  // We expect the new RegionServer assignee to be one of the favored nodes
  // chosen earlier.
  assertTrue(ServerName.isSameHostnameAndPort(serverAfter.iterator().next(),
               favoredNodesBefore.get(SECONDARY)) ||
             ServerName.isSameHostnameAndPort(serverAfter.iterator().next(),
               favoredNodesBefore.get(TERTIARY)));

  // put back the primary in the list of available servers
  servers.addAll(removedServers);
  // now roundRobinAssignment with the modified servers list should return the primary
  // as the regionserver assignee
  assignmentMap = balancer.roundRobinAssignment(regions, servers);
  Set<ServerName> serverWithPrimary = assignmentMap.keySet();
  assertTrue(serverBefore.containsAll(serverWithPrimary));

  // Make all the favored nodes unavailable for assignment
  removeMatchingServers(favoredNodesAfter, servers);
  // call roundRobinAssignment with the modified servers list
  assignmentMap = balancer.roundRobinAssignment(regions, servers);
  List<ServerName> favoredNodesNow =
      ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
  assertEquals(3, favoredNodesNow.size());
  // With all previous favored nodes gone, a brand-new set must be chosen.
  assertFalse(favoredNodesNow.contains(favoredNodesAfter.get(PRIMARY)));
  assertFalse(favoredNodesNow.contains(favoredNodesAfter.get(SECONDARY)));
  assertFalse(favoredNodesNow.contains(favoredNodesAfter.get(TERTIARY)));
}
项目:pbase    文件:TestRegionPlacement2.java   
/**
 * Verify that randomAssignment hands out the primary favored node first, falls
 * back to the secondary/tertiary when the primary is removed, and recomputes a
 * fresh favored-node set once all three nodes are unavailable.
 */
@Test
public void testFavoredNodesPresentForRandomAssignment() throws HBaseIOException {
  LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(TEST_UTIL.getConfiguration());
  balancer.setMasterServices(TEST_UTIL.getMiniHBaseCluster().getMaster());
  List<ServerName> servers = new ArrayList<ServerName>();
  for (int i = 0; i < SLAVES; i++) {
    ServerName server = TEST_UTIL.getMiniHBaseCluster().getRegionServer(i).getServerName();
    servers.add(server);
  }
  List<HRegionInfo> regions = new ArrayList<HRegionInfo>(1);
  HRegionInfo region = new HRegionInfo(TableName.valueOf("foobar"));
  regions.add(region);
  ServerName serverBefore = balancer.randomAssignment(region, servers);
  List<ServerName> favoredNodesBefore =
      ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
  // The region must get a full favored-node set: primary, secondary, tertiary.
  assertEquals(3, favoredNodesBefore.size());
  // the primary RS should be the one that the balancer's assignment returns
  assertTrue(ServerName.isSameHostnameAndPort(serverBefore, favoredNodesBefore.get(PRIMARY)));
  // now remove the primary from the list of servers
  removeMatchingServers(serverBefore, servers);
  // call randomAssignment with the modified servers list
  ServerName serverAfter = balancer.randomAssignment(region, servers);
  List<ServerName> favoredNodesAfter =
      ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
  assertEquals(3, favoredNodesAfter.size());
  // We don't expect the favored nodes assignments to change in multiple calls
  // to the randomAssignment method in the balancer (relevant for AssignmentManager.assign
  // failures)
  assertTrue(favoredNodesAfter.containsAll(favoredNodesBefore));
  // We expect the new RegionServer assignee to be one of the favored nodes
  // chosen earlier.
  assertTrue(ServerName.isSameHostnameAndPort(serverAfter, favoredNodesBefore.get(SECONDARY)) ||
             ServerName.isSameHostnameAndPort(serverAfter, favoredNodesBefore.get(TERTIARY)));
  // Make all the favored nodes unavailable for assignment
  removeMatchingServers(favoredNodesAfter, servers);
  // call randomAssignment with the modified servers list
  balancer.randomAssignment(region, servers);
  List<ServerName> favoredNodesNow =
      ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
  assertEquals(3, favoredNodesNow.size());
  // With all previous favored nodes gone, a brand-new set must be chosen.
  assertFalse(favoredNodesNow.contains(favoredNodesAfter.get(PRIMARY)));
  assertFalse(favoredNodesNow.contains(favoredNodesAfter.get(SECONDARY)));
  assertFalse(favoredNodesNow.contains(favoredNodesAfter.get(TERTIARY)));
}
项目:pbase    文件:TestAssignmentManager.java   
/**
 * Tests AssignmentManager balance function.  Runs a balance moving a region
 * from one server to another mocking regionserver responding over zk.
 * @throws IOException
 * @throws KeeperException
 * @throws DeserializationException
 */
@Test (timeout=180000)
public void testBalance() throws IOException, KeeperException, DeserializationException,
    InterruptedException, CoordinatedStateException {
  // Create and startup an executor.  This is used by AssignmentManager
  // handling zk callbacks.
  ExecutorService executor = startupMasterExecutor("testBalanceExecutor");

  // We need a mocked catalog tracker.
  LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(server
      .getConfiguration());
  // Create an AM.
  AssignmentManager am = new AssignmentManager(this.server,
    this.serverManager, balancer, executor, null, master.getTableLockManager());
  am.failoverCleanupDone.set(true);
  try {
    // Make sure our new AM gets callbacks; once registered, can't unregister.
    // Thats ok because we make a new zk watcher for each test.
    this.watcher.registerListenerFirst(am);
    // Call the balance function but fake the region being online first at
    // SERVERNAME_A.  Create a balance plan.
    am.regionOnline(REGIONINFO, SERVERNAME_A);
    // Balance region from A to B.
    RegionPlan plan = new RegionPlan(REGIONINFO, SERVERNAME_A, SERVERNAME_B);
    am.balance(plan);

    RegionStates regionStates = am.getRegionStates();
    // Must be failed to close since the server is fake
    assertTrue(regionStates.isRegionInTransition(REGIONINFO)
      && regionStates.isRegionInState(REGIONINFO, State.FAILED_CLOSE));
    // Move it back to pending_close
    regionStates.updateRegionState(REGIONINFO, State.PENDING_CLOSE);

    // Now fake the region closing successfully over on the regionserver; the
    // regionserver will have set the region in CLOSED state.  This will
    // trigger callback into AM. The below zk close call is from the RS close
    // region handler duplicated here because its down deep in a private
    // method hard to expose.
    int versionid =
      ZKAssign.transitionNodeClosed(this.watcher, REGIONINFO, SERVERNAME_A, -1);
    // Consistent (unexpected, actual) argument order, matching the other
    // assertNotSame calls below.
    assertNotSame(-1, versionid);
    // AM is going to notice above CLOSED and queue up a new assign.  The
    // assign will go to open the region in the new location set by the
    // balancer.  The zk node will be OFFLINE waiting for regionserver to
    // transition it through OPENING, OPENED.  Wait till we see the OFFLINE
    // zk node before we proceed.
    Mocking.waitForRegionPendingOpenInRIT(am, REGIONINFO.getEncodedName());

    // Get current versionid else will fail on transition from OFFLINE to OPENING below
    versionid = ZKAssign.getVersion(this.watcher, REGIONINFO);
    assertNotSame(-1, versionid);
    // This uglyness below is what the openregionhandler on RS side does.
    versionid = ZKAssign.transitionNode(server.getZooKeeper(), REGIONINFO,
      SERVERNAME_B, EventType.M_ZK_REGION_OFFLINE,
      EventType.RS_ZK_REGION_OPENING, versionid);
    assertNotSame(-1, versionid);
    // Move znode from OPENING to OPENED as RS does on successful open.
    versionid =
      ZKAssign.transitionNodeOpened(this.watcher, REGIONINFO, SERVERNAME_B, versionid);
    assertNotSame(-1, versionid);
    // Wait on the handler removing the OPENED znode.
    while(regionStates.isRegionInTransition(REGIONINFO)) Threads.sleep(1);
  } finally {
    executor.shutdown();
    am.shutdown();
    // Clean up all znodes
    ZKAssign.deleteAllNodes(this.watcher);
  }
}
项目:pbase    文件:TestAssignmentManager.java   
/**
 * Shared driver for the server-shutdown-while-disabling tests: puts REGIONINFO
 * in PENDING_CLOSE with a CLOSING znode, marks its table DISABLING or DISABLED
 * per {@code state}, runs the server shutdown handler and verifies the znode is
 * cleaned up (and, for DISABLED, that the region leaves the RIT map).
 */
private void testCaseWithPartiallyDisabledState(Table.State state) throws KeeperException,
    IOException, CoordinatedStateException, ServiceException {
  // Create and startup an executor. This is used by AssignmentManager
  // handling zk callbacks.
  ExecutorService executor = startupMasterExecutor("testSSHWhenDisableTableInProgress");
  LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(server.getConfiguration());
  ZKAssign.deleteAllNodes(this.watcher);

  // Create an AM.
  AssignmentManager am = new AssignmentManager(this.server,
    this.serverManager, balancer, executor, null, master.getTableLockManager());
  // adding region to regions and servers maps.
  am.regionOnline(REGIONINFO, SERVERNAME_A);
  // adding region in pending close.
  am.getRegionStates().updateRegionState(REGIONINFO, State.PENDING_CLOSE);
  if (state == Table.State.DISABLING) {
    am.getTableStateManager().setTableState(REGIONINFO.getTable(),
      Table.State.DISABLING);
  } else {
    am.getTableStateManager().setTableState(REGIONINFO.getTable(),
      Table.State.DISABLED);
  }
  RegionTransition data = RegionTransition.createRegionTransition(EventType.M_ZK_REGION_CLOSING,
      REGIONINFO.getRegionName(), SERVERNAME_A);
  String node = ZKAssign.getNodeName(this.watcher, REGIONINFO.getEncodedName());
  // create znode in M_ZK_REGION_CLOSING state.
  ZKUtil.createAndWatch(this.watcher, node, data.toByteArray());

  try {
    processServerShutdownHandler(am, false);
    // check znode deleted or not.
    // In both cases the znode should be deleted.
    assertTrue("The znode should be deleted.", ZKUtil.checkExists(this.watcher, node) == -1);
    // check whether in rit or not. In the DISABLING case also the below
    // assert will be true but the piece of code added for HBASE-5927 will not
    // do that.
    if (state == Table.State.DISABLED) {
      assertFalse("Region state of region in pending close should be removed from rit.",
          am.getRegionStates().isRegionsInTransition());
    }
  } finally {
    am.setEnabledTable(REGIONINFO.getTable());
    executor.shutdown();
    am.shutdown();
    // Clean up all znodes
    ZKAssign.deleteAllNodes(this.watcher);
  }
}
项目:pbase    文件:TestAssignmentManager.java   
/**
 * Create an {@link AssignmentManagerWithExtrasForTesting} wired against a
 * mocked cluster connection and a mocked region-server RPC interface, so
 * AssignmentManager logic can be exercised without a live cluster.
 * @param server the (mocked) master server the AM runs against
 * @param manager the ServerManager handed to the AM
 * @return An AssignmentManagerWithExtras with mock connections, etc.
 * @throws IOException
 * @throws KeeperException
 */
private AssignmentManagerWithExtrasForTesting setUpMockedAssignmentManager(final Server server,
    final ServerManager manager) throws IOException, KeeperException,
      ServiceException, CoordinatedStateException {
  // Make an RS Interface implementation. Make it so a scanner can go against
  // it and a get to return the single region, REGIONINFO, this test is
  // messing with. Needed when "new master" joins cluster. AM will try and
  // rebuild its list of user regions and it will also get the HRI that goes
  // with an encoded name by doing a Get on hbase:meta
  ClientProtos.ClientService.BlockingInterface ri =
    Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
  // Get a meta row result that has region up on SERVERNAME_A for REGIONINFO
  Result r = MetaMockingUtil.getMetaTableRowResult(REGIONINFO, SERVERNAME_A);
  final ScanResponse.Builder builder = ScanResponse.newBuilder();
  builder.setMoreResults(true);
  builder.addCellsPerResult(r.size());
  final List<CellScannable> rows = new ArrayList<CellScannable>(1);
  rows.add(r);
  // Scan answer that feeds the mocked meta row back through the rpc
  // controller's cell scanner, mimicking a real meta scan response.
  Answer<ScanResponse> ans = new Answer<ClientProtos.ScanResponse>() {
    @Override
    public ScanResponse answer(InvocationOnMock invocation) throws Throwable {
      PayloadCarryingRpcController controller = (PayloadCarryingRpcController) invocation
          .getArguments()[0];
      if (controller != null) {
        controller.setCellScanner(CellUtil.createCellScanner(rows));
      }
      return builder.build();
    }
  };
  if (enabling) {
    // For the table-enabling scenario, return the row for a few scan calls
    // then signal end-of-scan so the enable workflow can complete.
    Mockito.when(ri.scan((RpcController) Mockito.any(), (ScanRequest) Mockito.any()))
        .thenAnswer(ans).thenAnswer(ans).thenAnswer(ans).thenAnswer(ans).thenAnswer(ans)
        .thenReturn(ScanResponse.newBuilder().setMoreResults(false).build());
  } else {
    Mockito.when(ri.scan((RpcController) Mockito.any(), (ScanRequest) Mockito.any())).thenAnswer(
        ans);
  }
  // If a get, return the above result too for REGIONINFO
  GetResponse.Builder getBuilder = GetResponse.newBuilder();
  getBuilder.setResult(ProtobufUtil.toResult(r));
  Mockito.when(ri.get((RpcController)Mockito.any(), (GetRequest) Mockito.any())).
    thenReturn(getBuilder.build());
  // Get a connection w/ mocked up common methods.
  ClusterConnection connection = (ClusterConnection)HConnectionTestingUtility.
    getMockedConnectionAndDecorate(HTU.getConfiguration(), null,
      ri, SERVERNAME_B, REGIONINFO);
  // These mocks were done up when all connections were managed.  World is different now we
  // moved to unmanaged connections.  It messes up the intercepts done in these tests.
  // Just mark connections as marked and then down in MetaTableAccessor, it will go the path
  // that picks up the above mocked up 'implementation' so 'scans' of meta return the expected
  // result.  Redo in new realm of unmanaged connections.
  Mockito.when(connection.isManaged()).thenReturn(true);
  // Create and startup an executor. Used by AM handling zk callbacks.
  ExecutorService executor = startupMasterExecutor("mockedAMExecutor");
  this.balancer = LoadBalancerFactory.getLoadBalancer(server.getConfiguration());
  AssignmentManagerWithExtrasForTesting am = new AssignmentManagerWithExtrasForTesting(
    server, connection, manager, this.balancer, executor, new NullTableLockManager());
  return am;
}
项目:pbase    文件:TestAssignmentManager.java   
/**
 * Test assignment related ZK events are ignored by AM if the region is not known
 * by AM to be in transition. During normal operation, all assignments are started
 * by AM (not considering split/merge), if an event is received but the region
 * is not in transition, the event must be a very late one. So it can be ignored.
 * During master failover, since AM watches assignment znodes after failover cleanup
 * is completed, when an event comes in, AM should already have the region in transition
 * if ZK is used during the assignment action (only hbck doesn't use ZK for region
 * assignment). So during master failover, we can ignored such events too.
 */
@Test (timeout=180000)
public void testAssignmentEventIgnoredIfNotExpected() throws KeeperException, IOException,
    CoordinatedStateException {
  // Region to use in test.
  final HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO;
  LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(
    server.getConfiguration());
  // Flipped by the AM subclass below once it has processed an OPENING event
  // for our test region.
  final AtomicBoolean zkEventProcessed = new AtomicBoolean(false);
  // Create an AM whose handleRegion records when it sees the OPENING event.
  AssignmentManager am = new AssignmentManager(this.server,
    this.serverManager, balancer, null, null, master.getTableLockManager()) {

    @Override
    void handleRegion(final RegionTransition rt, OpenRegionCoordination coordination,
                      OpenRegionCoordination.OpenRegionDetails ord) {
      super.handleRegion(rt, coordination, ord);
      if (rt != null && Bytes.equals(hri.getRegionName(),
        rt.getRegionName()) && rt.getEventType() == EventType.RS_ZK_REGION_OPENING) {
        zkEventProcessed.set(true);
      }
    }
  };
  try {
    // First make sure the region is not in transition
    am.getRegionStates().regionOffline(hri);
    zkEventProcessed.set(false); // Reset it before faking zk transition
    this.watcher.registerListenerFirst(am);
    assertFalse("The region should not be in transition",
      am.getRegionStates().isRegionInTransition(hri));
    ZKAssign.createNodeOffline(this.watcher, hri, SERVERNAME_A);
    // Trigger a transition event
    ZKAssign.transitionNodeOpening(this.watcher, hri, SERVERNAME_A);
    long startTime = EnvironmentEdgeManager.currentTime();
    // Wait (bounded by 30s) until the AM has processed the OPENING event.
    while (!zkEventProcessed.get()) {
      assertTrue("Timed out in waiting for ZK event to be processed",
        EnvironmentEdgeManager.currentTime() - startTime < 30000);
      Threads.sleepWithoutInterrupt(100);
    }
    // The event was seen, but since the region was not in transition it must
    // have been ignored: the region should still be out of transition.
    assertFalse(am.getRegionStates().isRegionInTransition(hri));
  } finally {
    am.shutdown();
  }
}
项目:HIndex    文件:TestRegionPlacement.java   
/**
 * Verify that roundRobinAssignment assigns the region to its primary favored
 * node, falls back to the secondary/tertiary when the primary is removed,
 * returns to the primary when it comes back, and recomputes the favored-node
 * set once all three nodes are unavailable.
 */
@Test
public void testFavoredNodesPresentForRoundRobinAssignment() throws HBaseIOException {
  LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(TEST_UTIL.getConfiguration());
  balancer.setMasterServices(TEST_UTIL.getMiniHBaseCluster().getMaster());
  List<ServerName> servers = new ArrayList<ServerName>();
  for (int i = 0; i < SLAVES; i++) {
    ServerName server = TEST_UTIL.getMiniHBaseCluster().getRegionServer(i).getServerName();
    servers.add(server);
  }
  List<HRegionInfo> regions = new ArrayList<HRegionInfo>(1);
  HRegionInfo region = new HRegionInfo(TableName.valueOf("foobar"));
  regions.add(region);
  Map<ServerName,List<HRegionInfo>> assignmentMap = balancer.roundRobinAssignment(regions,
      servers);
  Set<ServerName> serverBefore = assignmentMap.keySet();
  List<ServerName> favoredNodesBefore =
      ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
  // The region must get a full favored-node set: primary, secondary, tertiary.
  assertEquals(3, favoredNodesBefore.size());
  // the primary RS should be the one that the balancer's assignment returns
  assertTrue(ServerName.isSameHostnameAndPort(serverBefore.iterator().next(),
      favoredNodesBefore.get(PRIMARY)));
  // now remove the primary from the list of available servers
  List<ServerName> removedServers = removeMatchingServers(serverBefore, servers);
  // call roundRobinAssignment with the modified servers list
  assignmentMap = balancer.roundRobinAssignment(regions, servers);
  List<ServerName> favoredNodesAfter =
      ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
  assertEquals(3, favoredNodesAfter.size());
  // We don't expect the favored nodes assignments to change in multiple calls
  // to the roundRobinAssignment method in the balancer (relevant for AssignmentManager.assign
  // failures)
  assertTrue(favoredNodesAfter.containsAll(favoredNodesBefore));
  Set<ServerName> serverAfter = assignmentMap.keySet();
  // We expect the new RegionServer assignee to be one of the favored nodes
  // chosen earlier.
  assertTrue(ServerName.isSameHostnameAndPort(serverAfter.iterator().next(),
               favoredNodesBefore.get(SECONDARY)) ||
             ServerName.isSameHostnameAndPort(serverAfter.iterator().next(),
               favoredNodesBefore.get(TERTIARY)));

  // put back the primary in the list of available servers
  servers.addAll(removedServers);
  // now roundRobinAssignment with the modified servers list should return the primary
  // as the regionserver assignee
  assignmentMap = balancer.roundRobinAssignment(regions, servers);
  Set<ServerName> serverWithPrimary = assignmentMap.keySet();
  assertTrue(serverBefore.containsAll(serverWithPrimary));

  // Make all the favored nodes unavailable for assignment
  removeMatchingServers(favoredNodesAfter, servers);
  // call roundRobinAssignment with the modified servers list
  assignmentMap = balancer.roundRobinAssignment(regions, servers);
  List<ServerName> favoredNodesNow =
      ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
  assertEquals(3, favoredNodesNow.size());
  // With all previous favored nodes gone, a brand-new set must be chosen.
  assertFalse(favoredNodesNow.contains(favoredNodesAfter.get(PRIMARY)));
  assertFalse(favoredNodesNow.contains(favoredNodesAfter.get(SECONDARY)));
  assertFalse(favoredNodesNow.contains(favoredNodesAfter.get(TERTIARY)));
}
项目:HIndex    文件:TestRegionPlacement.java   
/**
 * Verify that randomAssignment hands out the primary favored node first, falls
 * back to the secondary/tertiary when the primary is removed, and recomputes a
 * fresh favored-node set once all three nodes are unavailable.
 */
@Test
public void testFavoredNodesPresentForRandomAssignment() throws HBaseIOException {
  LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(TEST_UTIL.getConfiguration());
  balancer.setMasterServices(TEST_UTIL.getMiniHBaseCluster().getMaster());
  List<ServerName> servers = new ArrayList<ServerName>();
  for (int i = 0; i < SLAVES; i++) {
    ServerName server = TEST_UTIL.getMiniHBaseCluster().getRegionServer(i).getServerName();
    servers.add(server);
  }
  List<HRegionInfo> regions = new ArrayList<HRegionInfo>(1);
  HRegionInfo region = new HRegionInfo(TableName.valueOf("foobar"));
  regions.add(region);
  ServerName serverBefore = balancer.randomAssignment(region, servers);
  List<ServerName> favoredNodesBefore =
      ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
  // The region must get a full favored-node set: primary, secondary, tertiary.
  assertEquals(3, favoredNodesBefore.size());
  // the primary RS should be the one that the balancer's assignment returns
  assertTrue(ServerName.isSameHostnameAndPort(serverBefore, favoredNodesBefore.get(PRIMARY)));
  // now remove the primary from the list of servers
  removeMatchingServers(serverBefore, servers);
  // call randomAssignment with the modified servers list
  ServerName serverAfter = balancer.randomAssignment(region, servers);
  List<ServerName> favoredNodesAfter =
      ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
  assertEquals(3, favoredNodesAfter.size());
  // We don't expect the favored nodes assignments to change in multiple calls
  // to the randomAssignment method in the balancer (relevant for AssignmentManager.assign
  // failures)
  assertTrue(favoredNodesAfter.containsAll(favoredNodesBefore));
  // We expect the new RegionServer assignee to be one of the favored nodes
  // chosen earlier.
  assertTrue(ServerName.isSameHostnameAndPort(serverAfter, favoredNodesBefore.get(SECONDARY)) ||
             ServerName.isSameHostnameAndPort(serverAfter, favoredNodesBefore.get(TERTIARY)));
  // Make all the favored nodes unavailable for assignment
  removeMatchingServers(favoredNodesAfter, servers);
  // call randomAssignment with the modified servers list
  balancer.randomAssignment(region, servers);
  List<ServerName> favoredNodesNow =
      ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
  assertEquals(3, favoredNodesNow.size());
  // With all previous favored nodes gone, a brand-new set must be chosen.
  assertFalse(favoredNodesNow.contains(favoredNodesAfter.get(PRIMARY)));
  assertFalse(favoredNodesNow.contains(favoredNodesAfter.get(SECONDARY)));
  assertFalse(favoredNodesNow.contains(favoredNodesAfter.get(TERTIARY)));
}
项目:HIndex    文件:TestAssignmentManager.java   
/**
 * Tests AssignmentManager balance function.  Runs a balance moving a region
 * from one server to another mocking regionserver responding over zk.
 * @throws IOException
 * @throws KeeperException
 * @throws DeserializationException
 */
@Test
public void testBalance()
  throws IOException, KeeperException, DeserializationException, InterruptedException {
  // Create and startup an executor.  This is used by AssignmentManager
  // handling zk callbacks.
  ExecutorService executor = startupMasterExecutor("testBalanceExecutor");

  // We need a mocked catalog tracker.
  CatalogTracker ct = Mockito.mock(CatalogTracker.class);
  LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(server
      .getConfiguration());
  // Create an AM.
  AssignmentManager am = new AssignmentManager(this.server,
    this.serverManager, ct, balancer, executor, null, master.getTableLockManager());
  am.failoverCleanupDone.set(true);
  try {
    // Make sure our new AM gets callbacks; once registered, can't unregister.
    // Thats ok because we make a new zk watcher for each test.
    this.watcher.registerListenerFirst(am);
    // Call the balance function but fake the region being online first at
    // SERVERNAME_A.  Create a balance plan.
    am.regionOnline(REGIONINFO, SERVERNAME_A);
    // Balance region from A to B.
    RegionPlan plan = new RegionPlan(REGIONINFO, SERVERNAME_A, SERVERNAME_B);
    am.balance(plan);

    RegionStates regionStates = am.getRegionStates();
    // Must be failed to close since the server is fake
    assertTrue(regionStates.isRegionInTransition(REGIONINFO)
      && regionStates.isRegionInState(REGIONINFO, State.FAILED_CLOSE));
    // Move it back to pending_close
    regionStates.updateRegionState(REGIONINFO, State.PENDING_CLOSE);

    // Now fake the region closing successfully over on the regionserver; the
    // regionserver will have set the region in CLOSED state.  This will
    // trigger callback into AM. The below zk close call is from the RS close
    // region handler duplicated here because its down deep in a private
    // method hard to expose.
    int versionid =
      ZKAssign.transitionNodeClosed(this.watcher, REGIONINFO, SERVERNAME_A, -1);
    // Consistent (unexpected, actual) argument order, matching the other
    // assertNotSame calls below.
    assertNotSame(-1, versionid);
    // AM is going to notice above CLOSED and queue up a new assign.  The
    // assign will go to open the region in the new location set by the
    // balancer.  The zk node will be OFFLINE waiting for regionserver to
    // transition it through OPENING, OPENED.  Wait till we see the OFFLINE
    // zk node before we proceed.
    Mocking.waitForRegionPendingOpenInRIT(am, REGIONINFO.getEncodedName());

    // Get current versionid else will fail on transition from OFFLINE to OPENING below
    versionid = ZKAssign.getVersion(this.watcher, REGIONINFO);
    assertNotSame(-1, versionid);
    // This uglyness below is what the openregionhandler on RS side does.
    versionid = ZKAssign.transitionNode(server.getZooKeeper(), REGIONINFO,
      SERVERNAME_B, EventType.M_ZK_REGION_OFFLINE,
      EventType.RS_ZK_REGION_OPENING, versionid);
    assertNotSame(-1, versionid);
    // Move znode from OPENING to OPENED as RS does on successful open.
    versionid =
      ZKAssign.transitionNodeOpened(this.watcher, REGIONINFO, SERVERNAME_B, versionid);
    assertNotSame(-1, versionid);
    // Wait on the handler removing the OPENED znode.
    while(regionStates.isRegionInTransition(REGIONINFO)) Threads.sleep(1);
  } finally {
    executor.shutdown();
    am.shutdown();
    // Clean up all znodes
    ZKAssign.deleteAllNodes(this.watcher);
  }
}
项目:HIndex    文件:TestAssignmentManager.java   
/**
 * Shared driver for the server-shutdown-while-disabling tests: puts REGIONINFO
 * in PENDING_CLOSE with a CLOSING znode, marks its table DISABLING or DISABLED
 * per {@code state}, runs the server shutdown handler and verifies the znode is
 * cleaned up (and, for DISABLED, that the region leaves the RIT map).
 */
private void testCaseWithPartiallyDisabledState(Table.State state) throws KeeperException,
    IOException, NodeExistsException, ServiceException {
  // Create and startup an executor. This is used by AssignmentManager
  // handling zk callbacks.
  ExecutorService executor = startupMasterExecutor("testSSHWhenDisableTableInProgress");
  // We need a mocked catalog tracker.
  CatalogTracker ct = Mockito.mock(CatalogTracker.class);
  LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(server.getConfiguration());
  ZKAssign.deleteAllNodes(this.watcher);

  // Create an AM.
  AssignmentManager am = new AssignmentManager(this.server,
    this.serverManager, ct, balancer, executor, null, master.getTableLockManager());
  // adding region to regions and servers maps.
  am.regionOnline(REGIONINFO, SERVERNAME_A);
  // adding region in pending close.
  am.getRegionStates().updateRegionState(REGIONINFO, State.PENDING_CLOSE);
  if (state == Table.State.DISABLING) {
    am.getZKTable().setDisablingTable(REGIONINFO.getTable());
  } else {
    am.getZKTable().setDisabledTable(REGIONINFO.getTable());
  }
  RegionTransition data = RegionTransition.createRegionTransition(EventType.M_ZK_REGION_CLOSING,
      REGIONINFO.getRegionName(), SERVERNAME_A);
  String node = ZKAssign.getNodeName(this.watcher, REGIONINFO.getEncodedName());
  // create znode in M_ZK_REGION_CLOSING state.
  ZKUtil.createAndWatch(this.watcher, node, data.toByteArray());

  try {
    processServerShutdownHandler(ct, am, false);
    // check znode deleted or not.
    // In both cases the znode should be deleted.
    assertTrue("The znode should be deleted.", ZKUtil.checkExists(this.watcher, node) == -1);
    // check whether in rit or not. In the DISABLING case also the below
    // assert will be true but the piece of code added for HBASE-5927 will not
    // do that.
    if (state == Table.State.DISABLED) {
      assertFalse("Region state of region in pending close should be removed from rit.",
          am.getRegionStates().isRegionsInTransition());
    }
  } finally {
    am.setEnabledTable(REGIONINFO.getTable());
    executor.shutdown();
    am.shutdown();
    // Clean up all znodes
    ZKAssign.deleteAllNodes(this.watcher);
  }
}
项目:HIndex    文件:TestAssignmentManager.java   
/**
 * Create an {@link AssignmentManagerWithExtrasForTesting} that has mocked
 * {@link CatalogTracker} etc.
 * <p>
 * The returned AM is wired to a mocked region-server RPC stub whose meta
 * scans/gets answer with a single row placing {@code REGIONINFO} on
 * {@code SERVERNAME_A} -- what a newly-joined master would read while
 * rebuilding its list of user regions.
 * @param server the (mocked) master server the AM runs inside
 * @param manager the (mocked) ServerManager handed to the AM
 * @return An AssignmentManagerWithExtras with mock connections, etc.
 * @throws IOException
 * @throws KeeperException
 */
private AssignmentManagerWithExtrasForTesting setUpMockedAssignmentManager(final Server server,
    final ServerManager manager) throws IOException, KeeperException, ServiceException {
  // We need a mocked catalog tracker. Its used by our AM instance.
  CatalogTracker ct = Mockito.mock(CatalogTracker.class);
  // Make an RS Interface implementation. Make it so a scanner can go against
  // it and a get to return the single region, REGIONINFO, this test is
  // messing with. Needed when "new master" joins cluster. AM will try and
  // rebuild its list of user regions and it will also get the HRI that goes
  // with an encoded name by doing a Get on hbase:meta
  ClientProtos.ClientService.BlockingInterface ri =
    Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
  // Get a meta row result that has region up on SERVERNAME_A for REGIONINFO
  Result r = MetaMockingUtil.getMetaTableRowResult(REGIONINFO, SERVERNAME_A);
  final ScanResponse.Builder builder = ScanResponse.newBuilder();
  builder.setMoreResults(true);
  builder.addCellsPerResult(r.size());
  final List<CellScannable> rows = new ArrayList<CellScannable>(1);
  rows.add(r);
  // Scan answer: attach the single meta row as the RPC's cell payload (when a
  // controller was supplied) and report that more results may follow.
  Answer<ScanResponse> ans = new Answer<ClientProtos.ScanResponse>() {
    @Override
    public ScanResponse answer(InvocationOnMock invocation) throws Throwable {
      PayloadCarryingRpcController controller = (PayloadCarryingRpcController) invocation
          .getArguments()[0];
      if (controller != null) {
        controller.setCellScanner(CellUtil.createCellScanner(rows));
      }
      return builder.build();
    }
  };
  if (enabling) {
    // In the 'enabling' scenario the AM scans meta repeatedly: serve the row
    // five times, then a terminal response (moreResults=false) ends the scan.
    Mockito.when(ri.scan((RpcController) Mockito.any(), (ScanRequest) Mockito.any()))
        .thenAnswer(ans).thenAnswer(ans).thenAnswer(ans).thenAnswer(ans).thenAnswer(ans)
        .thenReturn(ScanResponse.newBuilder().setMoreResults(false).build());
  } else {
    Mockito.when(ri.scan((RpcController) Mockito.any(), (ScanRequest) Mockito.any())).thenAnswer(
        ans);
  }
  // If a get, return the above result too for REGIONINFO
  GetResponse.Builder getBuilder = GetResponse.newBuilder();
  getBuilder.setResult(ProtobufUtil.toResult(r));
  Mockito.when(ri.get((RpcController)Mockito.any(), (GetRequest) Mockito.any())).
    thenReturn(getBuilder.build());
  // Get a connection w/ mocked up common methods.
  HConnection connection = HConnectionTestingUtility.
    getMockedConnectionAndDecorate(HTU.getConfiguration(), null,
      ri, SERVERNAME_B, REGIONINFO);
  // Make it so we can get the connection from our mocked catalogtracker
  Mockito.when(ct.getConnection()).thenReturn(connection);
  // Create and startup an executor. Used by AM handling zk callbacks.
  ExecutorService executor = startupMasterExecutor("mockedAMExecutor");
  this.balancer = LoadBalancerFactory.getLoadBalancer(server.getConfiguration());
  AssignmentManagerWithExtrasForTesting am = new AssignmentManagerWithExtrasForTesting(
    server, manager, ct, this.balancer, executor, new NullTableLockManager());
  return am;
}
项目:HIndex    文件:TestAssignmentManager.java   
/**
 * Test assignment related ZK events are ignored by AM if the region is not known
 * by AM to be in transition. During normal operation, all assignments are started
 * by AM (not considering split/merge), if an event is received but the region
 * is not in transition, the event must be a very late one. So it can be ignored.
 * During master failover, since AM watches assignment znodes after failover cleanup
 * is completed, when an event comes in, AM should already have the region in transition
 * if ZK is used during the assignment action (only hbck doesn't use ZK for region
 * assignment). So during master failover, we can ignored such events too.
 */
@Test
public void testAssignmentEventIgnoredIfNotExpected() throws KeeperException, IOException {
  // Region to use in test.
  final HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO;
  // Need a mocked catalog tracker.
  CatalogTracker ct = Mockito.mock(CatalogTracker.class);
  LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(
    server.getConfiguration());
  // Flips to true once the AM has run handleRegion() for our region's
  // RS_ZK_REGION_OPENING event (set by the subclass hook below).
  final AtomicBoolean zkEventProcessed = new AtomicBoolean(false);
  // Create an AM. The anonymous subclass only observes handleRegion() calls;
  // it delegates to super first, so AM behavior is unchanged.
  AssignmentManager am = new AssignmentManager(this.server,
    this.serverManager, ct, balancer, null, null, master.getTableLockManager()) {

    @Override
    void handleRegion(final RegionTransition rt, int expectedVersion) {
      super.handleRegion(rt, expectedVersion);
      // Record that the OPENING event we fake below made it through the handler.
      if (rt != null && Bytes.equals(hri.getRegionName(),
        rt.getRegionName()) && rt.getEventType() == EventType.RS_ZK_REGION_OPENING) {
        zkEventProcessed.set(true);
      }
    }
  };
  try {
    // First make sure the region is not in transition
    am.getRegionStates().regionOffline(hri);
    zkEventProcessed.set(false); // Reset it before faking zk transition
    this.watcher.registerListenerFirst(am);
    assertFalse("The region should not be in transition",
      am.getRegionStates().isRegionInTransition(hri));
    ZKAssign.createNodeOffline(this.watcher, hri, SERVERNAME_A);
    // Trigger a transition event
    ZKAssign.transitionNodeOpening(this.watcher, hri, SERVERNAME_A);
    long startTime = EnvironmentEdgeManager.currentTimeMillis();
    // Wait (bounded to 30s) for the async zk callback to reach our hook.
    while (!zkEventProcessed.get()) {
      assertTrue("Timed out in waiting for ZK event to be processed",
        EnvironmentEdgeManager.currentTimeMillis() - startTime < 30000);
      Threads.sleepWithoutInterrupt(100);
    }
    // Although the event was processed, the region must still not be in
    // transition: AM ignores events for regions it did not start assigning.
    assertFalse(am.getRegionStates().isRegionInTransition(hri));
  } finally {
    am.shutdown();
  }
}
项目:hbase    文件:HMaster.java   
/**
 * Initialize all ZK based system trackers.
 * <p>
 * Order matters here: the balancer/normalizer are created before their
 * trackers start, the AssignmentManager is started before the region-server
 * and draining trackers that feed it, and the cluster-up flag is only set
 * once the trackers that new region servers depend on are running.
 */
void initializeZKBasedSystemTrackers() throws IOException, InterruptedException, KeeperException,
    CoordinatedStateException, ReplicationException {
  this.balancer = LoadBalancerFactory.getLoadBalancer(conf);
  this.normalizer = RegionNormalizerFactory.getRegionNormalizer(conf);
  this.normalizer.setMasterServices(this);
  this.normalizer.setMasterRpcServices((MasterRpcServices)rpcServices);
  this.loadBalancerTracker = new LoadBalancerTracker(zooKeeper, this);
  this.loadBalancerTracker.start();

  this.regionNormalizerTracker = new RegionNormalizerTracker(zooKeeper, this);
  this.regionNormalizerTracker.start();

  this.splitOrMergeTracker = new SplitOrMergeTracker(zooKeeper, conf, this);
  this.splitOrMergeTracker.start();

  // Create Assignment Manager
  this.assignmentManager = new AssignmentManager(this);
  this.assignmentManager.start();

  this.replicationPeerManager = ReplicationPeerManager.create(zooKeeper, conf);

  this.regionServerTracker = new RegionServerTracker(zooKeeper, this, this.serverManager);
  this.regionServerTracker.start();

  this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this, this.serverManager);
  this.drainingServerTracker.start();

  this.maintenanceModeTracker = new MasterMaintenanceModeTracker(zooKeeper);
  this.maintenanceModeTracker.start();

  // Set the cluster as up.  If new RSs, they'll be waiting on this before
  // going ahead with their startup.
  boolean wasUp = this.clusterStatusTracker.isClusterUp();
  if (!wasUp) {
    this.clusterStatusTracker.setClusterUp();
  }

  LOG.info("Active/primary master=" + this.serverName +
      ", sessionid=0x" +
      Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) +
      ", setting cluster-up flag (Was=" + wasUp + ")");

  // create/initialize the snapshot manager and other procedure managers
  this.snapshotManager = new SnapshotManager();
  this.mpmHost = new MasterProcedureManagerHost();
  this.mpmHost.register(this.snapshotManager);
  this.mpmHost.register(new MasterFlushTableProcedureManager());
  this.mpmHost.loadProcedures(conf);
  this.mpmHost.initialize(this, this.metricsMaster);
}
项目:hbase    文件:TestRegionPlacement2.java   
@Test
public void testFavoredNodesPresentForRoundRobinAssignment() throws HBaseIOException {
  LoadBalancer loadBalancer = LoadBalancerFactory.getLoadBalancer(TEST_UTIL.getConfiguration());
  loadBalancer.setMasterServices(TEST_UTIL.getMiniHBaseCluster().getMaster());
  loadBalancer.initialize();
  // Gather every live region server as a candidate for assignment.
  List<ServerName> candidateServers = new ArrayList<>();
  for (int idx = 0; idx < SLAVES; idx++) {
    candidateServers.add(TEST_UTIL.getMiniHBaseCluster().getRegionServer(idx).getServerName());
  }
  // A single fresh region to push through round-robin assignment.
  RegionInfo hri = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build();
  List<RegionInfo> regionsToAssign = new ArrayList<>(1);
  regionsToAssign.add(hri);
  Map<ServerName, List<RegionInfo>> plan =
      loadBalancer.roundRobinAssignment(regionsToAssign, candidateServers);
  Set<ServerName> initialAssignees = plan.keySet();
  List<ServerName> initialFavored =
      ((FavoredNodeLoadBalancer) loadBalancer).getFavoredNodes(hri);
  assertTrue(initialFavored.size() == FavoredNodeAssignmentHelper.FAVORED_NODES_NUM);
  // The assignee chosen by the balancer must be the primary favored node.
  assertTrue(ServerName.isSameAddress(initialAssignees.iterator().next(),
      initialFavored.get(PRIMARY)));
  // Drop the primary from the candidate pool and re-run the assignment.
  List<ServerName> droppedServers = removeMatchingServers(initialAssignees, candidateServers);
  plan = loadBalancer.roundRobinAssignment(regionsToAssign, candidateServers);
  List<ServerName> favoredAfterDrop =
      ((FavoredNodeLoadBalancer) loadBalancer).getFavoredNodes(hri);
  assertTrue(favoredAfterDrop.size() == FavoredNodeAssignmentHelper.FAVORED_NODES_NUM);
  // Favored nodes must stay stable across repeated roundRobinAssignment calls
  // (this matters when AssignmentManager.assign retries after failures).
  assertTrue(favoredAfterDrop.containsAll(initialFavored));
  Set<ServerName> assigneesAfterDrop = plan.keySet();
  // With the primary gone, the new assignee has to be secondary or tertiary.
  assertTrue(ServerName.isSameAddress(assigneesAfterDrop.iterator().next(),
               initialFavored.get(SECONDARY)) ||
             ServerName.isSameAddress(assigneesAfterDrop.iterator().next(),
               initialFavored.get(TERTIARY)));

  // Restore the primary; assignment should return to it.
  candidateServers.addAll(droppedServers);
  plan = loadBalancer.roundRobinAssignment(regionsToAssign, candidateServers);
  Set<ServerName> assigneesWithPrimaryBack = plan.keySet();
  assertTrue(initialAssignees.containsAll(assigneesWithPrimaryBack));

  // Knock out every favored node; the balancer must generate a fresh set.
  removeMatchingServers(favoredAfterDrop, candidateServers);
  plan = loadBalancer.roundRobinAssignment(regionsToAssign, candidateServers);
  List<ServerName> regeneratedFavored =
      ((FavoredNodeLoadBalancer) loadBalancer).getFavoredNodes(hri);
  assertTrue(regeneratedFavored.size() == FavoredNodeAssignmentHelper.FAVORED_NODES_NUM);
  assertTrue(!regeneratedFavored.contains(favoredAfterDrop.get(PRIMARY)) &&
      !regeneratedFavored.contains(favoredAfterDrop.get(SECONDARY)) &&
      !regeneratedFavored.contains(favoredAfterDrop.get(TERTIARY)));
}
项目:hbase    文件:TestRegionPlacement2.java   
/**
 * Random assignment should pick the region's primary favored node first, fall
 * back to secondary/tertiary when the primary is unavailable, and regenerate
 * the favored nodes only once all three are gone.
 */
@Test
public void testFavoredNodesPresentForRandomAssignment() throws HBaseIOException {
  LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(TEST_UTIL.getConfiguration());
  balancer.setMasterServices(TEST_UTIL.getMiniHBaseCluster().getMaster());
  balancer.initialize();
  // All live region servers are candidates for assignment.
  List<ServerName> servers = new ArrayList<>();
  for (int i = 0; i < SLAVES; i++) {
    ServerName server = TEST_UTIL.getMiniHBaseCluster().getRegionServer(i).getServerName();
    servers.add(server);
  }
  // randomAssignment takes the region directly; the List<RegionInfo> the
  // original built here was never used (dead code) and has been removed.
  RegionInfo region = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build();
  ServerName serverBefore = balancer.randomAssignment(region, servers);
  List<ServerName> favoredNodesBefore =
      ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
  assertTrue(favoredNodesBefore.size() == FavoredNodeAssignmentHelper.FAVORED_NODES_NUM);
  // the primary RS should be the one that the balancer's assignment returns
  assertTrue(ServerName.isSameAddress(serverBefore,favoredNodesBefore.get(PRIMARY)));
  // now remove the primary from the list of servers
  removeMatchingServers(serverBefore, servers);
  // call randomAssignment with the modified servers list
  ServerName serverAfter = balancer.randomAssignment(region, servers);
  List<ServerName> favoredNodesAfter =
      ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
  assertTrue(favoredNodesAfter.size() == FavoredNodeAssignmentHelper.FAVORED_NODES_NUM);
  // We don't expect the favored nodes assignments to change in multiple calls
  // to the randomAssignment method in the balancer (relevant for AssignmentManager.assign
  // failures)
  assertTrue(favoredNodesAfter.containsAll(favoredNodesBefore));
  // We expect the new RegionServer assignee to be one of the favored nodes
  // chosen earlier.
  assertTrue(ServerName.isSameAddress(serverAfter, favoredNodesBefore.get(SECONDARY)) ||
             ServerName.isSameAddress(serverAfter, favoredNodesBefore.get(TERTIARY)));
  // Make all the favored nodes unavailable for assignment
  removeMatchingServers(favoredNodesAfter, servers);
  // call randomAssignment with the modified servers list
  balancer.randomAssignment(region, servers);
  List<ServerName> favoredNodesNow =
      ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
  assertTrue(favoredNodesNow.size() == FavoredNodeAssignmentHelper.FAVORED_NODES_NUM);
  assertTrue(!favoredNodesNow.contains(favoredNodesAfter.get(PRIMARY)) &&
      !favoredNodesNow.contains(favoredNodesAfter.get(SECONDARY)) &&
      !favoredNodesNow.contains(favoredNodesAfter.get(TERTIARY)));
}
项目:PyroDB    文件:TestRegionPlacement.java   
/**
 * Round-robin assignment should pick the region's primary favored node first,
 * fall back to secondary/tertiary when the primary is unavailable, and
 * regenerate favored nodes only when all three are gone.
 */
@Test
public void testFavoredNodesPresentForRoundRobinAssignment() throws HBaseIOException {
  // Number of favored nodes kept per region (primary + secondary + tertiary);
  // was a bare magic number repeated in three asserts below.
  final int expectedFavoredNodes = 3;
  LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(TEST_UTIL.getConfiguration());
  balancer.setMasterServices(TEST_UTIL.getMiniHBaseCluster().getMaster());
  List<ServerName> servers = new ArrayList<ServerName>();
  for (int i = 0; i < SLAVES; i++) {
    ServerName server = TEST_UTIL.getMiniHBaseCluster().getRegionServer(i).getServerName();
    servers.add(server);
  }
  List<HRegionInfo> regions = new ArrayList<HRegionInfo>(1);
  HRegionInfo region = new HRegionInfo(TableName.valueOf("foobar"));
  regions.add(region);
  Map<ServerName,List<HRegionInfo>> assignmentMap = balancer.roundRobinAssignment(regions,
      servers);
  Set<ServerName> serverBefore = assignmentMap.keySet();
  List<ServerName> favoredNodesBefore =
      ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
  assertTrue(favoredNodesBefore.size() == expectedFavoredNodes);
  // the primary RS should be the one that the balancer's assignment returns
  assertTrue(ServerName.isSameHostnameAndPort(serverBefore.iterator().next(),
      favoredNodesBefore.get(PRIMARY)));
  // now remove the primary from the list of available servers
  List<ServerName> removedServers = removeMatchingServers(serverBefore, servers);
  // call roundRobinAssignment with the modified servers list
  assignmentMap = balancer.roundRobinAssignment(regions, servers);
  List<ServerName> favoredNodesAfter =
      ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
  assertTrue(favoredNodesAfter.size() == expectedFavoredNodes);
  // We don't expect the favored nodes assignments to change in multiple calls
  // to the roundRobinAssignment method in the balancer (relevant for AssignmentManager.assign
  // failures)
  assertTrue(favoredNodesAfter.containsAll(favoredNodesBefore));
  Set<ServerName> serverAfter = assignmentMap.keySet();
  // We expect the new RegionServer assignee to be one of the favored nodes
  // chosen earlier.
  assertTrue(ServerName.isSameHostnameAndPort(serverAfter.iterator().next(),
               favoredNodesBefore.get(SECONDARY)) ||
             ServerName.isSameHostnameAndPort(serverAfter.iterator().next(),
               favoredNodesBefore.get(TERTIARY)));

  // put back the primary in the list of available servers
  servers.addAll(removedServers);
  // now roundRobinAssignment with the modified servers list should return the primary
  // as the regionserver assignee
  assignmentMap = balancer.roundRobinAssignment(regions, servers);
  Set<ServerName> serverWithPrimary = assignmentMap.keySet();
  assertTrue(serverBefore.containsAll(serverWithPrimary));

  // Make all the favored nodes unavailable for assignment
  removeMatchingServers(favoredNodesAfter, servers);
  // call roundRobinAssignment with the modified servers list
  assignmentMap = balancer.roundRobinAssignment(regions, servers);
  List<ServerName> favoredNodesNow =
      ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
  assertTrue(favoredNodesNow.size() == expectedFavoredNodes);
  assertTrue(!favoredNodesNow.contains(favoredNodesAfter.get(PRIMARY)) &&
      !favoredNodesNow.contains(favoredNodesAfter.get(SECONDARY)) &&
      !favoredNodesNow.contains(favoredNodesAfter.get(TERTIARY)));
}
项目:PyroDB    文件:TestRegionPlacement.java   
/**
 * Random assignment should pick the region's primary favored node first, fall
 * back to secondary/tertiary when the primary is unavailable, and regenerate
 * the favored nodes only once all three are gone.
 */
@Test
public void testFavoredNodesPresentForRandomAssignment() throws HBaseIOException {
  // Number of favored nodes kept per region (primary + secondary + tertiary);
  // was a bare magic number repeated in three asserts below.
  final int expectedFavoredNodes = 3;
  LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(TEST_UTIL.getConfiguration());
  balancer.setMasterServices(TEST_UTIL.getMiniHBaseCluster().getMaster());
  List<ServerName> servers = new ArrayList<ServerName>();
  for (int i = 0; i < SLAVES; i++) {
    ServerName server = TEST_UTIL.getMiniHBaseCluster().getRegionServer(i).getServerName();
    servers.add(server);
  }
  // randomAssignment takes the region directly; the List<HRegionInfo> the
  // original built here was never used (dead code) and has been removed.
  HRegionInfo region = new HRegionInfo(TableName.valueOf("foobar"));
  ServerName serverBefore = balancer.randomAssignment(region, servers);
  List<ServerName> favoredNodesBefore =
      ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
  assertTrue(favoredNodesBefore.size() == expectedFavoredNodes);
  // the primary RS should be the one that the balancer's assignment returns
  assertTrue(ServerName.isSameHostnameAndPort(serverBefore,favoredNodesBefore.get(PRIMARY)));
  // now remove the primary from the list of servers
  removeMatchingServers(serverBefore, servers);
  // call randomAssignment with the modified servers list
  ServerName serverAfter = balancer.randomAssignment(region, servers);
  List<ServerName> favoredNodesAfter =
      ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
  assertTrue(favoredNodesAfter.size() == expectedFavoredNodes);
  // We don't expect the favored nodes assignments to change in multiple calls
  // to the randomAssignment method in the balancer (relevant for AssignmentManager.assign
  // failures)
  assertTrue(favoredNodesAfter.containsAll(favoredNodesBefore));
  // We expect the new RegionServer assignee to be one of the favored nodes
  // chosen earlier.
  assertTrue(ServerName.isSameHostnameAndPort(serverAfter, favoredNodesBefore.get(SECONDARY)) ||
             ServerName.isSameHostnameAndPort(serverAfter, favoredNodesBefore.get(TERTIARY)));
  // Make all the favored nodes unavailable for assignment
  removeMatchingServers(favoredNodesAfter, servers);
  // call randomAssignment with the modified servers list
  balancer.randomAssignment(region, servers);
  List<ServerName> favoredNodesNow =
      ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
  assertTrue(favoredNodesNow.size() == expectedFavoredNodes);
  assertTrue(!favoredNodesNow.contains(favoredNodesAfter.get(PRIMARY)) &&
      !favoredNodesNow.contains(favoredNodesAfter.get(SECONDARY)) &&
      !favoredNodesNow.contains(favoredNodesAfter.get(TERTIARY)));
}
项目:PyroDB    文件:TestAssignmentManager.java   
/**
 * Tests AssignmentManager balance function.  Runs a balance moving a region
 * from one server to another mocking regionserver responding over zk.
 * <p>
 * The test drives the full ZK handshake by hand: CLOSED on the source server,
 * then OFFLINE -&gt; OPENING -&gt; OPENED on the target, exactly the znode
 * transitions a real regionserver would perform.
 * @throws IOException
 * @throws KeeperException
 * @throws DeserializationException
 */
@Test
public void testBalance() throws IOException, KeeperException, DeserializationException,
    InterruptedException, CoordinatedStateException {
  // Create and startup an executor.  This is used by AssignmentManager
  // handling zk callbacks.
  ExecutorService executor = startupMasterExecutor("testBalanceExecutor");

  // We need a mocked catalog tracker.
  CatalogTracker ct = Mockito.mock(CatalogTracker.class);
  LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(server
      .getConfiguration());
  // Create an AM.
  AssignmentManager am = new AssignmentManager(this.server,
    this.serverManager, ct, balancer, executor, null, master.getTableLockManager());
  // Pretend master-failover cleanup already completed so balance() is allowed.
  am.failoverCleanupDone.set(true);
  try {
    // Make sure our new AM gets callbacks; once registered, can't unregister.
    // Thats ok because we make a new zk watcher for each test.
    this.watcher.registerListenerFirst(am);
    // Call the balance function but fake the region being online first at
    // SERVERNAME_A.  Create a balance plan.
    am.regionOnline(REGIONINFO, SERVERNAME_A);
    // Balance region from A to B.
    RegionPlan plan = new RegionPlan(REGIONINFO, SERVERNAME_A, SERVERNAME_B);
    am.balance(plan);

    RegionStates regionStates = am.getRegionStates();
    // Must be failed to close since the server is fake
    assertTrue(regionStates.isRegionInTransition(REGIONINFO)
      && regionStates.isRegionInState(REGIONINFO, State.FAILED_CLOSE));
    // Move it back to pending_close
    regionStates.updateRegionState(REGIONINFO, State.PENDING_CLOSE);

    // Now fake the region closing successfully over on the regionserver; the
    // regionserver will have set the region in CLOSED state.  This will
    // trigger callback into AM. The below zk close call is from the RS close
    // region handler duplicated here because its down deep in a private
    // method hard to expose.
    int versionid =
      ZKAssign.transitionNodeClosed(this.watcher, REGIONINFO, SERVERNAME_A, -1);
    assertNotSame(versionid, -1);
    // AM is going to notice above CLOSED and queue up a new assign.  The
    // assign will go to open the region in the new location set by the
    // balancer.  The zk node will be OFFLINE waiting for regionserver to
    // transition it through OPENING, OPENED.  Wait till we see the OFFLINE
    // zk node before we proceed.
    Mocking.waitForRegionPendingOpenInRIT(am, REGIONINFO.getEncodedName());

    // Get current versionid else will fail on transition from OFFLINE to OPENING below
    versionid = ZKAssign.getVersion(this.watcher, REGIONINFO);
    assertNotSame(-1, versionid);
    // This uglyness below is what the openregionhandler on RS side does.
    versionid = ZKAssign.transitionNode(server.getZooKeeper(), REGIONINFO,
      SERVERNAME_B, EventType.M_ZK_REGION_OFFLINE,
      EventType.RS_ZK_REGION_OPENING, versionid);
    assertNotSame(-1, versionid);
    // Move znode from OPENING to OPENED as RS does on successful open.
    versionid =
      ZKAssign.transitionNodeOpened(this.watcher, REGIONINFO, SERVERNAME_B, versionid);
    assertNotSame(-1, versionid);
    // Wait on the handler removing the OPENED znode.
    while(regionStates.isRegionInTransition(REGIONINFO)) Threads.sleep(1);
  } finally {
    executor.shutdown();
    am.shutdown();
    // Clean up all znodes
    ZKAssign.deleteAllNodes(this.watcher);
  }
}
项目:PyroDB    文件:TestAssignmentManager.java   
/**
 * Common driver for the server-shutdown-handler vs table-disable race: puts
 * REGIONINFO in PENDING_CLOSE with a CLOSING znode, marks its table DISABLING
 * or DISABLED per {@code state}, runs the shutdown handler, and verifies the
 * znode is removed (and, in the DISABLED case, that the region leaves RIT).
 * @param state either Table.State.DISABLING or Table.State.DISABLED
 */
private void testCaseWithPartiallyDisabledState(Table.State state) throws KeeperException,
    IOException, CoordinatedStateException, ServiceException {
  // Create and startup an executor. This is used by AssignmentManager
  // handling zk callbacks.
  ExecutorService executor = startupMasterExecutor("testSSHWhenDisableTableInProgress");
  // We need a mocked catalog tracker.
  CatalogTracker ct = Mockito.mock(CatalogTracker.class);
  LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(server.getConfiguration());
  ZKAssign.deleteAllNodes(this.watcher);

  // Create an AM.
  AssignmentManager am = new AssignmentManager(this.server,
    this.serverManager, ct, balancer, executor, null, master.getTableLockManager());
  // adding region to regions and servers maps.
  am.regionOnline(REGIONINFO, SERVERNAME_A);
  // adding region in pending close.
  am.getRegionStates().updateRegionState(REGIONINFO, State.PENDING_CLOSE);
  if (state == Table.State.DISABLING) {
    am.getTableStateManager().setTableState(REGIONINFO.getTable(),
      Table.State.DISABLING);
  } else {
    am.getTableStateManager().setTableState(REGIONINFO.getTable(),
      Table.State.DISABLED);
  }
  RegionTransition data = RegionTransition.createRegionTransition(EventType.M_ZK_REGION_CLOSING,
      REGIONINFO.getRegionName(), SERVERNAME_A);
  // RegionTransitionData data = new
  // RegionTransitionData(EventType.M_ZK_REGION_CLOSING,
  // REGIONINFO.getRegionName(), SERVERNAME_A);
  String node = ZKAssign.getNodeName(this.watcher, REGIONINFO.getEncodedName());
  // create znode in M_ZK_REGION_CLOSING state.
  ZKUtil.createAndWatch(this.watcher, node, data.toByteArray());

  try {
    // Simulate the shutdown handler processing the death of SERVERNAME_A.
    processServerShutdownHandler(ct, am, false);
    // check znode deleted or not.
    // In both cases the znode should be deleted.
    assertTrue("The znode should be deleted.", ZKUtil.checkExists(this.watcher, node) == -1);
    // check whether in rit or not. In the DISABLING case also the below
    // assert will be true but the piece of code added for HBASE-5927 will not
    // do that.
    if (state == Table.State.DISABLED) {
      assertFalse("Region state of region in pending close should be removed from rit.",
          am.getRegionStates().isRegionsInTransition());
    }
  } finally {
    // Re-enable the table so later tests start from a clean slate.
    am.setEnabledTable(REGIONINFO.getTable());
    executor.shutdown();
    am.shutdown();
    // Clean up all znodes
    ZKAssign.deleteAllNodes(this.watcher);
  }
}
项目:PyroDB    文件:TestAssignmentManager.java   
/**
 * Create an {@link AssignmentManagerWithExtrasForTesting} that has mocked
 * {@link CatalogTracker} etc.
 * <p>
 * The AM's meta reads are backed by a mocked region-server RPC stub whose
 * scans/gets answer with a single row placing {@code REGIONINFO} on
 * {@code SERVERNAME_A} -- what a newly-joined master would read while
 * rebuilding its list of user regions.
 * @param server the (mocked) master server the AM runs inside
 * @param manager the (mocked) ServerManager handed to the AM
 * @return An AssignmentManagerWithExtras with mock connections, etc.
 * @throws IOException
 * @throws KeeperException
 */
private AssignmentManagerWithExtrasForTesting setUpMockedAssignmentManager(final Server server,
    final ServerManager manager) throws IOException, KeeperException,
      ServiceException, CoordinatedStateException {
  // We need a mocked catalog tracker. Its used by our AM instance.
  CatalogTracker ct = Mockito.mock(CatalogTracker.class);
  // Make an RS Interface implementation. Make it so a scanner can go against
  // it and a get to return the single region, REGIONINFO, this test is
  // messing with. Needed when "new master" joins cluster. AM will try and
  // rebuild its list of user regions and it will also get the HRI that goes
  // with an encoded name by doing a Get on hbase:meta
  ClientProtos.ClientService.BlockingInterface ri =
    Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
  // Get a meta row result that has region up on SERVERNAME_A for REGIONINFO
  Result r = MetaMockingUtil.getMetaTableRowResult(REGIONINFO, SERVERNAME_A);
  final ScanResponse.Builder builder = ScanResponse.newBuilder();
  builder.setMoreResults(true);
  builder.addCellsPerResult(r.size());
  final List<CellScannable> rows = new ArrayList<CellScannable>(1);
  rows.add(r);
  // Scan answer: attach the single meta row as the RPC's cell payload (when a
  // controller was supplied) and report that more results may follow.
  Answer<ScanResponse> ans = new Answer<ClientProtos.ScanResponse>() {
    @Override
    public ScanResponse answer(InvocationOnMock invocation) throws Throwable {
      PayloadCarryingRpcController controller = (PayloadCarryingRpcController) invocation
          .getArguments()[0];
      if (controller != null) {
        controller.setCellScanner(CellUtil.createCellScanner(rows));
      }
      return builder.build();
    }
  };
  if (enabling) {
    // In the 'enabling' scenario the AM scans meta repeatedly: serve the row
    // five times, then a terminal response (moreResults=false) ends the scan.
    Mockito.when(ri.scan((RpcController) Mockito.any(), (ScanRequest) Mockito.any()))
        .thenAnswer(ans).thenAnswer(ans).thenAnswer(ans).thenAnswer(ans).thenAnswer(ans)
        .thenReturn(ScanResponse.newBuilder().setMoreResults(false).build());
  } else {
    Mockito.when(ri.scan((RpcController) Mockito.any(), (ScanRequest) Mockito.any())).thenAnswer(
        ans);
  }
  // If a get, return the above result too for REGIONINFO
  GetResponse.Builder getBuilder = GetResponse.newBuilder();
  getBuilder.setResult(ProtobufUtil.toResult(r));
  Mockito.when(ri.get((RpcController)Mockito.any(), (GetRequest) Mockito.any())).
    thenReturn(getBuilder.build());
  // Get a connection w/ mocked up common methods.
  HConnection connection = HConnectionTestingUtility.
    getMockedConnectionAndDecorate(HTU.getConfiguration(), null,
      ri, SERVERNAME_B, REGIONINFO);
  // Make it so we can get the connection from our mocked catalogtracker
  Mockito.when(ct.getConnection()).thenReturn(connection);
  // Create and startup an executor. Used by AM handling zk callbacks.
  ExecutorService executor = startupMasterExecutor("mockedAMExecutor");
  this.balancer = LoadBalancerFactory.getLoadBalancer(server.getConfiguration());
  AssignmentManagerWithExtrasForTesting am = new AssignmentManagerWithExtrasForTesting(
    server, manager, ct, this.balancer, executor, new NullTableLockManager());
  return am;
}