Usage examples of the Java class org.apache.hadoop.hbase.util.Triple
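
The snippets below, gathered from several open-source HBase-derived projects, show the two ways Triple is used in practice: built with all three values up front and read back through its getters, or created empty and populated through its setters. As a quick orientation, here is a minimal sketch of that API, inferred only from the calls visible in the snippets (the three-argument and no-argument constructors, getFirst/getSecond/getThird, and setFirst/setSecond/setThird); it is not copied from the class itself.

// Minimal sketch of the Triple API as used below (inferred from the snippets; values are placeholders).
Triple<String, Integer, Boolean> t =
    new Triple<String, Integer, Boolean>("region", 42, Boolean.TRUE);
String first = t.getFirst();     // "region"
Integer second = t.getSecond();  // 42
Boolean third = t.getThird();    // true

// The no-argument constructor plus setters, as used in TestInterfaceAudienceAnnotations below.
Triple<String, Integer, Boolean> holder = new Triple<>();
holder.setFirst("other");
holder.setSecond(7);
holder.setThird(Boolean.FALSE);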

Project: ditb    File: ServerManager.java
/**
 * Sends an OPEN RPC to the specified server to open the specified regions.
 * <p>
 * Open should not fail, but it can if the server has just crashed.
 * <p>
 * @param server the server on which to open the regions
 * @param regionOpenInfos info for the list of regions to open
 * @return a list of region opening states
 */
public List<RegionOpeningState> sendRegionOpen(ServerName server,
    List<Triple<HRegionInfo, Integer, List<ServerName>>> regionOpenInfos)
throws IOException {
  AdminService.BlockingInterface admin = getRsAdmin(server);
  if (admin == null) {
    LOG.warn("Attempting to send OPEN RPC to server " + server.toString() +
      " failed because no RPC connection found to this server");
    return null;
  }

  OpenRegionRequest request = RequestConverter.buildOpenRegionRequest(server, regionOpenInfos,
    (RecoveryMode.LOG_REPLAY == this.services.getMasterFileSystem().getLogRecoveryMode()));
  try {
    OpenRegionResponse response = admin.openRegion(null, request);
    return ResponseConverter.getRegionOpeningStateList(response);
  } catch (ServiceException se) {
    throw ProtobufUtil.getRemoteException(se);
  }
}
Project: ditb    File: TestFavoredNodeAssignmentHelper.java
@Ignore("Disabled for now until FavoredNodes gets finished as a feature") @Test
public void testSecondaryAndTertiaryPlacementWithSingleRack() {
  // Test the case where there is a single rack and we need to choose
  // Primary/Secondary/Tertiary from a single rack.
  Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
  rackToServerCount.put("rack1", 10);
  // have lots of regions to test with
  Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
    primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount);
  FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
  Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
  List<HRegionInfo> regions = primaryRSMapAndHelper.getThird();
  Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
  // although we created lots of regions we should have no overlap on the
  // primary/secondary/tertiary for any given region
  for (HRegionInfo region : regions) {
    ServerName[] secondaryAndTertiaryServers = secondaryAndTertiaryMap.get(region);
    assertTrue(!secondaryAndTertiaryServers[0].equals(primaryRSMap.get(region)));
    assertTrue(!secondaryAndTertiaryServers[1].equals(primaryRSMap.get(region)));
    assertTrue(!secondaryAndTertiaryServers[0].equals(secondaryAndTertiaryServers[1]));
  }
}
Project: ditb    File: TestFavoredNodeAssignmentHelper.java
@Ignore("Disabled for now until FavoredNodes gets finished as a feature") @Test
public void testSecondaryAndTertiaryPlacementWithSingleServer() {
  // Test the case where we have a single node in the cluster. In this case
  // the primary can be assigned but the secondary/tertiary would be null
  Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
  rackToServerCount.put("rack1", 1);
  Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
    primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(1, rackToServerCount);
  FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
  Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
  List<HRegionInfo> regions = primaryRSMapAndHelper.getThird();

  Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
  // no secondary/tertiary placement in case of a single RegionServer
  assertTrue(secondaryAndTertiaryMap.get(regions.get(0)) == null);
}
Project: ditb    File: TestFavoredNodeAssignmentHelper.java
@Ignore("Disabled for now until FavoredNodes gets finished as a feature") @Test
public void testSecondaryAndTertiaryPlacementWithLessThanTwoServersInRacks() {
  // Test the case where we have two racks but with less than two servers in each
  // We will not have enough machines to select secondary/tertiary
  Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
  rackToServerCount.put("rack1", 1);
  rackToServerCount.put("rack2", 1);
  Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
    primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount);
  FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
  Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
  List<HRegionInfo> regions = primaryRSMapAndHelper.getThird();
  assertTrue(primaryRSMap.size() == 6);
  Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
        helper.placeSecondaryAndTertiaryRS(primaryRSMap);
  for (HRegionInfo region : regions) {
    // not enough secondary/tertiary room to place the regions
    assertTrue(secondaryAndTertiaryMap.get(region) == null);
  }
}
Project: ditb    File: TestFavoredNodeAssignmentHelper.java
private Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
secondaryAndTertiaryRSPlacementHelper(
    int regionCount, Map<String, Integer> rackToServerCount) {
  Map<HRegionInfo, ServerName> primaryRSMap = new HashMap<HRegionInfo, ServerName>();
  List<ServerName> servers = getServersFromRack(rackToServerCount);
  FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
  Map<ServerName, List<HRegionInfo>> assignmentMap =
      new HashMap<ServerName, List<HRegionInfo>>();
  helper.initialize();
  // create regions
  List<HRegionInfo> regions = new ArrayList<HRegionInfo>(regionCount);
  for (int i = 0; i < regionCount; i++) {
    HRegionInfo region = new HRegionInfo(TableName.valueOf("foobar"),
        Bytes.toBytes(i), Bytes.toBytes(i + 1));
    regions.add(region);
  }
  // place the regions
  helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions);
  return new Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
                 (primaryRSMap, helper, regions);
}
Project: ditb    File: RequestConverter.java
/**
 * Create a protocol buffer OpenRegionRequest to open a list of regions.
 *
 * @param server the ServerName for the RPC
 * @param regionOpenInfos info for the list of regions to open
 * @param openForReplay whether the regions are being opened as part of distributed log replay
 * @return a protocol buffer OpenRegionRequest
 */
public static OpenRegionRequest
    buildOpenRegionRequest(ServerName server, final List<Triple<HRegionInfo, Integer,
        List<ServerName>>> regionOpenInfos, Boolean openForReplay) {
  OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder();
  for (Triple<HRegionInfo, Integer, List<ServerName>> regionOpenInfo: regionOpenInfos) {
    Integer second = regionOpenInfo.getSecond();
    int versionOfOfflineNode = second == null ? -1 : second.intValue();
    builder.addOpenInfo(buildRegionOpenInfo(regionOpenInfo.getFirst(), versionOfOfflineNode,
      regionOpenInfo.getThird(), openForReplay));
  }
  if (server != null) {
    builder.setServerStartCode(server.getStartcode());
  }
  // send the master's wall clock time as well, so that the RS can refer to it
  builder.setMasterSystemTime(EnvironmentEdgeManager.currentTime());
  return builder.build();
}
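
For context, the Triple list consumed by buildOpenRegionRequest (and by sendRegionOpen in ServerManager.java above) carries, per region, the HRegionInfo, the version of the region's offline znode, and the list of favored nodes. The caller-side sketch below is illustrative only: the table name, keys, null znode version, and empty favored-node list are placeholders, not code from either project.

List<Triple<HRegionInfo, Integer, List<ServerName>>> regionOpenInfos =
    new ArrayList<Triple<HRegionInfo, Integer, List<ServerName>>>();
// Placeholder region; any HRegionInfo would do.
HRegionInfo hri = new HRegionInfo(TableName.valueOf("foobar"),
    Bytes.toBytes(0), Bytes.toBytes(1));
List<ServerName> favoredNodes = new ArrayList<ServerName>(); // may be left empty
// A null second element becomes versionOfOfflineNode == -1 inside buildOpenRegionRequest.
regionOpenInfos.add(
    new Triple<HRegionInfo, Integer, List<ServerName>>(hri, null, favoredNodes));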
Project: pbase    File: ServerManager.java
/**
 * Sends an OPEN RPC to the specified server to open the specified regions.
 * <p>
 * Open should not fail, but it can if the server has just crashed.
 * <p>
 * @param server the server on which to open the regions
 * @param regionOpenInfos info for the list of regions to open
 * @return a list of region opening states
 */
public List<RegionOpeningState> sendRegionOpen(ServerName server,
    List<Triple<HRegionInfo, Integer, List<ServerName>>> regionOpenInfos)
throws IOException {
  AdminService.BlockingInterface admin = getRsAdmin(server);
  if (admin == null) {
    LOG.warn("Attempting to send OPEN RPC to server " + server.toString() +
      " failed because no RPC connection found to this server");
    return null;
  }

  OpenRegionRequest request = RequestConverter.buildOpenRegionRequest(server, regionOpenInfos,
    (RecoveryMode.LOG_REPLAY == this.services.getMasterFileSystem().getLogRecoveryMode()));
  try {
    OpenRegionResponse response = admin.openRegion(null, request);
    return ResponseConverter.getRegionOpeningStateList(response);
  } catch (ServiceException se) {
    throw ProtobufUtil.getRemoteException(se);
  }
}
Project: pbase    File: TestFavoredNodeAssignmentHelper.java
@Test
public void testSecondaryAndTertiaryPlacementWithSingleRack() {
  // Test the case where there is a single rack and we need to choose
  // Primary/Secondary/Tertiary from a single rack.
  Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
  rackToServerCount.put("rack1", 10);
  // have lots of regions to test with
  Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
    primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount);
  FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
  Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
  List<HRegionInfo> regions = primaryRSMapAndHelper.getThird();
  Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
  // although we created lots of regions we should have no overlap on the
  // primary/secondary/tertiary for any given region
  for (HRegionInfo region : regions) {
    ServerName[] secondaryAndTertiaryServers = secondaryAndTertiaryMap.get(region);
    assertTrue(!secondaryAndTertiaryServers[0].equals(primaryRSMap.get(region)));
    assertTrue(!secondaryAndTertiaryServers[1].equals(primaryRSMap.get(region)));
    assertTrue(!secondaryAndTertiaryServers[0].equals(secondaryAndTertiaryServers[1]));
  }
}
Project: pbase    File: TestFavoredNodeAssignmentHelper.java
@Test
public void testSecondaryAndTertiaryPlacementWithSingleServer() {
  // Test the case where we have a single node in the cluster. In this case
  // the primary can be assigned but the secondary/tertiary would be null
  Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
  rackToServerCount.put("rack1", 1);
  Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
    primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(1, rackToServerCount);
  FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
  Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
  List<HRegionInfo> regions = primaryRSMapAndHelper.getThird();

  Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
  // no secondary/tertiary placement in case of a single RegionServer
  assertTrue(secondaryAndTertiaryMap.get(regions.get(0)) == null);
}
Project: pbase    File: TestFavoredNodeAssignmentHelper.java
@Test
public void testSecondaryAndTertiaryPlacementWithLessThanTwoServersInRacks() {
  // Test the case where we have two racks but with less than two servers in each
  // We will not have enough machines to select secondary/tertiary
  Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
  rackToServerCount.put("rack1", 1);
  rackToServerCount.put("rack2", 1);
  Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
    primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount);
  FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
  Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
  List<HRegionInfo> regions = primaryRSMapAndHelper.getThird();
  assertTrue(primaryRSMap.size() == 6);
  Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
        helper.placeSecondaryAndTertiaryRS(primaryRSMap);
  for (HRegionInfo region : regions) {
    // not enough secondary/tertiary room to place the regions
    assertTrue(secondaryAndTertiaryMap.get(region) == null);
  }
}
Project: pbase    File: TestFavoredNodeAssignmentHelper.java
private Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
secondaryAndTertiaryRSPlacementHelper(
    int regionCount, Map<String, Integer> rackToServerCount) {
  Map<HRegionInfo, ServerName> primaryRSMap = new HashMap<HRegionInfo, ServerName>();
  List<ServerName> servers = getServersFromRack(rackToServerCount);
  FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
  Map<ServerName, List<HRegionInfo>> assignmentMap =
      new HashMap<ServerName, List<HRegionInfo>>();
  helper.initialize();
  // create regions
  List<HRegionInfo> regions = new ArrayList<HRegionInfo>(regionCount);
  for (int i = 0; i < regionCount; i++) {
    HRegionInfo region = new HRegionInfo(TableName.valueOf("foobar"),
        Bytes.toBytes(i), Bytes.toBytes(i + 1));
    regions.add(region);
  }
  // place the regions
  helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions);
  return new Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
                 (primaryRSMap, helper, regions);
}
Project: pbase    File: RequestConverter.java
/**
 * Create a protocol buffer OpenRegionRequest to open a list of regions.
 *
 * @param server the ServerName for the RPC
 * @param regionOpenInfos info for the list of regions to open
 * @param openForReplay whether the regions are being opened as part of distributed log replay
 * @return a protocol buffer OpenRegionRequest
 */
public static OpenRegionRequest
    buildOpenRegionRequest(ServerName server, final List<Triple<HRegionInfo, Integer,
        List<ServerName>>> regionOpenInfos, Boolean openForReplay) {
  OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder();
  for (Triple<HRegionInfo, Integer, List<ServerName>> regionOpenInfo: regionOpenInfos) {
    Integer second = regionOpenInfo.getSecond();
    int versionOfOfflineNode = second == null ? -1 : second.intValue();
    builder.addOpenInfo(buildRegionOpenInfo(regionOpenInfo.getFirst(), versionOfOfflineNode,
      regionOpenInfo.getThird(), openForReplay));
  }
  if (server != null) {
    builder.setServerStartCode(server.getStartcode());
  }
  return builder.build();
}
Project: HIndex    File: ServerManager.java
/**
 * Sends an OPEN RPC to the specified server to open the specified regions.
 * <p>
 * Open should not fail, but it can if the server has just crashed.
 * <p>
 * @param server the server on which to open the regions
 * @param regionOpenInfos info for the list of regions to open
 * @return a list of region opening states
 */
public List<RegionOpeningState> sendRegionOpen(ServerName server,
    List<Triple<HRegionInfo, Integer, List<ServerName>>> regionOpenInfos)
throws IOException {
  AdminService.BlockingInterface admin = getRsAdmin(server);
  if (admin == null) {
    LOG.warn("Attempting to send OPEN RPC to server " + server.toString() +
      " failed because no RPC connection found to this server");
    return null;
  }

  OpenRegionRequest request =
    RequestConverter.buildOpenRegionRequest(regionOpenInfos);
  try {
    OpenRegionResponse response = admin.openRegion(null, request);
    return ResponseConverter.getRegionOpeningStateList(response);
  } catch (ServiceException se) {
    throw ProtobufUtil.getRemoteException(se);
  }
}
Project: HIndex    File: TestFavoredNodeAssignmentHelper.java
@Test
public void testSecondaryAndTertiaryPlacementWithSingleRack() {
  // Test the case where there is a single rack and we need to choose
  // Primary/Secondary/Tertiary from a single rack.
  Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
  rackToServerCount.put("rack1", 10);
  // have lots of regions to test with
  Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
    primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount);
  FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
  Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
  List<HRegionInfo> regions = primaryRSMapAndHelper.getThird();
  Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
  // although we created lots of regions we should have no overlap on the
  // primary/secondary/tertiary for any given region
  for (HRegionInfo region : regions) {
    ServerName[] secondaryAndTertiaryServers = secondaryAndTertiaryMap.get(region);
    assertTrue(!secondaryAndTertiaryServers[0].equals(primaryRSMap.get(region)));
    assertTrue(!secondaryAndTertiaryServers[1].equals(primaryRSMap.get(region)));
    assertTrue(!secondaryAndTertiaryServers[0].equals(secondaryAndTertiaryServers[1]));
  }
}
Project: HIndex    File: TestFavoredNodeAssignmentHelper.java
@Test
public void testSecondaryAndTertiaryPlacementWithSingleServer() {
  // Test the case where we have a single node in the cluster. In this case
  // the primary can be assigned but the secondary/tertiary would be null
  Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
  rackToServerCount.put("rack1", 1);
  Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
    primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(1, rackToServerCount);
  FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
  Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
  List<HRegionInfo> regions = primaryRSMapAndHelper.getThird();

  Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
  // no secondary/tertiary placement in case of a single RegionServer
  assertTrue(secondaryAndTertiaryMap.get(regions.get(0)) == null);
}
Project: HIndex    File: TestFavoredNodeAssignmentHelper.java
@Test
public void testSecondaryAndTertiaryPlacementWithLessThanTwoServersInRacks() {
  // Test the case where we have two racks but with less than two servers in each
  // We will not have enough machines to select secondary/tertiary
  Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
  rackToServerCount.put("rack1", 1);
  rackToServerCount.put("rack2", 1);
  Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
    primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount);
  FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
  Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
  List<HRegionInfo> regions = primaryRSMapAndHelper.getThird();
  assertTrue(primaryRSMap.size() == 6);
  Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
        helper.placeSecondaryAndTertiaryRS(primaryRSMap);
  for (HRegionInfo region : regions) {
    // not enough secondary/tertiary room to place the regions
    assertTrue(secondaryAndTertiaryMap.get(region) == null);
  }
}
Project: HIndex    File: TestFavoredNodeAssignmentHelper.java
private Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
secondaryAndTertiaryRSPlacementHelper(
    int regionCount, Map<String, Integer> rackToServerCount) {
  Map<HRegionInfo, ServerName> primaryRSMap = new HashMap<HRegionInfo, ServerName>();
  List<ServerName> servers = getServersFromRack(rackToServerCount);
  FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
  Map<ServerName, List<HRegionInfo>> assignmentMap =
      new HashMap<ServerName, List<HRegionInfo>>();
  helper.initialize();
  // create regions
  List<HRegionInfo> regions = new ArrayList<HRegionInfo>(regionCount);
  for (int i = 0; i < regionCount; i++) {
    HRegionInfo region = new HRegionInfo(TableName.valueOf("foobar"),
        Bytes.toBytes(i), Bytes.toBytes(i + 1));
    regions.add(region);
  }
  // place the regions
  helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions);
  return new Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
                 (primaryRSMap, helper, regions);
}
Project: hbase    File: TestFavoredNodeAssignmentHelper.java
@Test
public void testSecondaryAndTertiaryPlacementWithSingleRack() {
  // Test the case where there is a single rack and we need to choose
  // Primary/Secondary/Tertiary from a single rack.
  Map<String,Integer> rackToServerCount = new HashMap<>();
  rackToServerCount.put("rack1", 10);
  // have lots of regions to test with
  Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<RegionInfo>>
    primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount);
  FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
  Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
  List<RegionInfo> regions = primaryRSMapAndHelper.getThird();
  Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
  // although we created lots of regions we should have no overlap on the
  // primary/secondary/tertiary for any given region
  for (RegionInfo region : regions) {
    ServerName[] secondaryAndTertiaryServers = secondaryAndTertiaryMap.get(region);
    assertNotNull(secondaryAndTertiaryServers);
    assertTrue(primaryRSMap.containsKey(region));
    assertTrue(!secondaryAndTertiaryServers[0].equals(primaryRSMap.get(region)));
    assertTrue(!secondaryAndTertiaryServers[1].equals(primaryRSMap.get(region)));
    assertTrue(!secondaryAndTertiaryServers[0].equals(secondaryAndTertiaryServers[1]));
  }
}
Project: hbase    File: TestFavoredNodeAssignmentHelper.java
@Test
public void testSecondaryAndTertiaryPlacementWithSingleServer() {
  // Test the case where we have a single node in the cluster. In this case
  // the primary can be assigned but the secondary/tertiary would be null
  Map<String,Integer> rackToServerCount = new HashMap<>();
  rackToServerCount.put("rack1", 1);
  Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<RegionInfo>>
    primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(1, rackToServerCount);
  FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
  Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
  List<RegionInfo> regions = primaryRSMapAndHelper.getThird();

  Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
  // no secondary/tertiary placement in case of a single RegionServer
  assertTrue(secondaryAndTertiaryMap.get(regions.get(0)) == null);
}
Project: hbase    File: TestFavoredNodeAssignmentHelper.java
@Test
public void testSecondaryAndTertiaryPlacementWithLessThanTwoServersInRacks() {
  // Test the case where we have two racks but with less than two servers in each
  // We will not have enough machines to select secondary/tertiary
  Map<String,Integer> rackToServerCount = new HashMap<>();
  rackToServerCount.put("rack1", 1);
  rackToServerCount.put("rack2", 1);
  Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<RegionInfo>>
    primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount);
  FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
  Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
  List<RegionInfo> regions = primaryRSMapAndHelper.getThird();
  assertTrue(primaryRSMap.size() == 6);
  Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
        helper.placeSecondaryAndTertiaryRS(primaryRSMap);
  for (RegionInfo region : regions) {
    // not enough secondary/tertiary room to place the regions
    assertTrue(secondaryAndTertiaryMap.get(region) == null);
  }
}
Project: hbase    File: TestFavoredNodeAssignmentHelper.java
private Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<RegionInfo>>
secondaryAndTertiaryRSPlacementHelper(
    int regionCount, Map<String, Integer> rackToServerCount) {
  Map<RegionInfo, ServerName> primaryRSMap = new HashMap<RegionInfo, ServerName>();
  List<ServerName> servers = getServersFromRack(rackToServerCount);
  FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
  Map<ServerName, List<RegionInfo>> assignmentMap =
      new HashMap<ServerName, List<RegionInfo>>();
  helper.initialize();
  // create regions
  List<RegionInfo> regions = new ArrayList<>(regionCount);
  for (int i = 0; i < regionCount; i++) {
    regions.add(RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
        .setStartKey(Bytes.toBytes(i))
        .setEndKey(Bytes.toBytes(i + 1))
        .build());
  }
  // place the regions
  helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions);
  return new Triple<>(primaryRSMap, helper, regions);
}
Project: hbase    File: Export.java
/**
 * Sets up the actual job.
 *
 * @param conf  The current configuration.
 * @param args  The command line parameters.
 * @return The newly created job.
 * @throws IOException When setting up the job fails.
 */
public static Job createSubmittableJob(Configuration conf, String[] args)
throws IOException {
  Triple<TableName, Scan, Path> arguments = ExportUtils.getArgumentsFromCommandLine(conf, args);
  String tableName = arguments.getFirst().getNameAsString();
  Path outputDir = arguments.getThird();
  Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + tableName));
  job.setJobName(NAME + "_" + tableName);
  job.setJarByClass(Export.class);
  // Set optional scan parameters
  Scan s = arguments.getSecond();
  IdentityTableMapper.initJob(tableName, s, IdentityTableMapper.class, job);
  // No reducers.  Just write straight to output files.
  job.setNumReduceTasks(0);
  job.setOutputFormatClass(SequenceFileOutputFormat.class);
  job.setOutputKeyClass(ImmutableBytesWritable.class);
  job.setOutputValueClass(Result.class);
  FileOutputFormat.setOutputPath(job, outputDir); // the job conf doesn't contain this conf, so it has no default fs
  return job;
}
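
A hypothetical driver for the method above: ExportUtils.getArgumentsFromCommandLine parses the command line into a Triple of table name, Scan, and output path, so a main method only has to forward the raw arguments. This main method, its argument order (table name followed by output directory), and the exception handling are assumptions for illustration, not part of Export.java as shown here.

public static void main(String[] args) throws Exception {
  // args are expected as: <tablename> <outputdir> (plus optional scan settings).
  Configuration conf = HBaseConfiguration.create();
  Job job = createSubmittableJob(conf, args);
  System.exit(job.waitForCompletion(true) ? 0 : 1);
}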
Project: hbase    File: TestInterfaceAudienceAnnotations.java
@Ignore @Test
public void testProtosInParamTypes() throws ClassNotFoundException, IOException, LinkageError {
  Set<Class<?>> classes = findPublicClasses();
  List<Triple<Class<?>, Method, Class<?>>> protosParamType = new ArrayList<>();
  for (Class<?> clazz : classes) {
    findProtoInParamType(clazz, protosParamType);
  }

  if (protosParamType.size() != 0) {
    LOG.info("These are the methods that have Protos as the param type");
    for (Triple<Class<?>, Method, Class<?>> pair : protosParamType) {
      LOG.info(pair.getFirst().getName() + " " + pair.getSecond().getName() + " "
          + pair.getThird().getName());
    }
  }

  Assert.assertEquals("Public exposed methods should not have protos in param type", 0,
    protosParamType.size());
}
Project: hbase    File: TestInterfaceAudienceAnnotations.java
private void findProtoInParamType(Class<?> clazz,
    List<Triple<Class<?>, Method, Class<?>>> protosParamType) {
  Triple<Class<?>, Method, Class<?>> paramType = new Triple<>();
  Method[] methods = clazz.getMethods();
  paramType.setFirst(clazz);
  for (Method method : methods) {
    if (clazz.isInterface() || method.getModifiers() == Modifier.PUBLIC) {
      if (!isInterfacePrivateMethod(method)) {
        Class<?>[] parameters = method.getParameterTypes();
        for (Class<?> param : parameters) {
          if (param.getName().contains(HBASE_PROTOBUF)) {
            paramType.setSecond(method);
            paramType.setThird(param);
            protosParamType.add(paramType);
            break;
          }
        }
      }
    }
  }
}
Project: PyroDB    File: ServerManager.java
/**
 * Sends an OPEN RPC to the specified server to open the specified regions.
 * <p>
 * Open should not fail, but it can if the server has just crashed.
 * <p>
 * @param server the server on which to open the regions
 * @param regionOpenInfos info for the list of regions to open
 * @return a list of region opening states
 */
public List<RegionOpeningState> sendRegionOpen(ServerName server,
    List<Triple<HRegionInfo, Integer, List<ServerName>>> regionOpenInfos)
throws IOException {
  AdminService.BlockingInterface admin = getRsAdmin(server);
  if (admin == null) {
    LOG.warn("Attempting to send OPEN RPC to server " + server.toString() +
      " failed because no RPC connection found to this server");
    return null;
  }

  OpenRegionRequest request =
    RequestConverter.buildOpenRegionRequest(regionOpenInfos);
  try {
    OpenRegionResponse response = admin.openRegion(null, request);
    return ResponseConverter.getRegionOpeningStateList(response);
  } catch (ServiceException se) {
    throw ProtobufUtil.getRemoteException(se);
  }
}
Project: PyroDB    File: TestFavoredNodeAssignmentHelper.java
@Test
public void testSecondaryAndTertiaryPlacementWithSingleRack() {
  // Test the case where there is a single rack and we need to choose
  // Primary/Secondary/Tertiary from a single rack.
  Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
  rackToServerCount.put("rack1", 10);
  // have lots of regions to test with
  Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
    primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount);
  FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
  Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
  List<HRegionInfo> regions = primaryRSMapAndHelper.getThird();
  Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
  // although we created lots of regions we should have no overlap on the
  // primary/secondary/tertiary for any given region
  for (HRegionInfo region : regions) {
    ServerName[] secondaryAndTertiaryServers = secondaryAndTertiaryMap.get(region);
    assertTrue(!secondaryAndTertiaryServers[0].equals(primaryRSMap.get(region)));
    assertTrue(!secondaryAndTertiaryServers[1].equals(primaryRSMap.get(region)));
    assertTrue(!secondaryAndTertiaryServers[0].equals(secondaryAndTertiaryServers[1]));
  }
}
Project: PyroDB    File: TestFavoredNodeAssignmentHelper.java
@Test
public void testSecondaryAndTertiaryPlacementWithSingleServer() {
  // Test the case where we have a single node in the cluster. In this case
  // the primary can be assigned but the secondary/tertiary would be null
  Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
  rackToServerCount.put("rack1", 1);
  Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
    primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(1, rackToServerCount);
  FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
  Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
  List<HRegionInfo> regions = primaryRSMapAndHelper.getThird();

  Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
  // no secondary/tertiary placement in case of a single RegionServer
  assertTrue(secondaryAndTertiaryMap.get(regions.get(0)) == null);
}
Project: PyroDB    File: TestFavoredNodeAssignmentHelper.java
@Test
public void testSecondaryAndTertiaryPlacementWithLessThanTwoServersInRacks() {
  // Test the case where we have two racks but with less than two servers in each
  // We will not have enough machines to select secondary/tertiary
  Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
  rackToServerCount.put("rack1", 1);
  rackToServerCount.put("rack2", 1);
  Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
    primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount);
  FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
  Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
  List<HRegionInfo> regions = primaryRSMapAndHelper.getThird();
  assertTrue(primaryRSMap.size() == 6);
  Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
        helper.placeSecondaryAndTertiaryRS(primaryRSMap);
  for (HRegionInfo region : regions) {
    // not enough secondary/tertiary room to place the regions
    assertTrue(secondaryAndTertiaryMap.get(region) == null);
  }
}
Project: PyroDB    File: TestFavoredNodeAssignmentHelper.java
private Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
secondaryAndTertiaryRSPlacementHelper(
    int regionCount, Map<String, Integer> rackToServerCount) {
  Map<HRegionInfo, ServerName> primaryRSMap = new HashMap<HRegionInfo, ServerName>();
  List<ServerName> servers = getServersFromRack(rackToServerCount);
  FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
  Map<ServerName, List<HRegionInfo>> assignmentMap =
      new HashMap<ServerName, List<HRegionInfo>>();
  helper.initialize();
  // create regions
  List<HRegionInfo> regions = new ArrayList<HRegionInfo>(regionCount);
  for (int i = 0; i < regionCount; i++) {
    HRegionInfo region = new HRegionInfo(TableName.valueOf("foobar"),
        Bytes.toBytes(i), Bytes.toBytes(i + 1));
    regions.add(region);
  }
  // place the regions
  helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions);
  return new Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
                 (primaryRSMap, helper, regions);
}
Project: c5    File: ServerManager.java
/**
 * Sends an OPEN RPC to the specified server to open the specified regions.
 * <p>
 * Open should not fail, but it can if the server has just crashed.
 * <p>
 * @param server the server on which to open the regions
 * @param regionOpenInfos info for the list of regions to open
 * @return a list of region opening states
 */
public List<RegionOpeningState> sendRegionOpen(ServerName server,
    List<Triple<HRegionInfo, Integer, List<ServerName>>> regionOpenInfos)
throws IOException {
  AdminService.BlockingInterface admin = getRsAdmin(server);
  if (admin == null) {
    LOG.warn("Attempting to send OPEN RPC to server " + server.toString() +
      " failed because no RPC connection found to this server");
    return null;
  }

  OpenRegionRequest request =
    RequestConverter.buildOpenRegionRequest(regionOpenInfos);
  try {
    OpenRegionResponse response = admin.openRegion(null, request);
    return ResponseConverter.getRegionOpeningStateList(response);
  } catch (ServiceException se) {
    throw ProtobufUtil.getRemoteException(se);
  }
}
Project: c5    File: TestFavoredNodeAssignmentHelper.java
@Test
public void testSecondaryAndTertiaryPlacementWithSingleRack() {
  // Test the case where there is a single rack and we need to choose
  // Primary/Secondary/Tertiary from a single rack.
  Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
  rackToServerCount.put("rack1", 10);
  // have lots of regions to test with
  Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
    primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount);
  FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
  Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
  List<HRegionInfo> regions = primaryRSMapAndHelper.getThird();
  Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
  // although we created lots of regions we should have no overlap on the
  // primary/secondary/tertiary for any given region
  for (HRegionInfo region : regions) {
    ServerName[] secondaryAndTertiaryServers = secondaryAndTertiaryMap.get(region);
    assertTrue(!secondaryAndTertiaryServers[0].equals(primaryRSMap.get(region)));
    assertTrue(!secondaryAndTertiaryServers[1].equals(primaryRSMap.get(region)));
    assertTrue(!secondaryAndTertiaryServers[0].equals(secondaryAndTertiaryServers[1]));
  }
}
Project: c5    File: TestFavoredNodeAssignmentHelper.java
@Test
public void testSecondaryAndTertiaryPlacementWithSingleServer() {
  // Test the case where we have a single node in the cluster. In this case
  // the primary can be assigned but the secondary/tertiary would be null
  Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
  rackToServerCount.put("rack1", 1);
  Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
    primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(1, rackToServerCount);
  FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
  Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
  List<HRegionInfo> regions = primaryRSMapAndHelper.getThird();

  Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
  // no secondary/tertiary placement in case of a single RegionServer
  assertTrue(secondaryAndTertiaryMap.get(regions.get(0)) == null);
}
Project: c5    File: TestFavoredNodeAssignmentHelper.java
@Test
public void testSecondaryAndTertiaryPlacementWithLessThanTwoServersInRacks() {
  // Test the case where we have two racks but with less than two servers in each
  // We will not have enough machines to select secondary/tertiary
  Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
  rackToServerCount.put("rack1", 1);
  rackToServerCount.put("rack2", 1);
  Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
    primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount);
  FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
  Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
  List<HRegionInfo> regions = primaryRSMapAndHelper.getThird();
  assertTrue(primaryRSMap.size() == 6);
  Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
        helper.placeSecondaryAndTertiaryRS(primaryRSMap);
  for (HRegionInfo region : regions) {
    // not enough secondary/tertiary room to place the regions
    assertTrue(secondaryAndTertiaryMap.get(region) == null);
  }
}
Project: c5    File: TestFavoredNodeAssignmentHelper.java
private Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
secondaryAndTertiaryRSPlacementHelper(
    int regionCount, Map<String, Integer> rackToServerCount) {
  Map<HRegionInfo, ServerName> primaryRSMap = new HashMap<HRegionInfo, ServerName>();
  List<ServerName> servers = getServersFromRack(rackToServerCount);
  FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
  Map<ServerName, List<HRegionInfo>> assignmentMap =
      new HashMap<ServerName, List<HRegionInfo>>();
  helper.initialize();
  // create regions
  List<HRegionInfo> regions = new ArrayList<HRegionInfo>(regionCount);
  for (int i = 0; i < regionCount; i++) {
    HRegionInfo region = new HRegionInfo(TableName.valueOf("foobar"),
        Bytes.toBytes(i), Bytes.toBytes(i + 1));
    regions.add(region);
  }
  // place the regions
  helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions);
  return new Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
                 (primaryRSMap, helper, regions);
}
Project: DominoHBase    File: HConnectionManager.java
/**
 * Wait for one of tasks to be done, and remove it from the list.
 * @return the tasks done.
 */
private Triple<MultiAction<R>, HRegionLocation, Future<MultiResponse>>
removeFirstDone() throws InterruptedException {
  while (true) {
    synchronized (finishedTasks) {
      if (!finishedTasks.isEmpty()) {
        MultiAction<R> done = finishedTasks.remove(finishedTasks.size() - 1);

        // We now need to remove it from the inProgress part.
        Iterator<Triple<MultiAction<R>, HRegionLocation, Future<MultiResponse>>> it =
          inProgress.iterator();
        while (it.hasNext()) {
          Triple<MultiAction<R>, HRegionLocation, Future<MultiResponse>> task = it.next();
          if (task.getFirst() == done) { // We have the exact object. No java equals here.
            it.remove();
            return task;
          }
        }
        LOG.error("Development error: We didn't see a task in the list. " +
          done.getRegions());
      }
      finishedTasks.wait(10);
    }
  }
}
Project: ditb    File: TestFavoredNodeAssignmentHelper.java
@Ignore("Disabled for now until FavoredNodes gets finished as a feature") @Test
public void testSecondaryAndTertiaryPlacementWithMultipleRacks() {
  // Test the case where we have multiple racks and the region servers
  // belong to multiple racks
  Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
  rackToServerCount.put("rack1", 10);
  rackToServerCount.put("rack2", 10);

  Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
    primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount);
  FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
  Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();

  assertTrue(primaryRSMap.size() == 60000);
  Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
  assertTrue(secondaryAndTertiaryMap.size() == 60000);
  // for every region, the primary should be on one rack and the secondary/tertiary
  // on another (we create a lot of regions just to increase probability of failure)
  for (Map.Entry<HRegionInfo, ServerName[]> entry : secondaryAndTertiaryMap.entrySet()) {
    ServerName[] allServersForRegion = entry.getValue();
    String primaryRSRack = rackManager.getRack(primaryRSMap.get(entry.getKey()));
    String secondaryRSRack = rackManager.getRack(allServersForRegion[0]);
    String tertiaryRSRack = rackManager.getRack(allServersForRegion[1]);
    assertTrue(!primaryRSRack.equals(secondaryRSRack));
    assertTrue(secondaryRSRack.equals(tertiaryRSRack));
  }
}
Project: ditb    File: TestFavoredNodeAssignmentHelper.java
@Ignore("Disabled for now until FavoredNodes gets finished as a feature") @Test
public void testSecondaryAndTertiaryPlacementWithMoreThanOneServerInPrimaryRack() {
  // Test the case where there is only one server in one rack and another rack
  // has more servers. We try to choose secondary/tertiary on different
  // racks than what the primary is on. But if the other rack doesn't have
  // enough nodes to have both secondary/tertiary RSs, the tertiary is placed
  // on the same rack as the primary server is on
  Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
  rackToServerCount.put("rack1", 2);
  rackToServerCount.put("rack2", 1);
  Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
    primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount);
  FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
  Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
  List<HRegionInfo> regions = primaryRSMapAndHelper.getThird();
  assertTrue(primaryRSMap.size() == 6);
  Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
        helper.placeSecondaryAndTertiaryRS(primaryRSMap);
  for (HRegionInfo region : regions) {
    ServerName s = primaryRSMap.get(region);
    ServerName secondaryRS = secondaryAndTertiaryMap.get(region)[0];
    ServerName tertiaryRS = secondaryAndTertiaryMap.get(region)[1];
    if (rackManager.getRack(s).equals("rack1")) {
      assertTrue(rackManager.getRack(secondaryRS).equals("rack2") &&
          rackManager.getRack(tertiaryRS).equals("rack1"));
    }
    if (rackManager.getRack(s).equals("rack2")) {
      assertTrue(rackManager.getRack(secondaryRS).equals("rack1") &&
          rackManager.getRack(tertiaryRS).equals("rack1"));
    }
  }
}
Project: pbase    File: TestFavoredNodeAssignmentHelper.java
@Test
public void testSecondaryAndTertiaryPlacementWithMultipleRacks() {
  // Test the case where we have multiple racks and the region servers
  // belong to multiple racks
  Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
  rackToServerCount.put("rack1", 10);
  rackToServerCount.put("rack2", 10);

  Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
    primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount);
  FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
  Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();

  assertTrue(primaryRSMap.size() == 60000);
  Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
  assertTrue(secondaryAndTertiaryMap.size() == 60000);
  // for every region, the primary should be on one rack and the secondary/tertiary
  // on another (we create a lot of regions just to increase probability of failure)
  for (Map.Entry<HRegionInfo, ServerName[]> entry : secondaryAndTertiaryMap.entrySet()) {
    ServerName[] allServersForRegion = entry.getValue();
    String primaryRSRack = rackManager.getRack(primaryRSMap.get(entry.getKey()));
    String secondaryRSRack = rackManager.getRack(allServersForRegion[0]);
    String tertiaryRSRack = rackManager.getRack(allServersForRegion[1]);
    assertTrue(!primaryRSRack.equals(secondaryRSRack));
    assertTrue(secondaryRSRack.equals(tertiaryRSRack));
  }
}
Project: pbase    File: TestFavoredNodeAssignmentHelper.java
@Test
public void testSecondaryAndTertiaryPlacementWithMoreThanOneServerInPrimaryRack() {
  // Test the case where there is only one server in one rack and another rack
  // has more servers. We try to choose secondary/tertiary on different
  // racks than what the primary is on. But if the other rack doesn't have
  // enough nodes to have both secondary/tertiary RSs, the tertiary is placed
  // on the same rack as the primary server is on
  Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
  rackToServerCount.put("rack1", 2);
  rackToServerCount.put("rack2", 1);
  Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
    primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount);
  FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
  Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
  List<HRegionInfo> regions = primaryRSMapAndHelper.getThird();
  assertTrue(primaryRSMap.size() == 6);
  Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
        helper.placeSecondaryAndTertiaryRS(primaryRSMap);
  for (HRegionInfo region : regions) {
    ServerName s = primaryRSMap.get(region);
    ServerName secondaryRS = secondaryAndTertiaryMap.get(region)[0];
    ServerName tertiaryRS = secondaryAndTertiaryMap.get(region)[1];
    if (rackManager.getRack(s).equals("rack1")) {
      assertTrue(rackManager.getRack(secondaryRS).equals("rack2") &&
          rackManager.getRack(tertiaryRS).equals("rack1"));
    }
    if (rackManager.getRack(s).equals("rack2")) {
      assertTrue(rackManager.getRack(secondaryRS).equals("rack1") &&
          rackManager.getRack(tertiaryRS).equals("rack1"));
    }
  }
}
Project: HIndex    File: TestFavoredNodeAssignmentHelper.java
@Test
public void testSecondaryAndTertiaryPlacementWithMultipleRacks() {
  // Test the case where we have multiple racks and the region servers
  // belong to multiple racks
  Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
  rackToServerCount.put("rack1", 10);
  rackToServerCount.put("rack2", 10);

  Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
    primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount);
  FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
  Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();

  assertTrue(primaryRSMap.size() == 60000);
  Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
  assertTrue(secondaryAndTertiaryMap.size() == 60000);
  // for every region, the primary should be on one rack and the secondary/tertiary
  // on another (we create a lot of regions just to increase probability of failure)
  for (Map.Entry<HRegionInfo, ServerName[]> entry : secondaryAndTertiaryMap.entrySet()) {
    ServerName[] allServersForRegion = entry.getValue();
    String primaryRSRack = rackManager.getRack(primaryRSMap.get(entry.getKey()));
    String secondaryRSRack = rackManager.getRack(allServersForRegion[0]);
    String tertiaryRSRack = rackManager.getRack(allServersForRegion[1]);
    assertTrue(!primaryRSRack.equals(secondaryRSRack));
    assertTrue(secondaryRSRack.equals(tertiaryRSRack));
  }
}