Example source code for the Java class org.apache.hadoop.yarn.api.records.Resource

Project: hadoop    File: TestInMemoryPlan.java
private Map<ReservationInterval, ReservationRequest> generateAllocation(
    int startTime, int[] alloc, boolean isStep) {
  Map<ReservationInterval, ReservationRequest> req =
      new HashMap<ReservationInterval, ReservationRequest>();
  int numContainers = 0;
  for (int i = 0; i < alloc.length; i++) {
    if (isStep) {
      numContainers = alloc[i] + i;
    } else {
      numContainers = alloc[i];
    }
    ReservationRequest rr =
        ReservationRequest.newInstance(Resource.newInstance(1024, 1, 1),
            numContainers);
    req.put(new ReservationInterval(startTime + i, startTime + i + 1), rr);
  }
  return req;
}
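
For orientation (not part of the original listing), a quick illustration of the isStep flag under assumed inputs:

// assumed inputs, illustrative only
int[] alloc = {10, 10, 10};
// isStep == false -> 10, 10, 10 containers across the three one-unit intervals
// isStep == true  -> alloc[i] + i, i.e. 10, 11, 12: a rising "step" demand curve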
Project: pai    File: MockResourceManager.java
private void initNodeIds(int nodeNums, int containerNums, Resource resource) {
  Random portRandom = new Random();
  Random ipRandom = new Random();
  for (int i = 0; i < nodeNums; i++) {
    NodeReport nodeReport = Records.newRecord(NodeReport.class);
    nodeReport.setNumContainers(containerNums);
    nodeReport.setNodeLabels(new HashSet<>());
    nodeReport.setNodeState(NodeState.RUNNING);
    nodeReport.setCapability(resource);
    nodeReport.setUsed(Resource.newInstance(0, 0));

    int port = 1024 + portRandom.nextInt(65535 - 1024 + 1);
    StringBuilder hostStr = new StringBuilder();
    for (int j = 0; j < 4; j++) {
      hostStr.append(".").append(ipRandom.nextInt(256));
    }
    NodeId nodeId = NodeId.newInstance(hostStr.substring(1), port);
    nodeReport.setNodeId(nodeId);
    nodeReport.setHttpAddress(nodeId.getHost());

    nodeReportList.add(nodeReport);
  }
}
Project: hadoop    File: TestResourceTrackerService.java
@Test
public void testNodeRegistrationSuccess() throws Exception {
  writeToHostsFile("host2");
  Configuration conf = new Configuration();
  conf.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH, hostFile
      .getAbsolutePath());
  rm = new MockRM(conf);
  rm.start();

  ResourceTrackerService resourceTrackerService = rm.getResourceTrackerService();
  RegisterNodeManagerRequest req = Records.newRecord(
      RegisterNodeManagerRequest.class);
  NodeId nodeId = NodeId.newInstance("host2", 1234);
  Resource capability = BuilderUtils.newResource(1024, 1, 1);
  req.setResource(capability);
  req.setNodeId(nodeId);
  req.setHttpPort(1234);
  req.setNMVersion(YarnVersionInfo.getVersion());
  // register a valid node; it should be accepted
  RegisterNodeManagerResponse response = resourceTrackerService.registerNodeManager(req);
  Assert.assertEquals(NodeAction.NORMAL, response.getNodeAction());
}
Project: hadoop    File: FSAppAttempt.java
/**
 * Called when this application already has an existing reservation on the
 * given node. Sees whether we can turn the reservation into an allocation.
 * Also checks whether the application still needs the reservation, and
 * releases it if not.
 *
 * @param node
 *     Node that the application has an existing reservation on
 * @return the resources assigned, or {@code Resources.none()} if no
 *     container could be assigned
 */
public Resource assignReservedContainer(FSSchedulerNode node) {
  RMContainer rmContainer = node.getReservedContainer();
  Priority priority = rmContainer.getReservedPriority();

  // Make sure the application still needs requests at this priority
  if (getTotalRequiredResources(priority) == 0) {
    unreserve(priority, node);
    return Resources.none();
  }

  // Fail early if the reserved container won't fit.
  // Note that we have an assumption here that there's only one container size
  // per priority.
  if (!Resources.fitsIn(node.getReservedContainer().getReservedResource(),
      node.getAvailableResource())) {
    return Resources.none();
  }

  return assignContainer(node, true);
}
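
A minimal sketch of how a caller might consume the result, assuming an FSAppAttempt app and an FSSchedulerNode node are in scope (the variable names are assumptions, not from the source):

Resource assigned = app.assignReservedContainer(node);
if (Resources.equals(assigned, Resources.none())) {
  // either the reserved container did not fit on the node, or the
  // application no longer needed the reservation (and it was released)
}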
Project: hadoop    File: LeafQueue.java
private void updateCurrentResourceLimits(
    ResourceLimits currentResourceLimits, Resource clusterResource) {
  // TODO: consider non-empty node labels when resource limits support
  // node labels
  // Even though ParentQueue sets limits that respect the child's max queue
  // capacity, CapacityScheduler does not do this when allocating a reserved
  // container, so we need to cap the limits by the queue's max capacity here.
  this.cachedResourceLimitsForHeadroom = new ResourceLimits(currentResourceLimits.getLimit());
  Resource queueMaxResource =
      Resources.multiplyAndNormalizeDown(resourceCalculator, labelManager
          .getResourceByLabel(RMNodeLabelsManager.NO_LABEL, clusterResource),
          queueCapacities
              .getAbsoluteMaximumCapacity(RMNodeLabelsManager.NO_LABEL),
          minimumAllocation);
  this.cachedResourceLimitsForHeadroom.setLimit(Resources.min(resourceCalculator,
      clusterResource, queueMaxResource, currentResourceLimits.getLimit()));
}
Project: hadoop    File: MockNodes.java
public MockRMNodeImpl(NodeId nodeId, String nodeAddr, String httpAddress,
    Resource perNode, String rackName, String healthReport,
    long lastHealthReportTime, int cmdPort, String hostName, NodeState state,
    Set<String> labels) {
  this.nodeId = nodeId;
  this.nodeAddr = nodeAddr;
  this.httpAddress = httpAddress;
  this.perNode = perNode;
  this.rackName = rackName;
  this.healthReport = healthReport;
  this.lastHealthReportTime = lastHealthReportTime;
  this.cmdPort = cmdPort;
  this.hostName = hostName;
  this.state = state;
  this.labels = labels;
}
Project: hadoop    File: ParentQueue.java
@Override
public void attachContainer(Resource clusterResource,
    FiCaSchedulerApp application, RMContainer rmContainer) {
  if (application != null) {
    FiCaSchedulerNode node =
        scheduler.getNode(rmContainer.getContainer().getNodeId());
    super.allocateResource(clusterResource, rmContainer.getContainer()
        .getResource(), node.getLabels());
    LOG.info("movedContainer" + " queueMoveIn=" + getQueueName()
        + " usedCapacity=" + getUsedCapacity() + " absoluteUsedCapacity="
        + getAbsoluteUsedCapacity() + " used=" + queueUsage.getUsed() + " cluster="
        + clusterResource);
    // Inform the parent
    if (parent != null) {
      parent.attachContainer(clusterResource, application, rmContainer);
    }
  }
}
Project: hadoop    File: TestReservationInputValidator.java
private ReservationSubmissionRequest createSimpleReservationSubmissionRequest(
    int numRequests, int numContainers, long arrival, long deadline,
    long duration) {
  // create a request with a single atomic ask
  ReservationSubmissionRequest request =
      new ReservationSubmissionRequestPBImpl();
  ReservationDefinition rDef = new ReservationDefinitionPBImpl();
  rDef.setArrival(arrival);
  rDef.setDeadline(deadline);
  if (numRequests > 0) {
    ReservationRequests reqs = new ReservationRequestsPBImpl();
    rDef.setReservationRequests(reqs);
    if (numContainers > 0) {
      ReservationRequest r =
          ReservationRequest.newInstance(Resource.newInstance(1024, 1, 1),
              numContainers, 1, duration);

      reqs.setReservationResources(Collections.singletonList(r));
      reqs.setInterpreter(ReservationRequestInterpreter.R_ALL);
    }
  }
  request.setQueue(PLAN_NAME);
  request.setReservationDefinition(rDef);
  return request;
}
Project: hadoop    File: RMNodeLabelsManager.java
public Resource getQueueResource(String queueName, Set<String> queueLabels,
    Resource clusterResource) {
  try {
    readLock.lock();
    if (queueLabels.contains(ANY)) {
      return clusterResource;
    }
    Queue q = queueCollections.get(queueName);
    if (null == q) {
      return Resources.none();
    }
    return q.resource;
  } finally {
    readLock.unlock();
  }
}
Project: hadoop    File: AbstractYarnScheduler.java
protected void refreshMaximumAllocation(Resource newMaxAlloc) {
  maxAllocWriteLock.lock();
  try {
    configuredMaximumAllocation = Resources.clone(newMaxAlloc);
    int maxMemory = newMaxAlloc.getMemory();
    if (maxNodeMemory != -1) {
      maxMemory = Math.min(maxMemory, maxNodeMemory);
    }
    int maxVcores = newMaxAlloc.getVirtualCores();
    if (maxNodeVCores != -1) {
      maxVcores = Math.min(maxVcores, maxNodeVCores);
    }
    int maxGcores = newMaxAlloc.getGpuCores();
    if (maxNodeGCores != -1) {
      maxGcores = Math.min(maxGcores, maxNodeGCores);
    }
    maximumAllocation = Resources.createResource(maxMemory, maxVcores, maxGcores);
  } finally {
    maxAllocWriteLock.unlock();
  }
}
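
A worked example of the capping rule above, as a fragment with assumed values (none of them from the source):

// assumed: configured max = <8192 MB, 8 vcores, 4 gpus>,
//          largest registered node = <6144 MB, 16 vcores, 2 gpus>
int maxMemory = Math.min(8192, 6144);  // -> 6144
int maxVcores = Math.min(8, 16);       // -> 8
int maxGcores = Math.min(4, 2);        // -> 2
// effective maximumAllocation = <6144 MB, 8 vcores, 2 gpus>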
Project: hadoop    File: LeafQueue.java
public synchronized Resource getUserAMResourceLimitPerPartition(
    String nodePartition) {
  /*
   * The user AM resource limit is based on the same approach as the user
   * limit (as it should represent a subset of it). This means that it uses
   * the absolute queue capacity (per partition) instead of the max, and is
   * scaled by the user limit and the user-limit factor, just as the user
   * limit is.
   */
  float effectiveUserLimit = Math.max(userLimit / 100.0f,
      1.0f / Math.max(getActiveUsersManager().getNumActiveUsers(), 1));

  Resource queuePartitionResource = Resources.multiplyAndNormalizeUp(
      resourceCalculator,
      labelManager.getResourceByLabel(nodePartition, lastClusterResource),
      queueCapacities.getAbsoluteCapacity(nodePartition), minimumAllocation);

  return Resources.multiplyAndNormalizeUp(resourceCalculator,
      queuePartitionResource,
      queueCapacities.getMaxAMResourcePercentage(nodePartition)
          * effectiveUserLimit * userLimitFactor, minimumAllocation);
}
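
To make the arithmetic concrete, a small self-contained sketch under assumed numbers (all values are illustrative, not from the source):

public class UserAmLimitExample {
  public static void main(String[] args) {
    int userLimit = 100;           // percent (assumed)
    int activeUsers = 4;           // assumed
    // max(1.0f, 0.25f) = 1.0f: a single user may take the whole queue share
    float effectiveUserLimit =
        Math.max(userLimit / 100.0f, 1.0f / Math.max(activeUsers, 1));
    long queuePartitionMb = 20480; // assumed: 0.5 absolute capacity of a 40 GB partition
    float maxAmPercent = 0.1f;     // assumed
    float userLimitFactor = 1.0f;  // assumed
    long amLimitMb = (long) (queuePartitionMb * maxAmPercent
        * effectiveUserLimit * userLimitFactor);
    System.out.println("per-user AM limit (MB): " + amLimitMb); // prints 2048
  }
}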
Project: hadoop    File: TestInMemoryReservationAllocation.java
@Test
public void testBlocks() {
  ReservationId reservationID =
      ReservationId.newInstance(rand.nextLong(), rand.nextLong());
  int[] alloc = { 10, 10, 10, 10, 10, 10 };
  int start = 100;
  ReservationDefinition rDef =
      createSimpleReservationDefinition(start, start + alloc.length + 1,
          alloc.length);
  Map<ReservationInterval, ReservationRequest> allocations =
      generateAllocation(start, alloc, false, false);
  ReservationAllocation rAllocation =
      new InMemoryReservationAllocation(reservationID, rDef, user, planName,
          start, start + alloc.length + 1, allocations, resCalc, minAlloc);
  doAssertions(rAllocation, reservationID, rDef, allocations, start, alloc);
  Assert.assertFalse(rAllocation.containsGangs());
  for (int i = 0; i < alloc.length; i++) {
    Assert.assertEquals(Resource.newInstance(1024 * alloc[i], alloc[i], alloc[i]),
        rAllocation.getResourcesAtTime(start + i));
  }
}
Project: hadoop    File: ContainerHistoryData.java
@Public
@Unstable
public static ContainerHistoryData newInstance(ContainerId containerId,
    Resource allocatedResource, NodeId assignedNode, Priority priority,
    long startTime, long finishTime, String diagnosticsInfo,
    int containerExitCode, ContainerState containerState) {
  ContainerHistoryData containerHD = new ContainerHistoryData();
  containerHD.setContainerId(containerId);
  containerHD.setAllocatedResource(allocatedResource);
  containerHD.setAssignedNode(assignedNode);
  containerHD.setPriority(priority);
  containerHD.setStartTime(startTime);
  containerHD.setFinishTime(finishTime);
  containerHD.setDiagnosticsInfo(diagnosticsInfo);
  containerHD.setContainerExitStatus(containerExitCode);
  containerHD.setContainerState(containerState);
  return containerHD;
}
Project: hadoop    File: MockNodes.java
public static List<RMNode> newNodes(int racks, int nodesPerRack,
                                      Resource perNode) {
  List<RMNode> list = Lists.newArrayList();
  for (int i = 0; i < racks; ++i) {
    for (int j = 0; j < nodesPerRack; ++j) {
      if (j == (nodesPerRack - 1)) {
        // One unhealthy node per rack.
        list.add(nodeInfo(i, perNode, NodeState.UNHEALTHY));
      }
      if (j == 0) {
        // One node with label
        list.add(nodeInfo(i, perNode, NodeState.RUNNING, ImmutableSet.of("x")));
      } else {
        list.add(newNodeInfo(i, perNode));
      }
    }
  }
  return list;
}
Project: hadoop    File: TestResourceMgrDelegate.java
private ApplicationReport getApplicationReport(
    YarnApplicationState yarnApplicationState,
    FinalApplicationStatus finalApplicationStatus) {
  ApplicationReport appReport = Mockito.mock(ApplicationReport.class);
  ApplicationResourceUsageReport appResources = Mockito
      .mock(ApplicationResourceUsageReport.class);
  Mockito.when(appReport.getApplicationId()).thenReturn(
      ApplicationId.newInstance(0, 0));
  Mockito.when(appResources.getNeededResources()).thenReturn(
      Records.newRecord(Resource.class));
  Mockito.when(appResources.getReservedResources()).thenReturn(
      Records.newRecord(Resource.class));
  Mockito.when(appResources.getUsedResources()).thenReturn(
      Records.newRecord(Resource.class));
  Mockito.when(appReport.getApplicationResourceUsageReport()).thenReturn(
      appResources);
  Mockito.when(appReport.getYarnApplicationState()).thenReturn(
      yarnApplicationState);
  Mockito.when(appReport.getFinalApplicationStatus()).thenReturn(
      finalApplicationStatus);

  return appReport;
}
Project: hadoop    File: TestRLESparseResourceAllocation.java
private Map<ReservationInterval, ReservationRequest> generateAllocation(
    int startTime, int[] alloc, boolean isStep) {
  Map<ReservationInterval, ReservationRequest> req =
      new HashMap<ReservationInterval, ReservationRequest>();
  int numContainers = 0;
  for (int i = 0; i < alloc.length; i++) {
    if (isStep) {
      numContainers = alloc[i] + i;
    } else {
      numContainers = alloc[i];
    }
    req.put(new ReservationInterval(startTime + i, startTime + i + 1),
        ReservationRequest.newInstance(Resource.newInstance(1024, 1, 1),
            numContainers));
  }
  return req;
}
Project: hadoop    File: TestApplicationClientProtocolOnHA.java
@Test(timeout = 15000)
public void testSubmitApplicationOnHA() throws Exception {
  ApplicationSubmissionContext appContext =
      Records.newRecord(ApplicationSubmissionContext.class);
  appContext.setApplicationId(cluster.createFakeAppId());
  ContainerLaunchContext amContainer =
      Records.newRecord(ContainerLaunchContext.class);
  appContext.setAMContainerSpec(amContainer);
  Resource capability = Records.newRecord(Resource.class);
  capability.setMemory(10);
  capability.setVirtualCores(1);
  capability.setGpuCores(1);
  appContext.setResource(capability);
  ApplicationId appId = client.submitApplication(appContext);
  Assert.assertTrue(getActiveRM().getRMContext().getRMApps()
      .containsKey(appId));
}
Project: hadoop    File: AbstractCSQueue.java
synchronized void allocateResource(Resource clusterResource, 
    Resource resource, Set<String> nodeLabels) {

  // Update usedResources by labels
  if (nodeLabels == null || nodeLabels.isEmpty()) {
    queueUsage.incUsed(resource);
  } else {
    Set<String> anls = (accessibleLabels.contains(RMNodeLabelsManager.ANY))
        ? labelManager.getClusterNodeLabels() : accessibleLabels;
    for (String label : Sets.intersection(anls, nodeLabels)) {
      queueUsage.incUsed(label, resource);
    }
  }

  ++numContainers;
  if (null == nodeLabels || nodeLabels.isEmpty()) {
    CSQueueUtils.updateQueueStatistics(resourceCalculator, this, getParent(),
        labelManager.getResourceByLabel(RMNodeLabelsManager.NO_LABEL, clusterResource), minimumAllocation);
  }
}
Project: hadoop    File: TestInMemoryReservationAllocation.java
private Map<ReservationInterval, ReservationRequest> generateAllocation(
    int startTime, int[] alloc, boolean isStep, boolean isGang) {
  Map<ReservationInterval, ReservationRequest> req =
      new HashMap<ReservationInterval, ReservationRequest>();
  int numContainers = 0;
  for (int i = 0; i < alloc.length; i++) {
    if (isStep) {
      numContainers = alloc[i] + i;
    } else {
      numContainers = alloc[i];
    }
    ReservationRequest rr =
        ReservationRequest.newInstance(Resource.newInstance(1024, 1, 1),
            numContainers);
    if (isGang) {
      rr.setConcurrency(numContainers);
    }
    req.put(new ReservationInterval(startTime + i, startTime + i + 1), rr);
  }
  return req;
}
Project: hadoop    File: TestInMemoryPlan.java
@Test
public void testAddReservation() {
  Plan plan =
      new InMemoryPlan(queueMetrics, policy, agent, totalCapacity, 1L,
          resCalc, minAlloc, maxAlloc, planName, replanner, true);
  ReservationId reservationID =
      ReservationSystemTestUtil.getNewReservationId();
  int[] alloc = { 10, 10, 10, 10, 10, 10 };
  int start = 100;
  Map<ReservationInterval, ReservationRequest> allocations =
      generateAllocation(start, alloc, false);
  ReservationDefinition rDef =
      createSimpleReservationDefinition(start, start + alloc.length,
          alloc.length, allocations.values());
  ReservationAllocation rAllocation =
      new InMemoryReservationAllocation(reservationID, rDef, user, planName,
          start, start + alloc.length, allocations, resCalc, minAlloc);
  Assert.assertNull(plan.getReservationById(reservationID));
  try {
    plan.addReservation(rAllocation);
  } catch (PlanningException e) {
    Assert.fail(e.getMessage());
  }
  doAssertions(plan, rAllocation);
  for (int i = 0; i < alloc.length; i++) {
    Assert.assertEquals(Resource.newInstance(1024 * alloc[i], alloc[i], alloc[i]),
        plan.getTotalCommittedResources(start + i));
    Assert.assertEquals(Resource.newInstance(1024 * alloc[i], alloc[i], alloc[i]),
        plan.getConsumptionForUser(user, start + i));
  }
}
Project: hadoop    File: FifoScheduler.java
@Override
public void recoverContainer(Resource clusterResource,
    SchedulerApplicationAttempt schedulerAttempt, RMContainer rmContainer) {
  if (rmContainer.getState().equals(RMContainerState.COMPLETED)) {
    return;
  }
  increaseUsedResources(rmContainer);
  updateAppHeadRoom(schedulerAttempt);
  updateAvailableResourcesMetrics();
}
Project: hadoop    File: QueueMetrics.java
public void releaseResources(String user, int containers, Resource res) {
  allocatedContainers.decr(containers);
  aggregateContainersReleased.incr(containers);
  allocatedMB.decr(res.getMemory() * containers);
  allocatedVCores.decr(res.getVirtualCores() * containers);
  allocatedGCores.decr(res.getGpuCores() * containers);
  QueueMetrics userMetrics = getUserMetrics(user);
  if (userMetrics != null) {
    userMetrics.releaseResources(user, containers, res);
  }
  if (parent != null) {
    parent.releaseResources(user, containers, res);
  }
}
Project: hadoop    File: Resources.java
public static Resource multiplyAndRoundDown(Resource lhs, double by) {
  Resource out = clone(lhs);
  out.setMemory((int)(lhs.getMemory() * by));
  out.setVirtualCores((int)(lhs.getVirtualCores() * by));
  out.setGpuCores((int)(lhs.getGpuCores() * by));
  return out;
}
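
For example (values illustrative; note that this fork's Resource carries a third, GPU dimension):

// 50% of <1024 MB, 3 vcores, 1 gpu>, each dimension rounded down
Resource half = Resources.multiplyAndRoundDown(
    Resource.newInstance(1024, 3, 1), 0.5);
// -> <512 MB, 1 vcore, 0 gpus>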
Project: TensorFlowOnYARN    File: ApplicationMaster.java
/**
 * Set up the request that will be sent to the RM for the container ask.
 *
 * @return the setup ResourceRequest to be sent to RM
 */
private ContainerRequest setupContainerAskForRM() {
  // Set up resource type requirements
  // For now, only memory and CPU are supported, so we set memory and CPU requirements
  Resource capability = Resource.newInstance(containerMemory, containerVCores);
  Priority priority = Priority.newInstance(0);

  return new ContainerRequest(capability, null, null, priority);
}
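
A minimal sketch of how the returned ask is typically handed to the RM through AMRMClient (registration and the allocate loop are omitted; conf is assumed to be a YarnConfiguration already in scope):

AMRMClient<ContainerRequest> amRMClient = AMRMClient.createAMRMClient();
amRMClient.init(conf);
amRMClient.start();
ContainerRequest containerAsk = setupContainerAskForRM();
amRMClient.addContainerRequest(containerAsk);  // satisfied by a later allocate() call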
Project: hadoop    File: RegisterNodeManagerRequestPBImpl.java
@Override
public void setResource(Resource resource) {
  maybeInitBuilder();
  if (resource == null) {
    builder.clearResource();
  }
  this.resource = resource;
}
Project: hadoop    File: BuilderUtils.java
public static Container newContainer(ContainerId containerId, NodeId nodeId,
    String nodeHttpAddress, Resource resource, Priority priority,
    Token containerToken) {
  Container container = recordFactory.newRecordInstance(Container.class);
  container.setId(containerId);
  container.setNodeId(nodeId);
  container.setNodeHttpAddress(nodeHttpAddress);
  container.setResource(resource);
  container.setPriority(priority);
  container.setContainerToken(containerToken);
  return container;
}
Project: hadoop    File: SchedulerUtils.java
/**
 * Utility method to normalize a list of resource requests, by ensuring that
 * the memory for each request is a multiple of minMemory and is not zero.
 */
public static void normalizeRequests(
    List<ResourceRequest> asks,
    ResourceCalculator resourceCalculator, 
    Resource clusterResource,
    Resource minimumResource,
    Resource maximumResource,
    Resource incrementResource) {
  for (ResourceRequest ask : asks) {
    normalizeRequest(
        ask, resourceCalculator, clusterResource, minimumResource,
        maximumResource, incrementResource);
  }
}
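
The rounding rule the javadoc describes, as a standalone sketch (a hypothetical helper, not part of SchedulerUtils):

// round an ask up to a multiple of the increment, clamped to [min, max]
static int normalizeMemory(int askMB, int minMB, int incMB, int maxMB) {
  int mb = Math.max(askMB, minMB);          // never below the minimum, never zero
  mb = ((mb + incMB - 1) / incMB) * incMB;  // round up to a multiple of incMB
  return Math.min(mb, maxMB);               // cap at the maximum allocation
}
// normalizeMemory(1500, 1024, 1024, 8192) -> 2048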
Project: hadoop    File: NodeReportPBImpl.java
@Override
public Resource getCapability() {
  if (this.capability != null) {
    return this.capability;
  }

  NodeReportProtoOrBuilder p = viaProto ? proto : builder;
  if (!p.hasCapability()) {
    return null;
  }
  this.capability = convertFromProtoFormat(p.getCapability());
  return this.capability;
}
Project: hadoop    File: NodeReportPBImpl.java
@Override
public void setCapability(Resource capability) {
  maybeInitBuilder();
  if (capability == null) {
    builder.clearCapability();
  }
  this.capability = capability;
}
Project: hadoop    File: TestResourceTrackerService.java
@Test
public void testNodeRegistrationVersionLessThanRM() throws Exception {
  writeToHostsFile("host2");
  Configuration conf = new Configuration();
  conf.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH, hostFile
      .getAbsolutePath());
  conf.set(YarnConfiguration.RM_NODEMANAGER_MINIMUM_VERSION, "EqualToRM");
  rm = new MockRM(conf);
  rm.start();
  String nmVersion = "1.9.9";

  ResourceTrackerService resourceTrackerService = rm.getResourceTrackerService();
  RegisterNodeManagerRequest req = Records.newRecord(
      RegisterNodeManagerRequest.class);
  NodeId nodeId = NodeId.newInstance("host2", 1234);
  Resource capability = BuilderUtils.newResource(1024, 1, 1);
  req.setResource(capability);
  req.setNodeId(nodeId);
  req.setHttpPort(1234);
  req.setNMVersion(nmVersion);
  // trying to register a node whose version is below the RM minimum
  RegisterNodeManagerResponse response = resourceTrackerService.registerNodeManager(req);
  Assert.assertEquals(NodeAction.SHUTDOWN, response.getNodeAction());
  Assert.assertTrue("Diagnostic message did not contain: 'Disallowed NodeManager " +
      "Version "+ nmVersion + ", is less than the minimum version'",
      response.getDiagnosticsMessage().contains("Disallowed NodeManager Version " +
          nmVersion + ", is less than the minimum version "));

}
Project: hadoop    File: TestApplicationCleanup.java
public static NMContainerStatus createNMContainerStatus(
    ApplicationAttemptId appAttemptId, int id, ContainerState containerState,
    int memory) {
  ContainerId containerId = ContainerId.newContainerId(appAttemptId, id);
  NMContainerStatus containerReport =
      NMContainerStatus.newInstance(containerId, containerState,
          Resource.newInstance(memory, 1, 1), "recover container", 0,
          Priority.newInstance(0), 0);
  return containerReport;
}
Project: hadoop    File: LeafQueue.java
public void assignContainer(Resource resource,
    Set<String> nodeLabels) {
  if (nodeLabels == null || nodeLabels.isEmpty()) {
    userResourceUsage.incUsed(resource);
  } else {
    for (String label : nodeLabels) {
      userResourceUsage.incUsed(label, resource);
    }
  }
}
Project: hadoop    File: WindowsSecureContainerExecutor.java
@Override
protected CommandExecutor buildCommandExecutor(String wrapperScriptPath,
    String containerIdStr, String userName, Path pidFile, Resource resource,
    File workDir, Map<String, String> environment) throws IOException {
  return new WintuilsProcessStubExecutor(
      workDir.toString(),
      containerIdStr, userName, pidFile.toString(),
      "cmd /c " + wrapperScriptPath);
}
Project: hadoop    File: TestRMNodeTransitions.java
private RMNodeImpl getRebootedNode() {
  NodeId nodeId = BuilderUtils.newNodeId("localhost", 0);
  Resource capability = Resource.newInstance(4096, 4, 4);
  RMNodeImpl node = new RMNodeImpl(nodeId, rmContext, null, 0, 0,
      null, capability, null);
  node.handle(new RMNodeStartedEvent(node.getNodeID(), null, null));
  Assert.assertEquals(NodeState.RUNNING, node.getState());
  node.handle(new RMNodeEvent(node.getNodeID(), RMNodeEventType.REBOOTING));
  Assert.assertEquals(NodeState.REBOOTED, node.getState());
  return node;
}
Project: hadoop    File: RMContainerRequestor.java
private void addResourceRequest(Priority priority, String resourceName,
    Resource capability, String nodeLabelExpression) {
  Map<String, Map<Resource, ResourceRequest>> remoteRequests =
    this.remoteRequestsTable.get(priority);
  if (remoteRequests == null) {
    remoteRequests = new HashMap<String, Map<Resource, ResourceRequest>>();
    this.remoteRequestsTable.put(priority, remoteRequests);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Added priority=" + priority);
    }
  }
  Map<Resource, ResourceRequest> reqMap = remoteRequests.get(resourceName);
  if (reqMap == null) {
    reqMap = new HashMap<Resource, ResourceRequest>();
    remoteRequests.put(resourceName, reqMap);
  }
  ResourceRequest remoteRequest = reqMap.get(capability);
  if (remoteRequest == null) {
    remoteRequest = recordFactory.newRecordInstance(ResourceRequest.class);
    remoteRequest.setPriority(priority);
    remoteRequest.setResourceName(resourceName);
    remoteRequest.setCapability(capability);
    remoteRequest.setNumContainers(0);
    remoteRequest.setNodeLabelExpression(nodeLabelExpression);
    reqMap.put(capability, remoteRequest);
  }
  remoteRequest.setNumContainers(remoteRequest.getNumContainers() + 1);

  // Note this down for next interaction with ResourceManager
  addResourceRequestToAsk(remoteRequest);
  if (LOG.isDebugEnabled()) {
    LOG.debug("addResourceRequest:" + " applicationId="
        + applicationId.getId() + " priority=" + priority.getPriority()
        + " resourceName=" + resourceName + " numContainers="
        + remoteRequest.getNumContainers() + " #asks=" + ask.size());
  }
}
Project: hadoop    File: ApplicationResourceUsageReportPBImpl.java
@Override
public synchronized void setNeededResources(Resource neededResources) {
  maybeInitBuilder();
  if (neededResources == null) {
    builder.clearNeededResources();
  }
  this.neededResources = neededResources;
}
Project: hadoop    File: ResourceRequestPBImpl.java
@Override
public void setCapability(Resource capability) {
  maybeInitBuilder();
  if (capability == null) {
    builder.clearCapability();
  }
  this.capability = capability;
}
Project: scheduling-connector-for-hadoop    File: SlurmApplicationClient.java
protected Resource getMaxCapability() {
  int maxAllocMemory = conf.getInt(
      YARN_APPLICATION_HPC_SCHEDULER_MAXIMUM_ALLOCATION_MB,
      DEFAULT_YARN_APPLICATION_HPC_SCHEDULER_MAXIMUM_ALLOCATION_MB);
  int maxAllocCPUs = conf.getInt(
      YARN_APPLICATION_HPC_SCHEDULER_MAXIMUM_ALLOCATION_VCORES,
      DEFAULT_YARN_APPLICATION_HPC_SCHEDULER_MAXIMUM_ALLOCATION_VCORES);
  return Resource.newInstance(maxAllocMemory, maxAllocCPUs);
}
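
A hedged usage fragment that overrides the two defaults before reading the max capability (the constant names come from the snippet above; the concrete values are assumed):

Configuration conf = new Configuration();
conf.setInt(YARN_APPLICATION_HPC_SCHEDULER_MAXIMUM_ALLOCATION_MB, 8192);
conf.setInt(YARN_APPLICATION_HPC_SCHEDULER_MAXIMUM_ALLOCATION_VCORES, 16);
// getMaxCapability() would then return <8192 MB, 16 vcores>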
Project: hadoop    File: RMNodeLabelsManager.java
public void deactivateNode(NodeId nodeId) {
  try {
    writeLock.lock();

    // snapshot the node map before the change
    Map<String, Host> before = cloneNodeMap(ImmutableSet.of(nodeId));
    Node nm = getNMInNodeSet(nodeId);
    if (null != nm) {
      if (null == nm.labels) {
        // When a node is deactivated, remove the NM from the node collection
        // if no labels were explicitly set for this particular NM

        // Save labels first, we need to remove label->nodes relation later
        Set<String> savedNodeLabels = getLabelsOnNode(nodeId);

        // Remove this node in nodes collection
        nodeCollections.get(nodeId.getHost()).nms.remove(nodeId);

        // Remove this node in labels->node
        removeNodeFromLabels(nodeId, savedNodeLabels);
      } else {
        // mark the NM as not running and zero out its resource
        nm.running = false;
        nm.resource = Resource.newInstance(0, 0, 0);
      }
    }

    // snapshot the node map after the change
    Map<String, Host> after = cloneNodeMap(ImmutableSet.of(nodeId));

    updateResourceMappings(before, after);
  } finally {
    writeLock.unlock();
  }
}
Project: hadoop    File: TestGreedyReservationAgent.java
@SuppressWarnings("javadoc")
@Test
public void testSimple() throws PlanningException {

  prepareBasicPlan();

  // create a request with a single atomic ask
  ReservationDefinition rr = new ReservationDefinitionPBImpl();
  rr.setArrival(5 * step);
  rr.setDeadline(20 * step);
  ReservationRequest r = ReservationRequest.newInstance(
      Resource.newInstance(2048, 2, 2), 10, 5, 10 * step);
  ReservationRequests reqs = new ReservationRequestsPBImpl();
  reqs.setReservationResources(Collections.singletonList(r));
  rr.setReservationRequests(reqs);

  ReservationId reservationID = ReservationSystemTestUtil
      .getNewReservationId();
  agent.createReservation(reservationID, "u1", plan, rr);

  assertTrue("Agent-based allocation failed", reservationID != null);
  assertTrue("Agent-based allocation failed", plan.getAllReservations()
      .size() == 3);

  ReservationAllocation cs = plan.getReservationById(reservationID);

  System.out.println("--------AFTER SIMPLE ALLOCATION (queue: "
      + reservationID + ")----------");
  System.out.println(plan.toString());
  System.out.println(plan.toCumulativeString());

  for (long i = 10 * step; i < 20 * step; i++) {
    assertTrue(
        "Agent-based allocation unexpected",
        Resources.equals(cs.getResourcesAtTime(i),
            Resource.newInstance(2048 * 10, 2 * 10, 2 * 10)));
  }

}