Java Class org.apache.hadoop.yarn.api.records.ApplicationAttemptId: Example Source Code
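The snippets below, collected from open-source projects, show how the YARN record ApplicationAttemptId is created, parsed, serialized, and passed between services. As a primer (a minimal sketch, not taken from any of the projects below): an ApplicationAttemptId pairs an ApplicationId with a 1-based attempt number and prints in a canonical string form.

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;

// Build the id hierarchy top-down: cluster timestamp plus application
// sequence number, then the attempt number within that application.
ApplicationId appId = ApplicationId.newInstance(1352994193343L, 1);
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
System.out.println(attemptId); // appattempt_1352994193343_0001_000001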

Project: angel    File: LocalResourceManager.java
private void allocate(LocalRMEvent event) throws IOException, InvalidParameterException {
  ApplicationId appId = event.getAppId();
  int attemptIndex;
  if(appIdToAttemptIndexMap.containsKey(appId)) {
    attemptIndex = appIdToAttemptIndexMap.get(appId);     
  } else {
    attemptIndex = 1;
  }

  appIdToAttemptIndexMap.put(appId, attemptIndex + 1);

  if(attemptIndex > maxAttemptNum) {
    return;
  }
  LocalClusterContext.get().setMaster(null);
  stopWorkerAndPS();

  LocalMaster master = new LocalMaster(ApplicationAttemptId.newInstance(appId, attemptIndex));
  master.start();
  LocalClusterContext.get().setMaster(master);
}
Project: hadoop    File: Application.java
public Application(String user, String queue, ResourceManager resourceManager) 
    throws YarnException {
  this.user = user;
  this.queue = queue;
  this.resourceManager = resourceManager;
  // register an application
  GetNewApplicationRequest request =
          Records.newRecord(GetNewApplicationRequest.class);
  GetNewApplicationResponse newApp = 
      this.resourceManager.getClientRMService().getNewApplication(request);
  this.applicationId = newApp.getApplicationId();

  this.applicationAttemptId =
      ApplicationAttemptId.newInstance(this.applicationId,
        this.numAttempts.getAndIncrement());
}
Project: hadoop    File: TestClientRMService.java
@Test
public void testGetContainerReport() throws YarnException, IOException {
  ClientRMService rmService = createRMService();
  RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
  GetContainerReportRequest request = recordFactory
      .newRecordInstance(GetContainerReportRequest.class);
  ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(
      ApplicationId.newInstance(123456, 1), 1);
  ContainerId containerId = ContainerId.newContainerId(attemptId, 1);
  request.setContainerId(containerId);

  try {
    GetContainerReportResponse response = rmService
        .getContainerReport(request);
    Assert.assertEquals(containerId, response.getContainerReport()
        .getContainerId());
  } catch (ApplicationNotFoundException ex) {
    Assert.fail(ex.getMessage());
  }
}
Project: hadoop    File: TestMemoryApplicationHistoryStore.java
@Test
public void testMassiveWriteContainerHistory() throws IOException {
  long mb = 1024 * 1024;
  Runtime runtime = Runtime.getRuntime();
  long usedMemoryBefore = (runtime.totalMemory() - runtime.freeMemory()) / mb;
  int numContainers = 100000;
  ApplicationId appId = ApplicationId.newInstance(0, 1);
  ApplicationAttemptId appAttemptId =
      ApplicationAttemptId.newInstance(appId, 1);
  for (int i = 1; i <= numContainers; ++i) {
    ContainerId containerId = ContainerId.newContainerId(appAttemptId, i);
    writeContainerStartData(containerId);
    writeContainerFinishData(containerId);
  }
  long usedMemoryAfter = (runtime.totalMemory() - runtime.freeMemory()) / mb;
  Assert.assertTrue((usedMemoryAfter - usedMemoryBefore) < 400);
}
Project: hadoop    File: TestFSRMStateStore.java
@Override
protected void modifyAppState() throws Exception {
  // simulate that appAttemptFile1 is still .new while the old one has been deleted
  String appAttemptIdStr1 = "appattempt_1352994193343_0001_000001";
  ApplicationAttemptId attemptId1 =
      ConverterUtils.toApplicationAttemptId(appAttemptIdStr1);
  Path appDir =
          fsTester.store.getAppDir(attemptId1.getApplicationId().toString());
  Path appAttemptFile1 =
      new Path(appDir, attemptId1.toString() + ".new");
  FileSystemRMStateStore fileSystemRMStateStore =
      (FileSystemRMStateStore) fsTester.getRMStateStore();
  fileSystemRMStateStore.renameFile(appAttemptFile1,
          new Path(appAttemptFile1.getParent(),
                  appAttemptFile1.getName() + ".new"));
}
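The snippet above relies on ConverterUtils.toApplicationAttemptId to parse the canonical appattempt_<clusterTimestamp>_<appId>_<attemptId> string back into a record. A round trip under that assumption:

ApplicationAttemptId parsed =
    ConverterUtils.toApplicationAttemptId("appattempt_1352994193343_0001_000001");
// Each component of the string is exposed on the parsed record:
assert parsed.getApplicationId().getClusterTimestamp() == 1352994193343L;
assert parsed.getApplicationId().getId() == 1;
assert parsed.getAttemptId() == 1;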
Project: hadoop    File: TestAHSClient.java
@Test(timeout = 10000)
public void testGetApplicationAttempt() throws YarnException, IOException {
  Configuration conf = new Configuration();
  final AHSClient client = new MockAHSClient();
  client.init(conf);
  client.start();

  List<ApplicationReport> expectedReports =
      ((MockAHSClient) client).getReports();

  ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
  ApplicationAttemptId appAttemptId =
      ApplicationAttemptId.newInstance(applicationId, 1);
  ApplicationAttemptReport report =
      client.getApplicationAttemptReport(appAttemptId);
  Assert.assertNotNull(report);
  Assert.assertEquals(report.getApplicationAttemptId().toString(),
    expectedReports.get(0).getCurrentApplicationAttemptId().toString());
  client.stop();
}
Project: hadoop    File: TestFairScheduler.java
@Test
public void testPreemptionVariablesForQueueCreatedRuntime() throws Exception {
  conf.set(FairSchedulerConfiguration.USER_AS_DEFAULT_QUEUE, "true");
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  // Set preemption variables for the root queue
  FSParentQueue root = scheduler.getQueueManager().getRootQueue();
  root.setMinSharePreemptionTimeout(10000);
  root.setFairSharePreemptionTimeout(15000);
  root.setFairSharePreemptionThreshold(.6f);

  // User1 submits one application
  ApplicationAttemptId appAttemptId = createAppAttemptId(1, 1);
  createApplicationWithAMResource(appAttemptId, "default", "user1", null);

  // The user1 queue should inherit the configurations from the root queue
  FSLeafQueue userQueue =
      scheduler.getQueueManager().getLeafQueue("user1", true);
  assertEquals(1, userQueue.getNumRunnableApps());
  assertEquals(10000, userQueue.getMinSharePreemptionTimeout());
  assertEquals(15000, userQueue.getFairSharePreemptionTimeout());
  assertEquals(.6f, userQueue.getFairSharePreemptionThreshold(), 0.001);
}
Project: hadoop    File: TestYARNTokenIdentifier.java
@Test
public void testAMRMTokenIdentifier() throws IOException {
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(
      ApplicationId.newInstance(1, 1), 1);
  int masterKeyId = 1;

  AMRMTokenIdentifier token = new AMRMTokenIdentifier(appAttemptId, masterKeyId);

  AMRMTokenIdentifier anotherToken = new AMRMTokenIdentifier();
  byte[] tokenContent = token.getBytes();
  DataInputBuffer dib = new DataInputBuffer();
  dib.reset(tokenContent, tokenContent.length);
  anotherToken.readFields(dib);

  // verify that the deserialized record equals the original
  Assert.assertEquals("Token is not the same after serialization " +
      "and deserialization.", token, anotherToken);

  Assert.assertEquals("ApplicationAttemptId from proto does not match the original token",
      anotherToken.getApplicationAttemptId(), appAttemptId);

  Assert.assertEquals("masterKeyId from proto does not match the original token",
      anotherToken.getKeyId(), masterKeyId);
}
Project: hadoop    File: TestApplicationHistoryClientService.java
@Test
public void testContainers() throws IOException, YarnException {
  ApplicationId appId = ApplicationId.newInstance(0, 1);
  ApplicationAttemptId appAttemptId =
      ApplicationAttemptId.newInstance(appId, 1);
  ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1);
  ContainerId containerId1 = ContainerId.newContainerId(appAttemptId, 2);
  GetContainersRequest request =
      GetContainersRequest.newInstance(appAttemptId);
  GetContainersResponse response =
      clientService.getContainers(request);
  List<ContainerReport> containers = response.getContainerList();
  Assert.assertNotNull(containers);
  Assert.assertEquals(containerId, containers.get(0).getContainerId());
  Assert.assertEquals(containerId1, containers.get(1).getContainerId());
}
Project: hadoop    File: TestAHSClient.java
@Test(timeout = 10000)
public void testGetContainers() throws YarnException, IOException {
  Configuration conf = new Configuration();
  final AHSClient client = new MockAHSClient();
  client.init(conf);
  client.start();

  ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
  ApplicationAttemptId appAttemptId =
      ApplicationAttemptId.newInstance(applicationId, 1);
  List<ContainerReport> reports = client.getContainers(appAttemptId);
  Assert.assertNotNull(reports);
  Assert.assertEquals(reports.get(0).getContainerId(),
    (ContainerId.newContainerId(appAttemptId, 1)));
  Assert.assertEquals(reports.get(1).getContainerId(),
    (ContainerId.newContainerId(appAttemptId, 2)));
  client.stop();
}
Project: hadoop    File: TestAHSWebServices.java
@Test
public void testMultipleContainers() throws Exception {
  ApplicationId appId = ApplicationId.newInstance(0, 1);
  ApplicationAttemptId appAttemptId =
      ApplicationAttemptId.newInstance(appId, 1);
  WebResource r = resource();
  ClientResponse response =
      r.path("ws").path("v1").path("applicationhistory").path("apps")
        .path(appId.toString()).path("appattempts")
        .path(appAttemptId.toString()).path("containers")
        .queryParam("user.name", USERS[round])
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  if (round == 1) {
    assertEquals(
        Status.FORBIDDEN, response.getClientResponseStatus());
    return;
  }
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject json = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, json.length());
  JSONObject containers = json.getJSONObject("containers");
  assertEquals("incorrect number of elements", 1, containers.length());
  JSONArray array = containers.getJSONArray("container");
  assertEquals("incorrect number of elements", 5, array.length());
}
Project: hadoop    File: TestYarnClient.java
private ContainerReport getContainer(
    ContainerId containerId,
    HashMap<ApplicationAttemptId, List<ContainerReport>> containersToAppAttemptMapping)
    throws YarnException, IOException {
  List<ContainerReport> containersForAppAttempt =
      containersToAppAttemptMapping.get(containerId
          .getApplicationAttemptId());
  if (containersForAppAttempt == null) {
    throw new ApplicationNotFoundException(containerId
        .getApplicationAttemptId().getApplicationId() + " is not found ");
  }
  Iterator<ContainerReport> iterator = containersForAppAttempt.iterator();
  while (iterator.hasNext()) {
    ContainerReport next = iterator.next();
    if (next.getContainerId().equals(containerId)) {
      return next;
    }
  }
  throw new ContainerNotFoundException(containerId + " is not found ");
}
Project: hadoop    File: YarnClientImpl.java
@Override
public ApplicationAttemptReport getApplicationAttemptReport(
    ApplicationAttemptId appAttemptId) throws YarnException, IOException {
  try {
    GetApplicationAttemptReportRequest request = Records
        .newRecord(GetApplicationAttemptReportRequest.class);
    request.setApplicationAttemptId(appAttemptId);
    GetApplicationAttemptReportResponse response = rmClient
        .getApplicationAttemptReport(request);
    return response.getApplicationAttemptReport();
  } catch (YarnException e) {
    if (!historyServiceEnabled) {
      // Just throw it as usual if historyService is not enabled.
      throw e;
    }
    // Even if the history service is enabled, still treat all exceptions the
    // same, except for the following one
    if (e.getClass() != ApplicationNotFoundException.class) {
      throw e;
    }
    return historyClient.getApplicationAttemptReport(appAttemptId);
  }
}
Project: hadoop    File: MemoryApplicationHistoryStore.java
@Override
public void applicationAttemptStarted(
    ApplicationAttemptStartData appAttemptStart) throws IOException {
  ConcurrentMap<ApplicationAttemptId, ApplicationAttemptHistoryData> subMap =
      getSubMap(appAttemptStart.getApplicationAttemptId().getApplicationId());
  ApplicationAttemptHistoryData oldData =
      subMap.putIfAbsent(appAttemptStart.getApplicationAttemptId(),
        ApplicationAttemptHistoryData.newInstance(
          appAttemptStart.getApplicationAttemptId(),
          appAttemptStart.getHost(), appAttemptStart.getRPCPort(),
          appAttemptStart.getMasterContainerId(), null, null, null, null));
  if (oldData != null) {
    throw new IOException("The start information of application attempt "
        + appAttemptStart.getApplicationAttemptId() + " is already stored.");
  }
}
Project: hadoop    File: ZKRMStateStore.java
@Override
public synchronized void updateApplicationAttemptStateInternal(
    ApplicationAttemptId appAttemptId,
    ApplicationAttemptStateData attemptStateDataPB)
    throws Exception {
  String appIdStr = appAttemptId.getApplicationId().toString();
  String appAttemptIdStr = appAttemptId.toString();
  String appDirPath = getNodePath(rmAppRoot, appIdStr);
  String nodeUpdatePath = getNodePath(appDirPath, appAttemptIdStr);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Storing final state info for attempt: " + appAttemptIdStr
        + " at: " + nodeUpdatePath);
  }
  byte[] attemptStateData = attemptStateDataPB.getProto().toByteArray();

  if (existsWithRetries(nodeUpdatePath, false) != null) {
    setDataWithRetries(nodeUpdatePath, attemptStateData, -1);
  } else {
    createWithRetries(nodeUpdatePath, attemptStateData, zkAcl,
      CreateMode.PERSISTENT);
    LOG.debug(appAttemptId + " znode didn't exist. Created a new znode to"
        + " update the application attempt state.");
  }
}
Project: hadoop    File: TestContainerManagerSecurity.java
private void stopContainer(YarnRPC rpc, Token nmToken,
    List<ContainerId> containerId, ApplicationAttemptId appAttemptId,
    NodeId nodeId) throws Exception {
  StopContainersRequest request =
      StopContainersRequest.newInstance(containerId);
  ContainerManagementProtocol proxy = null;
  try {
    proxy =
        getContainerManagementProtocolProxy(rpc, nmToken, nodeId,
            appAttemptId.toString());
    StopContainersResponse response = proxy.stopContainers(request);
    if (response.getFailedRequests() != null &&
        response.getFailedRequests().containsKey(containerId)) {
      parseAndThrowException(response.getFailedRequests().get(containerId)
          .deSerialize());
    }
  } finally {
    // Always stop the proxy, whether or not stopContainers threw.
    if (proxy != null) {
      rpc.stopProxy(proxy, conf);
    }
  }
}
Project: hadoop    File: TestClientRMService.java
@Test
public void testGetContainers() throws YarnException, IOException {
  ClientRMService rmService = createRMService();
  RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
  GetContainersRequest request = recordFactory
      .newRecordInstance(GetContainersRequest.class);
  ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(
      ApplicationId.newInstance(123456, 1), 1);
  ContainerId containerId = ContainerId.newContainerId(attemptId, 1);
  request.setApplicationAttemptId(attemptId);
  try {
    GetContainersResponse response = rmService.getContainers(request);
    Assert.assertEquals(containerId, response.getContainerList().get(0)
        .getContainerId());
  } catch (ApplicationNotFoundException ex) {
    Assert.fail(ex.getMessage());
  }
}
Project: hadoop    File: MemoryApplicationHistoryStore.java
@Override
public void applicationAttemptFinished(
    ApplicationAttemptFinishData appAttemptFinish) throws IOException {
  ConcurrentMap<ApplicationAttemptId, ApplicationAttemptHistoryData> subMap =
      getSubMap(appAttemptFinish.getApplicationAttemptId().getApplicationId());
  ApplicationAttemptHistoryData data =
      subMap.get(appAttemptFinish.getApplicationAttemptId());
  if (data == null) {
    throw new IOException("The finish information of application attempt "
        + appAttemptFinish.getApplicationAttemptId() + " is stored before"
        + " the start information.");
  }
  // Make the assumption that YarnApplicationAttemptState should not be null
  // if the finish information is already recorded
  if (data.getYarnApplicationAttemptState() != null) {
    throw new IOException("The finish information of application attempt "
        + appAttemptFinish.getApplicationAttemptId() + " is already stored.");
  }
  data.setTrackingURL(appAttemptFinish.getTrackingURL());
  data.setDiagnosticsInfo(appAttemptFinish.getDiagnosticsInfo());
  data.setFinalApplicationStatus(
      appAttemptFinish.getFinalApplicationStatus());
  data.setYarnApplicationAttemptState(
      appAttemptFinish.getYarnApplicationAttemptState());
}
Project: hadoop    File: NMTokenSecretManagerInRM.java
@Private
@VisibleForTesting
public boolean isApplicationAttemptNMTokenPresent(
    ApplicationAttemptId appAttemptId, NodeId nodeId) {
  try {
    this.readLock.lock();
    HashSet<NodeId> nodes = this.appAttemptToNodeKeyMap.get(appAttemptId);
    return nodes != null && nodes.contains(nodeId);
  } finally {
    this.readLock.unlock();
  }
}
Project: hadoop    File: TestJobImpl.java
@Test
public void testReportDiagnostics() throws Exception {
  JobID jobID = JobID.forName("job_1234567890000_0001");
  JobId jobId = TypeConverter.toYarn(jobID);
  final String diagMsg = "some diagnostic message";
  final JobDiagnosticsUpdateEvent diagUpdateEvent =
      new JobDiagnosticsUpdateEvent(jobId, diagMsg);
  MRAppMetrics mrAppMetrics = MRAppMetrics.create();
  AppContext mockContext = mock(AppContext.class);
  when(mockContext.hasSuccessfullyUnregistered()).thenReturn(true);
  JobImpl job = new JobImpl(jobId, Records
      .newRecord(ApplicationAttemptId.class), new Configuration(),
      mock(EventHandler.class),
      null, mock(JobTokenSecretManager.class), null,
      new SystemClock(), null,
      mrAppMetrics, null, true, null, 0, null, mockContext, null, null);
  job.handle(diagUpdateEvent);
  String diagnostics = job.getReport().getDiagnostics();
  Assert.assertNotNull(diagnostics);
  Assert.assertTrue(diagnostics.contains(diagMsg));

  job = new JobImpl(jobId, Records
      .newRecord(ApplicationAttemptId.class), new Configuration(),
      mock(EventHandler.class),
      null, mock(JobTokenSecretManager.class), null,
      new SystemClock(), null,
      mrAppMetrics, null, true, null, 0, null, mockContext, null, null);
  job.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
  job.handle(diagUpdateEvent);
  diagnostics = job.getReport().getDiagnostics();
  Assert.assertNotNull(diagnostics);
  Assert.assertTrue(diagnostics.contains(diagMsg));
}
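Note the construction style here: Records.newRecord(ApplicationAttemptId.class) yields an empty record with no fields set, which suffices for a test that never inspects the id. Contrast this with ApplicationAttemptId.newInstance, used in most other snippets, which populates both fields (a sketch):

// Placeholder record; fields unset, fine when the id is never read.
ApplicationAttemptId blank = Records.newRecord(ApplicationAttemptId.class);
// Fully populated record:
ApplicationAttemptId real =
    ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 1), 1);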
Project: hadoop    File: AppAttemptRegisteredEvent.java
public AppAttemptRegisteredEvent(
    ApplicationAttemptId appAttemptId,
    String host,
    int rpcPort,
    String trackingUrl,
    String originalTrackingUrl,
    ContainerId masterContainerId,
    long registeredTime) {
  super(SystemMetricsEventType.APP_ATTEMPT_REGISTERED, registeredTime);
  this.appAttemptId = appAttemptId;
  this.host = host;
  this.rpcPort = rpcPort;
  // This is the tracking URL after the application attempt is registered
  this.trackingUrl = trackingUrl;
  this.originalTrackingUrl = originalTrackingUrl;
  this.masterContainerId = masterContainerId;
}
Project: hadoop    File: TestLeafQueue.java
@Test
public void testUserQueueAcl() throws Exception {

  // Manipulate queue 'd'
  LeafQueue d = stubLeafQueue((LeafQueue) queues.get(D));

  // Users
  final String user_d = "user_d";

  // Submit applications
  final ApplicationAttemptId appAttemptId_0 = TestUtils
      .getMockApplicationAttemptId(0, 1);
  FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_d, d, null,
      spyRMContext);
  d.submitApplicationAttempt(app_0, user_d);

  // Attempt the same application again
  final ApplicationAttemptId appAttemptId_1 = TestUtils
      .getMockApplicationAttemptId(0, 2);
  FiCaSchedulerApp app_1 = new FiCaSchedulerApp(appAttemptId_1, user_d, d, null,
      spyRMContext);
  d.submitApplicationAttempt(app_1, user_d); // same user
}
Project: hadoop    File: CapacityScheduler.java
@Override
public void preemptContainer(ApplicationAttemptId aid, RMContainer cont) {
  if(LOG.isDebugEnabled()){
    LOG.debug("PREEMPT_CONTAINER: application:" + aid.toString() +
        " container: " + cont.toString());
  }
  FiCaSchedulerApp app = getApplicationAttempt(aid);
  if (app != null) {
    app.addPreemptContainer(cont.getContainerId());
  }
}
Project: hadoop    File: AbstractYarnScheduler.java
public List<ResourceRequest> getPendingResourceRequestsForAttempt(
    ApplicationAttemptId attemptId) {
  SchedulerApplicationAttempt attempt = getApplicationAttempt(attemptId);
  if (attempt != null) {
    return attempt.getAppSchedulingInfo().getAllResourceRequests();
  }
  return null;
}
Project: hadoop    File: ClientToAMTokenSecretManager.java
public ClientToAMTokenSecretManager(
    ApplicationAttemptId applicationAttemptID, byte[] key) {
  super();
  if (key != null) {
    this.masterKey = SecretManager.createSecretKey(key);
  } else {
    this.masterKey = null;
  }

}
Project: hadoop    File: AbstractYarnScheduler.java
private RMContainer recoverAndCreateContainer(NMContainerStatus status,
    RMNode node) {
  Container container =
      Container.newInstance(status.getContainerId(), node.getNodeID(),
        node.getHttpAddress(), status.getAllocatedResource(),
        status.getPriority(), null);
  ApplicationAttemptId attemptId =
      container.getId().getApplicationAttemptId();
  RMContainer rmContainer =
      new RMContainerImpl(container, attemptId, node.getNodeID(),
        applications.get(attemptId.getApplicationId()).getUser(), rmContext,
        status.getCreationTime());
  return rmContainer;
}
Project: hadoop    File: ApplicationHistoryManagerOnTimelineStore.java
@Override
public ContainerReport getAMContainer(ApplicationAttemptId appAttemptId)
    throws YarnException, IOException {
  ApplicationAttemptReport appAttempt =
      getApplicationAttempt(appAttemptId, false);
  return getContainer(appAttempt.getAMContainerId());
}
Project: hadoop    File: NMTokenSecretManagerInRM.java
public NMTokenSecretManagerInRM(Configuration conf) {
  this.conf = conf;
  timer = new Timer();
  rollingInterval = this.conf.getLong(
      YarnConfiguration.RM_NMTOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS,
      YarnConfiguration.DEFAULT_RM_NMTOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS)
      * 1000;
  // Add an activation delay to address the following race: the RM may roll
  // over the master key and then, during scheduling, mint an NMToken with a
  // password generated off the new key before the NM has checked back in with
  // the RM to pick up the shared secret. The AM would then hold a valid
  // password generated off the new secret that the NM does not yet know about.
  // A delay of 1.5 * the expiry interval makes sure that all active NMs get
  // the updated shared key first.
  this.activationDelay =
      (long) (conf.getLong(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS,
          YarnConfiguration.DEFAULT_RM_NM_EXPIRY_INTERVAL_MS) * 1.5);
  LOG.info("NMTokenKeyRollingInterval: " + this.rollingInterval
      + "ms and NMTokenKeyActivationDelay: " + this.activationDelay
      + "ms");
  if (rollingInterval <= activationDelay * 2) {
    throw new IllegalArgumentException(
        YarnConfiguration.RM_NMTOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS
            + " should be more than 3 X "
            + YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS);
  }
  appAttemptToNodeKeyMap =
      new ConcurrentHashMap<ApplicationAttemptId, HashSet<NodeId>>();
}
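To make the guard at the end concrete: activationDelay is 1.5 times the NM expiry interval, so rejecting rollingInterval <= 2 * activationDelay enforces rollingInterval > 3 * expiry, exactly as the exception message says. With a 600,000 ms expiry interval, for example, the activation delay is 900,000 ms and the rolling interval must exceed 1,800,000 ms.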
Project: hadoop    File: TestMRAppMaster.java
@Test
public void testMRAppMasterSuccessLock() throws IOException,
    InterruptedException {
  String applicationAttemptIdStr = "appattempt_1317529182569_0004_000002";
  String containerIdStr = "container_1317529182569_0004_000002_1";
  String userName = "TestAppMasterUser";
  JobConf conf = new JobConf();
  conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
  ApplicationAttemptId applicationAttemptId = ConverterUtils
      .toApplicationAttemptId(applicationAttemptIdStr);
  JobId jobId =  TypeConverter.toYarn(
      TypeConverter.fromYarn(applicationAttemptId.getApplicationId()));
  Path start = MRApps.getStartJobCommitFile(conf, userName, jobId);
  Path end = MRApps.getEndJobCommitSuccessFile(conf, userName, jobId);
  FileSystem fs = FileSystem.get(conf);
  fs.create(start).close();
  fs.create(end).close();
  ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
  MRAppMaster appMaster =
      new MRAppMasterTest(applicationAttemptId, containerId, "host", -1, -1,
          System.currentTimeMillis(), false, false);
  boolean caught = false;
  try {
    MRAppMaster.initAndStartAppMaster(appMaster, conf, userName);
  } catch (IOException e) {
    // The IOException is expected
    LOG.info("Caught expected Exception", e);
    caught = true;
  }
  assertTrue(caught);
  assertTrue(appMaster.errorHappenedShutDown);
  assertEquals(JobStateInternal.SUCCEEDED, appMaster.forcedState);
  appMaster.stop();

  // verify the final status is SUCCEEDED
  verifyFailedStatus((MRAppMasterTest)appMaster, "SUCCEEDED");
}
Project: hadoop    File: GetContainersRequest.java
@Public
@Unstable
public static GetContainersRequest newInstance(
    ApplicationAttemptId applicationAttemptId) {
  GetContainersRequest request =
      Records.newRecord(GetContainersRequest.class);
  request.setApplicationAttemptId(applicationAttemptId);
  return request;
}
Project: hadoop    File: ApplicationReportPBImpl.java
@Override
public void setCurrentApplicationAttemptId(
    ApplicationAttemptId applicationAttemptId) {
  maybeInitBuilder();
  if (applicationAttemptId == null) {
    builder.clearCurrentApplicationAttemptId();
  }
  this.currentApplicationAttemptId = applicationAttemptId;
}
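This is the usual YARN *PBImpl setter idiom: make sure the protobuf builder exists, clear the builder field when the argument is null, and cache the passed record locally so it is merged into the proto only when the record is serialized. The ApplicationAttemptFinishDataPBImpl setter later in this list follows the same pattern.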
Project: hadoop    File: TestRMAppAttemptTransitions.java
@Test
public void testRunningToFailed() {
  Container amContainer = allocateApplicationAttempt();
  launchApplicationAttempt(amContainer);
  runApplicationAttempt(amContainer, "host", 8042, "oldtrackingurl", false);
  String containerDiagMsg = "some error";
  int exitCode = 123;
  ContainerStatus cs = BuilderUtils.newContainerStatus(amContainer.getId(),
      ContainerState.COMPLETE, containerDiagMsg, exitCode);
  ApplicationAttemptId appAttemptId = applicationAttempt.getAppAttemptId();
  NodeId anyNodeId = NodeId.newInstance("host", 1234);
  applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent(
      appAttemptId, cs, anyNodeId));

  // ContainerFinished and Expire events are ignored in FINAL_SAVING when we
  // are headed for the FAILED state.
  assertEquals(RMAppAttemptState.FINAL_SAVING,
    applicationAttempt.getAppAttemptState());
  applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent(
    applicationAttempt.getAppAttemptId(), BuilderUtils.newContainerStatus(
      amContainer.getId(), ContainerState.COMPLETE, "", 0), anyNodeId));
  applicationAttempt.handle(new RMAppAttemptEvent(
    applicationAttempt.getAppAttemptId(), RMAppAttemptEventType.EXPIRE));
  assertEquals(RMAppAttemptState.FINAL_SAVING,
    applicationAttempt.getAppAttemptState()); 
  assertEquals(YarnApplicationAttemptState.RUNNING,
      applicationAttempt.createApplicationAttemptState());
  sendAttemptUpdateSavedEvent(applicationAttempt);
  assertEquals(RMAppAttemptState.FAILED,
      applicationAttempt.getAppAttemptState());
  assertEquals(0, applicationAttempt.getJustFinishedContainers().size());
  assertEquals(amContainer, applicationAttempt.getMasterContainer());
  assertEquals(0, application.getRanNodes().size());
  String rmAppPageUrl = pjoin(RM_WEBAPP_ADDR, "cluster", "app",
      applicationAttempt.getAppAttemptId().getApplicationId());
  assertEquals(rmAppPageUrl, applicationAttempt.getOriginalTrackingUrl());
  assertEquals(rmAppPageUrl, applicationAttempt.getTrackingUrl());
  verifyAMHostAndPortInvalidated();
  verifyApplicationAttemptFinished(RMAppAttemptState.FAILED);
}
Project: hadoop    File: TestFairScheduler.java
@Test (timeout = 500000)
public void testContainerReservationAttemptExceedingQueueMax()
    throws Exception {
  conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
  PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
  out.println("<?xml version=\"1.0\"?>");
  out.println("<allocations>");
  out.println("<queue name=\"root\">");
  out.println("<queue name=\"queue1\">");
  out.println("<maxResources>2048mb,5vcores,5gcores</maxResources>");
  out.println("</queue>");
  out.println("<queue name=\"queue2\">");
  out.println("<maxResources>2048mb,10vcores,10gcores</maxResources>");
  out.println("</queue>");
  out.println("</queue>");
  out.println("</allocations>");
  out.close();

  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  // Add a node
  RMNode node1 =
      MockNodes
          .newNodeInfo(1, Resources.createResource(3072, 5, 5), 1, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
  scheduler.handle(nodeEvent1);

  // Queue 1 requests full capacity of the queue
  createSchedulingRequest(2048, "queue1", "user1", 1);
  scheduler.update();
  NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node1);
  scheduler.handle(updateEvent);

  // Make sure queue 1 is allocated app capacity
  assertEquals(2048, scheduler.getQueueManager().getQueue("queue1").
      getResourceUsage().getMemory());

  // Now queue 2 requests likewise
  createSchedulingRequest(1024, "queue2", "user2", 1);
  scheduler.update();
  scheduler.handle(updateEvent);

  // Make sure queue 2 is allocated app capacity
  assertEquals(1024, scheduler.getQueueManager().getQueue("queue2").
      getResourceUsage().getMemory());

  ApplicationAttemptId attId1 = createSchedulingRequest(1024, "queue1", "user1", 1);
  scheduler.update();
  scheduler.handle(updateEvent);

  // Ensure the reservation does not get created as allocated memory of
  // queue1 exceeds max
  assertEquals(0, scheduler.getSchedulerApp(attId1).
      getCurrentReservation().getMemory());
}
Project: hadoop    File: ApplicationAttemptFinishDataPBImpl.java
@Override
public void setApplicationAttemptId(
    ApplicationAttemptId applicationAttemptId) {
  maybeInitBuilder();
  if (applicationAttemptId == null) {
    builder.clearApplicationAttemptId();
  }
  this.applicationAttemptId = applicationAttemptId;
}
Project: hadoop    File: TestApplicatonReport.java
protected static ApplicationReport createApplicationReport(
    int appIdInt, int appAttemptIdInt, long timestamp) {
  ApplicationId appId = ApplicationId.newInstance(timestamp, appIdInt);
  ApplicationAttemptId appAttemptId =
      ApplicationAttemptId.newInstance(appId, appAttemptIdInt);
  ApplicationReport appReport =
      ApplicationReport.newInstance(appId, appAttemptId, "user", "queue",
        "appname", "host", 124, null, YarnApplicationState.FINISHED,
        "diagnostics", "url", 0, 0, FinalApplicationStatus.SUCCEEDED, null,
        "N/A", 0.53789f, YarnConfiguration.DEFAULT_APPLICATION_TYPE, null);
  return appReport;
}
Project: hadoop    File: WebServices.java
protected static ApplicationAttemptId parseApplicationAttemptId(
    String appAttemptId) {
  if (appAttemptId == null || appAttemptId.isEmpty()) {
    throw new NotFoundException("appAttemptId, " + appAttemptId
        + ", is empty or null");
  }
  ApplicationAttemptId aaid =
      ConverterUtils.toApplicationAttemptId(appAttemptId);
  if (aaid == null) {
    throw new NotFoundException("appAttemptId is null");
  }
  return aaid;
}
Project: hadoop    File: BuilderUtils.java
public static ContainerId newContainerId(int appId, int appAttemptId,
    long timestamp, long id) {
  ApplicationId applicationId = newApplicationId(timestamp, appId);
  ApplicationAttemptId applicationAttemptId = newApplicationAttemptId(
      applicationId, appAttemptId);
  ContainerId cId = newContainerId(applicationAttemptId, id);
  return cId;
}
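BuilderUtils composes the whole id hierarchy in one call; walking back down it looks like this (a sketch reusing the values from the TestMRAppMaster snippet above):

ContainerId cId = BuilderUtils.newContainerId(4, 2, 1317529182569L, 1L);
ApplicationAttemptId attemptId = cId.getApplicationAttemptId();
// attemptId.toString() -> appattempt_1317529182569_0004_000002
ApplicationId appId = attemptId.getApplicationId();
// appId.toString() -> application_1317529182569_0004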
Project: hadoop    File: BuilderUtils.java
public static ApplicationReport newApplicationReport(
    ApplicationId applicationId, ApplicationAttemptId applicationAttemptId,
    String user, String queue, String name, String host, int rpcPort,
    Token clientToAMToken, YarnApplicationState state, String diagnostics,
    String url, long startTime, long finishTime,
    FinalApplicationStatus finalStatus,
    ApplicationResourceUsageReport appResources, String origTrackingUrl,
    float progress, String appType, Token amRmToken, Set<String> tags) {
  ApplicationReport report = recordFactory
      .newRecordInstance(ApplicationReport.class);
  report.setApplicationId(applicationId);
  report.setCurrentApplicationAttemptId(applicationAttemptId);
  report.setUser(user);
  report.setQueue(queue);
  report.setName(name);
  report.setHost(host);
  report.setRpcPort(rpcPort);
  report.setClientToAMToken(clientToAMToken);
  report.setYarnApplicationState(state);
  report.setDiagnostics(diagnostics);
  report.setTrackingUrl(url);
  report.setStartTime(startTime);
  report.setFinishTime(finishTime);
  report.setFinalApplicationStatus(finalStatus);
  report.setApplicationResourceUsageReport(appResources);
  report.setOriginalTrackingUrl(origTrackingUrl);
  report.setProgress(progress);
  report.setApplicationType(appType);
  report.setAMRMToken(amRmToken);
  report.setApplicationTags(tags);
  return report;
}
Project: hadoop    File: ResourceSchedulerWrapper.java
@Override
public Allocation allocate(ApplicationAttemptId attemptId,
                           List<ResourceRequest> resourceRequests,
                           List<ContainerId> containerIds,
                           List<String> strings, List<String> strings2) {
  if (metricsON) {
    final Timer.Context context = schedulerAllocateTimer.time();
    Allocation allocation = null;
    try {
      allocation = scheduler.allocate(attemptId, resourceRequests,
              containerIds, strings, strings2);
      return allocation;
    } finally {
      context.stop();
      schedulerAllocateCounter.inc();
      try {
        updateQueueWithAllocateRequest(allocation, attemptId,
                resourceRequests, containerIds);
      } catch (IOException e) {
        e.printStackTrace();
      }
    }
  } else {
    return scheduler.allocate(attemptId,
            resourceRequests, containerIds, strings, strings2);
  }
}
Project: hadoop    File: AppSchedulingInfo.java
public AppSchedulingInfo(ApplicationAttemptId appAttemptId,
    String user, Queue queue, ActiveUsersManager activeUsersManager,
    long epoch) {
  this.applicationAttemptId = appAttemptId;
  this.applicationId = appAttemptId.getApplicationId();
  this.queue = queue;
  this.queueName = queue.getQueueName();
  this.user = user;
  this.activeUsersManager = activeUsersManager;
  this.containerIdCounter = new AtomicLong(epoch << EPOCH_BIT_SHIFT);
}