Java Class org.apache.hadoop.yarn.api.records.ApplicationId Code Examples
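
The snippets below show real-world usage of the ApplicationId record. An ApplicationId pairs the ResourceManager's start time (the "cluster timestamp") with a monotonically increasing sequence number, and its canonical string form is application_<clusterTimestamp>_<sequenceNumber>. As a minimal sketch of the factory and accessors used throughout these examples (the class name ApplicationIdDemo is illustrative only):

import org.apache.hadoop.yarn.api.records.ApplicationId;

public class ApplicationIdDemo {
  public static void main(String[] args) {
    // clusterTimestamp: RM start time in ms; id: per-RM sequence number
    ApplicationId appId = ApplicationId.newInstance(1234567890000L, 1);
    System.out.println(appId);                       // application_1234567890000_0001
    System.out.println(appId.getClusterTimestamp()); // 1234567890000
    System.out.println(appId.getId());               // 1
  }
}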

Project: hadoop    File: TestJobImpl.java
private static StubbedJob createStubbedJob(Configuration conf,
    Dispatcher dispatcher, int numSplits, AppContext appContext) {
  JobID jobID = JobID.forName("job_1234567890000_0001");
  JobId jobId = TypeConverter.toYarn(jobID);
  if (appContext == null) {
    appContext = mock(AppContext.class);
    when(appContext.hasSuccessfullyUnregistered()).thenReturn(true);
  }
  StubbedJob job = new StubbedJob(jobId,
      ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 0), 0),
      conf, dispatcher.getEventHandler(), true, "somebody", numSplits, appContext);
  dispatcher.register(JobEventType.class, job);
  EventHandler mockHandler = mock(EventHandler.class);
  dispatcher.register(TaskEventType.class, mockHandler);
  dispatcher.register(org.apache.hadoop.mapreduce.jobhistory.EventType.class,
      mockHandler);
  dispatcher.register(JobFinishEvent.Type.class, mockHandler);
  return job;
}
Project: hadoop    File: WebAppProxyServlet.java
/**
 * Warn the user that the link may not be safe!
 * @param resp the http response
 * @param link the link to point to
 * @param user the user that owns the link.
 * @param id the id of the application that owns the link.
 * @throws IOException on any error.
 */
private static void warnUserPage(HttpServletResponse resp, String link, 
    String user, ApplicationId id) throws IOException {
  //Set the cookie when we warn which overrides the query parameter
  //This is so that if a user passes in the approved query parameter without
  //having first visited this page then this page will still be displayed 
  resp.addCookie(makeCheckCookie(id, false));
  resp.setContentType(MimeType.HTML);
  Page p = new Page(resp.getWriter());
  p.html().
    h1("WARNING: The following page may not be safe!").
    h3().
    _("click ").a(link, "here").
    _(" to continue to an Application Master web interface owned by ", user).
    _().
  _();
}
Project: hadoop    File: ApplicationFinishedEvent.java
public ApplicationFinishedEvent(
    ApplicationId appId,
    String diagnosticsInfo,
    FinalApplicationStatus appStatus,
    YarnApplicationState state,
    ApplicationAttemptId latestAppAttemptId,
    long finishedTime,
    RMAppMetrics appMetrics) {
  super(SystemMetricsEventType.APP_FINISHED, finishedTime);
  this.appId = appId;
  this.diagnosticsInfo = diagnosticsInfo;
  this.appStatus = appStatus;
  this.latestAppAttemptId = latestAppAttemptId;
  this.state = state;
  this.appMetrics = appMetrics;
}
Project: hadoop    File: LeveldbRMStateStore.java
@Override
protected void removeApplicationStateInternal(ApplicationStateData appState)
    throws IOException {
  ApplicationId appId =
      appState.getApplicationSubmissionContext().getApplicationId();
  String appKey = getApplicationNodeKey(appId);
  try {
    WriteBatch batch = db.createWriteBatch();
    try {
      batch.delete(bytes(appKey));
      for (ApplicationAttemptId attemptId : appState.attempts.keySet()) {
        String attemptKey = getApplicationAttemptNodeKey(appKey, attemptId);
        batch.delete(bytes(attemptKey));
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug("Removing state for app " + appId + " and "
            + appState.attempts.size() + " attempts" + " at " + appKey);
      }
      db.write(batch);
    } finally {
      batch.close();
    }
  } catch (DBException e) {
    throw new IOException(e);
  }
}
Project: hadoop    File: SharedCacheClientImpl.java
@Override
public void release(ApplicationId applicationId, String resourceKey)
    throws YarnException {
  ReleaseSharedCacheResourceRequest request = Records.newRecord(
      ReleaseSharedCacheResourceRequest.class);
  request.setAppId(applicationId);
  request.setResourceKey(resourceKey);
  try {
    // We do not care about the response because it is empty.
    this.scmClient.release(request);
  } catch (Exception e) {
    // Just catching IOException isn't enough.
    // RPC call can throw ConnectionException.
    throw new YarnException(e);
  }
}
Project: hadoop    File: TestInMemorySCMStore.java
private Map<String, String> startStoreWithResources() throws Exception {
  Map<String, String> initialCachedResources = new HashMap<String, String>();
  int count = 10;
  for (int i = 0; i < count; i++) {
    String key = String.valueOf(i);
    String fileName = key + ".jar";
    initialCachedResources.put(key, fileName);
  }
  doReturn(new ArrayList<ApplicationId>()).when(checker)
      .getActiveApplications();
  doReturn(initialCachedResources).when(store).getInitialCachedResources(
      isA(FileSystem.class), isA(Configuration.class));
  this.store.init(new Configuration());
  this.store.start();
  return initialCachedResources;
}
Project: hadoop    File: TestStagingCleanup.java
@Test (timeout = 30000)
public void testNoDeletionofStagingOnReboot() throws IOException {
  conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, stagingJobDir);
  fs = mock(FileSystem.class);
  when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true);
  String user = UserGroupInformation.getCurrentUser().getShortUserName();
  Path stagingDir = MRApps.getStagingAreaDir(conf, user);
  when(fs.exists(stagingDir)).thenReturn(true);
  ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(),
      0);
  ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
  ContainerAllocator mockAlloc = mock(ContainerAllocator.class);
  Assert.assertTrue(MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS > 1);
  MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc,
      JobStateInternal.REBOOT, MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS);
  appMaster.init(conf);
  appMaster.start();
  //shutdown the job, not the lastRetry
  appMaster.shutDownJob();
  //test whether notifyIsLastAMRetry called
  Assert.assertEquals(false, ((TestMRApp)appMaster).getTestIsLastAMRetry());
  verify(fs, times(0)).delete(stagingJobPath, true);
}
Project: hadoop    File: AllApplicationsPage.java
@Override
protected void render(Block html) {

  TBODY<TABLE<BODY<Hamlet>>> tableBody =
    html
      .body()
        .table("#applications")
          .thead()
            .tr()
              .td()._("ApplicationId")._()
              .td()._("ApplicationState")._()
            ._()
          ._()
        .tbody();
  for (Entry<ApplicationId, Application> entry : this.nmContext
      .getApplications().entrySet()) {
    AppInfo info = new AppInfo(entry.getValue());
    tableBody
      .tr()
        .td().a(url("application", info.getId()), info.getId())._()
        .td()._(info.getState())
        ._()
      ._();
  }
  tableBody._()._()._();
}
Project: hadoop    File: TestSubmitApplicationWithRMHA.java
@Test
public void
    testHandleRMHABeforeSubmitApplicationCallWithSavedApplicationState()
        throws Exception {
  // start two RMs, and transition rm1 to active, rm2 to standby
  startRMs();

  // get a new applicationId from rm1
  ApplicationId appId = rm1.getNewAppId().getApplicationId();

  // Do the failover
  explicitFailover();

  // submit the application with the previously assigned applicationId
  // to the current active RM: rm2
  RMApp app1 =
      rm2.submitApp(200, "", UserGroupInformation
          .getCurrentUser().getShortUserName(), null, false, null,
          configuration.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
              YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS), null, null,
          false, false, true, appId);

  // verify application submission
  verifySubmitApp(rm2, app1, appId);
}
Project: hadoop    File: TestApplicationHistoryClientService.java
@Test
public void testContainerReport() throws IOException, YarnException {
  ApplicationId appId = ApplicationId.newInstance(0, 1);
  ApplicationAttemptId appAttemptId =
      ApplicationAttemptId.newInstance(appId, 1);
  ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1);
  GetContainerReportRequest request =
      GetContainerReportRequest.newInstance(containerId);
  GetContainerReportResponse response =
      clientService.getContainerReport(request);
  ContainerReport container = response.getContainerReport();
  Assert.assertNotNull(container);
  Assert.assertEquals(containerId, container.getContainerId());
  Assert.assertEquals("http://0.0.0.0:8188/applicationhistory/logs/" +
      "test host:100/container_0_0001_01_000001/" +
      "container_0_0001_01_000001/user1", container.getLogUrl());
}
Project: scheduling-connector-for-hadoop    File: SlurmApplicationClient.java
@SuppressWarnings("deprecation")
@Override
public void submitApplication(ApplicationSubmissionContext context)
    throws IOException {
  int waitingTime = conf.getInt(
      HPCConfiguration.YARN_APPLICATION_HPC_CLIENT_RS_MAX_WAIT_MS,
      HPCConfiguration.DEFAULT_YARN_APPLICATION_HPC_CLIENT_RS_MAX_WAIT_MS);
  int noOfTimes = conf.getInt(
      HPCConfiguration.YARN_APPLICATION_HPC_CLIENT_RS_RETRIES_MAX,
      HPCConfiguration.DEFAULT_YARN_APPLICATION_HPC_CLIENT_RS_RETRIES_MAX);
  ApplicationId applicationId = context.getApplicationId();
  String nodeName = checkAndWaitForResourcesToBeAvailable(
      applicationId, waitingTime, noOfTimes);

  HPCCommandExecutor.launchContainer(
      context.getAMContainerSpec(),
      ContainerId.newInstance(
          ApplicationAttemptId.newInstance(applicationId, 1), 1).toString(),
      context.getApplicationName(), conf, applicationId.getId(), nodeName);
  HPCCommandExecutor.setJobState(applicationId.getId(), "running::0", conf);
}
Project: hadoop    File: TestJobImpl.java
private boolean testUberDecision(Configuration conf) {
  JobID jobID = JobID.forName("job_1234567890000_0001");
  JobId jobId = TypeConverter.toYarn(jobID);
  MRAppMetrics mrAppMetrics = MRAppMetrics.create();
  JobImpl job =
      new JobImpl(jobId, ApplicationAttemptId.newInstance(
        ApplicationId.newInstance(0, 0), 0), conf, mock(EventHandler.class),
        null, new JobTokenSecretManager(), new Credentials(), null, null,
        mrAppMetrics, null, true, null, 0, null, null, null, null);
  InitTransition initTransition = getInitTransition(2);
  JobEvent mockJobEvent = mock(JobEvent.class);
  initTransition.transition(job, mockJobEvent);
  boolean isUber = job.isUber();
  return isUber;
}
Project: hadoop    File: ConverterUtils.java
public static ApplicationId toApplicationId(
    String appIdStr) {
  Iterator<String> it = _split(appIdStr).iterator();
  if (!it.next().equals(APPLICATION_PREFIX)) {
    throw new IllegalArgumentException("Invalid ApplicationId prefix: "
        + appIdStr + ". The valid ApplicationId should start with prefix "
        + APPLICATION_PREFIX);
  }
  try {
    return toApplicationId(it);
  } catch (NumberFormatException n) {
    throw new IllegalArgumentException("Invalid ApplicationId: "
        + appIdStr, n);
  } catch (NoSuchElementException e) {
    throw new IllegalArgumentException("Invalid ApplicationId: "
        + appIdStr, e);
  }
}
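
The parser above consumes the canonical application_<clusterTimestamp>_<sequenceNumber> form and throws IllegalArgumentException on anything else. A round-trip sketch against this ConverterUtils API (later Hadoop versions expose the same parsing as ApplicationId.fromString; the class name ParseDemo is illustrative only):

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.util.ConverterUtils;

public class ParseDemo {
  public static void main(String[] args) {
    ApplicationId appId =
        ConverterUtils.toApplicationId("application_1234567890000_0001");
    System.out.println(appId.getClusterTimestamp()); // 1234567890000
    System.out.println(appId.getId());               // 1
    // The canonical form round-trips: toString() rebuilds the input
    System.out.println(appId);                       // application_1234567890000_0001
  }
}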
Project: scheduling-connector-for-hadoop    File: PBSApplicationClient.java
@Override
public NewApplicationResponse getNewApplication() throws IOException {
  NewApplicationResponse response = new NewApplicationResponse();

  int priority = conf.getInt(YARN_APPLICATION_HPC_AM_PRIORITY,
      DEFAULT_YARN_APPLICATION_HPC_AM_PRIORITY);
  int amMemory = conf.getInt(YARN_APPLICATION_HPC_AM_RESOURCE_MEMORY_MB,
      DEFAULT_YARN_APPLICATION_HPC_AM_RESOURCE_MEMORY_MB);
  int cpus = conf.getInt(YARN_APPLICATION_HPC_AM_RESOURCE_CPU_VCORES,
      DEFAULT_YARN_APPLICATION_HPC_AM_RESOURCE_CPU_VCORES);

  SocketWrapper socket = SocketFactory.createSocket();
  String hostName = socket.getHostName();
  int port = socket.getPort();
  int jobid = PBSCommandExecutor.submitAndGetPBSJobId(conf, priority,
      amMemory, cpus, hostName, port);
  SocketCache.addSocket(jobid, socket);

  ApplicationId applicationId = ApplicationId.newInstance(
      getClusterTimestamp(), jobid);
  response.setApplicationId(applicationId);
  response.setMaxCapability(getMaxCapability());
  response.setMinCapability(getMinCapability());
  return response;
}
Project: hadoop    File: BaseContainerManagerTest.java
static void waitForApplicationState(ContainerManagerImpl containerManager,
    ApplicationId appID, ApplicationState finalState)
    throws InterruptedException {
  // Wait for app-finish
  Application app =
      containerManager.getContext().getApplications().get(appID);
  int timeout = 0;
  while (!(app.getApplicationState().equals(finalState))
      && timeout++ < 15) {
    LOG.info("Waiting for app to reach " + finalState
        + ".. Current state is "
        + app.getApplicationState());
    Thread.sleep(1000);
  }

  Assert.assertTrue("App is not in " + finalState + " yet!! Timedout!!",
      app.getApplicationState().equals(finalState));
}
Project: hadoop    File: RMWebServices.java
@GET
@Path("/apps/{appid}")
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
public AppInfo getApp(@Context HttpServletRequest hsr,
    @PathParam("appid") String appId) {
  init();
  if (appId == null || appId.isEmpty()) {
    throw new NotFoundException("appId, " + appId + ", is empty or null");
  }
  ApplicationId id;
  id = ConverterUtils.toApplicationId(recordFactory, appId);
  if (id == null) {
    throw new NotFoundException("appId is null");
  }
  RMApp app = rm.getRMContext().getRMApps().get(id);
  if (app == null) {
    throw new NotFoundException("app with id: " + appId + " not found");
  }
  return new AppInfo(rm, app, hasAccess(app, hsr), hsr.getScheme() + "://");
}
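
Since RMWebServices is rooted at the ResourceManager's /ws/v1/cluster path, the {appid} path parameter is the canonical ApplicationId string; for example, GET http://<rm-host>:8088/ws/v1/cluster/apps/application_1234567890000_0001 returns the report for that application (assuming the default RM web port 8088).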
Project: hadoop    File: ActiveUsersManager.java
/**
 * An application has no more outstanding requests.
 * 
 * @param user application user 
 * @param applicationId deactivated application
 */
@Lock({Queue.class, SchedulerApplicationAttempt.class})
synchronized public void deactivateApplication(
    String user, ApplicationId applicationId) {
  Set<ApplicationId> userApps = usersApplications.get(user);
  if (userApps != null) {
    if (userApps.remove(applicationId)) {
      metrics.deactivateApp(user);
    }
    if (userApps.isEmpty()) {
      usersApplications.remove(user);
      --activeUsers;
      metrics.decrActiveUsers();
      LOG.debug("User " + user + " removed from activeUsers, currently: " + 
          activeUsers);
    }
  }
}
Project: hadoop    File: MemoryApplicationHistoryStore.java
@Override
public Map<ApplicationAttemptId, ApplicationAttemptHistoryData>
    getApplicationAttempts(ApplicationId appId) {
  ConcurrentMap<ApplicationAttemptId, ApplicationAttemptHistoryData> subMap =
      applicationAttemptData.get(appId);
  if (subMap == null) {
    return Collections
      .<ApplicationAttemptId, ApplicationAttemptHistoryData> emptyMap();
  } else {
    return new HashMap<ApplicationAttemptId, ApplicationAttemptHistoryData>(
      subMap);
  }
}
Project: hadoop    File: RMAppManager.java
protected void recoverApplication(ApplicationStateData appState,
    RMState rmState) throws Exception {
  ApplicationSubmissionContext appContext =
      appState.getApplicationSubmissionContext();
  ApplicationId appId = appContext.getApplicationId();

  // create and recover app.
  RMAppImpl application =
      createAndPopulateNewRMApp(appContext, appState.getSubmitTime(),
          appState.getUser(), true);

  application.handle(new RMAppRecoverEvent(appId, rmState));
}
Project: hadoop    File: TestFifoScheduler.java
@Test
public void testNodeUpdateBeforeAppAttemptInit() throws Exception {
  FifoScheduler scheduler = new FifoScheduler();
  MockRM rm = new MockRM(conf);
  scheduler.setRMContext(rm.getRMContext());
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, rm.getRMContext());

  RMNode node = MockNodes.newNodeInfo(1,
          Resources.createResource(1024, 4, 4), 1, "127.0.0.1");
  scheduler.handle(new NodeAddedSchedulerEvent(node));

  ApplicationId appId = ApplicationId.newInstance(0, 1);
  scheduler.addApplication(appId, "queue1", "user1", false);

  NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node);
  try {
    scheduler.handle(updateEvent);
  } catch (NullPointerException e) {
    Assert.fail();
  }

  ApplicationAttemptId attId = ApplicationAttemptId.newInstance(appId, 1);
  scheduler.addApplicationAttempt(attId, false, false);

  rm.stop();
}
Project: hadoop    File: TestProportionalCapacityPreemptionPolicy.java
FiCaSchedulerApp mockApp(int qid, int id, int used, int pending, int reserved,
    int gran) {
  FiCaSchedulerApp app = mock(FiCaSchedulerApp.class);

  ApplicationId appId = ApplicationId.newInstance(TS, id);
  ApplicationAttemptId appAttId = ApplicationAttemptId.newInstance(appId, 0);
  when(app.getApplicationId()).thenReturn(appId);
  when(app.getApplicationAttemptId()).thenReturn(appAttId);

  int cAlloc = 0;
  Resource unit = Resource.newInstance(gran, 0, 0);
  List<RMContainer> cReserved = new ArrayList<RMContainer>();
  for (int i = 0; i < reserved; i += gran) {
    cReserved.add(mockContainer(appAttId, cAlloc, unit, priority.CONTAINER
        .getValue()));
    ++cAlloc;
  }
  when(app.getReservedContainers()).thenReturn(cReserved);

  List<RMContainer> cLive = new ArrayList<RMContainer>();
  for (int i = 0; i < used; i += gran) {
    if (setAMContainer && i == 0) {
      cLive.add(mockContainer(appAttId, cAlloc, unit, priority.AMCONTAINER
          .getValue()));
    } else if (setLabeledContainer && i == 1) {
      cLive.add(mockContainer(appAttId, cAlloc, unit,
          priority.LABELEDCONTAINER.getValue()));
      ++used;
    } else {
      cLive.add(mockContainer(appAttId, cAlloc, unit, priority.CONTAINER
          .getValue()));
    }
    ++cAlloc;
  }
  when(app.getLiveContainers()).thenReturn(cLive);
  return app;
}
Project: hadoop    File: TestApplicationACLsManager.java
@Test
public void testCheckAccessWithNullACLS() {
  Configuration conf = new Configuration();
  conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE,
      true);
  conf.set(YarnConfiguration.YARN_ADMIN_ACL,
      ADMIN_USER);
  ApplicationACLsManager aclManager = new ApplicationACLsManager(conf);
  UserGroupInformation appOwner = UserGroupInformation
      .createRemoteUser(APP_OWNER);
  ApplicationId appId = ApplicationId.newInstance(1, 1);
  //Application ACL is not added

  //Application Owner should have all access even if Application ACL is not added
  assertTrue(aclManager.checkAccess(appOwner, ApplicationAccessType.MODIFY_APP, 
      APP_OWNER, appId));
  assertTrue(aclManager.checkAccess(appOwner, ApplicationAccessType.VIEW_APP, 
      APP_OWNER, appId));

  //Admin should have all access
  UserGroupInformation adminUser = UserGroupInformation
      .createRemoteUser(ADMIN_USER);
  assertTrue(aclManager.checkAccess(adminUser, ApplicationAccessType.VIEW_APP, 
      APP_OWNER, appId));
  assertTrue(aclManager.checkAccess(adminUser, ApplicationAccessType.MODIFY_APP, 
      APP_OWNER, appId));

  // A regular user should not have access
  UserGroupInformation testUser1 = UserGroupInformation
      .createRemoteUser(TESTUSER1);
  assertFalse(aclManager.checkAccess(testUser1, ApplicationAccessType.VIEW_APP, 
      APP_OWNER, appId));
  assertFalse(aclManager.checkAccess(testUser1, ApplicationAccessType.MODIFY_APP, 
      APP_OWNER, appId));
}
Project: hadoop    File: ParentQueue.java
@Override
public void submitApplication(ApplicationId applicationId, String user,
    String queue) throws AccessControlException {

  synchronized (this) {
    // Sanity check
    if (queue.equals(queueName)) {
      throw new AccessControlException("Cannot submit application " +
          "to non-leaf queue: " + queueName);
    }

    if (state != QueueState.RUNNING) {
      throw new AccessControlException("Queue " + getQueuePath() +
          " is STOPPED. Cannot accept submission of application: " +
          applicationId);
    }

    addApplication(applicationId, user);
  }

  // Inform the parent queue
  if (parent != null) {
    try {
      parent.submitApplication(applicationId, user, queue);
    } catch (AccessControlException ace) {
      LOG.info("Failed to submit application to parent-queue: " + 
          parent.getQueuePath(), ace);
      removeApplication(applicationId, user);
      throw ace;
    }
  }
}
Project: hadoop    File: UnmanagedAMLauncher.java
private ApplicationAttemptReport monitorCurrentAppAttempt(
    ApplicationId appId, YarnApplicationAttemptState attemptState)
    throws YarnException, IOException {
  long startTime = System.currentTimeMillis();
  ApplicationAttemptId attemptId = null;
  while (true) {
    if (attemptId == null) {
      attemptId =
          rmClient.getApplicationReport(appId)
            .getCurrentApplicationAttemptId();
    }
    ApplicationAttemptReport attemptReport = null;
    if (attemptId != null) {
      attemptReport = rmClient.getApplicationAttemptReport(attemptId);
      if (attemptState.equals(attemptReport.getYarnApplicationAttemptState())) {
        return attemptReport;
      }
    }
    LOG.info("Current attempt state of " + appId + " is " + (attemptReport == null
          ? " N/A " : attemptReport.getYarnApplicationAttemptState())
              + ", waiting for current attempt to reach " + attemptState);
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      LOG.warn("Interrupted while waiting for current attempt of " + appId
          + " to reach " + attemptState);
    }
    if (System.currentTimeMillis() - startTime > AM_STATE_WAIT_TIMEOUT_MS) {
      String errmsg =
          "Timeout for waiting current attempt of " + appId + " to reach "
              + attemptState;
      LOG.error(errmsg);
      throw new RuntimeException(errmsg);
    }
  }
}
Project: hadoop    File: NMMemoryStateStoreService.java
@Override
public synchronized void finishResourceLocalization(String user,
    ApplicationId appId, LocalizedResourceProto proto) {
  TrackerState ts = getTrackerState(new TrackerKey(user, appId));
  Path localPath = new Path(proto.getLocalPath());
  ts.inProgressMap.remove(localPath);
  ts.localizedResources.put(localPath, proto);
}
Project: hadoop    File: TestClientRMService.java
@Test (expected = ApplicationNotFoundException.class)
public void testMoveAbsentApplication() throws YarnException {
  RMContext rmContext = mock(RMContext.class);
  when(rmContext.getRMApps()).thenReturn(
      new ConcurrentHashMap<ApplicationId, RMApp>());
  ClientRMService rmService = new ClientRMService(rmContext, null, null,
      null, null, null);
  ApplicationId applicationId =
      BuilderUtils.newApplicationId(System.currentTimeMillis(), 0);
  MoveApplicationAcrossQueuesRequest request =
      MoveApplicationAcrossQueuesRequest.newInstance(applicationId, "newqueue");
  rmService.moveApplicationAcrossQueues(request);
}
Project: hadoop    File: TestApplicationHistoryManagerOnTimelineStore.java
@Test
public void testGetAMContainer() throws Exception {
  final ApplicationAttemptId appAttemptId =
      ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 1), 1);
  ContainerReport container;
  if (callerUGI == null) {
    container = historyManager.getAMContainer(appAttemptId);
  } else {
    try {
      container =
          callerUGI.doAs(new PrivilegedExceptionAction<ContainerReport> () {
        @Override
        public ContainerReport run() throws Exception {
          return historyManager.getAMContainer(appAttemptId);
        }
      });
      if (callerUGI != null && callerUGI.getShortUserName().equals("user3")) {
        // The exception is expected
        Assert.fail();
      }
    } catch (AuthorizationException e) {
      if (callerUGI != null && callerUGI.getShortUserName().equals("user3")) {
        // The exception is expected
        return;
      }
      throw e;
    }
  }
  Assert.assertNotNull(container);
  Assert.assertEquals(appAttemptId, container.getContainerId()
      .getApplicationAttemptId());
}
Project: hadoop    File: LogHandlerAppStartedEvent.java
public LogHandlerAppStartedEvent(ApplicationId appId, String user,
    Credentials credentials, ContainerLogsRetentionPolicy retentionPolicy,
    Map<ApplicationAccessType, String> appAcls,
    LogAggregationContext logAggregationContext) {
  super(LogHandlerEventType.APPLICATION_STARTED);
  this.applicationId = appId;
  this.user = user;
  this.credentials = credentials;
  this.retentionPolicy = retentionPolicy;
  this.appAcls = appAcls;
  this.logAggregationContext = logAggregationContext;
}
Project: hadoop    File: MockRM.java
public ApplicationReport getApplicationReport(ApplicationId appId)
    throws YarnException, IOException {
  ApplicationClientProtocol client = getClientRMService();
  GetApplicationReportResponse response =
      client.getApplicationReport(GetApplicationReportRequest
          .newInstance(appId));
  return response.getApplicationReport();
}
Project: hadoop    File: BuilderUtils.java
public static ContainerId newContainerId(int appId, int appAttemptId,
    long timestamp, long id) {
  ApplicationId applicationId = newApplicationId(timestamp, appId);
  ApplicationAttemptId applicationAttemptId = newApplicationAttemptId(
      applicationId, appAttemptId);
  ContainerId cId = newContainerId(applicationAttemptId, id);
  return cId;
}
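
BuilderUtils composes the full YARN ID hierarchy: an ApplicationId identifies the application, an ApplicationAttemptId one AM attempt of it, and a ContainerId one container within that attempt. A sketch using the public record factories that BuilderUtils wraps (the class name IdHierarchyDemo is illustrative only):

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;

public class IdHierarchyDemo {
  public static void main(String[] args) {
    ApplicationId appId = ApplicationId.newInstance(1234567890000L, 5);
    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
    ContainerId containerId = ContainerId.newContainerId(attemptId, 1);
    System.out.println(appId);       // application_1234567890000_0005
    System.out.println(attemptId);   // appattempt_1234567890000_0005_000001
    System.out.println(containerId); // container_1234567890000_0005_01_000001
  }
}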
Project: hadoop    File: TestYarnClient.java
@Test(timeout = 10000)
public void testGetContainerReport() throws YarnException, IOException {
  Configuration conf = new Configuration();
  conf.setBoolean(YarnConfiguration.APPLICATION_HISTORY_ENABLED,
      true);
  final YarnClient client = new MockYarnClient();
  client.init(conf);
  client.start();

  List<ApplicationReport> expectedReports = ((MockYarnClient) client)
      .getReports();

  ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(
      applicationId, 1);
  ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1);
  ContainerReport report = client.getContainerReport(containerId);
  Assert.assertNotNull(report);
  Assert.assertEquals(report.getContainerId().toString(),
      (ContainerId.newContainerId(expectedReports.get(0)
          .getCurrentApplicationAttemptId(), 1)).toString());
  containerId = ContainerId.newContainerId(appAttemptId, 3);
  report = client.getContainerReport(containerId);
  Assert.assertNotNull(report);
  Assert.assertEquals(report.getContainerId().toString(),
      (ContainerId.newContainerId(expectedReports.get(0)
          .getCurrentApplicationAttemptId(), 3)).toString());
  client.stop();
}
Project: spydra    File: HistoryLogUtils.java
/**
 * Dumps the full job logs for a particular application to stdout
 *
 * @param applicationId application to dump logs for
 */
public static void dumpFullLogs(Configuration cfg, ApplicationId applicationId) {
  LogCLIHelpers logCLIHelpers = new LogCLIHelpers();
  // TODO: Add the proper base dir settings etc...

  logCLIHelpers.setConf(cfg);
  try {
    logCLIHelpers.dumpAllContainersLogs(applicationId,
        cfg.get(SPYDRA_HISTORY_USERNAME_PROPERTY), System.out);
  } catch (IOException e) {
    logger.error("Failed dumping log files for application " + applicationId.toString(), e);
  }
}
Project: hadoop    File: NodeHeartbeatResponsePBImpl.java
private void initSystemCredentials() {
  NodeHeartbeatResponseProtoOrBuilder p = viaProto ? proto : builder;
  List<SystemCredentialsForAppsProto> list = p.getSystemCredentialsForAppsList();
  this.systemCredentials = new HashMap<ApplicationId, ByteBuffer> ();
  for (SystemCredentialsForAppsProto c : list) {
    ApplicationId appId = convertFromProtoFormat(c.getAppId());
    ByteBuffer byteBuffer = ProtoUtils.convertFromProtoFormat(c.getCredentialsForApp());
    this.systemCredentials.put(appId, byteBuffer);
  }
}
Project: hadoop    File: TestRecovery.java
private MapTaskImpl getMockMapTask(long clusterTimestamp, EventHandler eh) {

  ApplicationId appId = ApplicationId.newInstance(clusterTimestamp, 1);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);

  int partitions = 2;

  Path remoteJobConfFile = mock(Path.class);
  JobConf conf = new JobConf();
  TaskAttemptListener taskAttemptListener = mock(TaskAttemptListener.class);
  Token<JobTokenIdentifier> jobToken =
      (Token<JobTokenIdentifier>) mock(Token.class);
  Credentials credentials = null;
  Clock clock = new SystemClock();
  int appAttemptId = 3;
  MRAppMetrics metrics = mock(MRAppMetrics.class);
  Resource minContainerRequirements = mock(Resource.class);
  when(minContainerRequirements.getMemory()).thenReturn(1000);

  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  AppContext appContext = mock(AppContext.class);
  when(appContext.getClusterInfo()).thenReturn(clusterInfo);

  TaskSplitMetaInfo taskSplitMetaInfo = mock(TaskSplitMetaInfo.class);
  MapTaskImpl mapTask = new MapTaskImpl(jobId, partitions,
      eh, remoteJobConfFile, conf,
      taskSplitMetaInfo, taskAttemptListener, jobToken, credentials, clock,
      appAttemptId, metrics, appContext);
  return mapTask;
}
Project: hadoop    File: RegisterNodeManagerRequestPBImpl.java
private void initRunningApplications() {
  if (this.runningApplications != null) {
    return;
  }
  RegisterNodeManagerRequestProtoOrBuilder p = viaProto ? proto : builder;
  List<ApplicationIdProto> list = p.getRunningApplicationsList();
  this.runningApplications = new ArrayList<ApplicationId>();
  for (ApplicationIdProto c : list) {
    this.runningApplications.add(convertFromProtoFormat(c));
  }
}
Project: hadoop    File: ConverterUtils.java
private static ApplicationAttemptId toApplicationAttemptId(
    Iterator<String> it) throws NumberFormatException {
  ApplicationId appId = ApplicationId.newInstance(Long.parseLong(it.next()),
      Integer.parseInt(it.next()));
  ApplicationAttemptId appAttemptId =
      ApplicationAttemptId.newInstance(appId, Integer.parseInt(it.next()));
  return appAttemptId;
}
Project: hadoop    File: TestYarnCLI.java
@Test
public void testGetContainerReport() throws Exception {
  ApplicationCLI cli = createAndGetAppCLI();
  ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
  ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(
      applicationId, 1);
  ContainerId containerId = ContainerId.newContainerId(attemptId, 1);
  ContainerReport container = ContainerReport.newInstance(containerId, null,
      NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234, 5678,
      "diagnosticInfo", "logURL", 0, ContainerState.COMPLETE,
      "http://" + NodeId.newInstance("host", 2345).toString());
  when(client.getContainerReport(any(ContainerId.class))).thenReturn(
      container);
  int result = cli.run(new String[] { "container", "-status",
      containerId.toString() });
  assertEquals(0, result);
  verify(client).getContainerReport(containerId);
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  PrintWriter pw = new PrintWriter(baos);
  pw.println("Container Report : ");
  pw.println("\tContainer-Id : container_1234_0005_01_000001");
  pw.println("\tStart-Time : 1234");
  pw.println("\tFinish-Time : 5678");
  pw.println("\tState : COMPLETE");
  pw.println("\tLOG-URL : logURL");
  pw.println("\tHost : host:1234");
  pw.println("\tNodeHttpAddress : http://host:2345");
  pw.println("\tDiagnostics : diagnosticInfo");
  pw.close();
  String appReportStr = baos.toString("UTF-8");
  Assert.assertEquals(appReportStr, sysOutStream.toString());
  verify(sysOut, times(1)).println(isA(String.class));
}
Project: hadoop    File: ApplicationImpl.java
public ApplicationImpl(Dispatcher dispatcher, String user, ApplicationId appId,
    Credentials credentials, Context context) {
  this.dispatcher = dispatcher;
  this.user = user;
  this.appId = appId;
  this.credentials = credentials;
  this.aclsManager = context.getApplicationACLsManager();
  this.context = context;
  ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  readLock = lock.readLock();
  writeLock = lock.writeLock();
  stateMachine = stateMachineFactory.make(this);
}
Project: hadoop    File: SystemMetricsPublisher.java
private static TimelineEntity createApplicationEntity(
    ApplicationId applicationId) {
  TimelineEntity entity = new TimelineEntity();
  entity.setEntityType(ApplicationMetricsConstants.ENTITY_TYPE);
  entity.setEntityId(applicationId.toString());
  return entity;
}
Project: hadoop    File: TestLogsCLI.java
@Test(timeout = 5000L)
public void testUnknownApplicationId() throws Exception {
  Configuration conf = new YarnConfiguration();
  YarnClient mockYarnClient = createMockYarnClientUnknownApp();
  LogsCLI cli = new LogsCLIForTest(mockYarnClient);
  cli.setConf(conf);

  int exitCode = cli.run(new String[] { "-applicationId",
      ApplicationId.newInstance(1, 1).toString() });

  // Error since no logs present for the app.
  assertTrue(exitCode != 0);
  assertTrue(sysErrStream.toString().startsWith(
      "Unable to get ApplicationState"));
}