Example source code for the Java class org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext

Project: AthenaX    File: AthenaXYarnClusterDescriptor.java
@Override
public YarnClusterClient deploy() {
  ApplicationSubmissionContext context = Records.newRecord(ApplicationSubmissionContext.class);
  context.setApplicationId(job.yarnAppId());
  ApplicationReport report;
  try {
    report = startAppMaster(context);

    Configuration conf = getFlinkConfiguration();
    conf.setString(JobManagerOptions.ADDRESS.key(), report.getHost());
    conf.setInteger(JobManagerOptions.PORT.key(), report.getRpcPort());

    return createYarnClusterClient(this, yarnClient, report, conf, false);
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
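
For orientation, here is a minimal, self-contained sketch (not taken from any of the projects listed on this page) of the usual client-side flow: obtain an ApplicationSubmissionContext from YarnClient, fill it in, and submit it. The application name, queue, launch command, and resource sizes are illustrative placeholders.

import java.util.Collections;

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.client.api.YarnClientApplication;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.util.Records;

public class MinimalSubmitExample {
  public static ApplicationId submit() throws Exception {
    // Create and start a client against the cluster configuration on the classpath.
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(new YarnConfiguration());
    yarnClient.start();

    // Ask the ResourceManager for a new application and fill in its submission context.
    YarnClientApplication app = yarnClient.createApplication();
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    appContext.setApplicationName("example-app");   // placeholder name
    appContext.setQueue("default");

    // AM container spec: the command is a placeholder, not a real ApplicationMaster.
    ContainerLaunchContext amSpec = Records.newRecord(ContainerLaunchContext.class);
    amSpec.setCommands(Collections.singletonList("echo launching AM"));
    appContext.setAMContainerSpec(amSpec);
    appContext.setResource(Resource.newInstance(1024, 1));

    return yarnClient.submitApplication(appContext);
  }
}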
Project: big_data    File: YARNRunner.java
@Override
public JobStatus submitJob(JobID jobId, String jobSubmitDir, Credentials ts)
        throws IOException, InterruptedException {

    addHistoryToken(ts);

    // Construct necessary information to start the MR AM
    ApplicationSubmissionContext appContext = createApplicationSubmissionContext(conf, jobSubmitDir, ts);

    // Submit to ResourceManager
    try {
        ApplicationId applicationId = resMgrDelegate.submitApplication(appContext);

        ApplicationReport appMaster = resMgrDelegate.getApplicationReport(applicationId);
        String diagnostics = (appMaster == null ? "application report is null" : appMaster.getDiagnostics());
        if (appMaster == null || appMaster.getYarnApplicationState() == YarnApplicationState.FAILED
                || appMaster.getYarnApplicationState() == YarnApplicationState.KILLED) {
            throw new IOException("Failed to run job : " + diagnostics);
        }
        return clientCache.getClient(jobId).getJobStatus(jobId);
    } catch (YarnException e) {
        throw new IOException(e);
    }
}
Project: hadoop    File: BuilderUtils.java
public static ApplicationSubmissionContext newApplicationSubmissionContext(
    ApplicationId applicationId, String applicationName, String queue,
    Priority priority, ContainerLaunchContext amContainer,
    boolean isUnmanagedAM, boolean cancelTokensWhenComplete,
    int maxAppAttempts, Resource resource, String applicationType) {
  ApplicationSubmissionContext context =
      recordFactory.newRecordInstance(ApplicationSubmissionContext.class);
  context.setApplicationId(applicationId);
  context.setApplicationName(applicationName);
  context.setQueue(queue);
  context.setPriority(priority);
  context.setAMContainerSpec(amContainer);
  context.setUnmanagedAM(isUnmanagedAM);
  context.setCancelTokensWhenComplete(cancelTokensWhenComplete);
  context.setMaxAppAttempts(maxAppAttempts);
  context.setResource(resource);
  context.setApplicationType(applicationType);
  return context;
}
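
Application code outside the ResourceManager does not normally go through this internal RecordFactory helper; the same record can be built with the public static newInstance factory on ApplicationSubmissionContext itself. A sketch with placeholder values, where applicationId and amContainer are assumed to exist already:

ApplicationSubmissionContext context = ApplicationSubmissionContext.newInstance(
    applicationId, "example-app", "default",   // id, name, queue (placeholders)
    Priority.newInstance(0),
    amContainer,                               // ContainerLaunchContext for the AM
    false,                                     // isUnmanagedAM
    true,                                      // cancelTokensWhenComplete
    2,                                         // maxAppAttempts
    Resource.newInstance(1024, 1),
    "YARN");                                   // applicationType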
Project: hadoop    File: AMLauncher.java
private ContainerLaunchContext createAMContainerLaunchContext(
    ApplicationSubmissionContext applicationMasterContext,
    ContainerId containerID) throws IOException {

  // Construct the actual Container
  ContainerLaunchContext container = 
      applicationMasterContext.getAMContainerSpec();
  LOG.info("Command to launch container "
      + containerID
      + " : "
      + StringUtils.arrayToString(container.getCommands().toArray(
          new String[0])));

  // Finalize the container
  setupTokens(container, containerID);

  return container;
}
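
The ContainerLaunchContext returned by getAMContainerSpec() above is whatever the submitting client put into the ApplicationSubmissionContext. As a hedged sketch (imports omitted), a client might assemble it with the public newInstance factory; com.example.MyAppMaster and the empty resource maps are hypothetical placeholders.

Map<String, LocalResource> localResources = new HashMap<>(); // jars/config to localize
Map<String, String> environment = new HashMap<>();           // AM environment variables
List<String> commands = Collections.singletonList(
    "$JAVA_HOME/bin/java -Xmx512m com.example.MyAppMaster"    // hypothetical AM class
        + " 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout"
        + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr");

ContainerLaunchContext amContainer = ContainerLaunchContext.newInstance(
    localResources, environment, commands,
    null /* serviceData */, null /* tokens */, null /* application ACLs */);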
Project: hadoop    File: RMAppAttemptImpl.java
public RMAppAttemptImpl(ApplicationAttemptId appAttemptId,
    RMContext rmContext, YarnScheduler scheduler,
    ApplicationMasterService masterService,
    ApplicationSubmissionContext submissionContext,
    Configuration conf, boolean maybeLastAttempt, ResourceRequest amReq) {
  this.conf = conf;
  this.applicationAttemptId = appAttemptId;
  this.rmContext = rmContext;
  this.eventHandler = rmContext.getDispatcher().getEventHandler();
  this.submissionContext = submissionContext;
  this.scheduler = scheduler;
  this.masterService = masterService;

  ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  this.readLock = lock.readLock();
  this.writeLock = lock.writeLock();

  this.proxiedTrackingUrl = generateProxyUriWithScheme();
  this.maybeLastAttempt = maybeLastAttempt;
  this.stateMachine = stateMachineFactory.make(this);

  this.attemptMetrics =
      new RMAppAttemptMetrics(applicationAttemptId, rmContext);

  this.amReq = amReq;
}
Project: hadoop    File: SchedulerApplicationAttempt.java
public SchedulerApplicationAttempt(ApplicationAttemptId applicationAttemptId, 
    String user, Queue queue, ActiveUsersManager activeUsersManager,
    RMContext rmContext) {
  Preconditions.checkNotNull(rmContext, "RMContext should not be null");
  this.rmContext = rmContext;
  this.appSchedulingInfo = 
      new AppSchedulingInfo(applicationAttemptId, user, queue,  
          activeUsersManager, rmContext.getEpoch());
  this.queue = queue;
  this.pendingRelease = new HashSet<ContainerId>();
  this.attemptId = applicationAttemptId;
  if (rmContext.getRMApps() != null &&
      rmContext.getRMApps()
          .containsKey(applicationAttemptId.getApplicationId())) {
    ApplicationSubmissionContext appSubmissionContext =
        rmContext.getRMApps().get(applicationAttemptId.getApplicationId())
            .getApplicationSubmissionContext();
    if (appSubmissionContext != null) {
      unmanagedAM = appSubmissionContext.getUnmanagedAM();
      this.logAggregationContext =
          appSubmissionContext.getLogAggregationContext();
    }
  }
}
Project: hadoop    File: TestAppManager.java
@SuppressWarnings("deprecation")
@Before
public void setUp() {
  long now = System.currentTimeMillis();

  rmContext = mockRMContext(1, now - 10);
  ResourceScheduler scheduler = mockResourceScheduler();
  Configuration conf = new Configuration();
  ApplicationMasterService masterService =
      new ApplicationMasterService(rmContext, scheduler);
  appMonitor = new TestRMAppManager(rmContext,
      new ClientToAMTokenSecretManagerInRM(), scheduler, masterService,
      new ApplicationACLsManager(conf), conf);

  appId = MockApps.newAppID(1);
  RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
  asContext =
      recordFactory.newRecordInstance(ApplicationSubmissionContext.class);
  asContext.setApplicationId(appId);
  asContext.setAMContainerSpec(mockContainerLaunchContext(recordFactory));
  asContext.setResource(mockResource());
  setupDispatcher(rmContext, conf);
}
Project: hadoop    File: TestRMAppTransitions.java
protected RMApp testCreateAppSubmittedRecovery(
    ApplicationSubmissionContext submissionContext) throws IOException {
  RMApp application = createNewTestApp(submissionContext);
  // NEW => SUBMITTED event RMAppEventType.RECOVER
  RMState state = new RMState();
  ApplicationStateData appState =
      ApplicationStateData.newInstance(123, 123, null, "user");
  state.getApplicationState().put(application.getApplicationId(), appState);
  RMAppEvent event =
      new RMAppRecoverEvent(application.getApplicationId(), state);


  application.handle(event);
  assertStartTimeSet(application);
  assertAppState(RMAppState.SUBMITTED, application);
  return application;
}
Project: hadoop    File: TestRMAppTransitions.java
protected RMApp testCreateAppFinished(
    ApplicationSubmissionContext submissionContext,
    String diagnostics) throws IOException {
  // unmanaged AMs don't use the FINISHING state
  RMApp application = null;
  if (submissionContext != null && submissionContext.getUnmanagedAM()) {
    application = testCreateAppRunning(submissionContext);
  } else {
    application = testCreateAppFinishing(submissionContext);
  }
  // RUNNING/FINISHING => FINISHED event RMAppEventType.ATTEMPT_FINISHED
  RMAppEvent finishedEvent = new RMAppFinishedAttemptEvent(
      application.getApplicationId(), diagnostics);
  application.handle(finishedEvent);
  assertAppState(RMAppState.FINISHED, application);
  assertTimesAtFinish(application);
  // finished without a proper unregister implies failed
  assertFinalAppStatus(FinalApplicationStatus.FAILED, application);
  Assert.assertTrue("Finished app missing diagnostics",
      application.getDiagnostics().indexOf(diagnostics) != -1);
  return application;
}
Project: hadoop    File: TestRMAppTransitions.java
@Test (timeout = 30000)
public void testAppRecoverPath() throws IOException {
  LOG.info("--- START: testAppRecoverPath ---");
  ApplicationSubmissionContext sub =
      Records.newRecord(ApplicationSubmissionContext.class);
  ContainerLaunchContext clc =
      Records.newRecord(ContainerLaunchContext.class);
  Credentials credentials = new Credentials();
  DataOutputBuffer dob = new DataOutputBuffer();
  credentials.writeTokenStorageToStream(dob);
  ByteBuffer securityTokens =
      ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
  clc.setTokens(securityTokens);
  sub.setAMContainerSpec(clc);
  testCreateAppSubmittedRecovery(sub);
}
Project: hadoop    File: RMHATestBase.java
@Override
protected void submitApplication(
    ApplicationSubmissionContext submissionContext, long submitTime,
    String user) throws YarnException {
  //Do nothing, just add the application to RMContext
  RMAppImpl application =
      new RMAppImpl(submissionContext.getApplicationId(), this.rmContext,
          this.conf, submissionContext.getApplicationName(), user,
          submissionContext.getQueue(), submissionContext,
          this.rmContext.getScheduler(),
          this.rmContext.getApplicationMasterService(),
          submitTime, submissionContext.getApplicationType(),
          submissionContext.getApplicationTags(), null);
  this.rmContext.getRMApps().put(submissionContext.getApplicationId(),
      application);
  //Do not send RMAppEventType.START event
  //so the state of the application will not reach the NEW_SAVING state.
}
Project: hadoop    File: TestClientRMService.java
@Test
public void testGetApplicationResourceUsageReportDummy() throws YarnException,
    IOException {
  ApplicationAttemptId attemptId = getApplicationAttemptId(1);
  YarnScheduler yarnScheduler = mockYarnScheduler();
  RMContext rmContext = mock(RMContext.class);
  mockRMContext(yarnScheduler, rmContext);
  when(rmContext.getDispatcher().getEventHandler()).thenReturn(
      new EventHandler<Event>() {
        public void handle(Event event) {
        }
      });
  ApplicationSubmissionContext asContext = 
      mock(ApplicationSubmissionContext.class);
  YarnConfiguration config = new YarnConfiguration();
  RMAppAttemptImpl rmAppAttemptImpl = new RMAppAttemptImpl(attemptId,
      rmContext, yarnScheduler, null, asContext, config, false, null);
  ApplicationResourceUsageReport report = rmAppAttemptImpl
      .getApplicationResourceUsageReport();
  assertEquals(report, RMServerUtils.DUMMY_APPLICATION_RESOURCE_USAGE_REPORT);
}
Project: hadoop    File: TestClientRMService.java
@SuppressWarnings("deprecation")
private SubmitApplicationRequest mockSubmitAppRequest(ApplicationId appId,
      String name, String queue, Set<String> tags, boolean unmanaged) {

  ContainerLaunchContext amContainerSpec = mock(ContainerLaunchContext.class);

  Resource resource = Resources.createResource(
      YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);

  ApplicationSubmissionContext submissionContext =
      recordFactory.newRecordInstance(ApplicationSubmissionContext.class);
  submissionContext.setAMContainerSpec(amContainerSpec);
  submissionContext.setApplicationName(name);
  submissionContext.setQueue(queue);
  submissionContext.setApplicationId(appId);
  submissionContext.setResource(resource);
  submissionContext.setApplicationType(appType);
  submissionContext.setApplicationTags(tags);
  submissionContext.setUnmanagedAM(unmanaged);

  SubmitApplicationRequest submitRequest =
      recordFactory.newRecordInstance(SubmitApplicationRequest.class);
  submitRequest.setApplicationSubmissionContext(submissionContext);
  return submitRequest;
}
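
A request built like the one above is ultimately passed to the ResourceManager's client protocol. As a small sketch, where rmClient stands for an ApplicationClientProtocol proxy obtained elsewhere and is an assumption for illustration:

SubmitApplicationRequest request =
    SubmitApplicationRequest.newInstance(submissionContext);
rmClient.submitApplication(request);   // ApplicationClientProtocol#submitApplication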
Project: hadoop    File: RMStateStoreTestBase.java
protected RMApp storeApp(RMStateStore store, ApplicationId appId,
    long submitTime,
    long startTime) throws Exception {
  ApplicationSubmissionContext context =
      new ApplicationSubmissionContextPBImpl();
  context.setApplicationId(appId);

  RMApp mockApp = mock(RMApp.class);
  when(mockApp.getApplicationId()).thenReturn(appId);
  when(mockApp.getSubmitTime()).thenReturn(submitTime);
  when(mockApp.getStartTime()).thenReturn(startTime);
  when(mockApp.getApplicationSubmissionContext()).thenReturn(context);
  when(mockApp.getUser()).thenReturn("test");
  store.storeNewApplication(mockApp);
  return mockApp;
}
Project: hadoop    File: TestApplicationClientProtocolOnHA.java
@Test(timeout = 15000)
public void testSubmitApplicationOnHA() throws Exception {
  ApplicationSubmissionContext appContext =
      Records.newRecord(ApplicationSubmissionContext.class);
  appContext.setApplicationId(cluster.createFakeAppId());
  ContainerLaunchContext amContainer =
      Records.newRecord(ContainerLaunchContext.class);
  appContext.setAMContainerSpec(amContainer);
  Resource capability = Records.newRecord(Resource.class);
  capability.setMemory(10);
  capability.setVirtualCores(1);
  capability.setGpuCores(1);
  appContext.setResource(capability);
  ApplicationId appId = client.submitApplication(appContext);
  Assert.assertTrue(getActiveRM().getRMContext().getRMApps()
      .containsKey(appId));
}
Project: hadoop    File: TestYARNRunner.java
@Test(timeout=20000)
public void testJobSubmissionFailure() throws Exception {
  when(resourceMgrDelegate.submitApplication(any(ApplicationSubmissionContext.class)))
      .thenReturn(appId);
  ApplicationReport report = mock(ApplicationReport.class);
  when(report.getApplicationId()).thenReturn(appId);
  when(report.getDiagnostics()).thenReturn(failString);
  when(report.getYarnApplicationState()).thenReturn(YarnApplicationState.FAILED);
  when(resourceMgrDelegate.getApplicationReport(appId)).thenReturn(report);
  Credentials credentials = new Credentials();
  File jobxml = new File(testWorkDir, "job.xml");
  OutputStream out = new FileOutputStream(jobxml);
  conf.writeXml(out);
  out.close();
  try {
    yarnRunner.submitJob(jobId, testWorkDir.getAbsolutePath().toString(), credentials);
  } catch(IOException io) {
    LOG.info("Logging exception:", io);
    assertTrue(io.getLocalizedMessage().contains(failString));
  }
}
Project: hadoop    File: TestYARNRunner.java
@Test(timeout=20000)
public void testAMProfiler() throws Exception {
  JobConf jobConf = new JobConf();

  jobConf.setBoolean(MRJobConfig.MR_AM_PROFILE, true);

  YARNRunner yarnRunner = new YARNRunner(jobConf);

  ApplicationSubmissionContext submissionContext =
      buildSubmitContext(yarnRunner, jobConf);

  ContainerLaunchContext containerSpec = submissionContext.getAMContainerSpec();
  List<String> commands = containerSpec.getCommands();

  for(String command : commands) {
    if (command != null) {
      if (command.contains(PROFILE_PARAMS)) {
        return;
      }
    }
  }
  throw new IllegalStateException("Profiler opts not found!");
}
Project: scheduling-connector-for-hadoop    File: SlurmApplicationClient.java
@SuppressWarnings("deprecation")
@Override
public void submitApplication(ApplicationSubmissionContext context)
    throws IOException {
  int waitingTime = conf.getInt(
      HPCConfiguration.YARN_APPLICATION_HPC_CLIENT_RS_MAX_WAIT_MS,
      HPCConfiguration.DEFAULT_YARN_APPLICATION_HPC_CLIENT_RS_MAX_WAIT_MS);
  int noOfTimes = conf.getInt(
      HPCConfiguration.YARN_APPLICATION_HPC_CLIENT_RS_RETRIES_MAX,
      HPCConfiguration.DEFAULT_YARN_APPLICATION_HPC_CLIENT_RS_RETRIES_MAX);
  ApplicationId applicationId = context.getApplicationId();
  String nodeName = checkAndWaitForResourcesToBeAvailable(
      applicationId, waitingTime, noOfTimes);

  HPCCommandExecutor.launchContainer(
      context.getAMContainerSpec(),
      ContainerId.newInstance(
          ApplicationAttemptId.newInstance(applicationId, 1), 1).toString(),
      context.getApplicationName(), conf, applicationId.getId(), nodeName);
  HPCCommandExecutor.setJobState(applicationId.getId(), "running::0", conf);
}
Project: aliyun-oss-hadoop-fs    File: SystemMetricsPublisher.java
@SuppressWarnings("unchecked")
public void appCreated(RMApp app, long createdTime) {
  if (publishSystemMetrics) {
    ApplicationSubmissionContext appSubmissionContext =
        app.getApplicationSubmissionContext();
    dispatcher.getEventHandler().handle(
        new ApplicationCreatedEvent(
            app.getApplicationId(),
            app.getName(),
            app.getApplicationType(),
            app.getUser(),
            app.getQueue(),
            app.getSubmitTime(),
            createdTime, app.getApplicationTags(),
            appSubmissionContext.getUnmanagedAM(),
            appSubmissionContext.getPriority(),
            app.getAppNodeLabelExpression(),
            app.getAmNodeLabelExpression(),
            app.getCallerContext()));
  }
}
Project: aliyun-oss-hadoop-fs    File: AMLauncher.java
private ContainerLaunchContext createAMContainerLaunchContext(
    ApplicationSubmissionContext applicationMasterContext,
    ContainerId containerID) throws IOException {

  // Construct the actual Container
  ContainerLaunchContext container =
      applicationMasterContext.getAMContainerSpec();
  LOG.info("Command to launch container "
      + containerID
      + " : "
      + StringUtils.arrayToString(container.getCommands().toArray(
          new String[0])));

  // Finalize the container
  setupTokens(container, containerID);

  return container;
}
Project: big-c    File: TestYARNRunner.java
@Test(timeout=20000)
public void testJobSubmissionFailure() throws Exception {
  when(resourceMgrDelegate.submitApplication(any(ApplicationSubmissionContext.class)))
      .thenReturn(appId);
  ApplicationReport report = mock(ApplicationReport.class);
  when(report.getApplicationId()).thenReturn(appId);
  when(report.getDiagnostics()).thenReturn(failString);
  when(report.getYarnApplicationState()).thenReturn(YarnApplicationState.FAILED);
  when(resourceMgrDelegate.getApplicationReport(appId)).thenReturn(report);
  Credentials credentials = new Credentials();
  File jobxml = new File(testWorkDir, "job.xml");
  OutputStream out = new FileOutputStream(jobxml);
  conf.writeXml(out);
  out.close();
  try {
    yarnRunner.submitJob(jobId, testWorkDir.getAbsolutePath().toString(), credentials);
  } catch(IOException io) {
    LOG.info("Logging exception:", io);
    assertTrue(io.getLocalizedMessage().contains(failString));
  }
}
Project: aliyun-oss-hadoop-fs    File: SchedulerApplicationAttempt.java
public SchedulerApplicationAttempt(ApplicationAttemptId applicationAttemptId, 
    String user, Queue queue, ActiveUsersManager activeUsersManager,
    RMContext rmContext) {
  Preconditions.checkNotNull(rmContext, "RMContext should not be null");
  this.rmContext = rmContext;
  this.appSchedulingInfo = 
      new AppSchedulingInfo(applicationAttemptId, user, queue,  
          activeUsersManager, rmContext.getEpoch(), attemptResourceUsage);
  this.queue = queue;
  this.pendingRelease = new HashSet<ContainerId>();
  this.attemptId = applicationAttemptId;
  if (rmContext.getRMApps() != null &&
      rmContext.getRMApps()
          .containsKey(applicationAttemptId.getApplicationId())) {
    RMApp rmApp = rmContext.getRMApps().get(applicationAttemptId.getApplicationId());
    ApplicationSubmissionContext appSubmissionContext =
        rmApp.getApplicationSubmissionContext();
    appAttempt = rmApp.getCurrentAppAttempt();
    if (appSubmissionContext != null) {
      unmanagedAM = appSubmissionContext.getUnmanagedAM();
      this.logAggregationContext =
          appSubmissionContext.getLogAggregationContext();
    }
  }
}
Project: big-c    File: TestApplicationClientProtocolOnHA.java
@Test(timeout = 15000)
public void testSubmitApplicationOnHA() throws Exception {
  ApplicationSubmissionContext appContext =
      Records.newRecord(ApplicationSubmissionContext.class);
  appContext.setApplicationId(cluster.createFakeAppId());
  ContainerLaunchContext amContainer =
      Records.newRecord(ContainerLaunchContext.class);
  appContext.setAMContainerSpec(amContainer);
  Resource capability = Records.newRecord(Resource.class);
  capability.setMemory(10);
  capability.setVirtualCores(1);
  appContext.setResource(capability);
  ApplicationId appId = client.submitApplication(appContext);
  Assert.assertTrue(getActiveRM().getRMContext().getRMApps()
      .containsKey(appId));
}
Project: aliyun-oss-hadoop-fs    File: TestAppManager.java
@SuppressWarnings("deprecation")
@Before
public void setUp() {
  long now = System.currentTimeMillis();

  rmContext = mockRMContext(1, now - 10);
  ResourceScheduler scheduler = mockResourceScheduler();
  ((RMContextImpl)rmContext).setScheduler(scheduler);
  Configuration conf = new Configuration();
  ApplicationMasterService masterService =
      new ApplicationMasterService(rmContext, scheduler);
  appMonitor = new TestRMAppManager(rmContext,
      new ClientToAMTokenSecretManagerInRM(), scheduler, masterService,
      new ApplicationACLsManager(conf), conf);

  appId = MockApps.newAppID(1);
  RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
  asContext =
      recordFactory.newRecordInstance(ApplicationSubmissionContext.class);
  asContext.setApplicationId(appId);
  asContext.setAMContainerSpec(mockContainerLaunchContext(recordFactory));
  asContext.setResource(mockResource());
  setupDispatcher(rmContext, conf);
}
Project: big-c    File: SchedulerApplicationAttempt.java
public SchedulerApplicationAttempt(ApplicationAttemptId applicationAttemptId, 
    String user, Queue queue, ActiveUsersManager activeUsersManager,
    RMContext rmContext) {
  Preconditions.checkNotNull(rmContext, "RMContext should not be null");
  this.rmContext = rmContext;
  this.appSchedulingInfo = 
      new AppSchedulingInfo(applicationAttemptId, user, queue,  
          activeUsersManager, rmContext.getEpoch());
  this.queue = queue;
  this.pendingRelease = new HashSet<ContainerId>();
  this.attemptId = applicationAttemptId;
  if (rmContext.getRMApps() != null &&
      rmContext.getRMApps()
          .containsKey(applicationAttemptId.getApplicationId())) {
    ApplicationSubmissionContext appSubmissionContext =
        rmContext.getRMApps().get(applicationAttemptId.getApplicationId())
            .getApplicationSubmissionContext();
    if (appSubmissionContext != null) {
      unmanagedAM = appSubmissionContext.getUnmanagedAM();
      this.logAggregationContext =
          appSubmissionContext.getLogAggregationContext();
    }
  }
}
Project: big-c    File: TestRMAppTransitions.java
protected RMApp testCreateAppFinished(
    ApplicationSubmissionContext submissionContext,
    String diagnostics) throws IOException {
  // unmanaged AMs don't use the FINISHING state
  RMApp application = null;
  if (submissionContext != null && submissionContext.getUnmanagedAM()) {
    application = testCreateAppRunning(submissionContext);
  } else {
    application = testCreateAppFinishing(submissionContext);
  }
  // RUNNING/FINISHING => FINISHED event RMAppEventType.ATTEMPT_FINISHED
  RMAppEvent finishedEvent = new RMAppFinishedAttemptEvent(
      application.getApplicationId(), diagnostics);
  application.handle(finishedEvent);
  assertAppState(RMAppState.FINISHED, application);
  assertTimesAtFinish(application);
  // finished without a proper unregister implies failed
  assertFinalAppStatus(FinalApplicationStatus.FAILED, application);
  Assert.assertTrue("Finished app missing diagnostics",
      application.getDiagnostics().indexOf(diagnostics) != -1);
  return application;
}
Project: big-c    File: TestRMAppTransitions.java
@Test (timeout = 30000)
public void testAppRecoverPath() throws IOException {
  LOG.info("--- START: testAppRecoverPath ---");
  ApplicationSubmissionContext sub =
      Records.newRecord(ApplicationSubmissionContext.class);
  ContainerLaunchContext clc =
      Records.newRecord(ContainerLaunchContext.class);
  Credentials credentials = new Credentials();
  DataOutputBuffer dob = new DataOutputBuffer();
  credentials.writeTokenStorageToStream(dob);
  ByteBuffer securityTokens =
      ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
  clc.setTokens(securityTokens);
  sub.setAMContainerSpec(clc);
  testCreateAppSubmittedRecovery(sub);
}
Project: aliyun-oss-hadoop-fs    File: TestRMAppTransitions.java
@Test (timeout = 30000)
public void testAppRecoverPath() throws IOException {
  LOG.info("--- START: testAppRecoverPath ---");
  ApplicationSubmissionContext sub =
      Records.newRecord(ApplicationSubmissionContext.class);
  ContainerLaunchContext clc =
      Records.newRecord(ContainerLaunchContext.class);
  Credentials credentials = new Credentials();
  DataOutputBuffer dob = new DataOutputBuffer();
  credentials.writeTokenStorageToStream(dob);
  ByteBuffer securityTokens =
      ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
  clc.setTokens(securityTokens);
  sub.setAMContainerSpec(clc);
  testCreateAppSubmittedRecovery(sub);
}
Project: aliyun-oss-hadoop-fs    File: RMHATestBase.java
@Override
protected void submitApplication(
    ApplicationSubmissionContext submissionContext, long submitTime,
    String user) throws YarnException {
  //Do nothing, just add the application to RMContext
  RMAppImpl application =
      new RMAppImpl(submissionContext.getApplicationId(), this.rmContext,
          this.conf, submissionContext.getApplicationName(), user,
          submissionContext.getQueue(), submissionContext,
          this.rmContext.getScheduler(),
          this.rmContext.getApplicationMasterService(),
          submitTime, submissionContext.getApplicationType(),
          submissionContext.getApplicationTags(), null);
  this.rmContext.getRMApps().put(submissionContext.getApplicationId(),
      application);
  //Do not send RMAppEventType.START event
  //so the state of the application will not reach the NEW_SAVING state.
}
Project: aliyun-oss-hadoop-fs    File: TestClientRMService.java
@Test
public void testGetApplicationResourceUsageReportDummy() throws YarnException,
    IOException {
  ApplicationAttemptId attemptId = getApplicationAttemptId(1);
  YarnScheduler yarnScheduler = mockYarnScheduler();
  RMContext rmContext = mock(RMContext.class);
  mockRMContext(yarnScheduler, rmContext);
  when(rmContext.getDispatcher().getEventHandler()).thenReturn(
      new EventHandler<Event>() {
        public void handle(Event event) {
        }
      });
  ApplicationSubmissionContext asContext = 
      mock(ApplicationSubmissionContext.class);
  YarnConfiguration config = new YarnConfiguration();
  RMAppAttemptImpl rmAppAttemptImpl = new RMAppAttemptImpl(attemptId,
      rmContext, yarnScheduler, null, asContext, config, false, null);
  ApplicationResourceUsageReport report = rmAppAttemptImpl
      .getApplicationResourceUsageReport();
  assertEquals(report, RMServerUtils.DUMMY_APPLICATION_RESOURCE_USAGE_REPORT);
}
Project: big-c    File: RMHATestBase.java
@Override
protected void submitApplication(
    ApplicationSubmissionContext submissionContext, long submitTime,
    String user) throws YarnException {
  //Do nothing, just add the application to RMContext
  RMAppImpl application =
      new RMAppImpl(submissionContext.getApplicationId(), this.rmContext,
          this.conf, submissionContext.getApplicationName(), user,
          submissionContext.getQueue(), submissionContext,
          this.rmContext.getScheduler(),
          this.rmContext.getApplicationMasterService(),
          submitTime, submissionContext.getApplicationType(),
          submissionContext.getApplicationTags(), null);
  this.rmContext.getRMApps().put(submissionContext.getApplicationId(),
      application);
  //Do not send RMAppEventType.START event
  //so the state of the application will not reach the NEW_SAVING state.
}
Project: aliyun-oss-hadoop-fs    File: TestApplicationClientProtocolOnHA.java
@Test(timeout = 15000)
public void testSubmitApplicationOnHA() throws Exception {
  ApplicationSubmissionContext appContext =
      Records.newRecord(ApplicationSubmissionContext.class);
  appContext.setApplicationId(cluster.createFakeAppId());
  ContainerLaunchContext amContainer =
      Records.newRecord(ContainerLaunchContext.class);
  appContext.setAMContainerSpec(amContainer);
  Resource capability = Records.newRecord(Resource.class);
  capability.setMemory(10);
  capability.setVirtualCores(1);
  appContext.setResource(capability);
  ApplicationId appId = client.submitApplication(appContext);
  Assert.assertTrue(getActiveRM().getRMContext().getRMApps()
      .containsKey(appId));
}
Project: big-c    File: TestClientRMService.java
@Test
public void testGetApplicationResourceUsageReportDummy() throws YarnException,
    IOException {
  ApplicationAttemptId attemptId = getApplicationAttemptId(1);
  YarnScheduler yarnScheduler = mockYarnScheduler();
  RMContext rmContext = mock(RMContext.class);
  mockRMContext(yarnScheduler, rmContext);
  when(rmContext.getDispatcher().getEventHandler()).thenReturn(
      new EventHandler<Event>() {
        public void handle(Event event) {
        }
      });
  ApplicationSubmissionContext asContext = 
      mock(ApplicationSubmissionContext.class);
  YarnConfiguration config = new YarnConfiguration();
  RMAppAttemptImpl rmAppAttemptImpl = new RMAppAttemptImpl(attemptId,
      rmContext, yarnScheduler, null, asContext, config, false, null);
  ApplicationResourceUsageReport report = rmAppAttemptImpl
      .getApplicationResourceUsageReport();
  assertEquals(report, RMServerUtils.DUMMY_APPLICATION_RESOURCE_USAGE_REPORT);
}
Project: big-c    File: AMLauncher.java
private ContainerLaunchContext createAMContainerLaunchContext(
    ApplicationSubmissionContext applicationMasterContext,
    ContainerId containerID) throws IOException {

  // Construct the actual Container
  ContainerLaunchContext container = 
      applicationMasterContext.getAMContainerSpec();
  LOG.info("Command to launch container "
      + containerID
      + " : "
      + StringUtils.arrayToString(container.getCommands().toArray(
          new String[0])));

  // Finalize the container
  setupTokens(container, containerID);

  return container;
}
Project: aliyun-oss-hadoop-fs    File: TestYARNRunner.java
@Test(timeout=20000)
public void testAMProfiler() throws Exception {
  JobConf jobConf = new JobConf();

  jobConf.setBoolean(MRJobConfig.MR_AM_PROFILE, true);

  YARNRunner yarnRunner = new YARNRunner(jobConf);

  ApplicationSubmissionContext submissionContext =
      buildSubmitContext(yarnRunner, jobConf);

  ContainerLaunchContext containerSpec = submissionContext.getAMContainerSpec();
  List<String> commands = containerSpec.getCommands();

  for(String command : commands) {
    if (command != null) {
      if (command.contains(PROFILE_PARAMS)) {
        return;
      }
    }
  }
  throw new IllegalStateException("Profiler opts not found!");
}
Project: aliyun-oss-hadoop-fs    File: TestYARNRunner.java
@Test
public void testJobPriority() throws Exception {
  JobConf jobConf = new JobConf();

  jobConf.set(MRJobConfig.PRIORITY, "LOW");

  YARNRunner yarnRunner = new YARNRunner(jobConf);
  ApplicationSubmissionContext appSubCtx = buildSubmitContext(yarnRunner,
      jobConf);

  // 2 corresponds to LOW
  assertEquals(appSubCtx.getPriority(), Priority.newInstance(2));

  // Set an integer explicitly
  jobConf.set(MRJobConfig.PRIORITY, "12");

  yarnRunner = new YARNRunner(jobConf);
  appSubCtx = buildSubmitContext(yarnRunner,
      jobConf);

  // Verify whether 12 is set to submission context
  assertEquals(appSubCtx.getPriority(), Priority.newInstance(12));
}
Project: aliyun-oss-hadoop-fs    File: TestYARNRunner.java
private ApplicationSubmissionContext buildSubmitContext(
    YARNRunner yarnRunner, JobConf jobConf) throws IOException {
  File jobxml = new File(testWorkDir, MRJobConfig.JOB_CONF_FILE);
  OutputStream out = new FileOutputStream(jobxml);
  conf.writeXml(out);
  out.close();

  File jobsplit = new File(testWorkDir, MRJobConfig.JOB_SPLIT);
  out = new FileOutputStream(jobsplit);
  out.close();

  File jobsplitmetainfo = new File(testWorkDir,
      MRJobConfig.JOB_SPLIT_METAINFO);
  out = new FileOutputStream(jobsplitmetainfo);
  out.close();

  return yarnRunner.createApplicationSubmissionContext(jobConf,
      testWorkDir.toString(), new Credentials());
}
Project: aliyun-oss-hadoop-fs    File: TestHadoopArchiveLogs.java
private static RMApp createRMApp(int id, Configuration conf, RMContext rmContext,
     final LogAggregationStatus aggStatus) {
  ApplicationId appId = ApplicationId.newInstance(CLUSTER_TIMESTAMP, id);
  ApplicationSubmissionContext submissionContext =
      ApplicationSubmissionContext.newInstance(appId, "test", "default",
          Priority.newInstance(0), null, true, true,
          2, Resource.newInstance(10, 2), "test");
  return new RMAppImpl(appId, rmContext, conf, "test",
      USER, "default", submissionContext, rmContext.getScheduler(),
      rmContext.getApplicationMasterService(),
      System.currentTimeMillis(), "test",
      null, null) {
    @Override
    public ApplicationReport createAndGetApplicationReport(
        String clientUserName, boolean allowAccess) {
      ApplicationReport report =
          super.createAndGetApplicationReport(clientUserName, allowAccess);
      report.setLogAggregationStatus(aggStatus);
      return report;
    }
  };
}
Project: big-c    File: RMAppAttemptImpl.java
public RMAppAttemptImpl(ApplicationAttemptId appAttemptId,
    RMContext rmContext, YarnScheduler scheduler,
    ApplicationMasterService masterService,
    ApplicationSubmissionContext submissionContext,
    Configuration conf, boolean maybeLastAttempt, ResourceRequest amReq) {
  this.conf = conf;
  this.applicationAttemptId = appAttemptId;
  this.rmContext = rmContext;
  this.eventHandler = rmContext.getDispatcher().getEventHandler();
  this.submissionContext = submissionContext;
  this.scheduler = scheduler;
  this.masterService = masterService;

  ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  this.readLock = lock.readLock();
  this.writeLock = lock.writeLock();

  this.proxiedTrackingUrl = generateProxyUriWithScheme();
  this.maybeLastAttempt = maybeLastAttempt;
  this.stateMachine = stateMachineFactory.make(this);

  this.attemptMetrics =
      new RMAppAttemptMetrics(applicationAttemptId, rmContext);

  this.amReq = amReq;
}
Project: pai    File: StatusManager.java
private void associateFrameworkWithApplication(String frameworkName, ApplicationSubmissionContext applicationContext) {
  FrameworkStatus frameworkStatus = getFrameworkStatus(frameworkName);
  String applicationId = applicationContext.getApplicationId().toString();

  // Construct BaseStatus
  frameworkStatus.setApplicationId(applicationId);
  frameworkStatus.setApplicationProgress((float) 0);

  // Construct ExtensionStatus
  associatedApplicationIdLocators.put(applicationId, frameworkName);
}