Java class org.apache.hadoop.mapred.MapReduceChildJVM usage examples
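The listings below show how the MapReduce ApplicationMaster's TaskAttemptImpl.createContainerLaunchContext uses MapReduceChildJVM across several Hadoop forks: MapReduceChildJVM.setVMEnv populates the child JVM's environment map, and MapReduceChildJVM.getVMCommand builds the launch command for the task container. Two variants of the method appear; the first (hadoop, aliyun-oss-hadoop-fs, big-c, hops) additionally honors mapreduce.job.user.classpath.first, while the second (hadoop-2.6.0-cdh5.4.3 and the remaining forks) does not. Duplicate listings are noted rather than repeated.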

Project: hadoop    File: TaskAttemptImpl.java
static ContainerLaunchContext createContainerLaunchContext(
    Map<ApplicationAccessType, String> applicationACLs,
    Configuration conf, Token<JobTokenIdentifier> jobToken, Task remoteTask,
    final org.apache.hadoop.mapred.JobID oldJobId,
    WrappedJvmID jvmID,
    TaskAttemptListener taskAttemptListener,
    Credentials credentials) {

  synchronized (commonContainerSpecLock) {
    if (commonContainerSpec == null) {
      commonContainerSpec = createCommonContainerLaunchContext(
          applicationACLs, conf, jobToken, oldJobId, credentials);
    }
  }

  // Fill in the fields needed per-container that are missing in the common
  // spec.

  boolean userClassesTakesPrecedence =
    conf.getBoolean(MRJobConfig.MAPREDUCE_JOB_USER_CLASSPATH_FIRST, false);

  // Setup environment by cloning from common env.
  Map<String, String> env = commonContainerSpec.getEnvironment();
  Map<String, String> myEnv = new HashMap<String, String>(env.size());
  myEnv.putAll(env);
  if (userClassesTakesPrecedence) {
    myEnv.put(Environment.CLASSPATH_PREPEND_DISTCACHE.name(), "true");
  }
  MapReduceChildJVM.setVMEnv(myEnv, remoteTask);

  // Set up the launch command
  List<String> commands = MapReduceChildJVM.getVMCommand(
      taskAttemptListener.getAddress(), remoteTask, jvmID);

  // Duplicate the ByteBuffers for access by multiple containers.
  Map<String, ByteBuffer> myServiceData = new HashMap<String, ByteBuffer>();
  for (Entry<String, ByteBuffer> entry : commonContainerSpec
              .getServiceData().entrySet()) {
    myServiceData.put(entry.getKey(), entry.getValue().duplicate());
  }

  // Construct the actual Container
  ContainerLaunchContext container = ContainerLaunchContext.newInstance(
      commonContainerSpec.getLocalResources(), myEnv, commands,
      myServiceData, commonContainerSpec.getTokens().duplicate(),
      applicationACLs);

  return container;
}
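A note on the service-data loop above: ByteBuffer.duplicate() returns a new buffer that shares the original's content but maintains its own position, limit, and mark, which is why each container gets its own duplicate rather than a reference to the shared buffer. A minimal, self-contained sketch of that behavior (class and variable names here are illustrative, not from Hadoop):

import java.nio.ByteBuffer;

public class DuplicateDemo {
    public static void main(String[] args) {
        // One shared buffer, standing in for an entry from
        // commonContainerSpec.getServiceData().
        ByteBuffer shared = ByteBuffer.wrap("service-data".getBytes());

        // Each "container" gets a duplicate: same bytes, independent cursor.
        ByteBuffer forContainerA = shared.duplicate();
        ByteBuffer forContainerB = shared.duplicate();

        forContainerA.get(new byte[7]);               // A consumes 7 bytes
        System.out.println(forContainerA.position()); // 7
        System.out.println(forContainerB.position()); // 0, B is unaffected
    }
}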
Project: aliyun-oss-hadoop-fs    File: TaskAttemptImpl.java
(Identical to the hadoop version of createContainerLaunchContext above.)
Project: big-c    File: TaskAttemptImpl.java
(Identical to the hadoop version of createContainerLaunchContext above.)
Project: hadoop-2.6.0-cdh5.4.3    File: TaskAttemptImpl.java
static ContainerLaunchContext createContainerLaunchContext(
    Map<ApplicationAccessType, String> applicationACLs,
    Configuration conf, Token<JobTokenIdentifier> jobToken, Task remoteTask,
    final org.apache.hadoop.mapred.JobID oldJobId,
    WrappedJvmID jvmID,
    TaskAttemptListener taskAttemptListener,
    Credentials credentials) {

  synchronized (commonContainerSpecLock) {
    if (commonContainerSpec == null) {
      commonContainerSpec = createCommonContainerLaunchContext(
          applicationACLs, conf, jobToken, oldJobId, credentials);
    }
  }

  // Fill in the fields needed per-container that are missing in the common
  // spec.

  // Setup environment by cloning from common env.
  Map<String, String> env = commonContainerSpec.getEnvironment();
  Map<String, String> myEnv = new HashMap<String, String>(env.size());
  myEnv.putAll(env);
  MapReduceChildJVM.setVMEnv(myEnv, remoteTask);

  // Set up the launch command
  List<String> commands = MapReduceChildJVM.getVMCommand(
      taskAttemptListener.getAddress(), remoteTask, jvmID);

  // Duplicate the ByteBuffers for access by multiple containers.
  Map<String, ByteBuffer> myServiceData = new HashMap<String, ByteBuffer>();
  for (Entry<String, ByteBuffer> entry : commonContainerSpec
              .getServiceData().entrySet()) {
    myServiceData.put(entry.getKey(), entry.getValue().duplicate());
  }

  // Construct the actual Container
  ContainerLaunchContext container = ContainerLaunchContext.newInstance(
      commonContainerSpec.getLocalResources(), myEnv, commands,
      myServiceData, commonContainerSpec.getTokens().duplicate(),
      applicationACLs);

  return container;
}
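This variant omits the user-classpath-first handling present in the hadoop, aliyun-oss-hadoop-fs, big-c, and hops versions: it never reads MRJobConfig.MAPREDUCE_JOB_USER_CLASSPATH_FIRST or sets CLASSPATH_PREPEND_DISTCACHE. In the forks that do support it, a job submitter opts in through configuration. A minimal sketch, assuming the standard Hadoop Configuration API and that the key string behind MRJobConfig.MAPREDUCE_JOB_USER_CLASSPATH_FIRST is "mapreduce.job.user.classpath.first":

import org.apache.hadoop.conf.Configuration;

public class UserClasspathFirstDemo {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // When true, the variants above set CLASSPATH_PREPEND_DISTCACHE=true
        // in the container environment so user classes win on the classpath.
        conf.setBoolean("mapreduce.job.user.classpath.first", true);
        System.out.println(
            conf.getBoolean("mapreduce.job.user.classpath.first", false)); // true
    }
}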
Project: hadoop-plus    File: TaskAttemptImpl.java
(Identical to the hadoop-2.6.0-cdh5.4.3 version of createContainerLaunchContext above.)
Project: FlexMap    File: TaskAttemptImpl.java
(Identical to the hadoop-2.6.0-cdh5.4.3 version of createContainerLaunchContext above.)
Project: hops    File: TaskAttemptImpl.java
(Identical to the hadoop version of createContainerLaunchContext above.)
Project: hadoop-TCP    File: TaskAttemptImpl.java
(Identical to the hadoop-2.6.0-cdh5.4.3 version of createContainerLaunchContext above.)
Project: hardfs    File: TaskAttemptImpl.java
(Identical to the hadoop-2.6.0-cdh5.4.3 version of createContainerLaunchContext above.)
Project: hadoop-on-lustre2    File: TaskAttemptImpl.java
(Identical to the hadoop-2.6.0-cdh5.4.3 version of createContainerLaunchContext above.)