Java code examples for the class org.apache.hadoop.util.ResourceCalculatorPlugin

Project: hadoop-2.6.0-cdh5.4.3    File: TestTTResourceReporting.java
/**
 * Test that verifies default values are configured and reported correctly.
 * 
 * @throws Exception
 */
public void testDefaultResourceValues()
    throws Exception {
  JobConf conf = new JobConf();
  try {
    // Memory values are disabled by default.
    conf.setClass(
        org.apache.hadoop.mapred.TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN,       
        DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
    setUpCluster(conf);
    JobConf jobConf = miniMRCluster.createJobConf();
    jobConf.setClass(
        org.apache.hadoop.mapred.TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN,
        DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
    runSleepJob(jobConf);
    verifyTestResults();
  } finally {
    tearDownCluster();
  }
}
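
DummyResourceCalculatorPlugin, used throughout these tests, is a test double that echoes values planted in the configuration instead of probing the machine; its source is not part of this listing. Below is a minimal sketch of that pattern, with illustrative class and key names rather than the real test class:

import org.apache.hadoop.conf.Configuration;

/** Illustrative stand-in that reports canned values from the config. */
public class CannedResourceCalculator {
  /** Hypothetical key; the real test plugin defines its own constants. */
  public static final String NUM_PROCESSORS_KEY = "test.numProcessors";

  private final Configuration conf;

  public CannedResourceCalculator(Configuration conf) {
    this.conf = conf;
  }

  /** Returns the planted processor count, or -1 when nothing was planted. */
  public int getNumProcessors() {
    return conf.getInt(NUM_PROCESSORS_KEY, -1);
  }
}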
Project: hadoop-EAR    File: TestTTResourceReporting.java
/**
 * Test that verifies default values are configured and reported correctly.
 * 
 * @throws Exception
 */
@Test(timeout=60000)
public void testDefaultResourceValues()
    throws Exception {
  JobConf conf = new JobConf();
  try {
    // Memory values are disabled by default.
    conf.setClass(
        org.apache.hadoop.mapred.TaskTracker.MAPRED_TASKTRACKER_MEMORY_CALCULATOR_PLUGIN_PROPERTY,
        DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
    setUpCluster(conf);
    JobConf jobConf = miniMRCluster.createJobConf();
    jobConf.setClass(
        org.apache.hadoop.mapred.TaskTracker.MAPRED_TASKTRACKER_MEMORY_CALCULATOR_PLUGIN_PROPERTY,
        DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
    runSleepJob(jobConf);
    verifyTestResults();
  } finally {
    tearDownCluster();
  }
}
Project: hadoop-EAR    File: CoronaTaskTracker.java
@Override
int getMaxActualSlots(JobConf conf, int numCpuOnTT, TaskType type) {
  Map<Integer, Map<ResourceType, Integer>> cpuToResourcePartitioning =
    CoronaConf.getUncachedCpuToResourcePartitioning(conf);
  if (numCpuOnTT == ResourceCalculatorPlugin.UNAVAILABLE) {
    numCpuOnTT = 1;
  }
  Map<ResourceType, Integer> resourceTypeToCountMap =
    ClusterNode.getResourceTypeToCountMap(numCpuOnTT,
                                          cpuToResourcePartitioning);
  switch (type) {
  case MAP:
    return resourceTypeToCountMap.get(ResourceType.MAP);
  case REDUCE:
    return resourceTypeToCountMap.get(ResourceType.REDUCE);
  default:
    throw new RuntimeException("getMaxActualSlots: Illegal type " + type);
  }
}
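
CoronaConf.getUncachedCpuToResourcePartitioning is not included in this listing; judging from how getMaxActualSlots consumes its result, the outer map keys a CPU count to a per-resource slot count. A hedged illustration of that shape, with made-up numbers:

import java.util.HashMap;
import java.util.Map;

// Illustrative only: a node with 8 CPUs partitions into 6 MAP and 4 REDUCE slots.
Map<ResourceType, Integer> slotsForEightCpus = new HashMap<ResourceType, Integer>();
slotsForEightCpus.put(ResourceType.MAP, 6);
slotsForEightCpus.put(ResourceType.REDUCE, 4);

Map<Integer, Map<ResourceType, Integer>> cpuToResourcePartitioning =
    new HashMap<Integer, Map<ResourceType, Integer>>();
cpuToResourcePartitioning.put(8, slotsForEightCpus);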
Project: hadoop-on-lustre    File: TestTTResourceReporting.java
/**
 * Test that verifies default values are configured and reported correctly.
 * 
 * @throws Exception
 */
public void testDefaultResourceValues()
    throws Exception {
  JobConf conf = new JobConf();
  try {
    // Memory values are disabled by default.
    conf.setClass(
        org.apache.hadoop.mapred.TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN,       
        DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
    setUpCluster(conf);
    JobConf jobConf = miniMRCluster.createJobConf();
    jobConf.setClass(
        org.apache.hadoop.mapred.TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN,
        DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
    runSleepJob(jobConf);
    verifyTestResults();
  } finally {
    tearDownCluster();
  }
}
Project: RDFS    File: TestTTResourceReporting.java
/**
 * Test that verifies default values are configured and reported correctly.
 * 
 * @throws Exception
 */
public void testDefaultResourceValues()
    throws Exception {
  JobConf conf = new JobConf();
  try {
    // Memory values are disabled by default.
    conf.setClass(
        org.apache.hadoop.mapred.TaskTracker.MAPRED_TASKTRACKER_MEMORY_CALCULATOR_PLUGIN_PROPERTY,
        DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
    setUpCluster(conf);
    JobConf jobConf = miniMRCluster.createJobConf();
    jobConf.setClass(
        org.apache.hadoop.mapred.TaskTracker.MAPRED_TASKTRACKER_MEMORY_CALCULATOR_PLUGIN_PROPERTY,
        DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
    runSleepJob(jobConf);
    verifyTestResults();
  } finally {
    tearDownCluster();
  }
}
Project: RDFS    File: CoronaTaskTracker.java
@Override
int getMaxActualSlots(JobConf conf, int numCpuOnTT, TaskType type) {
  Map<Integer, Map<ResourceType, Integer>> cpuToResourcePartitioning =
    CoronaConf.getUncachedCpuToResourcePartitioning(conf);
  if (numCpuOnTT == ResourceCalculatorPlugin.UNAVAILABLE) {
    numCpuOnTT = 1;
  }
  Map<ResourceType, Integer> resourceTypeToCountMap =
    ClusterNode.getResourceTypeToCountMap(numCpuOnTT,
                                          cpuToResourcePartitioning);
  switch (type) {
  case MAP:
    return resourceTypeToCountMap.get(ResourceType.MAP);
  case REDUCE:
    return resourceTypeToCountMap.get(ResourceType.REDUCE);
  default:
    throw new RuntimeException("getMaxActualSlots: Illegal type " + type);
  }
}
Project: hanoi-hadoop-2.0.0-cdh    File: TestTTResourceReporting.java
/**
 * Test that verifies default values are configured and reported correctly.
 * 
 * @throws Exception
 */
public void testDefaultResourceValues()
    throws Exception {
  JobConf conf = new JobConf();
  try {
    // Memory values are disabled by default.
    conf.setClass(
        org.apache.hadoop.mapred.TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN,       
        DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
    setUpCluster(conf);
    JobConf jobConf = miniMRCluster.createJobConf();
    jobConf.setClass(
        org.apache.hadoop.mapred.TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN,
        DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
    runSleepJob(jobConf);
    verifyTestResults();
  } finally {
    tearDownCluster();
  }
}
Project: hortonworks-extension    File: TestTTResourceReporting.java
/**
 * Test that verifies default values are configured and reported correctly.
 * 
 * @throws Exception
 */
public void testDefaultResourceValues()
    throws Exception {
  JobConf conf = new JobConf();
  try {
    // Memory values are disabled by default.
    conf.setClass(
        org.apache.hadoop.mapred.TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN,       
        DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
    setUpCluster(conf);
    JobConf jobConf = miniMRCluster.createJobConf();
    jobConf.setClass(
        org.apache.hadoop.mapred.TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN,
        DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
    runSleepJob(jobConf);
    verifyTestResults();
  } finally {
    tearDownCluster();
  }
}
Project: RDFS    File: Task.java
public void initialize(JobConf job, JobID id, 
                       Reporter reporter,
                       boolean useNewApi) throws IOException, 
                                                 ClassNotFoundException,
                                                 InterruptedException {
  jobContext = new JobContext(job, id, reporter);
  taskContext = new TaskAttemptContext(job, taskId, reporter);
  if (getState() == TaskStatus.State.UNASSIGNED) {
    setState(TaskStatus.State.RUNNING);
  }
  if (useNewApi) {
    LOG.debug("using new api for output committer");
    outputFormat =
      ReflectionUtils.newInstance(taskContext.getOutputFormatClass(), job);
    committer = outputFormat.getOutputCommitter(taskContext);
  } else {
    committer = conf.getOutputCommitter();
  }
  Path outputPath = FileOutputFormat.getOutputPath(conf);
  if (outputPath != null) {
    if ((committer instanceof FileOutputCommitter)) {
      FileOutputFormat.setWorkOutputPath(conf, 
        ((FileOutputCommitter)committer).getTempTaskOutputPath(taskContext));
    } else {
      FileOutputFormat.setWorkOutputPath(conf, outputPath);
    }
  }
  committer.setupTask(taskContext);
  Class<? extends ResourceCalculatorPlugin> clazz = conf.getClass(
      TaskTracker.MAPRED_TASKTRACKER_MEMORY_CALCULATOR_PLUGIN_PROPERTY,
          null, ResourceCalculatorPlugin.class);
  resourceCalculator = ResourceCalculatorPlugin
          .getResourceCalculatorPlugin(clazz, conf);
  LOG.info(" Using ResourceCalculatorPlugin : " + resourceCalculator);
  if (resourceCalculator != null) {
    initCpuCumulativeTime =
      resourceCalculator.getProcResourceValues().getCumulativeCpuTime();
  }
}
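
The cumulative CPU time captured at the end of initialize() is a baseline: the CPU later charged to the task is presumably the difference against a fresh reading. A sketch using only the fields and methods visible above:

// Sketch: somewhere in the task's later progress/counter update path.
if (resourceCalculator != null) {
  long nowCpuTime =
      resourceCalculator.getProcResourceValues().getCumulativeCpuTime();
  long cpuUsedSinceInit = nowCpuTime - initCpuCumulativeTime;
}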
Project: hadoop-2.6.0-cdh5.4.3    File: Task.java
public void initialize(JobConf job, JobID id, 
                       Reporter reporter,
                       boolean useNewApi) throws IOException, 
                                                 ClassNotFoundException,
                                                 InterruptedException {
  jobContext = new JobContextImpl(job, id, reporter);
  taskContext = new TaskAttemptContextImpl(job, taskId, reporter);
  if (getState() == TaskStatus.State.UNASSIGNED) {
    setState(TaskStatus.State.RUNNING);
  }
  if (useNewApi) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("using new api for output committer");
    }
    outputFormat =
      ReflectionUtils.newInstance(taskContext.getOutputFormatClass(), job);
    committer = outputFormat.getOutputCommitter(taskContext);
  } else {
    committer = conf.getOutputCommitter();
  }
  Path outputPath = FileOutputFormat.getOutputPath(conf);
  if (outputPath != null) {
    if ((committer instanceof FileOutputCommitter)) {
      FileOutputFormat.setWorkOutputPath(conf, 
        ((FileOutputCommitter)committer).getTempTaskOutputPath(taskContext));
    } else {
      FileOutputFormat.setWorkOutputPath(conf, outputPath);
    }
  }
  committer.setupTask(taskContext);
  Class<? extends ResourceCalculatorPlugin> clazz =
      conf.getClass(TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN,
          null, ResourceCalculatorPlugin.class);
  resourceCalculator = ResourceCalculatorPlugin
          .getResourceCalculatorPlugin(clazz, conf);
  LOG.info(" Using ResourceCalculatorPlugin : " + resourceCalculator);
  if (resourceCalculator != null) {
    initCpuCumulativeTime =
      resourceCalculator.getProcResourceValues().getCumulativeCpuTime();
  }
}
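
Note that null is passed as the default class, so getResourceCalculatorPlugin decides what happens when nothing is configured: in the MRv1 code base it instantiates a configured class reflectively and otherwise falls back by platform, returning null where unsupported (hence the null checks above). A sketch of that presumed factory logic, not copied from Hadoop:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;

public static ResourceCalculatorPlugin getResourceCalculatorPlugin(
    Class<? extends ResourceCalculatorPlugin> clazz, Configuration conf) {
  if (clazz != null) {
    return ReflectionUtils.newInstance(clazz, conf);
  }
  if (System.getProperty("os.name").startsWith("Linux")) {
    return new LinuxResourceCalculatorPlugin(); // reads /proc on Linux
  }
  return null; // no calculator available on this platform
}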
Project: hadoop-2.6.0-cdh5.4.3    File: TestTTResourceReporting.java
/**
 * Test that verifies that configured values are reported correctly.
 * 
 * @throws Exception
 */
public void testConfiguredResourceValues()
    throws Exception {
  JobConf conf = new JobConf();
  conf.setLong("totalVmemOnTT", 4 * 1024 * 1024 * 1024L);
  conf.setLong("totalPmemOnTT", 2 * 1024 * 1024 * 1024L);
  conf.setLong("mapSlotMemorySize", 1 * 512L);
  conf.setLong("reduceSlotMemorySize", 1 * 1024L);
  conf.setLong("availableVmemOnTT", 4 * 1024 * 1024 * 1024L);
  conf.setLong("availablePmemOnTT", 2 * 1024 * 1024 * 1024L);
  conf.setLong("cumulativeCpuTime", 10000L);
  conf.setLong("cpuFrequency", 2000000L);
  conf.setInt("numProcessors", 8);
  conf.setFloat("cpuUsage", 15.5F);
  conf.setLong("procCumulativeCpuTime", 1000L);
  conf.setLong("procVirtualMemorySize", 2 * 1024 * 1024 * 1024L);
  conf.setLong("procPhysicalMemorySize", 1024 * 1024 * 1024L);

  conf.setClass(
      org.apache.hadoop.mapred.TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN,       
      DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
  conf.setLong(DummyResourceCalculatorPlugin.MAXVMEM_TESTING_PROPERTY,
      4 * 1024 * 1024 * 1024L);
  conf.setLong(DummyResourceCalculatorPlugin.MAXPMEM_TESTING_PROPERTY,
      2 * 1024 * 1024 * 1024L);
  conf.setLong(JobTracker.MAPRED_CLUSTER_MAP_MEMORY_MB_PROPERTY, 512L);
  conf.setLong(JobTracker.MAPRED_CLUSTER_REDUCE_MEMORY_MB_PROPERTY, 1024L);
  conf.setLong(DummyResourceCalculatorPlugin.CUMULATIVE_CPU_TIME, 10000L);
  conf.setLong(DummyResourceCalculatorPlugin.CPU_FREQUENCY, 2000000L);
  conf.setInt(DummyResourceCalculatorPlugin.NUM_PROCESSORS, 8);
  conf.setFloat(DummyResourceCalculatorPlugin.CPU_USAGE, 15.5F);
  try {
    setUpCluster(conf);
    JobConf jobConf = miniMRCluster.createJobConf();
    jobConf.setMemoryForMapTask(1 * 1024L);
    jobConf.setMemoryForReduceTask(2 * 1024L);
    jobConf.setClass(
      org.apache.hadoop.mapred.TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN,
      DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
    jobConf.setLong(DummyResourceCalculatorPlugin.PROC_CUMULATIVE_CPU_TIME, 1000L);
    jobConf.setLong(DummyResourceCalculatorPlugin.PROC_VMEM_TESTING_PROPERTY,
                    2 * 1024 * 1024 * 1024L);
    jobConf.setLong(DummyResourceCalculatorPlugin.PROC_PMEM_TESTING_PROPERTY,
                    1024 * 1024 * 1024L);
    runSleepJob(jobConf);
    verifyTestResults();
  } finally {
    tearDownCluster();
  }
}
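
The memory values above are byte counts, and the placement of the L suffix matters: in 4 * 1024 * 1024 * 1024L the intermediate int product 4 * 1024 * 1024 = 4194304 still fits in 32 bits, and the final multiplication is performed in long arithmetic, so the expression yields 4 GiB without overflow:

long fourGiB = 4 * 1024 * 1024 * 1024L; // 4294967296L
int wrapped  = 4 * 1024 * 1024 * 1024;  // all-int arithmetic overflows to 0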
Project: hadoop-EAR    File: Task.java
public void initialize(JobConf job, JobID id, 
                       Reporter reporter,
                       boolean useNewApi) throws IOException, 
                                                 ClassNotFoundException,
                                                 InterruptedException {
  jobContext = new JobContext(job, id, reporter);
  taskContext = new TaskAttemptContext(job, taskId, reporter);
  if (getState() == TaskStatus.State.UNASSIGNED) {
    setState(TaskStatus.State.RUNNING);
  }
  if (useNewApi) {
    LOG.debug("using new api for output committer");
    outputFormat =
      ReflectionUtils.newInstance(taskContext.getOutputFormatClass(), job);
    committer = outputFormat.getOutputCommitter(taskContext);
  } else {
    committer = conf.getOutputCommitter();
  }
  Path outputPath = FileOutputFormat.getOutputPath(conf);
  if (outputPath != null) {
    if ((committer instanceof FileOutputCommitter)) {
      FileOutputFormat.setWorkOutputPath(conf, 
        ((FileOutputCommitter)committer).getTempTaskOutputPath(taskContext));
    } else {
      FileOutputFormat.setWorkOutputPath(conf, outputPath);
    }
  }
  committer.setupTask(taskContext);
  Class<? extends ResourceCalculatorPlugin> clazz = conf.getClass(
      TaskTracker.MAPRED_TASKTRACKER_MEMORY_CALCULATOR_PLUGIN_PROPERTY,
          null, ResourceCalculatorPlugin.class);
  resourceCalculator = ResourceCalculatorPlugin
          .getResourceCalculatorPlugin(clazz, conf);
  LOG.info(" Using ResourceCalculatorPlugin : " + resourceCalculator);
  if (resourceCalculator != null) {
    initCpuCumulativeTime =
      resourceCalculator.getProcResourceValues().getCumulativeCpuTime();
  }

  jmxThreadInfoTracker = new JMXThreadBasedMetrics();
  jmxThreadInfoTracker.registerThreadToTask(
      "MAIN_TASK", Thread.currentThread().getId());
  this.initJvmCpuCumulativeTime = 
      jmxThreadInfoTracker.getCumulativeCPUTime();

  cgResourceTracker = new CGroupResourceTracker(
      job, CGroupResourceTracker.RESOURCE_TRAKCER_TYPE.TASK,
      taskId.toString(), resourceCalculator);
}
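
JMXThreadBasedMetrics is a hadoop-EAR class not included in this listing; per-thread CPU accounting of this kind is typically layered on the standard ThreadMXBean API. A minimal sketch of that underlying mechanism, offered as an assumption about the wrapper rather than its source:

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;

ThreadMXBean threadMX = ManagementFactory.getThreadMXBean();
long mainTaskThreadId = Thread.currentThread().getId();
// Cumulative CPU time of that thread in nanoseconds, or -1 if unsupported.
long threadCpuNanos = threadMX.getThreadCpuTime(mainTaskThreadId);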
Project: hadoop-EAR    File: CGroupResourceTracker.java
public CGroupResourceTracker(
         JobConf conf, RESOURCE_TRAKCER_TYPE trackerType, 
         String target,
         ResourceCalculatorPlugin plugin) {
  this.resourceCalculaotr = plugin;

  boolean taskMemoryControlGroupEnabled = conf.getBoolean(
      TaskTracker.MAPRED_TASKTRACKER_CGROUP_MEM_ENABLE_PROPERTY,
      TaskTracker.DEFAULT_MAPRED_TASKTRACKER_CGROUP_MEM_ENABLE_PROPERTY);

  if (taskMemoryControlGroupEnabled) {
    if (MemoryControlGroup.isAvailable()) {
      switch (trackerType) {
        case JOB_TRACKER:
          String jtRootpath = conf.get(
              TaskTrackerMemoryControlGroup.CGROUP_MEM_JT_ROOT, 
              TaskTrackerMemoryControlGroup.DEFAULT_JT_ROOT);
          memControlGroup = new MemoryControlGroup(jtRootpath);

          if (isMemTrackerAvailable()) {
            LOG.info("A CGroupResourceTracker for JOB_TRACKER created.");
          }
          break;
        case TASK_TRACKER:
          String ttRootpath = conf.get(
              TaskTrackerMemoryControlGroup.CGROUP_MEM_TT_ROOT, 
              TaskTrackerMemoryControlGroup.DEFAULT_TT_ROOT);
          memControlGroup = new MemoryControlGroup(ttRootpath);

          if (isMemTrackerAvailable()) {
            LOG.info("A CGroupResourceTracker for TASK_TRACKER created.");
          }
          break;
        case TASK:
          String rootpath = conf.get(
            TaskTrackerMemoryControlGroup.CGROUP_MEM_ROOT_PROPERTY, 
            TaskTrackerMemoryControlGroup.DEFAULT_CGROUP_MEM_ROOT);
          MemoryControlGroup container = new MemoryControlGroup(rootpath);
          memControlGroup = container.getSubGroup(target);

          if (isMemTrackerAvailable()) {
            LOG.info("A CGroupResourceTracker for TASK:" + 
              target + " created.");
          }
          break;
      }
    }
  }
}
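
MemoryControlGroup is likewise not shown. Under the conventional cgroup v1 memory controller, figures of this kind come straight out of files beneath the group's directory; the sketch below assumes that layout (the file name is the kernel convention, not taken from this project):

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;

/** Reads the current memory usage of a cgroup v1 memory group, in bytes. */
static long readMemoryUsageBytes(String groupPath) throws IOException {
  BufferedReader reader = new BufferedReader(
      new FileReader(groupPath + "/memory.usage_in_bytes"));
  try {
    return Long.parseLong(reader.readLine().trim());
  } finally {
    reader.close();
  }
}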
Project: hadoop-EAR    File: TestTTCpuToTaskSlots.java
@Test
public void TestCpuToMapTasksConfig() throws Exception {
  JobConf conf = new JobConf();
  conf.set("mapred.tasktracker.map.tasks.maximum", "3");
  conf.set("mapred.tasktracker.reduce.tasks.maximum", "1");
  // Test with the original settings
  try {
    miniMRCluster =
        new MiniMRCluster(1, "file:///", 3, null, null, conf);
    taskTracker = miniMRCluster.getTaskTrackerRunner(0).getTaskTracker();
    Assert.assertEquals(3, taskTracker.getMaxCurrentMapTasks());
    Assert.assertEquals(1, taskTracker.getMaxCurrentReduceTasks());
  } finally {
    if (miniMRCluster != null) {
      miniMRCluster.shutdown();
    }
  }

  // Test with the # CPU -> mappers settings
  conf.setClass(org.apache.hadoop.mapred.TaskTracker.
      MAPRED_TASKTRACKER_MEMORY_CALCULATOR_PLUGIN_PROPERTY,
      DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
  conf.set("mapred.tasktracker.cpus.to.maptasks", "4:6, 8:9, 16:15");
  conf.set("mapred.tasktracker.cpus.to.reducetasks", "4:3, 8:7, 16:12");

  // 4 CPU -> 6 mappers, 3 reducers
  try {
    conf.set(DummyResourceCalculatorPlugin.NUM_PROCESSORS, "4");
    miniMRCluster =
        new MiniMRCluster(1, "file:///", 3, null, null, conf);
    taskTracker = miniMRCluster.getTaskTrackerRunner(0).getTaskTracker();
    Assert.assertEquals(6, taskTracker.getMaxCurrentMapTasks());
    Assert.assertEquals(3, taskTracker.getMaxCurrentReduceTasks());
  } finally {
    if (miniMRCluster != null) {
      miniMRCluster.shutdown();
    }
  }

  // 8 CPU -> 9 mappers, 7 reduces
  try {
    conf.set(DummyResourceCalculatorPlugin.NUM_PROCESSORS, "8");
    miniMRCluster =
        new MiniMRCluster(1, "file:///", 3, null, null, conf);
    taskTracker = miniMRCluster.getTaskTrackerRunner(0).getTaskTracker();
    Assert.assertEquals(9, taskTracker.getMaxCurrentMapTasks());
    Assert.assertEquals(7, taskTracker.getMaxCurrentReduceTasks());
  } finally {
    if (miniMRCluster != null) {
      miniMRCluster.shutdown();
    }
  }

  // 16 CPU -> 15 mappers, 12 reduces
  try {
    conf.set(DummyResourceCalculatorPlugin.NUM_PROCESSORS, "16");
    miniMRCluster =
        new MiniMRCluster(1, "file:///", 3, null, null, conf);
    taskTracker = miniMRCluster.getTaskTrackerRunner(0).getTaskTracker();
    Assert.assertEquals(15, taskTracker.getMaxCurrentMapTasks());
    Assert.assertEquals(12, taskTracker.getMaxCurrentReduceTasks());
  } finally {
    if (miniMRCluster != null) {
      miniMRCluster.shutdown();
    }
  }

  // 11 CPU -> 3 mappers, 1 reduce (back to default)
  try {
    conf.set(DummyResourceCalculatorPlugin.NUM_PROCESSORS, "11");
    miniMRCluster =
        new MiniMRCluster(1, "file:///", 3, null, null, conf);
    taskTracker = miniMRCluster.getTaskTrackerRunner(0).getTaskTracker();
    Assert.assertEquals(3, taskTracker.getMaxCurrentMapTasks());
    Assert.assertEquals(1, taskTracker.getMaxCurrentReduceTasks());
  } finally {
    if (miniMRCluster != null) {
      miniMRCluster.shutdown();
    }
  }
}
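
The cpus.to.maptasks and cpus.to.reducetasks values are comma-separated cpuCount:slots pairs, and the 11-CPU case above shows that only CPU counts listed exactly override the static maximums. A hedged sketch of a parser for that format (the helper is illustrative, not the TaskTracker's code):

import java.util.HashMap;
import java.util.Map;

/** Parses "4:6, 8:9, 16:15" into {4=6, 8=9, 16=15}. */
static Map<Integer, Integer> parseCpuToSlots(String value) {
  Map<Integer, Integer> cpuToSlots = new HashMap<Integer, Integer>();
  for (String pair : value.split(",")) {
    String[] kv = pair.trim().split(":");
    cpuToSlots.put(Integer.valueOf(kv[0].trim()),
                   Integer.valueOf(kv[1].trim()));
  }
  return cpuToSlots;
}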
Project: hadoop-EAR    File: TestTTResourceReporting.java
/**
 * Test that verifies that configured values are reported correctly.
 * 
 * @throws Exception
 */
@Test(timeout=60000)
public void testConfiguredResourceValues()
    throws Exception {
  JobConf conf = new JobConf();
  conf.setLong("totalVmemOnTT", 4 * 1024 * 1024 * 1024L);
  conf.setLong("totalPmemOnTT", 2 * 1024 * 1024 * 1024L);
  conf.setLong("mapSlotMemorySize", 1 * 512L);
  conf.setLong("reduceSlotMemorySize", 1 * 1024L);
  conf.setLong("availableVmemOnTT", 4 * 1024 * 1024 * 1024L);
  conf.setLong("availablePmemOnTT", 2 * 1024 * 1024 * 1024L);
  conf.setLong("cumulativeCpuTime", 10000L);
  conf.setLong("cpuFrequency", 2000000L);
  conf.setInt("numProcessors", 8);
  conf.setFloat("cpuUsage", 15.5F);
  conf.setLong("procCumulativeCpuTime", 1000L);
  conf.setLong("procVirtualMemorySize", 1 * 1024 * 1024 * 1024L);
  conf.setLong("procPhysicalMemorySize", 1024 * 1024 * 1024L);

  conf.setClass(
      org.apache.hadoop.mapred.TaskTracker.MAPRED_TASKTRACKER_MEMORY_CALCULATOR_PLUGIN_PROPERTY,
      DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
  conf.setLong(DummyResourceCalculatorPlugin.MAXVMEM_TESTING_PROPERTY,
      4 * 1024 * 1024 * 1024L);
  conf.setLong(DummyResourceCalculatorPlugin.MAXPMEM_TESTING_PROPERTY,
      2 * 1024 * 1024 * 1024L);
  conf.setLong(JobTracker.MAPRED_CLUSTER_MAP_MEMORY_MB_PROPERTY, 512L);
  conf.setLong(
      JobTracker.MAPRED_CLUSTER_REDUCE_MEMORY_MB_PROPERTY, 1024L);
  conf.setLong(DummyResourceCalculatorPlugin.CUMULATIVE_CPU_TIME, 10000L);
  conf.setLong(DummyResourceCalculatorPlugin.CPU_FREQUENCY, 2000000L);
  conf.setInt(DummyResourceCalculatorPlugin.NUM_PROCESSORS, 8);
  conf.setFloat(DummyResourceCalculatorPlugin.CPU_USAGE, 15.5F);
  try {
    setUpCluster(conf);
    JobConf jobConf = miniMRCluster.createJobConf();
    jobConf.setMemoryForMapTask(4 * 1024L);
    jobConf.setMemoryForReduceTask(4 * 1024L);
    jobConf.setClass(
      org.apache.hadoop.mapred.TaskTracker.MAPRED_TASKTRACKER_MEMORY_CALCULATOR_PLUGIN_PROPERTY,
      DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
    jobConf.setLong(DummyResourceCalculatorPlugin.PROC_CUMULATIVE_CPU_TIME, 1000L);
    jobConf.setLong(DummyResourceCalculatorPlugin.PROC_VMEM_TESTING_PROPERTY,
                    1 * 1024 * 1024 * 1024L);
    jobConf.setLong(DummyResourceCalculatorPlugin.PROC_PMEM_TESTING_PROPERTY,
                    1024 * 1024 * 1024L);
    runSleepJob(jobConf);
    verifyTestResults();
  } finally {
    tearDownCluster();
  }
}
Project: hadoop-on-lustre    File: Task.java
public void initialize(JobConf job, JobID id, 
                       Reporter reporter,
                       boolean useNewApi) throws IOException, 
                                                 ClassNotFoundException,
                                                 InterruptedException {
  jobContext = new JobContext(job, id, reporter);
  taskContext = new TaskAttemptContext(job, taskId, reporter);
  if (getState() == TaskStatus.State.UNASSIGNED) {
    setState(TaskStatus.State.RUNNING);
  }
  if (useNewApi) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("using new api for output committer");
    }
    outputFormat =
      ReflectionUtils.newInstance(taskContext.getOutputFormatClass(), job);
    committer = outputFormat.getOutputCommitter(taskContext);
  } else {
    committer = conf.getOutputCommitter();
  }
  Path outputPath = FileOutputFormat.getOutputPath(conf);
  if (outputPath != null) {
    if ((committer instanceof FileOutputCommitter)) {
      FileOutputFormat.setWorkOutputPath(conf, 
        ((FileOutputCommitter)committer).getTempTaskOutputPath(taskContext));
    } else {
      FileOutputFormat.setWorkOutputPath(conf, outputPath);
    }
  }
  committer.setupTask(taskContext);
  Class<? extends ResourceCalculatorPlugin> clazz =
      conf.getClass(TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN,
          null, ResourceCalculatorPlugin.class);
  resourceCalculator = ResourceCalculatorPlugin
          .getResourceCalculatorPlugin(clazz, conf);
  LOG.info(" Using ResourceCalculatorPlugin : " + resourceCalculator);
  if (resourceCalculator != null) {
    initCpuCumulativeTime =
      resourceCalculator.getProcResourceValues().getCumulativeCpuTime();
  }
}
Project: hadoop-on-lustre    File: TestTTResourceReporting.java
/**
 * Test that verifies that configured values are reported correctly.
 * 
 * @throws Exception
 */
public void testConfiguredResourceValues()
    throws Exception {
  JobConf conf = new JobConf();
  conf.setLong("totalVmemOnTT", 4 * 1024 * 1024 * 1024L);
  conf.setLong("totalPmemOnTT", 2 * 1024 * 1024 * 1024L);
  conf.setLong("mapSlotMemorySize", 1 * 512L);
  conf.setLong("reduceSlotMemorySize", 1 * 1024L);
  conf.setLong("availableVmemOnTT", 4 * 1024 * 1024 * 1024L);
  conf.setLong("availablePmemOnTT", 2 * 1024 * 1024 * 1024L);
  conf.setLong("cumulativeCpuTime", 10000L);
  conf.setLong("cpuFrequency", 2000000L);
  conf.setInt("numProcessors", 8);
  conf.setFloat("cpuUsage", 15.5F);
  conf.setLong("procCumulativeCpuTime", 1000L);
  conf.setLong("procVirtualMemorySize", 2 * 1024 * 1024 * 1024L);
  conf.setLong("procPhysicalMemorySize", 1024 * 1024 * 1024L);

  conf.setClass(
      org.apache.hadoop.mapred.TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN,       
      DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
  conf.setLong(DummyResourceCalculatorPlugin.MAXVMEM_TESTING_PROPERTY,
      4 * 1024 * 1024 * 1024L);
  conf.setLong(DummyResourceCalculatorPlugin.MAXPMEM_TESTING_PROPERTY,
      2 * 1024 * 1024 * 1024L);
  conf.setLong(JobTracker.MAPRED_CLUSTER_MAP_MEMORY_MB_PROPERTY, 512L);
  conf.setLong(JobTracker.MAPRED_CLUSTER_REDUCE_MEMORY_MB_PROPERTY, 1024L);
  conf.setLong(DummyResourceCalculatorPlugin.CUMULATIVE_CPU_TIME, 10000L);
  conf.setLong(DummyResourceCalculatorPlugin.CPU_FREQUENCY, 2000000L);
  conf.setInt(DummyResourceCalculatorPlugin.NUM_PROCESSORS, 8);
  conf.setFloat(DummyResourceCalculatorPlugin.CPU_USAGE, 15.5F);
  try {
    setUpCluster(conf);
    JobConf jobConf = miniMRCluster.createJobConf();
    jobConf.setMemoryForMapTask(1 * 1024L);
    jobConf.setMemoryForReduceTask(2 * 1024L);
    jobConf.setClass(
      org.apache.hadoop.mapred.TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN,
      DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
    jobConf.setLong(DummyResourceCalculatorPlugin.PROC_CUMULATIVE_CPU_TIME, 1000L);
    jobConf.setLong(DummyResourceCalculatorPlugin.PROC_VMEM_TESTING_PROPERTY,
                    2 * 1024 * 1024 * 1024L);
    jobConf.setLong(DummyResourceCalculatorPlugin.PROC_PMEM_TESTING_PROPERTY,
                    1024 * 1024 * 1024L);
    runSleepJob(jobConf);
    verifyTestResults();
  } finally {
    tearDownCluster();
  }
}
Project: RDFS    File: TestTTCpuToTaskSlots.java
@Test
public void TestCpuToMapTasksConfig() throws Exception {
  JobConf conf = new JobConf();
  conf.set("mapred.tasktracker.map.tasks.maximum", "3");
  conf.set("mapred.tasktracker.reduce.tasks.maximum", "1");
  // Test with the original settings
  try {
    miniMRCluster =
        new MiniMRCluster(1, "file:///", 3, null, null, conf);
    taskTracker = miniMRCluster.getTaskTrackerRunner(0).getTaskTracker();
    Assert.assertEquals(3, taskTracker.getMaxCurrentMapTasks());
    Assert.assertEquals(1, taskTracker.getMaxCurrentReduceTasks());
  } finally {
    if (miniMRCluster != null) {
      miniMRCluster.shutdown();
    }
  }

  // Test with the # CPU -> mappers settings
  conf.setClass(org.apache.hadoop.mapred.TaskTracker.
      MAPRED_TASKTRACKER_MEMORY_CALCULATOR_PLUGIN_PROPERTY,
      DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
  conf.set("mapred.tasktracker.cpus.to.maptasks", "4:6, 8:9, 16:15");
  conf.set("mapred.tasktracker.cpus.to.reducetasks", "4:3, 8:7, 16:12");

  // 4 CPU -> 6 mappers, 3 reducers
  try {
    conf.set(DummyResourceCalculatorPlugin.NUM_PROCESSORS, "4");
    miniMRCluster =
        new MiniMRCluster(1, "file:///", 3, null, null, conf);
    taskTracker = miniMRCluster.getTaskTrackerRunner(0).getTaskTracker();
    Assert.assertEquals(6, taskTracker.getMaxCurrentMapTasks());
    Assert.assertEquals(3, taskTracker.getMaxCurrentReduceTasks());
  } finally {
    if (miniMRCluster != null) {
      miniMRCluster.shutdown();
    }
  }

  // 8 CPU -> 9 mappers, 7 reduces
  try {
    conf.set(DummyResourceCalculatorPlugin.NUM_PROCESSORS, "8");
    miniMRCluster =
        new MiniMRCluster(1, "file:///", 3, null, null, conf);
    taskTracker = miniMRCluster.getTaskTrackerRunner(0).getTaskTracker();
    Assert.assertEquals(9, taskTracker.getMaxCurrentMapTasks());
    Assert.assertEquals(7, taskTracker.getMaxCurrentReduceTasks());
  } finally {
    if (miniMRCluster != null) {
      miniMRCluster.shutdown();
    }
  }

  // 16 CPU -> 15 mappers, 12 reduces
  try {
    conf.set(DummyResourceCalculatorPlugin.NUM_PROCESSORS, "16");
    miniMRCluster =
        new MiniMRCluster(1, "file:///", 3, null, null, conf);
    taskTracker = miniMRCluster.getTaskTrackerRunner(0).getTaskTracker();
    Assert.assertEquals(15, taskTracker.getMaxCurrentMapTasks());
    Assert.assertEquals(12, taskTracker.getMaxCurrentReduceTasks());
  } finally {
    if (miniMRCluster != null) {
      miniMRCluster.shutdown();
    }
  }

  // 11 CPU -> 3 mappers, 1 reduce (back to default)
  try {
    conf.set(DummyResourceCalculatorPlugin.NUM_PROCESSORS, "11");
    miniMRCluster =
        new MiniMRCluster(1, "file:///", 3, null, null, conf);
    taskTracker = miniMRCluster.getTaskTrackerRunner(0).getTaskTracker();
    Assert.assertEquals(3, taskTracker.getMaxCurrentMapTasks());
    Assert.assertEquals(1, taskTracker.getMaxCurrentReduceTasks());
  } finally {
    if (miniMRCluster != null) {
      miniMRCluster.shutdown();
    }
  }
}
Project: RDFS    File: TestTTResourceReporting.java
/**
 * Test that verifies that configured values are reported correctly.
 * 
 * @throws Exception
 */
public void testConfiguredResourceValues()
    throws Exception {
  JobConf conf = new JobConf();
  conf.setLong("totalVmemOnTT", 4 * 1024 * 1024 * 1024L);
  conf.setLong("totalPmemOnTT", 2 * 1024 * 1024 * 1024L);
  conf.setLong("mapSlotMemorySize", 1 * 512L);
  conf.setLong("reduceSlotMemorySize", 1 * 1024L);
  conf.setLong("availableVmemOnTT", 4 * 1024 * 1024 * 1024L);
  conf.setLong("availablePmemOnTT", 2 * 1024 * 1024 * 1024L);
  conf.setLong("cumulativeCpuTime", 10000L);
  conf.setLong("cpuFrequency", 2000000L);
  conf.setInt("numProcessors", 8);
  conf.setFloat("cpuUsage", 15.5F);
  conf.setLong("procCumulativeCpuTime", 1000L);
  conf.setLong("procVirtualMemorySize", 2 * 1024 * 1024 * 1024L);
  conf.setLong("procPhysicalMemorySize", 1024 * 1024 * 1024L);

  conf.setClass(
      org.apache.hadoop.mapred.TaskTracker.MAPRED_TASKTRACKER_MEMORY_CALCULATOR_PLUGIN_PROPERTY,
      DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
  conf.setLong(DummyResourceCalculatorPlugin.MAXVMEM_TESTING_PROPERTY,
      4 * 1024 * 1024 * 1024L);
  conf.setLong(DummyResourceCalculatorPlugin.MAXPMEM_TESTING_PROPERTY,
      2 * 1024 * 1024 * 1024L);
  conf.setLong(JobTracker.MAPRED_CLUSTER_MAP_MEMORY_MB_PROPERTY, 512L);
  conf.setLong(
      JobTracker.MAPRED_CLUSTER_REDUCE_MEMORY_MB_PROPERTY, 1024L);
  conf.setLong(DummyResourceCalculatorPlugin.CUMULATIVE_CPU_TIME, 10000L);
  conf.setLong(DummyResourceCalculatorPlugin.CPU_FREQUENCY, 2000000L);
  conf.setInt(DummyResourceCalculatorPlugin.NUM_PROCESSORS, 8);
  conf.setFloat(DummyResourceCalculatorPlugin.CPU_USAGE, 15.5F);
  try {
    setUpCluster(conf);
    JobConf jobConf = miniMRCluster.createJobConf();
    jobConf.setMemoryForMapTask(1 * 1024L);
    jobConf.setMemoryForReduceTask(2 * 1024L);
    jobConf.setClass(
      org.apache.hadoop.mapred.TaskTracker.MAPRED_TASKTRACKER_MEMORY_CALCULATOR_PLUGIN_PROPERTY,
      DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
    jobConf.setLong(DummyResourceCalculatorPlugin.PROC_CUMULATIVE_CPU_TIME, 1000L);
    jobConf.setLong(DummyResourceCalculatorPlugin.PROC_VMEM_TESTING_PROPERTY,
                    2 * 1024 * 1024 * 1024L);
    jobConf.setLong(DummyResourceCalculatorPlugin.PROC_PMEM_TESTING_PROPERTY,
                    1024 * 1024 * 1024L);
    runSleepJob(jobConf);
    verifyTestResults();
  } finally {
    tearDownCluster();
  }
}
Project: hanoi-hadoop-2.0.0-cdh    File: Task.java
public void initialize(JobConf job, JobID id, 
                       Reporter reporter,
                       boolean useNewApi) throws IOException, 
                                                 ClassNotFoundException,
                                                 InterruptedException {
  jobContext = new JobContextImpl(job, id, reporter);
  taskContext = new TaskAttemptContextImpl(job, taskId, reporter);
  if (getState() == TaskStatus.State.UNASSIGNED) {
    setState(TaskStatus.State.RUNNING);
  }
  if (useNewApi) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("using new api for output committer");
    }
    outputFormat =
      ReflectionUtils.newInstance(taskContext.getOutputFormatClass(), job);
    committer = outputFormat.getOutputCommitter(taskContext);
  } else {
    committer = conf.getOutputCommitter();
  }
  Path outputPath = FileOutputFormat.getOutputPath(conf);
  if (outputPath != null) {
    if ((committer instanceof FileOutputCommitter)) {
      FileOutputFormat.setWorkOutputPath(conf, 
        ((FileOutputCommitter)committer).getTempTaskOutputPath(taskContext));
    } else {
      FileOutputFormat.setWorkOutputPath(conf, outputPath);
    }
  }
  committer.setupTask(taskContext);
  Class<? extends ResourceCalculatorPlugin> clazz =
      conf.getClass(TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN,
          null, ResourceCalculatorPlugin.class);
  resourceCalculator = ResourceCalculatorPlugin
          .getResourceCalculatorPlugin(clazz, conf);
  LOG.info(" Using ResourceCalculatorPlugin : " + resourceCalculator);
  if (resourceCalculator != null) {
    initCpuCumulativeTime =
      resourceCalculator.getProcResourceValues().getCumulativeCpuTime();
  }
}
Project: hanoi-hadoop-2.0.0-cdh    File: TestTTResourceReporting.java
/**
 * Test that verifies that configured values are reported correctly.
 * 
 * @throws Exception
 */
public void testConfiguredResourceValues()
    throws Exception {
  JobConf conf = new JobConf();
  conf.setLong("totalVmemOnTT", 4 * 1024 * 1024 * 1024L);
  conf.setLong("totalPmemOnTT", 2 * 1024 * 1024 * 1024L);
  conf.setLong("mapSlotMemorySize", 1 * 512L);
  conf.setLong("reduceSlotMemorySize", 1 * 1024L);
  conf.setLong("availableVmemOnTT", 4 * 1024 * 1024 * 1024L);
  conf.setLong("availablePmemOnTT", 2 * 1024 * 1024 * 1024L);
  conf.setLong("cumulativeCpuTime", 10000L);
  conf.setLong("cpuFrequency", 2000000L);
  conf.setInt("numProcessors", 8);
  conf.setFloat("cpuUsage", 15.5F);
  conf.setLong("procCumulativeCpuTime", 1000L);
  conf.setLong("procVirtualMemorySize", 2 * 1024 * 1024 * 1024L);
  conf.setLong("procPhysicalMemorySize", 1024 * 1024 * 1024L);

  conf.setClass(
      org.apache.hadoop.mapred.TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN,       
      DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
  conf.setLong(DummyResourceCalculatorPlugin.MAXVMEM_TESTING_PROPERTY,
      4 * 1024 * 1024 * 1024L);
  conf.setLong(DummyResourceCalculatorPlugin.MAXPMEM_TESTING_PROPERTY,
      2 * 1024 * 1024 * 1024L);
  conf.setLong(JobTracker.MAPRED_CLUSTER_MAP_MEMORY_MB_PROPERTY, 512L);
  conf.setLong(JobTracker.MAPRED_CLUSTER_REDUCE_MEMORY_MB_PROPERTY, 1024L);
  conf.setLong(DummyResourceCalculatorPlugin.CUMULATIVE_CPU_TIME, 10000L);
  conf.setLong(DummyResourceCalculatorPlugin.CPU_FREQUENCY, 2000000L);
  conf.setInt(DummyResourceCalculatorPlugin.NUM_PROCESSORS, 8);
  conf.setFloat(DummyResourceCalculatorPlugin.CPU_USAGE, 15.5F);
  try {
    setUpCluster(conf);
    JobConf jobConf = miniMRCluster.createJobConf();
    jobConf.setMemoryForMapTask(1 * 1024L);
    jobConf.setMemoryForReduceTask(2 * 1024L);
    jobConf.setClass(
      org.apache.hadoop.mapred.TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN,
      DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
    jobConf.setLong(DummyResourceCalculatorPlugin.PROC_CUMULATIVE_CPU_TIME, 1000L);
    jobConf.setLong(DummyResourceCalculatorPlugin.PROC_VMEM_TESTING_PROPERTY,
                    2 * 1024 * 1024 * 1024L);
    jobConf.setLong(DummyResourceCalculatorPlugin.PROC_PMEM_TESTING_PROPERTY,
                    1024 * 1024 * 1024L);
    runSleepJob(jobConf);
    verifyTestResults();
  } finally {
    tearDownCluster();
  }
}
Project: mammoth    File: Task.java
public void initialize(JobConf job, JobID id, 
                       Reporter reporter,
                       boolean useNewApi) throws IOException, 
                                                 ClassNotFoundException,
                                                 InterruptedException {
  jobContext = new JobContext(job, id, reporter);
  taskContext = new TaskAttemptContext(job, taskId, reporter);
  if (getState() == TaskStatus.State.UNASSIGNED) {
    setState(TaskStatus.State.RUNNING);
  }
  if (useNewApi) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("using new api for output committer");
    }
    outputFormat =
      ReflectionUtils.newInstance(taskContext.getOutputFormatClass(), job);
    committer = outputFormat.getOutputCommitter(taskContext);
  } else {
    committer = conf.getOutputCommitter();
  }
  Path outputPath = FileOutputFormat.getOutputPath(conf);
  if (outputPath != null) {
    if ((committer instanceof FileOutputCommitter)) {
      FileOutputFormat.setWorkOutputPath(conf, 
        ((FileOutputCommitter)committer).getTempTaskOutputPath(taskContext));
    } else {
      FileOutputFormat.setWorkOutputPath(conf, outputPath);
    }
  }
  committer.setupTask(taskContext);
  Class<? extends ResourceCalculatorPlugin> clazz =
      conf.getClass(TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN,
          null, ResourceCalculatorPlugin.class);
  resourceCalculator = ResourceCalculatorPlugin
          .getResourceCalculatorPlugin(clazz, conf);
  LOG.info(" Using ResourceCalculatorPlugin : " + resourceCalculator);
  if (resourceCalculator != null) {
    initCpuCumulativeTime =
      resourceCalculator.getProcResourceValues().getCumulativeCpuTime();
  }
}
Project: hortonworks-extension    File: Task.java
public void initialize(JobConf job, JobID id, 
                       Reporter reporter,
                       boolean useNewApi) throws IOException, 
                                                 ClassNotFoundException,
                                                 InterruptedException {
  jobContext = new JobContext(job, id, reporter);
  taskContext = new TaskAttemptContext(job, taskId, reporter);
  if (getState() == TaskStatus.State.UNASSIGNED) {
    setState(TaskStatus.State.RUNNING);
  }
  if (useNewApi) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("using new api for output committer");
    }
    outputFormat =
      ReflectionUtils.newInstance(taskContext.getOutputFormatClass(), job);
    committer = outputFormat.getOutputCommitter(taskContext);
  } else {
    committer = conf.getOutputCommitter();
  }
  Path outputPath = FileOutputFormat.getOutputPath(conf);
  if (outputPath != null) {
    if ((committer instanceof FileOutputCommitter)) {
      FileOutputFormat.setWorkOutputPath(conf, 
        ((FileOutputCommitter)committer).getTempTaskOutputPath(taskContext));
    } else {
      FileOutputFormat.setWorkOutputPath(conf, outputPath);
    }
  }
  committer.setupTask(taskContext);
  Class<? extends ResourceCalculatorPlugin> clazz =
      conf.getClass(TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN,
          null, ResourceCalculatorPlugin.class);
  resourceCalculator = ResourceCalculatorPlugin
          .getResourceCalculatorPlugin(clazz, conf);
  LOG.info(" Using ResourceCalculatorPlugin : " + resourceCalculator);
  if (resourceCalculator != null) {
    initCpuCumulativeTime =
      resourceCalculator.getProcResourceValues().getCumulativeCpuTime();
  }
}
Project: hortonworks-extension    File: TestTTResourceReporting.java
/**
 * Test that verifies that configured values are reported correctly.
 * 
 * @throws Exception
 */
public void testConfiguredResourceValues()
    throws Exception {
  JobConf conf = new JobConf();
  conf.setLong("totalVmemOnTT", 4 * 1024 * 1024 * 1024L);
  conf.setLong("totalPmemOnTT", 2 * 1024 * 1024 * 1024L);
  conf.setLong("mapSlotMemorySize", 1 * 512L);
  conf.setLong("reduceSlotMemorySize", 1 * 1024L);
  conf.setLong("availableVmemOnTT", 4 * 1024 * 1024 * 1024L);
  conf.setLong("availablePmemOnTT", 2 * 1024 * 1024 * 1024L);
  conf.setLong("cumulativeCpuTime", 10000L);
  conf.setLong("cpuFrequency", 2000000L);
  conf.setInt("numProcessors", 8);
  conf.setFloat("cpuUsage", 15.5F);
  conf.setLong("procCumulativeCpuTime", 1000L);
  conf.setLong("procVirtualMemorySize", 2 * 1024 * 1024 * 1024L);
  conf.setLong("procPhysicalMemorySize", 1024 * 1024 * 1024L);

  conf.setClass(
      org.apache.hadoop.mapred.TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN,       
      DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
  conf.setLong(DummyResourceCalculatorPlugin.MAXVMEM_TESTING_PROPERTY,
      4 * 1024 * 1024 * 1024L);
  conf.setLong(DummyResourceCalculatorPlugin.MAXPMEM_TESTING_PROPERTY,
      2 * 1024 * 1024 * 1024L);
  conf.setLong(JobTracker.MAPRED_CLUSTER_MAP_MEMORY_MB_PROPERTY, 512L);
  conf.setLong(JobTracker.MAPRED_CLUSTER_REDUCE_MEMORY_MB_PROPERTY, 1024L);
  conf.setLong(DummyResourceCalculatorPlugin.CUMULATIVE_CPU_TIME, 10000L);
  conf.setLong(DummyResourceCalculatorPlugin.CPU_FREQUENCY, 2000000L);
  conf.setInt(DummyResourceCalculatorPlugin.NUM_PROCESSORS, 8);
  conf.setFloat(DummyResourceCalculatorPlugin.CPU_USAGE, 15.5F);
  try {
    setUpCluster(conf);
    JobConf jobConf = miniMRCluster.createJobConf();
    jobConf.setMemoryForMapTask(1 * 1024L);
    jobConf.setMemoryForReduceTask(2 * 1024L);
    jobConf.setClass(
      org.apache.hadoop.mapred.TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN,
      DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
    jobConf.setLong(DummyResourceCalculatorPlugin.PROC_CUMULATIVE_CPU_TIME, 1000L);
    jobConf.setLong(DummyResourceCalculatorPlugin.PROC_VMEM_TESTING_PROPERTY,
                    2 * 1024 * 1024 * 1024L);
    jobConf.setLong(DummyResourceCalculatorPlugin.PROC_PMEM_TESTING_PROPERTY,
                    1024 * 1024 * 1024L);
    runSleepJob(jobConf);
    verifyTestResults();
  } finally {
    tearDownCluster();
  }
}