Java class org.apache.hadoop.mapred.Task.Counter code examples
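All of the snippets below follow the same basic pattern: framework code resolves the Counters.Counter object behind a Task.Counter enum value through its reporter (or a Counters instance), then increments or sets it. The following minimal sketch illustrates that pattern; it is not taken from any of the listed projects, the class and method names are purely illustrative, and it is declared in the org.apache.hadoop.mapred package only because Task.Counter is framework-internal in these Hadoop versions.

package org.apache.hadoop.mapred;

// Illustrative sketch, not from the projects below: look up the Counters.Counter
// behind a Task.Counter enum value via the Reporter, then update it.
public class TaskCounterUsageSketch {
  static void recordSpill(Reporter reporter, long spilledRecords) {
    Counters.Counter spilled = reporter.getCounter(Task.Counter.SPILLED_RECORDS);
    if (spilled != null) {
      spilled.increment(spilledRecords);   // add to the running total
    }
    // A counter can also be bumped without fetching it first:
    reporter.incrCounter(Task.Counter.COMBINE_INPUT_RECORDS, 1);
  }
}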

Project: hadoop-EAR    File: CoronaJobTracker.java
public Map<ResourceType, List<Long>> getResourceUsageMap() {
  if (this.job == null) {
    return getStdResourceUsageMap();
  }
  Counters counters = job.getCounters();

  Map<ResourceType, List<Long>> resourceUsageMap = new HashMap<ResourceType, List<Long>>();

  List<Long> mapperUsages = new ArrayList<Long>();
  mapperUsages.add(counters.getCounter(JobInProgress.Counter.MAX_MAP_MEM_BYTES));
  mapperUsages.add(counters.getCounter(JobInProgress.Counter.MAX_MAP_INST_MEM_BYTES));
  mapperUsages.add(counters.getCounter(JobInProgress.Counter.MAX_MAP_RSS_MEM_BYTES));

  List<Long> reducerUsages = new ArrayList<Long>();
  reducerUsages.add(counters.getCounter(JobInProgress.Counter.MAX_REDUCE_MEM_BYTES));
  reducerUsages.add(counters.getCounter(JobInProgress.Counter.MAX_REDUCE_INST_MEM_BYTES));
  reducerUsages.add(counters.getCounter(JobInProgress.Counter.MAX_REDUCE_RSS_MEM_BYTES));

  resourceUsageMap.put(ResourceType.MAP, mapperUsages);
  resourceUsageMap.put(ResourceType.REDUCE, reducerUsages);
  resourceUsageMap.put(ResourceType.JOBTRACKER, new ArrayList<Long>());

  return resourceUsageMap;
}
Project: RDFS    File: ReduceTask.java
public SkippingReduceValuesIterator(RawKeyValueIterator in,
    RawComparator<KEY> comparator, Class<KEY> keyClass,
    Class<VALUE> valClass, Configuration conf, TaskReporter reporter,
    TaskUmbilicalProtocol umbilical) throws IOException {
  super(in, comparator, keyClass, valClass, conf, reporter);
  this.umbilical = umbilical;
  this.skipGroupCounter =
    reporter.getCounter(Counter.REDUCE_SKIPPED_GROUPS);
  this.skipRecCounter =
    reporter.getCounter(Counter.REDUCE_SKIPPED_RECORDS);
  this.toWriteSkipRecs = toWriteSkipRecs() &&
    SkipBadRecords.getSkipOutputPath(conf)!=null;
  this.keyClass = keyClass;
  this.valClass = valClass;
  this.reporter = reporter;
  skipIt = getSkipRanges().skipRangeIterator();
  mayBeSkip();
}
Project: mammoth    File: DefaultJvmMemoryManager.java
MapSpiller(JobConf job, TaskAttemptID tid, TaskReporter rep) throws ClassNotFoundException {
  reporter = rep;
  conf = job;
  this.taskId = tid;
  mapOutputFile.setConf(conf);
  mapOutputByteCounter = reporter.getCounter(MAP_OUTPUT_BYTES);
  Counters.Counter combineInputCounter =
    reporter.getCounter(COMBINE_INPUT_RECORDS);
  combineOutputCounter = reporter.getCounter(COMBINE_OUTPUT_RECORDS);
  fileOutputByteCounter = reporter.getCounter(MAP_OUTPUT_MATERIALIZED_BYTES);
  // combiner
  combinerRunner = CombinerRunner.create(conf, taskId,
                                         combineInputCounter,
                                         reporter, null);
  if (combinerRunner != null) {
    combineCollector = new CombineOutputCollector(combineOutputCounter, reporter, conf);
  } else {
    combineCollector = null;
  }
  indexCacheList = new ArrayList<SpillRecord>();
  spilledRecordsCounter = reporter.getCounter(Counter.SPILLED_RECORDS);
}
Project: hadoop-EAR    File: MapSpillSortCounters.java
public void finalCounterUpdate() {
  setCounterValue(Counter.MAP_SPILL_NUMBER, numSpillsVal);
  setCounterValue(Counter.MAP_SPILL_CPU, mapSpillCPUVal);
  setCounterValue(Counter.MAP_SPILL_WALLCLOCK, mapSpillWallClockVal);
  setCounterValue(Counter.MAP_SPILL_BYTES, mapSpillBytesVal);
  setCounterValue(Counter.MAP_MEM_SORT_CPU, mapMemSortCPUVal);
  setCounterValue(Counter.MAP_MEM_SORT_WALLCLOCK, mapMemSortWallClockVal);
  setCounterValue(Counter.MAP_MERGE_CPU, mapMergeCPUVal);
  setCounterValue(Counter.MAP_MERGE_WALLCLOCK, mapMergeWallClockVal);
  setCounterValue(Counter.MAP_SPILL_SINGLERECORD_NUM, mapSpillSingleRecordNum);

  setCounterValue(Counter.MAP_SPILL_CPU_JVM, mapSpillJVMCPUVal);
  setCounterValue(Counter.MAP_MEM_SORT_CPU_JVM, mapMemSortJVMCPUVal);
  setCounterValue(Counter.MAP_MERGE_CPU_JVM, mapMergeJVMCPUVal);
}
Project: hadoop-EAR    File: MapTask.java
SkippingRecordReader(RecordReader<K,V> raw, TaskUmbilicalProtocol umbilical,
                     TaskReporter reporter) throws IOException{
  super(raw, reporter);
  this.umbilical = umbilical;
  this.skipRecCounter = reporter.getCounter(Counter.MAP_SKIPPED_RECORDS);
  this.toWriteSkipRecs = toWriteSkipRecs() &&  
    SkipBadRecords.getSkipOutputPath(conf)!=null;
  skipIt = getSkipRanges().skipRangeIterator();
}
Project: hadoop-EAR    File: MapTask.java
@Override
public void run(final JobConf job, final TaskUmbilicalProtocol umbilical)
  throws IOException, ClassNotFoundException, InterruptedException {
  this.umbilical = umbilical;
  taskStartTime = System.currentTimeMillis();

  // start thread that will handle communication with parent
  TaskReporter reporter = new TaskReporter(getProgress(), umbilical);
  reporter.startCommunicationThread();
  boolean useNewApi = job.getUseNewMapper();
  initialize(job, getJobID(), reporter, useNewApi);

  // check if it is a cleanupJobTask
  if (jobCleanup) {
    runJobCleanupTask(umbilical, reporter);
    return;
  }
  if (jobSetup) {
    runJobSetupTask(umbilical, reporter);
    return;
  }
  if (taskCleanup) {
    runTaskCleanupTask(umbilical, reporter);
    return;
  }

  if (useNewApi) {
    runNewMapper(job, split, umbilical, reporter);
  } else {
    runOldMapper(job, split, umbilical, reporter);
  }
  taskEndTime = System.currentTimeMillis();
  Counters.Counter taskWallClock = reporter.getCounter(Counter.MAP_TASK_WALLCLOCK);
  taskWallClock.setValue(taskEndTime - taskStartTime);
  done(umbilical, reporter);
}
Project: hadoop-EAR    File: MapTask.java
public void finalCounterUpdate() {
  setCounterValue(Counter.MAP_SPILL_NUMBER, numSpillsVal);
  setCounterValue(Counter.MAP_SPILL_CPU, mapSpillCPUVal);
  setCounterValue(Counter.MAP_SPILL_WALLCLOCK, mapSpillWallClockVal);
  setCounterValue(Counter.MAP_SPILL_BYTES, mapSpillBytesVal);
  setCounterValue(Counter.MAP_MEM_SORT_CPU, mapMemSortCPUVal);
  setCounterValue(Counter.MAP_MEM_SORT_WALLCLOCK, mapMemSortWallClockVal);
  setCounterValue(Counter.MAP_MERGE_CPU, mapMergeCPUVal);
  setCounterValue(Counter.MAP_MERGE_WALLCLOCK, mapMergeWallClockVal);

  setCounterValue(Counter.MAP_SPILL_CPU_JVM, mapSpillJVMCPUVal);
  setCounterValue(Counter.MAP_MEM_SORT_CPU_JVM, mapMemSortJVMCPUVal);
  setCounterValue(Counter.MAP_MERGE_CPU_JVM, mapMergeJVMCPUVal);
}
Project: hadoop-EAR    File: SessionDriver.java
public void incCMClientRetryCounter () {
  if (iface instanceof CoronaJobTracker) {
    Counters jobCounters = ((CoronaJobTracker)iface).getJobCounters();
    if (jobCounters != null) {
      LOG.info("inc retry session counter");
      jobCounters.incrCounter(JobInProgress.Counter.NUM_SESSION_DRIVER_CM_CLIENT_RETRY, 1);
    }
  }
}
Project: hadoop-EAR    File: CoronaJobTracker.java
void updateRJTFailoverCounters() {
  if (job == null || 
      stateFetcher.jtFailoverMetrics.restartNum == 0) {
    return;
  }

  job.jobCounters.findCounter(JobInProgress.Counter.NUM_RJT_FAILOVER).
    setValue(stateFetcher.jtFailoverMetrics.restartNum);
  job.jobCounters.findCounter(JobInProgress.Counter.STATE_FETCH_COST_MILLIS).
    setValue(stateFetcher.jtFailoverMetrics.fetchStateCost);

  if (stateFetcher.jtFailoverMetrics.savedMappers > 0) {
    job.jobCounters.findCounter(JobInProgress.Counter.NUM_SAVED_MAPPERS).
      setValue(stateFetcher.jtFailoverMetrics.savedMappers);
    job.jobCounters.findCounter(JobInProgress.Counter.SAVED_MAP_CPU_MILLIS).
      setValue(stateFetcher.jtFailoverMetrics.savedMapCPU);
    job.jobCounters.findCounter(JobInProgress.Counter.SAVED_MAP_WALLCLOCK_MILLIS).
      setValue(stateFetcher.jtFailoverMetrics.savedMapWallclock);
  }
  if (stateFetcher.jtFailoverMetrics.savedReducers > 0) {
    job.jobCounters.findCounter(JobInProgress.Counter.NUM_SAVED_REDUCERS).
      setValue(stateFetcher.jtFailoverMetrics.savedReducers);
    job.jobCounters.findCounter(JobInProgress.Counter.SAVED_REDUCE_CPU_MILLIS).
      setValue(stateFetcher.jtFailoverMetrics.savedReduceCPU);
    job.jobCounters.findCounter(JobInProgress.Counter.SAVED_REDUCE_WALLCLOCK_MILLIS).
      setValue(stateFetcher.jtFailoverMetrics.savedReduceWallclock);
  }
}
Project: RDFS    File: MapSpillSortCounters.java
public void finalCounterUpdate() {
  setCounterValue(Counter.MAP_SPILL_NUMBER, numSpillsVal);
  setCounterValue(Counter.MAP_SPILL_CPU, mapSpillCPUVal);
  setCounterValue(Counter.MAP_SPILL_WALLCLOCK, mapSpillWallClockVal);
  setCounterValue(Counter.MAP_SPILL_BYTES, mapSpillBytesVal);
  setCounterValue(Counter.MAP_MEM_SORT_CPU, mapMemSortCPUVal);
  setCounterValue(Counter.MAP_MEM_SORT_WALLCLOCK, mapMemSortWallClockVal);
  setCounterValue(Counter.MAP_MERGE_CPU, mapMergeCPUVal);
  setCounterValue(Counter.MAP_MERGE_WALLCLOCK, mapMergeWallClockVal);
  setCounterValue(Counter.MAP_SPILL_SINGLERECORD_NUM, mapSpillSingleRecordNum);
}
Project: RDFS    File: ReduceTask.java
private void setCPUCounter(ProcResourceValues startProcVals,
    ProcResourceValues endProcVals,
    org.apache.hadoop.mapred.Counters.Counter counter) {
  long cpuUsed = 0;
  if (startProcVals != null &&  endProcVals != null) {
    long cpuStartVal = startProcVals.getCumulativeCpuTime();
    long cpuEndVal = endProcVals.getCumulativeCpuTime();
    if (cpuEndVal > cpuStartVal) {
      cpuUsed = cpuEndVal - cpuStartVal;
    }
  }
  counter.setValue(cpuUsed);
}
Project: hadoop-EAR    File: MapSpillSortCounters.java
private void setCounterValue(Counter counter, long value) {
  Counters.Counter counterObj = reporter.getCounter(counter);
  if (counterObj != null) {
    counterObj.setValue(value);
  }
}
Project: hadoop-EAR    File: MapTask.java
private void setCounterValue(Counter counter, long value) {
  Counters.Counter counterObj = reporter.getCounter(counter);
  if (counterObj != null) {
    counterObj.setValue(value);
  }
}
Project: RDFS    File: MapSpillSortCounters.java
private void setCounterValue(Counter counter, long value) {
  Counters.Counter counterObj = reporter.getCounter(counter);
  if (counterObj != null) {
    counterObj.setValue(value);
  }
}
Project: RDFS    File: ReduceTask.java
private void setWallClockCounter(long wallClock,
    org.apache.hadoop.mapred.Counters.Counter counter) {
  counter.setValue(wallClock);
}
Project: RDFS    File: ReduceTask.java
NewTrackingRecordWriter(org.apache.hadoop.mapreduce.RecordWriter<K,V> real,
                        org.apache.hadoop.mapreduce.Counter recordCounter) {
  this.real = real;
  this.outputRecordCounter = recordCounter;
}
Project: RDFS    File: ReduceTask.java
public ReduceCopier(TaskUmbilicalProtocol umbilical, JobConf conf,
                    TaskReporter reporter) throws ClassNotFoundException, IOException {

  configureClasspath(conf);
  this.reporter = reporter;
  this.shuffleClientMetrics = new ShuffleClientMetrics(conf);
  this.umbilical = umbilical;
  this.reduceTask = ReduceTask.this;

  this.scheduledCopies = new ArrayList<MapOutputLocation>(100);
  this.copyResults = new ArrayList<CopyResult>(100);
  this.numCopiers = conf.getInt("mapred.reduce.parallel.copies", 5);
  this.maxInFlight = 4 * numCopiers;
  Counters.Counter combineInputCounter =
    reporter.getCounter(Task.Counter.COMBINE_INPUT_RECORDS);
  this.combinerRunner = CombinerRunner.create(conf, getTaskID(),
                                              combineInputCounter,
                                              reporter, null);
  if (combinerRunner != null) {
    combineCollector =
      new CombineOutputCollector(reduceCombineOutputCounter);
  }

  this.ioSortFactor = conf.getInt("io.sort.factor", 10);

  this.abortFailureLimit = Math.max(30, numMaps / 10);

  this.maxFetchFailuresBeforeReporting = conf.getInt(
      "mapreduce.reduce.shuffle.maxfetchfailures", REPORT_FAILURE_LIMIT);

  this.maxFailedUniqueFetches = Math.min(numMaps,
                                         this.maxFailedUniqueFetches);
  this.maxInMemOutputs = conf.getInt("mapred.inmem.merge.threshold", 1000);
  this.maxInMemCopyPer =
    conf.getFloat("mapred.job.shuffle.merge.percent", 0.66f);
  final float maxRedPer =
    conf.getFloat("mapred.job.reduce.input.buffer.percent", 0f);
  if (maxRedPer > 1.0 || maxRedPer < 0.0) {
    throw new IOException("mapred.job.reduce.input.buffer.percent" +
                          maxRedPer);
  }
  this.maxInMemReduce = (int)Math.min(
      Runtime.getRuntime().maxMemory() * maxRedPer, Integer.MAX_VALUE);

  // Setup the RamManager
  ramManager = new ShuffleRamManager(conf);

  localFileSys = FileSystem.getLocal(conf);

  rfs = ((LocalFileSystem)localFileSys).getRaw();

  // hosts -> next contact time
  this.penaltyBox = new LinkedHashMap<String, Long>();

  // hostnames
  this.uniqueHosts = new HashSet<String>();

  // Seed the random number generator with a reasonably globally unique seed
  long randomSeed = System.nanoTime() +
                    (long)Math.pow(this.reduceTask.getPartition(),
                                   (this.reduceTask.getPartition()%10)
                                  );
  this.random = new Random(randomSeed);
  this.maxMapRuntime = 0;
  this.reportReadErrorImmediately =
    conf.getBoolean("mapreduce.reduce.shuffle.notify.readerror", true);
}