Java 类org.apache.hadoop.mapred.TaskTracker.ShuffleServerMetrics 实例源码

项目:hadoop-EAR    文件:NettyMapOutputAttributes.java   
/**
 * Bundles the per-job configuration and the TaskTracker-side collaborators
 * that the Netty shuffle pipeline needs when serving map output.
 *
 * @param jobConf job configuration used to resolve tracker-local paths
 * @param taskTracker owning task tracker instance
 * @param localFS handle to the local file system
 * @param localDirAllocator allocator for tracker-local directories
 * @param shuffleServerMetrics metrics sink for shuffle traffic
 */
public NettyMapOutputAttributes(
    JobConf jobConf, TaskTracker taskTracker, FileSystem localFS,
    LocalDirAllocator localDirAllocator,
    ShuffleServerMetrics shuffleServerMetrics) {
  // Plain field captures; order is irrelevant as the assignments are independent.
  this.jobConf = jobConf;
  this.taskTracker = taskTracker;
  this.localFS = localFS;
  this.localDirAllocator = localDirAllocator;
  this.shuffleServerMetrics = shuffleServerMetrics;
}
项目:RDFS    文件:NettyMapOutputAttributes.java   
/**
 * Captures everything the Netty map-output server needs for one tracker:
 * the job configuration plus the tracker's file-system, directory-allocation,
 * and metrics collaborators.
 *
 * @param jobConf configuration consulted when locating output files
 * @param taskTracker the task tracker this attributes object belongs to
 * @param localFS local file system used to read map output
 * @param localDirAllocator resolves paths under the tracker's local dirs
 * @param shuffleServerMetrics records shuffle-serving statistics
 */
public NettyMapOutputAttributes(
    JobConf jobConf, TaskTracker taskTracker, FileSystem localFS,
    LocalDirAllocator localDirAllocator,
    ShuffleServerMetrics shuffleServerMetrics) {
  // Straightforward dependency capture; no validation is performed here.
  this.jobConf = jobConf;
  this.localFS = localFS;
  this.taskTracker = taskTracker;
  this.shuffleServerMetrics = shuffleServerMetrics;
  this.localDirAllocator = localDirAllocator;
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestShuffleExceptionCount.java   
/**
 * Forwards to the superclass implementation unchanged; presumably exposed
 * so the enclosing test can invoke the parent's validation directly.
 *
 * @param ie the I/O exception under inspection
 * @param exceptionMsgRegex regex the exception message is matched against
 * @param exceptionStackRegex regex the stack trace is matched against
 * @param shuffleMetrics shuffle metrics passed through to the parent check
 */
public void checkException(IOException ie, String exceptionMsgRegex,
    String exceptionStackRegex, ShuffleServerMetrics shuffleMetrics) {
  super.checkException(
      ie, exceptionMsgRegex, exceptionStackRegex, shuffleMetrics);
}
项目:hadoop-EAR    文件:NettyMapOutputAttributes.java   
/**
 * Returns the shuffle metrics recorder held by this attributes object.
 *
 * @return the {@code ShuffleServerMetrics} instance supplied at construction
 */
public ShuffleServerMetrics getShuffleServerMetrics() {
  return this.shuffleServerMetrics;
}
项目:RDFS    文件:NettyMapOutputAttributes.java   
/**
 * Accessor for the metrics sink used when serving shuffle data.
 *
 * @return the shuffle server metrics object this instance was built with
 */
public ShuffleServerMetrics getShuffleServerMetrics() {
  return this.shuffleServerMetrics;
}
项目:RDFS    文件:ShuffleHandler.java   
/**
 * Serves one map task's output partition to the requesting reducer over the
 * given Netty channel: reads the spill index to locate the partition, writes
 * an HTTP response with shuffle metadata headers, then streams the file
 * region (a FileRegion transfer, typically zero-copy where the transport
 * supports it).
 *
 * @param ctx Netty handler context (unused directly here)
 * @param ch channel the response and file data are written to
 * @param jobId job whose intermediate output is requested
 * @param mapId map task whose output partition is requested
 * @param reduce reducer partition number to serve
 * @return the write future for the file-region transfer, or {@code null}
 *         if the spill file could not be found
 * @throws IOException if index or path lookup fails
 */
protected ChannelFuture sendMapOutput(ChannelHandlerContext ctx, Channel ch,
    String jobId, String mapId, int reduce) throws IOException {
  LocalDirAllocator lDirAlloc = attributes.getLocalDirAllocator();
  // Use the raw local FS to bypass checksum wrapping when reading spills.
  FileSystem rfs = ((LocalFileSystem) attributes.getLocalFS()).getRaw();

  ShuffleServerMetrics shuffleMetrics = attributes.getShuffleServerMetrics();
  TaskTracker tracker = attributes.getTaskTracker();

  // Index file
  Path indexFileName = lDirAlloc.getLocalPathToRead(
      TaskTracker.getIntermediateOutputDir(jobId, mapId)
      + "/file.out.index", attributes.getJobConf());
  // Map-output file
  Path mapOutputFileName = lDirAlloc.getLocalPathToRead(
      TaskTracker.getIntermediateOutputDir(jobId, mapId)
      + "/file.out", attributes.getJobConf());

  /**
   * Read the index file to get the information about where
   * the map-output for the given reducer is available.
   */
  IndexRecord info = tracker.getIndexInformation(mapId, reduce,indexFileName);

  HttpResponse response = new DefaultHttpResponse(HTTP_1_1, OK);

  //set the custom "from-map-task" http header to the map task from which
  //the map output data is being transferred
  response.setHeader(MRConstants.FROM_MAP_TASK, mapId);

  //set the custom "Raw-Map-Output-Length" http header to
  //the raw (decompressed) length
  response.setHeader(MRConstants.RAW_MAP_OUTPUT_LENGTH,
      Long.toString(info.rawLength));

  //set the custom "Map-Output-Length" http header to
  //the actual number of bytes being transferred
  response.setHeader(MRConstants.MAP_OUTPUT_LENGTH,
      Long.toString(info.partLength));

  //set the custom "for-reduce-task" http header to the reduce task number
  //for which this map output is being transferred
  response.setHeader(MRConstants.FOR_REDUCE_TASK, Integer.toString(reduce));

  // Headers must be flushed before the body region below.
  ch.write(response);
  File spillfile = new File(mapOutputFileName.toString());
  RandomAccessFile spill;
  try {
    spill = new RandomAccessFile(spillfile, "r");
  } catch (FileNotFoundException e) {
    // Missing spill: log and signal failure to the caller with null.
    // NOTE(review): headers were already written at this point — the peer
    // sees a truncated response; confirm upstream handles this.
    LOG.info(spillfile + " not found");
    return null;
  }
  // Transfer only this reducer's byte range of the spill file.
  final FileRegion partition = new DefaultFileRegion(
    spill.getChannel(), info.startOffset, info.partLength);
  ChannelFuture writeFuture = ch.write(partition);
  // Listener presumably releases the file region when the write completes.
  writeFuture.addListener(new ChanneFutureListenerMetrics(partition));
  shuffleMetrics.outputBytes(info.partLength); // optimistic
  LOG.info("Sending out " + info.partLength + " bytes for reduce: " +
           reduce + " from map: " + mapId + " given " +
           info.partLength + "/" + info.rawLength);
  return writeFuture;
}
项目:hanoi-hadoop-2.0.0-cdh    文件:TestShuffleExceptionCount.java   
/**
 * Thin pass-through to the parent class's exception check; no additional
 * behavior is layered on here.
 *
 * @param ie exception whose message and stack are validated
 * @param exceptionMsgRegex pattern expected in the exception message
 * @param exceptionStackRegex pattern expected in the stack trace
 * @param shuffleMetrics metrics handed through to the superclass
 */
public void checkException(IOException ie, String exceptionMsgRegex,
    String exceptionStackRegex, ShuffleServerMetrics shuffleMetrics) {
  super.checkException(
      ie, exceptionMsgRegex, exceptionStackRegex, shuffleMetrics);
}
项目:mapreduce-fork    文件:TestShuffleExceptionCount.java   
/**
 * Delegates straight to {@code super.checkException}; exists only to invoke
 * the inherited validation from this test's context.
 *
 * @param ie the exception being verified
 * @param exceptionMsgRegex expected message pattern
 * @param exceptionStackRegex expected stack-trace pattern
 * @param shuffleMetrics shuffle metrics forwarded unmodified
 */
public void checkException(IOException ie, String exceptionMsgRegex,
    String exceptionStackRegex, ShuffleServerMetrics shuffleMetrics) {
  super.checkException(
      ie, exceptionMsgRegex, exceptionStackRegex, shuffleMetrics);
}