Java class org.apache.hadoop.mapred.ProgressSplitsBlock usage examples (source code)

Project: hadoop    File: ReduceAttemptFinishedEvent.java
/**
 * Create an event to record completion of a reduce attempt
 * @param id Attempt Id
 * @param taskType Type of task
 * @param taskStatus Status of the task
 * @param shuffleFinishTime Finish time of the shuffle phase
 * @param sortFinishTime Finish time of the sort phase
 * @param finishTime Finish time of the attempt
 * @param hostname Name of the host where the attempt executed
 * @param port RPC port for the tracker host.
 * @param rackName Name of the rack where the attempt executed
 * @param state State of the attempt
 * @param counters Counters for the attempt
 * @param allSplits the "splits", or a pixelated graph of various
 *        measurable worker node state variables against progress.
 *        Currently there are four; wallclock time, CPU time,
 *        virtual memory and physical memory.  
 */
public ReduceAttemptFinishedEvent
  (TaskAttemptID id, TaskType taskType, String taskStatus, 
   long shuffleFinishTime, long sortFinishTime, long finishTime,
   String hostname, int port,  String rackName, String state, 
   Counters counters, int[][] allSplits) {
  this.attemptId = id;
  this.taskType = taskType;
  this.taskStatus = taskStatus;
  this.shuffleFinishTime = shuffleFinishTime;
  this.sortFinishTime = sortFinishTime;
  this.finishTime = finishTime;
  this.hostname = hostname;
  this.rackName = rackName;
  this.port = port;
  this.state = state;
  this.counters = counters;
  this.allSplits = allSplits;
  this.clockSplits = ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
  this.cpuUsages = ProgressSplitsBlock.arrayGetCPUTime(allSplits);
  this.gpuUsages = ProgressSplitsBlock.arrayGetGPUTime(allSplits);
  this.vMemKbytes = ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
  this.physMemKbytes = ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
}
Project: hadoop    File: MapAttemptFinishedEvent.java
/** 
 * Create an event for successful completion of map attempts
 * @param id Task Attempt ID
 * @param taskType Type of the task
 * @param taskStatus Status of the task
 * @param mapFinishTime Finish time of the map phase
 * @param finishTime Finish time of the attempt
 * @param hostname Name of the host where the map executed
 * @param port RPC port for the tracker host.
 * @param rackName Name of the rack where the map executed
 * @param state State string for the attempt
 * @param counters Counters for the attempt
 * @param allSplits the "splits", or a pixelated graph of various
 *        measurable worker node state variables against progress.
 *        Currently there are four; wallclock time, CPU time,
 *        virtual memory and physical memory. 
 *
 *        If you have no splits data, code {@code null} for this
 *        parameter. 
 */
public MapAttemptFinishedEvent
    (TaskAttemptID id, TaskType taskType, String taskStatus, 
     long mapFinishTime, long finishTime, String hostname, int port, 
     String rackName, String state, Counters counters, int[][] allSplits) {
  this.attemptId = id;
  this.taskType = taskType;
  this.taskStatus = taskStatus;
  this.mapFinishTime = mapFinishTime;
  this.finishTime = finishTime;
  this.hostname = hostname;
  this.rackName = rackName;
  this.port = port;
  this.state = state;
  this.counters = counters;
  this.allSplits = allSplits;
  this.clockSplits = ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
  this.cpuUsages = ProgressSplitsBlock.arrayGetCPUTime(allSplits);
  this.gpuUsages = ProgressSplitsBlock.arrayGetGPUTime(allSplits);
  this.vMemKbytes = ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
  this.physMemKbytes = ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
}
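In all of these constructors, allSplits is the int[][] "burst" of a ProgressSplitsBlock: one row per metric, in the order the javadoc lists them (wallclock time, CPU time, virtual memory, physical memory); the extra gpuUsages/arrayGetGPUTime row appears only in the fork above and is not part of stock Hadoop. A minimal sketch of driving one of these constructors by hand; the attempt ID, host, timestamps and split values are placeholders, not taken from the excerpts:

import org.apache.hadoop.mapred.ProgressSplitsBlock;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.jobhistory.MapAttemptFinishedEvent;

public class SplitsExample {
  public static void main(String[] args) {
    // Four metric rows, twelve progress buckets each (placeholder sizes).
    int[][] allSplits = new int[4][12];
    allSplits[0][0] = 1000;   // placeholder wallclock value for the first progress bucket

    MapAttemptFinishedEvent event = new MapAttemptFinishedEvent(
        new TaskAttemptID("201501010000", 1, TaskType.MAP, 5, 0),
        TaskType.MAP, "SUCCEEDED",
        System.currentTimeMillis(),          // mapFinishTime
        System.currentTimeMillis(),          // finishTime
        "host1.example.com", 12345, "/default-rack",
        "state string", new Counters(),
        allSplits);                          // or null if there is no splits data

    // The same static helpers used in the constructors above pick out single rows.
    int[] wallclock = ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
    int[] cpu = ProgressSplitsBlock.arrayGetCPUTime(allSplits);
    System.out.println(wallclock.length + " wallclock buckets, " + cpu.length + " CPU buckets");
  }
}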
Project: aliyun-oss-hadoop-fs    File: ReduceAttemptFinishedEvent.java
/**
 * Create an event to record completion of a reduce attempt
 * @param id Attempt Id
 * @param taskType Type of task
 * @param taskStatus Status of the task
 * @param shuffleFinishTime Finish time of the shuffle phase
 * @param sortFinishTime Finish time of the sort phase
 * @param finishTime Finish time of the attempt
 * @param hostname Name of the host where the attempt executed
 * @param port RPC port for the tracker host.
 * @param rackName Name of the rack where the attempt executed
 * @param state State of the attempt
 * @param counters Counters for the attempt
 * @param allSplits the "splits", or a pixelated graph of various
 *        measurable worker node state variables against progress.
 *        Currently there are four; wallclock time, CPU time,
 *        virtual memory and physical memory.  
 */
public ReduceAttemptFinishedEvent
  (TaskAttemptID id, TaskType taskType, String taskStatus, 
   long shuffleFinishTime, long sortFinishTime, long finishTime,
   String hostname, int port,  String rackName, String state, 
   Counters counters, int[][] allSplits) {
  this.attemptId = id;
  this.taskType = taskType;
  this.taskStatus = taskStatus;
  this.shuffleFinishTime = shuffleFinishTime;
  this.sortFinishTime = sortFinishTime;
  this.finishTime = finishTime;
  this.hostname = hostname;
  this.rackName = rackName;
  this.port = port;
  this.state = state;
  this.counters = counters;
  this.allSplits = allSplits;
  this.clockSplits = ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
  this.cpuUsages = ProgressSplitsBlock.arrayGetCPUTime(allSplits);
  this.vMemKbytes = ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
  this.physMemKbytes = ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
}
Project: aliyun-oss-hadoop-fs    File: TaskAttemptUnsuccessfulCompletionEvent.java
/** 
 * Create an event to record the unsuccessful completion of attempts
 * @param id Attempt ID
 * @param taskType Type of the task
 * @param status Status of the attempt
 * @param finishTime Finish time of the attempt
 * @param hostname Name of the host where the attempt executed
 * @param port RPC port for the tracker
 * @param rackName Name of the rack where the attempt executed
 * @param error Error string
 * @param counters Counters for the attempt
 * @param allSplits the "splits", or a pixelated graph of various
 *        measurable worker node state variables against progress.
 *        Currently there are four; wallclock time, CPU time,
 *        virtual memory and physical memory.  
 */
public TaskAttemptUnsuccessfulCompletionEvent
     (TaskAttemptID id, TaskType taskType,
      String status, long finishTime,
      String hostname, int port, String rackName,
      String error, Counters counters, int[][] allSplits) {
  this.attemptId = id;
  this.taskType = taskType;
  this.status = status;
  this.finishTime = finishTime;
  this.hostname = hostname;
  this.port = port;
  this.rackName = rackName;
  this.error = error;
  this.counters = counters;
  this.allSplits = allSplits;
  this.clockSplits =
      ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
  this.cpuUsages =
      ProgressSplitsBlock.arrayGetCPUTime(allSplits);
  this.vMemKbytes =
      ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
  this.physMemKbytes =
      ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
}
Project: aliyun-oss-hadoop-fs    File: MapAttemptFinishedEvent.java
/** 
 * Create an event for successful completion of map attempts
 * @param id Task Attempt ID
 * @param taskType Type of the task
 * @param taskStatus Status of the task
 * @param mapFinishTime Finish time of the map phase
 * @param finishTime Finish time of the attempt
 * @param hostname Name of the host where the map executed
 * @param port RPC port for the tracker host.
 * @param rackName Name of the rack where the map executed
 * @param state State string for the attempt
 * @param counters Counters for the attempt
 * @param allSplits the "splits", or a pixelated graph of various
 *        measurable worker node state variables against progress.
 *        Currently there are four; wallclock time, CPU time,
 *        virtual memory and physical memory. 
 *
 *        If you have no splits data, code {@code null} for this
 *        parameter. 
 */
public MapAttemptFinishedEvent
    (TaskAttemptID id, TaskType taskType, String taskStatus, 
     long mapFinishTime, long finishTime, String hostname, int port, 
     String rackName, String state, Counters counters, int[][] allSplits) {
  this.attemptId = id;
  this.taskType = taskType;
  this.taskStatus = taskStatus;
  this.mapFinishTime = mapFinishTime;
  this.finishTime = finishTime;
  this.hostname = hostname;
  this.rackName = rackName;
  this.port = port;
  this.state = state;
  this.counters = counters;
  this.allSplits = allSplits;
  this.clockSplits = ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
  this.cpuUsages = ProgressSplitsBlock.arrayGetCPUTime(allSplits);
  this.vMemKbytes = ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
  this.physMemKbytes = ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
}
Project: big-c    File: ReduceAttemptFinishedEvent.java
/**
 * Create an event to record completion of a reduce attempt
 * @param id Attempt Id
 * @param taskType Type of task
 * @param taskStatus Status of the task
 * @param shuffleFinishTime Finish time of the shuffle phase
 * @param sortFinishTime Finish time of the sort phase
 * @param finishTime Finish time of the attempt
 * @param hostname Name of the host where the attempt executed
 * @param port RPC port for the tracker host.
 * @param rackName Name of the rack where the attempt executed
 * @param state State of the attempt
 * @param counters Counters for the attempt
 * @param allSplits the "splits", or a pixelated graph of various
 *        measurable worker node state variables against progress.
 *        Currently there are four; wallclock time, CPU time,
 *        virtual memory and physical memory.  
 */
public ReduceAttemptFinishedEvent
  (TaskAttemptID id, TaskType taskType, String taskStatus, 
   long shuffleFinishTime, long sortFinishTime, long finishTime,
   String hostname, int port,  String rackName, String state, 
   Counters counters, int[][] allSplits) {
  this.attemptId = id;
  this.taskType = taskType;
  this.taskStatus = taskStatus;
  this.shuffleFinishTime = shuffleFinishTime;
  this.sortFinishTime = sortFinishTime;
  this.finishTime = finishTime;
  this.hostname = hostname;
  this.rackName = rackName;
  this.port = port;
  this.state = state;
  this.counters = counters;
  this.allSplits = allSplits;
  this.clockSplits = ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
  this.cpuUsages = ProgressSplitsBlock.arrayGetCPUTime(allSplits);
  this.vMemKbytes = ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
  this.physMemKbytes = ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
}
Project: big-c    File: TaskAttemptUnsuccessfulCompletionEvent.java
/** 
 * Create an event to record the unsuccessful completion of attempts
 * @param id Attempt ID
 * @param taskType Type of the task
 * @param status Status of the attempt
 * @param finishTime Finish time of the attempt
 * @param hostname Name of the host where the attempt executed
 * @param port RPC port for the tracker
 * @param rackName Name of the rack where the attempt executed
 * @param error Error string
 * @param counters Counters for the attempt
 * @param allSplits the "splits", or a pixelated graph of various
 *        measurable worker node state variables against progress.
 *        Currently there are four; wallclock time, CPU time,
 *        virtual memory and physical memory.  
 */
public TaskAttemptUnsuccessfulCompletionEvent
     (TaskAttemptID id, TaskType taskType,
      String status, long finishTime,
      String hostname, int port, String rackName,
      String error, Counters counters, int[][] allSplits) {
  this.attemptId = id;
  this.taskType = taskType;
  this.status = status;
  this.finishTime = finishTime;
  this.hostname = hostname;
  this.port = port;
  this.rackName = rackName;
  this.error = error;
  this.counters = counters;
  this.allSplits = allSplits;
  this.clockSplits =
      ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
  this.cpuUsages =
      ProgressSplitsBlock.arrayGetCPUTime(allSplits);
  this.vMemKbytes =
      ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
  this.physMemKbytes =
      ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
}
Project: big-c    File: MapAttemptFinishedEvent.java
/** 
 * Create an event for successful completion of map attempts
 * @param id Task Attempt ID
 * @param taskType Type of the task
 * @param taskStatus Status of the task
 * @param mapFinishTime Finish time of the map phase
 * @param finishTime Finish time of the attempt
 * @param hostname Name of the host where the map executed
 * @param port RPC port for the tracker host.
 * @param rackName Name of the rack where the map executed
 * @param state State string for the attempt
 * @param counters Counters for the attempt
 * @param allSplits the "splits", or a pixelated graph of various
 *        measurable worker node state variables against progress.
 *        Currently there are four; wallclock time, CPU time,
 *        virtual memory and physical memory. 
 *
 *        If you have no splits data, code {@code null} for this
 *        parameter. 
 */
public MapAttemptFinishedEvent
    (TaskAttemptID id, TaskType taskType, String taskStatus, 
     long mapFinishTime, long finishTime, String hostname, int port, 
     String rackName, String state, Counters counters, int[][] allSplits) {
  this.attemptId = id;
  this.taskType = taskType;
  this.taskStatus = taskStatus;
  this.mapFinishTime = mapFinishTime;
  this.finishTime = finishTime;
  this.hostname = hostname;
  this.rackName = rackName;
  this.port = port;
  this.state = state;
  this.counters = counters;
  this.allSplits = allSplits;
  this.clockSplits = ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
  this.cpuUsages = ProgressSplitsBlock.arrayGetCPUTime(allSplits);
  this.vMemKbytes = ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
  this.physMemKbytes = ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
}
Project: hadoop-2.6.0-cdh5.4.3    File: ReduceAttemptFinishedEvent.java
/**
 * Create an event to record completion of a reduce attempt
 * @param id Attempt Id
 * @param taskType Type of task
 * @param taskStatus Status of the task
 * @param shuffleFinishTime Finish time of the shuffle phase
 * @param sortFinishTime Finish time of the sort phase
 * @param finishTime Finish time of the attempt
 * @param hostname Name of the host where the attempt executed
 * @param port RPC port for the tracker host.
 * @param rackName Name of the rack where the attempt executed
 * @param state State of the attempt
 * @param counters Counters for the attempt
 * @param allSplits the "splits", or a pixelated graph of various
 *        measurable worker node state variables against progress.
 *        Currently there are four; wallclock time, CPU time,
 *        virtual memory and physical memory.  
 */
public ReduceAttemptFinishedEvent
  (TaskAttemptID id, TaskType taskType, String taskStatus, 
   long shuffleFinishTime, long sortFinishTime, long finishTime,
   String hostname, int port,  String rackName, String state, 
   Counters counters, int[][] allSplits) {
  this.attemptId = id;
  this.taskType = taskType;
  this.taskStatus = taskStatus;
  this.shuffleFinishTime = shuffleFinishTime;
  this.sortFinishTime = sortFinishTime;
  this.finishTime = finishTime;
  this.hostname = hostname;
  this.rackName = rackName;
  this.port = port;
  this.state = state;
  this.counters = counters;
  this.allSplits = allSplits;
  this.clockSplits = ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
  this.cpuUsages = ProgressSplitsBlock.arrayGetCPUTime(allSplits);
  this.vMemKbytes = ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
  this.physMemKbytes = ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
}
Project: hadoop-2.6.0-cdh5.4.3    File: TaskAttemptUnsuccessfulCompletionEvent.java
/** 
 * Create an event to record the unsuccessful completion of attempts
 * @param id Attempt ID
 * @param taskType Type of the task
 * @param status Status of the attempt
 * @param finishTime Finish time of the attempt
 * @param hostname Name of the host where the attempt executed
 * @param port RPC port for the tracker
 * @param rackName Name of the rack where the attempt executed
 * @param error Error string
 * @param counters Counters for the attempt
 * @param allSplits the "splits", or a pixelated graph of various
 *        measurable worker node state variables against progress.
 *        Currently there are four; wallclock time, CPU time,
 *        virtual memory and physical memory.  
 */
public TaskAttemptUnsuccessfulCompletionEvent
     (TaskAttemptID id, TaskType taskType,
      String status, long finishTime,
      String hostname, int port, String rackName,
      String error, Counters counters, int[][] allSplits) {
  this.attemptId = id;
  this.taskType = taskType;
  this.status = status;
  this.finishTime = finishTime;
  this.hostname = hostname;
  this.port = port;
  this.rackName = rackName;
  this.error = error;
  this.counters = counters;
  this.allSplits = allSplits;
  this.clockSplits =
      ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
  this.cpuUsages =
      ProgressSplitsBlock.arrayGetCPUTime(allSplits);
  this.vMemKbytes =
      ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
  this.physMemKbytes =
      ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
}
Project: hadoop-2.6.0-cdh5.4.3    File: MapAttemptFinishedEvent.java
/** 
 * Create an event for successful completion of map attempts
 * @param id Task Attempt ID
 * @param taskType Type of the task
 * @param taskStatus Status of the task
 * @param mapFinishTime Finish time of the map phase
 * @param finishTime Finish time of the attempt
 * @param hostname Name of the host where the map executed
 * @param port RPC port for the tracker host.
 * @param rackName Name of the rack where the map executed
 * @param state State string for the attempt
 * @param counters Counters for the attempt
 * @param allSplits the "splits", or a pixelated graph of various
 *        measurable worker node state variables against progress.
 *        Currently there are four; wallclock time, CPU time,
 *        virtual memory and physical memory. 
 *
 *        If you have no splits data, code {@code null} for this
 *        parameter. 
 */
public MapAttemptFinishedEvent
    (TaskAttemptID id, TaskType taskType, String taskStatus, 
     long mapFinishTime, long finishTime, String hostname, int port, 
     String rackName, String state, Counters counters, int[][] allSplits) {
  this.attemptId = id;
  this.taskType = taskType;
  this.taskStatus = taskStatus;
  this.mapFinishTime = mapFinishTime;
  this.finishTime = finishTime;
  this.hostname = hostname;
  this.rackName = rackName;
  this.port = port;
  this.state = state;
  this.counters = counters;
  this.allSplits = allSplits;
  this.clockSplits = ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
  this.cpuUsages = ProgressSplitsBlock.arrayGetCPUTime(allSplits);
  this.vMemKbytes = ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
  this.physMemKbytes = ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
}
Project: hadoop-plus    File: ReduceAttemptFinishedEvent.java
/**
 * Create an event to record completion of a reduce attempt
 * @param id Attempt Id
 * @param taskType Type of task
 * @param taskStatus Status of the task
 * @param shuffleFinishTime Finish time of the shuffle phase
 * @param sortFinishTime Finish time of the sort phase
 * @param finishTime Finish time of the attempt
 * @param hostname Name of the host where the attempt executed
 * @param port RPC port for the tracker host.
 * @param rackName Name of the rack where the attempt executed
 * @param state State of the attempt
 * @param counters Counters for the attempt
 * @param allSplits the "splits", or a pixelated graph of various
 *        measurable worker node state variables against progress.
 *        Currently there are four; wallclock time, CPU time,
 *        virtual memory and physical memory.  
 */
public ReduceAttemptFinishedEvent
  (TaskAttemptID id, TaskType taskType, String taskStatus, 
   long shuffleFinishTime, long sortFinishTime, long finishTime,
   String hostname, int port,  String rackName, String state, 
   Counters counters, int[][] allSplits) {
  this.attemptId = id;
  this.taskType = taskType;
  this.taskStatus = taskStatus;
  this.shuffleFinishTime = shuffleFinishTime;
  this.sortFinishTime = sortFinishTime;
  this.finishTime = finishTime;
  this.hostname = hostname;
  this.rackName = rackName;
  this.port = port;
  this.state = state;
  this.counters = counters;
  this.allSplits = allSplits;
  this.clockSplits = ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
  this.cpuUsages = ProgressSplitsBlock.arrayGetCPUTime(allSplits);
  this.vMemKbytes = ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
  this.physMemKbytes = ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
}
Project: hadoop-plus    File: TaskAttemptUnsuccessfulCompletionEvent.java
/** 
 * Create an event to record the unsuccessful completion of attempts
 * @param id Attempt ID
 * @param taskType Type of the task
 * @param status Status of the attempt
 * @param finishTime Finish time of the attempt
 * @param hostname Name of the host where the attempt executed
 * @param port RPC port for the tracker
 * @param rackName Name of the rack where the attempt executed
 * @param error Error string
 * @param counters Counters for the attempt
 * @param allSplits the "splits", or a pixelated graph of various
 *        measurable worker node state variables against progress.
 *        Currently there are four; wallclock time, CPU time,
 *        virtual memory and physical memory.  
 */
public TaskAttemptUnsuccessfulCompletionEvent
     (TaskAttemptID id, TaskType taskType,
      String status, long finishTime,
      String hostname, int port, String rackName,
      String error, Counters counters, int[][] allSplits) {
  this.attemptId = id;
  this.taskType = taskType;
  this.status = status;
  this.finishTime = finishTime;
  this.hostname = hostname;
  this.port = port;
  this.rackName = rackName;
  this.error = error;
  this.counters = counters;
  this.allSplits = allSplits;
  this.clockSplits =
      ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
  this.cpuUsages =
      ProgressSplitsBlock.arrayGetCPUTime(allSplits);
  this.vMemKbytes =
      ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
  this.physMemKbytes =
      ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
}
Project: hadoop-plus    File: MapAttemptFinishedEvent.java
/** 
 * Create an event for successful completion of map attempts
 * @param id Task Attempt ID
 * @param taskType Type of the task
 * @param taskStatus Status of the task
 * @param mapFinishTime Finish time of the map phase
 * @param finishTime Finish time of the attempt
 * @param hostname Name of the host where the map executed
 * @param port RPC port for the tracker host.
 * @param rackName Name of the rack where the map executed
 * @param state State string for the attempt
 * @param counters Counters for the attempt
 * @param allSplits the "splits", or a pixelated graph of various
 *        measurable worker node state variables against progress.
 *        Currently there are four; wallclock time, CPU time,
 *        virtual memory and physical memory. 
 *
 *        If you have no splits data, code {@code null} for this
 *        parameter. 
 */
public MapAttemptFinishedEvent
    (TaskAttemptID id, TaskType taskType, String taskStatus, 
     long mapFinishTime, long finishTime, String hostname, int port, 
     String rackName, String state, Counters counters, int[][] allSplits) {
  this.attemptId = id;
  this.taskType = taskType;
  this.taskStatus = taskStatus;
  this.mapFinishTime = mapFinishTime;
  this.finishTime = finishTime;
  this.hostname = hostname;
  this.rackName = rackName;
  this.port = port;
  this.state = state;
  this.counters = counters;
  this.allSplits = allSplits;
  this.clockSplits = ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
  this.cpuUsages = ProgressSplitsBlock.arrayGetCPUTime(allSplits);
  this.vMemKbytes = ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
  this.physMemKbytes = ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
}
Project: hops    File: ReduceAttemptFinishedEvent.java
/**
 * Create an event to record completion of a reduce attempt
 * @param id Attempt Id
 * @param taskType Type of task
 * @param taskStatus Status of the task
 * @param shuffleFinishTime Finish time of the shuffle phase
 * @param sortFinishTime Finish time of the sort phase
 * @param finishTime Finish time of the attempt
 * @param hostname Name of the host where the attempt executed
 * @param port RPC port for the tracker host.
 * @param rackName Name of the rack where the attempt executed
 * @param state State of the attempt
 * @param counters Counters for the attempt
 * @param allSplits the "splits", or a pixelated graph of various
 *        measurable worker node state variables against progress.
 *        Currently there are four; wallclock time, CPU time,
 *        virtual memory and physical memory.  
 */
public ReduceAttemptFinishedEvent
  (TaskAttemptID id, TaskType taskType, String taskStatus, 
   long shuffleFinishTime, long sortFinishTime, long finishTime,
   String hostname, int port,  String rackName, String state, 
   Counters counters, int[][] allSplits) {
  this.attemptId = id;
  this.taskType = taskType;
  this.taskStatus = taskStatus;
  this.shuffleFinishTime = shuffleFinishTime;
  this.sortFinishTime = sortFinishTime;
  this.finishTime = finishTime;
  this.hostname = hostname;
  this.rackName = rackName;
  this.port = port;
  this.state = state;
  this.counters = counters;
  this.allSplits = allSplits;
  this.clockSplits = ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
  this.cpuUsages = ProgressSplitsBlock.arrayGetCPUTime(allSplits);
  this.vMemKbytes = ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
  this.physMemKbytes = ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
}
Project: hops    File: TaskAttemptUnsuccessfulCompletionEvent.java
/** 
 * Create an event to record the unsuccessful completion of attempts
 * @param id Attempt ID
 * @param taskType Type of the task
 * @param status Status of the attempt
 * @param finishTime Finish time of the attempt
 * @param hostname Name of the host where the attempt executed
 * @param port RPC port for the tracker
 * @param rackName Name of the rack where the attempt executed
 * @param error Error string
 * @param counters Counters for the attempt
 * @param allSplits the "splits", or a pixelated graph of various
 *        measurable worker node state variables against progress.
 *        Currently there are four; wallclock time, CPU time,
 *        virtual memory and physical memory.  
 */
public TaskAttemptUnsuccessfulCompletionEvent
     (TaskAttemptID id, TaskType taskType,
      String status, long finishTime,
      String hostname, int port, String rackName,
      String error, Counters counters, int[][] allSplits) {
  this.attemptId = id;
  this.taskType = taskType;
  this.status = status;
  this.finishTime = finishTime;
  this.hostname = hostname;
  this.port = port;
  this.rackName = rackName;
  this.error = error;
  this.counters = counters;
  this.allSplits = allSplits;
  this.clockSplits =
      ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
  this.cpuUsages =
      ProgressSplitsBlock.arrayGetCPUTime(allSplits);
  this.vMemKbytes =
      ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
  this.physMemKbytes =
      ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
}
Project: hops    File: MapAttemptFinishedEvent.java
/** 
 * Create an event for successful completion of map attempts
 * @param id Task Attempt ID
 * @param taskType Type of the task
 * @param taskStatus Status of the task
 * @param mapFinishTime Finish time of the map phase
 * @param finishTime Finish time of the attempt
 * @param hostname Name of the host where the map executed
 * @param port RPC port for the tracker host.
 * @param rackName Name of the rack where the map executed
 * @param state State string for the attempt
 * @param counters Counters for the attempt
 * @param allSplits the "splits", or a pixelated graph of various
 *        measurable worker node state variables against progress.
 *        Currently there are four; wallclock time, CPU time,
 *        virtual memory and physical memory. 
 *
 *        If you have no splits data, code {@code null} for this
 *        parameter. 
 */
public MapAttemptFinishedEvent
    (TaskAttemptID id, TaskType taskType, String taskStatus, 
     long mapFinishTime, long finishTime, String hostname, int port, 
     String rackName, String state, Counters counters, int[][] allSplits) {
  this.attemptId = id;
  this.taskType = taskType;
  this.taskStatus = taskStatus;
  this.mapFinishTime = mapFinishTime;
  this.finishTime = finishTime;
  this.hostname = hostname;
  this.rackName = rackName;
  this.port = port;
  this.state = state;
  this.counters = counters;
  this.allSplits = allSplits;
  this.clockSplits = ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
  this.cpuUsages = ProgressSplitsBlock.arrayGetCPUTime(allSplits);
  this.vMemKbytes = ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
  this.physMemKbytes = ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
}
Project: hadoop-TCP    File: ReduceAttemptFinishedEvent.java
/**
 * Create an event to record completion of a reduce attempt
 * @param id Attempt Id
 * @param taskType Type of task
 * @param taskStatus Status of the task
 * @param shuffleFinishTime Finish time of the shuffle phase
 * @param sortFinishTime Finish time of the sort phase
 * @param finishTime Finish time of the attempt
 * @param hostname Name of the host where the attempt executed
 * @param port RPC port for the tracker host.
 * @param rackName Name of the rack where the attempt executed
 * @param state State of the attempt
 * @param counters Counters for the attempt
 * @param allSplits the "splits", or a pixelated graph of various
 *        measurable worker node state variables against progress.
 *        Currently there are four; wallclock time, CPU time,
 *        virtual memory and physical memory.  
 */
public ReduceAttemptFinishedEvent
  (TaskAttemptID id, TaskType taskType, String taskStatus, 
   long shuffleFinishTime, long sortFinishTime, long finishTime,
   String hostname, int port,  String rackName, String state, 
   Counters counters, int[][] allSplits) {
  this.attemptId = id;
  this.taskType = taskType;
  this.taskStatus = taskStatus;
  this.shuffleFinishTime = shuffleFinishTime;
  this.sortFinishTime = sortFinishTime;
  this.finishTime = finishTime;
  this.hostname = hostname;
  this.rackName = rackName;
  this.port = port;
  this.state = state;
  this.counters = counters;
  this.allSplits = allSplits;
  this.clockSplits = ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
  this.cpuUsages = ProgressSplitsBlock.arrayGetCPUTime(allSplits);
  this.vMemKbytes = ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
  this.physMemKbytes = ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
}
Project: hadoop-TCP    File: TaskAttemptUnsuccessfulCompletionEvent.java
/** 
 * Create an event to record the unsuccessful completion of attempts
 * @param id Attempt ID
 * @param taskType Type of the task
 * @param status Status of the attempt
 * @param finishTime Finish time of the attempt
 * @param hostname Name of the host where the attempt executed
 * @param port RPC port for the tracker
 * @param rackName Name of the rack where the attempt executed
 * @param error Error string
 * @param counters Counters for the attempt
 * @param allSplits the "splits", or a pixelated graph of various
 *        measurable worker node state variables against progress.
 *        Currently there are four; wallclock time, CPU time,
 *        virtual memory and physical memory.  
 */
public TaskAttemptUnsuccessfulCompletionEvent
     (TaskAttemptID id, TaskType taskType,
      String status, long finishTime,
      String hostname, int port, String rackName,
      String error, Counters counters, int[][] allSplits) {
  this.attemptId = id;
  this.taskType = taskType;
  this.status = status;
  this.finishTime = finishTime;
  this.hostname = hostname;
  this.port = port;
  this.rackName = rackName;
  this.error = error;
  this.counters = counters;
  this.allSplits = allSplits;
  this.clockSplits =
      ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
  this.cpuUsages =
      ProgressSplitsBlock.arrayGetCPUTime(allSplits);
  this.vMemKbytes =
      ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
  this.physMemKbytes =
      ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
}
Project: hadoop-TCP    File: MapAttemptFinishedEvent.java
/** 
 * Create an event for successful completion of map attempts
 * @param id Task Attempt ID
 * @param taskType Type of the task
 * @param taskStatus Status of the task
 * @param mapFinishTime Finish time of the map phase
 * @param finishTime Finish time of the attempt
 * @param hostname Name of the host where the map executed
 * @param port RPC port for the tracker host.
 * @param rackName Name of the rack where the map executed
 * @param state State string for the attempt
 * @param counters Counters for the attempt
 * @param allSplits the "splits", or a pixelated graph of various
 *        measurable worker node state variables against progress.
 *        Currently there are four; wallclock time, CPU time,
 *        virtual memory and physical memory. 
 *
 *        If you have no splits data, code {@code null} for this
 *        parameter. 
 */
public MapAttemptFinishedEvent
    (TaskAttemptID id, TaskType taskType, String taskStatus, 
     long mapFinishTime, long finishTime, String hostname, int port, 
     String rackName, String state, Counters counters, int[][] allSplits) {
  this.attemptId = id;
  this.taskType = taskType;
  this.taskStatus = taskStatus;
  this.mapFinishTime = mapFinishTime;
  this.finishTime = finishTime;
  this.hostname = hostname;
  this.rackName = rackName;
  this.port = port;
  this.state = state;
  this.counters = counters;
  this.allSplits = allSplits;
  this.clockSplits = ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
  this.cpuUsages = ProgressSplitsBlock.arrayGetCPUTime(allSplits);
  this.vMemKbytes = ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
  this.physMemKbytes = ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
}
Project: hardfs    File: ReduceAttemptFinishedEvent.java
/**
 * Create an event to record completion of a reduce attempt
 * @param id Attempt Id
 * @param taskType Type of task
 * @param taskStatus Status of the task
 * @param shuffleFinishTime Finish time of the shuffle phase
 * @param sortFinishTime Finish time of the sort phase
 * @param finishTime Finish time of the attempt
 * @param hostname Name of the host where the attempt executed
 * @param port RPC port for the tracker host.
 * @param rackName Name of the rack where the attempt executed
 * @param state State of the attempt
 * @param counters Counters for the attempt
 * @param allSplits the "splits", or a pixelated graph of various
 *        measurable worker node state variables against progress.
 *        Currently there are four; wallclock time, CPU time,
 *        virtual memory and physical memory.  
 */
public ReduceAttemptFinishedEvent
  (TaskAttemptID id, TaskType taskType, String taskStatus, 
   long shuffleFinishTime, long sortFinishTime, long finishTime,
   String hostname, int port,  String rackName, String state, 
   Counters counters, int[][] allSplits) {
  this.attemptId = id;
  this.taskType = taskType;
  this.taskStatus = taskStatus;
  this.shuffleFinishTime = shuffleFinishTime;
  this.sortFinishTime = sortFinishTime;
  this.finishTime = finishTime;
  this.hostname = hostname;
  this.rackName = rackName;
  this.port = port;
  this.state = state;
  this.counters = counters;
  this.allSplits = allSplits;
  this.clockSplits = ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
  this.cpuUsages = ProgressSplitsBlock.arrayGetCPUTime(allSplits);
  this.vMemKbytes = ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
  this.physMemKbytes = ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
}
Project: hardfs    File: TaskAttemptUnsuccessfulCompletionEvent.java
/** 
 * Create an event to record the unsuccessful completion of attempts
 * @param id Attempt ID
 * @param taskType Type of the task
 * @param status Status of the attempt
 * @param finishTime Finish time of the attempt
 * @param hostname Name of the host where the attempt executed
 * @param port RPC port for the tracker
 * @param rackName Name of the rack where the attempt executed
 * @param error Error string
 * @param counters Counters for the attempt
 * @param allSplits the "splits", or a pixelated graph of various
 *        measurable worker node state variables against progress.
 *        Currently there are four; wallclock time, CPU time,
 *        virtual memory and physical memory.  
 */
public TaskAttemptUnsuccessfulCompletionEvent
     (TaskAttemptID id, TaskType taskType,
      String status, long finishTime,
      String hostname, int port, String rackName,
      String error, Counters counters, int[][] allSplits) {
  this.attemptId = id;
  this.taskType = taskType;
  this.status = status;
  this.finishTime = finishTime;
  this.hostname = hostname;
  this.port = port;
  this.rackName = rackName;
  this.error = error;
  this.counters = counters;
  this.allSplits = allSplits;
  this.clockSplits =
      ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
  this.cpuUsages =
      ProgressSplitsBlock.arrayGetCPUTime(allSplits);
  this.vMemKbytes =
      ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
  this.physMemKbytes =
      ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
}
Project: hardfs    File: MapAttemptFinishedEvent.java
/** 
 * Create an event for successful completion of map attempts
 * @param id Task Attempt ID
 * @param taskType Type of the task
 * @param taskStatus Status of the task
 * @param mapFinishTime Finish time of the map phase
 * @param finishTime Finish time of the attempt
 * @param hostname Name of the host where the map executed
 * @param port RPC port for the tracker host.
 * @param rackName Name of the rack where the map executed
 * @param state State string for the attempt
 * @param counters Counters for the attempt
 * @param allSplits the "splits", or a pixelated graph of various
 *        measurable worker node state variables against progress.
 *        Currently there are four; wallclock time, CPU time,
 *        virtual memory and physical memory. 
 *
 *        If you have no splits data, code {@code null} for this
 *        parameter. 
 */
public MapAttemptFinishedEvent
    (TaskAttemptID id, TaskType taskType, String taskStatus, 
     long mapFinishTime, long finishTime, String hostname, int port, 
     String rackName, String state, Counters counters, int[][] allSplits) {
  this.attemptId = id;
  this.taskType = taskType;
  this.taskStatus = taskStatus;
  this.mapFinishTime = mapFinishTime;
  this.finishTime = finishTime;
  this.hostname = hostname;
  this.rackName = rackName;
  this.port = port;
  this.state = state;
  this.counters = counters;
  this.allSplits = allSplits;
  this.clockSplits = ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
  this.cpuUsages = ProgressSplitsBlock.arrayGetCPUTime(allSplits);
  this.vMemKbytes = ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
  this.physMemKbytes = ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
}
Project: hadoop-on-lustre2    File: ReduceAttemptFinishedEvent.java
/**
 * Create an event to record completion of a reduce attempt
 * @param id Attempt Id
 * @param taskType Type of task
 * @param taskStatus Status of the task
 * @param shuffleFinishTime Finish time of the shuffle phase
 * @param sortFinishTime Finish time of the sort phase
 * @param finishTime Finish time of the attempt
 * @param hostname Name of the host where the attempt executed
 * @param port RPC port for the tracker host.
 * @param rackName Name of the rack where the attempt executed
 * @param state State of the attempt
 * @param counters Counters for the attempt
 * @param allSplits the "splits", or a pixelated graph of various
 *        measurable worker node state variables against progress.
 *        Currently there are four; wallclock time, CPU time,
 *        virtual memory and physical memory.  
 */
public ReduceAttemptFinishedEvent
  (TaskAttemptID id, TaskType taskType, String taskStatus, 
   long shuffleFinishTime, long sortFinishTime, long finishTime,
   String hostname, int port,  String rackName, String state, 
   Counters counters, int[][] allSplits) {
  this.attemptId = id;
  this.taskType = taskType;
  this.taskStatus = taskStatus;
  this.shuffleFinishTime = shuffleFinishTime;
  this.sortFinishTime = sortFinishTime;
  this.finishTime = finishTime;
  this.hostname = hostname;
  this.rackName = rackName;
  this.port = port;
  this.state = state;
  this.counters = counters;
  this.allSplits = allSplits;
  this.clockSplits = ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
  this.cpuUsages = ProgressSplitsBlock.arrayGetCPUTime(allSplits);
  this.vMemKbytes = ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
  this.physMemKbytes = ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
}
Project: hadoop-on-lustre2    File: TaskAttemptUnsuccessfulCompletionEvent.java
/** 
 * Create an event to record the unsuccessful completion of attempts
 * @param id Attempt ID
 * @param taskType Type of the task
 * @param status Status of the attempt
 * @param finishTime Finish time of the attempt
 * @param hostname Name of the host where the attempt executed
 * @param port RPC port for the tracker
 * @param rackName Name of the rack where the attempt executed
 * @param error Error string
 * @param counters Counters for the attempt
 * @param allSplits the "splits", or a pixelated graph of various
 *        measurable worker node state variables against progress.
 *        Currently there are four; wallclock time, CPU time,
 *        virtual memory and physical memory.  
 */
public TaskAttemptUnsuccessfulCompletionEvent
     (TaskAttemptID id, TaskType taskType,
      String status, long finishTime,
      String hostname, int port, String rackName,
      String error, Counters counters, int[][] allSplits) {
  this.attemptId = id;
  this.taskType = taskType;
  this.status = status;
  this.finishTime = finishTime;
  this.hostname = hostname;
  this.port = port;
  this.rackName = rackName;
  this.error = error;
  this.counters = counters;
  this.allSplits = allSplits;
  this.clockSplits =
      ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
  this.cpuUsages =
      ProgressSplitsBlock.arrayGetCPUTime(allSplits);
  this.vMemKbytes =
      ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
  this.physMemKbytes =
      ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
}
Project: hadoop-on-lustre2    File: MapAttemptFinishedEvent.java
/** 
 * Create an event for successful completion of map attempts
 * @param id Task Attempt ID
 * @param taskType Type of the task
 * @param taskStatus Status of the task
 * @param mapFinishTime Finish time of the map phase
 * @param finishTime Finish time of the attempt
 * @param hostname Name of the host where the map executed
 * @param port RPC port for the tracker host.
 * @param rackName Name of the rack where the map executed
 * @param state State string for the attempt
 * @param counters Counters for the attempt
 * @param allSplits the "splits", or a pixelated graph of various
 *        measurable worker node state variables against progress.
 *        Currently there are four; wallclock time, CPU time,
 *        virtual memory and physical memory. 
 *
 *        If you have no splits data, code {@code null} for this
 *        parameter. 
 */
public MapAttemptFinishedEvent
    (TaskAttemptID id, TaskType taskType, String taskStatus, 
     long mapFinishTime, long finishTime, String hostname, int port, 
     String rackName, String state, Counters counters, int[][] allSplits) {
  this.attemptId = id;
  this.taskType = taskType;
  this.taskStatus = taskStatus;
  this.mapFinishTime = mapFinishTime;
  this.finishTime = finishTime;
  this.hostname = hostname;
  this.rackName = rackName;
  this.port = port;
  this.state = state;
  this.counters = counters;
  this.allSplits = allSplits;
  this.clockSplits = ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
  this.cpuUsages = ProgressSplitsBlock.arrayGetCPUTime(allSplits);
  this.vMemKbytes = ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
  this.physMemKbytes = ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
}
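The events keep the per-metric vectors computed in these constructors as fields; before the getDatum() variants below, here is a rough read-back sketch, assuming the conventional accessor names (getClockSplits(), getCpuUsages(), getVMemKbytes(), getPhysMemKbytes()), which are not shown in these excerpts:

import org.apache.hadoop.mapreduce.jobhistory.MapAttemptFinishedEvent;

public class SplitsReadBack {
  // Sketch: summarize the progress splits carried by a parsed job-history event.
  // Accessor names are assumed (the usual Hadoop getters), not taken from the excerpts.
  static void printSplits(MapAttemptFinishedEvent event) {
    int[] clock = event.getClockSplits();     // wallclock time per progress bucket
    int[] cpu = event.getCpuUsages();         // CPU time per progress bucket
    int[] vmem = event.getVMemKbytes();       // virtual memory per progress bucket
    int[] pmem = event.getPhysMemKbytes();    // physical memory per progress bucket
    System.out.printf("%d buckets: clock[0]=%d cpu[0]=%d vmem[0]=%d pmem[0]=%d%n",
        clock.length, clock[0], cpu[0], vmem[0], pmem[0]);
  }
}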
Project: hadoop    File: ReduceAttemptFinishedEvent.java
public Object getDatum() {
  if (datum == null) {
    datum = new ReduceAttemptFinished();
    datum.taskid = new Utf8(attemptId.getTaskID().toString());
    datum.attemptId = new Utf8(attemptId.toString());
    datum.taskType = new Utf8(taskType.name());
    datum.taskStatus = new Utf8(taskStatus);
    datum.shuffleFinishTime = shuffleFinishTime;
    datum.sortFinishTime = sortFinishTime;
    datum.finishTime = finishTime;
    datum.hostname = new Utf8(hostname);
    datum.port = port;
    if (rackName != null) {
      datum.rackname = new Utf8(rackName);
    }
    datum.state = new Utf8(state);
    datum.counters = EventWriter.toAvro(counters);

    datum.clockSplits = AvroArrayUtils.toAvro(ProgressSplitsBlock
      .arrayGetWallclockTime(allSplits));
    datum.cpuUsages = AvroArrayUtils.toAvro(ProgressSplitsBlock
      .arrayGetCPUTime(allSplits));
    datum.gpuUsages = AvroArrayUtils.toAvro(ProgressSplitsBlock
      .arrayGetGPUTime(allSplits));
    datum.vMemKbytes = AvroArrayUtils.toAvro(ProgressSplitsBlock
      .arrayGetVMemKbytes(allSplits));
    datum.physMemKbytes = AvroArrayUtils.toAvro(ProgressSplitsBlock
      .arrayGetPhysMemKbytes(allSplits));
  }
  return datum;
}
Project: hadoop    File: TaskAttemptUnsuccessfulCompletionEvent.java
/** 
 * Create an event to record the unsuccessful completion of attempts
 * @param id Attempt ID
 * @param taskType Type of the task
 * @param status Status of the attempt
 * @param finishTime Finish time of the attempt
 * @param hostname Name of the host where the attempt executed
 * @param port RPC port for the tracker
 * @param rackName Name of the rack where the attempt executed
 * @param error Error string
 * @param counters Counters for the attempt
 * @param allSplits the "splits", or a pixelated graph of various
 *        measurable worker node state variables against progress.
 *        Currently there are four; wallclock time, CPU time,
 *        virtual memory and physical memory.  
 */
public TaskAttemptUnsuccessfulCompletionEvent
     (TaskAttemptID id, TaskType taskType,
      String status, long finishTime,
      String hostname, int port, String rackName,
      String error, Counters counters, int[][] allSplits) {
  this.attemptId = id;
  this.taskType = taskType;
  this.status = status;
  this.finishTime = finishTime;
  this.hostname = hostname;
  this.port = port;
  this.rackName = rackName;
  this.error = error;
  this.counters = counters;
  this.allSplits = allSplits;
  this.clockSplits =
      ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
  this.cpuUsages =
      ProgressSplitsBlock.arrayGetCPUTime(allSplits);
  this.gpuUsages =
      ProgressSplitsBlock.arrayGetGPUTime(allSplits);
  this.vMemKbytes =
      ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
  this.physMemKbytes =
      ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
}
Project: hadoop    File: TaskAttemptUnsuccessfulCompletionEvent.java
public Object getDatum() {
  if(datum == null) {
    datum = new TaskAttemptUnsuccessfulCompletion();
    datum.taskid = new Utf8(attemptId.getTaskID().toString());
    datum.taskType = new Utf8(taskType.name());
    datum.attemptId = new Utf8(attemptId.toString());
    datum.finishTime = finishTime;
    datum.hostname = new Utf8(hostname);
    if (rackName != null) {
      datum.rackname = new Utf8(rackName);
    }
    datum.port = port;
    datum.error = new Utf8(error);
    datum.status = new Utf8(status);

    datum.counters = EventWriter.toAvro(counters);

    datum.clockSplits = AvroArrayUtils.toAvro(ProgressSplitsBlock
        .arrayGetWallclockTime(allSplits));
    datum.cpuUsages = AvroArrayUtils.toAvro(ProgressSplitsBlock
        .arrayGetCPUTime(allSplits));
    datum.gpuUsages = AvroArrayUtils.toAvro(ProgressSplitsBlock
        .arrayGetGPUTime(allSplits));
    datum.vMemKbytes = AvroArrayUtils.toAvro(ProgressSplitsBlock
        .arrayGetVMemKbytes(allSplits));
    datum.physMemKbytes = AvroArrayUtils.toAvro(ProgressSplitsBlock
        .arrayGetPhysMemKbytes(allSplits));
  }
  return datum;
}
Project: hadoop    File: MapAttemptFinishedEvent.java
public Object getDatum() {
  if (datum == null) {
    datum = new MapAttemptFinished();
    datum.taskid = new Utf8(attemptId.getTaskID().toString());
    datum.attemptId = new Utf8(attemptId.toString());
    datum.taskType = new Utf8(taskType.name());
    datum.taskStatus = new Utf8(taskStatus);
    datum.mapFinishTime = mapFinishTime;
    datum.finishTime = finishTime;
    datum.hostname = new Utf8(hostname);
    datum.port = port;
    if (rackName != null) {
      datum.rackname = new Utf8(rackName);
    }
    datum.state = new Utf8(state);
    datum.counters = EventWriter.toAvro(counters);

    datum.clockSplits = AvroArrayUtils.toAvro(ProgressSplitsBlock
      .arrayGetWallclockTime(allSplits));
    datum.cpuUsages = AvroArrayUtils.toAvro(ProgressSplitsBlock
      .arrayGetCPUTime(allSplits));
    datum.gpuUsages = AvroArrayUtils.toAvro(ProgressSplitsBlock
      .arrayGetGPUTime(allSplits));
    datum.vMemKbytes = AvroArrayUtils.toAvro(ProgressSplitsBlock
      .arrayGetVMemKbytes(allSplits));
    datum.physMemKbytes = AvroArrayUtils.toAvro(ProgressSplitsBlock
      .arrayGetPhysMemKbytes(allSplits));
  }
  return datum;
}
Project: aliyun-oss-hadoop-fs    File: ReduceAttemptFinishedEvent.java
public Object getDatum() {
  if (datum == null) {
    datum = new ReduceAttemptFinished();
    datum.setTaskid(new Utf8(attemptId.getTaskID().toString()));
    datum.setAttemptId(new Utf8(attemptId.toString()));
    datum.setTaskType(new Utf8(taskType.name()));
    datum.setTaskStatus(new Utf8(taskStatus));
    datum.setShuffleFinishTime(shuffleFinishTime);
    datum.setSortFinishTime(sortFinishTime);
    datum.setFinishTime(finishTime);
    datum.setHostname(new Utf8(hostname));
    datum.setPort(port);
    if (rackName != null) {
      datum.setRackname(new Utf8(rackName));
    }
    datum.setState(new Utf8(state));
    datum.setCounters(EventWriter.toAvro(counters));

    datum.setClockSplits(AvroArrayUtils.toAvro(ProgressSplitsBlock
        .arrayGetWallclockTime(allSplits)));
    datum.setCpuUsages(AvroArrayUtils.toAvro(ProgressSplitsBlock
        .arrayGetCPUTime(allSplits)));
    datum.setVMemKbytes(AvroArrayUtils.toAvro(ProgressSplitsBlock
        .arrayGetVMemKbytes(allSplits)));
    datum.setPhysMemKbytes(AvroArrayUtils.toAvro(ProgressSplitsBlock
        .arrayGetPhysMemKbytes(allSplits)));
  }
  return datum;
}
Project: aliyun-oss-hadoop-fs    File: TaskAttemptUnsuccessfulCompletionEvent.java
public Object getDatum() {
  if(datum == null) {
    datum = new TaskAttemptUnsuccessfulCompletion();
    datum.setTaskid(new Utf8(attemptId.getTaskID().toString()));
    datum.setTaskType(new Utf8(taskType.name()));
    datum.setAttemptId(new Utf8(attemptId.toString()));
    datum.setFinishTime(finishTime);
    datum.setHostname(new Utf8(hostname));
    if (rackName != null) {
      datum.setRackname(new Utf8(rackName));
    }
    datum.setPort(port);
    datum.setError(new Utf8(error));
    datum.setStatus(new Utf8(status));

    datum.setCounters(EventWriter.toAvro(counters));

    datum.setClockSplits(AvroArrayUtils.toAvro(ProgressSplitsBlock
        .arrayGetWallclockTime(allSplits)));
    datum.setCpuUsages(AvroArrayUtils.toAvro(ProgressSplitsBlock
        .arrayGetCPUTime(allSplits)));
    datum.setVMemKbytes(AvroArrayUtils.toAvro(ProgressSplitsBlock
        .arrayGetVMemKbytes(allSplits)));
    datum.setPhysMemKbytes(AvroArrayUtils.toAvro(ProgressSplitsBlock
        .arrayGetPhysMemKbytes(allSplits)));
  }
  return datum;
}
Project: aliyun-oss-hadoop-fs    File: MapAttemptFinishedEvent.java
public Object getDatum() {
  if (datum == null) {
    datum = new MapAttemptFinished();
    datum.setTaskid(new Utf8(attemptId.getTaskID().toString()));
    datum.setAttemptId(new Utf8(attemptId.toString()));
    datum.setTaskType(new Utf8(taskType.name()));
    datum.setTaskStatus(new Utf8(taskStatus));
    datum.setMapFinishTime(mapFinishTime);
    datum.setFinishTime(finishTime);
    datum.setHostname(new Utf8(hostname));
    datum.setPort(port);
    if (rackName != null) {
      datum.setRackname(new Utf8(rackName));
    }
    datum.setState(new Utf8(state));
    datum.setCounters(EventWriter.toAvro(counters));

    datum.setClockSplits(AvroArrayUtils.toAvro(ProgressSplitsBlock
        .arrayGetWallclockTime(allSplits)));
    datum.setCpuUsages(AvroArrayUtils.toAvro(ProgressSplitsBlock
        .arrayGetCPUTime(allSplits)));
    datum.setVMemKbytes(AvroArrayUtils.toAvro(ProgressSplitsBlock
        .arrayGetVMemKbytes(allSplits)));
    datum.setPhysMemKbytes(AvroArrayUtils.toAvro(ProgressSplitsBlock
        .arrayGetPhysMemKbytes(allSplits)));
  }
  return datum;
}
Project: big-c    File: ReduceAttemptFinishedEvent.java
public Object getDatum() {
  if (datum == null) {
    datum = new ReduceAttemptFinished();
    datum.taskid = new Utf8(attemptId.getTaskID().toString());
    datum.attemptId = new Utf8(attemptId.toString());
    datum.taskType = new Utf8(taskType.name());
    datum.taskStatus = new Utf8(taskStatus);
    datum.shuffleFinishTime = shuffleFinishTime;
    datum.sortFinishTime = sortFinishTime;
    datum.finishTime = finishTime;
    datum.hostname = new Utf8(hostname);
    datum.port = port;
    if (rackName != null) {
      datum.rackname = new Utf8(rackName);
    }
    datum.state = new Utf8(state);
    datum.counters = EventWriter.toAvro(counters);

    datum.clockSplits = AvroArrayUtils.toAvro(ProgressSplitsBlock
      .arrayGetWallclockTime(allSplits));
    datum.cpuUsages = AvroArrayUtils.toAvro(ProgressSplitsBlock
      .arrayGetCPUTime(allSplits));
    datum.vMemKbytes = AvroArrayUtils.toAvro(ProgressSplitsBlock
      .arrayGetVMemKbytes(allSplits));
    datum.physMemKbytes = AvroArrayUtils.toAvro(ProgressSplitsBlock
      .arrayGetPhysMemKbytes(allSplits));
  }
  return datum;
}
Project: big-c    File: TaskAttemptUnsuccessfulCompletionEvent.java
public Object getDatum() {
  if(datum == null) {
    datum = new TaskAttemptUnsuccessfulCompletion();
    datum.taskid = new Utf8(attemptId.getTaskID().toString());
    datum.taskType = new Utf8(taskType.name());
    datum.attemptId = new Utf8(attemptId.toString());
    datum.finishTime = finishTime;
    datum.hostname = new Utf8(hostname);
    if (rackName != null) {
      datum.rackname = new Utf8(rackName);
    }
    datum.port = port;
    datum.error = new Utf8(error);
    datum.status = new Utf8(status);

    datum.counters = EventWriter.toAvro(counters);

    datum.clockSplits = AvroArrayUtils.toAvro(ProgressSplitsBlock
        .arrayGetWallclockTime(allSplits));
    datum.cpuUsages = AvroArrayUtils.toAvro(ProgressSplitsBlock
        .arrayGetCPUTime(allSplits));
    datum.vMemKbytes = AvroArrayUtils.toAvro(ProgressSplitsBlock
        .arrayGetVMemKbytes(allSplits));
    datum.physMemKbytes = AvroArrayUtils.toAvro(ProgressSplitsBlock
        .arrayGetPhysMemKbytes(allSplits));
  }
  return datum;
}
Project: big-c    File: MapAttemptFinishedEvent.java
public Object getDatum() {
  if (datum == null) {
    datum = new MapAttemptFinished();
    datum.taskid = new Utf8(attemptId.getTaskID().toString());
    datum.attemptId = new Utf8(attemptId.toString());
    datum.taskType = new Utf8(taskType.name());
    datum.taskStatus = new Utf8(taskStatus);
    datum.mapFinishTime = mapFinishTime;
    datum.finishTime = finishTime;
    datum.hostname = new Utf8(hostname);
    datum.port = port;
    if (rackName != null) {
      datum.rackname = new Utf8(rackName);
    }
    datum.state = new Utf8(state);
    datum.counters = EventWriter.toAvro(counters);

    datum.clockSplits = AvroArrayUtils.toAvro(ProgressSplitsBlock
      .arrayGetWallclockTime(allSplits));
    datum.cpuUsages = AvroArrayUtils.toAvro(ProgressSplitsBlock
      .arrayGetCPUTime(allSplits));
    datum.vMemKbytes = AvroArrayUtils.toAvro(ProgressSplitsBlock
      .arrayGetVMemKbytes(allSplits));
    datum.physMemKbytes = AvroArrayUtils.toAvro(ProgressSplitsBlock
      .arrayGetPhysMemKbytes(allSplits));
  }
  return datum;
}
Project: hadoop-2.6.0-cdh5.4.3    File: ReduceAttemptFinishedEvent.java
public Object getDatum() {
  if (datum == null) {
    datum = new ReduceAttemptFinished();
    datum.taskid = new Utf8(attemptId.getTaskID().toString());
    datum.attemptId = new Utf8(attemptId.toString());
    datum.taskType = new Utf8(taskType.name());
    datum.taskStatus = new Utf8(taskStatus);
    datum.shuffleFinishTime = shuffleFinishTime;
    datum.sortFinishTime = sortFinishTime;
    datum.finishTime = finishTime;
    datum.hostname = new Utf8(hostname);
    datum.port = port;
    if (rackName != null) {
      datum.rackname = new Utf8(rackName);
    }
    datum.state = new Utf8(state);
    datum.counters = EventWriter.toAvro(counters);

    datum.clockSplits = AvroArrayUtils.toAvro(ProgressSplitsBlock
      .arrayGetWallclockTime(allSplits));
    datum.cpuUsages = AvroArrayUtils.toAvro(ProgressSplitsBlock
      .arrayGetCPUTime(allSplits));
    datum.vMemKbytes = AvroArrayUtils.toAvro(ProgressSplitsBlock
      .arrayGetVMemKbytes(allSplits));
    datum.physMemKbytes = AvroArrayUtils.toAvro(ProgressSplitsBlock
      .arrayGetPhysMemKbytes(allSplits));
  }
  return datum;
}
Project: hadoop-2.6.0-cdh5.4.3    File: TaskAttemptUnsuccessfulCompletionEvent.java
public Object getDatum() {
  if(datum == null) {
    datum = new TaskAttemptUnsuccessfulCompletion();
    datum.taskid = new Utf8(attemptId.getTaskID().toString());
    datum.taskType = new Utf8(taskType.name());
    datum.attemptId = new Utf8(attemptId.toString());
    datum.finishTime = finishTime;
    datum.hostname = new Utf8(hostname);
    if (rackName != null) {
      datum.rackname = new Utf8(rackName);
    }
    datum.port = port;
    datum.error = new Utf8(error);
    datum.status = new Utf8(status);

    datum.counters = EventWriter.toAvro(counters);

    datum.clockSplits = AvroArrayUtils.toAvro(ProgressSplitsBlock
        .arrayGetWallclockTime(allSplits));
    datum.cpuUsages = AvroArrayUtils.toAvro(ProgressSplitsBlock
        .arrayGetCPUTime(allSplits));
    datum.vMemKbytes = AvroArrayUtils.toAvro(ProgressSplitsBlock
        .arrayGetVMemKbytes(allSplits));
    datum.physMemKbytes = AvroArrayUtils.toAvro(ProgressSplitsBlock
        .arrayGetPhysMemKbytes(allSplits));
  }
  return datum;
}
Project: hadoop-2.6.0-cdh5.4.3    File: MapAttemptFinishedEvent.java
public Object getDatum() {
  if (datum == null) {
    datum = new MapAttemptFinished();
    datum.taskid = new Utf8(attemptId.getTaskID().toString());
    datum.attemptId = new Utf8(attemptId.toString());
    datum.taskType = new Utf8(taskType.name());
    datum.taskStatus = new Utf8(taskStatus);
    datum.mapFinishTime = mapFinishTime;
    datum.finishTime = finishTime;
    datum.hostname = new Utf8(hostname);
    datum.port = port;
    if (rackName != null) {
      datum.rackname = new Utf8(rackName);
    }
    datum.state = new Utf8(state);
    datum.counters = EventWriter.toAvro(counters);

    datum.clockSplits = AvroArrayUtils.toAvro(ProgressSplitsBlock
      .arrayGetWallclockTime(allSplits));
    datum.cpuUsages = AvroArrayUtils.toAvro(ProgressSplitsBlock
      .arrayGetCPUTime(allSplits));
    datum.vMemKbytes = AvroArrayUtils.toAvro(ProgressSplitsBlock
      .arrayGetVMemKbytes(allSplits));
    datum.physMemKbytes = AvroArrayUtils.toAvro(ProgressSplitsBlock
      .arrayGetPhysMemKbytes(allSplits));
  }
  return datum;
}
Project: hadoop-plus    File: ReduceAttemptFinishedEvent.java
public Object getDatum() {
  if (datum == null) {
    datum = new ReduceAttemptFinished();
    datum.taskid = new Utf8(attemptId.getTaskID().toString());
    datum.attemptId = new Utf8(attemptId.toString());
    datum.taskType = new Utf8(taskType.name());
    datum.taskStatus = new Utf8(taskStatus);
    datum.shuffleFinishTime = shuffleFinishTime;
    datum.sortFinishTime = sortFinishTime;
    datum.finishTime = finishTime;
    datum.hostname = new Utf8(hostname);
    datum.port = port;
    if (rackName != null) {
      datum.rackname = new Utf8(rackName);
    }
    datum.state = new Utf8(state);
    datum.counters = EventWriter.toAvro(counters);

    datum.clockSplits = AvroArrayUtils.toAvro(ProgressSplitsBlock
      .arrayGetWallclockTime(allSplits));
    datum.cpuUsages = AvroArrayUtils.toAvro(ProgressSplitsBlock
      .arrayGetCPUTime(allSplits));
    datum.vMemKbytes = AvroArrayUtils.toAvro(ProgressSplitsBlock
      .arrayGetVMemKbytes(allSplits));
    datum.physMemKbytes = AvroArrayUtils.toAvro(ProgressSplitsBlock
      .arrayGetPhysMemKbytes(allSplits));
  }
  return datum;
}
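Putting the two halves together: construct an event with a splits array and let getDatum() lazily build the corresponding Avro record, exactly as the getDatum() excerpts above do. A minimal end-to-end sketch with placeholder values:

import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.jobhistory.TaskAttemptUnsuccessfulCompletionEvent;

public class SplitsRoundTrip {
  public static void main(String[] args) {
    // Placeholder splits: wallclock, CPU, vmem, pmem rows with twelve progress buckets each.
    int[][] allSplits = new int[4][12];

    TaskAttemptUnsuccessfulCompletionEvent event =
        new TaskAttemptUnsuccessfulCompletionEvent(
            new TaskAttemptID("201501010000", 1, TaskType.REDUCE, 7, 0),
            TaskType.REDUCE, "FAILED", System.currentTimeMillis(),
            "host1.example.com", 12345, "/default-rack",
            "example error message", new Counters(), allSplits);

    // getDatum() builds the Avro TaskAttemptUnsuccessfulCompletion record on first use,
    // converting each split vector with AvroArrayUtils.toAvro() as shown above.
    Object datum = event.getDatum();
    System.out.println(datum);
  }
}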