Example source code for the Java class org.apache.hadoop.mapred.TaskLog.LogName

Projects: hadoop, aliyun-oss-hadoop-fs, big-c, hadoop-2.6.0-cdh5.4.3, hadoop-plus, FlexMap, hops, hadoop-TCP, hardfs, hadoop-on-lustre2    File: TestTaskLog.java (identical snippet in each)
/**
 * Test log-file resolution when TASK_LOG_DIR is not set.
 *
 * @throws IOException
 */
@Test (timeout=50000)
public void testTaskLogWithoutTaskLogDir() throws IOException {
  // Clear the container log dir so getMRv2LogDir() has nothing to return.
  System.clearProperty(YarnConfiguration.YARN_APP_CONTAINER_LOG_DIR);
  assertNull(TaskLog.getMRv2LogDir());

  TaskAttemptID taid = mock(TaskAttemptID.class);
  JobID jid = new JobID("job", 1);
  when(taid.getJobID()).thenReturn(jid);
  when(taid.toString()).thenReturn("JobId");

  // Without the MRv2 dir, the file is resolved under the MRv1 log layout.
  File f = TaskLog.getTaskLogFile(taid, true, LogName.STDOUT);
  assertTrue(f.getAbsolutePath().endsWith("stdout"));
}
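
Note: LogName covers the other per-attempt streams as well (STDERR, SYSLOG, and on older branches DEBUGOUT and PROFILE). For reference, here is a minimal sketch (not from any of the projects above, and assuming the usual org.apache.hadoop.mapred imports) that resolves every log file for an attempt with the same getTaskLogFile call:

static void listAttemptLogs(TaskAttemptID taid) {
  for (TaskLog.LogName logName : TaskLog.LogName.values()) {
    // isCleanup=false: resolve the regular (non-cleanup) attempt logs.
    File f = TaskLog.getTaskLogFile(taid, false, logName);
    System.out.println(logName + " -> " + f.getAbsolutePath());
  }
}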
Projects: hadoop-2.6.0-cdh5.4.3, hadoop-on-lustre, hanoi-hadoop-2.0.0-cdh, mammoth, hortonworks-extension    File: TaskLogsTruncater.java (identical snippet in each)
/**
 * Carry over the original index-file entries for a log that is tracked by
 * index files, so untruncated logs keep their recorded offsets.
 *
 * @param lInfo info for the JVM whose attempts are being processed
 * @param taskLogFileDetails the original per-task log-file details
 * @param updatedTaskLogFileDetails the map receiving the copied entries
 * @param logName the log stream being processed
 */
private void copyOriginalIndexFileInfo(JVMInfo lInfo,
    Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails,
    Map<Task, Map<LogName, LogFileDetail>> updatedTaskLogFileDetails,
    LogName logName) {
  if (TaskLog.LOGS_TRACKED_BY_INDEX_FILES.contains(logName)) {
    for (Task task : lInfo.getAllAttempts()) {
      if (!updatedTaskLogFileDetails.containsKey(task)) {
        updatedTaskLogFileDetails.put(task,
            new HashMap<LogName, LogFileDetail>());
      }
      updatedTaskLogFileDetails.get(task).put(logName,
          taskLogFileDetails.get(task).get(logName));
    }
  }
}
Projects: hadoop-2.6.0-cdh5.4.3, hadoop-on-lustre, hanoi-hadoop-2.0.0-cdh, mammoth, hortonworks-extension    File: TaskLogsTruncater.java (identical snippet in each)
/**
 * Check whether log truncation is needed for the given JVM info. If every
 * task that ran in the JVM is within its log limits, truncation is not
 * needed; otherwise it is.
 *
 * @param lInfo info for the JVM whose attempts are being checked
 * @param taskLogFileDetails the per-task log-file details
 * @param logName the log stream being checked
 * @return true if truncation is needed, false otherwise
 */
private boolean isTruncationNeeded(JVMInfo lInfo,
    Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails,
    LogName logName) {
  boolean truncationNeeded = false;
  LogFileDetail logFileDetail = null;
  for (Task task : lInfo.getAllAttempts()) {
    long taskRetainSize =
        (task.isMapTask() ? mapRetainSize : reduceRetainSize);
    Map<LogName, LogFileDetail> allLogsFileDetails =
        taskLogFileDetails.get(task);
    logFileDetail = allLogsFileDetails.get(logName);
    if (taskRetainSize > MINIMUM_RETAIN_SIZE_FOR_TRUNCATION
        && logFileDetail.length > taskRetainSize) {
      truncationNeeded = true;
      break;
    }
  }
  return truncationNeeded;
}
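
To see the size check in isolation: a stand-alone sketch of the truncation predicate used above (the constant and sizes here are illustrative, not the projects' actual defaults):

// A log needs truncation when a positive retain size is configured and
// the log has grown past it.
static boolean needsTruncation(long logLength, long taskRetainSize,
    long minimumRetainSizeForTruncation) {
  return taskRetainSize > minimumRetainSizeForTruncation
      && logLength > taskRetainSize;
}

// e.g. a 25 KB stdout against a 10 KB map retain size -> true
boolean b = needsTruncation(25 * 1024, 10 * 1024, 0);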
Projects: hadoop-EAR, RDFS    File: TaskLogsMonitor.java (identical snippet in each)
/**
 * Restore the original index-file entries for a log that is tracked by
 * index files.
 *
 * @param lInfo per-JVM info holding all attempts that ran in this JVM
 * @param taskLogFileDetails the original per-task log-file details
 * @param updatedTaskLogFileDetails the map receiving the restored entries
 * @param logName the log stream being processed
 */
private void revertIndexFileInfo(PerJVMInfo lInfo,
    Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails,
    Map<Task, Map<LogName, LogFileDetail>> updatedTaskLogFileDetails,
    LogName logName) {
  if (TaskLog.LOGS_TRACKED_BY_INDEX_FILES.contains(logName)) {
    for (Task task : lInfo.allAttempts) {
      if (!updatedTaskLogFileDetails.containsKey(task)) {
        updatedTaskLogFileDetails.put(task,
            new HashMap<LogName, LogFileDetail>());
      }
      updatedTaskLogFileDetails.get(task).put(logName,
          taskLogFileDetails.get(task).get(logName));
    }
  }
}
Projects: hadoop-EAR, RDFS    File: TaskLogsMonitor.java (identical snippet in each)
/**
 * Check whether log truncation is needed for the given JVM info. If every
 * task that ran in the JVM is within its log limits, truncation is not
 * needed; otherwise it is.
 *
 * @param lInfo per-JVM info holding all attempts that ran in this JVM
 * @param taskLogFileDetails the per-task log-file details
 * @param logName the log stream being checked
 * @return true if truncation is needed, false otherwise
 */
private boolean isTruncationNeeded(PerJVMInfo lInfo,
    Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails,
    LogName logName) {
  boolean truncationNeeded = false;
  LogFileDetail logFileDetail = null;
  for (Task task : lInfo.allAttempts) {
    long taskRetainSize =
        (task.isMapTask() ? mapRetainSize : reduceRetainSize);
    Map<LogName, LogFileDetail> allLogsFileDetails =
        taskLogFileDetails.get(task);
    logFileDetail = allLogsFileDetails.get(logName);
    if (taskRetainSize > MINIMUM_RETAIN_SIZE_FOR_TRUNCATION
        && logFileDetail.length > taskRetainSize) {
      truncationNeeded = true;
      break;
    }
  }
  return truncationNeeded;
}
Project: angel    File: WorkerJVM.java
/**
 * Create the worker attempt JVM launch command.
 * @param conf application configuration
 * @param appid application id
 * @param workerAttemptId worker attempt id
 * @return the JVM launch command, merged into a single shell command line
 */
public static List<String> getVMCommand(Configuration conf, ApplicationId appid, WorkerAttemptId workerAttemptId) {
  Vector<String> vargs = new Vector<String>(8);

  vargs.add(Environment.JAVA_HOME.$() + "/bin/java");

  String javaOpts = getChildJavaOpts(conf, appid, workerAttemptId);
  LOG.debug("javaOpts=" + javaOpts);

  String[] javaOptsSplit = javaOpts.split(" ");
  for (int i = 0; i < javaOptsSplit.length; i++) {
    vargs.add(javaOptsSplit[i]);
  }

  Path childTmpDir = new Path(Environment.PWD.$(), YarnConfiguration.DEFAULT_CONTAINER_TEMP_DIR);
  vargs.add("-Djava.io.tmpdir=" + childTmpDir);

  // Setup the log4j prop
  long logSize = 0;
  setupLog4jProperties(conf, vargs, logSize);

  // Add main class and its arguments
  String workerClassName =
      conf.get(AngelConf.ANGEL_WORKER_CLASS,
          AngelConf.DEFAULT_ANGEL_WORKER_CLASS);
  vargs.add(workerClassName);

  vargs.add("1>" + getTaskLogFile(TaskLog.LogName.STDOUT));
  vargs.add("2>" + getTaskLogFile(TaskLog.LogName.STDERR));

  // Final command
  StringBuilder mergedCommand = new StringBuilder();
  for (CharSequence str : vargs) {
    mergedCommand.append(str).append(" ");
  }
  Vector<String> vargsFinal = new Vector<String>(1);
  vargsFinal.add(mergedCommand.toString());
  return vargsFinal;
}
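
The last two vargs entries are the part relevant to this page: the child JVM's stdout and stderr are redirected into the files named by TaskLog.LogName.STDOUT and TaskLog.LogName.STDERR (LogName's toString() yields the lowercase file name, e.g. "stdout"). A minimal sketch of the same pattern, with logDir as a placeholder directory:

static void addLogRedirects(List<String> vargs, String logDir) {
  vargs.add("1>" + logDir + "/" + TaskLog.LogName.STDOUT);  // .../stdout
  vargs.add("2>" + logDir + "/" + TaskLog.LogName.STDERR);  // .../stderr
}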
Projects: hadoop, aliyun-oss-hadoop-fs, big-c, hadoop-2.6.0-cdh5.4.3    File: MapReduceTestUtil.java (identical snippet in each; an identical instance method also appears in each project's TestTaskLog.java)
/**
 * Reads a task log and returns it as a trimmed string.
 * 
 * @param filter
 *          the log stream to read; one of STDOUT, STDERR, SYSLOG,
 *          DEBUGOUT, PROFILE
 * @param taskId
 *          the task attempt whose log is to be collected
 * @param isCleanup
 *          whether the attempt is a cleanup attempt
 * @return the task log as a string
 * @throws IOException
 */
public static String readTaskLog(TaskLog.LogName filter,
    org.apache.hadoop.mapred.TaskAttemptID taskId, boolean isCleanup)
    throws IOException {
  StringBuffer result = new StringBuffer();
  int res;

  // Open a reader over the whole task log (offset 0, length -1 = to EOF).
  InputStream taskLogReader = new TaskLog.Reader(taskId, filter, 0, -1,
      isCleanup);
  byte[] b = new byte[65536];
  while (true) {
    res = taskLogReader.read(b);
    if (res > 0) {
      // Append only the bytes actually read; appending the whole buffer
      // would include stale bytes from earlier iterations.
      result.append(new String(b, 0, res));
    } else {
      break;
    }
  }
  taskLogReader.close();

  // Trim the string and return it.
  return result.toString().trim();
}
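
A hedged usage example (the attempt id below is the id format's documentation example, not a real job):

// Read the trimmed stdout of one map attempt; false = not a cleanup attempt.
org.apache.hadoop.mapred.TaskAttemptID attempt =
    org.apache.hadoop.mapred.TaskAttemptID.forName(
        "attempt_200707121733_0003_m_000005_0");
String stdout = MapReduceTestUtil.readTaskLog(TaskLog.LogName.STDOUT, attempt, false);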
Project: hadoop-2.6.0-cdh5.4.3    File: TaskLogsTruncater.java
/**
 * Check the sizes of the log files generated by the attempts that ran in a
 * particular JVM.
 * @param lInfo info for the JVM whose attempts are being checked
 * @return true if at least one log of one attempt needs truncation,
 *         false otherwise
 * @throws IOException
 */
public boolean shouldTruncateLogs(JVMInfo lInfo) throws IOException {
  // Read the log-file details for all the attempts that ran in this JVM
  Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails;
  try {
    taskLogFileDetails = getAllLogsFileDetails(lInfo.getAllAttempts());
  } catch (IOException e) {
    LOG.warn(
        "Exception in truncateLogs while getting allLogsFileDetails()."
            + " Ignoring the truncation of logs of this process.", e);
    return false;
  }

  File attemptLogDir = lInfo.getLogLocation();

  for (LogName logName : LogName.values()) {
    File logFile = new File(attemptLogDir, logName.toString());
    if (logFile.exists()) {
      if (isTruncationNeeded(lInfo, taskLogFileDetails, logName)) {
        return true;
      }
      LOG.debug("Truncation is not needed for "
          + logFile.getAbsolutePath());
    }
  }
  return false;
}
Project: hadoop-2.6.0-cdh5.4.3    File: TaskLogsTruncater.java
/**
 * Get the log-file details for each of the attempts passed.
 *
 * @param allAttempts the attempts we are interested in
 * @return a map from task to its log-file details
 * @throws IOException
 */
private Map<Task, Map<LogName, LogFileDetail>> getAllLogsFileDetails(
    final List<Task> allAttempts) throws IOException {
  Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails =
      new HashMap<Task, Map<LogName, LogFileDetail>>();
  for (Task task : allAttempts) {
    Map<LogName, LogFileDetail> allLogsFileDetails;
    allLogsFileDetails =
        TaskLog.getAllLogsFileDetails(task.getTaskID(), task.isTaskCleanupTask());
    taskLogFileDetails.put(task, allLogsFileDetails);
  }
  return taskLogFileDetails;
}
Project: hadoop-2.6.0-cdh5.4.3    File: TaskLogsTruncater.java
/**
 * Truncation of logs is done. Now sync the index files to reflect the
 * truncated sizes.
 * 
 * @param location the directory holding the index files to update
 * @param updatedTaskLogFileDetails the post-truncation log-file details
 */
private void updateIndicesAfterLogTruncation(String location,
    Map<Task, Map<LogName, LogFileDetail>> updatedTaskLogFileDetails) {
  for (Entry<Task, Map<LogName, LogFileDetail>> entry : 
                              updatedTaskLogFileDetails.entrySet()) {
    Task task = entry.getKey();
    Map<LogName, LogFileDetail> logFileDetails = entry.getValue();
    Map<LogName, Long[]> logLengths = new HashMap<LogName, Long[]>();
    // set current and previous lengths
    for (LogName logName : TaskLog.LOGS_TRACKED_BY_INDEX_FILES) {
      logLengths.put(logName, new Long[] { Long.valueOf(0L),
          Long.valueOf(0L) });
      LogFileDetail lfd = logFileDetails.get(logName);
      if (lfd != null) {
        // Set previous lengths
        logLengths.get(logName)[0] = Long.valueOf(lfd.start);
        // Set current lengths
        logLengths.get(logName)[1] = Long.valueOf(lfd.start + lfd.length);
      }
    }
    try {
      TaskLog.writeToIndexFile(location, task.getTaskID(),
          task.isTaskCleanupTask(), logLengths);
    } catch (IOException ioe) {
      LOG.warn("Exception encountered while updating index file of task "
          + task.getTaskID()
          + ". Ignoring and continuing with other tasks.", ioe);
    }
  }
}
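
For each tracked log the method stores a Long[] pair {start, start + length}: the offset where the task's retained region begins and the offset where it now ends, which is what TaskLog.writeToIndexFile persists. A tiny sketch of that packing, with hypothetical numbers:

long start = 4096L;    // first retained byte of this task's region
long length = 10240L;  // bytes kept after truncation
Long[] offsets = { start, start + length };  // recorded as {4096, 14336}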
Project: hadoop-2.6.0-cdh5.4.3    File: TestTaskLogsTruncater.java
private void writeBytes(TaskAttemptID firstAttemptID, TaskAttemptID attemptID,
    LogName logName, long numBytes, boolean random, char data) throws IOException {

  File logFile = TaskLog.getTaskLogFile(firstAttemptID, false, logName);
  File logLocation = logFile.getParentFile();

  LOG.info("Going to write " + numBytes + " real bytes to the log file "
      + logFile);

  if (!logLocation.exists()
      && !logLocation.mkdirs()) {
    throw new IOException("Couldn't create all ancestor dirs for "
        + logFile);
  }

  File attemptDir = TaskLog.getAttemptDir(attemptID, false);
  if (!attemptDir.exists() && !attemptDir.mkdirs()) {
    throw new IOException("Couldn't create all ancestor dirs for "
        + attemptDir);
  }

  // Call syncLogs up front to set the current task id.
  TaskLog.syncLogs(logLocation.toString(), attemptID, false, true);

  FileOutputStream outputStream = new FileOutputStream(logFile, true);
  Random r = new Random();
  for (long i = 0; i < numBytes; i++) {
    if(random) {
      outputStream.write(r.nextInt());
    } else {
      outputStream.write(data);
    }
  }
  outputStream.close();
  TaskLog.syncLogs(logLocation.toString(), attemptID, false, true);
  LOG.info("Written " + logFile.length() + " real bytes to the log file "
      + logFile);
}
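
A hedged example of driving this helper in a test (the id and sizes are illustrative):

// Write 2 KB of 'H' to the attempt's stdout, then 2 KB of random bytes
// to its stderr; writeBytes syncs the logs before and after writing.
TaskAttemptID attempt = TaskAttemptID.forName(
    "attempt_200707121733_0003_m_000005_0");
writeBytes(attempt, attempt, LogName.STDOUT, 2 * 1024, false, 'H');
writeBytes(attempt, attempt, LogName.STDERR, 2 * 1024, true, ' ');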
Project: hadoop-EAR    File: TaskLogsMonitor.java
/**
 * Get the log-file details for each of the attempts passed.
 * 
 * @param allAttempts the attempts we are interested in
 * @return a map from task to its log-file details
 * @throws IOException
 */
private Map<Task, Map<LogName, LogFileDetail>> getAllLogsFileDetails(
    final List<Task> allAttempts) throws IOException {
  Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails =
      new HashMap<Task, Map<LogName, LogFileDetail>>();
  for (Task task : allAttempts) {
    Map<LogName, LogFileDetail> allLogsFileDetails;
    allLogsFileDetails =
        TaskLog.getAllLogsFileDetails(task.getTaskID(),
            task.isTaskCleanupTask());
    taskLogFileDetails.put(task, allLogsFileDetails);
  }
  return taskLogFileDetails;
}