Example source code for the Java class org.apache.hadoop.mapreduce.split.JobSplit.SplitMetaInfo
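The snippets below, collected from several Hadoop distributions and forks, show how JobSplit.SplitMetaInfo is built and serialized while writing the job.split and job.splitmetainfo files during job submission. As a minimal, self-contained sketch (not taken from any of the projects below; JobSplit is an audience-private class, and the demo class name and output path are purely illustrative), a single meta entry can be created and written like this:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapreduce.split.JobSplit;

public class SplitMetaInfoDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    Path file = new Path("demo.splitmetainfo"); // illustrative path
    try (FSDataOutputStream out = fs.create(file)) {
      // One entry: the hosts the split's data lives on, the byte offset of
      // the serialized split inside job.split, and its input data length.
      JobSplit.SplitMetaInfo meta = new JobSplit.SplitMetaInfo(
          new String[] { "host1", "host2" }, 0L, 128L);
      WritableUtils.writeVInt(out, 1); // entry count, as the writers below do
      meta.write(out);                 // Writable serialization, as in the excerpts
    }
  }
}

Each excerpt below performs the same steps at scale: serialize the split, measure the bytes it consumed, and record a SplitMetaInfo entry for it.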

Project: hadoop-2.6.0-cdh5.4.3    File: JobSplitWriter.java
private static SplitMetaInfo[] writeOldSplits(
    org.apache.hadoop.mapred.InputSplit[] splits,
    FSDataOutputStream out) throws IOException {
  SplitMetaInfo[] info = new SplitMetaInfo[splits.length];
  if (splits.length != 0) {
    int i = 0;
    long offset = out.getPos();
    for(org.apache.hadoop.mapred.InputSplit split: splits) {
      long prevLen = out.getPos();
      Text.writeString(out, split.getClass().getName());
      split.write(out);
      long currLen = out.getPos();
      info[i++] = new JobSplit.SplitMetaInfo( 
          split.getLocations(), offset,
          split.getLength());
      offset += currLen - prevLen;
    }
  }
  return info;
}
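In this and every variant below, offset starts at the stream's current position in the job.split file and is advanced by the number of bytes the serialized split actually consumed (currLen - prevLen), so each SplitMetaInfo records exactly where its split's class name and payload begin. For example, if writing one split moves the position from 7 to 64, the next entry's offset advances by 57 bytes.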
Project: guagua    File: GuaguaSplitWriter.java
private static SplitMetaInfo[] writeOldSplits(org.apache.hadoop.mapred.InputSplit[] splits, FSDataOutputStream out,
        Configuration conf) throws IOException {
    SplitMetaInfo[] info = new SplitMetaInfo[splits.length];
    if(splits.length != 0) {
        int i = 0;
        long offset = out.getPos();
        for(org.apache.hadoop.mapred.InputSplit split: splits) {
            long prevLen = out.getPos();
            Text.writeString(out, split.getClass().getName());
            split.write(out);
            long currLen = out.getPos();
            String[] locations = split.getLocations();
            final int max_loc = conf.getInt(MAX_SPLIT_LOCATIONS, 10);
            if(locations.length > max_loc) {
                LOG.warn("Max block location exceeded for split: " + split + " splitsize: " + locations.length
                        + " maxsize: " + max_loc);
                locations = Arrays.copyOf(locations, max_loc);
            }
            info[i++] = new JobSplit.SplitMetaInfo(locations, offset, split.getLength());
            offset += currLen - prevLen;
        }
    }
    return info;
}
Project: guagua    File: GuaguaSplitWriter.java
private static void writeJobSplitMetaInfo(FileSystem fs, Path filename, FsPermission p, int splitMetaInfoVersion,
        JobSplit.SplitMetaInfo[] allSplitMetaInfo) throws IOException {
    // write the splits meta-info to a file for the job tracker
    FSDataOutputStream out = null;
    try {
        out = FileSystem.create(fs, filename, p);
        out.write(META_SPLIT_FILE_HEADER);
        WritableUtils.writeVInt(out, splitMetaInfoVersion);
        WritableUtils.writeVInt(out, allSplitMetaInfo.length);
        for(JobSplit.SplitMetaInfo splitMetaInfo: allSplitMetaInfo) {
            splitMetaInfo.write(out);
        }
    } finally {
        IOUtils.closeStream(out);
    }
}
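The meta file written above is consumed on the scheduler side when the job starts. A hedged sketch of that reader side, assuming the standard SplitMetaInfoReader API (the JobID values and staging path are illustrative):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.split.JobSplit;
import org.apache.hadoop.mapreduce.split.SplitMetaInfoReader;

public class ReadSplitMetaInfoDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Staging directory holding job.split and job.splitmetainfo
    Path jobSubmitDir = new Path(args[0]);
    JobSplit.TaskSplitMetaInfo[] metas = SplitMetaInfoReader.readSplitMetaInfo(
        new JobID("demo", 1), fs, conf, jobSubmitDir);
    for (JobSplit.TaskSplitMetaInfo meta : metas) {
      // Each entry carries the locations plus the offset into job.split
      System.out.println(meta.getStartOffset() + " bytes: "
          + String.join(",", meta.getLocations()));
    }
  }
}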
Project: hanoi-hadoop-2.0.0-cdh    File: JobSplitWriter.java
private static SplitMetaInfo[] writeOldSplits(
    org.apache.hadoop.mapred.InputSplit[] splits,
    FSDataOutputStream out) throws IOException {
  SplitMetaInfo[] info = new SplitMetaInfo[splits.length];
  if (splits.length != 0) {
    int i = 0;
    // Use getPos(): DataOutputStream.size() is an int counter and can
    // overflow once more than 2 GB of split data has been written.
    long offset = out.getPos();
    for(org.apache.hadoop.mapred.InputSplit split: splits) {
      long prevLen = out.getPos();
      Text.writeString(out, split.getClass().getName());
      split.write(out);
      long currLen = out.getPos();
      info[i++] = new JobSplit.SplitMetaInfo( 
          split.getLocations(), offset,
          split.getLength());
      offset += currLen - prevLen;
    }
  }
  return info;
}
Project: mapreduce-fork    File: JobSplitWriter.java
private static SplitMetaInfo[] writeOldSplits(
    org.apache.hadoop.mapred.InputSplit[] splits,
    FSDataOutputStream out) throws IOException {
  SplitMetaInfo[] info = new SplitMetaInfo[splits.length];
  if (splits.length != 0) {
    int i = 0;
    // Use getPos(): DataOutputStream.size() is an int counter and can
    // overflow once more than 2 GB of split data has been written.
    long offset = out.getPos();
    for(org.apache.hadoop.mapred.InputSplit split: splits) {
      long prevLen = out.getPos();
      Text.writeString(out, split.getClass().getName());
      split.write(out);
      long currLen = out.getPos();
      info[i++] = new JobSplit.SplitMetaInfo( 
          split.getLocations(), offset,
          split.getLength());
      offset += currLen - prevLen;
    }
  }
  return info;
}
Project: hadoop    File: JobSplitWriter.java
public static <T extends InputSplit> void createSplitFiles(Path jobSubmitDir, 
    Configuration conf, FileSystem fs, T[] splits) 
throws IOException, InterruptedException {
  FSDataOutputStream out = createFile(fs, 
      JobSubmissionFiles.getJobSplitFile(jobSubmitDir), conf);
  SplitMetaInfo[] info = writeNewSplits(conf, splits, out);
  out.close();
  writeJobSplitMetaInfo(fs,JobSubmissionFiles.getJobSplitMetaFile(jobSubmitDir), 
      new FsPermission(JobSubmissionFiles.JOB_FILE_PERMISSION), splitVersion,
      info);
}
Project: hadoop    File: JobSplitWriter.java
public static void createSplitFiles(Path jobSubmitDir, 
    Configuration conf, FileSystem fs, 
    org.apache.hadoop.mapred.InputSplit[] splits) 
throws IOException {
  FSDataOutputStream out = createFile(fs, 
      JobSubmissionFiles.getJobSplitFile(jobSubmitDir), conf);
  SplitMetaInfo[] info = writeOldSplits(splits, out, conf);
  out.close();
  writeJobSplitMetaInfo(fs,JobSubmissionFiles.getJobSplitMetaFile(jobSubmitDir), 
      new FsPermission(JobSubmissionFiles.JOB_FILE_PERMISSION), splitVersion,
      info);
}
Project: hadoop    File: JobSplitWriter.java
@SuppressWarnings("unchecked")
private static <T extends InputSplit> 
SplitMetaInfo[] writeNewSplits(Configuration conf, 
    T[] array, FSDataOutputStream out)
throws IOException, InterruptedException {

  SplitMetaInfo[] info = new SplitMetaInfo[array.length];
  if (array.length != 0) {
    SerializationFactory factory = new SerializationFactory(conf);
    int i = 0;
    int maxBlockLocations = conf.getInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY,
        MRConfig.MAX_BLOCK_LOCATIONS_DEFAULT);
    long offset = out.getPos();
    for(T split: array) {
      long prevCount = out.getPos();
      Text.writeString(out, split.getClass().getName());
      Serializer<T> serializer = 
        factory.getSerializer((Class<T>) split.getClass());
      serializer.open(out);
      serializer.serialize(split);
      long currCount = out.getPos();
      String[] locations = split.getLocations();
      if (locations.length > maxBlockLocations) {
        LOG.warn("Max block location exceeded for split: "
            + split + " splitsize: " + locations.length +
            " maxsize: " + maxBlockLocations);
        locations = Arrays.copyOf(locations, maxBlockLocations);
      }
      info[i++] = 
        new JobSplit.SplitMetaInfo( 
            locations, offset,
            split.getLength());
      offset += currCount - prevCount;
    }
  }
  return info;
}
Project: hadoop    File: JobSplitWriter.java
private static SplitMetaInfo[] writeOldSplits(
    org.apache.hadoop.mapred.InputSplit[] splits,
    FSDataOutputStream out, Configuration conf) throws IOException {
  SplitMetaInfo[] info = new SplitMetaInfo[splits.length];
  if (splits.length != 0) {
    int i = 0;
    long offset = out.getPos();
    int maxBlockLocations = conf.getInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY,
        MRConfig.MAX_BLOCK_LOCATIONS_DEFAULT);
    for(org.apache.hadoop.mapred.InputSplit split: splits) {
      long prevLen = out.getPos();
      Text.writeString(out, split.getClass().getName());
      split.write(out);
      long currLen = out.getPos();
      String[] locations = split.getLocations();
      if (locations.length > maxBlockLocations) {
        LOG.warn("Max block location exceeded for split: "
            + split + " splitsize: " + locations.length +
            " maxsize: " + maxBlockLocations);
        locations = Arrays.copyOf(locations,maxBlockLocations);
      }
      info[i++] = new JobSplit.SplitMetaInfo( 
          locations, offset,
          split.getLength());
      offset += currLen - prevLen;
    }
  }
  return info;
}
Project: hadoop    File: JobSplitWriter.java
private static void writeJobSplitMetaInfo(FileSystem fs, Path filename, 
    FsPermission p, int splitMetaInfoVersion, 
    JobSplit.SplitMetaInfo[] allSplitMetaInfo) 
throws IOException {
  // write the splits meta-info to a file for the job tracker
  FSDataOutputStream out = 
    FileSystem.create(fs, filename, p);
  out.write(JobSplit.META_SPLIT_FILE_HEADER);
  WritableUtils.writeVInt(out, splitMetaInfoVersion);
  WritableUtils.writeVInt(out, allSplitMetaInfo.length);
  for (JobSplit.SplitMetaInfo splitMetaInfo : allSplitMetaInfo) {
    splitMetaInfo.write(out);
  }
  out.close();
}
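Unlike the guagua variant above, this version closes the stream outside any finally block, so a failed write leaks the stream. The same method written with try-with-resources (a sketch, not from any of the listed projects; it reuses the imports of the excerpt above) closes the stream on both success and failure:

private static void writeJobSplitMetaInfo(FileSystem fs, Path filename,
    FsPermission p, int splitMetaInfoVersion,
    JobSplit.SplitMetaInfo[] allSplitMetaInfo) throws IOException {
  // try-with-resources closes the stream even if a write throws
  try (FSDataOutputStream out = FileSystem.create(fs, filename, p)) {
    out.write(JobSplit.META_SPLIT_FILE_HEADER);
    WritableUtils.writeVInt(out, splitMetaInfoVersion);
    WritableUtils.writeVInt(out, allSplitMetaInfo.length);
    for (JobSplit.SplitMetaInfo splitMetaInfo : allSplitMetaInfo) {
      splitMetaInfo.write(out);
    }
  }
}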
Project: aliyun-oss-hadoop-fs    File: JobSplitWriter.java
public static <T extends InputSplit> void createSplitFiles(Path jobSubmitDir, 
    Configuration conf, FileSystem fs, T[] splits) 
throws IOException, InterruptedException {
  FSDataOutputStream out = createFile(fs, 
      JobSubmissionFiles.getJobSplitFile(jobSubmitDir), conf);
  SplitMetaInfo[] info = writeNewSplits(conf, splits, out);
  out.close();
  writeJobSplitMetaInfo(fs,JobSubmissionFiles.getJobSplitMetaFile(jobSubmitDir), 
      new FsPermission(JobSubmissionFiles.JOB_FILE_PERMISSION), splitVersion,
      info);
}
Project: aliyun-oss-hadoop-fs    File: JobSplitWriter.java
public static void createSplitFiles(Path jobSubmitDir, 
    Configuration conf, FileSystem fs, 
    org.apache.hadoop.mapred.InputSplit[] splits) 
throws IOException {
  FSDataOutputStream out = createFile(fs, 
      JobSubmissionFiles.getJobSplitFile(jobSubmitDir), conf);
  SplitMetaInfo[] info = writeOldSplits(splits, out, conf);
  out.close();
  writeJobSplitMetaInfo(fs,JobSubmissionFiles.getJobSplitMetaFile(jobSubmitDir), 
      new FsPermission(JobSubmissionFiles.JOB_FILE_PERMISSION), splitVersion,
      info);
}
Project: aliyun-oss-hadoop-fs    File: JobSplitWriter.java
@SuppressWarnings("unchecked")
private static <T extends InputSplit> 
SplitMetaInfo[] writeNewSplits(Configuration conf, 
    T[] array, FSDataOutputStream out)
throws IOException, InterruptedException {

  SplitMetaInfo[] info = new SplitMetaInfo[array.length];
  if (array.length != 0) {
    SerializationFactory factory = new SerializationFactory(conf);
    int i = 0;
    int maxBlockLocations = conf.getInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY,
        MRConfig.MAX_BLOCK_LOCATIONS_DEFAULT);
    long offset = out.getPos();
    for(T split: array) {
      long prevCount = out.getPos();
      Text.writeString(out, split.getClass().getName());
      Serializer<T> serializer = 
        factory.getSerializer((Class<T>) split.getClass());
      serializer.open(out);
      serializer.serialize(split);
      long currCount = out.getPos();
      String[] locations = split.getLocations();
      if (locations.length > maxBlockLocations) {
        LOG.warn("Max block location exceeded for split: "
            + split + " splitsize: " + locations.length +
            " maxsize: " + maxBlockLocations);
        locations = Arrays.copyOf(locations, maxBlockLocations);
      }
      info[i++] = 
        new JobSplit.SplitMetaInfo( 
            locations, offset,
            split.getLength());
      offset += currCount - prevCount;
    }
  }
  return info;
}
Project: aliyun-oss-hadoop-fs    File: JobSplitWriter.java
private static SplitMetaInfo[] writeOldSplits(
    org.apache.hadoop.mapred.InputSplit[] splits,
    FSDataOutputStream out, Configuration conf) throws IOException {
  SplitMetaInfo[] info = new SplitMetaInfo[splits.length];
  if (splits.length != 0) {
    int i = 0;
    long offset = out.getPos();
    int maxBlockLocations = conf.getInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY,
        MRConfig.MAX_BLOCK_LOCATIONS_DEFAULT);
    for(org.apache.hadoop.mapred.InputSplit split: splits) {
      long prevLen = out.getPos();
      Text.writeString(out, split.getClass().getName());
      split.write(out);
      long currLen = out.getPos();
      String[] locations = split.getLocations();
      if (locations.length > maxBlockLocations) {
        LOG.warn("Max block location exceeded for split: "
            + split + " splitsize: " + locations.length +
            " maxsize: " + maxBlockLocations);
        locations = Arrays.copyOf(locations,maxBlockLocations);
      }
      info[i++] = new JobSplit.SplitMetaInfo( 
          locations, offset,
          split.getLength());
      offset += currLen - prevLen;
    }
  }
  return info;
}
Project: aliyun-oss-hadoop-fs    File: JobSplitWriter.java
private static void writeJobSplitMetaInfo(FileSystem fs, Path filename, 
    FsPermission p, int splitMetaInfoVersion, 
    JobSplit.SplitMetaInfo[] allSplitMetaInfo) 
throws IOException {
  // write the splits meta-info to a file for the job tracker
  FSDataOutputStream out = 
    FileSystem.create(fs, filename, p);
  out.write(JobSplit.META_SPLIT_FILE_HEADER);
  WritableUtils.writeVInt(out, splitMetaInfoVersion);
  WritableUtils.writeVInt(out, allSplitMetaInfo.length);
  for (JobSplit.SplitMetaInfo splitMetaInfo : allSplitMetaInfo) {
    splitMetaInfo.write(out);
  }
  out.close();
}
Project: big-c    File: JobSplitWriter.java
public static <T extends InputSplit> void createSplitFiles(Path jobSubmitDir, 
    Configuration conf, FileSystem fs, T[] splits) 
throws IOException, InterruptedException {
  FSDataOutputStream out = createFile(fs, 
      JobSubmissionFiles.getJobSplitFile(jobSubmitDir), conf);
  SplitMetaInfo[] info = writeNewSplits(conf, splits, out);
  out.close();
  writeJobSplitMetaInfo(fs,JobSubmissionFiles.getJobSplitMetaFile(jobSubmitDir), 
      new FsPermission(JobSubmissionFiles.JOB_FILE_PERMISSION), splitVersion,
      info);
}
Project: big-c    File: JobSplitWriter.java
public static void createSplitFiles(Path jobSubmitDir, 
    Configuration conf, FileSystem fs, 
    org.apache.hadoop.mapred.InputSplit[] splits) 
throws IOException {
  FSDataOutputStream out = createFile(fs, 
      JobSubmissionFiles.getJobSplitFile(jobSubmitDir), conf);
  SplitMetaInfo[] info = writeOldSplits(splits, out, conf);
  out.close();
  writeJobSplitMetaInfo(fs,JobSubmissionFiles.getJobSplitMetaFile(jobSubmitDir), 
      new FsPermission(JobSubmissionFiles.JOB_FILE_PERMISSION), splitVersion,
      info);
}
Project: big-c    File: JobSplitWriter.java
@SuppressWarnings("unchecked")
private static <T extends InputSplit> 
SplitMetaInfo[] writeNewSplits(Configuration conf, 
    T[] array, FSDataOutputStream out)
throws IOException, InterruptedException {

  SplitMetaInfo[] info = new SplitMetaInfo[array.length];
  if (array.length != 0) {
    SerializationFactory factory = new SerializationFactory(conf);
    int i = 0;
    int maxBlockLocations = conf.getInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY,
        MRConfig.MAX_BLOCK_LOCATIONS_DEFAULT);
    long offset = out.getPos();
    for(T split: array) {
      long prevCount = out.getPos();
      Text.writeString(out, split.getClass().getName());
      Serializer<T> serializer = 
        factory.getSerializer((Class<T>) split.getClass());
      serializer.open(out);
      serializer.serialize(split);
      long currCount = out.getPos();
      String[] locations = split.getLocations();
      if (locations.length > maxBlockLocations) {
        LOG.warn("Max block location exceeded for split: "
            + split + " splitsize: " + locations.length +
            " maxsize: " + maxBlockLocations);
        locations = Arrays.copyOf(locations, maxBlockLocations);
      }
      info[i++] = 
        new JobSplit.SplitMetaInfo( 
            locations, offset,
            split.getLength());
      offset += currCount - prevCount;
    }
  }
  return info;
}
Project: big-c    File: JobSplitWriter.java
private static SplitMetaInfo[] writeOldSplits(
    org.apache.hadoop.mapred.InputSplit[] splits,
    FSDataOutputStream out, Configuration conf) throws IOException {
  SplitMetaInfo[] info = new SplitMetaInfo[splits.length];
  if (splits.length != 0) {
    int i = 0;
    long offset = out.getPos();
    int maxBlockLocations = conf.getInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY,
        MRConfig.MAX_BLOCK_LOCATIONS_DEFAULT);
    for(org.apache.hadoop.mapred.InputSplit split: splits) {
      long prevLen = out.getPos();
      Text.writeString(out, split.getClass().getName());
      split.write(out);
      long currLen = out.getPos();
      String[] locations = split.getLocations();
      if (locations.length > maxBlockLocations) {
        LOG.warn("Max block location exceeded for split: "
            + split + " splitsize: " + locations.length +
            " maxsize: " + maxBlockLocations);
        locations = Arrays.copyOf(locations,maxBlockLocations);
      }
      info[i++] = new JobSplit.SplitMetaInfo( 
          locations, offset,
          split.getLength());
      offset += currLen - prevLen;
    }
  }
  return info;
}
Project: big-c    File: JobSplitWriter.java
private static void writeJobSplitMetaInfo(FileSystem fs, Path filename, 
    FsPermission p, int splitMetaInfoVersion, 
    JobSplit.SplitMetaInfo[] allSplitMetaInfo) 
throws IOException {
  // write the splits meta-info to a file for the job tracker
  FSDataOutputStream out = 
    FileSystem.create(fs, filename, p);
  out.write(JobSplit.META_SPLIT_FILE_HEADER);
  WritableUtils.writeVInt(out, splitMetaInfoVersion);
  WritableUtils.writeVInt(out, allSplitMetaInfo.length);
  for (JobSplit.SplitMetaInfo splitMetaInfo : allSplitMetaInfo) {
    splitMetaInfo.write(out);
  }
  out.close();
}
Project: hadoop-2.6.0-cdh5.4.3    File: JobSplitWriter.java
public static <T extends InputSplit> void createSplitFiles(Path jobSubmitDir, 
    Configuration conf, FileSystem fs, T[] splits) 
throws IOException, InterruptedException {
  FSDataOutputStream out = createFile(fs, 
      JobSubmissionFiles.getJobSplitFile(jobSubmitDir), conf);
  SplitMetaInfo[] info = writeNewSplits(conf, splits, out);
  out.close();
  writeJobSplitMetaInfo(fs,JobSubmissionFiles.getJobSplitMetaFile(jobSubmitDir), 
      new FsPermission(JobSubmissionFiles.JOB_FILE_PERMISSION), splitVersion,
      info);
}
Project: hadoop-2.6.0-cdh5.4.3    File: JobSplitWriter.java
public static void createSplitFiles(Path jobSubmitDir, 
    Configuration conf, FileSystem fs, 
    org.apache.hadoop.mapred.InputSplit[] splits) 
throws IOException {
  FSDataOutputStream out = createFile(fs, 
      JobSubmissionFiles.getJobSplitFile(jobSubmitDir), conf);
  SplitMetaInfo[] info = writeOldSplits(splits, out, conf);
  out.close();
  writeJobSplitMetaInfo(fs,JobSubmissionFiles.getJobSplitMetaFile(jobSubmitDir), 
      new FsPermission(JobSubmissionFiles.JOB_FILE_PERMISSION), splitVersion,
      info);
}
Project: hadoop-2.6.0-cdh5.4.3    File: JobSplitWriter.java
@SuppressWarnings("unchecked")
private static <T extends InputSplit> 
SplitMetaInfo[] writeNewSplits(Configuration conf, 
    T[] array, FSDataOutputStream out)
throws IOException, InterruptedException {

  SplitMetaInfo[] info = new SplitMetaInfo[array.length];
  if (array.length != 0) {
    SerializationFactory factory = new SerializationFactory(conf);
    int i = 0;
    int maxBlockLocations = conf.getInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY,
        MRConfig.MAX_BLOCK_LOCATIONS_DEFAULT);
    long offset = out.getPos();
    for(T split: array) {
      long prevCount = out.getPos();
      Text.writeString(out, split.getClass().getName());
      Serializer<T> serializer = 
        factory.getSerializer((Class<T>) split.getClass());
      serializer.open(out);
      serializer.serialize(split);
      long currCount = out.getPos();
      String[] locations = split.getLocations();
      if (locations.length > maxBlockLocations) {
        LOG.warn("Max block location exceeded for split: "
            + split + " splitsize: " + locations.length +
            " maxsize: " + maxBlockLocations);
        locations = Arrays.copyOf(locations, maxBlockLocations);
      }
      info[i++] = 
        new JobSplit.SplitMetaInfo( 
            locations, offset,
            split.getLength());
      offset += currCount - prevCount;
    }
  }
  return info;
}
Project: hadoop-2.6.0-cdh5.4.3    File: JobSplitWriter.java
private static SplitMetaInfo[] writeOldSplits(
    org.apache.hadoop.mapred.InputSplit[] splits,
    FSDataOutputStream out, Configuration conf) throws IOException {
  SplitMetaInfo[] info = new SplitMetaInfo[splits.length];
  if (splits.length != 0) {
    int i = 0;
    long offset = out.getPos();
    int maxBlockLocations = conf.getInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY,
        MRConfig.MAX_BLOCK_LOCATIONS_DEFAULT);
    for(org.apache.hadoop.mapred.InputSplit split: splits) {
      long prevLen = out.getPos();
      Text.writeString(out, split.getClass().getName());
      split.write(out);
      long currLen = out.getPos();
      String[] locations = split.getLocations();
      if (locations.length > maxBlockLocations) {
        LOG.warn("Max block location exceeded for split: "
            + split + " splitsize: " + locations.length +
            " maxsize: " + maxBlockLocations);
        locations = Arrays.copyOf(locations,maxBlockLocations);
      }
      info[i++] = new JobSplit.SplitMetaInfo( 
          locations, offset,
          split.getLength());
      offset += currLen - prevLen;
    }
  }
  return info;
}
Project: hadoop-2.6.0-cdh5.4.3    File: JobSplitWriter.java
private static void writeJobSplitMetaInfo(FileSystem fs, Path filename, 
    FsPermission p, int splitMetaInfoVersion, 
    JobSplit.SplitMetaInfo[] allSplitMetaInfo) 
throws IOException {
  // write the splits meta-info to a file for the job tracker
  FSDataOutputStream out = 
    FileSystem.create(fs, filename, p);
  out.write(JobSplit.META_SPLIT_FILE_HEADER);
  WritableUtils.writeVInt(out, splitMetaInfoVersion);
  WritableUtils.writeVInt(out, allSplitMetaInfo.length);
  for (JobSplit.SplitMetaInfo splitMetaInfo : allSplitMetaInfo) {
    splitMetaInfo.write(out);
  }
  out.close();
}
Project: hadoop-2.6.0-cdh5.4.3    File: JobSplitWriter.java
public static void createSplitFiles(Path jobSubmitDir, 
    Configuration conf, FileSystem fs, 
    org.apache.hadoop.mapred.InputSplit[] splits) 
throws IOException {
  FSDataOutputStream out = createFile(fs, 
      JobSubmissionFiles.getJobSplitFile(jobSubmitDir), conf);
  SplitMetaInfo[] info = writeOldSplits(splits, out);
  out.close();
  writeJobSplitMetaInfo(fs,JobSubmissionFiles.getJobSplitMetaFile(jobSubmitDir), 
      new FsPermission(JobSubmissionFiles.JOB_FILE_PERMISSION), splitVersion,
      info);
}
Project: hadoop-2.6.0-cdh5.4.3    File: JobSplitWriter.java
@SuppressWarnings("unchecked")
private static <T extends InputSplit> 
SplitMetaInfo[] writeNewSplits(Configuration conf, 
    T[] array, FSDataOutputStream out)
throws IOException, InterruptedException {

  SplitMetaInfo[] info = new SplitMetaInfo[array.length];
  if (array.length != 0) {
    SerializationFactory factory = new SerializationFactory(conf);
    int i = 0;
    long offset = out.getPos();
    for(T split: array) {
      long prevCount = out.getPos();
      Text.writeString(out, split.getClass().getName());
      Serializer<T> serializer = 
        factory.getSerializer((Class<T>) split.getClass());
      serializer.open(out);
      serializer.serialize(split);
      long currCount = out.getPos();
      info[i++] = 
        new JobSplit.SplitMetaInfo( 
            split.getLocations(), offset,
            split.getLength());
      offset += currCount - prevCount;
    }
  }
  return info;
}
Project: hadoop-plus    File: JobSplitWriter.java
public static <T extends InputSplit> void createSplitFiles(Path jobSubmitDir, 
    Configuration conf, FileSystem fs, T[] splits) 
throws IOException, InterruptedException {
  FSDataOutputStream out = createFile(fs, 
      JobSubmissionFiles.getJobSplitFile(jobSubmitDir), conf);
  SplitMetaInfo[] info = writeNewSplits(conf, splits, out);
  out.close();
  writeJobSplitMetaInfo(fs,JobSubmissionFiles.getJobSplitMetaFile(jobSubmitDir), 
      new FsPermission(JobSubmissionFiles.JOB_FILE_PERMISSION), splitVersion,
      info);
}
Project: hadoop-plus    File: JobSplitWriter.java
public static void createSplitFiles(Path jobSubmitDir, 
    Configuration conf, FileSystem fs, 
    org.apache.hadoop.mapred.InputSplit[] splits) 
throws IOException {
  FSDataOutputStream out = createFile(fs, 
      JobSubmissionFiles.getJobSplitFile(jobSubmitDir), conf);
  SplitMetaInfo[] info = writeOldSplits(splits, out, conf);
  out.close();
  writeJobSplitMetaInfo(fs,JobSubmissionFiles.getJobSplitMetaFile(jobSubmitDir), 
      new FsPermission(JobSubmissionFiles.JOB_FILE_PERMISSION), splitVersion,
      info);
}
Project: hadoop-plus    File: JobSplitWriter.java
@SuppressWarnings("unchecked")
private static <T extends InputSplit> 
SplitMetaInfo[] writeNewSplits(Configuration conf, 
    T[] array, FSDataOutputStream out)
throws IOException, InterruptedException {

  SplitMetaInfo[] info = new SplitMetaInfo[array.length];
  if (array.length != 0) {
    SerializationFactory factory = new SerializationFactory(conf);
    int i = 0;
    int maxBlockLocations = conf.getInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY,
        MRConfig.MAX_BLOCK_LOCATIONS_DEFAULT);
    long offset = out.getPos();
    for(T split: array) {
      long prevCount = out.getPos();
      Text.writeString(out, split.getClass().getName());
      Serializer<T> serializer = 
        factory.getSerializer((Class<T>) split.getClass());
      serializer.open(out);
      serializer.serialize(split);
      long currCount = out.getPos();
      String[] locations = split.getLocations();
      if (locations.length > maxBlockLocations) {
        throw new IOException("Max block location exceeded for split: "
            + split + " splitsize: " + locations.length +
            " maxsize: " + maxBlockLocations);
      }
      info[i++] = 
        new JobSplit.SplitMetaInfo( 
            locations, offset,
            split.getLength());
      offset += currCount - prevCount;
    }
  }
  return info;
}
Project: hadoop-plus    File: JobSplitWriter.java
private static SplitMetaInfo[] writeOldSplits(
    org.apache.hadoop.mapred.InputSplit[] splits,
    FSDataOutputStream out, Configuration conf) throws IOException {
  SplitMetaInfo[] info = new SplitMetaInfo[splits.length];
  if (splits.length != 0) {
    int i = 0;
    long offset = out.getPos();
    int maxBlockLocations = conf.getInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY,
        MRConfig.MAX_BLOCK_LOCATIONS_DEFAULT);
    for(org.apache.hadoop.mapred.InputSplit split: splits) {
      long prevLen = out.getPos();
      Text.writeString(out, split.getClass().getName());
      split.write(out);
      long currLen = out.getPos();
      String[] locations = split.getLocations();
      if (locations.length > maxBlockLocations) {
        throw new IOException("Max block location exceeded for split: "
            + split + " splitsize: " + locations.length +
            " maxsize: " + maxBlockLocations);
      }
      info[i++] = new JobSplit.SplitMetaInfo( 
          locations, offset,
          split.getLength());
      offset += currLen - prevLen;
    }
  }
  return info;
}
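Note the behavioral difference in this fork: both writers above treat an oversized location list as a hard failure (IOException aborts job submission), whereas the other versions in this listing truncate the list with Arrays.copyOf and only log a warning.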
Project: hadoop-plus    File: JobSplitWriter.java
private static void writeJobSplitMetaInfo(FileSystem fs, Path filename, 
    FsPermission p, int splitMetaInfoVersion, 
    JobSplit.SplitMetaInfo[] allSplitMetaInfo) 
throws IOException {
  // write the splits meta-info to a file for the job tracker
  FSDataOutputStream out = 
    FileSystem.create(fs, filename, p);
  out.write(JobSplit.META_SPLIT_FILE_HEADER);
  WritableUtils.writeVInt(out, splitMetaInfoVersion);
  WritableUtils.writeVInt(out, allSplitMetaInfo.length);
  for (JobSplit.SplitMetaInfo splitMetaInfo : allSplitMetaInfo) {
    splitMetaInfo.write(out);
  }
  out.close();
}
Project: FlexMap    File: JobSplitWriter.java
public static <T extends InputSplit> void createSplitFiles(Path jobSubmitDir, 
    Configuration conf, FileSystem fs, T[] splits) 
throws IOException, InterruptedException {
  FSDataOutputStream out = createFile(fs, 
      JobSubmissionFiles.getJobSplitFile(jobSubmitDir), conf);
  SplitMetaInfo[] info = writeNewSplits(conf, splits, out);
  out.close();
  writeJobSplitMetaInfo(fs,JobSubmissionFiles.getJobSplitMetaFile(jobSubmitDir), 
      new FsPermission(JobSubmissionFiles.JOB_FILE_PERMISSION), splitVersion,
      info);
}
Project: FlexMap    File: JobSplitWriter.java
public static void createSplitFiles(Path jobSubmitDir, 
    Configuration conf, FileSystem fs, 
    org.apache.hadoop.mapred.InputSplit[] splits) 
throws IOException {
  FSDataOutputStream out = createFile(fs, 
      JobSubmissionFiles.getJobSplitFile(jobSubmitDir), conf);
  SplitMetaInfo[] info = writeOldSplits(splits, out, conf);
  out.close();
  writeJobSplitMetaInfo(fs,JobSubmissionFiles.getJobSplitMetaFile(jobSubmitDir), 
      new FsPermission(JobSubmissionFiles.JOB_FILE_PERMISSION), splitVersion,
      info);
}
Project: FlexMap    File: JobSplitWriter.java
@SuppressWarnings("unchecked")
private static <T extends InputSplit> 
SplitMetaInfo[] writeNewSplits(Configuration conf, 
    T[] array, FSDataOutputStream out)
throws IOException, InterruptedException {

  SplitMetaInfo[] info = new SplitMetaInfo[array.length];
  if (array.length != 0) {
    SerializationFactory factory = new SerializationFactory(conf);
    int i = 0;
    int maxBlockLocations = conf.getInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY,
        MRConfig.MAX_BLOCK_LOCATIONS_DEFAULT);
    long offset = out.getPos();
    for(T split: array) {
      long prevCount = out.getPos();
      Text.writeString(out, split.getClass().getName());
      Serializer<T> serializer = 
        factory.getSerializer((Class<T>) split.getClass());
      serializer.open(out);
      serializer.serialize(split);
      long currCount = out.getPos();
      String[] locations = split.getLocations();
      if (locations.length > maxBlockLocations) {
        LOG.warn("Max block location exceeded for split: "
            + split + " splitsize: " + locations.length +
            " maxsize: " + maxBlockLocations);
        locations = Arrays.copyOf(locations, maxBlockLocations);
      }
      info[i++] = 
        new JobSplit.SplitMetaInfo( 
            locations, offset,
            split.getLength());
      offset += currCount - prevCount;
    }
  }
  return info;
}
Project: FlexMap    File: JobSplitWriter.java
private static SplitMetaInfo[] writeOldSplits(
    org.apache.hadoop.mapred.InputSplit[] splits,
    FSDataOutputStream out, Configuration conf) throws IOException {
  SplitMetaInfo[] info = new SplitMetaInfo[splits.length];
  if (splits.length != 0) {
    int i = 0;
    long offset = out.getPos();
    int maxBlockLocations = conf.getInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY,
        MRConfig.MAX_BLOCK_LOCATIONS_DEFAULT);
    for(org.apache.hadoop.mapred.InputSplit split: splits) {
      long prevLen = out.getPos();
      Text.writeString(out, split.getClass().getName());
      split.write(out);
      long currLen = out.getPos();
      String[] locations = split.getLocations();
      if (locations.length > maxBlockLocations) {
        LOG.warn("Max block location exceeded for split: "
            + split + " splitsize: " + locations.length +
            " maxsize: " + maxBlockLocations);
        locations = Arrays.copyOf(locations,maxBlockLocations);
      }
      info[i++] = new JobSplit.SplitMetaInfo( 
          locations, offset,
          split.getLength());
      offset += currLen - prevLen;
    }
  }
  return info;
}
Project: FlexMap    File: JobSplitWriter.java
private static void writeJobSplitMetaInfo(FileSystem fs, Path filename, 
    FsPermission p, int splitMetaInfoVersion, 
    JobSplit.SplitMetaInfo[] allSplitMetaInfo) 
throws IOException {
  // write the splits meta-info to a file for the job tracker
  FSDataOutputStream out = 
    FileSystem.create(fs, filename, p);
  out.write(JobSplit.META_SPLIT_FILE_HEADER);
  WritableUtils.writeVInt(out, splitMetaInfoVersion);
  WritableUtils.writeVInt(out, allSplitMetaInfo.length);
  for (JobSplit.SplitMetaInfo splitMetaInfo : allSplitMetaInfo) {
    splitMetaInfo.write(out);
  }
  out.close();
}
Project: hops    File: JobSplitWriter.java
public static <T extends InputSplit> void createSplitFiles(Path jobSubmitDir, 
    Configuration conf, FileSystem fs, T[] splits) 
throws IOException, InterruptedException {
  FSDataOutputStream out = createFile(fs, 
      JobSubmissionFiles.getJobSplitFile(jobSubmitDir), conf);
  SplitMetaInfo[] info = writeNewSplits(conf, splits, out);
  out.close();
  writeJobSplitMetaInfo(fs,JobSubmissionFiles.getJobSplitMetaFile(jobSubmitDir), 
      new FsPermission(JobSubmissionFiles.JOB_FILE_PERMISSION), splitVersion,
      info);
}