Java example source code for the class org.apache.hadoop.mapred.gridmix.RandomAlgorithms.Selector

Project: hadoop    File: FilePool.java
@Override
public long selectFiles(long targetSize, Collection<FileStatus> files)
    throws IOException {
  // If the request covers the whole pool, return every file.
  if (targetSize >= getSize()) {
    files.addAll(curdir);
    return getSize();
  }

  // Sample distinct file indices; the selection ratio is the fraction of the
  // pool's total size that was requested.
  Selector selector = new Selector(curdir.size(), (double) targetSize
      / getSize(), rand);

  ArrayList<Integer> selected = new ArrayList<Integer>();
  long ret = 0L;
  do {
    // Each next() call yields a not-yet-used index into curdir.
    int index = selector.next();
    selected.add(index);
    ret += curdir.get(index).getLen();
  } while (ret < targetSize);

  for (Integer i : selected) {
    files.add(curdir.get(i));
  }

  return ret;
}
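The method above draws distinct indices into curdir until the accumulated file length reaches targetSize (the projects listed below ship essentially the same implementation). The following is a minimal, standalone analogue of that loop for readers without gridmix on the classpath; it only assumes that Selector samples indices in [0, n) without replacement, and the file lengths used here are hypothetical.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Random;

public class SelectFilesSketch {
  public static void main(String[] args) {
    long[] fileLengths = {10L, 20L, 30L, 40L, 50L};   // hypothetical file sizes
    long targetSize = 60L;

    // Emulate sampling without replacement by shuffling the index range once.
    List<Integer> indices = new ArrayList<Integer>();
    for (int i = 0; i < fileLengths.length; i++) {
      indices.add(i);
    }
    Collections.shuffle(indices, new Random(0));

    List<Integer> selected = new ArrayList<Integer>();
    long ret = 0L;
    int cursor = 0;
    do {
      int index = indices.get(cursor++);              // next unused index
      selected.add(index);
      ret += fileLengths[index];
    } while (ret < targetSize && cursor < indices.size());

    System.out.println("picked indices " + selected + " covering " + ret + " bytes");
  }
}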
Project: aliyun-oss-hadoop-fs    File: FilePool.java
@Override
public long selectFiles(long targetSize, Collection<FileStatus> files)
    throws IOException {
  if (targetSize >= getSize()) {
    files.addAll(curdir);
    return getSize();
  }

  Selector selector = new Selector(curdir.size(), (double) targetSize
      / getSize(), rand);

  ArrayList<Integer> selected = new ArrayList<Integer>();
  long ret = 0L;
  do {
    int index = selector.next();
    selected.add(index);
    ret += curdir.get(index).getLen();
  } while (ret < targetSize);

  for (Integer i : selected) {
    files.add(curdir.get(i));
  }

  return ret;
}
Project: big-c    File: FilePool.java
@Override
public long selectFiles(long targetSize, Collection<FileStatus> files)
    throws IOException {
  if (targetSize >= getSize()) {
    files.addAll(curdir);
    return getSize();
  }

  Selector selector = new Selector(curdir.size(), (double) targetSize
      / getSize(), rand);

  ArrayList<Integer> selected = new ArrayList<Integer>();
  long ret = 0L;
  do {
    int index = selector.next();
    selected.add(index);
    ret += curdir.get(index).getLen();
  } while (ret < targetSize);

  for (Integer i : selected) {
    files.add(curdir.get(i));
  }

  return ret;
}
Project: hadoop-2.6.0-cdh5.4.3    File: FilePool.java
@Override
public long selectFiles(long targetSize, Collection<FileStatus> files)
    throws IOException {
  if (targetSize >= getSize()) {
    files.addAll(curdir);
    return getSize();
  }

  Selector selector = new Selector(curdir.size(), (double) targetSize
      / getSize(), rand);

  ArrayList<Integer> selected = new ArrayList<Integer>();
  long ret = 0L;
  do {
    int index = selector.next();
    selected.add(index);
    ret += curdir.get(index).getLen();
  } while (ret < targetSize);

  for (Integer i : selected) {
    files.add(curdir.get(i));
  }

  return ret;
}
Project: hadoop-plus    File: FilePool.java
@Override
public long selectFiles(long targetSize, Collection<FileStatus> files)
    throws IOException {
  if (targetSize >= getSize()) {
    files.addAll(curdir);
    return getSize();
  }

  Selector selector = new Selector(curdir.size(), (double) targetSize
      / getSize(), rand);

  ArrayList<Integer> selected = new ArrayList<Integer>();
  long ret = 0L;
  do {
    int index = selector.next();
    selected.add(index);
    ret += curdir.get(index).getLen();
  } while (ret < targetSize);

  for (Integer i : selected) {
    files.add(curdir.get(i));
  }

  return ret;
}
Project: hops    File: FilePool.java
@Override
public long selectFiles(long targetSize, Collection<FileStatus> files)
    throws IOException {
  if (targetSize >= getSize()) {
    files.addAll(curdir);
    return getSize();
  }

  Selector selector = new Selector(curdir.size(), (double) targetSize
      / getSize(), rand);

  ArrayList<Integer> selected = new ArrayList<Integer>();
  long ret = 0L;
  do {
    int index = selector.next();
    selected.add(index);
    ret += curdir.get(index).getLen();
  } while (ret < targetSize);

  for (Integer i : selected) {
    files.add(curdir.get(i));
  }

  return ret;
}
Project: hadoop-TCP    File: FilePool.java
@Override
public long selectFiles(long targetSize, Collection<FileStatus> files)
    throws IOException {
  if (targetSize >= getSize()) {
    files.addAll(curdir);
    return getSize();
  }

  Selector selector = new Selector(curdir.size(), (double) targetSize
      / getSize(), rand);

  ArrayList<Integer> selected = new ArrayList<Integer>();
  long ret = 0L;
  do {
    int index = selector.next();
    selected.add(index);
    ret += curdir.get(index).getLen();
  } while (ret < targetSize);

  for (Integer i : selected) {
    files.add(curdir.get(i));
  }

  return ret;
}
Project: hadoop-on-lustre    File: FilePool.java
@Override
public long selectFiles(long targetSize, Collection<FileStatus> files)
    throws IOException {
  if (targetSize >= getSize()) {
    files.addAll(curdir);
    return getSize();
  }

  Selector selector = new Selector(curdir.size(), (double) targetSize
      / getSize(), rand);

  ArrayList<Integer> selected = new ArrayList<Integer>();
  long ret = 0L;
  do {
    int index = selector.next();
    selected.add(index);
    ret += curdir.get(index).getLen();
  } while (ret < targetSize);

  for (Integer i : selected) {
    files.add(curdir.get(i));
  }

  return ret;
}
Project: hardfs    File: FilePool.java
@Override
public long selectFiles(long targetSize, Collection<FileStatus> files)
    throws IOException {
  if (targetSize >= getSize()) {
    files.addAll(curdir);
    return getSize();
  }

  Selector selector = new Selector(curdir.size(), (double) targetSize
      / getSize(), rand);

  ArrayList<Integer> selected = new ArrayList<Integer>();
  long ret = 0L;
  do {
    int index = selector.next();
    selected.add(index);
    ret += curdir.get(index).getLen();
  } while (ret < targetSize);

  for (Integer i : selected) {
    files.add(curdir.get(i));
  }

  return ret;
}
Project: hadoop-on-lustre2    File: FilePool.java
@Override
public long selectFiles(long targetSize, Collection<FileStatus> files)
    throws IOException {
  if (targetSize >= getSize()) {
    files.addAll(curdir);
    return getSize();
  }

  Selector selector = new Selector(curdir.size(), (double) targetSize
      / getSize(), rand);

  ArrayList<Integer> selected = new ArrayList<Integer>();
  long ret = 0L;
  do {
    int index = selector.next();
    selected.add(index);
    ret += curdir.get(index).getLen();
  } while (ret < targetSize);

  for (Integer i : selected) {
    files.add(curdir.get(i));
  }

  return ret;
}
Project: hanoi-hadoop-2.0.0-cdh    File: FilePool.java
@Override
public long selectFiles(long targetSize, Collection<FileStatus> files)
    throws IOException {
  if (targetSize >= getSize()) {
    files.addAll(curdir);
    return getSize();
  }

  Selector selector = new Selector(curdir.size(), (double) targetSize
      / getSize(), rand);

  ArrayList<Integer> selected = new ArrayList<Integer>();
  long ret = 0L;
  do {
    int index = selector.next();
    selected.add(index);
    ret += curdir.get(index).getLen();
  } while (ret < targetSize);

  for (Integer i : selected) {
    files.add(curdir.get(i));
  }

  return ret;
}
Project: mapreduce-fork    File: FilePool.java
@Override
public long selectFiles(long targetSize, Collection<FileStatus> files)
    throws IOException {
  if (targetSize >= getSize()) {
    files.addAll(curdir);
    return getSize();
  }

  Selector selector = new Selector(curdir.size(), (double) targetSize
      / getSize(), rand);

  ArrayList<Integer> selected = new ArrayList<Integer>();
  long ret = 0L;
  do {
    int index = selector.next();
    selected.add(index);
    ret += curdir.get(index).getLen();
  } while (ret < targetSize);

  for (Integer i : selected) {
    files.add(curdir.get(i));
  }

  return ret;
}
Project: hortonworks-extension    File: FilePool.java
@Override
public long selectFiles(long targetSize, Collection<FileStatus> files)
    throws IOException {
  if (targetSize >= getSize()) {
    files.addAll(curdir);
    return getSize();
  }

  Selector selector = new Selector(curdir.size(), (double) targetSize
      / getSize(), rand);

  ArrayList<Integer> selected = new ArrayList<Integer>();
  long ret = 0L;
  do {
    int index = selector.next();
    selected.add(index);
    ret += curdir.get(index).getLen();
  } while (ret < targetSize);

  for (Integer i : selected) {
    files.add(curdir.get(i));
  }

  return ret;
}
Project: hadoop    File: SleepJob.java
public SleepJob(Configuration conf, long submissionMillis, JobStory jobdesc,
    Path outRoot, UserGroupInformation ugi, int seq, int numLocations,
    String[] hosts) throws IOException {
  super(conf, submissionMillis, jobdesc, outRoot, ugi, seq);
  this.fakeLocations = numLocations;
  this.hosts = hosts.clone();
  // Build a selector over the host list only when fake split locations are
  // requested; the ratio is the fraction of hosts to draw.
  this.selector = (fakeLocations > 0)? new Selector(hosts.length, (float) fakeLocations
      / hosts.length, rand.get()) : null;
  this.mapTasksOnly = conf.getBoolean(SLEEPJOB_MAPTASK_ONLY, false);
  mapMaxSleepTime = conf.getLong(GRIDMIX_SLEEP_MAX_MAP_TIME, Long.MAX_VALUE);
  reduceMaxSleepTime = conf.getLong(GRIDMIX_SLEEP_MAX_REDUCE_TIME,
      Long.MAX_VALUE);
}
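In SleepJob the selector is created only when fake split locations are requested (fakeLocations > 0); presumably it is then used to draw that many distinct hosts per synthetic split. Below is a minimal standalone sketch of such a draw; it assumes only sampling without replacement, and the pickLocations helper and host names are hypothetical, not part of the quoted sources.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Random;

public class FakeLocationsSketch {
  // Hypothetical helper: draw up to fakeLocations distinct hosts.
  static String[] pickLocations(String[] hosts, int fakeLocations, Random rand) {
    List<Integer> indices = new ArrayList<Integer>();
    for (int i = 0; i < hosts.length; i++) {
      indices.add(i);
    }
    Collections.shuffle(indices, rand);               // sample without replacement
    String[] locations = new String[Math.min(fakeLocations, hosts.length)];
    for (int i = 0; i < locations.length; i++) {
      locations[i] = hosts[indices.get(i)];
    }
    return locations;
  }

  public static void main(String[] args) {
    String[] hosts = {"node-a", "node-b", "node-c", "node-d"};  // hypothetical hosts
    System.out.println(Arrays.toString(pickLocations(hosts, 2, new Random(0))));
  }
}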
Project: aliyun-oss-hadoop-fs    File: SleepJob.java
public SleepJob(Configuration conf, long submissionMillis, JobStory jobdesc,
    Path outRoot, UserGroupInformation ugi, int seq, int numLocations,
    String[] hosts) throws IOException {
  super(conf, submissionMillis, jobdesc, outRoot, ugi, seq);
  this.fakeLocations = numLocations;
  this.hosts = hosts.clone();
  this.selector = (fakeLocations > 0)? new Selector(hosts.length, (float) fakeLocations
      / hosts.length, rand.get()) : null;
  this.mapTasksOnly = conf.getBoolean(SLEEPJOB_MAPTASK_ONLY, false);
  mapMaxSleepTime = conf.getLong(GRIDMIX_SLEEP_MAX_MAP_TIME, Long.MAX_VALUE);
  reduceMaxSleepTime = conf.getLong(GRIDMIX_SLEEP_MAX_REDUCE_TIME,
      Long.MAX_VALUE);
}
Project: big-c    File: SleepJob.java
public SleepJob(Configuration conf, long submissionMillis, JobStory jobdesc,
    Path outRoot, UserGroupInformation ugi, int seq, int numLocations,
    String[] hosts) throws IOException {
  super(conf, submissionMillis, jobdesc, outRoot, ugi, seq);
  this.fakeLocations = numLocations;
  this.hosts = hosts.clone();
  this.selector = (fakeLocations > 0)? new Selector(hosts.length, (float) fakeLocations
      / hosts.length, rand.get()) : null;
  this.mapTasksOnly = conf.getBoolean(SLEEPJOB_MAPTASK_ONLY, false);
  mapMaxSleepTime = conf.getLong(GRIDMIX_SLEEP_MAX_MAP_TIME, Long.MAX_VALUE);
  reduceMaxSleepTime = conf.getLong(GRIDMIX_SLEEP_MAX_REDUCE_TIME,
      Long.MAX_VALUE);
}
Project: hadoop-2.6.0-cdh5.4.3    File: SleepJob.java
public SleepJob(Configuration conf, long submissionMillis, JobStory jobdesc,
    Path outRoot, UserGroupInformation ugi, int seq, int numLocations,
    String[] hosts) throws IOException {
  super(conf, submissionMillis, jobdesc, outRoot, ugi, seq);
  this.fakeLocations = numLocations;
  this.hosts = hosts.clone();
  this.selector = (fakeLocations > 0)? new Selector(hosts.length, (float) fakeLocations
      / hosts.length, rand.get()) : null;
  this.mapTasksOnly = conf.getBoolean(SLEEPJOB_MAPTASK_ONLY, false);
  mapMaxSleepTime = conf.getLong(GRIDMIX_SLEEP_MAX_MAP_TIME, Long.MAX_VALUE);
  reduceMaxSleepTime = conf.getLong(GRIDMIX_SLEEP_MAX_REDUCE_TIME,
      Long.MAX_VALUE);
}
Project: hadoop-2.6.0-cdh5.4.3    File: SleepJob.java
public SleepJob(Configuration conf, long submissionMillis, JobStory jobdesc,
    Path outRoot, UserGroupInformation ugi, int seq, int numLocations,
    String[] hosts) throws IOException {
  super(conf, submissionMillis, jobdesc, outRoot, ugi, seq);
  this.fakeLocations = numLocations;
  this.hosts = hosts;
  this.selector = (fakeLocations > 0)? new Selector(hosts.length, (float) fakeLocations
      / hosts.length, rand.get()) : null;
  this.mapTasksOnly = conf.getBoolean(SLEEPJOB_MAPTASK_ONLY, false);
  mapMaxSleepTime = conf.getLong(GRIDMIX_SLEEP_MAX_MAP_TIME, Long.MAX_VALUE);
  reduceMaxSleepTime = conf.getLong(GRIDMIX_SLEEP_MAX_REDUCE_TIME,
      Long.MAX_VALUE);
}
Project: hadoop-plus    File: SleepJob.java
public SleepJob(Configuration conf, long submissionMillis, JobStory jobdesc,
    Path outRoot, UserGroupInformation ugi, int seq, int numLocations,
    String[] hosts) throws IOException {
  super(conf, submissionMillis, jobdesc, outRoot, ugi, seq);
  this.fakeLocations = numLocations;
  this.hosts = hosts.clone();
  this.selector = (fakeLocations > 0)? new Selector(hosts.length, (float) fakeLocations
      / hosts.length, rand.get()) : null;
  this.mapTasksOnly = conf.getBoolean(SLEEPJOB_MAPTASK_ONLY, false);
  mapMaxSleepTime = conf.getLong(GRIDMIX_SLEEP_MAX_MAP_TIME, Long.MAX_VALUE);
  reduceMaxSleepTime = conf.getLong(GRIDMIX_SLEEP_MAX_REDUCE_TIME,
      Long.MAX_VALUE);
}
Project: hops    File: SleepJob.java
public SleepJob(Configuration conf, long submissionMillis, JobStory jobdesc,
    Path outRoot, UserGroupInformation ugi, int seq, int numLocations,
    String[] hosts) throws IOException {
  super(conf, submissionMillis, jobdesc, outRoot, ugi, seq);
  this.fakeLocations = numLocations;
  this.hosts = hosts.clone();
  this.selector = (fakeLocations > 0)? new Selector(hosts.length, (float) fakeLocations
      / hosts.length, rand.get()) : null;
  this.mapTasksOnly = conf.getBoolean(SLEEPJOB_MAPTASK_ONLY, false);
  mapMaxSleepTime = conf.getLong(GRIDMIX_SLEEP_MAX_MAP_TIME, Long.MAX_VALUE);
  reduceMaxSleepTime = conf.getLong(GRIDMIX_SLEEP_MAX_REDUCE_TIME,
      Long.MAX_VALUE);
}
Project: hadoop-TCP    File: SleepJob.java
public SleepJob(Configuration conf, long submissionMillis, JobStory jobdesc,
    Path outRoot, UserGroupInformation ugi, int seq, int numLocations,
    String[] hosts) throws IOException {
  super(conf, submissionMillis, jobdesc, outRoot, ugi, seq);
  this.fakeLocations = numLocations;
  this.hosts = hosts.clone();
  this.selector = (fakeLocations > 0)? new Selector(hosts.length, (float) fakeLocations
      / hosts.length, rand.get()) : null;
  this.mapTasksOnly = conf.getBoolean(SLEEPJOB_MAPTASK_ONLY, false);
  mapMaxSleepTime = conf.getLong(GRIDMIX_SLEEP_MAX_MAP_TIME, Long.MAX_VALUE);
  reduceMaxSleepTime = conf.getLong(GRIDMIX_SLEEP_MAX_REDUCE_TIME,
      Long.MAX_VALUE);
}
Project: hadoop-on-lustre    File: SleepJob.java
public SleepJob(Configuration conf, long submissionMillis, JobStory jobdesc,
    Path outRoot, UserGroupInformation ugi, int seq, int numLocations,
    String[] hosts) throws IOException {
  super(conf, submissionMillis, jobdesc, outRoot, ugi, seq);
  this.fakeLocations = numLocations;
  this.hosts = hosts;
  this.selector = (fakeLocations > 0)? new Selector(hosts.length, (float) fakeLocations
      / hosts.length, rand.get()) : null;
  this.mapTasksOnly = conf.getBoolean(SLEEPJOB_MAPTASK_ONLY, false);
  mapMaxSleepTime = conf.getLong(GRIDMIX_SLEEP_MAX_MAP_TIME, Long.MAX_VALUE);
  reduceMaxSleepTime = conf.getLong(GRIDMIX_SLEEP_MAX_REDUCE_TIME,
      Long.MAX_VALUE);
}
Project: hardfs    File: SleepJob.java
public SleepJob(Configuration conf, long submissionMillis, JobStory jobdesc,
    Path outRoot, UserGroupInformation ugi, int seq, int numLocations,
    String[] hosts) throws IOException {
  super(conf, submissionMillis, jobdesc, outRoot, ugi, seq);
  this.fakeLocations = numLocations;
  this.hosts = hosts.clone();
  this.selector = (fakeLocations > 0)? new Selector(hosts.length, (float) fakeLocations
      / hosts.length, rand.get()) : null;
  this.mapTasksOnly = conf.getBoolean(SLEEPJOB_MAPTASK_ONLY, false);
  mapMaxSleepTime = conf.getLong(GRIDMIX_SLEEP_MAX_MAP_TIME, Long.MAX_VALUE);
  reduceMaxSleepTime = conf.getLong(GRIDMIX_SLEEP_MAX_REDUCE_TIME,
      Long.MAX_VALUE);
}
Project: hadoop-on-lustre2    File: SleepJob.java
public SleepJob(Configuration conf, long submissionMillis, JobStory jobdesc,
    Path outRoot, UserGroupInformation ugi, int seq, int numLocations,
    String[] hosts) throws IOException {
  super(conf, submissionMillis, jobdesc, outRoot, ugi, seq);
  this.fakeLocations = numLocations;
  this.hosts = hosts.clone();
  this.selector = (fakeLocations > 0)? new Selector(hosts.length, (float) fakeLocations
      / hosts.length, rand.get()) : null;
  this.mapTasksOnly = conf.getBoolean(SLEEPJOB_MAPTASK_ONLY, false);
  mapMaxSleepTime = conf.getLong(GRIDMIX_SLEEP_MAX_MAP_TIME, Long.MAX_VALUE);
  reduceMaxSleepTime = conf.getLong(GRIDMIX_SLEEP_MAX_REDUCE_TIME,
      Long.MAX_VALUE);
}
Project: hanoi-hadoop-2.0.0-cdh    File: SleepJob.java
public SleepJob(Configuration conf, long submissionMillis, JobStory jobdesc,
    Path outRoot, UserGroupInformation ugi, int seq, int numLocations,
    String[] hosts) throws IOException {
  super(conf, submissionMillis, jobdesc, outRoot, ugi, seq);
  this.fakeLocations = numLocations;
  this.hosts = hosts;
  this.selector = (fakeLocations > 0)? new Selector(hosts.length, (float) fakeLocations
      / hosts.length, rand.get()) : null;
  this.mapTasksOnly = conf.getBoolean(SLEEPJOB_MAPTASK_ONLY, false);
  mapMaxSleepTime = conf.getLong(GRIDMIX_SLEEP_MAX_MAP_TIME, Long.MAX_VALUE);
  reduceMaxSleepTime = conf.getLong(GRIDMIX_SLEEP_MAX_REDUCE_TIME,
      Long.MAX_VALUE);
}
Project: mapreduce-fork    File: SleepJob.java
public SleepJob(Configuration conf, long submissionMillis, JobStory jobdesc,
    Path outRoot, UserGroupInformation ugi, int seq, int numLocations,
    String[] hosts) throws IOException {
  super(conf, submissionMillis, jobdesc, outRoot, ugi, seq);
  this.fakeLocations = numLocations;
  this.hosts = hosts;
  this.selector = (fakeLocations > 0)? new Selector(hosts.length, (float) fakeLocations
      / hosts.length, rand.get()) : null;
  this.mapTasksOnly = conf.getBoolean(SLEEPJOB_MAPTASK_ONLY, false);
  mapMaxSleepTime = conf.getLong(GRIDMIX_SLEEP_MAX_MAP_TIME, Long.MAX_VALUE);
  reduceMaxSleepTime = conf.getLong(GRIDMIX_SLEEP_MAX_REDUCE_TIME,
      Long.MAX_VALUE);
}
Project: hortonworks-extension    File: SleepJob.java
public SleepJob(Configuration conf, long submissionMillis, JobStory jobdesc,
    Path outRoot, UserGroupInformation ugi, int seq, int numLocations,
    String[] hosts) throws IOException {
  super(conf, submissionMillis, jobdesc, outRoot, ugi, seq);
  this.fakeLocations = numLocations;
  this.hosts = hosts;
  this.selector = (fakeLocations > 0)? new Selector(hosts.length, (float) fakeLocations
      / hosts.length, rand.get()) : null;
  this.mapTasksOnly = conf.getBoolean(SLEEPJOB_MAPTASK_ONLY, false);
  mapMaxSleepTime = conf.getLong(GRIDMIX_SLEEP_MAX_MAP_TIME, Long.MAX_VALUE);
  reduceMaxSleepTime = conf.getLong(GRIDMIX_SLEEP_MAX_REDUCE_TIME,
      Long.MAX_VALUE);
}