Example source code for the Java class org.apache.hadoop.fs.slive.Constants.Distribution

The snippets below, collected from several Hadoop-derived projects, show how the slive benchmark's Distribution enum is used in WeightSelector, ConfigMerger, OperationData, and ArgumentParser.

Project: hadoop    File: WeightSelector.java
private void configureWeights(ConfigExtractor e) {
  weights = new HashMap<Distribution, Weightable>();
  weights.put(Distribution.UNIFORM, new UniformWeight());
  // weights.put(Distribution.BEG, new BeginWeight());
  // weights.put(Distribution.END, new EndWeight());
  // weights.put(Distribution.MID, new MidWeight());
}
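For orientation: the map pairs each Distribution constant with a weighting strategy, and only the uniform strategy is registered here (the skewed BEG/END/MID entries are commented out). A minimal, hypothetical sketch of that pattern follows; the Distribution enum and Weightable interface below are local stand-ins, not the actual slive types.

import java.util.EnumMap;
import java.util.Map;

public class WeightMapSketch {
  // Stand-in for org.apache.hadoop.fs.slive.Constants.Distribution.
  enum Distribution { UNIFORM, BEG, END, MID }

  // Hypothetical strategy interface; the real Weightable is internal to the slive package.
  interface Weightable {
    double weight(int elapsedMs, int durationMs);
  }

  public static void main(String[] args) {
    Map<Distribution, Weightable> weights = new EnumMap<>(Distribution.class);
    // Uniform weighting: every point in the run gets the same weight.
    weights.put(Distribution.UNIFORM, (elapsed, duration) -> 1.0);
    System.out.println(weights.get(Distribution.UNIFORM).weight(0, 60_000)); // prints 1.0
  }
}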
Project: hadoop    File: ConfigMerger.java
/**
 * Gets the base set of operations to use
 * 
 * @return Map
 */
private Map<OperationType, OperationData> getBaseOperations() {
  Map<OperationType, OperationData> base = new HashMap<OperationType, OperationData>();
  // add in all the operations
  // since they will all be applied unless changed
  OperationType[] types = OperationType.values();
  for (OperationType type : types) {
    base.put(type, new OperationData(Distribution.UNIFORM, null));
  }
  return base;
}
Project: hadoop    File: OperationData.java
/**
 * Expects a comma separated list (where the first element is the ratio
 * (between 0 and 100)) and the second element is the distribution (if
 * non-existent then uniform will be selected). If an empty list is passed in
 * then this element will just set the distribution (to uniform) and leave the
 * percent as null.
 */
OperationData(String data) {
  String pieces[] = Helper.getTrimmedStrings(data);
  distribution = Distribution.UNIFORM;
  percent = null;
  if (pieces.length == 1) {
    percent = (Double.parseDouble(pieces[0]) / 100.0d);
  } else if (pieces.length >= 2) {
    percent = (Double.parseDouble(pieces[0]) / 100.0d);
    distribution = Distribution.valueOf(StringUtils.toUpperCase(pieces[1]));
  }
}
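The constructor above accepts values such as "30,uniform" (30 percent, uniform distribution), a bare "30" (distribution defaults to uniform), or an empty string (percent stays null). The following standalone sketch re-implements the same parsing rules for illustration; it is a hypothetical mirror, not Hadoop's OperationData class.

import java.util.Locale;

public class OperationSpecSketch {
  // Stand-in for org.apache.hadoop.fs.slive.Constants.Distribution.
  enum Distribution { UNIFORM, BEG, END, MID }

  public static void main(String[] args) {
    parse("30,uniform"); // percent=0.3, distribution=UNIFORM
    parse("25");         // percent=0.25, distribution defaults to UNIFORM
    parse("");           // percent stays null, distribution defaults to UNIFORM
  }

  static void parse(String data) {
    Distribution distribution = Distribution.UNIFORM;
    Double percent = null;
    // Roughly what Helper.getTrimmedStrings does: split on commas and trim each piece.
    String[] pieces = data.trim().isEmpty() ? new String[0] : data.trim().split("\\s*,\\s*");
    if (pieces.length == 1) {
      percent = Double.parseDouble(pieces[0]) / 100.0d;
    } else if (pieces.length >= 2) {
      percent = Double.parseDouble(pieces[0]) / 100.0d;
      distribution = Distribution.valueOf(pieces[1].toUpperCase(Locale.ROOT));
    }
    System.out.println("\"" + data + "\" -> percent=" + percent + ", distribution=" + distribution);
  }
}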
Project: hadoop    File: ArgumentParser.java
/**
 * @return the option set to be used in command line parsing
 */
private Options getOptions() {
  Options cliopt = new Options();
  cliopt.addOption(ConfigOption.MAPS);
  cliopt.addOption(ConfigOption.REDUCES);
  cliopt.addOption(ConfigOption.PACKET_SIZE);
  cliopt.addOption(ConfigOption.OPS);
  cliopt.addOption(ConfigOption.DURATION);
  cliopt.addOption(ConfigOption.EXIT_ON_ERROR);
  cliopt.addOption(ConfigOption.SLEEP_TIME);
  cliopt.addOption(ConfigOption.TRUNCATE_WAIT);
  cliopt.addOption(ConfigOption.FILES);
  cliopt.addOption(ConfigOption.DIR_SIZE);
  cliopt.addOption(ConfigOption.BASE_DIR);
  cliopt.addOption(ConfigOption.RESULT_FILE);
  cliopt.addOption(ConfigOption.CLEANUP);
  {
    String distStrs[] = new String[Distribution.values().length];
    Distribution distValues[] = Distribution.values();
    for (int i = 0; i < distValues.length; ++i) {
      distStrs[i] = distValues[i].lowerName();
    }
    String opdesc = String.format(Constants.OP_DESCR, StringUtils
        .arrayToString(distStrs));
    for (OperationType type : OperationType.values()) {
      String opname = type.lowerName();
      cliopt.addOption(new Option(opname, true, opdesc));
    }
  }
  cliopt.addOption(ConfigOption.REPLICATION_AM);
  cliopt.addOption(ConfigOption.BLOCK_SIZE);
  cliopt.addOption(ConfigOption.READ_SIZE);
  cliopt.addOption(ConfigOption.WRITE_SIZE);
  cliopt.addOption(ConfigOption.APPEND_SIZE);
  cliopt.addOption(ConfigOption.TRUNCATE_SIZE);
  cliopt.addOption(ConfigOption.RANDOM_SEED);
  cliopt.addOption(ConfigOption.QUEUE_NAME);
  cliopt.addOption(ConfigOption.HELP);
  return cliopt;
}
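The returned Options object is then fed to Apache Commons CLI for parsing. A minimal usage sketch, assuming commons-cli 1.3+ (for DefaultParser) and a hypothetical subset of the flags built above:

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;

public class SliveCliSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical subset of the option set assembled in getOptions() above.
    Options cliopt = new Options();
    cliopt.addOption(new Option("maps", true, "number of map tasks"));
    cliopt.addOption(new Option("read", true, "read op as pct,distribution (e.g. 30,uniform)"));

    CommandLine line = new DefaultParser().parse(
        cliopt, new String[] { "-maps", "4", "-read", "30,uniform" });

    System.out.println("maps = " + line.getOptionValue("maps")); // 4
    System.out.println("read = " + line.getOptionValue("read")); // 30,uniform
  }
}

Each per-operation flag (one per OperationType) takes a single "percent,distribution" value, which is exactly the format parsed by the OperationData constructor shown earlier.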
Project: aliyun-oss-hadoop-fs    File: WeightSelector.java
private void configureWeights(ConfigExtractor e) {
  weights = new HashMap<Distribution, Weightable>();
  weights.put(Distribution.UNIFORM, new UniformWeight());
  // weights.put(Distribution.BEG, new BeginWeight());
  // weights.put(Distribution.END, new EndWeight());
  // weights.put(Distribution.MID, new MidWeight());
}
Project: aliyun-oss-hadoop-fs    File: ConfigMerger.java
/**
 * Gets the base set of operations to use
 * 
 * @return Map
 */
private Map<OperationType, OperationData> getBaseOperations() {
  Map<OperationType, OperationData> base = new HashMap<OperationType, OperationData>();
  // add in all the operations
  // since they will all be applied unless changed
  OperationType[] types = OperationType.values();
  for (OperationType type : types) {
    base.put(type, new OperationData(Distribution.UNIFORM, null));
  }
  return base;
}
Project: aliyun-oss-hadoop-fs    File: OperationData.java
/**
 * Expects a comma separated list (where the first element is the ratio
 * (between 0 and 100)) and the second element is the distribution (if
 * non-existent then uniform will be selected). If an empty list is passed in
 * then this element will just set the distribution (to uniform) and leave the
 * percent as null.
 */
OperationData(String data) {
  String pieces[] = Helper.getTrimmedStrings(data);
  distribution = Distribution.UNIFORM;
  percent = null;
  if (pieces.length == 1) {
    percent = (Double.parseDouble(pieces[0]) / 100.0d);
  } else if (pieces.length >= 2) {
    percent = (Double.parseDouble(pieces[0]) / 100.0d);
    distribution = Distribution.valueOf(StringUtils.toUpperCase(pieces[1]));
  }
}
Project: aliyun-oss-hadoop-fs    File: ArgumentParser.java
/**
 * @return the option set to be used in command line parsing
 */
private Options getOptions() {
  Options cliopt = new Options();
  cliopt.addOption(ConfigOption.MAPS);
  cliopt.addOption(ConfigOption.REDUCES);
  cliopt.addOption(ConfigOption.PACKET_SIZE);
  cliopt.addOption(ConfigOption.OPS);
  cliopt.addOption(ConfigOption.DURATION);
  cliopt.addOption(ConfigOption.EXIT_ON_ERROR);
  cliopt.addOption(ConfigOption.SLEEP_TIME);
  cliopt.addOption(ConfigOption.TRUNCATE_WAIT);
  cliopt.addOption(ConfigOption.FILES);
  cliopt.addOption(ConfigOption.DIR_SIZE);
  cliopt.addOption(ConfigOption.BASE_DIR);
  cliopt.addOption(ConfigOption.RESULT_FILE);
  cliopt.addOption(ConfigOption.CLEANUP);
  {
    String distStrs[] = new String[Distribution.values().length];
    Distribution distValues[] = Distribution.values();
    for (int i = 0; i < distValues.length; ++i) {
      distStrs[i] = distValues[i].lowerName();
    }
    String opdesc = String.format(Constants.OP_DESCR, StringUtils
        .arrayToString(distStrs));
    for (OperationType type : OperationType.values()) {
      String opname = type.lowerName();
      cliopt.addOption(new Option(opname, true, opdesc));
    }
  }
  cliopt.addOption(ConfigOption.REPLICATION_AM);
  cliopt.addOption(ConfigOption.BLOCK_SIZE);
  cliopt.addOption(ConfigOption.READ_SIZE);
  cliopt.addOption(ConfigOption.WRITE_SIZE);
  cliopt.addOption(ConfigOption.APPEND_SIZE);
  cliopt.addOption(ConfigOption.TRUNCATE_SIZE);
  cliopt.addOption(ConfigOption.RANDOM_SEED);
  cliopt.addOption(ConfigOption.QUEUE_NAME);
  cliopt.addOption(ConfigOption.HELP);
  return cliopt;
}
Project: big-c    File: WeightSelector.java
private void configureWeights(ConfigExtractor e) {
  weights = new HashMap<Distribution, Weightable>();
  weights.put(Distribution.UNIFORM, new UniformWeight());
  // weights.put(Distribution.BEG, new BeginWeight());
  // weights.put(Distribution.END, new EndWeight());
  // weights.put(Distribution.MID, new MidWeight());
}
Project: big-c    File: ConfigMerger.java
/**
 * Gets the base set of operations to use
 * 
 * @return Map
 */
private Map<OperationType, OperationData> getBaseOperations() {
  Map<OperationType, OperationData> base = new HashMap<OperationType, OperationData>();
  // add in all the operations
  // since they will all be applied unless changed
  OperationType[] types = OperationType.values();
  for (OperationType type : types) {
    base.put(type, new OperationData(Distribution.UNIFORM, null));
  }
  return base;
}
Project: big-c    File: OperationData.java
/**
 * Expects a comma separated list (where the first element is the ratio
 * (between 0 and 100)) and the second element is the distribution (if
 * non-existent then uniform will be selected). If an empty list is passed in
 * then this element will just set the distribution (to uniform) and leave the
 * percent as null.
 */
OperationData(String data) {
  String pieces[] = Helper.getTrimmedStrings(data);
  distribution = Distribution.UNIFORM;
  percent = null;
  if (pieces.length == 1) {
    percent = (Double.parseDouble(pieces[0]) / 100.0d);
  } else if (pieces.length >= 2) {
    percent = (Double.parseDouble(pieces[0]) / 100.0d);
    distribution = Distribution.valueOf(StringUtils.toUpperCase(pieces[1]));
  }
}
Project: big-c    File: ArgumentParser.java
/**
 * @return the option set to be used in command line parsing
 */
private Options getOptions() {
  Options cliopt = new Options();
  cliopt.addOption(ConfigOption.MAPS);
  cliopt.addOption(ConfigOption.REDUCES);
  cliopt.addOption(ConfigOption.PACKET_SIZE);
  cliopt.addOption(ConfigOption.OPS);
  cliopt.addOption(ConfigOption.DURATION);
  cliopt.addOption(ConfigOption.EXIT_ON_ERROR);
  cliopt.addOption(ConfigOption.SLEEP_TIME);
  cliopt.addOption(ConfigOption.TRUNCATE_WAIT);
  cliopt.addOption(ConfigOption.FILES);
  cliopt.addOption(ConfigOption.DIR_SIZE);
  cliopt.addOption(ConfigOption.BASE_DIR);
  cliopt.addOption(ConfigOption.RESULT_FILE);
  cliopt.addOption(ConfigOption.CLEANUP);
  {
    String distStrs[] = new String[Distribution.values().length];
    Distribution distValues[] = Distribution.values();
    for (int i = 0; i < distValues.length; ++i) {
      distStrs[i] = distValues[i].lowerName();
    }
    String opdesc = String.format(Constants.OP_DESCR, StringUtils
        .arrayToString(distStrs));
    for (OperationType type : OperationType.values()) {
      String opname = type.lowerName();
      cliopt.addOption(new Option(opname, true, opdesc));
    }
  }
  cliopt.addOption(ConfigOption.REPLICATION_AM);
  cliopt.addOption(ConfigOption.BLOCK_SIZE);
  cliopt.addOption(ConfigOption.READ_SIZE);
  cliopt.addOption(ConfigOption.WRITE_SIZE);
  cliopt.addOption(ConfigOption.APPEND_SIZE);
  cliopt.addOption(ConfigOption.TRUNCATE_SIZE);
  cliopt.addOption(ConfigOption.RANDOM_SEED);
  cliopt.addOption(ConfigOption.QUEUE_NAME);
  cliopt.addOption(ConfigOption.HELP);
  return cliopt;
}
Project: hadoop-2.6.0-cdh5.4.3    File: WeightSelector.java
private void configureWeights(ConfigExtractor e) {
  weights = new HashMap<Distribution, Weightable>();
  weights.put(Distribution.UNIFORM, new UniformWeight());
  // weights.put(Distribution.BEG, new BeginWeight());
  // weights.put(Distribution.END, new EndWeight());
  // weights.put(Distribution.MID, new MidWeight());
}
Project: hadoop-2.6.0-cdh5.4.3    File: ConfigMerger.java
/**
 * Gets the base set of operations to use
 * 
 * @return Map
 */
private Map<OperationType, OperationData> getBaseOperations() {
  Map<OperationType, OperationData> base = new HashMap<OperationType, OperationData>();
  // add in all the operations
  // since they will all be applied unless changed
  OperationType[] types = OperationType.values();
  for (OperationType type : types) {
    base.put(type, new OperationData(Distribution.UNIFORM, null));
  }
  return base;
}
Project: hadoop-2.6.0-cdh5.4.3    File: OperationData.java
/**
 * Expects a comma separated list (where the first element is the ratio
 * (between 0 and 100)) and the second element is the distribution (if
 * non-existent then uniform will be selected). If an empty list is passed in
 * then this element will just set the distribution (to uniform) and leave the
 * percent as null.
 */
OperationData(String data) {
  String pieces[] = Helper.getTrimmedStrings(data);
  distribution = Distribution.UNIFORM;
  percent = null;
  if (pieces.length == 1) {
    percent = (Double.parseDouble(pieces[0]) / 100.0d);
  } else if (pieces.length >= 2) {
    percent = (Double.parseDouble(pieces[0]) / 100.0d);
    distribution = Distribution.valueOf(pieces[1].toUpperCase());
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: ArgumentParser.java
/**
 * @return the option set to be used in command line parsing
 */
private Options getOptions() {
  Options cliopt = new Options();
  cliopt.addOption(ConfigOption.MAPS);
  cliopt.addOption(ConfigOption.REDUCES);
  cliopt.addOption(ConfigOption.PACKET_SIZE);
  cliopt.addOption(ConfigOption.OPS);
  cliopt.addOption(ConfigOption.DURATION);
  cliopt.addOption(ConfigOption.EXIT_ON_ERROR);
  cliopt.addOption(ConfigOption.SLEEP_TIME);
  cliopt.addOption(ConfigOption.FILES);
  cliopt.addOption(ConfigOption.DIR_SIZE);
  cliopt.addOption(ConfigOption.BASE_DIR);
  cliopt.addOption(ConfigOption.RESULT_FILE);
  cliopt.addOption(ConfigOption.CLEANUP);
  {
    String distStrs[] = new String[Distribution.values().length];
    Distribution distValues[] = Distribution.values();
    for (int i = 0; i < distValues.length; ++i) {
      distStrs[i] = distValues[i].lowerName();
    }
    String opdesc = String.format(Constants.OP_DESCR, StringUtils
        .arrayToString(distStrs));
    for (OperationType type : OperationType.values()) {
      String opname = type.lowerName();
      cliopt.addOption(new Option(opname, true, opdesc));
    }
  }
  cliopt.addOption(ConfigOption.REPLICATION_AM);
  cliopt.addOption(ConfigOption.BLOCK_SIZE);
  cliopt.addOption(ConfigOption.READ_SIZE);
  cliopt.addOption(ConfigOption.WRITE_SIZE);
  cliopt.addOption(ConfigOption.APPEND_SIZE);
  cliopt.addOption(ConfigOption.RANDOM_SEED);
  cliopt.addOption(ConfigOption.QUEUE_NAME);
  cliopt.addOption(ConfigOption.HELP);
  return cliopt;
}
Project: hadoop-plus    File: WeightSelector.java
private void configureWeights(ConfigExtractor e) {
  weights = new HashMap<Distribution, Weightable>();
  weights.put(Distribution.UNIFORM, new UniformWeight());
  // weights.put(Distribution.BEG, new BeginWeight());
  // weights.put(Distribution.END, new EndWeight());
  // weights.put(Distribution.MID, new MidWeight());
}
Project: hadoop-plus    File: ConfigMerger.java
/**
 * Gets the base set of operations to use
 * 
 * @return Map
 */
private Map<OperationType, OperationData> getBaseOperations() {
  Map<OperationType, OperationData> base = new HashMap<OperationType, OperationData>();
  // add in all the operations
  // since they will all be applied unless changed
  OperationType[] types = OperationType.values();
  for (OperationType type : types) {
    base.put(type, new OperationData(Distribution.UNIFORM, null));
  }
  return base;
}
Project: hadoop-plus    File: OperationData.java
/**
 * Expects a comma separated list (where the first element is the ratio
 * (between 0 and 100)) and the second element is the distribution (if
 * non-existent then uniform will be selected). If an empty list is passed in
 * then this element will just set the distribution (to uniform) and leave the
 * percent as null.
 */
OperationData(String data) {
  String pieces[] = Helper.getTrimmedStrings(data);
  distribution = Distribution.UNIFORM;
  percent = null;
  if (pieces.length == 1) {
    percent = (Double.parseDouble(pieces[0]) / 100.0d);
  } else if (pieces.length >= 2) {
    percent = (Double.parseDouble(pieces[0]) / 100.0d);
    distribution = Distribution.valueOf(pieces[1].toUpperCase());
  }
}
Project: hadoop-plus    File: ArgumentParser.java
/**
 * @return the option set to be used in command line parsing
 */
private Options getOptions() {
  Options cliopt = new Options();
  cliopt.addOption(ConfigOption.MAPS);
  cliopt.addOption(ConfigOption.REDUCES);
  cliopt.addOption(ConfigOption.PACKET_SIZE);
  cliopt.addOption(ConfigOption.OPS);
  cliopt.addOption(ConfigOption.DURATION);
  cliopt.addOption(ConfigOption.EXIT_ON_ERROR);
  cliopt.addOption(ConfigOption.SLEEP_TIME);
  cliopt.addOption(ConfigOption.FILES);
  cliopt.addOption(ConfigOption.DIR_SIZE);
  cliopt.addOption(ConfigOption.BASE_DIR);
  cliopt.addOption(ConfigOption.RESULT_FILE);
  cliopt.addOption(ConfigOption.CLEANUP);
  {
    String distStrs[] = new String[Distribution.values().length];
    Distribution distValues[] = Distribution.values();
    for (int i = 0; i < distValues.length; ++i) {
      distStrs[i] = distValues[i].lowerName();
    }
    String opdesc = String.format(Constants.OP_DESCR, StringUtils
        .arrayToString(distStrs));
    for (OperationType type : OperationType.values()) {
      String opname = type.lowerName();
      cliopt.addOption(new Option(opname, true, opdesc));
    }
  }
  cliopt.addOption(ConfigOption.REPLICATION_AM);
  cliopt.addOption(ConfigOption.BLOCK_SIZE);
  cliopt.addOption(ConfigOption.READ_SIZE);
  cliopt.addOption(ConfigOption.WRITE_SIZE);
  cliopt.addOption(ConfigOption.APPEND_SIZE);
  cliopt.addOption(ConfigOption.RANDOM_SEED);
  cliopt.addOption(ConfigOption.QUEUE_NAME);
  cliopt.addOption(ConfigOption.HELP);
  return cliopt;
}
Project: FlexMap    File: WeightSelector.java
private void configureWeights(ConfigExtractor e) {
  weights = new HashMap<Distribution, Weightable>();
  weights.put(Distribution.UNIFORM, new UniformWeight());
  // weights.put(Distribution.BEG, new BeginWeight());
  // weights.put(Distribution.END, new EndWeight());
  // weights.put(Distribution.MID, new MidWeight());
}
Project: FlexMap    File: ConfigMerger.java
/**
 * Gets the base set of operations to use
 * 
 * @return Map
 */
private Map<OperationType, OperationData> getBaseOperations() {
  Map<OperationType, OperationData> base = new HashMap<OperationType, OperationData>();
  // add in all the operations
  // since they will all be applied unless changed
  OperationType[] types = OperationType.values();
  for (OperationType type : types) {
    base.put(type, new OperationData(Distribution.UNIFORM, null));
  }
  return base;
}
Project: FlexMap    File: OperationData.java
/**
 * Expects a comma separated list (where the first element is the ratio
 * (between 0 and 100)) and the second element is the distribution (if
 * non-existent then uniform will be selected). If an empty list is passed in
 * then this element will just set the distribution (to uniform) and leave the
 * percent as null.
 */
OperationData(String data) {
  String pieces[] = Helper.getTrimmedStrings(data);
  distribution = Distribution.UNIFORM;
  percent = null;
  if (pieces.length == 1) {
    percent = (Double.parseDouble(pieces[0]) / 100.0d);
  } else if (pieces.length >= 2) {
    percent = (Double.parseDouble(pieces[0]) / 100.0d);
    distribution = Distribution.valueOf(pieces[1].toUpperCase());
  }
}
Project: FlexMap    File: ArgumentParser.java
/**
 * @return the option set to be used in command line parsing
 */
private Options getOptions() {
  Options cliopt = new Options();
  cliopt.addOption(ConfigOption.MAPS);
  cliopt.addOption(ConfigOption.REDUCES);
  cliopt.addOption(ConfigOption.PACKET_SIZE);
  cliopt.addOption(ConfigOption.OPS);
  cliopt.addOption(ConfigOption.DURATION);
  cliopt.addOption(ConfigOption.EXIT_ON_ERROR);
  cliopt.addOption(ConfigOption.SLEEP_TIME);
  cliopt.addOption(ConfigOption.FILES);
  cliopt.addOption(ConfigOption.DIR_SIZE);
  cliopt.addOption(ConfigOption.BASE_DIR);
  cliopt.addOption(ConfigOption.RESULT_FILE);
  cliopt.addOption(ConfigOption.CLEANUP);
  {
    String distStrs[] = new String[Distribution.values().length];
    Distribution distValues[] = Distribution.values();
    for (int i = 0; i < distValues.length; ++i) {
      distStrs[i] = distValues[i].lowerName();
    }
    String opdesc = String.format(Constants.OP_DESCR, StringUtils
        .arrayToString(distStrs));
    for (OperationType type : OperationType.values()) {
      String opname = type.lowerName();
      cliopt.addOption(new Option(opname, true, opdesc));
    }
  }
  cliopt.addOption(ConfigOption.REPLICATION_AM);
  cliopt.addOption(ConfigOption.BLOCK_SIZE);
  cliopt.addOption(ConfigOption.READ_SIZE);
  cliopt.addOption(ConfigOption.WRITE_SIZE);
  cliopt.addOption(ConfigOption.APPEND_SIZE);
  cliopt.addOption(ConfigOption.RANDOM_SEED);
  cliopt.addOption(ConfigOption.QUEUE_NAME);
  cliopt.addOption(ConfigOption.HELP);
  return cliopt;
}
Project: hops    File: WeightSelector.java
private void configureWeights(ConfigExtractor e) {
  weights = new HashMap<Distribution, Weightable>();
  weights.put(Distribution.UNIFORM, new UniformWeight());
  // weights.put(Distribution.BEG, new BeginWeight());
  // weights.put(Distribution.END, new EndWeight());
  // weights.put(Distribution.MID, new MidWeight());
}
Project: hops    File: ConfigMerger.java
/**
 * Gets the base set of operations to use
 * 
 * @return Map
 */
private Map<OperationType, OperationData> getBaseOperations() {
  Map<OperationType, OperationData> base = new HashMap<OperationType, OperationData>();
  // add in all the operations
  // since they will all be applied unless changed
  OperationType[] types = OperationType.values();
  for (OperationType type : types) {
    base.put(type, new OperationData(Distribution.UNIFORM, null));
  }
  return base;
}
Project: hops    File: OperationData.java
/**
 * Expects a comma separated list (where the first element is the ratio
 * (between 0 and 100)) and the second element is the distribution (if
 * non-existent then uniform will be selected). If an empty list is passed in
 * then this element will just set the distribution (to uniform) and leave the
 * percent as null.
 */
OperationData(String data) {
  String pieces[] = Helper.getTrimmedStrings(data);
  distribution = Distribution.UNIFORM;
  percent = null;
  if (pieces.length == 1) {
    percent = (Double.parseDouble(pieces[0]) / 100.0d);
  } else if (pieces.length >= 2) {
    percent = (Double.parseDouble(pieces[0]) / 100.0d);
    distribution = Distribution.valueOf(StringUtils.toUpperCase(pieces[1]));
  }
}
Project: hops    File: ArgumentParser.java
/**
 * @return the option set to be used in command line parsing
 */
private Options getOptions() {
  Options cliopt = new Options();
  cliopt.addOption(ConfigOption.MAPS);
  cliopt.addOption(ConfigOption.REDUCES);
  cliopt.addOption(ConfigOption.PACKET_SIZE);
  cliopt.addOption(ConfigOption.OPS);
  cliopt.addOption(ConfigOption.DURATION);
  cliopt.addOption(ConfigOption.EXIT_ON_ERROR);
  cliopt.addOption(ConfigOption.SLEEP_TIME);
  cliopt.addOption(ConfigOption.TRUNCATE_WAIT);
  cliopt.addOption(ConfigOption.FILES);
  cliopt.addOption(ConfigOption.DIR_SIZE);
  cliopt.addOption(ConfigOption.BASE_DIR);
  cliopt.addOption(ConfigOption.RESULT_FILE);
  cliopt.addOption(ConfigOption.CLEANUP);
  {
    String distStrs[] = new String[Distribution.values().length];
    Distribution distValues[] = Distribution.values();
    for (int i = 0; i < distValues.length; ++i) {
      distStrs[i] = distValues[i].lowerName();
    }
    String opdesc = String.format(Constants.OP_DESCR, StringUtils
        .arrayToString(distStrs));
    for (OperationType type : OperationType.values()) {
      String opname = type.lowerName();
      cliopt.addOption(new Option(opname, true, opdesc));
    }
  }
  cliopt.addOption(ConfigOption.REPLICATION_AM);
  cliopt.addOption(ConfigOption.BLOCK_SIZE);
  cliopt.addOption(ConfigOption.READ_SIZE);
  cliopt.addOption(ConfigOption.WRITE_SIZE);
  cliopt.addOption(ConfigOption.APPEND_SIZE);
  cliopt.addOption(ConfigOption.TRUNCATE_SIZE);
  cliopt.addOption(ConfigOption.RANDOM_SEED);
  cliopt.addOption(ConfigOption.QUEUE_NAME);
  cliopt.addOption(ConfigOption.HELP);
  return cliopt;
}
Project: hadoop-TCP    File: WeightSelector.java
private void configureWeights(ConfigExtractor e) {
  weights = new HashMap<Distribution, Weightable>();
  weights.put(Distribution.UNIFORM, new UniformWeight());
  // weights.put(Distribution.BEG, new BeginWeight());
  // weights.put(Distribution.END, new EndWeight());
  // weights.put(Distribution.MID, new MidWeight());
}
Project: hadoop-TCP    File: ConfigMerger.java
/**
 * Gets the base set of operations to use
 * 
 * @return Map
 */
private Map<OperationType, OperationData> getBaseOperations() {
  Map<OperationType, OperationData> base = new HashMap<OperationType, OperationData>();
  // add in all the operations
  // since they will all be applied unless changed
  OperationType[] types = OperationType.values();
  for (OperationType type : types) {
    base.put(type, new OperationData(Distribution.UNIFORM, null));
  }
  return base;
}
Project: hadoop-TCP    File: OperationData.java
/**
 * Expects a comma separated list (where the first element is the ratio
 * (between 0 and 100)) and the second element is the distribution (if
 * non-existent then uniform will be selected). If an empty list is passed in
 * then this element will just set the distribution (to uniform) and leave the
 * percent as null.
 */
OperationData(String data) {
  String pieces[] = Helper.getTrimmedStrings(data);
  distribution = Distribution.UNIFORM;
  percent = null;
  if (pieces.length == 1) {
    percent = (Double.parseDouble(pieces[0]) / 100.0d);
  } else if (pieces.length >= 2) {
    percent = (Double.parseDouble(pieces[0]) / 100.0d);
    distribution = Distribution.valueOf(pieces[1].toUpperCase());
  }
}
Project: hadoop-TCP    File: ArgumentParser.java
/**
 * @return the option set to be used in command line parsing
 */
private Options getOptions() {
  Options cliopt = new Options();
  cliopt.addOption(ConfigOption.MAPS);
  cliopt.addOption(ConfigOption.REDUCES);
  cliopt.addOption(ConfigOption.PACKET_SIZE);
  cliopt.addOption(ConfigOption.OPS);
  cliopt.addOption(ConfigOption.DURATION);
  cliopt.addOption(ConfigOption.EXIT_ON_ERROR);
  cliopt.addOption(ConfigOption.SLEEP_TIME);
  cliopt.addOption(ConfigOption.FILES);
  cliopt.addOption(ConfigOption.DIR_SIZE);
  cliopt.addOption(ConfigOption.BASE_DIR);
  cliopt.addOption(ConfigOption.RESULT_FILE);
  cliopt.addOption(ConfigOption.CLEANUP);
  {
    String distStrs[] = new String[Distribution.values().length];
    Distribution distValues[] = Distribution.values();
    for (int i = 0; i < distValues.length; ++i) {
      distStrs[i] = distValues[i].lowerName();
    }
    String opdesc = String.format(Constants.OP_DESCR, StringUtils
        .arrayToString(distStrs));
    for (OperationType type : OperationType.values()) {
      String opname = type.lowerName();
      cliopt.addOption(new Option(opname, true, opdesc));
    }
  }
  cliopt.addOption(ConfigOption.REPLICATION_AM);
  cliopt.addOption(ConfigOption.BLOCK_SIZE);
  cliopt.addOption(ConfigOption.READ_SIZE);
  cliopt.addOption(ConfigOption.WRITE_SIZE);
  cliopt.addOption(ConfigOption.APPEND_SIZE);
  cliopt.addOption(ConfigOption.RANDOM_SEED);
  cliopt.addOption(ConfigOption.QUEUE_NAME);
  cliopt.addOption(ConfigOption.HELP);
  return cliopt;
}
Project: hardfs    File: WeightSelector.java
private void configureWeights(ConfigExtractor e) {
  weights = new HashMap<Distribution, Weightable>();
  weights.put(Distribution.UNIFORM, new UniformWeight());
  // weights.put(Distribution.BEG, new BeginWeight());
  // weights.put(Distribution.END, new EndWeight());
  // weights.put(Distribution.MID, new MidWeight());
}
Project: hardfs    File: ConfigMerger.java
/**
 * Gets the base set of operations to use
 * 
 * @return Map
 */
private Map<OperationType, OperationData> getBaseOperations() {
  Map<OperationType, OperationData> base = new HashMap<OperationType, OperationData>();
  // add in all the operations
  // since they will all be applied unless changed
  OperationType[] types = OperationType.values();
  for (OperationType type : types) {
    base.put(type, new OperationData(Distribution.UNIFORM, null));
  }
  return base;
}
Project: hardfs    File: OperationData.java
/**
 * Expects a comma separated list (where the first element is the ratio
 * (between 0 and 100)) and the second element is the distribution (if
 * non-existent then uniform will be selected). If an empty list is passed in
 * then this element will just set the distribution (to uniform) and leave the
 * percent as null.
 */
OperationData(String data) {
  String pieces[] = Helper.getTrimmedStrings(data);
  distribution = Distribution.UNIFORM;
  percent = null;
  if (pieces.length == 1) {
    percent = (Double.parseDouble(pieces[0]) / 100.0d);
  } else if (pieces.length >= 2) {
    percent = (Double.parseDouble(pieces[0]) / 100.0d);
    distribution = Distribution.valueOf(pieces[1].toUpperCase());
  }
}
Project: hardfs    File: ArgumentParser.java
/**
 * @return the option set to be used in command line parsing
 */
private Options getOptions() {
  Options cliopt = new Options();
  cliopt.addOption(ConfigOption.MAPS);
  cliopt.addOption(ConfigOption.REDUCES);
  cliopt.addOption(ConfigOption.PACKET_SIZE);
  cliopt.addOption(ConfigOption.OPS);
  cliopt.addOption(ConfigOption.DURATION);
  cliopt.addOption(ConfigOption.EXIT_ON_ERROR);
  cliopt.addOption(ConfigOption.SLEEP_TIME);
  cliopt.addOption(ConfigOption.FILES);
  cliopt.addOption(ConfigOption.DIR_SIZE);
  cliopt.addOption(ConfigOption.BASE_DIR);
  cliopt.addOption(ConfigOption.RESULT_FILE);
  cliopt.addOption(ConfigOption.CLEANUP);
  {
    String distStrs[] = new String[Distribution.values().length];
    Distribution distValues[] = Distribution.values();
    for (int i = 0; i < distValues.length; ++i) {
      distStrs[i] = distValues[i].lowerName();
    }
    String opdesc = String.format(Constants.OP_DESCR, StringUtils
        .arrayToString(distStrs));
    for (OperationType type : OperationType.values()) {
      String opname = type.lowerName();
      cliopt.addOption(new Option(opname, true, opdesc));
    }
  }
  cliopt.addOption(ConfigOption.REPLICATION_AM);
  cliopt.addOption(ConfigOption.BLOCK_SIZE);
  cliopt.addOption(ConfigOption.READ_SIZE);
  cliopt.addOption(ConfigOption.WRITE_SIZE);
  cliopt.addOption(ConfigOption.APPEND_SIZE);
  cliopt.addOption(ConfigOption.RANDOM_SEED);
  cliopt.addOption(ConfigOption.QUEUE_NAME);
  cliopt.addOption(ConfigOption.HELP);
  return cliopt;
}
Project: hadoop-on-lustre2    File: WeightSelector.java
private void configureWeights(ConfigExtractor e) {
  weights = new HashMap<Distribution, Weightable>();
  weights.put(Distribution.UNIFORM, new UniformWeight());
  // weights.put(Distribution.BEG, new BeginWeight());
  // weights.put(Distribution.END, new EndWeight());
  // weights.put(Distribution.MID, new MidWeight());
}
Project: hadoop-on-lustre2    File: ConfigMerger.java
/**
 * Gets the base set of operations to use
 * 
 * @return Map
 */
private Map<OperationType, OperationData> getBaseOperations() {
  Map<OperationType, OperationData> base = new HashMap<OperationType, OperationData>();
  // add in all the operations
  // since they will all be applied unless changed
  OperationType[] types = OperationType.values();
  for (OperationType type : types) {
    base.put(type, new OperationData(Distribution.UNIFORM, null));
  }
  return base;
}
Project: hadoop-on-lustre2    File: OperationData.java
/**
 * Expects a comma separated list (where the first element is the ratio
 * (between 0 and 100)) and the second element is the distribution (if
 * non-existent then uniform will be selected). If an empty list is passed in
 * then this element will just set the distribution (to uniform) and leave the
 * percent as null.
 */
OperationData(String data) {
  String pieces[] = Helper.getTrimmedStrings(data);
  distribution = Distribution.UNIFORM;
  percent = null;
  if (pieces.length == 1) {
    percent = (Double.parseDouble(pieces[0]) / 100.0d);
  } else if (pieces.length >= 2) {
    percent = (Double.parseDouble(pieces[0]) / 100.0d);
    distribution = Distribution.valueOf(pieces[1].toUpperCase());
  }
}
Project: hadoop-on-lustre2    File: ArgumentParser.java
/**
 * @return the option set to be used in command line parsing
 */
private Options getOptions() {
  Options cliopt = new Options();
  cliopt.addOption(ConfigOption.MAPS);
  cliopt.addOption(ConfigOption.REDUCES);
  cliopt.addOption(ConfigOption.PACKET_SIZE);
  cliopt.addOption(ConfigOption.OPS);
  cliopt.addOption(ConfigOption.DURATION);
  cliopt.addOption(ConfigOption.EXIT_ON_ERROR);
  cliopt.addOption(ConfigOption.SLEEP_TIME);
  cliopt.addOption(ConfigOption.FILES);
  cliopt.addOption(ConfigOption.DIR_SIZE);
  cliopt.addOption(ConfigOption.BASE_DIR);
  cliopt.addOption(ConfigOption.RESULT_FILE);
  cliopt.addOption(ConfigOption.CLEANUP);
  {
    String distStrs[] = new String[Distribution.values().length];
    Distribution distValues[] = Distribution.values();
    for (int i = 0; i < distValues.length; ++i) {
      distStrs[i] = distValues[i].lowerName();
    }
    String opdesc = String.format(Constants.OP_DESCR, StringUtils
        .arrayToString(distStrs));
    for (OperationType type : OperationType.values()) {
      String opname = type.lowerName();
      cliopt.addOption(new Option(opname, true, opdesc));
    }
  }
  cliopt.addOption(ConfigOption.REPLICATION_AM);
  cliopt.addOption(ConfigOption.BLOCK_SIZE);
  cliopt.addOption(ConfigOption.READ_SIZE);
  cliopt.addOption(ConfigOption.WRITE_SIZE);
  cliopt.addOption(ConfigOption.APPEND_SIZE);
  cliopt.addOption(ConfigOption.RANDOM_SEED);
  cliopt.addOption(ConfigOption.QUEUE_NAME);
  cliopt.addOption(ConfigOption.HELP);
  return cliopt;
}