Source code examples for the Java class org.apache.hadoop.util.HostsFileReader

Project: aliyun-oss-hadoop-fs    File: Balancer.java
private static int processHostList(String[] args, int i, String type,
    Set<String> nodes) {
  Preconditions.checkArgument(++i < args.length,
      "List of %s nodes | -f <filename> is missing: args=%s",
      type, Arrays.toString(args));
  if ("-f".equalsIgnoreCase(args[i])) {
    Preconditions.checkArgument(++i < args.length,
        "File containing %s nodes is not specified: args=%s",
        type, Arrays.toString(args));

    final String filename = args[i];
    try {
      HostsFileReader.readFileToSet(type, filename, nodes);
    } catch (IOException e) {
      throw new IllegalArgumentException(
          "Failed to read " + type + " node list from file: " + filename);
    }
  } else {
    final String[] addresses = StringUtils.getTrimmedStrings(args[i]);
    nodes.addAll(Arrays.asList(addresses));
  }
  return i;
}
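As the Balancer snippet shows, the static HostsFileReader.readFileToSet helper is the common entry point for loading a host list. A minimal standalone sketch, assuming a hypothetical file /tmp/exclude-hosts with one host name per line; the "excluded" type string is only a label echoed in log and error messages:

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.util.HostsFileReader;

public class ReadHostListExample {
  public static void main(String[] args) throws IOException {
    Set<String> nodes = new HashSet<String>();
    // Reads a newline-separated host list from a hypothetical file into the set.
    HostsFileReader.readFileToSet("excluded", "/tmp/exclude-hosts", nodes);
    System.out.println("Read " + nodes.size() + " host(s): " + nodes);
  }
}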
Project: hadoop-EAR    File: FaultTolerantBlockPlacementPolicy.java
/** {@inheritDoc} */
public void initialize(Configuration conf,
                       FSClusterStats stats,
                       NetworkTopology clusterMap,
                       HostsFileReader hostsReader,
                       DNSToSwitchMapping dnsToSwitchMapping,
                       FSNamesystem ns) {
  super.initialize(
    conf, stats, clusterMap, hostsReader, dnsToSwitchMapping, ns);
  this.namesystem = ns;
  // Default
  this.stripeLen = 0;
  this.considerLoad = conf.getBoolean("dfs.replication.considerLoad", true);
  FSNamesystem.LOG.info("F4: Block placement will consider load: "
    + this.considerLoad);
  initParityConfigs();
  this.stagingDir = conf.get("dfs.f4.staging", "/staging");
  this.localDir = conf.get("dfs.f4.local", "/local");
}
Project: hadoop-EAR    File: NodeManager.java
/**
 * Constructor for the NodeManager, used when reading back the state of
 * NodeManager from disk.
 * @param clusterManager The ClusterManager instance
 * @param hostsReader The HostsReader instance
 * @param coronaSerializer The CoronaSerializer instance, which will be used
 *                         to read JSON from disk
 * @throws IOException
 */
public NodeManager(ClusterManager clusterManager,
                   HostsFileReader hostsReader,
                   CoronaSerializer coronaSerializer)
  throws IOException {
  this(clusterManager, hostsReader);

  // Expecting the START_OBJECT token for nodeManager
  coronaSerializer.readStartObjectToken("nodeManager");
  readNameToNode(coronaSerializer);
  readHostsToSessions(coronaSerializer);
  readNameToApps(coronaSerializer);
  // Expecting the END_OBJECT token for nodeManager
  coronaSerializer.readEndObjectToken("nodeManager");

  // topologyCache need not be serialized, it will eventually be rebuilt.
  // cpuToResourcePartitioning and resourceLimit need not be serialized,
  // they can be read from the conf.
}
Project: hadoop-EAR    File: BlockPlacementPolicy.java
/**
 * Get an instance of the configured Block Placement Policy based on the
 * value of the configuration parameter dfs.block.replicator.classname.
 *
 * @param conf the configuration to be used
 * @param stats an object that is used to retrieve the load on the cluster
 * @param clusterMap the network topology of the cluster
 * @param hostsReader the reader for the includes/excludes host lists
 * @param dnsToSwitchMapping the mapping from host names to switches/racks
 * @param namesystem the FSNamesystem
 * @return an instance of BlockPlacementPolicy
 */
public static BlockPlacementPolicy getInstance(Configuration conf, 
                                               FSClusterStats stats,
                                               NetworkTopology clusterMap,
                                               HostsFileReader hostsReader,
                                               DNSToSwitchMapping dnsToSwitchMapping,
                                               FSNamesystem namesystem) {
  Class<? extends BlockPlacementPolicy> replicatorClass =
                    conf.getClass("dfs.block.replicator.classname",
                                  BlockPlacementPolicyDefault.class,
                                  BlockPlacementPolicy.class);
  BlockPlacementPolicy replicator = (BlockPlacementPolicy) ReflectionUtils.newInstance(
                                                           replicatorClass, conf);
  replicator.initialize(conf, stats, clusterMap, hostsReader, 
                        dnsToSwitchMapping, namesystem);
  return replicator;
}
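Because getInstance resolves the class through ReflectionUtils, a custom policy can be swapped in through configuration alone. A hedged sketch; org.example.MyPlacementPolicy is a hypothetical BlockPlacementPolicy subclass, not part of the source above:

import org.apache.hadoop.conf.Configuration;

public class CustomPolicyConfigExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Hypothetical subclass name; getInstance(...) above would construct it
    // reflectively and then call initialize(...) on it.
    conf.set("dfs.block.replicator.classname",
             "org.example.MyPlacementPolicy");
  }
}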
Project: hadoop-EAR    File: BlockPlacementPolicyConfigurable.java
/** {@inheritDoc} */
public void initialize(Configuration conf, FSClusterStats stats,
    NetworkTopology clusterMap, HostsFileReader hostsReader,
    DNSToSwitchMapping dnsToSwitchMapping, FSNamesystem ns) {
  super.initialize(conf, stats, clusterMap, hostsReader, dnsToSwitchMapping, ns);
  this.rackWindow = conf.getInt("dfs.replication.rackwindow", 2);
  this.machineWindow = conf.getInt("dfs.replication.machineWindow", 5);
  this.racks = new ArrayList<String>();
  this.hostsReader = hostsReader;
  this.dnsToSwitchMapping = dnsToSwitchMapping;
  hostsUpdated(true);
  if (r == null) {
    r = new Random();
  }
  LOG.info("BlockPlacementPolicyConfigurable initialized");
}
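The two window sizes read above are plain integer keys, so they can be tuned without code changes. A brief sketch of overriding them; the values are illustrative, the defaults in the snippet are 2 and 5:

import org.apache.hadoop.conf.Configuration;

public class ConfigurableWindowsExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Illustrative overrides for the rack and machine windows used by
    // BlockPlacementPolicyConfigurable.initialize(...) above.
    conf.setInt("dfs.replication.rackwindow", 3);
    conf.setInt("dfs.replication.machineWindow", 10);
  }
}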
Project: RDFS    File: BlockPlacementPolicy.java
/**
 * Get an instance of the configured Block Placement Policy based on the
 * value of the configuration parameter dfs.block.replicator.classname.
 *
 * @param conf the configuration to be used
 * @param stats an object that is used to retrieve the load on the cluster
 * @param clusterMap the network topology of the cluster
 * @param hostsReader the reader for the includes/excludes host lists
 * @param dnsToSwitchMapping the mapping from host names to switches/racks
 * @param namesystem the FSNamesystem
 * @return an instance of BlockPlacementPolicy
 */
public static BlockPlacementPolicy getInstance(Configuration conf, 
                                               FSClusterStats stats,
                                               NetworkTopology clusterMap,
                                               HostsFileReader hostsReader,
                                               DNSToSwitchMapping dnsToSwitchMapping,
                                               FSNamesystem namesystem) {
  Class<? extends BlockPlacementPolicy> replicatorClass =
                    conf.getClass("dfs.block.replicator.classname",
                                  BlockPlacementPolicyDefault.class,
                                  BlockPlacementPolicy.class);
  BlockPlacementPolicy replicator = (BlockPlacementPolicy) ReflectionUtils.newInstance(
                                                           replicatorClass, conf);
  replicator.initialize(conf, stats, clusterMap, hostsReader, 
                        dnsToSwitchMapping, namesystem);
  return replicator;
}
Project: RDFS    File: BlockPlacementPolicyConfigurable.java
/** {@inheritDoc} */
public void initialize(Configuration conf, FSClusterStats stats,
    NetworkTopology clusterMap, HostsFileReader hostsReader,
    DNSToSwitchMapping dnsToSwitchMapping, FSNamesystem ns) {
  super.initialize(conf, stats, clusterMap, hostsReader, dnsToSwitchMapping, ns);
  this.rackWindow = conf.getInt("dfs.replication.rackwindow", 2);
  this.machineWindow = conf.getInt("dfs.replication.machineWindow", 5);
  this.racks = new ArrayList<String>();
  this.hostsReader = hostsReader;
  this.dnsToSwitchMapping = dnsToSwitchMapping;
  hostsUpdated(true);
  if (r == null) {
    r = new Random();
  }
  LOG.info("BlockPlacementPolicyConfigurable initialized");
}
Project: hadoop    File: NodesListManager.java
private HostsFileReader createHostsFileReader(String includesFile,
    String excludesFile) throws IOException, YarnException {
  HostsFileReader hostsReader =
      new HostsFileReader(includesFile,
          (includesFile == null || includesFile.isEmpty()) ? null
              : this.rmContext.getConfigurationProvider()
                  .getConfigurationInputStream(this.conf, includesFile),
          excludesFile,
          (excludesFile == null || excludesFile.isEmpty()) ? null
              : this.rmContext.getConfigurationProvider()
                  .getConfigurationInputStream(this.conf, excludesFile));
  return hostsReader;
}
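Outside the ResourceManager, the same four-argument constructor can be fed plain input streams. A minimal sketch, assuming hypothetical include/exclude files under /tmp:

import java.io.FileInputStream;
import java.io.IOException;

import org.apache.hadoop.util.HostsFileReader;

public class StreamBackedReaderExample {
  public static void main(String[] args) throws IOException {
    String includes = "/tmp/hosts.include";
    String excludes = "/tmp/hosts.exclude";
    // Parses the given streams as the include and exclude host lists.
    HostsFileReader reader = new HostsFileReader(
        includes, new FileInputStream(includes),
        excludes, new FileInputStream(excludes));
    System.out.println("Included: " + reader.getHosts());
    System.out.println("Excluded: " + reader.getExcludedHosts());
  }
}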
Project: hadoop    File: Dispatcher.java
/**
 * Read a set of host names from a file.
 *
 * @param fileName the file containing the host names, one per line
 * @param type a label for the host list, used in error messages
 * @return the set of host names
 */
static Set<String> getHostListFromFile(String fileName, String type) {
  Set<String> nodes = new HashSet<String>();
  try {
    HostsFileReader.readFileToSet(type, fileName, nodes);
    return StringUtils.getTrimmedStrings(nodes);
  } catch (IOException e) {
    throw new IllegalArgumentException(
        "Failed to read host list from file: " + fileName);
  }
}
Project: hadoop    File: HostFileManager.java
private static HostSet readFile(String type, String filename)
        throws IOException {
  HostSet res = new HostSet();
  if (!filename.isEmpty()) {
    HashSet<String> entrySet = new HashSet<String>();
    HostsFileReader.readFileToSet(type, filename, entrySet);
    for (String str : entrySet) {
      InetSocketAddress addr = parseEntry(type, filename, str);
      if (addr != null) {
        res.add(addr);
      }
    }
  }
  return res;
}
Project: aliyun-oss-hadoop-fs    File: NodesListManager.java
private HostsFileReader createHostsFileReader(String includesFile,
    String excludesFile) throws IOException, YarnException {
  HostsFileReader hostsReader =
      new HostsFileReader(includesFile,
          (includesFile == null || includesFile.isEmpty()) ? null
              : this.rmContext.getConfigurationProvider()
                  .getConfigurationInputStream(this.conf, includesFile),
          excludesFile,
          (excludesFile == null || excludesFile.isEmpty()) ? null
              : this.rmContext.getConfigurationProvider()
                  .getConfigurationInputStream(this.conf, excludesFile));
  return hostsReader;
}
Project: aliyun-oss-hadoop-fs    File: HostFileManager.java
private static HostSet readFile(String type, String filename)
        throws IOException {
  HostSet res = new HostSet();
  if (!filename.isEmpty()) {
    HashSet<String> entrySet = new HashSet<String>();
    HostsFileReader.readFileToSet(type, filename, entrySet);
    for (String str : entrySet) {
      InetSocketAddress addr = parseEntry(type, filename, str);
      if (addr != null) {
        res.add(addr);
      }
    }
  }
  return res;
}
Project: big-c    File: NodesListManager.java
private HostsFileReader createHostsFileReader(String includesFile,
    String excludesFile) throws IOException, YarnException {
  HostsFileReader hostsReader =
      new HostsFileReader(includesFile,
          (includesFile == null || includesFile.isEmpty()) ? null
              : this.rmContext.getConfigurationProvider()
                  .getConfigurationInputStream(this.conf, includesFile),
          excludesFile,
          (excludesFile == null || excludesFile.isEmpty()) ? null
              : this.rmContext.getConfigurationProvider()
                  .getConfigurationInputStream(this.conf, excludesFile));
  return hostsReader;
}
Project: big-c    File: Dispatcher.java
/**
 * Read a set of host names from a file.
 *
 * @param fileName the file containing the host names, one per line
 * @param type a label for the host list, used in error messages
 * @return the set of host names
 */
static Set<String> getHostListFromFile(String fileName, String type) {
  Set<String> nodes = new HashSet<String>();
  try {
    HostsFileReader.readFileToSet(type, fileName, nodes);
    return StringUtils.getTrimmedStrings(nodes);
  } catch (IOException e) {
    throw new IllegalArgumentException(
        "Failed to read host list from file: " + fileName);
  }
}
Project: big-c    File: HostFileManager.java
private static HostSet readFile(String type, String filename)
        throws IOException {
  HostSet res = new HostSet();
  if (!filename.isEmpty()) {
    HashSet<String> entrySet = new HashSet<String>();
    HostsFileReader.readFileToSet(type, filename, entrySet);
    for (String str : entrySet) {
      InetSocketAddress addr = parseEntry(type, filename, str);
      if (addr != null) {
        res.add(addr);
      }
    }
  }
  return res;
}
Project: hadoop-2.6.0-cdh5.4.3    File: NodesListManager.java
private HostsFileReader createHostsFileReader(String includesFile,
    String excludesFile) throws IOException, YarnException {
  HostsFileReader hostsReader =
      new HostsFileReader(includesFile,
          (includesFile == null || includesFile.isEmpty()) ? null
              : this.rmContext.getConfigurationProvider()
                  .getConfigurationInputStream(this.conf, includesFile),
          excludesFile,
          (excludesFile == null || excludesFile.isEmpty()) ? null
              : this.rmContext.getConfigurationProvider()
                  .getConfigurationInputStream(this.conf, excludesFile));
  return hostsReader;
}
Project: hadoop-2.6.0-cdh5.4.3    File: Dispatcher.java
/**
 * Read a set of host names from a file.
 *
 * @param fileName the file containing the host names, one per line
 * @param type a label for the host list, used in error messages
 * @return the set of host names
 */
static Set<String> getHostListFromFile(String fileName, String type) {
  Set<String> nodes = new HashSet<String>();
  try {
    HostsFileReader.readFileToSet(type, fileName, nodes);
    return StringUtils.getTrimmedStrings(nodes);
  } catch (IOException e) {
    throw new IllegalArgumentException(
        "Failed to read host list from file: " + fileName);
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: HostFileManager.java
private static HostSet readFile(String type, String filename)
        throws IOException {
  HostSet res = new HostSet();
  if (!filename.isEmpty()) {
    HashSet<String> entrySet = new HashSet<String>();
    HostsFileReader.readFileToSet(type, filename, entrySet);
    for (String str : entrySet) {
      InetSocketAddress addr = parseEntry(type, filename, str);
      if (addr != null) {
        res.add(addr);
      }
    }
  }
  return res;
}
Project: hadoop-EAR    File: TestTTMover.java
public void testFileModifications() throws Exception {
  System.out.println(TEST_ROOT_DIR);
  Configuration conf = new Configuration();
  File hosts = new File(TEST_ROOT_DIR, "hosts.file");
  if (!hosts.exists()) {
    hosts.createNewFile();
  }
  FileWriter writer = new FileWriter(hosts);
  writer.write("host1.host.com\n");
  writer.write("host2.host.com\n");

  writer.close();



  TTMover mover = new TTMoverTestStub(TEST_ROOT_DIR.toString());
  mover.setConf(conf);

  mover.addHostToFile(hosts.getAbsolutePath(), "host3.host.com");
  HostsFileReader reader =
          new HostsFileReader(hosts.getAbsolutePath(), hosts.getAbsolutePath());
  System.out.println(reader.getHosts().toString());
  assertEquals(3, reader.getHosts().size());

  mover.removeHostFromFile(hosts.getAbsolutePath(), "host1.host.com");

  reader.refresh();
  assertEquals(2, reader.getHosts().size());

  mover.restoreFile(hosts.getAbsolutePath());

  reader.refresh();
  assertEquals(2, reader.getHosts().size());

  assertTrue(reader.getHosts().contains("host1.host.com"));
  assertFalse(reader.getHosts().contains("host3.host.com"));
}
Project: hadoop-EAR    File: TestTTMover.java
public void testHostRemove() throws Exception {
  Configuration conf = new Configuration();
  conf.set("mapred.hosts", "hosts.include");
  conf.set("mapred.hosts.exclude", "hosts.exclude");

  File hostsInclude = new File(TEST_ROOT_DIR, "hosts.include");
  File hostsExclude = new File(TEST_ROOT_DIR, "hosts.exclude");
  File slaves = new File(TEST_ROOT_DIR, "slaves");

  if (hostsExclude.exists()) {
    hostsExclude.delete();
  }
  hostsExclude.createNewFile();

  FileWriter writer = new FileWriter(hostsInclude);
  writer.write("host1\nhost2\n");
  writer.close();
  writer = new FileWriter(slaves);
  writer.write("host1\nhost2\n");
  writer.close();

  TTMoverTestStub mover = new TTMoverTestStub(TEST_ROOT_DIR.toString());
  mover.setConf(conf);
  mover.run(new String[]{"-remove", "host1"});

  HostsFileReader reader =
          new HostsFileReader(hostsInclude.getAbsolutePath(),
          hostsExclude.getAbsolutePath());
  assertTrue(reader.getExcludedHosts().contains("host1"));

  assertTrue(reader.getHosts().contains("host2"));
  assertFalse(reader.getHosts().contains("host1"));
}
Project: hadoop-EAR    File: TestTTMover.java
public void testHostAdd() throws Exception {
  Configuration conf = new Configuration();
  conf.set("mapred.hosts", "hosts.include");
  conf.set("mapred.hosts.exclude", "hosts.exclude");

  File hostsInclude = new File(TEST_ROOT_DIR, "hosts.include");
  File hostsExclude = new File(TEST_ROOT_DIR, "hosts.exclude");
  File slaves = new File(TEST_ROOT_DIR, "slaves");

  FileWriter writer = new FileWriter(hostsInclude);
  writer.write("host1\nhost2\n");
  writer.close();

  writer = new FileWriter(slaves);
  writer.write("host1\nhost2\n");
  writer.close();

  writer = new FileWriter(hostsExclude);
  writer.write("host3\n");
  writer.close();

  HostsFileReader reader =
          new HostsFileReader(hostsInclude.getAbsolutePath(),
          hostsExclude.getAbsolutePath());

  assertEquals(2, reader.getHosts().size());

  TTMoverTestStub mover = new TTMoverTestStub(TEST_ROOT_DIR.toString());
  mover.setConf(conf);
  mover.run(new String[]{"-add", "host3"});

  reader.refresh();
  assertFalse(reader.getExcludedHosts().contains("host3"));

  assertTrue(reader.getHosts().contains("host3"));

}
Project: hadoop-EAR    File: BlockPlacementPolicyRaid.java
/** {@inheritDoc} */
@Override
public void initialize(Configuration conf,  FSClusterStats stats,
                       NetworkTopology clusterMap, HostsFileReader hostsReader,
                       DNSToSwitchMapping dnsToSwitchMapping, FSNamesystem namesystem) {
  super.initialize(conf, stats, clusterMap, 
                   hostsReader, dnsToSwitchMapping, namesystem);
  this.conf = conf;
  this.minFileSize = conf.getLong(RaidNode.MINIMUM_RAIDABLE_FILESIZE_KEY,
      RaidNode.MINIMUM_RAIDABLE_FILESIZE);
  this.namesystem = namesystem;
  this.cachedLocatedBlocks = new CachedLocatedBlocks(conf);
  this.cachedFullPathNames = new CachedFullPathNames(conf);
}
Project: hadoop-EAR    File: ClusterManager.java
/**
 * This method is used when the ClusterManager is restarting after going down
 * while in Safe Mode. It starts the process of recovering the original
 * CM state by reading back the state in JSON form.
 * @param hostsReader The HostsReader instance
 * @throws IOException
 */
private void recoverClusterManagerFromDisk(HostsFileReader hostsReader)
  throws IOException {
  LOG.info("Restoring state from " +
    new java.io.File(conf.getCMStateFile()).getAbsolutePath());

  // This will prevent the expireNodes and expireSessions threads from
  // expiring the nodes and sessions respectively
  safeMode = true;
  LOG.info("Safe mode is now: " + (this.safeMode ? "ON" : "OFF"));

  CoronaSerializer coronaSerializer = new CoronaSerializer(conf);

  // Expecting the START_OBJECT token for ClusterManager
  coronaSerializer.readStartObjectToken("ClusterManager");

  coronaSerializer.readField("startTime");
  startTime = coronaSerializer.readValueAs(Long.class);

  coronaSerializer.readField("nodeManager");
  nodeManager = new NodeManager(this, hostsReader, coronaSerializer);
  nodeManager.setConf(conf);

  coronaSerializer.readField("sessionManager");
  sessionManager = new SessionManager(this, coronaSerializer);

  coronaSerializer.readField("sessionNotifier");
  sessionNotifier = new SessionNotifier(sessionManager, this, metrics,
                                        coronaSerializer);

  // Expecting the END_OBJECT token for ClusterManager
  coronaSerializer.readEndObjectToken("ClusterManager");

  lastRestartTime = clock.getTime();
}
Project: hadoop-EAR    File: NodeManager.java
/**
 * NodeManager constructor given a cluster manager and a
 * {@link HostsFileReader} for includes/excludes lists
 * @param clusterManager the cluster manager
 * @param hostsReader the host reader for includes/excludes
 */
public NodeManager(
  ClusterManager clusterManager, HostsFileReader hostsReader) {
  this.hostsReader = hostsReader;
  LOG.info("Included hosts: " + hostsReader.getHostNames().size() +
      " Excluded hosts: " + hostsReader.getExcludedHosts().size());
  this.clusterManager = clusterManager;
  this.expireNodesThread = new Thread(this.expireNodes,
                                     "expireNodes");
  this.expireNodesThread.setDaemon(true);
  this.expireNodesThread.start();
  this.faultManager = new FaultManager(this);
}
Project: hadoop-EAR    File: HdfsProxy.java
private static boolean sendCommand(Configuration conf, String path)
    throws IOException {
  setupSslProps(conf);
  int sslPort = getSslAddr(conf).getPort();
  int err = 0;
  StringBuilder b = new StringBuilder();
  HostsFileReader hostsReader = new HostsFileReader(conf.get("hdfsproxy.hosts",
      "hdfsproxy-hosts"), "");
  Set<String> hostsList = hostsReader.getHosts();
  for (String hostname : hostsList) {
    HttpsURLConnection connection = null;
    try {
      connection = openConnection(hostname, sslPort, path);
      connection.connect();
      if (connection.getResponseCode() != HttpServletResponse.SC_OK) {
        b.append("\n\t" + hostname + ": " + connection.getResponseCode()
            + " " + connection.getResponseMessage());
        err++;
      }
    } catch (IOException e) {
      b.append("\n\t" + hostname + ": " + e.getLocalizedMessage());
      err++;
    } finally {
      if (connection != null)
        connection.disconnect();
    }
  }
  if (err > 0) {
    System.err.print("Command failed on the following "
        + err + " host" + (err==1?":":"s:") + b.toString() + "\n");
    return true;
  }
  return false;
}
Project: hadoop-EAR    File: BlockPlacementPolicyDefault.java
/** {@inheritDoc} */
public void initialize(Configuration conf, FSClusterStats stats,
    NetworkTopology clusterMap, HostsFileReader hostsReader,
    DNSToSwitchMapping dnsToSwitchMapping, FSNamesystem ns) {
  this.considerLoad = conf.getBoolean("dfs.replication.considerLoad", true);
  this.minBlocksToWrite = conf.getInt("dfs.replication.minBlocksToWrite",
                                      FSConstants.MIN_BLOCKS_FOR_WRITE);                                        
  this.stats = stats;
  this.clusterMap = clusterMap;
  Configuration newConf = new Configuration();
  this.attemptMultiplier = newConf.getInt("dfs.replication.attemptMultiplier", 200);
  FSNamesystem.LOG.info("Value for min blocks to write " + this.minBlocksToWrite);
}
Project: hadoop-plus    File: NodesListManager.java
@Override
protected void serviceInit(Configuration conf) throws Exception {

  this.conf = conf;

  // Read the hosts/exclude files to restrict access to the RM
  try {
    this.hostsReader = 
      new HostsFileReader(
          conf.get(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH, 
              YarnConfiguration.DEFAULT_RM_NODES_INCLUDE_FILE_PATH),
          conf.get(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH, 
              YarnConfiguration.DEFAULT_RM_NODES_EXCLUDE_FILE_PATH)
              );
    printConfiguredHosts();
  } catch (IOException ioe) {
    LOG.warn("Failed to init hostsReader, disabling", ioe);
    try {
      this.hostsReader = 
        new HostsFileReader(YarnConfiguration.DEFAULT_RM_NODES_INCLUDE_FILE_PATH, 
            YarnConfiguration.DEFAULT_RM_NODES_EXCLUDE_FILE_PATH);
    } catch (IOException ioe2) {
      // Should *never* happen
      this.hostsReader = null;
      throw new YarnRuntimeException(ioe2);
    }
  }
  super.serviceInit(conf);
}
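The include/exclude locations read above are ordinary YarnConfiguration keys, so they can also be set programmatically. A brief sketch; the /etc/hadoop paths are hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class RmHostListConfigExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Hypothetical file paths; serviceInit() above reads exactly these keys
    // before constructing its HostsFileReader.
    conf.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH,
             "/etc/hadoop/rm-hosts.include");
    conf.set(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH,
             "/etc/hadoop/rm-hosts.exclude");
  }
}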
Project: hadoop-plus    File: HostFileManager.java
void readFile(String type, String filename) throws IOException {
  if (filename.isEmpty()) {
    return;
  }
  HashSet<String> entrySet = new HashSet<String>();
  HostsFileReader.readFileToSet(type, filename, entrySet);
  for (String str : entrySet) {
    Entry entry = Entry.parse(filename, str);
    add(entry);
  }
}
Project: FlexMap    File: Dispatcher.java
/**
 * Read a set of host names from a file.
 *
 * @param fileName the file containing the host names, one per line
 * @param type a label for the host list, used in error messages
 * @return the set of host names
 */
static Set<String> getHostListFromFile(String fileName, String type) {
  Set<String> nodes = new HashSet<String>();
  try {
    HostsFileReader.readFileToSet(type, fileName, nodes);
    return StringUtils.getTrimmedStrings(nodes);
  } catch (IOException e) {
    throw new IllegalArgumentException(
        "Failed to read host list from file: " + fileName);
  }
}
Project: FlexMap    File: HostFileManager.java
private static HostSet readFile(String type, String filename)
        throws IOException {
  HostSet res = new HostSet();
  if (!filename.isEmpty()) {
    HashSet<String> entrySet = new HashSet<String>();
    HostsFileReader.readFileToSet(type, filename, entrySet);
    for (String str : entrySet) {
      InetSocketAddress addr = parseEntry(type, filename, str);
      if (addr != null) {
        res.add(addr);
      }
    }
  }
  return res;
}
Project: hops    File: NodesListManager.java
private HostsFileReader createHostsFileReader(String includesFile,
    String excludesFile) throws IOException, YarnException {
  HostsFileReader hostsReader =
      new HostsFileReader(includesFile,
          (includesFile == null || includesFile.isEmpty()) ? null
              : this.rmContext.getConfigurationProvider()
                  .getConfigurationInputStream(this.conf, includesFile),
          excludesFile,
          (excludesFile == null || excludesFile.isEmpty()) ? null
              : this.rmContext.getConfigurationProvider()
                  .getConfigurationInputStream(this.conf, excludesFile));
  return hostsReader;
}
Project: hadoop-TCP    File: NodesListManager.java
@Override
protected void serviceInit(Configuration conf) throws Exception {

  this.conf = conf;

  // Read the hosts/exclude files to restrict access to the RM
  try {
    this.hostsReader = 
      new HostsFileReader(
          conf.get(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH, 
              YarnConfiguration.DEFAULT_RM_NODES_INCLUDE_FILE_PATH),
          conf.get(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH, 
              YarnConfiguration.DEFAULT_RM_NODES_EXCLUDE_FILE_PATH)
              );
    printConfiguredHosts();
  } catch (IOException ioe) {
    LOG.warn("Failed to init hostsReader, disabling", ioe);
    try {
      this.hostsReader = 
        new HostsFileReader(YarnConfiguration.DEFAULT_RM_NODES_INCLUDE_FILE_PATH, 
            YarnConfiguration.DEFAULT_RM_NODES_EXCLUDE_FILE_PATH);
    } catch (IOException ioe2) {
      // Should *never* happen
      this.hostsReader = null;
      throw new YarnRuntimeException(ioe2);
    }
  }
  super.serviceInit(conf);
}
Project: hadoop-TCP    File: HostFileManager.java
void readFile(String type, String filename) throws IOException {
  if (filename.isEmpty()) {
    return;
  }
  HashSet<String> entrySet = new HashSet<String>();
  HostsFileReader.readFileToSet(type, filename, entrySet);
  for (String str : entrySet) {
    Entry entry = Entry.parse(filename, str);
    add(entry);
  }
}
Project: hardfs    File: NodesListManager.java
@Override
protected void serviceInit(Configuration conf) throws Exception {

  this.conf = conf;

  // Read the hosts/exclude files to restrict access to the RM
  try {
    this.hostsReader = 
      new HostsFileReader(
          conf.get(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH, 
              YarnConfiguration.DEFAULT_RM_NODES_INCLUDE_FILE_PATH),
          conf.get(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH, 
              YarnConfiguration.DEFAULT_RM_NODES_EXCLUDE_FILE_PATH)
              );
    printConfiguredHosts();
  } catch (IOException ioe) {
    LOG.warn("Failed to init hostsReader, disabling", ioe);
    try {
      this.hostsReader = 
        new HostsFileReader(YarnConfiguration.DEFAULT_RM_NODES_INCLUDE_FILE_PATH, 
            YarnConfiguration.DEFAULT_RM_NODES_EXCLUDE_FILE_PATH);
    } catch (IOException ioe2) {
      // Should *never* happen
      this.hostsReader = null;
      throw new YarnRuntimeException(ioe2);
    }
  }
  super.serviceInit(conf);
}
Project: hardfs    File: HostFileManager.java
void readFile(String type, String filename) throws IOException {
  if (filename.isEmpty()) {
    return;
  }
  HashSet<String> entrySet = new HashSet<String>();
  HostsFileReader.readFileToSet(type, filename, entrySet);
  for (String str : entrySet) {
    Entry entry = Entry.parse(filename, str);
    add(entry);
  }
}
Project: hadoop-on-lustre2    File: NodesListManager.java
private HostsFileReader createHostsFileReader(String includesFile,
    String excludesFile) throws IOException, YarnException {
  HostsFileReader hostsReader =
      new HostsFileReader(includesFile,
          (includesFile == null || includesFile.isEmpty()) ? null
              : this.rmContext.getConfigurationProvider()
                  .getConfigurationInputStream(this.conf, includesFile),
          excludesFile,
          (excludesFile == null || excludesFile.isEmpty()) ? null
              : this.rmContext.getConfigurationProvider()
                  .getConfigurationInputStream(this.conf, excludesFile));
  return hostsReader;
}
Project: hadoop-on-lustre2    File: HostFileManager.java
void readFile(String type, String filename) throws IOException {
  if (filename.isEmpty()) {
    return;
  }
  HashSet<String> entrySet = new HashSet<String>();
  HostsFileReader.readFileToSet(type, filename, entrySet);
  for (String str : entrySet) {
    Entry entry = Entry.parse(filename, str);
    add(entry);
  }
}
Project: RDFS    File: TestTTMover.java
public void testFileModifications() throws Exception {
  System.out.println(TEST_ROOT_DIR);
  Configuration conf = new Configuration();
  File hosts = new File(TEST_ROOT_DIR, "hosts.file");
  if (!hosts.exists()) {
    hosts.createNewFile();
  }
  FileWriter writer = new FileWriter(hosts);
  writer.write("host1.host.com\n");
  writer.write("host2.host.com\n");

  writer.close();



  TTMover mover = new TTMoverTestStub(TEST_ROOT_DIR.toString());
  mover.setConf(conf);

  mover.addHostToFile(hosts.getAbsolutePath(), "host3.host.com");
  HostsFileReader reader =
          new HostsFileReader(hosts.getAbsolutePath(), hosts.getAbsolutePath());
  System.out.println(reader.getHosts().toString());
  assertEquals(3, reader.getHosts().size());

  mover.removeHostFromFile(hosts.getAbsolutePath(), "host1.host.com");

  reader.refresh();
  assertEquals(2, reader.getHosts().size());

  mover.restoreFile(hosts.getAbsolutePath());

  reader.refresh();
  assertEquals(2, reader.getHosts().size());

  assertTrue(reader.getHosts().contains("host1.host.com"));
  assertFalse(reader.getHosts().contains("host3.host.com"));
}
Project: RDFS    File: TestTTMover.java
public void testHostRemove() throws Exception {
  Configuration conf = new Configuration();
  conf.set("mapred.hosts", "hosts.include");
  conf.set("mapred.hosts.exclude", "hosts.exclude");

  File hostsInclude = new File(TEST_ROOT_DIR, "hosts.include");
  File hostsExclude = new File(TEST_ROOT_DIR, "hosts.exclude");
  File slaves = new File(TEST_ROOT_DIR, "slaves");

  if (hostsExclude.exists()) {
    hostsExclude.delete();
  }
  hostsExclude.createNewFile();

  FileWriter writer = new FileWriter(hostsInclude);
  writer.write("host1\nhost2\n");
  writer.close();
  writer = new FileWriter(slaves);
  writer.write("host1\nhost2\n");
  writer.close();

  TTMoverTestStub mover = new TTMoverTestStub(TEST_ROOT_DIR.toString());
  mover.setConf(conf);
  mover.run(new String[]{"-remove", "host1"});

  HostsFileReader reader =
          new HostsFileReader(hostsInclude.getAbsolutePath(),
          hostsExclude.getAbsolutePath());
  assertTrue(reader.getExcludedHosts().contains("host1"));

  assertTrue(reader.getHosts().contains("host2"));
  assertFalse(reader.getHosts().contains("host1"));
}