Example source code for the Java class org.apache.hadoop.util.ServicePlugin
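The excerpts collected below show how the Hadoop NameNode, DataNode, and JobTracker daemons load ServicePlugin instances from configuration, start them during service startup, and stop them again on shutdown; a failing plugin is only logged as a warning and never aborts the daemon. For orientation, here is a minimal plugin sketch. It assumes the interface as declared in hadoop-common, where ServicePlugin extends java.io.Closeable and declares start(Object service) and stop(); the class name and method bodies are illustrative only and do not come from any of the projects listed below.

import org.apache.hadoop.util.ServicePlugin;

/**
 * Minimal illustrative plugin (hypothetical, not taken from the projects below).
 * The hosting daemon passes itself to start(Object) and calls stop() on shutdown.
 */
public class LoggingServicePlugin implements ServicePlugin {
  private Object service;

  @Override
  public void start(Object service) {
    // "service" is the hosting daemon, e.g. the NameNode or DataNode instance.
    this.service = service;
    System.out.println("Plugin started inside " + service.getClass().getSimpleName());
  }

  @Override
  public void stop() {
    System.out.println("Plugin stopped for " + service);
  }

  @Override
  public void close() {
    // ServicePlugin extends java.io.Closeable; release any held resources here.
    stop();
  }
}

A plugin like this is registered under the keys read in the excerpts (dfs.namenode.plugins, dfs.datanode.plugins); a registration example follows the last excerpt on this page.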

Project: hadoop    File: NameNode.java
/** Start the services common to active and standby states */
private void startCommonServices(Configuration conf) throws IOException {
  namesystem.startCommonServices(conf, haContext);
  registerNNSMXBean();
  if (NamenodeRole.NAMENODE != role) {
    startHttpServer(conf);
    httpServer.setNameNodeAddress(getNameNodeAddress());
    httpServer.setFSImage(getFSImage());
  }
  rpcServer.start();
  plugins = conf.getInstances(DFS_NAMENODE_PLUGINS_KEY,
      ServicePlugin.class);
  for (ServicePlugin p: plugins) {
    try {
      p.start(this);
    } catch (Throwable t) {
      LOG.warn("ServicePlugin " + p + " could not be started", t);
    }
  }
  LOG.info(getRole() + " RPC up at: " + rpcServer.getRpcAddress());
  if (rpcServer.getServiceRpcAddress() != null) {
    LOG.info(getRole() + " service RPC up at: "
        + rpcServer.getServiceRpcAddress());
  }
}
Project: aliyun-oss-hadoop-fs    File: NameNode.java
/** Start the services common to active and standby states */
private void startCommonServices(Configuration conf) throws IOException {
  namesystem.startCommonServices(conf, haContext);
  registerNNSMXBean();
  if (NamenodeRole.NAMENODE != role) {
    startHttpServer(conf);
    httpServer.setNameNodeAddress(getNameNodeAddress());
    httpServer.setFSImage(getFSImage());
  }
  rpcServer.start();
  plugins = conf.getInstances(DFS_NAMENODE_PLUGINS_KEY,
      ServicePlugin.class);
  for (ServicePlugin p: plugins) {
    try {
      p.start(this);
    } catch (Throwable t) {
      LOG.warn("ServicePlugin " + p + " could not be started", t);
    }
  }
  LOG.info(getRole() + " RPC up at: " + rpcServer.getRpcAddress());
  if (rpcServer.getServiceRpcAddress() != null) {
    LOG.info(getRole() + " service RPC up at: "
        + rpcServer.getServiceRpcAddress());
  }
}
Project: big-c    File: NameNode.java
/** Start the services common to active and standby states */
private void startCommonServices(Configuration conf) throws IOException {
  namesystem.startCommonServices(conf, haContext);
  registerNNSMXBean();
  if (NamenodeRole.NAMENODE != role) {
    startHttpServer(conf);
    httpServer.setNameNodeAddress(getNameNodeAddress());
    httpServer.setFSImage(getFSImage());
  }
  rpcServer.start();
  plugins = conf.getInstances(DFS_NAMENODE_PLUGINS_KEY,
      ServicePlugin.class);
  for (ServicePlugin p: plugins) {
    try {
      p.start(this);
    } catch (Throwable t) {
      LOG.warn("ServicePlugin " + p + " could not be started", t);
    }
  }
  LOG.info(getRole() + " RPC up at: " + rpcServer.getRpcAddress());
  if (rpcServer.getServiceRpcAddress() != null) {
    LOG.info(getRole() + " service RPC up at: "
        + rpcServer.getServiceRpcAddress());
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: NameNode.java
/** Start the services common to active and standby states */
private void startCommonServices(Configuration conf) throws IOException {
  namesystem.startCommonServices(conf, haContext);
  registerNNSMXBean();
  if (NamenodeRole.NAMENODE != role) {
    startHttpServer(conf);
    httpServer.setNameNodeAddress(getNameNodeAddress());
    httpServer.setFSImage(getFSImage());
  }
  rpcServer.start();
  plugins = conf.getInstances(DFS_NAMENODE_PLUGINS_KEY,
      ServicePlugin.class);
  for (ServicePlugin p: plugins) {
    try {
      p.start(this);
    } catch (Throwable t) {
      LOG.warn("ServicePlugin " + p + " could not be started", t);
    }
  }
  LOG.info(getRole() + " RPC up at: " + rpcServer.getRpcAddress());
  if (rpcServer.getServiceRpcAddress() != null) {
    LOG.info(getRole() + " service RPC up at: "
        + rpcServer.getServiceRpcAddress());
  }
}
Project: hadoop-plus    File: NameNode.java
/** Start the services common to active and standby states */
private void startCommonServices(Configuration conf) throws IOException {
  namesystem.startCommonServices(conf, haContext);
  if (NamenodeRole.NAMENODE != role) {
    startHttpServer(conf);
    httpServer.setNameNodeAddress(getNameNodeAddress());
    httpServer.setFSImage(getFSImage());
  }
  rpcServer.start();
  plugins = conf.getInstances(DFS_NAMENODE_PLUGINS_KEY,
      ServicePlugin.class);
  for (ServicePlugin p: plugins) {
    try {
      p.start(this);
    } catch (Throwable t) {
      LOG.warn("ServicePlugin " + p + " could not be started", t);
    }
  }
  LOG.info(getRole() + " RPC up at: " + rpcServer.getRpcAddress());
  if (rpcServer.getServiceRpcAddress() != null) {
    LOG.info(getRole() + " service RPC up at: "
        + rpcServer.getServiceRpcAddress());
  }
}
Project: FlexMap    File: NameNode.java
/** Start the services common to active and standby states */
private void startCommonServices(Configuration conf) throws IOException {
  namesystem.startCommonServices(conf, haContext);
  registerNNSMXBean();
  if (NamenodeRole.NAMENODE != role) {
    startHttpServer(conf);
    httpServer.setNameNodeAddress(getNameNodeAddress());
    httpServer.setFSImage(getFSImage());
  }
  rpcServer.start();
  plugins = conf.getInstances(DFS_NAMENODE_PLUGINS_KEY,
      ServicePlugin.class);
  for (ServicePlugin p: plugins) {
    try {
      p.start(this);
    } catch (Throwable t) {
      LOG.warn("ServicePlugin " + p + " could not be started", t);
    }
  }
  LOG.info(getRole() + " RPC up at: " + rpcServer.getRpcAddress());
  if (rpcServer.getServiceRpcAddress() != null) {
    LOG.info(getRole() + " service RPC up at: "
        + rpcServer.getServiceRpcAddress());
  }
}
Project: hops    File: NameNode.java
/**
 * Start the services common to active and standby states
 */
private void startCommonServices(Configuration conf) throws IOException {
  startHttpServer(conf);

  startLeaderElectionService();

  namesystem.startCommonServices(conf);

  rpcServer.start();
  plugins = conf.getInstances(DFS_NAMENODE_PLUGINS_KEY, ServicePlugin.class);
  for (ServicePlugin p : plugins) {
    try {
      p.start(this);
    } catch (Throwable t) {
      LOG.warn("ServicePlugin " + p + " could not be started", t);
    }
  }
  LOG.info(getRole() + " RPC up at: " + rpcServer.getRpcAddress());
  if (rpcServer.getServiceRpcAddress() != null) {
    LOG.info(getRole() + " service RPC up at: " +
        rpcServer.getServiceRpcAddress());
  }
}
Project: hops    File: NameNode.java
private void stopCommonServices() {
  if (namesystem != null) {
    namesystem.close();
  }
  if (rpcServer != null) {
    rpcServer.stop();
  }
  if (leaderElection != null && leaderElection.isRunning()) {
    leaderElection.stopElectionThread();
  }
  if (plugins != null) {
    for (ServicePlugin p : plugins) {
      try {
        p.stop();
      } catch (Throwable t) {
        LOG.warn("ServicePlugin " + p + " could not be stopped", t);
      }
    }
  }
  stopHttpServer();
}
Project: hadoop-TCP    File: NameNode.java
/** Start the services common to active and standby states */
private void startCommonServices(Configuration conf) throws IOException {
  namesystem.startCommonServices(conf, haContext);
  if (NamenodeRole.NAMENODE != role) {
    startHttpServer(conf);
    httpServer.setNameNodeAddress(getNameNodeAddress());
    httpServer.setFSImage(getFSImage());
  }
  rpcServer.start();
  plugins = conf.getInstances(DFS_NAMENODE_PLUGINS_KEY,
      ServicePlugin.class);
  for (ServicePlugin p: plugins) {
    try {
      p.start(this);
    } catch (Throwable t) {
      LOG.warn("ServicePlugin " + p + " could not be started", t);
    }
  }
  LOG.info(getRole() + " RPC up at: " + rpcServer.getRpcAddress());
  if (rpcServer.getServiceRpcAddress() != null) {
    LOG.info(getRole() + " service RPC up at: "
        + rpcServer.getServiceRpcAddress());
  }
}
Project: hardfs    File: NameNode.java
/** Start the services common to active and standby states */
private void startCommonServices(Configuration conf) throws IOException {
  namesystem.startCommonServices(conf, haContext);
  if (NamenodeRole.NAMENODE != role) {
    startHttpServer(conf);
    httpServer.setNameNodeAddress(getNameNodeAddress());
    httpServer.setFSImage(getFSImage());
  }
  rpcServer.start();
  plugins = conf.getInstances(DFS_NAMENODE_PLUGINS_KEY,
      ServicePlugin.class);
  for (ServicePlugin p: plugins) {
    try {
      p.start(this);
    } catch (Throwable t) {
      LOG.warn("ServicePlugin " + p + " could not be started", t);
    }
  }
  LOG.info(getRole() + " RPC up at: " + rpcServer.getRpcAddress());
  if (rpcServer.getServiceRpcAddress() != null) {
    LOG.info(getRole() + " service RPC up at: "
        + rpcServer.getServiceRpcAddress());
  }
}
Project: hadoop-on-lustre2    File: NameNode.java
/** Start the services common to active and standby states */
private void startCommonServices(Configuration conf) throws IOException {
  namesystem.startCommonServices(conf, haContext);
  registerNNSMXBean();
  if (NamenodeRole.NAMENODE != role) {
    startHttpServer(conf);
    httpServer.setNameNodeAddress(getNameNodeAddress());
    httpServer.setFSImage(getFSImage());
  }
  rpcServer.start();
  plugins = conf.getInstances(DFS_NAMENODE_PLUGINS_KEY,
      ServicePlugin.class);
  for (ServicePlugin p: plugins) {
    try {
      p.start(this);
    } catch (Throwable t) {
      LOG.warn("ServicePlugin " + p + " could not be started", t);
    }
  }
  LOG.info(getRole() + " RPC up at: " + rpcServer.getRpcAddress());
  if (rpcServer.getServiceRpcAddress() != null) {
    LOG.info(getRole() + " service RPC up at: "
        + rpcServer.getServiceRpcAddress());
  }
}
Project: cumulus    File: NameNode.java
/**
 * Activate name-node servers and threads.
 */
void activate(Configuration conf) throws IOException {
  if ((isRole(NamenodeRole.ACTIVE))
      && (UserGroupInformation.isSecurityEnabled())) {
    namesystem.activateSecretManager();
  }
  namesystem.activate(conf);
  startHttpServer(conf);
  server.start();  //start RPC server
  if (serviceRpcServer != null) {
    serviceRpcServer.start();      
  }
  startTrashEmptier(conf);

  plugins = conf.getInstances("dfs.namenode.plugins", ServicePlugin.class);
  for (ServicePlugin p: plugins) {
    try {
      p.start(this);
    } catch (Throwable t) {
      LOG.warn("ServicePlugin " + p + " could not be started", t);
    }
  }

  nnStator.activate(); // add by xianyu
}
Project: mapreduce-fork    File: TestJobTrackerPlugins.java
@Test
public void test() throws Exception {
  JobConf conf = new JobConf();
  conf.set(JTConfig.JT_IPC_ADDRESS, "localhost:0");
  conf.set(JTConfig.JT_HTTP_ADDRESS, "0.0.0.0:0");
  conf.setClass(JTConfig.JT_PLUGINS, FakeServicePlugin.class,
      ServicePlugin.class);

  assertNull("Plugin not created", FakeServicePlugin.getInstance());

  JobTracker jobTracker = JobTracker.startTracker(conf);
  assertNotNull("Plugin created", FakeServicePlugin.getInstance());
  assertSame("Service is jobTracker",
      FakeServicePlugin.getInstance().getService(), jobTracker);
  assertFalse("Plugin not stopped",
      FakeServicePlugin.getInstance().isStopped());

  jobTracker.close();
  assertTrue("Plugin stopped", FakeServicePlugin.getInstance().isStopped());
}
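The FakeServicePlugin used by this test is not reproduced on this page. Judging only from the calls the test makes (getInstance(), getService(), isStopped()), a plausible reconstruction is sketched below; the static-instance bookkeeping and field names are assumptions, not the project's actual code.

import org.apache.hadoop.util.ServicePlugin;

/** Hypothetical reconstruction of the FakeServicePlugin referenced by the test above. */
public class FakeServicePlugin implements ServicePlugin {
  // Assumption: the test inspects the most recently constructed instance,
  // which the JobTracker creates reflectively via conf.getInstances(...).
  private static FakeServicePlugin instance;

  private Object service;
  private boolean stopped;

  public FakeServicePlugin() {
    instance = this;
  }

  public static FakeServicePlugin getInstance() {
    return instance;
  }

  public Object getService() {
    return service;
  }

  public boolean isStopped() {
    return stopped;
  }

  @Override
  public void start(Object service) {
    this.service = service; // the JobTracker passes itself here
  }

  @Override
  public void stop() {
    stopped = true;
  }

  @Override
  public void close() {
    stop();
  }
}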
Project: hadoop    File: NameNode.java
private void stopCommonServices() {
  if(rpcServer != null) rpcServer.stop();
  if(namesystem != null) namesystem.close();
  if (pauseMonitor != null) pauseMonitor.stop();
  if (plugins != null) {
    for (ServicePlugin p : plugins) {
      try {
        p.stop();
      } catch (Throwable t) {
        LOG.warn("ServicePlugin " + p + " could not be stopped", t);
      }
    }
  }   
  stopHttpServer();
}
Project: hadoop    File: DataNode.java
private void startPlugins(Configuration conf) {
  plugins = conf.getInstances(DFS_DATANODE_PLUGINS_KEY, ServicePlugin.class);
  for (ServicePlugin p: plugins) {
    try {
      p.start(this);
      LOG.info("Started plug-in " + p);
    } catch (Throwable t) {
      LOG.warn("ServicePlugin " + p + " could not be started", t);
    }
  }
}
Project: aliyun-oss-hadoop-fs    File: NameNode.java
private void stopCommonServices() {
  if(rpcServer != null) rpcServer.stop();
  if(namesystem != null) namesystem.close();
  if (pauseMonitor != null) pauseMonitor.stop();
  if (plugins != null) {
    for (ServicePlugin p : plugins) {
      try {
        p.stop();
      } catch (Throwable t) {
        LOG.warn("ServicePlugin " + p + " could not be stopped", t);
      }
    }
  }   
  stopHttpServer();
}
Project: aliyun-oss-hadoop-fs    File: DataNode.java
private void startPlugins(Configuration conf) {
  plugins = conf.getInstances(DFS_DATANODE_PLUGINS_KEY, ServicePlugin.class);
  for (ServicePlugin p: plugins) {
    try {
      p.start(this);
      LOG.info("Started plug-in " + p);
    } catch (Throwable t) {
      LOG.warn("ServicePlugin " + p + " could not be started", t);
    }
  }
}
Project: big-c    File: NameNode.java
private void stopCommonServices() {
  if(rpcServer != null) rpcServer.stop();
  if(namesystem != null) namesystem.close();
  if (pauseMonitor != null) pauseMonitor.stop();
  if (plugins != null) {
    for (ServicePlugin p : plugins) {
      try {
        p.stop();
      } catch (Throwable t) {
        LOG.warn("ServicePlugin " + p + " could not be stopped", t);
      }
    }
  }   
  stopHttpServer();
}
Project: big-c    File: DataNode.java
private void startPlugins(Configuration conf) {
  plugins = conf.getInstances(DFS_DATANODE_PLUGINS_KEY, ServicePlugin.class);
  for (ServicePlugin p: plugins) {
    try {
      p.start(this);
      LOG.info("Started plug-in " + p);
    } catch (Throwable t) {
      LOG.warn("ServicePlugin " + p + " could not be started", t);
    }
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: NameNode.java
private void stopCommonServices() {
  if(rpcServer != null) rpcServer.stop();
  if(namesystem != null) namesystem.close();
  if (pauseMonitor != null) pauseMonitor.stop();
  if (plugins != null) {
    for (ServicePlugin p : plugins) {
      try {
        p.stop();
      } catch (Throwable t) {
        LOG.warn("ServicePlugin " + p + " could not be stopped", t);
      }
    }
  }   
  stopHttpServer();
}
Project: hadoop-2.6.0-cdh5.4.3    File: DataNode.java
private void startPlugins(Configuration conf) {
  plugins = conf.getInstances(DFS_DATANODE_PLUGINS_KEY, ServicePlugin.class);
  for (ServicePlugin p: plugins) {
    try {
      p.start(this);
      LOG.info("Started plug-in " + p);
    } catch (Throwable t) {
      LOG.warn("ServicePlugin " + p + " could not be started", t);
    }
  }
}
Project: hadoop-plus    File: NameNode.java
private void stopCommonServices() {
  if(namesystem != null) namesystem.close();
  if(rpcServer != null) rpcServer.stop();
  if (plugins != null) {
    for (ServicePlugin p : plugins) {
      try {
        p.stop();
      } catch (Throwable t) {
        LOG.warn("ServicePlugin " + p + " could not be stopped", t);
      }
    }
  }   
  stopHttpServer();
}
Project: hadoop-plus    File: DataNode.java
private void startPlugins(Configuration conf) {
  plugins = conf.getInstances(DFS_DATANODE_PLUGINS_KEY, ServicePlugin.class);
  for (ServicePlugin p: plugins) {
    try {
      p.start(this);
      LOG.info("Started plug-in " + p);
    } catch (Throwable t) {
      LOG.warn("ServicePlugin " + p + " could not be started", t);
    }
  }
}
Project: PDHC    File: CheckerNode.java
private void startPlugins(Configuration conf) {
  plugins = conf.getInstances(DFS_DATANODE_PLUGINS_KEY, ServicePlugin.class);
  for (ServicePlugin p: plugins) {
    try {
      p.start(this);
      LOG.info("Started plug-in " + p);
    } catch (Throwable t) {
      LOG.warn("ServicePlugin " + p + " could not be started", t);
    }
  }
}
Project: FlexMap    File: NameNode.java
private void stopCommonServices() {
  if(rpcServer != null) rpcServer.stop();
  if(namesystem != null) namesystem.close();
  if (pauseMonitor != null) pauseMonitor.stop();
  if (plugins != null) {
    for (ServicePlugin p : plugins) {
      try {
        p.stop();
      } catch (Throwable t) {
        LOG.warn("ServicePlugin " + p + " could not be stopped", t);
      }
    }
  }   
  stopHttpServer();
}
Project: FlexMap    File: DataNode.java
private void startPlugins(Configuration conf) {
  plugins = conf.getInstances(DFS_DATANODE_PLUGINS_KEY, ServicePlugin.class);
  for (ServicePlugin p: plugins) {
    try {
      p.start(this);
      LOG.info("Started plug-in " + p);
    } catch (Throwable t) {
      LOG.warn("ServicePlugin " + p + " could not be started", t);
    }
  }
}
Project: hops    File: DataNode.java
private void startPlugins(Configuration conf) {
  plugins = conf.getInstances(DFS_DATANODE_PLUGINS_KEY, ServicePlugin.class);
  for (ServicePlugin p : plugins) {
    try {
      p.start(this);
      LOG.info("Started plug-in " + p);
    } catch (Throwable t) {
      LOG.warn("ServicePlugin " + p + " could not be started", t);
    }
  }
}
Project: hadoop-TCP    File: NameNode.java
private void stopCommonServices() {
  if(namesystem != null) namesystem.close();
  if(rpcServer != null) rpcServer.stop();
  if (plugins != null) {
    for (ServicePlugin p : plugins) {
      try {
        p.stop();
      } catch (Throwable t) {
        LOG.warn("ServicePlugin " + p + " could not be stopped", t);
      }
    }
  }   
  stopHttpServer();
}
Project: hardfs    File: NameNode.java
private void stopCommonServices() {
  if(namesystem != null) namesystem.close();
  if(rpcServer != null) rpcServer.stop();
  if (plugins != null) {
    for (ServicePlugin p : plugins) {
      try {
        p.stop();
      } catch (Throwable t) {
        LOG.warn("ServicePlugin " + p + " could not be stopped", t);
      }
    }
  }   
  stopHttpServer();
}
Project: hadoop-on-lustre2    File: NameNode.java
private void stopCommonServices() {
  if(rpcServer != null) rpcServer.stop();
  if(namesystem != null) namesystem.close();
  if (pauseMonitor != null) pauseMonitor.stop();
  if (plugins != null) {
    for (ServicePlugin p : plugins) {
      try {
        p.stop();
      } catch (Throwable t) {
        LOG.warn("ServicePlugin " + p + " could not be stopped", t);
      }
    }
  }   
  stopHttpServer();
}
Project: cumulus    File: NameNode.java
/**
 * Stop all NameNode threads and wait for all to finish.
 */
public void stop() {
  synchronized(this) {
    if (stopRequested)
      return;
    stopRequested = true;
  }
  if (plugins != null) {
    for (ServicePlugin p : plugins) {
      try {
        p.stop();
      } catch (Throwable t) {
        LOG.warn("ServicePlugin " + p + " could not be stopped", t);
      }
    }
  }
  try {
    if (httpServer != null) httpServer.stop();
  } catch (Exception e) {
    LOG.error(StringUtils.stringifyException(e));
  }
  if(namesystem != null) namesystem.close();
  if(emptier != null) emptier.interrupt();
  if(server != null) server.stop();
  if(serviceRpcServer != null) serviceRpcServer.stop();
  if (myMetrics != null) {
    myMetrics.shutdown();
  }
  if (namesystem != null) {
    namesystem.shutdown();
  }
}
Project: cumulus    File: DataNode.java
private void startPlugins(Configuration conf) {
  plugins = conf.getInstances("dfs.datanode.plugins", ServicePlugin.class);
  for (ServicePlugin p: plugins) {
    try {
      p.start(this);
      LOG.info("Started plug-in " + p);
    } catch (Throwable t) {
      LOG.warn("ServicePlugin " + p + " could not be started", t);
    }
  }
}
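For completeness, the snippet below shows how a plugin is registered under the same keys the excerpts read via DFS_NAMENODE_PLUGINS_KEY / DFS_DATANODE_PLUGINS_KEY ("dfs.namenode.plugins" / "dfs.datanode.plugins"); in a real deployment the entries would normally live in hdfs-site.xml. This is a minimal sketch that uses a throwaway plugin class so it runs on its own; it is not code from any of the projects above.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ServicePlugin;

public class PluginConfigExample {

  /** Trivial plugin so this example is self-contained. */
  public static class NoopPlugin implements ServicePlugin {
    @Override public void start(Object service) { System.out.println("start: " + service); }
    @Override public void stop() { System.out.println("stop"); }
    @Override public void close() { }
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // Equivalent to an hdfs-site.xml entry; the key matches what
    // DataNode.startPlugins() reads through DFS_DATANODE_PLUGINS_KEY.
    conf.setClass("dfs.datanode.plugins", NoopPlugin.class, ServicePlugin.class);

    // Mirrors the loop in startCommonServices()/startPlugins() above.
    List<ServicePlugin> plugins =
        conf.getInstances("dfs.datanode.plugins", ServicePlugin.class);
    for (ServicePlugin p : plugins) {
      p.start(new Object()); // a real daemon passes itself (the DataNode instance)
      p.stop();
    }
  }
}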