Java code examples for the class org.jboss.netty.channel.socket.nio.NioWorkerPool
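
NioWorkerPool manages the NIO worker threads that perform channel I/O in Netty 3. It is typically paired with a boss pool (NioClientBossPool for connecting, NioServerBossPool for accepting) and handed to a channel factory; note that the client boss pool also takes a Timer, which Netty 3 uses to enforce connect timeouts. A minimal construction sketch (the thread counts are illustrative, not prescriptive):

    Executor bossExecutor = Executors.newCachedThreadPool();
    Executor workerExecutor = Executors.newCachedThreadPool();
    Timer timer = new HashedWheelTimer();

    NioWorkerPool workerPool = new NioWorkerPool(workerExecutor, 4, ThreadNameDeterminer.CURRENT);
    NioClientBossPool bossPool = new NioClientBossPool(bossExecutor, 1, timer, ThreadNameDeterminer.CURRENT);
    ChannelFactory channelFactory = new NioClientSocketChannelFactory(bossPool, workerPool);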

Project: mandrel    File: NiftyClient.java
public NiftyClient(NettyClientConfig nettyClientConfig, boolean local) {
    this.nettyClientConfig = nettyClientConfig;
    if (local) {
        log.warn("Using local client");
        this.channelFactory = new DefaultLocalClientChannelFactory();
        this.timer = null;
        this.bossExecutor = null;
        this.workerExecutor = null;
        this.defaultSocksProxyAddress = null;
    } else {
        this.timer = nettyClientConfig.getTimer();
        this.bossExecutor = nettyClientConfig.getBossExecutor();
        this.workerExecutor = nettyClientConfig.getWorkerExecutor();
        this.defaultSocksProxyAddress = nettyClientConfig.getDefaultSocksProxyAddress();

        int bossThreadCount = nettyClientConfig.getBossThreadCount();
        int workerThreadCount = nettyClientConfig.getWorkerThreadCount();

        NioWorkerPool workerPool = new NioWorkerPool(workerExecutor, workerThreadCount, ThreadNameDeterminer.CURRENT);
        NioClientBossPool bossPool = new NioClientBossPool(bossExecutor, bossThreadCount, timer, ThreadNameDeterminer.CURRENT);

        this.channelFactory = new NioClientSocketChannelFactory(bossPool, workerPool);
    }
}
Project: apm-agent    File: PinpointSocketFactory.java
private NioClientSocketChannelFactory createChannelFactory(int bossCount, int workerCount, Timer timer) {
    ExecutorService boss = Executors.newCachedThreadPool(new PinpointThreadFactory("Pinpoint-Client-Boss", true));
    NioClientBossPool bossPool = new NioClientBossPool(boss, bossCount, timer, ThreadNameDeterminer.CURRENT);

    ExecutorService worker = Executors.newCachedThreadPool(new PinpointThreadFactory("Pinpoint-Client-Worker", true));
    NioWorkerPool workerPool = new NioWorkerPool(worker, workerCount, ThreadNameDeterminer.CURRENT);
    return new NioClientSocketChannelFactory(bossPool, workerPool);
}
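
The pools built above own live threads, so they must be released on shutdown. A hedged teardown sketch (createChannelFactory is the method above; the counts and timer are illustrative):

    NioClientSocketChannelFactory channelFactory = createChannelFactory(1, 4, new HashedWheelTimer());
    ClientBootstrap bootstrap = new ClientBootstrap(channelFactory);
    // ... connect and exchange messages ...
    bootstrap.releaseExternalResources(); // shuts down the boss pool, the worker pool and their executors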
Project: apm-agent    File: PinpointServerSocket.java
private ServerBootstrap createBootStrap(int bossCount, int workerCount) {
    // profiler, collector
    ExecutorService boss = Executors.newCachedThreadPool(new PinpointThreadFactory("Pinpoint-Server-Boss"));
    NioServerBossPool nioServerBossPool = new NioServerBossPool(boss, bossCount, ThreadNameDeterminer.CURRENT);

    ExecutorService worker = Executors.newCachedThreadPool(new PinpointThreadFactory("Pinpoint-Server-Worker"));
    NioWorkerPool nioWorkerPool = new NioWorkerPool(worker, workerCount, ThreadNameDeterminer.CURRENT);

    NioServerSocketChannelFactory nioServerSocketChannelFactory = new NioServerSocketChannelFactory(nioServerBossPool, nioWorkerPool);
    return new ServerBootstrap(nioServerSocketChannelFactory);
}
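
A usage sketch for the bootstrap returned above (the port is hypothetical, and a real server would set a ChannelPipelineFactory before binding): bind() opens the listening socket, after which the boss threads hand accepted connections to the NioWorkerPool:

    ServerBootstrap bootstrap = createBootStrap(1, 4);
    // bootstrap.setPipelineFactory(...) would normally be configured here
    Channel serverChannel = bootstrap.bind(new InetSocketAddress(9999));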
Project: mandrel    File: NettyServerTransport.java
public void start() {
    if (local) {
        channelFactory = new DefaultLocalServerChannelFactory();
    } else {
        bossExecutor = nettyServerConfig.getBossExecutor();
        int bossThreadCount = nettyServerConfig.getBossThreadCount();
        ioWorkerExecutor = nettyServerConfig.getWorkerExecutor();
        int ioWorkerThreadCount = nettyServerConfig.getWorkerThreadCount();
        channelFactory = new NioServerSocketChannelFactory(new NioServerBossPool(bossExecutor, bossThreadCount, ThreadNameDeterminer.CURRENT),
                new NioWorkerPool(ioWorkerExecutor, ioWorkerThreadCount, ThreadNameDeterminer.CURRENT));
    }

    start(channelFactory);
}
Project: pinpoint    File: DefaultPinpointClientFactory.java
private NioClientSocketChannelFactory createChannelFactory(int bossCount, int workerCount, Timer timer) {
    ExecutorService boss = Executors.newCachedThreadPool(new PinpointThreadFactory("Pinpoint-Client-Boss", true));
    NioClientBossPool bossPool = new NioClientBossPool(boss, bossCount, timer, ThreadNameDeterminer.CURRENT);

    ExecutorService worker = Executors.newCachedThreadPool(new PinpointThreadFactory("Pinpoint-Client-Worker", true));
    NioWorkerPool workerPool = new NioWorkerPool(worker, workerCount, ThreadNameDeterminer.CURRENT);
    return new NioClientSocketChannelFactory(bossPool, workerPool);
}
Project: pinpoint    File: PinpointServerAcceptor.java
private ServerBootstrap createBootStrap(int bossCount, int workerCount) {
    // profiler, collector
    ExecutorService boss = Executors.newCachedThreadPool(new PinpointThreadFactory("Pinpoint-Server-Boss", true));
    NioServerBossPool nioServerBossPool = new NioServerBossPool(boss, bossCount, ThreadNameDeterminer.CURRENT);

    ExecutorService worker = Executors.newCachedThreadPool(new PinpointThreadFactory("Pinpoint-Server-Worker", true));
    NioWorkerPool nioWorkerPool = new NioWorkerPool(worker, workerCount, ThreadNameDeterminer.CURRENT);

    NioServerSocketChannelFactory nioServerSocketChannelFactory = new NioServerSocketChannelFactory(nioServerBossPool, nioWorkerPool);
    return new ServerBootstrap(nioServerSocketChannelFactory);
}
Project: libraft    File: RaftAgent.java
/**
 * Initialize the local Raft server.
 * <p/>
 * Sets up the service implementation classes, creates database
 * tables and starts any necessary thread pools. Following this
 * call, all service classes are <strong>fully initialized</strong>.
 * Even though various threads are started, they <strong>will not</strong>
 * use or interact with the service implementation classes. Callers
 * still have exclusive access to the system.
 * <p/>
 * This method should <strong>only</strong> be called once before {@link RaftAgent#start()}.
 *
 * @throws StorageException if the persistence components cannot be initialized
 * @throws IllegalStateException if this method is called multiple times
 */
public synchronized void initialize() throws StorageException {
    checkState(!running);
    checkState(!initialized);
    checkState(setupConversion);

    // start up the snapshots subsystem
    snapshotStore.initialize();
    // check that the snapshot metadata and the filesystem agree
    // FIXME (AG): this _may_ be expensive, especially if the user never bothers to clean out snapshots!
    // FIXME (AG): warning, warning - this is upfront work - probably a very, very bad idea
    snapshotStore.reconcileSnapshots();

    // initialize the log and store
    jdbcLog.initialize();
    jdbcStore.initialize();

    // initialize the various thread pools
    nonIoExecutorService = MoreExecutors.listeningDecorator(Executors.newCachedThreadPool());
    ioExecutorService = Executors.newCachedThreadPool();
    serverBossPool = new NioServerBossPool(ioExecutorService, 1);
    clientBossPool = new NioClientBossPool(ioExecutorService, 1);
    workerPool = new NioWorkerPool(ioExecutorService, 3);

    // TODO (AG): avoid creating threads in the initialize() method
    // initialize the networking subsystem
    sharedWorkerPool = new ShareableWorkerPool<NioWorker>(workerPool);
    ServerSocketChannelFactory serverChannelFactory = new NioServerSocketChannelFactory(serverBossPool, sharedWorkerPool);
    ClientSocketChannelFactory clientChannelFactory = new NioClientSocketChannelFactory(clientBossPool, sharedWorkerPool);
    raftNetworkClient.initialize(nonIoExecutorService, serverChannelFactory, clientChannelFactory, raftAlgorithm);

    raftAlgorithm.initialize();

    initialized = true;
}
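
The ShareableWorkerPool wrapper above exists because the server and client channel factories share one NioWorkerPool: wrapping it prevents either factory's releaseExternalResources() from tearing down workers the other still needs. A hedged teardown sketch under that assumption:

    serverChannelFactory.releaseExternalResources();
    clientChannelFactory.releaseExternalResources();
    sharedWorkerPool.destroy(); // only this call actually stops the shared NioWorker threads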
Project: Camel    File: NettyWorkerPoolBuilder.java
/**
 * Creates a new worker pool.
 */
public WorkerPool build() {
    int count = workerCount > 0 ? workerCount : NettyHelper.DEFAULT_IO_THREADS;
    workerPool = new NioWorkerPool(Executors.newCachedThreadPool(), count, new CamelNettyThreadNameDeterminer(pattern, name));
    return workerPool;
}
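
A hedged usage sketch for this builder (the setter names follow camel-netty's NettyWorkerPoolBuilder, but treat them as assumptions):

    NettyWorkerPoolBuilder builder = new NettyWorkerPoolBuilder();
    builder.setName("NettyWorker");
    builder.setWorkerCount(4);
    WorkerPool workerPool = builder.build();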
Project: mandrel    File: ThriftServer.java
@Inject
public ThriftServer(final NiftyProcessor processor, ThriftServerConfig config, @ThriftServerTimer Timer timer,
        Map<String, ThriftFrameCodecFactory> availableFrameCodecFactories, Map<String, TDuplexProtocolFactory> availableProtocolFactories,
        @ThriftServerWorkerExecutor Map<String, ExecutorService> availableWorkerExecutors, NiftySecurityFactoryHolder securityFactoryHolder, boolean local) {
    checkNotNull(availableFrameCodecFactories, "availableFrameCodecFactories cannot be null");
    checkNotNull(availableProtocolFactories, "availableProtocolFactories cannot be null");

    NiftyProcessorFactory processorFactory = new NiftyProcessorFactory() {
        @Override
        public NiftyProcessor getProcessor(TTransport transport) {
            return processor;
        }
    };

    String transportName = config.getTransportName();
    String protocolName = config.getProtocolName();

    checkState(availableFrameCodecFactories.containsKey(transportName), "No available server transport named " + transportName);
    checkState(availableProtocolFactories.containsKey(protocolName), "No available server protocol named " + protocolName);

    workerExecutor = config.getOrBuildWorkerExecutor(availableWorkerExecutors);
    if (local) {
        log.warn("Using local server");
        configuredPort = 0;
        ioThreads = 0;
        ioExecutor = null;
        acceptorThreads = 0;
        acceptorExecutor = null;
        serverChannelFactory = new DefaultLocalServerChannelFactory();
    } else {
        configuredPort = config.getPort();

        acceptorExecutor = newCachedThreadPool(new ThreadFactoryBuilder().setNameFormat("thrift-acceptor-%s").build());
        acceptorThreads = config.getAcceptorThreadCount();
        ioExecutor = newCachedThreadPool(new ThreadFactoryBuilder().setNameFormat("thrift-io-%s").build());
        ioThreads = config.getIoThreadCount();

        serverChannelFactory = new NioServerSocketChannelFactory(new NioServerBossPool(acceptorExecutor, acceptorThreads, ThreadNameDeterminer.CURRENT),
                new NioWorkerPool(ioExecutor, ioThreads, ThreadNameDeterminer.CURRENT));
    }

    ThriftServerDef thriftServerDef = ThriftServerDef.newBuilder().name("thrift").listen(configuredPort)
            .limitFrameSizeTo((int) config.getMaxFrameSize().toBytes()).clientIdleTimeout(config.getIdleConnectionTimeout())
            .withProcessorFactory(processorFactory).limitConnectionsTo(config.getConnectionLimit())
            .limitQueuedResponsesPerConnection(config.getMaxQueuedResponsesPerConnection())
            .thriftFrameCodecFactory(availableFrameCodecFactories.get(transportName)).protocol(availableProtocolFactories.get(protocolName))
            .withSecurityFactory(securityFactoryHolder.niftySecurityFactory).using(workerExecutor).taskTimeout(config.getTaskExpirationTimeout()).build();

    NettyServerConfigBuilder nettyServerConfigBuilder = NettyServerConfig.newBuilder();

    nettyServerConfigBuilder.getServerSocketChannelConfig().setBacklog(config.getAcceptBacklog());
    nettyServerConfigBuilder.setBossThreadCount(config.getAcceptorThreadCount());
    nettyServerConfigBuilder.setWorkerThreadCount(config.getIoThreadCount());
    nettyServerConfigBuilder.setTimer(timer);

    NettyServerConfig nettyServerConfig = nettyServerConfigBuilder.build();

    transport = new NettyServerTransport(thriftServerDef, nettyServerConfig, allChannels, local);
}
Project: simple-netty-source    File: NioClientSocketChannelFactory.java
/**
 * Creates a new instance.
 *
 * @param bossExecutor
 *        the {@link Executor} which will execute the boss thread
 * @param workerExecutor
 *        the {@link Executor} which will execute the worker threads
 * @param bossCount
 *        the maximum number of boss threads
 * @param workerCount
 *        the maximum number of I/O worker threads
 */
public NioClientSocketChannelFactory(
        Executor bossExecutor, Executor workerExecutor,
        int bossCount, int workerCount) {
    this(bossExecutor, bossCount, new NioWorkerPool(workerExecutor, workerCount));
}
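
A hedged sketch showing the two equivalent ways to build the client factory: the convenience constructor above, and the explicit form it delegates to (thread counts illustrative):

    // convenience constructor: executors plus thread counts
    ChannelFactory a = new NioClientSocketChannelFactory(
            Executors.newCachedThreadPool(), Executors.newCachedThreadPool(), 1, 4);

    // explicit form: the same thing the constructor above delegates to
    ChannelFactory b = new NioClientSocketChannelFactory(
            Executors.newCachedThreadPool(), 1,
            new NioWorkerPool(Executors.newCachedThreadPool(), 4));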
Project: simple-netty-source    File: NioServerSocketChannelFactory.java
/**
 * Creates a new instance.
 *
 * @param bossExecutor
 *        the {@link Executor} which will execute the boss threads
 * @param bossCount
 *        the number of boss threads
 * @param workerExecutor
 *        the {@link Executor} which will execute the I/O worker threads
 * @param workerCount
 *        the maximum number of I/O worker threads
 */
public NioServerSocketChannelFactory(
        Executor bossExecutor, int bossCount, Executor workerExecutor,
        int workerCount) {
    this(bossExecutor, bossCount, new NioWorkerPool(workerExecutor, workerCount));
}
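
And the analogous hedged sketch for the server side, using the convenience constructor documented above (thread counts illustrative):

    ChannelFactory serverFactory = new NioServerSocketChannelFactory(
            Executors.newCachedThreadPool(), 1, Executors.newCachedThreadPool(), 4);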