/**
 * Opens a TCP connection to {@code socketAddress} and stores the resulting channel.
 * Blocks until the connection attempt completes.
 *
 * @param socketAddress remote endpoint to connect to
 * @throws ClientException if configuration or the connect itself fails
 */
@Override
public void connect(final InetSocketAddress socketAddress) {
    workerGroup = new NioEventLoopGroup(workerGroupThreads);
    Bootstrap bootstrap = new Bootstrap();
    try {
        bootstrap
                .group(workerGroup)
                .channel(NioSocketChannel.class)
                .option(ChannelOption.SO_KEEPALIVE, true)
                .option(ChannelOption.TCP_NODELAY, true)
                .handler(clientChannelInitializer);
        // The connect is now inside the try: previously a connect failure escaped
        // unwrapped (inconsistent with the ClientException contract) and leaked
        // the freshly created event loop group.
        channel = bootstrap
                .connect(socketAddress.getAddress().getHostAddress(), socketAddress.getPort())
                .syncUninterruptibly()
                .channel();
    } catch (final Exception ex) {
        // Release the event loop threads before propagating the failure.
        workerGroup.shutdownGracefully();
        throw new ClientException(ex);
    }
}
@Override protected void initChannel(SocketChannel ch) throws Exception { try { ch.config().setOption(ChannelOption.TCP_NODELAY, true); ch.config().setOption(ChannelOption.IP_TOS, 0x18); } catch (ChannelException ex) { // IP_TOS not supported by platform, ignore } ch.config().setAllocator(PooledByteBufAllocator.DEFAULT); PacketRegistry r = new PacketRegistry(); ch.pipeline().addLast("idleStateHandler", new IdleStateHandler(0, 0, 30)); ch.pipeline().addLast(new HttpServerCodec()); ch.pipeline().addLast(new HttpObjectAggregator(65536)); ch.pipeline().addLast(new WebSocketHandler()); ch.pipeline().addLast(new PacketDecoder(r)); ch.pipeline().addLast(new PacketEncoder(r)); ch.pipeline().addLast(mExecutorGroup, "serverHandler", new ClientHandler(mServer)); }
/**
 * Creates a connection pool for one FastDFS storage address.
 *
 * @param addr remote address the pooled connections will target
 * @return a new pool backed by a bootstrap pre-configured for that address
 */
@Override
protected FastdfsPool newPool(InetSocketAddress addr) {
    if (LOG.isDebugEnabled()) {
        LOG.debug("channel pool created : {}", addr);
    }
    Bootstrap poolBootstrap = new Bootstrap()
            .group(loopGroup)
            .channel(NioSocketChannel.class)
            .remoteAddress(addr)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, (int) connectTimeout)
            .option(ChannelOption.TCP_NODELAY, true)
            .option(ChannelOption.SO_KEEPALIVE, true);
    return new FastdfsPool(poolBootstrap, readTimeout, idleTimeout, maxConnPerHost);
}
public void start() { bootstrap.group(group).channel(NioSocketChannel.class); bootstrap.option(ChannelOption.TCP_NODELAY, true); bootstrap.handler(new ChannelInitializer<SocketChannel>() { @Override public void initChannel(SocketChannel ch) throws Exception { // ch.pipeline().addLast(new IdleStateHandler(1, 1, 5)); ch.pipeline().addLast(new KyroMsgDecoder()); ch.pipeline().addLast(new KyroMsgEncoder()); ch.pipeline().addLast(new ClientHandler()); } }); new ScheduledThreadPoolExecutor(1).scheduleAtFixedRate(new Runnable() { @Override public void run() { scanResponseTable(3000); } }, 1000, 1000, TimeUnit.MILLISECONDS); }
/**
 * Boots a single-acceptor NIO server on port 8090 and blocks until the server
 * channel closes. Event loop groups are now released in a finally block —
 * previously they leaked on any exit path.
 */
public static void main(String[] args) throws Exception {
    EventLoopGroup bossGroup = new NioEventLoopGroup(1);
    EventLoopGroup workerGroup = new NioEventLoopGroup();
    try {
        ServerBootstrap b = new ServerBootstrap();
        b.group(bossGroup, workerGroup)
                .channel(NioServerSocketChannel.class)
                .option(ChannelOption.SO_REUSEADDR, true)
                .childHandler(new ChannelInitializer<NioSocketChannel>() {
                    @Override
                    protected void initChannel(NioSocketChannel ch) throws Exception {
                        ch.pipeline().addLast(new SimpleServerHandler());
                    }
                });
        b.bind(8090).sync().channel().closeFuture().sync();
    } finally {
        // Shut down all event loops to terminate their threads.
        workerGroup.shutdownGracefully();
        bossGroup.shutdownGracefully();
    }
}
protected AbstractNettyServer(String serverName) { this.serverName = Objects.requireNonNull(serverName, "server name"); bootstrap = new ServerBootstrap(); if (Epoll.isAvailable()) { bootstrap.option(ChannelOption.SO_BACKLOG, 1024).channel(EpollServerSocketChannel.class) .childOption(ChannelOption.SO_LINGER, 0).childOption(ChannelOption.SO_REUSEADDR, true) .childOption(ChannelOption.SO_KEEPALIVE, true); log.info(serverName + " epoll init"); } else { bootstrap.channel(NioServerSocketChannel.class); log.info(serverName + " nio init"); } bootstrap.group(bossGroup, workerGroup).option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT) .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT) .childOption(ChannelOption.TCP_NODELAY, true).childHandler(new ChannelInitializer<SocketChannel>() { @Override protected void initChannel(SocketChannel ch) throws Exception { initPipeline(ch.pipeline()); } }); }
/**
 * Connect to remote servers. We'll initiate the connection to any nodes with a
 * lower ID so that there will be a single connection between each pair of
 * nodes which we'll use symmetrically.
 */
protected void startClients(RPCChannelInitializer channelInitializer) {
    clientBootstrap = new Bootstrap()
            .group(workerGroup)
            .channel(NioSocketChannel.class)
            .option(ChannelOption.SO_REUSEADDR, true)
            .option(ChannelOption.SO_KEEPALIVE, true)
            .option(ChannelOption.TCP_NODELAY, true)
            .option(ChannelOption.SO_SNDBUF, SEND_BUFFER_SIZE)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, CONNECT_TIMEOUT)
            .handler(channelInitializer);
    // Kick off the (re)connect loop immediately on the shared scheduler.
    ScheduledExecutorService scheduler = syncManager.getThreadPool().getScheduledExecutor();
    reconnectTask = new SingletonTask(scheduler, new ConnectTask());
    reconnectTask.reschedule(0, TimeUnit.SECONDS);
}
public ChannelFuture connectAsync(String host, int port, String remoteId, boolean discoveryMode) { ethereumListener.trace("Connecting to: " + host + ":" + port); EthereumChannelInitializer ethereumChannelInitializer = ctx.getBean(EthereumChannelInitializer.class, remoteId); ethereumChannelInitializer.setPeerDiscoveryMode(discoveryMode); Bootstrap b = new Bootstrap(); b.group(workerGroup); b.channel(NioSocketChannel.class); b.option(ChannelOption.SO_KEEPALIVE, true); b.option(ChannelOption.MESSAGE_SIZE_ESTIMATOR, DefaultMessageSizeEstimator.DEFAULT); b.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, config.peerConnectionTimeout()); b.remoteAddress(host, port); b.handler(ethereumChannelInitializer); // Start the client. return b.connect(); }
/**
 * Asynchronously connects the given channel to {@code host:port} and completes
 * the returned future when the connect attempt finishes.
 *
 * @param channel handler appended at the end of the pipeline
 * @return a future that completes when the connect succeeds, or fails with the
 *         connect cause — previously failures were silently reported as success
 */
private final Future<Void> bootstrap(final NettyChannel channel) {
    final Promise<Void> p = Promise.apply();
    new Bootstrap().group(eventLoopGroup).channel(NioSocketChannel.class)
            .option(ChannelOption.SO_KEEPALIVE, true)
            .option(ChannelOption.AUTO_READ, false)
            .handler(new ChannelInitializer<io.netty.channel.Channel>() {
                @Override
                protected void initChannel(final io.netty.channel.Channel ch) throws Exception {
                    ch.pipeline().addLast(new MessageDecoder(), new MessageEncoder(),
                            new FlowControlHandler(), channel);
                }
            })
            .connect(new InetSocketAddress(host, port))
            .addListener(future -> {
                // Propagate connect failures: the old listener always completed
                // the promise with VOID, hiding the error from the caller.
                if (future.isSuccess()) {
                    p.become(Future.VOID);
                } else {
                    p.become(Future.exception(future.cause()));
                }
            });
    return p;
}
/**
 * Starts the server and blocks until the server channel closes. The bound
 * port and backlog come from {@code aceServerConfig}; {@code close()} runs on
 * every exit path.
 *
 * @throws Exception if binding fails or the wait is interrupted
 */
public void start() throws Exception {
    try {
        ServerBootstrap serverBootstrap = new ServerBootstrap()
                .group(bossGroup, workerGroup)
                .channel(NioServerSocketChannel.class)
                .childHandler(channelInitializer)
                .option(ChannelOption.SO_BACKLOG, aceServerConfig.getBackSize())
                .childOption(ChannelOption.SO_KEEPALIVE, true)
                .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
        ChannelFuture bindFuture = serverBootstrap.bind(aceServerConfig.getPort()).sync();
        System.out.println("ace server starter on port : " + aceServerConfig.getPort());
        bindFuture.channel().closeFuture().sync();
    } finally {
        close();
    }
}
@Async("myTaskAsyncPool") public void run(int udpReceivePort) { EventLoopGroup group = new NioEventLoopGroup(); logger.info("Server start! Udp Receive msg Port:" + udpReceivePort); try { Bootstrap b = new Bootstrap(); b.group(group) .channel(NioDatagramChannel.class) .option(ChannelOption.SO_BROADCAST, true) .handler(new UdpServerHandler()); // 设置服务端接收消息的 Handler (保存消息到 mysql 和 redis 中) b.bind(udpReceivePort).sync().channel().closeFuture().await(); } catch (InterruptedException e) { e.printStackTrace(); } finally { group.shutdownGracefully(); } }
public Receiver ( final ReceiverHandlerFactory factory, final SocketAddress addr ) { this.factory = factory; this.bossGroup = new NioEventLoopGroup (); this.workerGroup = new NioEventLoopGroup (); this.bootstrap = new ServerBootstrap (); this.bootstrap.group ( this.bossGroup, this.workerGroup ); this.bootstrap.channel ( NioServerSocketChannel.class ); this.bootstrap.option ( ChannelOption.SO_BACKLOG, 5 ); this.bootstrap.option ( ChannelOption.SO_REUSEADDR, true ); this.bootstrap.childHandler ( new ChannelInitializer<SocketChannel> () { @Override protected void initChannel ( final SocketChannel ch ) throws Exception { handleInitChannel ( ch ); } } ); this.channel = this.bootstrap.bind ( addr ).channel (); logger.info ( "Receiver running ..." ); }
private ChannelFuture bindToPlainSocket() throws InterruptedException { String hostname = configuration.getHostName(); int port = Integer.parseInt(configuration.getPlain().getPort()); ServerBootstrap b = new ServerBootstrap(); b.group(bossGroup, workerGroup) .channel(NioServerSocketChannel.class) .childHandler(new SocketChannelInitializer(ioExecutors)) .option(ChannelOption.SO_BACKLOG, 128) .childOption(ChannelOption.SO_KEEPALIVE, true); // Bind and start to accept incoming connections. ChannelFuture future = b.bind(hostname, port).sync(); LOGGER.info("Listening AMQP on " + hostname + ":" + port); return future; }
private ChannelFuture bindToSslSocket() throws InterruptedException, CertificateException, UnrecoverableKeyException, NoSuchAlgorithmException, KeyStoreException, KeyManagementException, IOException { String hostname = configuration.getHostName(); int port = Integer.parseInt(configuration.getSsl().getPort()); ServerBootstrap b = new ServerBootstrap(); b.group(bossGroup, workerGroup) .channel(NioServerSocketChannel.class) .childHandler(new SslSocketChannelInitializer(ioExecutors, new SslHandlerFactory(configuration))) .option(ChannelOption.SO_BACKLOG, 128) .childOption(ChannelOption.SO_KEEPALIVE, true); // Bind and start to accept incoming connections. ChannelFuture future = b.bind(hostname, port).sync(); LOGGER.info("Listening AMQP/" + configuration.getSsl().getProtocol() + " on " + hostname + ":" + port); return future; }
public void run() { try { // Configure the server. EventLoopGroup group = new NioEventLoopGroup(); try { ServerBootstrap b = new ServerBootstrap(); b.option(ChannelOption.SO_BACKLOG, 1024); b.group(group) .channel(NioServerSocketChannel.class) .handler(new LoggingHandler(LogLevel.INFO)) .childHandler(new Http2ServerInitializer(mSslCtx)); sServerChannel = b.bind(PORT).sync().channel(); Log.i(TAG, "Netty HTTP/2 server started on " + getServerUrl()); sBlock.open(); sServerChannel.closeFuture().sync(); } finally { group.shutdownGracefully(); } Log.i(TAG, "Stopped Http2TestServerRunnable!"); } catch (Exception e) { Log.e(TAG, e.toString()); } }
/** * 初始化连接池 */ public void init() { bootstrap = new Bootstrap(); eventLoopGroup = new NioEventLoopGroup(); bootstrap.group(eventLoopGroup).channel(NioSocketChannel.class) .option(ChannelOption.SO_KEEPALIVE, true) .option(ChannelOption.TCP_NODELAY, true) .handler(new LoggingHandler()); //所有的公用一个eventloopgroup, 对于客户端来说应该问题不大! poolMap = new AbstractChannelPoolMap<InetSocketAddress, FixedChannelPool>() { @Override protected FixedChannelPool newPool(InetSocketAddress key) { return new FixedChannelPool(bootstrap.remoteAddress(key), new FixedChannelPoolHandler(), 2); } }; //预先建立好链接 serverListConfig.getAddressList().stream().forEach(address -> { poolMap.get(address); }); }
@PostConstruct public void init() throws Exception { log.info("Setting resource leak detector level to {}", leakDetectorLevel); ResourceLeakDetector.setLevel(ResourceLeakDetector.Level.valueOf(leakDetectorLevel.toUpperCase())); log.info("Starting MQTT transport..."); log.info("Lookup MQTT transport adaptor {}", adaptorName); // this.adaptor = (MqttTransportAdaptor) appContext.getBean(adaptorName); log.info("Starting MQTT transport server"); bossGroup = new NioEventLoopGroup(bossGroupThreadCount); workerGroup = new NioEventLoopGroup(workerGroupThreadCount); ServerBootstrap b = new ServerBootstrap(); b.group(bossGroup, workerGroup).option(ChannelOption.SO_BACKLOG, 1000).option(ChannelOption.TCP_NODELAY, true) .childOption(ChannelOption.SO_KEEPALIVE, true).channel(NioServerSocketChannel.class) .childHandler(new MqttTransportServerInitializer(msgProducer, deviceService, authService, assetService, assetAuthService, relationService, sslHandlerProvider)); serverChannel = b.bind(host, port).sync().channel(); log.info("Mqtt transport started: {}:{}!", host, port); }
/**
 * Boots the echo server on {@code port}, blocks until its channel closes, and
 * then releases both event loop groups.
 *
 * @throws Exception if binding fails or the wait is interrupted
 */
public void run() throws Exception {
    EventLoopGroup acceptorGroup = new NioEventLoopGroup();
    EventLoopGroup ioGroup = new NioEventLoopGroup();
    try {
        ServerBootstrap serverBootstrap = new ServerBootstrap();
        serverBootstrap.group(acceptorGroup, ioGroup)
                .channel(NioServerSocketChannel.class)
                .localAddress(new InetSocketAddress(port))
                .childHandler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    public void initChannel(SocketChannel ch) throws Exception {
                        ch.pipeline().addLast(new EchoServerHandler());
                    }
                })
                .option(ChannelOption.SO_BACKLOG, 128)
                .childOption(ChannelOption.SO_KEEPALIVE, true);
        // Bind, then wait for the server channel to close.
        serverBootstrap.bind(port).sync().channel().closeFuture().sync();
    } finally {
        ioGroup.shutdownGracefully();
        acceptorGroup.shutdownGracefully();
    }
}
/**
 * Starts the SOCKS5 proxy: acceptors take incoming connections, workers handle
 * them, and a third group performs the outbound forwarding. Blocks until the
 * listener closes; all three groups are released on exit.
 *
 * @throws InterruptedException if binding or waiting is interrupted
 */
public void start() throws InterruptedException {
    EventLoopGroup acceptors = new NioEventLoopGroup(socksProperties.getAcceptors());
    EventLoopGroup workers = new NioEventLoopGroup();
    EventLoopGroup forwarders = new NioEventLoopGroup();
    try {
        ServerBootstrap serverBootstrap = new ServerBootstrap()
                .group(acceptors, workers)
                .channel(NioServerSocketChannel.class)
                .option(ChannelOption.SO_BACKLOG, socksProperties.getBacklog())
                .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, socksProperties.getConnectTimeoutMillis())
                .childHandler(new Socks5WorkerChannelInitializer(socksProperties, forwarders));
        Address listenAddress = socksProperties.getListen();
        ChannelFuture bindFuture = serverBootstrap.bind(listenAddress.getHost(), listenAddress.getPort()).sync();
        bindFuture.channel().closeFuture().sync();
    } finally {
        forwarders.shutdownGracefully();
        workers.shutdownGracefully();
        acceptors.shutdownGracefully();
    }
}
public ChannelFuture connectAsync(String host, int port, String remoteId, boolean discoveryMode) { ethereumListener.trace("Connecting to: " + host + ":" + port); EthereumChannelInitializer ethereumChannelInitializer = ethereumChannelInitializerFactory.newInstance(remoteId); ethereumChannelInitializer.setPeerDiscoveryMode(discoveryMode); Bootstrap b = new Bootstrap(); b.group(workerGroup); b.channel(NioSocketChannel.class); b.option(ChannelOption.SO_KEEPALIVE, true); b.option(ChannelOption.MESSAGE_SIZE_ESTIMATOR, DefaultMessageSizeEstimator.DEFAULT); b.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, config.peerConnectionTimeout()); b.remoteAddress(host, port); b.handler(ethereumChannelInitializer); // Start the client. return b.connect(); }
/**
 * Starts the HTTP server, registers a JVM shutdown hook, and blocks until the
 * bind completes. On interruption the interrupt flag is restored and the full
 * exception is logged (previously only the message was kept).
 *
 * @throws Exception declared for callers; startup interruption is handled here
 */
private void startServer() throws Exception {
    Runtime.getRuntime().addShutdownHook(new Thread(this::shutdown));
    try {
        ServerBootstrap bootstrap = new ServerBootstrap();
        bootstrap.group(masterGroup, slaveGroup)
                .channel(NioServerSocketChannel.class)
                .childHandler(new HttpServerInitializer(razor))
                .option(ChannelOption.SO_BACKLOG, 128)
                .childOption(ChannelOption.SO_KEEPALIVE, true);
        this.channel = bootstrap.bind(env.get(ENV_KEY_SERVER_HOST, DEFAULT_SERVER_HOST),
                env.getInt(ENV_KEY_SERVER_PORT, DEFAULT_SERVER_PORT)).sync().channel();
        log.info("{} started and listen on {}", HttpServerHandler.class.getName(), channel.localAddress());
    } catch (final InterruptedException e) {
        // Restore interrupt status and keep the stack trace in the log.
        Thread.currentThread().interrupt();
        log.error("Netty server startup failed", e);
    }
}
/**
 * Starts the MQTT server and blocks until the server channel closes.
 * Fixes: the event loop groups were passed in reverse order (the boss/acceptor
 * group must be the parent); the bind is now synced so failures surface and the
 * "started" message is accurate; interruption restores the interrupt flag.
 */
public void start() {
    ServerBootstrap bootstrap = new ServerBootstrap();
    bootstrap.group(bossGroup, workerGroup)
            .channel(NioServerSocketChannel.class)
            .childHandler(new TCPHandlerInitializer(this))
            .option(ChannelOption.SO_BACKLOG, 512)
            .childOption(ChannelOption.SO_KEEPALIVE, true);
    try {
        Channel serverChannel = bootstrap.bind(new InetSocketAddress(port)).sync().channel();
        System.out.println("MQTT服务器已启动...");
        serverChannel.closeFuture().sync();
    } catch (InterruptedException e) {
        // Restore interrupt status rather than just printing the trace.
        Thread.currentThread().interrupt();
    }
}
/**
 * Module start-up: creates the worker event loop, timer, and channel
 * initializer, then prepares the shared client bootstrap used for outbound
 * remote-sync connections. No connection is opened here.
 */
@Override
public void startUp(FloodlightModuleContext context) throws FloodlightModuleException {
    shutdown = false;
    workerExecutor = new NioEventLoopGroup();
    timer = new HashedWheelTimer();
    pipelineFactory = new RemoteSyncChannelInitializer(timer, this);
    clientBootstrap = new Bootstrap()
            .channel(NioSocketChannel.class)
            .group(workerExecutor)
            .option(ChannelOption.SO_REUSEADDR, true)
            .option(ChannelOption.SO_KEEPALIVE, true)
            .option(ChannelOption.TCP_NODELAY, true)
            .option(ChannelOption.SO_SNDBUF, RPCService.SEND_BUFFER_SIZE)
            .option(ChannelOption.SO_RCVBUF, RPCService.SEND_BUFFER_SIZE)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, RPCService.CONNECT_TIMEOUT)
            .handler(pipelineFactory);
}
/**
 * Subscribes the given subscriber to datagrams received on a UDP socket bound
 * to {@code this.port}.
 * <p>
 * The multicast group ({@code this.address} — presumably a multicast address;
 * verify against callers) is joined on every local network interface before
 * any datagrams are consumed: {@code thenMany} only switches to the receive
 * stream once all joins have completed. Each payload is converted to a byte
 * array and mapped through {@code this.parser} before reaching the subscriber.
 * Returning {@code Flux.never()} from the handler keeps the channel open
 * indefinitely. If interface enumeration fails, the error is delivered to the
 * subscriber instead of being thrown.
 */
@Override
public void subscribe(final Subscriber<? super O> subscriber) {
    try {
        // Enumerate all local interfaces so the group can be joined on each one.
        final List<NetworkInterface> interfaces = Collections
                .list(NetworkInterface.getNetworkInterfaces());
        UdpServer
                .create(opts -> opts.option(ChannelOption.SO_REUSEADDR, true)
                        .connectAddress(() -> new InetSocketAddress(this.port))
                        .protocolFamily(InternetProtocolFamily.IPv4))
                .newHandler((in, out) -> {
                    Flux.fromIterable(interfaces)
                            .flatMap(iface -> in.join(this.address, iface))
                            .thenMany(in.receive().asByteArray())
                            .map(this.parser)
                            .subscribe(subscriber);
                    // Never complete: keep the UDP channel open.
                    return Flux.never();
                })
                .subscribe();
    } catch (final SocketException exception) {
        // Deliver enumeration failures through the reactive contract.
        Flux.<O>error(exception).subscribe(subscriber);
    }
}
@Override public void init() { super.init(); b.group(bossGroup, workGroup) .channel(NioServerSocketChannel.class) .option(ChannelOption.SO_KEEPALIVE, false) .option(ChannelOption.TCP_NODELAY, true) .option(ChannelOption.SO_BACKLOG, 1024) .localAddress(new InetSocketAddress(port)) .childHandler(new ChannelInitializer<SocketChannel>() { @Override protected void initChannel(SocketChannel ch) throws Exception { ch.pipeline().addLast(defLoopGroup, new HttpRequestDecoder(), //请求解码器 new HttpObjectAggregator(65536),//将多个消息转换成单一的消息对象 new HttpResponseEncoder(), // 响应编码器 new HttpServerHandler(snowFlake)//自定义处理器 ); } }); }
@Override public void init() { super.init(); b.group(bossGroup, workGroup) .channel(NioServerSocketChannel.class) .option(ChannelOption.SO_KEEPALIVE, true) .option(ChannelOption.TCP_NODELAY, true) .option(ChannelOption.SO_BACKLOG, 1024) .localAddress(new InetSocketAddress(port)) .childHandler(new ChannelInitializer<SocketChannel>() { @Override protected void initChannel(SocketChannel ch) throws Exception { ch.pipeline().addLast(defLoopGroup, new SdkServerDecoder(12), // 自定义解码器 new SdkServerEncoder(), // 自定义编码器 new SdkServerHandler(snowFlake) // 自定义处理器 ); } }); }
/**
 * Binds the server to the given port and blocks until the channel closes.
 * Both event loop groups are shut down on every exit path.
 *
 * @param port the TCP port to listen on
 * @throws InterruptedException if interrupted while binding or waiting
 */
public void bind(int port) throws InterruptedException {
    EventLoopGroup acceptorGroup = new NioEventLoopGroup();
    EventLoopGroup ioGroup = new NioEventLoopGroup();
    try {
        ServerBootstrap serverBootstrap = new ServerBootstrap();
        serverBootstrap.group(acceptorGroup, ioGroup)
                .channel(NioServerSocketChannel.class)
                .option(ChannelOption.SO_BACKLOG, 1024)
                .childHandler(new ChildChannelHandler());
        ChannelFuture bindFuture = serverBootstrap.bind(port).sync();
        System.out.println("服务器已启动, 监控端口号为 : " + port);
        bindFuture.channel().closeFuture().sync();
    } finally {
        acceptorGroup.shutdownGracefully();
        ioGroup.shutdownGracefully();
    }
}
/**
 * Starts the RPC server on the given port, blocking until the bind completes.
 * Fixes: replaces the deprecated {@code Class.newInstance()} (which swallows
 * checked constructor exceptions) and releases the event loop groups when
 * startup fails instead of leaking their threads.
 *
 * @param port TCP port to bind
 * @throws ServerException if configuration, initializer creation, or the bind fails
 */
@Override
public void start(final int port) {
    bossGroup = new NioEventLoopGroup(bossGroupThreads);
    workerGroup = new NioEventLoopGroup(workerGroupThreads);
    ServerBootstrap serverBootstrap = new ServerBootstrap();
    try {
        serverBootstrap
                .group(bossGroup, workerGroup)
                .channel(NioServerSocketChannel.class)
                .option(ChannelOption.SO_BACKLOG, backlogSize)
                .childOption(ChannelOption.SO_KEEPALIVE, true)
                .childOption(ChannelOption.TCP_NODELAY, true)
                .childHandler(serializeType.getServerChannelInitializer()
                        .getDeclaredConstructor().newInstance());
        channel = serverBootstrap.bind(port).sync().channel();
    } catch (final Exception ex) {
        // Don't leak event loop threads on a failed startup.
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
        throw new ServerException(Server.SYSTEM_MESSAGE_ID, ex);
    }
}
@Override protected void initChannel(SocketChannel ch) throws Exception { try { ch.config().setOption(ChannelOption.IP_TOS, 0x18); // ch.config().setOption(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, 32 * 1024); // ch.config().setOption(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, 8 * 1024); } catch (ChannelException ex) { // IP_TOS not supported by platform, ignore } ch.config().setAllocator(PooledByteBufAllocator.DEFAULT); PacketRegistry r = new PacketRegistry(); ch.pipeline().addLast(new HttpServerCodec()); ch.pipeline().addLast(new HttpObjectAggregator(65536)); ch.pipeline().addLast(new WebSocketHandler()); ch.pipeline().addLast(new PacketDecoder(r)); ch.pipeline().addLast(new PacketEncoder(r)); ch.pipeline().addLast(new ClientHandler(server)); }
/**
 * Prepares the benchmark client: records the benchmark parameters and builds a
 * bootstrap whose pipeline is optional TLS, then the HTTP codec and
 * decompressor, then the per-channel benchmark handler.
 *
 * @param benchmark benchmark settings (concurrency, backlog, TLS flag)
 */
@Override
public void prepare(final Benchmark benchmark) {
    this.concurrencyLevel = benchmark.concurrencyLevel;
    this.targetBacklog = benchmark.targetBacklog;

    ChannelInitializer<SocketChannel> initializer = new ChannelInitializer<SocketChannel>() {
        @Override
        public void initChannel(SocketChannel channel) throws Exception {
            ChannelPipeline pipeline = channel.pipeline();
            if (benchmark.tls) {
                SslClient sslClient = SslClient.localhost();
                SSLEngine engine = sslClient.sslContext.createSSLEngine();
                engine.setUseClientMode(true);
                pipeline.addLast("ssl", new SslHandler(engine));
            }
            pipeline.addLast("codec", new HttpClientCodec());
            pipeline.addLast("inflater", new HttpContentDecompressor());
            pipeline.addLast("handler", new HttpChannel(channel));
        }
    };

    bootstrap = new Bootstrap()
            .group(new NioEventLoopGroup(concurrencyLevel))
            .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
            .channel(NioSocketChannel.class)
            .handler(initializer);
}
@Override public synchronized void start() { bossGroup = new NioEventLoopGroup(); // (1) workerGroup = new NioEventLoopGroup(); try { b = new ServerBootstrap(); // (2) b.group(bossGroup, workerGroup) .channel(NioServerSocketChannel.class) // (3) .childHandler(new ChannelInitializer<SocketChannel>() { // (4) @Override public void initChannel(SocketChannel ch) throws Exception { ch.pipeline().addLast(new ByteArrayDecoder()); ch.pipeline().addLast(new ByteArrayEncoder()); ch.pipeline().addLast(new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 4, 0, 4)); ch.pipeline().addLast(new IdleStateHandler(heartTime, heartTime, heartTime, TimeUnit.SECONDS)); ch.pipeline().addLast(new DeliveryHandler(deliveryService)); } }) .option(ChannelOption.SO_BACKLOG, 128) // (5) .childOption(ChannelOption.SO_KEEPALIVE, true); // (6) // Bind and start to accept incoming connections. b.bind(settingService.getDeliveryPort()); logger.info("socket: "+settingService.getDeliveryPort()+" starting...."); // Wait until the server socket is closed. // In this example, this does not happen, but you can do that to gracefully } catch (Exception e) { e.printStackTrace(); } }
/**
 * Starts the notification register endpoint on its own thread so the blocking
 * bind/serve loop does not stall the caller. The event loop groups are released
 * when the channel closes or the thread is interrupted.
 *
 * @param host interface to bind
 * @param port port to bind
 */
protected void startNotificationRegisterEndpoint(final String host, final int port) {
    Runnable endpointLoop = () -> {
        bossGroup = new NioEventLoopGroup(1);
        workerGroup = new NioEventLoopGroup();
        try {
            ServerBootstrap serverBootstrap = new ServerBootstrap();
            serverBootstrap.option(ChannelOption.SO_BACKLOG, 1024);
            serverBootstrap.group(bossGroup, workerGroup)
                    .channel(NioServerSocketChannel.class)
                    .handler(new LoggingHandler(LogLevel.INFO))
                    .childHandler(new NotificationRegisterServerInitializer());
            Channel serverChannel = serverBootstrap.bind(host, port).sync().channel();
            logger.info(String.format("Notification register endpoint started at %s:%s", host, port));
            serverChannel.closeFuture().sync();
        } catch (InterruptedException ex) {
            logger.info("Notification register endpoint was interrupted.");
        } finally {
            bossGroup.shutdownGracefully();
            workerGroup.shutdownGracefully();
        }
    };
    notificationEndpointThread = new Thread(endpointLoop);
    notificationEndpointThread.start();
}
public void start() throws Exception { // Configure SSL. final SslContext sslCtx; if (SSL) { SelfSignedCertificate ssc = new SelfSignedCertificate(); sslCtx = SslContextBuilder.forServer(ssc.certificate(), ssc.privateKey()).build(); } else { sslCtx = null; } // Configure the server. EventLoopGroup bossGroup = new NioEventLoopGroup(1); EventLoopGroup workerGroup = new NioEventLoopGroup(); try { ServerBootstrap b = new ServerBootstrap(); b.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class).option(ChannelOption.SO_BACKLOG, 100) .handler(new LoggingHandler(LogLevel.INFO)).childHandler(new FileServerHandlerInitializer()); // Start the server. ChannelFuture f = b.bind(getHostAddress(), PORT).sync(); // System.out.println("server is started "+f.isSuccess()); setStarted(true); // Wait until the server socket is closed. f.channel().closeFuture().sync(); } finally { // Shut down all event loops to terminate all threads. bossGroup.shutdownGracefully(); workerGroup.shutdownGracefully(); } }
/**
 * Starts the reverse proxy: schedules the backend housekeeping tasks, then
 * binds the server port and blocks until the server channel closes.
 * Fixes: the two port log calls had no {@code {}} placeholder so the port
 * argument was silently dropped; interruption now restores the interrupt flag
 * and is logged instead of printed.
 */
@Override
public void start() {
    this.bossGroup = NettyUtils.createEventLoopGroup(1);
    this.workerGroup = new NioEventLoopGroup(4);
    Class<? extends ServerChannel> serverChannelClazz = NettyUtils.getServerChannelClass();
    ChannelHandler channelHandler = new CardeaServerChannelInitializer(this.backendManager);

    this.logger.info("Starting backend handling tasks.");
    this.executorService
            .scheduleAtFixedRate(new CheckDeadBackendsTask(this.backendManager), 10, 10, TimeUnit.SECONDS);
    this.executorService
            .scheduleAtFixedRate(new BackendRecoverTask(this.backendManager), 10, 10, TimeUnit.SECONDS);

    this.logger.info("Starting server and proxying all connections on *:{}", this.config.getServerPort());
    ServerBootstrap serverBootstrap = new ServerBootstrap();
    try {
        serverBootstrap
                .channel(serverChannelClazz)
                .group(this.bossGroup, this.workerGroup)
                .childHandler(channelHandler)
                // AUTO_READ off: reads are driven explicitly by the proxy handlers.
                .childOption(ChannelOption.AUTO_READ, false)
                .bind(this.config.getServerPort())
                .sync().channel().closeFuture().sync();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        this.logger.info("Proxy interrupted while running", e);
    }
    this.logger.info("Started reverse proxy on *:{}", this.config.getServerPort());
}
@Override protected void doStart() { try { bootstrap.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class); bootstrap.option(ChannelOption.SO_BACKLOG, 1024); bootstrap.option(ChannelOption.SO_REUSEADDR, true); bootstrap.childOption(ChannelOption.TCP_NODELAY, true); bootstrap.childOption(ChannelOption.SO_KEEPALIVE, true); bootstrap.childOption(ChannelOption.CONNECT_TIMEOUT_MILLIS, 3000); bootstrap.localAddress(serverconfig.getListenPort()); bootstrap.childHandler(new ChannelInitializer<SocketChannel>() { @Override public void initChannel(SocketChannel ch) throws IOException { ch.pipeline().addLast("idleState", new IdleStateHandler(0, 0, 60, TimeUnit.SECONDS)); ch.pipeline().addLast("heartbeat", new HeartbeatHandler()); ch.pipeline().addLast(new KyroMsgDecoder()); ch.pipeline().addLast(new KyroMsgEncoder()); ch.pipeline().addLast("invoker", new NettyServerHandler()); } }); ChannelFuture channelFuture = this.bootstrap.bind().sync(); // channelFuture.channel().closeFuture().sync(); logger.info("server started on port:" + serverconfig.getListenPort()); respScheduler.scheduleAtFixedRate(new Runnable() { @Override public void run() { scanResponseTable(5000); } }, 60 * 1000, 60 * 1000, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { logger.error("", e); System.exit(-1); } }
/**
 * Creates an RPC server whose bootstrap is configured here; the actual bind to
 * {@code address} happens later.
 * Fix: TCP_NODELAY is a per-connection socket option — as a parent option on
 * the listening socket it was a no-op, so it now applies to accepted child
 * channels.
 *
 * @param eventLoopGroup loop group serving both acceptor and children
 * @param eventExecutor  executor group for the server handler pipeline
 * @param channel        concrete server channel class (NIO/epoll/…)
 * @param address        address the server will later bind to
 */
<T extends ServerSocketChannel> RpcServer(EventLoopGroup eventLoopGroup, EventExecutorGroup eventExecutor,
        Class<T> channel, SocketAddress address) {
    this.address = address;
    this.allChannels = new DefaultChannelGroup(eventLoopGroup.next());
    this.handler = new ServerHandler(allChannels);
    this.bootstrap = new ServerBootstrap();
    bootstrap.channel(channel);
    bootstrap.childHandler(new ServerInitializer(eventExecutor, handler));
    bootstrap.group(eventLoopGroup);
    bootstrap.childOption(ChannelOption.TCP_NODELAY, true);
}
/**
 * Builds a client bootstrap for one Redis connection type.
 *
 * @param config supplies the transport class, event loop group, and timeouts
 * @param type   connection type handed to the channel initializer
 * @return a fully configured bootstrap targeting {@code addr}
 */
private Bootstrap createBootstrap(RedisClientConfig config, Type type) {
    Bootstrap clientBootstrap = new Bootstrap()
            .channel(config.getSocketChannelClass())
            .group(config.getGroup())
            .remoteAddress(addr)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, config.getConnectTimeout())
            .option(ChannelOption.SO_KEEPALIVE, config.isKeepAlive())
            .option(ChannelOption.TCP_NODELAY, config.isTcpNoDelay());
    // The initializer needs a reference to the bootstrap itself, so attach it last.
    clientBootstrap.handler(new RedisChannelInitializer(clientBootstrap, config, this, channels, type));
    return clientBootstrap;
}
/**
 * Prepares the server bootstrap from the given configuration; the actual bind
 * happens later. The server starts in the {@code Created} state.
 *
 * @param config socket options and backlog settings
 * @param port   port the server will later bind to
 * @throws InterruptedException declared for subclasses; not thrown here
 */
public AbstractServer(ServerConfig config, int port) throws InterruptedException {
    this.port = port;
    this.config = config;
    serverStateRef = new AtomicReference<>(AbstractServer.ServerState.Created);
    boss = new NioEventLoopGroup();
    worker = new NioEventLoopGroup();
    bootstrap = new ServerBootstrap();
    // NOTE(review): SO_KEEPALIVE/TCP_NODELAY are set as parent options here and
    // only affect the listening socket — confirm whether childOption was intended.
    bootstrap.group(boss, worker)
            .channel(NioServerSocketChannel.class)
            .option(ChannelOption.SO_BACKLOG, config.getSoBacklog())
            .option(ChannelOption.SO_KEEPALIVE, config.getSoKeepAlive())
            .option(ChannelOption.TCP_NODELAY, config.getTcpNoDelay())
            .handler(new LoggingHandler(LogLevel.INFO))
            .childHandler(newChannelInitializer());
}