Java class org.apache.commons.lang3.concurrent.BasicThreadFactory example source code
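
BasicThreadFactory, from Apache Commons Lang 3, is a ThreadFactory implementation configured through a fluent Builder: set a naming pattern, daemon flag, priority, and optionally an uncaught-exception handler, then hand the factory to any java.util.concurrent executor. Every snippet below follows that pattern. A minimal, self-contained sketch of the same idea (class and thread names here are illustrative only, not taken from the projects below):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import org.apache.commons.lang3.concurrent.BasicThreadFactory;

public class BasicThreadFactoryDemo {

    public static void main(String[] args) throws InterruptedException {
        // Build a factory that names its threads, marks them as daemons,
        // and runs them at normal priority; %d is replaced by a per-factory counter.
        BasicThreadFactory factory = new BasicThreadFactory.Builder()
                .namingPattern("demo-worker-%d")
                .daemon(true)
                .priority(Thread.NORM_PRIORITY)
                .build();

        // Hand the factory to a standard executor, exactly as the examples below do.
        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1, factory);
        scheduler.scheduleAtFixedRate(
                () -> System.out.println(Thread.currentThread().getName() + " tick"),
                0, 1, TimeUnit.SECONDS);

        TimeUnit.SECONDS.sleep(3);
        scheduler.shutdown();
    }
}
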

Project: csap-core    File: TransferManager.java
/**
 * Very transient.
 *
 * @param csapApp
 * @param timeOutSeconds
 * @param outputWriter
 */
public TransferManager( Application csapApp, int timeOutSeconds, BufferedWriter outputWriter ) {

    this.csapApp = csapApp;

    logger.debug( "Number of workers: {}", csapApp.lifeCycleSettings().getNumberWorkerThreads() );
    this.timeOutSeconds = timeOutSeconds;

    osCommandRunner = new OsCommandRunner( timeOutSeconds, 1, "TransferMgr" );

    this.globalWriterForResults = outputWriter;
    updateProgress( "\nExecuting distribution using : " + csapApp.lifeCycleSettings().getNumberWorkerThreads() + " threads.\n\n" );

    BasicThreadFactory schedFactory = new BasicThreadFactory.Builder()
        .namingPattern( "CsapFileTransfer-%d" )
        .daemon( true )
        .priority( Thread.NORM_PRIORITY )
        .build();

    fileTransferService = Executors.newFixedThreadPool( csapApp.lifeCycleSettings().getNumberWorkerThreads(), schedFactory );

    fileTransferComplete = new ExecutorCompletionService<String>( fileTransferService );
}
Project: csap-core    File: CsapEventClient.java
public CsapEventClient( ) {

        BasicThreadFactory eventThreadFactory = new BasicThreadFactory.Builder()
            .namingPattern( "CsapEventPost-%d" )
            .daemon( true )
            .priority( Thread.NORM_PRIORITY + 1 )
            .build();

        eventPostQueue = new ArrayBlockingQueue<>( MAX_EVENT_BACKLOG );
        // Use a single thread to sequence and post
        // eventPostPool = Executors.newFixedThreadPool(1, schedFactory, queue);
        // really only needs to be 1 - adding the others for lt scenario
        eventPostPool = new ThreadPoolExecutor( 1, 1,
            30, TimeUnit.SECONDS,
            eventPostQueue, eventThreadFactory );

        eventPostCompletionService = new ExecutorCompletionService<String>(
            eventPostPool );
    }
Project: csap-core    File: HostCollector.java
protected void scheduleCollection( Runnable collector) {
    // Thread commandThread = new Thread( this );
    // commandThread.start();
    String scheduleName = collector.getClass().getSimpleName() + "_" + collectionIntervalSeconds ;
    BasicThreadFactory schedFactory = new BasicThreadFactory.Builder()

            .namingPattern( scheduleName +"-%d" )
                .daemon( true )
                .priority( Thread.NORM_PRIORITY )
                .build();
    // Single collection thread
    scheduledExecutorService = Executors
            .newScheduledThreadPool( 1, schedFactory );
    int initialSleep = 10 ;
    if (this.collectionIntervalSeconds >= 60) {
        initialSleep += 30 + rg.nextInt(30) ;
    }

    scheduledExecutorService
            .scheduleAtFixedRate( collector, initialSleep, collectionIntervalSeconds, TimeUnit.SECONDS );



    logger.info("Adding Job: {}", scheduleName);
}
Project: GoPush    File: NodeServerInfoWatchdog.java
@PostConstruct
    public void init() {

        scheduledExecutorService = new ScheduledThreadPoolExecutor(1,
                new BasicThreadFactory.Builder().namingPattern("SendNodeServerInfo-schedule-pool-%d").daemon(true).build());
        scheduledExecutorService.scheduleAtFixedRate(() ->
                {
                    // Write the load information to ZK
                    if (!CollectionUtils.isEmpty(dataCenterChannelStore.getAllChannels())) {
                        dataCenterChannelStore.getAllChannels().stream().forEach(e -> {
                            log.info("channel id:{}, {}", e.id(), e);
                        });
                    }
                    applicationEventPublisher.publishEvent(
                            NodeServerInfoEvent.builder()
                                    .name(goPushNodeServerConfig.getName())
                                    .nodeServerInfo(watch())
                                    .build());
//                  Written to zk - sending NodeInfoReq is actually not needed
                    nodeSender.send(NodeInfoReq.builder().build());
                }
                , delay, delay, TimeUnit.MILLISECONDS);

    }
Project: LiteGraph    File: GremlinExecutor.java
public GremlinExecutor create() {
    final BasicThreadFactory threadFactory = new BasicThreadFactory.Builder().namingPattern("gremlin-executor-default-%d").build();

    final AtomicBoolean poolCreatedByBuilder = new AtomicBoolean();
    final AtomicBoolean suppliedExecutor = new AtomicBoolean(true);
    final AtomicBoolean suppliedScheduledExecutor = new AtomicBoolean(true);

    final ExecutorService es = Optional.ofNullable(executorService).orElseGet(() -> {
        poolCreatedByBuilder.set(true);
        suppliedExecutor.set(false);
        return Executors.newScheduledThreadPool(4, threadFactory);
    });
    executorService = es;

    final ScheduledExecutorService ses = Optional.ofNullable(scheduledExecutorService).orElseGet(() -> {
        // if the pool was created by the builder and we need another, just re-use it; otherwise create
        // a new one
        suppliedScheduledExecutor.set(false);
        return (poolCreatedByBuilder.get()) ?
                (ScheduledExecutorService) es : Executors.newScheduledThreadPool(4, threadFactory);
    });
    scheduledExecutorService = ses;

    return new GremlinExecutor(this, suppliedExecutor.get(), suppliedScheduledExecutor.get());
}
Project: MyTv    File: Main.java
/**
 * Initialize application data
 */
private static void initDbData(final MyTvData data) {
    final TvService tvService = new TvServiceImpl();
    makeCache(tvService);

    // Start the crawl task
    ExecutorService executorService = Executors
            .newSingleThreadExecutor(new BasicThreadFactory.Builder()
                    .namingPattern("Mytv_Crawl_Task_%d").build());
    executorService.execute(new Runnable() {

        @Override
        public void run() {
            runCrawlTask(data, tvService);
        }
    });
    executorService.shutdown();
    // Start the daily scheduled task
    logger.info("create everyday crawl task.");
    createEverydayCron(data, tvService);
}
Project: metrics-jvm-nonaccumulating    File: NonAccumulatingGarbageCollectorMetricSet.java
/**
 * Constructor sets up the scheduled executor service that runs a background task to
 * calculate non-accumulating gauge readings at periodic intervals.
 *
 * @param garbageCollectorMetricSet a metric set that collects counts and times of garbage collections
 * @param interval the time interval over which to calculate non-accumulating gauge readings
 *                 for all the gauges in {@code garbageCollectorMetricSet}
 * @param scheduledExecutorService scheduled executor service that runs the task to calculate
 *                                 non-accumulating gauge readings at a frequency determined by
 *                                 {@code interval}.
 */
public NonAccumulatingGarbageCollectorMetricSet(
        GarbageCollectorMetricSet garbageCollectorMetricSet, long interval,
        ScheduledExecutorService scheduledExecutorService) {
    this.garbageCollectorMetricSet = garbageCollectorMetricSet;
    this.interval = interval;
    previousValues = new HashMap<String, Long>();
    nonAccumulatingValues = new ConcurrentHashMap<String, Long>();
    if (scheduledExecutorService == null) {
        BasicThreadFactory basicThreadFactory = new BasicThreadFactory.Builder()
                .namingPattern("metrics-gc-stats-update-%d")
                .daemon(false)
                .priority(Thread.NORM_PRIORITY)
                .build();
        this.scheduledExecutorService = Executors.newSingleThreadScheduledExecutor(basicThreadFactory);
    } else {
        this.scheduledExecutorService = scheduledExecutorService;
    }
    scheduleBackgroundCollectionOfNonAccumulatingValues();
}
Project: msb-java    File: ConsumerExecutorFactoryImpl.java
@Override
public ExecutorService createConsumerThreadPool(int numberOfThreads, int queueCapacity) {
    BasicThreadFactory threadFactory = new BasicThreadFactory.Builder()
            .namingPattern("msb-consumer-thread-%d")
            .build();

    BlockingQueue<Runnable> queue;
    if (queueCapacity == QUEUE_SIZE_UNLIMITED) {
        queue = new LinkedBlockingQueue<>();
    } else {
        queue = new ArrayBlockingQueue<>(queueCapacity);
    }

    return new ThreadPoolExecutor(numberOfThreads, numberOfThreads,
            0L, TimeUnit.MILLISECONDS,
            queue,
            threadFactory);
}
Project: tinkerpop    File: GremlinExecutor.java
public GremlinExecutor create() {
    final BasicThreadFactory threadFactory = new BasicThreadFactory.Builder().namingPattern("gremlin-executor-default-%d").build();

    final AtomicBoolean poolCreatedByBuilder = new AtomicBoolean();
    final AtomicBoolean suppliedExecutor = new AtomicBoolean(true);
    final AtomicBoolean suppliedScheduledExecutor = new AtomicBoolean(true);

    final ExecutorService es = Optional.ofNullable(executorService).orElseGet(() -> {
        poolCreatedByBuilder.set(true);
        suppliedExecutor.set(false);
        return Executors.newScheduledThreadPool(4, threadFactory);
    });
    executorService = es;

    final ScheduledExecutorService ses = Optional.ofNullable(scheduledExecutorService).orElseGet(() -> {
        // if the pool was created by the builder and we need another, just re-use it; otherwise create
        // a new one
        suppliedScheduledExecutor.set(false);
        return (poolCreatedByBuilder.get()) ?
                (ScheduledExecutorService) es : Executors.newScheduledThreadPool(4, threadFactory);
    });
    scheduledExecutorService = ses;

    return new GremlinExecutor(this, suppliedExecutor.get(), suppliedScheduledExecutor.get());
}
Project: mandrel    File: FrontierContainer.java
public FrontierContainer(Job job, Accumulators accumulators, MandrelClient client) {
    super(accumulators, job, client);
    context.setDefinition(job);

    // Init stores
    MetadataStore metadatastore = job.getDefinition().getStores().getMetadataStore().build(context);
    metadatastore.init();
    MetadataStores.add(job.getId(), metadatastore);

    // Init frontier
    frontier = job.getDefinition().getFrontier().build(context);

    // Revisitor
    BasicThreadFactory threadFactory = new BasicThreadFactory.Builder().namingPattern("frontier-" + job.getId() + "-%d").daemon(true)
            .priority(Thread.MAX_PRIORITY).build();
    executor = Executors.newFixedThreadPool(1, threadFactory);
    revisitor = new Revisitor(frontier, metadatastore);
    executor.submit(revisitor);

    current.set(ContainerStatus.INITIATED);
}
Project: bifroest    File: Fetcher.java
public Fetcher( E environment, Class<I> inputType, Class<U> unitType ) {
    this.environment = environment;
    handlerCreator = new SourceHandlerCreator<>( inputType, unitType );
    this.inputType = inputType;
    this.unitType = unitType;

    this.optimizer = ThreadCountOptimizer.withDefaultStrategies( environment );
    this.sourceWatchdogInterval = ( new DurationParser() ).parse(
            environment.getConfiguration().getJSONObject( "fetcher" ).getString( "source-watchdog-interval" ) );

    ThreadFactory threads = new BasicThreadFactory.Builder().namingPattern( "FetchThread[initial]" ).build();
    fetchPool = new ThreadPoolExecutor(
            1, 1, // thread count is set to the real initial value on the first run()
            0L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<Runnable>(),
            threads
    );
}
Project: lens    File: QueryExecutionServiceImpl.java
private void startQueryExpirer() {
  ThreadFactory factory = new BasicThreadFactory.Builder()
    .namingPattern("QueryExpirer-%d")
    .daemon(true)
    .priority(Thread.NORM_PRIORITY)
    .build();
  queryExpirer = Executors.newSingleThreadScheduledExecutor(factory);
  long expiryRunInterval = conf.getLong(QUERY_EXPIRY_INTERVAL_MILLIS, DEFAULT_QUERY_EXPIRY_INTERVAL_MILLIS);
  queryExpirer.scheduleWithFixedDelay(new Runnable() {
    @Override
    public void run() {
      try {
        expireQueries();
      } catch (Exception e) {
        incrCounter(QUERY_EXPIRY_FAILURE_COUNTER);
        log.error("Unable to expire queries", e);
      }
    }
  }, expiryRunInterval, expiryRunInterval, TimeUnit.MILLISECONDS);
  log.info("Enabled periodic exipry of queries at {} millis interval", expiryRunInterval);
}
Project: fastmq    File: ZkOffsetStorageImpl.java
public ZkOffsetStorageImpl(LogInfoStorage logInfoStorage,
    AsyncCuratorFramework asyncCuratorFramework) {
    this.logInfoStorage = logInfoStorage;
    this.asyncCuratorFramework = asyncCuratorFramework;
    offsetThreadPool = Executors.newSingleThreadExecutor(
        new BasicThreadFactory.Builder().uncaughtExceptionHandler((t, e) -> logger
            .error("Uncaught exception of thread :" + t.getClass().getName(), e))
            .build());
}
Project: csap-core    File: HostStatusManager.java
public HostStatusManager(
        Application csapApplication,
        int numberOfThreads,
        ArrayList<String> hostsToQuery ) {

    this.csapApp = csapApplication;

    csapApp.loadCacheFromDisk( getAlertHistory(), this.getClass().getSimpleName() );

    alertThrottleTimer = CsapSimpleCache.builder(
        csapApplication.getCsapCoreService().getAlerts().getThrottle().getFrequency(),
        CsapSimpleCache.parseTimeUnit(
            csapApplication.getCsapCoreService().getAlerts().getThrottle().getTimeUnit(),
            TimeUnit.HOURS ),
        HostStatusManager.class,
        "Global Alert Throttle" );

    logger.warn( "Constructed with thread count: {}, connectionTimeout: {} Host Count: {}, \n Hosts: {}, \n Alert: {}",
        numberOfThreads, this.connectionTimeoutSeconds, hostsToQuery.size(), hostsToQuery,
        csapApplication.getCsapCoreService().getAlerts() );

    BasicThreadFactory statusFactory = new BasicThreadFactory.Builder()
        .namingPattern( "CsapHostStatus-%d" )
        .daemon( true )
        .priority( Thread.NORM_PRIORITY )
        .build();

    hostStatusWorkers = Executors.newFixedThreadPool( numberOfThreads, statusFactory );

    hostStatusService = new ExecutorCompletionService<AgentStatus>( hostStatusWorkers );


    hostList = new CopyOnWriteArrayList<String>( hostsToQuery );

    initialize_refresh_worker() ;
    restartHostRefreshTimer( 3 );

}
Project: csap-core    File: HostStatusManager.java
private void initialize_refresh_worker() {

    BasicThreadFactory schedFactory = new BasicThreadFactory.Builder()
            .namingPattern( "CsapHostJobsScheduler-%d" )
            .daemon( true )
            .priority( Thread.NORM_PRIORITY )
            .build();

    hostStatusScheduler = Executors.newScheduledThreadPool( 1, schedFactory );
}
Project: GoPush    File: DataCenterInfoWatchdog.java
@PostConstruct
public void init() {
    scheduledExecutorService = new ScheduledThreadPoolExecutor(1,
            new BasicThreadFactory.Builder().namingPattern("SendDataCenterInfo-schedule-pool-%d").daemon(true).build());
    scheduledExecutorService.scheduleAtFixedRate(() -> applicationEventPublisher.publishEvent(DataCenterInfoEvent.builder()
            .name(goPushDataCenterConfig.getName())
            .dataCenterInfo(watch())
            .build()), delay, delay, TimeUnit.MILLISECONDS);
}
Project: fpm    File: GenerateFullPbf.java
public GenerateFullPbf(OsmMerger osmMerger, String inputDirectoryPath, String outputDirectoryPath, String outputFileName, int nbThreads) {
    this.osmMerger = osmMerger;
    this.inputDirectoryPath = inputDirectoryPath;
    this.outputDirectoryPath = outputDirectoryPath;
    this.outputFileName = outputFileName;
    BasicThreadFactory threadFactory = new Builder().namingPattern("mappy-GenerateFullPbf-%d").daemon(false).build();
    executorService = new ThreadPoolExecutor(nbThreads, nbThreads, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>(), threadFactory);
}
Project: smarti    File: ConversationIndexer.java
@Autowired
public ConversationIndexer(SolrCoreContainer solrServer, StoreService storeService){
    this.solrServer = solrServer;
    this.storeService = storeService;
    this.indexerPool = Executors.newSingleThreadExecutor(
            new BasicThreadFactory.Builder().namingPattern("conversation-indexing-thread-%d").daemon(true).build());
}
Project: smarti    File: ProcessingConfiguration.java
public ExecutorService createExecuterService(){
    return Executors.newFixedThreadPool(numThreads <= 0 ? DEFAULT_NUM_THREADS : numThreads, 
            new BasicThreadFactory.Builder()
            .daemon(true)
            .namingPattern(THREAD_NAME)
            .build());
}
Project: stallion-core    File: HealthTracker.java
public static void start() {
    BasicThreadFactory factory = new BasicThreadFactory.Builder()
            .namingPattern("stallion-health-tracker-thread-%d")
            .build();
    instance().timedChecker = new ScheduledThreadPoolExecutor(2, factory);
    instance().timedChecker.scheduleAtFixedRate(instance().metrics, 0, 1, TimeUnit.MINUTES);
    instance().timedChecker.scheduleAtFixedRate(instance().dailyMetrics, 0, 24*60, TimeUnit.MINUTES);
}
Project: stallion-core    File: AsyncCoordinator.java
protected AsyncCoordinator() {
    threads = new ArrayList<>();
    int poolSize = 4;
    BasicThreadFactory factory = new BasicThreadFactory.Builder()
            .namingPattern("stallion-async-task-runnable-%d")
            .build();
    // Create a fixed-size executor service (poolSize threads) for async task execution
    pool = Executors.newFixedThreadPool(poolSize, factory);

}
Project: stallion-core    File: JobCoordinator.java
private JobCoordinator() {
    queue = new PriorityBlockingQueue<>();
    BasicThreadFactory factory = new BasicThreadFactory.Builder()
            .namingPattern("stallion-job-execution-thread-%d")
            .build();
    // Create a fixed-size executor service (25 threads) for job execution
    pool = Executors.newFixedThreadPool(25, factory);
    registeredJobs = new HashSet<>();
}
Project: AwesomeJavaLibraryExamples    File: ExampleThreading.java
public static void main(String[] args) throws ExecutionException, InterruptedException
{
   List<Future<?>> randomTasks = new LinkedList<>();
   ExecutorService executorService = Executors.newCachedThreadPool(
      new BasicThreadFactory.Builder()
         .daemon(true)
         .namingPattern("ExampleThread")
         .build()
   );

   try
   {
      Future<String> result = executorService.submit(new HelloCallable());

      System.out.printf("Result %s\n", result.get()); //will wait until the submitted job is done

      for(int count = 0; count < 10; count++)
      {
         randomTasks.add(executorService.submit(new RandomTask(count)));
      }

      waitForTheFuture(randomTasks);
   }
   finally
   {
      executorService.shutdown(); //make sure you shut it down
   }
}
Project: lernplattform-crawler    File: DownloadScheduler.java
private void initialize() {
    BasicThreadFactory factory = new BasicThreadFactory.Builder()
            .namingPattern("download-slave-%d")
            .build();
    executor = Executors.newFixedThreadPool(SLAVE_POOL_SIZE, factory);
    statistics.setDownloadFolderLocation(DOWNLOAD_DIRECTORY_LOCATION);
    safekeeper.loadIDsFromFile();
    stopWatch.start();
}
Project: LiteGraph    File: Cluster.java
private Manager(final Builder builder) {
    this.loadBalancingStrategy = builder.loadBalancingStrategy;
    this.authProps = builder.authProps;
    this.contactPoints = builder.getContactPoints();

    connectionPoolSettings = new Settings.ConnectionPoolSettings();
    connectionPoolSettings.maxInProcessPerConnection = builder.maxInProcessPerConnection;
    connectionPoolSettings.minInProcessPerConnection = builder.minInProcessPerConnection;
    connectionPoolSettings.maxSimultaneousUsagePerConnection = builder.maxSimultaneousUsagePerConnection;
    connectionPoolSettings.minSimultaneousUsagePerConnection = builder.minSimultaneousUsagePerConnection;
    connectionPoolSettings.maxSize = builder.maxConnectionPoolSize;
    connectionPoolSettings.minSize = builder.minConnectionPoolSize;
    connectionPoolSettings.maxWaitForConnection = builder.maxWaitForConnection;
    connectionPoolSettings.maxWaitForSessionClose = builder.maxWaitForSessionClose;
    connectionPoolSettings.maxContentLength = builder.maxContentLength;
    connectionPoolSettings.reconnectInitialDelay = builder.reconnectInitialDelay;
    connectionPoolSettings.reconnectInterval = builder.reconnectInterval;
    connectionPoolSettings.resultIterationBatchSize = builder.resultIterationBatchSize;
    connectionPoolSettings.enableSsl = builder.enableSsl;
    connectionPoolSettings.trustCertChainFile = builder.trustCertChainFile;
    connectionPoolSettings.keyCertChainFile = builder.keyCertChainFile;
    connectionPoolSettings.keyFile = builder.keyFile;
    connectionPoolSettings.keyPassword = builder.keyPassword;
    connectionPoolSettings.channelizer = builder.channelizer;

    sslContextOptional = Optional.ofNullable(builder.sslContext);

    this.factory = new Factory(builder.nioPoolSize);
    this.serializer = builder.serializer;
    this.executor = Executors.newScheduledThreadPool(builder.workerPoolSize,
            new BasicThreadFactory.Builder().namingPattern("gremlin-driver-worker-%d").build());
}
Project: flowable-engine    File: DefaultAsyncJobExecutor.java
protected void initAsyncJobExecutionThreadPool() {
    if (threadPoolQueue == null) {
        LOGGER.info("Creating thread pool queue of size {}", queueSize);
        threadPoolQueue = new ArrayBlockingQueue<>(queueSize);
    }

    if (executorService == null) {
        LOGGER.info("Creating executor service with corePoolSize {}, maxPoolSize {} and keepAliveTime {}", corePoolSize, maxPoolSize, keepAliveTime);

        BasicThreadFactory threadFactory = new BasicThreadFactory.Builder().namingPattern("flowable-async-job-executor-thread-%d").build();
        executorService = new ThreadPoolExecutor(corePoolSize, maxPoolSize, keepAliveTime, TimeUnit.MILLISECONDS, threadPoolQueue, threadFactory);
    }
}
Project: msb-java    File: TimeoutManager.java
private RunOnShutdownScheduledExecutorDecorator createTimeoutExecutorDecorator(int threadPoolSize) {
    BasicThreadFactory threadFactory = new BasicThreadFactory.Builder()
            .namingPattern("timer-provider-thread-%d")
            .build();

    return new RunOnShutdownScheduledExecutorDecorator("timeout manager", threadPoolSize, threadFactory);
}
Project: tinkerpop    File: Cluster.java
private Manager(final Builder builder) {
    validateBuilder(builder);

    this.loadBalancingStrategy = builder.loadBalancingStrategy;
    this.authProps = builder.authProps;
    this.contactPoints = builder.getContactPoints();

    connectionPoolSettings = new Settings.ConnectionPoolSettings();
    connectionPoolSettings.maxInProcessPerConnection = builder.maxInProcessPerConnection;
    connectionPoolSettings.minInProcessPerConnection = builder.minInProcessPerConnection;
    connectionPoolSettings.maxSimultaneousUsagePerConnection = builder.maxSimultaneousUsagePerConnection;
    connectionPoolSettings.minSimultaneousUsagePerConnection = builder.minSimultaneousUsagePerConnection;
    connectionPoolSettings.maxSize = builder.maxConnectionPoolSize;
    connectionPoolSettings.minSize = builder.minConnectionPoolSize;
    connectionPoolSettings.maxWaitForConnection = builder.maxWaitForConnection;
    connectionPoolSettings.maxWaitForSessionClose = builder.maxWaitForSessionClose;
    connectionPoolSettings.maxContentLength = builder.maxContentLength;
    connectionPoolSettings.reconnectInterval = builder.reconnectInterval;
    connectionPoolSettings.resultIterationBatchSize = builder.resultIterationBatchSize;
    connectionPoolSettings.enableSsl = builder.enableSsl;
    connectionPoolSettings.trustCertChainFile = builder.trustCertChainFile;
    connectionPoolSettings.keyCertChainFile = builder.keyCertChainFile;
    connectionPoolSettings.keyFile = builder.keyFile;
    connectionPoolSettings.keyPassword = builder.keyPassword;
    connectionPoolSettings.keepAliveInterval = builder.keepAliveInterval;
    connectionPoolSettings.channelizer = builder.channelizer;

    sslContextOptional = Optional.ofNullable(builder.sslContext);

    nioPoolSize = builder.nioPoolSize;
    workerPoolSize = builder.workerPoolSize;
    port = builder.port;

    this.factory = new Factory(builder.nioPoolSize);
    this.serializer = builder.serializer;
    this.executor = Executors.newScheduledThreadPool(builder.workerPoolSize,
            new BasicThreadFactory.Builder().namingPattern("gremlin-driver-worker-%d").build());
}
Project: bifroest    File: ServerThread.java
public ServerThread( E environment, JSONObject config, ThrottleControl throttle, IncomingConnectionHandlerFactory<E> connectionHandlerFactory ) throws IOException {
    this.name = config.getString( "name" );
    setName( "Server-" + name );
    this.server = new ServerSocket( config.getInt( "port" ) );
    this.maximumPoolSize = config.getInt( "poolsize" );
    ThreadFactory threads = new BasicThreadFactory.Builder().namingPattern( name + "-%d" ).build();
    this.queue = new LinkedBlockingQueue<Runnable>();
    threadPool = new ThreadPoolExecutor(
            1, 1, // thread count is set to the real initial value on the first run()
            0L, TimeUnit.MILLISECONDS,
            queue,
            threads
    );
    this.throttle = throttle;
    this.connectionHandlerFactory = connectionHandlerFactory;

    JSONObject monitor = config.optJSONObject( "monitor" );
    if ( monitor != null ) {
        long warnLimit = JSONUtils.getTime( "warnlimit", monitor, TIME_UNIT.SECOND );
        long frequency = JSONUtils.getTime( "frequency", monitor, TIME_UNIT.SECOND );
        environment.taskRunner().runRepeated( new CommandMonitor( name, warnLimit ), "Command Monitor", Duration.ZERO, Duration.ofSeconds( frequency ), false );
    }

    SimpleProgramStateTracker.forContext( CLIENT_TIMING + name )
            .storingIn( "Server.ClientTiming." + name )
            .build();
}
Project: ontrack    File: JobConfig.java
@Bean
public ScheduledExecutorService jobExecutorService() {
    return Executors.newScheduledThreadPool(
            ontrackConfigProperties.getJobs().getPoolSize(),
            new BasicThreadFactory.Builder()
                    .daemon(true)
                    .namingPattern("job-%s")
                    .build()
    );
}
Project: khronus-java-client    File: BoundedBuffer.java
public BoundedBuffer(KhronusConfig config) {
    this.measures = new LinkedBlockingQueue<Measure>(config.getMaximumMeasures());
    this.sender = new Sender(config);
    this.jsonSerializer = new JsonSerializer(config.getSendIntervalMillis(), config.getApplicationName());

    BasicThreadFactory threadFactory = new BasicThreadFactory.Builder().namingPattern("KhronusClientSender").build();
    this.executor = Executors.newScheduledThreadPool(1, threadFactory);
    this.executor.scheduleWithFixedDelay(send(), config.getSendIntervalMillis(), config.getSendIntervalMillis(), TimeUnit.MILLISECONDS);

    LOG.debug("Buffer to store metrics created [MaximumMeasures: {}; SendIntervalMillis: {}]",
            config.getMaximumMeasures(), config.getSendIntervalMillis());
}
Project: lens    File: LensServices.java
public synchronized void start() {
  if (getServiceState() != STATE.STARTED) {
    super.start();
  }

  if (!isServerStatePersistenceEnabled) {
    log.info("Server restart is not enabled. Not persisting lens server state");
  } else {
    ThreadFactory factory = new BasicThreadFactory.Builder()
      .namingPattern("Lens-server-snapshotter-Thread-%d")
      .daemon(true)
      .priority(Thread.NORM_PRIORITY)
      .build();
    serverSnapshotScheduler = Executors.newSingleThreadScheduledExecutor(factory);
    serverSnapshotScheduler.scheduleWithFixedDelay(new Runnable() {
      @Override
      public void run() {
        try {
          final String runId = UUID.randomUUID().toString();
          logSegregationContext.setLogSegregationId(runId);
          persistLensServiceState();
          log.info("SnapShot of Lens Services created");
        } catch (Exception e) {
          incrCounter(SERVER_STATE_PERSISTENCE_ERRORS);
          log.error("Unable to persist lens server state", e);
        }
      }
    }, serverStatePersistenceInterval, serverStatePersistenceInterval, TimeUnit.MILLISECONDS);
    log.info("Enabled periodic persistence of lens server state at {} millis interval",
      serverStatePersistenceInterval);
  }
}
Project: lens    File: QueryExecutionServiceImpl.java
private void startQueryCancellationPool() {
  ThreadFactory factory = new BasicThreadFactory.Builder()
    .namingPattern("query-cancellation-pool-Thread-%d")
    .priority(Thread.NORM_PRIORITY)
    .build();
  // Using fixed values for the pool: corePoolSize = maximumPoolSize = 3 and keepAliveTime = 60 secs
  queryCancellationPool = new ThreadPoolExecutor(3, 3, 60, TimeUnit.SECONDS, new LinkedBlockingQueue(), factory);
}
Project: lens    File: EventServiceImpl.java
@Override
public synchronized void init(HiveConf hiveConf) {
  int numProcs = Runtime.getRuntime().availableProcessors();
  ThreadFactory factory = new BasicThreadFactory.Builder()
    .namingPattern("Event_Service_Thread-%d")
    .daemon(false)
    .priority(Thread.NORM_PRIORITY)
    .build();
  eventHandlerPool = Executors.newFixedThreadPool(hiveConf.getInt(LensConfConstants.EVENT_SERVICE_THREAD_POOL_SIZE,
    numProcs), factory);
  super.init(hiveConf);
}
Project: lens    File: AsyncEventListener.java
/**
 * Create an asynchronous event listener which uses a thread pool to process events.
 *
 * @param poolSize       size of the event processing pool
 * @param timeOutSeconds timeout in seconds after which an idle thread is destroyed
 * @param isDaemon       whether the threads used to process events should be daemon threads;
 *                       if false, the implementation should call stop()
 *                       to stop the thread pool
 */
public AsyncEventListener(int poolSize, long timeOutSeconds, final boolean isDaemon) {
  eventQueue = new LinkedBlockingQueue<>();

  ThreadFactory factory = new BasicThreadFactory.Builder()
    .namingPattern(getName()+"_AsyncThread-%d")
    .daemon(isDaemon)
    .priority(Thread.NORM_PRIORITY)
    .build();
  // fixed pool with min and max equal to poolSize
  processor = new ThreadPoolExecutor(poolSize, poolSize, timeOutSeconds, TimeUnit.SECONDS, eventQueue, factory);
  processor.allowCoreThreadTimeOut(true);
}
Project: MediaCrawler    File: BlockingThreadPoolExecutor.java
public BlockingThreadPoolExecutor(int corePoolSize, String poolName,
        BlockingQueue<Runnable> workQueue) {
    super(corePoolSize, corePoolSize, 0, TimeUnit.SECONDS, workQueue,
            new BasicThreadFactory.Builder()
                    .namingPattern(poolName + "-%d")
                    .priority(Thread.MAX_PRIORITY).build(), defaultHandler);
}
Project: csap-core    File: ServiceJobRunner.java
public ServiceJobRunner( Application csapApplication ) {

        this.csapApplication = csapApplication;

        long initialDelay = 5;
        long interval = 60;

        TimeUnit logRotationTimeUnit = TimeUnit.MINUTES;

        if ( Application.isRunningOnDesktop() ) {
            logger.warn( "Setting DESKTOP to seconds" );
            logRotationTimeUnit = TimeUnit.SECONDS;
        }



        logger.warn(
            "Creating job schedule thread, invoked: {} {}.",
            interval, logRotationTimeUnit );

        BasicThreadFactory schedFactory = new BasicThreadFactory.Builder()
                .namingPattern( "CsapLogRotation-%d" )
                .daemon( true )
                .priority( Thread.NORM_PRIORITY )
                .build();

        jobTimerService = Executors
                .newScheduledThreadPool( 1, schedFactory );

        ScheduledFuture<?> jobHandle = jobTimerService
            .scheduleAtFixedRate(
                () -> findAndRunActiveJobs(),
                initialDelay,
                interval,
                logRotationTimeUnit );


        logger.warn(
            "Creating job runner thread pool: {} threads.  Maximum jobs queued: {}",
            MAX_JOBS_CONCURRENT, MAX_JOBS_QUEUED );

        BasicThreadFactory jobRunnerThreadFactory = new BasicThreadFactory.Builder()
            .namingPattern( "CsapServiceJobRunner-%d" )
            .daemon( true )
            .priority( Thread.NORM_PRIORITY + 1 )
            .build();
        //
        jobRunnerQueue = new ArrayBlockingQueue<>( MAX_JOBS_QUEUED );

        jobRunnerService = new ThreadPoolExecutor( 
            MAX_JOBS_CONCURRENT, MAX_JOBS_CONCURRENT,
            30, TimeUnit.SECONDS,
            jobRunnerQueue,
            jobRunnerThreadFactory );
    }
Project: elastic-job-cloud    File: ExecutorServiceObject.java
public ExecutorServiceObject(final String namingPattern, final int threadSize) {
    workQueue = new LinkedBlockingQueue<>();
    threadPoolExecutor = new ThreadPoolExecutor(threadSize, threadSize, 5L, TimeUnit.MINUTES, workQueue, 
            new BasicThreadFactory.Builder().namingPattern(Joiner.on("-").join(namingPattern, "%s")).build());
    threadPoolExecutor.allowCoreThreadTimeOut(true);
}
Project: sponge    File: DefaultThreadPoolManager.java
protected BasicThreadFactory createThreadFactory(Object named) {
    return new BasicThreadFactory.Builder().namingPattern(named.toString() + "-%d").build();
}
Project: rug-resolver    File: MdcThreadPoolExecutor.java
public static ExecutorService newFixedThreadPool(int nThreads, String name) {
    BasicThreadFactory factory = new BasicThreadFactory.Builder().namingPattern(name + "-%d")
            .daemon(true).priority(Thread.MAX_PRIORITY).build();
    return new MdcThreadPoolExecutor(nThreads, nThreads, 0L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<Runnable>(), factory);
}