Example source code for the Java class com.google.common.base.Stopwatch
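
All of the project snippets below use the same small Guava Stopwatch API: create a stopwatch (usually already running), do the work, then read the elapsed time in a chosen unit or log the stopwatch itself for a human-readable duration. A minimal, self-contained sketch of that API (not taken from any of the projects below):

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class StopwatchBasics {
    public static void main(String[] args) throws InterruptedException {
        // createStarted() returns a Stopwatch that is already running.
        Stopwatch stopwatch = Stopwatch.createStarted();

        Thread.sleep(120); // stand-in for the work being measured

        stopwatch.stop();

        // elapsed(TimeUnit) converts the measured duration; toString() produces a
        // human-readable form such as "120.3 ms", which is what the snippets below
        // rely on when they pass the Stopwatch itself to a log statement.
        System.out.println("elapsed ms: " + stopwatch.elapsed(TimeUnit.MILLISECONDS));
        System.out.println("as string:  " + stopwatch);

        // reset() clears the accumulated time; start() resumes measuring.
        stopwatch.reset().start();
    }
}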

Project: elastic-db-tools-for-java    File: RangeShardMap.java
/**
 * Removes a range mapping.
 *
 * @param mapping
 *            Mapping being removed.
 * @param mappingLockToken
 *            An instance of {@link MappingLockToken}
 */
public void deleteMapping(RangeMapping mapping,
        MappingLockToken mappingLockToken) {
    ExceptionUtils.disallowNullArgument(mapping, "mapping");
    ExceptionUtils.disallowNullArgument(mappingLockToken, "mappingLockToken");

    try (ActivityIdScope activityIdScope = new ActivityIdScope(UUID.randomUUID())) {
        log.info("DeleteMapping Start; Shard: {}", mapping.getShard().getLocation());

        Stopwatch stopwatch = Stopwatch.createStarted();

        this.rsm.remove(mapping, mappingLockToken.getLockOwnerId());

        stopwatch.stop();

        log.info("DeleteMapping Complete; Shard: {}; Duration: {}", mapping.getShard().getLocation(), stopwatch.elapsed(TimeUnit.MILLISECONDS));
    }
}
Project: hashsdn-controller    File: IntegrationTestKit.java
public static void verifyShardState(final AbstractDataStore datastore, final String shardName,
        final Consumer<OnDemandShardState> verifier) throws Exception {
    ActorContext actorContext = datastore.getActorContext();

    Future<ActorRef> future = actorContext.findLocalShardAsync(shardName);
    ActorRef shardActor = Await.result(future, Duration.create(10, TimeUnit.SECONDS));

    AssertionError lastError = null;
    Stopwatch sw = Stopwatch.createStarted();
    while (sw.elapsed(TimeUnit.SECONDS) <= 5) {
        OnDemandShardState shardState = (OnDemandShardState)actorContext
                .executeOperation(shardActor, GetOnDemandRaftState.INSTANCE);

        try {
            verifier.accept(shardState);
            return;
        } catch (AssertionError e) {
            lastError = e;
            Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
        }
    }

    throw lastError;
}
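
The snippet above, and several others below, uses the same polling idiom: retry an assertion until it passes or a Stopwatch-based deadline expires. A generic sketch of that idiom, with illustrative names that belong to none of these projects:

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;
import com.google.common.util.concurrent.Uninterruptibles;

final class AwaitAssert {

    /** Retries the given check every 50 ms until it passes or the deadline elapses. */
    static void awaitAssertion(long timeout, TimeUnit unit, Runnable check) {
        AssertionError lastError = null;
        Stopwatch sw = Stopwatch.createStarted();
        while (sw.elapsed(unit) <= timeout) {
            try {
                check.run();
                return;
            } catch (AssertionError e) {
                lastError = e;
                Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
            }
        }
        throw lastError != null ? lastError : new AssertionError("check never passed");
    }
}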
Project: hashsdn-controller    File: TestActorFactory.java
@SuppressWarnings("checkstyle:IllegalCatch")
private void verifyActorReady(ActorRef actorRef) {
    // Sometimes we see messages go to dead letters soon after creation - it seems the actor isn't quite
    // in a state yet to receive messages or isn't actually created yet. This seems to happen with
    // actorSelection so, to alleviate it, we use an actorSelection and send an Identify message with
    // retries to ensure it's ready.

    Timeout timeout = new Timeout(100, TimeUnit.MILLISECONDS);
    Throwable lastError = null;
    Stopwatch sw = Stopwatch.createStarted();
    while (sw.elapsed(TimeUnit.SECONDS) <= 10) {
        try {
            ActorSelection actorSelection = system.actorSelection(actorRef.path().toString());
            Future<Object> future = Patterns.ask(actorSelection, new Identify(""), timeout);
            ActorIdentity reply = (ActorIdentity)Await.result(future, timeout.duration());
            Assert.assertNotNull("Identify returned null", reply.getRef());
            return;
        } catch (Exception | AssertionError e) {
            Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
            lastError = e;
        }
    }

    throw new RuntimeException(lastError);
}
Project: mapr-music    File: AlbumDao.java
/**
 * Returns number of albums according to the specified language.
 *
 * @param language language code.
 * @return number of albums with specified language code.
 */
public long getTotalNumByLanguage(String language) {
    return processStore((connection, store) -> {

        Stopwatch stopwatch = Stopwatch.createStarted();

        QueryCondition languageEqualsCondition = connection.newCondition()
                .is("language", QueryCondition.Op.EQUAL, language)
                .build();

        Query query = connection.newQuery()
                .select("_id")
                .where(languageEqualsCondition)
                .build();

        DocumentStream documentStream = store.findQuery(query);
        long totalNum = 0;
        for (Document ignored : documentStream) {
            totalNum++;
        }

        log.debug("Counting '{}' albums by language '{}' took {}", totalNum, language, stopwatch);

        return totalNum;
    });
}
Project: dremio-oss    File: EventLoopCloseable.java
@Override
public void close() throws Exception {
  try {
    Stopwatch watch = Stopwatch.createStarted();
    // this takes 1s to complete
    // known issue: https://github.com/netty/netty/issues/2545
    eventLoop.shutdownGracefully(0, 0, TimeUnit.SECONDS);
    eventLoop.terminationFuture().sync();

    long elapsed = watch.elapsed(MILLISECONDS);
    if (elapsed > 1200) {
      logger.info("closed eventLoopGroups in " + elapsed + " ms");
    }
  } catch (final InterruptedException e) {
    logger.warn("Failure while shutting down bootstrap context event loops.", e);

    // Preserve evidence that the interruption occurred so that code higher up on the call stack can learn of the
    // interruption and respond to it if it wants to.
    Thread.currentThread().interrupt();
  }
}
Project: hashsdn-controller    File: AbstractEntityOwnershipTest.java
protected void verifyNodeRemoved(YangInstanceIdentifier path,
        Function<YangInstanceIdentifier,NormalizedNode<?,?>> reader) {
    AssertionError lastError = null;
    Stopwatch sw = Stopwatch.createStarted();
    while (sw.elapsed(TimeUnit.MILLISECONDS) <= 5000) {
        try {
            NormalizedNode<?, ?> node = reader.apply(path);
            Assert.assertNull("Node was not removed at path: " + path, node);
            return;
        } catch (AssertionError e) {
            lastError = e;
            Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
        }
    }

    throw lastError;
}
Project: mapr-music    File: ArtistRateDao.java
/**
 * Returns list of Artist rates by user identifier.
 *
 * @param userId user's identifier.
 * @return list of Artist rates.
 */
public List<ArtistRate> getByUserId(String userId) {
    return processStore((connection, store) -> {

        Stopwatch stopwatch = Stopwatch.createStarted();
        Query query = connection.newQuery().where(
                connection.newCondition()
                        .is("user_id", QueryCondition.Op.EQUAL, userId)
                        .build()
        ).build();

        // Fetch all OJAI Documents from this store according to the built query
        DocumentStream documentStream = store.findQuery(query);
        List<ArtistRate> rates = new ArrayList<>();
        for (Document document : documentStream) {
            ArtistRate rate = mapOjaiDocument(document);
            if (rate != null) {
                rates.add(rate);
            }
        }

        log.debug("Get '{}' rates by user id '{}' took {}", rates.size(), userId, stopwatch);

        return rates;
    });
}
Project: mapr-music    File: ArtistRateDao.java
/**
 * Returns list of Artist rates by artist identifier.
 *
 * @param artistId artist's identifier.
 * @return list of Artist rates.
 */
public List<ArtistRate> getByArtistId(String artistId) {
    return processStore((connection, store) -> {

        Stopwatch stopwatch = Stopwatch.createStarted();
        Query query = connection.newQuery().where(
                connection.newCondition()
                        .is("document_id", QueryCondition.Op.EQUAL, artistId)
                        .build()
        ).build();

        // Fetch all OJAI Documents from this store according to the built query
        DocumentStream documentStream = store.findQuery(query);
        List<ArtistRate> rates = new ArrayList<>();
        for (Document document : documentStream) {
            ArtistRate rate = mapOjaiDocument(document);
            if (rate != null) {
                rates.add(rate);
            }
        }

        log.debug("Get '{}' rates by artist id '{}' took {}", rates.size(), artistId, stopwatch);

        return rates;
    });
}
Project: elastic-db-tools-for-java    File: RangeShardMap.java
/**
 * Gets all the range mappings that exist within given range.
 *
 * @param range
 *            Range value, any mapping overlapping with the range will be returned.
 * @param lookupOptions
 *            Whether to search in the cache and/or store.
 * @return Read-only collection of mappings that satisfy the given range constraint.
 */
public List<RangeMapping> getMappings(Range range,
        LookupOptions lookupOptions) {
    ExceptionUtils.disallowNullArgument(range, "range");

    try (ActivityIdScope activityIdScope = new ActivityIdScope(UUID.randomUUID())) {
        log.info("GetMappings Start; Range: {}; Lookup Options: {}", range, lookupOptions);

        Stopwatch stopwatch = Stopwatch.createStarted();

        List<RangeMapping> rangeMappings = this.rsm.getMappingsForRange(range, null, lookupOptions);

        stopwatch.stop();

        log.info("GetMappings Complete; Range: {}; Lookup Options: {}; Duration: {}", range, lookupOptions,
                stopwatch.elapsed(TimeUnit.MILLISECONDS));

        return rangeMappings;
    }
}
Project: dremio-oss    File: SimpleParallelizer.java
/**
 * Generate a set of assigned fragments based on the provided fragment tree. Do not allow parallelization stages
 * to go beyond the global max width.
 *
 * @param options         Option list
 * @param foremanNode     The driving/foreman node for this query.  (this node)
 * @param queryId         The queryId for this query.
 * @param activeEndpoints The list of endpoints to consider for inclusion in planning this query.
 * @param reader          Tool used to read JSON plans
 * @param rootFragment    The root node of the PhysicalPlan that we will be parallelizing.
 * @param session         UserSession of user who launched this query.
 * @param queryContextInfo Info related to the context when query has started.
 * @return The list of generated PlanFragment protobuf objects to be assigned out to the individual nodes.
 * @throws ExecutionSetupException
 */
public List<PlanFragment> getFragments(
    OptionList options,
    NodeEndpoint foremanNode,
    QueryId queryId,
    Collection<NodeEndpoint> activeEndpoints,
    PhysicalPlanReader reader,
    Fragment rootFragment,
    UserSession session,
    QueryContextInformation queryContextInfo,
    FunctionLookupContext functionLookupContext) throws ExecutionSetupException {
  observer.planParallelStart();
  final Stopwatch stopwatch = Stopwatch.createStarted();
  final PlanningSet planningSet = getFragmentsHelper(activeEndpoints, rootFragment);
  observer.planParallelized(planningSet);
  stopwatch.stop();
  observer.planAssignmentTime(stopwatch.elapsed(TimeUnit.MILLISECONDS));
  stopwatch.start();
  List<PlanFragment> fragments = generateWorkUnit(options, foremanNode, queryId, reader, rootFragment, planningSet, session, queryContextInfo, functionLookupContext);
  stopwatch.stop();
  observer.planGenerationTime(stopwatch.elapsed(TimeUnit.MILLISECONDS));
  observer.plansDistributionComplete(new QueryWorkUnit(fragments));
  return fragments;
}
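
Note that the method above restarts the same Stopwatch without resetting it, so the second reading includes the time already recorded for the assignment phase; Guava accumulates elapsed time across stop()/start() cycles. If independent per-phase durations are wanted, the usual pattern is to reset between phases. A small sketch of that pattern (the phase runnables are placeholders, not Dremio code):

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

final class PhaseTiming {

    static void timeTwoPhases(Runnable phaseOne, Runnable phaseTwo) {
        Stopwatch timer = Stopwatch.createStarted();
        phaseOne.run();
        long phaseOneMs = timer.elapsed(TimeUnit.MILLISECONDS);

        // reset() clears the accumulated time before timing the next phase.
        timer.reset().start();
        phaseTwo.run();
        long phaseTwoMs = timer.elapsed(TimeUnit.MILLISECONDS);

        System.out.printf("phase one: %d ms, phase two: %d ms%n", phaseOneMs, phaseTwoMs);
    }
}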
Project: mapr-music    File: AlbumRateDao.java
/**
 * {@inheritDoc}
 *
 * @param id        identifier of document, which will be updated.
 * @param albumRate album rate.
 * @return updated album rate.
 */
@Override
public AlbumRate update(String id, AlbumRate albumRate) {
    return processStore((connection, store) -> {

        Stopwatch stopwatch = Stopwatch.createStarted();

        // Create a DocumentMutation to update non-null fields
        DocumentMutation mutation = connection.newMutation();

        // Update only non-null fields
        if (albumRate.getRating() != null) {
            mutation.set("rating", albumRate.getRating());
        }

        // Update the OJAI Document with specified identifier
        store.update(id, mutation);

        Document updatedOjaiDoc = store.findById(id);

        log.debug("Update document from table '{}' with id: '{}'. Elapsed time: {}", tablePath, id, stopwatch);

        // Map Ojai document to the actual instance of model class
        return mapOjaiDocument(updatedOjaiDoc);
    });
}
Project: elastic-db-tools-for-java    File: ListShardMap.java
/**
 * Creates and adds a point mapping to ShardMap.
 *
 * @param point
 *            Point for which to create the mapping.
 * @param shard
 *            Shard associated with the point mapping.
 * @return Newly created mapping.
 */
public PointMapping createPointMapping(KeyT point,
        Shard shard) {
    ExceptionUtils.disallowNullArgument(shard, "shard");

    try (ActivityIdScope activityIdScope = new ActivityIdScope(UUID.randomUUID())) {
        PointMappingCreationInfo args = new PointMappingCreationInfo(point, shard, MappingStatus.Online);

        String mappingKey = args.getKey().toString();
        log.info("CreatePointMapping Start; ShardMap name: {}; Point Mapping: {}", this.getName(), mappingKey);

        Stopwatch stopwatch = Stopwatch.createStarted();

        PointMapping pointMapping = lsm.add(new PointMapping(this.getShardMapManager(), args));

        stopwatch.stop();

        log.info("CreatePointMapping Complete; ShardMap name: {}; Point Mapping: {}; Duration: {}", this.getName(), mappingKey,
                stopwatch.elapsed(TimeUnit.MILLISECONDS));

        return pointMapping;
    }
}
Project: dremio-oss    File: ParquetRecordReaderTest.java
private void testFull(QueryType type, String planText, String filename,
    int numberOfTimesRead /* specified in json plan */,
    int numberOfRowGroups, int recordsPerRowGroup, boolean testValues) throws Exception {

  // final RecordBatchLoader batchLoader = new RecordBatchLoader(getAllocator());
  final HashMap<String, FieldInfo> fields = new HashMap<>();
  final ParquetTestProperties props =
      new ParquetTestProperties(numberOfRowGroups, recordsPerRowGroup, DEFAULT_BYTES_PER_PAGE, fields);
  TestFileGenerator.populateFieldInfoMap(props);
  final ParquetResultListener resultListener =
      new ParquetResultListener(getAllocator(), props, numberOfTimesRead, testValues);
  final Stopwatch watch = Stopwatch.createStarted();
  testWithListener(type, planText, resultListener);
  resultListener.getResults();
  // batchLoader.clear();
  System.out.println(String.format("Took %d ms to run query", watch.elapsed(TimeUnit.MILLISECONDS)));
}
Project: elastic-db-tools-for-java    File: RangeShardMap.java
/**
 * Gets all the range mappings that exist within given range and given shard.
 *
 * @param range
 *            Range value, any mapping overlapping with the range will be returned.
 * @param shard
 *            Shard for which the mappings will be returned.
 * @param lookupOptions
 *            Whether to search in the cache and/or store.
 * @return Read-only collection of mappings that satisfy the given range and shard constraints.
 */
public List<RangeMapping> getMappings(Range range,
        Shard shard,
        LookupOptions lookupOptions) {
    ExceptionUtils.disallowNullArgument(range, "range");
    ExceptionUtils.disallowNullArgument(shard, "shard");

    try (ActivityIdScope activityIdScope = new ActivityIdScope(UUID.randomUUID())) {
        log.info("GetMappings Start; Shard: {}; Range: {}; Lookup Options: {}", shard.getLocation(), lookupOptions, range);

        Stopwatch stopwatch = Stopwatch.createStarted();

        List<RangeMapping> rangeMappings = this.rsm.getMappingsForRange(range, shard, lookupOptions);

        stopwatch.stop();

        log.info("GetMappings Complete; Shard: {}; Lookup Options: {}; Duration: {}", shard.getLocation(), lookupOptions,
                stopwatch.elapsed(TimeUnit.MILLISECONDS));

        return rangeMappings;
    }
}
Project: elastic-db-tools-for-java    File: ListShardMap.java
/**
 * Removes a point mapping.
 *
 * @param mapping
 *            Mapping being removed.
 */
public void deleteMapping(PointMapping mapping) {
    ExceptionUtils.disallowNullArgument(mapping, "mapping");

    try (ActivityIdScope activityIdScope = new ActivityIdScope(UUID.randomUUID())) {
        String mappingKey = mapping.getKey().getRawValue().toString();
        log.info("DeletePointMapping Start; ShardMap name: {}; Point Mapping: {}", this.getName(), mappingKey);

        Stopwatch stopwatch = Stopwatch.createStarted();

        lsm.remove(mapping);

        stopwatch.stop();

        log.info("DeletePointMapping Completed; ShardMap name: {}; Point Mapping: {}; Duration: {}", this.getName(), mappingKey,
                stopwatch.elapsed(TimeUnit.MILLISECONDS));
    }
}
Project: elastic-db-tools-for-java    File: ShardMapManagerFactory.java
/**
 * Gets {@link ShardMapManager} from persisted state in a SQL Server database.
 *
 * @param connectionString
 *            Connection parameters used for performing operations against shard map manager database(s).
 * @param loadPolicy
 *            Initialization policy.
 * @param shardMapManager
 *            Shard map manager object used for performing management and read operations for shard maps, shards and shard mappings or {@code null}
 *            in case shard map manager does not exist.
 * @param retryBehavior
 *            Behavior for detecting transient exceptions in the store.
 * @param retryEventHandler
 *            Event handler for store operation retry events.
 * @return {@code true} if a shard map manager object was created, {@code false} otherwise.
 */
public static boolean tryGetSqlShardMapManager(String connectionString,
        ShardMapManagerLoadPolicy loadPolicy,
        RetryBehavior retryBehavior,
        EventHandler<RetryingEventArgs> retryEventHandler,
        ReferenceObjectHelper<ShardMapManager> shardMapManager) {
    ExceptionUtils.disallowNullArgument(connectionString, "connectionString");
    ExceptionUtils.disallowNullArgument(retryBehavior, "retryBehavior");

    try (ActivityIdScope activityIdScope = new ActivityIdScope(UUID.randomUUID())) {
        log.info("ShardMapManagerFactory TryGetSqlShardMapManager Start; ");

        Stopwatch stopwatch = Stopwatch.createStarted();

        shardMapManager.argValue = ShardMapManagerFactory.getSqlShardMapManager(connectionString, loadPolicy, retryBehavior, retryEventHandler,
                false);

        stopwatch.stop();

        log.info("ShardMapManagerFactory TryGetSqlShardMapManager Complete; Duration:{}", stopwatch.elapsed(TimeUnit.MILLISECONDS));

        return shardMapManager.argValue != null;
    }
}
Project: elastic-db-tools-for-java    File: ListShardMap.java
/**
 * Creates and adds a point mapping to ShardMap.
 *
 * @param creationInfo
 *            Information about mapping to be added.
 * @return Newly created mapping.
 */
public PointMapping createPointMapping(PointMappingCreationInfo creationInfo) {
    ExceptionUtils.disallowNullArgument(creationInfo, "creationInfo");

    try (ActivityIdScope activityIdScope = new ActivityIdScope(UUID.randomUUID())) {
        Stopwatch stopwatch = Stopwatch.createStarted();

        String mappingKey = creationInfo.getKey().getRawValue().toString();
        log.info("CreatePointMapping Start; ShardMap name: {}; Point Mapping: {} ", this.getName(), mappingKey);

        PointMapping mapping = lsm.add(new PointMapping(this.getShardMapManager(), creationInfo));

        stopwatch.stop();

        log.info("CreatePointMapping Complete; ShardMap name: {}; Point Mapping: {}; Duration: {}", this.getName(), mappingKey,
                stopwatch.elapsed(TimeUnit.MILLISECONDS));

        return mapping;
    }
}
Project: dremio-oss    File: SampleDataPopulator.java
private VirtualDatasetUI newDataset(DatasetPath datasetPath, DatasetVersion version,
    From from, List<String> sqlContext) {

  final VirtualDatasetUI ds = com.dremio.dac.explore.DatasetTool.newDatasetBeforeQueryMetadata(datasetPath, version, from, sqlContext, username);
  final SqlQuery query = new SqlQuery(ds.getSql(), ds.getState().getContextList(), username);
  ds.setLastTransform(new Transform(TransformType.createFromParent).setTransformCreateFromParent(new TransformCreateFromParent(from)));
  final QueryMetadata metadata;

  try {
    Stopwatch sw = Stopwatch.createStarted();
    metadata = QueryParser.extract(query, context);
    long t = sw.elapsed(MILLISECONDS);
    if (t > 100) {
      logger.warn(String.format("parsing sql took %dms for %s:\n%s", t, ds.getName(), ds.getSql()));
    }
  } catch (RuntimeException e) {
    Throwables.propagateIfInstanceOf(e, UserException.class);
    throw new ServerErrorException("Produced invalid SQL:\n" + ds.getSql() + "\n" + e.getMessage(), e);
  }
  QuerySemantics.populateSemanticFields(metadata.getRowType(), ds.getState());
  DatasetTool.applyQueryMetadata(ds, metadata);

  return ds;
}
Project: session-to-cookie    File: WebController.java
@RequestMapping("/slowmethod/{name}")
@ResponseBody
public String handleSlowMethod(HttpServletRequest servletRequest, @PathVariable("name") String name) {
    HttpSession session = servletRequest.getSession(false);
    if (session == null) {
        log.debug("created new session to save name '" + name + "'");
        // no session was created yet, create a new one and set some session
        // data specific to this user/request
        session = servletRequest.getSession(true);
        session.setAttribute("complex", new SessionObject(name));
    }
    Stopwatch stopwatch = Stopwatch.createStarted();
    // simulate a slow method by calculating some stuff that should take
    // a few hundred milliseconds. We do NOT use Thread.sleep because we
    // want to measure CPU performance differences under heavy load (takes ~100ms on my machine).
    long count = 0;
    for (long i = 0; i < 250000000L; i++) {
        count++;
    }
    return "hello, your session is: " + session + ", count is: " + count + " elapsed time is: "+stopwatch.elapsed(TimeUnit.MILLISECONDS)+", your name is: "
            + ((SessionObject) session.getAttribute("complex")).getName() + "\n" + getHostInfo();
}
Project: grpc-java-contrib    File: StopwatchServerInterceptor.java
@Override
public <ReqT, RespT> ServerCall.Listener<ReqT> interceptCall(ServerCall<ReqT, RespT> call, Metadata headers, ServerCallHandler<ReqT, RespT> next) {
    logStart(call.getMethodDescriptor());

    return new ForwardingServerCallListener.SimpleForwardingServerCallListener<ReqT>(next.startCall(call, headers)) {
        private Stopwatch stopwatch = Stopwatch.createStarted();

        @Override
        public void onCancel() {
            super.onCancel();
            logCancel(call.getMethodDescriptor(), Duration.ofNanos(stopwatch.stop().elapsed(TimeUnit.NANOSECONDS)));
        }

        @Override
        public void onComplete() {
            super.onComplete();
            logStop(call.getMethodDescriptor(), Duration.ofNanos(stopwatch.stop().elapsed(TimeUnit.NANOSECONDS)));
        }
    };
}
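
A sketch of how such an interceptor is typically attached to a gRPC server; the service instance and port are placeholders, and this wiring is not part of the grpc-java-contrib snippet above:

import java.io.IOException;

import io.grpc.BindableService;
import io.grpc.Server;
import io.grpc.ServerBuilder;
import io.grpc.ServerInterceptors;

final class TimedServer {

    /** Builds and starts a server whose calls are timed by the interceptor above. */
    static Server start(BindableService service, int port) throws IOException {
        return ServerBuilder.forPort(port)
                .addService(ServerInterceptors.intercept(service, new StopwatchServerInterceptor()))
                .build()
                .start();
    }
}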
Project: elastic-db-tools-for-java    File: RangeShardMap.java
/**
 * Looks up the key value and returns the corresponding mapping.
 *
 * @param key
 *            Input key value.
 * @param lookupOptions
 *            Whether to search in the cache and/or store.
 * @return Mapping that contains the key value.
 */
public RangeMapping getMappingForKey(KeyT key,
        LookupOptions lookupOptions) {
    try (ActivityIdScope activityIdScope = new ActivityIdScope(UUID.randomUUID())) {
        log.info("GetMapping Start; Range Mapping Key Type: {}; Lookup Options: {}", key.getClass(), lookupOptions);

        Stopwatch stopwatch = Stopwatch.createStarted();

        RangeMapping rangeMapping = this.rsm.lookup(key, lookupOptions);

        stopwatch.stop();

        log.info("GetMapping Complete; Range Mapping Key Type: {}; Lookup Options: {} Duration: {}", key.getClass(), lookupOptions,
                stopwatch.elapsed(TimeUnit.MILLISECONDS));

        return rangeMapping;
    }
}
Project: Java-EX    File: SecurityUtil.java
public static String digest(InputStream input, String algorithm) throws IOException, NoSuchAlgorithmException {
  Stopwatch sw = Stopwatch.createStarted();
  int bufferSize = 256 * 1024;
  MessageDigest messageDigest = MessageDigest.getInstance(algorithm);
  try (DigestInputStream digestInputStream = new DigestInputStream(input, messageDigest)) {
    byte[] buffer = new byte[bufferSize];
    // Drain the stream; each read updates the wrapped MessageDigest.
    while (digestInputStream.read(buffer) != -1) {
      // nothing else to do: reading is the work
    }
    messageDigest = digestInputStream.getMessageDigest();
    byte[] resultByteArray = messageDigest.digest();
    return byteArrayToHex(resultByteArray);
  } finally {
    sw.stop();
  }
}
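
A possible call site for the helper above; the file path is a placeholder and SecurityUtil is assumed to be the class shown:

import java.io.FileInputStream;
import java.io.InputStream;

final class DigestExample {

    public static void main(String[] args) throws Exception {
        // Compute the SHA-256 hex digest of a file by streaming it through the helper above.
        try (InputStream in = new FileInputStream("path/to/some-file.bin")) {
            System.out.println("SHA-256: " + SecurityUtil.digest(in, "SHA-256"));
        }
    }
}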
Project: elastic-db-tools-for-java    File: RangeShardMap.java
/**
 * Merges 2 contiguous mappings into a single mapping. Both left and right mappings should point to the same location and must be contiguous.
 *
 * @param left
 *            Left mapping.
 * @param right
 *            Right mapping.
 * @param leftMappingLockToken
 *            An instance of {@link MappingLockToken} for the left mapping
 * @param rightMappingLockToken
 *            An instance of {@link MappingLockToken} for the right mapping
 * @return Mapping that results from the merge operation.
 */
public RangeMapping mergeMappings(RangeMapping left,
        RangeMapping right,
        MappingLockToken leftMappingLockToken,
        MappingLockToken rightMappingLockToken) {
    ExceptionUtils.disallowNullArgument(left, "left");
    ExceptionUtils.disallowNullArgument(right, "right");
    ExceptionUtils.disallowNullArgument(leftMappingLockToken, "leftMappingLockToken");
    ExceptionUtils.disallowNullArgument(rightMappingLockToken, "rightMappingLockToken");

    try (ActivityIdScope activityIdScope = new ActivityIdScope(UUID.randomUUID())) {
        log.info("SplitMapping Start; Left Shard: {}; Right Shard: {}", left.getShard().getLocation(), right.getShard().getLocation());

        Stopwatch stopwatch = Stopwatch.createStarted();

        RangeMapping rangeMapping = this.rsm.merge(left, right, leftMappingLockToken.getLockOwnerId(), rightMappingLockToken.getLockOwnerId());

        stopwatch.stop();

        log.info("SplitMapping Complete; Duration: {}", stopwatch.elapsed(TimeUnit.MILLISECONDS));

        return rangeMapping;
    }
}
Project: dremio-oss    File: SabotNode.java
private void doClose() {
  // avoid complaints about double closing
  if (isClosed) {
    return;
  }
  final Stopwatch w = Stopwatch.createStarted();
  logger.debug("Shutdown begun.");

  // wait for anything that is running to complete

  try {
    registry.close();
  } catch(Exception e) {
    logger.warn("Failure on close()", e);
  }

  logger.info("Shutdown completed ({} ms).", w.elapsed(TimeUnit.MILLISECONDS));
  isClosed = true;
}
Project: QDrill    File: OldAssignmentCreator.java
OldAssignmentCreator(List<DrillbitEndpoint> incomingEndpoints, List<T> units) {
  logger.debug("Assigning {} units to {} endpoints", units.size(), incomingEndpoints.size());
  Stopwatch watch = Stopwatch.createStarted();

  Preconditions.checkArgument(incomingEndpoints.size() <= units.size(), String.format("Incoming endpoints %d "
      + "is greater than number of row groups %d", incomingEndpoints.size(), units.size()));
  this.mappings = ArrayListMultimap.create();
  this.endpoints = Lists.newLinkedList(incomingEndpoints);

  ArrayList<T> rowGroupList = new ArrayList<>(units);
  for (double cutoff : ASSIGNMENT_CUTOFFS) {
    scanAndAssign(rowGroupList, cutoff, false, false);
  }
  scanAndAssign(rowGroupList, 0.0, true, false);
  scanAndAssign(rowGroupList, 0.0, true, true);

  logger.debug("Took {} ms to apply assignments", watch.elapsed(TimeUnit.MILLISECONDS));
  Preconditions.checkState(rowGroupList.isEmpty(), "All readEntries should be assigned by now, but some are still unassigned");
  Preconditions.checkState(!units.isEmpty());

}
Project: QDrill    File: Drillbit.java
public void run() throws Exception {
  final Stopwatch w = Stopwatch.createStarted();
  logger.debug("Startup begun.");
  coord.start(10000);
  storeProvider.start();
  final DrillbitEndpoint md = engine.start();
  manager.start(md, engine.getController(), engine.getDataConnectionCreator(), coord, storeProvider);
  final DrillbitContext drillbitContext = manager.getContext();
  drillbitContext.getStorage().init();
  drillbitContext.getOptionManager().init();
  javaPropertiesToSystemOptions();
  registrationHandle = coord.register(md);
  webServer.start();

  Runtime.getRuntime().addShutdownHook(new ShutdownThread(this, new StackTrace()));
  logger.info("Startup completed ({} ms).", w.elapsed(TimeUnit.MILLISECONDS));
}
Project: okwallet    File: DeterministicKeyChain.java
/**
 * Pre-generate enough keys to reach the lookahead size, but only if there are more than the lookaheadThreshold to
 * be generated, so that the Bloom filter does not have to be regenerated that often.
 *
 * The returned mutable list of keys must be inserted into the basic key chain.
 */
private List<DeterministicKey> maybeLookAhead(DeterministicKey parent, int issued, int lookaheadSize, int lookaheadThreshold) {
    checkState(lock.isHeldByCurrentThread());
    final int numChildren = hierarchy.getNumChildren(parent.getPath());
    final int needed = issued + lookaheadSize + lookaheadThreshold - numChildren;

    if (needed <= lookaheadThreshold)
        return new ArrayList<>();

    log.info("{} keys needed for {} = {} issued + {} lookahead size + {} lookahead threshold - {} num children",
            needed, parent.getPathAsString(), issued, lookaheadSize, lookaheadThreshold, numChildren);

    List<DeterministicKey> result  = new ArrayList<>(needed);
    final Stopwatch watch = Stopwatch.createStarted();
    int nextChild = numChildren;
    for (int i = 0; i < needed; i++) {
        DeterministicKey key = HDKeyDerivation.deriveThisOrNextChildKey(parent, nextChild);
        key = key.dropPrivateBytes();
        hierarchy.putKey(key);
        result.add(key);
        nextChild = key.getChildNumber().num() + 1;
    }
    watch.stop();
    log.info("Took {}", watch);
    return result;
}
Project: elastic-db-tools-for-java    File: RangeShardMap.java
/**
 * Gets all the range mappings that exist for the given shard.
 *
 * @param shard
 *            Shard for which the mappings will be returned.
 * @param lookupOptions
 *            Whether to search in the cache and/or store.
 * @return Read-only collection of mappings that satisfy the given shard constraint.
 */
public List<RangeMapping> getMappings(Shard shard,
        LookupOptions lookupOptions) {
    ExceptionUtils.disallowNullArgument(shard, "shard");

    try (ActivityIdScope activityIdScope = new ActivityIdScope(UUID.randomUUID())) {
        log.info("GetMappings Start; Shard: {}; Lookup Options: {}", shard.getLocation(), lookupOptions);

        Stopwatch stopwatch = Stopwatch.createStarted();

        List<RangeMapping> rangeMappings = this.rsm.getMappingsForRange(null, shard, lookupOptions);

        stopwatch.stop();

        log.info("GetMappings Complete; Shard: {}; Lookup Options: {}; Duration: {}", shard.getLocation(), lookupOptions,
                stopwatch.elapsed(TimeUnit.MILLISECONDS));

        return rangeMappings;
    }
}
Project: okwallet    File: WalletApplication.java
private void initNotificationManager() {
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
        final Stopwatch watch = Stopwatch.createStarted();
        final NotificationManager nm = (NotificationManager) getSystemService(Context.NOTIFICATION_SERVICE);

        final NotificationChannel received = new NotificationChannel(Constants.NOTIFICATION_CHANNEL_ID_RECEIVED,
                getString(R.string.notification_channel_received_name), NotificationManager.IMPORTANCE_DEFAULT);
        received.setSound(Uri.parse("android.resource://" + getPackageName() + "/" + R.raw.coins_received),
                new AudioAttributes.Builder().setContentType(AudioAttributes.CONTENT_TYPE_SONIFICATION)
                        .setLegacyStreamType(AudioManager.STREAM_NOTIFICATION)
                        .setUsage(AudioAttributes.USAGE_NOTIFICATION_EVENT).build());
        nm.createNotificationChannel(received);

        final NotificationChannel ongoing = new NotificationChannel(Constants.NOTIFICATION_CHANNEL_ID_ONGOING,
                getString(R.string.notification_channel_ongoing_name), NotificationManager.IMPORTANCE_LOW);
        nm.createNotificationChannel(ongoing);

        final NotificationChannel important = new NotificationChannel(Constants.NOTIFICATION_CHANNEL_ID_IMPORTANT,
                getString(R.string.notification_channel_important_name), NotificationManager.IMPORTANCE_HIGH);
        nm.createNotificationChannel(important);

        log.info("created notification channels, took {}", watch);
    }
}
Project: kafka-connect-cdc-oracle    File: QueryServiceTest.java
@Test
public void receiveLCR() throws SQLException, StreamsException, TimeoutException {
  final List<Change> changes = new ArrayList<>();

  doAnswer(invocationOnMock -> {
    Change change = invocationOnMock.getArgument(0);
    changes.add(change);
    return null;
  }).when(this.changeWriter).addChange(any());

  this.serviceManager.startAsync();
  this.serviceManager.awaitHealthy(60, TimeUnit.SECONDS);

  Stopwatch stopwatch = Stopwatch.createStarted();

  while (changes.size() <= 30 && stopwatch.elapsed(TimeUnit.SECONDS) < 60L) {
    OracleChange oracleChange = this.queryService.receiveChange();
    log.trace("oracleChange returned {}", oracleChange);
    if (null != oracleChange) {
      changes.add(oracleChange);
    }
  }
}
Project: creacoinj    File: MnemonicCode.java
/**
 * Convert mnemonic word list to seed.
 */
public static byte[] toSeed(List<String> words, String passphrase) {

    // To create binary seed from mnemonic, we use PBKDF2 function
    // with mnemonic sentence (in UTF-8) used as a password and
    // string "mnemonic" + passphrase (again in UTF-8) used as a
    // salt. Iteration count is set to 4096 and HMAC-SHA512 is
    // used as a pseudo-random function. Desired length of the
    // derived key is 512 bits (= 64 bytes).
    //
    String pass = Utils.join(words);
    String salt = "mnemonic" + passphrase;

    final Stopwatch watch = Stopwatch.createStarted();
    byte[] seed = PBKDF2SHA512.derive(pass, salt, PBKDF2_ROUNDS, 64);
    watch.stop();
    log.info("PBKDF2 took {}", watch);
    return seed;
}
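
The comment above spells out the PBKDF2 parameters (password = joined mnemonic, salt = "mnemonic" + passphrase, 4096 rounds of HMAC-SHA512, 64-byte key). A standalone sketch of the same derivation using only the JDK, timed the same way; this illustrates the parameters, not the PBKDF2SHA512 helper used above, and it omits the Unicode normalization a full BIP-39 implementation performs:

import java.nio.charset.StandardCharsets;
import java.security.spec.KeySpec;
import java.util.concurrent.TimeUnit;

import javax.crypto.SecretKeyFactory;
import javax.crypto.spec.PBEKeySpec;

import com.google.common.base.Stopwatch;

final class Pbkdf2SeedSketch {

    static byte[] deriveSeed(String mnemonicSentence, String passphrase) throws Exception {
        char[] password = mnemonicSentence.toCharArray();
        byte[] salt = ("mnemonic" + passphrase).getBytes(StandardCharsets.UTF_8);

        final Stopwatch watch = Stopwatch.createStarted();
        KeySpec spec = new PBEKeySpec(password, salt, 4096, 512); // 512 bits = 64 bytes
        SecretKeyFactory factory = SecretKeyFactory.getInstance("PBKDF2WithHmacSHA512");
        byte[] seed = factory.generateSecret(spec).getEncoded();
        watch.stop();

        System.out.println("PBKDF2 took " + watch.elapsed(TimeUnit.MILLISECONDS) + " ms");
        return seed;
    }
}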
Project: hashsdn-controller    File: IntegrationTestKit.java
public static void verifyShardStats(final AbstractDataStore datastore, final String shardName,
        final ShardStatsVerifier verifier) throws Exception {
    ActorContext actorContext = datastore.getActorContext();

    Future<ActorRef> future = actorContext.findLocalShardAsync(shardName);
    ActorRef shardActor = Await.result(future, Duration.create(10, TimeUnit.SECONDS));

    AssertionError lastError = null;
    Stopwatch sw = Stopwatch.createStarted();
    while (sw.elapsed(TimeUnit.SECONDS) <= 5) {
        ShardStats shardStats = (ShardStats)actorContext
                .executeOperation(shardActor, Shard.GET_SHARD_MBEAN_MESSAGE);

        try {
            verifier.verify(shardStats);
            return;
        } catch (AssertionError e) {
            lastError = e;
            Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
        }
    }

    throw lastError;
}
Project: bench    File: StopwatchThread.java
StopwatchThread(final Metrics metrics, final SystemInfo systemInfo, final ProcessWatcherActorInput message) {
    this.metrics = metrics;
    this.systemInfo = systemInfo;
    this.message = message;

    stopwatch = Stopwatch.createUnstarted();
}
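
Unlike most snippets here, this one creates the stopwatch unstarted; the owning code then starts and stops it around the sections it wants to measure. A small sketch of that usage with illustrative names; elapsed() reports the total across all start()/stop() cycles until reset() is called:

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

final class ChunkedTiming {

    static long measureInChunks(Runnable chunkA, Runnable chunkB) {
        Stopwatch stopwatch = Stopwatch.createUnstarted();

        stopwatch.start();
        chunkA.run();
        stopwatch.stop();

        // ... untimed work can happen here without affecting the measurement ...

        stopwatch.start();
        chunkB.run();
        stopwatch.stop();

        // The reading is the sum of both timed sections.
        return stopwatch.elapsed(TimeUnit.MILLISECONDS);
    }
}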
Project: dremio-oss    File: FileSystemStoragePlugin2.java
@Override
public boolean hasAccessPermission(String user, NamespaceKey key, DatasetConfig datasetConfig) {
  if (fsPlugin.getConfig().isImpersonationEnabled()) {
    if (datasetConfig.getReadDefinition() != null) { // allow accessing partial datasets
      final FileSystemWrapper userFs = fsPlugin.getFS(user);
      final List<TimedRunnable<Boolean>> permissionCheckTasks = Lists.newArrayList();

      permissionCheckTasks.addAll(getUpdateKeyPermissionTasks(datasetConfig, userFs));
      permissionCheckTasks.addAll(getSplitPermissiomTasks(datasetConfig, userFs, user));

      try {
        Stopwatch stopwatch = Stopwatch.createStarted();
        final List<Boolean> accessPermissions = TimedRunnable.run("check access permission for " + key, logger, permissionCheckTasks, 16);
        stopwatch.stop();
        logger.debug("Checking access permission for {} took {} ms", key, stopwatch.elapsed(TimeUnit.MILLISECONDS));
        for (Boolean permission : accessPermissions) {
          if (!permission) {
            return false;
          }
        }
      } catch (IOException ioe) {
        throw UserException.dataReadError(ioe).build(logger);
      }
    }
  }
  return true;
}
Project: clearwsd    File: TextCorpusReader.java
/**
 * Simultaneously parse and write dependency trees to an output stream. Memory usage can be controlled with
 * a provided cache size parameter, which controls the number of trees to parse before writing them (and GC).
 *
 * @param inputStream  input stream
 * @param outputStream output stream
 * @param maxCache     maximum number of trees to parse before writing/flushing
 */
public void parseAndWrite(InputStream inputStream, OutputStream outputStream, int maxCache) {
    List<DepTree> cache = new ArrayList<>();
    int processed = 0;
    Stopwatch sw = Stopwatch.createStarted();
    try (BufferedReader reader = new BufferedReader(new InputStreamReader(inputStream));
         PrintWriter writer = new PrintWriter(outputStream)) {
        String line;
        while ((line = reader.readLine()) != null) {
            line = line.trim();
            if (line.length() == 0) {
                continue;
            }
            for (String sentence : parser.segment(line)) {
                DepTree tree = parser.parse(parser.tokenize(sentence));
                cache.add(tree);
                ++processed;
                if (cache.size() >= maxCache) {
                    CoNllDepTreeReader.writeDependencyTrees(cache, writer);
                    cache = new ArrayList<>();
                    log.debug("Parsing {} trees/s", processed / sw.elapsed(TimeUnit.SECONDS));
                }
            }
        }
        if (cache.size() > 0) {
            CoNllDepTreeReader.writeDependencyTrees(cache, writer);
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
Project: elastic-db-tools-for-java    File: Program.java
private static <T> Result executeMapOperations(String message,
        List<T> items,
        Func1Param<T, T> keySelector,
        Action1Param<T> a) {
    Result result = new Result();

    int latestWrittenCount = Integer.MIN_VALUE;
    int maxCount = items.size();
    Stopwatch sw;
    for (T item : items) {
        int percentComplete = (result.count * 100) / maxCount;
        if (percentComplete / 10 > latestWrittenCount / 10) {
            latestWrittenCount = percentComplete;
            System.out.printf("%1$s %2$s/%3$s (%4$s)%%" + "\r\n", message, item, Collections.max(items, null), percentComplete);
        }

        T key = keySelector.invoke(item);
        sw = Stopwatch.createStarted();
        a.invoke(key);
        sw.stop();

        result.count++;
        result.totalTicks += sw.elapsed(TimeUnit.MILLISECONDS);
    }

    return result;
}
Project: mapr-music    File: AlbumDao.java
/**
 * Updates single track.
 *
 * @param albumId identifier of album, for which track will be updated.
 * @param trackId identifier of track, which will be updated.
 * @param track   contains update information.
 * @return updated track.
 */
public Track updateTrack(String albumId, String trackId, Track track) {

    List<Track> existingAlbumTracks = getTracksList(albumId);
    if (existingAlbumTracks == null) {
        return null;
    }

    int trackIndex = getTrackIndexById(existingAlbumTracks, trackId);
    if (trackIndex < 0) {
        return null;
    }

    return processStore((connection, store) -> {

        Stopwatch stopwatch = Stopwatch.createStarted();

        // Update single track
        AlbumMutationBuilder mutationBuilder = AlbumMutationBuilder.forConnection(connection)
                .editTrack(trackIndex, track);

        // Update the OJAI Document with specified identifier
        store.update(albumId, mutationBuilder.build());

        Document updatedOjaiDoc = store.findById(albumId, "tracks");

        // Map Ojai document to the actual instance of model class
        Album updatedAlbum = mapOjaiDocument(updatedOjaiDoc);
        Optional<Track> trackOptional = updatedAlbum.getTrackList().stream()
                .filter(t -> trackId.equals(t.getId()))
                .findAny();

        log.debug("Updating album's track with id '{}' for albumId: '{}' took {}", trackId, albumId, stopwatch);

        return (trackOptional.isPresent()) ? trackOptional.get() : null;
    });
}
Project: mapr-music    File: ReportingDao.java
/**
 * Executes the SQL statement and returns a list of key/value pairs.
 *
 * @param sql SQL query to execute.
 * @return list of label/value pairs produced by the query.
 */
private List<Pair> populatePaiFromSQL(String sql) {

    Stopwatch stopwatch = Stopwatch.createStarted();
    List<Pair> pairs = new ArrayList<>();
    try {

        log.debug("Executing SQL :\n\t" + sql);

        Statement st = getConnection().createStatement();
        ResultSet rs = st.executeQuery(sql);
        while (rs.next()) {
            String label = rs.getString(1);
            if (label == null || label.trim().isEmpty()) {
                label = "Unknown";
            }
            pairs.add(new Pair(label, rs.getString(2)));
        }
        rs.close();
        st.close();
        connection.close();

    } catch (Exception e) {
        e.printStackTrace();
        // TODO: Manage exception
    }

    log.debug("Performing query: '{}' took: {}", sql, stopwatch);
    return pairs;
}
Project: mapr-music    File: ArtistRateDao.java
/**
 * Returns Artist rate according to the specified user identifier and artist identifier.
 *
 * @param userId   user identifier.
 * @param artistId artist identifier.
 * @return artist rate.
 */
public ArtistRate getRate(String userId, String artistId) {
    return processStore((connection, store) -> {

        Stopwatch stopwatch = Stopwatch.createStarted();
        QueryCondition condition = connection.newCondition()
                .and()
                .is("user_id", QueryCondition.Op.EQUAL, userId)
                .is("document_id", QueryCondition.Op.EQUAL, artistId)
                .close()
                .build();

        Query query = connection.newQuery().where(condition).build();

        // Fetch all OJAI Documents from this store according to the built query
        DocumentStream documentStream = store.findQuery(query);
        Iterator<Document> documentIterator = documentStream.iterator();

        if (!documentIterator.hasNext()) {
            return null;
        }

        log.debug("Get rate by artist id '{}' and user id '{}' took {}", artistId, userId, stopwatch);

        return mapOjaiDocument(documentIterator.next());
    });
}
Project: googles-monorepo-demo    File: UninterruptiblesTest.java
public void testTryAcquireWithNoWait() {
  Stopwatch stopwatch = Stopwatch.createStarted();
  Semaphore semaphore = new Semaphore(99);
  assertTrue(tryAcquireUninterruptibly(semaphore, 0, MILLISECONDS));
  assertTrue(tryAcquireUninterruptibly(semaphore, -42, MILLISECONDS));
  assertTrue(tryAcquireUninterruptibly(semaphore, LONG_DELAY_MS, MILLISECONDS));
  assertTimeNotPassed(stopwatch, LONG_DELAY_MS);
}