Python math module: ceil() example source code

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use math.ceil().
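
As a quick refresher before the project examples (a minimal sketch, not taken from any project below): math.ceil() rounds toward positive infinity. On Python 3 it returns an int; on Python 2 it returned a float, which is why many snippets below wrap the result in int().

import math

print(math.ceil(2.1))           # 3
print(math.ceil(-2.1))          # -2 (rounds toward positive infinity, not away from zero)
print(int(math.ceil(7 / 2.0)))  # 4, the usual "round a division up" idiom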

Project: visual-search    Author: GYXie
def main():
    args.input_data_dir = os.path.abspath(args.input_data_dir)
    if not os.path.exists(args.output_data_dir):
        os.mkdir(args.output_data_dir)
    for dir_path, dir_names, file_names in os.walk(args.input_data_dir):
        if len(file_names) > 0:
            print(dir_path)
            rows = int(math.ceil(len(file_names) / 6.0))
            print(rows)
            fig, axes = plt.subplots(4, 12, subplot_kw={'xticks': [], 'yticks': []})
            fig.subplots_adjust(hspace=0.01, wspace=0.01)
            for ax, file_name in zip(axes.flat, file_names):
                print(file_name)
                img = imread(dir_path + '/' + file_name)
                ax.imshow(img)
                # ax.set_title(os.path.splitext(file_name)[0].replace('.227x227', ''))
            plt.savefig(args.output_data_dir + dir_path.replace(args.input_data_dir, '') + '.pdf')
Project: ISB-CGC-pipelines    Author: isb-cgc
def calculateDiskSize(inputFile=None, inputFileSize=None, analysisId=None, scalingFactor=None, roundToNearestGbInterval=None):
        if inputFile is not None:
            fileSize = int(subprocess.check_output(["gsutil", "du", inputFile]).split(' ')[0])

        elif analysisId is not None:
            analysisDetail = CGHubDataUtils.getAnalysisDetail(analysisId)

            if len(analysisDetail["result_set"]["results"]) > 0:
                files = analysisDetail["result_set"]["results"][0]["files"]
                fileSize = sum([int(x["filesize"]) for x in files])
            else:
                print "ERROR: no files found for analysis ID {a}!".format(a=analysisId)
                exit(-1)

        if scalingFactor is not None:
            scalingFactor = int(scalingFactor)
        else:
            scalingFactor = 1

        if roundToNearestGbInterval is not None:
            roundTo = float(roundToNearestGbInterval) * 1000000000
        else:
            roundTo = 1000000000.0  # default to whole-GB rounding so roundTo is always defined

        return int(math.ceil(scalingFactor * fileSize / roundTo) * roundTo) / 1000000000
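
The round-up-to-an-interval idiom above is worth isolating; a minimal standalone sketch (the helper name and values are mine, not part of ISB-CGC-pipelines):

import math

def round_up_to_interval(size_bytes, interval_bytes):
    # round size_bytes up to the nearest whole multiple of interval_bytes
    return int(math.ceil(float(size_bytes) / interval_bytes) * interval_bytes)

# round a 2.3 GB file up to a whole 10 GB disk increment
print(round_up_to_interval(2300000000, 10000000000))  # 10000000000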
Project: topically-driven-language-model    Author: jhlau
def compute_dt_dist(docs, labels, tags, model, max_len, batch_size, pad_id, idxvocab, output_file):
    #generate batches
    num_batches = int(math.ceil(float(len(docs)) / batch_size))
    dt_dist = []
    t = []
    combined = []
    docid = 0
    for i in xrange(num_batches):
        x, _, _, t, s = get_batch_doc(docs, labels, tags, i, max_len, cf.tag_len, batch_size, pad_id)
        attention, mean_topic = sess.run([model.attention, model.mean_topic], {model.doc: x, model.tag: t})
        dt_dist.extend(attention[:s])

        if debug:
            for si in xrange(s):
                d = x[si]
                print "\n\nDoc", docid, "=", " ".join([idxvocab[item] for item in d if (item != pad_id)])
                sorted_dist = matutils.argsort(attention[si], reverse=True)
                for ti in sorted_dist:
                    print "Topic", ti, "=", attention[si][ti]
                docid += 1

    np.save(open(output_file, "w"), dt_dist)
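
The num_batches = ceil(n / batch_size) pattern above recurs throughout this page; a minimal self-contained sketch of the same idea (names are mine):

import math

def iter_batches(items, batch_size):
    # ceil ensures the final short batch is not dropped
    num_batches = int(math.ceil(float(len(items)) / batch_size))
    for i in range(num_batches):
        yield items[i * batch_size:(i + 1) * batch_size]

print(list(iter_batches(list(range(7)), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]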
Project: onto-lstm    Author: pdasigi
def _factor_target_indices(self, Y_inds, vocab_size=None, base=2):
    if vocab_size is None:
      vocab_size = len(self.dp.word_index)
    print >>sys.stderr, "Factoring targets of vocabulary size: %d"%(vocab_size)
    num_vecs = int(math.ceil(math.log(vocab_size)/math.log(base))) + 1
    base_inds = []
    div_Y_inds = Y_inds
    print >>sys.stderr, "Number of factors: %d"%num_vecs
    for i in range(num_vecs):
      new_inds = div_Y_inds % base
      if i == num_vecs - 1:
        if new_inds.sum() == 0:
          # Most significant "digit" is a zero. Omit it.
          break
      base_inds.append(new_inds)
      div_Y_inds = numpy.copy(div_Y_inds/base)
    base_vecs = [self._make_one_hot(base_inds_i, base) for base_inds_i in base_inds]
    return base_vecs
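
The digit-count formula above generalizes to any base; a small sketch of just that computation (the helper name is mine, not from onto-lstm):

import math

def num_factors(vocab_size, base=2):
    # digits needed to write indices 0..vocab_size-1 in the given base,
    # plus the one extra factor the snippet above reserves
    return int(math.ceil(math.log(vocab_size) / math.log(base))) + 1

print(num_factors(50000, base=2))  # 17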
Project: DREAM    Author: LaceyChen17
def eval_pred(dr_model, ub):
    '''
        evaluate dream model for predicting next basket on all training users
        in batches
    '''
    item_embedding = dr_model.encode.weight
    dr_model.eval()
    dr_hidden = dr_model.init_hidden(dr_model.config.batch_size)
    start_time = time()
    id_u, score_u = [], [] # user's id, user's score
    num_batchs = ceil(len(ub) / dr_model.config.batch_size)
    for i,x in enumerate(batchify(ub, dr_model.config.batch_size)):
        print(i)
        baskets, lens, uids = x
        _, dynamic_user, _ = dr_model(baskets, lens, dr_hidden)# shape: batch_size, max_len, embedding_size
        dr_hidden = repackage_hidden(dr_hidden)
        for i,l,du in zip(uids, lens, dynamic_user):
            du_latest = du[l - 1].unsqueeze(0) # shape: 1, embedding_size
            score_up = torch.mm(du_latest, item_embedding.t()) # shape: 1, num_item
            score_u.append(score_up.cpu().data.numpy())
            id_u.append(i)
    elapsed = time() - start_time 
    print('[Predicting] Elapsed: {:02.2f}'.format(elapsed))
    return score_u, id_u
Project: DREAM    Author: LaceyChen17
def evaluate_dream():
    dr_model.eval()
    dr_hidden = dr_model.init_hidden(dr_config.batch_size) 

    total_loss = 0
    start_time = time()
    num_batchs = ceil(len(test_ub) / dr_config.batch_size)
    for i,x in enumerate(batchify(test_ub, dr_config.batch_size)):
        baskets, lens, _ = x
        dynamic_user, _  = dr_model(baskets, lens, dr_hidden)
        loss = bpr_loss(baskets, dynamic_user, dr_model.encode.weight, dr_config)
        dr_hidden = repackage_hidden(dr_hidden)
        total_loss += loss.data

    # Logging
    elapsed = (time() - start_time) * 1000 / num_batchs
    total_loss = total_loss[0] / num_batchs
    print('[Evaluation]| Epochs {:3d} | Elapsed {:02.2f} | Loss {:05.2f} |'.format(epoch, elapsed, total_loss))
    return total_loss
Project: PyPlanet    Author: PyPlanet
def create_vote(self, action, player, finished_event):
        new_vote = Vote()
        new_vote.action = action
        new_vote.requester = player
        new_vote.votes_current = []
        needed_votes = math.ceil(self.instance.player_manager.count_players / 2)
        if needed_votes == math.floor(self.instance.player_manager.count_players / 2):
            needed_votes += 1
        if needed_votes > self.instance.player_manager.count_players:
            needed_votes = self.instance.player_manager.count_players
        new_vote.votes_required = needed_votes
        new_vote.vote_added = self.vote_added
        new_vote.vote_removed = self.vote_removed
        new_vote.vote_finished = finished_event

        asyncio.ensure_future(self.vote_reminder(new_vote))

        return new_vote
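
The ceil/floor pair above amounts to requiring a strict majority of players; an equivalent standalone sketch (my reading of the logic, not part of PyPlanet):

import math

def votes_required(player_count):
    # ceil(n/2) is already a strict majority for odd n; for even n
    # ceil(n/2) == floor(n/2), so one extra vote is required
    needed = math.ceil(player_count / 2)
    if needed == math.floor(player_count / 2):
        needed += 1
    return min(needed, player_count)

print([votes_required(n) for n in (1, 2, 3, 4, 5)])  # [1, 2, 2, 3, 3]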
Project: charm-swift-proxy    Author: openstack
def __call__(self):
        total_processes = _calculate_workers()
        ctxt = {
            "service_name": self.service_name,
            "user": self.user,
            "group": self.group,
            "script": self.script,
            "admin_script": self.admin_script,
            "public_script": self.public_script,
            "processes": int(math.ceil(self.process_weight * total_processes)),
            "admin_processes": int(math.ceil(self.admin_process_weight *
                                             total_processes)),
            "public_processes": int(math.ceil(self.public_process_weight *
                                              total_processes)),
            "threads": 1,
            "usr_bin": git_determine_usr_bin(),
            "python_path": git_determine_python_path(),
        }
        return ctxt
Project: cellranger    Author: 10XGenomics
def split(args):
    chunk_mem_gb = cr_utils.get_mem_gb_request_from_barcode_whitelist(args.barcode_whitelist)
    whitelist_mem_gb = cr_utils.get_mem_gb_request_from_barcode_whitelist(args.barcode_whitelist, args.gem_groups, use_min=False)

    # Estimate the total number of rows in the final molecule info. Worst case.
    total_reads = cr_utils.get_metric_from_json(args.extract_reads_summary, 'total_reads')
    mol_info_rows = total_reads

    # Memory for sorting in MoleculeCounter.concatenate_sort:
    # N = total number of rows
    # 8*N bytes to store the sort indices
    # (8+8+8)*N bytes to load, concatenate, and index into a 64-bit data column
    mol_info_mem_gb = int(math.ceil((32 * mol_info_rows)/1e9))
    join_mem_gb = min(MAX_MEM_GB, max(cr_constants.MIN_MEM_GB, whitelist_mem_gb + mol_info_mem_gb))

    chunks = []
    for chunk_input in args.inputs:
        chunks.append({
            'chunk_input': chunk_input,
            '__mem_gb': chunk_mem_gb,
        })
    join = {
        '__mem_gb': join_mem_gb,
    }
    return {'chunks': chunks, 'join': join}
Project: cellranger    Author: 10XGenomics
def split(args):
    assert len(args.read1s) == len(args.read2s)

    chunks = []

    # Determine the number of buckets required to achieve
    # the given chunk size.
    chunks_per_gem_group = {}
    with open(args.reads_summary) as f:
        reads_summary = json.load(f)
        for gg in args.gem_groups:
            readpairs = reads_summary['%d_total_reads_per_gem_group' % gg]
            chunks_per_gem_group[str(gg)] = max(2,
                                                int(math.ceil(float(readpairs) / \
                                                              args.readpairs_per_chunk)))

    for fastq1, fastq2 in itertools.izip(args.read1s, args.read2s):
        chunks.append({
            'read1s_chunk': fastq1,
            'read2s_chunk': fastq2,
            'chunks_per_gem_group': chunks_per_gem_group,
        })
    return {'chunks': chunks}
Project: sinal2    Author: observerss
def run(self):
        c = self.client
        if not c.login():
            log.error('login failed')
            return

        symbols_list = self.split(self.symbols, self.size)
        size = int(math.ceil(1. * len(symbols_list) / self.core))
        child_sl = self.split(symbols_list, size)
        f = open(self.out, 'ab') if self.out else None
        ps, gs = [], []
        for i in range(self.core):
            r, w = gipc.pipe()
            g = gevent.spawn(self.main_on_data, r, f)
            gs.append(g)  # keep a handle so the greenlet can be killed below
            p = gipc.start_process(target=self.spawn_watchs, args=(w, child_sl[i]))
            ps.append(p)

        for p in ps:
            p.join()
        for g in gs:
            g.kill()
            g.join()
Project: yeelight-controller    Author: kevinxw
def __calculate_light_brightness(self, current_time, light_policy = {}):
        bright_time, dark_time = light_policy['bright_time'], light_policy['dark_time']
        if current_time < min(bright_time, dark_time) or current_time > max(bright_time, dark_time):
            return -1   # return -1 when current time is not within the bright_time and dark_time range
        # if there is a constant brightness value, return immediately
        if 'const_brightness' in light_policy:
            return light_policy['const_brightness']
        min_brightness, max_brightness = 0, 100
        if 'min_brightness' in light_policy:
            min_brightness = light_policy['min_brightness']
        if 'max_brightness' in light_policy:
            max_brightness = light_policy['max_brightness']
        time_scale = abs(self.__get_diff_between_datetime(bright_time, dark_time))
        time_passed = abs(self.__get_diff_between_datetime(current_time, bright_time))
        brightness = int(math.ceil(min_brightness + float(time_passed) / float(time_scale) * float(max_brightness - min_brightness)))
        return brightness
Project: run_lambda    Author: ethantkoenig
def build(self):
            end_time = timeit.default_timer()
            end_mem = memory_profiler.memory_usage()[0]

            sys.stdout = self._previous_stdout

            self._log.write("END RequestId: {r}\n".format(
                r=self._context.aws_request_id))

            duration_in_millis = int(math.ceil(1000 * (end_time - self._start_time)))
            # The memory overhead of setting up the AWS Lambda environment
            # (when actually run in AWS) is roughly 14 MB
            max_memory_used_in_mb = (end_mem - self._start_mem) / 1048576 + 14

            self._log.write(
                "REPORT RequestId: {r}\tDuration: {d} ms\t"
                "Max Memory Used: {m} MB\n"
                .format(r=self._context.aws_request_id,
                        d=duration_in_millis,
                        m=max_memory_used_in_mb))

            log = self._log.getvalue()
            return LambdaCallSummary(duration_in_millis, max_memory_used_in_mb, log)
Project: simple_rl    Author: david-abel
def end_of_episode(self):
        '''
        Summary:
            Performs miscellaneous end of episode tasks (#printing out useful information, saving stuff, etc.)
        '''

        # self.model = self.weak_learners
        self.add_new_weak_learner()
        self.most_recent_episode = []

        if self.markov_window > 0:
            # num_sampled_trees = int(math.ceil(len(self.weak_learners) / 10.0))
            # self.model = random.sample(self.weak_learners, num_sampled_trees)
            self.model = self.weak_learners[-self.markov_window:]
        else:
            self.model = self.weak_learners

        Agent.end_of_episode(self)
Project: simple_rl    Author: david-abel
def _compute_walls(self, width, height):
        '''
        Args:
            width (int)
            height (int)

        Returns:
            (list): Contains (x,y) pairs that define wall locations.
        '''
        walls = []

        half_width = math.ceil(width / 2.0)
        half_height = math.ceil(height / 2.0)

        for i in range(1, width + 1):
            if i == (width + 1) / 3 or i == math.ceil(2 * (width + 1) / 3.0):
                continue
            walls.append((i, half_height))

        for j in range(1, height + 1):
            if j == (height + 1) / 3 or j == math.ceil(2 * (height + 1) / 3.0):
                continue
            walls.append((half_width, j))

        return walls
Project: PyPPSPP    Author: justas-
def get_data_hash(self, data_bytes):
        """Calculate Merkle's root hash of the given data bytes"""

        # Calculate tree parameters
        data_len = len(data_bytes)
        tree_populated_width = math.ceil(data_len / self._chunk_len)
        tree_height = math.ceil(math.log2(tree_populated_width))
        tree_width = int(math.pow(2, tree_height))

        tree_bottom_layer = ['\x00'] * tree_width
        with io.BytesIO(data_bytes) as b_data:
            self._initial_hasher(
                b_data,
                tree_populated_width,
                tree_bottom_layer
            )

        # Get Merkle's root hash
        mrh = self._calculate_root_hash(tree_bottom_layer)
        return mrh
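
Padding the leaf count up to the next power of two, as above, is a standard Merkle-tree step; a minimal sketch of just that rounding (the helper name is mine):

import math

def next_pow2(n):
    # smallest power of two >= n, for n >= 1
    return int(math.pow(2, math.ceil(math.log2(n))))

print([next_pow2(n) for n in (1, 3, 8, 9)])  # [1, 4, 8, 16]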
Project: pogom-linux    Author: PokeHunterProject
def run(self):
        while True:
            if self.scan_config.RESTART:
                self.scan_config.RESTART = False
                if self.scan_config.ACCOUNTS_CHANGED:
                    self.scan_config.ACCOUNTS_CHANGED = False
                    num_workers = min(max(int(math.ceil(len(config['ACCOUNTS']) / 23.0)), 3), 10)
                    self.api.resize_workers(num_workers)
                    self.api.add_accounts(config['ACCOUNTS'])

                    ScanMetrics.NUM_THREADS = num_workers
                    ScanMetrics.NUM_ACCOUNTS = len(config['ACCOUNTS'])

            if (not self.scan_config.SCAN_LOCATIONS or
                    not config.get('ACCOUNTS', None)):
                time.sleep(5)
                continue
            ScanMetrics.STEPS_COMPLETED = 0
            scan_start_time = time.time()
            self.scan()
            ScanMetrics.COMPLETE_SCAN_TIME = time.time() - scan_start_time
Project: flask-restler    Author: klen
def make_pagination_headers(limit, curpage, total, link_header=True):
    """Return Link Hypermedia Header."""
    lastpage = int(math.ceil(1.0 * total / limit) - 1)
    headers = {'X-Total-Count': str(total), 'X-Limit': str(limit),
               'X-Page-Last': str(lastpage), 'X-Page': str(curpage)}

    if not link_header:
        return headers

    base = "{}?%s".format(request.path)
    links = {}
    links['first'] = base % urlencode(dict(request.args, **{PAGE_ARG: 0}))
    links['last'] = base % urlencode(dict(request.args, **{PAGE_ARG: lastpage}))
    if curpage:
        links['prev'] = base % urlencode(dict(request.args, **{PAGE_ARG: curpage - 1}))
    if curpage < lastpage:
        links['next'] = base % urlencode(dict(request.args, **{PAGE_ARG: curpage + 1}))

    headers['Link'] = ",".join(['<%s>; rel="%s"' % (v, n) for n, v in links.items()])
    return headers


# pylama:ignore=R0201
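
The zero-based last-page computation above is the usual ceil-division pagination pattern; a minimal sketch (assuming a positive item count):

import math

def last_page(total_items, per_page):
    # zero-based index of the last page
    return int(math.ceil(1.0 * total_items / per_page) - 1)

print(last_page(95, 10))   # 9 (pages 0..9)
print(last_page(100, 10))  # 9
print(last_page(101, 10))  # 10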
Project: tensorboard    Author: dmlc
def make_sprite(label_img, save_path):
    import math
    import torch
    import torchvision
    # this ensures the sprite image has correct dimension as described in 
    # https://www.tensorflow.org/get_started/embedding_viz
    nrow = int(math.ceil((label_img.size(0)) ** 0.5))

    # augment images so that #images equals nrow*nrow
    label_img = torch.cat((label_img, torch.randn(nrow ** 2 - label_img.size(0), *label_img.size()[1:]) * 255), 0)

    # Dirty fix: no pixel are appended by make_grid call in save_image (https://github.com/pytorch/vision/issues/206)
    xx = torchvision.utils.make_grid(torch.Tensor(1, 3, 32, 32), padding=0)
    if xx.size(2) == 33:
        sprite = torchvision.utils.make_grid(label_img, nrow=nrow, padding=0)
        sprite = sprite[:, 1:, 1:]
        torchvision.utils.save_image(sprite, os.path.join(save_path, 'sprite.png'))
    else:
        torchvision.utils.save_image(label_img, os.path.join(save_path, 'sprite.png'), nrow=nrow, padding=0)
Project: oscars2016    Author: 0x0ece
def randint(minvalue, maxvalue):
    """Returns a random integer x with minvalue <= x <= maxvalue"""

    # Safety - get a lot of random data even if the range is fairly
    # small
    min_nbits = 32

    # The range of the random numbers we need to generate
    range = maxvalue - minvalue

    # Which is this number of bytes
    rangebytes = ceil(math.log(range, 2) / 8.)

    # Convert to bits, but make sure it's always at least min_nbits*2
    rangebits = max(rangebytes * 8, min_nbits * 2)

    # Take a random number of bits between min_nbits and rangebits
    nbits = random.randint(min_nbits, rangebits)

    return (read_random_int(nbits) % range) + minvalue
Project: oscars2016    Author: 0x0ece
def randomized_primality_testing(n, k):
    """Calculates whether n is composite (which is always correct) or
    prime (which is incorrect with error probability 2**-k)

    Returns False if the number if composite, and True if it's
    probably prime.
    """

    q = 0.5     # Property of the jacobi_witness function

    # t = int(math.ceil(k / math.log(1/q, 2)))
    t = ceil(k / math.log(1/q, 2))
    for i in range(t+1):
        x = randint(1, n-1)
        if jacobi_witness(x, n): return False

    return True
Project: oscars2016    Author: 0x0ece
def getprime(nbits):
    """Returns a prime number of max. 'math.ceil(nbits/8)*8' bits. In
    other words: nbits is rounded up to whole bytes.

    >>> p = getprime(8)
    >>> is_prime(p-1)
    0
    >>> is_prime(p)
    1
    >>> is_prime(p+1)
    0
    """

    while True:
        integer = read_random_int(nbits)

        # Make sure it's odd
        integer |= 1

        # Test for primeness
        if is_prime(integer): break

        # Retry if not prime

    return integer
Project: DeepWorks    Author: daigo0927
def combine_images(generated_images):

    total, height, width, ch = generated_images.shape
    cols = int(math.sqrt(total))
    rows = int(math.ceil(float(total)/cols))

    combined_image = np.zeros((height*rows, width*cols, 3),
                              dtype = generated_images.dtype)

    for index, image in enumerate(generated_images):
        i = int(index/cols)
        j = index % cols
        combined_image[height*i:height*(i+1), width*j:width*(j+1), :]\
            = image

    return combined_image
Project: CausalGAN    Author: mkocaoglu
def make_grid(tensor, nrow=8, padding=2,
              normalize=False, scale_each=False):
    """Code based on https://github.com/pytorch/vision/blob/master/torchvision/utils.py"""
    nmaps = tensor.shape[0]
    xmaps = min(nrow, nmaps)
    ymaps = int(math.ceil(float(nmaps) / xmaps))
    height, width = int(tensor.shape[1] + padding), int(tensor.shape[2] + padding)
    grid = np.zeros([height * ymaps + 1 + padding // 2, width * xmaps + 1 + padding // 2, 3], dtype=np.uint8)
    k = 0
    for y in range(ymaps):
        for x in range(xmaps):
            if k >= nmaps:
                break
            h, h_width = y * height + 1 + padding // 2, height - padding
            w, w_width = x * width + 1 + padding // 2, width - padding

            grid[h:h+h_width, w:w+w_width] = tensor[k]
            k = k + 1
    return grid
Project: CausalGAN    Author: mkocaoglu
def make_grid(tensor, nrow=8, padding=2,
              normalize=False, scale_each=False):
    """Code based on https://github.com/pytorch/vision/blob/master/torchvision/utils.py
    minor improvement, row/col was reversed"""
    nmaps = tensor.shape[0]
    ymaps = min(nrow, nmaps)
    xmaps = int(math.ceil(float(nmaps) / ymaps))
    height, width = int(tensor.shape[1] + padding), int(tensor.shape[2] + padding)
    grid = np.zeros([height * ymaps + 1 + padding // 2, width * xmaps + 1 + padding // 2, 3], dtype=np.uint8)
    k = 0
    for y in range(ymaps):
        for x in range(xmaps):
            if k >= nmaps:
                break
            h, h_width = y * height + 1 + padding // 2, height - padding
            w, w_width = x * width + 1 + padding // 2, width - padding

            grid[h:h+h_width, w:w+w_width] = tensor[k]
            k = k + 1
    return grid
Project: CausalGAN    Author: mkocaoglu
def make_grid(tensor, nrow=8, padding=2,
              normalize=False, scale_each=False):
    """Code based on https://github.com/pytorch/vision/blob/master/torchvision/utils.py"""
    nmaps = tensor.shape[0]
    xmaps = min(nrow, nmaps)
    ymaps = int(math.ceil(float(nmaps) / xmaps))
    height, width = int(tensor.shape[1] + padding), int(tensor.shape[2] + padding)
    grid = np.zeros([height * ymaps + 1 + padding // 2, width * xmaps + 1 + padding // 2, 3], dtype=np.uint8)
    k = 0
    for y in range(ymaps):
        for x in range(xmaps):
            if k >= nmaps:
                break
            h, h_width = y * height + 1 + padding // 2, height - padding
            w, w_width = x * width + 1 + padding // 2, width - padding

            grid[h:h+h_width, w:w+w_width] = tensor[k]
            k = k + 1
    return grid
Project: NeoAnalysis    Author: neoanalysis
def __nearest_pow_2(self,x):
        """
        Find power of two nearest to x
        >>> _nearest_pow_2(3)
        2.0
        >>> _nearest_pow_2(15)
        16.0
        :type x: float
        :param x: Number
        :rtype: Int
        :return: Nearest power of 2 to x
        """
        a = math.pow(2, math.ceil(np.log2(x)))
        b = math.pow(2, math.floor(np.log2(x)))
        if abs(a - x) < abs(b - x):
            return a
        else:
            return b

    # calculate spectrogram of signals
Project: NeoAnalysis    Author: neoanalysis
def _nearest_pow_2(x):
    """
    Find power of two nearest to x
    >>> _nearest_pow_2(3)
    2.0
    >>> _nearest_pow_2(15)
    16.0
    :type x: float
    :param x: Number
    :rtype: Int
    :return: Nearest power of 2 to x
    """
    a = M.pow(2, M.ceil(np.log2(x)))
    b = M.pow(2, M.floor(np.log2(x)))
    if abs(a - x) < abs(b - x):
        return a
    else:
        return b
Project: NeoAnalysis    Author: neoanalysis
def __draw_pk2(self):
        self.__cleanPk2()
        if self.units is not None:
            unique_units = np.unique(self.units)
            unique_units = unique_units.tolist()
            pca_1,pca_2 = self.PCAusedList.currentText().split("-")
            pca_1 = np.int(pca_1)-1
            pca_2 = np.int(pca_2)-1
            if self.wavePCAs[0].shape[0]>2:
                xs = self.wavePCAs[:,pca_1]
                ys = self.wavePCAs[:,pca_2]
                self.PcaScatterItem = []
                seg_num = 5000
                for i,ite_unit in enumerate(unique_units):
                    mask = self.units==ite_unit
                    temp_xs = xs[mask]
                    temp_ys = ys[mask]
                    segs = int(ceil(temp_xs.shape[0]/float(seg_num)))
                    for j in range(segs):
                        temp_xs_j = temp_xs[j*seg_num:(j+1)*seg_num]
                        temp_ys_j = temp_ys[j*seg_num:(j+1)*seg_num]
                        self.PcaScatterItem.append(pg.ScatterPlotItem(temp_xs_j,temp_ys_j,pen=self.colors[ite_unit],brush=self.colors[ite_unit],size=3,symbol="o"))
                for i in range(len(self.PcaScatterItem)):
                    self.pk2.addItem(self.PcaScatterItem[i])
Project: NeoAnalysis    Author: neoanalysis
def __nearest_pow_2(self,x):
        """
        Find power of two nearest to x
        >>> _nearest_pow_2(3)
        2.0
        >>> _nearest_pow_2(15)
        16.0
        :type x: float
        :param x: Number
        :rtype: Int
        :return: Nearest power of 2 to x
        """
        a = math.pow(2, math.ceil(np.log2(x)))
        b = math.pow(2, math.floor(np.log2(x)))
        if abs(a - x) < abs(b - x):
            return a
        else:
            return b

    # calculate spectrogram of signals
Project: NeoAnalysis    Author: neoanalysis
def _nearest_pow_2(x):
    """
    Find power of two nearest to x
    >>> _nearest_pow_2(3)
    2.0
    >>> _nearest_pow_2(15)
    16.0
    :type x: float
    :param x: Number
    :rtype: Int
    :return: Nearest power of 2 to x
    """
    a = M.pow(2, M.ceil(np.log2(x)))
    b = M.pow(2, M.floor(np.log2(x)))
    if abs(a - x) < abs(b - x):
        return a
    else:
        return b
Project: deb-python-cassandra-driver    Author: openstack
def test_partial_send(self, *args):
        c = self.make_connection()

        # only write the first four bytes of the OptionsMessage
        write_size = 4
        c._socket.send.side_effect = None
        c._socket.send.return_value = write_size
        c.handle_write(None, 0)

        msg_size = 9  # v3+ frame header
        expected_writes = int(math.ceil(float(msg_size) / write_size))
        size_mod = msg_size % write_size
        last_write_size = size_mod if size_mod else write_size
        self.assertFalse(c.is_defunct)
        self.assertEqual(expected_writes, c._socket.send.call_count)
        self.assertEqual(last_write_size, len(c._socket.send.call_args[0][0]))
Project: deb-python-cassandra-driver    Author: openstack
def test_partial_send(self, *args):
        c = self.make_connection()

        # only write the first four bytes of the OptionsMessage
        write_size = 4
        c.socket.send.side_effect = None
        c.socket.send.return_value = write_size
        c.handle_write()

        msg_size = 9  # v3+ frame header
        expected_writes = int(math.ceil(float(msg_size) / write_size))
        size_mod = msg_size % write_size
        last_write_size = size_mod if size_mod else write_size
        self.assertFalse(c.is_defunct)
        self.assertEqual(expected_writes, c.socket.send.call_count)
        self.assertEqual(last_write_size, len(c.socket.send.call_args[0][0]))
Project: Projects    Author: it2school
def compute_logarithmic_scale(min_, max_, min_scale, max_scale):
    """Compute an optimal scale for logarithmic"""
    if max_ <= 0 or min_ <= 0:
        return []
    min_order = int(floor(log10(min_)))
    max_order = int(ceil(log10(max_)))
    positions = []
    amplitude = max_order - min_order
    if amplitude <= 1:
        return []
    detail = 10.
    while amplitude * detail < min_scale * 5:
        detail *= 2
    while amplitude * detail > max_scale * 3:
        detail /= 2
    for order in range(min_order, max_order + 1):
        for i in range(int(detail)):
            tick = (10 * i / detail or 1) * 10 ** order
            tick = round_to_scale(tick, tick)
            if min_ <= tick <= max_ and tick not in positions:
                positions.append(tick)
    return positions
Project: DeepLearning_PlantDiseases    Author: MarkoArsenovic
def Occlusion_exp(image,occluding_size,occluding_stride,model,preprocess,classes,groundTruth):    
    img = np.copy(image)
    height, width,_= img.shape
    output_height = int(math.ceil((height-occluding_size)/occluding_stride+1))
    output_width = int(math.ceil((width-occluding_size)/occluding_stride+1))
    ocludedImages=[]
    for h in range(output_height):
        for w in range(output_width):
            #occluder region
            h_start = h*occluding_stride
            w_start = w*occluding_stride
            h_end = min(height, h_start + occluding_size)
            w_end = min(width, w_start + occluding_size)

            input_image = copy.copy(img)
            input_image[h_start:h_end,w_start:w_end,:] =  0
            ocludedImages.append(preprocess(Image.fromarray(input_image)))

    L = np.empty(output_height*output_width)
    L.fill(groundTruth)
    L = torch.from_numpy(L)
    tensor_images = torch.stack([img for img in ocludedImages])
    dataset = torch.utils.data.TensorDataset(tensor_images,L) 
    dataloader = torch.utils.data.DataLoader(dataset,batch_size=5,shuffle=False, num_workers=8) 

    heatmap=np.empty(0)
    model.eval()
    for data in dataloader:
        images, labels = data

        if use_gpu:
            images, labels = (images.cuda()), (labels.cuda(async=True))

        outputs = model(Variable(images))
        m = nn.Softmax()
        outputs=m(outputs)
        if use_gpu:
            outs = outputs.cpu()
        else:
            outs = outputs  # ensure outs is defined on the CPU-only path
        heatmap = np.concatenate((heatmap,outs[0:outs.size()[0],groundTruth].data.numpy()))

    return heatmap.reshape((output_height, output_width))
Project: code    Author: ActiveState
def percentile(N, percent, key=lambda x:x):
    """
    Find the percentile of a list of values.

    @parameter N - is a list of values. Note N MUST BE already sorted.
    @parameter percent - a float value from 0.0 to 1.0.
    @parameter key - optional key function to compute value from each element of N.

    @return - the percentile of the values
    """
    if not N:
        return None
    k = (len(N)-1) * percent
    f = math.floor(k)
    c = math.ceil(k)
    if f == c:
        return key(N[int(k)])
    d0 = key(N[int(f)]) * (c-k)
    d1 = key(N[int(c)]) * (k-f)
    return d0+d1

# median is 50th percentile.
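
A quick sanity check of the interpolating percentile above, assuming the function as defined:

data = [1, 2, 3, 4, 5]
print(percentile(data, 0.5))   # 3, the median falls exactly on an element
data = [1, 2, 3, 4]
print(percentile(data, 0.5))   # 2.5, interpolated between 2 and 3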
Project: code    Author: ActiveState
def gen_sieve(ceiling=None):
    if ceiling is not None:
        if ceiling % 2 == 0:
            ceiling -= 1
        highest_prime = math.ceil(math.sqrt(ceiling))
    last_val = 1
    found_primes = []
    yield 2
    while ceiling is None or ceiling > last_val:
        current_val = None
        while current_val is None:
            current_val = last_val = last_val + 2
            for prime, square in found_primes:
                if current_val < square: 
                    break
                if current_val % prime == 0:
                    current_val = None
                    break
        yield current_val
        if ceiling is None or highest_prime > last_val:
            found_primes.append((current_val, current_val ** 2))
Project: Harmonbot    Author: Harmon758
def stop_foraging(self):
        if self.last_action and self.last_action[0] == "foraging":
            item = self.last_action[1]
            time_spent = math.ceil(time.time() - self.last_action_time) / 60
            self.last_action = None
            self.last_action_time = None
            item_amount = math.floor(time_spent * self.foraging_rate)
            self.inventory[item] = self.inventory.get(item, 0) + item_amount
            if self.inventory[item] == 0:
                del self.inventory[item]
            self.foraging_xp += item_amount
            secondary_item = forageables[item][0]
            tertiary_item = forageables[item][1]
            secondary_amount = random.randint(0, item_amount)
            tertiary_amount = math.floor(random.randint(0, item_amount) / 100)
            self.inventory[secondary_item] = self.inventory.get(secondary_item, 0) + secondary_amount
            if self.inventory[secondary_item] == 0:
                del self.inventory[secondary_item]
            self.inventory[tertiary_item] = self.inventory.get(tertiary_item, 0) + tertiary_amount
            if self.inventory[tertiary_item] == 0:
                del self.inventory[tertiary_item]
            self.write_data()
            return item, time_spent, item_amount, secondary_amount, tertiary_amount
        else:
            return False, self.last_action
Project: Harmonbot    Author: Harmon758
def stop_woodcutting(self):
        if self.last_action and self.last_action[0] == "woodcutting":
            wood_type = self.last_action[1]
            time_spent = math.ceil(time.time() - self.last_action_time) / 60
            self.last_action = None
            self.last_action_time = None
            current_wood_lvl = wood_lvl(wood_type)
            wood_amount = math.floor(time_spent * self.wood_rate(wood_type) * self.woodcutting_rate)
            xp_amount = current_wood_lvl * wood_amount
            self.inventory[wood_type] = self.inventory.get(wood_type, 0) + wood_amount
            if self.inventory[wood_type] == 0:
                del self.inventory[wood_type]
            self.woodcutting_xp += xp_amount
            self.write_data()
            return wood_type, time_spent, wood_amount, xp_amount
        else:
            return False, self.last_action
Project: ssbio    Author: SBRG
def download_mutation_images(self):
        # TODO: dunno if this works
        import ipywidgets
        import math

        views = []
        for g in self.reference_gempro.genes:
            if g.protein.representative_structure:
                view = g.protein.view_all_mutations(alignment_type='seqalign', grouped=False, structure_opacity=0.5,
                                                    opacity_range=(0.6, 1), scale_range=(.5, 5))
                view._remote_call("setSize", target='Widget', args=['300px', '300px'])
                view.download_image(filename='{}_{}_mutations.png'.format(g.id, g.name))
                views.append(view)

        hboxes = [ipywidgets.HBox(views[i * 3:i * 3 + 3])
                  for i in range(int(math.ceil(len(views) / 3.0)))]
        vbox = ipywidgets.VBox(hboxes)
        return vbox
Project: charm-heat    Author: openstack
def __call__(self):
        total_processes = _calculate_workers()
        ctxt = {
            "service_name": self.service_name,
            "user": self.user,
            "group": self.group,
            "script": self.script,
            "admin_script": self.admin_script,
            "public_script": self.public_script,
            "processes": int(math.ceil(self.process_weight * total_processes)),
            "admin_processes": int(math.ceil(self.admin_process_weight *
                                             total_processes)),
            "public_processes": int(math.ceil(self.public_process_weight *
                                              total_processes)),
            "threads": 1,
            "usr_bin": git_determine_usr_bin(),
            "python_path": git_determine_python_path(),
        }
        return ctxt
Project: charm-keystone    Author: openstack
def __call__(self):
        total_processes = _calculate_workers()
        ctxt = {
            "service_name": self.service_name,
            "user": self.user,
            "group": self.group,
            "script": self.script,
            "admin_script": self.admin_script,
            "public_script": self.public_script,
            "processes": int(math.ceil(self.process_weight * total_processes)),
            "admin_processes": int(math.ceil(self.admin_process_weight *
                                             total_processes)),
            "public_processes": int(math.ceil(self.public_process_weight *
                                              total_processes)),
            "threads": 1,
            "usr_bin": git_determine_usr_bin(),
            "python_path": git_determine_python_path(),
        }
        return ctxt
Project: charm-keystone    Author: openstack
def __call__(self):
        total_processes = _calculate_workers()
        ctxt = {
            "service_name": self.service_name,
            "user": self.user,
            "group": self.group,
            "script": self.script,
            "admin_script": self.admin_script,
            "public_script": self.public_script,
            "processes": int(math.ceil(self.process_weight * total_processes)),
            "admin_processes": int(math.ceil(self.admin_process_weight *
                                             total_processes)),
            "public_processes": int(math.ceil(self.public_process_weight *
                                              total_processes)),
            "threads": 1,
            "usr_bin": git_determine_usr_bin(),
            "python_path": git_determine_python_path(),
        }
        return ctxt
Project: charm-keystone    Author: openstack
def __call__(self):
        total_processes = _calculate_workers()
        ctxt = {
            "service_name": self.service_name,
            "user": self.user,
            "group": self.group,
            "script": self.script,
            "admin_script": self.admin_script,
            "public_script": self.public_script,
            "processes": int(math.ceil(self.process_weight * total_processes)),
            "admin_processes": int(math.ceil(self.admin_process_weight *
                                             total_processes)),
            "public_processes": int(math.ceil(self.public_process_weight *
                                              total_processes)),
            "threads": 1,
            "usr_bin": git_determine_usr_bin(),
            "python_path": git_determine_python_path(),
        }
        return ctxt
Project: charm-keystone    Author: openstack
def __call__(self):
        total_processes = _calculate_workers()
        ctxt = {
            "service_name": self.service_name,
            "user": self.user,
            "group": self.group,
            "script": self.script,
            "admin_script": self.admin_script,
            "public_script": self.public_script,
            "processes": int(math.ceil(self.process_weight * total_processes)),
            "admin_processes": int(math.ceil(self.admin_process_weight *
                                             total_processes)),
            "public_processes": int(math.ceil(self.public_process_weight *
                                              total_processes)),
            "threads": 1,
            "usr_bin": git_determine_usr_bin(),
            "python_path": git_determine_python_path(),
        }
        return ctxt
Project: chainer-visualization    Author: hvy
def save_ims(filename, ims, dpi=100, scale=0.5):
    n, c, h, w = ims.shape

    rows = int(math.ceil(math.sqrt(n)))
    cols = int(round(math.sqrt(n)))

    fig, axes = plt.subplots(rows, cols, figsize=(w*cols/dpi*scale, h*rows/dpi*scale), dpi=dpi)

    for i, ax in enumerate(axes.flat):
        if i < n:
            ax.imshow(ims[i].transpose((1, 2, 0)))
        ax.set_xticks([])
        ax.set_yticks([])
        ax.axis('off')

    plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0.1, hspace=0.1)
    plt.savefig(filename, dpi=dpi, bbox_inches='tight', transparent=True)
    plt.clf()
    plt.close()
Project: supremm    Author: ubccr
def __init__(self, archivelist):
        self.node_archives = archivelist
        self.jobdir = os.path.dirname(archivelist[0])
        self.job_id = "1"
        self.end_str = "end"
        self.walltime = 9751
        self.nodecount = len(archivelist)
        self.acct = {"end_time": 12312, "id": 1, "uid": "sdf", "user": "werqw"}
        self.nodes = ["node" + str(i) for i in xrange(len(archivelist))]
        self._data = {}

        archive_starts = []
        archive_ends = []
        for archive in archivelist:
            context = pmapi.pmContext(c_pmapi.PM_CONTEXT_ARCHIVE, archive)
            mdata = context.pmGetArchiveLabel()
            archive_starts.append(datetime.datetime.utcfromtimestamp(math.floor(mdata.start)))
            archive_ends.append(datetime.datetime.utcfromtimestamp(math.ceil(context.pmGetArchiveEnd())))

        self.start_datetime = min(archive_starts)
        self.end_datetime = max(archive_ends)
Project: supremm    Author: ubccr
def adjust_job_start_end(job):
    """ Set the job node start and end times based on the presence of the special
     job-X-begin and job-X-end archives. Do nothing if these archives are absent
    """

    startarchive = "job-{0}-begin".format(job.job_id)
    endarchive = "job-{0}-end".format(job.job_id)

    for nodename, filepaths in job.rawarchives():
        begin = None
        end = None
        for fname in filepaths:
            filename = os.path.basename(fname)
            if filename.startswith(startarchive):
                context = pmapi.pmContext(c_pmapi.PM_CONTEXT_ARCHIVE, fname)
                mdata = context.pmGetArchiveLabel()
                begin = datetime.datetime.utcfromtimestamp(math.floor(mdata.start))

            if filename.startswith(endarchive):
                context = pmapi.pmContext(c_pmapi.PM_CONTEXT_ARCHIVE, fname)
                end = datetime.datetime.utcfromtimestamp(math.ceil(context.pmGetArchiveEnd()))

        job.setnodebeginend(nodename, begin, end)
Project: mongo_module_ninja    Author: RedBeard0531
def run_if_needed(base_file, then_file, now_file):
    # Python uses doubles for mtime so it can't precisely represent linux's
    # nanosecond precision. Round up to next whole second to ensure we get a
    # stable timestamp that is guaranteed to be >= the timestamp of the
    # compiler. This also avoids issues if the compiler is on a file system
    # with high-precision timestamps, but the build directory isn't.
    base_stat = os.stat(base_file)
    mtime = math.ceil(base_stat.st_mtime)
    atime = math.ceil(base_stat.st_atime)

    if (os.path.exists(then_file)
            and os.path.exists(now_file)
            and os.stat(then_file).st_mtime == mtime):
        return # Don't need to do anything.

    createIfNeeded(now_file)
    os.utime(now_file, None) # None means now

    createIfNeeded(then_file)
    os.utime(then_file, (atime, mtime))
Project: Vision2016    Author: Team3309
def col_profile(num_cols, height):
    profile = np.zeros(num_cols)
    peak_width = int(math.ceil(num_cols * 0.125))

    # average number of pixels should be height
    for i in range(0, peak_width):
        profile[i] = height
    # average number of pixels should be 10% of height
    for i in range(peak_width, num_cols - peak_width):
        profile[i] = height * .1
    # average number of pixels should be height
    for i in range(num_cols - peak_width, num_cols):
        profile[i] = height

    # normalize to between 0 and 1
    profile *= 1.0 / profile.max()
    return profile