Python numpy module: asarray() code examples

We extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.asarray().
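Before the project snippets, a minimal sketch (not from any of the projects below) of the core numpy.asarray() behavior: it converts sequences to arrays, honors an optional dtype, and returns an existing array unchanged instead of copying it when the dtype already matches.

import numpy as np

# Convert a plain Python list; the dtype is inferred.
a = np.asarray([1, 2, 3])                      # array([1, 2, 3])

# Request a specific dtype during conversion.
b = np.asarray([1, 2, 3], dtype=np.float32)

# Unlike np.array(), asarray() does not copy an ndarray
# that already has the requested dtype.
c = np.asarray(b)
assert c is b

# A dtype mismatch still forces a new array.
d = np.asarray(b, dtype=np.float64)
assert d is not b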

Project: AVSR-Deep-Speech    Author: pandeydivesh15    | project source | file source
def sparse_tuple_from(sequences, dtype=np.int32):
    r"""Creates a sparse representention of ``sequences``.
    Args:

        * sequences: a list of lists of type dtype where each element is a sequence

    Returns a ``tf.SparseTensor`` built from the tuple (indices, values, shape).
    """
    indices = []
    values = []

    for n, seq in enumerate(sequences):
        indices.extend(zip([n]*len(seq), range(len(seq))))
        values.extend(seq)

    indices = np.asarray(indices, dtype=np.int64)
    values = np.asarray(values, dtype=dtype)
    shape = np.asarray([len(sequences), indices.max(0)[1]+1], dtype=np.int64)

    return tf.SparseTensor(indices=indices, values=values, dense_shape=shape)
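A hypothetical usage sketch (not part of the project) showing what sparse_tuple_from builds for a small batch; only the numpy part is reproduced here, so it runs without TensorFlow:

import numpy as np

sequences = [[1, 2, 3], [4, 5]]  # a batch of two label sequences

indices = []
values = []
for n, seq in enumerate(sequences):
    indices.extend(zip([n] * len(seq), range(len(seq))))
    values.extend(seq)

indices = np.asarray(indices, dtype=np.int64)
values = np.asarray(values, dtype=np.int32)
shape = np.asarray([len(sequences), indices.max(0)[1] + 1], dtype=np.int64)

print(indices)  # [[0 0] [0 1] [0 2] [1 0] [1 1]]
print(values)   # [1 2 3 4 5]
print(shape)    # [2 3]  -> batch of 2, longest sequence has 3 elements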
Project: YellowFin_Pytorch    Author: JianGoForIt    | project source | file source
def pad_batch(mini_batch):
    mini_batch_size = len(mini_batch)
#     print mini_batch.shape
#     print mini_batch
    max_sent_len1 = int(np.max([len(x[0]) for x in mini_batch]))
    max_sent_len2 = int(np.max([len(x[1]) for x in mini_batch]))
#     print max_sent_len1, max_sent_len2
#     max_token_len = int(np.mean([len(val) for sublist in mini_batch for val in sublist]))
    main_matrix1 = np.zeros((mini_batch_size, max_sent_len1), dtype=np.int64)
    main_matrix2 = np.zeros((mini_batch_size, max_sent_len2), dtype=np.int64)
    for idx1, i in enumerate(mini_batch):
        for idx2, j in enumerate(i[0]):
            # write token id j at row idx1, position idx2
            main_matrix1[idx1, idx2] = j
    for idx1, i in enumerate(mini_batch):
        for idx2, j in enumerate(i[1]):
            main_matrix2[idx1, idx2] = j
    main_matrix1_t = Variable(torch.from_numpy(main_matrix1))
    main_matrix2_t = Variable(torch.from_numpy(main_matrix2))
#     print main_matrix1_t.size()
#     print main_matrix2_t.size()
    return [main_matrix1_t, main_matrix2_t]
#     return [Variable(torch.cat((main_matrix1_t, main_matrix2_t), 0))

# def pad_batch(mini_batch):
# #     print mini_batch
# #     print type(mini_batch)
# #     print mini_batch.shape
# #     for i, _ in enumerate(mini_batch):
# #         print i, _
#     return [Variable(torch.from_numpy(np.asarray(_))) for _ in mini_batch[0]]
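A hypothetical usage sketch for pad_batch (assuming the corrected indexing above and that the function's imports are in scope): each element of mini_batch is a pair of variable-length token-id sequences, and the result is a pair of zero-padded tensors.

import torch
from torch.autograd import Variable

mini_batch = [([3, 7, 1], [2, 9]),
              ([5, 4], [8, 6, 1, 2])]

m1, m2 = pad_batch(mini_batch)
print(m1.size())  # torch.Size([2, 3]) -- padded to the longest first sequence
print(m2.size())  # torch.Size([2, 4]) -- padded to the longest second sequence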
Project: RasterFairy    Author: Quasimondo    | project source | file source
def quantize_without_scipy(self, image):
        """" This function can be used if no scipy is availabe.
        It's 7 times slower though.
        """
        w,h = image.size
        px = np.asarray(image).copy()
        memo = {}
        for j in range(w):
            for i in range(h):
                key = (px[i,j,0],px[i,j,1],px[i,j,2])
                try:
                    val = memo[key]
                except KeyError:
                    val = self.convert(*key)
                    memo[key] = val
                px[i,j,0],px[i,j,1],px[i,j,2] = val
        return Image.fromarray(px).convert("RGB").quantize(palette=self.paletteImage())
Project: pybot    Author: spillai    | project source | file source
def xyz_array_to_pointcloud2(points, stamp=None, frame_id=None):
    '''
    Create a sensor_msgs.PointCloud2 from an array
    of points.
    '''
    msg = PointCloud2()
    if stamp:
        msg.header.stamp = stamp
    if frame_id:
        msg.header.frame_id = frame_id
    if len(points.shape) == 3:
        msg.height = points.shape[1]
        msg.width = points.shape[0]
    else:
        msg.height = 1
        msg.width = len(points)
    msg.fields = [
        PointField('x', 0, PointField.FLOAT32, 1),
        PointField('y', 4, PointField.FLOAT32, 1),
        PointField('z', 8, PointField.FLOAT32, 1)]
    msg.is_bigendian = False
    msg.point_step = 12
    msg.row_step = 12*points.shape[0]
    msg.is_dense = int(np.isfinite(points).all())
    msg.data = np.asarray(points, np.float32).tobytes()

    return msg
Project: AVSR-Deep-Speech    Author: pandeydivesh15    | project source | file source
def text_to_char_array(original):
    r"""
    Given a Python string ``original``, remove unsupported characters, map characters
    to integers and return a numpy array representing the processed string.
    """
    # Create list of sentence's words w/spaces replaced by ''
    result = original.replace(" '", "") # TODO: Deal with this properly
    result = result.replace("'", "")    # TODO: Deal with this properly
    result = result.replace(' ', '  ')
    result = result.split(' ')

    # Tokenize words into letters adding in SPACE_TOKEN where required
    result = np.hstack([SPACE_TOKEN if xt == '' else list(xt) for xt in result])

    # Map characters to indices
    result = np.asarray([SPACE_INDEX if xt == SPACE_TOKEN else ord(xt) - FIRST_INDEX for xt in result])

    # Return the encoded result
    return result
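SPACE_TOKEN, SPACE_INDEX and FIRST_INDEX are defined elsewhere in the project; with the usual DeepSpeech-style constants (an assumption here), the encoding looks like this:

SPACE_TOKEN = '<space>'
SPACE_INDEX = 0
FIRST_INDEX = ord('a') - 1  # so 'a' maps to 1, 'b' to 2, ...

print(text_to_char_array("hi there"))
# [ 8  9  0 20  8  5 18  5]  -- 'h'=8, 'i'=9, space=0, 't'=20, ...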
Project: AVSR-Deep-Speech    Author: pandeydivesh15    | project source | file source
def text_to_char_array(original):
    r"""
    Given a Python string ``original``, remove unsupported characters, map characters
    to integers and return a numpy array representing the processed string.
    """
    # Create list of sentence's words w/spaces replaced by ''
    result = original.replace(" '", "") # TODO: Deal with this properly
    result = result.replace("'", "")    # TODO: Deal with this properly
    result = result.replace(' ', '  ')
    result = result.split(' ')

    # Tokenize words into letters adding in SPACE_TOKEN where required
    result = np.hstack([SPACE_TOKEN if xt == '' else list(xt) for xt in result])

    # Map characters to indices
    result = np.asarray([SPACE_INDEX if xt == SPACE_TOKEN else (
        ord(xt) - FIRST_INDEX if ord(xt)>FIRST_INDEX else 27+int(xt)) for xt in result])
    # Return the encoded result
    return result
Project: AVSR-Deep-Speech    Author: pandeydivesh15    | project source | file source
def sparse_tuple_from(sequences, dtype=np.int32):
    r"""Creates a sparse representention of ``sequences``.
    Args:

        * sequences: a list of lists of type dtype where each element is a sequence

    Returns a ``tf.SparseTensor`` built from the tuple (indices, values, shape).
    """
    indices = []
    values = []

    for n, seq in enumerate(sequences):
        indices.extend(zip([n]*len(seq), range(len(seq))))
        values.extend(seq)

    indices = np.asarray(indices, dtype=np.int64)
    values = np.asarray(values, dtype=dtype)
    shape = np.asarray([len(sequences), indices.max(0)[1]+1], dtype=np.int64)

    return tf.SparseTensor(indices=indices, values=values, dense_shape=shape)
Project: treecat    Author: posterior    | project source | file source
def __init__(self, data, tree_prior, config):
        """Initialize a model with an empty subsample.

        Args:
            data: An [N, V]-shaped numpy array of real-valued data.
            tree_prior: A [K]-shaped numpy array of prior edge log odds, where
                K is the number of edges in the complete graph on V vertices.
            config: A global config dict.
        """
        assert isinstance(data, np.ndarray)
        data = np.asarray(data, np.float32)
        assert len(data.shape) == 2
        N, V = data.shape
        D = config['model_latent_dim']
        E = V - 1  # Number of edges in the tree.
        TreeTrainer.__init__(self, N, V, tree_prior, config)
        self._data = data
        self._latent = np.zeros([N, V, D], np.float32)

        # This is symmetric positive definite.
        self._vert_ss = np.zeros([V, D, D], np.float32)
        # This is arbitrary (not necessarily symmetric).
        self._edge_ss = np.zeros([E, D, D], np.float32)
        # This represents (count, mean, covariance).
        self._feat_ss = np.zeros([V, D, 1 + 1 + D], np.float32)
Project: spyking-circus    Author: spyking-circus    | project source | file source
def _num_samples(x):
    """Return number of samples in array-like x."""
    if hasattr(x, 'fit'):
        # Don't get num_samples from an ensemble's length!
        raise TypeError('Expected sequence or array-like, got '
                        'estimator %s' % x)
    if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
        if hasattr(x, '__array__'):
            x = np.asarray(x)
        else:
            raise TypeError("Expected sequence or array-like, got %s" %
                            type(x))
    if hasattr(x, 'shape'):
        if len(x.shape) == 0:
            raise TypeError("Singleton array %r cannot be considered"
                            " a valid collection." % x)
        return x.shape[0]
    else:
        return len(x)
Project: human-rl    Author: gsastry    | project source | file source
def process_rollout(rollout, gamma, lambda_=1.0):
    """
given a rollout, compute its returns and the advantage
"""
    batch_si = np.asarray(rollout.states)
    batch_a = np.asarray(rollout.actions)
    rewards = np.asarray(rollout.rewards)
    vpred_t = np.asarray(rollout.values + [rollout.r])

    rewards_plus_v = np.asarray(rollout.rewards + [rollout.r])
    batch_r = discount(rewards_plus_v, gamma)[:-1]
    delta_t = rewards + gamma * vpred_t[1:] - vpred_t[:-1]
    # this formula for the advantage comes from "Generalized Advantage Estimation":
    # https://arxiv.org/abs/1506.02438
    batch_adv = discount(delta_t, gamma * lambda_)

    features = rollout.features[0]
    return Batch(batch_si, batch_a, batch_adv, batch_r, rollout.terminal, features)
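The discount helper is defined elsewhere in the project; a common implementation (an assumption here, matching the usual A3C starter-agent code) computes the discounted cumulative sum with a first-order IIR filter over the reversed signal:

import numpy as np
import scipy.signal

def discount(x, gamma):
    # y[t] = x[t] + gamma * y[t + 1], computed back to front.
    return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]

print(discount(np.array([1.0, 1.0, 1.0]), 0.9))  # [2.71 1.9  1.  ]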
Project: human-rl    Author: gsastry    | project source | file source
def process_rollout(rollout, gamma, lambda_=1.0):
    """
given a rollout, compute its returns and the advantage
"""
    batch_si = np.asarray(rollout.states)
    batch_a = np.asarray(rollout.actions)
    rewards = np.asarray(rollout.rewards)
    vpred_t = np.asarray(rollout.values + [rollout.r])

    rewards_plus_v = np.asarray(rollout.rewards + [rollout.r])
    batch_r = discount(rewards_plus_v, gamma)[:-1]
    delta_t = rewards + gamma * vpred_t[1:] - vpred_t[:-1]
    # this formula for the advantage comes from "Generalized Advantage Estimation":
    # https://arxiv.org/abs/1506.02438
    batch_adv = discount(delta_t, gamma * lambda_)

    features = rollout.features[0]
    return Batch(batch_si, batch_a, batch_adv, batch_r, rollout.terminal, features)
Project: dl-classification    Author: matthieuo    | project source | file source
def has_tomatoes(self, im_path):
        # load the image
        im = Image.open(im_path)
        im = np.asarray(im, dtype=np.float32)
        im = self.prepare_image(im)

        # launch an inference with the image
        pred = self.sess.run(
            self.output_logits, feed_dict={
                self.img_feed: im.eval(
                    session=self.sess)})

        if np.argmax(pred) == 0:
            print("NOT a tomato! (confidence:", pred[0, 0], "%)")
        else:
            print("We have a tomato! (confidence:", pred[0, 1], "%)")
Project: snake_game    Author: wing3s    | project source | file source
def play(self, nb_rounds):
        img_saver = save_image()
        next(img_saver)

        game_cnt = it.count(1)
        for i in range(nb_rounds):
            game = self.game(width=self.width, height=self.height)
            screen, _ = next(game)
            img_saver.send(screen)
            frame_cnt = it.count()
            try:
                state = np.asarray([screen] * self.nb_frames)
                while True:
                    next(frame_cnt)
                    act_idx = np.argmax(
                        self.model.predict(state[np.newaxis]), axis=-1)[0]
                    screen, _ = game.send(self.actions[act_idx])
                    state = np.roll(state, 1, axis=0)
                    state[0] = screen
                    img_saver.send(screen)
            except StopIteration:
                print('Saved %4i frames for game %3i' % (
                    next(frame_cnt), next(game_cnt)))
        img_saver.close()
Project: detection-2016-nipsws    Author: imatge-upc    | project source | file source
def draw_sequences_test(step, action, qval, draw, region_image, background, path_testing_folder,
                        region_mask, image_name, save_boolean):
    aux = np.asarray(region_image, np.uint8)
    img_offset = (1000 * step, 70)
    footnote_offset = (1000 * step, 550)
    q_predictions_offset = (1000 * step, 500)
    mask_img_offset = (1000 * step, 700)
    img_for_paste = Image.fromarray(aux)
    background.paste(img_for_paste, img_offset)
    mask_img = Image.fromarray(255 * region_mask)
    background.paste(mask_img, mask_img_offset)
    footnote = 'action: ' + str(action)
    q_val_predictions_text = str(qval)
    draw.text(footnote_offset, footnote, (0, 0, 0), font=font)
    draw.text(q_predictions_offset, q_val_predictions_text, (0, 0, 0), font=font)
    file_name = path_testing_folder + image_name + '.png'
    if save_boolean == 1:
        background.save(file_name)
    return background
Project: alchemy    Author: voidrank    | project source | file source
def encode(obj):
    # return single RLE
    if len(obj.shape) == 2:
        mask = obj
        masks = np.asarray([mask])
        masks = _masks_as_fortran_order(masks)
        rles = _mask.encode(masks)
        rle = rles[0]
        return rle
    # return RLEs
    elif len(obj.shape) == 3:
        masks = obj
        masks = _masks_as_fortran_order(masks)
        rles = _mask.encode(masks)
        return rles
    else:
        raise Exception("Not Implement")
Project: scipy2017    Author: deeplycloudy    | project source | file source
def test_prune_from_top():
    d, traversal = get_four_level_data_traversal()
    reduced_storm_id = [1,]
    d = traversal.reduce_to_entities('storm_id', reduced_storm_id)
    reduced_stroke_id = np.asarray([])
    reduced_flash_id = np.asarray([])
    reduced_trig_id = np.asarray([])
    assert_equal(d['storm_id'], reduced_storm_id)
    assert_equal(d['flash_id'], reduced_flash_id)
    assert_equal(d['stroke_id'], reduced_stroke_id)
    assert_equal(d['trig_id'], reduced_trig_id)

    reduced_storm_id = [2,]
    d = traversal.reduce_to_entities('storm_id', reduced_storm_id)
    reduced_flash_id = [4,5,6,7,8]
    reduced_stroke_id = [13,14,15,19,20,23,46]
    reduced_trig_id = [18,19,20,22,23,25,26,30,31,32]    
    assert_equal(d['storm_id'].data, reduced_storm_id)
    assert_equal(d['flash_id'].data, reduced_flash_id)
    assert_equal(d['stroke_id'].data, reduced_stroke_id)
    assert_equal(d['trig_id'].data, reduced_trig_id)
Project: vae-npvc    Author: JeremyCCHsu    | project source | file source
def _validate(self, machine, n=10):
        N = n * n
        z = np.random.normal(0., 1., size=[n, self.arch['z_dim']])
        z = np.concatenate([z] * n, axis=1)
        z = np.reshape(z, [N, -1]).astype(np.float32)  # consecutive rows
        y = np.asarray(
            [[5,   0,  0 ],
             [9,   0,  0 ],
             [12,  0,  0 ],
             [17,  0,  0 ],
             [19,  0,  0 ],
             [161, 0,  0 ],
             [170, 0,  0 ],
             [170, 16, 0 ],
             [161, 9,  4 ],
             [19,  24, 50]],
            dtype=np.int64)
        y = np.concatenate([y] * n, axis=0)
        Z = tf.constant(z)
        Y = tf.constant(y)
        Xh = machine.generate(Z, Y) # 100, 64, 64, 3
        Xh = make_png_thumbnail(Xh, n)
        return Xh
Project: Python-Machine-Learning-By-Example    Author: PacktPublishing    | project source | file source
def get_likelihood(term_document_matrix, label_index, smoothing=0):
    """ Compute likelihood based on training samples
    Args:
        term_document_matrix (sparse matrix)
        label_index (grouped sample indices by class)
        smoothing (integer, additive Laplace smoothing parameter)
    Returns:
        dictionary, with class as key, corresponding conditional probability P(feature|class) vector as value
    """
    likelihood = {}
    for label, index in label_index.items():
        likelihood[label] = term_document_matrix[index, :].sum(axis=0) + smoothing
        likelihood[label] = np.asarray(likelihood[label])[0]
        total_count = likelihood[label].sum()
        likelihood[label] = likelihood[label] / float(total_count)
    return likelihood
Project: dask_gdf    Author: gpuopenanalytics    | project source | file source
def reset_index(self):
        """Reset index to range based
        """
        dfs = self.to_delayed()
        sizes = np.asarray(compute(*map(delayed(len), dfs)))
        prefixes = np.zeros_like(sizes)
        prefixes[1:] = np.cumsum(sizes[:-1])

        @delayed
        def fix_index(df, startpos):
            return df.set_index(np.arange(start=startpos,
                                          stop=startpos + len(df),
                                          dtype=np.intp))

        outdfs = [fix_index(df, startpos)
                  for df, startpos in zip(dfs, prefixes)]
        return from_delayed(outdfs)
Project: pycma    Author: CMA-ES    | project source | file source
def __init__(self, dimension,
                 constant_trace='None',
                 randn=np.random.randn,
                 quadratic=False,
                 **kwargs):
        try:
            self.dimension = len(dimension)
            standard_deviations = np.asarray(dimension)
        except TypeError:
            self.dimension = dimension
            standard_deviations = np.ones(dimension)
        assert self.dimension == len(standard_deviations)

        self.C = standard_deviations**2
        "covariance matrix diagonal"
        self.constant_trace = constant_trace
        self.randn = randn
        self.quadratic = quadratic
        self.count_tell = 0
Project: pycma    Author: CMA-ES    | project source | file source
def norm(self, x):
        """compute the Mahalanobis norm that is induced by the
        statistical model / sample distribution, specifically by
        covariance matrix ``C``. The expected Mahalanobis norm is
        about ``sqrt(dimension)``.

        Example
        -------
        >>> import cma, numpy as np
        >>> sm = cma.sampler.GaussFullSampler(np.ones(10))
        >>> x = np.random.randn(10)
        >>> d = sm.norm(x)

        `d` is the norm "in" the true sample distribution,
        sampled points have a typical distance of ``sqrt(2*sm.dim)``,
        where ``sm.dim`` is the dimension, and an expected distance of
        close to ``dim**0.5`` to the sample mean zero. In the example,
        `d` is the Euclidean distance, because C = I.
        """
        return sum(np.asarray(x)**2 / self.C)**0.5
Project: pycma    Author: CMA-ES    | project source | file source
def elli(self, x, rot=0, xoffset=0, cond=1e6, actuator_noise=0.0, both=False):
        """Ellipsoid test objective function"""
        x = np.asarray(x)
        if not isscalar(x[0]):  # parallel evaluation
            return [self.elli(xi, rot) for xi in x]  # could save 20% overall
        if rot:
            x = rotate(x)
        N = len(x)
        if actuator_noise:
            x = x + actuator_noise * np.random.randn(N)

        ftrue = sum(cond**(np.arange(N) / (N - 1.)) * (x + xoffset)**2) \
                if N > 1 else (x + xoffset)**2

        alpha = 0.49 + 1. / N
        beta = 1
        felli = np.random.rand(1)[0]**beta * ftrue * \
                max(1, (10.**9 / (ftrue + 1e-99))**(alpha * np.random.rand(1)[0]))
        # felli = ftrue + 1*np.random.randn(1)[0] / (1e-30 +
        #                                           np.abs(np.random.randn(1)[0]))**0
        if both:
            return (felli, ftrue)
        else:
            # return felli  # possibly noisy value
            return ftrue  # + np.random.randn()
Project: pycma    Author: CMA-ES    | project source | file source
def __call__(self, *args, **kwargs):
        # late initialization if necessary
        try:
            if not self.__initialized:
                raise AttributeError
        except AttributeError:
            Function.initialize(self, None)
        # find the "right" callable
        callable_ = self.__callable
        if callable_ is None:
            for name in self.function_names_to_evaluate_first_found:
                try:
                    callable_ = getattr(self, name)
                    break
                except AttributeError:
                    pass
        # call with each vector
        if callable_ is not None:
            X, list_revert = utils.as_vector_list(args[0])
            self.evaluations += len(X)
            return list_revert([
                    callable_(np.asarray(x), *args[1:], **kwargs)
                    for x in X])
        else:
            self.evaluations += 1  # somewhat bound to fail
Project: pycma    Author: CMA-ES    | project source | file source
def __init__(self, fitness_function, multipliers=None, zero=None):
        """
        :param fitness_function: a `callable` object
        :param multipliers: coordinate-wise multipliers.
        :param zero: defines a new zero in preimage space, that is,
            calling the `ScaleCoordinates` instance returns
            ``fitness_function(multipliers * (x - zero))``.

        If the length of ``multipliers`` or ``zero`` does not match
        that of the given input, superfluous trailing elements are
        ignored or the last element is recycled.
        """
        ComposedFunction.__init__(self,
                [fitness_function, self.scale_and_offset])
        self.multiplier = multipliers
        if self.multiplier is not None:
            self.multiplier = np.asarray(self.multiplier, dtype=float)
        self.zero = zero
        if zero is not None:
            self.zero = np.asarray(zero, dtype=float)
Project: pycma    Author: CMA-ES    | project source | file source
def monotoneTFosc(f):
    """Maps [-inf,inf] to [-inf,inf] with different constants
    for positive and negative part.

    """
    if np.isscalar(f):
        if f > 0.:
            f = np.log(f) / 0.1
            f = np.exp(f + 0.49 * (np.sin(f) + np.sin(0.79 * f))) ** 0.1
        elif f < 0.:
            f = np.log(-f) / 0.1
            f = -np.exp(f + 0.49 * (np.sin(0.55 * f) + np.sin(0.31 * f))) ** 0.1
        return f
    else:
        f = np.asarray(f)
        g = f.copy()
        idx = (f > 0)
        g[idx] = np.log(f[idx]) / 0.1
        g[idx] = np.exp(g[idx] + 0.49 * (np.sin(g[idx]) + np.sin(0.79 * g[idx])))**0.1
        idx = (f < 0)
        g[idx] = np.log(-f[idx]) / 0.1
        g[idx] = -np.exp(g[idx] + 0.49 * (np.sin(0.55 * g[idx]) + np.sin(0.31 * g[idx])))**0.1
        return g
Project: convolutional-pose-machines-tensorflow    Author: timctho    | project source | file source
def make_heatmaps_from_joints(input_size, heatmap_size, gaussian_variance, batch_joints):
    # Generate ground-truth heatmaps from ground-truth 2d joints
    scale_factor = input_size // heatmap_size
    batch_gt_heatmap_np = []
    for i in range(batch_joints.shape[0]):
        gt_heatmap_np = []
        invert_heatmap_np = np.ones(shape=(heatmap_size, heatmap_size))
        for j in range(batch_joints.shape[1]):
            cur_joint_heatmap = make_gaussian(heatmap_size,
                                              gaussian_variance,
                                              center=(batch_joints[i][j] // scale_factor))
            gt_heatmap_np.append(cur_joint_heatmap)
            invert_heatmap_np -= cur_joint_heatmap
        gt_heatmap_np.append(invert_heatmap_np)
        batch_gt_heatmap_np.append(gt_heatmap_np)
    batch_gt_heatmap_np = np.asarray(batch_gt_heatmap_np)
    batch_gt_heatmap_np = np.transpose(batch_gt_heatmap_np, (0, 2, 3, 1))

    return batch_gt_heatmap_np
Project: QUANTAXIS    Author: yutiansut    | project source | file source
def QA_fetch_get_stock_risk(name, startDate, endDate):
    try:
        from WindPy import w
    except ImportError:
        QA_util_log_info('No WindPY Module!')
    w.start()
    if not QA_util_date_valid(endDate):
        QA_util_log_info("wrong date")
    else:
        data = w.wsd(name, "annualyeild_100w,annualyeild_24m,annualyeild_60m,\
                    annualstdevr_100w,annualstdevr_24m,annualstdevr_60m,beta_100w,\
                    beta_24m,beta_60m,avgreturn,avgreturny,stdevry,stdcof,\
                    risk_nonsysrisk1,r2,alpha2,beta,sharpe,treynor,jensen,jenseny,betadf",
                     startDate, endDate, "period=2;returnType=1;index=000001.SH;yield=1")
        if (data.ErrorCode == 0):
            QA_util_log_info("Connent to Wind successfully")
    return pd.DataFrame(np.asarray(data.Data).T, columns=data.Fields, index=data.Times)
Project: segmentation_DLMI    Author: imatge-upc    | project source | file source
def load_channels(self, normalize=False):
        modalities = []
        modalities.append(nib.load(self.FLAIR_FILE))
        modalities.append(nib.load(self.T1_FILE))

        channels = np.zeros( modalities[0].shape + (2,), dtype=np.float32)

        for index_mod, mod in enumerate(modalities):
            if self.data_augmentation:
                channels[:, :, :, index_mod] = flip_plane(np.asarray(mod.dataobj))
            else:
                channels[:,:,:,index_mod] = np.asarray(mod.dataobj)

            if normalize:
                channels[:, :, :, index_mod] = normalize_image(channels[:,:,:,index_mod], mask = self.load_ROI_mask() )


        return channels
Project: segmentation_DLMI    Author: imatge-upc    | project source | file source
def load_channels(self, normalize=False):
        modalities = []
        modalities.append(nib.load(self.FLAIR_FILE))
        modalities.append(nib.load(self.T1_FILE))

        channels = np.zeros( modalities[0].shape + (2,), dtype=np.float32)

        for index_mod, mod in enumerate(modalities):
            if self.data_augmentation:
                channels[:, :, :, index_mod] = flip_plane(np.asarray(mod.dataobj))
            else:
                channels[:,:,:,index_mod] = np.asarray(mod.dataobj)

            if normalize:
                channels[:, :, :, index_mod] = normalize_image(channels[:,:,:,index_mod], mask = self.load_ROI_mask() )


        return channels
Project: segmentation_DLMI    Author: imatge-upc    | project source | file source
def load_channels(self):
        flair = nib.load(self.FLAIR_FILE)
        t1 = nib.load(self.T1_FILE)
        t1c = nib.load(self.T1c_FILE)
        t2 = nib.load(self.T2_FILE)
        to_int = lambda b: 1 if b else 0
        num_input_modalities = to_int(self.booleanFLAIR) + to_int(self.booleanT1) + to_int(self.booleanT1c) + to_int(
            self.booleanT2)

        channels = np.zeros((num_input_modalities,) + flair.shape, dtype=np.float32)

        # Fill the channels in a fixed order (FLAIR, T1, T1c, T2),
        # simply skipping any modality that is disabled.
        index = 0
        for flag, img in ((self.booleanFLAIR, flair), (self.booleanT1, t1),
                          (self.booleanT1c, t1c), (self.booleanT2, t2)):
            if flag:
                channels[index] = np.asarray(img.dataobj)
                index += 1

        return channels
Project: segmentation_DLMI    Author: imatge-upc    | project source | file source
def load_channels(self, normalize=False):

        modalities = []
        modalities.append(nib.load(self.FLAIR_FILE))
        modalities.append(nib.load(self.T1_FILE))
        modalities.append(nib.load(self.T1c_FILE))
        modalities.append(nib.load(self.T2_FILE))

        channels = np.zeros(modalities[0].shape + (4,), dtype=np.float32)

        if normalize:
            mask = self.load_ROI_mask()

        for index_mod, mod in enumerate(modalities):
            if self.data_augmentation_flag:
                channels[:, :, :, index_mod] = flip_plane(np.asarray(mod.dataobj), plane=self.data_augmentation)
            else:
                channels[:, :, :, index_mod] = np.asarray(mod.dataobj)

            if normalize:
                channels[:, :, :, index_mod] = normalize_image(channels[:, :, :, index_mod], mask=mask)

        return channels
Project: segmentation_DLMI    Author: imatge-upc    | project source | file source
def load_channels(self, normalize=False):
        modalities = []
        modalities.append(nib.load(self.T2_FILE))
        modalities.append(nib.load(self.T1_FILE))

        channels = np.zeros(modalities[0].shape + (2,), dtype=np.float32)

        for index_mod, mod in enumerate(modalities):
            if self.data_augmentation:
                channels[:, :, :, index_mod] = flip_plane(np.asarray(mod.dataobj), plane=self.data_augmentation)
            else:
                channels[:,:,:,index_mod] = np.asarray(mod.dataobj)

            if normalize:
                channels[:, :, :, index_mod] = normalize_image(channels[:,:,:,index_mod], mask = self.load_ROI_mask() )


        return channels
Project: namegenderclassifier    Author: joaoalvarenga    | project source | file source
def save(self, model_filename):
        self.__model.save("%s.model" % model_filename)
        np.save("%s.tvocab" % model_filename, np.asarray(self.__trigrams))
        np.save("%s.cvocab" % model_filename, np.asarray(self.__chars))
        np.save("%s.classes" % model_filename, np.asarray(self.__classes))
Project: TDOSE    Author: kasperschmidt    | project source | file source
def galfit_getheadervalue(compnumber,key,headerinfo):
    """
    Return the parameters of a GALFIT model header

    --- INPUT ---
    compnumber      A string containing the component number to extract info for (number after "COMP_" in header)
    key             The key to extract (keyword after "COMPNUMBER_" in header)
    headerinfo      Header to extract info from.

    """
    hdrinfo = headerinfo[compnumber+'_'+key]

    if '*' in hdrinfo: # handling parameters fixed in GALFIT run
        hdrinfo = hdrinfo.replace('*','')

    if '+/-' in hdrinfo:
        value   = float(hdrinfo.split('+/-')[0])
        error   = float(hdrinfo.split('+/-')[1])
    else:
        value   = float(hdrinfo[1:-1])
        error   = None

    if (key == 'XC') or (key == 'YC'):
        xrange, yrange = headerinfo['FITSECT'][1:-1].split(',')
        xrange = np.asarray(xrange.split(':')).astype(float)
        yrange = np.asarray(yrange.split(':')).astype(float)
        if key == 'XC':
            value = value - xrange[0] + 1.0
        if key == 'YC':
            value = value - yrange[0] + 1.0

    return value, error
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
Project: GELUs    Author: hendrycks    | project source | file source
def to_categorical(y, nb_classes):
    y = np.asarray(y, dtype='int32')
    if not nb_classes:
        nb_classes = np.max(y)+1
    Y = np.zeros((len(y), nb_classes))
    for i in range(len(y)):
        Y[i, y[i]] = 1.
    return Y

# load training and testing data
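For illustration (not part of the original file), a quick check of the one-hot encoding:

import numpy as np

print(to_categorical([0, 2, 1], 3))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]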
Project: onto-lstm    Author: pdasigi    | project source | file source
def make_one_hot(indices):
        '''
        Accepts an array of indices, and converts them to a one-hot matrix
        '''
        # Making indices 0 based.
        indices = numpy.asarray(indices) - min(indices)
        num_classes = max(indices) + 1
        one_hot_indices = numpy.zeros((len(indices), num_classes))
        for i, ind in enumerate(indices):
            one_hot_indices[i][ind] = 1.0
        return one_hot_indices

    # TODO: Separate methods for returning word inds and conc inds
Project: onto-lstm    Author: pdasigi    | project source | file source
def get_embedding_matrix(self, embedding_file, onto_aware):
        # embedding_file is a tsv with words in the first column and vectors in the
        # remaining columns. This will populate the synset embedding if onto_aware
        # is True, or else the word embedding.
        # For words that do not have vectors, we sample from a uniform distribution
        # in the range of the max and min of the word embedding.
        embedding_map = {}
        rep_max = -float("inf")
        rep_min = float("inf")
        for line in gzip.open(embedding_file, 'rt'):
            ln_parts = line.strip().split()
            if len(ln_parts) == 2:
                continue
            element = ln_parts[0]
            vec = numpy.asarray([float(f) for f in ln_parts[1:]])
            vec_max, vec_min = vec.max(), vec.min()
            if vec_max > rep_max:
                rep_max = vec_max
            if vec_min < rep_min:
                rep_min = vec_min
            embedding_map[element] = vec
        embedding_dim = len(vec)
        target_index = self.synset_index if onto_aware else self.word_index
        # Initialize target embedding with all random vectors
        target_vocab_size = self.get_vocab_size(onto_aware=onto_aware)
        target_embedding = self.numpy_rng.uniform(low=rep_min, high=rep_max, size=(target_vocab_size, embedding_dim))
        num_found_elements = 0
        num_all_elements = 0
        for element in target_index:
            num_all_elements += 1
            if element in embedding_map:
                vec = embedding_map[element]
                target_embedding[target_index[element]] = vec
                num_found_elements += 1
        print("Found vectors for %.4f of the words" %
              (float(num_found_elements) / num_all_elements), file=sys.stderr)
        return target_embedding
Project: composability_bench    Author: IntelPython    | project source | file source
def prepare_default(N=100, dtype=np.double):
    return ( np.asarray(np.random.rand(N, N), dtype=dtype), )
    #return toc/trials, (4/3)*N*N*N*1e-9, times
Project: composability_bench    Author: IntelPython    | project source | file source
def prepare_eig(N=100, dtype=np.double):
    N/=4
    return ( np.asarray(np.random.rand(int(N), int(N)), dtype=dtype), )
Project: composability_bench    Author: IntelPython    | project source | file source
def prepare_svd(N=100, dtype=np.double):
    N/=2
    return ( np.asarray(np.random.rand(int(N), int(N)), dtype=dtype), False )

#det:    return toc/trials, N*N*N*1e-9, times
Project: composability_bench    Author: IntelPython    | project source | file source
def prepare_dot(N=100, dtype=np.double):
    N=N*N*10
    A = np.asarray(np.random.rand(int(N)), dtype=dtype)
    return (A, A)
    #return 1.0*toc/(trials), 2*N*N*N*1e-9, times
Project: composability_bench    Author: IntelPython    | project source | file source
def prepare_dgemm(N=100, trials=3, dtype=np.double):
    LARGEDIM = int(N*2)
    KSIZE = int(N/2)
    A = np.asarray(np.random.rand(LARGEDIM, KSIZE), dtype=dtype)
    B = np.asarray(np.random.rand(KSIZE, LARGEDIM), dtype=dtype)
    return (A, B)
Project: nidaqmx-python    Author: ni    | project source | file source
def write_one_sample_one_line(self, data, timeout=10):
        """
        Writes a single boolean sample to a single digital output
        channel in a task. The channel can contain only one digital
        line.

        Args:
            data (int): Specifies the boolean sample to write to the
                task.
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for the method to write all samples.
                NI-DAQmx performs a timeout check only if the method
                must wait before it writes data. This method returns an
                error if the time elapses. The default timeout is 10
                seconds. If you set timeout to
                nidaqmx.constants.WAIT_INFINITELY, the method waits
                indefinitely. If you set timeout to 0, the method tries
                once to write the submitted samples. If the method could
                not write all the submitted samples, it returns an error
                and the number of samples successfully written.
        """
        auto_start = (self._auto_start if self._auto_start is not 
                      AUTO_START_UNSET else True)

        numpy_array = numpy.asarray([data], dtype=bool)

        return _write_digital_lines(
            self._handle, numpy_array, 1, auto_start, timeout)
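A minimal usage sketch for this writer method (the device name Dev1 is a placeholder, and real NI hardware is required to actually run it):

import nidaqmx
from nidaqmx.stream_writers import DigitalSingleChannelWriter

with nidaqmx.Task() as task:
    task.do_channels.add_do_chan('Dev1/port0/line0')  # one digital line
    writer = DigitalSingleChannelWriter(task.out_stream)
    writer.write_one_sample_one_line(True)   # drive the line high
    writer.write_one_sample_one_line(False)  # drive the line low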
Project: nidaqmx-python    Author: ni    | project source | file source
def write_one_sample_port_byte(self, data, timeout=10):
        """
        Writes a single 8-bit unsigned integer sample to a single
        digital output channel in a task.

        Use this method for devices with up to 8 lines per port.

        Args:
            data (int): Specifies the 8-bit unsigned integer sample to
                write to the task.
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for the method to write all samples.
                NI-DAQmx performs a timeout check only if the method
                must wait before it writes data. This method returns an
                error if the time elapses. The default timeout is 10
                seconds. If you set timeout to
                nidaqmx.constants.WAIT_INFINITELY, the method waits
                indefinitely. If you set timeout to 0, the method tries
                once to write the submitted samples. If the method could
                not write all the submitted samples, it returns an error
                and the number of samples successfully written.
        """
        auto_start = (self._auto_start if self._auto_start is not 
                      AUTO_START_UNSET else True)

        numpy_array = numpy.asarray([data], dtype=numpy.uint8)

        return _write_digital_u_8(
            self._handle, numpy_array, 1, auto_start, timeout)
Project: nidaqmx-python    Author: ni    | project source | file source
def write_one_sample_port_uint16(self, data, timeout=10):
        """
        Writes a single 16-bit unsigned integer sample to a single
        digital output channel in a task.

        Use this method for devices with up to 16 lines per port.

        Args:
            data (int): Specifies the 16-bit unsigned integer sample to
                write to the task.
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for the method to write all samples.
                NI-DAQmx performs a timeout check only if the method
                must wait before it writes data. This method returns an
                error if the time elapses. The default timeout is 10
                seconds. If you set timeout to
                nidaqmx.constants.WAIT_INFINITELY, the method waits
                indefinitely. If you set timeout to 0, the method tries
                once to write the submitted samples. If the method could
                not write all the submitted samples, it returns an error
                and the number of samples successfully written.
        """
        auto_start = (self._auto_start if self._auto_start is not 
                      AUTO_START_UNSET else True)

        numpy_array = numpy.asarray([data], dtype=numpy.uint16)

        return _write_digital_u_16(
            self._handle, numpy_array, 1, auto_start, timeout)
Project: pyfds    Author: emtpb    | project source | file source
def test_dimension():
    dim = fls.Dimension(3, 0.1)
    assert np.allclose(dim.vector, np.asarray([0, 0.1, 0.2]))
    assert dim.get_index(0.1) == 1
Project: YellowFin_Pytorch    Author: JianGoForIt    | project source | file source
def gen_minibatch(tokens, features, labels, mini_batch_size, shuffle= True):
    tokens = np.asarray(tokens)[np.where(labels!=0.5)[0]]
    if type(features) is np.ndarray:
      features = np.asarray(features)[np.where(labels!=0.5)[0]]
    else:
      features = np.asarray(features.todense())[np.where(labels!=0.5)[0]]
    labels = np.asarray(labels)[np.where(labels!=0.5)[0]]
#     print tokens.shape
#     print tokens[0]
    for token, feature, label in iterate_minibatches(tokens, features, labels, mini_batch_size, shuffle = shuffle):
#         print 'token', type(token)
#         print token
        token = [_ for _ in pad_batch(token)]
#         print len(token), token[0].size(), token[1].size()
        yield token, Variable(torch.from_numpy(feature)) , Variable(torch.FloatTensor(label), requires_grad= False)
Project: YellowFin_Pytorch    Author: JianGoForIt    | project source | file source
def gen_minibatch1(tokens, features, mini_batch_size, shuffle= True):
    tokens = np.asarray(tokens)
    features = np.asarray(features.todense())
    print(tokens.shape)
    for token, feature, label in iterate_minibatches(tokens, features, features, mini_batch_size, shuffle = shuffle):
#         print token
#         token = pad_batch(token)
#         print token
        token = [_ for _ in pad_batch(token)]
        yield token, Variable(torch.from_numpy(feature))
Project: YellowFin_Pytorch    Author: JianGoForIt    | project source | file source
def pad_batch(mini_batch):
    mini_batch_size = len(mini_batch)
#     print mini_batch.shape
#     print mini_batch
    max_sent_len1 = int(np.max([len(x[0]) for x in mini_batch]))
    max_sent_len2 = int(np.max([len(x[1]) for x in mini_batch]))
#     print max_sent_len1, max_sent_len2
#     max_token_len = int(np.mean([len(val) for sublist in mini_batch for val in sublist]))
    main_matrix1 = np.zeros((mini_batch_size, max_sent_len1), dtype=np.int64)
    main_matrix2 = np.zeros((mini_batch_size, max_sent_len2), dtype=np.int64)
    for idx1, i in enumerate(mini_batch):
        for idx2, j in enumerate(i[0]):
            # write token id j at row idx1, position idx2
            main_matrix1[idx1, idx2] = j
    for idx1, i in enumerate(mini_batch):
        for idx2, j in enumerate(i[1]):
            main_matrix2[idx1, idx2] = j
    main_matrix1_t = Variable(torch.from_numpy(main_matrix1))
    main_matrix2_t = Variable(torch.from_numpy(main_matrix2))
#     print main_matrix1_t.size()
#     print main_matrix2_t.size()
    return [main_matrix1_t, main_matrix2_t]
#     return [Variable(torch.cat((main_matrix1_t, main_matrix2_t), 0))

# def pad_batch(mini_batch):
# #     print mini_batch
# #     print type(mini_batch)
# #     print mini_batch.shape
# #     for i, _ in enumerate(mini_batch):
# #         print i, _
#     return [Variable(torch.from_numpy(np.asarray(_))) for _ in mini_batch[0]]
Project: YellowFin_Pytorch    Author: JianGoForIt    | project source | file source
def gen_minibatch(tokens, features, labels, mini_batch_size, shuffle= True):
    tokens = np.asarray(tokens)[np.where(labels!=0.5)[0]]
    features = np.asarray(features.todense())[np.where(labels!=0.5)[0]]
    labels = np.asarray(labels)[np.where(labels!=0.5)[0]]
#     print tokens.shape
#     print tokens[0]
    for token, feature, label in iterate_minibatches(tokens, features, labels, mini_batch_size, shuffle = shuffle):
#         print 'token', type(token)
#         print token
        token = [_ for _ in pad_batch(token)]
#         print len(token), token[0].size(), token[1].size()
        yield token, Variable(torch.from_numpy(feature)) , Variable(torch.FloatTensor(label), requires_grad= False)