Python numpy module: tensordot() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.tensordot().

Project: pyballd    Author: Yurlungur
def __init__(self,
                 orderx,xmin,xmax,
                 ordery,ymin,ymax):
        "Constructor. Needs order and domain in x and y"
        self.orderx,self.ordery = orderx,ordery
        self.stencils = [PseudoSpectralDiscretization1D(orderx,xmin,xmax),
                         PseudoSpectralDiscretization1D(ordery,ymin,ymax)]
        self.stencil_x,self.stencil_y = self.stencils
        self.quads = [s.quads for s in self.stencils]
        self.colocs = [s.colocation_points for s in self.stencils]
        self.x,self.y = self.colocs
        self.colocs2d = np.meshgrid(*self.colocs,indexing='ij')
        self.X,self.Y = self.colocs2d
        self.weights = [s.weights for s in self.stencils]
        self.weights_x,self.weights_y = self.weights
        self.weights2D = np.tensordot(*self.weights,axes=0)
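
In the last line above, np.tensordot with axes=0 of the two 1-D weight vectors is simply their outer product. A minimal standalone check (plain NumPy, independent of pyballd):

import numpy as np

wx = np.array([1.0, 2.0, 3.0])
wy = np.array([0.5, 1.5])
w2d = np.tensordot(wx, wy, axes=0)   # outer product, shape (3, 2)
assert np.allclose(w2d, np.outer(wx, wy))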
Project: npstreams    Author: LaurentRDC
def itensordot(arrays, axes = 2):
    """
    Yields the cumulative array inner product (dot product) of arrays.

    Parameters
    ----------
    arrays : iterable
        Arrays to be reduced.
    axes : int or (2,) array_like
        * integer_like: If an int N, sum over the last N axes of a 
          and the first N axes of b in order. The sizes of the corresponding axes must match.
        * (2,) array_like: Or, a list of axes to be summed over, first sequence applying to a,
          second to b. Both sequences must be of the same length.

    Yields
    ------
    online_tensordot : ndarray

    See Also
    --------
    numpy.tensordot : Compute the tensordot on two tensors.
    """
    yield from _ireduce_linalg(arrays, np.tensordot, axes = axes)
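
A rough sketch of the cumulative behaviour described above, written with plain NumPy instead of npstreams' internal _ireduce_linalg (the helper name and exact yielding behaviour here are illustrative only):

import numpy as np

def cumulative_tensordot(arrays, axes=2):
    """Yield the running tensordot reduction of an iterable of arrays."""
    arrays = iter(arrays)
    acc = next(arrays)
    for arr in arrays:
        acc = np.tensordot(acc, arr, axes=axes)
        yield acc

mats = [np.eye(3), 2 * np.eye(3), 3 * np.eye(3)]
running = list(cumulative_tensordot(mats, axes=1))   # running matrix products
assert np.allclose(running[-1], 6 * np.eye(3))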
Project: mpnum    Author: dseuss
def tensordot(self, other, axes):
        """Compute tensor dot product along named axes.

        An error will be raised if the remaining axes of self and
        other contain duplicate names.

        :param other: Another named_ndarray instance
        :param axes: List of axis name pairs (self_name, other_name)
            to be contracted
        :returns: Result as named_ndarray
        """
        axes_self = [names[0] for names in axes]
        axes_other = [names[1] for names in axes]
        axespos_self = [self.axispos(name) for name in axes_self]
        axespos_other = [other.axispos(name) for name in axes_other]
        new_names = [name for name in self._axisnames if name not in axes_self]
        new_names += (name for name in other._axisnames if name not in axes_other)
        array = np.tensordot(self._array, other._array,
                             (axespos_self, axespos_other))
        return named_ndarray(array, new_names)
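
Under the hood the named axes are resolved to positions and handed to np.tensordot as a pair of lists; a small positional illustration with arbitrary shapes:

import numpy as np

a = np.random.rand(2, 3, 4)   # axes named, say, ('i', 'j', 'k')
b = np.random.rand(4, 3, 5)   # axes named, say, ('k', 'j', 'l')
c = np.tensordot(a, b, axes=([1, 2], [1, 0]))   # contract 'j' with 'j', 'k' with 'k'
print(c.shape)                                  # (2, 5): remaining axes 'i' and 'l'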
Project: mpnum    Author: dseuss
def test_split(nr_sites, local_dim, rank, rgen):
    if nr_sites < 2:
        return
    mpa = factory.random_mpa(nr_sites, local_dim, rank, randstate=rgen)
    for pos in range(nr_sites - 1):
        mpa_l, mpa_r = mpa.split(pos)
        assert len(mpa_l) == pos + 1
        assert len(mpa_l) + len(mpa_r) == nr_sites
        assert_correct_normalization(mpa_l)
        assert_correct_normalization(mpa_r)
        recons = np.tensordot(mpa_l.to_array(), mpa_r.to_array(), axes=(-1, 0))
        assert_array_almost_equal(mpa.to_array(), recons)

    for (lnorm, rnorm) in it.product(range(nr_sites - 1), range(1, nr_sites)):
        mpa_l, mpa_r = mpa.split(nr_sites // 2 - 1)
        assert_correct_normalization(mpa_l)
        assert_correct_normalization(mpa_r)
Project: quadpy    Author: nschloe
def transform(xi, cube):
    '''Transform the points `xi` from the reference cube to `cube`.
    '''
    # For d==2, the result used to be computed with
    #
    # out = (
    #     + outer(0.25*(1.0-xi[0])*(1.0-xi[1]), cube[0, 0])
    #     + outer(0.25*(1.0+xi[0])*(1.0-xi[1]), cube[1, 0])
    #     + outer(0.25*(1.0-xi[0])*(1.0+xi[1]), cube[0, 1])
    #     + outer(0.25*(1.0+xi[0])*(1.0+xi[1]), cube[1, 1])
    #     )
    #
    # This array of multiplications and additions is reminiscent of dot(), and
    # indeed tensordot() can handle the situation. We just need to compute the
    # `1+-xi` products and align them with `cube`.
    one_mp_xi = numpy.stack([
        0.5 * (1.0 - xi),
        0.5 * (1.0 + xi),
        ], axis=1)
    a = helpers.n_outer(one_mp_xi)

    # TODO kahan tensordot
    # <https://stackoverflow.com/q/45372098/353337>
    d = xi.shape[0]
    return numpy.tensordot(a, cube, axes=(range(d), range(d)))
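
The final call contracts the first d axes of a against the first d axes of cube; a standalone illustration with assumed shapes (d = 2, 7 evaluation points, 3-D corner coordinates):

import numpy as np

a = np.random.rand(2, 2, 7)      # products of 0.5*(1 +- xi) for 7 points
cube = np.random.rand(2, 2, 3)   # 2x2 corner points in 3-D
out = np.tensordot(a, cube, axes=(range(2), range(2)))
print(out.shape)                 # (7, 3): one transformed point per input point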
Project: algorithm-reference-library    Author: SKA-ScienceDataProcessor
def polmatrixmultiply(cm, vec, polaxis=1):
    """Matrix multiply of appropriate axis of vec [...,:] by cm

    For an image vec has axes [nchan, npol, ny, nx] and polaxis=1
    For visibility vec has axes [row, nchan, npol] and polaxis=2

    :param cm: matrix to apply
    :param vec: array to be multiplied [...,:]
    :param polaxis: which axis contains the polarisation
    :return: multiplied vec
    """
    if len(vec.shape) == 1:
        return numpy.dot(cm, vec)
    else:
        # tensordot moves the polarisation axis to the front, so we need to transpose back
        result = numpy.tensordot(cm, vec, axes=(1, polaxis))
        permut = list(range(len(result.shape)))
        permut[0], permut[polaxis] = permut[polaxis], permut[0]
        return numpy.transpose(result, axes=permut)
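
np.tensordot places the remaining axis of cm first in the result, which is why the transpose above is needed; a small standalone sketch of that axis bookkeeping, using np.moveaxis instead of the permutation trick (shapes are hypothetical):

import numpy as np

cm = np.random.rand(4, 4)                  # polarisation conversion matrix
vec = np.random.rand(10, 16, 4)            # e.g. [row, nchan, npol] with polaxis=2
res = np.tensordot(cm, vec, axes=(1, 2))   # shape (4, 10, 16): pol axis is now first
res = np.moveaxis(res, 0, 2)               # move it back to the end: (10, 16, 4)
assert np.allclose(res, np.einsum('ij,rcj->rci', cm, vec))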
Project: polara    Author: Evfro
def slice_recommendations(self, test_data, shape, start, end):
        test_tensor_unfolded, slice_idx = self.get_test_tensor(test_data, shape, start, end)
        num_users = end - start
        num_items = shape[1]
        num_fdbks = shape[2]
        v = self._items_factors
        w = self._feedback_factors

        # assume that w.shape[1] < v.shape[1] (allows for more efficient calculations)
        scores = test_tensor_unfolded.dot(w).reshape(num_users, num_items, w.shape[1])
        scores = np.tensordot(scores, v, axes=(1, 0))
        scores = np.tensordot(np.tensordot(scores, v, axes=(2, 1)), w, axes=(1, 1))
        scores = self.flatten_scores(scores, self.flattener)
        return scores, slice_idx

    # additional functionality: rating prediction
Project: autograd-forward    Author: BB-UCL
def test_convolve_generalization():
    ag_convolve = autograd.scipy.signal.convolve
    A_35 = R(3, 5)
    A_34 = R(3, 4)
    A_342 = R(3, 4, 2)
    A_2543 = R(2, 5, 4, 3)
    A_24232 = R(2, 4, 2, 3, 2)

    for mode in ['valid', 'full']:
        assert npo.allclose(ag_convolve(A_35,      A_34, axes=([1], [0]), mode=mode)[1, 2],
                            sp_convolve(A_35[1,:], A_34[:, 2], mode))
        assert npo.allclose(ag_convolve(A_35, A_34, axes=([],[]), dot_axes=([0], [0]), mode=mode),
                            npo.tensordot(A_35, A_34, axes=([0], [0])))
        assert npo.allclose(ag_convolve(A_35, A_342, axes=([1],[2]),
                                        dot_axes=([0], [0]), mode=mode)[2],
                            sum([sp_convolve(A_35[i, :], A_342[i, 2, :], mode)
                                 for i in range(3)]))
        assert npo.allclose(ag_convolve(A_2543, A_24232, axes=([1, 2],[2, 4]),
                                        dot_axes=([0, 3], [0, 3]), mode=mode)[2],
                            sum([sum([sp_convolve(A_2543[i, :, :, j],
                                                 A_24232[i, 2, :, j, :], mode)
                                      for i in range(2)]) for j in range(3)]))
Project: neonCl-underconstruction    Author: hughperkins
def calcM(N, Co, U, V):
    GK = U.shape[2]
    Ci = U.shape[3]
    tiles = V.shape[3]
    GN = V.shape[2]

    print('calcM cpu GN', GN, 'N', N)
    U = U.transpose(0,1,2,4,3).reshape(6,6,GK * 32,Ci)[:,:,:Co,:]

    V = V.transpose(
        2,6,0,1,5,3,4).reshape(
        GN * 32, 6, 6, Ci, tiles, tiles)[:N]

    M = np.zeros((N, Co, tiles, tiles, 6, 6), dtype=np.float32)
    for n in range(N):
        for xi in range(6):
            for nu in range(6):
                M[n,:, :, :, xi, nu] = np.tensordot(U[xi,nu], V[n,xi,nu], 1)
    timecheck('calced M')
    return M
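
np.tensordot(U[xi,nu], V[n,xi,nu], 1) above uses an integer axes argument: the last axis of the first array is contracted with the first axis of the second, i.e. an ordinary matrix product. A minimal check:

import numpy as np

u = np.random.rand(5, 7)
v = np.random.rand(7, 3)
assert np.allclose(np.tensordot(u, v, 1), u @ v)   # axes=1 is a matrix product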
Project: tsnet    Author: coxlab
def collapse(T, W, divisive=False):

    if divisive: W = W / np.sum(np.square(W.reshape(W.shape[0], -1)), 1)[:,None,None,None]

    if T.shape[-6] == W.shape[0]: # Z ONLY (after 2nd-stage expansion)

        W = np.reshape (W, (1,)*(T.ndim-6) + (W.shape[0],1,1) + W.shape[1:])
        T = ne.evaluate('T*W')
        T = np.reshape (T, T.shape[:-3] + (np.prod(T.shape[-3:]),))
        T = np.sum(T, -1)

    else: # X ONLY (conv, before 2nd-stage expansion)

        T = np.squeeze  (T, -6)
        T = np.tensordot(T, W, ([-3,-2,-1], [1,2,3]))
        T = np.rollaxis (T, -1, 1)

    return T
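
The tensordot call in the else branch uses negative axis indices, which np.tensordot resolves just like ordinary indexing; a standalone example with made-up convolution-style shapes:

import numpy as np

t = np.random.rand(6, 4, 3, 3)   # e.g. (batch, channels, kh, kw)
w = np.random.rand(5, 4, 3, 3)   # (filters, channels, kh, kw)
out = np.tensordot(t, w, axes=([-3, -2, -1], [1, 2, 3]))   # contract channels and kernel
print(out.shape)                 # (6, 5)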
Project: tsnet    Author: coxlab
def backward(self, T, mode='X'):

        if 'X' in mode and 'G' in mode:

            D = T
            X = np.squeeze  (self.X, 1)
            G = np.tensordot(D, X, ([0,2,3],[0,1,2]))
            self.accumulate(G)

        if 'X' in mode: T = uncollapse(T, self.W)
        else          : T = np.sum(T, 1)[:,None]

        O = np.zeros((T.shape[0], 1) + (self.sh[2]-self.w[2]+1, self.sh[3]-self.w[3]+1) + tuple(self.w[1:]) + T.shape[7:], dtype='float32')

        _ = ne.evaluate('T', out=O[self.S]) #O[self.S] = T
        O = unexpand(O)

        return O
Project: chainer-deconv    Author: germanRos
def backward_cpu(self, inputs, grad_outputs):
        x, W = inputs[:2]
        b = inputs[2] if len(inputs) == 3 else None
        gy = grad_outputs[0]
        h, w = x.shape[2:]

        gW = numpy.tensordot(gy, self.col, ((0, 2, 3), (0, 4, 5)))
        gcol = numpy.tensordot(W, gy, (0, 1))
        gcol = numpy.rollaxis(gcol, 3)
        gx = conv.col2im_cpu(gcol, self.sy, self.sx, self.ph, self.pw, h, w)

        if b is None:
            return gx, gW
        else:
            gb = gy.sum(axis=(0, 2, 3))
            return gx, gW, gb
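
The gW line contracts the batch and spatial axes of gy against the matching axes of the im2col buffer; a shape-only sketch with hypothetical dimensions:

import numpy as np

gy = np.random.rand(2, 6, 4, 4)          # (batch, c1, out_h, out_w)
col = np.random.rand(2, 8, 3, 3, 4, 4)   # (batch, c2, kh, kw, out_h, out_w)
gW = np.tensordot(gy, col, ((0, 2, 3), (0, 4, 5)))
print(gW.shape)                          # (6, 8, 3, 3): (c1, c2, kh, kw)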
Project: chainer-deconv    Author: germanRos
def forward_cpu(self, inputs):
        x, W = inputs[:2]
        b = inputs[2] if len(inputs) == 3 else None
        kh, kw = W.shape[2:]
        _, _, h, w = x.shape
        gcol = numpy.tensordot(W, x, (0, 1))
        # - k, m, n: shape of out_channel
        # - b: number of inputs
        # - h, w: height and width of kernels
        # k, m, n, b, h, w -> b, k, m, n, h, w
        gcol = numpy.rollaxis(gcol, 3)
        if self.outh is None:
            self.outh = conv.get_deconv_outsize(h, kh, self.sy, self.ph)
        if self.outw is None:
            self.outw = conv.get_deconv_outsize(w, kw, self.sx, self.pw)
        y = conv.col2im_cpu(
            gcol, self.sy, self.sx, self.ph, self.pw, self.outh, self.outw)
        # b, k, h, w
        if b is not None:
            y += b.reshape(1, b.size, 1, 1)
        return y,
Project: chainer-deconv    Author: germanRos
def backward_cpu(self, inputs, grad_outputs):
        x, W = inputs[:2]
        b = inputs[2] if len(inputs) == 3 else None
        gy = grad_outputs[0]
        kh, kw = W.shape[2:]
        col = conv.im2col_cpu(
            gy, kh, kw, self.sy, self.sx, self.ph, self.pw)
        gW = numpy.tensordot(x, col, ([0, 2, 3], [0, 4, 5]))
        gx = numpy.tensordot(col, W, ([1, 2, 3], [1, 2, 3]))
        gx = numpy.rollaxis(gx, 3, 1)

        if b is None:
            return gx, gW
        else:
            gb = gy.sum(axis=(0, 2, 3))
            return gx, gW, gb
Project: movie-recommendation-using-RBM    Author: pinkeshbadjatiya
def predictor(self, movie_id, user_id):

        w = self.getW(user_movies[user_id])

        # making predictions; the rating part Vq is not given
        data = copy.deepcopy(self.data[user_id])
        probs = np.ones(5)

        mx, index = -1, 0

        for i in range(5):
            calc = 1.0
            for j in range(self.F):
                temp = np.tensordot(data, self.getW(user_movies[user_id])[j]) + self.featureBias[j]
                temp = 1.0 + np.exp(temp)
                calc *= temp
            probs[i] = calc

            if mx < probs[i]:
                index = i
                mx = probs[i]

        return index
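
np.tensordot(data, ...) above relies on the default axes=2, which contracts the last two axes of the first array with the first two axes of the second; for two equally shaped matrices this is just the full element-wise sum of products:

import numpy as np

data = np.random.rand(10, 5)   # e.g. (movies, rating levels)
w_j = np.random.rand(10, 5)    # weights of one hidden feature
assert np.allclose(np.tensordot(data, w_j), np.sum(data * w_j))   # default axes=2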
Project: XNOR-Net    Author: rarilurelo
def backward_cpu(self, inputs, grad_outputs):
        x, W = inputs[:2]
        Wb = binarize_cpu(W)
        b = inputs[2] if len(inputs) == 3 else None
        gy = grad_outputs[0]
        h, w = x.shape[2:]

        gW = numpy.tensordot(gy, self.col, ((0, 2, 3), (0, 4, 5)))
        gcol = numpy.tensordot(Wb, gy, (0, 1))
        gcol = numpy.rollaxis(gcol, 3)
        gx = conv.col2im_cpu(gcol, self.sy, self.sx, self.ph, self.pw, h, w)

        if b is None:
            return gx, gW
        else:
            gb = gy.sum(axis=(0, 2, 3))
            return gx, gW, gb
Project: GUINNESS    Author: HirokiNakahara
def forward_cpu(self, inputs):
        x, W = inputs[:2]
        b = inputs[2] if len(inputs) == 3 else None
        kh, kw = W.shape[2:]
        self.col = conv.im2col_cpu(
            x, kh, kw, self.sy, self.sx, self.ph, self.pw,
            cover_all=self.cover_all)

        Wb = numpy.where(W>=0,1,-1).astype(W.dtype, copy=False)

        y = numpy.tensordot(
            self.col, Wb, ((1, 2, 3), (1, 2, 3))).astype(x.dtype, copy=False)

        if b is not None:
            y += b

        return numpy.rollaxis(y, 3, 1),
Project: GUINNESS    Author: HirokiNakahara
def backward_cpu(self, inputs, grad_outputs):
        x, W = inputs[:2]
        b = inputs[2] if len(inputs) == 3 else None
        gy = grad_outputs[0]
        h, w = x.shape[2:]

        gW = numpy.tensordot(
            gy, self.col, ((0, 2, 3), (0, 4, 5))).astype(W.dtype, copy=False)

        Wb = numpy.where(W>=0,1,-1).astype(W.dtype, copy=False)

        gcol = numpy.tensordot(Wb, gy, (0, 1)).astype(x.dtype, copy=False)

        gcol = numpy.rollaxis(gcol, 3)
        gx = conv.col2im_cpu(gcol, self.sy, self.sx, self.ph, self.pw, h, w)

        if b is None:
            return gx, gW
        else:
            gb = gy.sum(axis=(0, 2, 3))
            return gx, gW, gb
Project: GUINNESS    Author: HirokiNakahara
def forward_cpu(self, inputs):
        x, W = inputs[:2]
        b = inputs[2] if len(inputs) == 3 else None
        kh, kw = W.shape[2:]
        self.col = conv.im2col_cpu(
            x, kh, kw, self.sy, self.sx, self.ph, self.pw,
            cover_all=self.cover_all)

        Xb = numpy.where(self.col>0,1,self.col).astype(x.dtype, copy=False)
        Xb = numpy.where(self.col<0,-1,Xb).astype(x.dtype, copy=False)
        Wb = numpy.where(W>=0,1,-1).astype(W.dtype, copy=False)
        y = numpy.tensordot(
            Xb, Wb, ((1, 2, 3), (1, 2, 3))).astype(x.dtype, copy=False)
        if b is not None:
            y += b

        return numpy.rollaxis(y, 3, 1),
Project: GUINNESS    Author: HirokiNakahara
def backward_cpu(self, inputs, grad_outputs):
        x, W = inputs[:2]
        b = inputs[2] if len(inputs) == 3 else None
        gy = grad_outputs[0]
        h, w = x.shape[2:]

        gW = numpy.tensordot(
            gy, self.col, ((0, 2, 3), (0, 4, 5))).astype(W.dtype, copy=False)

        Wb = numpy.where(W>=0,1,-1).astype(W.dtype, copy=False)

        gcol = numpy.tensordot(Wb, gy, (0, 1)).astype(x.dtype, copy=False)

        gcol = numpy.rollaxis(gcol, 3)
        gx = conv.col2im_cpu(gcol, self.sy, self.sx, self.ph, self.pw, h, w)

        if b is None:
            return gx, gW
        else:
            gb = gy.sum(axis=(0, 2, 3))
            return gx, gW, gb
Project: LabelsManager    Author: SebastianoF
def get_roto_translation_matrix(theta, rotation_axis=np.array([1,0,0]),  translation=np.array([0, 0, 0])):

    n = np.linalg.norm(rotation_axis)
    assert not np.abs(n) < 0.001, 'rotation axis too close to zero.'
    rot = rotation_axis / n

    # Rodrigues' rotation formula:
    cross_prod = np.array([[0, -rot[2], rot[1]],
                           [rot[2], 0, -rot[0]],
                           [-rot[1], rot[0], 0]])
    rot_part = np.cos(theta) * np.identity(3) + np.sin(theta) * cross_prod \
        + (1 - np.cos(theta)) * np.tensordot(rot, rot, axes=0)

    # transformation parameters

    rot_transl = np.identity(4)
    rot_transl[:3, :3] = rot_part
    rot_transl[:3, 3] = translation

    return rot_transl
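
A quick sanity check of the rotation part, assuming the function above is in scope: a 90 degree rotation about the z axis should map the x unit vector onto the y unit vector and leave the translation at zero.

import numpy as np

R = get_roto_translation_matrix(np.pi / 2, rotation_axis=np.array([0, 0, 1]))
assert np.allclose(R[:3, :3] @ np.array([1.0, 0.0, 0.0]), [0.0, 1.0, 0.0])
assert np.allclose(R[:3, 3], [0.0, 0.0, 0.0])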
Project: ddnn    Author: kunglab
def backward_cpu(self, inputs, grad_outputs):
        x, W = inputs[:2]
        Wb = numpy.where(W>=0, 1, -1).astype(numpy.float32, copy=False)
        b = inputs[2] if len(inputs) == 3 else None
        gy = grad_outputs[0]
        h, w = x.shape[2:]

        gW = numpy.tensordot(gy, self.col, ((0, 2, 3), (0, 4, 5)))
        gcol = numpy.tensordot(Wb, gy, (0, 1))
        gcol = numpy.rollaxis(gcol, 3)
        gx = conv.col2im_cpu(gcol, self.sy, self.sx, self.ph, self.pw, h, w)

        if b is None:
            return gx, gW
        else:
            gb = gy.sum(axis=(0, 2, 3))
            return gx, gW, gb
Project: kuramoto_vicsek    Author: lisamnash
def g_def(self, v, t, vbar):
        v = np.reshape(v, self.vshape)
        id_m = np.array([[1, 0], [0, 1]])
        ret = []
        for k in xrange(len(v)):
            t_p = np.tensordot(v[k], v[k], axes=0)
            p_vk = id_m - t_p
            r_ori = 2 * np.pi * np.random.random()
            ret_s = self.nu * np.dot(p_vk, vbar[k]) + self.C * np.dot(p_vk, [np.sin(r_ori), np.cos(r_ori)])

            ret_s = np.reshape(ret_s, [2, 1])
            ret.append(ret_s)

        ret = np.array(ret)

        return np.reshape(ret, 2 * len(ret))
Project: RNTN    Author: munikarmanish
def predict(self, tree):
        if tr.isleaf(tree):
            # output = word vector
            try:
                tree.vector = self.L[:, self.word_map[tree[0]]]
            except:
                tree.vector = self.L[:, self.word_map[tr.UNK]]
        else:
            # calculate output of child nodes
            self.predict(tree[0])
            self.predict(tree[1])

            # compute output
            lr = np.hstack([tree[0].vector, tree[1].vector])
            tree.vector = np.tanh(
                np.tensordot(self.V, np.outer(lr, lr), axes=([1, 2], [0, 1])) +
                np.dot(self.W, lr) + self.b)

        # softmax
        import util
        tree.output = util.softmax(np.dot(self.Ws, tree.vector) + self.bs)
        label = np.argmax(tree.output)
        tree.set_label(str(label))
        return tree
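
The tensordot over V and outer(lr, lr) is the bilinear (tensor) term of the RNTN composition; a standalone check of that contraction against einsum, with arbitrary sizes:

import numpy as np

d, k = 4, 6
V = np.random.rand(d, k, k)    # third-order composition tensor
lr = np.random.rand(k)         # concatenated child vectors
bilinear = np.tensordot(V, np.outer(lr, lr), axes=([1, 2], [0, 1]))
assert np.allclose(bilinear, np.einsum('ijk,j,k->i', V, lr, lr))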
Project: BinaryNetConvolution    Author: rarilurelo
def backward_cpu(self, inputs, grad_outputs):
        x, W = inputs[:2]
        Wb = numpy.where(W>=0, 1, -1).astype(numpy.float32, copy=False)
        b = inputs[2] if len(inputs) == 3 else None
        gy = grad_outputs[0]
        h, w = x.shape[2:]

        gW = numpy.tensordot(gy, self.col, ((0, 2, 3), (0, 4, 5)))
        gcol = numpy.tensordot(Wb, gy, (0, 1))
        gcol = numpy.rollaxis(gcol, 3)
        gx = conv.col2im_cpu(gcol, self.sy, self.sx, self.ph, self.pw, h, w)

        if b is None:
            return gx, gW
        else:
            gb = gy.sum(axis=(0, 2, 3))
            return gx, gW, gb
Project: Python-iBeacon-Scan    Author: NikNitro
def __pow__(self, other):
        if self.data is None:
            raise ValueError("No power without ndarray data.")
        numpy = import_module('numpy')
        free = self.free

        marray = self.data
        for metric in free:
            marray = numpy.tensordot(
                marray,
                numpy.tensordot(
                    metric[0]._tensortype.data,
                    marray,
                    (1, 0)
                ),
                (0, 0)
            )
        pow2 = marray[()]
        return pow2 ** (Rational(1, 2) * other)
Project: chemcoord    Author: mcocdawc
def apply_grad_cartesian_tensor(grad_X, zmat_dist):
    """Apply the gradient for transformation to cartesian space onto zmat_dist.

    Args:
        grad_X (:class:`numpy.ndarray`): A ``(3, n, n, 3)`` array.
            The mathematical details of the index layout is explained in
            :meth:`~chemcoord.Cartesian.get_grad_zmat()`.
        zmat_dist (:class:`~chemcoord.Zmat`):
            Distortions in Zmatrix space.

    Returns:
        :class:`~chemcoord.Cartesian`: Distortions in cartesian space.
    """
    columns = ['bond', 'angle', 'dihedral']
    C_dist = zmat_dist.loc[:, columns].values.T
    try:
        C_dist = C_dist.astype('f8')
        C_dist[[1, 2], :] = np.radians(C_dist[[1, 2], :])
    except (TypeError, AttributeError):
        C_dist[[1, 2], :] = sympy.rad(C_dist[[1, 2], :])
    cart_dist = np.tensordot(grad_X, C_dist, axes=([3, 2], [0, 1])).T
    from chemcoord.cartesian_coordinates.cartesian_class_main import Cartesian
    return Cartesian(atoms=zmat_dist['atom'],
                     coords=cart_dist, index=zmat_dist.index)
Project: chainer-graph-cnn    Author: pfnet-research
def forward_cpu(self, inputs):
        x, W = inputs[:2]
        n_batch, c_in, N = x.shape
        b = inputs[2] if len(inputs) == 3 else None

        K = self.K
        if x.dtype != self.LmI.dtype:
            self.LmI = self.LmI.astype(x.dtype)

        C = np.empty((n_batch, K, N, c_in), dtype=x.dtype)
        chebyshev_matvec_cpu(C, x, K, n_batch, self.LmI)
        C = C.transpose((0, 3, 1, 2))
        self.C = C
        y = np.tensordot(C, W, ((1, 2), (1, 2)))

        if b is not None:
            y += b

        return np.rollaxis(y, 2, 1),  # y.shape = (n_batch, c_out, N)
Project: chainer-graph-cnn    Author: pfnet-research
def forward_gpu(self, inputs):
        x, W = inputs[:2]
        n_batch, c_in, N = x.shape
        b = inputs[2] if len(inputs) == 3 else None
        xp = cuda.get_array_module(x)
        with cuda.get_device(x.data):
            K = self.K
            LmI_data, LmI_indices, LmI_indptr = self.LmI_tuple

            if x.dtype != LmI_data.dtype:
                LmI_data = LmI_data.astype(x.dtype)

            C = xp.empty((K, N, c_in, n_batch), dtype=x.dtype)
            chebyshev_matvec_gpu(C, x, K, n_batch,
                                 LmI_data, LmI_indices, LmI_indptr)

            C = C.transpose((3, 2, 0, 1))
            self.C = C
            y = xp.tensordot(C, W, ((1, 2), (1, 2)))

            if b is not None:
                y += b

            return xp.rollaxis(y, 2, 1),  # y.shape = (n_batch, c_out, N)
Project: sparse    Author: mrocklin
def test_tensordot(a_shape, b_shape, axes):
    a = random_x(a_shape)
    b = random_x(b_shape)

    sa = COO.from_numpy(a)
    sb = COO.from_numpy(b)

    assert_eq(np.tensordot(a, b, axes),
              sparse.tensordot(sa, sb, axes))

    assert_eq(np.tensordot(a, b, axes),
              sparse.tensordot(sa, b, axes))

    # assert isinstance(sparse.tensordot(sa, b, axes), COO)

    assert_eq(np.tensordot(a, b, axes),
              sparse.tensordot(a, sb, axes))

    # assert isinstance(sparse.tensordot(a, sb, axes), COO)
Project: Theano-Deep-learning    Author: GeekLiB
def test_raise_error(self):
        amat = matrix()
        bmat = matrix()
        bvec = vector()

        # Test invalid length for axes
        self.assertRaises(ValueError, tensordot, amat, bmat, (0, 1, 2))

        # Test axes of uneven length
        self.assertRaises(ValueError, tensordot, amat, bmat, ((0, 1), (0)))

        # Test invalid len(axes) given inputs are matrices
        self.assertRaises(ValueError, tensordot, amat, bmat, ((0, 1, 2), (0, 1, 2)))

        # Test invalid axes[1] given that y is a vector
        self.assertRaises(ValueError, tensordot, amat, bvec, (0, 1))

        # Test invalid scalar axes given inputs are matrices
        self.assertRaises(ValueError, tensordot, amat, bvec, 2)
Project: Theano-Deep-learning    Author: GeekLiB
def test_weird_valid_axes(self):
        # Test matrix-matrix
        amat = matrix()
        bmat = matrix()
        for axes in [0,
                     (1, 0),
                     [1, 0],
                     (1, (0, )),
                     ((1, ), 0),
                     ([1], [0]),
                     ([], [])]:
            c = tensordot(amat, bmat, axes)
            f3 = inplace_func([amat, bmat], c)
            aval = rand(4, 7)
            bval = rand(7, 9)
            self.assertTrue(numpy.allclose(numpy.tensordot(aval, bval, axes),
                                           f3(aval, bval)))
            utt.verify_grad(self.TensorDot(axes), [aval, bval])
Project: Theano-Deep-learning    Author: GeekLiB
def test_scalar_axes(self):
        # Test matrix-matrix
        amat = fmatrix()
        bmat = dmatrix()
        # We leave bmat at float64 to test a mix of float32 and float64.
        axes = 1
        aval = rand(4, 5).astype('float32')
        bval = rand(5, 3)
        c = tensordot(amat, bmat, axes)
        f3 = inplace_func([amat, bmat], c)
        self.assertTrue(numpy.allclose(numpy.tensordot(aval, bval, axes),
                                       f3(aval, bval)))
        utt.verify_grad(self.TensorDot(axes), [aval, bval])

        # Test tensor-tensor
        amat = tensor3()
        bmat = tensor3()
        axes = 2
        aval = rand(3, 4, 5)
        bval = rand(4, 5, 3)
        c = tensordot(amat, bmat, axes)
        f3 = inplace_func([amat, bmat], c)
        self.assertTrue(numpy.allclose(numpy.tensordot(aval, bval, axes),
                                       f3(aval, bval)))
        utt.verify_grad(self.TensorDot(axes), [aval, bval])
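
For reference, the tensor-tensor case exercised above in plain NumPy: an integer axes=2 contracts the last two axes of the first operand against the first two axes of the second.

import numpy as np

aval = np.random.rand(3, 4, 5)
bval = np.random.rand(4, 5, 3)
out = np.tensordot(aval, bval, axes=2)   # contract the shared (4, 5) axes
print(out.shape)                         # (3, 3)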
Project: pactools    Author: pactools
def _develop(self, basis):
        """Compute the AR models and gains at instants fixed by newcols

        returns:
        ARcols : array containing the AR parts
        Gcols  : array containing the gains

        The size of ARcols is (1 + ordar_, n_epochs, n_points)
        The size of Gcols is (1, n_epochs, n_points)

        """
        n_basis, n_epochs, n_points = basis.shape
        ordar_ = self.ordar_

        # -------- expand on the basis
        AR_cols_ones = np.ones((1, n_epochs, n_points))
        if ordar_ > 0:
            AR_cols = np.tensordot(self.AR_, basis, axes=([1], [0]))
            AR_cols = np.vstack((AR_cols_ones, AR_cols))
        else:
            AR_cols = AR_cols_ones
        G_cols = self._develop_gain(basis, squared=False, log=False)
        return (AR_cols, G_cols)
Project: pactools    Author: pactools
def _develop_parcor(self, LAR, basis):
        single_dim = LAR.ndim == 1
        LAR = np.atleast_2d(LAR)
        # n_basis, n_epochs, n_points = basis.shape
        # ordar, n_basis = LAR.shape
        # ordar, n_epochs, n_points = lar_list.shape
        lar_list = np.tensordot(LAR, basis, axes=([1], [0]))

        if single_dim:
            # n_epochs, n_points = lar_list.shape
            lar_list = lar_list[0, :, :]
        return lar_list

    # ------------------------------------------------ #
    # Functions that should be overloaded              #
    # ------------------------------------------------ #
Project: HamiltonianPy    Author: waltergu
def tensordotcontract(tensors):
    '''
    Use np.tensordot to implement the contraction of tensors.

    Parameters
    ----------
    tensors : list of Tensor
        The tensors to be contracted.

    Returns
    -------
    Tensor
        The contracted tensor.
    '''
    def _contract_(a,b):
        common=set(a.labels)&set(b.labels)
        aaxes=[a.axis(label) for label in common]
        baxes=[b.axis(label) for label in common]
        labels=[label for label in it.chain(a.labels,b.labels) if label not in common]
        axes=(aaxes,baxes) if len(common)>0 else 0
        return Tensor(np.tensordot(a,b,axes=axes),labels=labels)
    result=tensors[0]
    for i in xrange(1,len(tensors)):
        result=_contract_(result,tensors[i])
    return result
Project: HamiltonianPy    Author: waltergu
def mgf_kmesh(self,omega,kmesh):
        '''
        Returns the mesh of the Green's functions in the mixed representation with respect to momentums.

        Parameters
        ----------
        omega : np.complex128/np.complex64
            The frequency of the mixed Green's functions.
        kmesh : (n+1)d ndarray like
            The kmesh of the mixed Green's functions.
            And n is the spatial dimension of the system.

        Returns
        -------
        3d ndarray
            The mesh of the mixed Green's functions.
        '''
        cgf=self.cgf(omega)
        return np.tensordot(cgf,inv(np.identity(cgf.shape[0],dtype=cgf.dtype)-self.pt_kmesh(kmesh).dot(cgf)),axes=([1],[1])).transpose((1,0,2))
Project: HamiltonianPy    Author: waltergu
def VCAEB(engine,app):
    '''
    This method calculates the single particle spectrum along a path in the Brillouin zone.
    '''
    engine.rundependences(app.name)
    engine.cache.pop('pt_kmesh',None)
    erange,kmesh,nk=np.linspace(app.emin,app.emax,app.ne),app.path.mesh('k'),app.path.rank('k')
    result=np.zeros((nk,app.ne,3))
    result[:,:,0]=np.tensordot(np.array(xrange(nk)),np.ones(app.ne),axes=0)
    result[:,:,1]=np.tensordot(np.ones(nk),erange,axes=0)
    for i,omega in enumerate(erange):
        result[:,i,2]=-(np.trace(engine.gf_kmesh(omega+app.mu+app.eta*1j,kmesh),axis1=1,axis2=2)).imag/engine.nopt/np.pi
    name='%s_%s'%(engine,app.name)
    if app.savedata: np.savetxt('%s/%s.dat'%(engine.dout,name),result.reshape((nk*app.ne,3)))
    if app.plot: app.figure('P',result,'%s/%s'%(engine.dout,name))
    if app.returndata: return result
Project: fast-neural-style-pyfunt    Author: dnlcrl
def update_grad_input(self, x, grad_output):
        self.grad_input = np.zeros_like(x)
        if x.dims == 3:
            C, H, W = x.shape
            x_flat = x.view(C, H * W)
            self.buffer = np.dot(grad_output, x_flat)
            self.buffer += np.dot(grad_output.T, x_flat)
            self.grad_input = self.buffer.view(C, H, W)
        if x.dims == 4:
            N, C, H, W = x.shape
            x_flat = x.view(N, C, H * W)
            self.buffer = np.tensordot(grad_output, x_flat, 2)
            self.buffer += np.tensordot(grad_output.transpose(2, 3), x_flat, 2)
            self.grad_input = self.buffer.view(N, C, H, W)
        if self.normalize:
            self.buffer /= C * H * W
        return self.grad_input
Project: npstreams    Author: LaurentRDC
def test_against_numpy_tensordot(self):
        """ Test against numpy.tensordot in 2D case """
        stream = tuple(np.random.random((8, 8)) for _ in range(2))

        for axis in (0, 1, 2):
            with self.subTest('axis = {}'.format(axis)):
                from_numpy = np.tensordot(*stream)
                from_stream = last(itensordot(stream))

                self.assertSequenceEqual(from_numpy.shape, from_stream.shape)
                self.assertTrue(np.allclose(from_numpy, from_stream))
Project: npstreams    Author: LaurentRDC
def test_against_numpy_inner(self):
        """ Test against numpy.tensordot in 2D case """
        stream = tuple(np.random.random((8, 8)) for _ in range(2))

        for axis in (0, 1, 2):
            with self.subTest('axis = {}'.format(axis)):
                from_numpy = np.inner(*stream)
                from_stream = last(iinner(stream))

                self.assertSequenceEqual(from_numpy.shape, from_stream.shape)
                self.assertTrue(np.allclose(from_numpy, from_stream))
Project: mpnum    Author: dseuss
def _eig_rightvec_add(rightvec, mpo_lten, mps_lten):
    """Add one column to the right vector.

    :param rightvec: existing right vector
        It has three indices: mps bond, mpo bond, complex conjugate mps bond
    :param mpo_lten: Local tensor of the MPO
    :param mps_lten: Local tensor of the current MPS eigenstate

    This does the same thing as _eig_leftvec_add(), except that
    'left' and 'right' are exchanged in the contractions (but not in
    the axis names of the input tensors).

    """
    rightvec_names = ('mps_bond', 'mpo_bond', 'cc_mps_bond')
    mpo_names = ('left_mpo_bond', 'phys_row', 'phys_col', 'right_mpo_bond')
    mps_names = ('left_mps_bond', 'phys', 'right_mps_bond')
    rightvec = named_ndarray(rightvec, rightvec_names)
    mpo_lten = named_ndarray(mpo_lten, mpo_names)
    mps_lten = named_ndarray(mps_lten, mps_names)

    contract_mps = (('mps_bond', 'right_mps_bond'),)
    rightvec = rightvec.tensordot(mps_lten, contract_mps)
    rename_mps = (('left_mps_bond', 'mps_bond'),)
    rightvec = rightvec.rename(rename_mps)

    contract_mpo = (
        ('mpo_bond', 'right_mpo_bond'),
        ('phys', 'phys_col'))
    rightvec = rightvec.tensordot(mpo_lten, contract_mpo)
    contract_cc_mps = (
        ('cc_mps_bond', 'right_mps_bond'),
        ('phys_row', 'phys'))
    rightvec = rightvec.tensordot(mps_lten.conj(), contract_cc_mps)
    rename_mps_mpo = (
        ('left_mpo_bond', 'mpo_bond'),
        ('left_mps_bond', 'cc_mps_bond'))
    rightvec = rightvec.rename(rename_mps_mpo)

    rightvec = rightvec.to_array(rightvec_names)
    return rightvec
Project: mpnum    Author: dseuss
def _eig_leftvec_add_mps(lv, lt1, lt2):
    """Add one column to the left vector (MPS version)"""
    # MPS 1: Interpreted as |psiXpsi| part of the operator
    # MPS 2: The current eigenvector candidate
    # NB: It would be more efficient to store lt1.conj() instead of lt1.
    # lv axes: 0: mps1 bond, 1: mps2 bond
    lv = np.tensordot(lv, lt1.conj(), axes=(0, 0))
    # lv axes: 0: mps2 bond, 1: physical leg, 2: mps1 bond
    lv = np.tensordot(lv, lt2, axes=((0, 1), (0, 1)))
    # lv axes: 0: mps1 bond, 1: mps2 bond
    return lv
Project: mpnum    Author: dseuss
def _eig_rightvec_add_mps(rv, lt1, lt2):
    """Add one column to the right vector (MPS version)"""
    # rv axes: 0: mps1 bond, 1: mps2 bond
    rv = np.tensordot(rv, lt1.conj(), axes=(0, 2))
    # rv axes: 0: mps2 bond, 1: mps1 bond, 2: physical leg
    rv = np.tensordot(rv, lt2, axes=((0, 2), (2, 1)))
    # rv axes: 0: mps1 bond, 1: mps2 bond
    return rv
Project: mpnum    Author: dseuss
def dot(mpa1, mpa2, axes=(-1, 0), astype=None):
    """Compute the matrix product representation of the contraction of ``a``
        and ``b`` over the given axes. [:ref:`Sch11 <Sch11>`, Sec. 4.2]

    :param mpa1, mpa2: Factors as MPArrays

    :param axes: Tuple ``(ax1, ax2)`` where ``ax1`` (``ax2``) is a single
        physical leg number or sequence of physical leg numbers
        referring to ``mpa1`` (``mpa2``). The first (second, etc) entries
        of ``ax1`` and ``ax2`` will be contracted. Very similar to the
        ``axes`` argument for :func:`numpy.tensordot()`.
        (default: ``(-1, 0)``)

    .. note:: Note that the default value of ``axes`` is different compared to
              :func:`numpy.tensordot`.

    :param astype: Return type. If ``None``, use the type of ``mpa1``

    :returns: Dot product of the physical arrays

    """
    assert len(mpa1) == len(mpa2), \
        "Length is not equal: {} != {}".format(len(mpa1), len(mpa2))

    # adapt the axes from physical to true legs
    if isinstance(axes[0], collections.Sequence):
        axes = tuple(tuple(ax + 1 if ax >= 0 else ax - 1 for ax in axes2)
                     for axes2 in axes)
    else:
        axes = tuple(ax + 1 if ax >= 0 else ax - 1 for ax in axes)

    ltens = [_local_dot(l, r, axes) for l, r in zip(mpa1.lt, mpa2.lt)]

    if astype is None:
        astype = type(mpa1)
    return astype(ltens)
Project: mpnum    Author: dseuss
def _local_dot(ltens_l, ltens_r, axes):
    """Computes the local tensors of a dot product `dot(l, r)`.

    Besides computing the normal dot product, this function rearranges the
    virtual legs in such a way that the result is a valid local tensor again.

    :param ltens_l: Array with `ndim > 1`
    :param ltens_r: Array with `ndim > 1`
    :param axes: Axes to compute dot product using the convention of
        :func:`numpy.tensordot()`. Note that these correspond to the true
        (and not just the physical) legs of the local tensors
    :returns: Correct local tensor representation

    """
    # number of contracted legs need to be the same
    clegs_l = len(axes[0]) if isinstance(axes[0], collections.Sequence) else 1
    clegs_r = len(axes[1]) if isinstance(axes[1], collections.Sequence) else 1
    assert clegs_l == clegs_r, \
        "Number of contracted legs differ: {} != {}".format(clegs_l, clegs_r)
    res = np.tensordot(ltens_l, ltens_r, axes=axes)
    # Rearrange the virtual-dimension legs
    res = np.rollaxis(res, ltens_l.ndim - clegs_l, 1)
    res = np.rollaxis(res, ltens_l.ndim - clegs_l,
                      ltens_l.ndim + ltens_r.ndim - clegs_l - clegs_r - 1)
    return res.reshape((ltens_l.shape[0] * ltens_r.shape[0], ) +
                       res.shape[2:-2] +
                       (ltens_l.shape[-1] * ltens_r.shape[-1],))
Project: mpnum    Author: dseuss
def _adapt_to_add_r(rightvec, compr_lten, tgt_lten):
    """Add one column to the right vector.

    :param rightvec: existing right vector
        It has two indices: `compr_mps_bond` and `tgt_mps_bond`

    :param compr_lten: Local tensor of the compressed MPS
    :param tgt_lten: Local tensor of the target MPS


    Construct R from [:ref:`Sch11 <Sch11>`, Fig. 27, p. 48]. See comments in
    :func:`_adapt_to_add_l()` for further details.

    .. todo:: Adapt tensor leg names.

    """
    rightvec_names = ('compr_bond', 'tgt_bond')
    compr_names = ('compr_left_bond', 'compr_phys', 'compr_right_bond')
    tgt_names = ('tgt_left_bond', 'tgt_phys', 'tgt_right_bond')
    rightvec = named_ndarray(rightvec, rightvec_names)
    compr_lten = named_ndarray(compr_lten, compr_names)
    tgt_lten = named_ndarray(tgt_lten, tgt_names)

    contract_compr_mps = (('compr_bond', 'compr_right_bond'),)
    rightvec = rightvec.tensordot(compr_lten, contract_compr_mps)

    contract_tgt_mps = (
        ('compr_phys', 'tgt_phys'),
        ('tgt_bond', 'tgt_right_bond'))
    rightvec = rightvec.tensordot(tgt_lten.conj(), contract_tgt_mps)
    rename = (
        ('compr_left_bond', 'compr_bond'),
        ('tgt_left_bond', 'tgt_bond'))
    rightvec = rightvec.rename(rename)

    rightvec = rightvec.to_array(rightvec_names)
    return rightvec
Project: mpnum    Author: dseuss
def matdot(A, B, axes=((-1,), (0,))):
    """np.tensordot with sane defaults for matrix multiplication"""
    return np.tensordot(A, B, axes=axes)
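
For 2-D inputs the default axes of matdot reduce to an ordinary matrix product; a minimal check (plain NumPy, mirroring the defaults above):

import numpy as np

A = np.random.rand(3, 4)
B = np.random.rand(4, 5)
assert np.allclose(np.tensordot(A, B, axes=((-1,), (0,))), A @ B)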
Project: mpnum    Author: dseuss
def pmps_dm_to_array(pmps, global_=False):
    """Convert PMPS to full array representation of the density matrix

    The runtime of this method scales with D**3 instead of D**6 where
    D is the rank and D**6 is the scaling of using :func:`pmps_to_mpo`
    and :func:`to_array`. This is useful for obtaining reduced states
    of a PMPS on non-consecutive sites, as normalizing before using
    :func:`pmps_to_mpo` may not be sufficient to reduce the rank
    in that case.

    .. note:: The resulting array will have dimension-1 physical legs removed.

    """
    out = np.ones((1, 1, 1))
    # Axes: 0 phys, 1 upper rank, 2 lower rank
    for lt in pmps.lt:
        out = np.tensordot(out, lt, axes=(1, 0))
        # Axes: 0 phys, 1 lower rank, 2 phys, 3 anc, 4 upper rank
        out = np.tensordot(out, lt.conj(), axes=((1, 3), (0, 2)))
        # Axes: 0 phys, 1 phys, 2 upper rank, 3 phys, 4 lower rank
        out = np.rollaxis(out, 3, 2)
        # Axes: 0 phys, 1 phys, 2 phys, 3 upper rank, 4 lower rank
        out = out.reshape((-1, out.shape[3], out.shape[4]))
        # Axes: 0 phys, 1 upper rank, 2 lower rank
    out_shape = [dim for dim, _ in pmps.shape for rep in (1, 2) if dim > 1]
    out = out.reshape(out_shape)
    if global_:
        assert len(set(out_shape)) == 1
        out = local_to_global(out, sites=len(out_shape) // 2)
    return out
Project: mpnum    Author: dseuss
def test_chain(nr_sites, local_dim, rank, rgen, dtype):
    # This test produces an MPO of at most `nr_sites` sites by chaining two
    # MPOs. This doesn't work for :code:`nr_sites = 1`.
    if nr_sites < 2:
        return

    # NOTE: Everything here is in local form!!!
    mpo = factory.random_mpa(nr_sites // 2, (local_dim, local_dim), rank,
                             randstate=rgen, dtype=dtype)
    op = mpo.to_array()

    # Test with 2-factors with full form
    mpo_double = mp.chain((mpo, mpo))
    op_double = np.tensordot(op, op, axes=(tuple(), ) * 2)
    assert len(mpo_double) == 2 * len(mpo)
    assert_array_almost_equal(op_double, mpo_double.to_array())
    assert_array_equal(mpo_double.ranks, mpo.ranks + (1,) + mpo.ranks)
    assert mpo.dtype == dtype

    # Test 3 factors iteratively (since the full form would be too large)
    diff = mp.chain((mpo, mpo, mpo)) - mp.chain((mpo, mp.chain((mpo, mpo))))
    diff.canonicalize()
    assert len(diff) == 3 * len(mpo)
    assert mp.norm(diff) < 1e-6
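
The chain test uses axes=(tuple(), ) * 2, i.e. it contracts over no axes at all, so np.tensordot returns the full tensor (outer) product of the two arrays; a minimal standalone check:

import numpy as np

a = np.random.rand(2, 2)
b = np.random.rand(3, 3)
prod = np.tensordot(a, b, axes=((), ()))   # contract nothing: tensor product
print(prod.shape)                          # (2, 2, 3, 3)
assert np.allclose(prod, np.multiply.outer(a, b))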

