Python numpy.linalg module: svd() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use numpy.linalg.svd().

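Before the project snippets, here is a minimal self-contained sketch of the basic call pattern (the matrix and variable names below are illustrative, not taken from any of the projects):

import numpy as np

A = np.random.rand(5, 3)
# Thin SVD: U is (5, 3), s is (3,), Vh is (3, 3), and A == U @ diag(s) @ Vh.
U, s, Vh = np.linalg.svd(A, full_matrices=False)
assert np.allclose(A, U @ np.diag(s) @ Vh)

# With compute_uv=False only the singular values are returned.
s_only = np.linalg.svd(A, compute_uv=False)
assert np.allclose(s, s_only)
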
Project: sport_movements_analysis    Author: guillaumeAssogba    | project source | file source
def PCA_values(data, centered=True):
    n_samples, n_features = data.shape
    #By default, the data are centered
    if centered:
        data_centered = data - mean(data, axis=0)
    else:
        data_centered = data

    # apply the Singular Value Decomposition
    U, S, V = linalg.svd(data_centered, full_matrices=False)
    # flip eigenvectors' sign to enforce deterministic output
    U, V = svd_flip(U, V)

    #components
    components_ = V

    #variance explained by PCs
    explained_variance_ratio_ = varianceExplained(S, n_samples)

    return(components_, explained_variance_ratio_)
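
PCA_values above relies on two project helpers that are not shown on this page (svd_flip and varianceExplained). As a rough self-contained equivalent, the following sketch computes principal components and explained-variance ratios with plain NumPy (the name pca_via_svd is ours, not the project's):

import numpy as np

def pca_via_svd(data, n_components=2):
    # Center the data, take the thin SVD, and derive components and
    # explained-variance ratios from the right singular vectors and values.
    n_samples = data.shape[0]
    data_centered = data - data.mean(axis=0)
    U, S, Vt = np.linalg.svd(data_centered, full_matrices=False)
    components = Vt[:n_components]
    explained_variance = (S ** 2) / (n_samples - 1)
    ratio = explained_variance / explained_variance.sum()
    return components, ratio[:n_components]

components, ratio = pca_via_svd(np.random.rand(100, 4), n_components=2)
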
Project: radar    Author: amoose136    | project source | file source
def do(self, a, b):
        arr = np.asarray(a)
        m, n = arr.shape
        u, s, vt = linalg.svd(a, 0)
        x, residuals, rank, sv = linalg.lstsq(a, b)
        if m <= n:
            assert_almost_equal(b, dot(a, x))
            assert_equal(rank, m)
        else:
            assert_equal(rank, n)
        assert_almost_equal(sv, sv.__array_wrap__(s))
        if rank == n and m > n:
            expect_resids = (
                np.asarray(abs(np.dot(a, x) - b)) ** 2).sum(axis=0)
            expect_resids = np.asarray(expect_resids)
            if len(np.asarray(b).shape) == 1:
                expect_resids.shape = (1,)
                assert_equal(residuals.shape, expect_resids.shape)
        else:
            expect_resids = np.array([]).view(type(x))
        assert_almost_equal(residuals, expect_resids)
        assert_(np.issubdtype(residuals.dtype, np.floating))
        assert_(imply(isinstance(b, matrix), isinstance(x, matrix)))
        assert_(imply(isinstance(b, matrix), isinstance(residuals, matrix)))
Project: krpcScripts    Author: jwvanderbeck    | project source | file source
def do(self, a, b):
        arr = np.asarray(a)
        m, n = arr.shape
        u, s, vt = linalg.svd(a, 0)
        x, residuals, rank, sv = linalg.lstsq(a, b)
        if m <= n:
            assert_almost_equal(b, dot(a, x))
            assert_equal(rank, m)
        else:
            assert_equal(rank, n)
        assert_almost_equal(sv, sv.__array_wrap__(s))
        if rank == n and m > n:
            expect_resids = (
                np.asarray(abs(np.dot(a, x) - b)) ** 2).sum(axis=0)
            expect_resids = np.asarray(expect_resids)
            if len(np.asarray(b).shape) == 1:
                expect_resids.shape = (1,)
                assert_equal(residuals.shape, expect_resids.shape)
        else:
            expect_resids = np.array([]).view(type(x))
        assert_almost_equal(residuals, expect_resids)
        assert_(np.issubdtype(residuals.dtype, np.floating))
        assert_(imply(isinstance(b, matrix), isinstance(x, matrix)))
        assert_(imply(isinstance(b, matrix), isinstance(residuals, matrix)))
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda    Author: SignalMedia    | project source | file source
def do(self, a, b):
        arr = np.asarray(a)
        m, n = arr.shape
        u, s, vt = linalg.svd(a, 0)
        x, residuals, rank, sv = linalg.lstsq(a, b)
        if m <= n:
            assert_almost_equal(b, dot(a, x))
            assert_equal(rank, m)
        else:
            assert_equal(rank, n)
        assert_almost_equal(sv, sv.__array_wrap__(s))
        if rank == n and m > n:
            expect_resids = (
                np.asarray(abs(np.dot(a, x) - b)) ** 2).sum(axis=0)
            expect_resids = np.asarray(expect_resids)
            if len(np.asarray(b).shape) == 1:
                expect_resids.shape = (1,)
                assert_equal(residuals.shape, expect_resids.shape)
        else:
            expect_resids = np.array([]).view(type(x))
        assert_almost_equal(residuals, expect_resids)
        assert_(np.issubdtype(residuals.dtype, np.floating))
        assert_(imply(isinstance(b, matrix), isinstance(x, matrix)))
        assert_(imply(isinstance(b, matrix), isinstance(residuals, matrix)))
Project: aws-lambda-numpy    Author: vitolimandibhrata    | project source | file source
def do(self, a, b):
        arr = np.asarray(a)
        m, n = arr.shape
        u, s, vt = linalg.svd(a, 0)
        x, residuals, rank, sv = linalg.lstsq(a, b)
        if m <= n:
            assert_almost_equal(b, dot(a, x))
            assert_equal(rank, m)
        else:
            assert_equal(rank, n)
        assert_almost_equal(sv, sv.__array_wrap__(s))
        if rank == n and m > n:
            expect_resids = (
                np.asarray(abs(np.dot(a, x) - b)) ** 2).sum(axis=0)
            expect_resids = np.asarray(expect_resids)
            if len(np.asarray(b).shape) == 1:
                expect_resids.shape = (1,)
                assert_equal(residuals.shape, expect_resids.shape)
        else:
            expect_resids = np.array([]).view(type(x))
        assert_almost_equal(residuals, expect_resids)
        assert_(np.issubdtype(residuals.dtype, np.floating))
        assert_(imply(isinstance(b, matrix), isinstance(x, matrix)))
        assert_(imply(isinstance(b, matrix), isinstance(residuals, matrix)))
Project: anompy    Author: takuti    | project source | file source
def update(self, Y):
        """Alg. 2: Global update of the singular vectors at time t using exact SVD.

        Args:
            Y (numpy array): m-by-n_t matrix which has n_t "normal" unit vectors.

        """

        if not hasattr(self, 's'):
            # initial update
            self.U, self.s, V = ln.svd(Y, full_matrices=False)
        else:
            F = np.concatenate((np.diag(self.s), np.dot(self.U.T, Y)), axis=1)
            U, self.s, V = ln.svd(F, full_matrices=False)
            self.U = np.dot(self.U, U)

        self.U_k = self.U[:, :self.k]
Project: machine-learning    Author: ocsponge    | project source | file source
def svd_score(data_mat, user, item, sim_meas):
    n = shape(data_mat)[1]
    sim_total = 0.0
    rat_total = 0.0
    # project items into a 4-dimensional latent space via truncated SVD
    u, sigma, vt = la.svd(data_mat)
    sig_mat = mat(eye(4) * sigma[:4])
    data_trans = data_mat.T * u[:, :4] * sig_mat.I
    for j in range(n):
        user_rate = data_mat[user, j]
        if user_rate == 0 or j == item:
            continue
        sim = sim_meas(data_trans[item, :].T, data_trans[j, :].T)
        print('the %d and %d similarity is: %f' % (item, j, sim))
        sim_total += sim
        rat_total += sim * user_rate
    if sim_total == 0:
        return 0
    else:
        return rat_total / sim_total
Project: machine-learning    Author: ocsponge    | project source | file source
def img_comp(numsv=3, thresh=0.8):
    text = []
    with open('0_5.txt') as fr:
        for line in fr.readlines():
            row = []
            for i in range(32):
                row.append(int(line[i]))
            text.append(row)
    print('*****origin matrix*****')
    text_mat = mat(text)
    print_mat(text_mat, thresh)
    u, sigma, vt = la.svd(text_mat)
    sig = mat(eye(numsv) * sigma[:numsv])
    text_trans = u[:, :numsv] * sig * vt[:numsv, :]
    print('*****reconstructed matrix using %d singular values*****' % numsv)
    print_mat(text_trans, thresh)
Project: parametrix    Author: vincentchoqueuse    | project source | file source
def estimate_X_TLS(y,H):
    """ Estimator of x for the Linear Model using Total Least Square (TLS). As compared to the classical Least Squares Estimator, the TLS estimator is more suited when H is not exactly known or contained with noise [GOL80]_.


        .. [GOL80] Golub, Gene H., and Charles F. Van Loan. "An analysis of the total least squares problem." SIAM Journal on Numerical Analysis 17.6 (1980): 883-893.

        """
    N,L=H.shape
    H=np.matrix(H)

    Z=np.hstack([H,y])
    U,S,V=lg.svd(Z)
    V=np.matrix(V)
    V=V.H
    VHY = V[:L, L:]
    VYY = V[L:, L:]
    x = -VHY * lg.inv(VYY)
    return x
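
A hedged usage sketch for estimate_X_TLS on synthetic data (assuming lg refers to numpy.linalg, as in the project's imports); both the regressor matrix and the observations are perturbed, which is the setting TLS targets:

import numpy as np

N, L = 100, 3
x_true = np.random.randn(L, 1)
H_true = np.random.randn(N, L)
y = H_true @ x_true + 0.01 * np.random.randn(N, 1)   # noisy observations
H_obs = H_true + 0.01 * np.random.randn(N, L)        # noisy regressors

x_tls = estimate_X_TLS(y, H_obs)   # y must be an (N, 1) column; x_tls ~ x_true for small noise
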
Project: lambda-numba    Author: rlhotovy    | project source | file source
def do(self, a, b):
        arr = np.asarray(a)
        m, n = arr.shape
        u, s, vt = linalg.svd(a, 0)
        x, residuals, rank, sv = linalg.lstsq(a, b)
        if m <= n:
            assert_almost_equal(b, dot(a, x))
            assert_equal(rank, m)
        else:
            assert_equal(rank, n)
        assert_almost_equal(sv, sv.__array_wrap__(s))
        if rank == n and m > n:
            expect_resids = (
                np.asarray(abs(np.dot(a, x) - b)) ** 2).sum(axis=0)
            expect_resids = np.asarray(expect_resids)
            if len(np.asarray(b).shape) == 1:
                expect_resids.shape = (1,)
                assert_equal(residuals.shape, expect_resids.shape)
        else:
            expect_resids = np.array([]).view(type(x))
        assert_almost_equal(residuals, expect_resids)
        assert_(np.issubdtype(residuals.dtype, np.floating))
        assert_(imply(isinstance(b, matrix), isinstance(x, matrix)))
        assert_(imply(isinstance(b, matrix), isinstance(residuals, matrix)))
Project: tf_img_tech    Author: david-berthelot    | project source | file source
def zca(v):
    """Zero Component Analysis https://github.com/mwv/zca/blob/master/zca/zca.py"""
    v = v.reshape((v.shape[0], -1))
    m = v.mean(0)
    vm = N.ascontiguousarray((v - m).T)
    # print(vm.shape)
    # cov = N.zeros((vm.shape[0], vm.shape[0]), 'f')
    # for x in range(vm.shape[0]):
    #     for y in range(x, vm.shape[0]):
    #         cov[x, y] = cov[y, x] = vm[x].dot(vm[y])
    # cov /= v.shape[0] - 1
    cov = vm.dot(vm.T) / (v.shape[0] - 1)
    u, s = LA.svd(cov, full_matrices=0)[:2]
    w = (u * (1 / N.sqrt(s.clip(1e-6)))).dot(u.T)
    # dw = (u * (1 / N.sqrt(s))).dot(u.T)  # Dewithen
    return m, w
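
A hedged usage sketch for zca above (assuming the project's aliases N = numpy and LA = numpy.linalg): whitening a batch of flattened samples should bring the sample covariance close to the identity.

import numpy as np

v = np.random.rand(500, 16)                 # 500 samples, 16 flattened features
m, w = zca(v)
v_white = (v - m) @ w                       # apply the returned mean and whitening matrix
cov_white = np.cov(v_white, rowvar=False)   # approximately the identity matrix
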
Project: deliver    Author: orchestor    | project source | file source
def do(self, a, b):
        arr = np.asarray(a)
        m, n = arr.shape
        u, s, vt = linalg.svd(a, 0)
        x, residuals, rank, sv = linalg.lstsq(a, b)
        if m <= n:
            assert_almost_equal(b, dot(a, x))
            assert_equal(rank, m)
        else:
            assert_equal(rank, n)
        assert_almost_equal(sv, sv.__array_wrap__(s))
        if rank == n and m > n:
            expect_resids = (
                np.asarray(abs(np.dot(a, x) - b)) ** 2).sum(axis=0)
            expect_resids = np.asarray(expect_resids)
            if len(np.asarray(b).shape) == 1:
                expect_resids.shape = (1,)
                assert_equal(residuals.shape, expect_resids.shape)
        else:
            expect_resids = np.array([]).view(type(x))
        assert_almost_equal(residuals, expect_resids)
        assert_(np.issubdtype(residuals.dtype, np.floating))
        assert_(imply(isinstance(b, matrix), isinstance(x, matrix)))
        assert_(imply(isinstance(b, matrix), isinstance(residuals, matrix)))
Project: rensapy    Author: RensaProject    | project source | file source
def cluster(self, vectors, assign_clusters=False, trace=False):
        assert len(vectors) > 0

        # normalise the vectors
        if self._should_normalise:
            vectors = map(self._normalise, vectors)

        # use SVD to reduce the dimensionality
        if self._svd_dimensions and self._svd_dimensions < len(vectors[0]):
            [u, d, vt] = linalg.svd(numpy.transpose(array(vectors)))
            S = d[:self._svd_dimensions] * \
                numpy.identity(self._svd_dimensions, numpy.float64)
            T = u[:,:self._svd_dimensions]
            Dt = vt[:self._svd_dimensions,:]
            vectors = numpy.transpose(numpy.dot(S, Dt))
            self._Tt = numpy.transpose(T)

        # call abstract method to cluster the vectors
        self.cluster_vectorspace(vectors, trace)

        # assign the vectors to clusters
        if assign_clusters:
            print self._Tt, vectors
            return [self.classify(vector) for vector in vectors]
Project: HamiltonianPy    Author: waltergu    | project source | file source
def test_B():
    print 'test_B'
    random.seed()
    a=B(*svd(random.random((3,3))))
    print 'a:\n%s'%a.M
    b=random.random((3,3))
    print 'b:\n%s'%b
    c1,c2=b.dot(a.M),a.ldot(b)
    print 'dot(b,a):\n%s'%c1
    print 'a.ldot(b):\n%s'%c2.M
    print 'the difference:\n%s'%(c1-c2.M)
    d1,d2=a.M.dot(b),a.rdot(b)
    print 'a.dot(b):\n%s'%d1
    print 'a.rdot(b):\n%s'%d2.M
    print 'the difference:\n%s'%(d1-d2.M)
    print
Project: HamiltonianPy    Author: waltergu    | project source | file source
def __init__(self,T,Vs,pos=0):
        if pos>len(Vs) or pos<0:
            raise ValueError('Bs __init__ error: pos(%s) should be in range(0,%s).'%(pos,len(Vs)))
        self.pos=pos
        self.extend([None]*len(Vs))
        Vs=np.array(Vs)
        for i,V in enumerate(Vs[0:pos]):
            if i==0:
                self[i]=B(*svd(V.dot(T)))
            else:
                self[i]=self[i-1].ldot(V.dot(T))
        for i,V in enumerate(Vs[pos:][::-1]):
            if i==0:
                self[len(Vs)-1-i]=B(*svd(V.dot(T)))
            else:
                self[i]=self[len(Vs)-1-i]=self[len(Vs)-i].rdot(V.dot(T))
Project: RePhraser    Author: MissLummie    | project source | file source
def cluster(self, vectors, assign_clusters=False, trace=False):
        assert len(vectors) > 0

        # normalise the vectors
        if self._should_normalise:
            vectors = map(self._normalise, vectors)

        # use SVD to reduce the dimensionality
        if self._svd_dimensions and self._svd_dimensions < len(vectors[0]):
            [u, d, vt] = linalg.svd(numpy.transpose(array(vectors)))
            S = d[:self._svd_dimensions] * \
                numpy.identity(self._svd_dimensions, numpy.float64)
            T = u[:,:self._svd_dimensions]
            Dt = vt[:self._svd_dimensions,:]
            vectors = numpy.transpose(numpy.dot(S, Dt))
            self._Tt = numpy.transpose(T)

        # call abstract method to cluster the vectors
        self.cluster_vectorspace(vectors, trace)

        # assign the vectors to clusters
        if assign_clusters:
            print self._Tt, vectors
            return [self.classify(vector) for vector in vectors]
Project: PyFusionGUI    Author: SyntaxVoid    | project source | file source
def flucstruc(input_data, min_dphase = -pi, group=fs_group_geometric, method='rms', separate=True, label=None):
    from pyfusion.data.base import DataSet
    from pyfusion.data.timeseries import FlucStruc

    if label:
        fs_dataset = DataSet(label)
    else:
        fs_dataset = DataSet('flucstrucs_%s' %datetime.now())
    svd_data = input_data.subtract_mean().normalise(method, separate).svd()

    for fs_gr in group(svd_data):
        tmp = FlucStruc(svd_data, fs_gr, input_data.timebase, min_dphase=min_dphase)
        tmp.meta = input_data.meta
        fs_dataset.add(tmp)

    return fs_dataset
Project: nlp_sum    Author: Zhujunnan    | project source | file source
def __call__(self, documentSet, words_limit, method="mmr", metric="tf", summary_order="origin"):
        dictionary = self._create_dictionary(documentSet)
        self.summary_order = summary_order
        # empty document
        if not dictionary:
            return ()
        if metric.lower() == "tf":
            matrix = self._create_matrix(documentSet, dictionary)
            matrix = self._compute_term_frequency(matrix)
        elif metric.lower() == "tfidf":
            matrix = self._create_tfidf_matrix(documentSet, dictionary)
        else:
            raise ValueError("Don't support your metric now.")
        u, sigma, v = svd(matrix, full_matrices=False)
        ranks = iter(self._compute_ranks(sigma, v))

        if method.lower() == "default":
            return self._get_best_sentences(documentSet.sentences, words_limit,
                                            lambda sent: next(ranks))
        if method.lower() == "mmr":
            return self._get_best_sentences_by_MMR(documentSet.sentences, words_limit,
                                                   matrix, lambda sent: next(ranks))
Project: Alfred    Author: jkachhadia    | project source | file source
def do(self, a, b):
        arr = np.asarray(a)
        m, n = arr.shape
        u, s, vt = linalg.svd(a, 0)
        x, residuals, rank, sv = linalg.lstsq(a, b)
        if m <= n:
            assert_almost_equal(b, dot(a, x))
            assert_equal(rank, m)
        else:
            assert_equal(rank, n)
        assert_almost_equal(sv, sv.__array_wrap__(s))
        if rank == n and m > n:
            expect_resids = (
                np.asarray(abs(np.dot(a, x) - b)) ** 2).sum(axis=0)
            expect_resids = np.asarray(expect_resids)
            if len(np.asarray(b).shape) == 1:
                expect_resids.shape = (1,)
                assert_equal(residuals.shape, expect_resids.shape)
        else:
            expect_resids = np.array([]).view(type(x))
        assert_almost_equal(residuals, expect_resids)
        assert_(np.issubdtype(residuals.dtype, np.floating))
        assert_(imply(isinstance(b, matrix), isinstance(x, matrix)))
        assert_(imply(isinstance(b, matrix), isinstance(residuals, matrix)))
Project: sictf    Author: malllabiisc    | project source | file source
def _initR(X, A, lmbdaR):
    _log.debug('Initializing R (SVD) lambda R: %s' % str(lmbdaR))
    rank = A.shape[1]
    U, S, Vt = svd(A, full_matrices=False)
    Shat = kron(S, S)
    Shat = (Shat / (Shat ** 2 + lmbdaR)).reshape(rank, rank)
    R = []
    ep = 1e-9
    for i in range(len(X)): # parallelize
        Rn = Shat * dot(U.T, X[i].dot(U))
        Rn = dot(Vt.T, dot(Rn, Vt))

        negativeVal = Rn.min()
        Rn.__iadd__(-negativeVal+ep)
        # if Rn.min() < 0 :
        #   print("Negative Rn!")
        #   raw_input("Press Enter: ")
        # Rn = np.eye(rank)
        # Rn = dot(A.T,A)

        R.append(Rn)
    print('Initialized R')
    return R

# ------------------ Update V ------------------------------------------------
Project: Verideals    Author: Derrreks    | project source | file source
def cluster(self, vectors, assign_clusters=False, trace=False):
        assert len(vectors) > 0

        # normalise the vectors
        if self._should_normalise:
            vectors = map(self._normalise, vectors)

        # use SVD to reduce the dimensionality
        if self._svd_dimensions and self._svd_dimensions < len(vectors[0]):
            [u, d, vt] = linalg.svd(numpy.transpose(array(vectors)))
            S = d[:self._svd_dimensions] * \
                numpy.identity(self._svd_dimensions, numpy.float64)
            T = u[:,:self._svd_dimensions]
            Dt = vt[:self._svd_dimensions,:]
            vectors = numpy.transpose(numpy.dot(S, Dt))
            self._Tt = numpy.transpose(T)

        # call abstract method to cluster the vectors
        self.cluster_vectorspace(vectors, trace)

        # assign the vectors to clusters
        if assign_clusters:
            print self._Tt, vectors
            return [self.classify(vector) for vector in vectors]
Project: flurs    Author: takuti    | project source | file source
def update_model(self, y):
        y = self.proj.reduce(np.array([y]).T)
        y = preprocessing.normalize(y, norm='l2', axis=0)  # (k, 1)

        if not hasattr(self, 'B'):
            self.p_failure = 0.1
            self.B = np.zeros((self.k, self.ell))
            self.A = np.array([])

        U, s, V = ln.svd(self.B, full_matrices=False)

        # update the tracked orthonormal bases
        self.U_r = U[:, :self.r]

        if self.A.size == 0:
            self.A = np.empty_like(y)
            self.A[:] = y[:]
        else:
            self.A = np.concatenate((self.A, y), axis=1)

        if np.count_nonzero(self.A) >= (self.ell * self.k) or self.A.shape[1] == self.k:
            B = self.__boosted_sparse_shrink(self.A, self.ell, self.p_failure)
            self.B = self.__dense_shrink(np.concatenate((self.B, B), axis=1), self.ell)
            self.A = np.array([])
Project: tissue_analysis    Author: VirtualPlants    | project source | file source
def projection_matrix(point_set, subspace_rank = 2):
    """
    Compute the projection matrix of a set of points for the given subspace rank.

    Args:
     - point_set (np.array): list of coordinates of shape (n_point, init_dim).
     - subspace_rank (int): the rank of the subspace to project onto.
    """
    point_set = np.array(point_set)
    nb_coord = point_set.shape[0]
    init_dim = point_set.shape[1]
    assert init_dim > subspace_rank
    assert subspace_rank > 0

    centroid = point_set.mean(axis=0)
    if sum(centroid) != 0:
        # - Compute the centered matrix:
        centered_point_set = point_set - centroid
    else:
        centered_point_set = point_set

    # -- Compute the Singular Value Decomposition (SVD) of centered coordinates:
    U,D,V = svd(centered_point_set, full_matrices=False)
    V = V.T

    # -- Compute the projection matrix:
    H = np.dot(V[:,0:subspace_rank], V[:,0:subspace_rank].T)

    return H
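
A hedged usage sketch for projection_matrix (assuming svd was imported from numpy.linalg, as in the project): the returned H is a symmetric, idempotent, rank-2 projector onto the best-fit plane of the centered points.

import numpy as np

pts = np.random.rand(20, 3)
H = projection_matrix(pts, subspace_rank=2)
assert np.allclose(H, H.T)            # symmetric
assert np.allclose(H @ H, H)          # idempotent (a projector)
assert np.linalg.matrix_rank(H) == 2  # projects onto a 2D subspace
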
Project: mpnum    Author: dseuss    | project source | file source
def compression(self, method='svd', **kwargs):
        """Return a compression of ``self``. Does not modify ``self``.

        Parameters: See :func:`~compress()`.

        :returns: ``(compressed_mpa, overlap)`` where ``overlap`` is the inner
            product returned by :func:`~compress()`.

        """
        if method == 'svd':
            target = self.copy()
            overlap = target._compress_svd(**kwargs)
            return target, overlap
        elif method == 'var':
            return self._compression_var(**kwargs)
        else:
            raise ValueError('{!r} is not a valid method'.format(method))
Project: mpnum    Author: dseuss    | project source | file source
def _compress_svd_l(self, rank, relerr, svdfunc):
        """Compresses the MPA in place from right to left using SVD;
        yields a right-canonical state

        See :func:`~compress` for more details and arguments.

        """
        assert rank > 0, "Cannot compress to rank={}".format(rank)
        assert (relerr is None) or ((0. <= relerr) and (relerr <= 1.)), \
            "relerr={} not allowed".format(relerr)

        for site in range(len(self) - 1, 0, -1):
            ltens = self._lt[site]
            matshape = (ltens.shape[0], -1)
            if relerr is None:
                u, sv, v = svdfunc(ltens.reshape(matshape), rank)
                rank_t = len(sv)
            else:
                u, sv, v = svd(ltens.reshape(matshape))
                svsum = np.cumsum(sv) / np.sum(sv)
                rank_relerr = np.searchsorted(svsum, 1 - relerr) + 1
                rank_t = min(ltens.shape[0], v.shape[0], rank, rank_relerr)

            yield sv, rank_t

            newtens = (matdot(self._lt[site - 1], u[:, :rank_t] * sv[None, :rank_t]),
                       v[:rank_t, :].reshape((rank_t, ) + ltens.shape[1:]))
            self._lt.update(slice(site - 1, site + 1), newtens,
                            canonicalization=(None, 'right'))

        yield np.sum(np.abs(self._lt[0])**2)
Project: mpnum    Author: dseuss    | project source | file source
def _compress_svd_r(self, rank, relerr, svdfunc):
        """Compresses the MPA in place from left to right using SVD;
        yields a left-canonical state

        See :func:`~compress` for parameters
        """
        assert rank > 0, "Cannot compress to rank={}".format(rank)
        assert (relerr is None) or ((0. <= relerr) and (relerr <= 1.)), \
            "Relerr={} not allowed".format(relerr)

        for site in range(len(self) - 1):
            ltens = self._lt[site]
            matshape = (-1, ltens.shape[-1])
            if relerr is None:
                u, sv, v = svdfunc(ltens.reshape(matshape), rank)
                rank_t = len(sv)
            else:
                u, sv, v = svd(ltens.reshape(matshape))
                svsum = np.cumsum(sv) / np.sum(sv)
                rank_relerr = np.searchsorted(svsum, 1 - relerr) + 1
                rank_t = min(ltens.shape[-1], u.shape[1], rank, rank_relerr)

            yield sv, rank_t

            newtens = (u[:, :rank_t].reshape(ltens.shape[:-1] + (rank_t, )),
                       matdot(sv[:rank_t, None] * v[:rank_t, :], self._lt[site + 1]))
            self._lt.update(slice(site, site + 2), newtens,
                            canonicalization=('left', None))

        yield np.sum(np.abs(self._lt[-1])**2)
Project: OpenTDA    Author: outlace    | project source | file source
def reduce_matrix(A, eps=None):
    if np.size(A) == 0:
        return A, 0, 0
    if np.size(A) == 1:
        return A, 1, []

    m, n = A.shape
    if m != n:
        M = np.zeros(2 * (max(A.shape), ))
        M[:m, :n] = A
    else:
        M = A

    u, s, v = svd(M)
    if eps is None:
        eps = s.max() * max(M.shape) * np.finfo(s.dtype).eps

    null_mask = (s <= eps)

    rank = sum(~null_mask)
    null_space = v[null_mask]

    u = u[~null_mask][:, ~null_mask]
    s = np.diag(s[~null_mask])
    v = v[~null_mask]
    reduced = u.dot(s.dot(v))

    return reduced, rank, null_space
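
An illustrative call to reduce_matrix above (assuming svd comes from numpy.linalg): a rank-1 matrix yields rank 1 and a single row spanning its null space.

import numpy as np

A = np.outer([1.0, 2.0], [3.0, 4.0])       # 2x2 matrix of rank 1
reduced, rank, null_space = reduce_matrix(A)
print(rank)          # 1
print(null_space)    # one row, proportional to [0.8, -0.6], spanning the null space of A
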
Project: onsager_deep_learning    Author: mborgerding    | project source | file source
def bernoulli_gaussian_trial(M=250,N=500,L=1000,pnz=.1,kappa=None,SNR=40):

    A = np.random.normal(size=(M, N), scale=1.0 / math.sqrt(M)).astype(np.float32)
    if kappa is not None and kappa >= 1:
        # create a random operator with a specific condition number
        U,_,V = la.svd(A,full_matrices=False)
        s = np.logspace( 0, np.log10( 1/kappa),M)
        A = np.dot( U*(s*np.sqrt(N)/la.norm(s)),V).astype(np.float32)
    A_ = tf.constant(A,name='A')
    prob = TFGenerator(A=A,A_=A_,pnz=pnz,kappa=kappa,SNR=SNR)
    prob.name = 'Bernoulli-Gaussian, random A'

    bernoulli_ = tf.to_float( tf.random_uniform( (N,L) ) < pnz)
    xgen_ = bernoulli_ * tf.random_normal( (N,L) )
    noise_var = pnz*N/M * math.pow(10., -SNR / 10.)
    ygen_ = tf.matmul( A_,xgen_) + tf.random_normal( (M,L),stddev=math.sqrt( noise_var ) )

    prob.xval = ((np.random.uniform( 0,1,(N,L))<pnz) * np.random.normal(0,1,(N,L))).astype(np.float32)
    prob.yval = np.matmul(A,prob.xval) + np.random.normal(0,math.sqrt( noise_var ),(M,L))
    prob.xinit = ((np.random.uniform( 0,1,(N,L))<pnz) * np.random.normal(0,1,(N,L))).astype(np.float32)
    prob.yinit = np.matmul(A,prob.xinit) + np.random.normal(0,math.sqrt( noise_var ),(M,L))
    prob.xgen_ = xgen_
    prob.ygen_ = ygen_
    prob.noise_var = noise_var

    return prob
Project: radar    Author: amoose136    | project source | file source
def test_svd_build(self, level=rlevel):
        # Ticket 627.
        a = array([[0., 1.], [1., 1.], [2., 1.], [3., 1.]])
        m, n = a.shape
        u, s, vh = linalg.svd(a)

        b = dot(transpose(u[:, n:]), a)

        assert_array_almost_equal(b, np.zeros((2, 2)))
Project: radar    Author: amoose136    | project source | file source
def test_large_svd_32bit(self):
        # See gh-4442, 64bit would require very large/slow matrices.
        x = np.eye(1000, 66)
        np.linalg.svd(x)
Project: radar    Author: amoose136    | project source | file source
def test_svd_no_uv(self):
        # gh-4733
        for shape in (3, 4), (4, 4), (4, 3):
            for t in float, complex:
                a = np.ones(shape, dtype=t)
                w = linalg.svd(a, compute_uv=False)
                c = np.count_nonzero(np.absolute(w) > 0.5)
                assert_equal(c, 1)
                assert_equal(np.linalg.matrix_rank(a), 1)
                assert_array_less(1, np.linalg.norm(a, ord=2))
Project: radar    Author: amoose136    | project source | file source
def do(self, a, b):
        u, s, vt = linalg.svd(a, 0)
        assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :],
                                           np.asarray(vt)),
                        rtol=get_rtol(u.dtype))
        assert_(imply(isinstance(a, matrix), isinstance(u, matrix)))
        assert_(imply(isinstance(a, matrix), isinstance(vt, matrix)))
Project: radar    Author: amoose136    | project source | file source
def test_types(self):
        def check(dtype):
            x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
            u, s, vh = linalg.svd(x)
            assert_equal(u.dtype, dtype)
            assert_equal(s.dtype, get_real_dtype(dtype))
            assert_equal(vh.dtype, dtype)
            s = linalg.svd(x, compute_uv=False)
            assert_equal(s.dtype, get_real_dtype(dtype))

        for dtype in [single, double, csingle, cdouble]:
            yield check, dtype
Project: radar    Author: amoose136    | project source | file source
def do(self, a, b):
        c = asarray(a)  # a might be a matrix
        s = linalg.svd(c, compute_uv=False)
        old_assert_almost_equal(
            s[..., 0] / s[..., -1], linalg.cond(a, 2), decimal=5)
Project: Video-Classification-Action-Recognition    Author: qijiezhao    | project source | file source
def _get_orthogonal_init_weights(weights):
    fan_out = weights.size(0)
    fan_in = weights.size(1) * weights.size(2)*weights.size(3)*weights.size(4)

    u, _, v = svd(normal(0.0, 0.01, (fan_out, fan_in)), full_matrices=False)

    if u.shape == (fan_out, fan_in):
        return torch.Tensor(u.reshape(weights.size()))
    else:
        return torch.Tensor(v.reshape(weights.size()))
Project: nimo    Author: wolfram2012    | project source | file source
def PerspectiveFromPointsOld(source, dest, new_size):
    '''
    Python/SciPy implementation that finds a perspective
    transform between points.

    Most users should use PerspectiveFromPoints instead.  This method
    may be eliminated in the future.
    '''
    assert len(source) == len(dest)

    src_nrm = pv.AffineNormalizePoints(source)
    source = src_nrm.transformPoints(source)
    dst_nrm = pv.AffineNormalizePoints(dest)
    dest   = dst_nrm.transformPoints(dest)

    A = []
    for i in range(len(source)):
        src = source[i]
        dst = dest[i]

        # See Hartley and Zisserman Ch. 4.1, 4.1.1, 4.4.4
        row1 = [0.0,0.0,0.0,-dst.w*src.x,-dst.w*src.y,-dst.w*src.w,dst.y*src.x,dst.y*src.y,dst.y*src.w]
        row2 = [dst.w*src.x,dst.w*src.y,dst.w*src.w,0.0,0.0,0.0,-dst.x*src.x,-dst.x*src.y,-dst.x*src.w]
        #row3 = [-dst.y*src.x,-dst.y*src.y,-dst.y*src.w,dst.x*src.x,dst.x*src.y,dst.x*src.w,0.0,0.0,0.0]
        A.append(row1)
        A.append(row2)
        #A.append(row3)
    A = np.array(A)
    U,D,Vt = la.svd(A)
    H = Vt[8,:].reshape(3,3)

    matrix = np.dot(dst_nrm.inverse,np.dot(H,src_nrm.matrix))

    return PerspectiveTransform(matrix,new_size)
Project: CSB    Author: csb-toolbox    | project source | file source
def rmsd(X, Y):
    """
    Calculate the root mean squared deviation (RMSD) using Kabsch's formula.

    @param X: (n, d) input vector
    @type X: numpy array

    @param Y: (n, d) input vector
    @type Y: numpy array

    @return: rmsd value between the input vectors
    @rtype: float
    """

    from numpy import sum, dot, sqrt, clip, average
    from numpy.linalg import svd, det

    X = X - X.mean(0)
    Y = Y - Y.mean(0)

    R_x = sum(X ** 2)
    R_y = sum(Y ** 2)

    V, L, U = svd(dot(Y.T, X))

    if det(dot(V, U)) < 0.:
        L[-1] *= -1

    return sqrt(clip(R_x + R_y - 2 * sum(L), 0., 1e300) / len(X))
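
A quick hedged check of the Kabsch RMSD above: rotating a point cloud leaves the optimally superposed RMSD at (numerically) zero.

import numpy as np

X = np.random.rand(10, 3)
theta = 0.3
R = np.array([[np.cos(theta), -np.sin(theta), 0.0],
              [np.sin(theta),  np.cos(theta), 0.0],
              [0.0,            0.0,           1.0]])
print(rmsd(X, X @ R.T))   # ~0.0
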
Project: CSB    Author: csb-toolbox    | project source | file source
def wrmsd(X, Y, w):
    """
    Calculate the weighted root mean squared deviation (wRMSD) using Kabsch's
    formula.

    @param X: (n, d) input vector
    @type X: numpy array

    @param Y: (n, d) input vector
    @type Y: numpy array

    @param w: input weights
    @type w: numpy array

    @return: rmsd value between the input vectors
    @rtype: float
    """

    from numpy import sum, dot, sqrt, clip, average
    from numpy.linalg import svd

    ## normalize weights

    w = w / w.sum()

    X = X - dot(w, X)
    Y = Y - dot(w, Y)

    R_x = sum(X.T ** 2 * w)
    R_y = sum(Y.T ** 2 * w)

    L = svd(dot(Y.T * w, X))[1]

    return sqrt(clip(R_x + R_y - 2 * sum(L), 0., 1e300))
Project: l1l2py    Author: slipguru    | project source | file source
def main():
    """
    """

    A = np.array([[1,-2],[3,5]])

    print A

    res = svd(A)
    u = res[0]
    s = res[1]
    v = res[2]

    print s
Project: uci-statnlp    Author: sameersingh    | project source | file source
def pca(m, k):
    from numpy.linalg import svd
    from numpy.linalg import eig
    from numpy.linalg import det
    u,s,v = svd(m)
    rs = np.sqrt(np.diag(s[:k]))
    x=np.dot(u[:,:k], rs)
    y=np.dot(rs, v[:k])
    mhat=np.dot(x, y)
    return s, x, y, mhat
Project: krpcScripts    Author: jwvanderbeck    | project source | file source
def test_svd_build(self, level=rlevel):
        # Ticket 627.
        a = array([[0., 1.], [1., 1.], [2., 1.], [3., 1.]])
        m, n = a.shape
        u, s, vh = linalg.svd(a)

        b = dot(transpose(u[:, n:]), a)

        assert_array_almost_equal(b, np.zeros((2, 2)))
Project: krpcScripts    Author: jwvanderbeck    | project source | file source
def test_large_svd_32bit(self):
        # See gh-4442, 64bit would require very large/slow matrices.
        x = np.eye(1000, 66)
        np.linalg.svd(x)
Project: krpcScripts    Author: jwvanderbeck    | project source | file source
def test_svd_no_uv(self):
        # gh-4733
        for shape in (3, 4), (4, 4), (4, 3):
            for t in float, complex:
                a = np.ones(shape, dtype=t)
                w = linalg.svd(a, compute_uv=False)
                c = np.count_nonzero(np.absolute(w) > 0.5)
                assert_equal(c, 1)
                assert_equal(np.linalg.matrix_rank(a), 1)
                assert_array_less(1, np.linalg.norm(a, ord=2))
Project: krpcScripts    Author: jwvanderbeck    | project source | file source
def do(self, a, b):
        u, s, vt = linalg.svd(a, 0)
        assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :],
                                           np.asarray(vt)),
                        rtol=get_rtol(u.dtype))
        assert_(imply(isinstance(a, matrix), isinstance(u, matrix)))
        assert_(imply(isinstance(a, matrix), isinstance(vt, matrix)))
Project: krpcScripts    Author: jwvanderbeck    | project source | file source
def test_types(self):
        def check(dtype):
            x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
            u, s, vh = linalg.svd(x)
            assert_equal(u.dtype, dtype)
            assert_equal(s.dtype, get_real_dtype(dtype))
            assert_equal(vh.dtype, dtype)
            s = linalg.svd(x, compute_uv=False)
            assert_equal(s.dtype, get_real_dtype(dtype))

        for dtype in [single, double, csingle, cdouble]:
            yield check, dtype
Project: krpcScripts    Author: jwvanderbeck    | project source | file source
def do(self, a, b):
        c = asarray(a)  # a might be a matrix
        s = linalg.svd(c, compute_uv=False)
        old_assert_almost_equal(
            s[..., 0] / s[..., -1], linalg.cond(a, 2), decimal=5)
Project: crossingNet    Author: melonwan    | project source | file source
def normRotation(self, tmpSkel = None, refPtIdx = None):
        '''tmpSkel: normalize every palm pose to the tmpSkel pose (template skeleton)
           refPtIdx: indexes of joints on the palm
        '''
        if tmpSkel is None:
            tmpSkel = self.frmList[0].norm_skel

        if refPtIdx is None:
            refPtIdx = self.refPtIdx
        refIdx = []
        for idx in refPtIdx:
            refIdx += [idx*3, idx*3+1, idx*3+2]

        keep_list = set(range(3*self.skel_num)).\
            difference(set(refIdx+range(self.centerPtIdx, self.centerPtIdx+3)))
        keep_list = list(keep_list)

        temp = tmpSkel[refIdx].copy()
        temp.shape = (-1,3)

        for frm in self.frmList:
           model = frm.norm_skel[refIdx] 
           model.shape = (-1,3)

           R = np.zeros((3,3), np.float32)
           for vt, vm in zip(temp, model):
               R = R + np.dot(vm.reshape(3,1), vt.reshape(1,3))

           U,s,V = svd(R, full_matrices=True) 
           R = np.dot(V.transpose(), U.transpose())
           frm.quad = Quaternion(R)
           frm.norm_skel.shape = (-1,3)
           frm.norm_skel = np.dot(R,frm.norm_skel.transpose())
           frm.norm_skel = frm.norm_skel.flatten('F')
           # frm.norm_skel = frm.norm_skel[keep_list]
Project: CRIkit2    Author: CoherentRamanNIST    | project source | file source
def _calc(self, data, ret_obj):
        """
        Calculate SVD (wrap numpy SVD).
        """
        try:
            if self.rng is None:
                self._U, self._s, self._Vh = _svd(data, full_matrices=False)
            else:
                self._U, self._s, self._Vh = _svd(data[..., self.rng],
                                               full_matrices=False)
        except:
            return False
        else:
            return True
Project: deep_share    Author: luyongxi    | project source | file source
def truncated_svd(W, k):
    """ Given input filters, return a set of basis and the linear combination
        required to approximate the original input filters
        Input: 
            W: [dxc] matrix, where c is the input dimension, 
                d is the output dimension
        Output:
            B: [kxc] matrix, where c is the input dimension, 
                k is the maximum rank of output filters
            L: [dxk] matrix, where k is the maximum rank of the
                output filters, d is the output dimension

        Note that k must satisfy k <= min(c, d); larger values raise an error.
    """
    d, c = W.shape
    assert k <= min(c,d), 'k={} is too large for c={}, d={}'.format(k,c,d)
    # S in this case is a vector with len=K=min(c,d), and U is [d x K], V is [K x c]
    u, s, v = svd(W, full_matrices=False)
    # compute the square root of s -> s_sqrt
    s_sqrt = np.sqrt(s[:k])
    # extract B from v
    B = v[:k, :] * s_sqrt[:, np.newaxis]
    # extract L from u
    L = u[:, :k] * s_sqrt

    return B, L
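
A hedged sanity check for truncated_svd (assuming svd was imported from numpy.linalg, as in the project): with k = min(c, d) the two factors reproduce W exactly, and the shapes match the docstring.

import numpy as np

d, c, k = 8, 5, 5
W = np.random.rand(d, c)
B, L = truncated_svd(W, k)
assert B.shape == (k, c) and L.shape == (d, k)
assert np.allclose(W, L @ B)    # exact at full rank; an approximation for smaller k
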
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda    Author: SignalMedia    | project source | file source
def test_svd_build(self, level=rlevel):
        # Ticket 627.
        a = array([[0., 1.], [1., 1.], [2., 1.], [3., 1.]])
        m, n = a.shape
        u, s, vh = linalg.svd(a)

        b = dot(transpose(u[:, n:]), a)

        assert_array_almost_equal(b, np.zeros((2, 2)))