Python scipy module: eye() example source code

We extracted the following 7 code examples from open-source Python projects to illustrate how scipy.eye() is used.
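Before the project examples, a minimal standalone sketch of the call itself. In older SciPy releases scipy.eye() was simply a re-export of numpy.eye(); the top-level alias has since been deprecated and removed, so numpy.eye() is the modern drop-in replacement:

import numpy as np

I3 = np.eye(3)            # 3x3 identity matrix
shifted = np.eye(3, k=1)  # ones on the first superdiagonal instead
print(I3)
print(shifted)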

Project: sGLMM    Author: YeWenting
import scipy
from scipy import linalg  # scipy.dot / scipy.eye are legacy aliases removed in modern SciPy


def factor(X, rho):
    """
    Computes the Cholesky factorization of the kernel K = 1/rho*XX^T + I.

    Input:
    X: design matrix, n_s x n_f (we assume n_s << n_f)
    rho: regularizer

    Output:
    U: upper triangular matrix such that K = U^T U
    """
    n_s, n_f = X.shape
    K = 1. / rho * scipy.dot(X, X.T) + scipy.eye(n_s)
    U = linalg.cholesky(K)  # upper triangular by default
    return U
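
A hypothetical usage sketch for factor() above (X and rho are made-up inputs, and a SciPy old enough to still provide the scipy.dot/scipy.eye aliases is assumed). Since scipy.linalg.cholesky() returns the upper factor by default, U.T @ U should reproduce the kernel K:

import numpy as np

X = np.random.randn(5, 50)      # n_s = 5 samples, n_f = 50 features (n_s << n_f)
U = factor(X, rho=2.0)
K = X @ X.T / 2.0 + np.eye(5)
assert np.allclose(U.T @ U, K)  # upper Cholesky factor: K = U^T U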
Project: prml    Author: Yevgnen
# Assumes module-level imports: import scipy as sp; from scipy import linalg as spla
def _maximum_likelihood(self, X):
        n_samples, n_features = X.shape if X.ndim > 1 else (1, X.shape[0])
        n_components = self.n_components

        # Predict mean
        mu = X.mean(axis=0)

        # Predict covariance
        cov = sp.cov(X, rowvar=0)
        eigvals, eigvecs = self._eig_decomposition(cov)
        sigma2 = ((sp.sum(cov.diagonal()) - eigvals.sum()) /
                  (n_features - n_components))  # FIXME: M < D?

        weight = sp.dot(eigvecs, sp.diag(sp.sqrt(eigvals - sigma2)))
        M = sp.dot(weight.T, weight) + sigma2 * sp.eye(n_components)
        inv_M = spla.inv(M)

        self.eigvals = eigvals
        self.eigvecs = eigvecs
        self.predict_mean = mu
        self.predict_cov = sp.dot(weight, weight.T) + sigma2 * sp.eye(n_features)
        self.latent_mean = sp.transpose(sp.dot(inv_M, sp.dot(weight.T, X.T - mu[:, sp.newaxis])))
        self.latent_cov = sigma2 * inv_M
        self.sigma2 = sigma2    # FIXME!
        self.weight = weight
        self.inv_M = inv_M

        return self.latent_mean
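
The sigma2 estimate above is the standard probabilistic PCA maximum-likelihood result (Tipping & Bishop): the average of the discarded eigenvalues. A standalone sketch with made-up data, outside the class:

import numpy as np

X = np.random.randn(200, 5)
cov = np.cov(X, rowvar=False)
eigvals = np.sort(np.linalg.eigvalsh(cov))[::-1]  # eigenvalues, descending
n_components = 2
# Average of the discarded eigenvalues; equivalent to
# (trace(cov) - sum of kept eigenvalues) / (n_features - n_components) as computed above.
sigma2 = eigvals[n_components:].sum() / (5 - n_components)
print(sigma2)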
Project: prml    Author: Yevgnen
# Assumes module-level imports: import scipy as sp; sprd aliases numpy.random (scipy.random in legacy SciPy)
def reconstruct(self, X):
        n_features = sp.atleast_2d(X).shape[1]
        latent = sp.dot(self.inv_M, sp.dot(self.weight.T, (X - self.predict_mean).T))
        eps = sprd.multivariate_normal(sp.zeros(n_features), self.sigma2 * sp.eye(n_features))
        recons = sp.dot(self.weight, latent) + self.predict_mean + eps

        return recons
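
The eps term adds isotropic reconstruction noise with covariance sigma2 * I. A minimal sketch of that draw on its own, using numpy.random directly (which sprd is assumed to alias):

import numpy as np

sigma2, n_features = 0.1, 4
eps = np.random.multivariate_normal(np.zeros(n_features), sigma2 * np.eye(n_features))
print(eps.shape)  # (4,): one noise vector per draw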
Project: RFCN    Author: zengxianyu
def __MR_affinity_matrix(self, img, labels):
        # Assumes module-level imports: import scipy as sp; from scipy.linalg import pinv
        W, D = self.__MR_W_D_matrix(img, labels)
        aff = pinv(D - self.weight_parameters['alpha'] * W)
        aff[sp.eye(sp.amax(labels) + 1).astype(bool)] = 0.0  # set the diagonal elements to 0
        return aff
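
The last line relies on boolean indexing with an identity matrix: eye(n).astype(bool) selects exactly the diagonal entries. A tiny demonstration with made-up values:

import numpy as np

aff = np.arange(9, dtype=float).reshape(3, 3)
aff[np.eye(3).astype(bool)] = 0.0  # zeroes the diagonal in place
print(aff)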
Project: CS-LMM    Author: HaohanWang
import scipy
from scipy import linalg  # scipy.dot / scipy.eye are legacy aliases removed in modern SciPy


def factor(X, rho):
    """
    Computes the Cholesky factorization of the kernel K = 1/rho*XX^T + I.
    Input:
    X: design matrix, n_s x n_f (we assume n_s << n_f)
    rho: regularizer
    Output:
    U: upper triangular matrix such that K = U^T U
    """
    n_s, n_f = X.shape
    K = 1. / rho * scipy.dot(X, X.T) + scipy.eye(n_s)
    U = linalg.cholesky(K)  # upper triangular by default
    return U
Project: prml    Author: Yevgnen
# Assumes module-level imports: import scipy as sp; from scipy import linalg as spla
def fit(self, X, T, max_iter=int(1e2), tol=1e-3, bound=1e10):
        """Fit an RVM model with the training data ``(X, T)``."""
        # Initialize the hyperparameters
        self._init_hyperparameters(X, T)

        # Compute design matrix
        n_samples = X.shape[0]
        phi = sp.c_[sp.ones(n_samples), self._compute_design_matrix(X)]  # Add x0

        alpha = self.cov
        beta = self.beta

        log_evidence = -1e10
        for _ in range(max_iter):
            alpha[alpha >= bound] = bound
            rv_indices = sp.nonzero(alpha < bound)[0]
            rv_phi = phi[:, rv_indices]
            rv_alpha = alpha[rv_indices]

            # Compute the posterior distribution
            post_cov = spla.inv(sp.diag(rv_alpha) + beta * sp.dot(rv_phi.T, rv_phi))
            post_mean = beta * sp.dot(post_cov, sp.dot(rv_phi.T, T))

            # Re-estimate the hyperparameters
            gamma = 1 - rv_alpha * post_cov.diagonal()
            rv_alpha = gamma / (post_mean * post_mean)
            beta = (n_samples + 1 - gamma.sum()) / spla.norm(T - sp.dot(rv_phi, post_mean))**2

            # Evaluate the log evidence and test the relative change
            C = sp.eye(rv_phi.shape[0]) / beta + rv_phi.dot(sp.diag(1.0 / rv_alpha)).dot(rv_phi.T)
            log_evidence_new = -0.5 * (sp.log(spla.det(C)) + T.dot(spla.inv(C)).dot(T))
            diff = spla.norm(log_evidence_new - log_evidence)
            if (diff < tol * spla.norm(log_evidence)):
                break

            log_evidence = log_evidence_new
            alpha[rv_indices] = rv_alpha

        # Should re-compute the posterior distribution
        self.rv_indices = rv_indices
        self.cov = post_cov
        self.mean = post_mean
        self.beta = beta

        return self
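
The clipping of alpha and the rv_indices selection implement the usual RVM sparsity mechanism: basis functions whose precision alpha diverges toward the bound are pruned. A standalone sketch with made-up values:

import numpy as np

bound = 1e10
alpha = np.array([0.5, 1e12, 3.0, 1e11])
alpha = np.minimum(alpha, bound)           # clip diverging precisions at the bound
rv_indices = np.nonzero(alpha < bound)[0]
print(rv_indices)                          # [0 2]: only these basis functions survive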
Project: prml    Author: Yevgnen
# Assumes module-level imports: import scipy as sp; from scipy import linalg as spla;
# sprd aliases numpy.random (scipy.random in legacy SciPy)
def _em(self, X):
        # Constants
        n_samples, n_features = X.shape
        n_components = self.n_components
        max_iter = self.max_iter
        # tol = self.tol

        mu = X.mean(axis=0)
        X_centered = X - sp.atleast_2d(mu)

        # Initialize parameters
        latent_mean = 0
        sigma2 = 1
        weight = sprd.randn(n_features, n_components)

        # Main loop of EM algorithm
        for i in range(max_iter):
            # E step
            M = sp.dot(weight.T, weight) + sigma2 * sp.eye(n_components)
            inv_M = spla.inv(M)
            latent_mean = sp.dot(inv_M, sp.dot(weight.T, X_centered.T)).T

            # M step
            expectation_zzT = n_samples * sigma2 * inv_M + sp.dot(latent_mean.T, latent_mean)

            # Re-estimate W
            weight = sp.dot(sp.dot(X_centered.T, latent_mean), spla.inv(expectation_zzT))
            weight2 = sp.dot(weight.T, weight)

            # Re-estimate \sigma^2
            sigma2 = ((spla.norm(X_centered)**2 -
                       2 * sp.dot(latent_mean.ravel(), sp.dot(X_centered, weight).ravel()) +
                       sp.trace(sp.dot(expectation_zzT, weight2))) /
                      (n_samples * n_features))

        self.predict_mean = mu
        self.predict_cov = sp.dot(weight, weight.T) + sigma2 * sp.eye(n_features)
        self.latent_mean = latent_mean
        self.latent_cov = sigma2 * inv_M
        self.sigma2 = sigma2
        self.weight = weight
        self.inv_M = inv_M

        return self.latent_mean
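
For reference, a compact standalone run of the same EM updates on synthetic data (all names and shapes here are illustrative, not the project's API):

import numpy as np

rng = np.random.default_rng(0)
n, d, q = 300, 5, 2
X = rng.normal(size=(n, d))
Xc = X - X.mean(axis=0)
W, sigma2 = rng.normal(size=(d, q)), 1.0
for _ in range(50):
    M = W.T @ W + sigma2 * np.eye(q)    # E step
    inv_M = np.linalg.inv(M)
    Z = Xc @ W @ inv_M                  # posterior latent means, n x q
    Ezz = n * sigma2 * inv_M + Z.T @ Z  # M step: E[sum_n z_n z_n^T]
    W = Xc.T @ Z @ np.linalg.inv(Ezz)
    sigma2 = (np.linalg.norm(Xc)**2
              - 2 * np.sum(Z * (Xc @ W))
              + np.trace(Ezz @ (W.T @ W))) / (n * d)
print(sigma2)  # converged isotropic noise variance estimate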