Python scipy module: diag() example source code

The following six code examples, extracted from open-source Python projects, illustrate how to use scipy.diag().
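Before the project snippets, a minimal sketch of the two directions of diag(): a 1-D input builds a diagonal matrix, a 2-D input extracts the diagonal. This assumes an older SciPy that still re-exports NumPy functions from its top-level namespace; on current releases, numpy.diag is the drop-in replacement.

import scipy as sp

v = sp.array([1.0, 2.0, 3.0])
D = sp.diag(v)   # 3x3 matrix with v on the main diagonal
d = sp.diag(D)   # recovers the 1-D diagonal of a matrix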

Project: PleioPred    Author: yiminghu    | project source | file source
import scipy as sp
from scipy import linalg

def annopred_inf(beta_hats, pr_sigi, n=1000, reference_ld_mats=None, ld_window_size=100):
    """
    Infinitesimal model with SNP-specific heritability derived from annotation,
    used as the initial values for MCMC of the non-infinitesimal model.
    """
    num_betas = len(beta_hats)
    updated_betas = sp.empty(num_betas)

    for i, wi in enumerate(range(0, num_betas, ld_window_size)):
        start_i = wi
        stop_i = min(num_betas, wi + ld_window_size)
        Li = 1.0 / pr_sigi[start_i: stop_i]      # per-SNP prior precisions
        D = reference_ld_mats[i]                 # reference LD matrix for this window
        A = n * D + sp.diag(Li)                  # posterior precision matrix
        A_inv = linalg.pinv(A)
        updated_betas[start_i: stop_i] = sp.dot(A_inv * n, beta_hats[start_i: stop_i])  # adjust the beta_hats

    return updated_betas
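A toy invocation under made-up values, assuming two windows of 100 SNPs with identity reference LD matrices; none of these inputs come from the PleioPred project itself:

m = 200
beta_hats = sp.random.randn(m) * 0.01
pr_sigi = sp.ones(m) * 1e-3                     # per-SNP heritability from annotation
ld_mats = [sp.eye(100), sp.eye(100)]            # one reference LD matrix per window
betas = annopred_inf(beta_hats, pr_sigi, n=10000,
                     reference_ld_mats=ld_mats, ld_window_size=100)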
Project: prml    Author: Yevgnen    | project source | file source
# Assumes: import scipy as sp; import scipy.linalg as spla (method of a PPCA class)
def _maximum_likelihood(self, X):
        n_samples, n_features = X.shape if X.ndim > 1 else (1, X.shape[0])
        n_components = self.n_components

        # Estimate the mean
        mu = X.mean(axis=0)

        # Estimate the covariance and its eigendecomposition
        cov = sp.cov(X, rowvar=0)
        eigvals, eigvecs = self._eig_decomposition(cov)

        # Noise variance: average of the discarded eigenvalues
        sigma2 = ((sp.sum(cov.diagonal()) - eigvals.sum()) /
                  (n_features - n_components))  # FIXME: M < D?

        weight = sp.dot(eigvecs, sp.diag(sp.sqrt(eigvals - sigma2)))
        M = sp.dot(weight.T, weight) + sigma2 * sp.eye(n_components)
        inv_M = spla.inv(M)

        self.eigvals = eigvals
        self.eigvecs = eigvecs
        self.predict_mean = mu
        self.predict_cov = sp.dot(weight, weight.T) + sigma2 * sp.eye(n_features)
        self.latent_mean = sp.transpose(sp.dot(inv_M, sp.dot(weight.T, X.T - mu[:, sp.newaxis])))
        self.latent_cov = sigma2 * inv_M
        self.sigma2 = sigma2    # FIXME!
        self.weight = weight
        self.inv_M = inv_M

        return self.latent_mean
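For reference, a standalone sketch of the same closed-form probabilistic PCA estimate (Tipping and Bishop): the noise variance is the average discarded eigenvalue, and the loading matrix scales the leading eigenvectors. ppca_ml is a hypothetical helper, not part of the prml project:

import numpy as np

def ppca_ml(X, n_components):
    cov = np.cov(X, rowvar=False)
    eigvals, eigvecs = np.linalg.eigh(cov)
    order = np.argsort(eigvals)[::-1]                    # largest eigenvalues first
    eigvals, eigvecs = eigvals[order], eigvecs[:, order]
    d = X.shape[1]
    sigma2 = eigvals[n_components:].sum() / (d - n_components)
    W = eigvecs[:, :n_components] * np.sqrt(eigvals[:n_components] - sigma2)
    return W, sigma2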
Project: pyabc    Author: neuralyzer    | project source | file source
# Assumes: import scipy as sp; import scipy.linalg as la (method of a distance class)
def _calculate_whitening_transformation_matrix(self, sample_from_prior):
        samples_vec = sp.asarray([self._dict_to_to_vect(x)
                                  for x in sample_from_prior])
        # samples_vec is an array of shape nr_samples x nr_features
        means = samples_vec.mean(axis=0)
        centered = samples_vec - means
        covariance = centered.T.dot(centered)
        w, v = la.eigh(covariance)
        # Scale eigenvector coordinates by 1/sqrt(eigenvalue), then rotate back
        self._whitening_transformation_matrix = (
            v.dot(sp.diag(1. / sp.sqrt(w))).dot(v.T))
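The same whitening construction outside the class, with illustrative names; unlike the snippet above, this divides the scatter matrix by the sample count, so the whitened data ends up with unit covariance:

import numpy as np

samples = np.random.multivariate_normal([0.0, 0.0], [[4.0, 1.0], [1.0, 2.0]], size=5000)
centered = samples - samples.mean(axis=0)
cov = centered.T.dot(centered) / len(samples)
w, v = np.linalg.eigh(cov)
whitener = v.dot(np.diag(1.0 / np.sqrt(w))).dot(v.T)
whitened = centered.dot(whitener.T)
# np.cov(whitened, rowvar=False) is now close to the identity matrix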
Project: digital_rf    Author: MITHaystack    | project source | file source
import scipy as sp
import scipy.linalg
import matplotlib.pyplot as plt

debug_plot = False  # module-level flag in the original source

def outlier_removed_fit(m, w=None, n_iter=10, polyord=7):
    """
    Remove outliers using fitted data.

    Args:
        m (:obj:`numpy array`): Phase curve.
        w (:obj:`numpy array`): Weights, one per sample.
        n_iter (:obj:`int`): Number of outlier-removal iterations.
        polyord (:obj:`int`): Order of the polynomial used.

    Returns:
        fit (:obj:`numpy array`): Fitted curve with outliers removed.
    """
    if w is None:
        w = sp.ones_like(m)
    W = sp.diag(sp.sqrt(w))          # weight matrix for weighted least squares
    m2 = sp.copy(m)
    tv = sp.linspace(-1, 1, num=len(m))
    A = sp.zeros([len(m), polyord])
    for j in range(polyord):
        A[:, j] = tv**(float(j))     # polynomial design matrix
    A2 = sp.dot(W, A)
    m2w = sp.dot(m2, W)
    fit = None
    for i in range(n_iter):
        xhat = sp.linalg.lstsq(A2, m2w)[0]
        fit = sp.dot(A, xhat)
        # use gradient for central finite differences which keeps order
        resid = sp.gradient(fit - m2)
        std = sp.std(resid)
        bidx = sp.where(sp.absolute(resid) > 2.0 * std)[0]
        # zero out the rows flagged as outliers
        for bi in bidx:
            A2[bi, :] = 0.0
            m2[bi] = 0.0
            m2w[bi] = 0.0
    if debug_plot:
        plt.plot(m2, label="outlier removed")
        plt.plot(m, label="original")
        plt.plot(fit, label="fit")
        plt.legend()
        plt.ylim([fit.min() - std * 3.0, fit.max() + std * 3.0])
        plt.show()
    return fit
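A toy usage with a noisy quadratic phase curve and a few injected spikes; all values are illustrative:

t = sp.linspace(-1, 1, 500)
phase = 3.0 * t**2 + 0.1 * sp.random.randn(500)
phase[[50, 200, 400]] += 10.0                  # inject outliers
smooth = outlier_removed_fit(phase, n_iter=10, polyord=7)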
Project: prml    Author: Yevgnen    | project source | file source
# Assumes: import scipy as sp; import scipy.linalg as spla (method of an RVM class)
def fit(self, X, T, max_iter=int(1e2), tol=1e-3, bound=1e10):
        """Fit a RVM model with the training data ``(X, T)``."""
        # Initialize the hyperparameters
        self._init_hyperparameters(X, T)

        # Compute the design matrix
        n_samples = X.shape[0]
        phi = sp.c_[sp.ones(n_samples), self._compute_design_matrix(X)]  # Add x0

        alpha = self.cov
        beta = self.beta

        log_evidence = -1e10
        for iteration in range(max_iter):
            alpha[alpha >= bound] = bound
            rv_indices = sp.nonzero(alpha < bound)[0]  # indices of surviving relevance vectors
            rv_phi = phi[:, rv_indices]
            rv_alpha = alpha[rv_indices]

            # Compute the posterior distribution
            post_cov = spla.inv(sp.diag(rv_alpha) + beta * sp.dot(rv_phi.T, rv_phi))
            post_mean = beta * sp.dot(post_cov, sp.dot(rv_phi.T, T))

            # Re-estimate the hyperparameters
            gamma = 1 - rv_alpha * post_cov.diagonal()
            rv_alpha = gamma / (post_mean * post_mean)
            beta = (n_samples + 1 - gamma.sum()) / spla.norm(T - sp.dot(rv_phi, post_mean))**2

            # Evaluate the log evidence and test the relative change
            C = sp.eye(rv_phi.shape[0]) / beta + rv_phi.dot(sp.diag(1.0 / rv_alpha)).dot(rv_phi.T)
            log_evidence_new = -0.5 * (sp.log(spla.det(C)) + T.dot(spla.inv(C)).dot(T))
            diff = spla.norm(log_evidence_new - log_evidence)
            if diff < tol * spla.norm(log_evidence):
                break

            log_evidence = log_evidence_new
            alpha[rv_indices] = rv_alpha

        # Should re-compute the posterior distribution
        self.rv_indices = rv_indices
        self.cov = post_cov
        self.mean = post_mean
        self.beta = beta

        return self
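A standalone sketch of one re-estimation step from the loop above, on a fixed toy design matrix without the bias column (hence N - sum(gamma) rather than N + 1 - sum(gamma) in the beta update); none of these names come from the prml project:

import numpy as np

rng = np.random.default_rng(0)
n, k = 50, 5
phi = rng.standard_normal((n, k))
T = phi.dot(np.array([1.0, 0.0, -2.0, 0.0, 0.5])) + 0.1 * rng.standard_normal(n)
alpha, beta = np.ones(k), 1.0

post_cov = np.linalg.inv(np.diag(alpha) + beta * phi.T.dot(phi))
post_mean = beta * post_cov.dot(phi.T).dot(T)
gamma = 1 - alpha * post_cov.diagonal()          # how well-determined each weight is
alpha = gamma / (post_mean ** 2)                 # large alpha means the weight gets pruned
beta = (n - gamma.sum()) / np.linalg.norm(T - phi.dot(post_mean)) ** 2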
Project: dzetsaka    Author: lennepkade    | project source | file source
# Assumes: import scipy as sp (method of a Gaussian model classifier)
def predict(self, xt, tau=None, confidenceMap=None):
        '''
        Predict the label for the samples xt using the learned model.
        Inputs:
            xt: the samples to be classified
            tau: optional regularization parameter (defaults to self.tau)
            confidenceMap: if not None, also return the per-sample confidence
        Outputs:
            yp: the predicted class
            K: the decision value for each class
        '''

        MAX = sp.finfo(sp.float64).max
        E_MAX = sp.log(MAX)  # Maximum value that it is possible to compute with sp.exp

        ## Get information from the data
        nt = xt.shape[0]        # Number of testing samples
        C = self.ni.shape[0]    # Number of classes

        ## Initialization
        K = sp.empty((nt, C))

        if tau is None:
            TAU = self.tau
        else:
            TAU = tau

        for c in range(C):
            invCov, logdet = self.compute_inverse_logdet(c, TAU)
            cst = logdet - 2 * sp.log(self.prop[c])  # Precompute the constant term

            # Squared Mahalanobis distance of each sample to the class mean
            xtc = xt - self.mean[c, :]
            temp = sp.dot(invCov, xtc.T).T
            K[:, c] = sp.sum(xtc * temp, axis=1) + cst
            del temp, xtc

        yp = sp.argmin(K, 1)

        if confidenceMap is None:

            ## Assign the label saved in classnum to the minimum value of K
            yp = self.classnum[yp]

            return yp

        else:

            ## Convert the decision values to class posteriors with a clipped softmax
            K *= -0.5
            K[K > E_MAX], K[K < -E_MAX] = E_MAX, -E_MAX
            sp.exp(K, out=K)
            K /= K.sum(axis=1).reshape(nt, 1)
            K = K[sp.arange(len(K)), yp]
            # K = sp.diag(K[:, yp])

            yp = self.classnum[yp]

            return yp, K
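A standalone sketch of that confidence step: per-class quadratic discriminant scores (smaller is better) are turned into class posteriors with a softmax of -K/2 (the original additionally clips the exponent for numerical safety), and the posterior of the chosen class is returned. The toy scores are made up:

import numpy as np

K = np.array([[2.0, 5.0, 3.0],
              [9.0, 1.0, 4.0]])
yp = K.argmin(axis=1)                 # predicted class per sample
P = np.exp(-0.5 * K)
P /= P.sum(axis=1, keepdims=True)     # normalize rows into posteriors
conf = P[np.arange(len(P)), yp]       # posterior of the chosen class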