Python scipy module, dot() code examples

The following 40 code examples, extracted from open-source Python projects, illustrate how to use scipy.dot().
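Note that scipy.dot has historically been a plain re-export of numpy.dot, so everything below applies to numpy.dot as well; modern SciPy deprecates these NumPy aliases, and new code should prefer numpy.dot or the @ operator. A minimal sketch of the call itself:

import numpy as np
import scipy as sp

a = np.array([[1.0, 2.0], [3.0, 4.0]])
b = np.array([[5.0], [6.0]])

print(sp.dot(a, b))                      # 2x1 matrix product
print(np.allclose(sp.dot(a, b), a @ b))  # True: identical to numpy.dot / @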

Project: PleioPred    Author: yiminghu    | Project Source | File Source
def annopred_inf(beta_hats, pr_sigi, n=1000, reference_ld_mats=None, ld_window_size=100):
    """
    infinitesimal model with snp-specific heritability derived from annotation
    used as the initial values for MCMC of non-infinitesimal model
    """
    num_betas = len(beta_hats)
    updated_betas = sp.empty(num_betas)
    m = len(beta_hats)

    for i, wi in enumerate(range(0, num_betas, ld_window_size)):
        start_i = wi
        stop_i = min(num_betas, wi + ld_window_size)
        curr_window_size = stop_i - start_i
        Li = 1.0/pr_sigi[start_i: stop_i]
        D = reference_ld_mats[i]
        A = (n/(1))*D + sp.diag(Li)
        A_inv = linalg.pinv(A)
        updated_betas[start_i: stop_i] = sp.dot(A_inv / (1.0/n) , beta_hats[start_i: stop_i])  # Adjust the beta_hats

    return updated_betas
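A toy invocation (assuming the snippet's own imports, scipy as sp and scipy.linalg as linalg): with identity reference LD matrices the update reduces to simple per-window shrinkage of the marginal effects:

import numpy as np
import scipy as sp
from scipy import linalg

m = 200
beta_hats = np.random.randn(m) * 0.01   # marginal effect estimates
pr_sigi = np.full(m, 0.1 / m)           # per-SNP heritability from annotation
ld_mats = [np.eye(100), np.eye(100)]    # one LD matrix per 100-SNP window

betas = annopred_inf(beta_hats, pr_sigi, n=10000,
                     reference_ld_mats=ld_mats, ld_window_size=100)
print(betas.shape)                      # (200,)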
Project: sGLMM    Author: YeWenting    | Project Source | File Source
def matrixMult(A, B):
    try:
        linalg.blas
    except AttributeError:
        return np.dot(A, B)

    if not A.flags['F_CONTIGUOUS']:
        AA = A.T
        transA = True
    else:
        AA = A
        transA = False

    if not B.flags['F_CONTIGUOUS']:
        BB = B.T
        transB = True
    else:
        BB = B
        transB = False

    return linalg.blas.dgemm(alpha=1., a=AA, b=BB, trans_a=transA, trans_b=transB)
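A quick equivalence check (assuming numpy as np and scipy.linalg as linalg, as in the snippet): the transpose flags let dgemm consume either memory layout without copying, and the result matches np.dot:

import numpy as np
from scipy import linalg

A = np.asfortranarray(np.random.rand(3, 4))   # F-contiguous: passed as-is
B = np.random.rand(4, 2)                      # C-contiguous: passed as B.T with trans_b=True
print(np.allclose(matrixMult(A, B), np.dot(A, B)))  # True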
Project: sGLMM    Author: YeWenting    | Project Source | File Source
def factor(X, rho):
    """
    computes cholesky factorization of the kernel K = 1/rho*XX^T + I

    Input:
    X design matrix: n_s x n_f (we assume n_s << n_f)
    rho: regularizaer

    Output:
    L  lower triangular matrix
    U  upper triangular matrix
    """
    n_s, n_f = X.shape
    K = 1 / rho * scipy.dot(X, X.T) + scipy.eye(n_s)
    U = linalg.cholesky(K)
    return U
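Because scipy.linalg.cholesky returns the upper factor by default, the returned U plugs straight into cho_solve to solve K x = b without ever forming the inverse; a sketch under the snippet's imports (scipy and scipy.linalg as linalg):

import numpy as np
import scipy
from scipy import linalg

X = np.random.rand(5, 50)            # n_s x n_f with n_s << n_f
rho = 2.0
U = factor(X, rho)                   # upper Cholesky factor of K
b = np.random.rand(5)
x = linalg.cho_solve((U, False), b)  # False: the factor is upper triangular

K = 1 / rho * scipy.dot(X, X.T) + scipy.eye(5)
print(np.allclose(scipy.dot(K, x), b))  # True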
Project: CS-LMM    Author: HaohanWang    | Project Source | File Source
def matrixMult(A, B):
    try:
        linalg.blas
    except AttributeError:
        return np.dot(A, B)

    if not A.flags['F_CONTIGUOUS']:
        AA = A.T
        transA = True
    else:
        AA = A
        transA = False

    if not B.flags['F_CONTIGUOUS']:
        BB = B.T
        transB = True
    else:
        BB = B
        transB = False

    return linalg.blas.dgemm(alpha=1., a=AA, b=BB, trans_a=transA, trans_b=transB)
Project: addc    Author: carsonfarmer    | Project Source | File Source
def linear(x, y, c=0):
    """Compute a linear kernel.

    The Linear kernel is the simplest kernel function. It is given by the
    inner product <x,y> plus an optional constant `c`:
                        K(x, y) = x'y + c
    where `x` and `y` are vectors in the input space (i.e., vectors of
    features computed from training or test samples), and `c` ≥ 0 is a free
    parameter (default=0). Kernel algorithms using a linear kernel are often
    equivalent to their non-kernel counterparts.
    """
    return dot(x, y) + c
Project: addc    Author: carsonfarmer    | Project Source | File Source
def sigmoid(x, y, alpha=None, c=-E):
    """Compute a hyperbolic tanget (or sigmoid) kernel.

    The Hyperbolic Tangent Kernel is also known as the Sigmoid Kernel and as
    the Multilayer Perceptron (MLP) kernel. The Sigmoid Kernel comes from the
    Neural Networks field, where the bipolar sigmoid function is often used
    as an activation function for artificial neurons:
                    K(x, y) = tanh(?x'y + c)
    where `x` and `y` are vectors in the input space (i.e., vectors of
    features computed from training or test samples), and tanh is the
    hyperbolic tangent function. There are two adjustable parameters in the
    sigmoid kernel, the slope `alpha` and the intercept constant `c`. A common
    value for alpha is 1/d, where d is the data dimension.

    References
    ----------
    [1] Lin, H. T. and Lin, C. J.: A study on sigmoid kernels for SVM and the
        training of non-PSD kernels by SMO-type methods. Web. Last accessed
        June 28 (2016). <http://www.csie.ntu.edu.tw/~cjlin/papers/tanh.pdf>
    """
    if alpha is None:
        alpha = 1 / len(x)
    return tanh(alpha*dot(x, y) + c)
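Both kernels assume module-level imports along the lines of from scipy import dot, tanh, plus a constant E (presumably Euler's number, E = scipy.e); a hypothetical toy call under those assumptions:

import numpy as np
from scipy import dot, tanh, e as E

x = np.array([1.0, 2.0, 3.0])
y = np.array([0.5, 0.1, 0.2])

print(linear(x, y))          # plain inner product: 1.3
print(linear(x, y, c=1.0))   # shifted inner product: 2.3
print(sigmoid(x, y))         # tanh(x'y / len(x) - e)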
Project: PleioPred    Author: yiminghu    | Project Source | File Source
def get_ld_tables(snps, ld_radius=100, ld_window_size=0):
    """
    Calculates LD tables, and the LD score in one go...
    """

    ld_dict = {}
    m,n = snps.shape
    print(m, n)
    ld_scores = sp.ones(m)
    ret_dict = {}
    for snp_i, snp in enumerate(snps):
        # Calculate D
        start_i = max(0, snp_i - ld_radius)
        stop_i = min(m, snp_i + ld_radius + 1)
        X = snps[start_i: stop_i]
        D_i = sp.dot(snp, X.T) / n
        r2s = D_i ** 2
        ld_dict[snp_i] = D_i
        lds_i = sp.sum(r2s - (1 - r2s) / (n - 2), dtype='float32')
        #lds_i = sp.sum(r2s - (1-r2s)*empirical_null_r2)
        ld_scores[snp_i] = lds_i
    ret_dict['ld_dict'] = ld_dict
    ret_dict['ld_scores'] = ld_scores

    if ld_window_size > 0:
        ref_ld_matrices = []
        for i, wi in enumerate(range(0, m, ld_window_size)):
            start_i = wi
            stop_i = min(m, wi + ld_window_size)
            curr_window_size = stop_i - start_i
            X = snps[start_i: stop_i]
            D = sp.dot(X, X.T) / n
            ref_ld_matrices.append(D)
        ret_dict['ref_ld_matrices'] = ref_ld_matrices
    return ret_dict
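A toy run (assuming import scipy as sp as in the snippet): rows of snps are SNPs standardized to zero mean and unit variance across n individuals, so sp.dot(snp, X.T) / n yields correlations:

import numpy as np
import scipy as sp

m, n = 50, 400
snps = np.random.randn(m, n)
snps = (snps - snps.mean(axis=1, keepdims=True)) / snps.std(axis=1, keepdims=True)

tables = get_ld_tables(snps, ld_radius=10, ld_window_size=25)
print(tables['ld_scores'].shape)           # (50,)
print(tables['ref_ld_matrices'][0].shape)  # (25, 25)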
Project: HistoricalMap    Author: lennepkade    | Project Source | File Source
def predict(self,xt,tau=None,proba=None):
        '''
        Function that predict the label for sample xt using the learned model
        Inputs:
            xt: the samples to be classified
        Outputs:
            y: the class
            K: the decision value for each class
        '''
        ## Get information from the data
        nt = xt.shape[0]        # Number of testing samples
        C = self.ni.shape[0]    # Number of classes

        ## Initialization
        K = sp.empty((nt,C))

        if tau is None:
            TAU=self.tau
        else:
            TAU=tau

        for c in range(C):
            invCov,logdet = self.compute_inverse_logdet(c,TAU)
            cst = logdet - 2*sp.log(self.prop[c]) # Pre compute the constant

            xtc = xt-self.mean[c,:]
            temp = sp.dot(invCov,xtc.T).T
            K[:,c] = sp.sum(xtc*temp,axis=1)+cst
            del temp,xtc

        ##

        ## Assign the label saved in classnum to the minimum value of K
        yp = self.classnum[sp.argmin(K,1)]

        ## Reassign label with real value

        if proba is None:
            return yp
        else:
            return yp,K
Project: HistoricalMap    Author: lennepkade    | Project Source | File Source
def compute_inverse_logdet(self,c,tau):
        Lr = self.L[c,:]+tau # Regularized eigenvalues
        temp = self.Q[c,:,:]*(1/Lr)
        invCov = sp.dot(temp,self.Q[c,:,:].T) # Pre compute the inverse
        logdet = sp.sum(sp.log(Lr)) # Compute the log determinant
        return invCov,logdet
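The method leans on the eigendecomposition Sigma = Q diag(L) Q^T: adding tau to the eigenvalues gives the regularized inverse Q diag(1/(L+tau)) Q^T and the log-determinant sum(log(L+tau)) almost for free. A standalone check of that identity:

import numpy as np
import scipy as sp

A = np.random.rand(4, 4)
cov = A @ A.T                       # symmetric PSD "covariance"
L, Q = np.linalg.eigh(cov)
tau = 1e-3
Lr = L + tau
invCov = sp.dot(Q * (1 / Lr), Q.T)  # same column-scaling trick as above
print(np.allclose(invCov, np.linalg.inv(cov + tau * np.eye(4))))                     # True
print(np.allclose(np.sum(np.log(Lr)), np.linalg.slogdet(cov + tau * np.eye(4))[1]))  # True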
Project: HistoricalMap    Author: lennepkade    | Project Source | File Source
def BIC(self,x,y,tau=None):
        '''
        Computes the Bayesian Information Criterion of the model
        '''
        ## Get information from the data
        C,d = self.mean.shape
        n = x.shape[0]

        ## Initialization
        if tau is None:
            TAU=self.tau
        else:
            TAU=tau

        ## Penalization
        P = C*(d*(d+3)/2) + (C-1)
        P *= sp.log(n)

        ## Compute the log-likelihood
        L = 0
        for c in range(C):
            j = sp.where(y==(c+1))[0]
            xi = x[j,:]
            invCov,logdet = self.compute_inverse_logdet(c,TAU)
            cst = logdet - 2*sp.log(self.prop[c]) # Pre compute the constant
            xi -= self.mean[c,:]
            temp = sp.dot(invCov,xi.T).T
            K = sp.sum(xi*temp,axis=1)+cst
            L +=sp.sum(K)
            del K,xi

        return L + P
Project: prml    Author: Yevgnen    | Project Source | File Source
def fit(self, X):
        n_samples, n_features = X.shape
        n_classes = self.n_classes
        max_iter = self.max_iter
        tol = self.tol

        rand_center_idx = sprand.permutation(n_samples)[0:n_classes]
        center = X[rand_center_idx].T
        responsibility = sp.zeros((n_samples, n_classes))

        for iter in range(max_iter):
            # E step
            dist = sp.expand_dims(X, axis=2) - sp.expand_dims(center, axis=0)
            dist = spla.norm(dist, axis=1)**2
            min_idx = sp.argmin(dist, axis=1)
            responsibility.fill(0)
            responsibility[sp.arange(n_samples), min_idx] = 1

            # M step
            center_new = sp.dot(X.T, responsibility) / sp.sum(responsibility, axis=0)
            diff = center_new - center
            print('K-Means: {0:5d} {1:4e}'.format(iter, spla.norm(diff) / spla.norm(center)))
            if (spla.norm(diff) < tol * spla.norm(center)):
                break

            center = center_new

        self.center = center.T
        self.responsibility = responsibility

        return self
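The E step builds the full (n_samples, n_classes) squared-distance matrix in one broadcast: X expands to (n, d, 1), the centers to (1, d, k), and the norm over axis 1 collapses the feature dimension. A standalone sketch of that trick:

import numpy as np
import scipy as sp
import scipy.linalg as spla

X = np.random.rand(6, 2)       # n_samples x n_features
center = np.random.rand(2, 3)  # n_features x n_classes

dist = sp.expand_dims(X, axis=2) - sp.expand_dims(center, axis=0)
dist = spla.norm(dist, axis=1) ** 2  # (6, 3): squared distance to every center
min_idx = sp.argmin(dist, axis=1)    # hard assignment per sample
print(dist.shape, min_idx)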
Project: prml    Author: Yevgnen    | Project Source | File Source
def pdf(self, X, k=None):
        if (k is None):
            comps = self.comps
            coef = self.coef
        else:
            comps = [self.comps[k]]
            coef = [self.coef[k]]

        probability = sp.array([comp.pdf(X) for comp in comps])
        weight_probability = sp.dot(coef, probability)

        return weight_probability
Project: prml    Author: Yevgnen    | Project Source | File Source
def predict(self, X, T, X_new):
        """Predict ``X_new`` with given traning data ``(X, T)``."""
        n_tests = X_new.shape[0]
        phi = sp.r_[sp.ones(n_tests).reshape(1, -1), self._compute_design_matrix(X_new, X)]  # Add x0
        phi = phi[self.rv_indices, :]

        predict_mean = sp.dot(self.mean, phi)
        predict_cov = 1 / self.beta + sp.dot(phi.T, sp.dot(self.cov, phi)).diagonal()

        return predict_mean, predict_cov
Project: prml    Author: Yevgnen    | Project Source | File Source
def reconstruct(self, X):
        reconstructed = self.mean + sp.dot(sp.dot(X - self.mean, self.eigvecs), self.eigvecs.T)

        return reconstructed
Project: prml    Author: Yevgnen    | Project Source | File Source
def _maximum_likelihood(self, X):
        n_samples, n_features = X.shape if X.ndim > 1 else (1, X.shape[0])
        n_components = self.n_components

        # Predict mean
        mu = X.mean(axis=0)

        # Predict covariance
        cov = sp.cov(X, rowvar=0)
        eigvals, eigvecs = self._eig_decomposition(cov)
        sigma2 = ((sp.sum(cov.diagonal()) - sp.sum(eigvals.sum())) /
                  (n_features - n_components))  # FIXME: M < D?

        weight = sp.dot(eigvecs, sp.diag(sp.sqrt(eigvals - sigma2)))
        M = sp.dot(weight.T, weight) + sigma2 * sp.eye(n_components)
        inv_M = spla.inv(M)

        self.eigvals = eigvals
        self.eigvecs = eigvecs
        self.predict_mean = mu
        self.predict_cov = sp.dot(weight, weight.T) + sigma2 * sp.eye(n_features)
        self.latent_mean = sp.transpose(sp.dot(inv_M, sp.dot(weight.T, X.T - mu[:, sp.newaxis])))
        self.latent_cov = sigma2 * inv_M
        self.sigma2 = sigma2    # FIXME!
        self.weight = weight
        self.inv_M = inv_M

        return self.latent_mean
Project: prml    Author: Yevgnen    | Project Source | File Source
def reconstruct(self, X):
        n_features = sp.atleast_2d(X).shape[1]
        latent = sp.dot(self.inv_M, sp.dot(self.weight.T, (X - self.predict_mean).T))
        eps = sprd.multivariate_normal(sp.zeros(n_features), self.sigma2 * sp.eye(n_features))
        recons = sp.dot(self.weight, latent) + self.predict_mean + eps

        return recons
Project: prml    Author: Yevgnen    | Project Source | File Source
def fit(self, X):
        n_samples, n_features = X.shape
        K = self.kernel.inner(X, X)
        I1N = sp.ones((n_samples, n_samples)) / n_samples  # 1/n averaging matrix for double centering
        K_centered = K - sp.dot(I1N, K) - sp.dot(K, I1N) + sp.dot(sp.dot(I1N, K), I1N)

        eigvals, eigvecs = self._eig_decomposition(K_centered)

        self.eigvals = eigvals
        self.eigvecs = eigvecs

        Y = sp.dot(K, eigvecs)

        return Y
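With I1N = ones/n, the four-term expression is exactly the usual double centering H K H with H = I - I1N; a quick check on a toy Gram matrix:

import numpy as np
import scipy as sp

n = 5
K = np.random.rand(n, n)
K = K @ K.T                 # symmetric toy Gram matrix
I1N = sp.ones((n, n)) / n
K_centered = K - sp.dot(I1N, K) - sp.dot(K, I1N) + sp.dot(sp.dot(I1N, K), I1N)

H = np.eye(n) - I1N
print(np.allclose(K_centered, H @ K @ H))  # True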
Project: dzetsaka    Author: lennepkade    | Project Source | File Source
def compute_inverse_logdet(self,c,tau):
        Lr = self.L[c,:]+tau # Regularized eigenvalues
        temp = self.Q[c,:,:]*(1/Lr)
        invCov = sp.dot(temp,self.Q[c,:,:].T) # Pre compute the inverse
        logdet = sp.sum(sp.log(Lr)) # Compute the log determinant
        return invCov,logdet
Project: dzetsaka    Author: lennepkade    | Project Source | File Source
def BIC(self,x,y,tau=None):
        '''
        Computes the Bayesian Information Criterion of the model
        '''
        ## Get information from the data
        C,d = self.mean.shape
        n = x.shape[0]

        ## Initialization
        if tau is None:
            TAU=self.tau
        else:
            TAU=tau

        ## Penalization
        P = C*(d*(d+3)/2) + (C-1)
        P *= sp.log(n)

        ## Compute the log-likelihood
        L = 0
        for c in range(C):
            j = sp.where(y==(c+1))[0]
            xi = x[j,:]
            invCov,logdet = self.compute_inverse_logdet(c,TAU)
            cst = logdet - 2*sp.log(self.prop[c]) # Pre compute the constant
            xi -= self.mean[c,:]
            temp = sp.dot(invCov,xi.T).T
            K = sp.sum(xi*temp,axis=1)+cst
            L +=sp.sum(K)
            del K,xi

        return L + P
Project: xam    Author: MaxHalford    | Project Source | File Source
def _auc_loss(self, coef, X, y):
        fpr, tpr, _ = metrics.roc_curve(y, sp.dot(X, coef))
        return -metrics.auc(fpr, tpr)
Project: xam    Author: MaxHalford    | Project Source | File Source
def predict(self, X):
        return sp.dot(X, self.coef_)
Project: xam    Author: MaxHalford    | Project Source | File Source
def score(self, X, y):
        fpr, tpr, _ = metrics.roc_curve(y, sp.dot(X, self.coef_))
        return metrics.auc(fpr, tpr)
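These three methods come from an AUC-maximizing linear model: the coefficients are chosen to minimize the negative AUC of the linear scores sp.dot(X, coef). A standalone sketch of the same idea (not the original class), using a derivative-free optimizer since AUC is piecewise constant:

import numpy as np
import scipy as sp
from scipy import optimize
from sklearn import metrics

rng = np.random.RandomState(0)
X = rng.randn(200, 3)
y = (X @ np.array([1.0, -2.0, 0.5]) + 0.3 * rng.randn(200) > 0).astype(int)

def auc_loss(coef):
    fpr, tpr, _ = metrics.roc_curve(y, sp.dot(X, coef))
    return -metrics.auc(fpr, tpr)

res = optimize.minimize(auc_loss, np.zeros(3), method='Nelder-Mead')
print(-auc_loss(res.x))  # in-sample AUC of the fitted linear scores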
Project: Python-for-Finance-Second-Edition    Author: PacktPublishing    | Project Source | File Source
def portfolioBeta(betaGiven,w):
    #print("betaGiven=",betaGiven,"w=",w)
    return sp.dot(betaGiven,w)
# function 3: estimate Treynor
Project: Python-for-Finance-Second-Edition    Author: PacktPublishing    | Project Source | File Source
def treynor(R,w):
    betaP=portfolioBeta(betaGiven,w)
    mean_return=sp.mean(R,axis=0)
    ret = sp.array(mean_return)
    return (sp.dot(w,ret) - rf)/betaP
# function 4: for given n-1 weights, return a negative sharpe ratio
Project: Python-for-Finance-Second-Edition    Author: PacktPublishing    | Project Source | File Source
def portfolioRet(R,w):
    mean_return=sp.mean(R,axis=0)
    ret = sp.array(mean_return)
    return sp.dot(w,ret)
Project: Python-for-Finance-Second-Edition    Author: PacktPublishing    | Project Source | File Source
def sharpe(R,w):
    var = portfolio_var(R,w)
    mean_return=sp.mean(R,axis=0)
    ret = sp.array(mean_return)
    return (sp.dot(w,ret) - rf)/sp.sqrt(var)
# function 4: for given n-1 weights, return a negative sharpe ratio
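These book snippets read module-level globals rf (risk-free rate) and betaGiven (per-asset betas), and sharpe additionally needs a portfolio_var helper not shown here; a toy invocation under those assumptions, with hypothetical values:

import numpy as np
import scipy as sp

rf = 0.01                                    # global risk-free rate read by treynor/sharpe
betaGiven = np.array([0.8, 1.1, 1.3])        # global per-asset betas read by treynor
R = 0.001 + 0.02 * np.random.randn(250, 3)   # toy daily returns for 3 assets
w = np.array([0.5, 0.3, 0.2])

print(portfolioRet(R, w))   # weighted mean return
print(treynor(R, w))        # (portfolio return - rf) / portfolio beta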
Project: RFCN    Author: zengxianyu    | Project Source | File Source
def __MR_saliency(self,aff,indictor):
        return sp.dot(aff,indictor)
Project: CS-LMM    Author: HaohanWang    | Project Source | File Source
def factor(X, rho):
    """
    Computes the Cholesky factorization of the kernel K = 1/rho*XX^T + I
    Input:
    X: design matrix, n_s x n_f (we assume n_s << n_f)
    rho: regularizer
    Output:
    U: upper triangular Cholesky factor of K (only U is returned)
    """
    n_s, n_f = X.shape
    K = 1 / rho * scipy.dot(X, X.T) + scipy.eye(n_s)
    U = linalg.cholesky(K)
    return U
Project: SLIC_cityscapes    Author: wpqmanu    | Project Source | File Source
def calcerror(self, centers, prevcenters,nan_record):
        '''
        L2 norm of location
        '''
        for nan_index in nan_record[::-1]:
            del prevcenters[nan_index]

        # error=sum([scipy.dot(now[:2] - prev[:2], now[:2] - prev[:2]) + scipy.dot(1-np.equal(now[2:], prev[2:]), 1-np.equal(now[2:], prev[2:])) for now, prev in zip(centers, prevcenters)])
        error = sum([scipy.dot(1 - np.equal(now[2:], prev[2:]), 1 - np.equal(now[2:], prev[2:])) for now, prev in zip(centers, prevcenters)])

        print("error:", error)
        return error
Project: SLIC_cityscapes    Author: wpqmanu    | Project Source | File Source
def iteration(self, centers, stepsize):
        error = sum([scipy.dot(center[:2], center[:2]) for center in centers])
        while error > self.ERROR_THRESHOLD:
            self.assignment(centers, stepsize)  ## M-step Note step size is the initial length/width of a superpixel.
            prevcenters=centers
            centers,nan_record=self.update(centers)  ## E-step
            error = self.calcerror(centers, prevcenters,nan_record)
            print "L2 error:", error

            if self.DEBUGFLAG:
                base, ext = os.path.splitext(self.filename)
                self.filename = base.split("_error")[0] + "_error" + str(error) + ext
                self.resultimg(centers)
        return (centers, self.assignedindex)
Project: SLIC_cityscapes    Author: wpqmanu    | Project Source | File Source
def calcerror(self, centers, prevcenters):
        '''
        L2 norm of location
        '''
        print "error:", sum(
            [scipy.dot(now[:2] - prev[:2], now[:2] - prev[:2]) for now, prev in zip(centers, prevcenters)])
        return sum([scipy.dot(now[:2] - prev[:2], now[:2] - prev[:2]) for now, prev in zip(centers, prevcenters)])
Project: SLIC_cityscapes    Author: wpqmanu    | Project Source | File Source
def iteration(self, centers, stepsize):
        error = sum([scipy.dot(center[:2], center[:2]) for center in centers])
        while error > self.ERROR_THRESHOLD:
            self.assignment(centers, stepsize)  ## M-step
            prevcenters, centers = centers, self.update(centers)  ## E-step
            error = self.calcerror(centers, prevcenters)
            print "L2 error:", error

            if self.DEBUGFLAG:
                base, ext = os.path.splitext(self.filename)
                self.filename = base.split("_error")[0] + "_error" + str(error) + ext
                self.resultimg(centers)
        return (centers, self.assignedindex)
Project: SLIC_cityscapes    Author: wpqmanu    | Project Source | File Source
def calcerror(self, centers, prevcenters,nan_record):
        '''
        L2 norm of location
        '''
        for nan_index in nan_record[::-1]:
            del prevcenters[nan_index]

        error = sum([scipy.dot(now[:2] - prev[:2], now[:2] - prev[:2]) + scipy.dot(1 - np.equal(now[2:], prev[2:]), 1 - np.equal(now[2:], prev[2:])) for now, prev in zip(centers, prevcenters)])

        print("error:", error)
        return error
Project: addc    Author: carsonfarmer    | Project Source | File Source
def poly(x, y, a=1, c=0, d=2):
    """Compute feature-space similarity over polynomials of the input vectors.

    For degree-`d` (default=2) polynomials, the kernel is defined as:
                        K(x, y) = (x'y/a + c)^d
    where `x` and `y` are vectors in the input space (i.e., vectors of
    features computed from training or test samples), `c` ≥ 0 is a free
    parameter trading off the influence of higher-order versus lower-order
    terms in the polynomial, and `a` is an adjustable scaling parameter. When
    `c`=0 (the default), the kernel is called homogeneous. The Polynomial
    kernel is a non-stationary kernel. Polynomial kernels are well suited for
    problems where all the training data is normalized.
    """
    return (dot(x, y)/a + c)**d
Project: digital_rf    Author: MITHaystack    | Project Source | File Source
def outlier_removed_fit(m, w = None, n_iter=10, polyord=7):
    """
    Remove outliers using fited data.

    Args:
        m (:obj:`numpy array`): Phase curve.
        n_iter (:obj:'int'): Number of iteration outlier removal
        polyorder (:obj:'int'): Order of polynomial used.

    Returns:
        fit (:obj:'numpy array'): Curve with outliers removed
    """
    if w is None:
        w = sp.ones_like(m)
    W = sp.diag(sp.sqrt(w))
    m2 = sp.copy(m)
    tv = sp.linspace(-1, 1, num=len(m))
    A = sp.zeros([len(m), polyord])
    for j in range(polyord):
        A[:, j] = tv**(float(j))
    A2 = sp.dot(W,A)
    m2w = sp.dot(m2,W)
    fit = None
    for i in range(n_iter):
        xhat = sp.linalg.lstsq(A2, m2w)[0]
        fit = sp.dot(A, xhat)
        # use gradient for central finite differences which keeps order
        resid = sp.gradient(fit - m2)
        std = sp.std(resid)
        bidx = sp.where(sp.absolute(resid) > 2.0*std)[0]
        for bi in bidx:
            A2[bi,:]=0.0
            m2[bi]=0.0
            m2w[bi]=0.0
    if debug_plot:
        plt.plot(m2,label="outlier removed")
        plt.plot(m,label="original")
        plt.plot(fit,label="fit")
        plt.legend()
        plt.ylim([fit.min() - std*3.0, fit.max() + std*3.0])
        plt.show()
    return fit
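A toy run (assuming the function's module defines import scipy as sp, import scipy.linalg, and the module-level debug_plot flag it reads at the end):

import numpy as np
import scipy as sp
import scipy.linalg

debug_plot = False                   # plotting branch disabled

t = np.linspace(-1, 1, 200)
truth = 5 * t**3 - 2 * t
m = truth + 0.05 * np.random.randn(200)
m[[20, 90, 150]] += 3.0              # inject gross outliers

fit = outlier_removed_fit(m, n_iter=5, polyord=4)
print(np.abs(fit - truth).max())     # the fit tracks the clean curve, not the outliers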
Project: prml    Author: Yevgnen    | Project Source | File Source
def fit(self, X):
        # Constants
        max_iter = self.max_iter
        tol = self.tol
        n_samples, n_features = X.shape
        n_components = self.n_components

        # Initialize parameters
        self._init_params(X)

        # Initialize
        responsibility = sp.empty((n_samples, n_components))
        log_likelihood = self.log_likelihood(X)

        for iter in range(max_iter):

            # E step
            for n in range(n_samples):
                for k in range(n_components):
                    responsibility[n][k] = self.pdf(X[n], k) / self.pdf(X[n])

            # M step
            eff = sp.sum(responsibility, axis=0)
            for k in range(n_components):
                # Update mean
                mean = sp.dot(responsibility[:, k], X) / eff[k]

                # Update covariance
                cov = sp.zeros((n_features, n_features))
                for n in range(n_samples):
                    cov += responsibility[n][k] * sp.outer(X[n] - mean, X[n] - mean)
                cov /= eff[k]

                # Update the k component
                self.comps[k] = multivariate_normal(mean, cov, allow_singular=True)

                # Update mixture coefficient
                self.coef[k] = eff[k] / n_samples

            # Convergent test
            log_likelihood_new = self.log_likelihood(X)
            diff = log_likelihood_new - log_likelihood
            print('GMM: {0:5d}: {1:10.5e} {2:10.5e}'.format(
                iter, log_likelihood_new, spla.norm(diff) / spla.norm(log_likelihood)))
            if (spla.norm(diff) < tol * spla.norm(log_likelihood)):
                break

            log_likelihood = log_likelihood_new

        return self
Project: prml    Author: Yevgnen    | Project Source | File Source
def fit(self, X, T, max_iter=int(1e2), tol=1e-3, bound=1e10):
        """Fit a RVM model with the training data ``(X, T)``."""
        # Initialize the hyperparameters
        self._init_hyperparameters(X, T)

        # Compute design matrix
        n_samples = X.shape[0]
        phi = sp.c_[sp.ones(n_samples), self._compute_design_matrix(X)]  # Add x0

        alpha = self.cov
        beta = self.beta

        log_evidence = -1e10
        for iter in range(max_iter):
            alpha[alpha >= bound] = bound
            rv_indices = sp.nonzero(alpha < bound)[0]
            rv_phi = phi[:, rv_indices]
            rv_alpha = alpha[rv_indices]

            # Compute the posterior distribution
            post_cov = spla.inv(sp.diag(rv_alpha) + beta * sp.dot(rv_phi.T, rv_phi))
            post_mean = beta * sp.dot(post_cov, sp.dot(rv_phi.T, T))

            # Re-estimate the hyperparameters
            gamma = 1 - rv_alpha * post_cov.diagonal()
            rv_alpha = gamma / (post_mean * post_mean)
            beta = (n_samples + 1 - gamma.sum()) / spla.norm(T - sp.dot(rv_phi, post_mean))**2

            # Evaluate the log evidence and test the relative change
            C = sp.eye(rv_phi.shape[0]) / beta + rv_phi.dot(sp.diag(1.0 / rv_alpha)).dot(rv_phi.T)
            log_evidence_new = -0.5 * (sp.log(spla.det(C)) + T.dot(spla.inv(C)).dot((T)))
            diff = spla.norm(log_evidence_new - log_evidence)
            if (diff < tol * spla.norm(log_evidence)):
                break

            log_evidence = log_evidence_new
            alpha[rv_indices] = rv_alpha

        # Should re-compute the posterior distribution
        self.rv_indices = rv_indices
        self.cov = post_cov
        self.mean = post_mean
        self.beta = beta

        return self
Project: prml    Author: Yevgnen    | Project Source | File Source
def _em(self, X):
        # Constants
        n_samples, n_features = X.shape
        n_components = self.n_components
        max_iter = self.max_iter
        # tol = self.tol

        mu = X.mean(axis=0)
        X_centered = X - sp.atleast_2d(mu)

        # Initialize parameters
        latent_mean = 0
        sigma2 = 1
        weight = sprd.randn(n_features, n_components)

        # Main loop of EM algorithm
        for i in range(max_iter):
            # E step
            M = sp.dot(weight.T, weight) + sigma2 * sp.eye(n_components)
            inv_M = spla.inv(M)
            latent_mean = sp.dot(inv_M, sp.dot(weight.T, X_centered.T)).T

            # M step
            expectation_zzT = n_samples * sigma2 * inv_M + sp.dot(latent_mean.T, latent_mean)

            # Re-estimate W
            weight = sp.dot(sp.dot(X_centered.T, latent_mean), spla.inv(expectation_zzT))
            weight2 = sp.dot(weight.T, weight)

            # Re-estimate \sigma^2
            sigma2 = ((spla.norm(X_centered)**2 -
                       2 * sp.dot(latent_mean.ravel(), sp.dot(X_centered, weight).ravel()) +
                       sp.trace(sp.dot(expectation_zzT, weight2))) /
                      (n_samples * n_features))

        self.predict_mean = mu
        self.predict_cov = sp.dot(weight, weight.T) + sigma2 * sp.eye(n_features)
        self.latent_mean = latent_mean
        self.latent_cov = sigma2 * inv_M
        self.sigma2 = sigma2
        self.weight = weight
        self.inv_M = inv_M

        return self.latent_mean
Project: dzetsaka    Author: lennepkade    | Project Source | File Source
def predict(self,xt,tau=None,confidenceMap=None):
        '''
        Function that predict the label for sample xt using the learned model
        Inputs:
            xt: the samples to be classified
        Outputs:
            y: the class
            K: the decision value for each class
        '''

        MAX = sp.finfo(sp.float64).max
        E_MAX = sp.log(MAX) # Maximum value that is possible to compute with sp.exp

        ## Get information from the data
        nt = xt.shape[0]        # Number of testing samples
        C = self.ni.shape[0]    # Number of classes

        ## Initialization
        K = sp.empty((nt,C))

        if tau is None:
            TAU=self.tau
        else:
            TAU=tau

        for c in range(C):
            invCov,logdet = self.compute_inverse_logdet(c,TAU)
            cst = logdet - 2*sp.log(self.prop[c]) # Pre compute the constant

            xtc = xt-self.mean[c,:]
            temp = sp.dot(invCov,xtc.T).T
            K[:,c] = sp.sum(xtc*temp,axis=1)+cst
            del temp,xtc        

        yp = sp.argmin(K,1)

        if confidenceMap is None:

            ## Assign the label saved in classnum to the minimum value of K
            yp = self.classnum[yp]

            return yp

        else:

            K *= -0.5
            K[K>E_MAX],K[K<-E_MAX] = E_MAX,-E_MAX
            sp.exp(K,out=K)
            K /= K.sum(axis=1).reshape(nt,1)
            K = K[sp.arange(len(K)),yp]
            #K = sp.diag(K[:,yp])

            yp = self.classnum[yp]

            return yp,K
Project: livespin    Author: biocompibens    | Project Source | File Source
def minvalues(self, params2):
        postemp = [[1,2,7,8],[1,2,13,14],[7,8,13,14]]
        #minimum variance of intensity values between centers
        varinten3=[]
        for m in range(3):
            post=postemp[m]
            para3=self.params[post]
            if para3[0] > para3[2]:
                para3=[para3[2],para3[3],para3[0],para3[1]]
            para3=np.round(para3)
            inten=[]
            if para3[0]==para3[2]:
                x=para3[0]
                t1=np.min([int(para3[1]), int(para3[3])])
                t2=np.max([int(para3[1]), int(para3[3])])
                for y in range(t1, t2 + 1):
                    inten.append(self.image[int(np.round(x))][int(np.round(y))])
            else:
                for x in range(int(para3[0]), int(para3[2] + 1)):
                    y = (para3[3] - para3[1]) * (x - para3[0]) / (para3[2] - para3[0]) + para3[1]
                    if 0 <= np.round(y) < self.image.shape[1] and 0 <= np.round(x) < self.image.shape[0]:
                        inten.append(self.image[int(np.round(x))][int(np.round(y))])
            varinten3.append(np.var(inten))
        self.varmin = np.min(varinten3)
        # distance of 2 and 3 for centers and params: using mean
        center2_3=[]
        para2_3=[]
        for m in range(2):
            temp_center=[]
            temp_para=[]
            center2=params2[1+m*6:3+m*6]
            para2=params2[m*6:6+m*6]
            a=mat(para2)
            min_cdis3=np.mean([np.linalg.norm(center2-self.params[1:3]),np.linalg.norm(center2-self.params[7:9]),np.linalg.norm(center2-self.params[13:15])])
            center2_3.append(min_cdis3)
            trc=[]
            for n in range(3):
                para3=self.params[n*6:6+n*6]
                b=mat(para3)
                cossim = dot(a,b.T)/linalg.norm(a)/linalg.norm(b)
                cosdis = 1.-np.abs(cossim)
                trc.append(cosdis)
            para2_3.append(np.mean(trc))
        self.center23=np.mean(center2_3)
        self.param23=np.mean(para2_3)