Python scipy module: sum() example source code

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use scipy.sum().
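
A note before the examples: when these projects were written, the top-level scipy namespace re-exported most of NumPy, so scipy.sum (seen below as scipy.sum, sp.sum, or sci.sum) was literally numpy.sum. That alias has since been deprecated and removed from SciPy, so the minimal sketch below uses numpy.sum, whose semantics are identical:

import numpy as np  # modern home of the function these snippets call as scipy.sum

a = np.array([[1.0, 2.0],
              [3.0, 4.0]])

print(np.sum(a))                         # 10.0: reduce over every element
print(np.sum(a, axis=0))                 # [4. 6.]: column sums
print(np.sum(a, axis=1))                 # [3. 7.]: row sums
print(np.sum(a, axis=1, keepdims=True))  # [[3.] [7.]]: keep the reduced axis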

Project: HistoricalMap    Author: lennepkade    | Project source | File source
def compute_confusion_matrix(self,yp,yr):
        ''' 
        Compute the confusion matrix
        '''
        # Initialization
        n = yp.size
        C=int(yr.max())
        self.confusion_matrix=sp.zeros((C,C))

        # Compute confusion matrix
        for i in range(n):
            self.confusion_matrix[yp[i].astype(int)-1,yr[i].astype(int)-1] +=1

        # Compute overall accuracy
        self.OA=sp.sum(sp.diag(self.confusion_matrix))/n

        # Compute Kappa
        nl = sp.sum(self.confusion_matrix,axis=1)
        nc = sp.sum(self.confusion_matrix,axis=0)
        self.Kappa = ((n**2)*self.OA - sp.sum(nc*nl))/(n**2-sp.sum(nc*nl))

        # TBD: variance of the Kappa
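
A self-contained rerun of the same arithmetic (the class around this method is not shown here): labels are 1-based, OA is the trace ratio of the confusion matrix, and Kappa is Cohen's kappa computed from the row and column margins. The toy labels below are made up for illustration.

import scipy as sp  # pre-removal SciPy, where sp.zeros/sp.diag/sp.sum are the NumPy functions

def confusion_stats(yp, yr):
    n = yp.size
    C = int(yr.max())
    cm = sp.zeros((C, C))
    for i in range(n):
        cm[int(yp[i]) - 1, int(yr[i]) - 1] += 1
    OA = sp.sum(sp.diag(cm)) / n              # overall accuracy: trace / n
    nl = sp.sum(cm, axis=1)                   # row margins
    nc = sp.sum(cm, axis=0)                   # column margins
    Kappa = ((n**2) * OA - sp.sum(nc * nl)) / (n**2 - sp.sum(nc * nl))
    return cm, OA, Kappa

yp = sp.array([1, 2, 2, 3, 3, 3])  # predicted labels (1-based)
yr = sp.array([1, 2, 3, 3, 3, 2])  # reference labels
cm, oa, kappa = confusion_stats(yp, yr)
print(oa, kappa)  # ~0.667, ~0.45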
Project: glmnet_py    Author: hanfang    | Project source | File source
def glmnet_softmax(x):
    d = x.shape
    nas = scipy.any(scipy.isnan(x), axis = 1)
    if scipy.any(nas):
        pclass = scipy.zeros([d[0], 1])*scipy.NaN
        if scipy.sum(nas) < d[0]:
            # classify the NaN-free rows recursively; rows with NaNs stay NaN
            pclass2 = glmnet_softmax(x[~nas, :])
            pclass[~nas] = pclass2
        result = pclass  # also covers the all-NaN case, which previously left result unbound
    else:
        maxdist = x[:, 0].copy()  # was x[:, 1]; copy so the input matrix is not mutated
        pclass = scipy.ones([d[0], 1])
        for i in range(1, d[1], 1):
            t = x[:, i] > maxdist
            pclass[t] = i + 1  # 1-based labels, consistent with the ones() initializer
            maxdist[t] = x[t, i]
        result = pclass

    return result
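
A toy run of the (fixed) function above, assuming a pre-removal SciPy where scipy.any, scipy.isnan, etc. are still the NumPy aliases; the input matrix is invented:

import scipy

x = scipy.array([[0.1, 0.7, 0.2],
                 [0.6, 0.3, 0.1],
                 [scipy.NaN, 0.5, 0.5]])
print(glmnet_softmax(x))  # [[2.] [1.] [nan]]: 1-based argmax per row, NaN rows propagate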
#=========================
Project: glmnet_py    Author: hanfang    | Project source | File source
def auc(y, prob, w):
    if len(w) == 0:
        # Unweighted: Mann-Whitney U statistic from the ranks of the positives;
        # a tiny random perturbation breaks ties before ranking.
        mindiff = scipy.amin(scipy.diff(scipy.unique(prob)))
        pert = scipy.random.uniform(0, mindiff/3, prob.size)
        t, rprob = scipy.unique(prob + pert, return_inverse = True)
        n1 = scipy.sum(y, keepdims = True)
        n0 = y.shape[0] - n1
        # rprob is 0-based, so subtract n1*(n1-1)/2; the original n1*(n1+1)/2
        # assumes 1-based ranks and undercounts the statistic by n1
        u = scipy.sum(rprob[y == 1]) - n1*(n1 - 1)/2
        result = u/(n1*n0)
    else:
        # Weighted: for each positive, accumulate the negative weight ranked below it.
        op = scipy.argsort(prob)
        y = y[op]
        w = w[op]
        cw = scipy.cumsum(w)
        w1 = w[y == 1]
        cw1 = scipy.cumsum(w1)
        wauc = scipy.sum(w1*(cw[y == 1] - cw1))
        sumw = cw1[-1]
        sumw = sumw*(cw[-1] - sumw)  # was c1[-1], an undefined name
        result = wauc/sumw
    return result
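
A toy check of both branches, again assuming the old scipy aliases; with all-ones weights the weighted branch should agree with the unweighted one (here 8 of the 9 positive/negative pairs are ranked correctly):

import scipy

y = scipy.array([0, 0, 1, 1, 0, 1])
prob = scipy.array([0.1, 0.4, 0.35, 0.8, 0.2, 0.7])
print(auc(y, prob, scipy.array([])))         # ~0.889 (unweighted, rank-based)
print(auc(y, prob, scipy.ones(y.shape[0])))  # ~0.889 (weighted, cumulative-sum based)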
#=========================
Project: glmnet_py    Author: hanfang    | Project source | File source
def cvcompute(mat, weights, foldid, nlams):
    if len(weights.shape) > 1:
        weights = scipy.reshape(weights, [weights.shape[0], ])
    wisum = scipy.bincount(foldid, weights = weights)
    nfolds = scipy.amax(foldid) + 1
    outmat = scipy.ones([nfolds, mat.shape[1]])*scipy.NaN
    good = scipy.zeros([nfolds, mat.shape[1]])
    mat[scipy.isinf(mat)] = scipy.NaN
    for i in range(nfolds):
        tf = foldid == i
        mati = mat[tf, ]
        wi = weights[tf, ]
        outmat[i, :] = wtmean(mati, wi)
        good[i, 0:nlams[i]] = 1
    N = scipy.sum(good, axis = 0)
    cvcpt = dict()
    cvcpt['cvraw'] = outmat
    cvcpt['weights'] = wisum
    cvcpt['N'] = N

    return(cvcpt)

# end of cvcompute
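
cvcompute aggregates per-observation losses into per-fold weighted means. A minimal toy run, assuming the same old-style scipy environment as above and the wtmean helper shown further down this page; the fold assignments and loss matrix are made up for illustration:

import scipy

mat = scipy.random.rand(6, 4)             # per-observation loss for 4 lambda values
weights = scipy.ones(6)
foldid = scipy.array([0, 0, 1, 1, 2, 2])  # 3 cross-validation folds
nlams = [4, 4, 4]                         # number of converged lambdas per fold
out = cvcompute(mat, weights, foldid, nlams)
print(out['cvraw'].shape)  # (3, 4): one weighted mean loss per fold and lambda
print(out['weights'])      # [2. 2. 2.]: total weight per fold
print(out['N'])            # [3. 3. 3. 3.]: folds contributing to each lambda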
#=========================
Project: pyssp    Author: shunsukeaihara    | Project source | File source
def _alignment(self,ssignal,ksignal):
        starta = 0
        for i in range(len(ssignal))[0::2]:
            if ssignal[i]<-100/32767.0 or ssignal[i]>100/32767.0:
                starta = i
                break
        startb=0
        for i in range(len(ksignal))[0::2]:
            if ksignal[i]<-100/32767.0 or ksignal[i]>100/32767.0:
                startb = i
                break
        start=starta-100
        base = ssignal[start:start+5000]
        small=1000000
        index=0
        for i in range(startb-1000,startb-1000+10000)[0::2]:
            signal = ksignal[i:i+5000]
            score =  math.sqrt(sp.sum(sp.square(sp.array(list(base-signal),sp.float32))))
            if score<small:
                index=i
                small=score
        return  start,index
        #return 0,0
Project: sGLMM    Author: YeWenting    | Project source | File source
def tstat(beta, var, sigma, q, N, log=False):

    """
       Calculates a t-statistic and associated p-value given the estimate of beta and its standard error.
       This is actually an F-test, but when only one hypothesis is being tested, it reduces to a t-test.
    """
    ts = beta / np.sqrt(var * sigma)
    print(ts)
    # ts = beta / np.sqrt(sigma)
    # ps = 2.0*(1.0 - stats.t.cdf(np.abs(ts), self.N-q))
    # sf == survival function - this is more accurate -- could also use logsf if the precision is not good enough
    if log:
        ps = 2.0 + (stats.t.logsf(np.abs(ts), N - q))
    else:
        ps = 2.0 * (stats.t.sf(np.abs(ts), N - q))
    print(ps)
    # if not len(ts) == 1 or not len(ps) == 1:
    #     raise Exception("Something bad happened :(")
        # return ts, ps
    return ts.sum(), ps.sum()
Project: dzetsaka    Author: lennepkade    | Project source | File source
def compute_confusion_matrix(self,yp,yr):
        ''' 
        Compute the confusion matrix
        '''
        # Initialization
        n = yp.size
        C=int(yr.max())
        self.confusion_matrix=sp.zeros((C,C))

        # Compute confusion matrix
        for i in range(n):
            self.confusion_matrix[yp[i].astype(int)-1,yr[i].astype(int)-1] +=1

        # Compute overall accuracy
        self.OA=sp.sum(sp.diag(self.confusion_matrix))/n

        # Compute Kappa
        nl = sp.sum(self.confusion_matrix,axis=1)
        nc = sp.sum(self.confusion_matrix,axis=0)
        self.Kappa = ((n**2)*self.OA - sp.sum(nc*nl))/(n**2-sp.sum(nc*nl))

        # TBD: variance of the Kappa
Project: RFCN    Author: zengxianyu    | Project source | File source
def __MR_W_D_matrix(self,img,labels):
        s = sp.amax(labels)+1
        vect = self.__MR_superpixel_mean_vector(img,labels)

        adj = self.__MR_get_adj_loop(labels)

        W = sp.spatial.distance.squareform(sp.spatial.distance.pdist(vect))

        W = sp.exp(-1*W / self.weight_parameters['delta'])
        W[adj.astype(bool)] = 0  # np.bool alias removed in newer NumPy


        D = sp.zeros((s,s)).astype(float)
        for i in range(s):
            D[i, i] = sp.sum(W[i])

        return W,D
Project: CS-LMM    Author: HaohanWang    | Project source | File source
def tstat(beta, var, sigma, q, N, log=False):

    """
       Calculates a t-statistic and associated p-value given the estimate of beta and its standard error.
       This is actually an F-test, but when only one hypothesis is being tested, it reduces to a t-test.
    """
    ts = beta / np.sqrt(var * sigma)
    # ts = beta / np.sqrt(sigma)
    # ps = 2.0*(1.0 - stats.t.cdf(np.abs(ts), self.N-q))
    # sf == survival function - this is more accurate -- could also use logsf if the precision is not good enough
    if log:
        ps = 2.0 + (stats.t.logsf(np.abs(ts), N - q))
    else:
        ps = 2.0 * (stats.t.sf(np.abs(ts), N - q))
    if not len(ts) == 1 or not len(ps) == 1:
        raise Exception("Something bad happened :(")
        # return ts, ps
    return ts.sum(), ps.sum()
Project: stocks_analysis    Author: mrdisclaimer    | Project source | File source
def model_error(f, x, y):
    return sp.sum((f(x) - y) ** 2)


# Function that imports tables with the serial number of the day
# and the Adjusted Closing Prices
Project: SLIC_cityscapes    Author: wpqmanu    | Project source | File source
def calcdistance_mat(self, points, center, spatialmax):
        ## -- L2norm optimized -- ##
        center = scipy.array(center)

        location_center=center[:2]
        color_center=center[2:]

        location_points=points[:,:,:2]
        color_points=points[:,:,2:]

        difs_location=location_points-location_center
        difs_color=1-np.equal(color_points,color_center)
        if len(difs_color.shape)==2:
            difs_color=np.expand_dims(difs_color, axis=2)

        difs=np.concatenate((difs_location,difs_color),axis=2)

        norm = (difs ** 2).astype(float)
        norm[:, :, 0:2] *= (float(self.MM) / (spatialmax * spatialmax))  # color weight on location term
        norm = scipy.sum(norm, 2)
        return norm
Project: PleioPred    Author: yiminghu    | Project source | File source
def _parse_plink_snps_(genotype_file, snp_indices):
    plinkf = plinkfile.PlinkFile(genotype_file)
    samples = plinkf.get_samples()
    num_individs = len(samples)
    num_snps = len(snp_indices)
    raw_snps = sp.empty((num_snps,num_individs),dtype='int8')
    #If these indices are not in order then we place them in the right place while parsing SNPs.
    snp_order = sp.argsort(snp_indices)
    ordered_snp_indices = list(snp_indices[snp_order])
    ordered_snp_indices.reverse()
    print('Iterating over file to load SNPs')
    snp_i = 0
    next_i = ordered_snp_indices.pop()
    line_i = 0
    max_i = ordered_snp_indices[0]
    while line_i <= max_i:
        if line_i < next_i:
            next(plinkf)
        elif line_i==next_i:
            line = next(plinkf)
            snp = sp.array(line, dtype='int8')
            bin_counts = line.allele_counts()
            if bin_counts[-1]>0:
                mode_v = sp.argmax(bin_counts[:2])
                snp[snp==3] = mode_v
            s_i = snp_order[snp_i]
            raw_snps[s_i]=snp
            if line_i < max_i:
                next_i = ordered_snp_indices.pop()
            snp_i+=1
        line_i +=1
    plinkf.close()
    assert snp_i==len(raw_snps), 'Failed to parse SNPs?'
    num_indivs = len(raw_snps[0])
    freqs = sp.sum(raw_snps,1, dtype='float32')/(2*float(num_indivs))
    return raw_snps, freqs
Project: PleioPred    Author: yiminghu    | Project source | File source
def get_ld_tables(snps, ld_radius=100, ld_window_size=0):
    """
    Calculates LD tables, and the LD score in one go...
    """

    ld_dict = {}
    m,n = snps.shape
    print(m, n)
    ld_scores = sp.ones(m)
    ret_dict = {}
    for snp_i, snp in enumerate(snps):
        # Calculate D
        start_i = max(0, snp_i - ld_radius)
        stop_i = min(m, snp_i + ld_radius + 1)
        X = snps[start_i: stop_i]
        D_i = sp.dot(snp, X.T) / n
        r2s = D_i ** 2
        ld_dict[snp_i] = D_i
        lds_i = sp.sum(r2s - (1-r2s) / (n-2),dtype='float32')
        #lds_i = sp.sum(r2s - (1-r2s)*empirical_null_r2)
        ld_scores[snp_i] =lds_i
    ret_dict['ld_dict']=ld_dict
    ret_dict['ld_scores']=ld_scores

    if ld_window_size>0:
        ref_ld_matrices = []
        for i, wi in enumerate(range(0, m, ld_window_size)):
            start_i = wi
            stop_i = min(m, wi + ld_window_size)
            curr_window_size = stop_i - start_i
            X = snps[start_i: stop_i]
            D = sp.dot(X, X.T) / n
            ref_ld_matrices.append(D)
        ret_dict['ref_ld_matrices']=ref_ld_matrices
    return ret_dict
Project: HistoricalMap    Author: lennepkade    | Project source | File source
def predict(self,xt,tau=None,proba=None):
        '''
        Function that predict the label for sample xt using the learned model
        Inputs:
            xt: the samples to be classified
        Outputs:
            y: the class
            K: the decision value for each class
        '''
        ## Get information from the data
        nt = xt.shape[0]        # Number of testing samples
        C = self.ni.shape[0]    # Number of classes

        ## Initialization
        K = sp.empty((nt,C))

        if tau is None:
            TAU=self.tau
        else:
            TAU=tau

        for c in range(C):
            invCov,logdet = self.compute_inverse_logdet(c,TAU)
            cst = logdet - 2*sp.log(self.prop[c]) # Pre compute the constant

            xtc = xt-self.mean[c,:]
            temp = sp.dot(invCov,xtc.T).T
            K[:,c] = sp.sum(xtc*temp,axis=1)+cst
            del temp,xtc

        ## Assign the label saved in classnum to the minimum value of K
        yp = self.classnum[sp.argmin(K,1)]

        ## Reassign label with real value

        if proba is None:
            return yp
        else:
            return yp,K
Project: HistoricalMap    Author: lennepkade    | Project source | File source
def compute_inverse_logdet(self,c,tau):
        Lr = self.L[c,:]+tau # Regularized eigenvalues
        temp = self.Q[c,:,:]*(1/Lr)
        invCov = sp.dot(temp,self.Q[c,:,:].T) # Pre compute the inverse
        logdet = sp.sum(sp.log(Lr)) # Compute the log determinant
        return invCov,logdet
Project: HistoricalMap    Author: lennepkade    | Project source | File source
def BIC(self,x,y,tau=None):
        '''
        Computes the Bayesian Information Criterion of the model
        '''
        ## Get information from the data
        C,d = self.mean.shape
        n = x.shape[0]

        ## Initialization
        if tau is None:
            TAU=self.tau
        else:
            TAU=tau

        ## Penalization
        P = C*(d*(d+3)/2) + (C-1)
        P *= sp.log(n)

        ## Compute the log-likelihood
        L = 0
        for c in range(C):
            j = sp.where(y==(c+1))[0]
            xi = x[j,:]
            invCov,logdet = self.compute_inverse_logdet(c,TAU)
            cst = logdet - 2*sp.log(self.prop[c]) # Pre compute the constant
            xi -= self.mean[c,:]
            temp = sp.dot(invCov,xi.T).T
            K = sp.sum(xi*temp,axis=1)+cst
            L +=sp.sum(K)
            del K,xi

        return L + P
Project: LearningPyQt    Author: manashmndl    | Project source | File source
def error(f, x, y):
    return sp.sum((f(x)-y)**2)
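
This one-line residual-sum-of-squares helper (the same pattern appears in the stocks_analysis, Building-Machine-Learning-Systems and ML snippets on this page) is typically used to score a fitted model f. A sketch pairing it with numpy.polyfit; the data are invented:

import numpy as np
import scipy as sp  # old-style alias so that sp.sum above resolves

x = np.linspace(0, 10, 50)
y = 3.0 * x + 1.0 + np.random.randn(50)  # noisy line
f = np.poly1d(np.polyfit(x, y, 1))       # degree-1 least-squares fit
print(error(f, x, y))                    # residual sum of squares of the fit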
Project: Nightchord    Author: theriley106    | Project source | File source
def compare_images(img1, img2):
    # normalize to compensate for exposure difference, this may be unnecessary
    # consider disabling it
    img1 = normalize(img1)
    img2 = normalize(img2)
    # calculate the difference and its norms
    diff = img1 - img2  # elementwise for scipy arrays
    m_norm = sum(abs(diff))  # Manhattan norm
    z_norm = norm(diff.ravel(), 0)  # Zero norm
    return (m_norm, z_norm)
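
In the original context, sum and norm come from "from scipy import sum" and "from scipy.linalg import norm" (which is why this snippet appears on a scipy.sum() page), and normalize is a helper not shown here. A toy comparison, with a hypothetical normalize that rescales to [0, 1]:

import numpy as np
from scipy import sum           # scipy.sum, as used inside compare_images (pre-removal SciPy)
from scipy.linalg import norm   # vector norms; ord=0 counts nonzero entries

def normalize(img):
    # hypothetical stand-in for the helper the snippet assumes
    rng = img.max() - img.min()
    return (img - img.min()) / rng if rng else img * 0.0

a = np.random.rand(8, 8)
b = np.clip(a + 0.05 * np.random.rand(8, 8), 0.0, 1.0)
m_norm, z_norm = compare_images(a, b)
print(m_norm / a.size, z_norm)  # average per-pixel difference, count of differing pixels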
Project: temci    Author: parttimenerd    | Project source | File source
def geom_std(values: t.List[float]) -> float:
    """
    Calculates the geometric standard deviation for the passed values.
    Source: https://en.wikipedia.org/wiki/Geometric_standard_deviation
    """
    import scipy.stats as stats
    import scipy as sp
    gmean = stats.gmean(values)
    return sp.exp(sp.sqrt(sp.sum([sp.log(x / gmean) ** 2 for x in values]) / len(values)))
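A quick sanity check with toy values, assuming the same pre-removal SciPy aliases the function itself uses:

vals = [1.0, 2.0, 4.0, 8.0]  # geometric mean is ~2.83
print(geom_std(vals))        # ~2.17: the multiplicative spread around the geometric mean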
Project: glmnet_py    Author: hanfang    | Project source | File source
def wtmean(mat,weights):
    if len(weights.shape) == 1:
        weights = scipy.reshape(weights, [scipy.size(weights), 1])
    wmat = scipy.isfinite(mat)*weights  # weight only the finite entries
    mat[scipy.isnan(mat)] = 0
    swmat = mat*wmat
    tf = weights != 0
    tf = tf[:,0]    
    y = scipy.sum(swmat[tf, :], axis = 0)/scipy.sum(wmat, axis = 0)        
    return y
# end of wtmean
Project: 5th_place_solution_facebook_check_ins    Author: aikinogard    | Project source | File source
def fixed_point(t, M, I, a2):
    l=7
    I = sci.float128(I)
    M = sci.float128(M)
    a2 = sci.float128(a2)
    f = 2*sci.pi**(2*l)*sci.sum(I**l*a2*sci.exp(-I*sci.pi**2*t))
    for s in range(l, 1, -1):
        K0 = sci.prod(range(1, 2*s, 2))/sci.sqrt(2*sci.pi)  # xrange is Python 2 only
        const = (1 + (1/2)**(s + 1/2))/3
        time=(2*const*K0/M/f)**(2/(3+2*s))
        f=2*sci.pi**(2*s)*sci.sum(I**s*a2*sci.exp(-I*sci.pi**2*time))
    return t-(2*M*sci.sqrt(sci.pi)*f)**(-2/5)
Project: OSDN    Author: abhijitbendale    | Project source | File source
def computeOpenMaxProbability(openmax_fc8, openmax_score_u):
    """ Convert the scores in probability value using openmax

    Input:
    ---------------
    openmax_fc8 : modified FC8 layer from Weibull based computation
    openmax_score_u : degree

    Output:
    ---------------
    modified_scores : probability values modified using OpenMax framework,
    by incorporating the degree of uncertainty/openness for a given class

    """
    prob_scores, prob_unknowns = [], []
    for channel in range(NCHANNELS):
        channel_scores, channel_unknowns = [], []
        for category in range(NCLASSES):
            channel_scores += [sp.exp(openmax_fc8[channel, category])]

        total_denominator = sp.sum(sp.exp(openmax_fc8[channel, :])) + sp.exp(sp.sum(openmax_score_u[channel, :]))
        prob_scores += [channel_scores/total_denominator ]
        prob_unknowns += [sp.exp(sp.sum(openmax_score_u[channel, :]))/total_denominator]

    prob_scores = sp.asarray(prob_scores)
    prob_unknowns = sp.asarray(prob_unknowns)

    scores = sp.mean(prob_scores, axis = 0)
    unknowns = sp.mean(prob_unknowns, axis=0)
    modified_scores =  scores.tolist() + [unknowns]
    assert len(modified_scores) == 1001
    return modified_scores
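
NCHANNELS and NCLASSES are module-level globals in the original project; the values below are assumptions for a toy run (the assert pins the output length at 1001, i.e. 1000 known classes plus one unknown bucket). Again assuming the old scipy aliases:

import scipy as sp

NCHANNELS, NCLASSES = 10, 1000   # assumed globals; 1000 matches the assert above
openmax_fc8 = sp.random.randn(NCHANNELS, NCLASSES)
openmax_score_u = sp.random.randn(NCHANNELS, NCLASSES) * 0.1
probs = computeOpenMaxProbability(openmax_fc8, openmax_score_u)
print(len(probs), sum(probs))    # 1001 probabilities summing to ~1.0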

#---------------------------------------------------------------------------------
Project: Building-Machine-Learning-Systems-With-Python-Second-Edition    Author: PacktPublishing    | Project source | File source
def error(f, x, y):
    return sp.sum((f(x) - y) ** 2)
Project: sGLMM    Author: YeWenting    | Project source | File source
def nLLeval(ldelta, Uy, S, REML=True):
    """
    evaluate the negative log likelihood of a random effects model:
    nLL = 1/2 * (n_s*log(2*pi) + logdet(K + delta*I) + n_s + n_s*log(1/n_s * y^T (K + delta*I)^{-1} y)),
    where K = USU^T.

    Uy: transformed outcome: n_s x 1
    S:  eigenvectors of K: n_s
    ldelta: log-transformed ratio sigma_gg/sigma_ee
    """
    n_s = Uy.shape[0]
    delta = scipy.exp(ldelta)

    # evaluate log determinant
    Sd = S + delta
    ldet = scipy.sum(scipy.log(Sd))

    # evaluate the variance
    Sdi = 1.0 / Sd
    Sdi=Sdi.reshape((Sdi.shape[0],1))
    ss = 1. / n_s * (Uy*Uy*Sdi).sum()
    # evalue the negative log likelihood
    nLL = 0.5 * (n_s * scipy.log(2.0 * scipy.pi) + ldet + n_s + n_s * scipy.log(ss))

    if REML:
        pass

    return nLL
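
In the surrounding project this function is minimized over ldelta (for example on a grid, or with scipy.optimize) to fit the variance components. A toy evaluation with a synthetic eigenvalue spectrum and rotated outcome, both invented:

import scipy

S = scipy.random.rand(100) + 0.5   # toy eigenvalue spectrum of K
Uy = scipy.random.randn(100, 1)    # toy rotated outcome U^T y
for ldelta in (-2.0, 0.0, 2.0):
    print(ldelta, nLLeval(ldelta, Uy, S))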
Project: prml    Author: Yevgnen    | Project source | File source
def fit(self, X):
        n_samples, n_features = X.shape
        n_classes = self.n_classes
        max_iter = self.max_iter
        tol = self.tol

        rand_center_idx = sprand.permutation(n_samples)[0:n_classes]
        center = X[rand_center_idx].T
        responsibility = sp.zeros((n_samples, n_classes))

        for iter in range(max_iter):
            # E step
            dist = sp.expand_dims(X, axis=2) - sp.expand_dims(center, axis=0)
            dist = spla.norm(dist, axis=1)**2
            min_idx = sp.argmin(dist, axis=1)
            responsibility.fill(0)
            responsibility[sp.arange(n_samples), min_idx] = 1

            # M step
            center_new = sp.dot(X.T, responsibility) / sp.sum(responsibility, axis=0)
            diff = center_new - center
            print('K-Means: {0:5d} {1:4e}'.format(iter, spla.norm(diff) / spla.norm(center)))
            if (spla.norm(diff) < tol * spla.norm(center)):
                break

            center = center_new

        self.center = center.T
        self.responsibility = responsibility

        return self
Project: prml    Author: Yevgnen    | Project source | File source
def _init_params(self, X):
        init = self.init
        n_samples, n_features = X.shape
        n_components = self.n_components

        if (init == 'kmeans'):
            km = Kmeans(n_components)
            clusters, mean, cov = km.cluster(X)
            coef = sp.array([c.shape[0] / n_samples for c in clusters])
            comps = [multivariate_normal(mean[i], cov[i], allow_singular=True)
                     for i in range(n_components)]
        elif (init == 'rand'):
            coef = sp.absolute(sprand.randn(n_components))
            coef = coef / coef.sum()
            means = X[sprand.permutation(n_samples)[0: n_components]]
            clusters = [[] for i in range(n_components)]
            for x in X:
                idx = sp.argmin([spla.norm(x - mean) for mean in means])
                clusters[idx].append(x)

            comps = []
            for k in range(n_components):
                mean = means[k]
                cov = sp.cov(clusters[k], rowvar=0, ddof=0)
                comps.append(multivariate_normal(mean, cov, allow_singular=True))

        self.coef = coef
        self.comps = comps
Project: prml    Author: Yevgnen    | Project source | File source
def log_likelihood(self, X):
        return sp.sum(sp.log(self.pdf(X)))
Project: prml    Author: Yevgnen    | Project source | File source
def _maximum_likelihood(self, X):
        n_samples, n_features = X.shape if X.ndim > 1 else (1, X.shape[0])
        n_components = self.n_components

        # Predict mean
        mu = X.mean(axis=0)

        # Predict covariance
        cov = sp.cov(X, rowvar=0)
        eigvals, eigvecs = self._eig_decomposition(cov)
        sigma2 = ((sp.sum(cov.diagonal()) - sp.sum(eigvals)) /
                  (n_features - n_components))  # FIXME: M < D?

        weight = sp.dot(eigvecs, sp.diag(sp.sqrt(eigvals - sigma2)))
        M = sp.dot(weight.T, weight) + sigma2 * sp.eye(n_components)
        inv_M = spla.inv(M)

        self.eigvals = eigvals
        self.eigvecs = eigvecs
        self.predict_mean = mu
        self.predict_cov = sp.dot(weight, weight.T) + sigma2 * sp.eye(n_features)
        self.latent_mean = sp.transpose(sp.dot(inv_M, sp.dot(weight.T, X.T - mu[:, sp.newaxis])))
        self.latent_cov = sigma2 * inv_M
        self.sigma2 = sigma2    # FIXME!
        self.weight = weight
        self.inv_M = inv_M

        return self.latent_mean
Project: dzetsaka    Author: lennepkade    | Project source | File source
def compute_inverse_logdet(self,c,tau):
        Lr = self.L[c,:]+tau # Regularized eigenvalues
        temp = self.Q[c,:,:]*(1/Lr)
        invCov = sp.dot(temp,self.Q[c,:,:].T) # Pre compute the inverse
        logdet = sp.sum(sp.log(Lr)) # Compute the log determinant
        return invCov,logdet
Project: dzetsaka    Author: lennepkade    | Project source | File source
def BIC(self,x,y,tau=None):
        '''
        Computes the Bayesian Information Criterion of the model
        '''
        ## Get information from the data
        C,d = self.mean.shape
        n = x.shape[0]

        ## Initialization
        if tau is None:
            TAU=self.tau
        else:
            TAU=tau

        ## Penalization
        P = C*(d*(d+3)/2) + (C-1)
        P *= sp.log(n)

        ## Compute the log-likelihood
        L = 0
        for c in range(C):
            j = sp.where(y==(c+1))[0]
            xi = x[j,:]
            invCov,logdet = self.compute_inverse_logdet(c,TAU)
            cst = logdet - 2*sp.log(self.prop[c]) # Pre compute the constant
            xi -= self.mean[c,:]
            temp = sp.dot(invCov,xi.T).T
            K = sp.sum(xi*temp,axis=1)+cst
            L +=sp.sum(K)
            del K,xi

        return L + P
Project: ML    Author: saurabhsuman47    | Project source | File source
def error(f,x,y):
    return sp.sum((f(x)-y)**2)
Project: CS-LMM    Author: HaohanWang    | Project source | File source
def nLLeval(ldelta, Uy, S, REML=True):
    """
    evaluate the negative log likelihood of a random effects model:
    nLL = 1/2 * (n_s*log(2*pi) + logdet(K + delta*I) + n_s + n_s*log(1/n_s * y^T (K + delta*I)^{-1} y)),
    where K = USU^T.
    Uy: transformed outcome: n_s x 1
    S:  eigenvectors of K: n_s
    ldelta: log-transformed ratio sigma_gg/sigma_ee
    """
    n_s = Uy.shape[0]
    delta = scipy.exp(ldelta)

    # evaluate log determinant
    Sd = S + delta
    ldet = scipy.sum(scipy.log(Sd))

    # evaluate the variance
    Sdi = 1.0 / Sd
    Uy = Uy.flatten()
    ss = 1. / n_s * (Uy * Uy * Sdi).sum()

    # evalue the negative log likelihood
    nLL = 0.5 * (n_s * scipy.log(2.0 * scipy.pi) + ldet + n_s + n_s * scipy.log(ss))

    if REML:
        pass

    return nLL
Project: SLIC_cityscapes    Author: wpqmanu    | Project source | File source
def calcerror(self, centers, prevcenters,nan_record):
        '''
        L2 norm of location
        '''
        for nan_index in nan_record[::-1]:
            del prevcenters[nan_index]

        # error=sum([scipy.dot(now[:2] - prev[:2], now[:2] - prev[:2]) + scipy.dot(1-np.equal(now[2:], prev[2:]), 1-np.equal(now[2:], prev[2:])) for now, prev in zip(centers, prevcenters)])
        error=sum([scipy.dot(1-np.equal(now[2:], prev[2:]), 1-np.equal(now[2:], prev[2:])) for now, prev in zip(centers, prevcenters)])

        print "error:", error
        return error
Project: SLIC_cityscapes    Author: wpqmanu    | Project source | File source
def iteration(self, centers, stepsize):
        error = sum([scipy.dot(center[:2], center[:2]) for center in centers])
        while error > self.ERROR_THRESHOLD:
            self.assignment(centers, stepsize)  ## M-step Note step size is the initial length/width of a superpixel.
            prevcenters=centers
            centers,nan_record=self.update(centers)  ## E-step
            error = self.calcerror(centers, prevcenters,nan_record)
            print "L2 error:", error

            if self.DEBUGFLAG:
                base, ext = os.path.splitext(self.filename)
                self.filename = base.split("_error")[0] + "_error" + str(error) + ext
                self.resultimg(centers)
        return (centers, self.assignedindex)
Project: SLIC_cityscapes    Author: wpqmanu    | Project source | File source
def L2norm(vec):
    return sum([item * item for item in vec])
Project: SLIC_cityscapes    Author: wpqmanu    | Project source | File source
def calcdistance_mat(self, points, center, spatialmax):
        ## -- L2norm optimized -- ##
        center = scipy.array(center)
        difs = points - center
        norm = (difs ** 2).astype(float)
        norm[:, :, 0:2] *= (float(self.MM) / (spatialmax * spatialmax))
        norm = scipy.sum(norm, 2)
        return norm
Project: SLIC_cityscapes    Author: wpqmanu    | Project source | File source
def iteration(self, centers, stepsize):
        error = sum([scipy.dot(center[:2], center[:2]) for center in centers])
        while error > self.ERROR_THRESHOLD:
            self.assignment(centers, stepsize)  ## M-step
            prevcenters, centers = centers, self.update(centers)  ## E-step
            error = self.calcerror(centers, prevcenters)
            print "L2 error:", error

            if self.DEBUGFLAG:
                base, ext = os.path.splitext(self.filename)
                self.filename = base.split("_error")[0] + "_error" + str(error) + ext
                self.resultimg(centers)
        return (centers, self.assignedindex)