Python scipy module: zeros() example source code

The following code examples, collected from open-source Python projects, illustrate how to use scipy.zeros().

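Note: scipy.zeros was a plain re-export of numpy.zeros; newer SciPy releases deprecate these NumPy aliases (and current ones no longer provide them), so new code should call numpy.zeros directly. A minimal sketch of the call all the examples below rely on, written against NumPy:

import numpy as np

# numpy.zeros(shape, dtype) allocates an array of the given shape filled with zeros;
# scipy.zeros accepted exactly the same arguments.
a = np.zeros(5)                       # 1-D float array: [0. 0. 0. 0. 0.]
b = np.zeros((3, 4), dtype=np.int32)  # 3x4 integer matrix of zeros
c = np.zeros([2, 1])                  # the shape may also be given as a list, as many examples below do
print(a.dtype, b.shape, c.shape)      # float64 (3, 4) (2, 1)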
Project: balu-python    Author: dipaco    | Project source | File source
def edge_LoG(I, sigma):
    LoG = laplace(gaussian(I, sigma=sigma), ksize=3)
    thres = np.absolute(LoG).mean() * 1.0
    output = sp.zeros(LoG.shape)
    w = output.shape[1]
    h = output.shape[0]

    for y in range(1, h - 1):
        for x in range(1, w - 1):
            patch = LoG[y - 1:y + 2, x - 1:x + 2]
            p = LoG[y, x]
            maxP = patch.max()
            minP = patch.min()
            if p > 0:
                zeroCross = True if minP < 0 else False
            else:
                zeroCross = True if maxP > 0 else False
            if ((maxP - minP) > thres) and zeroCross:
                output[y, x] = 1

    # FIXME: It is necessary to decide whether to return the closing of the output or just the output
    #return binary_closing(output)
    return output
Project: ceres    Author: dicortazar    | Project source | File source
def test_fill_missing_fields(self):
        """ Test several cases for the fill_missing_fields method
        """

        empty_columns = []
        columns = ["test1"]
        empty_df = pandas.DataFrame()
        # With empty dataframe and any columns, this always returns empty dataframe
        # A DataFrame with columns but no data is still an empty DataFrame
        self.assertTrue(Format().fill_missing_fields(empty_df, empty_columns).empty)
        self.assertTrue(Format().fill_missing_fields(empty_df, columns).empty)
        self.assertEqual(columns, Format().fill_missing_fields(empty_df, columns).columns)

        # With a dataframe with some data, this returns a non-empty dataframe
        df = empty_df.copy()
        df["test"] = scipy.zeros(10)

        self.assertFalse(Format().fill_missing_fields(df, empty_columns).empty)
Project: ceres    Author: dicortazar    | Project source | File source
def fill_missing_fields(self, data, columns):
        """ This  method fills with 0's missing fields

        :param data: original Pandas dataframe
        :param columns: list of columns to be filled in the DataFrame
        :type data: pandas.DataFrame
        :type columns: list of strings

        :returns: Pandas dataframe with missing fields filled with 0's
        :rtype: pandas.DataFrame
        """

        for column in columns:
            if column not in data.columns:
                data[column] = scipy.zeros(len(data))

        return data
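A short usage sketch of the same idea with a hypothetical DataFrame, written directly against pandas and NumPy (numpy.zeros stands in for scipy.zeros):

import pandas as pd
import numpy as np

df = pd.DataFrame({"test1": [1, 2, 3]})
for column in ["test1", "test2"]:       # "test2" is missing from df
    if column not in df.columns:
        df[column] = np.zeros(len(df))  # the same call the method above makes via scipy.zeros
print(df)
#    test1  test2
# 0      1    0.0
# 1      2    0.0
# 2      3    0.0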
Project: sakmapper    Author: szairis    | Project source | File source
def gap(data, refs=None, nrefs=20, ks=range(1,11), method=None):
    shape = data.shape
    if refs is None:
        tops = data.max(axis=0)
        bots = data.min(axis=0)
        dists = scipy.matrix(scipy.diag(tops-bots))

        rands = scipy.random.random_sample(size=(shape[0], shape[1], nrefs))
        for i in range(nrefs):
            rands[:, :, i] = rands[:, :, i]*dists+bots
    else:
        rands = refs
    gaps = scipy.zeros((len(ks),))
    for (i, k) in enumerate(ks):
        g1 = method(n_clusters=k).fit(data)
        (kmc, kml) = (g1.cluster_centers_, g1.labels_)
        disp = sum([euclidean(data[m, :], kmc[kml[m], :]) for m in range(shape[0])])

        refdisps = scipy.zeros((rands.shape[2],))
        for j in range(rands.shape[2]):
            g2 = method(n_clusters=k).fit(rands[:, :, j])
            (kmc, kml) = (g2.cluster_centers_, g2.labels_)
            refdisps[j] = sum([euclidean(rands[m, :, j], kmc[kml[m],:]) for m in range(shape[0])])
        gaps[i] = scipy.log(scipy.mean(refdisps))-scipy.log(disp)
    return gaps
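A hypothetical usage sketch for the gap statistic above. It assumes scikit-learn's KMeans as the clustering method, that gap() and its module-level imports (scipy, and euclidean from scipy.spatial.distance) are in scope, and an environment old enough that scipy still exposes the NumPy aliases:

import numpy as np
from sklearn.cluster import KMeans

np.random.seed(0)
data = np.vstack([np.random.randn(50, 2),            # cluster around (0, 0)
                  np.random.randn(50, 2) + [5, 5]])   # cluster around (5, 5)

ks = range(1, 6)
gaps = gap(data, nrefs=10, ks=ks, method=KMeans)      # gap() is the function defined above
best_k = list(ks)[int(np.argmax(gaps))]
print("estimated number of clusters:", best_k)        # expected to be 2 for this data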
Project: GoodEnoughAlgs    Author: elsander    | Project source | File source
def DistanceMatrix(coords):
    '''Take a set of coordinates and calculate a matrix of
    Euclidean distances between the points.'''

    # we can assume that xs and ys are the same length
    stops = coords.shape[1]
    distMat = scipy.zeros((stops, stops))

    # this will be symmetric, so we only need to calculate
    # the upper triangle
    for i in range(stops):
        for j in range(i + 1, stops):
            xdist = coords[0, i] - coords[0, j]
            ydist = coords[1, i] - coords[1, j]
            distMat[i, j] = math.sqrt(xdist**2 + ydist**2)
    # add the transpose to make it symmetric
    distMat = distMat + distMat.transpose()
    return distMat
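A quick check of the function above with three points; it assumes DistanceMatrix and its module-level imports (scipy, math) are in scope, and that coordinates are passed as a 2 x N array with the x row first:

import numpy as np

coords = np.array([[0.0, 3.0, 0.0],   # x coordinates of three stops
                   [0.0, 0.0, 4.0]])  # y coordinates of three stops
D = DistanceMatrix(coords)            # the function defined above
print(D)
# [[0. 3. 4.]
#  [3. 0. 5.]
#  [4. 5. 0.]]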
Project: HistoricalMap    Author: lennepkade    | Project source | File source
def compute_confusion_matrix(self,yp,yr):
        ''' 
        Compute the confusion matrix
        '''
        # Initialization
        n = yp.size
        C=int(yr.max())
        self.confusion_matrix=sp.zeros((C,C))

        # Compute confusion matrix
        for i in range(n):
            self.confusion_matrix[yp[i].astype(int)-1,yr[i].astype(int)-1] +=1

        # Compute overall accuracy
        self.OA=sp.sum(sp.diag(self.confusion_matrix))/n

        # Compute Kappa
        nl = sp.sum(self.confusion_matrix,axis=1)
        nc = sp.sum(self.confusion_matrix,axis=0)
        self.Kappa = ((n**2)*self.OA - sp.sum(nc*nl))/(n**2-sp.sum(nc*nl))

        # TBD: variance of the Kappa
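A toy sanity check of the confusion-matrix, overall-accuracy and Kappa computation above, inlined with NumPy (labels are assumed to be 1-based, as the -1 indexing in the code implies):

import numpy as np

yp = np.array([1, 2, 2, 1, 1])   # predicted labels
yr = np.array([1, 2, 1, 1, 2])   # reference labels
C = int(yr.max())
cm = np.zeros((C, C))
for i in range(yp.size):
    cm[int(yp[i]) - 1, int(yr[i]) - 1] += 1
OA = np.sum(np.diag(cm)) / yp.size
nl, nc = cm.sum(axis=1), cm.sum(axis=0)
Kappa = (yp.size**2 * OA - np.sum(nc * nl)) / (yp.size**2 - np.sum(nc * nl))
print(cm)                 # [[2. 1.]
                          #  [1. 1.]]
print(OA, round(Kappa, 3))  # 0.6 0.167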
Project: glmnet_py    Author: hanfang    | Project source | File source
def glmnet_softmax(x):
    d = x.shape
    nas = scipy.any(scipy.isnan(x), axis = 1)
    if scipy.any(nas):
        pclass = scipy.zeros([d[0], 1])*scipy.NaN
        if scipy.sum(nas) < d[0]:
            pclass2 = glmnet_softmax(x[~nas, :])
            pclass[~nas] = pclass2
            result = pclass
    else:
        maxdist = x[:, 1]
        pclass = scipy.ones([d[0], 1])
        for i in range(1, d[1], 1):
            t = x[:, i] > maxdist
            pclass[t] = i
            maxdist[t] = x[t, i]
        result = pclass

    return(result)    
#=========================
Project: glmnet_py    Author: hanfang    | Project source | File source
def softmax(x, gap = False):
   d = x.shape
   maxdist = x[:, 0]
   pclass = scipy.zeros([d[0], 1], dtype = scipy.integer)
   for i in range(1, d[1], 1):
       l = x[:, i] > maxdist
       pclass[l] = i
       maxdist[l] = x[l, i]
   if gap == True:
       x = scipy.absolute(maxdist - x)
       x[0:d[0], pclass] = x*scipy.ones([d[1], d[1]])
       #gaps = pmin(x)# not sure what this means; gap is never called with True
       raise ValueError('gap = True is not implemented yet')

   result = dict()
   if gap == True:
       result['pclass'] = pclass
       #result['gaps'] = gaps
       raise ValueError('gap = True is not implemented yet')
   else:
       result['pclass'] = pclass;

   return(result)
# end of softmax
# =========================================
Project: glmnet_py    Author: hanfang    | Project source | File source
def cvcompute(mat, weights, foldid, nlams):
    if len(weights.shape) > 1:
        weights = scipy.reshape(weights, [weights.shape[0], ])
    wisum = scipy.bincount(foldid, weights = weights)
    nfolds = scipy.amax(foldid) + 1
    outmat = scipy.ones([nfolds, mat.shape[1]])*scipy.NaN
    good = scipy.zeros([nfolds, mat.shape[1]])
    mat[scipy.isinf(mat)] = scipy.NaN
    for i in range(nfolds):
        tf = foldid == i
        mati = mat[tf, ]
        wi = weights[tf, ]
        outmat[i, :] = wtmean(mati, wi)
        good[i, 0:nlams[i]] = 1
    N = scipy.sum(good, axis = 0)
    cvcpt = dict()
    cvcpt['cvraw'] = outmat
    cvcpt['weights'] = wisum
    cvcpt['N'] = N

    return(cvcpt)

# end of cvcompute
#=========================
Project: pyssp    Author: shunsukeaihara    | Project source | File source
def _reshape_signal(self,sindex,kindex,ssignal,ksignal):
        def reshape(signal,siglen,winsize):
            length =(siglen/winsize+1)*winsize
            ret=sp.zeros(length, sp.float32)
            ret[0:siglen] = signal
            return ret
        slen = len(ssignal)-sindex
        klen = len(ksignal)-kindex
        length = 0
        if slen>klen:
            length = klen
        else:
            length = slen
        ssignal=reshape(ssignal[sindex:sindex+length],length,self._winsize)
        ksignal=reshape(ksignal[kindex:kindex+length],length,self._winsize)
        return ssignal,ksignal
Project: Mendelssohn    Author: diggerdu    | Project source | File source
def istft(X, scale = 1, overlap=4):   
    fftsize=(X.shape[1]-1)*2
    hop = fftsize / overlap
    w = scipy.hanning(fftsize+1)[:-1]
    x = scipy.zeros(X.shape[0]*hop)
    wsum = scipy.zeros(X.shape[0]*hop) 
    for n,i in enumerate(range(0, len(x)-fftsize, hop)): 
        x[i:i+fftsize] += scipy.real(np.fft.irfft(X[n])) * w   # overlap-add
        wsum[i:i+fftsize] += w ** 2.
    pos = wsum != 0
    x[pos] /= wsum[pos]
    x = x * scale
    return x.astype(np.int16)
Project: dzetsaka    Author: lennepkade    | Project source | File source
def compute_confusion_matrix(self,yp,yr):
        ''' 
        Compute the confusion matrix
        '''
        # Initialization
        n = yp.size
        C=int(yr.max())
        self.confusion_matrix=sp.zeros((C,C))

        # Compute confusion matrix
        for i in range(n):
            self.confusion_matrix[yp[i].astype(int)-1,yr[i].astype(int)-1] +=1

        # Compute overall accuracy
        self.OA=sp.sum(sp.diag(self.confusion_matrix))/n

        # Compute Kappa
        nl = sp.sum(self.confusion_matrix,axis=1)
        nc = sp.sum(self.confusion_matrix,axis=0)
        self.Kappa = ((n**2)*self.OA - sp.sum(nc*nl))/(n**2-sp.sum(nc*nl))

        # TBD: variance of the Kappa
Project: Defect-Prediction    Author: Jorba123    | Project source | File source
def visulize_matches(matches, k2, k1, img2, img1):
    """ Visualize SIFT keypoint matches."""

    import scipy as sp
    img2 = cv.cvtColor(img2, cv.COLOR_GRAY2BGR)
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    view = sp.zeros((max(h1, h2), w1 + w2, 3), sp.uint8)
    view[:h1, :w1, :] = img1  
    view[:h2, w1:, :] = img2
    view[:, :, 1] = view[:, :, 0]  
    view[:, :, 2] = view[:, :, 0]

    for m in matches:
        m = m[0]
        # draw the keypoints
        # print m.queryIdx, m.trainIdx, m.distance
        color = tuple([sp.random.randint(0, 255) for _ in xrange(3)])
        pt1 = (int(k1[m.queryIdx].pt[0]), int(k1[m.queryIdx].pt[1]))
        pt2 = (int(k2[m.trainIdx].pt[0] + w1), int(k2[m.trainIdx].pt[1]))

        cv.line(view, pt1, pt2, color)
    return view
Project: house-price-map    Author: andyljones    | Project source | File source
def get_array(coords, values):
    lon_res, lat_res = get_resolution()    

    indices = get_grid_indices(coords)
    indices['values'] = values
    indices = indices.groupby(['x', 'y'])['values'].agg(['median', 'count']).reset_index()

    x_size = int((LONDON_LONS[1] - LONDON_LONS[0])/lon_res) + 1    
    y_size = int((LONDON_LATS[1] - LONDON_LATS[0])/lat_res) + 1

    x_mask = (indices['x'] < 0) | (indices['x'] >= x_size)
    y_mask = (indices['y'] < 0) | (indices['y'] >= y_size)
    mask = ~(x_mask | y_mask)
    indices = indices.loc[mask]

    arr = sp.nan*sp.zeros((x_size, y_size))
    arr[indices['x'].values, indices['y'].values] = indices['median'].values

    count_arr = sp.zeros((x_size, y_size)) 
    count_arr[indices['x'].values, indices['y'].values] = indices['count'].values

    return arr, count_arr
Project: RFCN    Author: zengxianyu    | Project source | File source
def __MR_W_D_matrix(self,img,labels):
        s = sp.amax(labels)+1
        vect = self.__MR_superpixel_mean_vector(img,labels)

        adj = self.__MR_get_adj_loop(labels)

        W = sp.spatial.distance.squareform(sp.spatial.distance.pdist(vect))

        W = sp.exp(-1*W / self.weight_parameters['delta'])
        W[adj.astype(np.bool)] = 0


        D = sp.zeros((s,s)).astype(float)
        for i in range(s):
            D[i, i] = sp.sum(W[i])

        return W,D
Project: RFCN    Author: zengxianyu    | Project source | File source
def __MR_boundary_indictor(self,labels):
        s = sp.amax(labels)+1
        up_indictor = (sp.zeros((s,1))).astype(float)
        right_indictor = (sp.zeros((s,1))).astype(float)
        low_indictor = (sp.zeros((s,1))).astype(float)
        left_indictor = (sp.zeros((s,1))).astype(float)

        upper_ids = sp.unique(labels[0,:]).astype(int)
        right_ids = sp.unique(labels[:,labels.shape[1]-1]).astype(int)
        low_ids = sp.unique(labels[labels.shape[0]-1,:]).astype(int)
        left_ids = sp.unique(labels[:,0]).astype(int)

        up_indictor[upper_ids] = 1.0
        right_indictor[right_ids] = 1.0
        low_indictor[low_ids] = 1.0
        left_indictor[left_ids] = 1.0

        return up_indictor,right_indictor,low_indictor,left_indictor
Project: astroEMPEROR    Author: ReddTea    | Project source | File source
def alt_results(self, samples, kplanets):
        titles = sp.array(["Amplitude","Period","Longitude", "Phase","Eccentricity", 'Acceleration', 'Jitter', 'Offset', 'MACoefficient', 'MATimescale', 'Stellar Activity'])
        namen = sp.array([])
        ndim = kplanets * 5 + self.nins*2*(self.MOAV+1) + self.totcornum + 1

        RESU = sp.zeros((ndim, 5))
        for k in range(kplanets):
            namen = sp.append(namen, [titles[i] + '_'+str(k) for i in range(5)])
        namen = sp.append(namen, titles[5])  # for acc
        for i in range(self.nins):
            namen = sp.append(namen, [titles[ii] + '_'+str(i+1) for ii in sp.arange(2)+6])
            for c in range(self.MOAV):
                namen = sp.append(namen, [titles[ii] + '_'+str(i+1) + '_'+str(c+1) for ii in sp.arange(2)+8])
        for h in range(self.totcornum):
            namen = sp.append(namen, titles[-1]+'_'+str(h+1))

        alt_res = map(lambda v: (v[2], v[3]-v[2], v[2]-v[1], v[4]-v[2], v[2]-v[0]),
                      zip(*np.percentile(samples, [2, 16, 50, 84, 98], axis=0)))
        logdat = '\nAlternative results with uncertainties based on the 2nd, 16th, 50th, 84th and 98th percentiles of the samples in the marginalized distributions'
        logdat += '\nFormat is like median +- 1-sigma, +- 2-sigma'
        for res in range(ndim):
            logdat += '\n'+namen[res]+'     : '+str(alt_res[res][0])+' +- '+str(alt_res[res][1:3]) +'    2%   +- '+str(alt_res[res][3:5])
            RESU[res] = sp.percentile(samples, [2, 16, 50, 84, 98], axis=0)[:, res]
        print(logdat)
        return RESU
Project: SLIC_cityscapes    Author: wpqmanu    | Project source | File source
def resultimg(self, centers):
        print "show result"
        result = scipy.zeros(self.img.shape[:2], scipy.uint8)
        width, height = result.shape[:2]
        if len(result.shape)>2:
            color_channels=result.shape[2]
        else:
            color_channels=1
        colors = [scipy.array([int(random.uniform(0, 255)) for i in xrange(1)]) for j in xrange(len(centers))]
        for x in xrange(width):
            for y in xrange(height):
                result[x, y] = colors[self.assignedindex[x][y]]

        # cv2.imshow("result", result)
        # cv2.waitKey(10)
        cv2.imwrite(os.path.join(self.result_dir,self.filename+'_superpixel.png'), result)
Project: SLIC_cityscapes    Author: wpqmanu    | Project source | File source
def update(self, centers):
        # sums = [scipy.zeros(5) for i in range(len(centers))]
        # nums = [0 for i in range(len(centers))]
        # width, height = self.img.shape[:2]
        print "E step"
        new_centers=[]
        nan_record=[]

        for i in xrange(len(centers)):
            current_region=self.xylab[self.assignedindex == i]
            if current_region.size>0: #non-empty region
                new_centers.append(scipy.mean(current_region, 0))
            else: # empty region
                nan_record.append(i)

        # after the full nan_record list is built, update the assignment index (eliminate those indexes in reverse order)
        for nan_value in nan_record[::-1]:
            self.assignedindex[self.assignedindex>nan_value]=self.assignedindex[self.assignedindex>nan_value]-1


        for new_center_index in range(len(new_centers)):
            # print new_center_index
            new_centers[new_center_index][2:]=self.labimg[math.floor(new_centers[new_center_index][0])][math.floor(new_centers[new_center_index][1])]

        return new_centers,nan_record
Project: code    Author: ActiveState    | Project source | File source
def data(self, size=20):
        """Create some fake data in a dataframe"""
        numpy.random.seed(0)
        random.seed(0)
        x = scipy.rand(size)
        M = scipy.zeros([size,size])
        for i in range(size):
            for j in range(size): M[i,j] = abs(x[i] - x[j])
        df = pandas.DataFrame(M, index=[names.get_last_name() for _ in range(size)],
                                 columns=[names.get_first_name() for _ in range(size)])
        df['Mary']['Day'] = 1.5
        df['Issac']['Day'] = 1.0
        return df
Project: HistoricalMap    Author: lennepkade    | Project source | File source
def predict(tau,model,xT,yT):
    err = sp.zeros(tau.size)
    for j,t in enumerate(tau):
        yp = model.predict(xT,tau=t)[0]
        eq = sp.where(yp.ravel()==yT.ravel())[0]
        err[j] = eq.size*100.0/yT.size
    return err
Project: HistoricalMap    Author: lennepkade    | Project source | File source
def cross_validation(self,x,y,tau,v=5):
        ''' 
        Function that computes the cross validation accuracy for the value tau of the regularization
        Input:
            x : the training samples
            y : the labels
            tau : a range of values to be tested
            v : the number of fold
        Output:
            err : the estimated error with cross validation for all tau's value
        '''
        ## Initialization
        ns = x.shape[0]     # Number of samples
        np = tau.size       # Number of parameters to test
        cv = CV()           # Initialization of the indices for the cross validation
        cv.split_data_class(y)
        err = sp.zeros(np)  # Initialization of the errors

        ## Create GMM model for each fold
        model_cv = []
        for i in range(v):
            model_cv.append(GMMR())
            model_cv[i].learn(x[cv.it[i],:], y[cv.it[i]])

        ## Initialization of the pool of processes
        pool = mp.Pool()
        processes = [pool.apply_async(predict,args=(tau,model_cv[i],x[cv.iT[i],:],y[cv.iT[i]])) for i in range(v)]
        pool.close()
        pool.join()
        for p in processes:
            err += p.get()
        err /= v

        ## Free memory        
        for model in model_cv:
            del model
        del processes,pool,model_cv

        return tau[err.argmax()],err
Project: nimo    Author: wolfram2012    | Project source | File source
def asStackBW(self, size=None):
        '''
        Outputs an image buffer as a 3D numpy array ("stack") of grayscale images.
        @param size: A tuple (w,h) indicating the output size of each frame.
        If None, then the size of the first image in the buffer will be used.
        @return: a 3D array (stack) of the gray scale version of the images
        in the buffer. The dimensions of the stack are (N,w,h), where N is
        the number of images (buffer size), w and h are the width and height
        of each image.        
        '''
        if size==None:
            img0 = self[0]        
            (w,h) = img0.size
        else:
            (w,h) = size

        f = self.getCount()
        stack = sp.zeros((f,w,h))
        for i,img in enumerate(self._data):
            #if img is not (w,h) in size, then resize first
            sz = img.size
            if (w,h) != sz:
                img2 = img.resize((w,h))
                mat = img2.asMatrix2D()
            else:
                mat = img.asMatrix2D()
            stack[i,:,:] = mat

        return stack
Project: glmnet_py    Author: hanfang    | Project source | File source
def auc_mat(y, prob, weights = None):
    if weights == None or len(weights) == 0:
        weights = scipy.ones([y.shape[0], 1])
    wweights = weights*y
    wweights = wweights.flatten()
    wweights = scipy.reshape(wweights, [1, wweights.size])
    ny= y.shape[0]
    a = scipy.zeros([ny, 1])
    b = scipy.ones([ny, 1])
    yy = scipy.vstack((a, b))
    pprob = scipy.vstack((prob,prob))
    result = auc(yy, pprob, wweights)
    return(result)
#=========================
Project: glmnet_py    Author: hanfang    | Project source | File source
def lambda_interp(lambdau, s):
# lambda is the index sequence that is produced by the model
# s is the new vector at which evaluations are required.
# the value is a vector of left and right indices, and a vector of fractions.
# the new values are interpolated between the two using the fraction
# Note: lambda decreases. you take:
# sfrac*left + (1-sfrac)*right
    if len(lambdau) == 1:
        nums = len(s)
        left = scipy.zeros([nums, 1], dtype = scipy.integer)
        right = left
        sfrac = scipy.zeros([nums, 1], dtype = scipy.float64)
    else:
        s[s > scipy.amax(lambdau)] = scipy.amax(lambdau)
        s[s < scipy.amin(lambdau)] = scipy.amin(lambdau)
        k = len(lambdau)
        sfrac = (lambdau[0] - s)/(lambdau[0] - lambdau[k - 1])
        lambdau = (lambdau[0] - lambdau)/(lambdau[0] - lambdau[k - 1]) 
        coord = scipy.interpolate.interp1d(lambdau, range(k))(sfrac)
        left = scipy.floor(coord).astype(scipy.integer, copy = False)
        right = scipy.ceil(coord).astype(scipy.integer, copy = False)
        #
        tf = left != right
        sfrac[tf] = (sfrac[tf] - lambdau[right[tf]])/(lambdau[left[tf]] - lambdau[right[tf]])
        sfrac[~tf] = 1.0
        #if left != right:
        #    sfrac = (sfrac - lambdau[right])/(lambdau[left] - lambdau[right])
        #else:
        #    sfrac[left == right] = 1.0

    result = dict()    
    result['left'] = left
    result['right'] = right
    result['frac'] = sfrac

    return(result)
# end of lambda_interp    
# =========================================
Project: Movie-Success-Predictor    Author: Blueteak    | Project source | File source
def create_input(movie_info):
    # don't want to include movie_id, title, country in the prediction
    SKIP = 3
    WIDTH = len(movie_info[0]) - SKIP
    X = scipy.zeros((len(movie_info), WIDTH))
    for i in range(0, len(movie_info)):
        for j in range(SKIP, WIDTH):
            try:
                X[i, j-SKIP] = movie_info[i][j] if movie_info[i][j] != '' else 0
            except Exception:
                pass
    return X
Project: Movie-Success-Predictor    Author: Blueteak    | Project source | File source
def create_output(movie_info):
    Y = scipy.zeros(len(movie_info))
    for i in range(0, len(movie_info)):
        gross = movie_info[i][15]
        if gross > 1000000:
            Y[i] = 1
    print 'Number of successful movies', sum(Y)
    return Y
Project: Movie-Success-Predictor    Author: Blueteak    | Project source | File source
def create_output_before_release(movie_info):
    Y = scipy.zeros(len(movie_info))
    for i in range(0, len(movie_info)):
        gross = movie_info[i][14]
        if gross > 1000000:
            Y[i] = 1
    print 'Number of successful movies', sum(Y)
    return Y
Project: pyssp    Author: shunsukeaihara    | Project source | File source
def subtruction(ssignal,ksignal,window,winsize,method):
    nf = len(ssignal)/(winsize/2) - 1
    out=sp.zeros(len(ssignal),sp.float32)
    for no in xrange(nf):
        s = get_frame(ssignal, winsize, no)
        k = get_frame(ksignal, winsize, no)
        add_signal(out, method.compute(s,k), winsize, no)
    return out
Project: pyssp    Author: shunsukeaihara    | Project source | File source
def fin(size,signal):
    fil = sp.zeros(size,sp.float32)
    for i in xrange(size):
        ratio=sp.log10((i+1)/float(size)*10+1.0)
        if ratio>1.0:
            ratio=1.0
        fil[i] = ratio
    return fil*signal
Project: pyssp    Author: shunsukeaihara    | Project source | File source
def fout(size,signal):
    fil = sp.zeros(size,sp.float32)
    for i in xrange(size):
        ratio = sp.log10((size-i)/float(size)*10+1.0)
        if ratio>1.0:
            ratio = 1.0
        fil[i] = ratio
    return fil*signal
Project: pyssp    Author: shunsukeaihara    | Project source | File source
def vad(vas,signal,winsize,window):
    out=sp.zeros(len(signal),sp.float32)
    for va in vas:
        for i in range(va[0],va[1]):
            add_signal(out,get_frame(signal, winsize, i)*window,winsize,i)
    for va in vas:
        out[(va[0])*winsize/2:(va[0]+4)*winsize/2] = fin(winsize*2,out[(va[0])*winsize/2:(va[0]+4)*winsize/2])
        out[(va[1]-4)*winsize/2:(va[1])*winsize/2] = fout(winsize*2,out[(va[1]-4)*winsize/2:(va[1])*winsize/2])
    return out
Project: pyssp    Author: shunsukeaihara    | Project source | File source
def read(fname,winsize):
    if fname =="-":
        wf=wave.open(sys.stdin,'rb')
        n=wf.getnframes()
        str=wf.readframes(n)
        params = ((wf.getnchannels(), wf.getsampwidth(),
                   wf.getframerate(), wf.getnframes(),
                   wf.getcomptype(), wf.getcompname()))
        siglen=((int )(len(str)/2/winsize) + 1) * winsize
        signal=sp.zeros(siglen, sp.float32)
        signal[0:len(str)/2] = sp.float32(sp.fromstring(str,sp.int16))/32767.0
        return signal,params
    else:
        return read_signal(fname,winsize)
Project: pyssp    Author: shunsukeaihara    | Project source | File source
def noise_reduction(signal,params,winsize,window,ss,ntime):
    out=sp.zeros(len(signal),sp.float32)
    n_pow = compute_avgpowerspectrum(signal[0:winsize*int(params[2] /float(winsize)/(1000.0/ntime))],winsize,window)#maybe 300ms
    nf = len(signal)/(winsize/2) - 1
    for no in xrange(nf):
        s = get_frame(signal, winsize, no)
        add_signal(out, ss.compute_by_noise_pow(s,n_pow), winsize, no)
    return out