Python numpy module: size() example source code

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.size().
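As a quick reference before the project examples: numpy.size(a) returns the total number of elements in a, and numpy.size(a, axis) returns the length along a single axis. A minimal sketch:

import numpy as np

a = np.arange(12).reshape(3, 4)
print(np.size(a))     # 12 -- total number of elements
print(np.size(a, 0))  # 3  -- length along axis 0 (rows)
print(np.size(a, 1))  # 4  -- length along axis 1 (columns)
print(np.size(5))     # 1  -- a scalar counts as one element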

Project: pycma    Author: CMA-ES    | Project Source | File Source
def __imul__(self, factor):
        """define ``self *= factor``.

        As a shortcut for::

            self = self.__imul__(factor)

        """
        try:
            if factor == 1:
                return self
        except Exception: pass
        try:
            if (np.size(factor) == np.size(self.scaling) and
                    all(factor == 1)):
                return self
        except Exception: pass
        if self.is_identity and np.size(self.scaling) == 1:
            self.scaling = np.ones(np.size(factor))
        self.is_identity = False
        self.scaling *= factor
        self.dim = np.size(self.scaling)
        return self
Project: CausalGAN    Author: mkocaoglu    | Project Source | File Source
def fixed_label_diversity(model, config,step=''):
    sample_dir=make_sample_dir(model)
    str_step=str(step) or guess_model_step(model)

    N = 64        # samples per image
    n_combo = 5   # number of label combinations

    #0,1 label combinations
    fixed_labels=model.attr.sample(n_combo)[model.cc.node_names]
    size=infer_grid_image_shape(N)

    for j, fx_label in enumerate(fixed_labels.values):
        fx_label=np.reshape(fx_label,[1,-1])
        fx_label=np.tile(fx_label,[N,1])
        do_dict={model.cc.labels: fx_label}

        images, feed_dict= sample(model, do_dict=do_dict)
        fx_file=os.path.join(sample_dir, str_step+'fxlab'+str(j)+'.pdf')
        save_figure_images(model.model_type,images['G'],fx_file,size=size)

    #which image is what label
    fixed_labels=fixed_labels.reset_index(drop=True)
    fixed_labels.to_csv(os.path.join(sample_dir,str_step+'fxlab'+'.csv'))
Project: bayestsa    Author: thalesians    | Project Source | File Source
def unscentedTransform(X, Wm, Wc, f):
    Y = None
    Ymean = None
    fdim = None
    N = np.shape(X)[1]
    for j in range(0,N):
        fImage = f(X[:,j])
        if Y is None:
            fdim = np.size(fImage)
            Y = np.zeros((fdim, np.shape(X)[1]))
            Ymean = np.zeros(fdim)
        Y[:,j] = fImage
        Ymean += Wm[j] * Y[:,j]
    Ycov = np.zeros((fdim, fdim))
    for j in range(0, N):
        meanAdjustedYj = Y[:,j] - Ymean
        Ycov += np.outer(Wc[j] * meanAdjustedYj, meanAdjustedYj)
    return Y, Ymean, Ycov
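A minimal usage sketch (not from the project): sigma points are stored as columns of X, and the weights here are uniform for illustration rather than the usual UKF weights.

import numpy as np

n, N = 2, 5                 # state dimension, number of sigma points
X = np.random.randn(n, N)   # sigma points as columns
Wm = np.full(N, 1.0 / N)    # mean weights
Wc = np.full(N, 1.0 / N)    # covariance weights

def f(x):
    # hypothetical nonlinear map from R^2 to R^2
    return np.array([x[0] ** 2, np.sin(x[1])])

Y, Ymean, Ycov = unscentedTransform(X, Wm, Wc, f)
print(Ymean.shape, Ycov.shape)  # (2,) (2, 2)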
Project: pynufft    Author: jyhmiinlin    | Project Source | File Source
def nufft_T(N, J, K, alpha, beta):
    '''
     Equations (29) and (26) in Fessler's paper:
     create the overlapping matrix CSSC (a diagonally dominant matrix)
     of J points,
     and then find the pseudo-inverse of CSSC '''

#     import scipy.linalg
    L = numpy.size(alpha) - 1
#     print('L = ', L, 'J = ',J, 'a b', alpha,beta )
    cssc = numpy.zeros((J, J))
    [j1, j2] = numpy.mgrid[1:J + 1, 1:J + 1]
    overlapping_mat = j2 - j1

    for l1 in range(-L, L + 1):
        for l2 in range(-L, L + 1):
            alf1 = alpha[abs(l1)]
#             if l1 < 0: alf1 = numpy.conj(alf1)
            alf2 = alpha[abs(l2)]
#             if l2 < 0: alf2 = numpy.conj(alf2)
            tmp = overlapping_mat + beta * (l1 - l2)

            tmp = dirichlet(1.0 * tmp / (1.0 * K / N))
            cssc = cssc + alf1 * numpy.conj(alf2) * tmp
    return mat_inv(cssc)
Project: pynufft    Author: jyhmiinlin    | Project Source | File Source
def nufft_r(om, N, J, K, alpha, beta):
    '''
    equation (30) of Fessler's paper

    '''

    M = numpy.size(om)  # 1D size
    gam = 2.0 * numpy.pi / (K * 1.0)
    nufft_offset0 = nufft_offset(om, J, K)  # om/gam -  nufft_offset , [M,1]
    dk = 1.0 * om / gam - nufft_offset0  # om/gam -  nufft_offset , [M,1]
    arg = outer_sum(-numpy.arange(1, J + 1) * 1.0, dk)
    L = numpy.size(alpha) - 1
#     print('alpha',alpha)
    rr = numpy.zeros((J, M), dtype=numpy.float32)
    rr = iterate_l1(L, alpha, arg, beta, K, N, rr)
    return (rr, arg)
Project: pynufft    Author: jyhmiinlin    | Project Source | File Source
def test_multidimension():
    for ndims in range(1, 6):
        Nd= ()
        Kd=()
        Jd=()
        om = numpy.random.randn(2,ndims)
        for pp in range(0, ndims):
            Nd += (128,)
            Kd += (256,)
            Jd += (4,)
#         Nd =tuple([slice(0, 16) for ss in range(0, ndims)])  # image size
#         print('setting image dimension Nd...', Nd)
#         Kd = tuple([slice(0, 32) for ss in range(0, ndims)])  # k-space size
#         print('setting spectrum dimension Kd...', Kd)
#         Jd = tuple([slice(0, 6) for ss in range(0, ndims)])   # interpolation size
#         print('setting interpolation size Jd...', Jd)
        NufftObj = NUFFT()
        NufftObj.plan(om, Nd, Kd, Jd)
        print(ndims,'-dimensional NUFFT created!')
#     y = NufftObj.forward(image)
Project: pynufft    Author: jyhmiinlin    | Project Source | File Source
def nufft_T(N, J, K, alpha, beta):
    '''
     Equations (29) and (26) in Fessler and Sutton 2003.
     Create the overlapping matrix CSSC (a diagonally dominant matrix)
     of J points and find the pseudo-inverse of CSSC '''

#     import scipy.linalg
    L = numpy.size(alpha) - 1
#     print('L = ', L, 'J = ',J, 'a b', alpha,beta )
    cssc = numpy.zeros((J, J))
    [j1, j2] = numpy.mgrid[1:J + 1, 1:J + 1]
    overlapping_mat = j2 - j1
    for l1 in range(-L, L + 1):
        for l2 in range(-L, L + 1):
            alf1 = alpha[abs(l1)]
#             if l1 < 0: alf1 = numpy.conj(alf1)
            alf2 = alpha[abs(l2)]
#             if l2 < 0: alf2 = numpy.conj(alf2)
            tmp = overlapping_mat + beta * (l1 - l2)

            tmp = dirichlet(1.0 * tmp / (1.0 * K / N))
            cssc = cssc + alf1 * alf2 * tmp

    return mat_inv(cssc)
Project: em_examples    Author: geoscixyz    | Project Source | File Source
def computePolarVecs(self,karg=False):

        N = len(self.times)
        L = np.reshape(self.L,(3,N))

        if karg is False:
            A = self.computeRotMatrix()
        elif np.size(karg) == 3:
            A = self.computeRotMatrix(karg)
        elif np.size(karg) == 9:
            A = karg

        q = np.zeros((6,N))

        for pp in range(0,N):

            Lpp = np.diag(L[:,pp])
            p = np.dot(A,np.dot(Lpp,A.T))
            q[:,pp] = np.r_[p[:,0],p[[1,2],1],p[2,2]]

        return q
Project: em_examples    Author: geoscixyz    | Project Source | File Source
def computeMisfit(self,r0):

        assert self.q is not None, "Must have current estimate of polarizations"
        assert self.dunc is not None, "Must have set uncertainties"
        assert self.dobs is not None, "Must have observed data"

        dunc = self.dunc
        dobs = self.dobs
        q = self.q

        Hp = self.computeHp(r0=r0,update=False)
        Brx = self.computeBrx(r0=r0,update=False)
        P = self.computeP(Hp,Brx)

        N = np.size(dobs)

        dpre = np.dot(P,q)

        v = mkvc((dpre-dobs)/dunc)

        Phi = np.dot(v.T,v)

        return Phi/N
Project: em_examples    Author: geoscixyz    | Project Source | File Source
def computeVecFcn(self,r0):

        assert self.q is not None, "Must have current estimate of polarizations"
        assert self.dunc is not None, "Must have set uncertainties"
        assert self.dobs is not None, "Must have observed data"

        dunc = self.dunc
        dobs = self.dobs
        q = self.q

        Hp = self.computeHp(r0=r0,update=False)
        Brx = self.computeBrx(r0=r0,update=False)
        P = self.computeP(Hp,Brx)

        N = np.size(dobs)

        dpre = np.dot(P,q)

        v = mkvc((dpre-dobs)/dunc)

        return v
Project: em_examples    Author: geoscixyz    | Project Source | File Source
def computeMisfit(self,r0):

        assert self.q is not None, "Must have current estimate of polarizations"
        assert self.dunc is not None, "Must have set uncertainties"
        assert self.dobs is not None, "Must have observed data"

        dunc = self.dunc
        dobs = self.dobs
        q = self.q

        Hp = self.computeHp(r0=r0,update=False)
        Brx = self.computeBrx(r0=r0,update=False)
        P = self.computeP(Hp,Brx)

        N = np.size(dobs)

        dpre = np.dot(P,q)

        v = mkvc((dpre-dobs)/dunc)

        Phi = np.dot(v.T,v)

        return Phi/N
Project: em_examples    Author: geoscixyz    | Project Source | File Source
def computeVecFcn(self,r0):

        assert self.q is not None, "Must have current estimate of polarizations"
        assert self.dunc is not None, "Must have set uncertainties"
        assert self.dobs is not None, "Must have observed data"

        dunc = self.dunc
        dobs = self.dobs
        q = self.q

        Hp = self.computeHp(r0=r0,update=False)
        Brx = self.computeBrx(r0=r0,update=False)
        P = self.computeP(Hp,Brx)

        N = np.size(dobs)

        dpre = np.dot(P,q)

        v = mkvc((dpre-dobs)/dunc)

        return v
Project: em_examples    Author: geoscixyz    | Project Source | File Source
def get_dobs_dunc(self,dpre,FloorVal,Pct):

        # Floor is a fraction of the largest amplitude anomaly for the earliest time channel

        M = np.shape(dpre)[0]
        # Floor = np.zeros(np.shape(dpre))
        # Floor[0:M:3,:] = FloorVal*np.max(np.abs(dpre[0:M:3,:]))
        # Floor[1:M:3,:] = FloorVal*np.max(np.abs(dpre[1:M:3,:]))
        # Floor[2:M:3,:] = FloorVal*np.max(np.abs(dpre[2:M:3,:]))

        Floor = FloorVal*np.max(np.abs(dpre))*np.ones(np.shape(dpre))

        if len(Pct) == 1:
            dunc = Floor + Pct*np.abs(dpre)
        else:
            dunc = Floor
            for ii in range(0,3):
                dunc[ii:M:3] = dunc[ii:M:3] + Pct[ii]*np.abs(dpre[ii:M:3])

        dobs = dpre + dunc*np.random.normal(size=np.shape(dpre))

        self.dunc = dunc
        self.dobs = dobs

        return dobs,dunc
Project: em_examples    Author: geoscixyz    | Project Source | File Source
def computeMisfit(self,r0):

        assert self.q is not None, "Must have current estimate of polarizations"
        assert self.dunc is not None, "Must have set uncertainties"
        assert self.dobs is not None, "Must have observed data"

        dunc = self.dunc
        dobs = self.dobs
        q = self.q

        Hp = self.computeHp(r0=r0,update=False)
        Brx = self.computeBrx(r0=r0,update=False)
        P = self.computeP(Hp,Brx)

        N = np.size(dobs)

        dpre = np.dot(P,q)

        v = mkvc((dpre-dobs)/dunc)

        Phi = np.dot(v.T,v)

        return Phi/N
Project: em_examples    Author: geoscixyz    | Project Source | File Source
def computeVecFcn(self,r0):

        assert self.q is not None, "Must have current estimate of polarizations"
        assert self.dunc is not None, "Must have set uncertainties"
        assert self.dobs is not None, "Must have observed data"

        dunc = self.dunc
        dobs = self.dobs
        q = self.q

        Hp = self.computeHp(r0=r0,update=False)
        Brx = self.computeBrx(r0=r0,update=False)
        P = self.computeP(Hp,Brx)

        N = np.size(dobs)

        dpre = np.dot(P,q)

        v = mkvc((dpre-dobs)/dunc)

        return v
Project: radar    Author: amoose136    | Project Source | File Source
def setUp(self):
        self.f = np.ones(256, dtype=np.float32)
        self.ef = np.ones(self.f.size, dtype=bool)
        self.d = np.ones(128, dtype=np.float64)
        self.ed = np.ones(self.d.size, dtype=bool)
        # generate values for all permutation of 256bit simd vectors
        s = 0
        for i in range(32):
            self.f[s:s+8] = [i & 2**x for x in range(8)]
            self.ef[s:s+8] = [(i & 2**x) != 0 for x in range(8)]
            s += 8
        s = 0
        for i in range(16):
            self.d[s:s+4] = [i & 2**x for x in range(4)]
            self.ed[s:s+4] = [(i & 2**x) != 0 for x in range(4)]
            s += 4

        self.nf = self.f.copy()
        self.nd = self.d.copy()
        self.nf[self.ef] = np.nan
        self.nd[self.ed] = np.nan
Project: radar    Author: amoose136    | Project Source | File Source
def __getitem__(self, indx):
        """
        Get the index.

        """
        m = self._mask
        if isinstance(m[indx], ndarray):
            # Can happen when indx is a multi-dimensional field:
            # A = ma.masked_array(data=[([0,1],)], mask=[([True,
            #                     False],)], dtype=[("A", ">i2", (2,))])
            # x = A[0]; y = x["A"]; then y.mask["A"].size==2
            # and we can not say masked/unmasked.
            # The result is no longer mvoid!
            # See also issue #6724.
            return masked_array(
                data=self._data[indx], mask=m[indx],
                fill_value=self._fill_value[indx],
                hard_mask=self._hardmask)
        if m is not nomask and m[indx]:
            return masked
        return self._data[indx]
Project: snn4hrl    Author: florensacc    | Project Source | File Source
def plot_policy_learned(data_unpickle, color, fig_dir=None):
    #recover the policy
    poli = data_unpickle['policy']
    #range to plot it
    x = np.arange(-3,3,0.01)
    means = np.zeros(np.size(x))
    logstd = np.zeros(np.size(x))
    for i,s in enumerate(x):
        means[i] = poli.get_action(np.array((s,)))[1]['mean']
        logstd[i] = poli.get_action(np.array((s,)))[1]['log_std']
        # means[i] = poli.get_action(np.array([s,]))[1]['mean']
        # logstd[i] = poli.get_action(np.array([s,]))[1]['log_std']

    plt.plot(x, means, color=color, label = 'mean')
    plt.plot(x, logstd, color=color * 0.7, label = 'logstd')
    plt.legend(loc = 5)
    plt.title('Final policy')
    plt.xlabel('state')
    plt.ylabel('Action')
    if fig_dir:
        plt.savefig(os.path.join(fig_dir,'policy_learned'))
    else:
        print("No directory for saving plots")
Project: psp    Author: cmap    | Project Source | File Source
def distance_function(values, medians):
    """This function calculates the distance metric.
    N.B. Only uses the non-NaN values.

    dist = sum( (s - m)^2 )

    s is the vector of sample values
    m is the vector of probe medians

    Args:
        values (numpy array of floats)
        medians (numpy array of floats)
    Returns:
        dist (float)
    """
    non_nan_idx = ~np.isnan(values)
    assert np.sum(non_nan_idx) != 0, "All values in this sample are NaN!"

    non_nan_values = values[non_nan_idx]
    non_nan_medians = medians[non_nan_idx]
    dist = sum(np.square(non_nan_values - non_nan_medians))
    return dist

# tested #
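A minimal usage sketch with hypothetical inputs, showing that NaN entries are excluded from the sum:

import numpy as np

values = np.array([1.0, np.nan, 3.0])
medians = np.array([1.5, 2.0, 2.5])
print(distance_function(values, medians))  # (1.0-1.5)**2 + (3.0-2.5)**2 = 0.5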
Project: third_person_im    Author: bstadie    | Project Source | File Source
def update(self, es, function_values, **kwargs):
        """the first and second value in ``function_values``
        must reflect two mirrored solutions sampled
        in direction / in opposite direction of
        the previous mean shift, respectively.

        """
        # TODO: on the linear function, the two mirrored samples lead
        # to a sharp increase of condition of the covariance matrix.
        # They should not be used to update the covariance matrix,
        # if the step-size increases quickly. This should be fine with
        # negative updates though.
        if not self.initialized:
            self.initialize(es.N, es.opts)
        if 1 < 3:
            # use the ranking difference of the mirrors for adaptation
            # damp = 5 should be fine
            z = np.where(es.fit.idx == 1)[0][0] - np.where(es.fit.idx == 0)[0][0]
            z /= es.popsize - 1  # z in [-1, 1]
        self.s = (1 - self.sp.c) * self.s + self.sp.c * np.sign(z) * np.abs(z)**self.sp.z_exponent
        if self.s > 0:
            es.sigma *= exp(self.s / self.sp.dampup)
        else:
            es.sigma *= exp(self.s / self.sp.dampdown)
        #es.more_to_write.append(10**z)
Project: semantic-segmentation    Author: albertbuchard    | Project Source | File Source
def F1(tested, truth):
    tested = (tested-0.5)*2
    truth = (truth-0.5)*2
    truth[truth<=0] = -1.
    truth[truth>0] = 1.
    res  = tested+truth
    true_pos = np.size(np.where(res==2))/2.
    pos = np.size(np.where(truth ==1))/2.
    found_pos = np.size(np.where(tested ==1))/2.
    precision = true_pos/found_pos
    recall = true_pos/pos
    F1 = 2.*precision*recall/(precision+recall)
    return F1
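A minimal usage sketch with hypothetical 2-D binary masks (the np.where counting above assumes 2-D inputs, since it divides the index-tuple size by 2):

import numpy as np

pred = np.array([[1., 0.], [1., 1.]])    # predicted mask
truth = np.array([[1., 0.], [0., 1.]])   # ground-truth mask
print(F1(pred, truth))  # precision 2/3, recall 1.0 -> F1 = 0.8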





# PRE-PROCESSING FUNCTIONS 

# TRAINING SET FUNCTIONS
Project: semantic-segmentation    Author: albertbuchard    | Project Source | File Source
def mk_rotations(img):
    #
    #   DESCRIPTION
    #       This function creates 8 rotated images from an input image:
    #       4 rotations of the raw image and 4 rotations of its transpose.
    #
    #   INPUTS
    #       img: np.array
    #
    #   OUTPUTS
    #       img, img90, img180, img270, imgT, imgT90, imgT180, imgT270
    #
    #
    img90 = np.rot90(img)
    img180 = np.rot90(img,k=2)
    img270 = np.rot90(img,k=3)
    imgT = np.zeros(img.shape)
    if np.size(img.shape)>2:
        for i in range(3):
            imgT[:,:,i] =img[:,:,i].T
    else:
        imgT = img.T
    imgT90 = np.rot90(imgT)
    imgT180 = np.rot90(imgT, k=2)
    imgT270 = np.rot90(imgT, k=3)
    return img, img90, img180, img270, imgT, imgT90, imgT180,imgT270
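A minimal usage sketch with a hypothetical RGB image; the 8 outputs are the 4 rotations of the image and the 4 rotations of its transpose:

import numpy as np

img = np.random.rand(32, 32, 3)
rotations = mk_rotations(img)
print(len(rotations))      # 8
print(rotations[1].shape)  # (32, 32, 3) -- the 90-degree rotation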
Project: semantic-segmentation    Author: albertbuchard    | Project Source | File Source
def mk_rotations(img):
    ##INPUT:
    ##  img: a 3D RGB array
    ##OUTPUT
    ##  8 rotated and transposed versions of img

    img90 = np.rot90(img)
    img180 = np.rot90(img,k=2)
    img270 = np.rot90(img,k=3)
    imgT = np.zeros(img.shape)
    if np.size(img.shape)>2:
        for i in range(3):
            imgT[:,:,i] =img[:,:,i].T
    else:
        imgT = img.T
    imgT90 = np.rot90(imgT)
    imgT180 = np.rot90(imgT, k=2)
    imgT270 = np.rot90(imgT, k=3)
    return img, img90, img180, img270, imgT, imgT90, imgT180,imgT270

## Formats an image for saving
Project: speech_feature_extractor    Author: ZhihaoDU    | Project Source | File Source
def calc_stoi_from_spec(clean_spec, degraded_spec, analysis_len=30):
    freq_bins = np.size(clean_spec, 0)
    frames = np.size(clean_spec, 1)
    x = np.zeros((freq_bins, frames - analysis_len + 1, analysis_len), dtype=np.float32)
    y = np.zeros((freq_bins, frames - analysis_len + 1, analysis_len), dtype=np.float32)
    for j in range(0, freq_bins):
        for m in range(analysis_len - 1, frames, 1):
            x[j, m] = clean_spec[j, m - analysis_len + 1:m + 1]
            y[j, m] = degraded_spec[j, m - analysis_len + 1:m + 1]
            y[j, m] = np.minimum(np.linalg.norm(x[j,m,:])/np.linalg.norm(y[j,m,:])*y[j,m,:],
                                 (1.+np.power(10., 15./20.))*x[j,m,:])  # y is normalized and clipped
    x_mean = np.mean(x, axis=(0, 1))
    y_mean = np.mean(y, axis=(0, 1))
    score = 0.
    for j in range(0, freq_bins):
        for m in range(analysis_len - 1, frames, 1):
            score += np.dot(x[j, m, :] - x_mean, y[j, m, :] - y_mean) / \
                     (np.linalg.norm(x[j, m, :] - x_mean) * np.linalg.norm(y[j, m, :] - y_mean))
    score /= (freq_bins * analysis_len)
    return score
Project: speech_feature_extractor    Author: ZhihaoDU    | Project Source | File Source
def cochleagram_extractor(xx, sr, win_len, shift_len, channel_number, win_type):
    fcoefs, f = make_erb_filters(sr, channel_number, 50)
    fcoefs = np.flipud(fcoefs)
    xf = erb_frilter_bank(xx, fcoefs)

    if win_type == 'hanning':
        window = np.hanning(channel_number)
    elif win_type == 'hamming':
        window = np.hamming(channel_number)
    elif win_type == 'triangle':
        window = (1 - (np.abs(channel_number - 1 - 2 * np.arange(1, channel_number + 1, 1)) / (channel_number + 1)))
    else:
        window = np.ones(channel_number)
    window = window.reshape((channel_number, 1))

    xe = np.power(xf, 2.0)
    frames = 1 + ((np.size(xe, 1)-win_len) // shift_len)
    cochleagram = np.zeros((channel_number, frames))
    for i in range(frames):
        one_frame = np.multiply(xe[:, i*shift_len:i*shift_len+win_len], np.repeat(window, win_len, 1))
        cochleagram[:, i] = np.sqrt(np.mean(one_frame, 1))

    cochleagram = np.where(cochleagram == 0.0, np.finfo(float).eps, cochleagram)
    return cochleagram
Project: speech_feature_extractor    Author: ZhihaoDU    | Project Source | File Source
def postaud(x, fmax, fbtype=None):
    if fbtype is None:
        fbtype = 'bark'
    nbands = x.shape[0]
    nframes = x.shape[1]
    nfpts = nbands
    if fbtype == 'bark':
        bancfhz = bark2freq(np.linspace(0, freq2bark(fmax), nfpts))
    fsq = bancfhz * bancfhz
    ftmp = fsq + 1.6e5
    eql = ((fsq/ftmp)**2) * ((fsq + 1.44e6)/(fsq + 9.61e6))
    eql = eql.reshape(np.size(eql), 1)
    z = np.repeat(eql, nframes, axis=1) * x
    z = z ** (1./3.)
    y = np.vstack((z[1, :], z[1:nbands-1, :], z[nbands-2, :]))
    return y
Project: speech_feature_extractor    Author: ZhihaoDU    | Project Source | File Source
def lpc2cep(a, nout=None):
    nin = np.size(a, 0)
    ncol = np.size(a, 1)
    order = nin - 1
    if nout is None:
        nout = order + 1
    c = np.zeros((nout, ncol))
    c[0, :] = -1. * np.log(a[0, :])
    renormal_coef = np.reshape(a[0,:], (1, ncol))
    renormal_coef = np.repeat(renormal_coef, nin, axis=0)
    a = a / renormal_coef
    for n in range(1, nout):
        sumn = np.zeros(ncol)
        for m in range(1, n+1):
            sumn = sumn + (n-m) * a[m, :] * c[n-m, :]
        c[n, :] = -1. * (a[n, :] + 1. / n * sumn)
    return c
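A minimal usage sketch with hypothetical LPC coefficients: each column is one frame, and row 0 holds the (positive) gain term that feeds the log:

import numpy as np

a = np.array([[1.0, 1.0],    # a[0, :] must be positive
              [0.5, 0.3],
              [0.2, 0.1]])   # order-2 LPC, 2 frames
c = lpc2cep(a)
print(c.shape)  # (3, 2) -- nout defaults to order + 1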
Project: speech_feature_extractor    Author: ZhihaoDU    | Project Source | File Source
def lpc2cep(a, nout=None):
    nin = np.size(a, 0)
    ncol = np.size(a, 1)
    order = nin - 1
    if nout is None:
        nout = order + 1
    c = np.zeros((nout, ncol))
    c[0, :] = -1. * np.log(a[0, :])
    renormal_coef = np.reshape(a[0,:], (1, ncol))
    renormal_coef = np.repeat(renormal_coef, nin, axis=0)
    a = a / renormal_coef
    for n in range(1, nout):
        sumn = np.zeros(ncol)
        for m in range(1, n+1):
            sumn = sumn + (n-m) * a[m, :] * c[n-m, :]
        c[n, :] = -1. * (a[n, :] + 1. / n * sumn)
    return c
Project: brainiak    Author: brainiak    | Project Source | File Source
def _run_TR_from_scan_onsets(self, n_T, scan_onsets=None):
        if scan_onsets is None:
            # assume that all data are acquired within the same scan.
            n_run = 1
            run_TRs = np.array([n_T], dtype=int)
        else:
            # Each value in the scan_onsets tells the index at which
            # a new scan starts. For example, if n_T = 500, and
            # scan_onsets = [0,100,200,400], this means that the time points
            # of 0-99 are from the first scan, 100-199 are from the second,
            # 200-399 are from the third and 400-499 are from the fourth
            run_TRs = np.int32(np.diff(np.append(scan_onsets, n_T)))
            # delete run lengths of 0 in case of duplicates in scan_onsets.
            run_TRs = np.delete(run_TRs, np.where(run_TRs == 0))
            n_run = run_TRs.size
            logger.info('I infer that the number of volumes'
                        ' in each scan are: {}'.format(run_TRs))
        return run_TRs, n_run
Project: brainiak    Author: brainiak    | Project Source | File Source
def _sum_loglike_marginalized(self, L_vec, s2XTAcorrX, YTAcorrY_diag,
                                  sXTAcorrY, half_log_det_X0TAX0,
                                  log_weights, log_fixed_terms,
                                  l_idx, n_C, n_T, n_V, n_X0,
                                  n_grid, rank=None):
        sum_LL_total = 0
        sum_grad_L = np.zeros(np.size(l_idx[0]))
        for subj in range(len(YTAcorrY_diag)):
            LL_total, grad_L = self._loglike_marginalized(
                L_vec, s2XTAcorrX[subj], YTAcorrY_diag[subj],
                sXTAcorrY[subj], half_log_det_X0TAX0[subj], log_weights,
                log_fixed_terms[subj], l_idx, n_C, n_T[subj],
                n_V[subj], n_X0[subj], n_grid, rank)
            sum_LL_total += LL_total
            sum_grad_L += grad_L
        return sum_LL_total, sum_grad_L
Project: Neural-Network    Author: yzldw333    | Project Source | File Source
def unRollImagesForConv(self,index):
        '''Unroll the image at the given index for convolution:
        the result has width * height rows, each row holding one
        squareSize**2 * old_channel patch (one row per window position).
        '''
        i = index
        old_images = self.last_layer.images
        m,old_channel,old_height,old_width = old_images.shape
        newData = []
        #Process unroll the data
        for h in range(0,old_height-self.squareSize+1,self.stride):
            for w in range(0,old_width-self.squareSize+1,self.stride):
                tmp = []
                for c in range(old_channel):
                    tmp.append(old_images[i,c,h:h+self.squareSize,w:w+self.squareSize].reshape(1,self.squareSize**2))
                    # for each (h, w) position, gather the patch from every channel: old_channel * squareSize**2 values
                tmp = np.array(tmp).reshape(1,self.squareSize**2*old_channel)
                # reshape the unrolled patch into one row and append it to newData
                newData.append(tmp)
                # each sliding-window position contributes one row
        newData = np.array(newData).reshape(self.width*self.height,self.squareSize**2*old_channel)
        return newData
Project: Neural-Network    Author: yzldw333    | Project Source | File Source
def backward_compute(self):
        m = np.size(self.images,0)
        self.delta = self.delta.reshape(m,self.channel,self.height,self.width)
        newDelta = np.zeros([m,self.last_layer.channel,self.last_layer.height,self.last_layer.width])
        for i in range(m):
            for j in range(self.channel):
                for h in range(self.height):
                    for w in range(self.width):
                        tmpLoc = self.maxIndex[i,j,h*self.width+w]
                        relativeH = tmpLoc//self.squareSize
                        relativeW = tmpLoc - relativeH * self.squareSize
                        lastW = w*self.stride+relativeW
                        lastH = h*self.stride+relativeH
                        newDelta[i,j,lastH,lastW] += self.delta[i,j,h,w]
        self.last_layer.delta = newDelta
Project: DEPICT    Author: herandy    | Project Source | File Source
def bestMap(L1, L2):
    if len(L1) != len(L2):
        raise ValueError('size(L1) must equal size(L2)')

    Label1 = np.unique(L1)
    nClass1 = len(Label1)
    Label2 = np.unique(L2)
    nClass2 = len(Label2)

    nClass = max(nClass1, nClass2)
    G = np.zeros((nClass, nClass))
    for i in range(nClass1):
        for j in range(nClass2):
            G[i][j] = len(np.nonzero((L1 == Label1[i]) * (L2 == Label2[j]))[0])

    c = linear_assignment_.linear_assignment(-G.T)[:, 1]
    newL2 = np.zeros(len(L2))
    for i in range(nClass2):
        for j in np.nonzero(L2 == Label2[i])[0]:
            if len(Label1) > c[i]:
                newL2[j] = Label1[c[i]]

    return accuracy_score(L1, newL2)
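A minimal usage sketch with hypothetical labels: the cluster IDs are a permutation of the ground truth, so accuracy is 1.0 after the optimal relabeling. Note that the private sklearn.utils.linear_assignment_ module used above has been removed from recent scikit-learn releases; scipy.optimize.linear_sum_assignment computes the same matching.

import numpy as np

L1 = np.array([0, 0, 1, 1, 2, 2])  # ground-truth classes
L2 = np.array([2, 2, 0, 0, 1, 1])  # cluster IDs, permuted
print(bestMap(L1, L2))             # 1.0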
Project: SERT    Author: cvangysel    | Project Source | File Source
def entropy(pk, *args, **kwargs):
    """Proxy for scipy.stats.entropy, with normalized Shannon entropy."""
    if 'normalize' in kwargs:
        normalize = kwargs['normalize']
        del kwargs['normalize']
    else:
        normalize = False

    e = scipy.stats.entropy(pk, *args, **kwargs)

    if normalize:
        num_classes = np.size(pk)
        base = kwargs['base'] if 'base' in kwargs else None

        maximum_entropy = np.log(num_classes)
        if base:
            maximum_entropy /= np.log(base)

        e /= maximum_entropy

    return e
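A minimal usage sketch: with normalize=True, a uniform distribution scores 1.0 regardless of the number of classes.

import numpy as np

pk = np.array([0.25, 0.25, 0.25, 0.25])
print(entropy(pk))                  # log(4) ~= 1.386 nats
print(entropy(pk, normalize=True))  # 1.0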
Project: pyku    Author: dubvulture    | Project Source | File Source
def is_grid(self, grid, image):
        """
        Checks the "gridness" by analyzing the results of a Hough transform.
        :param grid: binary image
        :return: whether the object in the image might be a grid or not
        """
        #   - Distance resolution = 1 pixel
        #   - Angle resolution = 1° for high line density
        #   - Threshold = 144 hough intersections
        #        8px digit + 3*2px white + 2*1px border = 16px per cell
        #           => 144x144 grid
        #        144 - minimum number of points on the same line
        #       (but due to imperfections in the binarized image it's highly
        #        improbable to detect a 144x144 grid)
        lines = cv2.HoughLines(grid, 1, np.pi / 180, 144)

        if lines is not None and np.size(lines) >= 20:
            lines = lines.reshape((lines.size // 2), 2)
            # theta in [0, pi] (theta > pi => rho < 0)
            # normalise theta in [-pi, pi] and negatives rho
            lines[lines[:, 0] < 0, 1] -= np.pi
            lines[lines[:, 0] < 0, 0] *= -1

            criteria = (cv2.TERM_CRITERIA_EPS, 0, 0.01)
            # split lines into 2 groups to check whether they're perpendicular
            if cv2.__version__[0] == '2':
                density, clmap, centers = cv2.kmeans(
                    lines[:, 1], 2, criteria, 5, cv2.KMEANS_RANDOM_CENTERS)
            else:
                density, clmap, centers = cv2.kmeans(
                    lines[:, 1], 2, None, criteria,
                    5, cv2.KMEANS_RANDOM_CENTERS)

            if self.debug:
                self.save_hough(lines, clmap)

            # Overall variance from respective centers
            var = density / np.size(clmap)
            sin = abs(np.sin(centers[0] - centers[1]))
            # It is probably a grid only if:
            #   - centroids difference is almost a 90° angle (+-15° limit)
            #   - variance is less than 5° (keeping in mind surface distortions)
            return sin > 0.99 and var <= (5*np.pi / 180) ** 2
        else:
            return False
Project: pyku    Author: dubvulture    | Project Source | File Source
def save_hough(self, lines, clmap):
        """
        :param lines: (rho, theta) pairs
        :param clmap: clusters assigned to lines
        :return: None
        """
        height, width = self.image.shape
        ratio = 600. * (self.step+1) / min(height, width)
        temp = cv2.resize(self.image, None, fx=ratio, fy=ratio,
                          interpolation=cv2.INTER_CUBIC)
        temp = cv2.cvtColor(temp, cv2.COLOR_GRAY2BGR)
        colors = [(0, 127, 255), (255, 0, 127)]

        for i in range(0, np.size(lines) // 2):
            rho = lines[i, 0]
            theta = lines[i, 1]
            color = colors[clmap[i, 0]]
            if theta < np.pi / 4 or theta > 3 * np.pi / 4:
                pt1 = (rho / np.cos(theta), 0)
                pt2 = (rho - height * np.sin(theta) / np.cos(theta), height)
            else:
                pt1 = (0, rho / np.sin(theta))
                pt2 = (width, (rho - width * np.cos(theta)) / np.sin(theta))
            pt1 = (int(pt1[0]), int(pt1[1]))
            pt2 = (int(pt2[0]), int(pt2[1]))
            cv2.line(temp, pt1, pt2, color, 5)

        self.save2image(temp)
Project: pyku    Author: dubvulture    | Project Source | File Source
def morph(roi):
    ratio = min(28. / np.size(roi, 0), 28. / np.size(roi, 1))
    roi = cv2.resize(roi, None, fx=ratio, fy=ratio,
                     interpolation=cv2.INTER_NEAREST)
    dx = 28 - np.size(roi, 1)
    dy = 28 - np.size(roi, 0)
    px = (int(dx / 2.), int(np.ceil(dx / 2.)))
    py = (int(dy / 2.), int(np.ceil(dy / 2.)))
    squared = np.pad(roi, (py, px), 'constant', constant_values=0)
    return squared
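A minimal usage sketch with a hypothetical digit crop (cv2 must be importable): the ROI is rescaled to fit and zero-padded to the 28x28 input size used by MNIST-style classifiers.

import numpy as np

roi = np.random.randint(0, 255, (40, 20)).astype(np.uint8)
print(morph(roi).shape)  # (28, 28)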
Project: pyku    Author: dubvulture    | Project Source | File Source
def is_grid(self, grid, image):
        """
        Checks the "gridness" by analyzing the results of a Hough transform.
        :param grid: binary image
        :return: whether the object in the image might be a grid or not
        """
        #   - Distance resolution = 1 pixel
        #   - Angle resolution = 1° for high line density
        #   - Threshold = 144 hough intersections
        #        8px digit + 3*2px white + 2*1px border = 16px per cell
        #           => 144x144 grid
        #        144 - minimum number of points on the same line
        #       (but due to imperfections in the binarized image it's highly
        #        improbable to detect a 144x144 grid)

        lines = cv2.HoughLines(grid, 1, np.pi / 180, 144)

        if lines is not None and np.size(lines) >= 20:
            lines = lines.reshape((lines.size // 2), 2)
            # theta in [0, pi] (theta > pi => rho < 0)
            # normalise theta in [-pi, pi] and negatives rho
            lines[lines[:, 0] < 0, 1] -= np.pi
            lines[lines[:, 0] < 0, 0] *= -1

            criteria = (cv2.TERM_CRITERIA_EPS, 0, 0.01)
            # split lines into 2 groups to check whether they're perpendicular
            if cv2.__version__[0] == '2':
                density, clmap, centers = cv2.kmeans(
                    lines[:, 1], 2, criteria,
                    5, cv2.KMEANS_RANDOM_CENTERS)
            else:
                density, clmap, centers = cv2.kmeans(
                    lines[:, 1], 2, None, criteria,
                    5, cv2.KMEANS_RANDOM_CENTERS)

            # Overall variance from respective centers
            var = density / np.size(clmap)
            sin = abs(np.sin(centers[0] - centers[1]))
            # It is probably a grid only if:
            #   - centroids difference is almost a 90° angle (+-15° limit)
            #   - variance is less than 5° (keeping in mind surface distortions)
            return sin > 0.99 and var <= (5*np.pi / 180) ** 2
        else:
            return False
Project: facerecognition    Author: guoxiaolu    | Project Source | File Source
def prewhiten(x):
    mean = np.mean(x)
    std = np.std(x)
    std_adj = np.maximum(std, 1.0/np.sqrt(x.size))
    y = np.multiply(np.subtract(x, mean), 1/std_adj)
    return y
Project: facerecognition    Author: guoxiaolu    | Project Source | File Source
def get_label_batch(label_data, batch_size, batch_index):
    nrof_examples = np.size(label_data, 0)
    j = batch_index*batch_size % nrof_examples
    if j+batch_size<=nrof_examples:
        batch = label_data[j:j+batch_size]
    else:
        x1 = label_data[j:nrof_examples]
        # wrap around: take the rest of the batch from the start of the data
        x2 = label_data[0:j+batch_size-nrof_examples]
        batch = np.vstack([x1,x2])
    batch_int = batch.astype(np.int64)
    return batch_int
Project: facerecognition    Author: guoxiaolu    | Project Source | File Source
def get_batch(image_data, batch_size, batch_index):
    nrof_examples = np.size(image_data, 0)
    j = batch_index*batch_size % nrof_examples
    if j+batch_size<=nrof_examples:
        batch = image_data[j:j+batch_size,:,:,:]
    else:
        x1 = image_data[j:nrof_examples,:,:,:]
        # wrap around: take the rest of the batch from the start of the data
        x2 = image_data[0:j+batch_size-nrof_examples,:,:,:]
        batch = np.vstack([x1,x2])
    batch_float = batch.astype(np.float32)
    return batch_float
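A minimal usage sketch with hypothetical data: with 10 examples, batch_size 4 and batch_index 2, the batch starts at index 8 and wraps around to the start of the array.

import numpy as np

image_data = np.random.rand(10, 4, 4, 3).astype(np.float32)
batch = get_batch(image_data, batch_size=4, batch_index=2)
print(batch.shape)  # (4, 4, 4, 3)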
Project: facerecognition    Author: guoxiaolu    | Project Source | File Source
def calculate_accuracy(threshold, dist, actual_issame):
    predict_issame = np.less(dist, threshold)
    tp = np.sum(np.logical_and(predict_issame, actual_issame))
    fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
    tn = np.sum(np.logical_and(np.logical_not(predict_issame), np.logical_not(actual_issame)))
    fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame))

    tpr = 0 if (tp+fn==0) else float(tp) / float(tp+fn)
    fpr = 0 if (fp+tn==0) else float(fp) / float(fp+tn)
    acc = float(tp+tn)/dist.size
    return tpr, fpr, acc
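A minimal usage sketch with hypothetical pair distances: pairs closer than the threshold are predicted to be the same identity.

import numpy as np

dist = np.array([0.3, 0.9, 0.4, 1.2])
actual_issame = np.array([True, False, True, False])
print(calculate_accuracy(0.5, dist, actual_issame))  # (1.0, 0.0, 1.0)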
Project: pyfds    Author: emtpb    | Project Source | File Source
def test_field2d_init():
    # create a field where the main material is 5
    fld = fls.Field2D(100, 0.1, 100, 0.1, 100, 0.1, int(5))
    # check if the "material parameter" 'real' for the complete field is 5
    assert np.allclose(fld.material_vector('real'), 5)
    assert np.size(fld.material_vector('real')) == 10000
Project: youtube-8m    Author: wangheda    | Project Source | File Source
def heap_size(self):
    """Gets the heap size maintained in the class."""
    return len(self._heap)
Project: youtube-8m    Author: wangheda    | Project Source | File Source
def accumulate(self, predictions, actuals, num_positives=None):
    """Accumulate the predictions and their ground truth labels.

    After the function call, we may call peek_ap_at_n to actually calculate
    the average precision.
    Note predictions and actuals must have the same shape.

    Args:
      predictions: a list storing the prediction scores.
      actuals: a list storing the ground truth labels. Any value
      larger than 0 will be treated as positives, otherwise as negatives.
      num_positives: If the 'predictions' and 'actuals' inputs aren't complete,
      then it's possible some true positives were missed in them. In that case,
      you can provide 'num_positives' in order to accurately track recall.

    Raises:
      ValueError: An error occurred when the format of the input is not the
      numpy 1-D array or the shape of predictions and actuals does not match.
    """
    if len(predictions) != len(actuals):
      raise ValueError("the shape of predictions and actuals does not match.")

    if num_positives is not None:
      if not isinstance(num_positives, numbers.Number) or num_positives < 0:
        raise ValueError("'num_positives' was provided but it wasn't a nonnegative number.")

    if num_positives is not None:
      self._total_positives += num_positives
    else:
      self._total_positives += numpy.size(numpy.where(actuals > 0))
    topk = self._top_n
    heap = self._heap

    for i in range(numpy.size(predictions)):
      if topk is None or len(heap) < topk:
        heapq.heappush(heap, (predictions[i], actuals[i]))
      else:
        if predictions[i] > heap[0][0]:  # heap[0] is the smallest
          heapq.heappop(heap)
          heapq.heappush(heap, (predictions[i], actuals[i]))
Project: youtube-8m    Author: wangheda    | Project Source | File Source
def heap_size(self):
    """Gets the heap size maintained in the class."""
    return len(self._heap)
Project: youtube-8m    Author: wangheda    | Project Source | File Source
def accumulate(self, predictions, actuals, num_positives=None):
    """Accumulate the predictions and their ground truth labels.

    After the function call, we may call peek_ap_at_n to actually calculate
    the average precision.
    Note predictions and actuals must have the same shape.

    Args:
      predictions: a list storing the prediction scores.
      actuals: a list storing the ground truth labels. Any value
      larger than 0 will be treated as positives, otherwise as negatives.
      num_positives: If the 'predictions' and 'actuals' inputs aren't complete,
      then it's possible some true positives were missed in them. In that case,
      you can provide 'num_positives' in order to accurately track recall.

    Raises:
      ValueError: An error occurred when the format of the input is not the
      numpy 1-D array or the shape of predictions and actuals does not match.
    """
    if len(predictions) != len(actuals):
      raise ValueError("the shape of predictions and actuals does not match.")

    if num_positives is not None:
      if not isinstance(num_positives, numbers.Number) or num_positives < 0:
        raise ValueError("'num_positives' was provided but it wasn't a nonnegative number.")

    if num_positives is not None:
      self._total_positives += num_positives
    else:
      self._total_positives += numpy.size(numpy.where(actuals > 0))
    topk = self._top_n
    heap = self._heap

    for i in range(numpy.size(predictions)):
      if topk is None or len(heap) < topk:
        heapq.heappush(heap, (predictions[i], actuals[i]))
      else:
        if predictions[i] > heap[0][0]:  # heap[0] is the smallest
          heapq.heappop(heap)
          heapq.heappush(heap, (predictions[i], actuals[i]))
Project: youtube-8m    Author: wangheda    | Project Source | File Source
def heap_size(self):
    """Gets the heap size maintained in the class."""
    return len(self._heap)
Project: MKLMM    Author: omerwe    | Project Source | File Source
def getPosteriorMeanAndVar(self, diagKTestTest, KtrainTest, post, intercept=0):
        L = post['L']
        if (np.size(L) == 0): raise Exception('L is an empty array') #possible to compute it here
        Lchol = np.all((np.all(np.tril(L, -1)==0, axis=0) & (np.diag(L)>0)) & np.isreal(np.diag(L)))
        ns = diagKTestTest.shape[0]
        nperbatch = 5000
        nact = 0

        #allocate mem
        fmu = np.zeros(ns)  #column vector (of length ns) of predictive latent means
        fs2 = np.zeros(ns)  #column vector (of length ns) of predictive latent variances
        while (nact<(ns-1)):
            id = np.arange(nact, np.minimum(nact+nperbatch, ns))
            kss = diagKTestTest[id]     
            Ks = KtrainTest[:, id]
            if (len(post['alpha'].shape) == 1):
                try: Fmu = intercept[id] + Ks.T.dot(post['alpha'])
                except Exception: Fmu = intercept + Ks.T.dot(post['alpha'])
                fmu[id] = Fmu
            else:
                try: Fmu = intercept[id][:, np.newaxis] + Ks.T.dot(post['alpha'])
                except Exception: Fmu = intercept + Ks.T.dot(post['alpha'])
                fmu[id] = Fmu.mean(axis=1)
            if Lchol:
                V = la.solve_triangular(L, Ks*np.tile(post['sW'], (id.shape[0], 1)).T, trans=1, check_finite=False, overwrite_b=True)
                fs2[id] = kss - np.sum(V**2, axis=0)                       #predictive variances                        
            else:
                fs2[id] = kss + np.sum(Ks * (L.dot(Ks)), axis=0)           #predictive variances
            fs2[id] = np.maximum(fs2[id],0)  #remove numerical noise i.e. negative variances        
            nact = id[-1]    #set counter to index of last processed data point

        return fmu, fs2
Project: detection-2016-nipsws    Author: imatge-upc    | Project Source | File Source
def get_all_images(image_names, path_voc):
    images = []
    for j in range(np.size(image_names)):
        image_name = image_names[0][j]
        string = path_voc + '/JPEGImages/' + image_name + '.jpg'
        images.append(image.load_img(string, False))
    return images