Python matplotlib.mlab module: find() code examples

The following 13 code examples, extracted from open-source Python projects, illustrate how to use matplotlib.mlab.find().
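For context, find(condition) simply returns the indices at which a boolean condition array is true. It was deprecated in Matplotlib 2.2 and later removed, so np.flatnonzero() is the usual drop-in replacement; a minimal sketch:

import numpy as np

a = np.array([0.1, 0.5, 0.9, 0.3])

# what matplotlib.mlab.find(a > 0.4) used to return:
indices = np.flatnonzero(a > 0.4)
print(indices)   # [1 2]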

Project: finite_volume_seismic_model    Author: cjvogl
def dZ_at_t(self, t):
    """
    Interpolate dZ to the specified time t and return the deformation.
    """
    from matplotlib.mlab import find
    if t <= self.times[0]:
        return self.dZ[0,:,:]
    elif t >= self.times[-1]:
        return self.dZ[-1,:,:]
    else:
        # index of the last stored time <= t, then interpolate
        # linearly between frames n and n+1
        n = max(find(self.times <= t))
        t1 = self.times[n]
        t2 = self.times[n+1]
        dz = (t2-t)/(t2-t1) * self.dZ[n,:,:] + \
             (t-t1)/(t2-t1) * self.dZ[n+1,:,:]
        return dz
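
For a sorted time array, the same "last index with times <= t" lookup can be done without mlab; a sketch using np.searchsorted:

import numpy as np

times = np.array([0.0, 1.0, 2.0, 3.0])
t = 1.7

# equivalent to max(find(times <= t)) when times is sorted ascending
n = np.searchsorted(times, t, side='right') - 1   # -> 1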
Project: NetPower_TestBed    Author: Vignesh2208
from numpy import mean, diff
from matplotlib.mlab import find


def freq_from_crossings(sig, fs):
    """
    Estimate frequency by counting zero crossings
    """
    # Find all indices right before a rising-edge zero crossing
    indices = find((sig[1:] >= 0) & (sig[:-1] < 0))

    # Naive (measures 1000.185 Hz for 1000 Hz, for instance)
    # crossings = indices

    # More accurate: linear interpolation to find the intersample
    # zero crossings (measures 1000.000129 Hz for 1000 Hz, for instance)
    crossings = [i - sig[i] / (sig[i+1] - sig[i]) for i in indices]

    # Some other interpolation based on neighboring points might be
    # better: spline, cubic, etc.

    return fs / mean(diff(crossings))
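
A quick self-test on a synthetic tone (a sketch; the exact digits depend on the sampling grid):

import numpy as np

fs = 48000                            # sample rate in Hz
t = np.arange(fs) / fs                # one second of samples
sig = np.sin(2 * np.pi * 1000 * t)

print(freq_from_crossings(sig, fs))   # ~1000.0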
Project: NetPower_TestBed    Author: Vignesh2208
from numpy import diff, argmax
from scipy.signal import fftconvolve
from matplotlib.mlab import find


def freq_from_autocorr(sig, fs):
    """
    Estimate frequency using autocorrelation
    """
    # Calculate autocorrelation (same thing as convolution, but with
    # one input reversed in time), and throw away the negative lags
    corr = fftconvolve(sig, sig[::-1], mode='full')
    corr = corr[len(corr)//2:]

    # Find the first low point
    d = diff(corr)
    start = find(d > 0)[0]

    # Find the next peak after the low point (other than lag 0).  This is
    # not reliable for long signals: the desired peak can fall between
    # samples, and other peaks can end up higher.  A weighting function
    # that de-emphasizes peaks at longer lags would help.
    peak = argmax(corr[start:]) + start
    px, py = parabolic(corr, peak)   # parabolic() is defined elsewhere

    return fs / px
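
parabolic() is not shown in the snippet; a widely circulated implementation (a sketch, not necessarily this project's exact helper) fits a parabola through the peak sample and its two neighbours and returns the interpolated vertex:

def parabolic(f, x):
    """Quadratic interpolation around index x of sampled function f.

    Returns (xv, yv), the coordinates of the parabola's vertex.
    """
    xv = 1/2. * (f[x-1] - f[x+1]) / (f[x-1] - 2 * f[x] + f[x+1]) + x
    yv = f[x] - 1/4. * (f[x-1] - f[x+1]) * (xv - x)
    return xv, yv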
Project: neurotools    Author: michaelerule
import matplotlib.pyplot as plt
from matplotlib.mlab import find


def add_spikes(Y):
    '''
    Overlay spike times on the current axes as short tick marks just
    above the existing y-range.

    Parameters
    ----------
    Y : ndarray
        1D binary spike raster; entries equal to 1 mark spike times.

    Returns
    -------
    None
    '''
    # plot spikes as vertical ticks above the current y-limits
    y1,y2 = plt.ylim()
    y3 = y2+.1*(y2-y1)
    for e in find(Y==1):
        plt.plot([e,e],[y2,y3],color='k',lw=.5)
    plt.ylim(y1,y3)
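
A minimal usage sketch (the signal and raster are illustrative):

import numpy as np
import matplotlib.pyplot as plt

rate = np.random.rand(200)                     # some 1D trace
Y = (np.random.rand(200) < 0.05).astype(int)   # hypothetical 0/1 spike raster

plt.plot(rate)
add_spikes(Y)
plt.show()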
Project: neurotools    Author: michaelerule
def reject_outliers(x,percent=10,side='both'):
    '''
    Reject outliers from data based on percentiles.

    Parameters
    ----------
    x : ndarray
        1D numeric array of data values
    percent : number
        percent between 0 and 100 to remove
    side : str
        'left', 'right', or 'both'. Default is 'both'. Remove extreme
        values from the left / right / both sides of the data
        distribution. If both, the percent is halved and removed
        from both the left and the right
    Returns
    -------
    ndarray
        Values with outliers removed
    kept
        Indices of values kept
    removed
        Indices of values removed
    '''
    N = len(x)
    remove = outliers(x,percent,side)
    to_remove = find(remove==True)
    to_keep   = find(remove==False)
    return x[to_keep], to_keep, to_remove
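
The outliers() helper is not shown here; a percentile-based sketch consistent with the docstring (an assumption, not the library's exact code):

import numpy as np

def outliers(x, percent=10, side='both'):
    # boolean mask, True for values to reject
    if side == 'both':
        lo, hi = np.percentile(x, [percent/2.0, 100 - percent/2.0])
        return (x < lo) | (x > hi)
    elif side == 'left':
        return x < np.percentile(x, percent)
    else:  # 'right'
        return x > np.percentile(x, 100 - percent)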
Project: neurotools    Author: michaelerule
def isInt(v):
    # note: this snippet uses str.find, not mlab.find
    v = str(v).strip()
    return v=='0' or (v if v.find('..') > -1 else v.lstrip('-+').rstrip('0').rstrip('.')).isdigit()
Project: neurotools    Author: michaelerule
import numpy as np
import matplotlib.mlab as ml


def bin_spikes_raster(train,binsize=5):
    '''
    Important! This accepts a spike raster, not spike times!
    '''
    bins = int(np.ceil(len(train)/float(binsize)))
    return np.histogram(ml.find(train),bins,(0,bins*binsize))[0]
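
For example, binning a 0/1 raster into 5-sample counts:

import numpy as np

train = np.zeros(20, dtype=int)
train[[1, 3, 4, 11, 17]] = 1

print(bin_spikes_raster(train, binsize=5))   # -> [3 0 1 1]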
Project: Inertial-Orbit-Detection    Author: MatrixAI
import numpy as np
from scipy.signal import fftconvolve


def freq_from_autocorr(signal, sampling_rate):
    # autocorrelation via FFT convolution; keep non-negative lags only
    corr = fftconvolve(signal, signal[::-1], mode='full')
    corr = corr[len(corr)//2:]
    # first rising point after the initial decay, then the highest peak
    d = np.diff(corr)
    start = find_index_by_true(d > 0)[0]
    peak = np.argmax(corr[start:]) + start
    px, py = parabolic(corr, peak)   # parabolic() as sketched above
    return sampling_rate / px
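
find_index_by_true appears to be this project's own stand-in for mlab.find; a plausible reconstruction (an assumption):

import numpy as np

def find_index_by_true(condition):
    # hypothetical helper mirroring matplotlib.mlab.find
    return np.flatnonzero(condition)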
Project: Magic-Platform    Author: amiravni
import numpy as np
from matplotlib import mlab

# ELEVATION_THRESHOLD, VECTOR_SIZE, ELEVATION_TREND_PRECENT and the
# HAND_* codes are module-level constants in the project.

def CheckElevation(vec):
    vec_diff = np.diff(np.array(vec))
    elChange = vec[-1] - vec[1]
    if elChange > ELEVATION_THRESHOLD:
        # hand moving down: most consecutive differences non-negative
        if mlab.find(vec_diff >= 0).size > VECTOR_SIZE*ELEVATION_TREND_PRECENT:
            return elChange, HAND_DOWN
    elif elChange < -ELEVATION_THRESHOLD:
        # hand moving up: most consecutive differences non-positive
        if mlab.find(vec_diff <= 0).size > VECTOR_SIZE*ELEVATION_TREND_PRECENT:
            return elChange, HAND_UP
    return 0, HAND_UNKWON
Project: Magic-Platform    Author: amiravni
import numpy as np
from matplotlib import mlab

def CheckElevation(vec):
    # smooth the raw vector with a polynomial fit before checking the trend
    # (mlab.frange includes the endpoint, unlike np.arange)
    step = 1.0/len(vec)
    vec_after_polyfit = createElevationVectorAfterPolyfit(mlab.frange(0,1-step,step),vec)
    vec_diff = np.diff(np.array(vec_after_polyfit))
    elChange = vec_after_polyfit[-1] - vec_after_polyfit[1]
    if elChange > ELEVATION_THRESHOLD:
        # hand moving down
        if mlab.find(vec_diff >= 0).size > VECTOR_SIZE*ELEVATION_TREND_PRECENT:
            return elChange, HAND_DOWN
    elif elChange < -ELEVATION_THRESHOLD:
        # hand moving up
        if mlab.find(vec_diff <= 0).size > VECTOR_SIZE*ELEVATION_TREND_PRECENT:
            return elChange, HAND_UP
    return 0, HAND_UNKWON
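
createElevationVectorAfterPolyfit is project code not shown here; a plausible reconstruction (an assumption) fits a low-order polynomial over the normalized time grid and evaluates it on the same grid:

import numpy as np

def createElevationVectorAfterPolyfit(t, vec, degree=2):
    # hypothetical helper: smooth vec with a polynomial fit over t
    coeffs = np.polyfit(t, vec, degree)
    return np.polyval(coeffs, t)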
Project: finite_volume_seismic_model    Author: cjvogl
def read(self, path, input_units={}, coordinate_specification="top center",
         rupture_type="static", verbose=False):
    r"""Read in subfault specification at *path*.

    Creates a list of subfaults from the subfault specification file at
    *path*.

    """
    # note: the mutable default for input_units is shared across calls

    possible_column_names = """longitude latitude length width depth strike dip
                      rake slip mu rupture_time rise_time rise_time_ending""".split()
    param = {}
    for n in possible_column_names:
        param[n] = n
    # alternative names that might appear in the csv file:
    param["rigidity"] = "mu"
    param["rupture time"] = "rupture_time"
    param["rise time"] = "rise_time"

    # Read the header of the file
    with open(path, 'r') as subfault_file:
        header_line = subfault_file.readline().split(",")
        column_map = {}
        for (n, column_heading) in enumerate(header_line):
            if "(" in column_heading:
                # Strip out units if present (str.find here, not mlab.find)
                unit_start = column_heading.find("(")
                unit_end = column_heading.find(")")
                column_name = column_heading[:unit_start].lower()
                units = column_heading[unit_start+1:unit_end]
                if verbose and input_units.get(column_name, units) != units:
                    print("*** Warning: input_units[%s] reset to %s"
                          % (column_name, units))
                    print("    based on file header")
                    input_units[column_name] = units
            else:
                column_name = column_heading.lower()
            column_name = column_name.strip()
            if column_name in param.keys():
                column_key = param[column_name]
                column_map[column_key] = n
            else:
                print("*** Warning: column name not recognized: %s"
                      % column_name)

    super(CSVFault, self).read(path, column_map=column_map, skiprows=1,
                               delimiter=",", input_units=input_units,
                               coordinate_specification=coordinate_specification,
                               rupture_type=rupture_type)
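
A usage sketch with a hypothetical subfault CSV (file name and values are illustrative):

# subfaults.csv -- units in parentheses are stripped from the header:
#   Longitude,Latitude,Length (km),Width (km),Depth (km),Strike,Dip,Rake,Slip (m)
#   142.0,38.0,50.0,30.0,10.0,193.0,14.0,81.0,5.0

fault = CSVFault()
fault.read("subfaults.csv", verbose=True)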

Project: accpy    Author: kramerfelix
from operator import add

from numpy import (concatenate, delete, logical_or, nanmin, nanmax, pi,
                   sqrt, diag, linspace)
from scipy.optimize import curve_fit
from matplotlib.cm import ScalarMappable, cool, rainbow
from matplotlib.colors import Normalize
from matplotlib.mlab import find
from matplotlib.pyplot import tight_layout


def tuneplot(ax1, ax2, data, particleIDs='allIDs', integer=1, addsub=add,
             clipint=True, showlost=False, QQ='Qx', ms=1, clip=[0], showfit=False):
    particleIDs = data[particleIDs]
    lost = data['lost'][:, 0]
    if not showlost:
        clip = concatenate([clip, lost])
    particleIDs = delete(particleIDs, clip)
    Q = addsub(integer, data[QQ][particleIDs])
    if clipint:
        zeroQ = find(logical_or(logical_or(Q == 0.0, Q == 1.0), Q == 0.5))
        if len(zeroQ) > 0:  # trim reference particle with zero tune
            Q = delete(Q, zeroQ)
            particleIDs = delete(particleIDs, zeroQ)
    Qmin, Qmax = nanmin(Q), nanmax(Q)
    Qdif = Qmax - Qmin
    if Qdif == 0.0:
        Qmin -= Qmin/1e4
        Qmax += Qmax/1e4
        Qdif = Qmax - Qmin
    colors = cool((Q - Qmin) / Qdif)
    for i, ID in enumerate(particleIDs):
        ax1.plot(data['x'][:, ID]*1e3, data['xp'][:, ID]*1e3, '.', c=colors[i], ms=ms)
    if showlost:
        for ID in lost:
            ax1.plot(data['x'][:, ID]*1e3, data['xp'][:, ID]*1e3, '.', c='gray', ms=ms)
    sm = ScalarMappable(cmap=rainbow, norm=Normalize(vmin=Qmin, vmax=Qmax))
    sm._A = []
    ax1.set_xlabel(r'Position $x$ / (mm)')
    ax1.set_ylabel(r'Angle $x^\prime$ / (mrad)')
    emittance = data['A'][particleIDs]/pi
    action = emittance/2

    # tune shift with action: linear fit dQ = a + b*J
    fitfun = lambda x, a, b: a + b*x
    popt, pcov = curve_fit(fitfun, action, Q)
    perr = sqrt(diag(pcov))
    action2 = linspace(nanmin(action), nanmax(action), 1000)
    fit1 = fitfun(action2, *popt)
    print(popt[1]*1e-6*1250)  # fitted slope, rescaled by hard-coded machine constants

    for i, ID in enumerate(particleIDs):
        ax2.plot(action[i]*1e6, Q[i], 'o', c=colors[i], ms=ms + 1)
    if showfit:
        ax2.plot(action2*1e6, fit1, '-k', lw=1,
                 label=r'fit with $TSWA=${:.4}$\pm${:.1} (kHz mm$^{{-2}}$ mrad$^{{-2}}$)'.format(
                     popt[1]*1e-6*1250, perr[1]*1e-6*1250))
#    leg = ax2.legend()
#    leg.get_frame().set_alpha(0)
    ax2.set_ylim([Qmin, Qmax])
#    ax2.yaxis.tick_right()
    ax2.set_ylabel(r'Fractional Tune $dQ$')
#    ax2.yaxis.set_label_position('right')
    ax2.set_xlabel(r'Action $J_x$ / (mm$\cdot$mrad)')
    tight_layout()
    return
Project: neurotools    Author: michaelerule
from numpy import ravel, float64, arange
from matplotlib.mlab import find


def benjamini_hochberg_positive_correlations(pvalues,alpha):
    '''
    Derived from the following matlab code (c) Wilson Truccolo
        function [pID,pN] = fdr(p,q)
        % FORMAT pt = fdr(p,q)
        % 
        % p   - vector of p-values
        % q   - False Discovery Rate level
        %
        % pID - p-value threshold based on independence or positive dependence
        % pN  - Nonparametric p-value threshold
        %
        % This function takes a vector of p-values and a False Discovery Rate
        % (FDR). It returns two p-value thresholds, one based on an assumption of
        % independence or positive dependence, and one that makes no assumptions
        % about how the tests are correlated. For imaging data, an assumption of
        % positive dependence is reasonable, so it should be OK to use the first
        % (more sensitive) threshold.
        % 
        % Reference: Benjamini and Hochberg, J Royal Statistical Society. Series B
        % (Methodological), V 57, No. 1 (1995), pp. 289-300.
        % _____________________________________________________________________________
        % @(#)fdr.m 1.3 Tom Nichols 02/01/18
        % Wilson Truccolo: modified 10/19/2007

        p = sort(p(:));
        V = length(p);
        I = (1:V)';
        cVID = 1;
        cVN = sum(1./(1:V));
        pID = p(max(find(p<=I/V*q/cVID)));
        pN = p(max(find(p<=I/V*q/cVN)));
    '''
    pvalues = float64(sorted(ravel(pvalues)))
    V = len(pvalues)
    # Benjamini-Hochberg thresholds for the sorted p-values
    X = float64(arange(1,V+1))*alpha/V
    cVN = sum(1./arange(1,V+1))
    pID = find(pvalues <= X)
    pID = pvalues[pID[-1]] if len(pID) > 0 else 0
    pN  = find(pvalues <= X/cVN)
    pN  = pvalues[pN[-1]] if len(pN) > 0 else 0
    print(pID, pN)
    return pID, pN
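
A usage sketch on synthetic p-values (numbers are illustrative):

import numpy as np

np.random.seed(0)
# 90 null p-values plus 10 small "signal" p-values
pvalues = np.concatenate([np.random.rand(90), np.random.rand(10) * 1e-3])

pID, pN = benjamini_hochberg_positive_correlations(pvalues, alpha=0.05)
# reject hypotheses with p <= pID under the positive-dependence assumption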