Python numpy module: interp() code examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use numpy.interp().
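For reference, np.interp(x, xp, fp) evaluates the piecewise linear function through the points (xp, fp) at x, clamping to the endpoint values outside the range of xp. A minimal sketch with made-up values:

import numpy as np

xp = [1.0, 2.0, 3.0]                  # x-coordinates, must be increasing
fp = [3.0, 2.0, 0.0]                  # y-coordinates
np.interp(2.5, xp, fp)                # -> 1.0, halfway between (2, 2) and (3, 0)
np.interp(0.0, xp, fp)                # -> 3.0, clamped to fp[0] below xp[0]
np.interp([1.5, 2.5], xp, fp)         # array input returns an array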

Project: pyrsss    Author: butala    | project source | file source
def nan_helper(y):
    """
    Helper to handle indices and logical indices of NaNs.

    Input:
        - y, 1d numpy array with possible NaNs
    Output:
        - nans, logical indices of NaNs
        - index, a function, with signature indices= index(logical_indices),
          to convert logical indices of NaNs to 'equivalent' indices
    Example:
        >>> # linear interpolation of NaNs
        >>> nans, x= nan_helper(y)
        >>> y[nans]= NP.interp(x(nans), x(~nans), y[~nans])
    """
    # Source: http://stackoverflow.com/questions/6518811/interpolate-nan-values-in-a-numpy-array
    return NP.isnan(y), lambda z: z.nonzero()[0]
Project: uchroma    Author: cyanogen    | project source | file source
def scale(value, src_min, src_max, dst_min, dst_max, round_=False):
    """
    Scale a value from one range to another.

    :param value: Input value
    :param src_min: Min value of input range
    :param src_max: Max value of input range
    :param dst_min: Min value of output range
    :param dst_max: Max value of output range
    :param round_: True if the scale value should be rounded to an integer

    :return: The scaled value
    """
    scaled = interp(clamp(value, src_min, src_max), [src_min, src_max], [dst_min, dst_max])
    if round_:
        scaled = int(round(scaled))

    return scaled
Project: MIT-Thesis    Author: alec-heif    | project source | file source
def predict(self, x):
        """
        Predict labels for provided features using a piecewise linear function.
        1) If x exactly matches a boundary then associated prediction
        is returned. In case there are multiple predictions with the
        same boundary then one of them is returned. Which one is
        undefined (same as java.util.Arrays.binarySearch).
        2) If x is lower or higher than all boundaries then first or
        last prediction is returned respectively. In case there are
        multiple predictions with the same boundary then the lowest
        or highest is returned respectively.
        3) If x falls between two values in boundary array then
        prediction is treated as piecewise linear function and
        interpolated value is returned. In case there are multiple
        values with the same boundary then the same rules as in 2)
        are used.

        :param x:
          Feature or RDD of Features to be labeled.
        """
        if isinstance(x, RDD):
            return x.map(lambda v: self.predict(v))
        return np.interp(x, self.boundaries, self.predictions)
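Rule 2) above is exactly np.interp's default extrapolation behavior: outside the boundary range it clamps to the first or last value. A minimal illustration with made-up boundaries and predictions:

import numpy as np

boundaries = [0.0, 1.0, 2.0]
predictions = [10.0, 20.0, 30.0]
np.interp(-5.0, boundaries, predictions)   # -> 10.0, below all boundaries
np.interp(1.5, boundaries, predictions)    # -> 25.0, piecewise linear
np.interp(9.0, boundaries, predictions)    # -> 30.0, above all boundaries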
Project: planetplanet    Author: rodluger    | project source | file source
def quantile(x, q, weights=None):
    """
    Like numpy.percentile, but:

    * Values of q are quantiles [0., 1.] rather than percentiles [0., 100.]
    * scalar q not supported (q must be iterable)
    * optional weights on x

    """
    if weights is None:
        return np.percentile(x, [100. * qi for qi in q])
    else:
        idx = np.argsort(x)
        xsorted = x[idx]
        cdf = np.add.accumulate(weights[idx])
        cdf /= cdf[-1]
        return np.interp(q, cdf, xsorted).tolist()
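In the weighted branch, np.interp inverts the empirical weighted CDF. A quick usage sketch with made-up data and uniform weights:

import numpy as np

x = np.array([5.0, 1.0, 3.0, 2.0, 4.0])
w = np.ones_like(x)                       # uniform weights
quantile(x, [0.5], weights=w)             # -> [2.5], the weighted median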
Project: planetplanet    Author: rodluger    | project source | file source
def convolve(self, lam, flux):
        '''
        Convolve flux with normalized filter throughput.

        :param array_like lam: High-res wavelength grid [:math:`\mu \mathrm{m}`]
        :param array_like flux:  High-res flux grid \
               [:math:`\mathrm{W/m}^2 / \mu \mathrm{m}`]

        :returns array_like F: Flux convolved with normalized throughput

        '''

        # interpolate filter throughput onto the high-res grid
        T = np.interp(lam, self.wl, self.throughput)

        # Convolve with normalized throughput
        F = np.sum(flux * T) / np.sum(T)

        return F
Project: pyqha    Author: mauropalumbo75    | project source | file source
def compute_Cv(T,Vmin,V,Cvib):
    """
    This function computes the isochoric heat capacity as a function of temperature.
    From *Cvib*, a matrix *Cvib(T,V)* obtained from the harmonic calculations, it
    determines *Cv* at each temperature by linear interpolation between the values
    at the two volumes closest to Vmin(T). Vmin(T) comes from the minimization of
    F(V,T), and *V* is the array of volumes used for it.
    Returns *Cv(T)*.

    Work in progress... for now it uses all volumes in the interpolation.

    """
    Cv = np.zeros(len(T))
    for iT in range(0,len(T)):
        Cv_interpolated = np.interp(Vmin[iT], V, Cvib[iT,:])
        Cv[iT] = Cv_interpolated

    return Cv
Project: DVH-Analytics    Author: cutright    | project source | file source
def dose_to_volume(dvh, volume, *roi_volume):

    # if roi_volume is not given, volume is assumed to be fractional
    if roi_volume:
        if isinstance(roi_volume[0], basestring):
            return 0
        roi_volume = roi_volume[0]
    else:
        roi_volume = 1

    dose_high = np.argmax(dvh < (volume / roi_volume))
    y = volume / roi_volume
    x_range = [dose_high - 1, dose_high]
    y_range = [dvh[dose_high - 1], dvh[dose_high]]
    dose = np.interp(y, y_range, x_range) * 0.01

    return dose
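One caveat: np.interp silently assumes its second argument is increasing, while a cumulative DVH decreases with dose, so the two-point segment above is passed in decreasing order. A hedged sketch of the same lookup with the segment flipped first (the dvh values are hypothetical):

import numpy as np

y_range = [0.62, 0.58]           # dvh values, decreasing with dose
x_range = [41, 42]               # dose bins
dose = np.interp(0.60, y_range[::-1], x_range[::-1]) * 0.01   # -> 0.415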
Project: financial_life    Author: MartinPyka    | project source | file source
def join_data(dates_list, data_list):
    """ This functions makes heterogenous time series data align
    with one time series axis 
    dates   : list of date-lists
    data    : list of data-lists_lock

    Returns:
        dates, and data, but this time, data shares the same
        date-points
    """
    # first get all unique dates from every sublist and make one list out of them
    rdates = sorted(list(set([date for sublist in dates_list for date in sublist])))
    rdata = []

    # go through each vector and interpolate data if necessary
    for dates, data_vecs in zip(dates_list, data_list):
        for data in data_vecs:
            if len(data) > 0:
                rdata.append(np.interp(rdates,dates, data).tolist())
            else:    # if data is empty, then just create a zero-length vector
                rdata.append(np.zeros(len(rdates)))
    return rdates, rdata
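A self-contained sketch of the same alignment idea, with numeric time stamps standing in for the date lists (np.interp needs numeric, increasing x-coordinates):

import numpy as np

dates_a, data_a = [0, 2, 4], [1.0, 3.0, 5.0]
dates_b, data_b = [1, 3], [10.0, 30.0]
rdates = sorted(set(dates_a) | set(dates_b))    # [0, 1, 2, 3, 4]
np.interp(rdates, dates_a, data_a)              # [1., 2., 3., 4., 5.]
np.interp(rdates, dates_b, data_b)              # [10., 10., 20., 30., 30.]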
Project: POWER    Author: pennelise    | project source | file source
def power_curve_query(ws,TI,opt="normal_TI"):
    import numpy as np
    hub_height_ws = np.arange(3,13.5,0.5)

    power_normal_TI = np.array([0,20,63,116,177,248,331,428,540,667,812,972,1141,1299,1448,1561,1633,1661,1677,1678,1680])
    power_low_TI = np.array([0,18,61,114,174,244,325,421,532,657,801,961,1134,1304,1463,1585,1654,1675,1680,1680,1680])
    power_high_TI = np.array([0,24,68,123,185,258,344,446,562,693,841,994,1148,1287,1419,1519,1589,1637,1665,1679,1680])

    if "var_TI" not in opt:    
        if "normal_TI" in opt:
            power = power_normal_TI
        if "low_TI" in opt:
            power = power_low_TI
        if "high_TI" in opt:
            power = power_high_TI    

        power_interp = np.interp(ws, hub_height_ws, power)
    else:
        from power_curve_query_func import power_curve_var_TI 
        power_interp = power_curve_var_TI(ws,TI)

    return power_interp
Project: POWER    Author: pennelise    | project source | file source
def power_curve_var_TI(ws,TI):

    import numpy as np
    hub_height_ws = np.arange(3,13.5,0.5)

    power_normal_TI = np.array([0,20,63,116,177,248,331,428,540,667,812,972,1141,1299,1448,1561,1633,1661,1677,1678,1680])
    power_low_TI = np.array([0,18,61,114,174,244,325,421,532,657,801,961,1134,1304,1463,1585,1654,1675,1680,1680,1680])
    power_high_TI = np.array([0,24,68,123,185,258,344,446,562,693,841,994,1148,1287,1419,1519,1589,1637,1665,1679,1680])  

    power_interp = np.zeros(len(ws))
    power_interp[:] = np.nan
    index = 0
    for i,j in zip(ws,TI):
        if j < 10:
            power_interp[index] = np.interp(i, hub_height_ws, power_low_TI)
        if j >= 10 and j < 15:   
            power_interp[index] = np.interp(i, hub_height_ws, power_normal_TI)
        if j >= 15 and j < 20:
            power_interp[index] = np.interp(i, hub_height_ws, power_high_TI)  
        index += 1    
    return power_interp
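The per-element loop above can also be written without explicit iteration; a possible vectorized variant using np.select, assuming ws and TI are numpy arrays and reusing the curves defined above (TI >= 20 falls through to NaN, matching the loop):

power_interp = np.select(
    [TI < 10, TI < 15, TI < 20],          # evaluated in order, first match wins
    [np.interp(ws, hub_height_ws, power_low_TI),
     np.interp(ws, hub_height_ws, power_normal_TI),
     np.interp(ws, hub_height_ws, power_high_TI)],
    default=np.nan)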
Project: agefromname    Author: JasonKessler    | project source | file source
def _get_estimated_counts_all_names(self,
                                        sex,
                                        current_year=datetime.now().year,
                                        minimum_age=0):
        '''
        :param sex: str, m or f for sex.
        :param current_year: int, optional, defaults to current year
        :param minimum_age: int, optional, defaults to 0
        :return: pd.Series, with int indices indicating years of
            birth, and estimated counts of total population with that name and birth year
        '''
        sex = self._check_and_normalize_gender(sex)
        cur_df = (self._year_of_birth_df[
                      self._birth_year_df_mask(current_year=current_year,
                                               first_name=None, minimum_age=minimum_age, sex=sex)
                  ][['first_name', 'year_of_birth', 'count']])
        year_stats = (self._mortality_df[self._mortality_df.as_of_year == current_year]
                      [['year_of_birth', sex + '_prob_alive']])
        cur_df['prob_alive'] = np.interp(cur_df.year_of_birth,
                                         year_stats.year_of_birth,
                                         year_stats[sex + '_prob_alive'])
        cur_df['estimated_count'] = cur_df['prob_alive'] * cur_df['count']
        return cur_df  # .set_index('year_of_birth')['estimated_count']
Project: burro    Author: yconst    | project source | file source
def decide(self, img_arr):
        if config.camera.crop_top or config.camera.crop_bottom:
            h, w, _ = img_arr.shape
            t = config.camera.crop_top
            l = h - config.camera.crop_bottom
            img_arr = img_arr[t:l, :]

        img_arr = np.interp(img_arr, config.camera.output_range,
                            config.model.input_range)
        img_arr = np.expand_dims(img_arr, axis=0)
        prediction = self.model.predict(img_arr)
        if len(prediction) == 2:
            yaw = methods.angle_to_yaw(prediction[0][0])
            throttle = prediction[1][0]
        else:
            yaw = methods.angle_to_yaw(prediction[0][0])
            throttle = 0

        avf = config.model.yaw_average_factor
        yaw = avf * self.yaw + (1.0 - avf) * yaw
        self.yaw = yaw
        return methods.yaw_to_angle(yaw), throttle
Project: pystudio    Author: satorchi    | project source | file source
def Pbias(self,TES):
    '''
    find the Pbias at 90% Rn
    '''    
    filterinfo=self.filterinfo(TES)
    if filterinfo is None: return None

    Rn_ratio=self.Rn_ratio(TES)
    if not isinstance(Rn_ratio, np.ndarray): return None

    istart,iend=self.selected_iv_curve(TES)

    Rn_ratio=Rn_ratio[istart:iend]
    Ptes=self.Ptes(TES)
    Ptes=Ptes[istart:iend]

    # check that Rn_ratio is increasing
    increasing=np.diff(Rn_ratio).mean()
    if increasing<0:
        Pbias=np.interp(90., np.flip(Rn_ratio,0), np.flip(Ptes,0))
    else:
        Pbias=np.interp(90., Rn_ratio, Ptes)

    return Pbias
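The np.flip calls matter: np.interp assumes an increasing x-coordinate sequence and, per the numpy documentation, returns nonsense (without raising) when it is decreasing. A minimal illustration with made-up values:

import numpy as np

rn = np.array([95., 92., 88.])                  # decreasing along the IV curve
p = np.array([5.0, 4.0, 3.0])
np.interp(90., np.flip(rn, 0), np.flip(p, 0))   # -> 3.5, correct
# np.interp(90., rn, p) would not raise, but its result is undefined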
Project: Global_GPP_VPM_NCEP_C3C4    Author: zhangyaonju    | project source | file source
def VIsmooth_ref(x):
    #the size of EVIgood is 92*5760000, the size of the reference data is 46*5760000
    x[x == -9999] = np.nan
    EVIgood = x[0:92]
    reference = np.concatenate([x[115:], x[92:], x[92:115]])
    if np.sum(np.isnan(EVIgood)) == 92:
        return np.concatenate([x[92:], x[23:69], x[92:]])
    ############################
    #here require complicated algorithm
    #first get the difference between these two
    diff = EVIgood - reference
    #fun = cdll.LoadLibrary(os.getcwd() + '/bise.so')
    #outdiff = (c_double * len(EVIgood))()
    #nans, y = nan_helper(diff)
    #diff[nans] = np.interp(y(nans), y(~nans), diff[~nans])
    diff[reference == 0] = 0
    diff = pd.Series(diff)
    reconstructVI = reference+diff.interpolate()
    SGVI = savgol_filter(np.array(reconstructVI[23:69]), window_length=5, polyorder=3)
    SGVI[SGVI < 0] = 0
    return np.concatenate([SGVI, x[23:69], x[92:]])
Project: StructEngPy    Author: zhuoju36    | project source | file source
def spectrum_analysis(model:Model.fem_model,n,spec):
    """
    spectrum analysis\n
    n: number of modes to use\n
    spec: a list of tuples (period,acceleration response)
    """
    freq,mode=eigen_mode(model,n)
    M_=np.dot(mode.T,model.M)
    M_=np.dot(M_,mode)
    K_=np.dot(mode.T,model.K)
    K_=np.dot(K_,mode)
    C_=np.dot(mode.T,model.C)
    C_=np.dot(C_,mode)
    d_=[]
    for (m_,k_,c_) in zip(M_.diagonal(),K_.diagonal(),C_.diagonal()):
        sdof=SDOFSystem(m_,k_)
        T=sdof.omega_d()
        d_.append(np.interp(T,spec[0],spec[1]*m_))
    d=np.dot(d_,mode)
    #CQC
    return d
Project: StructEngPy    Author: zhuoju36    | project source | file source
def __init__(self,alpha_max,Tg,xi):
        gamma=0.9+(0.05-xi)/(0.3+6*xi)
        eta1=0.02+(0.05-xi)/(4+32*xi)
        eta1=eta1 if eta1>0 else 0
        eta2=1+(0.05-xi)/(0.08+1.6*xi)
        eta2=eta2 if eta2>0.55 else 0.55
        T=np.linspace(0,6,601)
        alpha=[]
        for t in T:
            if t<0.1:
                alpha.append(np.interp(t,[0,0.1],[0.45*alpha_max,eta2*alpha_max]))
            elif t<Tg:
                alpha.append(eta2*alpha_max)
            elif t<5*Tg:
                alpha.append((Tg/t)**gamma*eta2*alpha_max)
            else:
                alpha.append((eta2*0.2**gamma-eta1*(t-5*Tg))*alpha_max)
        self.__spectrum={'T':T,'alpha':alpha}
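The surrounding class isn't shown, but given the stored dictionary, evaluating the design spectrum at an arbitrary period presumably reduces to one more np.interp call; a hedged sketch with a hypothetical accessor:

def spectrum_at(spectrum, t):
    """Spectral acceleration at period t, given {'T': ..., 'alpha': ...}."""
    return np.interp(t, spectrum['T'], spectrum['alpha'])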
Project: 2020plus    Author: KarchinLab    | project source | file source
def _update_tsg_metrics(self, y_true, y_pred, prob):
        self.tsg_gene_pred = pd.Series(y_pred, self.y.index)
        self.tsg_gene_score = pd.Series(prob, self.y.index)

        # compute metrics for classification
        self.tsg_gene_count[self.num_pred] = sum(y_pred)
        prec, recall, fscore, support = metrics.precision_recall_fscore_support(y_true, y_pred)
        tsg_col = 1  # column for metrics relate to tsg
        self.tsg_precision[self.num_pred] = prec[tsg_col]
        self.tsg_recall[self.num_pred] = recall[tsg_col]
        self.tsg_f1_score[self.num_pred] = fscore[tsg_col]
        self.logger.debug('Tsg Iter %d: Precision=%s, Recall=%s, f1_score=%s' % (
                          self.num_pred + 1, str(prec), str(recall), str(fscore)))

        # compute ROC curve metrics
        fpr, tpr, thresholds = metrics.roc_curve(y_true, prob)
        self.tsg_tpr_array[self.num_pred, :] = interp(self.tsg_fpr_array, fpr, tpr)
        #self.tsg_tpr_array[0] = 0.0

        # compute Precision-Recall curve metrics
        p, r, thresh = metrics.precision_recall_curve(y_true, prob)
        p, r, thresh = p[::-1], r[::-1], thresh[::-1]  # reverse order of results
        self.tsg_precision_array[self.num_pred, :] = interp(self.tsg_recall_array, r, p)
Project: nmmn    Author: rsnemmen    | project source | file source
def findlum(self,x):
        """
    Given the input frequency in units of log(nu/Hz), returns the list [log(nu),
    Lnu] with the frequency and luminosity closest to the specified value.

    >>> lognu,lum=s.findlum(14)

    will look for the freq. and lum. nearest nu=10^14 Hz
        """
        # Performs interpolation before the search
        if not hasattr(self, 'nlnui'): self.interp()

        # Looks for the frequency
        i=lsd.search(x,self.lognui) # index

        return [self.lognui[i], self.nlnui[i]/self.nui[i]]
Project: nmmn    Author: rsnemmen    | project source | file source
def ion(self):
        """
    Calculates the rate of ionizing photons in the SED.

    >>> q=s.ion()
        """
        import scipy.integrate
        import scipy.stats
        h=6.62607e-27   # Planck constant in CGS

        # Performs interpolation before integrating. This is a precaution in
        # case the user specifies weird integration limits.
        if not hasattr(self, 'nlnui'): self.interp()

        # 13.6 eV - "infty"
        xi, xf = 15.52, 22.

        # Gets only the elements corresponding to ionizing frequencies
        i=numpy.where((self.lognui>=xi) & (self.lognui<=xf))
        x,y = self.lognui[i],self.lli[i]    # easier notation

        # Calculates ionizing rate using integration (trapezoidal rule)
        q=scipy.integrate.trapz(self.nlnui[i]/self.nui[i]/(h*self.nui[i]), self.nui[i])

        return q
Project: nmmn    Author: rsnemmen    | project source | file source
def sum(seds):
    """
Given a list of SEDs previously interpolated in the same binning, 
sums their luminosities and returns a SED object with the sum.

>>> ss=sum([s,s1,s2])

returns the SED ss <- [lognu, s+s1+s2],
where s+s1+s2 -> log10[nuLnu(s)+nuLnu(s1)+nuLnu(s2)], lognu being the common 
units of frequency for the SEDs after interpolation.

The method is designed to automatically call the interp method for each SED
if needed.
    """
    # Precaution in case the user did not use the interp method
    seds[0].interp(seds)

    sums=numpy.zeros_like(seds[0].lognui)   # initializes the sum

    for sed in seds:
        sums=sums+sed.nlnui

    return SED(lognu=seds[0].lognui, ll=numpy.log10(sums), logfmt=1)
Project: glasstone    Author: GOFAI    | project source | file source
def _soviet_summary(actual_yield, scale_range):
    keys = list(_soviet_summary_x.keys())
    keys.sort() # keys may be returned in arbitrary order
    if keys[-1] < actual_yield or actual_yield < keys[0]:
        raise ValueOutsideGraphError(actual_yield)
    for k in range(len(keys) - 1):
        k1 = keys[k]
        k2 = keys[k + 1]
        if k1 <= actual_yield <= k2:
            xs1 = _soviet_summary_x[k1]
            ys1 = _soviet_summary_y[k1]
            xs2 = _soviet_summary_x[k2]
            ys2 = _soviet_summary_y[k2]
            if xs1[-1] < scale_range or scale_range < xs1[0] or xs2[-1] < scale_range or scale_range < xs2[0]:
                raise ValueOutsideGraphError(scale_range)
            y1 = np.interp(scale_range, xs1, ys1)
            y2 = np.interp(scale_range, xs2, ys2)
            return 10**np.interp(actual_yield, [k1, k2], [y1, y2])
Project: glasstone    Author: GOFAI    | project source | file source
def _soviet_neutron(actual_yield, scale_range):
    keys = list(_soviet_neutron_x.keys())
    keys.sort() # keys may be returned in arbitrary order
    if keys[-1] < actual_yield or actual_yield < keys[0]:
        raise ValueOutsideGraphError(actual_yield)
    for k in range(len(keys) - 1):
        k1 = keys[k]
        k2 = keys[k + 1]
        if k1 <= actual_yield <= k2:
            xs1 = _soviet_neutron_x[k1]
            ys1 = _soviet_neutron_y[k1]
            xs2 = _soviet_neutron_x[k2]
            ys2 = _soviet_neutron_y[k2]
            if xs1[-1] < scale_range or scale_range < xs1[0] or xs2[-1] < scale_range or scale_range < xs2[0]:
                raise ValueOutsideGraphError(scale_range)
            y1 = np.interp(scale_range, xs1, ys1)
            y2 = np.interp(scale_range, xs2, ys2)
            return 10**np.interp(actual_yield, [k1, k2], [y1, y2])
Project: glasstone    Author: GOFAI    | project source | file source
def _soviet_gamma(actual_yield, scale_range):
    keys = list(_soviet_gamma_x.keys())
    keys.sort() # keys may be returned in arbitrary order
    if keys[-1] < actual_yield or actual_yield < keys[0]:
        raise ValueOutsideGraphError(actual_yield)
    for k in range(len(keys) - 1):
        k1 = keys[k]
        k2 = keys[k + 1]
        if k1 <= actual_yield <= k2:
            xs1 = _soviet_gamma_x[k1]
            ys1 = _soviet_gamma_y[k1]
            xs2 = _soviet_gamma_x[k2]
            ys2 = _soviet_gamma_y[k2]
            if xs1[-1] < scale_range or scale_range < xs1[0] or xs2[-1] < scale_range or scale_range < xs2[0]:
                raise ValueOutsideGraphError(scale_range)
            y1 = np.interp(scale_range, xs1, ys1)
            y2 = np.interp(scale_range, xs2, ys2)
            return 10**np.interp(actual_yield, [k1, k2], [y1, y2])
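The three _soviet_* functions above are identical except for their data tables, so they could share one helper parameterized by the x/y dictionaries; a possible refactor sketch (the helper name is hypothetical):

def _soviet_graph(actual_yield, scale_range, xs_table, ys_table):
    keys = sorted(xs_table)
    if not keys[0] <= actual_yield <= keys[-1]:
        raise ValueOutsideGraphError(actual_yield)
    for k1, k2 in zip(keys, keys[1:]):
        if k1 <= actual_yield <= k2:
            for xs in (xs_table[k1], xs_table[k2]):
                if not xs[0] <= scale_range <= xs[-1]:
                    raise ValueOutsideGraphError(scale_range)
            y1 = np.interp(scale_range, xs_table[k1], ys_table[k1])
            y2 = np.interp(scale_range, xs_table[k2], ys_table[k2])
            return 10**np.interp(actual_yield, [k1, k2], [y1, y2])

# e.g. _soviet_gamma(y, r) becomes _soviet_graph(y, r, _soviet_gamma_x, _soviet_gamma_y)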

# These functions adjust doses on the basis of the season-dependent scales
# found in the graphs
Project: yt    Author: yt-project    | project source | file source
def __call__(self):
        # Read in the ds
        ds = load(self.data_file)
        ds.setup_deprecated_fields()
        exact = self.get_analytical_solution()

        ad = ds.all_data()
        position = ad['x']
        for k in self.fields:
            field = ad[k].d
            for xmin, xmax in zip(self.left_edges, self.right_edges):
                mask = (position >= xmin)*(position <= xmax)
                exact_field = np.interp(position[mask], exact['pos'], exact[k])
                myname = "ShockTubeTest_%s" % k
                # yield test vs analytical solution 
                yield AssertWrapper(myname, assert_allclose, field[mask], 
                                    exact_field, self.rtol, self.atol)
Project: yt    Author: yt-project    | project source | file source
def interpolate_ages(data, file_stars, interp_tb=None, interp_ages=None,
                     current_time=None):
    if interp_tb is None:
        t_stars, a_stars = read_star_field(file_stars,
                                     field="t_stars")
        # timestamp of file should match amr timestamp
        if current_time:
            tdiff = YTQuantity(b2t(t_stars), 'Gyr') - current_time.in_units('Gyr')
            if np.abs(tdiff) > 1e-4:
                mylog.info("Timestamp mismatch in star " +
                           "particle header: %s", tdiff)
        mylog.info("Interpolating ages")
        interp_tb, interp_ages = b2t(data)
        interp_tb = YTArray(interp_tb, 'Gyr')
        interp_ages = YTArray(interp_ages, 'Gyr')
    temp = np.interp(data, interp_tb, interp_ages)
    return interp_tb, interp_ages, temp
Project: GY-91_and_PiCamera_RaspberryPi    Author: mikechan0731    | project source | file source
def magic3(self):
        time_end = 5123

        real_hofong_fix_pts = pd.read_csv('20161012_HoFong/control_points_coodination.csv').sort_index(ascending=False)
        real_hofong_fix_pts['N'] = real_hofong_fix_pts['N'] - real_hofong_fix_pts['N'][129]
        real_hofong_fix_pts['E'] = real_hofong_fix_pts['E'] - real_hofong_fix_pts['E'][129] # last data name=2717, index=129


        N_diff = np.diff(real_hofong_fix_pts['N'])
        E_diff = np.diff(real_hofong_fix_pts['E'])

        hofong_deg = np.rad2deg(np.arctan2(N_diff, E_diff))
        hofong_deg = hofong_deg - hofong_deg[0]
        hofong_deg_diff = np.cumsum(np.diff(hofong_deg))

        interp_hofong = np.interp(np.arange(100), np.arange(hofong_deg_diff.size), hofong_deg_diff)


        #plt.plot(hofong_deg, label='hahaxd')
        #plt.plot(hofong_deg_diff, label='hehexd')
        plt.plot(interp_hofong)
        plt.legend()
        plt.show()
Project: GY-91_and_PiCamera_RaspberryPi    Author: mikechan0731    | project source | file source
def generate_dist_per_sec(self):
        time_end= int(np.amax(self.raw_data['time']))

        #===== acc =====
        # interpolate the x- and y-axis accelerations onto a 10 ms grid and normalize
        ax_interp_10ms = self.acc_normalize(np.interp(np.arange(0.0,time_end,0.01), self.raw_data['time'], self.raw_data['ax']))
        ay_interp_10ms = self.acc_normalize(np.interp(np.arange(0.0,time_end,0.01), self.raw_data['time'], self.raw_data['ay']))
        rxy_interp_10ms = np.sqrt(ax_interp_10ms**2 + ay_interp_10ms**2)

        plt.plot(ax_interp_10ms, c='b')
        plt.plot(ay_interp_10ms, c='g')
        plt.plot(self.detrend_1d(rxy_interp_10ms, time_lst=np.arange(0.0,time_end,0.01)), c='k')

        plt.show()

        axy, vxy, sxy = self.another_integral(rxy_interp_10ms, time_lst= np.arange(0.0,time_end,0.01))
        return axy, vxy, sxy
Project: CAAPR    Author: Stargrazer82301    | project source | file source
def convolve(self, wavelengths, densities):
        # define short names for the involved wavelength grids
        wa = wavelengths
        wb = self._Wavelengths

        # create a combined wavelength grid, restricted to the overlapping interval
        w1 = wa[ (wa>=wb[0]) & (wa<=wb[-1]) ]
        w2 = wb[ (wb>=wa[0]) & (wb<=wa[-1]) ]
        w = np.unique(np.hstack((w1,w2)))
        if len(w) < 2: return 0

        # log-log interpolate SED and transmission on the combined wavelength grid
        # (use scipy interpolation function for SED because np.interp does not support broadcasting)
        F = np.exp(interp1d(np.log(wa), _log(densities), copy=False, bounds_error=False, fill_value=0.)(np.log(w)))
        T = np.exp(np.interp(np.log(w), np.log(wb), _log(self._Transmission), left=0., right=0.))

        # perform the integration
        if self._PhotonCounter:
            return np.trapz(x=w, y=w*F*T) / self._IntegratedTransmission
        else:
            return np.trapz(x=w, y=F*T) / self._IntegratedTransmission

    ## This function calculates and returns the integrated value for a given spectral energy distribution over the
    #  filter's wavelength range,
Project: CAAPR    Author: Stargrazer82301    | project source | file source
def integrate(self, wavelengths, densities):
        # define short names for the involved wavelength grids
        wa = wavelengths
        wb = self._Wavelengths

        # create a combined wavelength grid, restricted to the overlapping interval
        w1 = wa[(wa >= wb[0]) & (wa <= wb[-1])]
        w2 = wb[(wb >= wa[0]) & (wb <= wa[-1])]
        w = np.unique(np.hstack((w1, w2)))
        if len(w) < 2: return 0

        # log-log interpolate SED and transmission on the combined wavelength grid
        # (use scipy interpolation function for SED because np.interp does not support broadcasting)
        F = np.exp(interp1d(np.log(wa), _log(densities), copy=False, bounds_error=False, fill_value=0.)(np.log(w)))
        T = np.exp(np.interp(np.log(w), np.log(wb), _log(self._Transmission), left=0., right=0.))

        # perform the integration
        if self._PhotonCounter: return np.trapz(x=w, y=w * F * T)
        else: return np.trapz(x=w, y=F * T)

## This private helper function returns the natural logarithm for positive values, and a large negative number
# (but not infinity) for zero or negative values.
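Both snippets rely on np.interp's left/right keywords, which override the default clamping outside the xp range; a brief illustration:

import numpy as np

xp, fp = [1.0, 2.0], [10.0, 20.0]
np.interp(5.0, xp, fp)              # -> 20.0, default clamps to fp[-1]
np.interp(5.0, xp, fp, right=0.0)   # -> 0.0, explicit fill value
np.interp(0.0, xp, fp, left=0.0)    # -> 0.0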
Project: CAAPR    Author: Stargrazer82301    | project source | file source
def convolve(self, wavelengths, densities):
        # define short names for the involved wavelength grids
        wa = wavelengths
        wb = self._Wavelengths

        # create a combined wavelength grid, restricted to the overlapping interval
        w1 = wa[ (wa>=wb[0]) & (wa<=wb[-1]) ]
        w2 = wb[ (wb>=wa[0]) & (wb<=wa[-1]) ]
        w = np.unique(np.hstack((w1,w2)))
        if len(w) < 2: return 0

        # log-log interpolate SED and transmission on the combined wavelength grid
        # (use scipy interpolation function for SED because np.interp does not support broadcasting)
        F = np.exp(interp1d(np.log(wa), _log(densities), copy=False, bounds_error=False, fill_value=0.)(np.log(w)))
        T = np.exp(np.interp(np.log(w), np.log(wb), _log(self._Transmission), left=0., right=0.))

        # perform the integration
        if self._PhotonCounter:
            return np.trapz(x=w, y=w*F*T) / self._IntegratedTransmission
        else:
            return np.trapz(x=w, y=F*T) / self._IntegratedTransmission

    ## This function calculates and returns the integrated value for a given spectral energy distribution over the
    #  filter's wavelength range,
Project: CAAPR    Author: Stargrazer82301    | project source | file source
def integrate(self, wavelengths, densities):
        # define short names for the involved wavelength grids
        wa = wavelengths
        wb = self._Wavelengths

        # create a combined wavelength grid, restricted to the overlapping interval
        w1 = wa[(wa >= wb[0]) & (wa <= wb[-1])]
        w2 = wb[(wb >= wa[0]) & (wb <= wa[-1])]
        w = np.unique(np.hstack((w1, w2)))
        if len(w) < 2: return 0

        # log-log interpolate SED and transmission on the combined wavelength grid
        # (use scipy interpolation function for SED because np.interp does not support broadcasting)
        F = np.exp(interp1d(np.log(wa), _log(densities), copy=False, bounds_error=False, fill_value=0.)(np.log(w)))
        T = np.exp(np.interp(np.log(w), np.log(wb), _log(self._Transmission), left=0., right=0.))

        # perform the integration
        if self._PhotonCounter: return np.trapz(x=w, y=w * F * T)
        else: return np.trapz(x=w, y=F * T)

## This private helper function returns the natural logarithm for positive values, and a large negative number
# (but not infinity) for zero or negative values.
Project: FC    Author: JanusWind    | project source | file source
def calc_eff_area( self, v ) :

        # Note: nvn is the negation of the normalized inflow particle
        # bulk velocity, and self['dir'] is the look direction.


        # Normalize the particle velocity.

        vn  = calc_arr_norm( v )
        nvn = tuple( [ -c for c in vn ] )

        # Calculate the particle inflow angle (in degrees) relative to
        # the cup normal (i.e., the cup pointing direction).

        psi = acos( calc_arr_dot( self['dir'], nvn ) ) * 180. / pi
        if ( psi > 90. ) :
            return 0. 

        # Return the effective collecting area corresponding to "psi".

        return interp( psi, self._spec._eff_deg, self._spec._eff_area )

    #-----------------------------------------------------------------------
    # DEFINE THE FUNCTION TO CALCULATE EXPECTED MAXWELLIAN CURRENT.
    #-----------------------------------------------------------------------
Project: gps    Author: cbfinn    | project source | file source
def _set_interp_values(self):
        """
        Use iteration-based interpolation to set values of some
        schedule-based parameters.
        """
        # Compute temporal interpolation value.
        t = min((self.iteration_count + 1.0) /
                (self._hyperparams['iterations'] - 1), 1)
        # Perform iteration-based interpolation of entropy penalty.
        if type(self._hyperparams['ent_reg_schedule']) in (int, float):
            self.policy_opt.set_ent_reg(self._hyperparams['ent_reg_schedule'])
        else:
            sch = self._hyperparams['ent_reg_schedule']
            self.policy_opt.set_ent_reg(
                np.exp(np.interp(t, np.linspace(0, 1, num=len(sch)),
                                 np.log(sch)))
            )
        # Perform iteration-based interpolation of Lagrange multiplier.
        if type(self._hyperparams['lg_step_schedule']) in (int, float):
            self._hyperparams['lg_step'] = self._hyperparams['lg_step_schedule']
        else:
            sch = self._hyperparams['lg_step_schedule']
            self._hyperparams['lg_step'] = np.exp(
                np.interp(t, np.linspace(0, 1, num=len(sch)), np.log(sch))
            )
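Interpolating in log space keeps multiplicative schedules geometric rather than arithmetic between the listed values; a small check with an assumed three-entry schedule:

import numpy as np

sch = [1e-2, 1e-3, 1e-4]                    # assumed schedule
t = 0.5                                     # halfway through the iterations
np.exp(np.interp(t, np.linspace(0, 1, num=len(sch)), np.log(sch)))   # -> ~1e-3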
Project: core-framework    Author: RedhawkSDR    | project source | file source
def _update(self):
        if not self._sink:
            return False

        # Read and format data.
        data, timestamps = self._sink.retrieveData(length=self._frameSize)
        if not data:
            return
        sri = self._sink.sri
        data = self._formatData(data, sri)

        # If xdelta changes, update the X and Y ranges.
        if self._sink.sriChanged():
            # Update the X and Y ranges
            x_min, x_max = self._getXRange(sri)
            y_min, y_max = self._getYRange(sri)
            self._image.set_extent((x_min, x_max, y_max, y_min))

            # Preserve the aspect ratio based on the image size.
            x_range = x_max - x_min
            y_range = y_max - y_min
            self._plot.set_aspect(x_range/y_range*self._aspect)
            self._xdelta = sri.xdelta

        # Resample data from frame size to image size.
        height, width = self._imageData.shape
        indices_out = numpy.linspace(0, len(data)-1, width)
        indices_in = numpy.arange(len(data))
        data = numpy.interp(indices_out, indices_in, data)

        # Store the new row and update the image data.
        self._imageData[self._row] = data
        self._image.set_array(self._imageData)

        # Advance row pointer
        self._row = (self._row + 1) % height

        return True
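The same index-interpolation trick resamples any 1-D signal to a fixed number of samples; a standalone sketch:

import numpy as np

def resample(data, width):
    """Linearly resample a 1-D sequence to `width` samples."""
    indices_out = np.linspace(0, len(data) - 1, width)
    return np.interp(indices_out, np.arange(len(data)), data)

resample([0.0, 1.0, 2.0, 3.0], 7)   # -> [0., 0.5, 1., 1.5, 2., 2.5, 3.]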
Project: NeoAnalysis    Author: neoanalysis    | project source | file source
def _fourierTransform(self, x, y):
        ## Perform fourier transform. If x values are not sampled uniformly,
        ## then use np.interp to resample before taking fft.
        dx = np.diff(x)
        uniform = not np.any(np.abs(dx-dx[0]) > (abs(dx[0]) / 1000.))
        if not uniform:
            x2 = np.linspace(x[0], x[-1], len(x))
            y = np.interp(x2, x, y)
            x = x2
        f = np.fft.fft(y) / len(y)
        y = abs(f[1:len(f)//2])
        dt = x[-1] - x[0]
        x = np.linspace(0, 0.5*len(x)/dt, len(y))
        return x, y
Project: NeoAnalysis    Author: neoanalysis    | project source | file source
def _fourierTransform(self, x, y):
        ## Perform fourier transform. If x values are not sampled uniformly,
        ## then use np.interp to resample before taking fft.
        dx = np.diff(x)
        uniform = not np.any(np.abs(dx-dx[0]) > (abs(dx[0]) / 1000.))
        if not uniform:
            x2 = np.linspace(x[0], x[-1], len(x))
            y = np.interp(x2, x, y)
            x = x2
        f = np.fft.fft(y) / len(y)
        y = abs(f[1:len(f)//2])
        dt = x[-1] - x[0]
        x = np.linspace(0, 0.5*len(x)/dt, len(y))
        return x, y
Project: rain-metrics-python    Author: apendergrass    | project source | file source
def makedistplots(ppdf1,pamt1,bincrates):
    #### This is how we'll normalize to get changes per degree warming. 
    dry=ppdf1[0]*100 # Change in dry days
    # rain rates in mm/d for x-axis ticks and labeling
    otn=np.linspace(1,9,9)
    xtickrates=np.append(0,otn*.1)
    xtickrates=np.append(xtickrates,otn)
    xtickrates=np.append(xtickrates,otn*10)
    xtickrates=np.append(xtickrates,otn*100)
    xticks = np.interp(xtickrates, bincrates, range(0, len(bincrates)))  # bin numbers associated with nice round rain rates
    xticks,indices=np.unique(xticks,return_index=True)
    xtickrates=xtickrates[indices]
    ### Bin width - needed to normalize the rain amount distribution
    db = (bincrates[2] - bincrates[1]) / bincrates[1]
    ### Now we plot
    plt.figure(figsize=(4,6))
    plt.clf()
    ax=plt.subplot(211)
    plt.plot(range(0,len(pamt1)),pamt1/db, 'k')
    #plt.ylim((-.05,.15))
    plt.xlim((4,130))
    #plt.setp(ax,xticks=xticks,xticklabels=['0','0.1','','','','','','','','','','1','','','','','','','','','10','','','','','','','','','100','','','','','','','','','1000'])
    plt.setp(ax,xticks=xticks,xticklabels=[''])
    #plt.xlabel('Rain rate (mm/d)')
    plt.title('Rain amount (mm/d)')
    ax=plt.subplot(212)
    plt.plot(range(0,len(ppdf1)),ppdf1*100, 'k')
    plt.plot((0,len(ppdf1)),(0,0),'0.5')
    plt.xlim((4,130))
    ### Annotate with the dry day frequency
    ymin, ymax = ax.get_ylim()
    t=plt.text(4,ymax*0.95, "{:.1f}".format(dry)+'%')
    plt.setp(t,va='top',ha='left')
    plt.setp(ax,xticks=xticks,xticklabels=['0','0.1','','','','','','','','','','1','','','','','','','','','10','','','','','','','','','100','','','','','','','','','1000'])
    plt.xlabel('Rain rate (mm/d)')
    plt.title('Rain frequency (%)')
    plt.savefig("rainmetricdemo.pdf")
    return

### Call the function to make the rain distribution
Project: PersonalizedMultitaskLearning    Author: mitmedialab    | project source | file source
def plotROC(auc_list,fpr_list,tpr_list):
    mean_tpr = 0.0
    mean_fpr = np.linspace(0,1,100)

    plt.figure(figsize=(5,5))

    for i in range(len(fpr_list)):
        mean_tpr += np.interp(mean_fpr, fpr_list[i], tpr_list[i])
        mean_tpr[0] = 0.0
        plt.plot(fpr_list[i], tpr_list[i], lw=1, label='ROC fold %d (area = %0.2f)' % (i, auc_list[i]))

    plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck')

    mean_tpr /= len(fpr_list)
    mean_tpr[-1] = 1.0
    mean_auc = auc(mean_fpr, mean_tpr)
    plt.plot(mean_fpr, mean_tpr, 'k--', label='Mean ROC (area = %0.2f)' % mean_auc, lw=2)

    plt.xlim([-0.05, 1.05])
    plt.ylim([-0.05, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('')
    plt.legend(loc="lower right")
    plt.show()

    return mean_auc, mean_fpr, mean_tpr
Project: pyrsss    Author: butala    | project source | file source
def nan_interp(y, silent=False):
    """
    Return a new array with nans found in *y* filled with linear
    interpolants.
    """
    # Source: http://stackoverflow.com/questions/6518811/interpolate-nan-values-in-a-numpy-array
    nans, x = nan_helper(y)
    z = NP.array(y)
    if not silent and sum(nans) > 0:
        logger.warning('linear interpolation over {} NaN values'.format(sum(nans)))
    z[nans]= NP.interp(x(nans), x(~nans), z[~nans])
    return z
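For example, pairing this with the nan_helper shown at the top of this page:

import numpy as NP

y = NP.array([1.0, NP.nan, NP.nan, 4.0])
nan_interp(y)    # -> array([1., 2., 3., 4.])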
Project: MulensModel    Author: rpoleski    | project source | file source
def get_satellite_coords(self, times):
        """
        Calculate the coordinates of the satellite for given times
        using interpolation.

        Parameters :
            times: *np.ndarray* or *list of floats*
                Epochs for which satellite coordinates will be calculated.

        Returns :
            satellite_skycoord: *Astropy.coordinates.SkyCoord*
                *SkyCoord* for satellite at epochs *times*.

        """
        if self._horizons is None:
            self._horizons = Horizons(self.ephemerides_file)
        # We should add some check that all values in times are within the
        # range covered by self._horizons.time.

        x = np.interp(
                 times, self._horizons.time, self._horizons.xyz.x)
        y = np.interp(
                 times, self._horizons.time, self._horizons.xyz.y)
        z = np.interp(
                 times, self._horizons.time, self._horizons.xyz.z)

        self._satellite_skycoord = SkyCoord(
                  x=x, y=y, z=z, representation='cartesian')
        self._satellite_skycoord.representation = 'spherical'

        return self._satellite_skycoord
Project: marblecutter    Author: mojodna    | project source | file source
def apply_latitude_adjustments(pixels):
    data, (bounds, crs), _ = pixels
    (_, height, width) = data.shape

    ys = np.interp(np.arange(height), [0, height - 1], [bounds[3], bounds[1]])
    xs = np.empty_like(ys)
    xs.fill(bounds[0])

    longitudes, latitudes = warp.transform(crs, WGS84_CRS, xs, ys)

    factors = 1 / np.cos(np.radians(latitudes))

    # convert to 2d array, rotate 270º, scale data
    return PixelCollection(data * np.rot90(np.atleast_2d(factors), 3),
                           pixels.bounds)
Project: onsager_deep_learning    Author: mborgerding    | project source | file source
def nlfunc(r,sc,grid,gg,return_gradient=True):
    'returns xhat_nl = rhat_nl * interp( rhat_nl / sc,grid,gg) and optionally the gradient of xhat_nl wrt rhat_nl'
    g = r * np.interp(r/sc,grid,gg)
    if return_gradient:
        #I had some code that computed the gradient, but it was far more complicated and no faster than just computing the empirical gradient
        # technically, this computes a subgradient
        dr = sc * (grid[1]-grid[0]) * 1e-3
        dgdr = (nlfunc(r+.5*dr,sc,grid,gg,False) - nlfunc(r-.5*dr,sc,grid,gg,False)) / dr
        return (g,dgdr)
    else:
        return g
Project: onsager_deep_learning    Author: mborgerding    | project source | file source
def nlfunc_(r_,sc_,grid,gg_,return_gradient=True):
    'returns xhat_nl = rhat_nl * interp( rhat_nl / sc,grid,gg) and optionally the gradient of xhat_nl wrt rhat_nl'
    g_ = r_ * interp1d_(r_/sc_,grid,gg_)
    if return_gradient:
        #I had some code that computed the gradient, but it was far more complicated and no faster than just computing the empirical gradient
        # technically, this computes a subgradient
        dr_ = sc_ * (grid[1]-grid[0]) * 1e-3
        dgdr_ = (nlfunc_(r_+.5*dr_,sc_,grid,gg_,False) - nlfunc_(r_-.5*dr_,sc_,grid,gg_,False)) / dr_
        return (g_,dgdr_)
    else:
        return g_
Project: black_holes    Author: codeforgoodconf    | project source | file source
def interpolate_to_std_domain(wav_rest, fwav):
    stepsize = 1.03  # similar to the native step size
    data_range = [4686 - 150, 4686 + 150]
    steps = range(int((data_range[1] - data_range[0]) / stepsize) + 1)
    wav_rest_standard = [i * stepsize + data_range[0] for i in steps]
    fwav_interp = np.interp(wav_rest_standard, wav_rest, fwav)
    wav_rest = wav_rest_standard
    fwav = fwav_interp

    return wav_rest, fwav
Project: black_holes    Author: codeforgoodconf    | project source | file source
def standardize_domain(wls, fxs, wl_min, wl_max, n_samples):
    new_wls = [lerp(wl_min, wl_max, i/(n_samples-1)) for i in range(n_samples)]
    new_fxs = np.interp(new_wls, wls, fxs)
    return new_wls, new_fxs
Project: black_holes    Author: codeforgoodconf    | project source | file source
def process_fits(file_path, wls_out):
    print('processing ' + file_path)
    hdulist = fits.open(file_path)
    wls = 10 ** hdulist[1].data['loglam']
    fxs = hdulist[1].data['flux']
    z = hdulist[2].data['z']
    wls = wls / (1 + z)
    if wls_out[0] < wls[0] or wls_out[-1] > wls[-1]:
        return None
    remove_slope(wls, fxs)
    wls, fxs = gaussian_smooth(wls, fxs)
    wls, fxs = crop_data(wls, fxs, wls_out)
    fxs_out = numpy.interp(wls_out, wls, fxs)
    return fxs_out
Project: self-driving    Author: BoltzmannBrain    | project source | file source
def _prepData(dataPath, numTimesteps, normalizeData=False):
  """
  Get and preprocess the ground truth drive speeds data.

  Args:
    dataPath: (str) Path to video file.
    numTimesteps: (int) Number of timesteps to interpolate the data to.
    normalizeData: (bool) Normalize the data to [0,1].
  Returns:
    (list) Speeds, one for each timestep.
  """
  with open(dataPath, "rb") as infile:
    driveData = json.load(infile)

  # Prep data: make sure it's in order, and use relative position (b/c seconds
  # values may be incorrect).
  driveData.sort(key = lambda x: x[0])
  dataSpeeds = np.array([d[1] for d in driveData])
  dataTimes = np.array([d[0] for d in driveData])
  dataPositions = ( (dataTimes - dataTimes.min()) /
                   (dataTimes.max() - dataTimes.min()) )
  if normalizeData:
    dataSpeeds = normalize(dataSpeeds)

  # Linearly interpolate data to the number of video frames.
  return np.interp(np.arange(0.0, 1.0, 1.0 / numTimesteps),
                   dataPositions,
                   dataSpeeds)
Project: self-driving    Author: BoltzmannBrain    | project source | file source
def prepTruthData(dataPath, numFrames, normalizeData=False):
  """
  Get and preprocess the ground truth drive speeds data.

  Args:
    dataPath: (str) Path to JSON of ground truths.
    numFrames: (int) Number of timesteps to interpolate the data to.
    normalizeData: (bool) Normalize the data to [0,1].
  Returns:
    (list) Linearly interpolated truth values, one for each timestep.
  """
  with open(dataPath, "rb") as infile:
    driveData = json.load(infile)

  # Prep data: make sure it's in order, and use relative position (b/c seconds
  # values may be incorrect)
  driveData.sort(key = lambda x: x[0])
  times = np.zeros(len(driveData))
  speeds = np.zeros(len(driveData))
  for i, (time, speed) in enumerate(driveData):
    times[i] = time
    speeds[i] = speed
  positions = (times - times.min()) / (times.max() - times.min())

  if normalizeData:
    speeds = normalize(speeds)

  # Linearly interpolate the data to the number of video frames
  return np.interp(np.arange(0.0, 1.0, 1.0/numFrames), positions, speeds)
Project: radar    Author: amoose136    | project source | file source
def test_exceptions(self):
        assert_raises(ValueError, interp, 0, [], [])
        assert_raises(ValueError, interp, 0, [0], [1, 2])
        assert_raises(ValueError, interp, 0, [0, 1], [1, 2], period=0)
        assert_raises(ValueError, interp, 0, [], [], period=360)
        assert_raises(ValueError, interp, 0, [0], [1, 2], period=360)
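The period keyword exercised above enables interpolation of periodic coordinates such as angles, wrapping xp around the period instead of clamping at the ends; an example adapted from the numpy documentation:

import numpy as np

xp = [190, -190, 350, -350]     # longitudes, unsorted and wrapped
fp = [5, 10, 3, 4]
np.interp([-180, 185], xp, fp, period=360)   # xp is normalized mod 360 first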
Project: radar    Author: amoose136    | project source | file source
def test_basic(self):
        x = np.linspace(0, 1, 5)
        y = np.linspace(0, 1, 5)
        x0 = np.linspace(0, 1, 50)
        assert_almost_equal(np.interp(x0, x, y), x0)