Python scipy.interpolate module: UnivariateSpline() example source code

The following 31 code examples, extracted from open-source Python projects, illustrate how to use scipy.interpolate.UnivariateSpline().
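
For reference, here is a minimal, self-contained sketch of the basic API that the examples below rely on (the data are made up for illustration):

import numpy as np
from scipy.interpolate import UnivariateSpline

# Noisy samples of a smooth function (illustrative data)
x = np.linspace(0, 10, 50)
y = np.sin(x) + np.random.normal(scale=0.1, size=x.size)

# k is the spline degree (default 3, i.e. cubic); s controls smoothing,
# with s=0 forcing interpolation through every point.
spl = UnivariateSpline(x, y, k=3, s=0.5)

x_new = np.linspace(0, 10, 500)
y_new = spl(x_new)             # evaluate the spline
dy = spl.derivative()(x_new)   # first derivative
zero_crossings = spl.roots()   # zeros of the smoothed curve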

Project: CRIkit2    Author: CoherentRamanNIST
def fcn(self, data_in):
        """
        Returns a shifted version of the input spectrum to mimic the effect
        of calibration. (Real calibration doesn't shift the spectrum, but 
        rather the independent variable)

        """
        orig_wn = _calib_pix_wn(self.parameters['orig_calib_dict'])[0]
        new_wn = _calib_pix_wn(self.parameters['new_calib_dict'])[0]

        if data_in.ndim == 1:
            spl = _UnivariateSpline(new_wn, data_in, s=0, ext=0)
            output = spl(orig_wn)
        elif data_in.ndim == 2:
            output = _np.zeros(data_in.shape)
            for num, spect in enumerate(data_in):
                spl = _UnivariateSpline(new_wn, spect, s=0, ext=0)
                output[num,:] = spl(orig_wn)
        return output
Project: gullikson-scripts    Author: kgullikson88
def integral(x, y, I, k=10):
    """
    Find the value(s) of a such that the integral of y = f(x) from x = 0 to x = a equals I.
    I can be an array.

    Returns the values of a that are found.
    """
    I = np.atleast_1d(I)

    f = UnivariateSpline(x, y, s=k)

    # Integrate as a function of x
    F = f.antiderivative()
    Y = F(x)

    a = []
    for intval in I:
        F2 = UnivariateSpline(x, Y/Y[-1] - intval, s=0)
        a.append(F2.roots())

    return np.hstack(a)
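
A self-contained sketch of the same idea with made-up data: take the antiderivative of an interpolating spline, normalize it, and use roots() of a shifted spline to find where the cumulative integral reaches a target fraction.

import numpy as np
from scipy.interpolate import UnivariateSpline

x = np.linspace(0, 5, 200)
y = np.exp(-x)                      # illustrative integrand

f = UnivariateSpline(x, y, s=0)     # interpolating spline
F = f.antiderivative()              # cumulative integral F(x) - F(0)
Y = F(x)

# Find a such that the integral from 0 to a is half of the total
target = 0.5
F2 = UnivariateSpline(x, Y / Y[-1] - target, s=0)
print(F2.roots())                   # close to ln(2) for this integrand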
Project: QuantEcon.lectures.code    Author: QuantEcon
def fit_policy_function(self, PF):
        '''
        Fits the policy functions PF at the points xgrid using UnivariateSpline
        '''
        xgrid, S = self.xgrid, self.S

        Vf, cf, nf, xprimef = {}, {}, {}, {}
        for s in range(S):
            PFvec = np.vstack(map(lambda x: PF(x, s), xgrid))
            Vf[s] = UnivariateSpline(xgrid, PFvec[:, 0], s=0)
            cf[s] = UnivariateSpline(xgrid, PFvec[:, 1], s=0, k=1)
            nf[s] = UnivariateSpline(xgrid, PFvec[:, 2], s=0, k=1)
            for sprime in range(S):
                xprimef[s, sprime] = UnivariateSpline(
                    xgrid, PFvec[:, 3 + sprime], s=0, k=1)

        return Vf, [cf, nf, xprimef]
Project: QuantEcon.lectures.code    Author: QuantEcon
def fit_policy_function(self,PF):
        '''
        Fits the policy functions PF at the points xgrid using UnivariateSpline
        '''
        xgrid,S = self.xgrid,self.S

        Vf,cf,nf,xprimef = {},{},{},{}
        for s in range(S):
            PFvec = np.vstack(map(lambda x:PF(x,s),xgrid))
            Vf[s] = UnivariateSpline(xgrid,PFvec[:,0],s=0)
            cf[s] = UnivariateSpline(xgrid,PFvec[:,1],s=0,k=1)
            nf[s] = UnivariateSpline(xgrid,PFvec[:,2],s=0,k=1)
            for sprime in range(S):
                xprimef[s,sprime] = UnivariateSpline(xgrid,PFvec[:,3+sprime],s=0,k=1)

        return Vf,[cf,nf,xprimef]
Project: QuantEcon.lectures.code    Author: QuantEcon
def fit_policy_function(self,PF):
        '''
        Fits the policy functions PF at the points xgrid using UnivariateSpline
        '''
        xgrid,S = self.xgrid,self.S

        Vf,cf,nf,xprimef = {},{},{},{}
        for s in range(S):
            PFvec = np.vstack(map(lambda x:PF(x,s),xgrid))
            Vf[s] = UnivariateSpline(xgrid,PFvec[:,0],s=0)
            cf[s] = UnivariateSpline(xgrid,PFvec[:,1],s=0,k=1)
            nf[s] = UnivariateSpline(xgrid,PFvec[:,2],s=0,k=1)
            for sprime in range(S):
                xprimef[s,sprime] = UnivariateSpline(xgrid,PFvec[:,3+sprime],s=0,k=1)

        return Vf,[cf,nf,xprimef]
Project: specviz    Author: spacetelescope
def evaluate(self, x, degree, smooth):
        """
        Evaluate the spline

        Parameters
        ----------
        x: numpy.ndarray
            The wavelengths to evaluate over.

        degree: int
            The degree of spline to evaluate.

        smooth: float or None
            The smoothing factor used to choose the number of knots.

        Returns
        -------
        The evaluated spline
        """
        _f = UnivariateSpline(self.wave, self.flux,
                              k=degree,
                              s=smooth)
        return _f(x)
Project: plasma    Author: jnkh
def resample_signal(t,sig,tmin,tmax,dt):
    order = argsort(t)
    t = t[order]
    sig = sig[order]
    tt = arange(tmin,tmax,dt)
    f = UnivariateSpline(t,sig,s=0,k=1,ext=0)
    sig_interp = f(tt)

    if(any(isnan(sig_interp))):
        print("signals contains nan")
    if(any(t[1:] - t[:-1] <= 0)):
        print("non increasing")
        idx = where(t[1:] - t[:-1] <= 0)[0][0]
        print(t[idx-10:idx+10])

    return tt,sig_interp
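
A self-contained sketch of the same resampling idea with synthetic data: fit a degree-1 interpolating spline to irregularly sampled data and evaluate it on a uniform time base.

import numpy as np
from scipy.interpolate import UnivariateSpline

t = np.sort(np.random.uniform(0, 10, 40))       # irregular sample times
sig = np.cos(t)

f = UnivariateSpline(t, sig, s=0, k=1, ext=0)   # piecewise-linear interpolant
tt = np.arange(0, 10, 0.1)                      # uniform time grid
sig_interp = f(tt)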
Project: toppra    Author: hungpham2511
def __init__(self, ss, qs):
        """ All arguments are simiar to SplineInterpolator.
        """
        qs = np.array(qs)
        self.dof = qs.shape[1]
        self.uspl = []
        for i in range(self.dof):
            self.uspl.append(UnivariateSpline(ss, qs[:, i]))
        self.uspld = [spl.derivative() for spl in self.uspl]
        self.uspldd = [spl.derivative() for spl in self.uspld]
Project: muscle-plotter    Author: PedroLopes
def calculate_target_coordinates(self):

        x_variables = np.arange(self.major_axis_span, dtype=int)
        xp = [p[0] for p in self.points]
        fp = [p[1] for p in self.points]
        fitline = ip.UnivariateSpline(xp, fp)
        return self.prepare_coordinates(x_variables, fitline(x_variables))
Project: dexml    Author: DexGroves
def fit_spline_generator(k, s):
    def fit_spline(x, y):
        return UnivariateSpline(x, y, k=k, s=s)

    return fit_spline
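
A minimal usage sketch, assuming the fit_spline_generator factory above is in scope (the data are made up): the factory pre-binds the spline degree k and smoothing s, so the returned callable only takes the data.

import numpy as np

x = np.linspace(0, 1, 20)
y = x ** 2

fit_cubic = fit_spline_generator(k=3, s=0)   # factory defined above
spl = fit_cubic(x, y)
print(spl(0.5))                              # approximately 0.25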
Project: imgProcessor    Author: radjkarl
def MTF50(self, MTFx,MTFy):
        '''
        return object resolution as [line pairs/mm]
               where MTF=50%
               see http://www.imatest.com/docs/sharpness/
        '''
        if self.mtf_x is None:
            self.MTF()
        f = UnivariateSpline(self.mtf_x, self.mtf_y-0.5)
        return f.roots()[0]
Project: CRIkit2    Author: CoherentRamanNIST
def fcn(self, data_in):
        """
        Performs the KK.

        Parameters
        ----------
            data : list
                data[0] : Wavenumber vector
                data[1] : NRB spectrum(a)
                data[2] : CARS spectrum(a)

        Returns
        -------
            out : np.array
                Imaginary component of the KK-retrieved spectrum(a)

        See also
        --------
        crikit.process.phase_retr, crikit.process.maths.kk

        """
        if data_in.ndim == 1:
            spl = _UnivariateSpline(self.WN_2, data_in, s=0, ext=0)
            output = spl(self.WN)
        elif data_in.ndim == 2:
            output = _np.zeros(data_in.shape)
            for num, spect in enumerate(data_in):
                spl = _UnivariateSpline(self.WN_2, spect, s=0, ext=0)
                output[num,:] = spl(self.WN)
        return output
        #return data_in
Project: CRIkit2    Author: CoherentRamanNIST
def calculate(self, signal):
        sig_shape = signal.shape  # Shape of input signal
        sig_size = signal.shape[-1]  # Length of spectral axis

        # N signals to detrend
        sig_n_to_detrend = int(signal.size/signal.shape[-1])

        tmr = _timeit.default_timer()
        if self.redux == 1:
            output = self._calc(signal)
        else:  # Sub-sample
            # Dummy indep variable
            x = _np.arange(sig_size)
            x_sub = _np.linspace(x[0], x[-1], _np.round(x.size / 
                                 self.redux).astype(_np.integer))

            sub_shape = list(sig_shape)
            sub_shape[-1] = x_sub.size

            signal_sampled = _np.zeros(sub_shape)

            # Spline interpolation/sub-sampling
            for coords in _np.ndindex(signal.shape[0:-1]):
                spl = _USpline(x,signal[coords],s=0)
                signal_sampled[coords] = spl(x_sub)

            # Baseline from sub-sampled signal
            output_sampled = self._calc(signal_sampled)

            output = _np.zeros(signal.shape)
            # Spline interpolation/super-sampling
            for coords in _np.ndindex(output_sampled.shape[0:-1]):
                spl2 = _USpline(x_sub,output_sampled[coords],s=0)
                output[coords] = spl2(x)

        tmr -= _timeit.default_timer()
        self.t = -tmr
        self.t_per_iter = self.t/sig_n_to_detrend

        return output
Project: gullikson-scripts    Author: kgullikson88
def make_new_interpolator(self, filename=get_data('Pecaut2013.tsv')):
        df = pandas.read_csv(filename, skiprows=55, sep='|', engine='python')[2:-1]
        sptnum = [self.MS.SpT_To_Number(s.strip()[:-1]) for s in df.SpT.values]
        self.sptnum_to_teff = UnivariateSpline(sptnum, df.Teff.values, s=0)
Project: gullikson-scripts    Author: kgullikson88
def Interpolate_Old(self, dictionary, SpT):
        #First, we must convert the relations above into a monotonically increasing system
        #Just add ten when we get to each new spectral type
        relation = DataStructures.xypoint(len(dictionary))

        # Strip the spectral type of the luminosity class information
        SpT = re.search('[A-Z]([0-9]\.?[0-9]*)', SpT).group()

        xpoints = []
        ypoints = []
        for key, index in zip(dictionary, range(len(dictionary))):
            #Convert key to a number
            xpoints.append(self.SpT_To_Number(key))
            ypoints.append(dictionary[key])

        sorting_indices = [i[0] for i in sorted(enumerate(xpoints), key=lambda x: x[1])]
        for index in range(len(dictionary)):
            i = sorting_indices[index]
            relation.x[index] = xpoints[i]
            relation.y[index] = ypoints[i]

        RELATION = UnivariateSpline(relation.x, relation.y, s=0)

        spnum = self.SpT_To_Number(SpT)
        if spnum > 0:
            return RELATION(spnum)
        else:
            return np.nan
Project: gullikson-scripts    Author: kgullikson88
def fwhm(x, y, k=10, ret_roots=False):
    """
    Determine the full-width at half-maximum of a peaked set of points, x and y.

    Assumes that there is only one peak present in the dataset.  The function
    uses a spline interpolation with smoothing parameter k ('s' in scipy.interpolate.UnivariateSpline).

    If ret_roots=True, return the x-locations at half maximum instead of just
    the distance between them.
    """


    class NoPeaksFound(Exception):
        pass

    half_max = np.max(y) / 2.0
    s = UnivariateSpline(x, y - half_max, s=k)
    roots = s.roots()

    if len(roots) > 2:
        # Multiple peaks. Use the two that straddle the maximum value
        maxvel = x[np.argmax(y)]
        left_idx = np.argmin(maxvel - roots)
        right_idx = np.argmin(roots - maxvel)
        roots = np.array((roots[left_idx], roots[right_idx]))
    elif len(roots) < 2:
        raise NoPeaksFound("No proper peaks were found in the data set; likely "
                           "the dataset is flat (e.g. all zeros).")
    if ret_roots:
        return roots[0], roots[1]

    return abs(roots[1] - roots[0])
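
A self-contained sketch of the underlying technique with a synthetic peak: subtract half the maximum, fit an interpolating spline, and read the FWHM off its roots.

import numpy as np
from scipy.interpolate import UnivariateSpline

x = np.linspace(-5, 5, 200)
y = np.exp(-x**2 / 2.0)                  # Gaussian peak with sigma = 1

half_max = y.max() / 2.0
s = UnivariateSpline(x, y - half_max, s=0)
roots = s.roots()
print(abs(roots[1] - roots[0]))          # ~2.355, i.e. 2*sqrt(2*ln 2)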
Project: QuantEcon.lectures.code    Author: QuantEcon
def __call__(self, xgrid, Fs):
        shape, m = Fs.shape[:-1], Fs.shape[-1]
        Fs = Fs.reshape((-1, m))
        F = []
        for Fhat in Fs:
            F.append(UnivariateSpline(xgrid, Fhat, k=self.k, s=self.s))
        return interpolate_wrapper(np.array(F).reshape(shape))
Project: QuantEcon.lectures.code    Author: QuantEcon
def bellman_operator(pgrid, c, f0, f1, L0, L1, J):
    """
    Evaluates the value function for a given continuation value
    function; that is, evaluates

        J(p) = min((1 - p) L0, p L1, c + E J(p'))

    Uses linear interpolation between points.
    """
    m = np.size(pgrid)
    assert m == np.size(J)

    J_out = np.zeros(m)
    J_interp = interp.UnivariateSpline(pgrid, J, k=1, ext=0)

    for (p_ind, p) in enumerate(pgrid):
        # Payoff of choosing model 0
        p_c_0 = expect_loss_choose_0(p, L0)
        p_c_1 = expect_loss_choose_1(p, L1)
        p_con = expect_loss_cont(p, c, f0, f1, J_interp)

        J_out[p_ind] = min(p_c_0, p_c_1, p_con)

    return J_out


#  == Now run at given parameters == #

#  First set up distributions
Project: QuantEcon.lectures.code    Author: QuantEcon
def bellman_operator(self, J):
        """
        Evaluates the value function for a given continuation value
        function; that is, evaluates

            J(p) = min(pL0, (1-p)L1, c + E[J(p')])

        Uses linear interpolation between points
        """
        payoff_choose_f0 = self.payoff_choose_f0
        payoff_choose_f1 = self.payoff_choose_f1
        payoff_continue = self.payoff_continue
        c, L0, L1, f0, f1 = self.c, self.L0, self.L1, self.f0, self.f1
        m, pgrid = self.m, self.pgrid

        J_out = np.empty(m)
        J_interp = interp.UnivariateSpline(pgrid, J, k=1, ext=0)

        for (p_ind, p) in enumerate(pgrid):
            # Payoff of choosing model 0
            p_c_0 = payoff_choose_f0(p)
            p_c_1 = payoff_choose_f1(p)
            p_con = payoff_continue(p, J_interp)

            J_out[p_ind] = min(p_c_0, p_c_1, p_con)

        return J_out
Project: flight-data-processor    Author: junzis
def filter(self, X, Y):
        X, Y = self.sortxy(X, Y)

        # using gaussian kernel to get a better variances
        avg, var = self.kernel(Y)
        spl = UnivariateSpline(X, Y, k=self.k, w=1/np.sqrt(var))

        if self.interpolate:
            xmax = X[-1]
            Xfull = np.arange(xmax)
            Yfull = spl(Xfull)
            return Xfull, Yfull
        else:
            Y1 = spl(X)
            return X, Y1
Project: PicFilter    Author: dhuadaar
def _create_LUT_8UC1(self, x, y):
        spl = UnivariateSpline(x, y)
        return spl(xrange(256))
Project: PicFilter    Author: dhuadaar
def _create_LUT_8UC1(self, x, y):
        spl = UnivariateSpline(x, y)
        return spl(xrange(256))
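
Note that xrange is Python 2; a self-contained Python 3 sketch of the same lookup-table idea, with illustrative control points:

import numpy as np
from scipy.interpolate import UnivariateSpline

# Control points mapping input intensity to output intensity (illustrative)
x = [0, 64, 128, 192, 256]
y = [0, 70, 140, 210, 256]

spl = UnivariateSpline(x, y)
lut = np.clip(spl(range(256)), 0, 255).astype(np.uint8)   # 8-bit lookup table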
Project: CAAPR    Author: Stargrazer82301
def generate_milky_way_attenuations(wavelength_min, wavelength_max, Nsamp):

    # Parameter values from Fitzpatrick & Massa 2007, table 5.
    x0 = 4.592
    gamma = 0.922
    c1 = -0.175
    c2 = 0.807
    c3 = 2.991
    c4 = 0.319
    c5 = 6.097
    O1 = 2.055
    O2 = 1.322
    O3 = 0.0
    k_ir = 1.057
    Rv = 3.001

    wl_UV = np.logspace(np.log10(wavelength_min),np.log10(0.2700),Nsamp/2) # UV part stops at 0.27 micron
    wl_ir = np.logspace(np.log10(0.2700),np.log10(wavelength_max),Nsamp/2) # optical-IR part starts at 0.27 micron
    idx = (np.abs(wl_ir-0.550)).argmin() # index closest to V band = 0.55 micron
    idx_U2 = (np.abs(wl_UV-0.2700)).argmin() # index closest to U2 band = 0.27 micron
    idx_U1 = (np.abs(wl_UV-0.2600)).argmin() # index closest to U1 band = 0.26 micron

    # construct UV attenuation curve
    x = 1./wl_UV
    D = Lorentzian(x, x0, gamma)
    k_UV = np.zeros(Nsamp/2)

    for i in range(0,len(x)):
        if x[i] <= c5:
            k_UV[i] = c1 + c2*x[i] + c3*D[i]
        else:
            k_UV[i] = c1 + c2*x[i] + c3*D[i] + c4*(x[i]-c5)**2

    # construct ir attenuation curve
    sample_wl = np.array([10000., 4., 2., 1.3333, 0.5530, 0.4000, 0.3300, 0.2700, 0.2600])
    sample_k = np.append( k_ir*sample_wl[0:4]**-1.84 - Rv, [O3,O2,O1, k_UV[idx_U2], k_UV[idx_U1]])

    spline = interpolate.UnivariateSpline(1./sample_wl,sample_k)
    k_ir = spline(1./wl_ir)

    wl    = np.append(wl_UV,wl_ir)
    Al_Av = np.append(k_UV,k_ir)/Rv + 1.
    return wl, Al_Av

# -----------------------------------------------------------------
Project: CAAPR    Author: Stargrazer82301
def makeMilkyWayAtt(minWl, maxWl,Nsamp):

    # Parameter values from Fitzpatrick & Massa 2007, table 5.
    x0 = 4.592
    gamma = 0.922
    c1 = -0.175
    c2 = 0.807
    c3 = 2.991
    c4 = 0.319
    c5 = 6.097
    O1 = 2.055
    O2 = 1.322
    O3 = 0.0
    k_ir = 1.057
    Rv = 3.001
    #Rv = 5.5

    wl_UV = np.logspace(np.log10(minWl),np.log10(0.2700),Nsamp/2) # UV part stops at 0.27 micron
    wl_ir = np.logspace(np.log10(0.2700),np.log10(maxWl),Nsamp/2) # optical-IR part starts at 0.27 micron
    idx = (np.abs(wl_ir-0.550)).argmin() # index closest to V band = 0.55 micron
    idx_U2 = (np.abs(wl_UV-0.2700)).argmin() # index closest to U2 band = 0.27 micron
    idx_U1 = (np.abs(wl_UV-0.2600)).argmin() # index closest to U1 band = 0.26 micron

    # construct UV attenuation curve
    x = 1./wl_UV
    D = Lorentzian(x,x0,gamma)
    k_UV = np.zeros(Nsamp/2)

    for i in range(0,len(x)):
        if x[i] <= c5:
            k_UV[i] = c1 + c2*x[i] + c3*D[i]
        else:
            k_UV[i] = c1 + c2*x[i] + c3*D[i] + c4*(x[i]-c5)**2

    # construct ir attenuation curve
    sample_wl = np.array([10000., 4., 2., 1.3333, 0.5530, 0.4000, 0.3300, 0.2700, 0.2600])
    sample_k = np.append( k_ir*sample_wl[0:4]**-1.84 - Rv, [O3,O2,O1, k_UV[idx_U2], k_UV[idx_U1]])

    spline = interpolate.UnivariateSpline(1./sample_wl,sample_k)
    k_ir = spline(1./wl_ir)

    wl    = np.append(wl_UV,wl_ir)
    Al_Av = np.append(k_UV,k_ir)/Rv + 1.
    return wl, Al_Av
Project: CAAPR    Author: Stargrazer82301
def generate_milky_way_attenuations(wavelength_min, wavelength_max, Nsamp):

    # Parameter values from Fitzpatrick & Massa 2007, table 5.
    x0 = 4.592
    gamma = 0.922
    c1 = -0.175
    c2 = 0.807
    c3 = 2.991
    c4 = 0.319
    c5 = 6.097
    O1 = 2.055
    O2 = 1.322
    O3 = 0.0
    k_ir = 1.057
    Rv = 3.001

    wl_UV = np.logspace(np.log10(wavelength_min),np.log10(0.2700),Nsamp/2) # UV part stops at 0.27 micron
    wl_ir = np.logspace(np.log10(0.2700),np.log10(wavelength_max),Nsamp/2) # optical-IR part starts at 0.27 micron
    idx = (np.abs(wl_ir-0.550)).argmin() # index closest to V band = 0.55 micron
    idx_U2 = (np.abs(wl_UV-0.2700)).argmin() # index closest to U2 band = 0.27 micron
    idx_U1 = (np.abs(wl_UV-0.2600)).argmin() # index closest to U1 band = 0.26 micron

    # construct UV attenuation curve
    x = 1./wl_UV
    D = Lorentzian(x, x0, gamma)
    k_UV = np.zeros(Nsamp/2)

    for i in range(0,len(x)):
        if x[i] <= c5:
            k_UV[i] = c1 + c2*x[i] + c3*D[i]
        else:
            k_UV[i] = c1 + c2*x[i] + c3*D[i] + c4*(x[i]-c5)**2

    # construct ir attenuation curve
    sample_wl = np.array([10000., 4., 2., 1.3333, 0.5530, 0.4000, 0.3300, 0.2700, 0.2600])
    sample_k = np.append( k_ir*sample_wl[0:4]**-1.84 - Rv, [O3,O2,O1, k_UV[idx_U2], k_UV[idx_U1]])

    spline = interpolate.UnivariateSpline(1./sample_wl,sample_k)
    k_ir = spline(1./wl_ir)

    wl    = np.append(wl_UV,wl_ir)
    Al_Av = np.append(k_UV,k_ir)/Rv + 1.
    return wl, Al_Av

# -----------------------------------------------------------------
Project: imgProcessor    Author: radjkarl
def adjustUncertToExposureTime(facExpTime, uncertMap, evtLenMap):
    '''
    Adjust image uncertainty (measured at exposure time t0)
    to new exposure time

    facExpTime --> new exp.time / reference exp.time =(t/t0)
    uncertMap --> 2d array mapping image uncertainty

    evtLen --> 2d array mapping event duration within image [sec]
                event duration is relative to exposure time
                e.g. duration = 2 means event is 2x longer than 
                exposure time

    More information can be found at ...
    ----
    K.Bedrich: Quantitative Electroluminescence Imaging, PhD Thesis, 2017
    Subsection 5.1.4.3: Exposure Time Dependency
    ----
    '''

    #fit parameters, obtained from ####[simulateUncertDependencyOnExpTime]
    params =  np.array( 
        #a                 facExpTime        f_0             f_inf         
     [[  2.63017121e+00,   3.05873627e-01,   1.00000000e+01, 2.78233309e-01],
      [  2.26467931e+00,   2.86206621e-01,   8.01396977e+00, 2.04089232e-01],
      [  1.27361168e+00,   5.18377189e-01,   3.04180084e+00, 2.61396338e-01],
      [  7.34546040e-01,   7.34549823e-01,   1.86507345e+00, 2.77563156e-01],
      [  3.82715618e-01,   9.32410141e-01,   1.34510254e+00, 2.91228149e-01],
      [  1.71166071e-01,   1.14092885e+00,   1.11243702e+00, 3.07947386e-01],
      [  6.13455410e-02,   1.43802520e+00,   1.02995065e+00, 3.93920802e-01],
      [  1.65383071e-02,   1.75605076e+00,   1.00859395e+00, 5.02132321e-01],
      [  4.55800114e-03,   1.99855711e+00,   9.98819118e-01, 5.99572776e-01]])

    #event duration relative to exposure time:(1/16...16)
    dur = np.array([  0.0625,   0.125 ,   0.25  ,   
                      0.5   ,   1.    ,   2.    ,
                      4.    ,   8.    ,   16.    ])
    #get factors from interpolation:
    a = UnivariateSpline(dur, params[:, 0], k=3, s=0)
    b = UnivariateSpline(dur, params[:, 1], k=3, s=0)
    start = UnivariateSpline(dur, params[:, 2], k=3, s=0)
    end = UnivariateSpline(dur, params[:, 3], k=3, s=0)
    p0 = a(evtLenMap), b(evtLenMap), start(evtLenMap), end(evtLenMap)
    #uncertainty for new exposure time:
    out = uncertMap * _fitfn(facExpTime, *p0)
    # wherever there are NO EVENTS --> scale uncert. as if it were
    # normally distributed:
    i = evtLenMap == 0
    out[i] = uncertMap[i] * (1 / facExpTime)**0.5
    return out
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda    Author: SignalMedia
def _interpolate_scipy_wrapper(x, y, new_x, method, fill_value=None,
                               bounds_error=False, order=None, **kwargs):
    """
    passed off to scipy.interpolate.interp1d. method is scipy's kind.
    Returns an array interpolated at new_x.  Add any new methods to
    the list in _clean_interp_method
    """
    try:
        from scipy import interpolate
        # TODO: Why is DatetimeIndex being imported here?
        from pandas import DatetimeIndex  # noqa
    except ImportError:
        raise ImportError('{0} interpolation requires Scipy'.format(method))

    new_x = np.asarray(new_x)

    # ignores some kwargs that could be passed along.
    alt_methods = {
        'barycentric': interpolate.barycentric_interpolate,
        'krogh': interpolate.krogh_interpolate,
        'piecewise_polynomial': interpolate.piecewise_polynomial_interpolate,
    }

    if getattr(x, 'is_all_dates', False):
        # GH 5975, scipy.interp1d can't handle datetime64s
        x, new_x = x._values.astype('i8'), new_x.astype('i8')

    try:
        alt_methods['pchip'] = interpolate.pchip_interpolate
    except AttributeError:
        if method == 'pchip':
            raise ImportError("Your version of scipy does not support "
                              "PCHIP interpolation.")

    interp1d_methods = ['nearest', 'zero', 'slinear', 'quadratic', 'cubic',
                        'polynomial']
    if method in interp1d_methods:
        if method == 'polynomial':
            method = order
        terp = interpolate.interp1d(x, y, kind=method, fill_value=fill_value,
                                    bounds_error=bounds_error)
        new_y = terp(new_x)
    elif method == 'spline':
        # GH #10633
        if not order:
            raise ValueError("order needs to be specified and greater than 0")
        terp = interpolate.UnivariateSpline(x, y, k=order, **kwargs)
        new_y = terp(new_x)
    else:
        # GH 7295: need to be able to write for some reason
        # in some circumstances: check all three
        if not x.flags.writeable:
            x = x.copy()
        if not y.flags.writeable:
            y = y.copy()
        if not new_x.flags.writeable:
            new_x = new_x.copy()
        method = alt_methods[method]
        new_y = method(x, y, new_x, **kwargs)
    return new_y
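
For context, a self-contained sketch of the 'spline' branch above, where the order argument becomes the spline degree k and any extra keyword arguments are forwarded to UnivariateSpline:

import numpy as np
from scipy import interpolate

x = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
y = x ** 2
new_x = np.array([0.5, 1.5, 2.5])

order = 2                                               # spline degree k
terp = interpolate.UnivariateSpline(x, y, k=order, s=0)
print(terp(new_x))                                      # [0.25 2.25 6.25]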
Project: QuantEcon.lectures.code    Author: QuantEcon
def solve_time1_bellman(self):
        '''
        Solve the time-1 Bellman equation for the calibration model and initial grid mugrid0
        '''
        model, mugrid0 = self.model, self.mugrid
        S = len(model.pi)

        # First get initial fit
        PP = SequentialAllocation(model)
        c, n, x, V = map(np.vstack, zip(
            *map(lambda mu: PP.time1_value(mu), mugrid0)))

        Vf, cf, nf, xprimef = {}, {}, {}, {}
        for s in range(2):
            cf[s] = UnivariateSpline(x[:, s], c[:, s])
            nf[s] = UnivariateSpline(x[:, s], n[:, s])
            Vf[s] = UnivariateSpline(x[:, s], V[:, s])
            for sprime in range(S):
                xprimef[s, sprime] = UnivariateSpline(x[:, s], x[:, s])
        policies = [cf, nf, xprimef]

        # Create xgrid
        xbar = [x.min(0).max(), x.max(0).min()]
        xgrid = np.linspace(xbar[0], xbar[1], len(mugrid0))
        self.xgrid = xgrid

        # Now iterate on bellman equation
        T = BellmanEquation(model, xgrid, policies)
        diff = 1
        while diff > 1e-5:
            PF = T(Vf)
            Vfnew, policies = self.fit_policy_function(PF)
            diff = 0
            for s in range(S):
                diff = max(diff, np.abs(
                    (Vf[s](xgrid) - Vfnew[s](xgrid)) / Vf[s](xgrid)).max())
            Vf = Vfnew

        # Store value function policies and Bellman Equations
        self.Vf = Vf
        self.policies = policies
        self.T = T
Project: QuantEcon.lectures.code    Author: QuantEcon
def solve_time1_bellman(self):
        '''
        Solve the time-1 Bellman equation for the calibration Para and initial grid mugrid0
        '''
        Para,mugrid0 = self.Para,self.mugrid
        S = len(Para.Pi)

        #First get initial fit
        PP = Planners_Allocation_Sequential(Para)
        c,n,x,V = map(np.vstack, zip(*map(lambda mu: PP.time1_value(mu),mugrid0)) )

        Vf,cf,nf,xprimef = {},{},{},{}
        for s in range(2):
            cf[s] = UnivariateSpline(x[:,s],c[:,s])
            nf[s] = UnivariateSpline(x[:,s],n[:,s])
            Vf[s] = UnivariateSpline(x[:,s],V[:,s])
            for sprime in range(S):
                xprimef[s,sprime] = UnivariateSpline(x[:,s],x[:,s])
        policies = [cf,nf,xprimef]


        #create xgrid
        xbar = [x.min(0).max(),x.max(0).min()]
        xgrid = np.linspace(xbar[0],xbar[1],len(mugrid0))
        self.xgrid = xgrid

        #Now iterate on bellman equation
        T = BellmanEquation(Para,xgrid,policies)
        diff = 1.
        while diff > 1e-5:
            PF = T(Vf)

            Vfnew,policies = self.fit_policy_function(PF)

            diff = 0.
            for s in range(S):
                diff = max(diff, np.abs((Vf[s](xgrid)-Vfnew[s](xgrid))/Vf[s](xgrid)).max() )

            print(diff)
            Vf = Vfnew

        #store value function policies and Bellman Equations
        self.Vf = Vf
        self.policies = policies
        self.T = T
Project: PyTeCK    Author: kyleniemeyer
def estimate_std_dev(indep_variable, dep_variable):
    """

    Parameters
    ----------
    indep_variable : ndarray, list(float)
        Independent variable (e.g., temperature, pressure)
    dep_variable : ndarray, list(float)
        Dependent variable (e.g., ignition delay)

    Returns
    -------
    standard_dev : float
        Standard deviation of difference between data and best-fit line

    """

    assert len(indep_variable) == len(dep_variable), \
        'independent and dependent variables not the same length'

    # ensure data sorted based on independent variable to avoid some problems
    sorted_vars = sorted(zip(indep_variable, dep_variable))
    indep_variable = [pt[0] for pt in sorted_vars]
    dep_variable = [pt[1] for pt in sorted_vars]

    # spline fit of the data
    if len(indep_variable) == 1 or len(indep_variable) == 2:
        # Fit of data will be perfect
        return min_deviation
    elif len(indep_variable) == 3:
        spline = UnivariateSpline(indep_variable, dep_variable, k=2)
    else:
        spline = UnivariateSpline(indep_variable, dep_variable)

    standard_dev = numpy.std(dep_variable - spline(indep_variable))

    if standard_dev < min_deviation:
        print('Standard deviation of {:.2f} too low, '
              'using {:.2f}'.format(standard_dev, min_deviation))
        standard_dev = min_deviation

    return standard_dev
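
A self-contained sketch of the residual-based estimate used above, with synthetic data (the module-level min_deviation floor is omitted here):

import numpy as np
from scipy.interpolate import UnivariateSpline

temperature = np.linspace(1000.0, 1500.0, 20)            # independent variable
ignition_delay = np.exp(2000.0 / temperature)            # dependent variable
ignition_delay *= 1 + np.random.normal(scale=0.02, size=temperature.size)

spline = UnivariateSpline(temperature, ignition_delay)   # default smoothing
standard_dev = np.std(ignition_delay - spline(temperature))
print(standard_dev)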
Project: clust    Author: BaselAbujamous
def fixnans(Xin, method='spline'):
    def fixrow(rowin, methodloc='spline'):
        rowout = np.array(rowin)

        unknown = isnan(rowin)
        known = [not xx for xx in unknown]

        tknown = np.nonzero(known)[0]
        tunknown = np.nonzero(unknown)[0]

        xknown = np.take(rowin, tknown)

        if methodloc == 'spline':
            if len(xknown) > 3:
                sf = spinter.UnivariateSpline(tknown, xknown)
            else:
                sf = spinter.UnivariateSpline(tknown, xknown, k=len(xknown)-1)
            rowout[tunknown] = sf(tunknown)
        else:
            raise ValueError('Provided interpolation method is not supported')

        return rowout

    Xinloc = deepcopy(Xin)
    N = np.size(Xinloc, 0)
    M = np.size(Xinloc, 1)
    Xout = np.zeros([N, M])

    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        for i in range(N):
            sumnans = sum(isnan(Xinloc[i]))
            notnans = [x for x in Xinloc[i] if not isnan(x)]
            if sumnans < M - 1:
                if math.isnan(Xinloc[i, 0]):
                    Xinloc[i, 0] = notnans[0]
                if math.isnan(Xinloc[i, -1]):
                    Xinloc[i, -1] = notnans[-1]
                Xout[i] = fixrow(Xinloc[i], method)
            elif sumnans == M - 1:
                Xout[i] = [notnans[0] for x in range(M)]
    return Xout
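
A self-contained sketch of the per-row idea behind fixnans: fit a spline through the known samples and evaluate it at the NaN positions.

import numpy as np
from scipy.interpolate import UnivariateSpline

row = np.array([1.0, 2.0, np.nan, 4.5, 5.0, np.nan, 7.5])
t = np.arange(row.size)
known = ~np.isnan(row)

k = min(3, known.sum() - 1)              # spline degree limited by known points
sf = UnivariateSpline(t[known], row[known], k=k)
row[~known] = sf(t[~known])
print(row)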