Python scipy.interpolate module: InterpolatedUnivariateSpline() example source code

We have extracted the following 35 code examples from open-source Python projects to illustrate how to use scipy.interpolate.InterpolatedUnivariateSpline().
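
Before the project examples, here is a minimal, self-contained usage sketch (the data and parameter values are illustrative and not taken from any of the listed projects):

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

# sample points; x must be strictly increasing
x = np.linspace(0.0, 10.0, 11)
y = np.sin(x)

# k sets the spline degree (1=linear, 3=cubic, the default); the curve passes
# through every data point because the smoothing factor is fixed to zero
spl = InterpolatedUnivariateSpline(x, y, k=3)

# evaluate on a finer grid, or take derivatives and integrals
x_fine = np.linspace(0.0, 10.0, 201)
y_fine = spl(x_fine)
slope = spl.derivative()(x_fine)
area = spl.integral(0.0, 10.0)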

Project: croissance    Author: biosustain    | Project source | File source
def resample(series, *, factor=10, size=None):
    """
    Returns a new series re-sampled to a given number of points.

    :param series: the ``pandas.Series`` to re-sample.
    :param factor: a number of points per unit time to scale the series to.
    :param size: a number of points to scale the series to.
    :return: the re-sampled ``pandas.Series``.
    """
    series = series.dropna()
    start, end = series.index[0], series.index[-1]

    if size is None:
        size = int((end - start) * factor)  # numpy.linspace expects an integer number of points

    index = numpy.linspace(start, end, size)
    spline = InterpolatedUnivariateSpline(series.index, series.values)
    return pandas.Series(index=index, data=spline(index))
Project: F_UNCLE    Author: fraserphysics    | Project source | File source
def test_call(self):
        """Test of the function call
        """
        models = {'simp': SimpleModel([2, 1])}
        data = self.simSimp(models)

        self.assertEqual(len(data), 3)
        self.assertIsInstance(data[0], np.ndarray)
        self.assertEqual(len(data[1]), 1)
        self.assertIsInstance(data[1][0], np.ndarray)
        self.assertIsInstance(data[2], dict)
        self.assertTrue('mean_fn' in data[2])
        self.assertIsInstance(data[2]['mean_fn'], IUSpline)

        xx = np.arange(10)
        npt.assert_equal(data[0], xx)
        npt.assert_equal(data[1][0], (2 * xx)**2 + 1 * xx)
Project: pyflowgo    Author: pyflowgo    | Project source | File source
def read_slope_from_file(self, filename=None):
        if filename is None:
            filename = '../MaunaUlu74/DEM_maunaulu74.txt'

        distance = []
        slope = []
        # read the slope file (.txt): each line gives the distance from the vent (first column) and
        # the corresponding slope in degrees (second column), which is then converted to radians
        f_slope = open(filename, "r")
        f_slope.readline()
        for line in f_slope:
            split_line = line.strip('\n').split('\t')
            distance.append(float(split_line[0]))
            slope.append(math.radians(float(split_line[1])))
        f_slope.close()

        #slope = self.running_mean(slope, 15)

        # build the spline interpolating slope versus distance (k=1: linear interpolation)
        self._slope_spline = interpolate.InterpolatedUnivariateSpline(distance, slope, k=1)
Project: BAG_framework    Author: ucb-art    | Project source | File source
def __init__(self, scale_list, values, method='spline', extrapolate=False):
        # error checking
        if len(values.shape) != 1:
            raise ValueError('This class only works for 1D data.')
        elif len(scale_list) != 1:
            raise ValueError('input and output dimension mismatch.')

        if method == 'linear':
            k = 1
        elif method == 'spline':
            k = 3
        else:
            raise ValueError('Unsupported interpolation method: %s' % method)

        offset, scale = scale_list[0]
        num_pts = values.shape[0]
        points = np.linspace(offset, (num_pts - 1) * scale + offset, num_pts)  # type: np.multiarray.ndarray

        DiffFunction.__init__(self, [(points[0], points[-1])], delta_list=None)

        ext = 0 if extrapolate else 2
        self.fun = interp.InterpolatedUnivariateSpline(points, values, k=k, ext=ext)
Project: CAAPR    Author: Stargrazer82301    | Project source | File source
def smooth_log(self):

        """
        This function ...
        :return:
        """

        centers = np.array(self.centers)
        counts = np.array(self.counts)

        not_zero = counts != 0

        centers = centers[not_zero]
        counts = counts[not_zero]

        order = 2
        s = InterpolatedUnivariateSpline(centers, np.log10(counts), k=order)

        # Return the spline curve
        return s

    # -----------------------------------------------------------------
Project: CAAPR    Author: Stargrazer82301    | Project source | File source
def smooth_log(self):

        """
        This function ...
        :return:
        """

        centers = np.array(self.centers)
        counts = np.array(self.counts)

        not_zero = counts != 0

        centers = centers[not_zero]
        counts = counts[not_zero]

        order = 2
        s = InterpolatedUnivariateSpline(centers, np.log10(counts), k=order)

        # Return the spline curve
        return s

    # -----------------------------------------------------------------
Project: fluids    Author: CalebBell    | Project source | File source
def set_table(self, n=100, dx=None):
        r'''Method to set an interpolation table of liquids levels versus
        volumes in the tank, for a fully defined tank. Normally run by the
        h_from_V method, this may be run prior to its use with a custom
        specification. Either the number of points on the table, or the
        vertical distance between steps may be specified.

        Parameters
        ----------
        n : int, optional
            Number of points in the interpolation table, [-]
        dx : float, optional
            Vertical distance between steps in the interpolation table, [m]
        '''
        if dx:
            self.heights = np.linspace(0, self.h_max, int(self.h_max/dx)+1)
        else:
            self.heights = np.linspace(0, self.h_max, n)
        self.volumes = [self.V_from_h(h) for h in self.heights]
        self.interp_h_from_V = InterpolatedUnivariateSpline(self.volumes, self.heights, ext=3)
        self.table = True
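
Note that the spline above is built with the volumes as the x-axis and the heights as the y-axis: a monotonic height-to-volume relationship is inverted simply by swapping the two arrays, and ext=3 clamps out-of-range queries to the boundary values. A minimal sketch of the same inversion idea, using a made-up monotonic volume curve instead of V_from_h:

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

h = np.linspace(0.0, 2.0, 50)    # liquid heights, [m]
V = 3.0 * h**2                   # made-up monotonic volume curve, [m^3]

# invert V(h) numerically: pass (V, h) instead of (h, V); x must be increasing
h_from_V = InterpolatedUnivariateSpline(V, h, ext=3)
print(h_from_V(3.0))             # height at which the volume reaches 3 m^3 (~1.0)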
Project: croissance    Author: biosustain    | Project source | File source
def segment_spline_smoothing(series, series_std_dev=None):
    if series_std_dev is None:
        series_std_dev = series
    segments = segment_by_std_dev(series_std_dev)
    points = segment_points(series, segments)
    spline = InterpolatedUnivariateSpline(points.index, points.values, k=3)
    return pandas.Series(data=spline(series.index), index=series.index)
Project: F_UNCLE    Author: fraserphysics    | Project source | File source
def get_splines(self, x_data, y_data, var_data=0):
        """Overloads the base spline generateion which cannot deal with the
        smooth experiment
        """

        return IUSpline(x_data, y_data), None
Project: F_UNCLE    Author: fraserphysics    | Project source | File source
def _on_call(self, models):
        """Dummy simulation
        """
        sim_model = models
        x_list = np.arange(self.get_option('nDOF'))
        mean_fn = IUSpline(x_list, np.array(sim_model(x_list)))
        return x_list, [np.array(sim_model(x_list))],\
            {'mean_fn': mean_fn, 'tau': 0}
Project: F_UNCLE    Author: fraserphysics    | Project source | File source
def _on_call(self, model1, model2):
        """Dummy simulation
        """

        x_list = np.arange(self.get_option('nDOF'))
        values = np.array(model1(x_list) + model2(x_list))
        mean_fn = IUSpline(x_list, values)
        return x_list, [values,],\
            {'mean_fn': mean_fn, 'tau': 0}
Project: Virtual-Makeup    Author: badarsh2    | Project source | File source
def univariate_plot(lx=[],ly=[]):
    unew = np.arange(lx[0], lx[-1]+1, 1)
    f2 = InterpolatedUnivariateSpline(lx, ly)
    return unew,f2(unew)
Project: pyflowgo    Author: pyflowgo    | Project source | File source
def read_crystal_from_melts(self, filename=None):
        crystal_fraction = []
        temperature = []
        if filename is None:
            filename = '../pyflowgo/MaunaUlu74/Results-melts_MU74.csv'

        with open(filename) as csvfile:
            f_crystal_fraction = csv.DictReader(csvfile, delimiter=',')
            for row in f_crystal_fraction:
                temperature.append(float(row['temperature'])+273.15)
                crystal_fraction.append(float(row['fraq_microlite']))
        self._crystal_spline = interpolate.InterpolatedUnivariateSpline(temperature, crystal_fraction, k=1)
Project: accpy    Author: kramerfelix    | Project source | File source
def noisefilter(t, signal, avgpts=30, smoothfac=1600):
    smooth = rolling_mean(signal[::-1], avgpts)[::-1]
    fspline = InterpolatedUnivariateSpline(t[:-avgpts], smooth[:-avgpts], k=4)
    fspline.set_smoothing_factor(smoothfac)
    return fspline(t)
Project: fftoptionlib    Author: arraystream    | Project source | File source
def spline_fitting(x_data, y_data, order):
    return InterpolatedUnivariateSpline(x_data, y_data, k=order)
Project: BAG_framework    Author: ucb-art    | Project source | File source
def __init__(self, xvec, yvec, xtol, order=3, ext=3):
        self._xvec = xvec
        self._yvec = yvec
        self._xtol = xtol
        self._order = order
        self._ext = ext
        self._fun = interp.InterpolatedUnivariateSpline(xvec, yvec, k=order, ext=ext)
Project: BAG_framework    Author: ucb-art    | Project source | File source
def ext(self):
        """interpolation extension mode.  See documentation for InterpolatedUnivariateSpline."""
        return self._ext
Project: CAAPR    Author: Stargrazer82301    | Project source | File source
def smooth(self):

        """
        This function ...
        :return:
        """

        order = 2
        s = InterpolatedUnivariateSpline(self.centers, self.counts, k=order)

        # Return the spline curve
        return s

    # -----------------------------------------------------------------
Project: CAAPR    Author: Stargrazer82301    | Project source | File source
def smooth(self):

        """
        This function ...
        :return:
        """

        order = 2
        s = InterpolatedUnivariateSpline(self.centers, self.counts, k=order)

        # Return the spline curve
        return s

    # -----------------------------------------------------------------
Project: gullikson-scripts    Author: kgullikson88    | Project source | File source
def get_interpolator(self, xcolumn, ycolumn, extrap='nearest'):
        """
        Get an interpolator instance between the two columns

        Parameters:
        ===========
        - xcolumn:    string
                      The name of the x column to interpolate between

        - ycolumn:    string
                      The name of the value you want to interpolate

        - extrap:     string
                      How to treat extrapolation. Options are:
                          1. 'nearest': Default behavior. It will return the nearest match
                             to the given 'x' value
                          2. 'extrapolate': Extrapolate the spline. This is probably only
                             safe for very small extrapolations

        Returns:
        ========
        A callable interpolator.
        """
        # Make sure the column names are correct
        assert xcolumn in self.mam_df.keys() and ycolumn in self.mam_df.keys()

        # Sort the dataframe by the x column, and drop any duplicates or nans it might have
        sorted_df = self.mam_df.sort_values(by=xcolumn).dropna(subset=[xcolumn, ycolumn], how='any').drop_duplicates(xcolumn)

        # Make an interpolator
        ext_value = {'nearest': 3, 'extrapolate': 0}
        fcn = spline(sorted_df[xcolumn].values, sorted_df[ycolumn].values, ext=ext_value[extrap])
        return fcn
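
The ext_value mapping above uses scipy's extrapolation codes: ext=0 extrapolates the spline, ext=1 returns zero, ext=2 raises an error, and ext=3 returns the boundary value (the 'nearest' behaviour). A small sketch of the two modes used here (synthetic data, not the Mamajek table):

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

x = np.array([1.0, 2.0, 3.0, 4.0])
y = x**2

extrap = InterpolatedUnivariateSpline(x, y, ext=0)   # extend the fitted polynomial
clamp = InterpolatedUnivariateSpline(x, y, ext=3)    # clamp to the boundary value

print(extrap(5.0))   # ~25.0, continues the curve beyond x=4
print(clamp(5.0))    # 16.0, the value at the last tabulated point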
Project: gullikson-scripts    Author: kgullikson88    | Project source | File source
def get_ccf_data(basedir, primary_name=None, secondary_name=None, vel_arr=np.arange(-900.0, 900.0, 0.1), type='bright'):
    """
    Searches the given directory for CCF files, and classifies
    by star, temperature, metallicity, and vsini
    :param basedir: The directory to search for CCF files
    :keyword primary_name: Optional keyword. If given, it will only get the requested primary star data
    :keyword secondary_name: Same as primary_name, but only reads ccfs for the given secondary
    :keyword vel_arr: The velocities to interpolate each ccf at
    :return: pandas DataFrame
    """
    if not basedir.endswith('/'):
        basedir += '/'
    all_files = ['{}{}'.format(basedir, f) for f in os.listdir(basedir) if type in f.lower()]
    primary = []
    secondary = []
    vsini_values = []
    temperature = []
    gravity = []
    metallicity = []
    ccf = []
    for fname in all_files:
        star1, star2, vsini, temp, logg, metal = classify_filename(fname, type=type)
        if primary_name is not None and star1.lower() != primary_name.lower():
            continue
        if secondary_name is not None and star2.lower() != secondary_name.lower():
            continue
        vel, corr = np.loadtxt(fname, unpack=True)
        fcn = spline(vel, corr)
        ccf.append(fcn(vel_arr))
        primary.append(star1)
        secondary.append(star2)
        vsini_values.append(vsini)
        temperature.append(temp)
        gravity.append(logg)
        metallicity.append(metal)

    # Make a pandas dataframe with all this data
    df = pd.DataFrame(data={'Primary': primary, 'Secondary': secondary, 'Temperature': temperature,
                                'vsini': vsini_values, 'logg': gravity, '[Fe/H]': metallicity, 'CCF': ccf})
    return df
Project: gullikson-scripts    Author: kgullikson88    | Project source | File source
def __init__(self, filt, T_low=3500, T_high=12000, dT=10, feh=0.0):
        """
        Initialize a CompanionFitter instance. It will pre-tabulate
        synthetic photometry for main-sequence stars with Temperatures
        ranging from T_low to T_high, in steps of dT K. All the
        models will have [Fe/H] = feh. Finally, we will interpolate
        the relationship between temperature and magnitude so that
        additional photometry points are made quickly.

        Parameters:
        ===========
        - filt:               A pysynphot bandpass encoding the filter information.

        - T_low, T_high, dT:  floats
                              Parameters describing the temperature grid
                              to interpolate

        - feh:                float
                              The metallicity [Fe/H] to use for the models
        """
        # Use the Mamajek table to get main-sequence relationships
        MT = Mamajek_Table.MamajekTable()
        MT.mam_df['radius'] = 10**(0.5*MT.mam_df.logL - 2.0*MT.mam_df.logT + 2.0*3.762)
        MT.mam_df['logg'] = 4.44 + np.log10(MT.mam_df.Msun) - 2.0*np.log10(MT.mam_df.radius)
        teff2radius = MT.get_interpolator('Teff', 'radius')
        teff2logg = MT.get_interpolator('Teff', 'logg')

        # Pre-calculate the magnitude at each temperature
        self.temperature = np.arange(T_low, T_high, dT)
        self.magnitude = np.zeros(self.temperature.size)
        for i, T in enumerate(self.temperature):
            logging.info('i = {}/{}: T = {:.1f}'.format(i+1, self.temperature.size, T))
            logg = teff2logg(T)
            R = teff2radius(T)
            spec = pysynphot.Icat('ck04models', T, feh, logg) * R**2
            obs = pysynphot.Observation(spec, filt)
            self.magnitude[i] = obs.effstim('abmag')

        # Interpolate the T-mag curve
        self.interpolator = spline(self.temperature, self.magnitude)
Project: gullikson-scripts    Author: kgullikson88    | Project source | File source
def __call__(self, T):
        """
         Evaluate the spline at the given temperature, returning the interpolated magnitude
        """
        return self.interpolator(T)
Project: picasso    Author: jungmannlab    | Project source | File source
def undrift(locs, info, segmentation, display=True, segmentation_callback=None, rcc_callback=None):
    bounds, segments = _render.segment(locs, info, segmentation,
                                       {'blur_method': 'gaussian', 'min_blur_width': 1},
                                       segmentation_callback)
    shift_y, shift_x = _imageprocess.rcc(segments, 32, rcc_callback)
    t = (bounds[1:] + bounds[:-1]) / 2
    drift_x_pol = _interpolate.InterpolatedUnivariateSpline(t, shift_x, k=3)
    drift_y_pol = _interpolate.InterpolatedUnivariateSpline(t, shift_y, k=3)
    t_inter = _np.arange(info[0]['Frames'])
    drift = (drift_x_pol(t_inter), drift_y_pol(t_inter))
    drift = _np.rec.array(drift, dtype=[('x', 'f'), ('y', 'f')])
    if display:
        fig1 = _plt.figure(figsize=(17, 6))
        _plt.suptitle('Estimated drift')
        _plt.subplot(1, 2, 1)
        _plt.plot(drift.x, label='x interpolated')
        _plt.plot(drift.y, label='y interpolated')
        t = (bounds[1:] + bounds[:-1]) / 2
        _plt.plot(t, shift_x, 'o', color=list(_plt.rcParams['axes.prop_cycle'])[0]['color'], label='x')
        _plt.plot(t, shift_y, 'o', color=list(_plt.rcParams['axes.prop_cycle'])[1]['color'], label='y')
        _plt.legend(loc='best')
        _plt.xlabel('Frame')
        _plt.ylabel('Drift (pixel)')
        _plt.subplot(1, 2, 2)
        _plt.plot(drift.x, drift.y, color=list(_plt.rcParams['axes.prop_cycle'])[2]['color'])
        _plt.plot(shift_x, shift_y, 'o', color=list(_plt.rcParams['axes.prop_cycle'])[2]['color'])
        _plt.axis('equal')
        _plt.xlabel('x')
        _plt.ylabel('y')
        fig1.show()
    locs.x -= drift.x[locs.frame]
    locs.y -= drift.y[locs.frame]
    return drift, locs
Project: the-wizz    Author: morriscb    | Project source | File source
def _make_redshift_spline(z_min, z_max):
    """Utility function for creating a spline for comoving distance to
    redshift.
    """
    redshift_array = np.linspace(
        np.min((z_min - 1e-8, 0.0)), z_max + 1e-8, 1000)
    comov_array = core_utils.WMAP5.comoving_distance(redshift_array)
    comov_dist_to_redshift_spline = iu_spline(comov_array, redshift_array)
    return comov_dist_to_redshift_spline
Project: empymod    Author: empymod    | Project source | File source
def fft(fEM, time, freq, ftarg):
    """Fourier Transform using the Fast Fourier Transform.

    The function is called from one of the modelling routines in :mod:`model`.
    Consult these modelling routines for a description of the input and output
    parameters.

    Returns
    -------
    tEM : array
        Returns time-domain EM response of `fEM` for given `time`.

    conv : bool
        Only relevant for QWE/QUAD.

    """
    # Get ftarg values
    dfreq, nfreq, ntot, pts_per_dec = ftarg

    # If pts_per_dec, we first have to interpolate fEM to the required freqs
    if pts_per_dec:
        sfEMr = iuSpline(np.log10(freq), fEM.real)
        sfEMi = iuSpline(np.log10(freq), fEM.imag)
        freq = np.arange(1, nfreq+1)*dfreq
        fEM = sfEMr(np.log10(freq)) + 1j*sfEMi(np.log10(freq))

    # Pad the frequency result
    fEM = np.pad(fEM, (0, ntot-nfreq), 'linear_ramp')

    # Carry out FFT
    ifftEM = fftpack.ifft(np.r_[fEM[1:], 0, fEM[::-1].conj()]).real
    stEM = 2*ntot*fftpack.fftshift(ifftEM*dfreq, 0)

    # Interpolate in time domain
    dt = 1/(2*ntot*dfreq)
    ifEM = iuSpline(np.linspace(-ntot, ntot-1, 2*ntot)*dt, stEM)
    tEM = ifEM(time)/2*np.pi  # (Multiplication of 2/pi in model.tem)

    # Return the electromagnetic time domain field
    # (Second argument is only for QWE)
    return tEM, True
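
The spline classes are real-valued, which is why the routine above fits separate splines to fEM.real and fEM.imag on a log10 frequency axis and recombines them afterwards. A minimal sketch of the same pattern with a made-up complex response:

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline as iuSpline

freq = np.logspace(0, 3, 30)               # made-up frequencies, [Hz]
resp = 1.0 / (1.0 + 1j * freq / 100.0)     # made-up complex response

s_re = iuSpline(np.log10(freq), resp.real)
s_im = iuSpline(np.log10(freq), resp.imag)

new_freq = np.logspace(0, 3, 200)
new_resp = s_re(np.log10(new_freq)) + 1j * s_im(np.log10(new_freq))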
Project: vivarium    Author: ihmeuw    | Project source | File source
def __call__(self, *args, **kwargs):
        # TODO: Should be more defensive about this
        if len(args) == 1:
            # We have a dataframe
            df = args[0]
        else:
            # We have parameters for a single invocation
            df = pd.DataFrame(kwargs)

        if self.key_columns:
            sub_tables = df.groupby(self.key_columns)
        else:
            sub_tables = [(None, df)]

        result = pd.DataFrame(index=df.index)
        for key, sub_table in sub_tables:
            if sub_table.empty:
                continue
            funcs = self.interpolations[key]
            parameters = tuple(sub_table[k] for k in self.parameter_columns)
            for value_column, func in funcs.items():
                out = func(*parameters)
                # This reshape is necessary because RectBivariateSpline and InterpolatedUnivariateSpline return results
                # in slightly different shapes and we need them to be consistent
                if out.shape:
                    result.loc[sub_table.index, value_column] = out.reshape((out.shape[0],))
                else:
                    result.loc[sub_table.index, value_column] = out

        if self.func:
            return self.func(result)

        if len(result.columns) == 1:
            return result[result.columns[0]]

        return result
Project: pytrip    Author: pytrip    | Project source | File source
def __get_1d_function(x, y, kind):
        """
        Train 1-D interpolator
        :param x: x-coordinates of data points
        :param y: y-coordinates of data points
        :param kind: 'linear' or 'spline' interpolation type
        :return Interpolator callable object
        """
        def fun_interp(t):
            return np.interp(t, x, y)

        # input consistency check
        if len(x) != len(y):
            logger.error("len(x) = {:d}, len(y) = {:d}. Both should be equal".format(len(x), len(y)))
            raise Exception("1-D interpolation: X and Y should have the same shape")
        # 1-element data set, return fixed value
        if len(y) == 1:
            # define constant
            def fun_const(t):
                """
                Helper function
                :param t: array-like or scalar
                :return: array of constant values if t is an array of constant scalar if t is a scalar
                """
                try:
                    result = np.ones_like(t) * y[0]  # t is an array
                except TypeError:
                    result = y[0]  # t is a scalar
                return result
            result = fun_const
        # 2-element data set, use linear interpolation from numpy
        elif len(y) == 2:
            result = fun_interp
        else:
            if kind == 'spline':
                # 3-rd degree spline interpolation, passing through all points
                try:
                    from scipy.interpolate import InterpolatedUnivariateSpline
                except ImportError as e:
                    logger.error("Please install scipy on your platform to be able to use spline-based interpolation")
                    raise e
                k = 3
                if len(y) == 3:  # fall back to 2-nd degree spline if only 3 points are present
                    k = 2
                result = InterpolatedUnivariateSpline(x, y, k=k)
            elif kind == 'linear':
                result = fun_interp
            else:
                raise ("Unsupported interpolation type {:s}.".format(kind))
        return result
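
The degree fallbacks above reflect a scipy constraint: an interpolating spline of degree k needs at least k + 1 data points. A compact sketch of the same rule (not the pytrip implementation):

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

def spline_or_lower_order(x, y):
    """Fit the highest-degree interpolating spline the data supports (cubic at most)."""
    k = min(3, len(x) - 1)    # degree k requires at least k + 1 points
    if k < 1:
        return lambda t: np.ones_like(t, dtype=float) * y[0]    # one point: constant
    return InterpolatedUnivariateSpline(x, y, k=k)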
Project: gullikson-scripts    Author: kgullikson88    | Project source | File source
def get_rv(vel, corr, Npix=None, **fit_kws):
    """
    Get the best radial velocity, with errors.
    This will only work if the ccf was made with the maximum likelihood method!
    Uses the formula given in Zucker (2003) MNRAS, 342, 4  for the rv error.

    Parameters:
    ===========
    - vel:   numpy.ndarray
             The velocities
    - corr:  numpy.ndarray
             The ccf values. Should be the same size as vel
    - Npix:  integer
             The number of pixels used in the CCF.

    Returns:
    ========
    - rv:     float
             The best radial velocity, in km/s
    - rv_err: float
             Uncertainty in the velocity, in km/s
    - ccf:    float
             The CCF power at the maximum velocity.
    """
    vel = np.atleast_1d(vel)
    corr = np.atleast_1d(corr)
    sorter = np.argsort(vel)
    fcn = spline(vel[sorter], corr[sorter])
    fcn_prime = fcn.derivative(1)
    fcn_2prime = fcn.derivative(2)

    guess = vel[np.argmax(corr)]

    def errfcn(v):
        ll = 1e6*fcn_prime(v)**2
        return ll

    out = minimize_scalar(errfcn, bounds=(guess-2, guess+2), method='bounded')
    rv = out.x
    if Npix is None:
        Npix = vel.size

    rv_var = -(Npix * fcn_2prime(rv) * (fcn(rv) / (1 - fcn(rv) ** 2))) ** (-1)
    return rv, np.sqrt(rv_var), fcn(rv)
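
The peak search above relies on the spline's .derivative(n) method, which returns a new spline representing the n-th derivative, so the CCF maximum can be located where the first derivative vanishes. A stripped-down sketch with a synthetic CCF (not the maximum-likelihood CCF assumed by get_rv):

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy.optimize import minimize_scalar

vel = np.linspace(-50.0, 50.0, 201)
ccf = np.exp(-0.5 * ((vel - 3.2) / 5.0) ** 2)    # synthetic peak at +3.2 km/s

fcn = InterpolatedUnivariateSpline(vel, ccf)
d1 = fcn.derivative(1)

guess = vel[np.argmax(ccf)]
out = minimize_scalar(lambda v: d1(v) ** 2, bounds=(guess - 2, guess + 2), method='bounded')
print(out.x)    # ~3.2 km/s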
Project: pyphot    Author: mfouesneau    | Project source | File source
def reduce_resolution(wi, fi, sigma0=0.55, sigma_floor=0.2):
    """ Adapt the resolution of the spectra to match the lick definitions

        Lick definitions have different resolution elements as a function
        of wavelength. These definitions are hard-coded in this function.

    Parameters
    ---------
    wi: ndarray (n, )
        wavelength definition
    fi: ndarray (nspec, n) or (n, )
        spectra to convert
    sigma0: float
        initial broadening in the spectra `fi`
    sigma_floor: float
        minimal dispersion to consider

    Returns
    -------
    flux_red: ndarray (nspec, n) or (n, )
        reduced spectra
    """

    # all in AA
    w_lick_res = (4000., 4400., 4900., 5400., 6000.)
    lick_res = (11.5, 9.2, 8.4, 8.4, 9.8)   # FWHM in AA

    w = np.asarray(wi)
    flux = np.atleast_2d(fi)

    # Linear interpolation of lick_res over w
    # numpy interp does constant instead of extrapolation
    # res = np.interp(w, w_lick_res, lick_res)

    # spline order: 1 linear, 2 quadratic, 3 cubic ...
    from scipy.interpolate import InterpolatedUnivariateSpline
    res = InterpolatedUnivariateSpline(w_lick_res, lick_res, k=1)(w)

    # Compute width from fwhm
    const = 2. * np.sqrt(2. * np.log(2))  # conversion fwhm --> sigma
    lick_sigma = np.sqrt((res ** 2 - sigma0 ** 2)) / const

    # Convolution by g=1/sqrt(2*pi*sigma^2) * exp(-r^2/(2*sigma^2))
    flux_red = np.zeros(flux.shape, dtype=flux.dtype)

    for i, sigma in enumerate(lick_sigma):
        maxsigma = 3. * sigma
        # sampling floor: min (0.2, sigma * 0.1)
        delta = min(sigma_floor, sigma * 0.1)
        delta_wj = np.arange(-maxsigma, + maxsigma, delta)
        wj = delta_wj + w[i]
        for k, fk in enumerate(flux):
            fluxj = np.interp(wj, w, fk, left=0., right=0.)
            flux_red[k, i] = np.sum(fluxj * delta * np.exp(-0.5 * (delta_wj / sigma) ** 2))

    flux_red /= lick_sigma * const

    return flux_red.reshape(np.shape(fi))
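
The commented-out np.interp call was replaced with a k=1 spline because of the different out-of-range behaviour noted above: np.interp holds the boundary values constant, while a degree-1 InterpolatedUnivariateSpline (with the default ext=0) extrapolates the end segments linearly. A tiny illustration with made-up numbers:

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

xp = np.array([1.0, 2.0, 3.0])
yp = np.array([10.0, 20.0, 30.0])

print(np.interp(4.0, xp, yp))                            # 30.0 (clamped to the edge)
print(InterpolatedUnivariateSpline(xp, yp, k=1)(4.0))    # 40.0 (linear extrapolation)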
Project: ASP    Author: TUIlmenauAMS    | Project source | File source
def OutMidCorrection(self, correctionType, firOrd, fs):
        """
        Method to "correct" the middle outer ear transfer function.
            As appears in :
            - A. Härmä, and K. Palomäki, ''HUTear – a free Matlab toolbox for modeling of human hearing'',
            in Proceedings of the Matlab DSP Conference, pp 96-99, Espoo, Finland 1999.
        """
        # Lookup tables for correction
        f1 = np.array([20, 25, 30, 35, 40, 45, 50, 55, 60, 70, 80, 90, 100, \
        125, 150, 177, 200, 250, 300, 350, 400, 450, 500, 550, \
        600, 700, 800, 900, 1000, 1500, 2000, 2500, 2828, 3000, \
        3500, 4000, 4500, 5000, 5500, 6000, 7000, 8000, 9000, 10000, \
        12748, 15000])

        ELC = np.array([ 31.8, 26.0, 21.7, 18.8, 17.2, 15.4, 14.0, 12.6, 11.6, 10.6, \
            9.2, 8.2, 7.7, 6.7, 5.3, 4.6, 3.9, 2.9, 2.7, 2.3, \
            2.2, 2.3, 2.5, 2.7, 2.9, 3.4, 3.9, 3.9, 3.9, 2.7, \
            0.9, -1.3, -2.5, -3.2, -4.4, -4.1, -2.5, -0.5, 2.0, 5.0, \
            10.2, 15.0, 17.0, 15.5, 11.0, 22.0])

        MAF = np.array([ 73.4, 65.2, 57.9, 52.7, 48.0, 45.0, 41.9, 39.3, 36.8, 33.0, \
            29.7, 27.1, 25.0, 22.0, 18.2, 16.0, 14.0, 11.4, 9.2, 8.0, \
             6.9,  6.2,  5.7,  5.1,  5.0,  5.0,  4.4,  4.3, 3.9, 2.7, \
             0.9, -1.3, -2.5, -3.2, -4.4, -4.1, -2.5, -0.5, 2.0, 5.0, \
            10.2, 15.0, 17.0, 15.5, 11.0, 22.0])

        f2  = np.array([  125,  250,  500, 1000, 1500, 2000, 3000,  \
            4000, 6000, 8000, 10000, 12000, 14000, 16000])

        MAP = np.array([ 30.0, 19.0, 12.0,  9.0, 11.0, 16.0, 16.0, \
            14.0, 14.0,  9.9, 24.7, 32.7, 44.1, 63.7])

        if correctionType == 'ELC':
            freqTable = f1
            CorrectionTable = ELC
        elif correctionType == 'MAF':
            freqTable = f1
            CorrectionTable = MAF
        elif correctionType == 'MAP':
            freqTable = f2
            CorrectionTable = MAP
        else:
            print('Unrecognised operation: ELC will be used instead...')
            freqTable = f1
            CorrectionTable = ELC

        freqN = np.arange(0, firOrd) * fs/2. / (firOrd-1)
        spline = uspline(freqTable, CorrectionTable)
        crc = spline(freqN)
        crclin = 10. ** (-crc/ 10.)
        return crclin, freqN, crc
Project: qudi    Author: Ulm-IQO    | Project source | File source
def estimate_lorentzian_dip(self, x_axis, data, params):
    """ Provides an estimator to obtain initial values for lorentzian function.

    @param numpy.array x_axis: 1D axis values
    @param numpy.array data: 1D data, should have the same dimension as x_axis.
    @param lmfit.Parameters params: object includes parameter dictionary which
                                    can be set

    @return tuple (error, params):

    Explanation of the return parameter:
        int error: error code (0:OK, -1:error)
        Parameters object params: set parameters of initial values
    """
    # check if parameters make sense
    error = self._check_1D_input(x_axis=x_axis, data=data, params=params)

    # check if input x-axis is ordered and increasing
    sorted_indices = np.argsort(x_axis)
    if not np.all(sorted_indices == np.arange(len(x_axis))):
        x_axis = x_axis[sorted_indices]
        data = data[sorted_indices]

    data_smooth, offset = self.find_offset_parameter(x_axis, data)

    # data_level = data-offset
    data_level = data_smooth - offset

    # calculate from the leveled data the amplitude:
    amplitude = data_level.min()

    smoothing_spline = 1    # must be 1<= smoothing_spline <= 5
    function = InterpolatedUnivariateSpline(x_axis, data_level,
                                            k=smoothing_spline)
    numerical_integral = function.integral(x_axis[0], x_axis[-1])

    x_zero = x_axis[np.argmin(data_smooth)]

    # according to the derived formula, calculate sigma. The crucial part is
    # here that the offset was estimated correctly, then the area under the
    # curve is calculated correctly:
    sigma = np.abs(numerical_integral / (np.pi * amplitude))

    # auxiliary variables
    stepsize = x_axis[1] - x_axis[0]
    n_steps = len(x_axis)

    params['amplitude'].set(value=amplitude, max=-1e-12)
    params['sigma'].set(value=sigma, min=stepsize / 2,
                        max=(x_axis[-1] - x_axis[0]) * 10)
    params['center'].set(value=x_zero, min=(x_axis[0]) - n_steps * stepsize,
                         max=(x_axis[-1]) + n_steps * stepsize)
    params['offset'].set(value=offset)

    return error, params
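
The sigma estimate above follows from the area of a Lorentzian: assuming the parameterization L(x) = A*sigma^2 / ((x - x0)^2 + sigma^2), whose peak value is A, the integral over the whole axis is pi*A*sigma, so sigma = |I / (pi*A)| once the offset has been removed. A quick numerical check of that identity (illustrative values only):

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

A, sigma, x0 = -2.0, 1.5, 0.0              # dip amplitude (negative), width, centre
x = np.linspace(-200.0, 200.0, 20001)      # wide range so little of the tails is lost
lorentz = A * sigma**2 / ((x - x0)**2 + sigma**2)

integral = InterpolatedUnivariateSpline(x, lorentz, k=1).integral(x[0], x[-1])
print(abs(integral / (np.pi * A)))         # ~1.5, i.e. recovers sigma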
Project: HYDROS    Author: dtold    | Project source | File source
def setup_xarray(scan_list,scan_list2,scan_range,scan_range2,log,log2,precis,precis2,scan_type):
        if scan_type=='1d':
                #for scanning a predefined list of numbers
                if scan_list!=[]:
                        #interpolate to prescribed length
                        l1_spl=US(range(len(scan_list)),scan_list)
                        XARRAY=l1_spl(np.linspace(0,len(scan_list)-1,precis))
                        X2ARRAY=None

                        #for having a second simultaneously varying variable
                        if scan_list2!=[]:
                                #interpolate to prescribed length
                                l2_spl=US(scan_list,scan_list2)
                                X2ARRAY=l2_spl(XARRAY)

                #log- or linearly spaced 1d scan
                if (scan_list==[] and scan_list2==[]):
                        X2ARRAY=None #np.empty(1,dtype=np.float)
                        if log:
                                XARRAY=np.logspace(np.log10(scan_range[0]),np.log10(scan_range[1]),precis)
                        else:
                                XARRAY=np.linspace(scan_range[0],scan_range[1],precis)
        elif scan_type=='2d':
                #for scanning a predefined list of numbers
                if scan_list!=[]:
                        #interpolate to prescribed length
                        l1_spl=US(range(len(scan_list)),scan_list)
                        XARRAY=np.tile(l1_spl(np.linspace(0,len(scan_list)-1,precis)),precis).flatten()

                        #for having a second simultaneously varying variable
                        if scan_list2!=[]:
                                #interpolate to prescribed length
                                l2_spl=US(scan_list,scan_list2)
                                X2ARRAY=np.repeat(l2_spl(XARRAY),precis)
                        else:
                                exit('Error: need to specify two scan_lists for 2d scan!')

                #log- or linearly spaced 2d scan
                if (scan_list==[] and scan_list2==[]):
                        if log:
                                XARRAY=np.tile(np.logspace(np.log10(scan_range[0]),np.log10(scan_range[1]),precis),precis2).flatten()
                        else:
                                XARRAY=np.tile(np.linspace(scan_range[0],scan_range[1],precis),precis2).flatten()
                        if log2:
                                X2ARRAY=np.repeat(np.logspace(np.log10(scan_range2[0]),np.log10(scan_range2[1]),precis2),precis).flatten()
                        else:
                                X2ARRAY=np.repeat(np.linspace(scan_range2[0],scan_range2[1],precis2),precis).flatten()
        return XARRAY, X2ARRAY
Project: empymod    Author: empymod    | Project source | File source
def ffht(fEM, time, freq, ftarg):
    """Fourier Transform using a Cosine- or a Sine-filter.

    It follows the Filter methodology [Anderson_1975]_, see `fht` for more
    information.

    The function is called from one of the modelling routines in :mod:`model`.
    Consult these modelling routines for a description of the input and output
    parameters.

    This function is based on `get_CSEM1D_TD_FHT.m` from the source code
    distributed with [Key_2012]_.

    Returns
    -------
    tEM : array
        Returns time-domain EM response of `fEM` for given `time`.

    conv : bool
        Only relevant for QWE/QUAD.

    """
    # Get ftarg values
    fftfilt, pts_per_dec, ftkind = ftarg

    # Settings depending if cos/sin plus scaling
    if ftkind == 'sin':
        fEM = -fEM.imag
    else:
        fEM = fEM.real

    if pts_per_dec:  # Use pts_per_dec frequencies per decade
        # 1. Interpolate in frequency domain
        sfEM = iuSpline(np.log10(2*np.pi*freq), fEM)
        ifEM = sfEM(np.log10(fftfilt.base/time[:, None]))

        # 2. Filter
        tEM = np.dot(ifEM, getattr(fftfilt, ftkind))

    else:  # Standard FHT procedure
        # Get new times in frequency domain
        _, itime = get_spline_values(fftfilt, time)

        # Re-arranged fEM with shape (ntime, nfreq).  Each row starts one
        # 'freq' higher.
        fEM = np.concatenate((np.tile(fEM, itime.size).squeeze(),
                             np.zeros(itime.size)))
        fEM = fEM.reshape(itime.size, -1)[:, :fftfilt.base.size]

        # 1. Filter
        stEM = np.dot(fEM, getattr(fftfilt, ftkind))

        # 2. Interpolate in time domain
        itEM = iuSpline(np.log10((itime)[::-1]), stEM[::-1])
        tEM = itEM(np.log10(time))

    # Return the electromagnetic time domain field
    # (Second argument is only for QWE)
    return tEM/time, True
Project: vivarium    Author: ihmeuw    | Project source | File source
def __init__(self, data, categorical_parameters, continuous_parameters, func=None, order=1):
        self.key_columns = categorical_parameters
        self.parameter_columns = continuous_parameters
        self.func = func

        if len(self.parameter_columns) not in [1, 2]:
            raise ValueError("Only interpolation over 1 or 2 variables is supported")
        if len(self.parameter_columns) == 1 and order == 0:
            raise ValueError("Order 0 only supported for 2d interpolation")

        # These are the columns which the interpolation function will approximate
        value_columns = sorted(data.columns.difference(set(self.key_columns)|set(self.parameter_columns)))

        if self.key_columns:
            # Since there are key_columns we need to group the table by those
            # columns to get the sub-tables to fit
            sub_tables = data.groupby(self.key_columns)
        else:
            # There are no key columns so we will fit the whole table
            sub_tables = {None: data}.items()

        self.interpolations = {}

        for key, base_table in sub_tables:
            if base_table.empty:
                continue
            # For each permutation of the key columns build interpolations
            self.interpolations[key] = {}
            for value_column in value_columns:
                # For each value in the table build an interpolation function
                if len(self.parameter_columns) == 2:
                    # 2 variable interpolation
                    if order == 0:
                        x = base_table[list(self.parameter_columns)]
                        y = base_table[value_column]
                        func = interpolate.NearestNDInterpolator(x=x.values, y=y.values)
                    else:
                        index, column = self.parameter_columns
                        table = base_table.pivot(index=index, columns=column, values=value_column)
                        x = table.index.values
                        y = table.columns.values
                        z = table.values
                        func = interpolate.RectBivariateSpline(x=x, y=y, z=z, ky=order, kx=order).ev
                else:
                    # 1 variable interpolation
                    base_table = base_table.sort_values(by=self.parameter_columns[0])
                    x = base_table[self.parameter_columns[0]]
                    y = base_table[value_column]
                    func = interpolate.InterpolatedUnivariateSpline(x, y, k=order)
                self.interpolations[key][value_column] = func