Python numpy module: expm1() code examples

The following 39 code examples, extracted from open-source Python projects, illustrate how to use numpy.expm1().
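Most of the snippets below rely on the same numerical fact: np.expm1(x) evaluates exp(x) - 1 without the cancellation that the naive expression suffers when |x| is tiny. A minimal illustration (plain NumPy, nothing project-specific):

import numpy as np

x = 1e-12
print(np.exp(x) - 1.0)   # ~1.000089e-12: cancellation corrupts all but the first few digits
print(np.expm1(x))       # ~1.0e-12: accurate to full double precision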

Project: nanopores | Author: mitschabaude
def fit_gamma(ti):
    mu = ti.mean()
    sigma = ti.std()
    C = mu**2/sigma**2

    def f(x):
        return np.exp(x)/(2.*np.expm1(x)/x - 1.)

    def f1(x):
        y = 2.*np.expm1(x)/x - 1.
        z = 2./x**2*(x*np.exp(x) - np.expm1(x))
        return np.exp(x)*(1 - z/y)/y

    # newton solve
    K = 2.*C # initial value
    #print "Newton iteration:"
    for i in range(10):
        dK = -(f(K) - C)/f1(K)
        K = K + dK
    #    print i, "Residual", f(K) - C, "Value K =", K
    #print

    tau = mu*(1. - np.exp(-K))/K
    return K, tau
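The loop above is a plain Newton iteration for the moment-matching condition f(K) = C with C = mu**2/sigma**2, where f1 is the derivative of f:

    K_{n+1} = K_n - \frac{f(K_n) - C}{f'(K_n)} .

Once K has converged, tau is recovered from the sample mean via tau = mu*(1 - exp(-K))/K, as in the last line.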
Project: nanopores | Author: mitschabaude
def sample_scalar(self, shape, a):
        AMAX = 30
        if a > AMAX:
            return np.random.poisson(a, shape)
        k = 1
        K = np.full(shape, k)
        s = a/np.expm1(a)
        S = s
        U = np.random.random(shape)
        new = S < U
        while np.any(new):
            k += 1
            K[new] = k
            s = s*a/float(k)
            S = S + s
            new = S < U
        return K
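This loop reads as inverse-CDF sampling from a zero-truncated Poisson distribution (an interpretation of the snippet, not stated in the source): the starting value s = a/np.expm1(a) equals the k = 1 probability, and each multiplication by a/k advances to the next term of

    P(K = k \mid K \ge 1) = \frac{a^{k} e^{-a}}{k!\,(1 - e^{-a})} = \frac{a^{k}}{k!\,(e^{a} - 1)}, \qquad k = 1, 2, \ldots

Each element of K is then set to the first k whose cumulative probability S exceeds the corresponding uniform draw U; above AMAX the zero-truncation is negligible and an ordinary Poisson draw is used instead.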
Project: factorix | Author: gbouchar
def c(vec):
    """Complement function for probabilities in the log-space: robustly computes 1-P(A) in the log-space
    Args:
        vec: vector of negative numbers representing log-probabilities of an event.

    Returns: the log-probabilities of (1-P(A)) where the log(P(A)) values are given in the vec numpy array

    Examples:
        >>> c(-1e-200)
        -460.51701859880916

        # >>> np.log(1 - np.exp(-1e-200)) raises a `RuntimeWarning: divide by zero` error
    """
    # return np.log1p(-np.exp(vec))  # Not robust to -1e-200
    if np.max(np.array(vec)) > 0:
        print('vec', vec)
    return np.log(-np.expm1(vec))
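A quick numeric comparison of the robust and naive forms, mirroring the doctest above (plain NumPy, no other assumptions):

import numpy as np

v = -1e-200                      # log-probability extremely close to log(1) = 0
print(np.log(-np.expm1(v)))      # -460.517... = log(1e-200), matching the doctest
print(np.log1p(-np.exp(v)))      # -inf (with a RuntimeWarning): np.exp(v) rounds to exactly 1.0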
Project: factorix | Author: gbouchar
def c(vec):
    """Complement function for probabilities in the log-space: robustly computes 1-P(A) in the log-space
    Args:
        vec: vector of negative numbers representing log-probabilities of an event.

    Returns: the log-probabilities of (1-P(A)) where the log(P(A)) values are given in the vec numpy array

    Examples:
        >>> c(-1e-200)
        -460.51701859880916

        # >>> np.log(1 - np.exp(-1e-200)) raises a `RuntimeWarning: divide by zero` error
    """
    # return np.log1p(-np.exp(vec))  # Not robust to -1e-200
    if np.max(np.array(vec)) > 0:
        print('vec', vec)
    return np.log(-np.expm1(vec))
Project: mcplates | Author: ian-r-rose
def vmf_logp(x, lon_lat, kappa):

    if x[1] < -90. or x[1] > 90.:
        raise ZeroProbability
        return -np.inf

    if kappa < eps:
        return np.log(1. / 4. / np.pi)

    mu = np.array([np.cos(lon_lat[1] * d2r) * np.cos(lon_lat[0] * d2r),
                   np.cos(lon_lat[1] * d2r) * np.sin(lon_lat[0] * d2r),
                   np.sin(lon_lat[1] * d2r)])
    test_point = np.transpose(np.array([np.cos(x[1] * d2r) * np.cos(x[0] * d2r),
                                        np.cos(x[1] * d2r) *
                                        np.sin(x[0] * d2r),
                                        np.sin(x[1] * d2r)]))

    logp_elem = np.log( -kappa / ( 2. * np.pi * np.expm1(-2. * kappa)) ) + \
        kappa * (np.dot(test_point, mu) - 1.)

    logp = logp_elem.sum()
    return logp
Project: mixedvines | Author: asnelt
def _logpdf(self, samples):
        if self.theta == 0:
            vals = np.zeros(samples.shape[0])
        else:
            vals = np.log(-self.theta * np.expm1(-self.theta)
                          * np.exp(-self.theta
                                   * (samples[:, 0] + samples[:, 1]))
                          / (np.expm1(-self.theta)
                             + np.expm1(-self.theta * samples[:, 0])
                             * np.expm1(-self.theta * samples[:, 1])) ** 2)
        return vals
Project: mixedvines | Author: asnelt
def _logcdf(self, samples):
        if self.theta == 0:
            vals = np.sum(np.log(samples), axis=1)
        else:
            old_settings = np.seterr(divide='ignore')
            vals = np.log(-np.log1p(np.expm1(-self.theta * samples[:, 0])
                                    * np.expm1(-self.theta * samples[:, 1])
                                    / (np.expm1(-self.theta)))) \
                - np.log(self.theta)
            np.seterr(**old_settings)
        return vals
Project: mixedvines | Author: asnelt
def _ccdf(self, samples):
        if self.theta == 0:
            vals = samples[:, 0]
        else:
            vals = np.exp(-self.theta * samples[:, 1]) \
                * np.expm1(-self.theta * samples[:, 0]) \
                / (np.expm1(-self.theta)
                   + np.expm1(-self.theta * samples[:, 0])
                   * np.expm1(-self.theta * samples[:, 1]))
        return vals
Project: mixedvines | Author: asnelt
def _ppcf(self, samples):
        if self.theta == 0:
            vals = samples[:, 0]
        else:
            vals = -np.log1p(samples[:, 0] * np.expm1(-self.theta)
                             / (np.exp(-self.theta * samples[:, 1])
                                - samples[:, 0] * np.expm1(-self.theta
                                                           * samples[:, 1]))) \
                / self.theta
        return vals
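The four mixedvines methods above (log-density, log-CDF, conditional CDF and its inverse) share the same expm1-based building blocks; they match the bivariate Frank copula (an identification inferred from the formulas, not stated in the snippets), whose CDF and density are

    C_\theta(u, v) = -\frac{1}{\theta} \log\left( 1 + \frac{(e^{-\theta u} - 1)(e^{-\theta v} - 1)}{e^{-\theta} - 1} \right),
    \qquad
    c_\theta(u, v) = \frac{-\theta\,(e^{-\theta} - 1)\, e^{-\theta (u + v)}}{\left[ (e^{-\theta} - 1) + (e^{-\theta u} - 1)(e^{-\theta v} - 1) \right]^{2}} .

Writing every e^{-\theta x} - 1 factor as np.expm1(-theta * x) keeps these terms accurate as theta approaches zero, and the theta == 0 branches return the independence-copula limits directly.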
Project: mitre | Author: gerberlab
def prior_and_posterior_for_category(self, category_to_primitives):
        category_to_prior = {}
        category_to_posterior = {}
        # We want to take the logs of the length prior, but some elements
        # will be zero; entries in the result being -inf is okay, but we want
        # to avoid a potentially alarming warning message
        M = np.zeros(self.length_prior.shape)
        M[self.length_prior > 0.] = np.log(self.length_prior[self.length_prior > 0.])
        M[self.length_prior <= 0.] = -np.inf
        lengths = np.arange(len(M))

        for category, primitives in category_to_primitives.iteritems():
            base_prior = 0.
            for primitive in primitives:
                base_prior += self.base_prior_by_primitive[primitive]
            # Effectively we calculate the probability of never
            # choosing this category in a rule with 0 primitives, 1
            # primitive, etc, then add those up, scaling by the
            # probability of having 0 primitives, 1 primitive, etc.;
            # for speed and numerical behavior we do this in a
            # vectorized way on a log scale:
            inclusion_prior = -1.0*np.expm1(
                logsumexp(M + (np.log1p(-1.0*base_prior) * lengths))
            )
            category_to_prior[category] = inclusion_prior

            states_in_category = 0
            for state in self.state_ensemble_primitives:
                if state.intersection(primitives):
                    states_in_category += 1
            category_to_posterior[category] = (
                states_in_category / float(len(self.state_ensemble))
            )
        return category_to_prior, category_to_posterior
Project: InplusTrader_Linux | Author: zhengwsh
def __init__(self, daily_returns, benchmark_daily_returns, risk_free_rate, days, period=DAILY):
        assert(len(daily_returns) == len(benchmark_daily_returns))

        self._portfolio = daily_returns
        self._benchmark = benchmark_daily_returns
        self._risk_free_rate = risk_free_rate
        self._annual_factor = _annual_factor(period)

        self._alpha = None
        self._beta = None
        self._sharpe = None
        self._return = np.expm1(np.log1p(self._portfolio).sum())
        self._annual_return = (1 + self._return) ** (365 / days) - 1
        # self._annual_return = (1 + self._return) ** (self._annual_factor / len(self._portfolio)) - 1
        self._benchmark_return = np.expm1(np.log1p(self._benchmark).sum())
        self._benchmark_annual_return = (1 + self._benchmark_return) ** (365 / days) - 1
        # self._benchmark_annual_return = (1 + self._benchmark_return) ** (self._annual_factor / len(self._portfolio)) - 1
        self._max_drawdown = None
        self._volatility = None
        self._annual_volatility = None
        self._benchmark_volatility = None
        self._benchmark_annual_volatility = None
        self._information_ratio = None
        self._sortino = None
        self._tracking_error = None
        self._annual_tracking_error = None
        self._downside_risk = None
        self._annual_downside_risk = None
        self._calmar = None
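The self._return line compounds the daily returns on a log scale: np.expm1(np.log1p(r).sum()) equals prod(1 + r) - 1 while staying accurate for long series of small returns. A minimal sketch with made-up returns:

import numpy as np

r = np.array([0.010, -0.025, 0.004, 0.017])        # hypothetical daily returns
total = np.expm1(np.log1p(r).sum())                # compounded return via log1p/expm1
print(np.isclose(total, np.prod(1.0 + r) - 1.0))   # True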
Project: InplusTrader_Linux | Author: zhengwsh
def __init__(self, daily_returns, benchmark_daily_returns, risk_free_rate, days, period=DAILY):
        assert(len(daily_returns) == len(benchmark_daily_returns))

        self._portfolio = daily_returns
        self._benchmark = benchmark_daily_returns
        self._risk_free_rate = risk_free_rate
        self._annual_factor = _annual_factor(period)
        self._daily_risk_free_rate = self._risk_free_rate / self._annual_factor

        self._alpha = None
        self._beta = None
        self._sharpe = None
        self._return = np.expm1(np.log1p(self._portfolio).sum())
        self._annual_return = (1 + self._return) ** (365 / days) - 1
        self._benchmark_return = np.expm1(np.log1p(self._benchmark).sum())
        self._benchmark_annual_return = (1 + self._benchmark_return) ** (365 / days) - 1
        self._max_drawdown = None
        self._volatility = None
        self._annual_volatility = None
        self._benchmark_volatility = None
        self._benchmark_annual_volatility = None
        self._information_ratio = None
        self._sortino = None
        self._tracking_error = None
        self._annual_tracking_error = None
        self._downside_risk = None
        self._annual_downside_risk = None
        self._calmar = None
        self._avg_excess_return = None
Project: nanopores | Author: mitschabaude
def pdf_direct(self, tt, N=50):
        a = self.K
        t = tt/self.tau
        S = np.ones_like(t)
        s = np.ones_like(t)
        for k in range(1, N):
            s *= (a*t)/(k*(k+1.))
            S += s
        return np.exp(-t)*a/np.expm1(a) * S /self.tau
Project: nanopores | Author: mitschabaude
def pdf_bessel(self, tt):
        a = self.K
        t = tt/self.tau
        return np.exp(-t)*np.sqrt(a/t)*iv(1., 2.*np.sqrt(a*t))/self.tau/np.expm1(a)
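pdf_direct and pdf_bessel above evaluate the same density in two ways, a truncated series and a closed form with the modified Bessel function I_1; they agree because of the standard series (here t = tt/tau, and the trailing 1/tau comes from the change of variables):

    I_1\left(2\sqrt{a t}\right) = \sqrt{a t}\, \sum_{k \ge 0} \frac{(a t)^{k}}{k!\,(k+1)!},
    \qquad\text{hence}\qquad
    \frac{a\, e^{-t}}{e^{a} - 1} \sum_{k \ge 0} \frac{(a t)^{k}}{k!\,(k+1)!}
    = \frac{e^{-t}}{e^{a} - 1} \sqrt{\frac{a}{t}}\; I_1\left(2\sqrt{a t}\right) .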
Project: nanopores | Author: mitschabaude
def cdf(self, tt, N=50):
        a = self.K
        tau = self.tau
        gamma = sp.stats.gamma.cdf
        S = np.zeros_like(tt)
        s = 1.
        for k in range(1, N):
            s *= a/k
            S += s*gamma(tt, k, scale=tau)
        return 1./np.expm1(a) * S
Project: nanopores | Author: mitschabaude
def cfd_vec(self, tt, p, N=50):
        # cdf that takes parameter as vector input, for fitting
        a = p[:, 0:1]
        tau = p[:, 1:2]
        gamma = sp.stats.gamma.cdf
        S = np.ones((p.shape[0], tt.shape[1]))
        s = np.ones((1, tt.shape[0]))
        for k in range(1, N):
            s = s*a/k
            S = S + s*gamma(tt, k, scale=tau)
        return 1./np.expm1(a) * S
Project: nanopores | Author: mitschabaude
def f(x):
    return np.exp(x)/(2.*np.expm1(x)/x - 1.)
Project: nanopores | Author: mitschabaude
def f1(b, N=50):
    k = np.arange(1, N)
    return np.sum(summand(k, b))*1./np.expm1(b) - np.log(b*np.exp(b)/np.expm1(b))
Project: nanopores | Author: mitschabaude
def h(x):
    return np.sqrt(g(x)*(2. - x/np.expm1(x)))
Project: nanopores | Author: mitschabaude
def poisson_from_positiveK(mean):
    # solve x/(1 - exp(-x)) == mean
    def f(x):
        return x/(1. - np.exp(-x))
    def f1(x):
        return (np.expm1(x) - x)/(2.*np.cosh(x) - 2.)

    x = solve(mean, f, f1, mean, n=10)
    return x
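Here x/(1 - e^{-x}) is the mean of a zero-truncated Poisson distribution with rate x, so the Newton solve recovers the underlying rate from an observed truncated mean (solve itself is defined elsewhere in the project); f1 is simply f' rewritten in a numerically friendlier form, since

    \frac{d}{dx}\, \frac{x}{1 - e^{-x}} = \frac{e^{x}\,(e^{x} - 1 - x)}{(e^{x} - 1)^{2}} = \frac{\mathrm{expm1}(x) - x}{2\cosh(x) - 2} .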
Project: nanopores | Author: mitschabaude
def f(x):
    return exp(x)/(2.*expm1(x)/x - 1.)
Project: nanopores | Author: mitschabaude
def f(b, N=50):
    k = np.arange(1, N)
    return np.sum(summand(k, b))*1./np.expm1(b) - np.log(b*np.exp(b)/np.expm1(b))
Project: nanopores | Author: mitschabaude
def poisson_from_positiveK(mean):
    # solve x/(1 - exp(-x)) == mean
    def f(x):
        return x/(1. - np.exp(-x))
    def f1(x):
        return (np.expm1(x) - x)/(2.*np.cosh(x) - 2.)

    x = solve_newton(mean, f, f1, mean, n=10)
    return x
Project: nanopores | Author: mitschabaude
def sample_(self, shape, a):
        if np.isscalar(a) or a.size == 1:
            return self.sample_scalar(shape, a)

        #print shape, a.shape
        AMAX = 30
        k = 1
        K = np.full(shape, k)

        # for large a values, just use non-truncated poisson
        large = broadcast_mask(a > AMAX)
        small = broadcast_mask(a <= AMAX)
        K[large] = np.random.poisson(a[large], K[large].shape)
        Ksmall = K[small]
        a = a[small]

        # actual algorithm begins here
        s = a/np.expm1(a)
        S = s
        U = np.random.random(Ksmall.shape)
        new = S < U
        while np.any(new):
            k += 1
            Ksmall[new] = k
            s = s*a/float(k)
            S = S + s
            new = S < U
        K[small] = Ksmall
        return K
Project: nanopores | Author: mitschabaude
def pdf_(self, x, a):
        return stats.poisson.pmf(x, a)*np.exp(a)/np.expm1(a)
Project: GPflow | Author: GPflow
def backward(self, y):
        """
        Inverse of the softplus transform:
        .. math::

           x = \log( \exp(y) - 1)

        The bound for the input y is [self._lower, inf[; self._lower is
        subtracted prior to any calculations. The implementation avoids overflow
        explicitly by applying the log sum exp trick:
        .. math::

           \log ( \exp(y) - \exp(0)) &= ys + \log( \exp(y-ys) - \exp(-ys)) \\
                                     &= ys + \log( 1 - \exp(-ys))

           ys = \max(0, y)

        As y can not be negative, ys could be replaced with y itself.
        However, in case :math:`y=0` this results in np.log(0). Hence the zero is
        replaced by a machine epsilon.
        .. math::

           ys = \max( \epsilon, y)


        """
        ys = np.maximum(y - self._lower, np.finfo(settings.float_type).eps)
        return ys + np.log(-np.expm1(-ys))
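A self-contained round-trip sketch of the backward formula, assuming the forward transform is the shifted softplus y = log(1 + exp(x)) + lower (the forward method and settings.float_type are not shown in this snippet, so float64 and a stand-in lower bound are assumed):

import numpy as np

lower = 1e-6                                      # stand-in for self._lower (assumption)
x = np.array([-10.0, -1.0, 0.0, 5.0])
y = np.log1p(np.exp(x)) + lower                   # assumed forward softplus transform
ys = np.maximum(y - lower, np.finfo(np.float64).eps)
x_back = ys + np.log(-np.expm1(-ys))              # the backward() formula above
print(np.allclose(x_back, x))                     # True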
Project: kaggle-allstate-claims-severity | Author: alno
def norm_y_inv(y_bc):
    return np.expm1((y_bc * norm_y_lambda + 1)**(1/norm_y_lambda))
Project: sparse | Author: mrocklin
def expm1(self, out=None):
        assert out is None
        return self.elemwise(np.expm1)
Project: nengolib | Author: arvoelke
def step_math(self, dt, J, spiked, voltage, refractory_time):
        # reduce all refractory times by dt
        refractory_time -= dt

        # compute effective dt for each neuron, based on remaining time.
        # note that refractory times that have completed midway into this
        # timestep will be given a partial timestep, and moreover these will
        # be subtracted to zero at the next timestep (or reset by a spike)
        delta_t = (dt - refractory_time).clip(0, dt)

        # update voltage using discretized lowpass filter
        # since v(t) = v(0) + (J - v(0))*(1 - exp(-t/tau)) assuming
        # J is constant over the interval [t, t + dt)
        voltage -= (J - voltage) * np.expm1(-delta_t / self.tau_rc)

        # determine which neurons spiked (set them to 1/dt, else 0)
        spiked_mask = voltage > 1
        spiked[:] = spiked_mask / dt

        # set v(0) = 1 and solve for t to compute the spike time
        t_spike = dt + self.tau_rc * np.log1p(
            -(voltage[spiked_mask] - 1) / (J[spiked_mask] - 1))

        # set spiked voltages to zero, refractory times to tau_ref, and
        # rectify negative voltages to a floor of min_voltage
        voltage[voltage < self.min_voltage] = self.min_voltage
        voltage[spiked_mask] = 0
        refractory_time[spiked_mask] = self.tau_ref + t_spike
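The voltage update line uses expm1 so that the discretized lowpass step stays accurate when delta_t/tau_rc is small; it is an exact rewrite of the expression quoted in the comment above:

    v(t + \Delta t) = v + (J - v)\left(1 - e^{-\Delta t/\tau_{rc}}\right)
                    = v - (J - v)\,\mathrm{expm1}\left(-\Delta t/\tau_{rc}\right) .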
Project: Theano-Deep-learning | Author: GeekLiB
def test_numpy_method():
    # This type of code is used frequently by PyMC3 users
    x = tt.dmatrix('x')
    data = np.random.rand(5, 5)
    x.tag.test_value = data
    for fct in [np.arccos, np.arccosh, np.arcsin, np.arcsinh,
                np.arctan, np.arctanh, np.ceil, np.cos, np.cosh, np.deg2rad,
                np.exp, np.exp2, np.expm1, np.floor, np.log,
                np.log10, np.log1p, np.log2, np.rad2deg,
                np.sin, np.sinh, np.sqrt, np.tan, np.tanh, np.trunc]:
        y = fct(x)
        f = theano.function([x], y)
        utt.assert_allclose(np.nan_to_num(f(data)),
                            np.nan_to_num(fct(data)))
Project: Theano-Deep-learning | Author: GeekLiB
def impl(self, x):
        # If x is an int8 or uint8, numpy.expm1 will compute the result in
        # half-precision (float16), where we want float32.
        x_dtype = str(getattr(x, 'dtype', ''))
        if x_dtype in ('int8', 'uint8'):
            return numpy.expm1(x, sig='f')
        return numpy.expm1(x)
Project: Theano-Deep-learning | Author: GeekLiB
def c_code(self, node, name, inputs, outputs, sub):
        (x,) = inputs
        (z,) = outputs
        if node.inputs[0].type in complex_types:
            raise NotImplementedError('type not supported', type)
        return "%(z)s = expm1(%(x)s);" % locals()
Project: rqalpha | Author: ricequant
def __init__(self, daily_returns, benchmark_daily_returns, risk_free_rate, days, period=DAILY):
        assert(len(daily_returns) == len(benchmark_daily_returns))

        self._portfolio = daily_returns
        self._benchmark = benchmark_daily_returns
        self._risk_free_rate = risk_free_rate
        self._annual_factor = _annual_factor(period)
        self._daily_risk_free_rate = self._risk_free_rate / self._annual_factor

        self._alpha = None
        self._beta = None
        self._sharpe = None
        self._return = np.expm1(np.log1p(self._portfolio).sum())
        self._annual_return = (1 + self._return) ** (365 / days) - 1
        self._benchmark_return = np.expm1(np.log1p(self._benchmark).sum())
        self._benchmark_annual_return = (1 + self._benchmark_return) ** (365 / days) - 1
        self._max_drawdown = None
        self._volatility = None
        self._annual_volatility = None
        self._benchmark_volatility = None
        self._benchmark_annual_volatility = None
        self._information_ratio = None
        self._sortino = None
        self._tracking_error = None
        self._annual_tracking_error = None
        self._downside_risk = None
        self._annual_downside_risk = None
        self._calmar = None
        self._avg_excess_return = None
Project: GZ_travelTime | Author: zhilonglu
def mape_ln(y,d):
    c=d.get_label()
    result=np.sum(np.abs(np.expm1(y)-np.abs(np.expm1(c)))/np.abs(np.expm1(c)))/len(c)
    return "mape",result
Project: guacml | Author: guacml
def transform_back(self, transformed_target):
        return np.expm1(transformed_target)
Project: MOSFiT | Author: guillochon
def process(self, **kwargs):
        """Process module."""
        Transform.process(self, **kwargs)
        self._kappa = kwargs[self.key('kappa')]
        self._kappa_gamma = kwargs[self.key('kappagamma')]
        self._m_ejecta = kwargs[self.key('mejecta')]
        self._v_ejecta = kwargs[self.key('vejecta')]

        self._tau_diff = np.sqrt(self.DIFF_CONST * self._kappa *
                                 self._m_ejecta / self._v_ejecta) / DAY_CGS
        self._trap_coeff = (
            self.TRAP_CONST * self._kappa_gamma * self._m_ejecta /
            (self._v_ejecta ** 2)) / DAY_CGS ** 2
        td2, A = self._tau_diff ** 2, self._trap_coeff  # noqa: F841

        new_lums = np.zeros_like(self._times_to_process)
        if len(self._dense_times_since_exp) < 2:
            return {self.dense_key('luminosities'): new_lums}
        min_te = min(self._dense_times_since_exp)
        tb = max(0.0, min_te)
        linterp = interp1d(
            self._dense_times_since_exp, self._dense_luminosities, copy=False,
            assume_sorted=True)

        uniq_times = np.unique(self._times_to_process[
            (self._times_to_process >= tb) & (
                self._times_to_process <= self._dense_times_since_exp[-1])])
        lu = len(uniq_times)

        num = int(round(self.N_INT_TIMES / 2.0))
        lsp = np.logspace(
            np.log10(self._tau_diff /
                     self._dense_times_since_exp[-1]) +
            self.MIN_LOG_SPACING, 0, num)
        xm = np.unique(np.concatenate((lsp, 1 - lsp)))

        int_times = np.clip(
            tb + (uniq_times.reshape(lu, 1) - tb) * xm, tb,
            self._dense_times_since_exp[-1])

        int_te2s = int_times[:, -1] ** 2
        int_lums = linterp(int_times)  # noqa: F841
        int_args = int_lums * int_times * np.exp(
            (int_times ** 2 - int_te2s.reshape(lu, 1)) / td2)
        int_args[np.isnan(int_args)] = 0.0

        uniq_lums = np.trapz(int_args, int_times)
        uniq_lums *= -2.0 * np.expm1(-A / int_te2s) / td2

        new_lums = uniq_lums[np.searchsorted(uniq_times,
                                             self._times_to_process)]

        return {self.key('tau_diffusion'): self._tau_diff,
                self.dense_key('luminosities'): new_lums}
Project: stanity | Author: hammerlab
def gpinv(p, k, sigma):
    """Inverse Generalised Pareto distribution function."""
    x = np.empty(p.shape)
    x.fill(np.nan)
    if sigma <= 0:
        return x
    ok = (p > 0) & (p < 1)
    if np.all(ok):
        if np.abs(k) < np.finfo(float).eps:
            np.negative(p, out=x)
            np.log1p(x, out=x)
            np.negative(x, out=x)
        else:
            np.negative(p, out=x)
            np.log1p(x, out=x)
            x *= -k
            np.expm1(x, out=x)
            x /= k
        x *= sigma
    else:
        if np.abs(k) < np.finfo(float).eps:
            # x[ok] = - np.log1p(-p[ok])
            temp = p[ok]
            np.negative(temp, out=temp)
            np.log1p(temp, out=temp)
            np.negative(temp, out=temp)
            x[ok] = temp
        else:
            # x[ok] = np.expm1(-k * np.log1p(-p[ok])) / k
            temp = p[ok]
            np.negative(temp, out=temp)
            np.log1p(temp, out=temp)
            temp *= -k
            np.expm1(temp, out=temp)
            temp /= k
            x[ok] = temp
        x *= sigma
        x[p == 0] = 0
        if k >= 0:
            x[p == 1] = np.inf
        else:
            x[p == 1] = -sigma / k
    return x
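Both branches above evaluate the generalised Pareto quantile function; with shape k and scale sigma as used in the code,

    F^{-1}(p) = \frac{\sigma}{k}\left[(1 - p)^{-k} - 1\right] \quad (k \ne 0),
    \qquad
    F^{-1}(p) = -\sigma \log(1 - p) \quad (k = 0),

and evaluating (1 - p)^{-k} - 1 as expm1(-k * log1p(-p)) keeps the result accurate when k * log(1 - p) is small.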
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda | Author: SignalMedia
def test_numpy_ufuncs(self):
        # test ufuncs of numpy 1.9.2. see:
        # http://docs.scipy.org/doc/numpy/reference/ufuncs.html

        # some functions are skipped because it may return different result
        # for unicode input depending on numpy version

        for name, idx in compat.iteritems(self.indices):
            for func in [np.exp, np.exp2, np.expm1, np.log, np.log2, np.log10,
                         np.log1p, np.sqrt, np.sin, np.cos, np.tan, np.arcsin,
                         np.arccos, np.arctan, np.sinh, np.cosh, np.tanh,
                         np.arcsinh, np.arccosh, np.arctanh, np.deg2rad,
                         np.rad2deg]:
                if isinstance(idx, pd.tseries.base.DatetimeIndexOpsMixin):
                    # raise TypeError or ValueError (PeriodIndex)
                    # PeriodIndex behavior should be changed in future version
                    with tm.assertRaises(Exception):
                        func(idx)
                elif isinstance(idx, (Float64Index, Int64Index)):
                    # coerces to float (e.g. np.sin)
                    result = func(idx)
                    exp = Index(func(idx.values), name=idx.name)
                    self.assert_index_equal(result, exp)
                    self.assertIsInstance(result, pd.Float64Index)
                else:
                    # raise AttributeError or TypeError
                    if len(idx) == 0:
                        continue
                    else:
                        with tm.assertRaises(Exception):
                            func(idx)

            for func in [np.isfinite, np.isinf, np.isnan, np.signbit]:
                if isinstance(idx, pd.tseries.base.DatetimeIndexOpsMixin):
                    # raise TypeError or ValueError (PeriodIndex)
                    with tm.assertRaises(Exception):
                        func(idx)
                elif isinstance(idx, (Float64Index, Int64Index)):
                    # results in bool array
                    result = func(idx)
                    exp = func(idx.values)
                    self.assertIsInstance(result, np.ndarray)
                    tm.assertNotIsInstance(result, Index)
                else:
                    if len(idx) == 0:
                        continue
                    else:
                        with tm.assertRaises(Exception):
                            func(idx)
Project: neurotools | Author: michaelerule
def nonnegative_bandpass_filter(data,fa=None,fb=None,
    Fs=1000.,order=4,zerophase=True,bandstop=False,
    offset=1.0):
    '''
    For filtering data that must remain non-negative. Due to ringing
    conventional fitering can create values less than zero for non-
    negative real inputs. This may be unrealistic for some data.

    To compensate, this performs the filtering on the natural
    logarithm of the input data. For small numbers, this can lead to
    numeric underflow, so an offset parameter (default 1) is added
    to the data for stability.

    Parameters
    ----------
    data (ndarray): 
        data, filtering performed over last dimension
    fa (number): 
        low-freq cutoff Hz. If none, lowpass at fb
    fb (number): 
        high-freq cutoff Hz. If none, highpass at fa
    Fs (int): 
        Sample rate in Hz
    order (1..6): 
        butterworth filter order. Default 4
    zerophase (boolean): 
        Use forward-backward filtering? (true)
    bandstop (boolean): 
        Do band-stop rather than band-pass
    offset (positive number): 
        Offset data to avoid underflow (1)

    Returns
    -------
    filtered : 
        Filtered signal
    '''
    offset -= 1.0
    data = np.log1p(data+offset)
    filtered = bandpass_filter(data,
        fa=fa, fb=fb, Fs=Fs,
        order=order,
        zerophase=zerophase,
        bandstop=bandstop)
    return np.expm1(filtered)