Python scipy module: scipy.special code examples

We extracted the following code examples from open-source Python projects to illustrate how to use scipy.special.

Project: pynufft    Author: jyhmiinlin    | Project source | File source
def kaiser_bessel_ft(u, J, alpha, kb_m, d):
    '''
    Interpolation weight for given J / alpha / kb_m
    '''

    u = u * (1.0 + 0.0j)
    import numpy
    import scipy.special
    z = numpy.sqrt((2 * numpy.pi * (J / 2) * u) ** 2.0 - alpha ** 2.0)
    nu = d / 2 + kb_m
    y = ((2 * numpy.pi) ** (d / 2)) * ((J / 2) ** d) * (alpha ** kb_m) / \
        scipy.special.iv(kb_m, alpha) * scipy.special.jv(nu, z) / (z ** nu)
    y = numpy.real(y)
    return y
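A minimal usage sketch (not from the project; the parameter values are illustrative, following the common NUFFT choice alpha = 2.34 * J):

import numpy
u = numpy.linspace(-0.5, 0.5, 5)
J = 6
weights = kaiser_bessel_ft(u, J, alpha=2.34 * J, kb_m=0, d=1)
print(weights.shape)  # (5,)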
Project: ldpop    Author: popgenmethods    | Project source | File source
def ordered_log_likelihoods(self, liks):
        try:
            return {time : self.ordered_log_likelihoods(l) for time,l in liks.items()}
        except AttributeError:
            liks = liks * self.antisymmetries

            all_nC = self.config_array[:,:-1,:-1].sum(axis=(1,2))
            liks = liks[all_nC == self.n]

            full_confs = self.config_array[:,:-1,:-1][all_nC == self.n, :, :]

            liks = numpy.log(liks)
            liks -= scipy.special.gammaln(self.n+1)
            for i in (0,1):
                for j in (0,1):
                    liks += scipy.special.gammaln(full_confs[:,i,j]+1)

            full_confs = [tuple(sorted(((i,j),cnf[i,j]) for i in (0,1) for j in (0,1))) for cnf in full_confs]
            return dict(zip(full_confs, liks))
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def check_for_x_over_absX(numerators, denominators):
    """Convert x/abs(x) into sign(x). """
    # TODO: this function should dig/search through dimshuffles
    # This won't catch a dimshuffled absolute value
    for den in list(denominators):
        if (den.owner and den.owner.op == T.abs_ and
                den.owner.inputs[0] in numerators):
            if den.owner.inputs[0].type.dtype.startswith('complex'):
                # TODO: Make an Op that projects a complex number to
                #      have unit length but projects 0 to 0.  That
                #      would be a weird Op, but consistent with the
                #      special case below.  I heard there's some
                #      convention in Matlab that is similar to
                #      this... but not sure.
                pass
            else:
                denominators.remove(den)
                numerators.remove(den.owner.inputs[0])
                numerators.append(T.sgn(den.owner.inputs[0]))
    return numerators, denominators
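The identity this graph rewrite relies on is easy to check numerically with NumPy (a standalone sketch, unrelated to the Theano machinery):

import numpy as np
x = np.array([-3.0, -0.5, 2.0])
print(np.allclose(x / np.abs(x), np.sign(x)))  # True for nonzero x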
Project: WaveletQuotes    Author: JobyKK    | Project source | File source
def frequency(self, w, s=1.0):
        """Frequency representation of derivative of gaussian.

        Parameters
        ----------
        w : float
            Angular frequency. If s is not specified, i.e. set to 1,
            this can be used as the non-dimensional angular
            frequency w * s.
        s : float
            Scaling factor. Default is 1.

        Returns
        -------
        complex: value of the derivative of Gaussian wavelet at the
                 given frequency
        """
        m = self.m
        x = s * w
        gamma = scipy.special.gamma
        const = -1j ** m / gamma(m + 0.5) ** .5
        function = x ** m * np.exp(-x ** 2 / 2)
        return const * function
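A standalone sketch of the same expression for m = 2, so it can be tried without the surrounding wavelet class (variable names are illustrative):

import numpy as np
import scipy.special

m = 2
w = np.linspace(0.1, 5.0, 5)
const = -1j ** m / scipy.special.gamma(m + 0.5) ** .5
print(const * w ** m * np.exp(-w ** 2 / 2))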
Project: GRIPy    Author: giruenf    | Project source | File source
def frequency(self, w, s=1.0):
        """Frequency representation of derivative of Gaussian.

        Parameters
        ----------
        w : float
            Angular frequency. If `s` is not specified, i.e. set to 1,
            this can be used as the non-dimensional angular
            frequency w * s.
        s : float
            Scaling factor. Default is 1.

        Returns
        -------
        out : complex
            Value of the derivative of Gaussian wavelet at the
            given frequency
        """
        m = self.m
        x = s * w
        gamma = scipy.special.gamma
        const = -1j ** m / gamma(m + 0.5) ** .5
        function = x ** m * np.exp(-x ** 2 / 2)
        return const * function
Project: brainiak    Author: brainiak    | Project source | File source
def _gen_legendre(self, run_TRs, orders):
        def reg(x):
            return np.concatenate(
                [scipy.special.legendre(o)(np.linspace(-1, 1, x))[None, :]
                 for o in orders], axis=0)
        reg_poly = scipy.linalg.block_diag(
            *map(reg, run_TRs)).T
        return reg_poly
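The building block here is scipy.special.legendre, which returns a polynomial object that can be evaluated directly on a grid. A minimal sketch with illustrative orders and grid size:

import numpy as np
import scipy.special

orders = (0, 1, 2)
grid = np.linspace(-1, 1, 5)
basis = np.array([scipy.special.legendre(o)(grid) for o in orders])
print(basis.shape)  # (3, 5): one row per polynomial order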
Project: yt    Author: yt-project    | Project source | File source
def special(self):
        if self._special is None:
            try:
                import scipy.special as special
            except ImportError:
                special = NotAModule(self._name)
            self._special = special
        return self._special
Project: driveboardapp    Author: nortd    | Project source | File source
def test_scipy_special(pyi_builder):
    """
    Test the importability of the `scipy.special` package and related hooks.

    This importation _must_ be tested independent of the importation of all
    other problematic SciPy packages and modules. Combining this test with other
    SciPy tests (e.g., `test_scipy()`) fails to properly exercise the hidden
    imports required by this package.
    """
    pyi_builder.test_source("""import scipy.special""")
Project: pycao    Author: evainlau    | Project source | File source
def __call__(self,time):
        output=0*Z
        for i in range(len(self)):
            output+=scipy.special.binom(len(self)-1, i)*((time)**i)*((1-time)**(len(self)-1-i))*self[i]
            #print i
            #print output
        return output
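scipy.special.binom supplies the binomial coefficients of the Bernstein basis used above. A quick standalone check that the basis weights sum to one (values are illustrative):

import scipy.special

n, t = 3, 0.5
weights = [scipy.special.binom(n, i) * t ** i * (1 - t) ** (n - i)
           for i in range(n + 1)]
print(weights, sum(weights))  # [0.125, 0.375, 0.375, 0.125] 1.0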
Project: pymake    Author: dtrckd    | Project source | File source
def table_log_likelihood(self):
        log_likelihood = 0.
        for document_index in xrange(self._D):
            log_likelihood += len(self._k_dt[document_index]) * numpy.log(self._alpha) - log_factorial(len(self._t_dv[document_index]), self._alpha)
            for table_index in xrange(len(self._k_dt[document_index])):
                log_likelihood += scipy.special.gammaln(self._n_dt[document_index][table_index])
            log_likelihood += scipy.special.gammaln(self._alpha)

        return log_likelihood
Project: pymake    Author: dtrckd    | Project source | File source
def topic_log_likelihood(self):
        log_likelihood = self._K * numpy.log(self._gamma) - log_factorial(numpy.sum(self._m_k), self._gamma)
        for topic_index in xrange(self._K):
            log_likelihood += scipy.special.gammaln(self._m_k[topic_index])
        log_likelihood += scipy.special.gammaln(self._gamma)

        return log_likelihood
Project: pymake    Author: dtrckd    | Project source | File source
def word_log_likelihood(self):
        n_k = numpy.sum(self._n_kd, axis=1)
        assert(len(n_k) == self._K)

        log_likelihood = self._K * scipy.special.gammaln(self._V * self._eta)
        for topic_index in xrange(self._K):
            log_likelihood -= scipy.special.gammaln(self._V * self._eta + n_k[topic_index])
            for word_index in xrange(self._V):
                if self._n_kv[topic_index, word_index] > 0:
                    log_likelihood += scipy.special.gammaln(self._n_kv[topic_index, word_index] + self._eta) - scipy.special.gammaln(self._eta)

        return log_likelihood
Project: pymake    Author: dtrckd    | Project source | File source
def log_factorial(n, a):
    if n == 0:
        return 0.
    return scipy.special.gammaln(n + a) - scipy.special.gammaln(a)
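These likelihoods lean on the identity gammaln(n + 1) == log(n!), which avoids overflow for large n. A quick check:

import math
import scipy.special

n = 10
print(scipy.special.gammaln(n + 1))   # 15.1044...
print(math.log(math.factorial(n)))    # 15.1044...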
Project: pymake    Author: dtrckd    | Project source | File source
def update_tau(self):
        sum_nu = numpy.sum(self._nu, axis=0)
        assert(len(sum_nu) == self._K)
        if self._finite_mode:
            assert(len(sum_nu) == self._K)
            self._tau[0, :] = self._alpha / self._K + sum_nu
            self._tau[1, :] = 1. + self._N - sum_nu
        else:
            N_minus_sum_nu = self._N - sum_nu
            for k in xrange(self._K):
                psi_tau = scipy.special.psi(self._tau)
                assert(psi_tau.shape == (2, self._K))

                psi_sum_tau = scipy.special.psi(numpy.sum(self._tau, axis=0))
                assert(len(psi_sum_tau) == self._K)

                psi_tau0_cumsum = numpy.hstack([0, numpy.cumsum(psi_tau[0, :-1])])
                assert(len(psi_tau0_cumsum) == self._K)

                psi_sum_cumsum = numpy.cumsum(psi_sum_tau)
                assert(len(psi_sum_cumsum) == self._K)

                exponent = psi_tau[1, :] + psi_tau0_cumsum - psi_sum_cumsum
                unnormalized = numpy.exp(exponent - numpy.max(exponent))
                assert(len(unnormalized) == self._K)

                qs = numpy.zeros((self._K, self._K))
                for m in xrange(k, self._K):
                    qs[m, 0:m + 1] = unnormalized[0:m + 1] / numpy.sum(unnormalized[0:m + 1])

                self._tau[0, k] = numpy.sum(sum_nu[k:self._K]) + numpy.dot(N_minus_sum_nu[k + 1:self._K], numpy.sum(qs[k + 1:self._K, k + 1:self._K], axis=1)) + self._alpha
                self._tau[1, k] = numpy.dot(N_minus_sum_nu[k:self._K], qs[k:self._K, k]) + 1
        return
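scipy.special.psi used above is the digamma function (the logarithmic derivative of the gamma function); it broadcasts elementwise over arrays, which is what the shape assertions rely on. A minimal check:

import numpy as np
import scipy.special

print(scipy.special.psi(1.0))                    # -0.5772... (negative Euler-Mascheroni constant)
print(scipy.special.psi(np.ones((2, 3))).shape)  # (2, 3)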
Project: pymake    Author: dtrckd    | Project source | File source
def compute_expected_pzk0_qjensen(self, k):
        assert(k >= 0 and k < self._K)
        tau = self._tau[:, 0:k + 1]
        assert(tau.shape == (2, k + 1))

        psi_tau = scipy.special.psi(tau)
        assert(psi_tau.shape == (2, k + 1))

        psi_sum_tau = scipy.special.psi(numpy.sum(tau, axis=0))
        assert(len(psi_sum_tau) == k + 1)

        psi_tau0_cumsum = numpy.hstack([0, numpy.cumsum(psi_tau[0, :-1])])
        assert(len(psi_tau0_cumsum) == k + 1)

        psi_sum_cumsum = numpy.cumsum(psi_sum_tau)
        assert(len(psi_sum_cumsum) == k + 1)

        tmp = psi_tau[1, :] + psi_tau0_cumsum - psi_sum_cumsum
        assert(len(tmp) == k + 1)

        q = numpy.exp(tmp - numpy.max(tmp))
        assert(len(q) == k + 1)

        q = q / numpy.sum(q)
        assert(len(q) == k + 1)

        # compute the lower bound
        lower_bound = numpy.sum(q * (tmp - numpy.log(q)))

        return lower_bound
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_stack_scalar_make_vector_constant(self):
        '''Test that calling stack() on scalars instantiates MakeVector,
        even when the scalars are simple int types.'''
        a = tensor.iscalar('a')
        b = tensor.lscalar('b')
        # test when the constant is the first element.
        # The first element is used in a special way
        s = stack([10, a, b, numpy.int8(3)])
        f = function([a, b], s, mode=self.mode)
        val = f(1, 2)
        self.assertTrue(numpy.all(val == [10, 1, 2, 3]))
        topo = f.maker.fgraph.toposort()
        assert len([n for n in topo if isinstance(n.op, opt.MakeVector)]) > 0
        assert len([n for n in topo if isinstance(n, type(self.join_op))]) == 0
        assert f.maker.fgraph.outputs[0].dtype == 'int64'
Project: ThinkX    Author: AllenDowney    | Project source | File source
def MakeCdf(self, steps=101):
        """Returns the CDF of this distribution."""
        xs = [i / (steps - 1.0) for i in range(steps)]
        ps = special.betainc(self.alpha, self.beta, xs)
        cdf = Cdf(xs, ps)
        return cdf
Project: ThinkX    Author: AllenDowney    | Project source | File source
def Percentile(self, ps):
        """Returns the given percentiles from this distribution.

        ps: scalar, array, or list of [0-100]
        """
        ps = np.asarray(ps) / 100
        xs = special.betaincinv(self.alpha, self.beta, ps)
        return xs
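betaincinv inverts betainc in its third argument, which is what makes Percentile the inverse of MakeCdf above. A round-trip sketch with illustrative parameters:

import scipy.special as special

alpha, beta = 2.0, 5.0
p = special.betainc(alpha, beta, 0.3)      # regularized incomplete beta = Beta CDF
print(special.betaincinv(alpha, beta, p))  # ~0.3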
Project: mac-package-build    Author: persepolisdm    | Project source | File source
def test_scipy_special(pyi_builder):
    """
    Test the importability of the `scipy.special` package and related hooks.

    This importation _must_ be tested independent of the importation of all
    other problematic SciPy packages and modules. Combining this test with other
    SciPy tests (e.g., `test_scipy()`) fails to properly exercise the hidden
    imports required by this package.
    """
    pyi_builder.test_source("""import scipy.special""")
Project: neurotools    Author: michaelerule    | Project source | File source
def poisson_logpdf(k,l):
    '''
    Gives the log-pdf for a Poisson distribution with rate l,
    evaluated at points k. k should be a vector of integers.

    Parameters
    ----------
    k : array-like of int
        Points at which to evaluate the log-pdf.
    l : float
        Rate parameter of the Poisson distribution.

    Returns
    -------
    array-like of float
        Log-pdf values at each point in k.
    '''
    # k,l = map(np.float128,(k,l))
    return k*slog(l)-l-np.array([scipy.special.gammaln(x+1) for x in k])
    #return k*slog(l)-l-np.array([log_factorial(x) for x in k])
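Assuming slog is the project's elementwise log helper (i.e. equivalent to np.log here), the expression matches scipy.stats.poisson.logpmf, which can serve as a cross-check:

import numpy as np
import scipy.special
import scipy.stats

k = np.arange(5)
l = 3.0
manual = k * np.log(l) - l - scipy.special.gammaln(k + 1)
print(np.allclose(manual, scipy.stats.poisson.logpmf(k, l)))  # True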
Project: pymake    Author: dtrckd    | Project source | File source
def velb(self):
        log_likelihood = numpy.zeros(5)

        psi_tau = scipy.special.psi(self._tau)
        assert(psi_tau.shape == (2, self._K))
        psi_sum_tau = scipy.special.psi(numpy.sum(self._tau, axis=0)[numpy.newaxis, :])
        assert(psi_sum_tau.shape == (1, self._K))

        if self._finite_mode:
            # compute the probability of feature
            log_likelihood[0] = self._K * numpy.log(self._alpha / self._K) + (self._alpha / self._K - 1.) * numpy.sum(psi_tau[0, :] - psi_sum_tau)
            # compute the probability of feature statistics
            log_likelihood[1] = numpy.sum(self._nu * psi_tau[0, :]) + numpy.sum((1. - self._nu) * psi_tau[1, :]) - self._N * numpy.sum(psi_sum_tau)
        else:
            # compute the probability of feature
            log_likelihood[0] = self._K * numpy.log(self._alpha) + (self._alpha - 1.) * numpy.sum(psi_tau[0, :] - psi_sum_tau)
            # compute the probability of feature statistics
            for k in xrange(self._K):
                log_likelihood[1] += numpy.sum(self._nu[:, k]) * numpy.sum(psi_tau[0, :k + 1] - psi_sum_tau[0, :k + 1])
                log_likelihood[1] += numpy.dot((self._N - numpy.sum(self._nu[:, k])), self.compute_expected_pzk0_qjensen(k))

        # compute the probability of feature distribution
        log_likelihood[2] = -0.5 * self._K * self._D * numpy.log(2 * numpy.pi * self._sigma_a * self._sigma_a)
        log_likelihood[2] -= 0.5 / (self._sigma_a * self._sigma_a) * (numpy.sum(self._phi_cov) + numpy.sum(self._phi_mean * self._phi_mean))

        # compute the probability of data likelihood
        tmp_log_likelihood = numpy.sum(self._X * self._X) - 2 * numpy.sum(self._nu * numpy.dot(self._X, self._phi_mean.transpose()))
        tmp_1 = numpy.dot(numpy.ones((self._N, self._D)), (self._phi_cov + self._phi_mean ** 2).transpose())
        tmp_log_likelihood += numpy.sum(self._nu * tmp_1)
        tmp_1 = numpy.dot(self._nu, self._phi_mean)
        tmp_2 = numpy.sum(numpy.dot(self._nu ** 2, self._phi_mean ** 2))
        tmp_log_likelihood += numpy.sum(tmp_1 * tmp_1) - numpy.sum(tmp_2)

        log_likelihood[3] = -0.5 * self._N * self._D * numpy.log(2 * numpy.pi * self._sigma_x * self._sigma_x)
        log_likelihood[3] -= 0.5 / (self._sigma_x * self._sigma_x) * tmp_log_likelihood

        # entropy of the proposed distribution
        lngamma_tau = scipy.special.gammaln(self._tau)
        assert(lngamma_tau.shape == (2, self._K))
        lngamma_sum_tau = scipy.special.gammaln(numpy.sum(self._tau, axis=0)[numpy.newaxis, :])
        assert(lngamma_sum_tau.shape == (1, self._K))

        # compute the entropy of the distribution
        log_likelihood[4] = numpy.sum(lngamma_tau[0, :] + lngamma_tau[1, :] - lngamma_sum_tau)
        log_likelihood[4] -= numpy.sum((self._tau[0, :] - 1) * psi_tau[0, :] + (self._tau[1, :] - 1) * psi_tau[1, :])
        log_likelihood[4] += numpy.sum((self._tau[0, :] + self._tau[1, :] - 2) * psi_sum_tau)

        assert(numpy.all(self._phi_cov > 0))
        assert(numpy.all(self._nu >= 0) and numpy.all(self._nu <= 1))
        log_likelihood[4] += 0.5 * self._K * self._D * numpy.log(2 * numpy.pi * numpy.e)
        log_likelihood[4] += 0.5 * numpy.sum(numpy.log(self._phi_cov))
        #log_likelihood[4] += 0.5 * numpy.log(numpy.sqrt(numpy.sum(self._phi_cov * self._phi_cov, axis=1)))
        log_likelihood[4] -= numpy.sum(self._nu * numpy.log(self._nu) + (1. - self._nu) * numpy.log(1. - self._nu))

        return numpy.sum(log_likelihood)
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def local_subtensor_of_dot(node):
    """
    This optimization translates T.dot(A, B)[idxs] into T.dot(A[idxs_a], B[idxs_b]),
    where idxs_a and idxs_b are defined appropriately.

    idxs_a is the first A.ndim-1 entries of idxs,
    and idxs_b is the remaining entries of idxs (if any),
    modified to skip the second-to-last dimension of B
    (because dot sums over this dimension).

    """
    if not isinstance(node.op, Subtensor):
        return
    if (not node.inputs[0].owner or
            not isinstance(node.inputs[0].owner.op, T.Dot)):
        return
    # If another node uses the output of the dot,
    # we don't want to compute the sub-part twice.
    if len(node.inputs[0].clients) > 1:
        return

    a = node.inputs[0].owner.inputs[0]
    b = node.inputs[0].owner.inputs[1]

    idx_list = get_idx_list(node.inputs, node.op.idx_list)

    num_a_indices = min(a.ndim - 1, len(idx_list))
    a_indices = idx_list[:num_a_indices]
    b_indices = idx_list[num_a_indices:]

    # This is necessary because np.dot sums the last index of a with the second to last of b
    # so we want to skip the second-to-last index into b.
    # This wasn't necessary for a, because we just omitted the last index.
    # We skip this if b.ndim = 1, since then we just want b_sub = b, not b_sub = b[:]
    # (dot also handles b.ndim < 2 as a special case)
    if b.ndim > 1 and len(b_indices) >= b.ndim - 1:
        b_indices = (b_indices[:b.ndim - 2] +
                     (slice(None, None, None),) + b_indices[b.ndim - 2:])

    a_sub = a.__getitem__(tuple(a_indices))
    b_sub = b.__getitem__(tuple(b_indices)) if b_indices else b

    # Copy over previous output stacktrace to a_sub and b_sub,
    # because an error in the subtensor operation (e.g. an index error)
    # on either a or b must correspond to an error in the
    # subtensor operation on their dot product.
    copy_stack_trace(node.outputs[0], [a_sub, b_sub])

    # Copy over previous output stacktrace and previous dot product stacktrace,
    # because an error here may correspond to an error in either the original
    # dot product or the dot product after the subtensor operation.
    r = T.dot(a_sub, b_sub)
    copy_stack_trace([node.outputs[0], node.inputs[0]], r)

    return [r]
Project: WaveletQuotes    Author: JobyKK    | Project source | File source
def time(self, t, s=1.0):
        """
        Return a DOG wavelet.

        When m = 2, this is also known as the "Mexican hat", "Marr"
        or "Ricker" wavelet.

        It models the function::

            ``A d^m/dx^m exp(-x^2 / 2)``,

        where ``A = (-1)^(m+1) / (gamma(m + 1/2))^.5``
        and   ``x = t / s``.

        Note that the energy of the return wavelet is not normalised
        according to s.

        Parameters
        ----------
        t : float
            Time. If s is not specified, this can be used as the
            non-dimensional time t/s.
        s : scalar
            Width parameter of the wavelet.

        Returns
        -------
        float : value of the DOG wavelet at the given time


        Notes
        -----
        The derivative of the gaussian has a polynomial representation:

        from http://en.wikipedia.org/wiki/Gaussian_function:

        "Mathematically, the derivatives of the Gaussian function can be
        represented using Hermite functions. The n-th derivative of the
        Gaussian is the Gaussian function itself multiplied by the n-th
        Hermite polynomial, up to scale."

        http://en.wikipedia.org/wiki/Hermite_polynomial

        Here, we want the 'probabilists' Hermite polynomial (He_n),
        which is computed by scipy.special.hermitenorm

        """
        x = t / s
        m = self.m

        # compute the hermite polynomial (used to evaluate the
        # derivative of a gaussian)
        He_n = scipy.special.hermitenorm(m)
        gamma = scipy.special.gamma

        const = (-1) ** (m + 1) / gamma(m + 0.5) ** .5
        function = He_n(x) * np.exp(-x ** 2 / 2)

        return const * function
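For m = 2, scipy.special.hermitenorm returns He_2(x) = x**2 - 1, which reduces the expression above to the familiar Ricker ("Mexican hat") shape. A quick check:

import numpy as np
import scipy.special

He_2 = scipy.special.hermitenorm(2)
x = np.linspace(-3, 3, 7)
print(np.allclose(He_2(x), x ** 2 - 1))  # True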
Project: GRIPy    Author: giruenf    | Project source | File source
def time(self, t, s=1.0):
        """
        Return a Derivative of Gaussian wavelet.

        When m = 2, this is also known as the "Mexican hat", "Marr"
        or "Ricker" wavelet.

        It models the function::

            ``A d^m/dx^m exp(-x^2 / 2)``,

        where ``A = (-1)^(m+1) / (gamma(m + 1/2))^.5``
        and   ``x = t / s``.

        Note that the energy of the return wavelet is not normalised
        according to `s`.

        Parameters
        ----------
        t : float
            Time. If `s` is not specified, this can be used as the
            non-dimensional time t/s.
        s : scalar
            Width parameter of the wavelet.

        Returns
        -------
        out : float
            Value of the DOG wavelet at the given time

        Notes
        -----
        The derivative of the Gaussian has a polynomial representation:

        from http://en.wikipedia.org/wiki/Gaussian_function:

        "Mathematically, the derivatives of the Gaussian function can be
        represented using Hermite functions. The n-th derivative of the
        Gaussian is the Gaussian function itself multiplied by the n-th
        Hermite polynomial, up to scale."

        http://en.wikipedia.org/wiki/Hermite_polynomial

        Here, we want the 'probabilists' Hermite polynomial (He_n),
        which is computed by scipy.special.hermitenorm

        """
        x = t / s
        m = self.m

        # compute the Hermite polynomial (used to evaluate the
        # derivative of a Gaussian)
        He_n = scipy.special.hermitenorm(m)
        gamma = scipy.special.gamma

        const = (-1) ** (m + 1) / gamma(m + 0.5) ** .5
        function = He_n(x) * np.exp(-x ** 2 / 2)

        return const * function