Python numpy module: isrealobj() example source code

The following 14 code examples, extracted from open-source Python projects, illustrate how to use numpy.isrealobj().
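
Before the examples, note that numpy.isrealobj() inspects an array's type, not its values: a complex-typed array whose imaginary parts are all zero is still reported as a complex object. A minimal demonstration:

import numpy as np

print(np.isrealobj(np.array([1.0, 2.0])))        # True: real dtype
print(np.isrealobj(np.array([1.0 + 0j, 2.0])))   # False: complex dtype, even with zero imaginary parts
print(np.isreal(np.array([1.0 + 0j, 2.0])))      # [ True  True]: the element-wise value check, for contrast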

Project: pyrsss    Author: butala    | Project source code | File source code
def operate(self, x):
        """
        Apply the separable filter to the signal vector *x*.
        """
        X = NP.fft.fftn(x, s=self.k_full)
        if NP.isrealobj(self.h) and NP.isrealobj(x):
            y = NP.real(NP.fft.ifftn(self.H * X))
        else:
            y = NP.fft.ifftn(self.H * X)

        if self.mode == 'full' or self.mode == 'circ':
            return y
        elif self.mode == 'valid':
            slice_list = []
            for i in range(self.ndim):
                if self.m[i]-1 == 0:
                    slice_list.append(slice(None, None, None))
                else:
                    slice_list.append(slice(self.m[i]-1, -(self.m[i]-1), None))
            return y[tuple(slice_list)]  # index with a tuple; newer NumPy no longer accepts a list of slices
        else:
            assert(False)
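
The branching above keeps the output real only when both the filter and the signal are real-typed. A minimal standalone sketch of the same dispatch, outside the pyrsss class (the function name fft_filter and the 'full'-size zero padding are illustrative assumptions, not part of the project):

import numpy as np

def fft_filter(h, x):
    """Convolve x with h via zero-padded FFTs, keeping the result real
    only when both inputs are real-typed."""
    shape = [hn + xn - 1 for hn, xn in zip(h.shape, x.shape)]  # 'full' linear convolution size
    H = np.fft.fftn(h, s=shape)
    X = np.fft.fftn(x, s=shape)
    y = np.fft.ifftn(H * X)
    if np.isrealobj(h) and np.isrealobj(x):
        y = np.real(y)  # discard the numerically tiny imaginary residue
    return y
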
Project: arlpy    Author: org-arl    | Project source code | File source code
def correlate_periodic(a, v=None):
    """Cross-correlation of two 1-dimensional periodic sequences.

    a and v must be sequences with the same length. If v is not specified, it is
    assumed to be the same as a (i.e. the function computes auto-correlation).

    :param a: input sequence #1
    :param v: input sequence #2
    :returns: discrete periodic cross-correlation of a and v
    """
    a_fft = _np.fft.fft(_np.asarray(a))
    if v is None:
        v_cfft = a_fft.conj()
    else:
        v_cfft = _np.fft.fft(_np.asarray(v)).conj()
    x = _np.fft.ifft(a_fft * v_cfft)
    if _np.isrealobj(a) and (v is None or _np.isrealobj(v)):
        x = x.real
    return x
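
A quick usage sketch, assuming correlate_periodic above is in scope and that numpy is aliased as _np at module level, as in arlpy:

import numpy as _np  # arlpy imports numpy under the _np alias

a = _np.array([1.0, 2.0, 3.0, 4.0])
r = correlate_periodic(a)        # periodic autocorrelation of a with itself
print(_np.isrealobj(r))          # True: real input keeps the result real
print(round(r[0], 6))            # 30.0, the zero-lag term sum(a**2)
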
Project: pumpp    Author: bmcfee    | Project source code | File source code
def inverse(self, encoded, duration=None):
        '''Inverse static tag transformation'''

        ann = jams.Annotation(namespace=self.namespace, duration=duration)

        if np.isrealobj(encoded):
            detected = (encoded >= 0.5)
        else:
            detected = encoded

        for vd in self.encoder.inverse_transform(np.atleast_2d(detected))[0]:
            vid = np.flatnonzero(self.encoder.transform(np.atleast_2d(vd)))
            ann.append(time=0,
                       duration=duration,
                       value=vd,
                       confidence=encoded[vid])
        return ann
Project: pumpp    Author: bmcfee    | Project source code | File source code
def decode_events(self, encoded):
        '''Decode labeled events into (time, value) pairs

        Parameters
        ----------
        encoded : np.ndarray, shape=(n_frames, m)
            Frame-level annotation encodings as produced by ``encode_events``.

            Real-valued inputs are thresholded at 0.5.

        Returns
        -------
        [(time, value)] : iterable of tuples
            where `time` is the event time and `value` is an
            np.ndarray, shape=(m,) of the encoded value at that time
        '''
        if np.isrealobj(encoded):
            encoded = (encoded >= 0.5)
        times = frames_to_time(np.arange(encoded.shape[0]),
                               sr=self.sr,
                               hop_length=self.hop_length)

        return zip(times, encoded)
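
The isrealobj() test is what lets decode_events accept either soft real-valued activations or already-boolean frames. A minimal illustration of that thresholding step alone, with made-up activation values:

import numpy as np

encoded = np.array([[0.1, 0.9],
                    [0.7, 0.2]])      # hypothetical frame-level activations
if np.isrealobj(encoded):
    encoded = (encoded >= 0.5)        # real-valued input is thresholded at 0.5
print(encoded)                        # [[False  True]
                                      #  [ True False]]
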
Project: DeepFormants    Author: MLSpeech    | Project source code | File source code
def atal(x, order, num_coefs):
    x = np.atleast_1d(x)
    n = x.size
    if x.ndim > 1:
        raise ValueError("Only rank 1 input supported for now.")
    if not np.isrealobj(x):
        raise ValueError("Only real input supported for now.")
    a, e, kk = lpc(x, order)
    c = np.zeros(num_coefs)
    c[0] = a[0]
    for m in range(1, order+1):
        c[m] = - a[m]
        for k in range(1, m):
            c[m] += (float(k)/float(m)-1)*a[k]*c[m-k]
    for m in range(order+1, num_coefs):
        for k in range(1, order+1):
            c[m] += (float(k)/float(m)-1)*a[k]*c[m-k]
    return c
Project: deliver    Author: orchestor    | Project source code | File source code
def test_poly(self):
        assert_array_almost_equal(np.poly([3, -np.sqrt(2), np.sqrt(2)]),
                                  [1, -3, -2, 6])

        # From matlab docs
        A = [[1, 2, 3], [4, 5, 6], [7, 8, 0]]
        assert_array_almost_equal(np.poly(A), [1, -6, -72, -27])

        # Should produce real output for perfect conjugates
        assert_(np.isrealobj(np.poly([+1.082j, +2.613j, -2.613j, -1.082j])))
        assert_(np.isrealobj(np.poly([0+1j, -0+-1j, 1+2j,
                                      1-2j, 1.+3.5j, 1-3.5j])))
        assert_(np.isrealobj(np.poly([1j, -1j, 1+2j, 1-2j, 1+3j, 1-3.j])))
        assert_(np.isrealobj(np.poly([1j, -1j, 1+2j, 1-2j])))
        assert_(np.isrealobj(np.poly([1j, -1j, 2j, -2j])))
        assert_(np.isrealobj(np.poly([1j, -1j])))
        assert_(np.isrealobj(np.poly([1, -1])))

        assert_(np.iscomplexobj(np.poly([1j, -1.0000001j])))

        np.random.seed(42)
        a = np.random.randn(100) + 1j*np.random.randn(100)
        assert_(np.isrealobj(np.poly(np.concatenate((a, np.conjugate(a))))))
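
The property being tested is that numpy.poly returns real coefficients whenever the complex roots occur in exact conjugate pairs. A minimal check:

import numpy as np

p = np.poly([1j, -1j])      # roots +i and -i give x**2 + 1
print(p)                    # [1. 0. 1.]
print(np.isrealobj(p))      # True: the conjugate pair collapses to real coefficients
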
Project: fluids    Author: CalebBell    | Project source code | File source code
def polyval(self, chebcoeff):
        """
        Compute the interpolation values at Chebyshev points.
        chebcoeff: Chebyshev coefficients
        """
        N = len(chebcoeff)
        if N == 1:
            return chebcoeff

        data = even_data(chebcoeff)/2
        data[0] *= 2
        data[N-1] *= 2

        fftdata = 2*(N-1)*fftpack.ifft(data, axis=0)
        complex_values = fftdata[:N]
        # convert to real if input was real
        if np.isrealobj(chebcoeff):
            values = np.real(complex_values)
        else:
            values = complex_values
        return values
Project: fluids    Author: CalebBell    | Project source code | File source code
def dct(data):
    """
    Compute DCT using FFT
    """
    N = len(data)//2
    fftdata     = fftpack.fft(data, axis=0)[:N+1]
    fftdata     /= N
    fftdata[0]  /= 2.
    fftdata[-1] /= 2.
    if np.isrealobj(data):
        data = np.real(fftdata)
    else:
        data = fftdata
    return data

# ----------------------------------------------------------------
# Add overloaded operators
# ----------------------------------------------------------------
Project: nelpy    Author: nelpy    | Project source code | File source code
def isreal(self):
        """Returns True if entire signal is real."""
        return np.all(np.isreal(self._ydata))
        # return np.isrealobj(self._ydata)
Project: sporco    Author: bwohlberg    | Project source code | File source code
def fftconv(a, b, axes=(0,1)):
    """
    Compute a multi-dimensional convolution via the Discrete Fourier Transform.

    Parameters
    ----------
    a : array_like
      Input array
    b : array_like
      Input array
    axes : sequence of ints, optional (default (0,1))
      Axes on which to perform convolution

    Returns
    -------
    ab : ndarray
      Convolution of input arrays, a and b, along specified axes
    """

    if np.isrealobj(a) and np.isrealobj(b):
        fft = rfftn
        ifft = irfftn
    else:
        fft = fftn
        ifft = ifftn
    dims = np.maximum([a.shape[i] for i in axes], [b.shape[i] for i in axes])
    af = fft(a, dims, axes)
    bf = fft(b, dims, axes)
    return ifft(af*bf, dims, axes)
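
A hedged usage sketch: fftconv relies on rfftn, irfftn, fftn and ifftn imported elsewhere in sporco; here the numpy.fft routines of the same names stand in, since they accept the same (a, s, axes) positional arguments:

import numpy as np
from numpy.fft import rfftn, irfftn, fftn, ifftn  # assumed stand-ins for sporco's own imports

a = np.random.randn(8, 8)
b = np.random.randn(8, 8)
ab = fftconv(a, b)         # circular convolution over axes (0, 1) at the larger of the two shapes
print(ab.shape)            # (8, 8)
print(np.isrealobj(ab))    # True: real inputs take the rfftn/irfftn branch
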
Project: PonyGE2    Author: PonyGE    | Project source code | File source code
def evaluate(self, ind, **kwargs):
        """
        Note that math functions used in the solutions are imported from either
        utilities.fitness.math_functions or called from numpy.

        :param ind: An individual to be evaluated.
        :param kwargs: An optional parameter for problems with training/test
        data. Specifies the distribution (i.e. training or test) upon which
        evaluation is to be performed.
        :return: The fitness of the evaluated individual.
        """

        dist = kwargs.get('dist', 'training')

        if dist == "training":
            # Set training datasets.
            x = self.training_in
            y = self.training_exp

        elif dist == "test":
            # Set test datasets.
            x = self.test_in
            y = self.test_exp

        else:
            raise ValueError("Unknown dist: " + dist)

        if params['OPTIMIZE_CONSTANTS']:
            # if we are training, then optimize the constants by
            # gradient descent and save the resulting phenotype
            # string as ind.phenotype_with_c0123 (eg x[0] +
            # c[0] * x[1]**c[1]) and values for constants as
            # ind.opt_consts (eg (0.5, 0.7)). Later, when testing,
            # use the saved string and constants to evaluate.
            if dist == "training":
                return optimize_constants(x, y, ind)

            else:
                # this string has been created during training
                phen = ind.phenotype_consec_consts
                c = ind.opt_consts
                # phen will refer to x (ie test_in), and possibly to c
                yhat = eval(phen)
                assert np.isrealobj(yhat)

                # let's always call the error function with the
                # true values first, the estimate second
                return params['ERROR_METRIC'](y, yhat)

        else:
            # phenotype won't refer to C
            yhat = eval(ind.phenotype)
            assert np.isrealobj(yhat)

            # let's always call the error function with the true
            # values first, the estimate second
            return params['ERROR_METRIC'](y, yhat)
Project: DeepFormants    Author: MLSpeech    | Project source code | File source code
def periodogram(x, nfft=None, fs=1):
    """Compute the periodogram of the given signal, with the given fft size.

    Parameters
    ----------
    x : array-like
        input signal
    nfft : int
        size of the fft to compute the periodogram. If None (default), the
        length of the signal is used. If nfft > n, the signal is zero-padded.
    fs : float
        Sampling rate. By default, it is 1 (normalized frequency; e.g. 0.5 is the
        Nyquist limit).

    Returns
    -------
    pxx : array-like
        The psd estimate.
    fgrid : array-like
        Frequency grid over which the periodogram was estimated.

    Examples
    --------
    Generate a signal with two sinusoids, and compute its periodogram:

    >>> fs = 1000
    >>> x = np.sin(2 * np.pi * 0.1 * fs * np.linspace(0, 0.5, int(0.5 * fs)))
    >>> x += np.sin(2 * np.pi * 0.2 * fs * np.linspace(0, 0.5, int(0.5 * fs)))
    >>> px, fx = periodogram(x, 512, fs)

    Notes
    -----
    Only real signals supported for now.

    Returns the one-sided version of the periodogram.

    Discrepancy with matlab: matlab computes the psd in units of power / radian /
    sample, while we compute the psd in units of power / sample; to get the same
    result as matlab, just multiply the result from talkbox by 2*pi."""
    x = np.atleast_1d(x)
    n = x.size

    if x.ndim > 1:
        raise ValueError("Only rank 1 input supported for now.")
    if not np.isrealobj(x):
        raise ValueError("Only real input supported for now.")
    if not nfft:
        nfft = n
    if nfft < n:
        raise ValueError("nfft < signal size not supported yet")

    pxx = np.abs(fft(x, nfft)) ** 2
    if nfft % 2 == 0:
        pn = nfft // 2 + 1  # integer division so pn can be used as a slice index
    else:
        pn = (nfft + 1) // 2

    fgrid = np.linspace(0, fs * 0.5, pn)
    return pxx[:pn] / (n * fs), fgrid
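
A hedged usage sketch, assuming the fft name used inside periodogram resolves to numpy.fft.fft and that the function above is in scope:

import numpy as np
from numpy.fft import fft  # assumed import for the fft call inside periodogram

fs = 1000
t = np.arange(0, 0.5, 1.0 / fs)
x = np.sin(2 * np.pi * 100 * t)   # a 100 Hz tone sampled at 1 kHz
px, fx = periodogram(x, 512, fs)
print(fx[np.argmax(px)])          # close to 100.0 (limited by the 512-point frequency grid)
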
Project: DeepFormants    Author: MLSpeech    | Project source code | File source code
def arspec(x, order, nfft=None, fs=1):
    """Compute the spectral density using an AR model.

    An AR model of the signal is estimated through the Yule-Walker equations;
    the estimated AR coefficients are then used to compute the spectrum, which
    can be computed explicitly for AR models.

    Parameters
    ----------
    x : array-like
        input signal
    order : int
        Order of the LPC computation.
    nfft : int
        size of the fft to compute the periodogram. If None (default), the
        length of the signal is used. If nfft > n, the signal is zero-padded.
    fs : float
        Sampling rate. By default, it is 1 (normalized frequency; e.g. 0.5 is the
        Nyquist limit).

    Returns
    -------
    pxx : array-like
        The psd estimate.
    fgrid : array-like
        Frequency grid over which the periodogram was estimated.
    """

    x = np.atleast_1d(x)
    n = x.size

    if x.ndim > 1:
        raise ValueError("Only rank 1 input supported for now.")
    if not np.isrealobj(x):
        raise ValueError("Only real input supported for now.")
    if not nfft:
        nfft = n
    a, e, k = lpc(x, order)

    # This is not enough to deal correctly with even/odd size
    if nfft % 2 == 0:
        pn = nfft // 2 + 1  # integer division so pn can be used as a slice index
    else:
        pn = (nfft + 1) // 2

    px = 1 / np.fft.fft(a, nfft)[:pn]
    pxx = np.real(np.conj(px) * px)
    pxx /= fs / e
    fx = np.linspace(0, fs * 0.5, pxx.size)
    return pxx, fx
Project: decoding_challenge_cortana_2016_3rd    Author: kingjr    | Project source code | File source code
def _write_raw_buffer(fid, buf, cals, fmt, inv_comp):
    """Write raw buffer

    Parameters
    ----------
    fid : file descriptor
        an open raw data file.
    buf : array
        The buffer to write.
    cals : array
        Calibration factors.
    fmt : str
        'short', 'int', 'single', or 'double' for 16/32 bit int or 32/64 bit
        float for each item. This will be doubled for complex datatypes. Note
        that short and int formats cannot be used for complex data.
    inv_comp : array | None
        The CTF compensation matrix used to revert compensation
        change when reading.
    """
    if buf.shape[0] != len(cals):
        raise ValueError('buffer and calibration sizes do not match')

    if fmt not in ['short', 'int', 'single', 'double']:
        raise ValueError('fmt must be "short", "int", "single", or "double"')

    if np.isrealobj(buf):
        if fmt == 'short':
            write_function = write_dau_pack16
        elif fmt == 'int':
            write_function = write_int
        elif fmt == 'single':
            write_function = write_float
        else:
            write_function = write_double
    else:
        if fmt == 'single':
            write_function = write_complex64
        elif fmt == 'double':
            write_function = write_complex128
        else:
            raise ValueError('only "single" and "double" supported for '
                             'writing complex data')

    if inv_comp is not None:
        buf = np.dot(inv_comp / np.ravel(cals)[:, None], buf)
    else:
        buf = buf / np.ravel(cals)[:, None]

    write_function(fid, FIFF.FIFF_DATA_BUFFER, buf)