Python numpy module: ufunc() example source code

We have extracted the following 19 code examples from open-source Python projects to illustrate how to use numpy.ufunc().
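
Because numpy.ufunc is the type of NumPy's universal functions, it mostly appears in these examples in isinstance checks and in attribute lookups such as .nin, .types and .reduce. A minimal, standalone illustration of what the snippets below are probing:

import numpy as np

isinstance(np.add, np.ufunc)  # True: np.add is a universal function
np.add.nin, np.add.nout       # (2, 1): binary ufunc with a single output
np.add.reduce([1, 2, 3])      # 6: ufuncs also provide reduction methods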

Project: lazyarray    Author: NeuralEnsemble    | Project source | File source
def __deepcopy__(self, memo):
        obj = type(self).__new__(type(self))
        if isinstance(self.base_value, VectorizedIterable):  # special case, but perhaps need to rethink
            obj.base_value = self.base_value                 # whether deepcopy is appropriate everywhere
        else:
            try:
                obj.base_value = deepcopy(self.base_value)
            except TypeError:  # base_value cannot be copied, e.g. is a generator (but see generator_tools from PyPI)
                obj.base_value = self.base_value  # so here we create a reference rather than deepcopying - could cause problems
        obj._shape = self._shape
        obj.dtype = self.dtype
        obj.operations = []
        for f, arg in self.operations:
            if isinstance(f, numpy.ufunc):
                obj.operations.append((f, deepcopy(arg)))
            else:
                obj.operations.append((deepcopy(f), deepcopy(arg)))
        return obj
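
The numpy.ufunc branch above exists because ufuncs are immutable, module-level singletons: deep-copying them is unnecessary, so only the recorded argument gets a real deep copy. A minimal sketch of the same pattern, with a hypothetical operations list:

import numpy
from copy import deepcopy

operations = [(numpy.add, 2), (str.upper, None)]
copied = [(f, deepcopy(arg)) if isinstance(f, numpy.ufunc)
          else (deepcopy(f), deepcopy(arg))
          for f, arg in operations]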
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda    Author: SignalMedia    | Project source | File source
def __array_prepare__(self, result, context=None):
        """
        Gets called prior to a ufunc
        """

        # nice error message for non-ufunc types
        if context is not None and not isinstance(self._values, np.ndarray):
            obj = context[1][0]
            raise TypeError("{obj} with dtype {dtype} cannot perform "
                            "the numpy op {op}".format(
                                obj=type(obj).__name__,
                                dtype=getattr(obj, 'dtype', None),
                                op=context[0].__name__))
        return result
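
For context: the context argument received here is the tuple (ufunc, inputs, output index) that NumPy passes around a ufunc call, and __array_prepare__ itself only exists in older NumPy (it was deprecated in later 1.x releases and removed in NumPy 2.0). A minimal sketch with an ndarray subclass:

import numpy as np

class Logged(np.ndarray):
    def __array_prepare__(self, result, context=None):
        if context is not None:
            print('about to run', context[0].__name__)
        return result

a = np.arange(3).view(Logged)
np.add(a, 1)  # prints: about to run add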

Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda    Author: SignalMedia    | Project source | File source
def __array_wrap__(self, result, context=None):
        """
        Gets called after a ufunc. Needs additional handling as
        PeriodIndex stores internal data as int dtype

        Replace this with __numpy_ufunc__ in a future version
        """
        if isinstance(context, tuple) and len(context) > 0:
            func = context[0]
            if (func is np.add):
                return self._add_delta(context[1][1])
            elif (func is np.subtract):
                return self._add_delta(-context[1][1])
            elif isinstance(func, np.ufunc):
                if 'M->M' not in func.types:
                    msg = "ufunc '{0}' not supported for the PeriodIndex"
                    # This should be TypeError, but TypeError cannot be raised
                    # from here because NumPy catches it.
                    raise ValueError(msg.format(func.__name__))

        if com.is_bool_dtype(result):
            return result
        return PeriodIndex(result, freq=self.freq, name=self.name)
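
The 'M->M' membership test inspects the ufunc's registered type signatures: ufunc.types is a list of strings mapping input dtype characters to output dtype characters, where 'M' denotes datetime64. A quick illustration with stock NumPy:

import numpy as np

np.add.types[:3]         # ['??->?', 'bb->b', 'BB->B'] (or similar)
'M->M' in np.sqrt.types  # False, so PeriodIndex would reject np.sqrt here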
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def __init__(self, scalar_op, inplace_pattern=None, name=None,
                 nfunc_spec=None, openmp=None):
        if inplace_pattern is None:
            inplace_pattern = {}
        self.name = name
        self.scalar_op = scalar_op
        self.inplace_pattern = inplace_pattern
        self.destroy_map = dict((o, [i]) for o, i in inplace_pattern.items())

        self.ufunc = None
        self.nfunc = None
        if nfunc_spec is None:
            nfunc_spec = getattr(scalar_op, 'nfunc_spec', None)
        self.nfunc_spec = nfunc_spec
        if nfunc_spec:
            self.nfunc = getattr(numpy, nfunc_spec[0])

        # precompute the hash of this node
        self._rehash()
        super(Elemwise, self).__init__(openmp=openmp)
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def set_ufunc(self, scalar_op):
        # This is probably a speed-up of the implementation
        if isinstance(scalar_op, theano.scalar.basic.Add):
            self.ufunc = numpy.add
        elif isinstance(scalar_op, theano.scalar.basic.Mul):
            self.ufunc = numpy.multiply
        elif isinstance(scalar_op, theano.scalar.basic.Maximum):
            self.ufunc = numpy.maximum
        elif isinstance(scalar_op, theano.scalar.basic.Minimum):
            self.ufunc = numpy.minimum
        elif isinstance(scalar_op, theano.scalar.basic.AND):
            self.ufunc = numpy.bitwise_and
        elif isinstance(scalar_op, theano.scalar.basic.OR):
            self.ufunc = numpy.bitwise_or
        elif isinstance(scalar_op, theano.scalar.basic.XOR):
            self.ufunc = numpy.bitwise_xor
        else:
            self.ufunc = numpy.frompyfunc(scalar_op.impl, 2, 1)
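
The numpy.frompyfunc fallback wraps an arbitrary Python scalar function into a genuine ufunc with the stated number of inputs and outputs; note that such ufuncs always return object arrays. For example:

import numpy

uf = numpy.frompyfunc(lambda a, b: a if a > b else b, 2, 1)
uf(numpy.array([1, 4]), numpy.array([3, 2]))  # array([3, 4], dtype=object)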
Project: npstreams    Author: LaurentRDC    | Project source | File source
def _check_binary_ufunc(ufunc):
    """ Check that ufunc is suitable for ``ireduce_ufunc`` """
    if not isinstance(ufunc, np.ufunc):
        raise TypeError('{} is not a NumPy Ufunc'.format(ufunc.__name__))
    if not ufunc.nin == 2:
        raise ValueError('Only binary ufuncs are supported, and {} is \
                          not one of them'.format(ufunc.__name__))

    # Ufuncs that always return bool are problematic because they can be
    # reduced but not accumulated.
    # Recall: numpy.dtype('?') == np.bool
    if all(type_signature[-1] == '?' for type_signature in ufunc.types):
        raise ValueError('Only binary ufuncs that preserve type are supported, \
                          and {} is not one of them'.format(ufunc.__name__))
Project: npstreams    Author: LaurentRDC    | Project source | File source
def _ireduce_ufunc_new_axis(arrays, ufunc, **kwargs):
    """
    Reduction operation for arrays, in the direction of a new axis (i.e. stacking).

    Parameters
    ----------
    arrays : iterable
        Arrays to be reduced.
    ufunc : numpy.ufunc
        Binary universal function. Must have a signature of the form ufunc(x1, x2, ...)
    kwargs
        Keyword arguments are passed to ``ufunc``.

    Yields 
    ------
    reduced : ndarray
    """
    arrays = iter(arrays)
    first = next(arrays)

    kwargs.pop('axis')

    dtype = kwargs.get('dtype', None)
    if dtype is None:
        dtype = first.dtype
    else:
        kwargs['casting'] = 'unsafe'

    # If the out parameter was already given,
    # we create the accumulator from it.
    # Otherwise, it is a copy of the first array.
    accumulator = kwargs.pop('out', None)
    if accumulator is not None:
        accumulator[:] = first
    else:
        accumulator = np.array(first, copy = True).astype(dtype)
    yield accumulator

    for array in arrays:
        ufunc(accumulator, array, out = accumulator, **kwargs)
        yield accumulator
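
A sketch of how this generator streams a running elementwise reduction; note that the caller must supply an axis keyword, since it is popped unconditionally:

import numpy as np

arrays = (np.full((2, 2), i) for i in range(1, 4))
for acc in _ireduce_ufunc_new_axis(arrays, np.add, axis=-1):
    print(acc[0, 0])  # running sums: 1, then 3, then 6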
Project: npstreams    Author: LaurentRDC    | Project source | File source
def _ireduce_ufunc_all_axes(arrays, ufunc, **kwargs):
    """
    Reduction operation for arrays, over all axes.

    Parameters
    ----------
    arrays : iterable
        Arrays to be reduced.
    ufunc : numpy.ufunc
        Binary universal function. Must have a signature of the form ufunc(x1, x2, ...)
    kwargs
        Keyword arguments are passed to ``ufunc``. The ``out`` parameter is ignored.

    Yields 
    ------
    reduced : scalar
    """
    arrays = iter(arrays)
    first = next(arrays)

    kwargs['axis'] = None
    kwargs.pop('out', None)         # Remove the out-parameter if provided.
    axis_reduce = partial(ufunc.reduce, **kwargs)

    accumulator = axis_reduce(first)
    yield accumulator

    for array in arrays:
        accumulator = axis_reduce([accumulator, axis_reduce(array)])
        yield accumulator
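
Illustration of the all-axes variant, which yields a running scalar (it forces axis=None itself, so no axis keyword is required):

import numpy as np

arrays = (np.arange(3) for _ in range(3))
list(_ireduce_ufunc_all_axes(arrays, np.add))  # [3, 6, 9]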
Project: lombscargle    Author: jakevdp    | Project source | File source
def _validate_method(method, dy, fit_bias, nterms,
                     frequency, assume_regular_frequency):
    fast_method_ok = hasattr(np.ufunc, 'at')
    if not fast_method_ok:
        warnings.warn("Fast Lomb-Scargle methods require numpy version 1.8 "
                      "or newer. Using slower methods instead.")

    # automatically choose the appropriate method
    if method == 'auto':
        if nterms != 1:
            if (fast_method_ok and len(frequency) > 100
                    and _is_regular(frequency, assume_regular_frequency)):
                method = 'fastchi2'
            else:
                method = 'chi2'
        elif (fast_method_ok and len(frequency) > 100
              and _is_regular(frequency, assume_regular_frequency)):
            method = 'fast'
        elif dy is None and not fit_bias:
            method = 'scipy'
        else:
            method = 'slow'


    if method not in METHODS:
        raise ValueError("invalid method: {0}".format(method))

    return method
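
The hasattr(np.ufunc, 'at') probe doubles as a NumPy >= 1.8 version check, because the fast Lomb-Scargle methods rely on unbuffered in-place ufunc updates. What that feature does:

import numpy as np

a = np.zeros(5)
np.add.at(a, [0, 0, 2], 1)  # repeated indices accumulate, unlike a[[0, 0, 2]] += 1
a                           # array([2., 0., 1., 0., 0.])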
Project: lazyarray    Author: NeuralEnsemble    | Project source | File source
def _build_ufunc(func):
    """Return a ufunc that works with lazy arrays"""
    def larray_compatible_ufunc(x):
        if isinstance(x, larray):
            y = deepcopy(x)
            y.apply(func)
            return y
        else:
            return func(x)
    return larray_compatible_ufunc
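
Usage sketch (treating the larray constructor loosely; exact construction depends on the lazyarray API):

import numpy as np
from lazyarray import larray

lazy_sqrt = _build_ufunc(np.sqrt)
lazy_sqrt(4.0)           # plain input is evaluated eagerly: 2.0
lazy_sqrt(larray(4.0))   # lazy input: np.sqrt is recorded, not yet applied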
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda    Author: SignalMedia    | Project source | File source
def __array_wrap__(self, result, context=None):
        """
        Gets called after a ufunc
        """
        return self._constructor(result, index=self.index,
                                 copy=False).__finalize__(self)
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda    Author: SignalMedia    | Project source | File source
def apply(self, func, axis=0, broadcast=False, reduce=False):
        """
        Analogous to DataFrame.apply, for SparseDataFrame

        Parameters
        ----------
        func : function
            Function to apply to each column
        axis : {0, 1, 'index', 'columns'}
        broadcast : bool, default False
            For aggregation functions, return object of same size with values
            propagated

        Returns
        -------
        applied : Series or SparseDataFrame
        """
        if not len(self.columns):
            return self
        axis = self._get_axis_number(axis)

        if isinstance(func, np.ufunc):
            new_series = {}
            for k, v in compat.iteritems(self):
                applied = func(v)
                applied.fill_value = func(applied.fill_value)
                new_series[k] = applied
            return self._constructor(
                new_series, index=self.index, columns=self.columns,
                default_fill_value=self._default_fill_value,
                kind=self._default_kind).__finalize__(self)
        else:
            if not broadcast:
                return self._apply_standard(func, axis, reduce=reduce)
            else:
                return self._apply_broadcast(func, axis)
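
The np.ufunc branch is what lets a whole-frame call stay sparse: the ufunc is applied to each column and to its fill_value, so the sparse structure never needs to be densified. A hedged usage sketch, assuming sdf is an existing SparseDataFrame:

import numpy as np

sdf.apply(np.sqrt)  # column-wise np.sqrt; fill_value is transformed too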
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def __getstate__(self):
        d = copy(self.__dict__)
        d.pop('ufunc')
        d.pop('nfunc')
        d.pop('__epydoc_asRoutine', None)
        d.pop('_hashval')
        return d
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def __setstate__(self, d):
        super(Elemwise, self).__setstate__(d)
        self.ufunc = None
        self.nfunc = None
        if getattr(self, 'nfunc_spec', None):
            self.nfunc = getattr(numpy, self.nfunc_spec[0])
        elif 0 < self.scalar_op.nin < 32:
            self.ufunc = numpy.frompyfunc(self.scalar_op.impl,
                                          self.scalar_op.nin,
                                          self.scalar_op.nout)
        self._rehash()
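
Together, __getstate__ and __setstate__ implement a drop-and-rebuild pattern: ufuncs produced by numpy.frompyfunc cannot be reliably reconstructed by name when unpickling, so they are removed from the pickled state and rebuilt on load. A minimal standalone sketch of the same idea (the Op class and double function are hypothetical):

import numpy

def double(x):
    return 2 * x

class Op(object):
    def __init__(self):
        self.ufunc = numpy.frompyfunc(double, 1, 1)

    def __getstate__(self):
        d = self.__dict__.copy()
        d.pop('ufunc')  # drop the dynamically built ufunc
        return d

    def __setstate__(self, d):
        self.__dict__.update(d)
        self.ufunc = numpy.frompyfunc(double, 1, 1)  # rebuild on load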
Project: npstreams    Author: LaurentRDC    | Project source | File source
def reduce_ufunc(*args, **kwargs):
    """
    Streaming reduction generator function from a binary NumPy ufunc. Essentially the
    function equivalent to `ireduce_ufunc`.

    ``ufunc`` must be a NumPy binary Ufunc (i.e. it takes two arguments). Moreover,
    for performance reasons, ufunc must have the same return types as input types.
    This precludes the use of ``numpy.greater``, for example.

    Note that performance is much better for the default ``axis = -1``. In such a case,
    reduction operations can occur in place, which also allows operating in constant memory.

    Parameters
    ----------
    arrays : iterable
        Arrays to be reduced.
    ufunc : numpy.ufunc
        Binary universal function.
    axis : int or None, optional
        Reduction axis. Default is to reduce the arrays in the stream as if 
        they had been stacked along a new axis, then reduce along this new axis.
        If None, arrays are flattened before reduction. If `axis` is an int larger than
        the number of dimensions in the arrays of the stream, arrays are reduced
        along the new axis. Note that not all NumPy ufuncs support
        ``axis = None``, e.g. ``numpy.subtract``.
    dtype : numpy.dtype or None, optional
        Overrides the dtype of the calculation and output arrays.
    ignore_nan : bool, optional
        If True and ufunc has an identity value (e.g. ``numpy.add.identity`` is 0), then NaNs
        are replaced with this identity. An error is raised if ``ufunc`` has no identity (e.g. ``numpy.maximum.identity`` is ``None``).
    kwargs
        Keyword arguments are passed to ``ufunc``. Note that some valid ufunc keyword arguments
        (e.g. ``keepdims``) are not valid for all streaming functions. Note that
        contrary to NumPy v. 1.10+, ``casting = 'unsafe'`` is the default in npstreams.

    Yields 
    ------
    reduced : ndarray or scalar

    Raises
    ------
    TypeError : if ``ufunc`` is not a NumPy ufunc.
    ValueError : if ``ignore_nan`` is True but ``ufunc`` has no identity.
    ValueError : if ``ufunc`` is not a binary ufunc.
    ValueError : if ``ufunc`` does not have the same input type as output type.
    """
    return last(ireduce_ufunc(*args, **kwargs))
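
Usage sketch, assuming the function is exposed at the top level of npstreams:

import numpy as np
from npstreams import reduce_ufunc

arrays = (np.ones((4, 4)) for _ in range(10))
reduce_ufunc(arrays, np.add, axis=-1)  # 4x4 array filled with 10.0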
Project: npstreams    Author: LaurentRDC    | Project source | File source
def _ireduce_ufunc_existing_axis(arrays, ufunc, **kwargs):
    """
    Reduction operation for arrays, in the direction of an existing axis.

    Parameters
    ----------
    arrays : iterable
        Arrays to be reduced.
    ufunc : numpy.ufunc
        Binary universal function. Must have a signature of the form ufunc(x1, x2, ...)
    kwargs
        Keyword arguments are passed to ``ufunc``. The ``out`` parameter is ignored.

    Yields 
    ------
    reduced : ndarray
    """
    arrays = iter(arrays)
    first = next(arrays)

    if kwargs['axis'] not in range(first.ndim):
        raise ValueError('Axis {} not supported on arrays of shape {}.'.format(kwargs['axis'], first.shape))

    # Remove the out-parameter if provided.
    kwargs.pop('out', None)

    dtype = kwargs.get('dtype')
    if dtype is None:
        dtype = first.dtype

    axis_reduce = partial(ufunc.reduce, **kwargs)

    accumulator = np.atleast_1d(axis_reduce(first))
    yield accumulator

    # On the first pass of the following loop, the accumulator is missing a
    # dimension; therefore, the stacking function cannot be 'concatenate'.
    second = next(arrays)
    accumulator = np.stack([accumulator, np.atleast_1d(axis_reduce(second))], axis = -1)
    yield accumulator

    # On the second pass, the new dimension exists, and thus we switch to
    # using concatenate.
    for array in arrays:
        reduced = np.expand_dims(np.atleast_1d(axis_reduce(array)), axis = accumulator.ndim - 1)
        accumulator = np.concatenate([accumulator, reduced], axis = accumulator.ndim - 1)
        yield accumulator
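
Illustration of reducing along an existing axis; each yielded array stacks the per-array reductions seen so far along a new last axis:

import numpy as np

arrays = (np.arange(6).reshape(2, 3) for _ in range(3))
gen = _ireduce_ufunc_existing_axis(arrays, np.add, axis=1)
next(gen)  # array([ 3, 12]): row sums of the first array
next(gen)  # shape (2, 2): row sums of the first two arrays, stacked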
Project: plotnine    Author: has2k1    | Project source | File source
def compute_group(cls, data, scales, **params):
        fun = params['fun']
        n = params['n']
        args = params['args']
        xlim = params['xlim']

        try:
            range_x = xlim or scales.x.dimension((0, 0))
        except AttributeError:
            raise PlotnineError(
                "Missing 'x' aesthetic and 'xlim' is {}".format(xlim))

        if not hasattr(fun, '__call__'):
            raise PlotnineError(
                "stat_function requires parameter 'fun' to be " +
                "a function or any other callable object")

        old_fun = fun
        if isinstance(args, (list, tuple)):
            def fun(x):
                return old_fun(x, *args)
        elif isinstance(args, dict):
            def fun(x):
                return old_fun(x, **args)
        elif args is not None:
            def fun(x):
                return old_fun(x, args)
        else:
            def fun(x):
                return old_fun(x)

        x = np.linspace(range_x[0], range_x[1], n)

        # continuous scale
        with suppress(AttributeError):
            x = scales.x.trans.inverse(x)

        # We know these can handle array-likes
        if isinstance(old_fun, (np.ufunc, np.vectorize)):
            y = fun(x)
        else:
            y = [fun(val) for val in x]

        new_data = pd.DataFrame({'x': x, 'y': y})
        return new_data
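
The final isinstance check distinguishes callables known to accept whole arrays from ones that must be applied element by element:

import math
import numpy as np

isinstance(np.sin, np.ufunc)                      # True: call fun(x) once
isinstance(np.vectorize(math.erf), np.vectorize)  # True: also array-aware
isinstance(math.sin, np.ufunc)                    # False: loop over values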
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def prepare_node(self, node, storage_map, compute_map, impl):
        # Postpone building the ufunc until the last minute.
        # NumPy ufuncs support only up to 31 inputs,
        # but our C code supports more.
        if (len(node.inputs) < 32 and
                (self.nfunc is None or
                 self.scalar_op.nin != len(node.inputs)) and
                self.ufunc is None and
                impl == 'py'):

            ufunc = numpy.frompyfunc(self.scalar_op.impl,
                                     len(node.inputs),
                                     self.scalar_op.nout)
            if self.scalar_op.nin > 0:
                # We can reuse it for many nodes
                self.ufunc = ufunc
            else:
                node.tag.ufunc = ufunc

        # Numpy ufuncs will sometimes perform operations in
        # float16, in particular when the input is int8.
        # This is not something that we want, and we do not
        # do it in the C code, so we specify that the computation
        # should be carried out in the returned dtype.
        # This is done via the "sig" kwarg of the ufunc, its value
        # should be something like "ff->f", where the characters
        # represent the dtype of the inputs and outputs.

        # NumPy 1.10.1 raises an error when the signature is given
        # and the input is complex. So add it only when the inputs are ints.
        out_dtype = node.outputs[0].dtype
        if (out_dtype in float_dtypes and
                isinstance(self.nfunc, numpy.ufunc) and
                node.inputs[0].dtype in discrete_dtypes):
            char = numpy.sctype2char(out_dtype)
            sig = char * node.nin + '->' + char * node.nout
            node.tag.sig = sig
        node.tag.fake_node = Apply(
            self.scalar_op,
            [get_scalar_type(dtype=input.type.dtype).make_variable()
             for input in node.inputs],
            [get_scalar_type(dtype=output.type.dtype).make_variable()
             for output in node.outputs])

        self.scalar_op.prepare_node(node.tag.fake_node, None, None, impl)
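
The computed sig string plugs into NumPy's signature mechanism, which pins the dtype of the ufunc's inner loop. A plain-NumPy illustration (the keyword was spelled sig in older releases and signature in newer ones):

import numpy as np

np.add(np.int8(1), np.int8(2), signature='ff->f')  # computed in float32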
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def perform(self, node, inp, out):
        input, = inp
        output, = out
        axis = self.axis
        if axis is None:
            axis = list(range(input.ndim))
        variable = input
        # Use a list (not a reversed iterator, which is always truthy) so the
        # emptiness check below behaves as intended.
        to_reduce = sorted(axis, reverse=True)

        if hasattr(self, 'acc_dtype') and self.acc_dtype is not None:
            acc_dtype = self.acc_dtype
        else:
            acc_dtype = node.outputs[0].type.dtype

        if to_reduce:
            for dimension in to_reduce:
                # If it's a zero-size array, use scalar_op.identity
                # if available
                if variable.shape[dimension] == 0:
                    if hasattr(self.scalar_op, 'identity'):
                        # Compute the shape of the output
                        v_shape = list(variable.shape)
                        del v_shape[dimension]
                        variable = numpy.empty(tuple(v_shape),
                                               dtype=acc_dtype)
                        variable.fill(self.scalar_op.identity)
                    else:
                        raise ValueError((
                            "Input (%s) has zero-size on axis %s, but "
                            "self.scalar_op (%s) has no attribute 'identity'"
                            % (variable, dimension, self.scalar_op)))
                else:
                    # Numpy 1.6 has a bug where you sometimes have to specify
                    # "dtype='object'" in reduce for it to work, if the ufunc
                    # was built with "frompyfunc". We need to find out if we
                    # are in one of these cases (only "object" is supported in
                    # the output).
                    if ((self.ufunc.ntypes == 1) and
                            (self.ufunc.types[0][-1] == 'O')):
                        variable = self.ufunc.reduce(variable, dimension,
                                                     dtype='object')
                    else:
                        variable = self.ufunc.reduce(variable, dimension,
                                                     dtype=acc_dtype)

            variable = numpy.asarray(variable)
            if numpy.may_share_memory(variable, input):
                # perhaps numpy is clever for reductions of size 1?
                # We don't want this.
                variable = variable.copy()
            output[0] = theano._asarray(variable,
                                        dtype=node.outputs[0].type.dtype)
        else:
            # Force a copy
            output[0] = numpy.array(variable, copy=True,
                                    dtype=node.outputs[0].type.dtype)
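
Illustration of the object-dtype branch above: a ufunc built with frompyfunc exposes a single 'OO->O' loop, so under some NumPy versions its reduce must be told to use dtype='object', which is exactly what the workaround handles:

import numpy

uf = numpy.frompyfunc(lambda a, b: a + b, 2, 1)
uf.ntypes, uf.types                         # (1, ['OO->O'])
uf.reduce(numpy.arange(4), dtype='object')  # 6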