Python theano module: Op() code examples

We extracted the following 16 code examples from open-source Python projects to illustrate how to use theano.Op()
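
Before diving into the project examples, here is a minimal sketch of the theano.Op protocol, using nothing beyond the public Theano API (DoubleOp is a hypothetical name used only for illustration): a subclass declares its symbolic signature in make_node() and its numeric implementation in perform().

import numpy as np
import theano
import theano.tensor as T

class DoubleOp(theano.Op):
    """Hypothetical Op that doubles its input, for illustration only."""

    __props__ = ()

    def make_node(self, x):
        # Declare the symbolic signature: one tensor in, one tensor of
        # the same type out.
        x = T.as_tensor_variable(x)
        return theano.Apply(self, [x], [x.type()])

    def perform(self, node, inputs, output_storage):
        # Numeric implementation, operating on plain NumPy arrays.
        (x,) = inputs
        output_storage[0][0] = np.asarray(2 * x, dtype=x.dtype)

x = T.dmatrix('x')
f = theano.function([x], DoubleOp()(x))
print(f(np.ones((2, 2))))  # [[ 2.  2.]
                           #  [ 2.  2.]]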

Project: epfl-semester-project-biaxialnn    Author: onanypoint    | Project source | File source
def perform(self, node, inputs_storage, output_storage):
        """Peform the transformation from output to feature space.

        Defines the Python implementation of the op. It is in charge of doing 
        the processing to go from output space (statematrix) to feature space.

        Parameters
        ----------
        node : Apply
            Reference to an Apply node which was previously obtained via
            the Op's make_node() method.
        inputs_storage : list
            A list of references to data which can be operated on using
            non-symbolic statements.
        output_storage : list
            A list of storage cells where the output is to be stored.
        """
        state, time = inputs_storage
        output_storage[0][0] = np.array(self.d.f.note_state_single_to_input_form(state, time), dtype='int8')
Project: deep-prior-pp    Author: moberweger    | Project source | File source
def __init__(self, inputDim=None, outputDim=None, activation=None):
        """
        :type inputDim: tuple of [int]
        :param inputDim: dimensionality of input

        :type outputDim: tuple of [int]
        :param outputDim: number of hidden units

        :type activation: theano.Op or function
        :param activation: non-linearity to be applied in the hidden layer
        """

        super(NonlinearityLayerParams, self).__init__(inputDim, outputDim)

        self._outputDim = self._inputDim
        self._activation = activation
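
A hedged usage sketch: per the docstring, any theano.Op or plain Python function can serve as the activation. The dimensions below are made up, and NonlinearityLayerParams is assumed to be importable from the project above.

import theano.tensor as T

# Hypothetical values; T.tanh is a standard choice of non-linearity.
params = NonlinearityLayerParams(inputDim=(128, 256), activation=T.tanh)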
Project: deep-prior-pp    Author: moberweger    | Project source | File source
def __init__(self, inputDim=None, outputDim=None, activation=None, hasBias=True, init_method=None):
        """
        :type inputDim: tuple of [int]
        :param inputDim: dimensionality of input

        :type outputDim: tuple of [int]
        :param outputDim: number of hidden units

        :type activation: theano.Op or function
        :param activation: non-linearity to be applied in the hidden layer
        """

        super(HiddenLayerParams, self).__init__(inputDim, outputDim)

        self._activation = activation
        self._hasbias = hasBias
        self._init_method = init_method
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def register_opt2(tracks, *tags, **kwargs):
    '''
    Decorator for the new GraphToGPU optimizer.
    Takes an extra parameter (the Op) compared to the register_opt decorator.

    Parameters
    ----------
    tracks : list of Op classes, Op instances, or None
        The node's Op to which the optimization is being applied.

    tags : str
        The optimization tag under which the optimizer will be registered.

    '''
    def f(local_opt):
        name = (kwargs and kwargs.pop('name')) or local_opt.__name__
        opt = theano.gof.local_optimizer(tracks)(local_opt)
        gpu_optimizer2.register(name, opt, 'fast_run', 'gpuarray', *tags)
        return local_opt
    return f
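
A hypothetical registration sketch follows. The (op, context_name, inputs, outputs) signature is an assumption based on how other GraphToGPU local optimizers are written in Theano's gpuarray module; MyOp and GpuMyOp are placeholder names, not real Ops.

# Hypothetical sketch; MyOp and GpuMyOp are placeholders.
@register_opt2([MyOp], 'fast_compile')
def local_gpua_my_op(op, context_name, inputs, outputs):
    # Return the GPU Op; GraphToGPU applies it to the inputs once
    # they have been transferred to the device.
    return GpuMyOp()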
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def local_gpu_elemwise_careduce(node):
    """
    Merge some GpuCAReduceCuda and GPUElemwise.

    """
    if (isinstance(node.op, GpuCAReduceCuda) and
            node.op.pre_scalar_op is None and
            node.inputs[0].owner and
            isinstance(node.inputs[0].owner.op, GpuElemwise) and
            # The Op supports all scalar ops with one input.  We don't
            # automatically add more cases, as some, like a trigonometric
            # operation combined with certain reduction patterns, would
            # probably result in a slowdown.
            isinstance(node.inputs[0].owner.op.scalar_op, scalar.basic.Sqr)):
        op = node.op
        inp = node.inputs[0].owner.inputs[0]
        return [gpu_ca_reduce_cuda(scalar_op=op.scalar_op,
                                   axis=op.axis,
                                   reduce_mask=op.reduce_mask,
                                   pre_scalar_op=scalar.basic.sqr)(inp)]
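
For context, a minimal sketch of the graph pattern this rewrite targets: a reduction applied directly to an elementwise square. After the merge, the square is folded into the reduction as its pre_scalar_op instead of running as a separate GpuElemwise.

import theano
import theano.tensor as T

# On a GPU-enabled build, this sum of squares matches the pattern:
# a CAReduce whose input is produced by an elementwise Sqr.
x = T.fmatrix('x')
f = theano.function([x], T.sum(T.sqr(x)))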
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def ___test_infer_shape_tuple(self):

        a = tensor.tensor3(dtype='int32')
        b = tensor.tensor3(dtype='int32')
        c = tensor.tensor3(dtype='int32')

        A = numpy.asarray([1, 0], dtype='int32').reshape((2, 1, 1))
        B = numpy.asarray(numpy.random.rand(1, 4, 1), dtype='int32')
        C = numpy.asarray(numpy.random.rand(1, 1, 7), dtype='int32')

        f = function([a, b, c], choose(a, (b, c)))
        shape = (2, 4, 7)
        assert numpy.allclose(f(A, B, C).shape, shape)

        self._compile_and_check([a, b, c],  # theano.function inputs
                                [self.op(a, (b, c))],  # theano.function outputs
                                # Always use a non-square matrix!
                                # inputs data
                                [A, B, C],
                                # Op that should be removed from the graph.
                                self.op_class)
Project: deep-prior    Author: moberweger    | Project source | File source
def __init__(self, inputDim=None, outputDim=None, activation=None):
        """
        :type inputDim: tuple of [int]
        :param inputDim: dimensionality of input

        :type outputDim: tuple of [int]
        :param outputDim: number of hidden units

        :type activation: theano.Op or function
        :param activation: non-linearity to be applied in the hidden layer
        """

        super(HiddenLayerParams, self).__init__(inputDim, outputDim)

        self._activation = activation
Project: structured-output-ae    Author: sbelharbi    | Project source | File source
def __init__(self, input, n_in, n_out, W=None, b=None,
                 activation=T.tanh, rng=None):
        """
        Typical hidden layer of an MLP: units are fully connected and have
        a hyperbolic tangent activation function. The weight matrix (W) has
        shape (n_in, n_out) and the bias vector (b) has shape (n_out,).

        Hidden unit activation is given by: tanh(dot(input, w)+ b)

        :type rng: numpy.random.RandomState
        :param rng: a random number generator used to initialize the weights.

        :type input: theano.tensor.dmatrix
        :param input: a symbolic tensor of shape (n_examples, n_in)

        :type n_in: int
        :param n_in: dimension of the input

        :type n_out: int
        :param n_out: number of hidden units

        :type activation: theano.Op or function
        :param activation: non-linearity to be applied in the hidden layer.
        """
        if rng is None:
            rng = np.random.RandomState()

        super(HiddenLayer, self).__init__(
            input, n_in, n_out, activation=activation, rng=rng)
        self.reset_layer()

        if W is not None:
            self.W = W

        if b is not None:
            self.b = b

        self.params = [self.W, self.b]
        self.setup_outputs(input)
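
A hedged construction sketch, assuming HiddenLayer is imported from the project above and using the signature documented in the docstring (the layer sizes are made up):

import numpy as np
import theano.tensor as T

rng = np.random.RandomState(1234)
X = T.dmatrix('X')  # symbolic input of shape (n_examples, n_in)
layer = HiddenLayer(X, n_in=784, n_out=256, activation=T.tanh, rng=rng)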
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def local_assert_no_cpu_op(node):
    if (all([var.owner and isinstance(var.owner.op, HostFromGpu)
             for var in node.inputs]) and
        any([[c for c in var.clients if isinstance(c[0].op, GpuFromHost)]
             for var in node.outputs])):

        if config.assert_no_cpu_op == "warn":
            _logger.warning(("CPU Op %s is detected in the computation "
                             "graph") % node)
        elif config.assert_no_cpu_op == "raise":
            raise AssertionError("The Op %s is on CPU." % node)
        elif config.assert_no_cpu_op == "pdb":
            pdb.set_trace()

# Register the local_assert_no_cpu_op:
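
The check above is driven by Theano's assert_no_cpu_op configuration flag; a minimal sketch of enabling it (setting the flag through THEANO_FLAGS is assumed to work the same way as for other config options):

import theano

# Accepted values, per the branches above: "warn", "raise", "pdb";
# the default leaves the check disabled.
theano.config.assert_no_cpu_op = 'raise'

# Equivalently, from the shell:
#   THEANO_FLAGS='assert_no_cpu_op=raise' python train.py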
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def safe_make_node(op, *inputs):
    """ Emulate the behaviour of make_node when op is a function.

    Normally op in an instead of the Op class.
    """
    node = op(*inputs)
    if isinstance(node, list):
        return node[0].owner
    else:
        return node.owner
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_Op_integers(self):
        """Test behaviour of ARange Op on integer inputs"""
        start, stop, step = iscalars('start', 'stop', 'step')
        out = ARange(start.type.dtype)(start, stop, step)
        f = function([start, stop, step], out)

        assert numpy.all(f(0, 5, 1) == numpy.arange(0, 5, 1))
        assert numpy.all(f(2, 11, 4) == numpy.arange(2, 11, 4))
        assert numpy.all(f(-5, 1, 1) == numpy.arange(-5, 1, 1))
        assert numpy.all(f(10, 2, -2) == numpy.arange(10, 2, -2))
        assert numpy.all(f(10, 2, 2) == numpy.arange(10, 2, 2))
        assert numpy.all(f(0, 0, 1) == numpy.arange(0, 0, 1))
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_dtype_cache(self):
        """Checks that the same Op is returned on repeated calls to arange
        using the same dtype, but not for different dtypes."""

        start, stop, step = iscalars('start', 'stop', 'step')
        out1 = arange(start, stop, step)
        out2 = arange(start, stop, step, dtype=out1.dtype)
        out3 = arange(start, stop, 2., dtype=out1.dtype)
        out4 = arange(start, stop, 2.)

        assert out1.owner.op is out2.owner.op
        assert out2.owner.op is out3.owner.op
        assert out3.owner.op is not out4.owner.op
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_infer_shape(self):
        for shp1, shp2 in [
            ((5, 4), (7, 4)),
            ((1, 4), (7, 4)),
            ((5, 1), (7, 4)),
            ((5, 4), (1, 4)),
            ((5, 4), (7, 1)),

            ((5, 4), (4,)),
            ((1, 4), (4,)),
            ((5, 1), (4,)),
            ((5, 4), (1,)),

            ((4,), (5, 4)),
            ((1,), (5, 4)),
            ((4,), (1, 4)),
            ((4,), (3, 1)),

            ((4,), (4,)),
            ((1,), (4,)),
            ((4,), (1,)),
            ((1,), (1,)),
        ]:
            a = tensor.tensor(dtype='int32',
                              broadcastable=[n == 1 for n in shp1])
            c = tensor.tensor(dtype='float32',
                              broadcastable=[n == 1 for n in shp2])
            A = numpy.asarray(numpy.random.rand(*shp1) * shp2[0], dtype='int32')
            C = numpy.asarray(numpy.random.rand(*shp2) * shp2[0], dtype='float32')
            self._compile_and_check([a, c],  # theano.function inputs
                                    [self.op(a, c)],  # theano.function outputs
                                    # Always use a non-square matrix!
                                    # inputs data
                                    [A, C],
                                    # Op that should be removed from the graph.
                                    self.op_class)

# Disabled as it isn't implemented.
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def grad(self, inputs, output_grads):
        return [output_grads[0] * inputs[1], output_grads[0] * inputs[0]]


# 2. Op returns x + y and x - y
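
Gradients like the product rule above can be checked numerically with theano.gradient.verify_grad; a minimal sketch, using the built-in multiplication in place of the custom Op:

import numpy as np
from theano.gradient import verify_grad

# verify_grad compares the symbolic gradient against finite differences.
rng = np.random.RandomState(42)
verify_grad(lambda x, y: x * y,
            [rng.rand(3, 4), rng.rand(3, 4)],
            rng=rng)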
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def dnn_pool(img, ws, stride=None, mode='max', pad=None):
    """
    GPU pooling using cuDNN from NVIDIA.

    The memory layout to use is 'bc01', that is 'batch', 'channel',
    'first dim', 'second dim' in that order.

    `ws`, `stride` and `pad` must have the same length.

    Parameters
    ----------
    img
        Images to do the pooling over.
    ws : tuple
        Subsampling window size.  Should have 2 or 3 elements.
    stride : tuple
        Subsampling stride (default: (1, 1) or (1, 1, 1)).
    mode : {'max', 'average_inc_pad', 'average_exc_pad', 'sum'}
    pad : tuple
        (padX, padY) or (padX, padY, padZ)
        default: (0, 0) or (0, 0, 0)

    .. warning:: The cuDNN library only works with GPUs that have a compute
        capability of 3.0 or higher.  This means that older GPUs will not
        work with this Op.

    Notes
    -----
    This Op implements the ignore_border=True behaviour of max_pool_2d.

    """
    img = gpu_contiguous(img)
    if stride is None:
        stride = (1,) * len(ws)
    if pad is None:
        pad = (0,) * len(ws)
    if mode == "sum":
        ret = GpuDnnPool(mode="average_inc_pad")(img, ws, stride, pad)
        context_name = ret.type.context_name
        window_elem = theano.tensor.prod(ws).astype(ret.dtype)
        return as_gpuarray_variable(ret * window_elem, context_name)
    return GpuDnnPool(mode=mode)(img, ws, stride, pad)
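
A hedged usage sketch (it requires a cuDNN-enabled GPU build; the shapes are made up):

import theano
import theano.tensor as T

img = T.ftensor4('img')  # (batch, channel, rows, cols), i.e. 'bc01'
pooled = dnn_pool(img, ws=(2, 2), stride=(2, 2), mode='max')
f = theano.function([img], pooled)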
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def searchsorted(x, v, side='left', sorter=None):
    """Find indices where elements should be inserted to maintain order.

    Wraps numpy.searchsorted. Find the indices into a sorted array
    `x` such that, if the corresponding elements in `v` were inserted
    before the indices, the order of `x` would be preserved.

    Parameters
    ----------
    x: 1-D tensor (array-like)
        Input array. If `sorter` is None, then it must be sorted in
        ascending order, otherwise `sorter` must be an array of indices
        which sorts it.
    v: tensor (array-like)
        Contains the values to be inserted into `x`.
    side: {'left', 'right'}, optional.
        If 'left' (default), the index of the first suitable
        location found is given. If 'right', return the last such index. If
        there is no suitable index, return either 0 or N (where N is the length
        of `x`).
    sorter: 1-D tensor of integers (array-like), optional
        Contains indices that sort array `x` into ascending order.
        They are typically the result of argsort.

    Returns
    -------
    indices : tensor of integers (int64)
        Array of insertion points with the same shape as `v`.

    See Also
    --------
    `numpy.searchsorted <https://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.searchsorted.html>`_

    Notes
    -----
    * Binary search is used to find the required insertion points.
    * This Op currently works **only on the CPU**.

    Examples
    --------
    >>> from theano import tensor
    >>> x = tensor.dvector()
    >>> idx = x.searchsorted(3)
    >>> idx.eval({x: [1,2,3,4,5]})
    array(2)
    >>> tensor.extra_ops.searchsorted([1,2,3,4,5], 3).eval()
    array(2)
    >>> tensor.extra_ops.searchsorted([1,2,3,4,5], 3, side='right').eval()
    array(3)
    >>> tensor.extra_ops.searchsorted([1,2,3,4,5], [-10, 10, 2, 3]).eval()
    array([0, 5, 1, 2])

    .. versionadded:: 0.9

    """
    return SearchsortedOp(side=side)(x, v, sorter)