Python theano.tensor module: TensorVariable() example source code

We extracted the following 39 code examples from open-source Python projects to illustrate how to use theano.tensor.TensorVariable().
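Before the project examples, here is a minimal sketch of the two ways a TensorVariable usually comes into being; the names are illustrative and not taken from the projects below.

import theano.tensor as T

# Most TensorVariable instances come from constructors such as T.matrix,
# T.vector or T.tensor4 rather than from calling TensorVariable directly.
x = T.matrix('x')
print(isinstance(x, T.TensorVariable))  # True

# Direct construction from a TensorType is also possible, as some of the
# examples below do: here, a float64 matrix with no broadcastable axes.
y = T.TensorVariable(type=T.TensorType('float64', (False, False)), name='y')
print(y.ndim, y.dtype)  # 2 float64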

Project: reseg    Author: fvisin    | Project Source | File Source
def get_output_shape_for(self, input_shape, **kwargs):
        # self.crop is a tensor --> we cannot know in advance how much
        # we will crop
        if isinstance(self.crop, T.TensorVariable):
            if self.data_format == 'bc01':
                input_shape = list(input_shape)
                input_shape[2] = None
                input_shape[3] = None
            else:
                input_shape = list(input_shape)
                input_shape[1] = None
                input_shape[2] = None
        # self.crop is a list of ints
        else:
            if self.data_format == 'bc01':
                input_shape = list(input_shape)
                input_shape[2] -= self.crop[0]
                input_shape[3] -= self.crop[1]
            else:
                input_shape = list(input_shape)
                input_shape[1] -= self.crop[0]
                input_shape[2] -= self.crop[1]
        return input_shape
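A brief usage sketch of the dispatch above, assuming data_format='bc01', an input shape of (None, 3, 32, 32) and a crop of (2, 3); only the isinstance check decides which branch runs.

import theano.tensor as T

crop = T.ivector('crop')
print(isinstance(crop, T.TensorVariable))  # True  -> output shape [None, 3, None, None]
crop = [2, 3]
print(isinstance(crop, T.TensorVariable))  # False -> output shape [None, 3, 30, 29]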
Project: lemontree    Author: khshim    | Project Source | File Source
def get_inputs_of_variables(variables):
    """
    This function returns the inputs required to compute the given tensor
    variables. The order of the inputs is toposorted.

    Parameters
    ----------
    variables : list
        A list of tensor variables to inspect.
        Usually this is a list of Theano function outputs (loss, accuracy, etc.)

    Returns
    -------
    list
        a list of required inputs to compute the variable.
    """
    # assert
    assert isinstance(variables, list), 'Variables should be a list of tensor variable(s).'
    assert all(isinstance(var, T.TensorVariable) for var in variables), 'All inputs should be tensor variables.'

    # do
    variable_inputs = [var for var in graph.inputs(variables) if isinstance(var, T.TensorVariable)]
    variable_inputs = list(OrderedDict.fromkeys(variable_inputs))  # deduplicate while preserving order
    print('Required inputs are:', variable_inputs)
    return variable_inputs
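A minimal usage sketch, assuming the helper above is importable (`graph` refers to theano.gof.graph in the project) and with illustrative variable names:

import theano.tensor as T

x = T.matrix('x')
y = T.vector('y')
loss = ((x.sum(axis=1) - y) ** 2).mean()
inputs = get_inputs_of_variables([loss])  # prints and returns the required inputs, here x and y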
Project: Attentive_reader    Author: caglar    | Project Source | File Source
def shared_like(variable, name=None):
    """Construct a shared variable to hold the value of a tensor variable.

    Parameters
    ----------
    variable : :class:`~tensor.TensorVariable`
        The variable whose dtype and ndim will be used to construct
        the new shared variable.
    name : :obj:`str` or :obj:`None`
        The name of the shared variable. If None, the name is determined
        based on variable's name.

    """
    variable = tensor.as_tensor_variable(variable)
    if name is None:
        name = "shared_{}".format(variable.name)
    return theano.shared(numpy.zeros((0,) * variable.ndim,
                                     dtype=variable.dtype),
                         name=name)
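A usage sketch with illustrative names: the shared variable starts empty but matches the dtype and ndim of the source variable.

import theano.tensor as T

x = T.matrix('x')
buf = shared_like(x)
print(buf.name, buf.ndim, buf.get_value().shape)  # shared_x 2 (0, 0)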
Project: Attentive_reader    Author: caglar    | Project Source | File Source
def is_graph_input(variable):
    """Check if variable is a user-provided graph input.

    To be considered an input the variable must have no owner, and not
    be a constant or shared variable.

    Parameters
    ----------
    variable : :class:`~tensor.TensorVariable`

    Returns
    -------
    bool
        ``True`` If the variable is a user-provided input to the graph.

    """
    return (not variable.owner and
            not isinstance(variable, SharedVariable) and
            not isinstance(variable, Constant))
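A usage sketch with illustrative names:

import theano
import theano.tensor as T

x = T.vector('x')
w = theano.shared(0.5, name='w')
print(is_graph_input(x))      # True: ownerless, neither shared nor constant
print(is_graph_input(w))      # False: a shared variable
print(is_graph_input(x * w))  # False: has an owner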
Project: Attentive_reader    Author: caglar    | Project Source | File Source
def put_hook(variable, hook_fn, *args):
    r"""Put a hook on a Theano variables.

    Ensures that the hook function is executed every time when the value
    of the Theano variable is available.

    Parameters
    ----------
    variable : :class:`~tensor.TensorVariable`
        The variable to put a hook on.
    hook_fn : function
        The hook function. Should take a single argument: the variable's
        value.
    \*args : list
        Positional arguments to pass to the hook function.

    """
    return printing.Print(global_fn=lambda _, x: hook_fn(x, *args))(variable)
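A usage sketch with illustrative names; the hook fires whenever the wrapped value is computed.

import numpy
import theano
import theano.tensor as T

x = T.vector('x')
hooked = put_hook(x ** 2, lambda value: print('squared:', value))
f = theano.function([x], hooked + 1)
f(numpy.ones(3, dtype=theano.config.floatX))  # prints "squared: [1. 1. 1.]"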
Project: Theano-Deep-learning    Author: GeekLiB    | Project Source | File Source
def test_sanity_check_slice(self):

        mySymbolicMatricesList = TypedListType(T.TensorType(
            theano.config.floatX, (False, False)))()

        mySymbolicSlice = SliceType()()

        z = GetItem()(mySymbolicMatricesList, mySymbolicSlice)

        self.assertFalse(isinstance(z, T.TensorVariable))

        f = theano.function([mySymbolicMatricesList, mySymbolicSlice],
                            z)

        x = rand_ranged_matrix(-1000, 1000, [100, 101])

        self.assertTrue(numpy.array_equal(f([x], slice(0, 1, 1)), [x]))
Project: Theano-Deep-learning    Author: GeekLiB    | Project Source | File Source
def make_node(self, x, index):
        assert isinstance(x.type, TypedListType)
        if not isinstance(index, Variable):
            if isinstance(index, slice):
                index = Constant(SliceType(), index)
                return Apply(self, [x, index], [x.type()])
            else:
                index = T.constant(index, ndim=0, dtype='int64')
                return Apply(self, [x, index], [x.ttype()])
        if isinstance(index.type, SliceType):
            return Apply(self, [x, index], [x.type()])
        elif isinstance(index, T.TensorVariable) and index.ndim == 0:
            assert index.dtype == 'int64'
            return Apply(self, [x, index], [x.ttype()])
        else:
            raise TypeError('Expected scalar or slice as index.')
Project: GELUs    Author: hendrycks    | Project Source | File Source
def apply(self, x):
        s = x.shape
        if isinstance(x, np.ndarray):
            return np.dot(x.reshape((s[0],np.prod(s[1:]))) - self.mean.get_value(), self.ZCA_mat.get_value()).reshape(s)
        elif isinstance(x, T.TensorVariable):
            return T.dot(x.flatten(2) - self.mean.dimshuffle('x',0), self.ZCA_mat).reshape(s)
        else:
            raise NotImplementedError("Whitening only implemented for numpy arrays or Theano TensorVariables")
Project: GELUs    Author: hendrycks    | Project Source | File Source
def invert(self, x):
        s = x.shape
        if isinstance(x, np.ndarray):
            return (np.dot(x.reshape((s[0],np.prod(s[1:]))), self.inv_ZCA_mat.get_value()) + self.mean.get_value()).reshape(s)
        elif isinstance(x, T.TensorVariable):
            return (T.dot(x.flatten(2), self.inv_ZCA_mat) + self.mean.dimshuffle('x',0)).reshape(s)
        else:
            raise NotImplementedError("Whitening only implemented for numpy arrays or Theano TensorVariables")
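These two methods come from a ZCA whitening class whose fitted statistics (`mean`, `ZCA_mat`, `inv_ZCA_mat`) are Theano shared variables. Below is a minimal, self-contained sketch of the same numpy/symbolic dual dispatch; the identity "whitening" and the class name are illustrative, not the project's.

import numpy as np
import theano
import theano.tensor as T

class IdentityWhitener(object):
    def __init__(self, dim):
        # Stand-ins for the fitted statistics of the real class.
        self.mean = theano.shared(np.zeros(dim, dtype=theano.config.floatX))
        self.ZCA_mat = theano.shared(np.eye(dim, dtype=theano.config.floatX))

    def apply(self, x):
        if isinstance(x, np.ndarray):
            # Concrete path: compute with the stored numpy values.
            return np.dot(x - self.mean.get_value(), self.ZCA_mat.get_value())
        elif isinstance(x, T.TensorVariable):
            # Symbolic path: build graph nodes instead.
            return T.dot(x - self.mean.dimshuffle('x', 0), self.ZCA_mat)
        else:
            raise NotImplementedError("numpy arrays or TensorVariables only")

w = IdentityWhitener(4)
print(w.apply(np.ones((2, 4), dtype=theano.config.floatX)))  # concrete result
print(type(w.apply(T.matrix('x'))))                          # a TensorVariable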

# T.nnet.relu has some issues with very large inputs; this is more stable
Project: NMT    Author: tuzhaopeng    | Project Source | File Source
def dbg_hook(hook, x):
    if not isinstance(x, TT.TensorVariable):
        x.out = theano.printing.Print(global_fn=hook)(x.out)
        return x
    else:
        return theano.printing.Print(global_fn=hook)(x)
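A usage sketch with illustrative names: when `x` is already a TensorVariable the printer wraps it directly; otherwise the `.out` attribute of the expression container is wrapped in place.

import theano
import theano.tensor as T

def hook(op, value):
    # theano.printing.Print calls global_fn with the op and the runtime value.
    print('seen:', value)

x = T.scalar('x')
f = theano.function([x], dbg_hook(hook, x) * 2)
f(3.0)  # prints "seen: 3.0" before returning array(6.0)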
Project: NMT    Author: tuzhaopeng    | Project Source | File Source
def dbg_hook(hook, x):
    if not isinstance(x, TT.TensorVariable):
        x.out = theano.printing.Print(global_fn=hook)(x.out)
        return x
    else:
        return theano.printing.Print(global_fn=hook)(x)
Project: reseg    Author: fvisin    | Project Source | File Source
def __init__(self, l_in, crop, data_format='bc01', centered=True,
                 **kwargs):
        super(CropLayer, self).__init__(l_in, crop, **kwargs)
        assert data_format in ['bc01', 'b01c']
        if not isinstance(crop, T.TensorVariable):
            crop = lasagne.utils.as_tuple(crop, 2)
        self.crop = crop
        self.data_format = data_format
        self.centered = centered
Project: NADE    Author: MarcCote    | Project Source | File Source
def __init__(self, dataset, batch_size, use_mask_as_input=False, keep_mask=False, seed=1234):
        """
        Parameters
        ----------
        dataset : `SequenceDataset` object
            Dataset of datasets (one for each bundle).
        batch_size : int
            Number of examples per batch. *Must be greater than the number of
            bundles in `bundles_dataset`.*
        seed : int (optional)
            Seed of the random number generator used to sample a different
            autoregressive mask for each example.
        """
        super().__init__(dataset, batch_size)

        self.use_mask_as_input = use_mask_as_input
        self.seed = seed
        self.rng = np.random.RandomState(self.seed)
        self.keep_mask = keep_mask

        # Allocate memory for the autoregressive mask.
        self.mask_shape = (len(dataset),) + self.dataset.input_shape
        self._shared_mask_o_lt_d = sharedX(np.zeros(self.mask_shape), name='autoregressive_mask', keep_on_cpu=True)

        # Add a new attribute: a symbolic variable representing the autoregressive mask.
        self._shared_mask_o_lt_d.set_value(self.generate_autoregressive_mask())
        self.dataset.mask_o_lt_d = T.TensorVariable(type=T.TensorType("floatX", [False]*dataset.inputs.ndim), name=dataset.name+'_symb_mask')

        # Keep only `batch_size` masks as test values.
        self.dataset.mask_o_lt_d.tag.test_value = self._shared_mask_o_lt_d.get_value()[:batch_size]  # For debugging Theano graphs.

        if self.use_mask_as_input:
            self.dataset.symb_inputs.tag.test_value = np.concatenate([self.dataset.symb_inputs.tag.test_value * self.dataset.mask_o_lt_d.tag.test_value,
                                                                      self.dataset.mask_o_lt_d.tag.test_value], axis=1)
Project: weightnorm    Author: openai    | Project Source | File Source
def apply(self, x):
        s = x.shape
        if isinstance(x, np.ndarray):
            return np.dot(x.reshape((s[0],np.prod(s[1:]))) - self.mean.get_value(), self.ZCA_mat.get_value()).reshape(s)
        elif isinstance(x, T.TensorVariable):
            return T.dot(x.flatten(2) - self.mean.dimshuffle('x',0), self.ZCA_mat).reshape(s)
        else:
            raise NotImplementedError("Whitening only implemented for numpy arrays or Theano TensorVariables")
Project: weightnorm    Author: openai    | Project Source | File Source
def invert(self, x):
        s = x.shape
        if isinstance(x, np.ndarray):
            return (np.dot(x.reshape((s[0],np.prod(s[1:]))), self.inv_ZCA_mat.get_value()) + self.mean.get_value()).reshape(s)
        elif isinstance(x, T.TensorVariable):
            return (T.dot(x.flatten(2), self.inv_ZCA_mat) + self.mean.dimshuffle('x',0)).reshape(s)
        else:
            raise NotImplementedError("Whitening only implemented for numpy arrays or Theano TensorVariables")

# T.nnet.relu has some issues with very large inputs; this is more stable
Project: Attentive_reader    Author: caglar    | Project Source | File Source
def check_theano_variable(variable, n_dim, dtype_prefix):
    """Check number of dimensions and dtype of a Theano variable.

    If the input is not a Theano variable, it is converted to one. `None`
    input is handled as a special case: no checks are done.

    Parameters
    ----------
    variable : :class:`~tensor.TensorVariable` or convertible to one
        A variable to check.
    n_dim : int
        Expected number of dimensions or None. If None, no check is
        performed.
    dtype_prefix : str
        Expected dtype prefix or None. If None, no check is performed.

    """
    if variable is None:
        return

    if not isinstance(variable, tensor.Variable):
        variable = tensor.as_tensor_variable(variable)

    if n_dim and variable.ndim != n_dim:
        raise ValueError("Wrong number of dimensions:"
                         "\n\texpected {}, got {}".format(
                             n_dim, variable.ndim))

    if dtype_prefix and not variable.dtype.startswith(dtype_prefix):
        raise ValueError("Wrong dtype prefix:"
                         "\n\texpected starting with {}, got {}".format(
                             dtype_prefix, variable.dtype))
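A usage sketch with illustrative names:

import theano.tensor as T

x = T.matrix('x')                        # 2 dimensions, float dtype
check_theano_variable(x, 2, 'float')     # passes silently
check_theano_variable(None, 3, 'int')    # passes: None skips all checks
# check_theano_variable(x, 3, 'float')   would raise ValueError (wrong ndim)
# check_theano_variable(x, 2, 'int')     would raise ValueError (wrong dtype prefix)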
Project: Attentive_reader    Author: caglar    | Project Source | File Source
def dbg_hook(hook, x):
    if not isinstance(x, TT.TensorVariable):
        x.out = theano.printing.Print(global_fn=hook)(x.out)
        return x
    else:
        return theano.printing.Print(global_fn=hook)(x)
Project: lmkit    Author: jiangnanhugo    | Project Source | File Source
def apply(self, input_, mask=None):
        """

        Parameters
        ----------
        input_ : :class:`~tensor.TensorVariable`
            Sequence to feed into the GRU. Axes are batch, sequence, features.

        mask : :class:`~tensor.TensorVariable`
            A 1D binary array where 1 marks available data and 0 marks padding.

        Returns
        -------
        output : :class:`theano.tensor.TensorVariable`
            The output sequence. Axes are batch, sequence, features.
        """
        states_from_in = self.input_to_state_transform.apply(input_)
        update_from_in = self.input_to_update_transform.apply(input_)
        reset_from_in = self.input_to_reset_transform.apply(input_)

        gate_inputs = tensor.concatenate([update_from_in, reset_from_in], axis=2)

        if self.use_mine:
            output = self.rnn.apply(inputs=states_from_in, update_inputs=update_from_in, reset_inputs=reset_from_in, mask=mask)
        else:
            output = self.rnn.apply(inputs=states_from_in, gate_inputs=gate_inputs)

        return output
Project: triple-gan    Author: zhenxuan00    | Project Source | File Source
def apply(self, x):
        s = x.shape
        if isinstance(x, np.ndarray):
            return np.dot(x.reshape((s[0],np.prod(s[1:]))) - self.mean.get_value(), self.ZCA_mat.get_value()).reshape(s)
        elif isinstance(x, T.TensorVariable):
            return T.dot(x.flatten(2) - self.mean.dimshuffle('x',0), self.ZCA_mat).reshape(s)
        else:
            raise NotImplementedError("Whitening only implemented for numpy arrays or Theano TensorVariables")
Project: triple-gan    Author: zhenxuan00    | Project Source | File Source
def invert(self, x):
        s = x.shape
        if isinstance(x, np.ndarray):
            return (np.dot(x.reshape((s[0],np.prod(s[1:]))), self.inv_ZCA_mat.get_value()) + self.mean.get_value()).reshape(s)
        elif isinstance(x, T.TensorVariable):
            return (T.dot(x.flatten(2), self.inv_ZCA_mat) + self.mean.dimshuffle('x',0)).reshape(s)
        else:
            raise NotImplementedError("Whitening only implemented for numpy arrays or Theano TensorVariables")
Project: triple-gan    Author: zhenxuan00    | Project Source | File Source
def apply(self, x):
        s = x.shape
        if isinstance(x, np.ndarray):
            return np.dot(x.reshape((s[0],np.prod(s[1:]))) - self.mean.get_value(), self.ZCA_mat.get_value()).reshape(s)
        elif isinstance(x, T.TensorVariable):
            return T.dot(x.flatten(2) - self.mean.dimshuffle('x',0), self.ZCA_mat).reshape(s)
        else:
            raise NotImplementedError("Whitening only implemented for numpy arrays or Theano TensorVariables")
Project: triple-gan    Author: zhenxuan00    | Project Source | File Source
def invert(self, x):
        s = x.shape
        if isinstance(x, np.ndarray):
            return (np.dot(x.reshape((s[0],np.prod(s[1:]))), self.inv_ZCA_mat.get_value()) + self.mean.get_value()).reshape(s)
        elif isinstance(x, T.TensorVariable):
            return (T.dot(x.flatten(2), self.inv_ZCA_mat) + self.mean.dimshuffle('x',0)).reshape(s)
        else:
            raise NotImplementedError("Whitening only implemented for numpy arrays or Theano TensorVariables")

# T.nnet.relu has some issues with very large inputs; this is more stable
Project: Theano-Deep-learning    Author: GeekLiB    | Project Source | File Source
def make_node(self, x, index, toInsert):
        assert isinstance(x.type, TypedListType)
        assert x.ttype == toInsert.type
        if not isinstance(index, Variable):
            index = T.constant(index, ndim=0, dtype='int64')
        else:
            assert index.dtype == 'int64'
            assert isinstance(index, T.TensorVariable) and index.ndim == 0
        return Apply(self, [x, index, toInsert], [x.type()])
Project: Theano-Deep-learning    Author: GeekLiB    | Project Source | File Source
def __setstate__(self, d):
        self.__dict__.update(d)
        if "allow_gc" not in self.__dict__:
            self.allow_gc = True
            self.info['allow_gc'] = True
        if not hasattr(self, 'gpua'):
            self.gpua = False
            self.info['gpua'] = False
        if not hasattr(self, 'var_mappings'):
            # Generate the mappings between inner and outer inputs and outputs
            # if they haven't already been generated.
            self.var_mappings = self.get_oinp_iinp_iout_oout_mappings()
        if hasattr(self, 'fn'):
            if not hasattr(self, 'thunk_mit_mot_out_slices'):
                # The thunk has been compiled before mit_mot preallocation
                # feature was implemented. Mark every mit_mot output tap as
                # not having been preallocated
                self.mitmots_preallocated = [False] * self.n_mit_mot_outs

            if not hasattr(self, 'outs_is_tensor'):
                # The thunk has been compiled before the analysis, at
                # compilation time, of the location of the inputs and outputs.
                # Perform this analysis here.
                self.inps_is_tensor = [isinstance(out, theano.tensor.TensorVariable)
                                       for out in self.fn.maker.fgraph.inputs]
                self.outs_is_tensor = [isinstance(out, theano.tensor.TensorVariable)
                                       for out in self.fn.maker.fgraph.outputs]

        # Ensure that the graph associated with the inner function is valid.
        self.validate_inner_graph()
Project: sgnmt    Author: ucam-smt    | Project Source | File Source
def preprocess(self, attended):
        """Preprocess the sequence for computing attention weights.

        Args:
            attended (TensorVariable): The attended sequence; time is
                                       the first dimension.
        """
        return self.attended_transformer.apply(attended)
Project: sgnmt    Author: ucam-smt    | Project Source | File Source
def take_glimpses(self, attended, preprocessed_attended=None,
                      attended_mask=None, **states):
        r"""Compute attention weights and produce glimpses.

        Parameters
        ----------
        attended : :class:`~tensor.TensorVariable`
            The sequence; time is the first dimension.
        preprocessed_attended : :class:`~tensor.TensorVariable`
            The preprocessed sequence. If ``None``, is computed by calling
            :meth:`preprocess`.
        attended_mask : :class:`~tensor.TensorVariable`
            A 0/1 mask specifying available data. 0 means that the
            corresponding sequence element is fake.
        \*\*states
            The states of the network.

        Returns
        -------
        weighted_averages : :class:`~theano.Variable`
            Linear combinations of sequence elements with the attention
            weights.
        weights : :class:`~theano.Variable`
            The attention weights. The first dimension is batch, the second
            is time.

        """
        energies = self.compute_energies(attended, preprocessed_attended,
                                         states)
        weights = self.compute_weights(energies, attended_mask)
        weighted_averages = self.compute_weighted_averages(weights, attended)
        return weighted_averages, weights.T
Project: sgnmt    Author: ucam-smt    | Project Source | File Source
def preprocess(self, attended):
        """Preprocess the sequence for computing attention weights.

        Parameters
        ----------
        attended : :class:`~tensor.TensorVariable`
            The attended sequence; time is the first dimension.

        """
        return self.attended_transformer.apply(attended)
Project: attention-sum-reader    Author: sohuren    | Project Source | File Source
def categorical_cross_entropy_with_masking(self, application_call, y, x, mask, **kwargs):
        """Computationally stable cross-entropy for pre-softmax values.

        Parameters
        ----------
        y : :class:`~tensor.TensorVariable`
            In the case of a matrix argument, each row represents a
            probability distribution. In the vector case, each element
            represents a distribution by specifying the position of 1 in a
            1-hot vector.
        x : :class:`~tensor.TensorVariable`
            A matrix, each row contains unnormalized probabilities of a
            distribution.
        mask : :class:`~tensor.TensorVariable`
            A mask of the elements to filter, in the same shape as x.

        Returns
        -------
        cost : :class:`~tensor.TensorVariable`
            A vector of cross-entropies between respective distributions
            from y and x.

        """
        x = self.log_probabilities(x, mask)
        # DEBUG
        #x = theano.tensor.printing.Print("log probabilities: ")(x)
        #y = theano.tensor.printing.Print("target factoids: ")(y)
        #mask = theano.tensor.printing.Print("mask: ")(mask)
        application_call.add_auxiliary_variable(
            x.copy(name='log_probabilities'))
        if y.ndim == x.ndim - 1:
            indices = tensor.arange(y.shape[0]) * x.shape[1] + y
            cost = -x.flatten()[indices]
        elif y.ndim == x.ndim:
            cost = -(x * y).sum(axis=1)
        else:
            raise TypeError('rank mismatch between x and y')
        return cost
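A small numeric sketch of the index arithmetic in the `y.ndim == x.ndim - 1` branch: flattening a (batch, classes) matrix row-major puts row i, column y[i] at position i * n_classes + y[i].

import numpy as np

log_p = np.log(np.array([[0.7, 0.2, 0.1],
                         [0.1, 0.8, 0.1]]))
y = np.array([0, 1])
indices = np.arange(y.shape[0]) * log_p.shape[1] + y  # [0, 4]
cost = -log_p.flatten()[indices]
print(cost)  # approximately [0.357, 0.223], i.e. [-log 0.7, -log 0.8]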
Project: asreader    Author: rkadlec    | Project Source | File Source
def categorical_cross_entropy_with_masking(self, application_call, y, x, mask, **kwargs):
        """Computationally stable cross-entropy for pre-softmax values.

        Parameters
        ----------
        y : :class:`~tensor.TensorVariable`
            In the case of a matrix argument, each row represents a
            probability distribution. In the vector case, each element
            represents a distribution by specifying the position of 1 in a
            1-hot vector.
        x : :class:`~tensor.TensorVariable`
            A matrix, each row contains unnormalized probabilities of a
            distribution.
        mask : :class:`~tensor.TensorVariable`
            A mask of the elements to filter, in the same shape as x.

        Returns
        -------
        cost : :class:`~tensor.TensorVariable`
            A vector of cross-entropies between respective distributions
            from y and x.

        """
        x = self.log_probabilities(x, mask)
        # DEBUG
        #x = theano.tensor.printing.Print("log probabilities: ")(x)
        #y = theano.tensor.printing.Print("target factoids: ")(y)
        #mask = theano.tensor.printing.Print("mask: ")(mask)
        application_call.add_auxiliary_variable(
            x.copy(name='log_probabilities'))
        if y.ndim == x.ndim - 1:
            indices = tensor.arange(y.shape[0]) * x.shape[1] + y
            cost = -x.flatten()[indices]
        elif y.ndim == x.ndim:
            cost = -(x * y).sum(axis=1)
        else:
            raise TypeError('rank mismatch between x and y')
        return cost
Project: DCNMT    Author: SwordYork    | Project Source | File Source
def apply(self, inputs, gate_inputs, states, mask=None):
        """Apply the gated recurrent transition.
        Parameters
        ----------
        states : :class:`~tensor.TensorVariable`
            The 2 dimensional matrix of current states in the shape
            (batch_size, dim). Required for `one_step` usage.
        inputs : :class:`~tensor.TensorVariable`
            The 2 dimensional matrix of inputs in the shape (batch_size,
            dim)
        gate_inputs : :class:`~tensor.TensorVariable`
            The 2 dimensional matrix of inputs to the gates in the
            shape (batch_size, 2 * dim).
        mask : :class:`~tensor.TensorVariable`
            A 1D binary array in the shape (batch,) which is 1 if the
            character is available, 0 if it is the delimiter.
        Returns
        -------
        output : :class:`~tensor.TensorVariable`
            Next states of the network.
        """
        gate_values = self.gate_activation.apply(
            states.dot(self.state_to_gates) + gate_inputs)
        update_values = gate_values[:, :self.dim]
        reset_values = gate_values[:, self.dim:]
        states_reset = states * reset_values
        next_states = self.activation.apply(
            states_reset.dot(self.state_to_state) + inputs)
        next_states = (next_states * update_values +
                       states * (1 - update_values))
        if mask:
            next_states = (mask[:, None] * next_states +
                           (1 - mask[:, None]) * self.initial_states(mask.shape[0]))
        return next_states
Project: DCNMT    Author: SwordYork    | Project Source | File Source
def apply(self, inputs, gate_inputs, states, input_states, mask=None):
        """Apply the gated recurrent transition.
        Parameters
        ----------
        states : :class:`~tensor.TensorVariable`
            The 2 dimensional matrix of current states in the shape
            (batch_size, dim). Required for `one_step` usage.
        inputs : :class:`~tensor.TensorVariable`
            The 2 dimensional matrix of inputs in the shape (batch_size,
            dim)
        gate_inputs : :class:`~tensor.TensorVariable`
            The 2 dimensional matrix of inputs to the gates in the
            shape (batch_size, 2 * dim).
        input_states : :class:`~tensor.TensorVariable`
            The 2 dimensional matrix of decoder outputs in the shape
            (batch_size, dim), generated by the decoder.
        mask : :class:`~tensor.TensorVariable`
            A 1D binary array in the shape (batch,) which is 1 if the
            character is available, 0 if it is the delimiter.
        Returns
        -------
        output : :class:`~tensor.TensorVariable`
            Next states of the network.
        """
        # masking the states at the end may also be possible
        if mask:
            states = (mask[:, None] * states + (1 - mask[:, None]) * input_states)
        gate_values = self.gate_activation.apply(
            states.dot(self.state_to_gates) + gate_inputs)
        update_values = gate_values[:, :self.dim]
        reset_values = gate_values[:, self.dim:]
        states_reset = states * reset_values
        next_states = self.activation.apply(
            states_reset.dot(self.state_to_state) + inputs)
        next_states = (next_states * update_values +
                       states * (1 - update_values))
        return next_states

    # using constant initial_states
Project: DCNMT    Author: SwordYork    | Project Source | File Source
def apply(self, inputs, gate_inputs, states, mask=None):
        """Apply the gated recurrent transition.
        Parameters
        ----------
        states : :class:`~tensor.TensorVariable`
            The 2 dimensional matrix of current states in the shape
            (batch_size, dim). Required for `one_step` usage.
        inputs : :class:`~tensor.TensorVariable`
            The 2 dimensional matrix of inputs in the shape (batch_size,
            dim)
        gate_inputs : :class:`~tensor.TensorVariable`
            The 2 dimensional matrix of inputs to the gates in the
            shape (batch_size, 2 * dim).
        mask : :class:`~tensor.TensorVariable`
            A 1D binary array in the shape (batch,) which is 1 if the
            character is available, 0 if it is the delimiter.
        Returns
        -------
        output : :class:`~tensor.TensorVariable`
            Next states of the network.
        """
        if mask:
            states = (mask[:, None] * states + (1 - mask[:, None]) * self.initial_states(mask.shape[0]))
        gate_values = self.gate_activation.apply(
            states.dot(self.state_to_gates) + gate_inputs)
        update_values = gate_values[:, :self.dim]
        reset_values = gate_values[:, self.dim:]
        states_reset = states * reset_values
        next_states = self.activation.apply(
            states_reset.dot(self.state_to_state) + inputs)
        next_states = (next_states * update_values +
                       states * (1 - update_values))

        return next_states
Project: NMT-Coverage    Author: tuzhaopeng    | Project Source | File Source
def dbg_hook(hook, x):
    if not isinstance(x, TT.TensorVariable):
        x.out = theano.printing.Print(global_fn=hook)(x.out)
        return x
    else:
        return theano.printing.Print(global_fn=hook)(x)
Project: NMT-Coverage    Author: tuzhaopeng    | Project Source | File Source
def dbg_hook(hook, x):
    if not isinstance(x, TT.TensorVariable):
        x.out = theano.printing.Print(global_fn=hook)(x.out)
        return x
    else:
        return theano.printing.Print(global_fn=hook)(x)
Project: odin_old    Author: trungnt13    | Project Source | File Source
def is_expression(v):
    '''A placeholder is also an expression.'''
    return isinstance(v, theano.tensor.TensorVariable)
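A usage sketch: both placeholders and derived expressions qualify.

import theano.tensor as T

x = T.matrix('x')
print(is_expression(x))        # True (a placeholder)
print(is_expression(x.sum()))  # True (a derived expression)
print(is_expression(3.0))      # False (a plain Python float)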
Project: dict_based_learning    Author: tombosc    | Project Source | File Source
def inner_apply(self, inputs, states, cells, mask=None):
        """Apply the Long Short Term Memory transition.

        Parameters
        ----------
        states : :class:`~tensor.TensorVariable`
            The 2 dimensional matrix of current states in the shape
            (batch_size, features). Required for `one_step` usage.
        cells : :class:`~tensor.TensorVariable`
            The 2 dimensional matrix of current cells in the shape
            (batch_size, features). Required for `one_step` usage.
        inputs : :class:`~tensor.TensorVariable`
            The 2 dimensional matrix of inputs in the shape (batch_size,
            features * 4). The `inputs` needs to be four times the
            dimension of the LSTM brick to ensure that each of the four gates
            receives a different transformation of the input. See [Grav13]_
            equations 7 to 10 for more details. The `inputs` are then split
            in this order: Input gates, forget gates, cells and output
            gates.
        mask : :class:`~tensor.TensorVariable`
            A 1D binary array in the shape (batch,) which is 1 if there is
            data available, 0 if not. Assumed to be 1-s only if not given.

        .. [Grav13] Graves, Alex, *Generating sequences with recurrent*
            *neural networks*, arXiv preprint arXiv:1308.0850 (2013).

        Returns
        -------
        states : :class:`~tensor.TensorVariable`
            Next states of the network.
        cells : :class:`~tensor.TensorVariable`
            Next cell activations of the network.

        """
        def slice_last(x, no):
            return x[:, no*self.dim: (no+1)*self.dim]

        activation = tensor.dot(states, self.W_state) + inputs
        in_gate = self.gate_activation.apply(
            slice_last(activation, 0) + cells * self.W_cell_to_in)
        forget_gate = self.gate_activation.apply(
            slice_last(activation, 1) + cells * self.W_cell_to_forget)
        next_cells = (
            forget_gate * cells +
            in_gate * self.activation.apply(slice_last(activation, 2)))
        out_gate = self.gate_activation.apply(
            slice_last(activation, 3) + next_cells * self.W_cell_to_out)
        next_states = out_gate * self.activation.apply(next_cells)

        if mask:
            next_states = (mask[:, None] * next_states +
                           (1 - mask[:, None]) * states)
            next_cells = (mask[:, None] * next_cells +
                          (1 - mask[:, None]) * cells)

        return next_states, next_cells, in_gate, forget_gate, out_gate
Project: NADE    Author: MarcCote    | Project Source | File Source
def __init__(self, dataset, batch_size, batch_id, ordering_id, use_mask_as_input=False, seed=1234):
        """
        Parameters
        ----------
        dataset : `SequenceDataset` object
            Dataset of datasets (one for each bundle).
        batch_size : int
            Number of examples per batch. *Must be greater than the number of
            bundles in `bundles_dataset`.*
        batch_id : int
            Index of the batch handled by this batch scheduler.
        ordering_id : int
            Index of the input ordering used by this batch scheduler.
        seed : int (optional)
            Seed of the random number generator used to sample a different
            autoregressive mask for each example.
        """
        super().__init__(dataset)
        self.use_mask_as_input = use_mask_as_input
        self.seed = seed
        self.rng = np.random.RandomState(self.seed)
        self.batch_size = batch_size
        self.batch_id = batch_id
        self.ordering_id = ordering_id

        # Determine the start and the end of the batch that will be used by this batch scheduler.
        assert batch_id*self.batch_size < len(self.dataset)
        self.batch_start = batch_id*self.batch_size
        self.batch_end = min((batch_id+1)*self.batch_size, len(dataset))

        # Determine the ordering that will be used by this batch scheduler.
        self.d = 0
        self.D = self.dataset.input_shape[0]
        self.ordering = np.arange(self.D)
        for _ in range(ordering_id+1):
            self.rng.shuffle(self.ordering)

        # Matrix mask that will be used when concatenating the mask.
        self._shared_Moltd = sharedX(np.zeros((self.batch_end-self.batch_start, self.D)), name='Moltd')

        # Vector mask that will be broadcasted across all inputs.
        # self._shared_mod = sharedX(np.zeros((1, self.D)), name='mod')
        self._shared_mod = sharedX(np.zeros((self.D,)), name='mod')

        # Add a new attribute: a symbolic variable representing the autoregressive mask.
        self.change_masks(self.d)
        self.Moltd = T.TensorVariable(type=T.TensorType("floatX", [False]*dataset.inputs.ndim), name="symb_Moltd")
        self.mod = T.TensorVariable(type=T.TensorType("floatX", [True, False]), name="symb_mod")

        # Keep only `(self.batch_end-self.batch_start)` examples as test values.
        self.dataset.symb_inputs.tag.test_value = self.dataset.inputs.get_value()[:(self.batch_end-self.batch_start)]
        if self.dataset.has_targets:
            self.dataset.symb_targets.tag.test_value = self.dataset.targets.get_value()[:(self.batch_end-self.batch_start)]

        self.Moltd.tag.test_value = self._shared_Moltd.get_value()[:(self.batch_end-self.batch_start)]
        self.mod.tag.test_value = self._shared_mod.get_value()[None, :]

        if self.use_mask_as_input:
            self.dataset.symb_inputs.tag.test_value = np.concatenate([self.dataset.symb_inputs.tag.test_value * self.Moltd.tag.test_value,
                                                                      self.Moltd.tag.test_value], axis=1)
Project: Generative-models    Author: aalitaiga    | Project Source | File Source
def apply(self, inputs, states, cells, mask=None):
        """Apply the Long Short Term Memory transition.
        Parameters
        ----------
        states : :class:`~tensor.TensorVariable`
            The 2 dimensional matrix of current states in the shape
            (batch_size, features). Required for `one_step` usage.
        cells : :class:`~tensor.TensorVariable`
            The 2 dimensional matrix of current cells in the shape
            (batch_size, features). Required for `one_step` usage.
        inputs : :class:`~tensor.TensorVariable`
            The 2 dimensional matrix of inputs in the shape (batch_size,
            features * 4). The `inputs` needs to be four times the
            dimension of the LSTM brick to ensure that each of the four gates
            receives a different transformation of the input. See [Grav13]_
            equations 7 to 10 for more details. The `inputs` are then split
            in this order: Input gates, forget gates, cells and output
            gates.
        mask : :class:`~tensor.TensorVariable`
            A 1D binary array in the shape (batch,) which is 1 if there is
            data available, 0 if not. Assumed to be 1-s only if not given.
        .. [Grav13] Graves, Alex, *Generating sequences with recurrent*
            *neural networks*, arXiv preprint arXiv:1308.0850 (2013).
        Returns
        -------
        states : :class:`~tensor.TensorVariable`
            Next states of the network.
        cells : :class:`~tensor.TensorVariable`
            Next cell activations of the network.
        """
        def slice_last(x, no):
            return x[:, no*self.dim: (no+1)*self.dim]

        activation = tensor.dot(states, self.W_ss) + tensor.dot(inputs, self.W_is)
        in_gate = self.gate_activation.apply(slice_last(activation, 0))
        forget_gate = self.gate_activation.apply(slice_last(activation, 1))

        next_cells = (
            forget_gate * cells +
            in_gate * self.activation.apply(slice_last(activation, 2)))
        out_gate = self.gate_activation.apply(
            slice_last(activation, 3) + next_cells * self.W_cell_to_out)
        next_states = out_gate * self.activation.apply(next_cells)

        if mask:
            next_states = (mask[:, None] * next_states +
                           (1 - mask[:, None]) * states)
            next_cells = (mask[:, None] * next_cells +
                          (1 - mask[:, None]) * cells)

        return next_states, next_cells
Project: seq2graph    Author: masterkeywikz    | Project Source | File Source
def connect(self, inputs, noise=0, dropout=0):
        '''Create Theano variables representing the outputs of this layer.

        Parameters
        ----------
        inputs : dict of Theano expressions
            Symbolic inputs to this layer, given as a dictionary mapping string
            names to Theano expressions. Each string key should be of the form
            "{layer_name}:{output_name}" and refers to a specific output from
            a specific layer in the graph.
        noise : dict of noise values, optional
            This dictionary should map the names of outputs from this layer to
            the zero-mean, isotropic noise to add to that output. Defaults to 0,
            which does not add noise to any outputs.
        dropout : dict of dropout values, optional
            This dictionary should map the names of outputs in this layer to
            dropout values for that output. Defaults to 0, which does not drop
            out any units in any outputs.

        Returns
        -------
        outputs : dict
            A dictionary mapping names to Theano expressions for the outputs
            from this layer.
        updates : sequence of (parameter, expression) tuples
            Updates that should be performed by a Theano function that computes
            something using this layer.
        '''
        outputs, updates = self.transform(inputs)

        # transform the outputs to be a list of ordered pairs if needed.
        if isinstance(outputs, dict):
            outputs = sorted(outputs.items())
        if isinstance(outputs, (TT.TensorVariable, SS.SparseVariable)):
            outputs = [('out', outputs)]

        # set up outputs for this layer by adding noise and dropout as needed.
        rng = self.kwargs.get('trng') or RandomStreams()
        if not isinstance(noise, dict):
            noise = {self.output_name(): noise}
        if not isinstance(dropout, dict):
            dropout = {self.output_name(): dropout}
        outs = {}
        for name, expr in outputs:
            scoped = self.output_name(name)
            noisy = add_noise(expr, noise.get(scoped, 0), rng)
            dropped = add_dropout(noisy, dropout.get(scoped, 0), rng)
            outs[scoped] = dropped

        return outs, updates