Python theano.tensor module: nnet() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use theano.tensor.nnet().
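Before the extracted snippets, a minimal self-contained sketch (assuming a working Theano installation) of what calling ops from theano.tensor.nnet looks like:

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
# compile two common nnet activations into callable functions
sigmoid = theano.function([x], T.nnet.sigmoid(x))
softmax = theano.function([x], T.nnet.softmax(x))

v = np.array([[0.0, 1.0, 2.0]], dtype=theano.config.floatX)
print(sigmoid(v))  # elementwise logistic
print(softmax(v))  # each row sums to 1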

Project: deep-learning-keras-projects | Author: jasmeetsb
def batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3):
    """Apply batch normalization on x given mean, var, beta and gamma.
    """
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_test is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_test'):
        return _old_batch_normalization(x, mean, var, beta, gamma, epsilon)

    if mean.ndim == 1:
        # based on TensorFlow's default: normalize along rightmost dimension
        reduction_axes = list(range(x.ndim - 1))
    else:
        reduction_axes = [i for i in range(x.ndim) if mean.broadcastable[i]]

    return T.nnet.bn.batch_normalization_test(
        x, gamma, beta, mean, var, reduction_axes, epsilon)


# TODO remove this function when Theano without
# T.nnet.bn.batch_normalization_train is deprecated
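For reference, a minimal sketch of calling T.nnet.bn.batch_normalization_test directly, outside the Keras wrapper above. The parameter tensors are dimshuffled so their ndim matches x (shapes and epsilon here are illustrative assumptions):

import theano
import theano.tensor as T

x = T.matrix('x')                        # (batch, features)
gamma, beta = T.vector('gamma'), T.vector('beta')
mean, var = T.vector('mean'), T.vector('var')

def bcast(p):
    return p.dimshuffle('x', 0)          # (features,) -> (1, features)

out = T.nnet.bn.batch_normalization_test(
    x, bcast(gamma), bcast(beta), bcast(mean), bcast(var),
    axes=(0,), epsilon=1e-3)             # normalized over the batch axis
f = theano.function([x, gamma, beta, mean, var], out)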
Project: keras-customized | Author: ambrite
def batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3):
    '''Apply batch normalization on x given mean, var, beta and gamma.
    '''
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_test is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_test'):
        return _old_batch_normalization(x, mean, var, beta, gamma, epsilon)

    if mean.ndim == 1:
        # based on TensorFlow's default: normalize along rightmost dimension
        reduction_axes = list(range(x.ndim - 1))
    else:
        reduction_axes = [i for i in range(x.ndim) if mean.broadcastable[i]]

    return T.nnet.bn.batch_normalization_test(
        x, gamma, beta, mean, var, reduction_axes, epsilon)


# TODO remove this function when Theano without
# T.nnet.bn.batch_normalization_train is deprecated
Project: keras | Author: NVIDIA
def batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3):
    """Apply batch normalization on x given mean, var, beta and gamma.
    """
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_test is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_test'):
        return _old_batch_normalization(x, mean, var, beta, gamma, epsilon)

    if mean.ndim == 1:
        # based on TensorFlow's default: normalize along rightmost dimension
        reduction_axes = list(range(x.ndim - 1))
    else:
        reduction_axes = [i for i in range(x.ndim) if mean.broadcastable[i]]

    return T.nnet.bn.batch_normalization_test(
        x, gamma, beta, mean, var, reduction_axes, epsilon)


# TODO remove this function when Theano without
# T.nnet.bn.batch_normalization_train is deprecated
Project: keras_superpixel_pooling | Author: parag2489
def normalize_batch_in_training(x, gamma, beta,
                                reduction_axes, epsilon=1e-3):
    """Computes mean and std for batch then apply batch_normalization on batch.
    """
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_train is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_train'):
        return _old_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon)

    if gamma is None:
        if beta is None:
            gamma = ones_like(x)
        else:
            gamma = ones_like(beta)
    if beta is None:
        if gamma is None:
            beta = zeros_like(x)
        else:
            beta = zeros_like(gamma)

    normed, mean, stdinv = T.nnet.bn.batch_normalization_train(
        x, gamma, beta, reduction_axes, epsilon)

    return normed, mean, T.inv(stdinv ** 2)
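A minimal sketch of the underlying training-mode call for a 4D NCHW input; axes=(0, 2, 3) is equivalent to 'spatial' normalization, and all shapes are illustrative assumptions:

import theano
import theano.tensor as T

x = T.tensor4('x')                       # (batch, channels, rows, cols)
gamma, beta = T.vector('gamma'), T.vector('beta')

def bcast(p):
    return p.dimshuffle('x', 0, 'x', 'x')

normed, mean, invstd = T.nnet.bn.batch_normalization_train(
    x, bcast(gamma), bcast(beta), axes=(0, 2, 3), epsilon=1e-3)
var = T.inv(invstd ** 2)                 # recover the variance, as above
f = theano.function([x, gamma, beta], [normed, mean, var])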
Project: keras_superpixel_pooling | Author: parag2489
def batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3):
    """Apply batch normalization on x given mean, var, beta and gamma.
    """
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_test is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_test'):
        return _old_batch_normalization(x, mean, var, beta, gamma, epsilon)

    if gamma is None:
        gamma = ones_like(var)
    if beta is None:
        beta = zeros_like(mean)

    if mean.ndim == 1:
        # based on TensorFlow's default: normalize along rightmost dimension
        reduction_axes = list(range(x.ndim - 1))
    else:
        reduction_axes = [i for i in range(x.ndim) if mean.broadcastable[i]]

    return T.nnet.bn.batch_normalization_test(
        x, gamma, beta, mean, var, reduction_axes, epsilon)


# TODO remove this function when Theano without
# T.nnet.bn.batch_normalization_train is deprecated
Project: keras | Author: GeekLiB
def batch_normalization(x, mean, var, beta, gamma, epsilon=0.0001):
    '''Apply batch normalization on x given mean, var, beta and gamma.
    '''
    ndim = x.ndim
    dev = theano.config.device
    use_cudnn = ndim < 5 and (dev.startswith('cuda') or dev.startswith('gpu'))
    if use_cudnn:
        try:
            axis = mean.broadcastable.index(False)
            if axis != 1:
                shuffle_pattern = list(range(ndim))
                shuffle_pattern[1] = shuffle_pattern[axis]
                shuffle_pattern[axis] = 1
                x = x.dimshuffle(shuffle_pattern)
                mean = mean.dimshuffle(shuffle_pattern)
                var = var.dimshuffle(shuffle_pattern)
                beta = beta.dimshuffle(shuffle_pattern)
                gamma = gamma.dimshuffle(shuffle_pattern)
            normed = theano.sandbox.cuda.dnn.dnn_batch_normalization_test(x, gamma, beta, mean, var,
                                                                          'spatial', epsilon)
            if axis != 1:
                normed = normed.dimshuffle(shuffle_pattern)
            return normed
        except AttributeError:
            pass
        except ValueError:
            pass
    return T.nnet.bn.batch_normalization(x, gamma, beta, mean, sqrt(var + epsilon),
                                         mode='high_mem')
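The last line above falls back to the older Theano API; a minimal direct sketch of that call, assuming precomputed per-element statistics of the same shape as x:

import theano
import theano.tensor as T

x = T.matrix('x')
gamma, beta = T.matrix('gamma'), T.matrix('beta')
mean, std = T.matrix('mean'), T.matrix('std')

out = T.nnet.bn.batch_normalization(x, gamma, beta, mean, std,
                                    mode='high_mem')
f = theano.function([x, gamma, beta, mean, std], out)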


# SHAPE OPERATIONS
Project: keras | Author: GeekLiB
def elu(x, alpha=1.0):
    """ Exponential linear unit

    # Arguments
        x: Tensor to compute the activation function for.
        alpha: scalar
    """
    _assert_has_capability(T.nnet, 'elu')
    return T.nnet.elu(x, alpha)
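A quick check of the op in isolation:

import numpy as np
import theano
import theano.tensor as T

x = T.vector('x')
f = theano.function([x], T.nnet.elu(x, 1.0))
v = np.array([-1.0, 0.0, 2.0], dtype=theano.config.floatX)
print(f(v))  # elu: x for x > 0, alpha * (exp(x) - 1) otherwise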
Project: keras | Author: GeekLiB
def relu(x, alpha=0., max_value=None):
    _assert_has_capability(T.nnet, 'relu')
    x = T.nnet.relu(x, alpha)
    if max_value is not None:
        x = T.minimum(x, max_value)
    return x
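The max_value clamp turns this into a bounded ReLU; a minimal sketch of the same pattern with an example cap of 6:

import numpy as np
import theano
import theano.tensor as T

x = T.vector('x')
y = T.minimum(T.nnet.relu(x, 0.), 6.)    # ReLU clipped at 6
f = theano.function([x], y)
print(f(np.array([-2., 3., 10.], dtype=theano.config.floatX)))  # [0. 3. 6.]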
Project: keras | Author: GeekLiB
def softmax(x):
    return T.nnet.softmax(x)
Project: keras | Author: GeekLiB
def softplus(x):
    return T.nnet.softplus(x)
Project: keras | Author: GeekLiB
def binary_crossentropy(output, target, from_logits=False):
    if from_logits:
        output = T.nnet.sigmoid(output)
    # avoid numerical instability with _EPSILON clipping
    output = T.clip(output, _EPSILON, 1.0 - _EPSILON)
    return T.nnet.binary_crossentropy(output, target)
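A minimal end-to-end sketch of the underlying op with the same clipping idea (the epsilon value stands in for _EPSILON and is an assumption):

import numpy as np
import theano
import theano.tensor as T

pred = T.vector('pred')      # predicted probabilities in (0, 1)
target = T.vector('target')  # binary labels
eps = 1e-7                   # stand-in for _EPSILON
loss = T.nnet.binary_crossentropy(T.clip(pred, eps, 1 - eps), target).mean()
f = theano.function([pred, target], loss)
print(f(np.array([0.9, 0.2], dtype=theano.config.floatX),
        np.array([1.0, 0.0], dtype=theano.config.floatX)))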
Project: keras | Author: GeekLiB
def sigmoid(x):
    return T.nnet.sigmoid(x)
Project: keras | Author: GeekLiB
def hard_sigmoid(x):
    return T.nnet.hard_sigmoid(x)
Project: keras | Author: GeekLiB
def deconv2d(x, kernel, output_shape, strides=(1, 1),
             border_mode='valid',
             dim_ordering='default',
             image_shape=None, filter_shape=None):
    '''2D deconvolution (transposed convolution).

    # Arguments
        kernel: kernel tensor.
        output_shape: desired dimensions of output.
        strides: strides tuple.
        border_mode: string, "same" or "valid".
        dim_ordering: "tf" or "th".
            Whether to use Theano or TensorFlow dimension ordering
            in inputs/kernels/outputs.
    '''
    flip_filters = False
    if dim_ordering == 'default':
        dim_ordering = image_dim_ordering()
    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))

    x = _preprocess_conv2d_input(x, dim_ordering)
    kernel = _preprocess_conv2d_kernel(kernel, dim_ordering)
    kernel = kernel.dimshuffle((1, 0, 2, 3))
    th_border_mode = _preprocess_border_mode(border_mode)
    np_kernel = kernel.eval()
    filter_shape = _preprocess_conv2d_filter_shape(dim_ordering, filter_shape)

    op = T.nnet.abstract_conv.AbstractConv2d_gradInputs(imshp=output_shape,
                                                        kshp=filter_shape,
                                                        subsample=strides,
                                                        border_mode=th_border_mode,
                                                        filter_flip=not flip_filters)
    conv_out = op(kernel, x, output_shape[2:])

    conv_out = _postprocess_conv2d_output(conv_out, x, border_mode, np_kernel,
                                          strides, dim_ordering)
    return conv_out
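The AbstractConv2d_gradInputs op used above also has a functional wrapper in Theano; a minimal transposed-convolution sketch through it (all shapes are illustrative assumptions):

import theano
import theano.tensor as T
from theano.tensor.nnet.abstract_conv import conv2d_grad_wrt_inputs

x = T.tensor4('x')  # (1, 16, 8, 8): feature map to upsample
w = T.tensor4('w')  # (16, 3, 5, 5): forward-convolution filter layout
y = conv2d_grad_wrt_inputs(x, w, input_shape=(1, 3, 12, 12),
                           border_mode='valid', subsample=(1, 1))
f = theano.function([x, w], y)  # output shape: (1, 3, 12, 12)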
Project: deep-learning-keras-projects | Author: jasmeetsb
def normalize_batch_in_training(x, gamma, beta,
                                reduction_axes, epsilon=1e-3):
    """Computes mean and std for batch then apply batch_normalization on batch.
    """
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_train is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_train'):
        return _old_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon)

    normed, mean, stdinv = T.nnet.bn.batch_normalization_train(
        x, gamma, beta, reduction_axes, epsilon)

    return normed, mean, T.inv(stdinv ** 2)
Project: deep-learning-keras-projects | Author: jasmeetsb
def elu(x, alpha=1.0):
    """ Exponential linear unit

    # Arguments
        x: Tensor to compute the activation function for.
        alpha: scalar
    """
    _assert_has_capability(T.nnet, 'elu')
    return T.nnet.elu(x, alpha)
Project: deep-learning-keras-projects | Author: jasmeetsb
def softmax(x):
    return T.nnet.softmax(x)
Project: deep-learning-keras-projects | Author: jasmeetsb
def softplus(x):
    return T.nnet.softplus(x)
Project: deep-learning-keras-projects | Author: jasmeetsb
def categorical_crossentropy(output, target, from_logits=False):
    if from_logits:
        output = T.nnet.softmax(output)
    else:
        # scale preds so that the class probas of each sample sum to 1
        output /= output.sum(axis=-1, keepdims=True)
    # avoid numerical instability with _EPSILON clipping
    output = T.clip(output, _EPSILON, 1.0 - _EPSILON)
    return T.nnet.categorical_crossentropy(output, target)
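A minimal sketch of the op itself; it accepts integer class labels as well as one-hot targets:

import numpy as np
import theano
import theano.tensor as T

probs = T.matrix('probs')     # each row a probability distribution
labels = T.ivector('labels')  # integer class indices
loss = T.nnet.categorical_crossentropy(probs, labels).mean()
f = theano.function([probs, labels], loss)
print(f(np.array([[0.7, 0.2, 0.1]], dtype=theano.config.floatX),
        np.array([0], dtype='int32')))  # -log(0.7)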
Project: deep-learning-keras-projects | Author: jasmeetsb
def binary_crossentropy(output, target, from_logits=False):
    if from_logits:
        output = T.nnet.sigmoid(output)
    # avoid numerical instability with _EPSILON clipping
    output = T.clip(output, _EPSILON, 1.0 - _EPSILON)
    return T.nnet.binary_crossentropy(output, target)
Project: deep-learning-keras-projects | Author: jasmeetsb
def sigmoid(x):
    return T.nnet.sigmoid(x)
Project: keras-customized | Author: ambrite
def normalize_batch_in_training(x, gamma, beta,
                                reduction_axes, epsilon=1e-3):
    '''Computes mean and std for the batch, then applies batch_normalization on the batch.
    '''
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_train is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_train'):
        return _old_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon)

    normed, mean, stdinv = T.nnet.bn.batch_normalization_train(
        x, gamma, beta, reduction_axes, epsilon)

    return normed, mean, T.inv(stdinv ** 2)
Project: keras-customized | Author: ambrite
def _old_normalize_batch_in_training(x, gamma, beta,
                                     reduction_axes, epsilon=1e-3):
    '''Computes mean and std for the batch, then applies batch_normalization on the batch.
    '''
    dev = theano.config.device
    use_cudnn = ndim(x) < 5 and reduction_axes == [0, 2, 3] and (dev.startswith('cuda') or dev.startswith('gpu'))
    if use_cudnn:
        broadcast_beta = beta.dimshuffle('x', 0, 'x', 'x')
        broadcast_gamma = gamma.dimshuffle('x', 0, 'x', 'x')
        try:
            normed, mean, stdinv = theano.sandbox.cuda.dnn.dnn_batch_normalization_train(
                x, broadcast_gamma, broadcast_beta, 'spatial', epsilon)
            var = T.inv(stdinv ** 2)
            return normed, T.flatten(mean), T.flatten(var)
        except AttributeError:
            pass

    var = x.var(reduction_axes)
    mean = x.mean(reduction_axes)

    target_shape = []
    for axis in range(ndim(x)):
        if axis in reduction_axes:
            target_shape.append(1)
        else:
            target_shape.append(x.shape[axis])
    target_shape = T.stack(*target_shape)

    broadcast_mean = T.reshape(mean, target_shape)
    broadcast_var = T.reshape(var, target_shape)
    broadcast_beta = T.reshape(beta, target_shape)
    broadcast_gamma = T.reshape(gamma, target_shape)
    normed = batch_normalization(x, broadcast_mean, broadcast_var,
                                 broadcast_beta, broadcast_gamma,
                                 epsilon)
    return normed, mean, var


# TODO remove this if statement when Theano without
# T.nnet.bn.batch_normalization_test is deprecated
Project: keras-customized | Author: ambrite
def elu(x, alpha=1.0):
    """ Exponential linear unit

    # Arguments
        x: Tensor to compute the activation function for.
        alpha: scalar
    """
    _assert_has_capability(T.nnet, 'elu')
    return T.nnet.elu(x, alpha)
Project: keras-customized | Author: ambrite
def softmax(x):
    return T.nnet.softmax(x)
Project: keras-customized | Author: ambrite
def softplus(x):
    return T.nnet.softplus(x)
Project: keras-customized | Author: ambrite
def categorical_crossentropy(output, target, from_logits=False):
    if from_logits:
        output = T.nnet.softmax(output)
    else:
        # scale preds so that the class probas of each sample sum to 1
        output /= output.sum(axis=-1, keepdims=True)
    # avoid numerical instability with _EPSILON clipping
    output = T.clip(output, _EPSILON, 1.0 - _EPSILON)
    return T.nnet.categorical_crossentropy(output, target)
Project: keras-customized | Author: ambrite
def binary_crossentropy(output, target, from_logits=False):
    if from_logits:
        output = T.nnet.sigmoid(output)
    # avoid numerical instability with _EPSILON clipping
    output = T.clip(output, _EPSILON, 1.0 - _EPSILON)
    return T.nnet.binary_crossentropy(output, target)
Project: keras-customized | Author: ambrite
def sigmoid(x):
    return T.nnet.sigmoid(x)
Project: keras-customized | Author: ambrite
def deconv2d(x, kernel, output_shape, strides=(1, 1),
             border_mode='valid',
             dim_ordering='default',
             image_shape=None, filter_shape=None):
    '''2D deconvolution (transposed convolution).

    # Arguments
        kernel: kernel tensor.
        output_shape: desired dimensions of output.
        strides: strides tuple.
        border_mode: string, "same" or "valid".
        dim_ordering: "tf" or "th".
            Whether to use Theano or TensorFlow dimension ordering
            in inputs/kernels/outputs.
    '''
    flip_filters = False
    if dim_ordering == 'default':
        dim_ordering = image_dim_ordering()
    if dim_ordering not in {'th', 'tf'}:
        raise ValueError('Unknown dim_ordering ' + dim_ordering)

    x = _preprocess_conv2d_input(x, dim_ordering)
    kernel = _preprocess_conv2d_kernel(kernel, dim_ordering)
    kernel = kernel.dimshuffle((1, 0, 2, 3))
    th_border_mode = _preprocess_border_mode(border_mode)
    np_kernel = kernel.eval()
    filter_shape = _preprocess_conv2d_filter_shape(dim_ordering, filter_shape)
    filter_shape = tuple(filter_shape[i] for i in (1, 0, 2, 3))

    op = T.nnet.abstract_conv.AbstractConv2d_gradInputs(imshp=output_shape,
                                                        kshp=filter_shape,
                                                        subsample=strides,
                                                        border_mode=th_border_mode,
                                                        filter_flip=not flip_filters)
    conv_out = op(kernel, x, output_shape[2:])

    conv_out = _postprocess_conv2d_output(conv_out, x, border_mode, np_kernel,
                                          strides, dim_ordering)
    return conv_out
Project: reading-text-in-the-wild | Author: mathDR
def relu(x, alpha=0., max_value=None):
    assert hasattr(T.nnet, 'relu'), ('It looks like your version of '
                                     'Theano is out of date. '
                                     'Install the latest version with:\n'
                                     'pip install git+git://github.com/Theano/Theano.git --upgrade --no-deps')
    x = T.nnet.relu(x, alpha)
    if max_value is not None:
        x = T.minimum(x, max_value)
    return x
Project: reading-text-in-the-wild | Author: mathDR
def softmax(x):
    return T.nnet.softmax(x)
Project: reading-text-in-the-wild | Author: mathDR
def softplus(x):
    return T.nnet.softplus(x)
Project: reading-text-in-the-wild | Author: mathDR
def categorical_crossentropy(output, target, from_logits=False):
    if from_logits:
        output = T.nnet.softmax(output)
    else:
        # scale preds so that the class probas of each sample sum to 1
        output /= output.sum(axis=-1, keepdims=True)
    # avoid numerical instability with _EPSILON clipping
    output = T.clip(output, _EPSILON, 1.0 - _EPSILON)
    return T.nnet.categorical_crossentropy(output, target)
Project: reading-text-in-the-wild | Author: mathDR
def binary_crossentropy(output, target, from_logits=False):
    if from_logits:
        output = T.nnet.sigmoid(output)
    # avoid numerical instability with _EPSILON clipping
    output = T.clip(output, _EPSILON, 1.0 - _EPSILON)
    return T.nnet.binary_crossentropy(output, target)
Project: reading-text-in-the-wild | Author: mathDR
def hard_sigmoid(x):
    return T.nnet.hard_sigmoid(x)
Project: pyrl | Author: frsong
def relu(x):
    return tensor.nnet.relu(x)
Project: pyrl | Author: frsong
def relu(x):
    return tensor.switch(x > 0, x, 0)

#def relu(x):
#    return tensor.nnet.relu(x)
#    #return tensor.switch(x > upper, upper, tensor.nnet.relu(x))
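Both variants compute max(x, 0); a quick sketch checking that they agree:

import numpy as np
import theano
from theano import tensor

x = tensor.vector('x')
f_nnet = theano.function([x], tensor.nnet.relu(x))
f_switch = theano.function([x], tensor.switch(x > 0, x, 0))

v = np.array([-2.0, 0.0, 3.0], dtype=theano.config.floatX)
assert np.allclose(f_nnet(v), f_switch(v))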
Project: deep-motion-analysis | Author: Brimborough
def __init__(self, rng, f='ReLU', g=lambda x: x, params=None):
    if f == 'ReLU':
        if hasattr(T.nnet, 'relu'):
            self.f = T.nnet.relu
        else:
            self.f = lambda x: T.switch(x < 0, 0, x)
        self.g = lambda x: x
    elif f == 'PReLU':
        # Avoids dying ReLU units
        if hasattr(T.nnet, 'relu'):
            self.f = lambda x: T.nnet.relu(x, alpha=0.01)
        else:
            # leaky slope 0.01 matches the alpha used above
            self.f = lambda x: T.switch(x <= 0, 0.01 * x, x)
        self.g = lambda x: x
    elif f == 'tanh':
        self.f = T.tanh
        self.g = T.arctanh
    elif f == 'sigmoid':
        self.f = T.nnet.sigmoid
        self.g = lambda x: x
    elif f == 'softmax':
        self.f = T.nnet.softmax
        self.g = lambda x: x
    elif f == 'softplus':
        self.f = T.nnet.softplus
        self.g = lambda x: x
    elif f == 'identity':
        self.f = lambda x: x
        self.g = lambda x: x
    else:
        self.f = f
        self.g = g

    self.params = [] if params is None else params
Project: keras | Author: NVIDIA
def normalize_batch_in_training(x, gamma, beta,
                                reduction_axes, epsilon=1e-3):
    """Computes mean and std for batch then apply batch_normalization on batch.
    """
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_train is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_train'):
        return _old_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon)

    normed, mean, stdinv = T.nnet.bn.batch_normalization_train(
        x, gamma, beta, reduction_axes, epsilon)

    return normed, mean, T.inv(stdinv ** 2)
Project: keras | Author: NVIDIA
def elu(x, alpha=1.0):
    """ Exponential linear unit

    # Arguments
        x: Tensor to compute the activation function for.
        alpha: scalar
    """
    _assert_has_capability(T.nnet, 'elu')
    return T.nnet.elu(x, alpha)
Project: keras | Author: NVIDIA
def softmax(x):
    return T.nnet.softmax(x)
Project: keras | Author: NVIDIA
def softplus(x):
    return T.nnet.softplus(x)
Project: keras | Author: NVIDIA
def categorical_crossentropy(output, target, from_logits=False):
    if from_logits:
        output = T.nnet.softmax(output)
    else:
        # scale preds so that the class probas of each sample sum to 1
        output /= output.sum(axis=-1, keepdims=True)
    # avoid numerical instability with _EPSILON clipping
    output = T.clip(output, _EPSILON, 1.0 - _EPSILON)
    return T.nnet.categorical_crossentropy(output, target)
Project: keras | Author: NVIDIA
def binary_crossentropy(output, target, from_logits=False):
    if from_logits:
        output = T.nnet.sigmoid(output)
    # avoid numerical instability with _EPSILON clipping
    output = T.clip(output, _EPSILON, 1.0 - _EPSILON)
    return T.nnet.binary_crossentropy(output, target)
Project: keras | Author: NVIDIA
def sigmoid(x):
    return T.nnet.sigmoid(x)
Project: keras_superpixel_pooling | Author: parag2489
def elu(x, alpha=1.0):
    """ Exponential linear unit

    # Arguments
        x: Tensor to compute the activation function for.
        alpha: scalar
    """
    _assert_has_capability(T.nnet, 'elu')
    return T.nnet.elu(x, alpha)
Project: keras_superpixel_pooling | Author: parag2489
def softmax(x):
    return T.nnet.softmax(x)
Project: keras_superpixel_pooling | Author: parag2489
def softplus(x):
    return T.nnet.softplus(x)
Project: keras_superpixel_pooling | Author: parag2489
def categorical_crossentropy(output, target, from_logits=False):
    if from_logits:
        output = T.nnet.softmax(output)
    else:
        # scale preds so that the class probas of each sample sum to 1
        output /= output.sum(axis=-1, keepdims=True)
    # avoid numerical instability with _EPSILON clipping
    output = T.clip(output, _EPSILON, 1.0 - _EPSILON)
    return T.nnet.categorical_crossentropy(output, target)