Python theano.tensor module: std() usage examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use theano.tensor.std().

Project: lemontree | Author: khshim
def get_output(self, input_):
        """
        This function overrides the parent's implementation.
        Creates a symbolic expression that normalizes each data point
        (row) of the input to zero mean and unit variance, then applies
        the learned scale (gamma) and shift (beta).

        Math Expression
        ---------------
        y = gamma * (x - mean(x)) / (std(x) + eps) + beta
            mean and std are computed per data point, over axis 1.

        Parameters
        ----------
        input_: TensorVariable

        Returns
        -------
        TensorVariable
        """
        dim_mean = T.mean(input_, axis=1)
        dim_std = T.std(input_, axis=1)
        return self.gamma * (input_ - dim_mean.dimshuffle(0, 'x')) / (dim_std.dimshuffle(0, 'x') + 1e-7) + self.beta
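A minimal sketch (not part of the lemontree source) that compiles the same per-row normalization with theano.function and checks it numerically; gamma and beta are replaced by the constants 1 and 0 for brevity:

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
mean = T.mean(x, axis=1).dimshuffle(0, 'x')
std = T.std(x, axis=1).dimshuffle(0, 'x')
normalize = theano.function([x], (x - mean) / (std + 1e-7))

data = np.random.randn(4, 8).astype(theano.config.floatX)
out = normalize(data)
print(out.mean(axis=1))  # ~0 for every row
print(out.std(axis=1))   # ~1 for every row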
Project: keras_superpixel_pooling | Author: parag2489
def normalize_batch_in_training(x, gamma, beta,
                                reduction_axes, epsilon=1e-3):
    """Computes mean and std for batch then apply batch_normalization on batch.
    """
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_train is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_train'):
        return _old_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon)

    if gamma is None:
        if beta is None:
            gamma = ones_like(x)
        else:
            gamma = ones_like(beta)
    if beta is None:
        if gamma is None:
            beta = zeros_like(x)
        else:
            beta = zeros_like(gamma)

    normed, mean, stdinv = T.nnet.bn.batch_normalization_train(
        x, gamma, beta, reduction_axes, epsilon)

    return normed, mean, T.inv(stdinv ** 2)
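A hedged usage sketch of the fast path this wrapper relies on, assuming a Theano release that ships T.nnet.bn.batch_normalization_train (0.9+); gamma and beta get a broadcastable batch axis so they line up with x:

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
gamma = theano.shared(np.ones(8, dtype=theano.config.floatX)).dimshuffle('x', 0)
beta = theano.shared(np.zeros(8, dtype=theano.config.floatX)).dimshuffle('x', 0)

normed, mean, stdinv = T.nnet.bn.batch_normalization_train(
    x, gamma, beta, axes=(0,), epsilon=1e-3)
f = theano.function([x], [normed, mean, T.inv(stdinv ** 2)])  # variance = 1/stdinv^2
out, mu, var = f(np.random.randn(16, 8).astype(theano.config.floatX))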
Project: keras_superpixel_pooling | Author: parag2489
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
    if dtype is None:
        dtype = floatx()
    if seed is None:
        seed = np.random.randint(1, 10e6)
    rng = RandomStreams(seed=seed)
    normal_tensor = rng.normal(size=shape, avg=mean, std=stddev, dtype=dtype)
    # Poor man's truncated normal: we literally clip the tensor
    return T.clip(normal_tensor, mean - 2 * stddev, mean + 2 * stddev)
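A quick numeric check of the clipping (a sketch; it assumes the snippet's module-level imports, np/RandomStreams/floatx, are in scope): no sample should fall outside mean +/- 2*stddev:

samples = truncated_normal((10000,), mean=0.0, stddev=1.0, seed=42).eval()
assert samples.min() >= -2.0 and samples.max() <= 2.0
print(samples.mean(), samples.std())  # ~0, and slightly below 1 due to clipping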


# Theano implementation of CTC
# Used with permission from Shawn Tan
# https://github.com/shawntan/
# Note that tensorflow's native CTC code is significantly
# faster than this
Project: InnerOuterRNN | Author: Chemoinformatics
def normalize_batch_in_training(x, gamma, beta,
                                reduction_axes, epsilon=0.0001):
    '''Computes mean and std for the batch, then applies batch_normalization to it.
    '''
    var = x.var(reduction_axes)
    mean = x.mean(reduction_axes)

    target_shape = []
    for axis in range(ndim(x)):
        if axis in reduction_axes:
            target_shape.append(1)
        else:
            target_shape.append(x.shape[axis])
    target_shape = T.stack(*target_shape)

    broadcast_mean = T.reshape(mean, target_shape)
    broadcast_var = T.reshape(var, target_shape)
    broadcast_beta = T.reshape(beta, target_shape)
    broadcast_gamma = T.reshape(gamma, target_shape)
    normed = batch_normalization(x, broadcast_mean, broadcast_var,
                                 broadcast_beta, broadcast_gamma,
                                 epsilon)
    return normed, mean, var
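A NumPy sketch (not from the repository) of what the target_shape loop achieves: reduced axes collapse to length 1 so the statistics broadcast back over x:

import numpy as np

x = np.random.randn(16, 3, 8, 8).astype('float32')
reduction_axes = (0, 2, 3)
mean, var = x.mean(axis=reduction_axes), x.var(axis=reduction_axes)  # shape (3,)
target_shape = tuple(1 if a in reduction_axes else s
                     for a, s in enumerate(x.shape))                 # (1, 3, 1, 1)
normed = (x - mean.reshape(target_shape)) / np.sqrt(var.reshape(target_shape) + 1e-4)
print(normed.mean(axis=reduction_axes))  # ~0 per channel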
Project: lowrank-highwaynetwork | Author: Avmb
def output(self, x):
        x_mean = T.mean(x, axis=0)
        x_std = T.std(x, axis=0)
        rv = (x - x_mean) / (x_std + self.epsilon)
        if self.with_scale:
            rv = rv * self.S
        if self.with_bias:
            rv = rv + self.B

        new_mean = self.tau * x_mean + (1.0 - self.tau) * self.Mean
        new_std = self.tau * x_std + (1.0 - self.tau) * self.Std
        self.register_training_updates((self.Mean, new_mean),
                                       (self.Std, new_std))

        return rv
Project: keras | Author: GeekLiB
def std(x, axis=None, keepdims=False):
    return T.std(x, axis=axis, keepdims=keepdims)
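A sanity check (a sketch, not from the Keras test suite): Theano's T.std defaults to the population standard deviation, so it agrees with np.std at ddof=0:

import numpy as np
import theano.tensor as T

data = np.random.randn(5, 7).astype('float32')
print(T.std(data, axis=1).eval())    # constants fold straight through eval()
print(np.std(data, axis=1, ddof=0))  # identical up to float rounding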
Project: keras | Author: GeekLiB
def normalize_batch_in_training(x, gamma, beta,
                                reduction_axes, epsilon=0.0001):
    '''Computes mean and std for the batch, then applies batch_normalization to it.
    '''
    dev = theano.config.device
    use_cudnn = ndim(x) < 5 and reduction_axes == [0, 2, 3] and (dev.startswith('cuda') or dev.startswith('gpu'))
    if use_cudnn:
        broadcast_beta = beta.dimshuffle('x', 0, 'x', 'x')
        broadcast_gamma = gamma.dimshuffle('x', 0, 'x', 'x')
        try:
            normed, mean, stdinv = theano.sandbox.cuda.dnn.dnn_batch_normalization_train(
                x, broadcast_gamma, broadcast_beta, 'spatial', epsilon)
            var = T.inv(stdinv ** 2)
            return normed, T.flatten(mean), T.flatten(var)
        except AttributeError:
            pass

    var = x.var(reduction_axes)
    mean = x.mean(reduction_axes)

    target_shape = []
    for axis in range(ndim(x)):
        if axis in reduction_axes:
            target_shape.append(1)
        else:
            target_shape.append(x.shape[axis])
    target_shape = T.stack(*target_shape)

    broadcast_mean = T.reshape(mean, target_shape)
    broadcast_var = T.reshape(var, target_shape)
    broadcast_beta = T.reshape(beta, target_shape)
    broadcast_gamma = T.reshape(gamma, target_shape)
    normed = batch_normalization(x, broadcast_mean, broadcast_var,
                                 broadcast_beta, broadcast_gamma,
                                 epsilon)
    return normed, mean, var
Project: keras | Author: GeekLiB
def random_normal(shape, mean=0.0, std=1.0, dtype=_FLOATX, seed=None):
    if seed is None:
        seed = np.random.randint(1, 10e6)
    rng = RandomStreams(seed=seed)
    return rng.normal(size=shape, avg=mean, std=std, dtype=dtype)
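A hedged usage sketch (assuming the module's RandomStreams/_FLOATX imports): draw from the wrapper above with a fixed seed and check the empirical statistics:

draws = random_normal((100000,), mean=2.0, std=0.5, seed=1234).eval()
print(draws.mean(), draws.std())  # close to 2.0 and 0.5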
Project: keraflow | Author: ipod825
def random_normal(self, shape, mean=0.0, std=1.0, dtype=_FLOATX):
        return self.rng.normal(size=shape, avg=mean, std=std, dtype=dtype)
Project: keraflow | Author: ipod825
def std(self, x, axis=None, keepdims=False):
        return T.std(x, axis=axis, keepdims=keepdims)
Project: deep-learning-keras-projects | Author: jasmeetsb
def std(x, axis=None, keepdims=False):
    return T.std(x, axis=axis, keepdims=keepdims)
Project: deep-learning-keras-projects | Author: jasmeetsb
def normalize_batch_in_training(x, gamma, beta,
                                reduction_axes, epsilon=1e-3):
    """Computes mean and std for batch then apply batch_normalization on batch.
    """
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_train is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_train'):
        return _old_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon)

    normed, mean, stdinv = T.nnet.bn.batch_normalization_train(
        x, gamma, beta, reduction_axes, epsilon)

    return normed, mean, T.inv(stdinv ** 2)
Project: deep-learning-keras-projects | Author: jasmeetsb
def random_normal(shape, mean=0.0, std=1.0, dtype=None, seed=None):
    if dtype is None:
        dtype = floatx()
    if seed is None:
        seed = np.random.randint(1, 10e6)
    rng = RandomStreams(seed=seed)
    return rng.normal(size=shape, avg=mean, std=std, dtype=dtype)
Project: NNBuilder | Author: aeloyq
def std(self, t, axis, keepdims):
        return T.std(t, axis, keepdims=keepdims)
Project: NNBuilder | Author: aeloyq
def normal(self, shape, avg, std, ndim, dtype):
        return kernel.randomgraph.normal(shape, avg, std, ndim, dtype=dtype)
Project: drmad | Author: bigaidream-projects
def stat_extract(modelStats, params, trackLayers):
    stats = ['mean', 'std', 'max', 'const', 'spars', 'wmean', 'wstd', 'wmax',
             'rnoise', 'rnstd', 'bias', 'bstd', 'a', 'astd']

    for i, param in enumerate(filter(lambda name: name in stats, params.activTrack)):
        trackLayers[param] = np.append(trackLayers[param],
                                       np.array([modelStats[i]]), axis=0)
    return trackLayers
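A toy driver for stat_extract; every name below (Params, the activTrack layout, the per-stat vectors) is a hypothetical stand-in for drmad's real objects, kept just small enough to show the contract:

import numpy as np

class Params:                       # hypothetical minimal stand-in
    activTrack = ['mean', 'std']    # names to track, in modelStats order

trackLayers = {'mean': np.empty((0, 3)), 'std': np.empty((0, 3))}
modelStats = [np.zeros(3), np.ones(3)]           # one vector per tracked stat
trackLayers = stat_extract(modelStats, Params(), trackLayers)
print(trackLayers['mean'].shape)                 # (1, 3): one row appended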



Project: lemontree | Author: khshim
def __init__(self, input_shape, momentum=0.99, mean_only=False):
        """
        This function initializes the class.
        Input is a 2D tensor, output is a 2D tensor.
        For long experiments use momentum = 0.99; otherwise, 0.9.
        If mean_only = True, the Theano / cuDNN batch normalization path is not used.

        Parameters
        ----------
        input_shape: tuple
            a tuple with a single value, i.e., (input dim,)
            since the input shape equals the output shape, there is no output shape argument.
        momentum: float, default: 0.99
            a float value used to average the inference mean and variance
            with an exponential moving average.
        mean_only: bool, default: False
            a bool value; if True, only subtract the mean and do not divide by the std.
        """
        super(BatchNormalization1DLayer, self).__init__()
        # check asserts
        assert isinstance(input_shape, tuple) and len(input_shape) == 1, '"input_shape" should be a tuple with a single value.'
        assert momentum > 0 and momentum < 1, '"momentum" should be a float value in range (0, 1).'
        assert isinstance(mean_only, bool), '"mean_only" should be a bool value.'

        # set members
        self.input_shape = input_shape
        self.momentum = momentum
        self.mean_only = mean_only
        self.updates = OrderedDict()
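A standalone sketch of the exponential moving average the momentum docstring describes (the shared-variable plumbing of the real layer is omitted; the names here are illustrative):

import numpy as np

def ema_update(running, batch_value, momentum=0.99):
    # inference statistic <- momentum * old + (1 - momentum) * new batch statistic
    return momentum * running + (1.0 - momentum) * batch_value

running_mean = np.zeros(4, dtype='float32')
for batch_mean in np.random.randn(100, 4).astype('float32'):
    running_mean = ema_update(running_mean, batch_mean)
print(running_mean)  # drifts slowly toward the batch means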
Project: lemontree | Author: khshim
def __init__(self, input_shape, momentum=0.99, mean_only=False):
        """
        This function initializes the class.
        Input is a 4D tensor, output is a 4D tensor.
        For long experiments use momentum = 0.99; otherwise, 0.9.
        If mean_only = True, the Theano / cuDNN batch normalization path is not used.

        Parameters
        ----------
        input_shape: tuple
            a tuple of three values, i.e., (input channel, input width, input height)
            since the input shape equals the output shape, there is no output shape argument.
        momentum: float, default: 0.99
            a float value used to average the inference mean and variance
            with an exponential moving average.
        mean_only: bool, default: False
            a bool value; if True, only subtract the mean and do not divide by the std.
        """
        super(BatchNormalization3DLayer, self).__init__()
        # check asserts
        assert isinstance(input_shape, tuple) and len(input_shape) == 3, '"input_shape" should be a tuple with three values.'
        assert momentum > 0 and momentum < 1, '"momentum" should be a float value in range (0, 1).'
        assert isinstance(mean_only, bool), '"mean_only" should be a bool value.'

        # set members
        self.input_shape = input_shape
        self.momentum = momentum
        self.mean_only = mean_only
        self.updates = OrderedDict()
Project: keras-customized | Author: ambrite
def std(x, axis=None, keepdims=False):
    return T.std(x, axis=axis, keepdims=keepdims)
Project: keras-customized | Author: ambrite
def normalize_batch_in_training(x, gamma, beta,
                                reduction_axes, epsilon=1e-3):
    '''Computes mean and std for the batch, then applies batch_normalization to it.
    '''
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_train is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_train'):
        return _old_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon)

    normed, mean, stdinv = T.nnet.bn.batch_normalization_train(
        x, gamma, beta, reduction_axes, epsilon)

    return normed, mean, T.inv(stdinv ** 2)
Project: keras-customized | Author: ambrite
def _old_normalize_batch_in_training(x, gamma, beta,
                                     reduction_axes, epsilon=1e-3):
    '''Computes mean and std for the batch, then applies batch_normalization to it.
    '''
    dev = theano.config.device
    use_cudnn = ndim(x) < 5 and reduction_axes == [0, 2, 3] and (dev.startswith('cuda') or dev.startswith('gpu'))
    if use_cudnn:
        broadcast_beta = beta.dimshuffle('x', 0, 'x', 'x')
        broadcast_gamma = gamma.dimshuffle('x', 0, 'x', 'x')
        try:
            normed, mean, stdinv = theano.sandbox.cuda.dnn.dnn_batch_normalization_train(
                x, broadcast_gamma, broadcast_beta, 'spatial', epsilon)
            var = T.inv(stdinv ** 2)
            return normed, T.flatten(mean), T.flatten(var)
        except AttributeError:
            pass

    var = x.var(reduction_axes)
    mean = x.mean(reduction_axes)

    target_shape = []
    for axis in range(ndim(x)):
        if axis in reduction_axes:
            target_shape.append(1)
        else:
            target_shape.append(x.shape[axis])
    target_shape = T.stack(*target_shape)

    broadcast_mean = T.reshape(mean, target_shape)
    broadcast_var = T.reshape(var, target_shape)
    broadcast_beta = T.reshape(beta, target_shape)
    broadcast_gamma = T.reshape(gamma, target_shape)
    normed = batch_normalization(x, broadcast_mean, broadcast_var,
                                 broadcast_beta, broadcast_gamma,
                                 epsilon)
    return normed, mean, var


# TODO remove this if statement when Theano without
# T.nnet.bn.batch_normalization_test is deprecated
Project: keras-customized | Author: ambrite
def random_normal(shape, mean=0.0, std=1.0, dtype=None, seed=None):
    if dtype is None:
        dtype = floatx()
    if seed is None:
        seed = np.random.randint(1, 10e6)
    rng = RandomStreams(seed=seed)
    return rng.normal(size=shape, avg=mean, std=std, dtype=dtype)
Project: LeNet5 | Author: LukaszObara
def __init__(self, input, shape, gamma=None, beta=None, epsilon=1e-6,
                 activation_fn=None):
        self.input = input
        self.shape = shape

        rng = np.random.RandomState(45)

        if gamma is None:
            gamma_values = rng.uniform(low=-1.0, high=1.0, size=shape)\
                            .astype(theano.config.floatX)
            gamma = theano.shared(name='gamma', value=gamma_values,
                                  borrow=True)

        if beta is None:
            beta_values = np.zeros(shape=shape, dtype=theano.config.floatX)
            beta = theano.shared(name='beta', value=beta_values, borrow=True)

        self.gamma = gamma
        self.beta = beta

        self.mean = T.mean(input, axis=0)
        # epsilon belongs on the std itself: std(input + epsilon) == std(input),
        # so adding it inside std() does nothing to guard against division by zero
        self.std = T.std(input, axis=0) + epsilon

        l_output = T.nnet.bn.batch_normalization(input, self.gamma, self.beta,
                                                 self.mean, self.std)

        self.output = (l_output if activation_fn is None
                       else activation_fn(l_output))

        self.params = [self.gamma, self.beta]
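A hedged usage sketch for the layer above; the class name BatchNormLayer is a hypothetical stand-in (the snippet only shows __init__), and relu is an arbitrary choice of activation:

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
layer = BatchNormLayer(input=x, shape=(32,), activation_fn=T.nnet.relu)
f = theano.function([x], layer.output)
print(f(np.random.randn(8, 32).astype(theano.config.floatX)).shape)  # (8, 32)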
Project: LeNet5 | Author: LukaszObara
def __init__(self, input, shape, gamma=None, beta=None, epsilon=1e-6,
                 activation_fn=None):
        self.input = input
        self.shape = shape

        rng = np.random.RandomState(45)

        if gamma is None:
            gamma_values = rng.uniform(low=-1.0, high=1.0, size=shape)\
                            .astype(theano.config.floatX)
            gamma = theano.shared(name='gamma', value=gamma_values,
                                  borrow=True)

        if beta is None:
            beta_values = np.zeros(shape=shape, dtype=theano.config.floatX)
            beta = theano.shared(name='beta', value=beta_values, borrow=True)

        self.gamma = gamma
        self.beta = beta

        self.mean = T.mean(input, axis=0)
        # same fix as above: epsilon must be added to the std, not inside it
        self.std = T.std(input, axis=0) + epsilon

        l_output = T.nnet.bn.batch_normalization(input, self.gamma, self.beta,
                                                 self.mean, self.std)

        self.output = (l_output if activation_fn is None
                       else activation_fn(l_output))

        self.params = [self.gamma, self.beta]
Project: reading-text-in-the-wild | Author: mathDR
def std(x, axis=None, keepdims=False):
    return T.std(x, axis=axis, keepdims=keepdims)
Project: reading-text-in-the-wild | Author: mathDR
def random_normal(shape, mean=0.0, std=1.0, dtype=_FLOATX, seed=None):
    if seed is None:
        seed = np.random.randint(10e6)
    rng = RandomStreams(seed=seed)
    return rng.normal(size=shape, avg=mean, std=std, dtype=dtype)
Project: keras | Author: NVIDIA
def std(x, axis=None, keepdims=False):
    return T.std(x, axis=axis, keepdims=keepdims)
Project: keras | Author: NVIDIA
def normalize_batch_in_training(x, gamma, beta,
                                reduction_axes, epsilon=1e-3):
    """Computes mean and std for batch then apply batch_normalization on batch.
    """
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_train is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_train'):
        return _old_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon)

    normed, mean, stdinv = T.nnet.bn.batch_normalization_train(
        x, gamma, beta, reduction_axes, epsilon)

    return normed, mean, T.inv(stdinv ** 2)
Project: keras | Author: NVIDIA
def random_normal(shape, mean=0.0, std=1.0, dtype=None, seed=None):
    if dtype is None:
        dtype = floatx()
    if seed is None:
        seed = np.random.randint(1, 10e6)
    rng = RandomStreams(seed=seed)
    return rng.normal(size=shape, avg=mean, std=std, dtype=dtype)
Project: keras_superpixel_pooling | Author: parag2489
def std(x, axis=None, keepdims=False):
    return T.std(x, axis=axis, keepdims=keepdims)
Project: keras_superpixel_pooling | Author: parag2489
def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
    if dtype is None:
        dtype = floatx()
    if seed is None:
        seed = np.random.randint(1, 10e6)
    rng = RandomStreams(seed=seed)
    return rng.normal(size=shape, avg=mean, std=stddev, dtype=dtype)
Project: Hat | Author: qiuqiangkong
def std(x, axis=None):
    return T.std(x, axis)
Project: Hat | Author: qiuqiangkong
def rng_normal(size, avg, std):
    seed = np.random.randint(10e6)
    rng = RandomStreams(seed)
    return rng.normal(size, avg, std)

# binomial distribution. p is p(y=1)
Project: ip-avsr | Author: lzuwei
def get_output_for(self, input, **kwargs):
        # compute featurewise mean and std for the minibatch
        orig_shape = input.shape
        temp = T.reshape(input, (-1, orig_shape[-1]))
        # the statistics must come from the flattened view so they broadcast
        # against temp; the original computed them on the un-reshaped input
        means = T.mean(temp, 0, dtype=input.dtype)
        stds = T.std(temp, 0)
        temp = (temp - means) / stds
        input = T.reshape(temp, orig_shape)
        return input
Project: ip-avsr | Author: lzuwei
def __init__(self, incomings, coeffs=Normal(std=0.01, mean=1.0), cropping=None, **kwargs):
        super(AdaptiveElemwiseSumLayer, self).__init__(incomings, T.add,
                                                       cropping=cropping, **kwargs)
        '''
        if isinstance(coeffs, list):
            if len(coeffs) != len(incomings):
                raise ValueError("Mismatch: got %d coeffs for %d incomings" %
                                 (len(coeffs), len(incomings)))
        else:
            coeffs = [coeffs] * len(incomings)
        '''
        self.coeffs = []
        for i in range(len(incomings)):
            coeff = theano.shared(np.float32(1.0), 'adacoeff{}'.format(i))
            self.coeffs.append(self.add_param(coeff, coeff.shape, trainable=True, scaling_param=True))
Project: statestream | Author: VolkerFischer
def std(x, axis=None, keepdims=False):
    return T.std(x, axis=axis, keepdims=keepdims)
Project: InnerOuterRNN | Author: Chemoinformatics
def std(x, axis=None, keepdims=False):
    return T.std(x, axis=axis, keepdims=keepdims)
Project: InnerOuterRNN | Author: Chemoinformatics
def random_normal(shape, mean=0.0, std=1.0, dtype=_FLOATX, seed=None):
    if seed is None:
        seed = np.random.randint(1, 10e6)
    rng = RandomStreams(seed=seed)
    return rng.normal(size=shape, avg=mean, std=std, dtype=dtype)
Project: IQA_BIECON_release | Author: jongyookim
def aggregation_fn(self, feat_vec):
        feat_avg = T.mean(feat_vec, axis=0, keepdims=True)
        return feat_avg
        # feat_std = T.std(feat_vec, axis=0, keepdims=True)
        # return T.concatenate([feat_avg, feat_std], axis=1)
Project: odin_old | Author: trungnt13
def std(x, axis=None, keepdims=False):
    return T.std(x, axis=axis, keepdims=keepdims)
Project: odin_old | Author: trungnt13
def normal(self, shape, mean, std, dtype=_FLOATX):
        return self._rng.normal(size=shape, avg=mean, std=std, dtype=dtype)
Project: odin_old | Author: trungnt13
def random_normal(shape, mean=0.0, std=1.0, dtype=_FLOATX, seed=None):
    if seed is None:
        seed = get_random_magic_seed()
    rng = RandomStreams(seed=seed)
    return rng.normal(size=shape, avg=mean, std=std, dtype=dtype)
Project: GRAN | Author: jiwoongim
def collect_statistics(self, X):
        """Updates Statistics of data"""
        stat_mean = T.mean(X, axis=0)
        stat_std  = T.std(X, axis=0)

        updates_stats = [(self.stat_mean, stat_mean), (self.stat_std, stat_std)]
        return updates_stats
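A self-contained sketch of how these update pairs plug into theano.function; stat_mean and stat_std stand in for the layer's shared variables:

import numpy as np
import theano
import theano.tensor as T

X = T.matrix('X')
stat_mean = theano.shared(np.zeros(4, dtype=theano.config.floatX))
stat_std = theano.shared(np.ones(4, dtype=theano.config.floatX))
collect = theano.function([X], [], updates=[(stat_mean, T.mean(X, axis=0)),
                                            (stat_std, T.std(X, axis=0))])
collect(np.random.randn(100, 4).astype(theano.config.floatX))
print(stat_mean.get_value(), stat_std.get_value())  # refreshed in place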
Project: GRAN | Author: jiwoongim
def conv(self, X, subsample=(2, 2), border_mode=(2, 2), atype='sigmoid', testF=False):

        ConH0 = dnn_conv(X, self.W.dimshuffle(1, 0, 2, 3), subsample=subsample, border_mode=border_mode)
        if testF:
            ConH1 = (ConH0 - self.stat_mean.dimshuffle('x', 0, 'x', 'x')) \
                                / (self.stat_std.dimshuffle('x', 0, 'x', 'x') + TINY) 
        else:
            mean    = ConH0.mean(axis=[0,2,3]).dimshuffle('x', 0, 'x', 'x')
            std     = ConH0.std( axis=[0,2,3]).dimshuffle('x', 0, 'x', 'x')
            ConH1   = (ConH0 - mean) / (std + TINY)

        ConH2 = self.eta.dimshuffle('x', 0, 'x', 'x') * ConH1 \
                                    + self.beta.dimshuffle('x', 0, 'x', 'x')

        return activation_fn_th(ConH2, atype=atype)
Project: GRAN | Author: jiwoongim
def collect_statistics(self, X):
        """ updates statistics on data"""
        stat_mean = T.mean(X, axis=0)
        stat_std  = T.std(X, axis=0)

        updates_stats = [(self.stat_mean, stat_mean), (self.stat_std, stat_std)]
        return updates_stats
Project: GRAN | Author: jiwoongim
def post_batch_norm(self, X, testF=False):

        Z = T.dot(X, self.W) + self.zbias   
        if testF:
            Z       = (Z - self.stat_mean) / (self.stat_std + TINY)
        else:
            mean    = Z.mean(axis=0)
            std     = Z.std( axis=0)
            Z       = (Z - mean) / (std + TINY)

        return Z
Project: lowrank-highwaynetwork | Author: Avmb
def output(self, x):
        d_0 = global_theano_rand.binomial(x.shape, p=1-self.d_p_0, dtype=FLOATX)
        d_1 = global_theano_rand.binomial((x.shape[0], self.projection_dim), p=1-self.d_p_1, dtype=FLOATX)

        tl_raw = T.dot(x * d_0, self.W_tl)
        hl_raw = T.dot(x * d_0, self.W_hl)
        tl_mean = T.mean(tl_raw, axis=0)
        hl_mean = T.mean(hl_raw, axis=0)
        tl_std = T.std(tl_raw, axis=0)
        hl_std = T.std(hl_raw, axis=0)
        tl = (tl_raw - tl_mean) / (tl_std + self.epsilon)
        hl = (hl_raw - hl_mean) / (hl_std + self.epsilon)
        new_Mean_tl = self.tau * tl_mean + (1.0 - self.tau) * self.Mean_tl
        new_Mean_hl = self.tau * hl_mean + (1.0 - self.tau) * self.Mean_hl
        new_Std_tl = self.tau * tl_std + (1.0 - self.tau) * self.Std_tl
        new_Std_hl = self.tau * hl_std + (1.0 - self.tau) * self.Std_hl

        tr_raw = (tl * d_1).dot(self.W_tr) + (x * d_0 * self.D_h)
        hr_raw = (hl * d_1).dot(self.W_hr) + (x * d_0 * self.D_t)
        tr_mean = T.mean(tr_raw, axis=0)
        hr_mean = T.mean(hr_raw, axis=0)
        tr_std = T.std(tr_raw, axis=0)
        hr_std = T.std(hr_raw, axis=0)
        tr = (tr_raw - tr_mean) / (tr_std + self.epsilon)
        hr = (hr_raw - hr_mean) / (hr_std + self.epsilon)
        new_Mean_tr = self.tau * tr_mean + (1.0 - self.tau) * self.Mean_tr
        new_Mean_hr = self.tau * hr_mean + (1.0 - self.tau) * self.Mean_hr
        new_Std_tr = self.tau * tr_std + (1.0 - self.tau) * self.Std_tr
        new_Std_hr = self.tau * hr_std + (1.0 - self.tau) * self.Std_hr

        t  = T.nnet.sigmoid(tr * self.S_t + self.B_t)
        h  = self._act(hr * self.S_h + self.B_h)
        rv = h * t + x * (1 - t)

        self.register_training_updates((self.Mean_tl, new_Mean_tl), 
                                       (self.Mean_hl, new_Mean_hl), 
                                       (self.Mean_tr, new_Mean_tr), 
                                       (self.Mean_hr, new_Mean_hr),
                                       (self.Std_tl, new_Std_tl), 
                                       (self.Std_hl, new_Std_hl), 
                                       (self.Std_tr, new_Std_tr), 
                                       (self.Std_hr, new_Std_hr))

        return rv
Project: lowrank-highwaynetwork | Author: Avmb
def output(self, x):
        d_0 = global_theano_rand.binomial(x.shape, p=1-self.d_p_0, dtype=FLOATX)
        d_1 = global_theano_rand.binomial((x.shape[0], self.projection_dim), p=1-self.d_p_1, dtype=FLOATX)

        tl_raw = T.dot(x * d_0, self.W_tl)
        hl_raw = T.dot(x * d_0, self.W_hl)
        tl_mean = T.mean(tl_raw, axis=0)
        hl_mean = T.mean(hl_raw, axis=0)
        tl_std = T.std(tl_raw, axis=0)
        hl_std = T.std(hl_raw, axis=0)
        tl = (tl_raw - tl_mean) / (tl_std + self.epsilon)
        hl = (hl_raw - hl_mean) / (hl_std + self.epsilon)
        new_Mean_tl = self.tau * tl_mean + (1.0 - self.tau) * self.Mean_tl
        new_Mean_hl = self.tau * hl_mean + (1.0 - self.tau) * self.Mean_hl
        new_Std_tl = self.tau * tl_std + (1.0 - self.tau) * self.Std_tl
        new_Std_hl = self.tau * hl_std + (1.0 - self.tau) * self.Std_hl

        tr_raw = (tl * d_1).dot(self.W_tr)
        hr_raw = (hl * d_1).dot(self.W_hr)
        tr_mean = T.mean(tr_raw, axis=0)
        hr_mean = T.mean(hr_raw, axis=0)
        tr_std = T.std(tr_raw, axis=0)
        hr_std = T.std(hr_raw, axis=0)
        tr = (tr_raw - tr_mean) / (tr_std + self.epsilon)
        hr = (hr_raw - hr_mean) / (hr_std + self.epsilon)
        new_Mean_tr = self.tau * tr_mean + (1.0 - self.tau) * self.Mean_tr
        new_Mean_hr = self.tau * hr_mean + (1.0 - self.tau) * self.Mean_hr
        new_Std_tr = self.tau * tr_std + (1.0 - self.tau) * self.Std_tr
        new_Std_hr = self.tau * hr_std + (1.0 - self.tau) * self.Std_hr

        t  = T.nnet.sigmoid(tr * self.S_t + self.B_t)
        h  = self._act(hr * self.S_h + self.B_h)
        rv = h * t + x * (1 - t)

        self.register_training_updates((self.Mean_tl, new_Mean_tl), 
                                       (self.Mean_hl, new_Mean_hl), 
                                       (self.Mean_tr, new_Mean_tr), 
                                       (self.Mean_hr, new_Mean_hr),
                                       (self.Std_tl, new_Std_tl), 
                                       (self.Std_hl, new_Std_hl), 
                                       (self.Std_tr, new_Std_tr), 
                                       (self.Std_hr, new_Std_hr))

        return rv
Project: deep-learning-keras-projects | Author: jasmeetsb
def _old_normalize_batch_in_training(x, gamma, beta,
                                     reduction_axes, epsilon=1e-3):
    """Computes mean and std for batch then apply batch_normalization on batch.
    """
    dev = theano.config.device
    use_cudnn = ndim(x) < 5 and reduction_axes == [0, 2, 3] and (dev.startswith('cuda') or dev.startswith('gpu'))
    if use_cudnn:
        broadcast_beta = beta.dimshuffle('x', 0, 'x', 'x')
        broadcast_gamma = gamma.dimshuffle('x', 0, 'x', 'x')
        try:
            normed, mean, stdinv = theano.sandbox.cuda.dnn.dnn_batch_normalization_train(
                x, broadcast_gamma, broadcast_beta, 'spatial', epsilon)
            normed = theano.tensor.as_tensor_variable(normed)
            mean = theano.tensor.as_tensor_variable(mean)
            stdinv = theano.tensor.as_tensor_variable(stdinv)
            var = T.inv(stdinv ** 2)
            return normed, T.flatten(mean), T.flatten(var)
        except AttributeError:
            pass

    var = x.var(reduction_axes)
    mean = x.mean(reduction_axes)

    target_shape = []
    for axis in range(ndim(x)):
        if axis in reduction_axes:
            target_shape.append(1)
        else:
            target_shape.append(x.shape[axis])
    target_shape = T.stack(*target_shape)

    broadcast_mean = T.reshape(mean, target_shape)
    broadcast_var = T.reshape(var, target_shape)
    broadcast_beta = T.reshape(beta, target_shape)
    broadcast_gamma = T.reshape(gamma, target_shape)
    normed = batch_normalization(x, broadcast_mean, broadcast_var,
                                 broadcast_beta, broadcast_gamma,
                                 epsilon)
    return normed, mean, var


# TODO remove this if statement when Theano without
# T.nnet.bn.batch_normalization_test is deprecated
Project: neural-painter | Author: zxytim
def get_func(rng, nonlin, hidden_size=100, nr_hidden=3,
             input_dim=2,
             output_dim=1, recurrent=False,
             output_nonlin=lambda x: x,
             use_bias=True,
             std=1, mean=0):
    '''Return a function mapping [0,1]^2 -> intensity in [0,1]^c.'''
    coords = T.matrix()
    v = coords

    def get_weights(shape):
        W = theano.shared(rng.randn(*shape) * std + mean)
        if use_bias:
            b = theano.shared(rng.randn(shape[1]) * std + mean)
        else:
            b = theano.shared(np.zeros(shape[1]))
        return W, b

    def apply_linear(v, W, b):
        '''Wx + b'''
        return T.dot(v, W) + b.dimshuffle('x', 0)

    def make_linear(v, shape):
        W, b = get_weights(shape)
        return apply_linear(v, W, b)

    v = make_linear(v, (input_dim, hidden_size))
    v = nonlin(v)

    hidden_shape = (hidden_size, hidden_size)
    W, b = None, None
    for i in range(nr_hidden):
        if W is None or not recurrent:
            W, b = get_weights(hidden_shape)
        v = apply_linear(v, W, b)
        v = nonlin(v)

    v = make_linear(v, (hidden_size, output_dim))
    v = output_nonlin(v)
    v = (v - v.min(axis=0, keepdims=True)) / (
        v.max(axis=0) - v.min(axis=0) + 1e-8).dimshuffle('x', 0)

    return theano.function([coords], v)
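A hedged rendering sketch: evaluate the compiled function on a coordinate grid, the way the neural painter presumably rasterizes an image (the grid size and the 3-channel output are assumptions):

import numpy as np
import theano
import theano.tensor as T

rng = np.random.RandomState(0)
f = get_func(rng, nonlin=T.tanh, output_dim=3)

side = 64
ys, xs = np.mgrid[0:side, 0:side] / float(side)
coords = np.stack([xs.ravel(), ys.ravel()], axis=1).astype(theano.config.floatX)
image = f(coords).reshape(side, side, 3)  # intensities already scaled to [0, 1]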