Python theano.tensor module: var() code examples

We extracted the following 38 code examples from open-source Python projects to illustrate how to use theano.tensor.var().

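Before the project examples, a minimal self-contained sketch of the function itself (assuming a working Theano install): T.var reduces a tensor along the given axes and, like numpy.var, computes the biased (population) variance.

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
# Variance of each column, taken over the batch (row) axis.
f = theano.function([x], T.var(x, axis=0))

data = np.random.randn(4, 3).astype(theano.config.floatX)
assert np.allclose(f(data), data.var(axis=0), atol=1e-5)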
Project: aspect_adversarial    Author: yuanzh    | project source | file source
def create_updates(self, input):
        if self.mode == 0:
            # feature-wise mode: 2D input (mini_batch, features)
            now_mean = T.mean(input, axis=0)
            now_var = T.var(input, axis=0)
            batch = T.cast(input.shape[0], theano.config.floatX)
        else:
            # window-wise (CNN) mode: 4D input (mini_batch, channels, rows, cols)
            now_mean = T.mean(input, axis=(0,2,3))
            now_var = T.var(input, axis=(0,2,3))
            batch = T.cast(input.shape[0]*input.shape[2]*input.shape[3], theano.config.floatX)
        if self.updates is None:
            # first call: blend the running statistics with the batch statistics
            new_mean = self.momentum * self.mean + (1.0-self.momentum) * now_mean
            new_var = self.momentum * self.var + (1.0-self.momentum) * ((batch+1.0)/batch*now_var)
        else:
            # later calls: chain off the previously built update expressions
            new_mean = self.momentum * self.updates[0][1] + (1.0-self.momentum) * now_mean
            new_var = self.momentum * self.updates[1][1] + (1.0-self.momentum) * ((batch+1.0)/batch*now_var)
        self.updates = [(self.mean, new_mean), (self.var, new_var)]
Project: aspect_adversarial    Author: yuanzh    | project source | file source
def get_result(self, input, create_updates):
        if create_updates:
            self.create_updates(input)

        # returns BN result for given input.
        epsilon = np.float64(1e-06).astype(theano.config.floatX)

        if self.mode == 0:
            now_mean = T.mean(input, axis=0)
            now_var = T.var(input, axis=0)
        else:
            now_mean = T.mean(input, axis=(0,2,3))
            now_var = T.var(input, axis=(0,2,3))
        # run_mode: 0 = use batch statistics (training), 1 = use running statistics (inference)
        now_mean = self.run_mode * self.mean + (1.0-self.run_mode) * now_mean
        now_var = self.run_mode * self.var + (1.0-self.run_mode) * now_var

        if self.mode == 0:
            output = self.gamma * (input - now_mean) / (T.sqrt(now_var+epsilon)) + self.beta
        else:
            output = self.gamma.dimshuffle(('x', 0, 'x', 'x')) * (input - now_mean.dimshuffle(('x', 0, 'x', 'x'))) \
                    / (T.sqrt(now_var+epsilon).dimshuffle(('x', 0, 'x', 'x'))) + self.beta.dimshuffle(('x', 0, 'x', 'x'))

        return output
Project: deep-learning-keras-projects    Author: jasmeetsb    | project source | file source
def batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3):
    """Apply batch normalization on x given mean, var, beta and gamma.
    """
    # TODO: remove this if statement once Theano versions without
    # T.nnet.bn.batch_normalization_test are no longer supported
    if not hasattr(T.nnet.bn, 'batch_normalization_test'):
        return _old_batch_normalization(x, mean, var, beta, gamma, epsilon)

    if mean.ndim == 1:
        # based on TensorFlow's default: normalize along rightmost dimension
        reduction_axes = range(x.ndim - 1)
    else:
        reduction_axes = [i for i in range(x.ndim) if mean.broadcastable[i]]

    return T.nnet.bn.batch_normalization_test(
        x, gamma, beta, mean, var, reduction_axes, epsilon)


# TODO: remove this function once Theano versions without
# T.nnet.bn.batch_normalization_train are no longer supported
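For reference, a minimal sketch of the Theano primitive these wrappers delegate to (requires a Theano release that ships T.nnet.bn.batch_normalization_test; variable names and shapes here are illustrative, mirroring the wrapper's call for 1-D parameters on 2-D input):

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')                                   # (batch, features)
gamma, beta = T.vector('gamma'), T.vector('beta')   # per-feature scale/shift
mean, var = T.vector('mean'), T.vector('var')       # running statistics

# axes=[0]: normalize each feature over the batch axis, as the wrapper
# above does when the parameters are vectors.
y = T.nnet.bn.batch_normalization_test(x, gamma, beta, mean, var, [0], 1e-3)
f = theano.function([x, gamma, beta, mean, var], y)

xv = np.random.randn(8, 4).astype(theano.config.floatX)
ones = np.ones(4, dtype=theano.config.floatX)
zeros = np.zeros(4, dtype=theano.config.floatX)
# With gamma=1, beta=0, mean=0, var=1 the transform is close to identity.
print(f(xv, ones, zeros, zeros, ones))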
Project: keras-customized    Author: ambrite    | project source | file source
def batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3):
    '''Apply batch normalization on x given mean, var, beta and gamma.
    '''
    # TODO: remove this if statement once Theano versions without
    # T.nnet.bn.batch_normalization_test are no longer supported
    if not hasattr(T.nnet.bn, 'batch_normalization_test'):
        return _old_batch_normalization(x, mean, var, beta, gamma, epsilon)

    if mean.ndim == 1:
        # based on TensorFlow's default: normalize along rightmost dimension
        reduction_axes = range(x.ndim - 1)
    else:
        reduction_axes = [i for i in range(x.ndim) if mean.broadcastable[i]]

    return T.nnet.bn.batch_normalization_test(
        x, gamma, beta, mean, var, reduction_axes, epsilon)


# TODO: remove this function once Theano versions without
# T.nnet.bn.batch_normalization_train are no longer supported
Project: keras    Author: NVIDIA    | project source | file source
def batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3):
    """Apply batch normalization on x given mean, var, beta and gamma.
    """
    # TODO: remove this if statement once Theano versions without
    # T.nnet.bn.batch_normalization_test are no longer supported
    if not hasattr(T.nnet.bn, 'batch_normalization_test'):
        return _old_batch_normalization(x, mean, var, beta, gamma, epsilon)

    if mean.ndim == 1:
        # based on TensorFlow's default: normalize along rightmost dimension
        reduction_axes = range(x.ndim - 1)
    else:
        reduction_axes = [i for i in range(x.ndim) if mean.broadcastable[i]]

    return T.nnet.bn.batch_normalization_test(
        x, gamma, beta, mean, var, reduction_axes, epsilon)


# TODO: remove this function once Theano versions without
# T.nnet.bn.batch_normalization_train are no longer supported
Project: keras_superpixel_pooling    Author: parag2489    | project source | file source
def batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3):
    """Apply batch normalization on x given mean, var, beta and gamma.
    """
    # TODO: remove this if statement once Theano versions without
    # T.nnet.bn.batch_normalization_test are no longer supported
    if not hasattr(T.nnet.bn, 'batch_normalization_test'):
        return _old_batch_normalization(x, mean, var, beta, gamma, epsilon)

    if gamma is None:
        gamma = ones_like(var)
    if beta is None:
        beta = zeros_like(mean)

    if mean.ndim == 1:
        # based on TensorFlow's default: normalize along rightmost dimension
        reduction_axes = list(range(x.ndim - 1))
    else:
        reduction_axes = [i for i in range(x.ndim) if mean.broadcastable[i]]

    return T.nnet.bn.batch_normalization_test(
        x, gamma, beta, mean, var, reduction_axes, epsilon)


# TODO: remove this function once Theano versions without
# T.nnet.bn.batch_normalization_train are no longer supported
Project: InnerOuterRNN    Author: Chemoinformatics    | project source | file source
def normalize_batch_in_training(x, gamma, beta,
                                reduction_axes, epsilon=0.0001):
    '''Compute the mean and variance of the batch, then apply batch_normalization to it.
    '''
    var = x.var(reduction_axes)
    mean = x.mean(reduction_axes)

    target_shape = []
    for axis in range(ndim(x)):
        if axis in reduction_axes:
            target_shape.append(1)
        else:
            target_shape.append(x.shape[axis])
    target_shape = T.stack(*target_shape)

    broadcast_mean = T.reshape(mean, target_shape)
    broadcast_var = T.reshape(var, target_shape)
    broadcast_beta = T.reshape(beta, target_shape)
    broadcast_gamma = T.reshape(gamma, target_shape)
    normed = batch_normalization(x, broadcast_mean, broadcast_var,
                                 broadcast_beta, broadcast_gamma,
                                 epsilon)
    return normed, mean, var
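The reshape above is the core trick: the reduced statistics get singleton dimensions back on the reduced axes so they broadcast against x. The same pattern in a standalone sketch, using dimshuffle instead of reshape (shapes are illustrative):

import theano
import theano.tensor as T

x = T.tensor4('x')                      # (batch, channels, rows, cols)
axes = [0, 2, 3]                        # per-channel statistics
mean = x.mean(axes)                     # shape (channels,)
var = x.var(axes)

# Re-insert broadcastable dims on the reduced axes.
b_mean = mean.dimshuffle('x', 0, 'x', 'x')
b_var = var.dimshuffle('x', 0, 'x', 'x')
normed = (x - b_mean) / T.sqrt(b_var + 1e-4)

f = theano.function([x], [normed, mean, var])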
Project: keras    Author: GeekLiB    | project source | file source
def var(x, axis=None, keepdims=False):
    return T.var(x, axis=axis, keepdims=keepdims)
Project: keras    Author: GeekLiB    | project source | file source
def normalize_batch_in_training(x, gamma, beta,
                                reduction_axes, epsilon=0.0001):
    '''Computes the mean and variance of the batch, then applies batch_normalization to it.
    '''
    dev = theano.config.device
    use_cudnn = ndim(x) < 5 and reduction_axes == [0, 2, 3] and (dev.startswith('cuda') or dev.startswith('gpu'))
    if use_cudnn:
        broadcast_beta = beta.dimshuffle('x', 0, 'x', 'x')
        broadcast_gamma = gamma.dimshuffle('x', 0, 'x', 'x')
        try:
            normed, mean, stdinv = theano.sandbox.cuda.dnn.dnn_batch_normalization_train(
                x, broadcast_gamma, broadcast_beta, 'spatial', epsilon)
            var = T.inv(stdinv ** 2)
            return normed, T.flatten(mean), T.flatten(var)
        except AttributeError:
            pass

    var = x.var(reduction_axes)
    mean = x.mean(reduction_axes)

    target_shape = []
    for axis in range(ndim(x)):
        if axis in reduction_axes:
            target_shape.append(1)
        else:
            target_shape.append(x.shape[axis])
    target_shape = T.stack(*target_shape)

    broadcast_mean = T.reshape(mean, target_shape)
    broadcast_var = T.reshape(var, target_shape)
    broadcast_beta = T.reshape(beta, target_shape)
    broadcast_gamma = T.reshape(gamma, target_shape)
    normed = batch_normalization(x, broadcast_mean, broadcast_var,
                                 broadcast_beta, broadcast_gamma,
                                 epsilon)
    return normed, mean, var
Project: keras    Author: GeekLiB    | project source | file source
def batch_normalization(x, mean, var, beta, gamma, epsilon=0.0001):
    '''Apply batch normalization on x given mean, var, beta and gamma.
    '''
    ndim = x.ndim
    dev = theano.config.device
    use_cudnn = ndim < 5 and (dev.startswith('cuda') or dev.startswith('gpu'))
    if use_cudnn:
        try:
            axis = mean.broadcastable.index(False)
            if axis != 1:
                shuffle_pattern = list(range(ndim))
                shuffle_pattern[1] = shuffle_pattern[axis]
                shuffle_pattern[axis] = 1
                x = x.dimshuffle(shuffle_pattern)
                mean = mean.dimshuffle(shuffle_pattern)
                var = var.dimshuffle(shuffle_pattern)
                beta = beta.dimshuffle(shuffle_pattern)
                gamma = gamma.dimshuffle(shuffle_pattern)
            normed = theano.sandbox.cuda.dnn.dnn_batch_normalization_test(x, gamma, beta, mean, var,
                                                                          'spatial', epsilon)
            if axis != 1:
                normed = normed.dimshuffle(shuffle_pattern)
            return normed
        except AttributeError:
            pass
        except ValueError:
            pass
    return T.nnet.bn.batch_normalization(x, gamma, beta, mean, sqrt(var + epsilon),
                                         mode='high_mem')


# SHAPE OPERATIONS
Project: aspect_adversarial    Author: yuanzh    | project source | file source
def __init__(self, input_shape, mode=0, momentum=0.9):
        '''
        # params :
        input_shape :
            when mode is 0, we assume 2D input. (mini_batch_size, # features)
            when mode is 1, we assume 4D input. (mini_batch_size, # of channel, # row, # column)
        mode : 
            0 : feature-wise mode (normal BN)
            1 : window-wise mode (CNN mode BN)
        momentum : momentum for exponential average
        '''
        self.input_shape = input_shape
        self.mode = mode
        self.momentum = momentum
        #self.run_mode = 0 # run_mode : 0 means training, 1 means inference
        self.run_mode = theano.shared(np.float64(0.0).astype(theano.config.floatX))

        self.insize = input_shape[1]

        # random setting of gamma and beta, setting initial mean and std
        rng = default_rng
        self.gamma = create_shared(rng.uniform(low=-(1.0/self.insize)**0.5, high=(1.0/self.insize)**0.5, size=(input_shape[1])).astype(theano.config.floatX), name='gamma')
        self.beta = create_shared(np.zeros((input_shape[1]), dtype=theano.config.floatX), name='beta')
        self.mean = create_shared(np.zeros((input_shape[1]), dtype=theano.config.floatX), name='mean')
        self.var = create_shared(np.ones((input_shape[1]), dtype=theano.config.floatX), name='var')

        # parameter save for update
        self.params = [self.gamma, self.beta]
        self.updates = None
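A hypothetical driver tying this project's three snippets together. The class name BN is an assumption (only the methods are shown above), and create_shared / default_rng come from the project's own utilities:

import numpy as np
import theano
import theano.tensor as T

x = T.tensor4('x')                                  # conv activations
bn = BN(input_shape=(None, 64, 32, 32), mode=1)     # window-wise (CNN) mode

train_out = bn.get_result(x, create_updates=True)   # uses batch statistics
# ... compile the training function with bn.updates included ...

# Switch to inference: run_mode=1 makes get_result use the running mean/var.
bn.run_mode.set_value(np.asarray(1.0, dtype=theano.config.floatX))
test_out = bn.get_result(x, create_updates=False)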
Project: deep-learning-keras-projects    Author: jasmeetsb    | project source | file source
def var(x, axis=None, keepdims=False):
    return T.var(x, axis=axis, keepdims=keepdims)
Project: NNBuilder    Author: aeloyq    | project source | file source
def var(self, t, axis, keepdims):
            return T.var(t, axis, keepdims=keepdims)
Project: drmad    Author: bigaidream-projects    | project source | file source
def bn_shared(params, outFilters, index):    

    ''' Setup BN shared variables.    

    '''
    normParam = {}       
    template = np.ones((outFilters,), dtype=theano.config.floatX)
    normParam['mean'] = theano.shared(value=0.*template, name='mean_%d' % (index), borrow=True)
    normParam['var'] = theano.shared(value=1.*template, name='var_%d' % (index), borrow=True)                                
    normParam['mean_batch'] = theano.shared(value=0.*template, name='mean_batch_%d' % (index), borrow=True) # need for exact 
    normParam['var_batch'] = theano.shared(value=1.*template, name='var_batch_%d' % (index), borrow=True) # need for exact                               
    normParam['iter'] = theano.shared(np.float32(1.), name='iter')                 

    paramsBN = [normParam['mean'], normParam['var'], normParam['mean_batch'], normParam['var_batch'], normParam['iter']]
    return normParam, paramsBN
Project: drmad    Author: bigaidream-projects    | project source | file source
def bn_layer(x, a, b, normParam, params, phase):

    ''' Apply BN.    

    # phase = 0 : BN eval with the moving-average stats (mean2, var2); BN updates the weighted average
    # phase = 1 : BN eval with the batch stats (mean1, var1); no BN updates

    '''

    minAlpha = params.movingAvMin
    iterStep = params.movingAvStep                  
    # compute mean & variance    
    if params.model == 'convnet':
        mean1 = T.mean(x, axis = (0, 2, 3))
        var1 = T.var(x, axis = (0, 2, 3))
    else:
        mean1 = T.mean(x, axis = 0)
        var1 = T.var(x, axis = 0)

    # moving average as a proxy for the validation model
    alpha = (1.-phase)*T.maximum(minAlpha, 1./normParam['iter'])
    mean2 = (1.-alpha)*normParam['mean'] + alpha*mean1
    var2 = (1.-alpha)*normParam['var'] + alpha*var1

    mean = (1.-phase)*mean2 + phase*mean1
    var = (1.-phase)*var2 + phase*var1  # blend moving-average and batch variance, mirroring the mean
    std = T.sqrt(var+eps)  # eps is a module-level constant in the project source

    # apply transformation: 
    if params.model == 'convnet':
        x = bn.batch_normalization(x, a.dimshuffle('x', 0, 'x', 'x'), b.dimshuffle('x', 0, 'x', 'x'), 
                                mean.dimshuffle('x', 0, 'x', 'x'), std.dimshuffle('x', 0, 'x', 'x'), mode='high_mem')
    else:    
        x = bn.batch_normalization(x, a, b, mean, std) 
    updateBN = [mean2, var2, mean1, var1, normParam['iter']+iterStep]  
    return x, updateBN
Project: keras-customized    Author: ambrite    | project source | file source
def var(x, axis=None, keepdims=False):
    return T.var(x, axis=axis, keepdims=keepdims)
Project: keras-customized    Author: ambrite    | project source | file source
def _old_normalize_batch_in_training(x, gamma, beta,
                                     reduction_axes, epsilon=1e-3):
    '''Computes the mean and variance of the batch, then applies batch_normalization to it.
    '''
    dev = theano.config.device
    use_cudnn = ndim(x) < 5 and reduction_axes == [0, 2, 3] and (dev.startswith('cuda') or dev.startswith('gpu'))
    if use_cudnn:
        broadcast_beta = beta.dimshuffle('x', 0, 'x', 'x')
        broadcast_gamma = gamma.dimshuffle('x', 0, 'x', 'x')
        try:
            normed, mean, stdinv = theano.sandbox.cuda.dnn.dnn_batch_normalization_train(
                x, broadcast_gamma, broadcast_beta, 'spatial', epsilon)
            var = T.inv(stdinv ** 2)
            return normed, T.flatten(mean), T.flatten(var)
        except AttributeError:
            pass

    var = x.var(reduction_axes)
    mean = x.mean(reduction_axes)

    target_shape = []
    for axis in range(ndim(x)):
        if axis in reduction_axes:
            target_shape.append(1)
        else:
            target_shape.append(x.shape[axis])
    target_shape = T.stack(*target_shape)

    broadcast_mean = T.reshape(mean, target_shape)
    broadcast_var = T.reshape(var, target_shape)
    broadcast_beta = T.reshape(beta, target_shape)
    broadcast_gamma = T.reshape(gamma, target_shape)
    normed = batch_normalization(x, broadcast_mean, broadcast_var,
                                 broadcast_beta, broadcast_gamma,
                                 epsilon)
    return normed, mean, var


# TODO: remove this if statement once Theano versions without
# T.nnet.bn.batch_normalization_test are no longer supported
Project: hred-latent-piecewise    Author: julianser    | project source | file source
def DPrint(name, var):
    if PRINT_VARS is False:
        return var

    return theano.printing.Print(name)(var)
Project: hred-latent-piecewise    Author: julianser    | project source | file source
def NormalizationOperator(normop_type, x, gamma, mask, estimated_mean=0.0, estimated_var=1.0):
    if normop_type.upper() == 'BN':
        # NOTE: the estimated_mean / estimated_var arguments are not forwarded
        # by the calls below, which hard-code 0.0 and 1.0.
        if x.ndim == 3:
            return FeedforwardBatchNormalization(x, gamma, mask, estimated_mean=0.0, estimated_var=1.0)
        elif x.ndim == 2:
            return RecurrentBatchNormalization(x, gamma, mask, estimated_mean=0.0, estimated_var=1.0)
    elif normop_type.upper() == 'LN':
        return LayerNormalization(x, gamma, mask, estimated_mean=0.0, estimated_var=1.0)
    elif normop_type.upper() == 'NONE' or normop_type.upper() == '':
        assert x.ndim == 3 or x.ndim == 2

        output = x + 0.0*gamma  # keep gamma in the graph so its gradient is defined (zero)
        if x.ndim == 3:
            x_mean = T.mean(x, axis=1).dimshuffle(0, 1, 'x')
            x_var = T.var(x, axis=1).dimshuffle(0, 1, 'x')
        else:
            x_mean = T.mean(x, axis=1).dimshuffle(0, 'x')
            x_var = T.var(x, axis=1).dimshuffle(0, 'x')

        return output, x_mean[0], x_var[0]
    else:
        raise ValueError("Error! normop_type must take a value in set {\'BN\', \'LN\', \'NONE\'}!")


# Batch normalization of input variable on first and second tensor indices (time x batch example x hidden units)
# Elements where mask is zero, will not be used to compute the mean and variance estimates,
# however these elements will still be batch normalized.
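FeedforwardBatchNormalization itself is not among the extracted examples; below is a minimal sketch of the masked statistics the comment describes. It is an illustration under those stated assumptions, not the project's implementation:

import theano.tensor as T

def masked_mean_var(x, mask):
    # x: (time, batch, hidden); mask: (time, batch), 1 = valid, 0 = padding.
    m = mask.dimshuffle(0, 1, 'x')            # broadcastable over hidden units
    n = T.sum(mask)                           # number of valid positions
    mean = T.sum(x * m, axis=(0, 1)) / n      # per-unit mean over valid steps
    var = T.sum(T.sqr(x - mean) * m, axis=(0, 1)) / n
    # Every element is normalized, padded positions included.
    normed = (x - mean) / T.sqrt(var + 1e-7)
    return normed, mean, var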
Project: hred-latent-piecewise    Author: julianser    | project source | file source
def LayerNormalization(x, gamma, mask, estimated_mean=0.0, estimated_var=1.0):
    assert x.ndim == 3 or x.ndim == 2
    if x.ndim == 3:
        x_mean = T.mean(x, axis=2).dimshuffle(0, 1, 'x')
        x_var = T.var(x, axis=2).dimshuffle(0, 1, 'x')
        return gamma*((x - x_mean) / T.sqrt(x_var+1e-7)), x_mean[0, 0], x_var[0, 0]

    elif x.ndim == 2:
        x_mean = T.mean(x, axis=1).dimshuffle(0, 'x')
        x_var = T.var(x, axis=1).dimshuffle(0, 'x')
        return gamma*((x - x_mean) / T.sqrt(x_var+1e-7)), x_mean[0], x_var[0]



# Does theano.batched_dot. If last_axis is on it will loop over the last axis, otherwise it will loop over the first axis.
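The last_axis variant described here is project-specific and not shown; the stock theano.tensor.batched_dot, which loops over the first (batch) axis, works like this:

import numpy as np
import theano
import theano.tensor as T

a = T.tensor3('a')    # (batch, n, k)
b = T.tensor3('b')    # (batch, k, m)
f = theano.function([a, b], T.batched_dot(a, b))   # result: (batch, n, m)

av = np.random.randn(5, 2, 3).astype(theano.config.floatX)
bv = np.random.randn(5, 3, 4).astype(theano.config.floatX)
out = f(av, bv)       # same as np.einsum('bij,bjk->bik', av, bv)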
Project: ADEM    Author: mike-n-7    | project source | file source
def DPrint(name, var):
    if PRINT_VARS is False:
        return var

    return theano.printing.Print(name)(var)
Project: ADEM    Author: mike-n-7    | project source | file source
def NormalizationOperator(normop_type, x, gamma, mask, estimated_mean=0.0, estimated_var=1.0):
    if normop_type.upper() == 'BN':
        if x.ndim == 3:
            return FeedforwardBatchNormalization(x, gamma, mask, estimated_mean=0.0, estimated_var=1.0)
        elif x.ndim == 2:
            return RecurrentBatchNormalization(x, gamma, mask, estimated_mean=0.0, estimated_var=1.0)
    elif normop_type.upper() == 'LN':
        return LayerNormalization(x, gamma, mask, estimated_mean=0.0, estimated_var=1.0)
    elif normop_type.upper() == 'NONE' or normop_type.upper() == '':
        assert x.ndim == 3 or x.ndim == 2

        output = x + 0.0*gamma
        if x.ndim == 3:
            x_mean = T.mean(x, axis=1).dimshuffle(0, 1, 'x')
            x_var = T.var(x, axis=1).dimshuffle(0, 1, 'x')
        else:
            x_mean = T.mean(x, axis=1).dimshuffle(0, 'x')
            x_var = T.var(x, axis=1).dimshuffle(0, 'x')

        return output, x_mean[0], x_var[0]
    else:
        raise ValueError("Error! normop_type must take a value in set {\'BN\', \'LN\', \'NONE\'}!")


# Batch normalization of input variable on first and second tensor indices (time x batch example x hidden units)
# Elements where mask is zero, will not be used to compute the mean and variance estimates,
# however these elements will still be batch normalized.
Project: ADEM    Author: mike-n-7    | project source | file source
def LayerNormalization(x, gamma, mask, estimated_mean=0.0, estimated_var=1.0):
    assert x.ndim == 3 or x.ndim == 2
    if x.ndim == 3:
        x_mean = T.mean(x, axis=2).dimshuffle(0, 1, 'x')
        x_var = T.var(x, axis=2).dimshuffle(0, 1, 'x')
        return gamma*((x - x_mean) / T.sqrt(x_var+1e-7)), x_mean[0, 0], x_var[0, 0]

    elif x.ndim == 2:
        x_mean = T.mean(x, axis=1).dimshuffle(0, 'x')
        x_var = T.var(x, axis=1).dimshuffle(0, 'x')
        return gamma*((x - x_mean) / T.sqrt(x_var+1e-7)), x_mean[0], x_var[0]



# Does theano.batched_dot. If last_axis is on it will loop over the last axis, otherwise it will loop over the first axis.
Project: keras    Author: NVIDIA    | project source | file source
def var(x, axis=None, keepdims=False):
    return T.var(x, axis=axis, keepdims=keepdims)
Project: keras_superpixel_pooling    Author: parag2489    | project source | file source
def var(x, axis=None, keepdims=False):
    return T.var(x, axis=axis, keepdims=keepdims)
Project: Hat    Author: qiuqiangkong    | project source | file source
def var(x, axis=None):
    return T.var(x, axis)
Project: Hat    Author: qiuqiangkong    | project source | file source
def batch_normalization(inputs, gamma, beta, mean, var, eps):
    return T.nnet.bn.batch_normalization(inputs, gamma, beta, mean, T.sqrt(var+eps), mode='high_mem')

### random numbers
Project: statestream    Author: VolkerFischer    | project source | file source
def var(x, axis=None, keepdims=False):
    return T.var(x, axis=axis, keepdims=keepdims)
Project: InnerOuterRNN    Author: Chemoinformatics    | project source | file source
def var(x, axis=None, keepdims=False):
    return T.var(x, axis=axis, keepdims=keepdims)
Project: InnerOuterRNN    Author: Chemoinformatics    | project source | file source
def batch_normalization(x, mean, var, beta, gamma, epsilon=0.0001):
    '''Apply batch normalization on x given mean, var, beta and gamma.
    '''
    # NOTE: epsilon is added to the standard deviation here (sqrt(var) + epsilon),
    # not to the variance as in the other variants on this page
    normed = T.nnet.bn.batch_normalization(x, gamma, beta, mean,
                                           sqrt(var) + epsilon,
                                           mode='high_mem')
    return normed


# SHAPE OPERATIONS
Project: odin_old    Author: trungnt13    | project source | file source
def var(x, axis=None, keepdims=False):
    return T.var(x, axis=axis, keepdims=keepdims)
Project: deep-learning-keras-projects    Author: jasmeetsb    | project source | file source
def _old_normalize_batch_in_training(x, gamma, beta,
                                     reduction_axes, epsilon=1e-3):
    """Computes mean and std for batch then apply batch_normalization on batch.
    """
    dev = theano.config.device
    use_cudnn = ndim(x) < 5 and reduction_axes == [0, 2, 3] and (dev.startswith('cuda') or dev.startswith('gpu'))
    if use_cudnn:
        broadcast_beta = beta.dimshuffle('x', 0, 'x', 'x')
        broadcast_gamma = gamma.dimshuffle('x', 0, 'x', 'x')
        try:
            normed, mean, stdinv = theano.sandbox.cuda.dnn.dnn_batch_normalization_train(
                x, broadcast_gamma, broadcast_beta, 'spatial', epsilon)
            normed = theano.tensor.as_tensor_variable(normed)
            mean = theano.tensor.as_tensor_variable(mean)
            stdinv = theano.tensor.as_tensor_variable(stdinv)
            var = T.inv(stdinv ** 2)
            return normed, T.flatten(mean), T.flatten(var)
        except AttributeError:
            pass

    var = x.var(reduction_axes)
    mean = x.mean(reduction_axes)

    target_shape = []
    for axis in range(ndim(x)):
        if axis in reduction_axes:
            target_shape.append(1)
        else:
            target_shape.append(x.shape[axis])
    target_shape = T.stack(*target_shape)

    broadcast_mean = T.reshape(mean, target_shape)
    broadcast_var = T.reshape(var, target_shape)
    broadcast_beta = T.reshape(beta, target_shape)
    broadcast_gamma = T.reshape(gamma, target_shape)
    normed = batch_normalization(x, broadcast_mean, broadcast_var,
                                 broadcast_beta, broadcast_gamma,
                                 epsilon)
    return normed, mean, var


# TODO: remove this if statement once Theano versions without
# T.nnet.bn.batch_normalization_test are no longer supported
Project: deep-learning-keras-projects    Author: jasmeetsb    | project source | file source
def _old_batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3):
    """Apply batch normalization on x given mean, var, beta and gamma.
    """
    if mean.ndim == 1 and x.ndim > 1:
        # in TensorFlow's batch_normalization, if the parameters are vectors
        # the batch normalization should be applied along the rightmost axis.
        # Theano expects the parameters to always have x.ndim dimensions.
        shuffle_pattern = ['x'] * (x.ndim - 1) + [0]
        mean = mean.dimshuffle(shuffle_pattern)
        var = var.dimshuffle(shuffle_pattern)
        beta = beta.dimshuffle(shuffle_pattern)
        gamma = gamma.dimshuffle(shuffle_pattern)

    ndim = x.ndim
    dev = theano.config.device
    use_cudnn = ndim < 5 and (dev.startswith('cuda') or dev.startswith('gpu'))
    if use_cudnn:
        try:
            axis = mean.broadcastable.index(False)
            if axis != 1:
                shuffle_pattern = list(range(ndim))
                shuffle_pattern[1] = shuffle_pattern[axis]
                shuffle_pattern[axis] = 1
                result = theano.sandbox.cuda.dnn.dnn_batch_normalization_test(
                    x.dimshuffle(shuffle_pattern),
                    gamma.dimshuffle(shuffle_pattern),
                    beta.dimshuffle(shuffle_pattern),
                    mean.dimshuffle(shuffle_pattern),
                    var.dimshuffle(shuffle_pattern),
                    'spatial', epsilon).dimshuffle(shuffle_pattern)
            else:
                result = theano.sandbox.cuda.dnn.dnn_batch_normalization_test(
                    x, gamma, beta, mean, var, 'spatial', epsilon)
            return theano.tensor.as_tensor_variable(result)
        except AttributeError:
            pass
        except ValueError:
            pass
    return T.nnet.bn.batch_normalization(x, gamma, beta, mean, sqrt(var + epsilon),
                                         mode='high_mem')


# SHAPE OPERATIONS
Project: keras-customized    Author: ambrite    | project source | file source
def _old_batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3):
    '''Apply batch normalization on x given mean, var, beta and gamma.
    '''
    if mean.ndim == 1 and x.ndim > 1:
        # in TensorFlow's batch_normalization, if the parameters are vectors
        # the batch normalization should be applied along the rightmost axis.
        # Theano expects the parameters to always have x.ndim dimensions.
        shuffle_pattern = ['x'] * (x.ndim - 1) + [0]
        mean = mean.dimshuffle(shuffle_pattern)
        var = var.dimshuffle(shuffle_pattern)
        beta = beta.dimshuffle(shuffle_pattern)
        gamma = gamma.dimshuffle(shuffle_pattern)

    ndim = x.ndim
    dev = theano.config.device
    use_cudnn = ndim < 5 and (dev.startswith('cuda') or dev.startswith('gpu'))
    if use_cudnn:
        try:
            axis = mean.broadcastable.index(False)
            if axis != 1:
                shuffle_pattern = list(range(ndim))
                shuffle_pattern[1] = shuffle_pattern[axis]
                shuffle_pattern[axis] = 1
                return theano.sandbox.cuda.dnn.dnn_batch_normalization_test(
                    x.dimshuffle(shuffle_pattern),
                    gamma.dimshuffle(shuffle_pattern),
                    beta.dimshuffle(shuffle_pattern),
                    mean.dimshuffle(shuffle_pattern),
                    var.dimshuffle(shuffle_pattern),
                    'spatial', epsilon).dimshuffle(shuffle_pattern)
            else:
                return theano.sandbox.cuda.dnn.dnn_batch_normalization_test(
                    x, gamma, beta, mean, var, 'spatial', epsilon)
        except AttributeError:
            pass
        except ValueError:
            pass
    return T.nnet.bn.batch_normalization(x, gamma, beta, mean, sqrt(var + epsilon),
                                         mode='high_mem')


# SHAPE OPERATIONS
Project: keras    Author: NVIDIA    | project source | file source
def _old_normalize_batch_in_training(x, gamma, beta,
                                     reduction_axes, epsilon=1e-3):
    """Computes mean and std for batch then apply batch_normalization on batch.
    """
    dev = theano.config.device
    use_cudnn = ndim(x) < 5 and reduction_axes == [0, 2, 3] and (dev.startswith('cuda') or dev.startswith('gpu'))
    if use_cudnn:
        broadcast_beta = beta.dimshuffle('x', 0, 'x', 'x')
        broadcast_gamma = gamma.dimshuffle('x', 0, 'x', 'x')
        try:
            normed, mean, stdinv = theano.sandbox.cuda.dnn.dnn_batch_normalization_train(
                x, broadcast_gamma, broadcast_beta, 'spatial', epsilon)
            normed = theano.tensor.as_tensor_variable(normed)
            mean = theano.tensor.as_tensor_variable(mean)
            stdinv = theano.tensor.as_tensor_variable(stdinv)
            var = T.inv(stdinv ** 2)
            return normed, T.flatten(mean), T.flatten(var)
        except AttributeError:
            pass

    var = x.var(reduction_axes)
    mean = x.mean(reduction_axes)

    target_shape = []
    for axis in range(ndim(x)):
        if axis in reduction_axes:
            target_shape.append(1)
        else:
            target_shape.append(x.shape[axis])
    target_shape = T.stack(*target_shape)

    broadcast_mean = T.reshape(mean, target_shape)
    broadcast_var = T.reshape(var, target_shape)
    broadcast_beta = T.reshape(beta, target_shape)
    broadcast_gamma = T.reshape(gamma, target_shape)
    normed = batch_normalization(x, broadcast_mean, broadcast_var,
                                 broadcast_beta, broadcast_gamma,
                                 epsilon)
    return normed, mean, var


# TODO: remove this if statement once Theano versions without
# T.nnet.bn.batch_normalization_test are no longer supported
Project: keras    Author: NVIDIA    | project source | file source
def _old_batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3):
    """Apply batch normalization on x given mean, var, beta and gamma.
    """
    if mean.ndim == 1 and x.ndim > 1:
        # in TensorFlow's batch_normalization, if the parameters are vectors
        # the batch normalization should be applied along the rightmost axis.
        # Theano expects the parameters to always have x.ndim dimensions.
        shuffle_pattern = ['x'] * (x.ndim - 1) + [0]
        mean = mean.dimshuffle(shuffle_pattern)
        var = var.dimshuffle(shuffle_pattern)
        beta = beta.dimshuffle(shuffle_pattern)
        gamma = gamma.dimshuffle(shuffle_pattern)

    ndim = x.ndim
    dev = theano.config.device
    use_cudnn = ndim < 5 and (dev.startswith('cuda') or dev.startswith('gpu'))
    if use_cudnn:
        try:
            axis = mean.broadcastable.index(False)
            if axis != 1:
                shuffle_pattern = list(range(ndim))
                shuffle_pattern[1] = shuffle_pattern[axis]
                shuffle_pattern[axis] = 1
                result = theano.sandbox.cuda.dnn.dnn_batch_normalization_test(
                    x.dimshuffle(shuffle_pattern),
                    gamma.dimshuffle(shuffle_pattern),
                    beta.dimshuffle(shuffle_pattern),
                    mean.dimshuffle(shuffle_pattern),
                    var.dimshuffle(shuffle_pattern),
                    'spatial', epsilon).dimshuffle(shuffle_pattern)
            else:
                result = theano.sandbox.cuda.dnn.dnn_batch_normalization_test(
                    x, gamma, beta, mean, var, 'spatial', epsilon)
            return theano.tensor.as_tensor_variable(result)
        except AttributeError:
            pass
        except ValueError:
            pass
    return T.nnet.bn.batch_normalization(x, gamma, beta, mean, sqrt(var + epsilon),
                                         mode='high_mem')


# SHAPE OPERATIONS
Project: keras_superpixel_pooling    Author: parag2489    | project source | file source
def _old_normalize_batch_in_training(x, gamma, beta,
                                     reduction_axes, epsilon=1e-3):
    """Computes mean and std for batch then apply batch_normalization on batch.
    """
    if gamma is None:
        gamma = ones_like(x)
    if beta is None:
        beta = zeros_like(x)

    dev = theano.config.device
    use_cudnn = ndim(x) < 5 and reduction_axes == [0, 2, 3] and (dev.startswith('cuda') or dev.startswith('gpu'))
    if use_cudnn:
        broadcast_beta = beta.dimshuffle('x', 0, 'x', 'x')
        broadcast_gamma = gamma.dimshuffle('x', 0, 'x', 'x')
        try:
            normed, mean, stdinv = theano.sandbox.cuda.dnn.dnn_batch_normalization_train(
                x, broadcast_gamma, broadcast_beta, 'spatial', epsilon)
            normed = theano.tensor.as_tensor_variable(normed)
            mean = theano.tensor.as_tensor_variable(mean)
            stdinv = theano.tensor.as_tensor_variable(stdinv)
            var = T.inv(stdinv ** 2)
            return normed, T.flatten(mean), T.flatten(var)
        except AttributeError:
            pass

    var = x.var(reduction_axes)
    mean = x.mean(reduction_axes)

    target_shape = []
    for axis in range(ndim(x)):
        if axis in reduction_axes:
            target_shape.append(1)
        else:
            target_shape.append(x.shape[axis])
    target_shape = T.stack(*target_shape)

    broadcast_mean = T.reshape(mean, target_shape)
    broadcast_var = T.reshape(var, target_shape)
    broadcast_beta = T.reshape(beta, target_shape)
    broadcast_gamma = T.reshape(gamma, target_shape)
    normed = batch_normalization(x, broadcast_mean, broadcast_var,
                                 broadcast_beta, broadcast_gamma,
                                 epsilon)
    return normed, mean, var


# TODO: remove this if statement once Theano versions without
# T.nnet.bn.batch_normalization_test are no longer supported
Project: keras_superpixel_pooling    Author: parag2489    | project source | file source
def _old_batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3):
    """Apply batch normalization on x given mean, var, beta and gamma.
    """
    if gamma is None:
        gamma = ones_like(var)
    if beta is None:
        beta = zeros_like(mean)

    if mean.ndim == 1 and x.ndim > 1:
        # in TensorFlow's batch_normalization, if the parameters are vectors
        # the batch normalization should be applied along the rightmost axis.
        # Theano expects the parameters to always have x.ndim dimensions.
        shuffle_pattern = ['x'] * (x.ndim - 1) + [0]
        mean = mean.dimshuffle(shuffle_pattern)
        var = var.dimshuffle(shuffle_pattern)
        beta = beta.dimshuffle(shuffle_pattern)
        gamma = gamma.dimshuffle(shuffle_pattern)

    ndim = x.ndim
    dev = theano.config.device
    use_cudnn = ndim < 5 and (dev.startswith('cuda') or dev.startswith('gpu'))
    if use_cudnn:
        try:
            axis = mean.broadcastable.index(False)
            if axis != 1:
                shuffle_pattern = list(range(ndim))
                shuffle_pattern[1] = shuffle_pattern[axis]
                shuffle_pattern[axis] = 1
                result = theano.sandbox.cuda.dnn.dnn_batch_normalization_test(
                    x.dimshuffle(shuffle_pattern),
                    gamma.dimshuffle(shuffle_pattern),
                    beta.dimshuffle(shuffle_pattern),
                    mean.dimshuffle(shuffle_pattern),
                    var.dimshuffle(shuffle_pattern),
                    'spatial', epsilon).dimshuffle(shuffle_pattern)
            else:
                result = theano.sandbox.cuda.dnn.dnn_batch_normalization_test(
                    x, gamma, beta, mean, var, 'spatial', epsilon)
            return theano.tensor.as_tensor_variable(result)
        except AttributeError:
            pass
        except ValueError:
            pass
    return T.nnet.bn.batch_normalization(x, gamma, beta, mean, sqrt(var + epsilon),
                                         mode='high_mem')


# SHAPE OPERATIONS