Python torch.nn.functional module: batch_norm() example source code

The following 22 code examples, extracted from open-source Python projects, illustrate how to use torch.nn.functional.batch_norm().
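
Before diving into the project examples, here is a minimal sketch of a direct call in evaluation mode; the shapes and statistics are illustrative assumptions, not taken from any of the projects below:

import torch
import torch.nn.functional as F

# Normalize a batch of 4 samples with 10 features using fixed running
# statistics, i.e. inference behaviour (training=False).
x = torch.randn(4, 10)
running_mean = torch.zeros(10)
running_var = torch.ones(10)
weight = torch.ones(10)   # gamma (per-feature scale)
bias = torch.zeros(10)    # beta (per-feature shift)

out = F.batch_norm(x, running_mean, running_var, weight=weight, bias=bias,
                   training=False, momentum=0.1, eps=1e-5)
print(out.shape)  # torch.Size([4, 10])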

Project: diracnets    Author: szagoruyko
def batch_norm(x, params, stats, base, mode):
    return F.batch_norm(x, weight=params[base + '.weight'],
                        bias=params[base + '.bias'],
                        running_mean=stats[base + '.running_mean'],
                        running_var=stats[base + '.running_var'],
                        training=mode)
Project: diracnets    Author: szagoruyko
def block(o, params, stats, base, mode, j):
    w = params[base + '.conv']
    alpha = params[base + '.alpha']
    beta = params[base + '.beta']
    delta = Variable(stats[size2name(w.size())])
    w = beta * F.normalize(w.view(w.size(0), -1)).view_as(w) + alpha * delta
    o = F.conv2d(ncrelu(o), w, stride=1, padding=1)
    o = batch_norm(o, params, stats, base + '.bn', mode)
    return o
Project: FewShotLearning    Author: gitabcworld
def forward(self, input_, time):
    self._check_input_dim(input_)
    # use per-time-step running statistics, clamping time at the last step
    if time >= self.max_length:
        time = self.max_length - 1
    running_mean = getattr(self, 'running_mean_{}'.format(time))
    running_var = getattr(self, 'running_var_{}'.format(time))
    return functional.batch_norm(
        input=input_, running_mean=running_mean, running_var=running_var,
        weight=self.weight, bias=self.bias, training=self.training,
        momentum=self.momentum, eps=self.eps)
Project: vnet.pytorch    Author: mattmacy
def forward(self, input):
    self._check_input_dim(input)
    # training is hard-coded to True, so batch statistics are always used
    return F.batch_norm(
        input, self.running_mean, self.running_var, self.weight, self.bias,
        True, self.momentum, self.eps)
Project: Recognizing-Textual-Entailment    Author: codedecde
def forward(self, input_, index):
    # clamp the index, then normalize with that step's running statistics
    if index >= self.max_len:
        index = self.max_len - 1
    self._check_input_dim(input_, index)
    running_mean = getattr(self, 'running_mean_{}'.format(index))
    running_var = getattr(self, 'running_var_{}'.format(index))
    return F.batch_norm(
        input_, running_mean, running_var, self.weight, self.bias,
        self.training, self.momentum, self.eps)
Project: benchmark    Author: pytorch
def forward(self, input_, time):
    self._check_input_dim(input_)
    if time >= self.max_length:
        time = self.max_length - 1
    running_mean = getattr(self, 'running_mean_{}'.format(time))
    running_var = getattr(self, 'running_var_{}'.format(time))
    return functional.batch_norm(
        input=input_, running_mean=running_mean, running_var=running_var,
        weight=self.weight, bias=self.bias, training=self.training,
        momentum=self.momentum, eps=self.eps)
Project: pytorch    Author: ezyang
def test_batchnorm_raises_error_if_running_mean_is_not_same_size_as_input(self):
    input = Variable(torch.rand(2, 10))
    running_var = torch.rand(10)
    wrong_sizes = [9, 11]
    for size in wrong_sizes:
        with self.assertRaises(RuntimeError):
            F.batch_norm(input, torch.rand(size), running_var)
Project: pytorch    Author: ezyang
def test_batchnorm_raises_error_if_running_var_is_not_same_size_as_input(self):
    input = Variable(torch.rand(2, 10))
    running_mean = torch.rand(10)
    wrong_sizes = [9, 11]
    for size in wrong_sizes:
        with self.assertRaises(RuntimeError):
            F.batch_norm(input, running_mean, torch.rand(size))
Project: pytorch    Author: ezyang
def test_batchnorm_raises_error_if_weight_is_not_same_size_as_input(self):
    input = Variable(torch.rand(2, 10))
    running_mean = torch.rand(10)
    running_var = torch.rand(10)
    wrong_sizes = [9, 11]
    for size in wrong_sizes:
        with self.assertRaises(RuntimeError):
            F.batch_norm(input, running_mean, running_var, weight=Parameter(torch.rand(size)))
Project: pytorch    Author: ezyang
def test_batchnorm_raises_error_if_bias_is_not_same_size_as_input(self):
    input = Variable(torch.rand(2, 10))
    running_mean = torch.rand(10)
    running_var = torch.rand(10)
    wrong_sizes = [9, 11]
    for size in wrong_sizes:
        with self.assertRaises(RuntimeError):
            F.batch_norm(input, running_mean, running_var, bias=Parameter(torch.rand(size)))
Project: SMASH    Author: ajbrock
def forward(self, input):
    # slice all parameters and buffers to the number of incoming channels
    return F.batch_norm(input,
                        running_mean=self.running_mean[:input.size(1)],
                        running_var=self.running_var[:input.size(1)],
                        weight=self.weight[:input.size(1)],
                        bias=self.bias[:input.size(1)],
                        training=self.training)

# A convenience wrapper to prevent the forward() method of SMASH from
# being annoyingly verbose. This version of a fully-connected layer simply 
# slices its weights according to the size of the incoming tensor.
# Note that the bias does not need slicing, as it's defined wrt the output dim.
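
For reference, here is a minimal sketch of the sliced fully-connected layer that the comment above describes; the class name SlicedLinear and the concrete sizes are assumptions for illustration, only the weight-slicing idea comes from the source comment:

import torch
import torch.nn as nn
import torch.nn.functional as F

class SlicedLinear(nn.Linear):
    # Hypothetical wrapper: slice the weight matrix along the input
    # dimension to match the incoming tensor. The bias is defined with
    # respect to the output dimension, so it needs no slicing.
    def forward(self, input):
        return F.linear(input, self.weight[:, :input.size(1)], self.bias)

layer = SlicedLinear(16, 8)        # accepts up to 16 input features
out = layer(torch.randn(4, 10))    # works with only 10 incoming features
print(out.shape)                   # torch.Size([4, 8])
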
Project: pyscatwave    Author: edouardoyallon
def f(o, params, stats, mode):
    o = F.batch_norm(o, running_mean=stats['bn.running_mean'],
                     running_var=stats['bn.running_var'],
                     weight=params['bn.weight'],
                     bias=params['bn.bias'], training=mode)
    o = F.conv2d(o, params['conv1.weight'], params['conv1.bias'])
    o = F.relu(o)
    o = o.view(o.size(0), -1)
    o = F.linear(o, params['linear2.weight'], params['linear2.bias'])
    o = F.relu(o)
    o = F.linear(o, params['linear3.weight'], params['linear3.bias'])
    return o
Project: efficient_densenet_pytorch    Author: gpleiss
def test_forward_eval_mode_computes_forward_pass():
    momentum = 0.1
    eps = 1e-5

    weight = torch.randn(10).cuda()
    bias = torch.randn(10).cuda()
    running_mean = torch.randn(10).cuda()
    running_var = torch.randn(10).abs().cuda()

    input_1 = torch.randn(4, 5).cuda()
    input_2 = torch.randn(4, 5).cuda()
    storage = torch.Storage(40).cuda()

    bn = F.batch_norm(
        input=Variable(torch.cat([input_1, input_2], dim=1)),
        running_mean=running_mean,
        running_var=running_var,
        weight=Parameter(weight),
        bias=Parameter(bias),
        training=False,
        momentum=momentum,
        eps=eps
    ).data

    input_efficient = torch.cat([input_1, input_2], dim=1)
    func = _EfficientBatchNorm(
        storage=storage,
        running_mean=running_mean,
        running_var=running_var,
        training=False,
        momentum=momentum,
        eps=eps
    )
    bn_efficient = func.forward(weight, bias, input_efficient)

    assert(almost_equal(bn, bn_efficient))
    assert(bn_efficient.storage().data_ptr() == storage.data_ptr())
Project: efficient_densenet_pytorch    Author: gpleiss
def test_forward_train_mode_computes_forward_pass():
    momentum = 0.1
    eps = 1e-5

    weight = torch.randn(10).cuda()
    bias = torch.randn(10).cuda()
    running_mean = torch.randn(10).cuda()
    running_var = torch.randn(10).abs().cuda()
    running_mean_efficient = running_mean.clone()
    running_var_efficient = running_var.clone()

    input_1 = torch.randn(4, 5).cuda()
    input_2 = torch.randn(4, 5).cuda()
    storage = torch.Storage(40).cuda()

    bn = F.batch_norm(
        input=Variable(torch.cat([input_1, input_2], dim=1)),
        running_mean=running_mean,
        running_var=running_var,
        weight=Parameter(weight),
        bias=Parameter(bias),
        training=True,
        momentum=momentum,
        eps=eps
    ).data

    input_efficient = torch.cat([input_1, input_2], dim=1)
    func = _EfficientBatchNorm(
        storage=storage,
        running_mean=running_mean_efficient,
        running_var=running_var_efficient,
        training=True,
        momentum=momentum,
        eps=eps
    )
    bn_efficient = func.forward(weight, bias, input_efficient)

    assert(almost_equal(bn, bn_efficient))
    assert(bn_efficient.storage().data_ptr() == storage.data_ptr())
    assert(almost_equal(running_mean, running_mean_efficient))
    assert(almost_equal(running_var, running_var_efficient))
Project: attention-transfer    Author: szagoruyko
def batch_norm(x, params, stats, base, mode):
    return F.batch_norm(x, weight=params[base + '.weight'],
                        bias=params[base + '.bias'],
                        running_mean=stats[base + '.running_mean'],
                        running_var=stats[base + '.running_var'],
                        training=mode)
Project: pytorch    Author: pytorch
def test_batchnorm_raises_error_if_running_mean_is_not_same_size_as_input(self):
    input = Variable(torch.rand(2, 10))
    running_var = torch.rand(10)
    wrong_sizes = [9, 11]
    for size in wrong_sizes:
        with self.assertRaises(RuntimeError):
            F.batch_norm(input, torch.rand(size), running_var)
Project: pytorch    Author: pytorch
def test_batchnorm_raises_error_if_running_var_is_not_same_size_as_input(self):
    input = Variable(torch.rand(2, 10))
    running_mean = torch.rand(10)
    wrong_sizes = [9, 11]
    for size in wrong_sizes:
        with self.assertRaises(RuntimeError):
            F.batch_norm(input, running_mean, torch.rand(size))
Project: pytorch    Author: pytorch
def test_batchnorm_raises_error_if_weight_is_not_same_size_as_input(self):
    input = Variable(torch.rand(2, 10))
    running_mean = torch.rand(10)
    running_var = torch.rand(10)
    wrong_sizes = [9, 11]
    for size in wrong_sizes:
        with self.assertRaises(RuntimeError):
            F.batch_norm(input, running_mean, running_var, weight=Parameter(torch.rand(size)))
Project: pytorch    Author: pytorch
def test_batchnorm_raises_error_if_bias_is_not_same_size_as_input(self):
    input = Variable(torch.rand(2, 10))
    running_mean = torch.rand(10)
    running_var = torch.rand(10)
    wrong_sizes = [9, 11]
    for size in wrong_sizes:
        with self.assertRaises(RuntimeError):
            F.batch_norm(input, running_mean, running_var, bias=Parameter(torch.rand(size)))
Project: Rocket-Launching    Author: zhougr1993
def batch_norm(x, params, stats, base, mode):
    return F.batch_norm(x, weight=params[base + '.weight'],
                        bias=params[base + '.bias'],
                        running_mean=stats[base + '.running_mean'],
                        running_var=stats[base + '.running_var'],
                        training=mode)
Project: jaccardSegment    Author: bermanmaxim
def batch_normalization(self, input,  # other arguments are ignored
                        name, is_training, activation_fn=None, scale=True, eps=0.001):
    # training defaults to False in F.batch_norm, so the stored moving
    # statistics are applied (inference behaviour)
    output = F.batch_norm(input, self.weights[name + '/moving_mean'], self.weights[name + '/moving_variance'],
                          weight=self.weights[name + '/gamma'], bias=self.weights[name + '/beta'], eps=eps)
    if activation_fn is not None:
        if activation_fn == 'relu':
            output = F.relu(output)
        else:
            raise NotImplementedError
    return output
Project: efficient_densenet_pytorch    Author: gpleiss
def test_backward_train_mode_computes_forward_pass():
    momentum = 0.1
    eps = 1e-5

    weight = torch.randn(10).cuda()
    bias = torch.randn(10).cuda()
    running_mean = torch.randn(10).cuda()
    running_var = torch.randn(10).abs().cuda()
    weight_efficient = weight.clone()
    bias_efficient = bias.clone()
    running_mean_efficient = running_mean.clone()
    running_var_efficient = running_var.clone()

    input_1 = torch.randn(4, 5).cuda()
    input_2 = torch.randn(4, 5).cuda()
    storage = torch.Storage(40).cuda()

    input_var = Variable(torch.cat([input_1, input_2], dim=1), requires_grad=True)
    weight_var = Parameter(weight)
    bias_var = Parameter(bias)
    bn_var = F.batch_norm(
        input=input_var,
        running_mean=running_mean,
        running_var=running_var,
        weight=weight_var,
        bias=bias_var,
        training=True,
        momentum=momentum,
        eps=eps
    )
    bn = bn_var.data
    bn_var.backward(gradient=input_var.data.clone().fill_(1))
    input_grad = input_var.grad.data
    weight_grad = weight_var.grad.data
    bias_grad = bias_var.grad.data

    input_efficient = torch.cat([input_1, input_2], dim=1)
    input_efficient_orig = input_efficient.clone()
    func = _EfficientBatchNorm(
        storage=storage,
        running_mean=running_mean_efficient,
        running_var=running_var_efficient,
        training=True,
        momentum=momentum,
        eps=eps
    )
    bn_efficient = func.forward(weight_efficient, bias_efficient, input_efficient)
    grad_out_efficient = bn_efficient.clone().fill_(1)
    weight_grad_efficient, bias_grad_efficient, input_grad_efficient = func.backward(
            weight_efficient, bias_efficient, input_efficient_orig, grad_out_efficient)

    assert(almost_equal(bn, bn_efficient))
    assert(grad_out_efficient.storage().data_ptr() == input_grad_efficient.storage().data_ptr())
    assert(almost_equal(input_grad, input_grad_efficient))
    assert(almost_equal(weight_grad, weight_grad_efficient))
    assert(almost_equal(bias_grad, bias_grad_efficient))