Python chainer.functions module: ReLU() code examples

The following 6 code examples, extracted from open-source Python projects, illustrate how to use chainer.functions.ReLU().
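As a minimal usage sketch before the project extracts: the class-style F.ReLU() instantiation seen below reflects the older Chainer v1 API, while current Chainer exposes the functional form F.relu, which behaves the same way.

import numpy as np
import chainer.functions as F

x = np.array([[-1.0, 0.0, 2.0]], dtype=np.float32)
y = F.relu(x)   # elementwise max(0, x); accepts an ndarray, returns a Variable
print(y.data)   # -> [[0. 0. 2.]]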

Project: chainer-faster-rcnn    Author: mitmul    | Project source | File source
def __init__(self, train=False):
        super(VGG16, self).__init__()
        # (name, link/function) pairs; '_'-prefixed names are parameter-free
        # functions (ReLU, pooling) and are not registered as links below.
        self.trunk = [
            ('conv1_1', L.Convolution2D(3, 64, 3, 1, 1)),
            ('_relu1_1', F.ReLU()),
            ('conv1_2', L.Convolution2D(64, 64, 3, 1, 1)),
            ('_relu1_2', F.ReLU()),
            ('_pool1', F.MaxPooling2D(2, 2)),
            ('conv2_1', L.Convolution2D(64, 128, 3, 1, 1)),
            ('_relu2_1', F.ReLU()),
            ('conv2_2', L.Convolution2D(128, 128, 3, 1, 1)),
            ('_relu2_2', F.ReLU()),
            ('_pool2', F.MaxPooling2D(2, 2)),
            ('conv3_1', L.Convolution2D(128, 256, 3, 1, 1)),
            ('_relu3_1', F.ReLU()),
            ('conv3_2', L.Convolution2D(256, 256, 3, 1, 1)),
            ('_relu3_2', F.ReLU()),
            ('conv3_3', L.Convolution2D(256, 256, 3, 1, 1)),
            ('_relu3_3', F.ReLU()),
            ('_pool3', F.MaxPooling2D(2, 2)),
            ('conv4_1', L.Convolution2D(256, 512, 3, 1, 1)),
            ('_relu4_1', F.ReLU()),
            ('conv4_2', L.Convolution2D(512, 512, 3, 1, 1)),
            ('_relu4_2', F.ReLU()),
            ('conv4_3', L.Convolution2D(512, 512, 3, 1, 1)),
            ('_relu4_3', F.ReLU()),
            ('_pool4', F.MaxPooling2D(2, 2)),
            ('conv5_1', L.Convolution2D(512, 512, 3, 1, 1)),
            ('_relu5_1', F.ReLU()),
            ('conv5_2', L.Convolution2D(512, 512, 3, 1, 1)),
            ('_relu5_2', F.ReLU()),
            ('conv5_3', L.Convolution2D(512, 512, 3, 1, 1)),
            ('_relu5_3', F.ReLU()),
        ]
        for name, link in self.trunk:
            if not name.startswith('_'):
                self.add_link(name, link)  # register only parameterized links
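The extract registers only the convolution links; a forward pass is not part of it, but a hypothetical sketch of how the trunk list could be consumed (calling stored function objects directly and registered links via getattr) might look like this:

def __call__(self, x):
        for name, f in self.trunk:
            if name.startswith('_'):
                x = f(x)                    # ReLU / pooling function object
            else:
                x = getattr(self, name)(x)  # registered convolution link
        return x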
Project: chainer-deconv    Author: germanRos    | Project source | File source
def check_backward(self, x_data, y_grad, use_cudnn=True):
        # Numerically verify ReLU's analytic gradient against finite differences.
        gradient_check.check_backward(
            functions.ReLU(use_cudnn), x_data, y_grad,
            **self.check_backward_options)
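A hypothetical invocation of this check, assuming the Chainer v1-era API in which functions.ReLU accepts a use_cudnn flag (shapes are illustrative):

import numpy
from chainer import functions, gradient_check

x = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
# ReLU is not differentiable at 0; nudge values away from it so the
# numerical gradient stays stable.
x[(-0.1 < x) & (x < 0.1)] = 0.5
gy = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
gradient_check.check_backward(functions.ReLU(use_cudnn=False), x, gy)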
Project: deel    Author: uei    | Project source | File source
def __init__(self, train=False):
        super(VGG16, self).__init__()
        self.trunk = [
            ('conv1_1', L.Convolution2D(3, 64, 3, 1, 1)),
            ('relu1_1', F.ReLU()),
            ('conv1_2', L.Convolution2D(64, 64, 3, 1, 1)),
            ('relu1_2', F.ReLU()),
            ('pool1', F.MaxPooling2D(2, 2)),
            ('conv2_1', L.Convolution2D(64, 128, 3, 1, 1)),
            ('relu2_1', F.ReLU()),
            ('conv2_2', L.Convolution2D(128, 128, 3, 1, 1)),
            ('relu2_2', F.ReLU()),
            ('pool2', F.MaxPooling2D(2, 2)),
            ('conv3_1', L.Convolution2D(128, 256, 3, 1, 1)),
            ('relu3_1', F.ReLU()),
            ('conv3_2', L.Convolution2D(256, 256, 3, 1, 1)),
            ('relu3_2', F.ReLU()),
            ('conv3_3', L.Convolution2D(256, 256, 3, 1, 1)),
            ('relu3_3', F.ReLU()),
            ('pool3', F.MaxPooling2D(2, 2)),
            ('conv4_1', L.Convolution2D(256, 512, 3, 1, 1)),
            ('relu4_1', F.ReLU()),
            ('conv4_2', L.Convolution2D(512, 512, 3, 1, 1)),
            ('relu4_2', F.ReLU()),
            ('conv4_3', L.Convolution2D(512, 512, 3, 1, 1)),
            ('relu4_3', F.ReLU()),
            ('pool4', F.MaxPooling2D(2, 2)),
            ('conv5_1', L.Convolution2D(512, 512, 3, 1, 1)),
            ('relu5_1', F.ReLU()),
            ('conv5_2', L.Convolution2D(512, 512, 3, 1, 1)),
            ('relu5_2', F.ReLU()),
            ('conv5_3', L.Convolution2D(512, 512, 3, 1, 1)),
            ('relu5_3', F.ReLU()),
            ('rpn_conv_3x3', L.Convolution2D(512, 512, 3, 1, 1)),
            ('rpn_relu_3x3', F.ReLU()),
        ]
        for name, link in self.trunk:
            if 'conv' in name:
                self.add_link(name, link)
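Note that unlike the chainer-faster-rcnn example above, the ReLU and pooling entries here carry no leading underscore; registration instead keys on names containing 'conv', which also picks up the rpn_conv_3x3 layer feeding the region proposal network.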
Project: cgp-cnn    Author: sg-nm    | Project source | File source
def __call__(self, x, train):
        param_num = 0
        for name, f in self.forward:
            if 'conv1' in name:
                x = getattr(self, name)(x)
                # weights: out_ch * kh * kw * in_ch, plus out_ch biases
                param_num += (f.W.shape[0]*f.W.shape[2]*f.W.shape[3]*f.W.shape[1]+f.W.shape[0])
            elif 'bn1' in name:
                x = getattr(self, name)(x, not train)
                param_num += x.data.shape[1]*2  # BatchNormalization: gamma and beta per channel
        return (F.relu(x), param_num)


# [(CONV -> Batch -> ReLU -> CONV -> Batch) + (x)]
Project: cgp-cnn    Author: sg-nm    | Project source | File source
def __init__(self, ksize, n_out, initializer):
        super(ResBlock, self).__init__()
        pad_size = ksize // 2
        links  = [('conv1', L.Convolution2D(None, n_out, ksize, pad=pad_size, initialW=initializer))]
        links += [('bn1', L.BatchNormalization(n_out))]
        links += [('_act1', F.ReLU())]
        links += [('conv2', L.Convolution2D(n_out, n_out, ksize, pad=pad_size, initialW=initializer))]
        links += [('bn2', L.BatchNormalization(n_out))]
        for link in links:
            if not link[0].startswith('_'):
                self.add_link(*link)  # '_act1' (ReLU) has no parameters, so it is skipped
        self.forward = links
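The extract stops at the constructor; a forward pass implementing the commented pattern above, [(CONV -> Batch -> ReLU -> CONV -> Batch) + (x)], might look like this hypothetical sketch (assuming the v1-style positional test flag used in the __call__ example above, and matching channel counts for the residual sum):

def __call__(self, x, train):
        h = self.conv1(x)
        h = self.bn1(h, not train)  # second positional arg is the v1 'test' flag
        h = F.relu(h)
        h = self.conv2(h)
        h = self.bn2(h, not train)
        return h + x                # residual sum: the '+ (x)' in the comment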
Project: chainer-cifar    Author: dsanno    | Project source | File source
def __init__(self, depth=18, alpha=16, start_channel=16, skip=False):
        super(PyramidNet, self).__init__()
        channel_diff = float(alpha) / depth
        channel = start_channel
        links = [('bconv1', BatchConv2D(3, channel, 3, 1, 1))]
        skip_size = depth * 3 - 3
        for i in six.moves.range(depth):
            if skip:
                skip_ratio = float(i) / skip_size * 0.5
            else:
                skip_ratio = 0
            in_channel = channel
            channel += channel_diff
            links.append(('py{}'.format(len(links)), PyramidBlock(int(round(in_channel)), int(round(channel)), skip_ratio=skip_ratio)))
        in_channel = channel
        channel += channel_diff
        links.append(('py{}'.format(len(links)), PyramidBlock(int(round(in_channel)), int(round(channel)), stride=2)))
        for i in six.moves.range(depth - 1):
            if skip:
                skip_ratio = float(i + depth) / skip_size * 0.5
            else:
                skip_ratio = 0
            in_channel = channel
            channel += channel_diff
            links.append(('py{}'.format(len(links)), PyramidBlock(int(round(in_channel)), int(round(channel)), skip_ratio=skip_ratio)))
        in_channel = channel
        channel += channel_diff
        links.append(('py{}'.format(len(links)), PyramidBlock(int(round(in_channel)), int(round(channel)), stride=2)))
        for i in six.moves.range(depth - 1):
            if skip:
                skip_ratio = float(i + depth * 2 - 1) / skip_size * 0.5
            else:
                skip_ratio = 0
            in_channel = channel
            channel += channel_diff
            links.append(('py{}'.format(len(links)), PyramidBlock(int(round(in_channel)), int(round(channel)), skip_ratio=skip_ratio)))
        links.append(('bn{}'.format(len(links)), L.BatchNormalization(int(round(channel)))))
        links.append(('_relu{}'.format(len(links)), F.ReLU()))
        links.append(('_apool{}'.format(len(links)), F.AveragePooling2D(8, 1, 0, False)))
        links.append(('fc{}'.format(len(links)), L.Linear(int(round(channel)), 10)))

        for name, f in links:
            if not name.startswith('_'):
                self.add_link(name, f)  # '_relu'/'_apool' entries are functions, not links
        self.layers = links
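For the default arguments (depth=18, alpha=16, start_channel=16), channel_diff = 16/18 ≈ 0.89, and the three loops plus the two strided blocks append 3·depth = 54 PyramidBlocks in total, so the width grows roughly linearly from 16 to start_channel + 3·alpha = 64 channels before the final BatchNormalization, ReLU, 8×8 average pooling, and 10-way linear classifier.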