Python chainer.links module: Deconvolution2D() example source code

We extracted the following 47 code examples from open-source Python projects to illustrate how to use chainer.links.Deconvolution2D().
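
Most of the examples below call L.Deconvolution2D(in_channels, out_channels, ksize, stride, pad), very often with ksize=4, stride=2, pad=1 to upsample feature maps by a factor of two. As a quick orientation, here is a minimal sketch (not taken from any of the projects listed here) showing that call together with the output-size relation out = stride * (in - 1) + ksize - 2 * pad:

import numpy as np
import chainer.links as L

# 64 -> 32 channels, kernel 4, stride 2, pad 1: a typical 2x upsampling layer.
deconv = L.Deconvolution2D(64, 32, 4, stride=2, pad=1)

x = np.zeros((1, 64, 16, 16), dtype=np.float32)  # NCHW input
y = deconv(x)
print(y.shape)  # (1, 32, 32, 32), since 2 * (16 - 1) + 4 - 2 * 1 = 32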

Project: chainer-fast-neuralstyle    Author: yusuketomoto    | project source | file source
def __init__(self):
        super(FastStyleNet, self).__init__(
            c1=L.Convolution2D(3, 32, 9, stride=1, pad=4),
            c2=L.Convolution2D(32, 64, 4, stride=2, pad=1),
            c3=L.Convolution2D(64, 128, 4, stride=2, pad=1),
            r1=ResidualBlock(128, 128),
            r2=ResidualBlock(128, 128),
            r3=ResidualBlock(128, 128),
            r4=ResidualBlock(128, 128),
            r5=ResidualBlock(128, 128),
            d1=L.Deconvolution2D(128, 64, 4, stride=2, pad=1),
            d2=L.Deconvolution2D(64, 32, 4, stride=2, pad=1),
            d3=L.Deconvolution2D(32, 3, 9, stride=1, pad=4),
            b1=L.BatchNormalization(32),
            b2=L.BatchNormalization(64),
            b3=L.BatchNormalization(128),
            b4=L.BatchNormalization(64),
            b5=L.BatchNormalization(32),
        )
Project: tensorboard-pytorch    Author: lanpa    | project source | file source
def __init__(self, n_hidden, bottom_width=4, ch=512, wscale=0.02):
        super(Generator, self).__init__()
        self.n_hidden = n_hidden
        self.ch = ch
        self.bottom_width = bottom_width

        with self.init_scope():
            w = chainer.initializers.Normal(wscale)
            self.l0 = L.Linear(self.n_hidden, bottom_width * bottom_width * ch,
                               initialW=w)
            self.dc1 = L.Deconvolution2D(ch, ch // 2, 4, 2, 1, initialW=w)
            self.dc2 = L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=w)
            self.dc3 = L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=w)
            self.dc4 = L.Deconvolution2D(ch // 8, 3, 3, 1, 1, initialW=w)
            self.bn0 = L.BatchNormalization(bottom_width * bottom_width * ch)
            self.bn1 = L.BatchNormalization(ch // 2)
            self.bn2 = L.BatchNormalization(ch // 4)
            self.bn3 = L.BatchNormalization(ch // 8)
Project: chainer-deconv    Author: germanRos    | project source | file source
def setUp(self):
        self.link = L.Deconvolution2D(
            self.in_channels, self.out_channels, self.ksize,
            stride=self.stride, pad=self.pad, nobias=self.nobias)
        self.link.W.data[...] = numpy.random.uniform(
            -1, 1, self.link.W.data.shape).astype(numpy.float32)
        if not self.nobias:
            self.link.b.data[...] = numpy.random.uniform(
                -1, 1, self.link.b.data.shape).astype(numpy.float32)

        self.link.zerograds()

        N = 2
        h, w = 3, 2
        kh, kw = _pair(self.ksize)
        out_h = conv.get_deconv_outsize(h, kh, self.stride, self.pad)
        out_w = conv.get_deconv_outsize(w, kw, self.stride, self.pad)
        self.gy = numpy.random.uniform(
            -1, 1, (N, self.out_channels, out_h, out_w)).astype(numpy.float32)
        self.x = numpy.random.uniform(
            -1, 1, (N, self.in_channels, h, w)).astype(numpy.float32)
Project: chainermn    Author: chainer    | project source | file source
def __init__(self, n_hidden, bottom_width=4, ch=512, wscale=0.02):
        super(Generator, self).__init__()
        self.n_hidden = n_hidden
        self.ch = ch
        self.bottom_width = bottom_width

        with self.init_scope():
            w = chainer.initializers.Normal(wscale)
            self.l0 = L.Linear(self.n_hidden, bottom_width * bottom_width * ch,
                               initialW=w)
            self.dc1 = L.Deconvolution2D(ch, ch // 2, 4, 2, 1, initialW=w)
            self.dc2 = L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=w)
            self.dc3 = L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=w)
            self.dc4 = L.Deconvolution2D(ch // 8, 3, 3, 1, 1, initialW=w)
            self.bn0 = L.BatchNormalization(bottom_width * bottom_width * ch)
            self.bn1 = L.BatchNormalization(ch // 2)
            self.bn2 = L.BatchNormalization(ch // 4)
            self.bn3 = L.BatchNormalization(ch // 8)
Project: chainer-examples    Author: nocotan    | project source | file source
def __init__(self, n_hidden=128, bottom_width=4, ch=512, wscale=0.02):
        super(Generator, self).__init__()
        self.n_hidden = n_hidden
        self.ch = ch
        self.bottom_width = bottom_width

        with self.init_scope():
            w = chainer.initializers.Normal(wscale)
            self.l0 = L.Linear(self.n_hidden, bottom_width*bottom_width*ch,
                               initialW=w)
            self.dc1 = L.Deconvolution2D(ch, ch//2, 4, 2, 1, initialW=w)
            self.dc2 = L.Deconvolution2D(ch//2, ch//4, 4, 2, 1, initialW=w)
            self.dc3 = L.Deconvolution2D(ch//4, ch//8, 4, 2, 1, initialW=w)
            self.dc4 = L.Deconvolution2D(ch//8, 3, 3, 1, 1, initialW=w)
            self.bn0 = L.BatchNormalization(bottom_width*bottom_width*ch)
            self.bn1 = L.BatchNormalization(ch//2)
            self.bn2 = L.BatchNormalization(ch//4)
            self.bn3 = L.BatchNormalization(ch//8)
Project: chainer-examples    Author: nocotan    | project source | file source
def __init__(self, n_hidden=128, bottom_width=4, ch=512, wscale=0.02):
        super(Generator, self).__init__()
        self.n_hidden = n_hidden
        self.ch = ch
        self.bottom_width = bottom_width

        with self.init_scope():
            w = chainer.initializers.Normal(wscale)
            self.l0 = L.Linear(self.n_hidden, bottom_width*bottom_width*ch,
                               initialW=w)
            self.dc1 = L.Deconvolution2D(ch, ch//2, 4, 2, 1, initialW=w)
            self.dc2 = L.Deconvolution2D(ch//2, ch//4, 4, 2, 1, initialW=w)
            self.dc3 = L.Deconvolution2D(ch//4, ch//8, 4, 2, 1, initialW=w)
            self.dc4 = L.Deconvolution2D(ch//8, 3, 3, 1, 1, initialW=w)
            self.bn0 = L.BatchNormalization(bottom_width*bottom_width*ch)
            self.bn1 = L.BatchNormalization(ch//2)
            self.bn2 = L.BatchNormalization(ch//4)
            self.bn3 = L.BatchNormalization(ch//8)
Project: chainer-image-generation    Author: fukuta0614    | project source | file source
def __init__(self, n_hidden, activate='sigmoid', size=64, ch=512, wscale=0.02):
        assert (size % 16 == 0)
        initial_size = size // 16
        self.n_hidden = n_hidden
        if activate == 'sigmoid':
            self.activate = F.sigmoid
        elif activate == 'tanh':
            self.activate = F.tanh
        else:
            raise ValueError('invalid activate function')
        self.ch = ch
        self.initial_size = initial_size
        w = chainer.initializers.Normal(wscale)
        super(Generator, self).__init__(
            l0=L.Linear(self.n_hidden, initial_size * initial_size * ch, initialW=w),
            dc1=L.Deconvolution2D(ch // 1, ch // 2, 4, 2, 1, initialW=w),
            dc2=L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=w),
            dc3=L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=w),
            dc4=L.Deconvolution2D(ch // 8, 3, 4, 2, 1, initialW=w),
            bn0=L.BatchNormalization(initial_size * initial_size * ch),
            bn1=L.BatchNormalization(ch // 2),
            bn2=L.BatchNormalization(ch // 4),
            bn3=L.BatchNormalization(ch // 8),
        )
Project: chainer-image-generation    Author: fukuta0614    | project source | file source
def __init__(self, n_hidden, activate='sigmoid', size=64, ch=512, wscale=0.02):
        assert (size % 8 == 0)
        initial_size = size // 8
        self.n_hidden = n_hidden
        self.ch = ch
        self.initial_size = initial_size
        if activate == 'sigmoid':
            self.activate = F.sigmoid
        elif activate == 'tanh':
            self.activate = F.tanh
        else:
            raise ValueError('invalid activate function')
        w = chainer.initializers.Normal(wscale)
        super(Generator2, self).__init__(
            l0=L.Linear(self.n_hidden, initial_size * initial_size * ch, initialW=w),
            dc1=L.Deconvolution2D(ch // 1, ch // 2, 4, 2, 1, initialW=w),
            dc2=L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=w),
            dc3=L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=w),
            dc4=L.Deconvolution2D(ch // 8, 3, 3, 1, 1, initialW=w),
            bn0=L.BatchNormalization(initial_size * initial_size * ch),
            bn1=L.BatchNormalization(ch // 2),
            bn2=L.BatchNormalization(ch // 4),
            bn3=L.BatchNormalization(ch // 8),
        )
Project: chainer-image-generation    Author: fukuta0614    | project source | file source
def __init__(self, n_hidden, activate='sigmoid', size=64, ch=512, wscale=0.02):
        assert (size % 8 == 0)
        initial_size = size // 8
        self.n_hidden = n_hidden
        if activate == 'sigmoid':
            self.activate = F.sigmoid
        elif activate == 'tanh':
            self.activate = F.tanh
        else:
            raise ValueError('invalid activate function')
        self.ch = ch
        self.initial_size = initial_size
        w = chainer.initializers.Normal(wscale)
        super(Generator, self).__init__(
            l0=L.Linear(self.n_hidden, initial_size * initial_size * ch, initialW=w),
            dc1=L.Deconvolution2D(ch // 1, ch // 2, 4, 2, 1, initialW=w),
            dc2=L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=w),
            dc3=L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=w),
            dc4=L.Deconvolution2D(ch // 8, 3, 3, 1, 1, initialW=w),
        )
Project: chainer-image-generation    Author: fukuta0614    | project source | file source
def __init__(self, density=1, size=64, latent_size=128, channel=3):
        assert (size % 16 == 0)
        initial_size = size // 16  # integer division so the linear layer size stays an int
        super(Generator, self).__init__(
            g1=L.Linear(latent_size, initial_size * initial_size * 256 * density, wscale=0.02 * math.sqrt(latent_size)),
            norm1=L.BatchNormalization(initial_size * initial_size * 256 * density),
            g2=L.Deconvolution2D(256 * density, 128 * density, 4, stride=2, pad=1,
                                 wscale=0.02 * math.sqrt(4 * 4 * 256 * density)),
            norm2=L.BatchNormalization(128 * density),
            g3=L.Deconvolution2D(128 * density, 64 * density, 4, stride=2, pad=1,
                                 wscale=0.02 * math.sqrt(4 * 4 * 128 * density)),
            norm3=L.BatchNormalization(64 * density),
            g4=L.Deconvolution2D(64 * density, 32 * density, 4, stride=2, pad=1,
                                 wscale=0.02 * math.sqrt(4 * 4 * 64 * density)),
            norm4=L.BatchNormalization(32 * density),
            g5=L.Deconvolution2D(32 * density, channel, 4, stride=2, pad=1,
                                 wscale=0.02 * math.sqrt(4 * 4 * 32 * density)),
        )
        self.density = density
        self.latent_size = latent_size
        self.initial_size = initial_size
Project: chainer-image-generation    Author: fukuta0614    | project source | file source
def __init__(self, density=1, size=64, latent_size=100, channel=3):
        assert (size % 16 == 0)
        initial_size = size // 16  # integer division so the linear layer size stays an int
        super(Generator_origin, self).__init__(
            g1=L.Linear(latent_size, initial_size * initial_size * 256 * density, wscale=0.02 * math.sqrt(latent_size)),
            norm1=L.BatchNormalization(initial_size * initial_size * 256 * density),
            g2=L.Deconvolution2D(256 * density, 128 * density, 4, stride=2, pad=1,
                                 wscale=0.02 * math.sqrt(4 * 4 * 256 * density)),
            norm2=L.BatchNormalization(128 * density),
            g3=L.Deconvolution2D(128 * density, 64 * density, 4, stride=2, pad=1,
                                 wscale=0.02 * math.sqrt(4 * 4 * 128 * density)),
            norm3=L.BatchNormalization(64 * density),
            g4=L.Deconvolution2D(64 * density, 32 * density, 4, stride=2, pad=1,
                                 wscale=0.02 * math.sqrt(4 * 4 * 64 * density)),
            norm4=L.BatchNormalization(32 * density),
            g5=L.Deconvolution2D(32 * density, channel, 4, stride=2, pad=1,
                                 wscale=0.02 * math.sqrt(4 * 4 * 32 * density)),
        )
        self.density = density
        self.latent_size = latent_size
        self.initial_size = initial_size
Project: chainer-image-generation    Author: fukuta0614    | project source | file source
def __init__(self, ch0, ch1, bn=True, sample='down', activation=F.relu, dropout=False, noise=False):
        self.bn = bn
        self.activation = activation
        self.dropout = dropout
        self.noise = noise
        layers = {}
        w = chainer.initializers.Normal(0.02)
        if sample == 'down':
            layers['c'] = L.Convolution2D(ch0, ch1, 4, 2, 1, initialW=w)
        elif sample == 'up':
            layers['c'] = L.Deconvolution2D(ch0, ch1, 4, 2, 1, initialW=w)
        elif sample == 'c7s1':
            layers['c'] = L.Convolution2D(ch0, ch1, 7, 1, 3, initialW=w)
        if bn:
            if self.noise:
                layers['batchnorm'] = L.BatchNormalization(ch1, use_gamma=False)
            else:
                layers['batchnorm'] = L.BatchNormalization(ch1)
        super(CBR, self).__init__(**layers)
Project: chainer-image-generation    Author: fukuta0614    | project source | file source
def __init__(self, ch=128, wscale=0.02):
        w = chainer.initializers.Normal(wscale)
        super(GeneratorOld, self).__init__(
            conv1=L.Convolution2D(3, ch // 4, 5, 1, 2, initialW=w),
            conv2=L.Convolution2D(ch // 4, ch // 2, 3, 2, 1, initialW=w),
            conv3=L.Convolution2D(ch // 2, ch, 3, 2, 1, initialW=w),
            res1=ResBlock(ch, ch, bn=False),
            res2=ResBlock(ch, ch, bn=False),
            res3=ResBlock(ch, ch, bn=False),
            res4=ResBlock(ch, ch, bn=False),
            res5=ResBlock(ch, ch, bn=False),
            res6=ResBlock(ch, ch, bn=False),
            res7=ResBlock(ch, ch, bn=False),
            res8=ResBlock(ch, ch, bn=False),
            res9=ResBlock(ch, ch, bn=False),
            dc1=L.Deconvolution2D(ch, ch // 2, 4, 2, 1, initialW=w),
            dc2=L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=w),
            dc3=L.Convolution2D(ch // 4, 3, 5, 1, 2, initialW=w),
        )

    # noinspection PyCallingNonCallable,PyUnresolvedReferences
Project: Semantic-Segmentation-using-Adversarial-Networks    Author: oyam    | project source | file source
def __init__(self, n_class=21):
        self.train=True
        super(FCN32s, self).__init__(
            conv1_1=L.Convolution2D(3, 64, 3, stride=1, pad=100),
            conv1_2=L.Convolution2D(64, 64, 3, stride=1, pad=1),
            conv2_1=L.Convolution2D(64, 128, 3, stride=1, pad=1),
            conv2_2=L.Convolution2D(128, 128, 3, stride=1, pad=1),
            conv3_1=L.Convolution2D(128, 256, 3, stride=1, pad=1),
            conv3_2=L.Convolution2D(256, 256, 3, stride=1, pad=1),
            conv3_3=L.Convolution2D(256, 256, 3, stride=1, pad=1),
            conv4_1=L.Convolution2D(256, 512, 3, stride=1, pad=1),
            conv4_2=L.Convolution2D(512, 512, 3, stride=1, pad=1),
            conv4_3=L.Convolution2D(512, 512, 3, stride=1, pad=1),
            conv5_1=L.Convolution2D(512, 512, 3, stride=1, pad=1),
            conv5_2=L.Convolution2D(512, 512, 3, stride=1, pad=1),
            conv5_3=L.Convolution2D(512, 512, 3, stride=1, pad=1),
            fc6=L.Convolution2D(512, 4096, 7, stride=1, pad=0),
            fc7=L.Convolution2D(4096, 4096, 1, stride=1, pad=0),
            score_fr=L.Convolution2D(4096, n_class, 1, stride=1, pad=0,
                nobias=True, initialW=np.zeros((n_class, 4096, 1, 1))),
            upscore=L.Deconvolution2D(n_class, n_class, 64, stride=32, pad=0,
                nobias=True, initialW=f.bilinear_interpolation_kernel(n_class, n_class, ksize=64)),
        )
Project: cv-api    Author: yasunorikudo    | project source | file source
def __init__(self):
        super(FastStyleNet, self).__init__(
            c1=L.Convolution2D(3, 32, 9, stride=1, pad=4),
            c2=L.Convolution2D(32, 64, 4, stride=2, pad=1),
            c3=L.Convolution2D(64, 128, 4, stride=2, pad=1),
            r1=ResidualBlock(128, 128),
            r2=ResidualBlock(128, 128),
            r3=ResidualBlock(128, 128),
            r4=ResidualBlock(128, 128),
            r5=ResidualBlock(128, 128),
            d1=L.Deconvolution2D(128, 64, 4, stride=2, pad=1),
            d2=L.Deconvolution2D(64, 32, 4, stride=2, pad=1),
            d3=L.Deconvolution2D(32, 3, 9, stride=1, pad=4),
            b1=L.BatchNormalization(32),
            b2=L.BatchNormalization(64),
            b3=L.BatchNormalization(128),
            b4=L.BatchNormalization(64),
            b5=L.BatchNormalization(32),
        )
Project: GAN    Author: lyakaap    | project source | file source
def __init__(self, n_hidden, bottom_width=4, ch=512, wscale=0.02):
        super(Generator, self).__init__()
        self.n_hidden = n_hidden
        self.ch = ch
        self.bottom_width = bottom_width

        with self.init_scope():
            w = chainer.initializers.Normal(wscale)
            self.l0 = L.Linear(self.n_hidden, bottom_width * bottom_width * ch,
                               initialW=w)
            self.dc1 = L.Deconvolution2D(ch, ch // 2, 4, 2, 1, initialW=w)
            self.dc2 = L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=w)
            self.dc3 = L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=w)
            self.dc4 = L.Deconvolution2D(ch // 8, 3, 3, 1, 1, initialW=w)
            self.bn0 = L.BatchNormalization(bottom_width * bottom_width * ch)
            self.bn1 = L.BatchNormalization(ch // 2)
            self.bn2 = L.BatchNormalization(ch // 4)
            self.bn3 = L.BatchNormalization(ch // 8)
Project: chainer-visualization    Author: hvy    | project source | file source
def check_add_deconv_layers(self, nobias=True):

        """Add a deconvolutional layer for each convolutional layer already
        defined in the network."""

        if len(self.deconv_blocks) == len(self.conv_blocks):
            return

        for conv_block in self.conv_blocks:
            deconv_block = []
            for conv in conv_block:
                out_channels, in_channels, kh, kw = conv.W.data.shape

                if isinstance(conv.W.data, cuda.ndarray):
                    initialW = cuda.cupy.asnumpy(conv.W.data)
                else:
                    initialW = conv.W.data

                deconv = L.Deconvolution2D(out_channels, in_channels,
                                           (kh, kw), stride=conv.stride,
                                           pad=conv.pad,
                                           initialW=initialW,
                                           nobias=nobias)

                if isinstance(conv.W.data, cuda.ndarray):
                    deconv.to_gpu()

                self.add_link('de{}'.format(conv.name), deconv)
                deconv_block.append(deconv)

            self.deconv_blocks.append(deconv_block)
Project: fcn    Author: wkentaro    | project source | file source
def __init__(self, n_class=21):
        self.n_class = n_class
        kwargs = {
            'initialW': chainer.initializers.Zero(),
            'initial_bias': chainer.initializers.Zero(),
        }
        super(FCN16s, self).__init__()
        with self.init_scope():
            self.conv1_1 = L.Convolution2D(3, 64, 3, 1, 100, **kwargs)
            self.conv1_2 = L.Convolution2D(64, 64, 3, 1, 1, **kwargs)

            self.conv2_1 = L.Convolution2D(64, 128, 3, 1, 1, **kwargs)
            self.conv2_2 = L.Convolution2D(128, 128, 3, 1, 1, **kwargs)

            self.conv3_1 = L.Convolution2D(128, 256, 3, 1, 1, **kwargs)
            self.conv3_2 = L.Convolution2D(256, 256, 3, 1, 1, **kwargs)
            self.conv3_3 = L.Convolution2D(256, 256, 3, 1, 1, **kwargs)

            self.conv4_1 = L.Convolution2D(256, 512, 3, 1, 1, **kwargs)
            self.conv4_2 = L.Convolution2D(512, 512, 3, 1, 1, **kwargs)
            self.conv4_3 = L.Convolution2D(512, 512, 3, 1, 1, **kwargs)

            self.conv5_1 = L.Convolution2D(512, 512, 3, 1, 1, **kwargs)
            self.conv5_2 = L.Convolution2D(512, 512, 3, 1, 1, **kwargs)
            self.conv5_3 = L.Convolution2D(512, 512, 3, 1, 1, **kwargs)

            self.fc6 = L.Convolution2D(512, 4096, 7, 1, 0, **kwargs)
            self.fc7 = L.Convolution2D(4096, 4096, 1, 1, 0, **kwargs)

            self.score_fr = L.Convolution2D(4096, n_class, 1, 1, 0, **kwargs)
            self.score_pool4 = L.Convolution2D(512, n_class, 1, 1, 0, **kwargs)

            self.upscore2 = L.Deconvolution2D(
                n_class, n_class, 4, 2, nobias=True,
                initialW=initializers.UpsamplingDeconvWeight())
            self.upscore16 = L.Deconvolution2D(
                n_class, n_class, 32, 16, nobias=True,
                initialW=initializers.UpsamplingDeconvWeight())
Project: fcn    Author: wkentaro    | project source | file source
def __init__(self, n_class=21):
        self.n_class = n_class
        kwargs = {
            'initialW': chainer.initializers.Zero(),
            'initial_bias': chainer.initializers.Zero(),
        }
        super(FCN32s, self).__init__()
        with self.init_scope():
            self.conv1_1 = L.Convolution2D(3, 64, 3, 1, 100, **kwargs)
            self.conv1_2 = L.Convolution2D(64, 64, 3, 1, 1, **kwargs)

            self.conv2_1 = L.Convolution2D(64, 128, 3, 1, 1, **kwargs)
            self.conv2_2 = L.Convolution2D(128, 128, 3, 1, 1, **kwargs)

            self.conv3_1 = L.Convolution2D(128, 256, 3, 1, 1, **kwargs)
            self.conv3_2 = L.Convolution2D(256, 256, 3, 1, 1, **kwargs)
            self.conv3_3 = L.Convolution2D(256, 256, 3, 1, 1, **kwargs)

            self.conv4_1 = L.Convolution2D(256, 512, 3, 1, 1, **kwargs)
            self.conv4_2 = L.Convolution2D(512, 512, 3, 1, 1, **kwargs)
            self.conv4_3 = L.Convolution2D(512, 512, 3, 1, 1, **kwargs)

            self.conv5_1 = L.Convolution2D(512, 512, 3, 1, 1, **kwargs)
            self.conv5_2 = L.Convolution2D(512, 512, 3, 1, 1, **kwargs)
            self.conv5_3 = L.Convolution2D(512, 512, 3, 1, 1, **kwargs)

            self.fc6 = L.Convolution2D(512, 4096, 7, 1, 0, **kwargs)
            self.fc7 = L.Convolution2D(4096, 4096, 1, 1, 0, **kwargs)

            self.score_fr = L.Convolution2D(4096, n_class, 1, 1, 0, **kwargs)

            self.upscore = L.Deconvolution2D(
                n_class, n_class, 64, 32, 0, nobias=True,
                initialW=initializers.UpsamplingDeconvWeight())
Project: chainer-stack-gan    Author: dsanno    | project source | file source
def __init__(self):
        initialW = chainer.initializers.Normal(0.02)
        super(Generator1, self).__init__(
            conv1=L.Deconvolution2D(100, 1024, 4, initialW=initialW),
            bn1=L.BatchNormalization(1024),
            up=UpSampling(4, 1024, 64),
        )
Project: Human-Pose-Estimation-Using-FCN    Author: jessiechouuu    | project source | file source
def __init__(self):
        super(MyFcn, self).__init__(
            conv1_1=L.Convolution2D(  3,  64, 3, stride=1, pad=1),
            conv1_2=L.Convolution2D( 64,  64, 3, stride=1, pad=1),

            conv2_1=L.Convolution2D( 64, 128, 3, stride=1, pad=1),
            conv2_2=L.Convolution2D(128, 128, 3, stride=1, pad=1),

            conv3_1=L.Convolution2D(128, 256, 3, stride=1, pad=1),
            conv3_2=L.Convolution2D(256, 256, 3, stride=1, pad=1),
            conv3_3=L.Convolution2D(256, 256, 3, stride=1, pad=1),

            conv4_1=L.Convolution2D(256, 512, 3, stride=1, pad=1),
            conv4_2=L.Convolution2D(512, 512, 3, stride=1, pad=1),
            conv4_3=L.Convolution2D(512, 512, 3, stride=1, pad=1),

            conv5_1=L.Convolution2D(512, 512, 3, stride=1, pad=1),
            conv5_2=L.Convolution2D(512, 512, 3, stride=1, pad=1),
            conv5_3=L.Convolution2D(512, 512, 3, stride=1, pad=1),

            score_pool3=L.Convolution2D(256, MyFcn.CLASSES, 1, stride=1, pad=0),
            score_pool4=L.Convolution2D(512, MyFcn.CLASSES, 1, stride=1, pad=0),
            score_pool5=L.Convolution2D(512, MyFcn.CLASSES, 1, stride=1, pad=0),

            upsample_pool4=L.Deconvolution2D(MyFcn.CLASSES, MyFcn.CLASSES, ksize= 4, stride=2, pad=1),
            upsample_pool5=L.Deconvolution2D(MyFcn.CLASSES, MyFcn.CLASSES, ksize= 8, stride=4, pad=2),
            upsample_final=L.Deconvolution2D(MyFcn.CLASSES, MyFcn.CLASSES, ksize=16, stride=8, pad=4),
        )
        self.train = True
Project: chainer-wasserstein-gan    Author: hvy    | project source | file source
def __init__(self):
        super().__init__(
            dc1=L.Deconvolution2D(None, 256, 4, stride=1, pad=0, nobias=True),
            dc2=L.Deconvolution2D(256, 128, 4, stride=2, pad=1, nobias=True),
            dc3=L.Deconvolution2D(128, 64, 4, stride=2, pad=1, nobias=True),
            dc4=L.Deconvolution2D(64, 3, 4, stride=2, pad=1, nobias=True),
            bn_dc1=L.BatchNormalization(256),
            bn_dc2=L.BatchNormalization(128),
            bn_dc3=L.BatchNormalization(64)
        )
Project: chainer-LSGAN    Author: pfnet-research    | project source | file source
def __init__(self, size=None):

        super().__init__(
            dc1=L.Deconvolution2D(None, 256, 4, stride=1, pad=0, nobias=True),
            dc2=L.Deconvolution2D(256, 128, 4, stride=2, pad=1, nobias=True),
            dc3=L.Deconvolution2D(128, 64, 4, stride=2, pad=1, nobias=True),
            dc4=L.Deconvolution2D(64, 3, 4, stride=2, pad=1, nobias=True),
            bn_dc1=L.BatchNormalization(256),
            bn_dc2=L.BatchNormalization(128),
            bn_dc3=L.BatchNormalization(64)
        )
Project: chainer-LSGAN    Author: pfnet-research    | project source | file source
def __init__(self, size=None):

        super().__init__(
            dc1=L.Deconvolution2D(None, 256, 4, stride=1, pad=0, nobias=True),
            dc2=L.Deconvolution2D(256, 128, 4, stride=2, pad=1, nobias=True),
            dc3=L.Deconvolution2D(128, 64, 4, stride=2, pad=2, nobias=True),
            dc4=L.Deconvolution2D(64, 1, 4, stride=2, pad=1, nobias=True),
            bn_dc1=L.BatchNormalization(256),
            bn_dc2=L.BatchNormalization(128),
            bn_dc3=L.BatchNormalization(64)
        )
Project: chainer-pix2pix    Author: wuhuikai    | project source | file source
def __init__(self, feature_map_nc, output_nc, w_init=None):
        super(Generator, self).__init__(
            c1=L.Convolution2D(None, feature_map_nc, ksize=4, stride=2, pad=1, initialW=w_init),
            c2=L.Convolution2D(None, 2*feature_map_nc, ksize=4, stride=2, pad=1, initialW=w_init),
            c3=L.Convolution2D(None, 4*feature_map_nc, ksize=4, stride=2, pad=1, initialW=w_init),
            c4=L.Convolution2D(None, 8*feature_map_nc, ksize=4, stride=2, pad=1, initialW=w_init),
            c5=L.Convolution2D(None, 8*feature_map_nc, ksize=4, stride=2, pad=1, initialW=w_init),
            c6=L.Convolution2D(None, 8*feature_map_nc, ksize=4, stride=2, pad=1, initialW=w_init),
            c7=L.Convolution2D(None, 8*feature_map_nc, ksize=4, stride=2, pad=1, initialW=w_init),
            c8=L.Convolution2D(None, 8*feature_map_nc, ksize=4, stride=2, pad=1, initialW=w_init),
            dc1=L.Deconvolution2D(None, 8*feature_map_nc, ksize=4, stride=2, pad=1, initialW=w_init),
            dc2=L.Deconvolution2D(None, 8*feature_map_nc, ksize=4, stride=2, pad=1, initialW=w_init),
            dc3=L.Deconvolution2D(None, 8*feature_map_nc, ksize=4, stride=2, pad=1, initialW=w_init),
            dc4=L.Deconvolution2D(None, 8*feature_map_nc, ksize=4, stride=2, pad=1, initialW=w_init),
            dc5=L.Deconvolution2D(None, 4*feature_map_nc, ksize=4, stride=2, pad=1, initialW=w_init),
            dc6=L.Deconvolution2D(None, 2*feature_map_nc, ksize=4, stride=2, pad=1, initialW=w_init),
            dc7=L.Deconvolution2D(None, feature_map_nc, ksize=4, stride=2, pad=1, initialW=w_init),
            dc8=L.Deconvolution2D(None, output_nc, ksize=4, stride=2, pad=1, initialW=w_init),
            b2=L.BatchNormalization(2*feature_map_nc),
            b3=L.BatchNormalization(4*feature_map_nc),
            b4=L.BatchNormalization(8*feature_map_nc),
            b5=L.BatchNormalization(8*feature_map_nc),
            b6=L.BatchNormalization(8*feature_map_nc),
            b7=L.BatchNormalization(8*feature_map_nc),
            b8=L.BatchNormalization(8*feature_map_nc),
            b1_d=L.BatchNormalization(8*feature_map_nc),
            b2_d=L.BatchNormalization(8*feature_map_nc),
            b3_d=L.BatchNormalization(8*feature_map_nc),
            b4_d=L.BatchNormalization(8*feature_map_nc),
            b5_d=L.BatchNormalization(4*feature_map_nc),
            b6_d=L.BatchNormalization(2*feature_map_nc),
            b7_d=L.BatchNormalization(feature_map_nc)
        )
Project: chainer-pix2pix    Author: pfnet-research    | project source | file source
def __init__(self, ch0, ch1, bn=True, sample='down', activation=F.relu, dropout=False):
        self.bn = bn
        self.activation = activation
        self.dropout = dropout
        layers = {}
        w = chainer.initializers.Normal(0.02)
        if sample=='down':
            layers['c'] = L.Convolution2D(ch0, ch1, 4, 2, 1, initialW=w)
        else:
            layers['c'] = L.Deconvolution2D(ch0, ch1, 4, 2, 1, initialW=w)
        if bn:
            layers['batchnorm'] = L.BatchNormalization(ch1)
        super(CBR, self).__init__(**layers)
Project: alphabetic    Author: hitokun-s    | project source | file source
def __init__(self, nz=30):
        super(Generator, self).__init__(
                l0z=L.Linear(nz, 6 * 6 * 128, wscale=0.02 * math.sqrt(nz)),
                dc1=L.Deconvolution2D(128, 64, 4, stride=2, pad=1, wscale=0.02 * math.sqrt(4 * 4 * 128)),
                dc2=L.Deconvolution2D(64, 32, 4, stride=2, pad=1, wscale=0.02 * math.sqrt(4 * 4 * 64)),
                dc3=L.Deconvolution2D(32, 1, 4, stride=2, pad=1, wscale=0.02 * math.sqrt(4 * 4 * 32)),
                bn0l=L.BatchNormalization(6 * 6 * 128),
                bn0=L.BatchNormalization(128),
                bn1=L.BatchNormalization(64),
                bn2=L.BatchNormalization(32)
        )
Project: chainer-examples    Author: nocotan    | project source | file source
def __init__(self, ch=512, wscale=0.02):
        w = chainer.initializers.Normal(wscale)
        self.ch = ch
        super(Discriminator, self).__init__()
        with self.init_scope():
            self.c0 = L.Convolution2D(3, ch // 8, 3, 1, 1, initialW=w)
            self.c1 = L.Convolution2D(ch // 8, ch // 4, 4, 2, 1, initialW=w)
            self.c2 = L.Convolution2D(ch // 4, ch // 2, 4, 2, 1, initialW=w)
            self.c3 = L.Convolution2D(ch // 2, ch // 1, 4, 2, 1, initialW=w)
            self.l4 = L.Linear(4*4*ch, 128, initialW=w)
            self.l5 = L.Linear(128, 4*4*ch, initialW=w)
            self.dc3 = L.Deconvolution2D(ch // 1, ch // 2, 4, 2, 1, initialW=w)
            self.dc2 = L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=w)
            self.dc1 = L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=w)
            self.dc0 = L.Deconvolution2D(ch // 8, 3, 3, 1, 1, initialW=w)
Project: chainer-examples    Author: nocotan    | project source | file source
def __init__(self, z_dim):
        super(Generator, self).__init__(
            l1=L.Deconvolution2D(z_dim, 128, 3, 2, 0),
            bn1=L.BatchNormalization(128),
            l2=L.Deconvolution2D(128, 128, 3, 2, 1),
            bn2=L.BatchNormalization(128),
            l3=L.Deconvolution2D(128, 128, 3, 2, 1),
            bn3=L.BatchNormalization(128),
            l4=L.Deconvolution2D(128, 128, 3, 2, 2),
            bn4=L.BatchNormalization(128),
            l5=L.Deconvolution2D(128, 1, 3, 2, 2, outsize=(28, 28)),
        )
        self.train = True
Project: pose2img    Author: Hi-king    | project source | file source
def __init__(self, ch0, ch1, bn=True, sample='down', activation=F.relu, dropout=False):
        self.bn = bn
        self.activation = activation
        self.dropout = dropout
        layers = {}
        w = chainer.initializers.Normal(0.02)
        if sample == 'down':
            layers['c'] = L.Convolution2D(ch0, ch1, 4, 2, 1, initialW=w)
        else:
            layers['c'] = L.Deconvolution2D(ch0, ch1, 4, 2, 1, initialW=w)
        if bn:
            layers['batchnorm'] = L.BatchNormalization(ch1)
        super(CBR, self).__init__(**layers)
Project: mahalita    Author: Eiji-Kb    | project source | file source
def __init__(self):
        super(COLORNet, self).__init__(
            fx0=L.Linear(1000, 512),
            fx1=L.Linear(512, 256),

            convf_0=L.Convolution2D(512, 256, 1, stride=1, pad=0),

            bn0 = L.BatchNormalization(512),
            conv5_1=L.Convolution2D(512, 512, 1, stride=1, pad=0),
            deconv5_1 = L.Deconvolution2D(512, 512, 2, stride=2, pad=0),

            conv4_1=L.Convolution2D(512, 256, 1, stride=1, pad=0),
            bn1 = L.BatchNormalization(512),
            deconv4_1 = L.Deconvolution2D(256, 256, 2, stride=2, pad=0),
            bn2 = L.BatchNormalization(256),

            conv4_2=L.Convolution2D(256, 128, 3, stride=1, pad=1),
            bn3 = L.BatchNormalization(128),

            deconv3_1 = L.Deconvolution2D(128, 128, 2, stride=2, pad=0),
            bn4 = L.BatchNormalization(128),

            conv3_1=L.Convolution2D(128, 64, 3, stride=1, pad=1),
            bn5 = L.BatchNormalization(64),

            deconv2_1 = L.Deconvolution2D(64, 64, 2, stride=2, pad=0),
            bn6 = L.BatchNormalization(64),

            conv2_1=L.Convolution2D(64, 3, 3, stride=1, pad=1),
            bn7 = L.BatchNormalization(3),
            bn8 = L.BatchNormalization(3),

            conv1_1=L.Convolution2D(3, 3, 3, stride=1, pad=1),
            bn9 = L.BatchNormalization(3),

            conv0_5=L.Convolution2D(3, 2, 3, stride=1, pad=1),
        )
Project: Semantic-Segmentation-using-Adversarial-Networks    Author: oyam    | project source | file source
def __init__(self, n_class=21):
        self.train=True
        super(FCN16s, self).__init__(
            conv1_1=L.Convolution2D(3, 64, 3, stride=1, pad=100),
            conv1_2=L.Convolution2D(64, 64, 3, stride=1, pad=1),
            conv2_1=L.Convolution2D(64, 128, 3, stride=1, pad=1),
            conv2_2=L.Convolution2D(128, 128, 3, stride=1, pad=1),
            conv3_1=L.Convolution2D(128, 256, 3, stride=1, pad=1),
            conv3_2=L.Convolution2D(256, 256, 3, stride=1, pad=1),
            conv3_3=L.Convolution2D(256, 256, 3, stride=1, pad=1),
            conv4_1=L.Convolution2D(256, 512, 3, stride=1, pad=1),
            conv4_2=L.Convolution2D(512, 512, 3, stride=1, pad=1),
            conv4_3=L.Convolution2D(512, 512, 3, stride=1, pad=1),
            conv5_1=L.Convolution2D(512, 512, 3, stride=1, pad=1),
            conv5_2=L.Convolution2D(512, 512, 3, stride=1, pad=1),
            conv5_3=L.Convolution2D(512, 512, 3, stride=1, pad=1),
            fc6=L.Convolution2D(512, 4096, 7, stride=1, pad=0),
            fc7=L.Convolution2D(4096, 4096, 1, stride=1, pad=0),
            score_fr=L.Convolution2D(4096, n_class, 1, stride=1, pad=0,
                nobias=True, initialW=np.zeros((n_class, 4096, 1, 1))),
            score_pool4=L.Convolution2D(512, n_class, 1, stride=1, pad=0,
                nobias=True, initialW=np.zeros((n_class, 512, 1, 1))),
            upscore2=L.Deconvolution2D(n_class, n_class, 4, stride=2,
                nobias=True, initialW=f.bilinear_interpolation_kernel(n_class, n_class, ksize=4), use_cudnn=False),
            upscore16=L.Deconvolution2D(n_class, n_class, 32, stride=16,
                nobias=True, initialW=f.bilinear_interpolation_kernel(n_class, n_class, ksize=32), use_cudnn=False),
        )
Project: Semantic-Segmentation-using-Adversarial-Networks    Author: oyam    | project source | file source
def __init__(self, n_class=21):
        self.train = True
        super(FCN8s, self).__init__(
            conv1_1=L.Convolution2D(3, 64, 3, stride=1, pad=100),
            conv1_2=L.Convolution2D(64, 64, 3, stride=1, pad=1),
            conv2_1=L.Convolution2D(64, 128, 3, stride=1, pad=1),
            conv2_2=L.Convolution2D(128, 128, 3, stride=1, pad=1),
            conv3_1=L.Convolution2D(128, 256, 3, stride=1, pad=1),
            conv3_2=L.Convolution2D(256, 256, 3, stride=1, pad=1),
            conv3_3=L.Convolution2D(256, 256, 3, stride=1, pad=1),
            conv4_1=L.Convolution2D(256, 512, 3, stride=1, pad=1),
            conv4_2=L.Convolution2D(512, 512, 3, stride=1, pad=1),
            conv4_3=L.Convolution2D(512, 512, 3, stride=1, pad=1),
            conv5_1=L.Convolution2D(512, 512, 3, stride=1, pad=1),
            conv5_2=L.Convolution2D(512, 512, 3, stride=1, pad=1),
            conv5_3=L.Convolution2D(512, 512, 3, stride=1, pad=1),
            fc6=L.Convolution2D(512, 4096, 7, stride=1, pad=0),
            fc7=L.Convolution2D(4096, 4096, 1, stride=1, pad=0),
            score_fr=L.Convolution2D(4096, n_class, 1, stride=1, pad=0,
                nobias=True, initialW=np.zeros((n_class, 4096, 1, 1))),
            score_pool3=L.Convolution2D(256, n_class, 1, stride=1, pad=0,
                nobias=True, initialW=np.zeros((n_class, 256, 1, 1))),
            score_pool4=L.Convolution2D(512, n_class, 1, stride=1, pad=0,
                nobias=True, initialW=np.zeros((n_class, 512, 1, 1))),
            upscore2=L.Deconvolution2D(n_class, n_class, 4, stride=2,
                nobias=True, initialW=f.bilinear_interpolation_kernel(n_class, n_class, ksize=4), use_cudnn=False),
            upscore_pool4=L.Deconvolution2D(n_class, n_class, 4, stride=2,
                nobias=True, initialW=f.bilinear_interpolation_kernel(n_class, n_class, ksize=4), use_cudnn=False),
            upscore8=L.Deconvolution2D(n_class, n_class, 16, stride=8,
                nobias=True, initialW=f.bilinear_interpolation_kernel(n_class, n_class, ksize=16), use_cudnn=False),
        )
Project: chainer-gan-improvements    Author: hvy    | project source | file source
def __init__(self, n_z, out_shape):
        super(Generator, self).__init__(
            fc0=L.Linear(n_z, lindim(out_shape, 2**4, 256)),
            dc1=L.Deconvolution2D(256, 128, 4, stride=2, pad=1),
            dc2=L.Deconvolution2D(128, 64, 4, stride=2, pad=1),
            dc3=L.Deconvolution2D(64, 32, 4, stride=2, pad=1),
            dc4=L.Deconvolution2D(32, 1, 4, stride=2, pad=1),
            bn0=L.BatchNormalization(lindim(out_shape, 2**4, 256)),
            bn1=L.BatchNormalization(128),
            bn2=L.BatchNormalization(64),
            bn3=L.BatchNormalization(32)
        )
        self.out_shape = out_shape
Project: GAN    Author: lyakaap    | project source | file source
def __init__(self):
        initializer = initializers.HeNormal()
        super(Generator, self).__init__(
                # the 100-dimensional noise vector is the seed for generation
            l0z = L.Linear(100, 7*7*128, initialW = initializer),
            dc1 = L.Deconvolution2D(128, 64, 4, stride=2, pad=1, initialW = initializer),
            dc2 = L.Deconvolution2D(64, 1, 4, stride=2, pad=1, initialW = initializer),
            bn0 = L.BatchNormalization(7*7*128),
            bn1 = L.BatchNormalization(64),
        )
Project: GAN    Author: lyakaap    | project source | file source
def __init__(self):
        initializer = initializers.HeNormal()
        super(Generator, self).__init__(
            l0z = L.Linear(100, 7*7*128, initialW = initializer),
            dc1 = L.Deconvolution2D(128, 64, 4, stride=2, pad=1, initialW = initializer),
            dc2 = L.Deconvolution2D(64, 1, 4, stride=2, pad=1, initialW = initializer),
            bn0 = L.BatchNormalization(7*7*128),
            bn1 = L.BatchNormalization(64),
        )
Project: GAN    Author: lyakaap    | project source | file source
def __init__(self):
        initializer = initializers.HeNormal()
        super(Generator, self).__init__(
            l0z = L.Linear(100+10, 7*7*128, initialW = initializer),
            dc1 = L.Deconvolution2D(128, 64, 4, stride=2, pad=1, initialW = initializer),
            dc2 = L.Deconvolution2D(64, 1, 4, stride=2, pad=1, initialW = initializer),
            bn0 = L.BatchNormalization(7*7*128),
            bn1 = L.BatchNormalization(64),
        )
Project: GAN    Author: lyakaap    | project source | file source
def __init__(self):
        initializer = initializers.HeNormal()
        super(Generator, self).__init__(
                # the 100-dimensional noise vector is the seed for generation
            l0z = L.Linear(100, 7*7*128, initialW = initializer),
            dc1 = L.Deconvolution2D(128, 64, 4, stride=2, pad=1, initialW = initializer),
            dc2 = L.Deconvolution2D(64, 1, 4, stride=2, pad=1, initialW = initializer),
            bn0 = L.BatchNormalization(7*7*128),
            bn1 = L.BatchNormalization(64),
        )
Project: chainer_img2img_example    Author: taizan    | project source | file source
def __init__(self):
        super(AutoENC, self).__init__(
            c0=L.Convolution2D(None, 32, 3, 1, 1),
            c1=L.Convolution2D(32, 64, 4, 2, 1),
            c2=L.Convolution2D(64, 128, 4, 2, 1),
            c3=L.Convolution2D(128, 256, 4, 2, 1),
            c4=L.Convolution2D(256, 512, 4, 2, 1),
            c5=L.Convolution2D(512, 512, 3, 1, 1),

            dc5=L.Deconvolution2D(512, 512, 4, 2, 1),
            dc4=L.Deconvolution2D(512, 256, 4, 2, 1),
            dc3=L.Deconvolution2D(256, 128, 4, 2, 1),
            dc2=L.Deconvolution2D(128, 64, 4, 2, 1),
            dc1=L.Convolution2D(64, 32, 3, 1, 1),
            dc0=L.Convolution2D(32, 3, 3, 1, 1),

            bnc0=L.BatchNormalization(32),
            bnc1=L.BatchNormalization(64),
            bnc2=L.BatchNormalization(128),
            bnc3=L.BatchNormalization(256),
            bnc4=L.BatchNormalization(512),
            bnc5=L.BatchNormalization(512),

            bnd5=L.BatchNormalization(512),
            bnd4=L.BatchNormalization(256),
            bnd3=L.BatchNormalization(128),
            bnd2=L.BatchNormalization(64),
            bnd1=L.BatchNormalization(32)
        )
Project: chainer-gan-experiments    Author: Aixile    | project source | file source
def __init__(self, ch0, ch1, \
                nn='conv', \
                norm='bn', \
                activation=F.relu, \
                dropout=False, \
                noise=None, \
                w_init=None, \
                k_size = 3, \
                normalize_input=False ):

        self.norm = norm
        self.normalize_input = normalize_input
        self.activation = activation
        self.dropout = dropout
        self.noise = noise
        self.nn = nn
        layers = {}

        if w_init == None:
            w = chainer.initializers.GlorotNormal()
        else:
            w = w_init

        if nn == 'down_conv':
            layers['c'] = L.Convolution2D(ch0, ch1, 4, 2, 1, initialW=w)

        elif nn == 'up_deconv':
            layers['c'] = L.Deconvolution2D(ch0, ch1, 4, 2, 1, initialW=w)

        elif nn == 'up_subpixel':
            pad = k_size//2
            layers['c'] = L.Convolution2D(ch0, ch1*4, k_size, 1, pad, initialW=w)

        elif nn=='conv' or nn=='up_unpooling':
            pad = k_size//2
            layers['c'] = L.Convolution2D(ch0, ch1, k_size, 1, pad, initialW=w)

        elif nn=='linear':
            layers['c'] = L.Linear(ch0, ch1, initialW=w)

        else:
            raise Exception("Cannot find method %s" % nn)

        if self.norm == 'bn':
            if self.noise:
                layers['n'] = L.BatchNormalization(ch1, use_gamma=False)
            else:
                layers['n'] = L.BatchNormalization(ch1)
        elif self.norm == 'ln':
                layers['n'] = L.LayerNormalization(ch1)

        super(NNBlock, self).__init__(**layers)
Project: fcn    Author: wkentaro    | project source | file source
def __init__(self, n_class=21):
        self.n_class = n_class
        kwargs = {
            'initialW': chainer.initializers.Zero(),
            'initial_bias': chainer.initializers.Zero(),
        }
        super(FCN8s, self).__init__()
        with self.init_scope():
            self.conv1_1 = L.Convolution2D(3, 64, 3, 1, 100, **kwargs)
            self.conv1_2 = L.Convolution2D(64, 64, 3, 1, 1, **kwargs)

            self.conv2_1 = L.Convolution2D(64, 128, 3, 1, 1, **kwargs)
            self.conv2_2 = L.Convolution2D(128, 128, 3, 1, 1, **kwargs)

            self.conv3_1 = L.Convolution2D(128, 256, 3, 1, 1, **kwargs)
            self.conv3_2 = L.Convolution2D(256, 256, 3, 1, 1, **kwargs)
            self.conv3_3 = L.Convolution2D(256, 256, 3, 1, 1, **kwargs)

            self.conv4_1 = L.Convolution2D(256, 512, 3, 1, 1, **kwargs)
            self.conv4_2 = L.Convolution2D(512, 512, 3, 1, 1, **kwargs)
            self.conv4_3 = L.Convolution2D(512, 512, 3, 1, 1, **kwargs)

            self.conv5_1 = L.Convolution2D(512, 512, 3, 1, 1, **kwargs)
            self.conv5_2 = L.Convolution2D(512, 512, 3, 1, 1, **kwargs)
            self.conv5_3 = L.Convolution2D(512, 512, 3, 1, 1, **kwargs)

            self.fc6 = L.Convolution2D(512, 4096, 7, 1, 0, **kwargs)
            self.fc7 = L.Convolution2D(4096, 4096, 1, 1, 0, **kwargs)

            self.score_fr = L.Convolution2D(4096, n_class, 1, 1, 0, **kwargs)

            self.upscore2 = L.Deconvolution2D(
                n_class, n_class, 4, 2, 0, nobias=True,
                initialW=initializers.UpsamplingDeconvWeight())
            self.upscore8 = L.Deconvolution2D(
                n_class, n_class, 16, 8, 0, nobias=True,
                initialW=initializers.UpsamplingDeconvWeight())

            self.score_pool3 = L.Convolution2D(256, n_class, 1, 1, 0, **kwargs)
            self.score_pool4 = L.Convolution2D(512, n_class, 1, 1, 0, **kwargs)
            self.upscore_pool4 = L.Deconvolution2D(
                n_class, n_class, 4, 2, 0, nobias=True,
                initialW=initializers.UpsamplingDeconvWeight())
Project: chainer-deconv    Author: germanRos    | project source | file source
def __init__(self, name, CLASSES, preNet=None):
        super(FCN, self).__init__(
            # params of the model
            #ipdb.set_trace(),

            conv1_1     = L.Convolution2D(3, 64, 3, stride=(1, 1), pad=(1, 1), initialW=None, initial_bias=None),
            conv1_2     = L.Convolution2D(64, 64, 3, stride=(1, 1), pad=(1, 1), initialW=None, initial_bias=None),

            conv2_1     = L.Convolution2D(64, 128, 3, stride=(1, 1), pad=(1, 1), initialW=None, initial_bias=None),
            conv2_2     = L.Convolution2D(128, 128, 3, stride=(1, 1), pad=(1, 1), initialW=None, initial_bias=None),

            conv3_1     = L.Convolution2D(128, 256, 3, stride=(1, 1), pad=(1, 1), initialW=None, initial_bias=None),
            conv3_2     = L.Convolution2D(256, 256, 3, stride=(1, 1), pad=(1, 1), initialW=None, initial_bias=None),
            conv3_3     = L.Convolution2D(256, 256, 3, stride=(1, 1), pad=(1, 1), initialW=None, initial_bias=None),            

            conv4_1     = L.Convolution2D(256, 512, 3, stride=(1, 1), pad=(1, 1), initialW=None, initial_bias=None),
            conv4_2     = L.Convolution2D(512, 512, 3, stride=(1, 1), pad=(1, 1), initialW=None, initial_bias=None),
            conv4_3     = L.Convolution2D(512, 512, 3, stride=(1, 1), pad=(1, 1), initialW=None, initial_bias=None),            

            conv5_1     = L.Convolution2D(512, 512, 3, stride=(1, 1), pad=(1, 1), initialW=None, initial_bias=None),
            conv5_2     = L.Convolution2D(512, 512, 3, stride=(1, 1), pad=(1, 1), initialW=None, initial_bias=None),
            conv5_3     = L.Convolution2D(512, 512, 3, stride=(1, 1), pad=(1, 1), initialW=None, initial_bias=None),    

            #conv_aux   = L.Convolution2D(512, 512, (7, 6), stride=(1, 1), pad=(0, 1), initialW=None, initial_bias=None),
            #conv_auxb  = L.Convolution2D(512, 512, (7, 7), stride=(1, 1), pad=(0, 0), initialW=None, initial_bias=None),

            fc_6        = L.Convolution2D(512, 4096, 1, stride=(1, 1), pad=(0, 0), initialW=None, initial_bias=None),
            fc_7        = L.Convolution2D(4096, 4096, 1, stride=(1, 1), pad=(0, 0), initialW=None, initial_bias=None),
            #fc_6       = L.Linear(512, 4096, wscale=1, bias=0, nobias=False, initialW=None, initial_bias=None),    
            #fc_7       = L.Linear(4096, 4096, wscale=1, bias=0, nobias=False, initialW=None, initial_bias=None),           
            conv_aux2   = L.Convolution2D(4096, CLASSES, 1, stride=(1, 1), pad=(0, 0), initialW=None, initial_bias=None),


            score2      = L.Deconvolution2D(CLASSES, CLASSES, (4,5), stride=(2, 2), pad=(1, 2)),
            score_pool4 = L.Convolution2D(512, CLASSES, 1, stride=(1, 1), pad=(0, 0), initialW=None, initial_bias=None),

            score4      = L.Deconvolution2D(CLASSES, CLASSES, (4,3), stride=(2, 2), pad=(1, 1)),
            score_pool3 = L.Convolution2D(256, CLASSES, 1, stride=(1, 1), pad=(0, 0), initialW=None, initial_bias=None),


            upsample    = L.Deconvolution2D(CLASSES, CLASSES, 8, stride=(8, 8), pad=(0, 0)),
            classi      = L.Convolution2D(CLASSES, CLASSES, 1, stride=(1, 1), pad=(0, 0), initialW=None, initial_bias=None),


        )
        self.name = name
        self.classes = CLASSES
Project: chainer-deconv    Author: germanRos    | project source | file source
def __init__(self, name, CLASSES, preNet=None):
        super(FCN_BND, self).__init__(
            # params of the model
            #ipdb.set_trace(),

            conv1_1     = L.Convolution2D(3, 64, 3, stride=(1, 1), pad=(1, 1), initialW=None, initial_bias=None),
            bn1_1       = L.BatchNormalization(64),
            conv1_2     = L.Convolution2D(64, 64, 3, stride=(1, 1), pad=(1, 1), initialW=None, initial_bias=None),
            bn1_2       = L.BatchNormalization(64),

            conv2_1     = L.Convolution2D(64, 128, 3, stride=(1, 1), pad=(1, 1), initialW=None, initial_bias=None),
            bn2_1       = L.BatchNormalization(128),
            conv2_2     = L.Convolution2D(128, 128, 3, stride=(1, 1), pad=(1, 1), initialW=None, initial_bias=None),
            bn2_2       = L.BatchNormalization(128),

            conv3_1     = L.Convolution2D(128, 256, 3, stride=(1, 1), pad=(1, 1), initialW=None, initial_bias=None),
            bn3_1       = L.BatchNormalization(256),
            conv3_2     = L.Convolution2D(256, 256, 3, stride=(1, 1), pad=(1, 1), initialW=None, initial_bias=None),
            bn3_2       = L.BatchNormalization(256),
            conv3_3     = L.Convolution2D(256, 256, 3, stride=(1, 1), pad=(1, 1), initialW=None, initial_bias=None),
            bn3_3       = L.BatchNormalization(256),

            conv4_1     = L.Convolution2D(256, 512, 3, stride=(1, 1), pad=(1, 1), initialW=None, initial_bias=None),
            bn4_1       = L.BatchNormalization(512),
            conv4_2     = L.Convolution2D(512, 512, 3, stride=(1, 1), pad=(1, 1), initialW=None, initial_bias=None),
            bn4_2       = L.BatchNormalization(512),
            conv4_3     = L.Convolution2D(512, 512, 3, stride=(1, 1), pad=(1, 1), initialW=None, initial_bias=None),
            bn4_3       = L.BatchNormalization(512),

            conv5_1     = L.Convolution2D(512, 512, 3, stride=(1, 1), pad=(1, 1), initialW=None, initial_bias=None),
            bn5_1       = L.BatchNormalization(512),
            conv5_2     = L.Convolution2D(512, 512, 3, stride=(1, 1), pad=(1, 1), initialW=None, initial_bias=None),
            bn5_2       = L.BatchNormalization(512),
            conv5_3     = L.Convolution2D(512, 512, 3, stride=(1, 1), pad=(1, 1), initialW=None, initial_bias=None),
            bn5_3       = L.BatchNormalization(512),

            fc6         = L.Convolution2D(512, 4096, 7, stride=(1, 1), pad=(3, 3), initialW=None, initial_bias=None),
            fc7         = L.Convolution2D(4096, 4096, 1, stride=(1, 1), pad=(0, 0), initialW=None, initial_bias=None),

            score_fr    = L.Convolution2D(4096, CLASSES, 1, stride=(1, 1), pad=(0, 0), initialW=None, initial_bias=None),
            score2      = L.Deconvolution2D(CLASSES, CLASSES, (4,3), stride=(2, 2), pad=(1, 1), initialW=None, initial_bias=None),
            score_pool4     = L.Convolution2D(512, CLASSES, 1, stride=(1, 1), pad=(0, 0), initialW=None, initial_bias=None),
            score4      = L.Deconvolution2D(CLASSES, CLASSES, (4,3), stride=(2, 2), pad=(1, 1), initialW=None, initial_bias=None),
            score_pool3     = L.Convolution2D(256, CLASSES, 1, stride=(1, 1), pad=(0, 0), initialW=None, initial_bias=None),
            upsample    = L.Deconvolution2D(CLASSES, CLASSES, 8, stride=(8, 8), pad=(0, 0), initialW=None, initial_bias=None),
        )
        self.name = name
        self.classes = CLASSES
Project: SketchSimplification    Author: La4La    | project source | file source
def __init__(self):
        super(GEN, self).__init__(
            dc1=L.Convolution2D(None, 48, 5, 2, 2, wscale=0.02*math.sqrt(1*5*5)),
            fc2=L.Convolution2D(48, 128, 3, 1, 1, wscale=0.02*math.sqrt(48*3*3)),
            fc3=L.Convolution2D(128, 128, 3, 1, 1, wscale=0.02*math.sqrt(128*3*3)),
            dc4=L.Convolution2D(128, 256, 3, 2, 1, wscale=0.02*math.sqrt(128*3*3)),
            fc5=L.Convolution2D(256, 256, 3, 1, 1, wscale=0.02*math.sqrt(256*3*3)),
            fc6=L.Convolution2D(256, 256, 3, 1, 1, wscale=0.02*math.sqrt(256*3*3)),
            dc7=L.Convolution2D(256, 256, 3, 2, 1, wscale=0.02*math.sqrt(256*3*3)),
            fc8=L.Convolution2D(256, 512, 3, 1, 1, wscale=0.02*math.sqrt(256*3*3)),
            fc9=L.Convolution2D(512, 1024, 3, 1, 1, wscale=0.02*math.sqrt(512*3*3)),
            fc10=L.Convolution2D(1024, 1024, 3, 1, 1, wscale=0.02*math.sqrt(1024*3*3)),
            fc11=L.Convolution2D(1024, 1024, 3, 1, 1, wscale=0.02*math.sqrt(1024*3*3)),
            fc12=L.Convolution2D(1024, 1024, 3, 1, 1, wscale=0.02*math.sqrt(1024*3*3)),
            fc13=L.Convolution2D(1024, 512, 3, 1, 1, wscale=0.02*math.sqrt(1024*3*3)),
            fc14=L.Convolution2D(512, 256, 3, 1, 1, wscale=0.02*math.sqrt(512*3*3)),
            uc15=L.Deconvolution2D(256, 256, 4, 2, 1, wscale=0.02*math.sqrt(256*4*4)),
            fc16=L.Convolution2D(256, 256, 3, 1, 1, wscale=0.02*math.sqrt(256*3*3)),
            fc17=L.Convolution2D(256, 128, 3, 1, 1, wscale=0.02*math.sqrt(256*3*3)),
            uc18=L.Deconvolution2D(128, 128, 4, 2, 1, wscale=0.02*math.sqrt(128*4*4)),
            fc19=L.Convolution2D(128, 128, 3, 1, 1, wscale=0.02*math.sqrt(128*3*3)),
            fc20=L.Convolution2D(128, 48, 3, 1, 1, wscale=0.02*math.sqrt(128*3*3)),
            uc21=L.Deconvolution2D(48, 48, 4, 2, 1, wscale=0.02*math.sqrt(48*4*4)),
            fc22=L.Convolution2D(48, 24, 3, 1, 1, wscale=0.02*math.sqrt(48*3*3)),
            fc23=L.Convolution2D(24, 1, 3, 1, 1, wscale=0.02*math.sqrt(24*3*3)),

            bn1=L.BatchNormalization(48),
            bn2=L.BatchNormalization(128),
            bn3=L.BatchNormalization(128),
            bn4=L.BatchNormalization(256),
            bn5=L.BatchNormalization(256),
            bn6=L.BatchNormalization(256),
            bn7=L.BatchNormalization(256),
            bn8=L.BatchNormalization(512),
            bn9=L.BatchNormalization(1024),
            bn10=L.BatchNormalization(1024),
            bn11=L.BatchNormalization(1024),
            bn12=L.BatchNormalization(1024),
            bn13=L.BatchNormalization(512),
            bn14=L.BatchNormalization(256),
            bn15=L.BatchNormalization(256),
            bn16=L.BatchNormalization(256),
            bn17=L.BatchNormalization(128),
            bn18=L.BatchNormalization(128),
            bn19=L.BatchNormalization(128),
            bn20=L.BatchNormalization(48),
            bn21=L.BatchNormalization(48),
            bn22=L.BatchNormalization(24)
        )
Project: SketchSimplification    Author: La4La    | project source | file source
def __init__(self):
        super(DilatedGEN, self).__init__(
            dc1=L.Convolution2D(None, 48, 5, 2, 2, wscale=0.02*math.sqrt(1*5*5)),
            fc2=L.Convolution2D(48, 128, 3, 1, 1, wscale=0.02*math.sqrt(48*3*3)),
            fc3=L.Convolution2D(128, 128, 3, 1, 1, wscale=0.02*math.sqrt(128*3*3)),
            dc4=L.Convolution2D(128, 256, 3, 2, 1, wscale=0.02*math.sqrt(128*3*3)),
            fc5=L.Convolution2D(256, 256, 3, 1, 1, wscale=0.02*math.sqrt(256*3*3)),
            fc6=L.Convolution2D(256, 256, 3, 1, 1, wscale=0.02*math.sqrt(256*3*3)),
            dc7=L.Convolution2D(256, 256, 3, 2, 1, wscale=0.02*math.sqrt(256*3*3)),
            fc8=L.Convolution2D(256, 512, 3, 1, 1, wscale=0.02*math.sqrt(256*3*3)),
            fc9=L.DilatedConvolution2D(512, 1024, 3, 1, 2, dilate=2, wscale=0.02*math.sqrt(512*3*3)),
            fc10=L.DilatedConvolution2D(1024, 1024, 3, 1, 4, dilate=4, wscale=0.02*math.sqrt(1024*3*3)),
            fc11=L.DilatedConvolution2D(1024, 1024, 3, 1, 8, dilate=8, wscale=0.02*math.sqrt(1024*3*3)),
            fc12=L.Convolution2D(1024, 1024, 3, 1, 1, wscale=0.02*math.sqrt(1024*3*3)),
            fc13=L.Convolution2D(1024, 512, 3, 1, 1, wscale=0.02*math.sqrt(1024*3*3)),
            fc14=L.Convolution2D(512, 256, 3, 1, 1, wscale=0.02*math.sqrt(512*3*3)),
            uc15=L.Deconvolution2D(256, 256, 4, 2, 1, wscale=0.02*math.sqrt(256*4*4)),
            fc16=L.Convolution2D(256, 256, 3, 1, 1, wscale=0.02*math.sqrt(256*3*3)),
            fc17=L.Convolution2D(256, 128, 3, 1, 1, wscale=0.02*math.sqrt(256*3*3)),
            uc18=L.Deconvolution2D(128, 128, 4, 2, 1, wscale=0.02*math.sqrt(128*4*4)),
            fc19=L.Convolution2D(128, 128, 3, 1, 1, wscale=0.02*math.sqrt(128*3*3)),
            fc20=L.Convolution2D(128, 48, 3, 1, 1, wscale=0.02*math.sqrt(128*3*3)),
            uc21=L.Deconvolution2D(48, 48, 4, 2, 1, wscale=0.02*math.sqrt(48*4*4)),
            fc22=L.Convolution2D(48, 24, 3, 1, 1, wscale=0.02*math.sqrt(48*3*3)),
            fc23=L.Convolution2D(24, 1, 3, 1, 1, wscale=0.02*math.sqrt(24*3*3)),

            bn1=L.BatchNormalization(48),
            bn2=L.BatchNormalization(128),
            bn3=L.BatchNormalization(128),
            bn4=L.BatchNormalization(256),
            bn5=L.BatchNormalization(256),
            bn6=L.BatchNormalization(256),
            bn7=L.BatchNormalization(256),
            bn8=L.BatchNormalization(512),
            bn9=L.BatchNormalization(1024),
            bn10=L.BatchNormalization(1024),
            bn11=L.BatchNormalization(1024),
            bn12=L.BatchNormalization(1024),
            bn13=L.BatchNormalization(512),
            bn14=L.BatchNormalization(256),
            bn15=L.BatchNormalization(256),
            bn16=L.BatchNormalization(256),
            bn17=L.BatchNormalization(128),
            bn18=L.BatchNormalization(128),
            bn19=L.BatchNormalization(128),
            bn20=L.BatchNormalization(48),
            bn21=L.BatchNormalization(48),
            bn22=L.BatchNormalization(24)
        )
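In DilatedGEN, the bottleneck layers fc9, fc10 and fc11 widen the receptive field without shrinking the feature map: for a 3x3 kernel with stride 1, setting pad equal to dilate leaves the spatial size unchanged. A short shape check mirroring those three layers (dilation rates and channel counts as above; everything else is assumed for illustration):

import numpy as np
import chainer.links as L

x = np.zeros((1, 512, 16, 16), dtype=np.float32)
# 3x3 kernel, stride 1, pad == dilate -> output H/W equals input H/W
d2 = L.DilatedConvolution2D(512, 1024, 3, 1, 2, dilate=2)
d4 = L.DilatedConvolution2D(1024, 1024, 3, 1, 4, dilate=4)
d8 = L.DilatedConvolution2D(1024, 1024, 3, 1, 8, dilate=8)
print(d8(d4(d2(x))).shape)  # (1, 1024, 16, 16)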
Project: SketchSimplification    Author: La4La    | project source | file source
def __init__(self):
        super(mGEN, self).__init__(
            dc1=L.Convolution2D(None, 48, 5, 2, 2, wscale=0.02 * math.sqrt(1 * 5 * 5)),
            fc2=L.Convolution2D(48, 128, 3, 1, 1, wscale=0.02 * math.sqrt(48 * 3 * 3)),
            fc3=L.Convolution2D(128, 128, 3, 1, 1, wscale=0.02 * math.sqrt(128 * 3 * 3)),
            dc4=L.Convolution2D(128, 256, 3, 2, 1, wscale=0.02 * math.sqrt(128 * 3 * 3)),
            fc5=L.Convolution2D(256, 256, 3, 1, 1, wscale=0.02 * math.sqrt(256 * 3 * 3)),
            fc6=L.Convolution2D(256, 256, 3, 1, 1, wscale=0.02 * math.sqrt(256 * 3 * 3)),
            dc7=L.Convolution2D(256, 256, 3, 2, 1, wscale=0.02 * math.sqrt(256 * 3 * 3)),
            fc8=L.Convolution2D(256, 512, 3, 1, 1, wscale=0.02 * math.sqrt(256 * 3 * 3)),
            fc9=L.DilatedConvolution2D(512, 1024, 3, 1, 2, dilate=2, wscale=0.02 * math.sqrt(512 * 3 * 3)),
            fc10=L.DilatedConvolution2D(1024, 1024, 3, 1, 4, dilate=4, wscale=0.02 * math.sqrt(1024 * 3 * 3)),
            fc11=L.DilatedConvolution2D(1024, 1024, 3, 1, 8, dilate=8, wscale=0.02 * math.sqrt(1024 * 3 * 3)),
            fc12=L.Convolution2D(1024, 1024, 3, 1, 1, wscale=0.02 * math.sqrt(1024 * 3 * 3)),
            fc13=L.Convolution2D(1024, 512, 3, 1, 1, wscale=0.02 * math.sqrt(1024 * 3 * 3)),
            fc14=L.Convolution2D(512, 256, 3, 1, 1, wscale=0.02 * math.sqrt(512 * 3 * 3)),
            uc15=L.Deconvolution2D(256, 256, 4, 2, 1, wscale=0.02 * math.sqrt(256 * 4 * 4)),
            fc16=L.Convolution2D(256, 256, 3, 1, 1, wscale=0.02 * math.sqrt(256 * 3 * 3)),
            fc17=L.Convolution2D(256, 128, 3, 1, 1, wscale=0.02 * math.sqrt(256 * 3 * 3)),
            uc18=L.Deconvolution2D(128, 128, 4, 2, 1, wscale=0.02 * math.sqrt(128 * 4 * 4)),
            fc19=L.Convolution2D(128, 128, 3, 1, 1, wscale=0.02 * math.sqrt(128 * 3 * 3)),
            fc20=L.Convolution2D(128, 48, 3, 1, 1, wscale=0.02 * math.sqrt(128 * 3 * 3)),
            uc21=L.Deconvolution2D(48, 48, 4, 2, 1, wscale=0.02 * math.sqrt(48 * 4 * 4)),
            fc22=L.Convolution2D(48, 24, 3, 1, 1, wscale=0.02 * math.sqrt(48 * 3 * 3)),
            fc23=L.Convolution2D(25, 1, 3, 1, 1, wscale=0.02 * math.sqrt(24 * 3 * 3)),

            bn1=L.BatchNormalization(48),
            bn2=L.BatchNormalization(128),
            bn3=L.BatchNormalization(128),
            bn4=L.BatchNormalization(256),
            bn5=L.BatchNormalization(256),
            bn6=L.BatchNormalization(256),
            bn7=L.BatchNormalization(256),
            bn8=L.BatchNormalization(512),
            bn9=L.BatchNormalization(1024),
            bn10=L.BatchNormalization(1024),
            bn11=L.BatchNormalization(1024),
            bn12=L.BatchNormalization(1024),
            bn13=L.BatchNormalization(512),
            bn14=L.BatchNormalization(256),
            bn15=L.BatchNormalization(256),
            bn16=L.BatchNormalization(256),
            bn17=L.BatchNormalization(128),
            bn18=L.BatchNormalization(128),
            bn19=L.BatchNormalization(128),
            bn20=L.BatchNormalization(48),
            bn21=L.BatchNormalization(48),
            bn22=L.BatchNormalization(24)
        )
Project: deep-learning-for-human-part-discovery-in-images    Author: shiba24    | project source | file source
def __init__(self, VGGModel=None, n_class=15):
        if VGGModel is None:
            self.wb = load_VGGmodel()
        else:
            self.wb = VGGModel
        self.n_class = n_class
        # layers to be trained
        super(HumanPartsNet, self).__init__(
            conv1_1=L.Convolution2D(  3,  64, 3, stride=1, pad=100, initialW=self.wb["conv1_1_W"], initial_bias=self.wb["conv1_1_b"]),
            conv1_2=L.Convolution2D( 64,  64, 3, stride=1, pad=1, initialW=self.wb["conv1_2_W"], initial_bias=self.wb["conv1_2_b"]),
            conv2_1=L.Convolution2D( 64, 128, 3, stride=1, pad=1, initialW=self.wb["conv2_1_W"], initial_bias=self.wb["conv2_1_b"]),
            conv2_2=L.Convolution2D(128, 128, 3, stride=1, pad=1, initialW=self.wb["conv2_2_W"], initial_bias=self.wb["conv2_2_b"]), 
            conv3_1=L.Convolution2D(128, 256, 3, stride=1, pad=1, initialW=self.wb["conv3_1_W"], initial_bias=self.wb["conv3_1_b"]),
            conv3_2=L.Convolution2D(256, 256, 3, stride=1, pad=1, initialW=self.wb["conv3_2_W"], initial_bias=self.wb["conv3_2_b"]),
            conv3_3=L.Convolution2D(256, 256, 3, stride=1, pad=1, initialW=self.wb["conv3_3_W"], initial_bias=self.wb["conv3_3_b"]),
            conv4_1=L.Convolution2D(256, 512, 3, stride=1, pad=1, initialW=self.wb["conv4_1_W"], initial_bias=self.wb["conv4_1_b"]),
            conv4_2=L.Convolution2D(512, 512, 3, stride=1, pad=1, initialW=self.wb["conv4_2_W"], initial_bias=self.wb["conv4_2_b"]),
            conv4_3=L.Convolution2D(512, 512, 3, stride=1, pad=1, initialW=self.wb["conv4_3_W"], initial_bias=self.wb["conv4_3_b"]), 
            conv5_1=L.Convolution2D(512, 512, 3, stride=1, pad=1, initialW=self.wb["conv5_1_W"], initial_bias=self.wb["conv5_1_b"]),
            conv5_2=L.Convolution2D(512, 512, 3, stride=1, pad=1, initialW=self.wb["conv5_2_W"], initial_bias=self.wb["conv5_2_b"]),
            conv5_3=L.Convolution2D(512, 512, 3, stride=1, pad=1, initialW=self.wb["conv5_3_W"], initial_bias=self.wb["conv5_3_b"]),

            upsample_pool1=L.Convolution2D(64, self.n_class, ksize=1, stride=1, pad=0, wscale=0.01),
            upsample_pool2=L.Convolution2D(128, self.n_class, ksize=1, stride=1, pad=0, wscale=0.01),
            upsample_pool3=L.Convolution2D(256, self.n_class, ksize=1, stride=1, pad=0, wscale=0.01),
            upsample_pool4=L.Convolution2D(512, self.n_class, ksize=1, stride=1, pad=0, wscale=0.01),

            fc6_conv=L.Convolution2D(512, 4096, 7, stride=1, pad=0, initialW=self.wb["fc6_W"], initial_bias=self.wb["fc6_b"]),
            fc7_conv=L.Convolution2D(4096, 4096, 1, stride=1, pad=0, initialW=self.wb["fc7_W"], initial_bias=self.wb["fc7_b"]),

            upconv1=L.Deconvolution2D(4096, self.n_class, ksize=4, stride=2, pad=0, nobias=True,
                                      initialW=self.get_deconv_filter([4, 4, self.n_class, 4096])),
            upconv2=L.Deconvolution2D(self.n_class, self.n_class, ksize=4, stride=2, pad=0, nobias=True,
                                      initialW=self.get_deconv_filter([4, 4, self.n_class, self.n_class])),
            upconv3=L.Deconvolution2D(self.n_class, self.n_class, ksize=4, stride=2, pad=0, nobias=True,
                                      initialW=self.get_deconv_filter([4, 4, self.n_class, self.n_class])),
            upconv4=L.Deconvolution2D(self.n_class, self.n_class, ksize=4, stride=2, pad=0, nobias=True,
                                      initialW=self.get_deconv_filter([4, 4, self.n_class, self.n_class])),
            upconv5=L.Deconvolution2D(self.n_class, self.n_class, ksize=4, stride=2, pad=0, nobias=True,
                                      initialW=self.get_deconv_filter([4, 4, self.n_class, self.n_class])),
        )
        self.train = True
        del self.wb
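The upconv layers above are initialized from self.get_deconv_filter(...), which is not included in this excerpt. In FCN-style segmentation networks such a deconvolution is usually seeded with a fixed bilinear-interpolation kernel, and the helper below is only a hypothetical sketch of that idea: the [kh, kw, out_channels, in_channels] argument layout is taken from the calls above, while the separable triangular kernel, the per-channel diagonal filling and the final transpose to Chainer's (in_channels, out_channels, kh, kw) weight layout are assumptions, not the project's actual code.

import numpy as np

def bilinear_deconv_filter(shape):
    # shape follows the calls above: [kh, kw, out_channels, in_channels]
    kh, kw, out_ch, in_ch = shape
    factor = (kh + 1) // 2
    center = factor - 1.0 if kh % 2 == 1 else factor - 0.5
    og = np.ogrid[:kh, :kw]
    # separable triangular (bilinear) kernel, e.g. [0.25, 0.75, 0.75, 0.25] for kh == 4
    kernel = ((1 - abs(og[0] - center) / factor) *
              (1 - abs(og[1] - center) / factor)).astype(np.float32)
    W = np.zeros(shape, dtype=np.float32)
    for c in range(min(out_ch, in_ch)):
        W[:, :, c, c] = kernel  # upsample each channel independently
    # Chainer's Deconvolution2D expects weights shaped (in_channels, out_channels, kh, kw)
    return W.transpose(3, 2, 0, 1)

With an initializer of this kind, the class-to-class upconv layers start out as plain 2x bilinear upsampling, which training can then refine.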