Python chainer.links 模块,Convolution2D() 实例源码

我们从 Python 开源项目中,提取了以下 50 个代码示例,用于说明如何使用 chainer.links.Convolution2D()。

项目:fcn    作者:wkentaro    | 项目源码 | 文件源码
def __init__(self, n_class=1000):
        """Register the VGG16 layers (conv1_1 .. fc8).

        Args:
            n_class: output size of the final classifier layer fc8.
        """
        def conv3x3(n_in, n_out):
            # Every VGG16 convolution is 3x3, stride 1, pad 1
            # (spatial size preserved; downsampling happens elsewhere).
            return L.Convolution2D(n_in, n_out, 3, 1, 1)

        super(VGG16, self).__init__()
        with self.init_scope():
            self.conv1_1 = conv3x3(3, 64)
            self.conv1_2 = conv3x3(64, 64)

            self.conv2_1 = conv3x3(64, 128)
            self.conv2_2 = conv3x3(128, 128)

            self.conv3_1 = conv3x3(128, 256)
            self.conv3_2 = conv3x3(256, 256)
            self.conv3_3 = conv3x3(256, 256)

            self.conv4_1 = conv3x3(256, 512)
            self.conv4_2 = conv3x3(512, 512)
            self.conv4_3 = conv3x3(512, 512)

            self.conv5_1 = conv3x3(512, 512)
            self.conv5_2 = conv3x3(512, 512)
            self.conv5_3 = conv3x3(512, 512)

            # 25088 == 512 channels * 7 * 7 spatial positions.
            self.fc6 = L.Linear(25088, 4096)
            self.fc7 = L.Linear(4096, 4096)
            self.fc8 = L.Linear(4096, n_class)
项目:chainerrl    作者:chainer    | 项目源码 | 文件源码
def __init__(self, n_actions, n_input_channels=4,
                 activation=F.relu, bias=0.1):
        """Dueling network: shared conv trunk plus advantage/value MLP streams."""
        self.n_actions = n_actions
        self.n_input_channels = n_input_channels
        self.activation = activation

        super().__init__()
        with self.init_scope():
            # Classic DQN trunk: 8x8/4, 4x4/2, 3x3/1 convolutions.
            trunk = [
                L.Convolution2D(n_input_channels, 32, 8, stride=4,
                                initial_bias=bias),
                L.Convolution2D(32, 64, 4, stride=2, initial_bias=bias),
                L.Convolution2D(64, 64, 3, stride=1, initial_bias=bias),
            ]
            self.conv_layers = chainer.ChainList(*trunk)

            # 3136 = flattened trunk output fed to each stream.
            self.a_stream = MLP(3136, n_actions, [512])
            self.v_stream = MLP(3136, 1, [512])
项目:chainerrl    作者:chainer    | 项目源码 | 文件源码
def init_like_torch(link):
    """Initialize Linear/Convolution2D parameters the way torch does.

    Torch draws W and b from U(-stdv, stdv) where stdv = 1 / sqrt(fan_in):
    fan_in is in_features for Linear and in_channels * kh * kw for conv.
    """
    # TODO(muupan): Use chainer's initializers when it is merged
    def _fill_uniform(param, stdv):
        # In-place fill keeps the Parameter object (and its registration) intact.
        param.data[:] = np.random.uniform(-stdv, stdv, size=param.data.shape)

    for l in link.links():
        if isinstance(l, L.Linear):
            fan_in = l.W.data.shape[1]
            stdv = 1 / np.sqrt(fan_in)
        elif isinstance(l, L.Convolution2D):
            _, in_channels, kh, kw = l.W.data.shape
            stdv = 1 / np.sqrt(in_channels * kh * kw)
        else:
            continue
        _fill_uniform(l.W, stdv)
        if l.b is not None:
            _fill_uniform(l.b, stdv)
项目:chainer-cyclegan    作者:Aixile    | 项目源码 | 文件源码
def __init__(self, ch0, ch1, bn=True, sample='down', activation=F.relu, dropout=False, noise=False):
        """Conv(+BN)(+activation) building block.

        sample='down' halves the resolution with a 4x4/2 conv; the 'none-*'
        modes keep resolution with a k x k conv and pad = k // 2.
        """
        self.bn = bn
        self.activation = activation
        self.dropout = dropout
        self.sample = sample
        self.noise = noise
        layers = {}
        w = chainer.initializers.Normal(0.02)
        # (ksize, stride, pad) per sampling mode; anything else falls back
        # to a plain 3x3 stride-1 conv.
        conv_params = {
            'down': (4, 2, 1),
            'none-9': (9, 1, 4),
            'none-7': (7, 1, 3),
            'none-5': (5, 1, 2),
        }
        k, s, p = conv_params.get(sample, (3, 1, 1))
        layers['c'] = L.Convolution2D(ch0, ch1, k, s, p, initialW=w)
        if bn:
            if self.noise:
                # When noise is injected, gamma is disabled on the BN layer.
                layers['batchnorm'] = L.BatchNormalization(ch1, use_gamma=False)
            else:
                layers['batchnorm'] = L.BatchNormalization(ch1)
        super(CBR, self).__init__(**layers)
项目:chainer-fast-neuralstyle    作者:yusuketomoto    | 项目源码 | 文件源码
def __init__(self):
        """Image transformation net: downsample, 5 residual blocks, upsample."""
        layers = {}
        # Downsampling head: 9x9 stem then two stride-2 convs (3->32->64->128).
        layers['c1'] = L.Convolution2D(3, 32, 9, stride=1, pad=4)
        layers['c2'] = L.Convolution2D(32, 64, 4, stride=2, pad=1)
        layers['c3'] = L.Convolution2D(64, 128, 4, stride=2, pad=1)
        # Five width-preserving residual blocks at 128 channels.
        for k in range(1, 6):
            layers['r{}'.format(k)] = ResidualBlock(128, 128)
        # Upsampling mirror: two stride-2 deconvs then a 9x9 output conv.
        layers['d1'] = L.Deconvolution2D(128, 64, 4, stride=2, pad=1)
        layers['d2'] = L.Deconvolution2D(64, 32, 4, stride=2, pad=1)
        layers['d3'] = L.Deconvolution2D(32, 3, 9, stride=1, pad=4)
        # Batch normalizations for the conv/deconv stages.
        for k, size in enumerate((32, 64, 128, 64, 32), start=1):
            layers['b{}'.format(k)] = L.BatchNormalization(size)
        super(FastStyleNet, self).__init__(**layers)
项目:chainer-fast-neuralstyle    作者:yusuketomoto    | 项目源码 | 文件源码
def __init__(self):
        """VGG16 convolutional layers (conv1_1 .. conv5_3) for feature extraction."""
        layers = {}
        # (block index, conv count, in channels, out channels); all 3x3/1 pad 1.
        cfg = [(1, 2, 3, 64), (2, 2, 64, 128), (3, 3, 128, 256),
               (4, 3, 256, 512), (5, 3, 512, 512)]
        for block, n_convs, n_in, n_out in cfg:
            for idx in range(1, n_convs + 1):
                layers['conv{}_{}'.format(block, idx)] = L.Convolution2D(
                    n_in if idx == 1 else n_out, n_out, 3, stride=1, pad=1)
        super(VGG, self).__init__(**layers)
        self.train = False
        # Scalar mean subtracted from inputs.
        self.mean = np.asarray(120, dtype=np.float32)
项目:chainer-fcis    作者:knorth55    | 项目源码 | 文件源码
def __init__(self, in_size, out_size, ch, stride=2):
        """ResNet bottleneck with a projection shortcut.

        Main path: 1x1 (stride) -> 3x3 -> 1x1; shortcut: 1x1 (stride)
        projection from in_size to out_size. All BNs share self.eps.
        """
        super(BottleNeckA, self).__init__()
        initialW = chainer.initializers.HeNormal()

        with self.init_scope():
            self.conv1 = L.Convolution2D(
                in_size, ch, 1, stride, 0, initialW=initialW, nobias=True)
            self.bn1 = L.BatchNormalization(ch, eps=self.eps)
            self.conv2 = L.Convolution2D(
                ch, ch, 3, 1, 1, initialW=initialW, nobias=True)
            self.bn2 = L.BatchNormalization(ch, eps=self.eps)
            self.conv3 = L.Convolution2D(
                ch, out_size, 1, 1, 0, initialW=initialW, nobias=True)
            self.bn3 = L.BatchNormalization(out_size, eps=self.eps)

            self.conv4 = L.Convolution2D(
                in_size, out_size, 1, stride, 0,
                initialW=initialW, nobias=True)
            # Fix: bn4 previously used the default eps while bn1-bn3 use
            # self.eps; keep all normalizations in the block consistent.
            self.bn4 = L.BatchNormalization(out_size, eps=self.eps)
项目:chainer-fcis    作者:knorth55    | 项目源码 | 文件源码
def __init__(self, in_size, out_size, ch, stride=1):
        """Dilated ResNet bottleneck with a projection shortcut.

        Same as BottleNeckA but the middle 3x3 conv is dilated (dilate=2,
        pad=2) so the receptive field grows without downsampling.
        """
        super(DilatedBottleNeckA, self).__init__()
        initialW = chainer.initializers.HeNormal()

        with self.init_scope():
            self.conv1 = L.Convolution2D(
                in_size, ch, 1, stride, 0, initialW=initialW, nobias=True)
            self.bn1 = L.BatchNormalization(ch, eps=self.eps)
            self.conv2 = L.DilatedConvolution2D(
                ch, ch, 3, 1, 2, dilate=2,
                initialW=initialW, nobias=True)
            self.bn2 = L.BatchNormalization(ch, eps=self.eps)
            self.conv3 = L.Convolution2D(
                ch, out_size, 1, 1, 0, initialW=initialW, nobias=True)
            self.bn3 = L.BatchNormalization(out_size, eps=self.eps)

            self.conv4 = L.Convolution2D(
                in_size, out_size, 1, stride, 0,
                initialW=initialW, nobias=True)
            # Fix: bn4 previously used the default eps while bn1-bn3 use
            # self.eps; keep all normalizations in the block consistent.
            self.bn4 = L.BatchNormalization(out_size, eps=self.eps)
项目:chainer-segnet    作者:pfnet-research    | 项目源码 | 文件源码
def __init__(self, n_encdec=4, n_classes=12, in_channel=3, n_mid=64):
        """Build a SegNet of ``n_encdec`` nested encoder-decoder stages.

        Args:
            n_encdec: number of nested EncDec stages (must be >= 1).
            n_classes: output classes of the 1x1 classifier conv ``conv_cls``.
            in_channel: channels of the input fed to the outermost stage.
            n_mid: channel width between stages.
        """
        assert n_encdec >= 1
        # NOTE(review): ``w`` is passed as the 6th positional argument of
        # Convolution2D; in chainer v1 that slot is ``wscale`` but in later
        # versions it is ``nobias`` — confirm against the pinned chainer.
        w = math.sqrt(2)
        super(SegNet, self).__init__(
            conv_cls=L.Convolution2D(n_mid, n_classes, 1, 1, 0, w))

        # Create and add EncDecs
        for i in six.moves.range(1, n_encdec + 1):
            name = 'encdec{}'.format(i)
            self.add_link(name, EncDec(n_mid if i > 1 else in_channel, n_mid))
        # Nest the stages: encdec{d} holds encdec{d+1} in its ``inside`` slot.
        for d in six.moves.range(1, n_encdec):
            encdec = getattr(self, 'encdec{}'.format(d))
            encdec.inside = getattr(self, 'encdec{}'.format(d + 1))
            # NOTE(review): this setattr re-assigns the object obtained by the
            # getattr above, so it appears to be a no-op re-registration.
            setattr(self, 'encdec{}'.format(d), encdec)

        self.n_encdec = n_encdec
        self.n_classes = n_classes
        self.train = True
项目:deep-crf    作者:aonotas    | 项目源码 | 文件源码
def __init__(self, emb_dim=100, window_size=3, init_emb=None,
                 hidden_dim=100, vocab_size=0, splitter=u' ', add_dim=0,
                 PAD_IDX=None):
        """
        Neural network tagger by dos (Santos and Zadrozny, ICML 2014).

        The embedded sequence is treated as a 1-channel image; a single
        Convolution2D with kernel (window_size, emb_dim) and stride
        (1, emb_dim) slides one token at a time over full embedding rows,
        producing ``hidden_dim`` feature maps per window.
        """
        assert window_size % 2 == 1, 'window_size must be odd.'
        dim = emb_dim
        hidden_dim = hidden_dim + add_dim
        self.add_dim = add_dim
        self.hidden_dim = hidden_dim
        # pad=(window_size // 2, 0) keeps output length equal to input length
        # along the token axis.
        super(BaseCNNEncoder, self).__init__(emb=L.EmbedID(vocab_size, emb_dim, ignore_label=-1),
                                             conv=L.Convolution2D(1, hidden_dim, ksize=(window_size, dim),
                                                                  stride=(1, dim), pad=(window_size // 2, 0)))
        self.splitter = splitter
        # splitter=None means character-level input; otherwise word-level.
        self.char_level_flag = True if self.splitter is None else False
        self.word_level_flag = not self.char_level_flag
        self.emb_dim = emb_dim
        self.window_size = window_size
        self.dim = dim
        self.PAD_IDX = PAD_IDX
        self.train = True
        # initialize embeddings
        if init_emb is not None:
            # NOTE(review): assigns init_emb to the parameter attribute ``W``
            # itself rather than ``W.data`` — confirm init_emb has the type
            # this chainer version expects.
            self.emb.W = init_emb
项目:chainer-neural-style    作者:dsanno    | 项目源码 | 文件源码
def __init__(self):
        """VGG16 layers through conv4_3 for style/content feature extraction."""
        layers = {}
        # (block index, conv count, in channels, out channels); all 3x3/1 pad 1.
        cfg = [(1, 2, 3, 64), (2, 2, 64, 128),
               (3, 3, 128, 256), (4, 3, 256, 512)]
        for block, n_convs, n_in, n_out in cfg:
            for idx in range(1, n_convs + 1):
                layers['conv{}_{}'.format(block, idx)] = L.Convolution2D(
                    n_in if idx == 1 else n_out, n_out, 3, stride=1, pad=1)
        super(VGG, self).__init__(**layers)
        # Per-channel BGR mean subtracted from inputs.
        self.mean = np.asarray([104, 117, 124], dtype=np.float32)
项目:chainer-neural-style    作者:dsanno    | 项目源码 | 文件源码
def __init__(self):
        """VGG19 convolutional layers (conv1_1 .. conv5_4)."""
        layers = {}
        # (block index, conv count, in channels, out channels); all 3x3/1 pad 1.
        cfg = [(1, 2, 3, 64), (2, 2, 64, 128), (3, 4, 128, 256),
               (4, 4, 256, 512), (5, 4, 512, 512)]
        for block, n_convs, n_in, n_out in cfg:
            for idx in range(1, n_convs + 1):
                layers['conv{}_{}'.format(block, idx)] = L.Convolution2D(
                    n_in if idx == 1 else n_out, n_out, 3, stride=1, pad=1)
        super(VGG19, self).__init__(**layers)
        # Per-channel BGR mean subtracted from inputs.
        self.mean = np.asarray([104, 117, 124], dtype=np.float32)
项目:tensorboard-pytorch    作者:lanpa    | 项目源码 | 文件源码
def __init__(self, bottom_width=4, ch=512, wscale=0.02):
        """DCGAN-style discriminator: alternating 3x3/1 and 4x4/2 convs.

        Channel width grows from ch // 8 up to ch; a final linear layer maps
        the bottom_width x bottom_width x ch feature map to one logit.
        """
        init_w = chainer.initializers.Normal(wscale)

        def conv(n_in, n_out, ksize, stride):
            # Every conv shares pad=1 and the same Normal(wscale) initializer.
            return L.Convolution2D(n_in, n_out, ksize, stride, 1, initialW=init_w)

        def bn(size):
            # Gamma-free batch normalization throughout.
            return L.BatchNormalization(size, use_gamma=False)

        super(Discriminator, self).__init__()
        with self.init_scope():
            self.c0_0 = conv(3, ch // 8, 3, 1)
            self.c0_1 = conv(ch // 8, ch // 4, 4, 2)
            self.c1_0 = conv(ch // 4, ch // 4, 3, 1)
            self.c1_1 = conv(ch // 4, ch // 2, 4, 2)
            self.c2_0 = conv(ch // 2, ch // 2, 3, 1)
            self.c2_1 = conv(ch // 2, ch // 1, 4, 2)
            self.c3_0 = conv(ch // 1, ch // 1, 3, 1)
            self.l4 = L.Linear(bottom_width * bottom_width * ch, 1, initialW=init_w)
            self.bn0_1 = bn(ch // 4)
            self.bn1_0 = bn(ch // 4)
            self.bn1_1 = bn(ch // 2)
            self.bn2_0 = bn(ch // 2)
            self.bn2_1 = bn(ch // 1)
            self.bn3_0 = bn(ch // 1)
项目:async-rl    作者:muupan    | 项目源码 | 文件源码
def init_like_torch(link):
    """Initialize Linear/Convolution2D parameters the way torch does.

    Torch draws W and b from U(-stdv, stdv) where stdv = 1 / sqrt(fan_in):
    fan_in is in_features for Linear and in_channels * kh * kw for conv.
    """
    # TODO(muupan): Use chainer's initializers when it is merged
    def _fill_uniform(param, stdv):
        # In-place fill keeps the Parameter object (and its registration) intact.
        param.data[:] = np.random.uniform(-stdv, stdv, size=param.data.shape)

    for l in link.links():
        if isinstance(l, L.Linear):
            fan_in = l.W.data.shape[1]
            stdv = 1 / np.sqrt(fan_in)
        elif isinstance(l, L.Convolution2D):
            _, in_channels, kh, kw = l.W.data.shape
            stdv = 1 / np.sqrt(in_channels * kh * kw)
        else:
            continue
        _fill_uniform(l.W, stdv)
        if l.b is not None:
            _fill_uniform(l.b, stdv)
项目:DeepLearning    作者:Wanwannodao    | 项目源码 | 文件源码
def __init__(self, n_actions):
        """DQN-style Q function: three convs then two fully-connected layers."""
        initializer = chainer.initializers.HeNormal()
        n_filters = (32, 64, 64)
        fc_unit = 256

        super(QFunction, self).__init__(
            # Classic Atari trunk: 8x8/4, 4x4/2, 3x3/1 over 4 stacked frames.
            conv1=L.Convolution2D(4, n_filters[0], 8, stride=4, pad=0),
            conv2=L.Convolution2D(n_filters[0], n_filters[1], 4, stride=2, pad=0),
            conv3=L.Convolution2D(n_filters[1], n_filters[2], 3, stride=1, pad=0),
            # 3136 = flattened conv3 output.
            fc1=L.Linear(3136, fc_unit, initialW=initializer),
            fc2=L.Linear(fc_unit, n_actions, initialW=initializer),
        )
项目:MultimodalDL    作者:masataka46    | 项目源码 | 文件源码
def __init__(self):
        """Two-stream multimodal net fused by a bilinear layer.

        The 'R' and 'D' streams share an identical AlexNet-style topology;
        fc8 (Bilinear) fuses both 4096-d stream outputs, fc9 classifies.
        """
        layers = {}
        for s in ('R', 'D'):
            layers['conv%s1' % s] = L.Convolution2D(3, 96, 11, stride=4)
            layers['conv%s2' % s] = L.Convolution2D(96, 256, 5, pad=2)
            layers['conv%s3' % s] = L.Convolution2D(256, 384, 3, pad=1)
            layers['conv%s4' % s] = L.Convolution2D(384, 384, 3, pad=1)
            layers['conv%s5' % s] = L.Convolution2D(384, 256, 3, pad=1)
            layers['fc%s6' % s] = L.Linear(9216, 4096)
            layers['fc%s7' % s] = L.Linear(4096, 4096)
        layers['fc8'] = L.Bilinear(4096, 4096, 4096)
        layers['fc9'] = L.Linear(4096, 1000)
        super(MDL_full, self).__init__(**layers)
        self.train = True
项目:chainer-began    作者:hvy    | 项目源码 | 文件源码
def __init__(self, n, h, in_size, in_channels, embed_size, block_size):
        """Conv stack of ``n_blocks`` groups whose width grows by n per group.

        l0 maps the input to n channels; each group has block_size 3x3 convs,
        the last conv of a group widening to the next group's channel count
        (except the final group, which keeps its width). ln embeds to h dims.
        """
        super().__init__(
            l0=L.Convolution2D(in_channels, n, 3, stride=1, pad=1),
            ln=L.Linear(None, h))

        # Number of groups needed to shrink in_size down to embed_size.
        self.n_blocks = int(log2(in_size / embed_size)) + 1
        self.block_size = block_size

        for i in range(self.n_blocks):
            n_in = (i + 1) * n
            is_last = i == self.n_blocks - 1
            n_out = n_in if is_last else (i + 2) * n
            base = i * block_size
            # block_size - 1 width-preserving convs...
            for j in range(block_size - 1):
                self.add_link('c{}'.format(base + j),
                              L.Convolution2D(n_in, n_in, 3, stride=1, pad=1))
            # ...then one conv that widens to the next group's channels.
            self.add_link('c{}'.format(base + block_size - 1),
                          L.Convolution2D(n_in, n_out, 3, stride=1, pad=1))
项目:chainer-dfi    作者:dsanno    | 项目源码 | 文件源码
def __init__(self):
        """VGG19 convolutional layers (conv1_1 .. conv5_4)."""
        layers = {}
        # (block index, conv count, in channels, out channels); all 3x3/1 pad 1.
        cfg = [(1, 2, 3, 64), (2, 2, 64, 128), (3, 4, 128, 256),
               (4, 4, 256, 512), (5, 4, 512, 512)]
        for block, n_convs, n_in, n_out in cfg:
            for idx in range(1, n_convs + 1):
                layers['conv{}_{}'.format(block, idx)] = L.Convolution2D(
                    n_in if idx == 1 else n_out, n_out, 3, stride=1, pad=1)
        super(VGG19, self).__init__(**layers)
项目:chainer-deconv    作者:germanRos    | 项目源码 | 文件源码
def __init__(self):
        """GoogLeNet (Inception v1) with its two auxiliary classifier heads.

        Per the chainer documentation, L.Inception takes
        (in_channels, out1x1, proj3x3, out3x3, proj5x5, out5x5, proj_pool).
        """
        super(GoogLeNet, self).__init__(
            conv1=L.Convolution2D(3,  64, 7, stride=2, pad=3),
            conv2_reduce=L.Convolution2D(64,  64, 1),
            conv2=L.Convolution2D(64, 192, 3, stride=1, pad=1),
            inc3a=L.Inception(192,  64,  96, 128, 16,  32,  32),
            inc3b=L.Inception(256, 128, 128, 192, 32,  96,  64),
            inc4a=L.Inception(480, 192,  96, 208, 16,  48,  64),
            inc4b=L.Inception(512, 160, 112, 224, 24,  64,  64),
            inc4c=L.Inception(512, 128, 128, 256, 24,  64,  64),
            inc4d=L.Inception(512, 112, 144, 288, 32,  64,  64),
            inc4e=L.Inception(528, 256, 160, 320, 32, 128, 128),
            inc5a=L.Inception(832, 256, 160, 320, 32, 128, 128),
            inc5b=L.Inception(832, 384, 192, 384, 48, 128, 128),
            # Main classifier head.
            loss3_fc=L.Linear(1024, 1000),

            # Auxiliary classifier over a 512-channel feature map.
            loss1_conv=L.Convolution2D(512, 128, 1),
            loss1_fc1=L.Linear(4 * 4 * 128, 1024),
            loss1_fc2=L.Linear(1024, 1000),

            # Auxiliary classifier over a 528-channel feature map.
            loss2_conv=L.Convolution2D(528, 128, 1),
            loss2_fc1=L.Linear(4 * 4 * 128, 1024),
            loss2_fc2=L.Linear(1024, 1000)
        )
        self.train = True
项目:chainer-deconv    作者:germanRos    | 项目源码 | 文件源码
def test_convolution(self):
        """Check that a grouped convolution loads into a block-diagonal W.

        The fixture uses 3 groups, 4 input channels and 2 output channels
        per group; each group's 32 weights should be the consecutive values
        i * 32 .. (i + 1) * 32, and the 6 biases should be 0..5.
        """
        self.init_func()
        self.assertEqual(len(self.func.layers), 1)
        f = self.func.l1
        self.assertIsInstance(f, links.Convolution2D)
        for i in range(3):  # 3 == group
            in_slice = slice(i * 4, (i + 1) * 4)  # 4 == channels
            out_slice = slice(i * 2, (i + 1) * 2)  # 2 == num / group
            # Only the block-diagonal slice of W belongs to group i.
            w = f.W.data[out_slice, in_slice]
            numpy.testing.assert_array_equal(
                w.flatten(), range(i * 32, (i + 1) * 32))

        numpy.testing.assert_array_equal(
            f.b.data, range(6))

        # The wrapped function should be invoked exactly once with the input.
        self.call(['x'], ['y'])
        self.mock.assert_called_once_with(self.inputs[0])
项目:deep_metric_learning    作者:ronekko    | 项目源码 | 文件源码
def __init__(self, out_dim):
        """Small convnet: conv+BN pairs followed by two linear layers.

        Args:
            out_dim: size of the final linear layer's output.
        """
        layers = {}
        # (name suffix, in channels, out channels, kernel size); each conv
        # is paired with a BatchNormalization of its output width.
        conv_specs = [
            ('1', 3, 50, 3),
            ('21', 50, 100, 3),
            ('22', 100, 100, 1),
            ('31', 100, 200, 3),
            ('32', 200, 200, 3),
            ('41', 200, 400, 3),
            ('42', 400, 400, 1),
            ('5', 400, 400, 1),
            ('6', 400, 400, 1),
        ]
        for suffix, n_in, n_out, ksize in conv_specs:
            layers['conv' + suffix] = L.Convolution2D(n_in, n_out, ksize)
            layers['bn_conv' + suffix] = L.BatchNormalization(n_out)
        layers['linear1'] = L.Linear(400, 400)
        layers['bn_linear1'] = L.BatchNormalization(400)
        layers['linear2'] = L.Linear(400, out_dim)
        super(SimpleConvnet, self).__init__(**layers)
项目:pyramidal_residual_networks    作者:nutszebra    | 项目源码 | 文件源码
def __init__(self, in_channel, out_channel, filter_sizes=(3, 3), strides=(1, 1), pads=(1, 1)):
        """BN -> conv -> BN -> (ReLU in forward) -> conv -> BN unit.

        filter_sizes/strides/pads give the (ksize, stride, pad) of the two
        convolutions in order.
        """
        super(BN_Conv_BN_ReLU_Conv_BN, self).__init__()
        modules = [
            ('bn1', L.BatchNormalization(in_channel)),
            ('conv1', L.Convolution2D(in_channel, out_channel,
                                      filter_sizes[0], strides[0], pads[0])),
            ('bn2', L.BatchNormalization(out_channel)),
            ('conv2', L.Convolution2D(out_channel, out_channel,
                                      filter_sizes[1], strides[1], pads[1])),
            ('bn3', L.BatchNormalization(out_channel)),
        ]
        # register layers
        for name, link in modules:
            self.add_link(name, link)
        self.modules = modules
        self.in_channel = in_channel
        self.out_channel = out_channel
        self.filter_sizes = filter_sizes
        self.strides = strides
        self.pads = pads
项目:TOHO_AI    作者:re53min    | 项目源码 | 文件源码
def __init__(self, n_outputs, train=True):
        """AlexNet-like classifier; in_channels=None are inferred on first use."""
        links = dict(
            conv1=L.Convolution2D(None, 96, 11, stride=4),
            bn1=L.BatchNormalization(96),
            conv2=L.Convolution2D(None, 128, 5, pad=2),
            bn2=L.BatchNormalization(128),
            conv3=L.Convolution2D(None, 256, 3, pad=1),
            conv4=L.Convolution2D(None, 384, 3, pad=1),
            l5=L.Linear(None, 512),
            l6=L.Linear(512, n_outputs),
        )
        super(ImageNet, self).__init__(**links)
        # Re-draw every already-initialized parameter from U(-0.1, 0.1).
        for p in self.params():
            p.data[...] = np.random.uniform(-0.1, 0.1, p.data.shape)
        self.train = train
项目:Human-Pose-Estimation-Using-FCN    作者:jessiechouuu    | 项目源码 | 文件源码
def __init__(self):
        """Full VGG16 (conv1_1 .. fc8) for 1000-way classification."""
        layers = {}
        # (block index, conv count, in channels, out channels); all 3x3/1 pad 1.
        cfg = [(1, 2, 3, 64), (2, 2, 64, 128), (3, 3, 128, 256),
               (4, 3, 256, 512), (5, 3, 512, 512)]
        for block, n_convs, n_in, n_out in cfg:
            for idx in range(1, n_convs + 1):
                layers['conv{}_{}'.format(block, idx)] = L.Convolution2D(
                    n_in if idx == 1 else n_out, n_out, 3, stride=1, pad=1)
        # 25088 == 512 channels * 7 * 7 spatial positions.
        layers['fc6'] = L.Linear(25088, 4096)
        layers['fc7'] = L.Linear(4096, 4096)
        layers['fc8'] = L.Linear(4096, 1000)
        super(VGGNet, self).__init__(**layers)
        self.train = False
项目:DeepLearning    作者:fushuyue    | 项目源码 | 文件源码
def __init__(self):
        """VGG-style CIFAR net (3x32x32 input): three conv blocks + three FCs."""
        layers = {}
        # (block index, conv count, in channels, out channels); all 3x3 pad 1.
        cfg = [(1, 2, 3, 64), (2, 2, 64, 128), (3, 4, 128, 256)]
        for block, n_convs, n_in, n_out in cfg:
            for idx in range(1, n_convs + 1):
                layers['conv{}_{}'.format(block, idx)] = L.Convolution2D(
                    n_in if idx == 1 else n_out, n_out, 3, pad=1)
        # 256 * 4 * 4 flattened features into the classifier head.
        layers['fc4'] = L.Linear(256 * 4 * 4, 500)
        layers['fc5'] = L.Linear(500, 500)
        layers['fc6'] = L.Linear(500, 10)
        super(CNN_Drop, self).__init__(**layers)
项目:DeepLearning    作者:fushuyue    | 项目源码 | 文件源码
def __init__(self):
        """VGG-style CIFAR net (3x32x32 input): three conv blocks + three FCs."""
        layers = {}
        # (block index, conv count, in channels, out channels); all 3x3 pad 1.
        cfg = [(1, 2, 3, 64), (2, 2, 64, 128), (3, 4, 128, 256)]
        for block, n_convs, n_in, n_out in cfg:
            for idx in range(1, n_convs + 1):
                layers['conv{}_{}'.format(block, idx)] = L.Convolution2D(
                    n_in if idx == 1 else n_out, n_out, 3, pad=1)
        # 256 * 4 * 4 flattened features into the classifier head.
        layers['fc4'] = L.Linear(256 * 4 * 4, 500)
        layers['fc5'] = L.Linear(500, 500)
        layers['fc6'] = L.Linear(500, 10)
        super(CNN_Pooling, self).__init__(**layers)
项目:DeepLearning    作者:fushuyue    | 项目源码 | 文件源码
def __init__(self):
        """VGG-style CIFAR net (3x32x32 input): three conv blocks + three FCs."""
        layers = {}
        # (block index, conv count, in channels, out channels); all 3x3 pad 1.
        cfg = [(1, 2, 3, 64), (2, 2, 64, 128), (3, 4, 128, 256)]
        for block, n_convs, n_in, n_out in cfg:
            for idx in range(1, n_convs + 1):
                layers['conv{}_{}'.format(block, idx)] = L.Convolution2D(
                    n_in if idx == 1 else n_out, n_out, 3, pad=1)
        # 256 * 4 * 4 flattened features into the classifier head.
        layers['fc4'] = L.Linear(256 * 4 * 4, 500)
        layers['fc5'] = L.Linear(500, 500)
        layers['fc6'] = L.Linear(500, 10)
        super(CNN_avePooling, self).__init__(**layers)
项目:SketchSimplification    作者:La4La    | 项目源码 | 文件源码
def __init__(self):
        """Discriminator: six stride-2 convs, BN after each, 2-way linear head."""
        def conv(n_in, n_out, ksize, stride, pad):
            # Old-chainer ``wscale`` initialization scaled by the filter fan-in.
            return L.Convolution2D(n_in, n_out, ksize, stride, pad,
                                   wscale=0.02 * math.sqrt(ksize * ksize * n_in))

        layers = {
            'c1': conv(1, 16, 5, 2, 2),
            'c2': conv(16, 32, 3, 2, 1),
            'c3': conv(32, 64, 3, 2, 1),
            'c4': conv(64, 128, 3, 2, 1),
            'c5': conv(128, 256, 3, 2, 1),
            'c6': conv(256, 512, 3, 2, 1),
            'c7': L.Linear(4 * 4 * 512, 2, wscale=0.02 * math.sqrt(4 * 4 * 512)),
        }
        # One batch normalization per conv stage.
        for i, size in enumerate((16, 32, 64, 128, 256, 512), start=1):
            layers['bn{}'.format(i)] = L.BatchNormalization(size)
        super(DIS, self).__init__(**layers)
项目:chainercv    作者:chainer    | 项目源码 | 文件源码
def __init__(
            self, in_channels=512, mid_channels=512, ratios=[0.5, 1, 2],
            anchor_scales=[8, 16, 32], feat_stride=16,
            initialW=None,
            proposal_creator_params=dict(),
    ):
        """Region Proposal Network head.

        Args:
            in_channels: channels of the incoming feature map.
            mid_channels: channels of the shared 3x3 conv.
            ratios / anchor_scales: forwarded to ``generate_anchor_base``.
            feat_stride: stride of the feature map w.r.t. the input image.
            initialW: weight initializer shared by all three convolutions.
            proposal_creator_params: kwargs for ``ProposalCreator``.

        NOTE(review): the list/dict defaults are mutable and shared across
        calls — safe only if callees never mutate them.
        """
        self.anchor_base = generate_anchor_base(
            anchor_scales=anchor_scales, ratios=ratios)
        self.feat_stride = feat_stride
        self.proposal_layer = ProposalCreator(**proposal_creator_params)

        # One anchor per row of anchor_base.
        n_anchor = self.anchor_base.shape[0]
        super(RegionProposalNetwork, self).__init__()
        with self.init_scope():
            # Shared 3x3 conv over the backbone feature map.
            self.conv1 = L.Convolution2D(
                in_channels, mid_channels, 3, 1, 1, initialW=initialW)
            # 1x1 heads: 2 outputs per anchor (score) and 4 per anchor (loc).
            self.score = L.Convolution2D(
                mid_channels, n_anchor * 2, 1, 1, 0, initialW=initialW)
            self.loc = L.Convolution2D(
                mid_channels, n_anchor * 4, 1, 1, 0, initialW=initialW)
项目:chainercv    作者:chainer    | 项目源码 | 文件源码
def __init__(self):
        """VGG16 backbone variant (through conv7) with dilated conv5/conv6.

        in_channels is omitted in every Convolution2D/DilatedConvolution2D
        call, so the first positional argument is out_channels and the input
        width is inferred at the first forward pass.
        """
        super(VGG16, self).__init__()
        with self.init_scope():
            self.conv1_1 = L.Convolution2D(64, 3, pad=1)
            self.conv1_2 = L.Convolution2D(64, 3, pad=1)

            self.conv2_1 = L.Convolution2D(128, 3, pad=1)
            self.conv2_2 = L.Convolution2D(128, 3, pad=1)

            self.conv3_1 = L.Convolution2D(256, 3, pad=1)
            self.conv3_2 = L.Convolution2D(256, 3, pad=1)
            self.conv3_3 = L.Convolution2D(256, 3, pad=1)

            self.conv4_1 = L.Convolution2D(512, 3, pad=1)
            self.conv4_2 = L.Convolution2D(512, 3, pad=1)
            self.conv4_3 = L.Convolution2D(512, 3, pad=1)
            # Scale normalization over conv4_3's 512 channels; the learned
            # scale starts at 20 (presumably SSD's conv4_3 norm — confirm
            # against the Normalize implementation).
            self.norm4 = Normalize(512, initial=initializers.Constant(20))

            self.conv5_1 = L.DilatedConvolution2D(512, 3, pad=1)
            self.conv5_2 = L.DilatedConvolution2D(512, 3, pad=1)
            self.conv5_3 = L.DilatedConvolution2D(512, 3, pad=1)

            # conv6: 3x3 with dilate=6 (pad=6 keeps size); conv7: 1x1.
            self.conv6 = L.DilatedConvolution2D(1024, 3, pad=6, dilate=6)
            self.conv7 = L.Convolution2D(1024, 1)
项目:chainercv    作者:chainer    | 项目源码 | 文件源码
def __init__(self):
        """Extra SSD300 feature layers (conv8_1 .. conv11_2).

        in_channels is omitted everywhere, so the first positional argument
        of Convolution2D is out_channels (inferred input width).
        """
        init = {
            'initialW': initializers.LeCunUniform(),
            'initial_bias': initializers.Zero(),
        }
        # (link name, out_channels, ksize, extra Convolution2D kwargs)
        specs = [
            ('conv8_1', 256, 1, {}),
            ('conv8_2', 512, 3, {'stride': 2, 'pad': 1}),
            ('conv9_1', 128, 1, {}),
            ('conv9_2', 256, 3, {'stride': 2, 'pad': 1}),
            ('conv10_1', 128, 1, {}),
            ('conv10_2', 256, 3, {}),
            ('conv11_1', 128, 1, {}),
            ('conv11_2', 256, 3, {}),
        ]
        super(VGG16Extractor300, self).__init__()
        with self.init_scope():
            for name, out_ch, ksize, extra in specs:
                kwargs = dict(init, **extra)
                setattr(self, name, L.Convolution2D(out_ch, ksize, **kwargs))
项目:chainercv    作者:chainer    | 项目源码 | 文件源码
def __init__(self):
        """Extra SSD512 feature layers (conv8_1 .. conv12_2).

        in_channels is omitted everywhere, so the first positional argument
        of Convolution2D is out_channels (inferred input width).
        """
        init = {
            'initialW': initializers.LeCunUniform(),
            'initial_bias': initializers.Zero(),
        }
        # (link name, out_channels, ksize, extra Convolution2D kwargs)
        specs = [
            ('conv8_1', 256, 1, {}),
            ('conv8_2', 512, 3, {'stride': 2, 'pad': 1}),
            ('conv9_1', 128, 1, {}),
            ('conv9_2', 256, 3, {'stride': 2, 'pad': 1}),
            ('conv10_1', 128, 1, {}),
            ('conv10_2', 256, 3, {'stride': 2, 'pad': 1}),
            ('conv11_1', 128, 1, {}),
            ('conv11_2', 256, 3, {'stride': 2, 'pad': 1}),
            ('conv12_1', 128, 1, {}),
            ('conv12_2', 256, 4, {'pad': 1}),
        ]
        super(VGG16Extractor512, self).__init__()
        with self.init_scope():
            for name, out_ch, ksize, extra in specs:
                kwargs = dict(init, **extra)
                setattr(self, name, L.Convolution2D(out_ch, ksize, **kwargs))
项目:chainercv    作者:chainer    | 项目源码 | 文件源码
def __init__(
            self, n_class, aspect_ratios,
            initialW=None, initial_bias=None):
        """Multibox head: per-source loc/conf convolutions.

        For each feature source, n = (len(ar) + 1) * 2 default boxes are
        predicted, with 4 offsets (loc) and n_class scores (conf) per box.
        """
        self.n_class = n_class
        self.aspect_ratios = aspect_ratios

        super(Multibox, self).__init__()
        with self.init_scope():
            self.loc = chainer.ChainList()
            self.conf = chainer.ChainList()

        # Fall back to the default initializers when none are given.
        if initialW is None:
            initialW = initializers.LeCunUniform()
        if initial_bias is None:
            initial_bias = initializers.Zero()
        init = {'initialW': initialW, 'initial_bias': initial_bias}

        for ar in aspect_ratios:
            n = (len(ar) + 1) * 2
            # in_channels omitted: out_channels first, input width inferred.
            self.loc.add_link(L.Convolution2D(n * 4, 3, pad=1, **init))
            self.conf.add_link(L.Convolution2D(
                n * self.n_class, 3, pad=1, **init))
项目:deel    作者:uei    | 项目源码 | 文件源码
def __init__(self):
        """GoogLeNet (Inception v1) with its two auxiliary classifier heads.

        Links are registered with the old-style ``Chain.__init__(**links)``
        keyword API and fixed (non-inferred) input channel counts.
        """
        super(GoogLeNet, self).__init__(
            # Stem: 7x7/2 conv, 1x1 reduction, 3x3 conv.
            conv1=L.Convolution2D(3,  64, 7, stride=2, pad=3),
            conv2_reduce=L.Convolution2D(64,  64, 1),
            conv2=L.Convolution2D(64, 192, 3, stride=1, pad=1),
            # Inception modules 3a-5b.
            inc3a=L.Inception(192,  64,  96, 128, 16,  32,  32),
            inc3b=L.Inception(256, 128, 128, 192, 32,  96,  64),
            inc4a=L.Inception(480, 192,  96, 208, 16,  48,  64),
            inc4b=L.Inception(512, 160, 112, 224, 24,  64,  64),
            inc4c=L.Inception(512, 128, 128, 256, 24,  64,  64),
            inc4d=L.Inception(512, 112, 144, 288, 32,  64,  64),
            inc4e=L.Inception(528, 256, 160, 320, 32, 128, 128),
            inc5a=L.Inception(832, 256, 160, 320, 32, 128, 128),
            inc5b=L.Inception(832, 384, 192, 384, 48, 128, 128),
            # Main 1000-way classifier.
            loss3_fc=L.Linear(1024, 1000),

            # Auxiliary classifier 1.
            loss1_conv=L.Convolution2D(512, 128, 1),
            loss1_fc1=L.Linear(4 * 4 * 128, 1024),
            loss1_fc2=L.Linear(1024, 1000),

            # Auxiliary classifier 2.
            loss2_conv=L.Convolution2D(528, 128, 1),
            loss2_fc1=L.Linear(4 * 4 * 128, 1024),
            loss2_fc2=L.Linear(1024, 1000)
        )
        # NOTE(review): presumably toggles train/test behavior in the forward
        # pass (not visible here) -- confirm in the rest of the class.
        self.train = True
项目:chainermn    作者:chainer    | 项目源码 | 文件源码
def __init__(self, in_size, ch, out_size, stride=2):
        """Residual bottleneck with a projection shortcut ("type A").

        Main path: 1x1 reduce -> 3x3 -> 1x1 expand, each followed by BN.
        Shortcut: strided 1x1 projection to match the output shape.
        """
        super(BottleNeckA, self).__init__()
        he_init = initializers.HeNormal()

        with self.init_scope():
            self.conv1 = L.Convolution2D(
                in_size, ch, 1, stride, 0, initialW=he_init, nobias=True)
            self.bn1 = L.BatchNormalization(ch)
            self.conv2 = L.Convolution2D(
                ch, ch, 3, 1, 1, initialW=he_init, nobias=True)
            self.bn2 = L.BatchNormalization(ch)
            self.conv3 = L.Convolution2D(
                ch, out_size, 1, 1, 0, initialW=he_init, nobias=True)
            self.bn3 = L.BatchNormalization(out_size)

            # Projection shortcut (downsamples with the same stride).
            self.conv4 = L.Convolution2D(
                in_size, out_size, 1, stride, 0,
                initialW=he_init, nobias=True)
            self.bn4 = L.BatchNormalization(out_size)
项目:chainermn    作者:chainer    | 项目源码 | 文件源码
def __init__(self):
        """GoogLeNet (Inception v1) with two auxiliary classifier heads.

        Every link takes ``None`` as its input channel count, so input
        sizes are inferred at the first forward pass.
        """
        super(GoogLeNet, self).__init__()
        with self.init_scope():
            # Stem: 7x7/2 conv, 1x1 reduction, 3x3 conv.
            self.conv1 = L.Convolution2D(None, 64, 7, stride=2, pad=3)
            self.conv2_reduce = L.Convolution2D(None, 64, 1)
            self.conv2 = L.Convolution2D(None, 192, 3, stride=1, pad=1)
            # Inception modules 3a-5b.
            self.inc3a = L.Inception(None, 64, 96, 128, 16, 32, 32)
            self.inc3b = L.Inception(None, 128, 128, 192, 32, 96, 64)
            self.inc4a = L.Inception(None, 192, 96, 208, 16, 48, 64)
            self.inc4b = L.Inception(None, 160, 112, 224, 24, 64, 64)
            self.inc4c = L.Inception(None, 128, 128, 256, 24, 64, 64)
            self.inc4d = L.Inception(None, 112, 144, 288, 32, 64, 64)
            self.inc4e = L.Inception(None, 256, 160, 320, 32, 128, 128)
            self.inc5a = L.Inception(None, 256, 160, 320, 32, 128, 128)
            self.inc5b = L.Inception(None, 384, 192, 384, 48, 128, 128)
            # Main 1000-way classifier.
            self.loss3_fc = L.Linear(None, 1000)

            # Auxiliary classifier 1.
            self.loss1_conv = L.Convolution2D(None, 128, 1)
            self.loss1_fc1 = L.Linear(None, 1024)
            self.loss1_fc2 = L.Linear(None, 1000)

            # Auxiliary classifier 2.
            self.loss2_conv = L.Convolution2D(None, 128, 1)
            self.loss2_fc1 = L.Linear(None, 1024)
            self.loss2_fc2 = L.Linear(None, 1000)
项目:chainermn    作者:chainer    | 项目源码 | 文件源码
def __init__(self):
        """GoogLeNet (Inception v1) with two auxiliary classifier heads.

        Old-style ``Chain.__init__(**links)`` registration; ``None`` input
        channel counts are inferred at the first forward pass.
        """
        super(GoogLeNet, self).__init__(
            # Stem: 7x7/2 conv, 1x1 reduction, 3x3 conv.
            conv1=L.Convolution2D(None,  64, 7, stride=2, pad=3),
            conv2_reduce=L.Convolution2D(None,  64, 1),
            conv2=L.Convolution2D(None, 192, 3, stride=1, pad=1),
            # Inception modules 3a-5b.
            inc3a=L.Inception(None,  64,  96, 128, 16,  32,  32),
            inc3b=L.Inception(None, 128, 128, 192, 32,  96,  64),
            inc4a=L.Inception(None, 192,  96, 208, 16,  48,  64),
            inc4b=L.Inception(None, 160, 112, 224, 24,  64,  64),
            inc4c=L.Inception(None, 128, 128, 256, 24,  64,  64),
            inc4d=L.Inception(None, 112, 144, 288, 32,  64,  64),
            inc4e=L.Inception(None, 256, 160, 320, 32, 128, 128),
            inc5a=L.Inception(None, 256, 160, 320, 32, 128, 128),
            inc5b=L.Inception(None, 384, 192, 384, 48, 128, 128),
            # Main 1000-way classifier.
            loss3_fc=L.Linear(None, 1000),

            # Auxiliary classifier 1.
            loss1_conv=L.Convolution2D(None, 128, 1),
            loss1_fc1=L.Linear(None, 1024),
            loss1_fc2=L.Linear(None, 1000),

            # Auxiliary classifier 2.
            loss2_conv=L.Convolution2D(None, 128, 1),
            loss2_fc1=L.Linear(None, 1024),
            loss2_fc2=L.Linear(None, 1000)
        )
        # NOTE(review): presumably toggles train/test behavior in the forward
        # pass (not visible here) -- confirm in the rest of the class.
        self.train = True
项目:chainermn    作者:chainer    | 项目源码 | 文件源码
def __init__(self):
        """AlexNet variant whose parameters are created in float16.

        Registers links by calling ``chainer.Chain.__init__`` directly with
        keyword arguments (old-style registration); input channel counts are
        inferred from the first forward pass.
        """
        # All weights and biases are half precision.
        self.dtype = np.float16
        # HeNormal scaled by 1/sqrt(2), built in the chosen dtype.
        W = initializers.HeNormal(1 / np.sqrt(2), self.dtype)
        bias = initializers.Zero(self.dtype)
        # NOTE(review): ``bias=`` took a scalar fill value in Chainer v1 while
        # newer APIs use ``initial_bias=`` for initializer objects -- confirm
        # this matches the Chainer version the project pins.
        chainer.Chain.__init__(
            self,
            conv1=L.Convolution2D(None, 96, 11,
                                  stride=4, initialW=W, bias=bias),
            conv2=L.Convolution2D(None, 256, 5, pad=2, initialW=W, bias=bias),
            conv3=L.Convolution2D(None, 384, 3, pad=1, initialW=W, bias=bias),
            conv4=L.Convolution2D(None, 384, 3, pad=1, initialW=W, bias=bias),
            conv5=L.Convolution2D(None, 256, 3, pad=1, initialW=W, bias=bias),
            fc6=L.Linear(None, 4096, initialW=W, bias=bias),
            fc7=L.Linear(None, 4096, initialW=W, bias=bias),
            fc8=L.Linear(None, 1000, initialW=W, bias=bias),
        )
        # NOTE(review): presumably toggles train/test behavior elsewhere in
        # the class -- confirm.
        self.train = True
项目:chainermn    作者:chainer    | 项目源码 | 文件源码
def __init__(self, bottom_width=4, ch=512, wscale=0.02):
        """DCGAN-style discriminator.

        Alternates 3x3/stride-1 and 4x4/stride-2 convolutions; channels grow
        from ch/8 up to ch while the spatial size shrinks to bottom_width.
        """
        init_w = chainer.initializers.Normal(wscale)
        super(Discriminator, self).__init__()
        with self.init_scope():
            # Convolution stack.
            self.c0_0 = L.Convolution2D(3, ch // 8, 3, 1, 1, initialW=init_w)
            self.c0_1 = L.Convolution2D(ch // 8, ch // 4, 4, 2, 1, initialW=init_w)
            self.c1_0 = L.Convolution2D(ch // 4, ch // 4, 3, 1, 1, initialW=init_w)
            self.c1_1 = L.Convolution2D(ch // 4, ch // 2, 4, 2, 1, initialW=init_w)
            self.c2_0 = L.Convolution2D(ch // 2, ch // 2, 3, 1, 1, initialW=init_w)
            self.c2_1 = L.Convolution2D(ch // 2, ch // 1, 4, 2, 1, initialW=init_w)
            self.c3_0 = L.Convolution2D(ch // 1, ch // 1, 3, 1, 1, initialW=init_w)
            # Final real/fake score.
            self.l4 = L.Linear(bottom_width * bottom_width * ch, 1, initialW=init_w)
            # Batch norm for every conv except the input one (no gamma).
            self.bn0_1 = L.BatchNormalization(ch // 4, use_gamma=False)
            self.bn1_0 = L.BatchNormalization(ch // 4, use_gamma=False)
            self.bn1_1 = L.BatchNormalization(ch // 2, use_gamma=False)
            self.bn2_0 = L.BatchNormalization(ch // 2, use_gamma=False)
            self.bn2_1 = L.BatchNormalization(ch // 1, use_gamma=False)
            self.bn3_0 = L.BatchNormalization(ch // 1, use_gamma=False)
项目:chainer-examples    作者:nocotan    | 项目源码 | 文件源码
def __init__(self, n_class, in_ch, n_layer=12, growth_rate=12,
                 dropout_ratio=0.2, block=3):
        """Build a DenseNet-style classifier of ``block`` dense blocks.

        NOTE(review): the enclosing class appears as ``DenseBlock`` in this
        snippet, yet it adds ``DenseBlock`` sub-links; it is presumably the
        whole-network class (e.g. ``DenseNet``) -- confirm in the project.
        """
        # Channels entering each dense block: every one of the n_layer layers
        # in a block appends growth_rate feature maps.
        in_chs = [in_ch + n_layer * growth_rate * i
                  for i in moves.range(block + 1)]
        super(DenseBlock, self).__init__()
        # ``wscale`` is the Chainer v1 initializer-scale keyword.
        self.add_link(
            'conv1', L.Convolution2D(3, in_ch, 3, 1, 1, wscale=np.sqrt(2))
        )
        for i in moves.range(block):
            self.add_link('dense%d' % (i+2),
                          DenseBlock(in_chs[i], growth_rate, n_layer))
            # No transition (downsampling) layer after the last dense block.
            if not i == block - 1:
                self.add_link('trans%d' % (i+2), Transition(in_chs[i+1]))

        self.add_link(
            'bn%d' % (block+1), L.BatchNormalization(in_chs[block])
        )
        self.add_link('fc%d' % (block+2), L.Linear(in_chs[block], n_class))
        self.train = True
        self.dropout_ratio = dropout_ratio
        self.block = block
项目:chainer-examples    作者:nocotan    | 项目源码 | 文件源码
def __init__(self, in_size, ch, out_size, stride=2):
        """Residual bottleneck block with a projection shortcut ("type A").

        Main path: 1x1 reduce -> 3x3 -> 1x1 expand (conv1..conv3);
        conv4 is the strided 1x1 projection shortcut.
        """
        super(BottleNeckA, self).__init__()
        w = initializers.HeNormal()

        with self.init_scope():
            self.conv1 = L.Convolution2D(in_size, ch, 1, stride, 0,
                                         initialW=w, nobias=True)
            self.conv2 = L.Convolution2D(ch, ch, 3, 1, 1,
                                         initialW=w, nobias=True)
            self.conv3 = L.Convolution2D(ch, out_size, 1, 1, 0,
                                         initialW=w, nobias=True)
            # Projection shortcut matching shape and stride.
            self.conv4 = L.Convolution2D(in_size, out_size, 1, stride, 0,
                                         initialW=w, nobias=True)
            # One batch norm per convolution.
            self.bn1 = L.BatchNormalization(ch)
            self.bn2 = L.BatchNormalization(ch)
            self.bn3 = L.BatchNormalization(out_size)
            self.bn4 = L.BatchNormalization(out_size)
项目:chainer-examples    作者:nocotan    | 项目源码 | 文件源码
def __init__(self, in_size, ch, out_size, stride=2):
        """Residual bottleneck block with a projection shortcut ("type A").

        Args:
            in_size (int): Input channel count.
            ch (int): Bottleneck channel count for the 1x1/3x3 convs.
            out_size (int): Output channel count.
            stride (int): Stride of conv1 and of the shortcut conv4.
        """
        super(BottleNeckA, self).__init__()
        w = initializers.HeNormal()

        with self.init_scope():
            self.conv1 = L.Convolution2D(in_size, ch, 1, stride, 0,
                                         initialW=w, nobias=True)
            self.conv2 = L.Convolution2D(ch, ch, 3, 1, 1,
                                         initialW=w, nobias=True)
            self.conv3 = L.Convolution2D(ch, out_size, 1, 1, 0,
                                         initialW=w, nobias=True)
            # Fix: the shortcut conv was written with ``initialW=2`` (a bare
            # int, not an initializer); use the shared HeNormal initializer,
            # matching the identical sibling BottleNeckA definitions.
            self.conv4 = L.Convolution2D(in_size, out_size, 1, stride, 0,
                                         initialW=w, nobias=True)
            self.bn1 = L.BatchNormalization(ch)
            self.bn2 = L.BatchNormalization(ch)
            self.bn3 = L.BatchNormalization(out_size)
            self.bn4 = L.BatchNormalization(out_size)
项目:chainer-examples    作者:nocotan    | 项目源码 | 文件源码
def __init__(self, in_size, ch, out_size, stride=2):
        """Residual bottleneck block with a projection shortcut ("type A").

        conv1-conv3 form the 1x1 reduce / 3x3 / 1x1 expand main path;
        conv4 is the strided 1x1 projection shortcut.
        """
        super(BottleNeckA, self).__init__()
        w = initializers.HeNormal()

        with self.init_scope():
            self.conv1 = L.Convolution2D(in_size, ch, 1, stride, 0,
                                         initialW=w, nobias=True)
            self.conv2 = L.Convolution2D(ch, ch, 3, 1, 1,
                                         initialW=w, nobias=True)
            self.conv3 = L.Convolution2D(ch, out_size, 1, 1, 0,
                                         initialW=w, nobias=True)
            # Projection shortcut matching shape and stride.
            self.conv4 = L.Convolution2D(in_size, out_size, 1, stride, 0,
                                         initialW=w, nobias=True)
            # One batch norm per convolution.
            self.bn1 = L.BatchNormalization(ch)
            self.bn2 = L.BatchNormalization(ch)
            self.bn3 = L.BatchNormalization(out_size)
            self.bn4 = L.BatchNormalization(out_size)
项目:chainer-examples    作者:nocotan    | 项目源码 | 文件源码
def __init__(self, num_class, train=True):
        """VGG16: thirteen 3x3 convolutions followed by three linear layers.

        Input channel counts are ``None`` and inferred at the first forward
        pass.  ``train`` is accepted but not used in this constructor.
        """
        super(VGG16Net, self).__init__()
        with self.init_scope():
            # Block 1 (64 channels).
            self.conv1 = L.Convolution2D(None, 64, 3, stride=1, pad=1)
            self.conv2 = L.Convolution2D(None, 64, 3, stride=1, pad=1)

            # Block 2 (128 channels).
            self.conv3 = L.Convolution2D(None, 128, 3, stride=1, pad=1)
            self.conv4 = L.Convolution2D(None, 128, 3, stride=1, pad=1)

            # Block 3 (256 channels).
            self.conv5 = L.Convolution2D(None, 256, 3, stride=1, pad=1)
            self.conv6 = L.Convolution2D(None, 256, 3, stride=1, pad=1)
            self.conv7 = L.Convolution2D(None, 256, 3, stride=1, pad=1)

            # Block 4 (512 channels).
            self.conv8 = L.Convolution2D(None, 512, 3, stride=1, pad=1)
            self.conv9 = L.Convolution2D(None, 512, 3, stride=1, pad=1)
            self.conv10 = L.Convolution2D(None, 512, 3, stride=1, pad=1)

            # Block 5 (512 channels).
            self.conv11 = L.Convolution2D(None, 512, 3, stride=1, pad=1)
            self.conv12 = L.Convolution2D(None, 512, 3, stride=1, pad=1)
            self.conv13 = L.Convolution2D(None, 512, 3, stride=1, pad=1)

            # Classifier head.
            self.fc14 = L.Linear(None, 4096)
            self.fc15 = L.Linear(None, 4096)
            self.fc16 = L.Linear(None, num_class)
项目:chainer-examples    作者:nocotan    | 项目源码 | 文件源码
def __init__(self, bottom_width=4, ch=512, wscale=0.02):
        """DCGAN-style discriminator.

        Alternates 3x3/stride-1 and 4x4/stride-2 convolutions; channels grow
        from ch/8 up to ch while the spatial size shrinks to bottom_width.
        """
        w = chainer.initializers.Normal(wscale)
        super(Discriminator, self).__init__()
        with self.init_scope():
            # Convolution stack.
            self.c0_0 = L.Convolution2D(3, ch//8, 3, 1, 1, initialW=w)
            self.c0_1 = L.Convolution2D(ch//8, ch//4, 4, 2, 1, initialW=w)
            self.c1_0 = L.Convolution2D(ch//4, ch//4, 3, 1, 1, initialW=w)
            self.c1_1 = L.Convolution2D(ch//4, ch//2, 4, 2, 1, initialW=w)
            self.c2_0 = L.Convolution2D(ch//2, ch//2, 3, 1, 1, initialW=w)
            self.c2_1 = L.Convolution2D(ch//2, ch//1, 4, 2, 1, initialW=w)
            self.c3_0 = L.Convolution2D(ch//1, ch//1, 3, 1, 1, initialW=w)
            # Final real/fake score.
            self.l4 = L.Linear(bottom_width*bottom_width*ch, 1, initialW=w)
            # Batch norm for every conv except the input one (no gamma).
            self.bn0_1 = L.BatchNormalization(ch // 4, use_gamma=False)
            self.bn1_0 = L.BatchNormalization(ch // 4, use_gamma=False)
            self.bn1_1 = L.BatchNormalization(ch // 2, use_gamma=False)
            self.bn2_0 = L.BatchNormalization(ch // 2, use_gamma=False)
            self.bn2_1 = L.BatchNormalization(ch // 1, use_gamma=False)
            self.bn3_0 = L.BatchNormalization(ch // 1, use_gamma=False)
项目:chainer-image-generation    作者:fukuta0614    | 项目源码 | 文件源码
def __init__(self, size=64, ch=512, wscale=0.005, use_gamma=True):
        """GAN discriminator for ``size`` x ``size`` images (old-style links).

        Four stride-2 convolutions reduce the map by 16x, so ``size`` must be
        divisible by 16; ``use_gamma`` is forwarded to every batch norm.
        """
        assert (size % 16 == 0)
        # Spatial size of the feature map entering the final linear layer.
        initial_size = size // 16
        w = chainer.initializers.Normal(wscale)
        super(Discriminator, self).__init__(
            c0_0=L.Convolution2D(3, ch // 8, 3, 1, 1, initialW=w),
            c0_1=L.Convolution2D(ch // 8, ch // 4, 4, 2, 1, initialW=w),
            c1_1=L.Convolution2D(ch // 4, ch // 2, 4, 2, 1, initialW=w),
            c2_1=L.Convolution2D(ch // 2, ch // 1, 4, 2, 1, initialW=w),
            c3_0=L.Convolution2D(ch // 1, ch // 1, 4, 2, 1, initialW=w),
            # Final real/fake score.
            l4=L.Linear(initial_size * initial_size * ch, 1, initialW=w),
            bn0_1=L.BatchNormalization(ch // 4, use_gamma=use_gamma),
            bn1_1=L.BatchNormalization(ch // 2, use_gamma=use_gamma),
            bn2_1=L.BatchNormalization(ch // 1, use_gamma=use_gamma),
            bn3_0=L.BatchNormalization(ch // 1, use_gamma=use_gamma),
        )
项目:chainer-image-generation    作者:fukuta0614    | 项目源码 | 文件源码
def __init__(self, size=64, ch=512, wscale=0.005):
        """GAN discriminator for ``size`` x ``size`` images (old-style links).

        Same layout as ``Discriminator`` above but batch norms always drop
        the gamma (scale) parameter.
        """
        assert (size % 16 == 0)
        # Spatial size of the feature map entering the final linear layer.
        initial_size = size // 16
        w = chainer.initializers.Normal(wscale)
        super(Discriminator2, self).__init__(
            c0_0=L.Convolution2D(3, ch // 8, 3, 1, 1, initialW=w),
            c0_1=L.Convolution2D(ch // 8, ch // 4, 4, 2, 1, initialW=w),
            c1_1=L.Convolution2D(ch // 4, ch // 2, 4, 2, 1, initialW=w),
            c2_1=L.Convolution2D(ch // 2, ch // 1, 4, 2, 1, initialW=w),
            c3_0=L.Convolution2D(ch // 1, ch // 1, 4, 2, 1, initialW=w),
            # Final real/fake score.
            l4=L.Linear(initial_size * initial_size * ch, 1, initialW=w),
            bn0_1=L.BatchNormalization(ch // 4, use_gamma=False),
            bn1_1=L.BatchNormalization(ch // 2, use_gamma=False),
            bn2_1=L.BatchNormalization(ch // 1, use_gamma=False),
            bn3_0=L.BatchNormalization(ch // 1, use_gamma=False),
        )
项目:chainer-image-generation    作者:fukuta0614    | 项目源码 | 文件源码
def __init__(self, bottom_width=8, ch=512, wscale=0.005):
        """DCGAN-style discriminator (old-style link registration).

        Alternates 3x3/stride-1 and 4x4/stride-2 convolutions down to a
        ``bottom_width`` x ``bottom_width`` map of ``ch`` channels.
        """
        w = chainer.initializers.Normal(wscale)
        super(DiscriminatorPFN, self).__init__(
            c0_0=L.Convolution2D(3, ch // 8, 3, 1, 1, initialW=w),
            c0_1=L.Convolution2D(ch // 8, ch // 4, 4, 2, 1, initialW=w),
            c1_0=L.Convolution2D(ch // 4, ch // 4, 3, 1, 1, initialW=w),
            c1_1=L.Convolution2D(ch // 4, ch // 2, 4, 2, 1, initialW=w),
            c2_0=L.Convolution2D(ch // 2, ch // 2, 3, 1, 1, initialW=w),
            c2_1=L.Convolution2D(ch // 2, ch // 1, 4, 2, 1, initialW=w),
            c3_0=L.Convolution2D(ch // 1, ch // 1, 3, 1, 1, initialW=w),
            # Final real/fake score.
            l4=L.Linear(bottom_width * bottom_width * ch, 1, initialW=w),
            # Batch norm (no gamma) for every conv except the input one.
            bn0_1=L.BatchNormalization(ch // 4, use_gamma=False),
            bn1_0=L.BatchNormalization(ch // 4, use_gamma=False),
            bn1_1=L.BatchNormalization(ch // 2, use_gamma=False),
            bn2_0=L.BatchNormalization(ch // 2, use_gamma=False),
            bn2_1=L.BatchNormalization(ch // 1, use_gamma=False),
            bn3_0=L.BatchNormalization(ch // 1, use_gamma=False),
        )
项目:chainer-image-generation    作者:fukuta0614    | 项目源码 | 文件源码
def __init__(self, size=64, ch=512, wscale=0.005):
        """GAN discriminator for ``size`` x ``size`` inputs, without batch norm.

        Three stride-2 convolutions reduce the map by 8x, so ``size`` must be
        divisible by 8.
        """
        assert (size % 8 == 0)
        # Spatial size of the feature map entering the final linear layer.
        initial_size = size // 8

        w = chainer.initializers.Normal(wscale)
        super(Discriminator, self).__init__(
            c0_0=L.Convolution2D(3, ch // 8, 3, 1, 1, initialW=w),
            c0_1=L.Convolution2D(ch // 8, ch // 4, 4, 2, 1, initialW=w),
            c1_0=L.Convolution2D(ch // 4, ch // 4, 3, 1, 1, initialW=w),
            c1_1=L.Convolution2D(ch // 4, ch // 2, 4, 2, 1, initialW=w),
            c2_0=L.Convolution2D(ch // 2, ch // 2, 3, 1, 1, initialW=w),
            c2_1=L.Convolution2D(ch // 2, ch // 1, 4, 2, 1, initialW=w),
            c3_0=L.Convolution2D(ch // 1, ch // 1, 3, 1, 1, initialW=w),
            # Final real/fake score.
            l4=L.Linear(initial_size * initial_size * ch, 1, initialW=w),
        )

    # noinspection PyCallingNonCallable,PyUnresolvedReferences
项目:chainer-image-generation    作者:fukuta0614    | 项目源码 | 文件源码
def __init__(self, density=1, size=64, latent_size=128, channel=3):
        assert (size % 16 == 0)
        initial_size = size / 16
        super(Encoder, self).__init__(
            enc1=L.Convolution2D(channel, 64 * density, 4, stride=2, pad=1,
                                 wscale=0.02 * math.sqrt(4 * 4 * channel * density)),
            norm1=L.BatchNormalization(64 * density),
            enc2=L.Convolution2D(64 * density, 128 * density, 4, stride=2, pad=1,
                                 wscale=0.02 * math.sqrt(4 * 4 * 64 * density)),
            norm2=L.BatchNormalization(128 * density),
            enc3=L.Convolution2D(128 * density, 128 * density, 4, stride=2, pad=1,
                                 wscale=0.02 * math.sqrt(4 * 4 * 128 * density)),
            norm3=L.BatchNormalization(128 * density),
            enc4=L.Convolution2D(128 * density, 256 * density, 4, stride=2, pad=1,
                                 wscale=0.02 * math.sqrt(4 * 4 * 128 * density)),
            norm4=L.BatchNormalization(256 * density),
            mean=L.Linear(initial_size * initial_size * 256 * density, latent_size,
                          wscale=0.02 * math.sqrt(initial_size * initial_size * 256 * density)),
            ln_var=L.Linear(initial_size * initial_size * 256 * density, latent_size,
                            wscale=0.02 * math.sqrt(initial_size * initial_size * 256 * density)),
        )