Python chainer.functions module: convolution_2d() code examples

The following 16 code examples, extracted from open-source Python projects, illustrate how to use chainer.functions.convolution_2d().
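
Before the project examples, here is a minimal, self-contained sketch of the call itself (sizes arbitrary; shapes follow Chainer's (batch, channels, height, width) convention). Note that the use_cudnn argument seen in several of the older snippets was removed in Chainer v2 in favor of chainer.using_config('use_cudnn', ...).

import numpy as np
import chainer
import chainer.functions as F

# x: (batch, in_channels, height, width); W: (out_channels, in_channels, kh, kw)
x = chainer.Variable(np.random.randn(2, 3, 8, 8).astype(np.float32))
W = chainer.Variable(np.random.randn(5, 3, 3, 3).astype(np.float32))
b = chainer.Variable(np.zeros(5, dtype=np.float32))

y = F.convolution_2d(x, W, b, stride=1, pad=1)
print(y.data.shape)  # (2, 5, 8, 8); out_size = (in + 2*pad - k) // stride + 1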

Project: chainer-deconv    Author: germanRos
def test_forward_consistency(self, nobias=False):
        x_cpu = chainer.Variable(self.x)
        W_cpu = chainer.Variable(self.W)
        b_cpu = None if nobias else chainer.Variable(self.b)
        y_cpu = functions.convolution_2d(
            x_cpu, W_cpu, b_cpu, stride=self.stride, pad=self.pad,
            use_cudnn=self.use_cudnn, cover_all=self.cover_all)

        x_gpu = chainer.Variable(cuda.to_gpu(self.x))
        W_gpu = chainer.Variable(cuda.to_gpu(self.W))
        b_gpu = None if nobias else chainer.Variable(cuda.to_gpu(self.b))
        y_gpu = functions.convolution_2d(
            x_gpu, W_gpu, b_gpu, stride=self.stride, pad=self.pad,
            use_cudnn=self.use_cudnn, cover_all=self.cover_all)

        gradient_check.assert_allclose(y_cpu.data, y_gpu.data.get())
Project: chainer-deconv    Author: germanRos
def check_backward(self, x_data, W_data, b_data, y_grad):
        xp = cuda.get_array_module(x_data)
        if not self.c_contiguous:
            x_data = xp.asfortranarray(x_data)
            W_data = xp.asfortranarray(W_data)
            y_grad = xp.asfortranarray(y_grad)
            self.assertFalse(x_data.flags.c_contiguous)
            self.assertFalse(W_data.flags.c_contiguous)
            self.assertFalse(y_grad.flags.c_contiguous)
            if b_data is not None:
                # Build a non-contiguous view of the bias by striding over a
                # buffer twice the required size.
                b = xp.empty((len(b_data) * 2,), dtype=self.b.dtype)
                b[::2] = b_data
                b_data = b[::2]
                self.assertFalse(b_data.flags.c_contiguous)

        args = (x_data, W_data)
        if b_data is not None:
            args = args + (b_data,)

        gradient_check.check_backward(
            convolution_2d.Convolution2DFunction(
                self.stride, self.pad, self.use_cudnn, self.cover_all),
            args, y_grad, eps=1e-2)
Project: chainer-cf-nade    Author: dsanno
def __call__(self, h, train=True):
        """
        in_type:
            h: float32
        in_shape:
            h: (batch_size, hidden_num)
        out_type: float32
        out_shape: (batch_size, rating_num, predicted_item_num)
        """

        xp = cuda.get_array_module(h.data)
        h = self.p(h)
        if hasattr(self, 'q'):
            h = self.q(h)
        h = F.reshape(h, (-1, self.rating_num, self.item_num, 1))
        w = chainer.Variable(xp.asarray(np.tri(self.rating_num, dtype=np.float32).reshape(self.rating_num, self.rating_num, 1, 1)), volatile=h.volatile)
        h = F.convolution_2d(h, w)
        return F.reshape(h, (-1, self.rating_num, self.item_num))
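
The lower-triangular kernel above makes the 1x1 convolution act as a cumulative sum over the rating axis. A minimal sketch of that trick in isolation (sizes arbitrary; Variables used so it also runs on older Chainer versions):

import numpy as np
import chainer
import chainer.functions as F

rating_num, item_num = 4, 3
h = np.random.randn(2, rating_num, item_num, 1).astype(np.float32)
w = np.tri(rating_num, dtype=np.float32).reshape(rating_num, rating_num, 1, 1)

# Output channel r is the sum of input channels 0..r, i.e. a cumsum over axis 1.
y = F.convolution_2d(chainer.Variable(h), chainer.Variable(w))
np.testing.assert_allclose(y.data, np.cumsum(h, axis=1), rtol=1e-5, atol=1e-6)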
Project: chainer-cf-nade    Author: dsanno
def ordinal_loss(y, mask):
    xp = cuda.get_array_module(y.data)
    volatile = y.volatile
    b, c, n = y.data.shape
    max_y = F.broadcast_to(F.max(y, axis=1, keepdims=True), y.data.shape)
    y = y - max_y
    sum_y = F.broadcast_to(F.expand_dims(F.sum(y, axis=1), 1), y.data.shape)
    # The triangular 1x1 kernels make the convolutions below cumulative sums
    # over the rating axis (w1 forward, w2 reversed).
    down_tri = np.tri(c, dtype=np.float32)
    up_tri = down_tri.T
    w1 = Variable(xp.asarray(down_tri.reshape(c, c, 1, 1)), volatile=volatile)
    w2 = Variable(xp.asarray(up_tri.reshape(c, c, 1, 1)), volatile=volatile)
    h = F.exp(F.expand_dims(y, -1))
    h1 = F.convolution_2d(h, w1)
    h1 = F.convolution_2d(F.log(h1), w1)
    h2 = F.convolution_2d(h, w2)
    h2 = F.convolution_2d(F.log(h2), w2)
    h = F.reshape(h1 + h2, (b, c, n))
    return F.sum((h - sum_y - y) * mask) / b
Project: SeRanet    Author: corochann
def propdown(self, hid):
        """ This function propagates the hidden units activation downwords to the visible units
        :param hid: Variable Matrix(batch_size, out_channels, image_height_out, image_width_out)  - given h_sample
        :return: Variable Matrix(batch_size, in_channels, image_height, image_width) - probability for each visible units to be v_j = 1
        """
        batch_size = hid.data.shape[0]
        if self.real == 0:
            W_flipped = F.swapaxes(CF.flip(self.conv.W, axes=(2, 3)), axis1=0, axis2=1)
            pre_sigmoid_activation = F.convolution_2d(hid, W_flipped, self.conv.a, pad=self.ksize-1)
                # F.matmul(hid, self.l.W) + F.broadcast_to(self.l.a, (batch_size, self.n_visible))
            v_mean = F.sigmoid(pre_sigmoid_activation)
            #print('W info ', self.conv.W.data.shape, 'W_flipped info ', W_flipped.data.shape)
            #print('W info ', self.conv.W.data[3, 0, 2, 3], 'W_flipped info ', W_flipped.data[0, 3, 8, 7])
            #print('W info ', self.conv.W.data[3, 0, 8, 7], 'W_flipped info ', W_flipped.data[0, 3, 2, 3])
            #print('W info ', self.conv.W.data[19, 0, 4, 0], 'W_flipped info ', W_flipped.data[0, 19, 6, 10])
            #print('pre_sigmoidactivation', F.sum(pre_sigmoid_activation).data)
            #print('v_mean', v_mean.data.shape)
            #print('v_mean sum', F.sum(v_mean).data)
            #print('hid', hid.data.shape)

        else:
            # TODO: check
            W_flipped = F.swapaxes(CF.flip(self.conv.W, axes=(2, 3)), axis1=0, axis2=1)
            v_mean = F.convolution_2d(hid, W_flipped, self.conv.a, pad=self.ksize-1)
        return v_mean
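
The flip-and-swapaxes on self.conv.W with full padding (pad = ksize - 1) is the standard way to write a transposed convolution as an ordinary one. A sketch of that identity, assuming stride 1 and no padding in the forward direction (the spatial flip is done in NumPy here rather than with the project-local CF.flip):

import numpy as np
import chainer
import chainer.functions as F

x = np.random.randn(1, 2, 6, 6).astype(np.float32)
W = np.random.randn(4, 2, 3, 3).astype(np.float32)  # (out_ch, in_ch, kh, kw)

h = F.convolution_2d(chainer.Variable(x), chainer.Variable(W))  # (1, 4, 4, 4)

# Flip the kernel spatially, swap the channel axes, pad by ksize - 1 ...
W_flipped = W[:, :, ::-1, ::-1].swapaxes(0, 1).copy()  # (2, 4, 3, 3)
v1 = F.convolution_2d(h, chainer.Variable(W_flipped), pad=2)
# ... and the result matches Chainer's transposed convolution.
v2 = F.deconvolution_2d(h, chainer.Variable(W))
np.testing.assert_allclose(v1.data, v2.data, rtol=1e-4, atol=1e-4)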
Project: SeRanet    Author: corochann
def reconstruct(self, v):
        """

        :param v: Variable Matrix(batch_size, in_channels, image_height, image_width)
        :return: reconstructed_v, Variable Matrix(batch_size, in_channels, image_height, image_width)
        """
        batch_size = v.data.shape[0]
        xp = cuda.get_array_module(v.data)
        if self.real == 0:
            h = F.sigmoid(self.conv(v))
        else:
            std_ch = xp.reshape(self.std, (1, self.in_channels, 1, 1))
            h = F.sigmoid(self.conv(v / std_ch))
        # F.sigmoid(F.matmul(v, self.l.W, transb=True) + F.broadcast_to(self.l.b, (batch_size, self.n_hidden)))
        W_flipped = F.swapaxes(CF.flip(self.conv.W, axes=(2, 3)), axis1=0, axis2=1)
        reconstructed_v = F.sigmoid(F.convolution_2d(h, W_flipped, self.conv.a, pad=self.ksize-1))
            # = F.sigmoid(F.matmul(h, self.l.W) + F.broadcast_to(self.l.a, (batch_size, self.n_visible)))
        return reconstructed_v
Project: chainer-gan-experiments    Author: Aixile
def backward_deconvolution(x_in, x, l):
    # The input gradient of deconvolution layer l is a plain convolution
    # with the same weights (and no bias); see the check below.
    y = F.convolution_2d(x, l.W, None, l.stride, l.pad)
    return y
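
As the name suggests, this computes the input gradient of a deconvolution layer l, which is exactly a plain convolution with the same weights. A sketch checking that against autograd (sizes arbitrary):

import numpy as np
import chainer
import chainer.functions as F

x = chainer.Variable(np.random.randn(1, 3, 4, 4).astype(np.float32))
W = chainer.Variable(np.random.randn(3, 2, 3, 3).astype(np.float32))  # (in_ch, out_ch, kh, kw)

y = F.deconvolution_2d(x, W)  # (1, 2, 6, 6)
g = np.random.randn(*y.data.shape).astype(np.float32)
y.grad = g
y.backward()

# The input gradient of the deconvolution is a convolution with the same W.
manual = F.convolution_2d(chainer.Variable(g), W)
np.testing.assert_allclose(x.grad, manual.data, rtol=1e-4, atol=1e-4)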
Project: chainer-gan-experiments    Author: Aixile
def loss_func_tv_l1(x_out):
    xp = cuda.get_array_module(x_out.data)
    b, ch, h, w = x_out.data.shape
    # Per-channel 2x2 difference kernels: Wx takes horizontal differences,
    # Wy vertical differences.
    Wx = xp.zeros((ch, ch, 2, 2), dtype="f")
    Wy = xp.zeros((ch, ch, 2, 2), dtype="f")
    for i in range(ch):
        Wx[i,i,0,0] = -1
        Wx[i,i,0,1] = 1
        Wy[i,i,0,0] = -1
        Wy[i,i,1,0] = 1
    return F.sum(F.absolute(F.convolution_2d(x_out, W=Wx))) + F.sum(F.absolute(F.convolution_2d(x_out, W=Wy)))
Project: chainer-gan-experiments    Author: Aixile
def loss_func_tv_l2(x_out):
    xp = cuda.get_array_module(x_out.data)
    b, ch, h, w = x_out.data.shape
    Wx = xp.zeros((ch, ch, 2, 2), dtype="f")
    Wy = xp.zeros((ch, ch, 2, 2), dtype="f")
    for i in range(ch):
        Wx[i,i,0,0] = -1
        Wx[i,i,0,1] = 1
        Wy[i,i,0,0] = -1
        Wy[i,i,1,0] = 1
    return F.sum(F.convolution_2d(x_out, W=Wx) ** 2) + F.sum(F.convolution_2d(x_out, W=Wy) ** 2)
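
Both TV losses above can also be written with array slicing, which makes the effect of the 2x2 kernels explicit: they take horizontal and vertical forward differences over the (h-1) x (w-1) region where both are defined, hence the crops below. A sketch of the equivalence for the L2 variant, assuming loss_func_tv_l2 above and its imports are in scope:

import numpy as np
import chainer
import chainer.functions as F

x = chainer.Variable(np.random.randn(1, 3, 5, 5).astype(np.float32))

dx = x[:, :, :, 1:] - x[:, :, :, :-1]  # horizontal differences
dy = x[:, :, 1:, :] - x[:, :, :-1, :]  # vertical differences
tv = F.sum(dx[:, :, :-1, :] ** 2) + F.sum(dy[:, :, :, :-1] ** 2)
np.testing.assert_allclose(tv.data, loss_func_tv_l2(x).data, rtol=1e-4)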
Project: chainer-neural-style    Author: dsanno
def total_variation(x):
    xp = cuda.get_array_module(x.data)
    b, ch, h, w = x.data.shape
    # Hard-coded per-channel difference kernels for 3-channel input:
    # wh has shape (3, 3, 2, 1) (vertical differences), ww (3, 3, 1, 2) (horizontal).
    wh = xp.asarray([[[[1], [-1]], [[0], [0]], [[0], [0]]], [[[0], [0]], [[1], [-1]], [[0], [0]]], [[[0], [0]], [[0], [0]], [[1], [-1]]]], dtype=np.float32)
    ww = xp.asarray([[[[1, -1]], [[0, 0]], [[0, 0]]], [[[0, 0]], [[1, -1]], [[0, 0]]], [[[0, 0]], [[0, 0]], [[1, -1]]]], dtype=np.float32)
    return F.sum(F.convolution_2d(x, W=wh) ** 2) + F.sum(F.convolution_2d(x, W=ww) ** 2)
Project: chainer-neural-style    Author: dsanno
def patch(x, ksize=3, stride=1, pad=0):
    xp = cuda.get_array_module(x.data)
    b, ch, h, w = x.data.shape
    # Identity kernel: each output channel copies one (channel, dy, dx) tap, so
    # the convolution stacks all ksize x ksize patches along the channel axis.
    W = xp.identity(ch * ksize * ksize, dtype=np.float32).reshape((ch * ksize * ksize, ch, ksize, ksize))
    return F.convolution_2d(x, W=W, stride=stride, pad=pad)
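
Convolving with an identity matrix reshaped into (ch * ksize * ksize, ch, ksize, ksize) stacks every ksize x ksize neighbourhood along the channel axis, i.e. im2col expressed as a convolution. A quick shape check (sizes arbitrary, assuming the module's own imports are in scope for patch):

import numpy as np
import chainer

x = chainer.Variable(np.random.randn(1, 3, 8, 8).astype(np.float32))
p = patch(x, ksize=3, pad=1)
# (1, 27, 8, 8): output channel c*9 + i*3 + j samples input channel c
# at offset (i, j) of the 3x3 window.
print(p.data.shape)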
Project: chainer-neural-style    Author: dsanno
def gray(x):
    xp = cuda.get_array_module(x.data)
    # BT.601 luma weights (0.299 R, 0.587 G, 0.114 B), presumably for BGR-ordered
    # input, replicated so the output keeps three channels.
    w = xp.asarray([[[[0.114]], [[0.587]], [[0.299]]], [[[0.114]], [[0.587]], [[0.299]]], [[[0.114]], [[0.587]], [[0.299]]]], dtype=np.float32)
    return F.convolution_2d(x, W=w)
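
A quick usage sketch for gray (again assuming the module's imports); every output channel carries the same luma value:

import numpy as np
import chainer

img = chainer.Variable(np.random.rand(1, 3, 4, 4).astype(np.float32))  # BGR in [0, 1]
g = gray(img)
print(g.data.shape)  # (1, 3, 4, 4)
np.testing.assert_allclose(g.data[:, 0], g.data[:, 1], rtol=1e-6)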
Project: chainer-deconv    Author: germanRos
def forward(self):
        x = chainer.Variable(self.x)
        W = chainer.Variable(self.W)
        return functions.convolution_2d(
            x, W, None, stride=self.stride, pad=self.pad,
            use_cudnn=self.use_cudnn)
Project: chainer-cifar    Author: dsanno
def __init__(self, ch_in, ch_out, ksize, stride=1, pad=0, activation=F.relu):
        if hasattr(ksize, '__getitem__'):
            kh, kw = ksize
        else:
            kh, kw = ksize, ksize
        super(WNConv2D, self).__init__(
            wn_conv=WeightNormalization(F.convolution_2d, (ch_out, ch_in, kh, kw), stride=stride, pad=pad),
        )
        self.activation = activation
Project: Multitask-and-Transfer-Learning    Author: AI-ON
def __call__(self, id, x):
        W = self.W_embedding(id)
        b = F.squeeze(self.b_embedding(id))
        # Reshape the vector to be the right dimensions for 2D conv
        W = F.reshape(W, (self.out_channels, self.in_channels, self.kh, self.kw))
        return F.convolution_2d(x, W, b, self.stride, self.pad)
Project: GrouPy    Author: tscohen
def __call__(self, x):

        # Apply a mask to the filters (optional)
        if self.filter_mask is not None:
            w, m = F.broadcast(self.W, Variable(self.filter_mask))
            w = w * m
            # w = self.W * Variable(self.filter_mask)
        else:
            w = self.W

        # Transform the filters
        # w.shape  == (out_channels, in_channels, input_stabilizer_size, ksize, ksize)
        # tw.shape == (out_channels, output_stabilizer_size, in_channels, input_stabilizer_size, ksize, ksize)
        tw = TransformGFilter(self.inds)(w)

        # Fold the transformed filters
        tw_shape = (self.out_channels * self.output_stabilizer_size,
                    self.in_channels * self.input_stabilizer_size,
                    self.ksize, self.ksize)
        tw = F.Reshape(tw_shape)(tw)

        # If flat_channels is False, we need to flatten the input feature maps to have a single 1d feature dimension.
        if not self.flat_channels:
            batch_size = x.data.shape[0]
            in_ny, in_nx = x.data.shape[-2:]
            x = F.reshape(x, (batch_size, self.in_channels * self.input_stabilizer_size, in_ny, in_nx))

        # Perform the 2D convolution
        y = F.convolution_2d(x, tw, b=None, stride=self.stride, pad=self.pad, use_cudnn=self.use_cudnn)

        # Unfold the output feature maps
        # We do this even if flat_channels is True, because we need to add the same bias to each G-feature map
        batch_size, _, ny_out, nx_out = y.data.shape
        y = F.reshape(y, (batch_size, self.out_channels, self.output_stabilizer_size, ny_out, nx_out))

        # Add a bias to each G-feature map
        if self.usebias:
            bb = F.Reshape((1, self.out_channels, 1, 1, 1))(self.b)
            y, b = F.broadcast(y, bb)
            y = y + b

        # Flatten feature channels if needed
        if self.flat_channels:
            n, nc, ng, nx, ny = y.data.shape
            y = F.reshape(y, (n, nc * ng, nx, ny))

        return y