Python chainer.functions module: relu() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use chainer.functions.relu().
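
As a quick orientation before the project snippets: chainer.functions.relu computes the element-wise rectifier max(0, x). A minimal, self-contained sketch (ours, not taken from any of the projects below):

    import numpy as np
    import chainer.functions as F

    x = np.array([[-1.0, 2.0], [3.0, -4.0]], dtype=np.float32)
    y = F.relu(x)   # accepts a raw ndarray, returns a chainer.Variable
    print(y.array)  # [[0. 2.]
                    #  [3. 0.]]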

Project: chainer-gan-experiments    Author: Aixile
def __init__(self, in_ch=3, out_len=128, base_size=128, down_layers=4, use_bn=True, w_init=None):
        layers = {}

        self.down_layers = down_layers
        if use_bn:
            norm = 'bn'
        else:
            norm = None

        act = F.relu
        #if w_init is None:
        #    w_init = chainer.initializers.Normal(0.02)

        layers['c_first'] = NNBlock(in_ch, base_size, nn='down_conv', norm=None, activation=act,  w_init=w_init)
        base = base_size

        for i in range(down_layers-1):
            layers['c'+str(i)] = NNBlock(base, base*2, nn='down_conv', norm=norm, activation=act,  w_init=w_init)
            base*=2

        layers['c_last'] = NNBlock(None, out_len, nn='linear', norm=None, activation=None, w_init=w_init)

        super(DCGANEncoder, self).__init__(**layers)
Project: chainer-visualization    Author: hvy
def feature_map_activations(self, x):

        """Forward pass through the convolutional layers of the VGG returning
        all of its intermediate feature map activations."""

        hs = []
        pre_pooling_sizes = []

        h = x
        for conv_block, mp in zip(self.conv_blocks, self.mps):
            for conv in conv_block:
                h = F.relu(conv(h))

            pre_pooling_sizes.append(h.data.shape[2:])

            # Disable cuDNN, else pooling indices will not be stored
            with chainer.using_config('use_cudnn', 'never'):
                h = mp.apply((h,))[0]
            hs.append(h)

        return hs, pre_pooling_sizes
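
The pooling indices stored above (by disabling cuDNN) are what make the deconvolutional pass later in this project possible: each recorded max-pooling node keeps its indexes, and pre_pooling_sizes supplies the outsize for chainer.functions.upsampling_2d. A minimal sketch of one reverse step, assuming mp and h are the last pooling node and feature map from the loop above:

    # Sketch: assumes mp is the MaxPooling2D node recorded in the loop above.
    # mp.indexes says where each maximum came from; outsize restores the
    # exact pre-pooling spatial shape.
    h = F.upsampling_2d(h, mp.indexes, ksize=mp.kh, stride=mp.sy,
                        pad=mp.ph, outsize=pre_pooling_sizes[-1])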
Project: chainerrl    Author: chainer
def __init__(self, in_size, out_size, hidden_sizes, nonlinearity=F.relu,
                 last_wscale=1):
        self.in_size = in_size
        self.out_size = out_size
        self.hidden_sizes = hidden_sizes
        self.nonlinearity = nonlinearity

        super().__init__()
        with self.init_scope():
            if hidden_sizes:
                hidden_layers = []
                hidden_layers.append(L.Linear(in_size, hidden_sizes[0]))
                for hin, hout in zip(hidden_sizes, hidden_sizes[1:]):
                    hidden_layers.append(L.Linear(hin, hout))
                self.hidden_layers = chainer.ChainList(*hidden_layers)
                self.output = L.Linear(hidden_sizes[-1], out_size,
                                       initialW=LeCunNormal(last_wscale))
            else:
                self.output = L.Linear(in_size, out_size,
                                       initialW=LeCunNormal(last_wscale))
Project: chainerrl    Author: chainer
def __call__(self, state):
        h = state
        for layer in self.hidden_layers:
            h = F.relu(layer(h))
        v = self.v(h)
        mu = self.mu(h)

        if self.scale_mu:
            mu = scale_by_tanh(mu, high=self.action_space.high,
                               low=self.action_space.low)

        mat_diag = F.exp(self.mat_diag(h))
        if hasattr(self, 'mat_non_diag'):
            mat_non_diag = self.mat_non_diag(h)
            tril = lower_triangular_matrix(mat_diag, mat_non_diag)
            mat = matmul_v3(tril, tril, transb=True)
        else:
            mat = F.expand_dims(mat_diag ** 2, axis=2)
        return QuadraticActionValue(
            mu, mat, v, min_action=self.action_space.low,
            max_action=self.action_space.high)
Project: chainerrl    Author: chainer
def __init__(self, n_actions, n_input_channels=4,
                 activation=F.relu, bias=0.1):
        self.n_actions = n_actions
        self.n_input_channels = n_input_channels
        self.activation = activation

        super().__init__()
        with self.init_scope():
            self.conv_layers = chainer.ChainList(
                L.Convolution2D(n_input_channels, 32, 8, stride=4,
                                initial_bias=bias),
                L.Convolution2D(32, 64, 4, stride=2, initial_bias=bias),
                L.Convolution2D(64, 64, 3, stride=1, initial_bias=bias))

            self.a_stream = MLP(3136, n_actions, [512])
            self.v_stream = MLP(3136, 1, [512])
Project: chainerrl    Author: chainer
def __init__(self, n_dim_obs, n_dim_action, n_hidden_channels,
                 n_hidden_layers, nonlinearity=F.relu,
                 last_wscale=1.):
        self.n_input_channels = n_dim_obs + n_dim_action
        self.n_hidden_layers = n_hidden_layers
        self.n_hidden_channels = n_hidden_channels
        self.nonlinearity = nonlinearity
        super().__init__(
            in_size=self.n_input_channels,
            out_size=1,
            hidden_sizes=[self.n_hidden_channels] * self.n_hidden_layers,
            nonlinearity=nonlinearity,
            last_wscale=last_wscale,
        )
Project: chainerrl    Author: chainer
def __init__(self, n_dim_obs, n_dim_action, n_hidden_channels,
                 n_hidden_layers, nonlinearity=F.relu, last_wscale=1.):
        assert n_hidden_layers >= 1
        self.n_input_channels = n_dim_obs + n_dim_action
        self.n_hidden_layers = n_hidden_layers
        self.n_hidden_channels = n_hidden_channels
        self.nonlinearity = nonlinearity

        super().__init__()
        with self.init_scope():
            # No need to pass nonlinearity to obs_mlp because it has no
            # hidden layers
            self.obs_mlp = MLP(in_size=n_dim_obs, out_size=n_hidden_channels,
                               hidden_sizes=[])
            self.mlp = MLP(in_size=n_hidden_channels + n_dim_action,
                           out_size=1,
                           hidden_sizes=([self.n_hidden_channels] *
                                         (self.n_hidden_layers - 1)),
                           nonlinearity=nonlinearity,
                           last_wscale=last_wscale,
                           )

        self.output = self.mlp.output
Project: chainerrl    Author: chainer
def __init__(self, n_input_channels, n_actions,
                 n_hidden_layers=0, n_hidden_channels=None,
                 beta=1.0, nonlinearity=F.relu,
                 last_wscale=1.0,
                 min_prob=0.0):
        self.n_input_channels = n_input_channels
        self.n_actions = n_actions
        self.n_hidden_layers = n_hidden_layers
        self.n_hidden_channels = n_hidden_channels
        self.beta = beta

        super().__init__(
            model=MLP(n_input_channels,
                      n_actions,
                      (n_hidden_channels,) * n_hidden_layers,
                      nonlinearity=nonlinearity,
                      last_wscale=last_wscale),
            beta=self.beta,
            min_prob=min_prob)
Project: chainer-cyclegan    Author: Aixile
def __init__(self, ch0, ch1, bn=True, sample='down', activation=F.relu, dropout=False, noise=False):
        self.bn = bn
        self.activation = activation
        self.dropout = dropout
        self.sample = sample
        self.noise = noise
        layers = {}
        w = chainer.initializers.Normal(0.02)
        if sample=='down':
            layers['c'] = L.Convolution2D(ch0, ch1, 4, 2, 1, initialW=w)
        elif sample=='none-9':
            layers['c'] = L.Convolution2D(ch0, ch1, 9, 1, 4, initialW=w)
        elif sample=='none-7':
            layers['c'] = L.Convolution2D(ch0, ch1, 7, 1, 3, initialW=w)
        elif sample=='none-5':
            layers['c'] = L.Convolution2D(ch0, ch1, 5, 1, 2, initialW=w)
        else:
            layers['c'] = L.Convolution2D(ch0, ch1, 3, 1, 1, initialW=w)
        if bn:
            if self.noise:
                layers['batchnorm'] = L.BatchNormalization(ch1, use_gamma=False)
            else:
                layers['batchnorm'] = L.BatchNormalization(ch1)
        super(CBR, self).__init__(**layers)
Project: gconv_experiments    Author: tscohen
def __call__(self, x, t, train=True, finetune=False):

        # First conv layer
        h = self[0](x)

        # Residual blocks
        for i in range(1, len(self) - 2):
            h = self[i](h, train, finetune)

        # BN, relu, pool, final layer
        h = self[-2](h)
        h = F.relu(h)
        n, nc, ns, nx, ny = h.data.shape
        h = F.reshape(h, (n, nc * ns, nx, ny))
        h = F.average_pooling_2d(h, ksize=h.data.shape[2:])
        h = self[-1](h)
        h = F.reshape(h, h.data.shape[:2])

        return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
Project: gconv_experiments    Author: tscohen
def __call__(self, x, t, train=True, finetune=False):

        h = x

        # First conv layer
        h = self[0](h)

        # Residual blocks
        for i in range(1, len(self) - 2):
            h = self[i](h, train, finetune)

        # BN, relu, pool, final layer
        h = self[-2](h)
        h = F.relu(h)
        h = F.average_pooling_2d(h, ksize=h.data.shape[2:])
        h = self[-1](h)
        h = F.reshape(h, h.data.shape[:2])

        return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
Project: NANHM-for-GEC    Author: shinochin
def __call__(self, ht, xs, d_bar_s_1):
        # ht: encoder hidden states for all time steps
        # batch_size * n_words * in_size
        # xs: input sentences
        if d_bar_s_1 is None:
            d_bar_s_1 = np.zeros(self.in_size)

        ht_T = list(map(F.transpose, ht))
        phi_ht = list(map(W1, ht_T))

        d_s = rnn(d_bar_s_1, y_s_1)

        phi_d = F.transpose_sequence(W2(F.transpose_sequence(d_s)))
        u_st = list(map(lambda x: phi_d*x, phi_ht))   #(4)

        sum_u = F.sum(u_st)
        alpha_st = list(map(lambda x:x/sum_u, u_st))   #(3)
        z_s = F.argmax(alpha_st, axis=0)

        c_s = F.sum(list(map(lambda x,y:x*y , alpha_st, ht)))   #(2)

        d_bar_s = F.relu(W3(F.concat([c_s, d_s])))

        return d_bar_s, d_s, c_s, z_s
Project: gym-malware    Author: endgameinc
def __init__(self, obs_size, n_actions, n_hidden_channels=[1024,256]):
        super(QFunction,self).__init__()
        net = []
        inpdim = obs_size
        for i,n_hid in enumerate(n_hidden_channels):
            net += [ ('l{}'.format(i), L.Linear( inpdim, n_hid ) ) ]
            net += [ ('norm{}'.format(i), L.BatchNormalization( n_hid ) ) ]
            net += [ ('_act{}'.format(i), F.relu ) ]
            inpdim = n_hid

        net += [('output', L.Linear( inpdim, n_actions) )]

        with self.init_scope():
            for n in net:
                if not n[0].startswith('_'):
                    setattr(self, n[0], n[1])

        self.forward = net
Project: depccg    Author: masashi-y
def forward(self, ws, ss, ps):
        batchsize = len(ws)
        xp = chainer.cuda.get_array_module(ws[0])
        ws = map(self.emb_word, ws)
        ss = [F.reshape(self.emb_suf(s), (s.shape[0], 4 * self.afix_dim)) for s in ss]
        ps = [F.reshape(self.emb_prf(s), (s.shape[0], 4 * self.afix_dim)) for s in ps]
        # [(sentence length, (word_dim + suf_dim + prf_dim))]
        xs_f = [F.dropout(F.concat([w, s, p]),
            self.dropout_ratio, train=self.train) for w, s, p in zip(ws, ss, ps)]
        xs_b = [x[::-1] for x in xs_f]
        cx_f, hx_f, cx_b, hx_b = self._init_state(xp, batchsize)
        _, _, hs_f = self.lstm_f(hx_f, cx_f, xs_f, train=self.train)
        _, _, hs_b = self.lstm_b(hx_b, cx_b, xs_b, train=self.train)
        hs_b = [x[::-1] for x in hs_b]
        # ys: [(sentence length, number of category)]
        ys = [self.linear2(F.relu(
                self.linear1(F.concat([h_f, h_b]))))
                    for h_f, h_b in zip(hs_f, hs_b)]
        return ys
Project: depccg    Author: masashi-y
def predict(self, xs):
        """
        batch: list of split sentences
        """
        xs = [self.extractor.process(x) for x in xs]
        batchsize = len(xs)
        ws, cs, ls = zip(*xs)
        ws = map(self.emb_word, ws)
        cs = [F.squeeze(
            F.max_pooling_2d(
                self.conv_char(
                    F.expand_dims(
                        self.emb_char(c), 1)), (l, 1)))
                    for c, l in zip(cs, ls)]
        xs_f = [F.dropout(F.concat([w, c]),
            self.dropout_ratio, train=self.train) for w, c in zip(ws, cs)]
        xs_b = [x[::-1] for x in xs_f]
        cx_f, hx_f, cx_b, hx_b = self._init_state(batchsize)
        _, _, hs_f = self.lstm_f(hx_f, cx_f, xs_f, train=self.train)
        _, _, hs_b = self.lstm_b(hx_b, cx_b, xs_b, train=self.train)
        hs_b = [x[::-1] for x in hs_b]
        ys = [self.linear2(F.relu(self.linear1(F.concat([h_f, h_b]))))
                    for h_f, h_b in zip(hs_f, hs_b)]
        return [y.data[1:-1] for y in ys]
Project: depccg    Author: masashi-y
def __call__(self, ws, cs, ls, ts):
        h_w = self.emb_word(ws) # (batchsize, windowsize, word_dim)
        h_c = self.emb_char(cs) # (batchsize, windowsize, max_char_len, char_dim)
        batchsize, windowsize, _, _ = h_c.data.shape
        # (batchsize, windowsize, char_dim)
        h_c = F.sum(h_c, 2)
        h_c, ls = F.broadcast(h_c, F.reshape(ls, (batchsize, windowsize, 1)))
        h_c = h_c / ls
        h = F.concat([h_w, h_c], 2)
        h = F.reshape(h, (batchsize, -1))
        # ys = self.linear1(h)
        h = F.relu(self.linear1(h))
        h = F.dropout(h, ratio=.5, train=self.train)
        ys = self.linear2(h)

        loss = F.softmax_cross_entropy(ys, ts)
        acc = F.accuracy(ys, ts)
        chainer.report({
            "loss": loss,
            "accuracy": acc
            }, self)
        return loss
Project: depccg    Author: masashi-y
def forward(self, ws, ss, ps):
        batchsize, length = ws.shape
        xp = chainer.cuda.get_array_module(ws[0])
        ws = self.emb_word(ws) # (batch, length, word_dim)
        ss = F.reshape(self.emb_suf(ss), (batchsize, length, -1))
        ps = F.reshape(self.emb_prf(ps), (batchsize, length, -1))
        hs = F.transpose(F.concat([ws, ss, ps], 2), (1, 0, 2))
        hs = F.dropout(hs, self.dropout_ratio, train=self.train)
        hs = F.split_axis(hs, length, 0)
        hs_f = []
        hs_b = []
        self._init_state()
        for h_in_f, h_in_b in zip(hs, reversed(hs)):
            h_f = self.lstm_f2(self.lstm_f1(F.squeeze(h_in_f, 0)))
            hs_f.append(h_f)
            h_b = self.lstm_b2(self.lstm_b1(F.squeeze(h_in_b, 0)))
            hs_b.append(h_b)

        ys = [self.linear2(F.relu(self.linear1(F.concat([h_f, h_b]))))
                for h_f, h_b in zip(hs_f, reversed(hs_b))]
        return ys
Project: cnn-text-classification    Author: marevol
def __call__(self, x, train=True):
        hlist = []
        h_0 = self['embed'](x)
        if not self.non_static:
            h_0 = Variable(h_0.data)
        h_1 = F.reshape(h_0, (h_0.shape[0], 1, h_0.shape[1], h_0.shape[2]))
        for filter_h in self.filter_sizes:
            pool_size = (self.doc_length - filter_h + 1, 1)
            h = F.max_pooling_2d(F.relu(self['conv' + str(filter_h)](h_1)), pool_size)
            hlist.append(h)
        h = F.concat(hlist)
        pos = 0
        while pos < len(self.hidden_units) - 1:
            h = F.dropout(F.relu(self['l' + str(pos)](h)))
            pos += 1
        y = F.relu(self['l' + str(pos)](h))
        return y
Project: MultimodalDL    Author: masataka46
def __call__(self, x, t):
        self.clear()
        h = F.max_pooling_2d(F.relu(
            F.local_response_normalization(self.conv1(x))), 3, stride=2)
        h = F.max_pooling_2d(F.relu(
            F.local_response_normalization(self.conv2(h))), 3, stride=2)
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
        h = F.dropout(F.relu(self.fc6(h)), train=self.train)
        h = F.dropout(F.relu(self.fc7(h)), train=self.train)
        h = self.fc8(h)

        self.loss = F.softmax_cross_entropy(h, t)
        self.accuracy = F.accuracy(h, t)
        return self.loss
Project: chainer-deconv    Author: germanRos
def __call__(self, x, t):
        self.clear()
        h = self.bn1(self.conv1(x), test=not self.train)
        h = F.max_pooling_2d(F.relu(h), 3, stride=2)
        h = self.bn2(self.conv2(h), test=not self.train)
        h = F.max_pooling_2d(F.relu(h), 3, stride=2)
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
        h = F.dropout(F.relu(self.fc6(h)), train=self.train)
        h = F.dropout(F.relu(self.fc7(h)), train=self.train)
        h = self.fc8(h)

        self.loss = F.softmax_cross_entropy(h, t)
        self.accuracy = F.accuracy(h, t)
        return self.loss
Project: chainer-deconv    Author: germanRos
def __call__(self, x, t):
        self.clear()
        h = F.max_pooling_2d(F.relu(
            F.local_response_normalization(self.conv1(x))), 3, stride=2)
        h = F.max_pooling_2d(F.relu(
            F.local_response_normalization(self.conv2(h))), 3, stride=2)
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
        h = F.dropout(F.relu(self.fc6(h)), train=self.train)
        h = F.dropout(F.relu(self.fc7(h)), train=self.train)
        h = self.fc8(h)

        self.loss = F.softmax_cross_entropy(h, t)
        self.accuracy = F.accuracy(h, t)
        return self.loss
Project: chainer-deconv    Author: germanRos
def __call__(self, x):
        # assume x is on GPU 0
        x1 = F.copy(x, 1)

        z0 = self.first0(x)
        z1 = self.first1(x1)

        # sync
        h0 = z0 + F.copy(z1, 0)
        h1 = z1 + F.copy(z0, 1)

        y0 = self.second0(F.relu(h0))
        y1 = self.second1(F.relu(h1))

        # sync
        y = y0 + F.copy(y1, 0)
        return y
Project: DeepPoseComparison    Author: ynaka81
def predict(self, x):
        """ Predict 2D pose from image. """
        # layer1
        h = F.relu(self.conv1(x))
        h = F.max_pooling_2d(h, 3, stride=2)
        # layer2
        h = F.relu(self.conv2(h))
        h = F.max_pooling_2d(h, 3, stride=2)
        # layer3-5
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.relu(self.conv5(h))
        h = F.max_pooling_2d(h, 3, stride=2)
        # layer6-8
        h = F.dropout(F.relu(self.fc6(h)), train=self.train)
        h = F.dropout(F.relu(self.fc7(h)), train=self.train)
        h = self.fc8(h)
        return F.reshape(h, (-1, self.Nj, 2))
Project: convolutional-pose-machines-chainer    Author: tomoyukun
def __call__(self, x):
        h = self.conv1(x)
        h = F.relu(h)
        h = F.max_pooling_2d(h, ksize=3, stride=2)
        h = self.conv2(h)
        h = F.relu(h)
        h = F.max_pooling_2d(h, ksize=3, stride=2)
        h = self.conv3(h)
        h = F.relu(h)
        h = F.max_pooling_2d(h, ksize=3, stride=2)
        h = self.conv4(h)
        h = F.relu(h)
        h = self.conv5(h)
        h = F.relu(h)
        h = self.conv6(h)
        h = F.relu(h)
        h = self.conv7(h)

        return h
Project: convolutional-pose-machines-chainer    Author: tomoyukun
def __call__(self, pmap, fmap, cmap):
        fmap = self.conv0(fmap)
        fmap = F.relu(fmap)
        cmap = F.average_pooling_2d(cmap, ksize=8, stride=8)

        h = F.concat((fmap, pmap, cmap), 1)
        h = self.conv1(h)
        h = F.relu(h)
        h = self.conv2(h)
        h = F.relu(h)
        h = self.conv3(h)
        h = F.relu(h)
        h = self.conv4(h)
        h = F.relu(h)
        h = self.conv5(h)

        return h
Project: convolutional-pose-machines-chainer    Author: tomoyukun
def __call__(self, x):
        h = self.conv1(x)
        h = F.relu(h)
        h = F.max_pooling_2d(h, ksize=3, stride=2)
        h = self.conv2(h)
        h = F.relu(h)
        h = F.max_pooling_2d(h, ksize=3, stride=2)
        h = self.conv3(h)
        h = F.relu(h)
        h = F.max_pooling_2d(h, ksize=3, stride=2)
        h = self.conv4(h)
        h = F.relu(h)
        h = self.conv5(h)
        h = F.relu(h)
        h = self.conv6(h)
        h = F.relu(h)
        h = self.conv7(h)

        return h
Project: DeepLearning    Author: fushuyue
def __call__(self, x):

        h = F.relu(self.conv1_1(x))
        h = F.relu(self.conv1_2(h))
        h = F.max_pooling_2d(h, 2, 2)

        h = F.relu(self.conv2_1(h))
        h = F.relu(self.conv2_2(h))
        h = F.max_pooling_2d(h, 2, 2)

        h = F.relu(self.conv3_1(h))
        h = F.relu(self.conv3_2(h))
        h = F.relu(self.conv3_3(h))
        h = F.relu(self.conv3_4(h))
        h = F.max_pooling_2d(h, 2, 2)

        h = F.relu(self.fc4(h))
        h = F.relu(self.fc5(h))
        h = self.fc6(h)
        L_out = h
        return L_out


# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
Project: DeepLearning    Author: fushuyue
def __call__(self, x):

        h = F.relu(self.conv1_1(x))
        h = F.relu(self.conv1_2(h))
        h = F.average_pooling_2d(h, 2, 2)

        h = F.relu(self.conv2_1(h))
        h = F.relu(self.conv2_2(h))
        h = F.average_pooling_2d(h, 2, 2)

        h = F.relu(self.conv3_1(h))
        h = F.relu(self.conv3_2(h))
        h = F.relu(self.conv3_3(h))
        h = F.relu(self.conv3_4(h))
        h = F.average_pooling_2d(h, 2, 2)

        h = F.relu(self.fc4(h))
        h = F.relu(self.fc5(h))
        h = self.fc6(h)
        L_out = h
        return L_out
Project: ddnn    Author: kunglab
def to_function(self):
        if self.nonlinearity.lower() == "clipped_relu":
            return clipped_relu()
        if self.nonlinearity.lower() == "crelu":
            return crelu()
        if self.nonlinearity.lower() == "elu":
            return elu()
        if self.nonlinearity.lower() == "hard_sigmoid":
            return hard_sigmoid()
        if self.nonlinearity.lower() == "leaky_relu":
            return leaky_relu()
        if self.nonlinearity.lower() == "relu":
            return relu()
        if self.nonlinearity.lower() == "sigmoid":
            return sigmoid()
        if self.nonlinearity.lower() == "softmax":
            return softmax()
        if self.nonlinearity.lower() == "softplus":
            return softplus()
        if self.nonlinearity.lower() == "tanh":
            return tanh()
        if self.nonlinearity.lower() == "bst":
            return bst()
        raise NotImplementedError()
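
The chain above re-lowers self.nonlinearity in every branch; a table-driven equivalent (just a sketch, assuming the same wrapper callables such as relu and tanh are in scope) keeps the name-to-activation mapping in one place:

    # Sketch equivalent of the method above; assumes the same wrapper
    # callables (clipped_relu, crelu, ..., bst) are in scope.
    _NONLINEARITIES = {
        "clipped_relu": clipped_relu, "crelu": crelu, "elu": elu,
        "hard_sigmoid": hard_sigmoid, "leaky_relu": leaky_relu,
        "relu": relu, "sigmoid": sigmoid, "softmax": softmax,
        "softplus": softplus, "tanh": tanh, "bst": bst,
    }

    def to_function(self):
        # One dict lookup replaces the if-chain; unknown names still raise.
        try:
            return _NONLINEARITIES[self.nonlinearity.lower()]()
        except KeyError:
            raise NotImplementedError(self.nonlinearity)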
Project: chainer-cifar    Author: dsanno
def __call__(self, x):
        h = self.bconv1_1(x)
        h = self.bconv1_2(h)
        h = F.dropout(F.max_pooling_2d(h, 2), 0.25)
        h = self.bconv2_1(h)
        h = self.bconv2_2(h)
        h = F.dropout(F.max_pooling_2d(h, 2), 0.25)
        h = self.bconv3_1(h)
        h = self.bconv3_2(h)
        h = self.bconv3_3(h)
        h = self.bconv3_4(h)
        h = F.dropout(F.max_pooling_2d(h, 2), 0.25)
        h = F.relu(self.fc4(F.dropout(h)))
        h = F.relu(self.fc5(F.dropout(h)))
        h = self.fc6(h)
        return h
Project: chainercv    Author: chainer
def __call__(self, x):
        """Compute feature maps from a batch of images.

        This method extracts feature maps from
        :obj:`conv4_3`, :obj:`conv7`, :obj:`conv8_2`,
        :obj:`conv9_2`, :obj:`conv10_2`, and :obj:`conv11_2`.

        Args:
            x (ndarray): An array holding a batch of images.
                The images should be resized to :math:`300\\times 300`.

        Returns:
            list of Variable:
            Each variable contains a feature map.
        """

        ys = super(VGG16Extractor300, self).__call__(x)
        for i in range(8, 11 + 1):
            h = ys[-1]
            h = F.relu(self['conv{:d}_1'.format(i)](h))
            h = F.relu(self['conv{:d}_2'.format(i)](h))
            ys.append(h)
        return ys
Project: chainercv    Author: chainer
def check_forward(self, x_data):
        x = chainer.Variable(x_data)
        # Make the batch normalization the identity function.
        self.l.bn.avg_var[:] = 1
        self.l.bn.avg_mean[:] = 0
        with chainer.using_config('train', False):
            y = self.l(x)

        self.assertIsInstance(y, chainer.Variable)
        self.assertIsInstance(y.array, self.l.xp.ndarray)

        if self.activ == 'relu':
            np.testing.assert_almost_equal(
                cuda.to_cpu(y.array), np.maximum(cuda.to_cpu(x_data), 0),
                decimal=4
            )
        elif self.activ == 'add_one':
            np.testing.assert_almost_equal(
                cuda.to_cpu(y.array), cuda.to_cpu(x_data) + 1,
                decimal=4
            )
Project: voxcelchain    Author: hiroaki-kaneda
def fwd(self,x):
        h = F.max_pooling_nd(F.local_response_normalization(F.relu(self.conv1(x))), 3, stride=2)
        h = F.max_pooling_nd(F.local_response_normalization(F.relu(self.conv2(h))), 3, stride=2)
        h = F.dropout(F.relu(self.fc3(h)), train=self.train)
        h = self.fc4(h)
        return h
Project: chainer_pong    Author: icoxfog417
def __call__(self, state: np.ndarray):
        _state = self.arr_to_gpu(state)
        s = Variable(_state)
        h1 = F.relu(self.l1(s))
        h2 = F.relu(self.l2(h1))
        h3 = F.relu(self.l3(h2))
        h4 = F.relu(self.l4(h3))
        q_value = self.out(h4)
        return q_value
Project: brain_segmentation    Author: Ryo-Ito
def __call__(self, x, train):
        h = F.relu(self.bnorm1(x, test=not train))
        h = self.conv1(h)
        h = F.relu(self.bnorm2(h, test=not train))
        h = self.conv2(h)
        return h + x
Project: nelder_mead    Author: owruby
def __call__(self, x, t):
        h = F.relu(self.l1(x))
        h = self.l2(h)

        self.loss = F.softmax_cross_entropy(h, t)
        self.accuracy = F.accuracy(h, t)

        return self.loss
Project: chainer-gan-experiments    Author: Aixile
def __init__(self, ch, bn=True, activation=F.relu, k_size=3):
        self.bn = bn
        self.activation = activation
        layers = {}
        pad = k_size//2
        layers['c0'] = L.Convolution2D(ch, ch, 3, 1, pad)
        layers['c1'] = L.Convolution2D(ch, ch, 3, 1, pad)
        if bn:
            layers['bn0'] = L.BatchNormalization(ch)
            layers['bn1'] = L.BatchNormalization(ch)
        super(ResBlock, self).__init__(**layers)
Project: chainer-gan-experiments    Author: Aixile
def differentiable_backward(self, g):
        if self.normalize_input:
            raise NotImplementedError

        if self.activation is F.leaky_relu:
            g = backward_leaky_relu(self.x, g)
        elif self.activation is F.relu:
            g = backward_relu(self.x, g)
        elif self.activation is F.tanh:
            g = backward_tanh(self.x, g)
        elif self.activation is F.sigmoid:
            g = backward_sigmoid(self.x, g)
        elif not self.activation is None:
            raise NotImplementedError

        if self.norm == 'ln':
            g = backward_layernormalization(self.nx, g, self.n)
        elif not self.norm is None:
            raise NotImplementedError

        if self.nn == 'down_conv' or self.nn == 'conv':
            g = backward_convolution(None, g, self.c)
        elif self.nn == 'linear':
            g = backward_linear(None, g, self.c)
        elif self.nn == 'up_deconv':
            g = backward_deconvolution(None, g, self.c)
        else:
            raise NotImplementedError

        return g
Project: chainer-visualization    Author: hvy
def __call__(self, x):

        """Return a softmax probability distribution over predicted classes."""

        # Convolutional layers
        hs, _ = self.feature_map_activations(x)
        h = hs[-1]

        # Fully connected layers
        h = F.dropout(F.relu(self.fc6(h)))
        h = F.dropout(F.relu(self.fc7(h)))
        h = self.fc8(h)

        return F.softmax(h)
Project: chainer-visualization    Author: hvy
def activations(self, x, layer_idx):

        """Return filter activations projected back to the input space, i.e.
        images with shape (n_feature_maps, 3, 224, 224) for a particular layer.

        The layer index is expected to be 0-based.
        """

        if x.shape[0] != 1:
            raise TypeError('Visualization is only supported for a single image at a time')

        self.check_add_deconv_layers()
        hs, unpooling_sizes = self.feature_map_activations(x)
        hs = [h.data for h in hs]

        activation_maps = []
        n_activation_maps = hs[layer_idx].shape[1]

        xp = self.xp

        for i in range(n_activation_maps):  # For each channel
            h = hs[layer_idx].copy()

            condition = xp.zeros_like(h)
            condition[0][i] = 1  # Keep one feature map and zero all the others

            h = Variable(xp.where(condition, h, xp.zeros_like(h)))

            for i in reversed(range(layer_idx+1)):
                p = self.mps[i]
                h = F.upsampling_2d(h, p.indexes, p.kh, p.sy, p.ph, unpooling_sizes[i])
                for deconv in reversed(self.deconv_blocks[i]):
                    h = deconv(F.relu(h))

            activation_maps.append(h.data)

        return xp.concatenate(activation_maps)
Project: chainer-spatial-transformer-networks    Author: hvy
def __call__(self, x):
        h = self.st(x)
        h = F.average_pooling_2d(h, 2, 2)  # For TC and RTS datasets
        h = F.relu(self.conv1(h))
        h = F.max_pooling_2d(h, 2, 2)
        h = F.relu(self.conv2(h))
        h = F.max_pooling_2d(h, 2, 2)
        h = self.fc(h)
        return h
Project: chainer-spatial-transformer-networks    Author: hvy
def affine_matrix(self, x):
        h = F.max_pooling_2d(x, 2, 2)
        h = F.relu(self.conv1(h))
        h = F.max_pooling_2d(h, 2, 2)
        h = F.relu(self.conv2(h))
        h = F.max_pooling_2d(h, 2, 2)
        theta = F.reshape(self.fc(h), (x.shape[0], 2, 3))
        return theta
Project: ROCStory_skipthought_baseline    Author: soskek
def solve(self, x_seq, pos, neg, train=True, variablize=False, onebyone=True):
        if variablize:  # If arguments are just arrays (not variables), make them variables
            x_seq = [chainer.Variable(x, volatile=not train) for x in x_seq]
            x_seq = [F.dropout(x, ratio=self.dropout_ratio, train=train) for x in x_seq]
            pos = self.act1(self.W_candidate(
                F.dropout(chainer.Variable(pos, volatile=not train),
                          ratio=self.dropout_ratio, train=train)))
            neg = self.act1(self.W_candidate(
                F.dropout(chainer.Variable(neg, volatile=not train),
                          ratio=self.dropout_ratio, train=train)))
        if onebyone and train:
            target_x_seq = [self.act1(self.W_candidate(x)) for x in x_seq[:4]]  # 1,2,3,4,5-th targets
            onebyone_loss = 0.

        self.LSTM.reset_state()
        for i, x in enumerate(x_seq):
            h = self.LSTM( F.dropout(x, ratio=self.dropout_ratio, train=train) )
            if onebyone and train and target_x_seq[i+1:]:
                pos_score, neg_score = self.calculate_score(h, target_x_seq[i+1:], neg,
                                                            multipos=True)
                onebyone_loss += F.relu( self.margin - pos_score + neg_score )

        pos_score, neg_score = self.calculate_score(h, pos, neg)
        accum_loss = F.relu( self.margin - pos_score + neg_score )
        TorFs = sum(accum_loss.data < self.margin)

        if onebyone and train:
            return F.sum(accum_loss) + F.sum(onebyone_loss), TorFs
        else:
            return F.sum(accum_loss), TorFs
Project: dockerfiles    Author: floydhub
def __call__(self, x):
        h1 = F.relu(self.l1(x))
        h2 = F.relu(self.l2(h1))
        return self.l3(h2)
Project: fcn    Author: wkentaro
def __call__(self, x, t=None):
        h = x
        h = F.relu(self.conv1_1(h))
        h = F.relu(self.conv1_2(h))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.conv2_1(h))
        h = F.relu(self.conv2_2(h))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.conv3_1(h))
        h = F.relu(self.conv3_2(h))
        h = F.relu(self.conv3_3(h))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.conv4_1(h))
        h = F.relu(self.conv4_2(h))
        h = F.relu(self.conv4_3(h))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.conv5_1(h))
        h = F.relu(self.conv5_2(h))
        h = F.relu(self.conv5_3(h))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.dropout(F.relu(self.fc6(h)), ratio=.5)
        h = F.dropout(F.relu(self.fc7(h)), ratio=.5)
        h = self.fc8(h)
        fc8 = h

        self.score = fc8

        if t is None:
            assert not chainer.config.train
            return

        self.loss = F.softmax_cross_entropy(fc8, t)
        self.accuracy = F.accuracy(self.score, t)
        return self.loss
Project: ai-bs-summer17    Author: uchibe
def pi_and_v(self, state):

        def forward(head, lstm, tail):
            h = F.relu(head(state))
            h = lstm(h)
            return tail(h)

        pout = forward(self.pi_head, self.pi_lstm, self.pi)
        vout = forward(self.v_head, self.v_lstm, self.v)

        return pout, vout
Project: chainerrl    Author: chainer
def __init__(self, in_size, out_size, hidden_sizes, normalize_input=True,
                 normalize_output=False, nonlinearity=F.relu, last_wscale=1):
        self.in_size = in_size
        self.out_size = out_size
        self.hidden_sizes = hidden_sizes
        self.normalize_input = normalize_input
        self.normalize_output = normalize_output
        self.nonlinearity = nonlinearity

        super().__init__()
        with self.init_scope():
            if normalize_input:
                self.input_bn = L.BatchNormalization(in_size)
                self.input_bn.avg_var[:] = 1

            if hidden_sizes:
                hidden_layers = []
                hidden_layers.append(LinearBN(in_size, hidden_sizes[0]))
                for hin, hout in zip(hidden_sizes, hidden_sizes[1:]):
                    hidden_layers.append(LinearBN(hin, hout))
                self.hidden_layers = chainer.ChainList(*hidden_layers)
                self.output = L.Linear(hidden_sizes[-1], out_size,
                                       initialW=LeCunNormal(last_wscale))
            else:
                self.output = L.Linear(in_size, out_size,
                                       initialW=LeCunNormal(last_wscale))

            if normalize_output:
                self.output_bn = L.BatchNormalization(out_size)
                self.output_bn.avg_var[:] = 1
Project: chainerrl    Author: chainer
def __init__(self, n_input_channels=4, n_output_channels=512,
                 activation=F.relu, bias=0.1):
        self.n_input_channels = n_input_channels
        self.activation = activation
        self.n_output_channels = n_output_channels

        layers = [
            L.Convolution2D(n_input_channels, 32, 8, stride=4,
                            initial_bias=bias),
            L.Convolution2D(32, 64, 4, stride=2, initial_bias=bias),
            L.Convolution2D(64, 64, 3, stride=1, initial_bias=bias),
            L.Linear(3136, n_output_channels, initial_bias=bias),
        ]

        super(NatureDQNHead, self).__init__(*layers)
Project: chainerrl    Author: chainer
def __init__(self, n_input_channels=4, n_output_channels=256,
                 activation=F.relu, bias=0.1):
        self.n_input_channels = n_input_channels
        self.activation = activation
        self.n_output_channels = n_output_channels

        layers = [
            L.Convolution2D(n_input_channels, 16, 8, stride=4,
                            initial_bias=bias),
            L.Convolution2D(16, 32, 4, stride=2, initial_bias=bias),
            L.Linear(2592, n_output_channels, initial_bias=bias),
        ]

        super(NIPSDQNHead, self).__init__(*layers)
Project: chainerrl    Author: chainer
def __init__(self, ndim_obs, n_actions, n_hidden_channels,
                 n_hidden_layers, nonlinearity=F.relu,
                 last_wscale=1.0):
        super().__init__(model=MLP(
            in_size=ndim_obs, out_size=n_actions,
            hidden_sizes=[n_hidden_channels] * n_hidden_layers,
            nonlinearity=nonlinearity,
            last_wscale=last_wscale))