Python chainer.functions module: elu() code examples

We extracted the following 34 code examples from open-source Python projects to illustrate how to use chainer.functions.elu().

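For reference, chainer.functions.elu(x, alpha=1.0) computes x where x >= 0 and alpha * (exp(x) - 1) where x < 0. A minimal sketch of calling it directly on a NumPy array (input values chosen only for illustration):

import numpy as np
import chainer.functions as F

x = np.array([-2.0, 0.0, 3.0], dtype=np.float32)
y = F.elu(x, alpha=1.0)  # negative entries become alpha * (exp(x) - 1)
print(y.data)            # approximately [-0.8647  0.      3.    ]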
Project: depccg    Author: masashi-y    | project source | file source
def forward(self, ws, ss, ps):
        batchsize = len(ws)
        xp = chainer.cuda.get_array_module(ws[0])
        ws = map(self.emb_word, ws)
        ss = [F.reshape(self.emb_suf(s), (s.shape[0], 4 * self.afix_dim)) for s in ss]
        ps = [F.reshape(self.emb_prf(s), (s.shape[0], 4 * self.afix_dim)) for s in ps]
        xs_f = [F.dropout(F.concat([w, s, p]),
            self.dropout_ratio, train=self.train) for w, s, p in zip(ws, ss, ps)]
        xs_b = [x[::-1] for x in xs_f]
        cx_f, hx_f, cx_b, hx_b = self._init_state(xp, batchsize)
        _, _, hs_f = self.lstm_f(hx_f, cx_f, xs_f, train=self.train)
        _, _, hs_b = self.lstm_b(hx_b, cx_b, xs_b, train=self.train)
        hs_b = [x[::-1] for x in hs_b]
        # ys: [(sentence length, number of categories)]
        hs = [F.concat([h_f, h_b]) for h_f, h_b in zip(hs_f, hs_b)]

        cat_ys = [self.linear_cat2(
            F.dropout(F.elu(self.linear_cat1(h)), 0.5, train=self.train)) for h in hs]

        dep_ys = [self.biaffine(
            F.elu(F.dropout(self.linear_dep(h), 0.32, train=self.train)),
            F.elu(F.dropout(self.linear_head(h), 0.32, train=self.train))) for h in hs]

        return cat_ys, dep_ys
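This forward pass runs a bidirectional LSTM over concatenated word, suffix, and prefix embeddings, then feeds each hidden state through two heads: an ELU MLP for supertag categories, and a pair of ELU projections scored by a biaffine layer for dependency arcs. The train= keyword passed to F.dropout (and to the LSTM calls) is the Chainer v1 API; from Chainer v2 onward the flag moved to the global configuration. A minimal sketch of the v2-style equivalent for one head, assuming h is a chainer.Variable:

import chainer
import chainer.functions as F

with chainer.using_config('train', True):
    y = F.dropout(F.elu(h), ratio=0.5)  # h: a chainer.Variable (assumed)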
Project: ddnn    Author: kunglab    | project source | file source
def to_function(self):
        if self.nonlinearity.lower() == "clipped_relu":
            return clipped_relu()
        if self.nonlinearity.lower() == "crelu":
            return crelu()
        if self.nonlinearity.lower() == "elu":
            return elu()
        if self.nonlinearity.lower() == "hard_sigmoid":
            return hard_sigmoid()
        if self.nonlinearity.lower() == "leaky_relu":
            return leaky_relu()
        if self.nonlinearity.lower() == "relu":
            return relu()
        if self.nonlinearity.lower() == "sigmoid":
            return sigmoid()
        if self.nonlinearity.lower() == "softmax":
            return softmax()
        if self.nonlinearity.lower() == "softplus":
            return softplus()
        if self.nonlinearity.lower() == "tanh":
            return tanh()
        if self.nonlinearity.lower() == "bst":
            return bst()
        raise NotImplementedError()
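This dispatcher turns a configuration string into one of the project's activation wrapper objects (see the elu wrapper with __init__ and __call__ further down this page). A hypothetical usage, assuming act_config stands in for an instance of the surrounding class constructed with nonlinearity="elu":

f = act_config.to_function()  # returns the project's elu() wrapper
y = f(x)                      # the wrapper's __call__ applies F.elu(x, self.alpha)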
Project: unrolled-gan    Author: musyoku    | project source | file source
def to_function(self):
        if self.nonlinearity.lower() == "clipped_relu":
            return clipped_relu()
        if self.nonlinearity.lower() == "crelu":
            return crelu()
        if self.nonlinearity.lower() == "elu":
            return elu()
        if self.nonlinearity.lower() == "hard_sigmoid":
            return hard_sigmoid()
        if self.nonlinearity.lower() == "leaky_relu":
            return leaky_relu()
        if self.nonlinearity.lower() == "relu":
            return relu()
        if self.nonlinearity.lower() == "sigmoid":
            return sigmoid()
        if self.nonlinearity.lower() == "softmax":
            return softmax()
        if self.nonlinearity.lower() == "softplus":
            return softplus()
        if self.nonlinearity.lower() == "tanh":
            return tanh()
        raise NotImplementedError()
Project: LSGAN    Author: musyoku    | project source | file source
def to_function(self):
        if self.nonlinearity.lower() == "clipped_relu":
            return clipped_relu()
        if self.nonlinearity.lower() == "crelu":
            return crelu()
        if self.nonlinearity.lower() == "elu":
            return elu()
        if self.nonlinearity.lower() == "hard_sigmoid":
            return hard_sigmoid()
        if self.nonlinearity.lower() == "leaky_relu":
            return leaky_relu()
        if self.nonlinearity.lower() == "relu":
            return relu()
        if self.nonlinearity.lower() == "sigmoid":
            return sigmoid()
        if self.nonlinearity.lower() == "softmax":
            return softmax()
        if self.nonlinearity.lower() == "softplus":
            return softplus()
        if self.nonlinearity.lower() == "tanh":
            return tanh()
        raise NotImplementedError()
Project: adgm    Author: musyoku    | project source | file source
def to_function(self):
        if self.nonlinearity.lower() == "clipped_relu":
            return clipped_relu()
        if self.nonlinearity.lower() == "crelu":
            return crelu()
        if self.nonlinearity.lower() == "elu":
            return elu()
        if self.nonlinearity.lower() == "hard_sigmoid":
            return hard_sigmoid()
        if self.nonlinearity.lower() == "leaky_relu":
            return leaky_relu()
        if self.nonlinearity.lower() == "relu":
            return relu()
        if self.nonlinearity.lower() == "sigmoid":
            return sigmoid()
        if self.nonlinearity.lower() == "softmax":
            return softmax()
        if self.nonlinearity.lower() == "softplus":
            return softplus()
        if self.nonlinearity.lower() == "tanh":
            return tanh()
        raise NotImplementedError()
Project: chainer-gan-experiments    Author: Aixile    | project source | file source
def selu(x):
    alpha = 1.6732632423543772848170429916717
    scale = 1.0507009873554804934193349852946
    return scale * F.elu(x, alpha=alpha)
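The two constants are the fixed alpha and scale of the SELU activation from Klambauer et al., "Self-Normalizing Neural Networks" (2017); SELU is exactly a scaled ELU, which is why it can be built directly on F.elu. An illustrative sanity check:

import numpy as np

x = np.array([-50.0, 0.0, 1.0], dtype=np.float32)
print(selu(x).data)  # approximately [-1.7581  0.      1.0507]; large negative
                     # inputs saturate near -alpha * scale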
Project: chainerrl    Author: chainer    | project source | file source
def parse_activation(activation_str):
    if activation_str == 'relu':
        return F.relu
    elif activation_str == 'elu':
        return F.elu
    elif activation_str == 'lrelu':
        return F.leaky_relu
    else:
        raise RuntimeError(
            'Not supported activation: {}'.format(activation_str))
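A quick usage sketch (the strings mirror the branches above):

act = parse_activation('elu')  # returns F.elu itself
assert act is F.elu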
Project: chainer-fast-neuralstyle    Author: yusuketomoto    | project source | file source
def __call__(self, x, test=False):
        h = self.b1(F.elu(self.c1(x)), test=test)
        h = self.b2(F.elu(self.c2(h)), test=test)
        h = self.b3(F.elu(self.c3(h)), test=test)
        h = self.r1(h, test=test)
        h = self.r2(h, test=test)
        h = self.r3(h, test=test)
        h = self.r4(h, test=test)
        h = self.r5(h, test=test)
        h = self.b4(F.elu(self.d1(h)), test=test)
        h = self.b5(F.elu(self.d2(h)), test=test)
        y = self.d3(h)
        return (F.tanh(y)+1)*127.5
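Since F.tanh outputs values in [-1, 1], the final (F.tanh(y) + 1) * 127.5 rescales the generated image into the [0, 255] pixel range.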
Project: depccg    Author: masashi-y    | project source | file source
def forward(self, ws, cs, ls, dep_ts=None):
        batchsize = len(ws)
        xp = chainer.cuda.get_array_module(ws[0])
        ws = map(self.emb_word, ws)
        cs = [F.squeeze(
            F.max_pooling_2d(
                self.conv_char(
                    F.expand_dims(
                        self.emb_char(c), 1)), (int(l[0]), 1)))
                    for c, l in zip(cs, ls)]
        xs_f = [F.dropout(F.concat([w, c]),
            self.dropout_ratio, train=self.train) for w, c in zip(ws, cs)]
        xs_b = [x[::-1] for x in xs_f]
        cx_f, hx_f, cx_b, hx_b = self._init_state(xp, batchsize)
        _, _, hs_f = self.lstm_f(hx_f, cx_f, xs_f, train=self.train)
        _, _, hs_b = self.lstm_b(hx_b, cx_b, xs_b, train=self.train)
        hs_b = [x[::-1] for x in hs_b]
        hs = [F.concat([h_f, h_b]) for h_f, h_b in zip(hs_f, hs_b)]


        dep_ys = [self.biaffine_arc(
            F.elu(F.dropout(self.arc_dep(h), 0.32, train=self.train)),
            F.elu(F.dropout(self.arc_head(h), 0.32, train=self.train))) for h in hs]

        if dep_ts is not None:
            heads = dep_ts
        else:
            heads = [F.argmax(y, axis=1) for y in dep_ys]

        cat_ys = [
                self.biaffine_tag(
            F.elu(F.dropout(self.rel_dep(h), 0.32, train=self.train)),
            F.elu(F.dropout(self.rel_head(
                F.embed_id(t, h, ignore_label=IGNORE)), 0.32, train=self.train))) \
                        for h, t in zip(hs, heads)]

        return cat_ys, dep_ys
Project: depccg    Author: masashi-y    | project source | file source
def forward(self, ws, cs):
        batchsize, length, max_word_len = cs.shape
        ws = self.emb_word(ws) # (batch, length, word_dim)
        cs = F.reshape(
            F.max_pooling_2d(
                self.conv_char(
                    F.reshape(
                        self.emb_char(cs),
                        (batchsize * length, 1, max_word_len, 50))), (max_word_len, 1)),
                    (batchsize, length, self.char_dim))

        hs = F.transpose(F.concat([ws, cs], 2), (1, 0, 2))
        hs = F.dropout(hs, self.dropout_ratio, train=self.train)
        hs = F.split_axis(hs, length, 0)
        hs_f = []
        hs_b = []
        self._init_state()
        for h_in_f, h_in_b in zip(hs, reversed(hs)):
            h_f = self.lstm_f2(self.lstm_f1(F.reshape(h_in_f, (batchsize, -1))))
            hs_f.append(h_f)
            h_b = self.lstm_b2(self.lstm_b1(F.reshape(h_in_b, (batchsize, -1))))
            hs_b.append(h_b)

        hs = [F.concat([h_f, h_b]) for h_f, h_b in zip(hs_f, reversed(hs_b))]

        cat_ys = [self.linear_cat2(F.dropout(
            F.elu(self.linear_cat1(h)), 0.5, train=self.train)) for h in hs]

        hs = [F.reshape(h, (length, -1)) for h in \
                F.split_axis(F.transpose(F.stack(hs, 2), (0, 2, 1)), batchsize, 0)]

        dep_ys = [self.biaffine(
            F.relu(F.dropout(self.linear_dep(h), 0.32, train=self.train)),
            F.relu(F.dropout(self.linear_head(h), 0.32, train=self.train))) for h in hs]
        return cat_ys, dep_ys
Project: depccg    Author: masashi-y    | project source | file source
def forward(self, ws, ss, ps):
        batchsize, length = ws.shape
        xp = chainer.cuda.get_array_module(ws[0])
        ws = self.emb_word(ws) # (batch, length, word_dim)
        ss = F.reshape(self.emb_suf(ss), (batchsize, length, -1))
        ps = F.reshape(self.emb_prf(ps), (batchsize, length, -1))
        hs = F.transpose(F.concat([ws, ss, ps], 2), (1, 0, 2))
        hs = F.dropout(hs, self.dropout_ratio, train=self.train)
        hs = F.split_axis(hs, length, 0)
        hs_f = []
        hs_b = []
        self._init_state()
        for h_in_f, h_in_b in zip(hs, reversed(hs)):
            h_f = self.lstm_f2(self.lstm_f1(F.reshape(h_in_f, (-1, self.in_dim))))
            hs_f.append(h_f)
            h_b = self.lstm_b2(self.lstm_b1(F.reshape(h_in_b, (-1, self.in_dim))))
            hs_b.append(h_b)

        # concatenate forward/backward states, as in the variant above; a bare
        # zip() would leave tuples that the linear layers below cannot consume
        hs = [F.concat([h_f, h_b]) for h_f, h_b in zip(hs_f, reversed(hs_b))]

        cat_ys = [self.linear_cat2(F.dropout(
            F.elu(self.linear_cat1(h)), 0.5, train=self.train)) for h in hs]

        dep_ys = [self.biaffine(
            F.elu(F.dropout(self.linear_dep(h), 0.32, train=self.train)),
            F.elu(F.dropout(self.linear_head(h), 0.32, train=self.train))) for h in hs]

        return cat_ys, dep_ys
Project: chainer-began    Author: hvy    | project source | file source
def __call__(self, x):
        h = F.elu(self.l0(x))

        for i in range(self.n_blocks):
            for j in range(self.block_size):
                h = getattr(self, 'c{}'.format(i * self.block_size + j))(h)
                h = F.elu(h)
            if i < self.n_blocks - 1:
                h = F.max_pooling_2d(h, ksize=2, stride=2)

        return self.ln(h)
Project: chainer-began    Author: hvy    | project source | file source
def __call__(self, x):
        h = F.reshape(self.l0(x), ((x.shape[0],) + self.embed_shape))

        for i in range(self.n_blocks):
            for j in range(self.block_size):
                # flat index into the conv links, matching the discriminator above
                h = F.elu(getattr(self, 'c{}'.format(i * self.block_size + j))(h))
            if i < self.n_blocks - 1:
                h = F.unpooling_2d(h, ksize=2, stride=2, cover_all=False)

        return self.ln(h)
Project: chainer-speech-recognition    Author: musyoku    | project source | file source
def __call__(self, x):
        return functions.elu(x, self.alpha)
Project: chainer-deconv    Author: germanRos    | project source | file source
def check_forward(self, x_data):
        x = chainer.Variable(x_data)
        y = functions.elu(x, alpha=self.alpha)
        self.assertEqual(y.data.dtype, self.dtype)

        expected = self.x.copy()
        for i in numpy.ndindex(self.x.shape):
            if self.x[i] < 0:
                expected[i] = self.alpha * (numpy.exp(expected[i]) - 1)

        gradient_check.assert_allclose(
            expected, y.data, **self.check_forward_options)
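This unit test checks F.elu against its definition elementwise: the expected output keeps x where x >= 0 and substitutes alpha * (exp(x) - 1) where x < 0, matching the formula given at the top of this page.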
Project: jrm_ssl    Author: Fhrozen    | project source | file source
def __init__(self, in_size, ch, out_size, stride=2, act=F.elu):
        w = math.sqrt(2)
        super(BottleNeckA, self).__init__(
            conv1=L.Convolution2D(in_size, ch, 1, stride, 0, w, nobias=True),
            bn1=L.BatchNormalization(ch),
            conv2=L.Convolution2D(ch, ch, 3, 1, 1, w, nobias=True),
            bn2=L.BatchNormalization(ch),
            conv3=L.Convolution2D(ch, out_size, 1, 1, 0, w, nobias=True),
            bn3=L.BatchNormalization(out_size),

            conv4=L.Convolution2D(in_size, out_size, 1, stride, 0, w, nobias=True),
            bn4=L.BatchNormalization(out_size),
        )
        self.act=act
Project: jrm_ssl    Author: Fhrozen    | project source | file source
def __init__(self, in_size, ch, act=F.elu):
        w = math.sqrt(2)
        super(BottleNeckB, self).__init__(
            conv1=L.Convolution2D(in_size, ch, 1, 1, 0, w, nobias=True),
            bn1=L.BatchNormalization(ch),
            conv2=L.Convolution2D(ch, ch, 3, 1, 1, w, nobias=True),
            bn2=L.BatchNormalization(ch),
            conv3=L.Convolution2D(ch, in_size, 1, 1, 0, w, nobias=True),
            bn3=L.BatchNormalization(in_size),
        )
        self.act=act
Project: jrm_ssl    Author: Fhrozen    | project source | file source
def __init__(self, layer, in_size, ch, out_size, stride=2, act=F.elu):
        super(Block, self).__init__()
        links = [('a', BottleNeckA(in_size, ch, out_size, stride, act))]
        for i in range(layer-1):
            links += [('b{}'.format(i+1), BottleNeckB(out_size, ch, act))]

        for link in links:
            self.add_link(*link)
        self.forward = links
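self.forward stores (name, link) pairs; the Block's call logic is not among the snippets on this page, but it presumably walks the stored names in order, as in this hedged sketch:

# Hypothetical traversal of the (name, link) pairs stored in self.forward;
# the project's actual __call__ is not shown here.
def __call__(self, x):
    for name, _ in self.forward:
        x = getattr(self, name)(x)
    return x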
Project: ddnn    Author: kunglab    | project source | file source
def __init__(self, alpha=1.0):
        self._function = "elu"
        self.alpha = alpha
Project: ddnn    Author: kunglab    | project source | file source
def __call__(self, x):
        return F.elu(x, self.alpha)
Project: adversarial-autoencoder    Author: musyoku    | project source | file source
def __call__(self, x):
        return functions.elu(x, self.alpha)
Project: chainer-examples    Author: nocotan    | project source | file source
def __call__(self, x):
        h = F.elu(self.conv1(x))
        h = F.elu(self.conv2(h))
        return self.conv3(h)
Project: chainer-examples    Author: nocotan    | project source | file source
def __call__(self, x):
        h = None
        for name, _ in self.forward:
            f = getattr(self, name)
            h_t = f(x)
            if h is None:
                h = h_t
            else:
                h += h_t

        return F.elu(h)
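Here each entry in self.forward names a parallel branch applied to the same input x; the branch outputs are summed, and ELU is applied once to the aggregate rather than per branch.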
Project: chainer-examples    Author: nocotan    | project source | file source
def __call__(self, x):
        h = F.elu(self.conv1(x))
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.res2(h, self.train)
        h = self.res3(h, self.train)
        h = self.res4(h, self.train)
        h = self.res5(h, self.train)
        h = F.spatial_pyramid_pooling_2d(h, 3, F.MaxPooling2D)

        h = F.elu(self.conv2(h))
        h = F.dropout(h, ratio=0.5)
        h = self.conv3(h)
        h = F.reshape(h, (-1, self.num_class))

        return h
Project: unrolled-gan    Author: musyoku    | project source | file source
def __init__(self, alpha=1.0):
        self._function = "elu"
        self.alpha = alpha
Project: unrolled-gan    Author: musyoku    | project source | file source
def __call__(self, x):
        return F.elu(x, self.alpha)
Project: cv-api    Author: yasunorikudo    | project source | file source
def __call__(self, x, test=False):
        h = self.b1(F.elu(self.c1(x)), test=test)
        h = self.b2(F.elu(self.c2(h)), test=test)
        h = self.b3(F.elu(self.c3(h)), test=test)
        h = self.r1(h, test=test)
        h = self.r2(h, test=test)
        h = self.r3(h, test=test)
        h = self.r4(h, test=test)
        h = self.r5(h, test=test)
        h = self.b4(F.elu(self.d1(h)), test=test)
        h = self.b5(F.elu(self.d2(h)), test=test)
        y = self.d3(h)
        return (F.tanh(y)+1)*127.5
Project: wavenet    Author: musyoku    | project source | file source
def _forward_softmax_block(self, x_batch, apply_softmax=True):
        input_batch = Variable(x_batch)
        for layer in self.softmax_conv_layers:
            input_batch = F.elu(input_batch)
            output = layer(input_batch)
            input_batch = output
        if apply_softmax:
            output = F.softmax(output)
        return output
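Note the pre-activation ordering: ELU is applied to the input of every convolution in the softmax stack, and the softmax itself is optional, so a caller can request raw logits (apply_softmax=False), e.g. to feed F.softmax_cross_entropy directly.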
Project: LSGAN    Author: musyoku    | project source | file source
def __init__(self, alpha=1.0):
        self._function = "elu"
        self.alpha = alpha
Project: LSGAN    Author: musyoku    | project source | file source
def __call__(self, x):
        return F.elu(x, self.alpha)
Project: adgm    Author: musyoku    | project source | file source
def __init__(self, alpha=1.0):
        self._function = "elu"
        self.alpha = alpha
Project: adgm    Author: musyoku    | project source | file source
def __call__(self, x):
        return F.elu(x, self.alpha)
Project: chainer_img2img_example    Author: taizan    | project source | file source
def __call__(self, x):
        h = F.elu(self.bnc1(self.c1(x)))
        h = F.elu(self.bnc2(self.c2(h)))
        h = F.elu(self.bnc3(self.c3(h)))
        h = F.elu(self.bnc4(self.c4(h)))
        h = self.c5(h)

        return h
Project: depccg    Author: masashi-y    | project source | file source
def forward(self, ws, ss, ps, ls, dep_ts=None):
        batchsize, slen = ws.shape
        xp = chainer.cuda.get_array_module(ws[0])

        wss = self.emb_word(ws)
        sss = F.reshape(self.emb_suf(ss), (batchsize, slen, 4 * self.afix_dim))
        pss = F.reshape(self.emb_prf(ps), (batchsize, slen, 4 * self.afix_dim))
        ins = F.dropout(F.concat([wss, sss, pss], 2), self.dropout_ratio, train=self.train)
        xs_f = F.transpose(ins, (1, 0, 2))
        xs_b = xs_f[::-1]

        cx_f, hx_f, cx_b, hx_b = self._init_state(xp, batchsize)
        _, _, hs_f = self.lstm_f(hx_f, cx_f, xs_f, train=self.train)
        _, _, hs_b = self.lstm_b(hx_b, cx_b, xs_b, train=self.train)

        # (batch, length, hidden_dim)
        hs = F.transpose(F.concat([hs_f, hs_b[::-1]], 2), (1, 0, 2))

        dep_ys = self.biaffine_arc(
            F.elu(F.dropout(self.arc_dep(hs), 0.32, train=self.train)),
            F.elu(F.dropout(self.arc_head(hs), 0.32, train=self.train)))

        if dep_ts is not None and random.random() >= 0.5:  # use gold heads about half the time
            heads = dep_ts
        else:
            heads = F.flatten(F.argmax(dep_ys, axis=2)) + \
                    xp.repeat(xp.arange(0, batchsize * slen, slen), slen)

        hs = F.reshape(hs, (batchsize * slen, -1))
        heads = F.permutate(
                    F.elu(F.dropout(
                        self.rel_head(hs), 0.32, train=self.train)), heads)

        childs = F.elu(F.dropout(self.rel_dep(hs), 0.32, train=self.train))
        cat_ys = self.biaffine_tag(childs, heads)

        dep_ys = F.split_axis(dep_ys, batchsize, 0) if batchsize > 1 else [dep_ys]
        dep_ys = [F.reshape(v, v.shape[1:])[:l, :l] for v, l in zip(dep_ys, ls)]

        cat_ys = F.split_axis(cat_ys, batchsize, 0) if batchsize > 1 else [cat_ys]
        cat_ys = [v[:l] for v, l in zip(cat_ys, ls)]

        return cat_ys, dep_ys
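This batched variant scores all arcs at once. During training, the gold heads dep_ts are used roughly half the time (the random.random() draw above); otherwise the argmax predictions are flattened and offset per sentence so that F.permutate can pick out each token's head representation before the biaffine tagger scores (child, head) pairs.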