Python chainer.functions module: tanh() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use chainer.functions.tanh().
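
As a quick orientation before the project excerpts: chainer.functions.tanh applies the elementwise hyperbolic tangent and returns a chainer.Variable. A minimal, self-contained sketch (the input values are illustrative, not taken from any project below):

import numpy as np
import chainer.functions as F

x = np.array([[-1.0, 0.0, 1.0]], dtype=np.float32)
y = F.tanh(x)   # accepts an ndarray or a Variable; returns a chainer.Variable
print(y.data)   # approximately [[-0.7616  0.      0.7616]]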

Project: ROCStory_skipthought_baseline    Author: soskek
def __init__(self, args):
        super(LSTM, self).__init__(
            # RNN
            LSTM=L.LSTM(args.n_in_units, args.n_units),
            #W_predict=L.Linear(args.n_units, args.n_units),
            W_candidate=L.Linear(args.n_in_units, args.n_units),
        )

        #self.act1 = F.tanh
        self.act1 = F.identity

        self.args = args
        self.n_in_units = args.n_in_units
        self.n_units = args.n_units
        self.dropout_ratio = args.d_ratio
        self.margin = args.margin

        self.initialize_parameters()
Project: chainerrl    Author: chainer
def bound_by_tanh(x, low, high):
    """Bound a given value into [low, high] by tanh.

    Args:
        x (chainer.Variable): value to bound
        low (numpy.ndarray): lower bound
        high (numpy.ndarray): upper bound
    Returns: chainer.Variable
    """
    assert isinstance(x, chainer.Variable)
    assert low is not None
    assert high is not None
    xp = cuda.get_array_module(x.data)
    x_scale = (high - low) / 2
    x_scale = xp.expand_dims(xp.asarray(x_scale), axis=0)
    x_mean = (high + low) / 2
    x_mean = xp.expand_dims(xp.asarray(x_mean), axis=0)
    return F.tanh(x) * x_scale + x_mean
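
A minimal sketch of exercising bound_by_tanh with hypothetical numpy bounds (since tanh saturates toward ±1, large-magnitude inputs land near the bounds):

import numpy as np
import chainer

low = np.array([-2.0, 0.0], dtype=np.float32)
high = np.array([2.0, 1.0], dtype=np.float32)
x = chainer.Variable(np.array([[10.0, -10.0]], dtype=np.float32))
y = bound_by_tanh(x, low, high)  # y.data is approximately [[2.0, 0.0]]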
Project: chainerrl    Author: chainer
def __init__(self, obs_size, action_space,
                 n_hidden_layers=2, n_hidden_channels=64,
                 bound_mean=None, normalize_obs=None):
        assert bound_mean in [False, True]
        assert normalize_obs in [False, True]
        super().__init__()
        hidden_sizes = (n_hidden_channels,) * n_hidden_layers
        self.normalize_obs = normalize_obs
        with self.init_scope():
            self.pi = policies.FCGaussianPolicyWithStateIndependentCovariance(
                obs_size, action_space.low.size,
                n_hidden_layers, n_hidden_channels,
                var_type='diagonal', nonlinearity=F.tanh,
                bound_mean=bound_mean,
                min_action=action_space.low, max_action=action_space.high,
                mean_wscale=1e-2)
            self.v = links.MLP(obs_size, 1, hidden_sizes=hidden_sizes)
            if self.normalize_obs:
                self.obs_filter = links.EmpiricalNormalization(
                    shape=obs_size
                )
Project: chainer-cyclegan    Author: Aixile
def __init__(self):
        super(Generator_ResBlock_9, self).__init__(
            c1 = CBR(3, 32, bn=True, sample='none-7'),
            c2 = CBR(32, 64, bn=True, sample='down'),
            c3 = CBR(64, 128, bn=True, sample='down'),
            c4 = ResBlock(128, bn=True),
            c5 = ResBlock(128, bn=True),
            c6 = ResBlock(128, bn=True),
            c7 = ResBlock(128, bn=True),
            c8 = ResBlock(128, bn=True),
            c9 = ResBlock(128, bn=True),
            c10 = ResBlock(128, bn=True),
            c11 = ResBlock(128, bn=True),
            c12 = ResBlock(128, bn=True),
            c13 = CBR(128, 64, bn=True, sample='up'),
            c14 = CBR(64, 32, bn=True, sample='up'),
            c15 = CBR(32, 3, bn=True, sample='none-7', activation=F.tanh)
        )
Project: depccg    Author: masashi-y
def __call__(self, xs):
        """
        xs: (batchsize, hidden_dim)
        """

        if self.h is not None:
            h = self.h
            c = self.c
        else:
            xp = chainer.cuda.get_array_module(xs.data)
            batchsize = xs.shape[0]
            h = Variable(xp.zeros((batchsize, self.outsize), 'f'), volatile='AUTO')
            c = Variable(xp.zeros((batchsize, self.outsize), 'f'), volatile='AUTO')

        in_gate = F.sigmoid(self.linear_in(F.concat([xs, h, c])))
        new_in = F.tanh(self.linear_c(F.concat([xs, h])))
        self.c = in_gate * new_in + (1. - in_gate) * c
        out_gate = F.sigmoid(self.linear_out(F.concat([xs, h, self.c])))
        self.h = F.tanh(self.c) * out_gate
        return self.h
Project: depccg    Author: masashi-y
def __call__(self, x):
        if not hasattr(self, 'encoding') or self.encoding is None:
            self.batch_size = x.shape[0]
            self.init()
        dims = len(x.shape) - 1
        f, z, o = F.split_axis(self.pre(x), 3, axis=dims)
        f = F.sigmoid(f)
        z = (1 - f) * F.tanh(z)
        o = F.sigmoid(o)

        if dims == 2:
            self.c = strnn(f, z, self.c[:self.batch_size])
        else:
            self.c = f * self.c + z

        if self.attention:
            context = attention_sum(self.encoding, self.c)
            self.h = o * self.o(F.concat((self.c, context), axis=dims))
        else:
            self.h = self.c * o

        self.x = x
        return self.h
Project: lencon    Author: kiyukuta
def decode_once(self, x, state, train=True):

        c = state['c']
        h = state['h']
        h_tilde = state.get('h_tilde', None)

        emb = self.trg_emb(x)

        lstm_in = self.eh(emb) + self.hh(h)
        if h_tilde is not None:
            lstm_in += self.ch(h_tilde)
        c, h = F.lstm(c, lstm_in)
        a = self.attender(h, train=train)
        h_tilde = F.concat([a, h])
        h_tilde = F.tanh(self.w_c(h_tilde))
        o = self.ho(h_tilde)
        state['c'] = c
        state['h'] = h
        state['h_tilde'] = h_tilde
        return o, state
Project: lencon    Author: kiyukuta
def decode_once(self, x, state, train=True):
        l = state.get('lengths', self.lengths)
        c = state['c']
        h = state['h']
        h_tilde = state.get('h_tilde', None)

        emb = self.trg_emb(x)
        lemb = self.len_emb(l)
        lstm_in = self.eh(emb) + self.hh(h) + self.lh(lemb)
        if h_tilde is not None:
            lstm_in += self.ch(h_tilde)
        c, h = F.lstm(c, lstm_in)
        a = self.attender(h, train=train)
        h_tilde = F.concat([a, h])

        h_tilde = F.tanh(self.w_c(h_tilde))
        o = self.ho(h_tilde)
        state['c'] = c
        state['h'] = h
        state['h_tilde'] = h_tilde
        return o, state
Project: stock_dqn_f    Author: wdy06
def Q_func(self, state):
        if state.ndim == 2:
            agent_state = state[:, -self.agent_state_dim:]
            market_state = state[:, :self.market_state_dim]

        elif state.ndim == 3:
            agent_state = state[:, :, -self.agent_state_dim:]
            market_state = state[:, :, :self.market_state_dim]

        a_state = Variable(agent_state)
        m_state = Variable(market_state)
        a = F.tanh(self.a1(a_state))
        a = F.tanh(self.a2(a))
        a = F.tanh(self.a3(a))
        m = F.tanh(self.s1(m_state))
        m = F.tanh(self.s2(m))
        m = F.tanh(self.s3(m))
        new_state = F.concat((a, m), axis=1)

        h = F.tanh(self.fc4(new_state))
        h = F.tanh(self.fc5(h))
        Q = self.q_value(h)

        return Q
Project: chainer_nmt    Author: odashi
def _context(self, p, fb_mat, fbe_mat):
    batch_size, source_length, _ = fb_mat.data.shape
    # {pe,e}_mat: shape = [batch * srclen, atten]
    pe_mat = F.reshape(
        F.broadcast_to(
            F.expand_dims(self.p_e(p), 1),
            [batch_size, source_length, self.atten_size]),
        [batch_size * source_length, self.atten_size])
    e_mat = F.tanh(fbe_mat + pe_mat)
    # a_mat: shape = [batch, srclen]
    a_mat = F.softmax(F.reshape(self.e_a(e_mat), [batch_size, source_length]))
    # q: shape = [batch, 2 * hidden]
    q = F.reshape(
        F.batch_matmul(a_mat, fb_mat, transa=True),
        [batch_size, 2 * self.hidden_size])

    return q
Project: nn_parsers    Author: odashi
def __call__(self, text, wid):
    if text in self.__cache:
      #trace('cache hit: ' + text)
      return self.__cache[text]
    #trace('cache new: ' + text)

    self.__reset_state()
    c_list = [XP.iarray([min(ord(c), 0x7f)]) for c in text]
    x_list = [functions.tanh(self.c_x(c)) for c in c_list]
    for x in x_list:
      f = self.x_f(x)
    for x in reversed(x_list):
      b = self.x_b(x)
    e = functions.tanh(self.w_e(XP.iarray([wid])) + self.f_e(f) + self.b_e(b))
    self.__cache[text] = e
    return e
Project: ddnn    Author: kunglab
def to_function(self):
        if self.nonlinearity.lower() == "clipped_relu":
            return clipped_relu()
        if self.nonlinearity.lower() == "crelu":
            return crelu()
        if self.nonlinearity.lower() == "elu":
            return elu()
        if self.nonlinearity.lower() == "hard_sigmoid":
            return hard_sigmoid()
        if self.nonlinearity.lower() == "leaky_relu":
            return leaky_relu()
        if self.nonlinearity.lower() == "relu":
            return relu()
        if self.nonlinearity.lower() == "sigmoid":
            return sigmoid()
        if self.nonlinearity.lower() == "softmax":
            return softmax()
        if self.nonlinearity.lower() == "softplus":
            return softplus()
        if self.nonlinearity.lower() == "tanh":
            return tanh()
        if self.nonlinearity.lower() == "bst":
            return bst()
        raise NotImplementedError()
Project: nmtrain    Author: philip30
def __call__(self):
    mem_optimize = nmtrain.optimization.chainer_mem_optimize
    # Calculate Attention vector
    a = self.attention(self.S, self.h)
    # Calculate context vector
    c = F.squeeze(F.batch_matmul(self.S, a, transa=True), axis=2)
    # Calculate hidden vector + context
    self.ht = self.context_project(F.concat((self.h, c), axis=1))
    # Calculate Word probability distribution
    y = mem_optimize(self.affine_vocab, F.tanh(self.ht), level=1)
    if self.use_lexicon:
      y = self.lexicon_model(y, a, self.ht, self.lexicon_matrix)

    if nmtrain.environment.is_train():
      return nmtrain.models.decoders.Output(y=y)
    else:
      # Return the vocabulary size output projection
      return nmtrain.models.decoders.Output(y=y, a=a)
Project: chainer-pix2pix    Author: wuhuikai
def __call__(self, x, test=False, dropout=True):
        e1 = self.c1(x)
        e2 = self.b2(self.c2(F.leaky_relu(e1)), test=test)
        e3 = self.b3(self.c3(F.leaky_relu(e2)), test=test)
        e4 = self.b4(self.c4(F.leaky_relu(e3)), test=test)
        e5 = self.b5(self.c5(F.leaky_relu(e4)), test=test)
        e6 = self.b6(self.c6(F.leaky_relu(e5)), test=test)
        e7 = self.b7(self.c7(F.leaky_relu(e6)), test=test)
        e8 = self.b8(self.c8(F.leaky_relu(e7)), test=test)
        d1 = F.concat((F.dropout(self.b1_d(self.dc1(F.relu(e8)), test=test), train=dropout), e7))
        d2 = F.concat((F.dropout(self.b2_d(self.dc2(F.relu(d1)), test=test), train=dropout), e6))
        d3 = F.concat((F.dropout(self.b3_d(self.dc3(F.relu(d2)), test=test), train=dropout), e5))
        d4 = F.concat((self.b4_d(self.dc4(F.relu(d3)), test=test), e4))
        d5 = F.concat((self.b5_d(self.dc5(F.relu(d4)), test=test), e3))
        d6 = F.concat((self.b6_d(self.dc6(F.relu(d5)), test=test), e2))
        d7 = F.concat((self.b7_d(self.dc7(F.relu(d6)), test=test), e1))
        y = F.tanh(self.dc8(F.relu(d7)))

        return y
Project: chainer-image-generation    Author: fukuta0614
def __init__(self, n_hidden, activate='sigmoid', size=64, ch=512, wscale=0.02):
        assert (size % 16 == 0)
        initial_size = size // 16
        self.n_hidden = n_hidden
        if activate == 'sigmoid':
            self.activate = F.sigmoid
        elif activate == 'tanh':
            self.activate = F.tanh
        else:
            raise ValueError('invalid activate function')
        self.ch = ch
        self.initial_size = initial_size
        w = chainer.initializers.Normal(wscale)
        super(Generator, self).__init__(
            l0=L.Linear(self.n_hidden, initial_size * initial_size * ch, initialW=w),
            dc1=L.Deconvolution2D(ch // 1, ch // 2, 4, 2, 1, initialW=w),
            dc2=L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=w),
            dc3=L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=w),
            dc4=L.Deconvolution2D(ch // 8, 3, 4, 2, 1, initialW=w),
            bn0=L.BatchNormalization(initial_size * initial_size * ch),
            bn1=L.BatchNormalization(ch // 2),
            bn2=L.BatchNormalization(ch // 4),
            bn3=L.BatchNormalization(ch // 8),
        )
Project: chainer-image-generation    Author: fukuta0614
def __init__(self, n_hidden, activate='sigmoid', size=64, ch=512, wscale=0.02):
        assert (size % 8 == 0)
        initial_size = size // 8
        self.n_hidden = n_hidden
        self.ch = ch
        self.initial_size = initial_size
        if activate == 'sigmoid':
            self.activate = F.sigmoid
        elif activate == 'tanh':
            self.activate = F.tanh
        else:
            raise ValueError('invalid activate function')
        w = chainer.initializers.Normal(wscale)
        super(Generator2, self).__init__(
            l0=L.Linear(self.n_hidden, initial_size * initial_size * ch, initialW=w),
            dc1=L.Deconvolution2D(ch // 1, ch // 2, 4, 2, 1, initialW=w),
            dc2=L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=w),
            dc3=L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=w),
            dc4=L.Deconvolution2D(ch // 8, 3, 3, 1, 1, initialW=w),
            bn0=L.BatchNormalization(initial_size * initial_size * ch),
            bn1=L.BatchNormalization(ch // 2),
            bn2=L.BatchNormalization(ch // 4),
            bn3=L.BatchNormalization(ch // 8),
        )
Project: chainer-image-generation    Author: fukuta0614
def __init__(self, n_hidden, activate='sigmoid', size=64, ch=512, wscale=0.02):
        assert (size % 8 == 0)
        initial_size = size // 8
        self.n_hidden = n_hidden
        if activate == 'sigmoid':
            self.activate = F.sigmoid
        elif activate == 'tanh':
            self.activate = F.tanh
        else:
            raise ValueError('invalid activate function')
        self.ch = ch
        self.initial_size = initial_size
        w = chainer.initializers.Normal(wscale)
        super(Generator, self).__init__(
            l0=L.Linear(self.n_hidden, initial_size * initial_size * ch, initialW=w),
            dc1=L.Deconvolution2D(ch // 1, ch // 2, 4, 2, 1, initialW=w),
            dc2=L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=w),
            dc3=L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=w),
            dc4=L.Deconvolution2D(ch // 8, 3, 3, 1, 1, initialW=w),
        )
Project: chainer-image-generation    Author: fukuta0614
def __call__(self, x, train=True):
        h = F.relu(self.conv1(x))
        h = F.relu(self.conv2(h))
        h = F.relu(self.conv3(h))
        h = self.res1(h, train)
        h = self.res2(h, train)
        h = self.res3(h, train)
        h = self.res4(h, train)
        h = self.res5(h, train)
        h = self.res6(h, train)
        h = self.res7(h, train)
        h = self.res8(h, train)
        h = self.res9(h, train)
        h = F.relu(self.dc1(h))
        h = F.relu(self.dc2(h))
        h = self.dc3(h)

        return F.tanh(h)
Project: Semantic-Segmentation-using-Adversarial-Networks    Author: oyam
def __call__(self, x):
        h = F.relu(self.conv1_1(x))
        h = F.relu(self.conv1_2(h))
        h = F.max_pooling_2d(h, 2, stride=2)
        h = F.relu(self.conv2_1(h))
        h = F.relu(self.conv2_2(h))
        h = F.max_pooling_2d(h, 2, stride=2)
        h = F.relu(self.conv3_1(h))
        h = F.relu(self.conv3_2(h))
        h = F.max_pooling_2d(h, 2, stride=2)
        h = F.relu(self.conv4_1(h))
        h = F.relu(self.conv4_2(h))
        h = F.spatial_pyramid_pooling_2d(h, 3, F.MaxPooling2D)
        h = F.tanh(self.fc4(h))
        h = F.dropout(h, ratio=.5, train=self.train)
        h = F.tanh(self.fc5(h))
        h = F.dropout(h, ratio=.5, train=self.train)
        h = self.fc6(h)
        return h
Project: unrolled-gan    Author: musyoku
def to_function(self):
        if self.nonlinearity.lower() == "clipped_relu":
            return clipped_relu()
        if self.nonlinearity.lower() == "crelu":
            return crelu()
        if self.nonlinearity.lower() == "elu":
            return elu()
        if self.nonlinearity.lower() == "hard_sigmoid":
            return hard_sigmoid()
        if self.nonlinearity.lower() == "leaky_relu":
            return leaky_relu()
        if self.nonlinearity.lower() == "relu":
            return relu()
        if self.nonlinearity.lower() == "sigmoid":
            return sigmoid()
        if self.nonlinearity.lower() == "softmax":
            return softmax()
        if self.nonlinearity.lower() == "softplus":
            return softplus()
        if self.nonlinearity.lower() == "tanh":
            return tanh()
        raise NotImplementedError()
Project: wavenet    Author: rampage644
def __call__(self, v, h, label):
        v_t = self.vertical_conv_t(v)
        v_s = self.vertical_conv_s(v)
        to_vertical_t = self.v_to_h_conv_t(v_t)
        to_vertical_s = self.v_to_h_conv_s(v_s)

        # v_gate = self.vertical_gate_conv(v)
        # label bias is added to both vertical and horizontal conv
        # here we take only shape as it should be the same
        label = F.broadcast_to(F.expand_dims(F.expand_dims(self.label(label), -1), -1), v_t.shape)
        v_t, v_s = v_t + label, v_s + label
        v = F.tanh(v_t) * F.sigmoid(v_s)

        h_t = self.horizontal_conv_t(h)
        h_s = self.horizontal_conv_s(h)
        h_t, h_s = h_t + to_vertical_t + label, h_s + to_vertical_s + label
        h = self.horizontal_output(F.tanh(h_t) * F.sigmoid(h_s))

        return v, h
Project: chainer-cf-nade    Author: dsanno
def __call__(self, x1, train=True):
        """
        in_type:
            x1: float32
        in_shape:
            x1: (batch_size, train_item_num * rating_num)
        out_type: float32
        out_shape: (batch_size, hidden_num)
        """

        xp = cuda.get_array_module(x1.data)
        h = self.a(x1)
        if hasattr(self, 'b'):
            h = self.b(h)
#        h = F.dropout(h, train=train)
        return F.tanh(h)
Project: NlpUtil    Author: trtd56
def __call__(self, x):
        if not hasattr(self, 'encoding') or self.encoding is None:
            self.batch_size = x.shape[0]
            self.init()
        dims = len(x.shape) - 1
        f, z, o = F.split_axis(self.pre(x), 3, axis=dims)
        f = F.sigmoid(f)
        z = (1 - f) * F.tanh(z)
        o = F.sigmoid(o)

        if dims == 2:
            self.c = strnn(f, z, self.c[:self.batch_size])
        else:
            self.c = f * self.c + z

        if self.attention:
            context = attention_sum(self.encoding, self.c)
            self.h = o * self.o(F.concat((self.c, context), axis=dims))
        else:
            self.h = self.c * o

        self.x = x
        return self.h
Project: LSGAN    Author: musyoku
def to_function(self):
        if self.nonlinearity.lower() == "clipped_relu":
            return clipped_relu()
        if self.nonlinearity.lower() == "crelu":
            return crelu()
        if self.nonlinearity.lower() == "elu":
            return elu()
        if self.nonlinearity.lower() == "hard_sigmoid":
            return hard_sigmoid()
        if self.nonlinearity.lower() == "leaky_relu":
            return leaky_relu()
        if self.nonlinearity.lower() == "relu":
            return relu()
        if self.nonlinearity.lower() == "sigmoid":
            return sigmoid()
        if self.nonlinearity.lower() == "softmax":
            return softmax()
        if self.nonlinearity.lower() == "softplus":
            return softplus()
        if self.nonlinearity.lower() == "tanh":
            return tanh()
        raise NotImplementedError()
Project: adgm    Author: musyoku
def to_function(self):
        if self.nonlinearity.lower() == "clipped_relu":
            return clipped_relu()
        if self.nonlinearity.lower() == "crelu":
            return crelu()
        if self.nonlinearity.lower() == "elu":
            return elu()
        if self.nonlinearity.lower() == "hard_sigmoid":
            return hard_sigmoid()
        if self.nonlinearity.lower() == "leaky_relu":
            return leaky_relu()
        if self.nonlinearity.lower() == "relu":
            return relu()
        if self.nonlinearity.lower() == "sigmoid":
            return sigmoid()
        if self.nonlinearity.lower() == "softmax":
            return softmax()
        if self.nonlinearity.lower() == "softplus":
            return softplus()
        if self.nonlinearity.lower() == "tanh":
            return tanh()
        raise NotImplementedError()
Project: Multitask-and-Transfer-Learning    Author: AI-ON
def __call__(self, x):
        z = self.W_z(x)
        h_bar = self.W(x)
        if self.h is not None:
            r = F.sigmoid(self.W_r(x) + self.U_r(self.h))
            z += self.U_z(self.h)
            h_bar += self.U(r * self.h)
        z = F.sigmoid(z)
        h_bar = F.tanh(h_bar)

        if self.h is not None:
            h_new = F.linear_interpolate(z, h_bar, self.h)
        else:
            h_new = z * h_bar
        self.h = h_new  # save the state
        return h_new
Project: chainer-gan-experiments    Author: Aixile
def differentiable_backward(self, g):
        if self.normalize_input:
            raise NotImplementedError

        if self.activation is F.leaky_relu:
            g = backward_leaky_relu(self.x, g)
        elif self.activation is F.relu:
            g = backward_relu(self.x, g)
        elif self.activation is F.tanh:
            g = backward_tanh(self.x, g)
        elif self.activation is F.sigmoid:
            g = backward_sigmoid(self.x, g)
        elif self.activation is not None:
            raise NotImplementedError

        if self.norm == 'ln':
            g = backward_layernormalization(self.nx, g, self.n)
        elif self.norm is not None:
            raise NotImplementedError

        if self.nn == 'down_conv' or self.nn == 'conv':
            g = backward_convolution(None, g, self.c)
        elif self.nn == 'linear':
            g = backward_linear(None, g, self.c)
        elif self.nn == 'up_deconv':
            g = backward_deconvolution(None, g, self.c)
        else:
            raise NotImplementedError

        return g
Project: chainer-gan-experiments    Author: Aixile
def __init__(self, latent=128, out_ch=3, base_size=1024, use_bn=True, up_layers=4, upsampling='up_deconv'):
        layers = {}

        self.up_layers = up_layers
        self.base_size = base_size
        self.latent = latent

        if use_bn:
            norm = 'bn'
            w = chainer.initializers.Normal(0.02)
        else:
            norm = None
            w = None

        base = base_size

        layers['c_first'] = NNBlock(latent, 4*4*base, nn='linear', norm=norm, w_init=w)

        for i in range(up_layers-1):
            layers['c'+str(i)] = NNBlock(base, base//2, nn=upsampling, norm=norm, w_init=w)
            base = base//2

        layers['c'+str(up_layers-1)] = NNBlock(base, out_ch, nn=upsampling, norm=None, w_init=w, activation=F.tanh)
        #print(layers)

        super(DCGANGenerator, self).__init__(**layers)
Project: vaelm    Author: TatsuyaShirakawa
def __call__(self, h, train=True, dpratio=0.5):
        h = F.dropout(h, train=train, ratio=dpratio)
        for i in range(self.num_layers):
            h = F.tanh(self.get_l(i)(h))
        return (self.lmu(h), F.exp(self.lsigma(h)))
Project: chainerrl    Author: chainer
def scale_by_tanh(x, low, high):
    xp = cuda.get_array_module(x.data)
    scale = (high - low) / 2
    scale = xp.expand_dims(xp.asarray(scale, dtype=np.float32), axis=0)
    mean = (high + low) / 2
    mean = xp.expand_dims(xp.asarray(mean, dtype=np.float32), axis=0)
    return F.tanh(x) * scale + mean
Project: chainerrl    Author: chainer
def compute_mean_and_var(self, x):
        # mean = self.mean_layer(x)
        mean = F.tanh(self.mean_layer(x)) * 2.0
        var = F.softplus(self.var_layer(x))
        return mean, var
Project: chainerrl    Author: chainer
def compute_mean_and_var(self, x):
        # mean = self.mean_layer(x)
        mean = F.tanh(self.mean_layer(x)) * 2.0
        var = F.softplus(F.broadcast_to(self.var_layer(x), mean.data.shape))
        return mean, var
Project: chainer-cyclegan    Author: Aixile
def __init__(self):
        super(Generator_ResBlock_6, self).__init__(
            c1 = CBR(3, 32, bn=True, sample='none-7'),
            c2 = CBR(32, 64, bn=True, sample='down'),
            c3 = CBR(64, 128, bn=True, sample='down'),
            c4 = ResBlock(128, bn=True),
            c5 = ResBlock(128, bn=True),
            c6 = ResBlock(128, bn=True),
            c7 = ResBlock(128, bn=True),
            c8 = ResBlock(128, bn=True),
            c9 = ResBlock(128, bn=True),
            c10 = CBR(128, 64, bn=True, sample='up'),
            c11 = CBR(64, 32, bn=True, sample='up'),
            c12 = CBR(32, 3, bn=True, sample='none-7', activation=F.tanh)
        )
Project: chainer-fast-neuralstyle    Author: yusuketomoto
def __call__(self, x, test=False):
        h = self.b1(F.elu(self.c1(x)), test=test)
        h = self.b2(F.elu(self.c2(h)), test=test)
        h = self.b3(F.elu(self.c3(h)), test=test)
        h = self.r1(h, test=test)
        h = self.r2(h, test=test)
        h = self.r3(h, test=test)
        h = self.r4(h, test=test)
        h = self.r5(h, test=test)
        h = self.b4(F.elu(self.d1(h)), test=test)
        h = self.b5(F.elu(self.d2(h)), test=test)
        y = self.d3(h)
        return (F.tanh(y)+1)*127.5
Project: tensorboard-pytorch    Author: lanpa
def encode(self, x):
        h1 = F.tanh(self.le1(x))
        mu = self.le2_mu(h1)
        ln_var = self.le2_ln_var(h1)  # log(sigma**2)
        return mu, ln_var
Project: tensorboard-pytorch    Author: lanpa
def decode(self, z, sigmoid=True):
        h1 = F.tanh(self.ld1(z))
        h2 = self.ld2(h1)
        if sigmoid:
            return F.sigmoid(h2)
        else:
            return h2
Project: lencon    Author: kiyukuta
def _attend(self, p):
        p = self.xh(p)
        p = F.expand_dims(p, 1)
        p = F.broadcast_to(p, self.shape2)

        h = F.tanh(self.h + p)
        shape3 = (self.batchsize * self.src_len, self.dim_hid)
        h_reshaped = F.reshape(h, shape3)
        weight_reshaped = self.hw(h_reshaped)
        weight = F.reshape(weight_reshaped, (self.batchsize, self.src_len, 1))
        weight = F.where(self.mask, weight, self.minf)
        attention = F.softmax(weight)
        return attention
Project: dgm    Author: ashwindcruz
def planar_flows(self,z):
        self.z_trans = []
        self.z_trans.append(z)
        self.phi = []

        for i in range(self.num_trans):
            flow_w_name = 'flow_w_' + str(i)
            flow_b_name = 'flow_b_' + str(i)
            flow_u_name = 'flow_u_' + str(i)

            h = self[flow_w_name](z)
            h = F.sum(h, axis=1)
            h = self[flow_b_name](h)
            h = F.tanh(h)
            h_tanh = h

            dim_latent = z.shape[1]
            h = F.transpose(F.tile(h, (dim_latent,1)))
            h = self[flow_u_name](h)

            z += h

            self.z_trans.append(z)

            # Calculate and store the phi term
            h_tanh_derivative = 1-(h_tanh*h_tanh)
            h_tanh_derivative = F.transpose(F.tile(h_tanh_derivative, (dim_latent,1))) 

            phi = self[flow_w_name](h_tanh_derivative) # Equation (11)
            self.phi.append(phi)

        return z
Project: dgm    Author: ashwindcruz
def planar_flows(self,z):
        self.z_trans = []
        self.z_trans.append(z)
        self.phi = []

        for i in range(self.num_trans):
            flow_w_name = 'flow_w_' + str(i)
            flow_b_name = 'flow_b_' + str(i)
            flow_u_name = 'flow_u_' + str(i)

            h = self[flow_w_name](z)
            h = F.sum(h, axis=1)
            h = self[flow_b_name](h)
            h = F.tanh(h)
            h_tanh = h

            dim_latent = z.shape[1]
            h = F.transpose(F.tile(h, (dim_latent,1)))
            h = self[flow_u_name](h)

            z += h

            self.z_trans.append(z)

            # Calculate and store the phi term
            h_tanh_derivative = 1-(h_tanh*h_tanh)
            h_tanh_derivative = F.transpose(F.tile(h_tanh_derivative, (dim_latent,1))) 

            phi = self[flow_w_name](h_tanh_derivative) # Equation (11)
            self.phi.append(phi)

        return z
Project: chainer-speech-recognition    Author: musyoku
def Tanh():
    return functions.tanh

Project: chainer-deconv    Author: germanRos
def encode(self, x):
        h1 = F.tanh(self.le1(x))
        mu = self.le2_mu(h1)
        ln_var = self.le2_ln_var(h1)  # log(sigma**2)
        return mu, ln_var
Project: chainer-deconv    Author: germanRos
def decode(self, z, sigmoid=True):
        h1 = F.tanh(self.ld1(z))
        h2 = self.ld2(h1)
        if sigmoid:
            return F.sigmoid(h2)
        else:
            return h2
Project: chainer-deconv    Author: germanRos
def node(self, left, right):
        return F.tanh(self.l(F.concat((left, right))))
Project: chainer-deconv    Author: germanRos
def forward(self):
        x = chainer.Variable(self.x)
        return functions.tanh(x, use_cudnn=self.use_cudnn)
Project: chainer-deconv    Author: germanRos
def check_forward(self, x_data, use_cudnn=True):
        x = chainer.Variable(x_data)
        y = functions.tanh(x, use_cudnn=use_cudnn)
        self.assertEqual(y.data.dtype, self.dtype)
        y_expect = functions.tanh(chainer.Variable(self.x))
        gradient_check.assert_allclose(y_expect.data, y.data)
Project: stock_dqn_f    Author: wdy06
def Q_func(self, state, train=True):

        test = not train

        s = Variable(state)
        h = F.tanh(self.bn1(self.fc1(s), test=test))
        h = F.tanh(self.bn2(self.fc2(h), test=test))
        h = F.tanh(self.bn3(self.fc3(h), test=test))
        h = F.tanh(self.bn4(self.fc4(h), test=test))
        h = F.tanh(self.bn5(self.fc5(h), test=test))
        Q = self.q_value(h)

        return Q
Project: stock_dqn_f    Author: wdy06
def Q_func(self, state):

        s = Variable(state)
        h = F.tanh(self.fc1(s))
        h = F.tanh(self.fc2(h))
        h = F.tanh(self.fc3(h))
        h = F.tanh(self.fc4(h))
        h = F.tanh(self.fc5(h))
        Q = self.q_value(h)

        return Q
Project: chainer_nmt    Author: odashi
def _initialize_decoder(self, pc, p):
    return F.tanh(self.pc_qc(pc)), F.tanh(self.p_q(p))
Project: chainer_nmt    Author: odashi
def _initialize_decoder(self, fc, bc, f, b):
    return (
        F.tanh(self.fc_pc(fc) + self.bc_pc(bc)),
        F.tanh(self.f_p(f) + self.b_p(b)))
Project: nn_parsers    Author: odashi
def __call__(self, x):
    return functions.tanh(self.w_xy(x))