Python chainer.links module: EmbedID() usage examples

The following code examples, extracted from open-source Python projects, illustrate how to use chainer.links.EmbedID().
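
A minimal, self-contained warm-up sketch first (not taken from any of the projects below): L.EmbedID maps integer token IDs to learned embedding vectors, and IDs equal to ignore_label come out as zero rows.

import numpy as np
import chainer.links as L

embed = L.EmbedID(10, 4, ignore_label=-1)    # vocabulary of 10, 4-dim embeddings
x = np.array([1, 3, 5, -1], dtype=np.int32)  # -1 marks padding
y = embed(x)                                 # Variable of shape (4, 4)
# y.data[3] is a zero vector because its ID equals ignore_label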

Project: vaelm    Author: TatsuyaShirakawa
def __init__(self, vocab_size, hidden_size, num_layers, ignore_label=-1):

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.ignore_label = ignore_label

        args = {'embed': L.EmbedID(vocab_size, hidden_size, ignore_label=ignore_label)}
        for i in range(self.num_layers):
            args.update({'l{}'.format(i): L.StatelessLSTM(hidden_size, hidden_size)})
            setattr(self, 'h{}'.format(i), None)
            setattr(self, 'c{}'.format(i), None)

        super(Encoder, self).__init__(**args)

        for param in self.params():
            param.data[...] = np.random.uniform(-0.1, 0.1, param.data.shape)
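
Several of the examples below re-initialize every parameter uniformly in [-0.1, 0.1] after construction, as above. For the embedding alone, the same effect can be had at construction time through a Chainer initializer; a sketch with illustrative sizes:

import chainer
import chainer.links as L

# Uniform(-0.1, 0.1) at construction time instead of overwriting param.data:
embed = L.EmbedID(5000, 256, ignore_label=-1,
                  initialW=chainer.initializers.Uniform(scale=0.1))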
Project: vaelm    Author: TatsuyaShirakawa
def __init__(self, vocab_size, hidden_size, num_layers, ignore_label=-1):

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.ignore_label = ignore_label

        args = {'embed': L.EmbedID(vocab_size, hidden_size, ignore_label=ignore_label),        
                'hy': L.Linear(hidden_size, vocab_size)}

        for i in range(self.num_layers):
            args.update({'l{}'.format(i): L.StatelessLSTM(hidden_size, hidden_size)})
            setattr(self, 'h{}'.format(i), None)
            setattr(self, 'c{}'.format(i), None)

        super(RNNLM, self).__init__(**args)

        for param in self.params():
            param.data[...] = np.random.uniform(-0.1, 0.1, param.data.shape)

        self.reset_state()
Project: LSTMVAE    Author: ashwatthaman
def __init__(self,args):
        self.setArgs(args)
        super(VAE, self).__init__(
                embed = L.EmbedID(self.n_vocab,self.n_embed),
                #VAEenc
                enc_f = LSTM(self.n_layers,self.n_embed, self.out_size, dropout=self.drop_ratio),
                enc_b = LSTM(self.n_layers,self.n_embed, self.out_size, dropout=self.drop_ratio),

                le2_mu=L.Linear(4*self.out_size, self.n_latent),
                le2_ln_var=L.Linear(4*self.out_size, self.n_latent),
                #VAEdec
                ld_h = L.Linear(self.n_latent,2*self.out_size),
                ld_c = L.Linear(self.n_latent,2*self.out_size),

                dec = LSTM(self.n_layers,self.n_embed, 2*self.out_size, dropout=self.drop_ratio),
                h2w = L.Linear(2*self.out_size,self.n_vocab),
        )
Project: LSTMVAE    Author: ashwatthaman
def __init__(self,args):
        self.setArgs(args)
        super(CVAEHidden, self).__init__(
                categ_enc_b_h = L.EmbedID(self.categ_size,self.out_size),
                categ_enc_b_c = L.EmbedID(self.categ_size,self.out_size),
                categ_enc_f_h = L.EmbedID(self.categ_size,self.out_size),
                categ_enc_f_c = L.EmbedID(self.categ_size,self.out_size),
                categ_dec_h   = L.EmbedID(self.categ_size,2*self.out_size),
                categ_dec_c   = L.EmbedID(self.categ_size,2*self.out_size),
                embed = L.EmbedID(self.n_vocab,self.n_embed),
                #VAEenc
                enc_f = LSTM(self.n_layers,self.n_embed, self.out_size, dropout=self.drop_ratio),
                enc_b = LSTM(self.n_layers,self.n_embed, self.out_size, dropout=self.drop_ratio),

                le2_mu=L.Linear(4*self.out_size, self.n_latent),
                le2_ln_var=L.Linear(4*self.out_size, self.n_latent),
                #VAEdec
                ld_h = L.Linear(self.n_latent,2*self.out_size),
                ld_c = L.Linear(self.n_latent,2*self.out_size),

                dec = LSTM(self.n_layers,self.n_embed, 2*self.out_size, dropout=self.drop_ratio),
                h2w = L.Linear(2*self.out_size,self.n_vocab),
        )
Project: NANHM-for-GEC    Author: shinochin
def __init__(self, n_layers, n_source_vocab, n_target_vocab, n_source_char, n_units, n_sentences):
        super(Seq2seq, self).__init__(
            embed_xw=L.EmbedID(n_source_vocab, n_units),
            embed_xc=L.EmbedID(n_source_char, n_units),
            embed_y=L.EmbedID(n_target_vocab, n_units * 2),
            encoder_fw=L.NStepGRU(n_layers, n_units, n_units, 0.1),
            encoder_bw=L.NStepGRU(n_layers, n_units, n_units, 0.1),
            encoder_fc=L.NStepGRU(n_layers, n_units, n_units, 0.1),
            encoder_bc=L.NStepGRU(n_layers, n_units, n_units, 0.1),
            decoder=My.NStepGRU(n_layers, n_units * 2, n_units * 2, 0.1),
            W=L.Linear(n_units * 2, n_target_vocab),
        )
        self.n_layers = n_layers
        self.n_units = n_units
        self.n_params = 6
        self.n_sentences = n_sentences
        self.n_process = 0
        self.n_sen = len(str(n_sentences))
Project: NANHM-for-GEC    Author: shinochin
def __init__(self, n_layers, n_source_vocab, n_target_vocab, n_source_char, n_target_char, n_units, n_sentences):
        super(Seq2seq, self).__init__(
            embed_x=L.EmbedID(n_source_vocab, n_units),
            embed_y=L.EmbedID(n_target_vocab, n_units * 2),
            embed_xc=L.EmbedID(n_source_char, n_units),
            embed_yc=L.EmbedID(n_target_char, n_units),
            encoder_f=L.NStepGRU(n_layers, n_units, n_units, 0.1),
            encoder_b=L.NStepGRU(n_layers, n_units, n_units, 0.1),
            char_encoder=L.NStepGRU(n_layers, n_units, n_units, 0.1),
            decoder=My.NStepGRU(n_layers, n_units * 2, n_units * 2, 0.1),
            char_decoder=L.NStepGRU(n_layers, n_units, n_units, 0.1),
            char_att_decoder=My.NStepGRU(n_layers, n_units, n_units, 0.1),
            W=L.Linear(n_units * 2, n_target_vocab),
            W_hat=L.Linear(n_units * 4, n_units),
            W_char=L.Linear(n_units, n_target_char),
        )
        self.n_layers = n_layers
        self.n_units = n_units
        self.n_params = 7
        self.n_sentences = n_sentences
        self.n_process = 0
        self.n_sen = len(str(n_sentences))
Project: NANHM-for-GEC    Author: shinochin
def __init__(self, n_layers, n_source_vocab, n_target_vocab, n_source_char, n_target_char, n_units):
        super(Seq2seq, self).__init__(
            embed_x=L.EmbedID(n_source_vocab, n_units),
            embed_y=L.EmbedID(n_target_vocab, n_units * 2),
            embed_xc=L.EmbedID(n_source_char, n_units),
            embed_yc=L.EmbedID(n_target_char, n_units),
            encoder_f=L.NStepGRU(n_layers, n_units, n_units, 0.1),
            encoder_b=L.NStepGRU(n_layers, n_units, n_units, 0.1),
            char_encoder=L.NStepGRU(n_layers, n_units, n_units, 0.1),
            decoder=My.NStepGRU(n_layers, n_units * 2, n_units * 2, 0.1),
            char_decoder=L.NStepGRU(n_layers, n_units, n_units, 0.1),
            char_att_decoder=My.NStepGRU(n_layers, n_units, n_units, 0.1),
            W=L.Linear(n_units * 2, n_target_vocab),
            W_hat=L.Linear(n_units * 4, n_units),
            W_char=L.Linear(n_units, n_target_char),
        )
        self.n_layers = n_layers
        self.n_units = n_units
        self.n_params = 6
Project: deep-crf    Author: aonotas
def __init__(self, emb_dim=100, window_size=3, init_emb=None,
                 hidden_dim=100, vocab_size=0, splitter=u' ', add_dim=0,
                 PAD_IDX=None):
        """
        Neural network tagger of dos Santos and Zadrozny (ICML 2014).
        """
        assert window_size % 2 == 1, 'window_size must be odd.'
        dim = emb_dim
        hidden_dim = hidden_dim + add_dim
        self.add_dim = add_dim
        self.hidden_dim = hidden_dim
        super(BaseCNNEncoder, self).__init__(emb=L.EmbedID(vocab_size, emb_dim, ignore_label=-1),
                                             conv=L.Convolution2D(1, hidden_dim, ksize=(window_size, dim),
                                                                  stride=(1, dim), pad=(window_size // 2, 0)))
        self.splitter = splitter
        self.char_level_flag = True if self.splitter is None else False
        self.word_level_flag = not self.char_level_flag
        self.emb_dim = emb_dim
        self.window_size = window_size
        self.dim = dim
        self.PAD_IDX = PAD_IDX
        self.train = True
        # initialize embeddings
        if init_emb is not None:
            self.emb.W = init_emb
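
Two ways of injecting pretrained vectors appear in these examples: assigning to emb.W after construction (above) and passing initialW at construction (as in the blstm-cws example below). A sketch of the latter, assuming a float32 matrix of shape (vocab_size, emb_dim) saved as vectors.npy:

import numpy as np
import chainer.links as L

pretrained = np.load('vectors.npy')  # assumed pretrained matrix, (vocab_size, emb_dim)
vocab_size, emb_dim = pretrained.shape
emb = L.EmbedID(vocab_size, emb_dim, initialW=pretrained, ignore_label=-1)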
Project: lencon    Author: kiyukuta
def __init__(self,
                 src_vcb_num,
                 trg_vcb_num,
                 dim_emb,
                 dim_hid):

        lstm_init_bias = get_lstm_init_bias(dim_hid)

        super().__init__(
            src_emb=L.EmbedID(src_vcb_num, dim_emb, ignore_label=-1),
            encoder=BiLstmEncoder(dim_emb, dim_hid),
            # decoder (TODO: make Decoder class)
            trg_emb=L.EmbedID(trg_vcb_num, dim_emb, ignore_label=-1),
            eh=L.Linear(dim_emb, dim_hid * 4, initial_bias=lstm_init_bias),
            hh=L.Linear(dim_hid, dim_hid * 4, nobias=True),
            ho=L.Linear(dim_hid, trg_vcb_num),
        )

        self.dim_hid = dim_hid
        self.dim_emb = dim_emb
        self.src_vcb_num = src_vcb_num
        self.trg_vcb_num = trg_vcb_num
Project: blstm-cws    Author: chantera
def __init__(self, embeddings, n_labels, dropout=0.5, train=True):
        vocab_size, embed_size = embeddings.shape
        feature_size = embed_size
        super(BLSTMBase, self).__init__(
            embed=L.EmbedID(
                in_size=vocab_size,
                out_size=embed_size,
                initialW=embeddings,
            ),
            f_lstm=LSTM(feature_size, feature_size, dropout),
            b_lstm=LSTM(feature_size, feature_size, dropout),
            linear=L.Linear(feature_size * 2, n_labels),
        )
        self._dropout = dropout
        self._n_labels = n_labels
        self.train = train
Project: chainer_nmt    Author: odashi
def __init__(
      self,
      src_vocab_size,
      trg_vocab_size,
      embed_size,
      hidden_size):
    super(SimpleEncoderDecoder, self).__init__(
        # Encoder
        x_i = L.EmbedID(src_vocab_size, embed_size),
        i_p = L.Linear(embed_size, 4 * hidden_size, nobias=True),
        p_p = L.Linear(hidden_size, 4 * hidden_size),
        # Decoder initializer
        pc_qc = L.Linear(hidden_size, hidden_size),
        p_q = L.Linear(hidden_size, hidden_size),
        # Decoder
        y_j = L.EmbedID(trg_vocab_size, embed_size),
        j_q = L.Linear(embed_size, 4 * hidden_size, nobias=True),
        q_q = L.Linear(hidden_size, 4 * hidden_size),
        q_z = L.Linear(hidden_size, trg_vocab_size))
    self.src_vocab_size = src_vocab_size
    self.trg_vocab_size = trg_vocab_size
    self.embed_size = embed_size
    self.hidden_size = hidden_size
Project: convolutional_seq2seq    Author: soskek
def __init__(self, n_layers, n_source_vocab, n_target_vocab, n_units,
                 max_length=50, dropout=0.2, width=3):
        init_emb = chainer.initializers.Normal(0.1)
        init_out = VarInNormal(1.)
        super(Seq2seq, self).__init__(
            embed_x=L.EmbedID(n_source_vocab, n_units, ignore_label=-1,
                              initialW=init_emb),
            embed_y=L.EmbedID(n_target_vocab, n_units, ignore_label=-1,
                              initialW=init_emb),
            embed_position_x=L.EmbedID(max_length, n_units,
                                       initialW=init_emb),
            embed_position_y=L.EmbedID(max_length, n_units,
                                       initialW=init_emb),
            encoder=ConvGLUEncoder(n_layers, n_units, width, dropout),
            decoder=ConvGLUDecoder(n_layers, n_units, width, dropout),
            W=L.Linear(n_units, n_target_vocab, initialW=init_out),
        )
        self.n_layers = n_layers
        self.n_units = n_units
        self.n_target_vocab = n_target_vocab
        self.max_length = max_length
        self.width = width
        self.dropout = dropout
Project: wavenet    Author: rampage644
def __init__(self, in_channels, out_channels, filter_size, mask='B', nobias=False):
        super(ResidualBlock, self).__init__(
            vertical_conv_t=CroppedConvolution(
                in_channels, out_channels, ksize=[filter_size//2+1, filter_size],
                pad=[filter_size//2+1, filter_size//2]),
            vertical_conv_s=CroppedConvolution(
                in_channels, out_channels, ksize=[filter_size//2+1, filter_size],
                pad=[filter_size//2+1, filter_size//2]),
            v_to_h_conv_t=L.Convolution2D(out_channels, out_channels, 1),
            v_to_h_conv_s=L.Convolution2D(out_channels, out_channels, 1),

            horizontal_conv_t=MaskedConvolution2D(
                in_channels, out_channels, ksize=[1, filter_size],
                pad=[0, filter_size // 2], mask=mask),
            horizontal_conv_s=MaskedConvolution2D(
                in_channels, out_channels, ksize=[1, filter_size],
                pad=[0, filter_size // 2], mask=mask),

            horizontal_output=MaskedConvolution2D(out_channels, out_channels, 1, mask=mask),
            label=L.EmbedID(10, out_channels)
        )
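
Here EmbedID is not a token embedding: label=L.EmbedID(10, out_channels) acts as a lookup table of per-class (digit 0-9) conditioning vectors. A sketch of the broadcast-add pattern such conditioning typically uses (assumed shapes, not the project's exact code):

import numpy as np
import chainer.links as L

label_embed = L.EmbedID(10, 16)                # 10 class labels -> 16 channels
labels = np.array([3, 7], dtype=np.int32)      # batch of two labels
h = np.zeros((2, 16, 8, 8), dtype=np.float32)  # feature maps (N, C, H, W)
bias = label_embed(labels)                     # Variable of shape (2, 16)
h = h + bias.data[:, :, None, None]            # broadcast over spatial dims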
Project: sentence_similarity    Author: MorinoseiMorizo
def __init__(self, N_SOURCE_VOCAB, N_TARGET_VOCAB, N_EMBED, N_HIDDEN, train=True):
        super(EncDecModel, self).__init__(
                # Encoder
                enc_embed=L.EmbedID(N_SOURCE_VOCAB, N_EMBED),
                enc_lstm_1=L.LSTM(N_EMBED, N_HIDDEN),
                enc_lstm_2=L.LSTM(N_HIDDEN, N_HIDDEN),
                # Decoder initializer
                enc_dec_1_c=L.Linear(N_HIDDEN, N_HIDDEN),
                enc_dec_1_h=L.Linear(N_HIDDEN, N_HIDDEN),
                enc_dec_2_c=L.Linear(N_HIDDEN, N_HIDDEN),
                enc_dec_2_h=L.Linear(N_HIDDEN, N_HIDDEN),
                # Decoder
                dec_embed=L.EmbedID(N_TARGET_VOCAB, N_EMBED),
                dec_lstm_1=L.LSTM(N_EMBED, N_HIDDEN),
                dec_lstm_2=L.LSTM(N_HIDDEN, N_HIDDEN),
                dec_output=L.Linear(N_HIDDEN, N_TARGET_VOCAB),
        )
        for param in self.params():
            param.data[...] = self.xp.random.uniform(-0.08, 0.08, param.data.shape)
        self.train = train
        self.src_vocab_size = N_SOURCE_VOCAB
        self.trg_vocab_size = N_TARGET_VOCAB
        self.embed_size = N_EMBED
        self.hidden_size = N_HIDDEN
Project: DSTC6-End-to-End-Conversation-Modeling    Author: dialogtekgeek
def __init__(self, n_layers, in_size, out_size, embed_size, hidden_size, proj_size, dropout=0.5):
        """Initialize encoder with structure parameters

        Args:
            n_layers (int): Number of layers.
            in_size (int): Dimensionality of input vectors.
            out_size (int): Dimensionality of output vectors.
            embed_size (int): Dimensionality of word embedding.
            hidden_size (int) : Dimensionality of hidden vectors.
            proj_size (int) : Dimensionality of projection before softmax.
            dropout (float): Dropout ratio.
        """
        super(LSTMDecoder, self).__init__(
            embed = L.EmbedID(in_size, embed_size),
            lstm = L.NStepLSTM(n_layers, embed_size, hidden_size, dropout),
            proj = L.Linear(hidden_size, proj_size),
            out = L.Linear(proj_size, out_size)
        )
        self.dropout = dropout
        for param in self.params():
            param.data[...] = np.random.uniform(-0.1, 0.1, param.data.shape)
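
L.NStepLSTM consumes a list of variable-length sequences rather than a padded batch; a minimal sketch of pairing it with EmbedID (sizes assumed):

import numpy as np
import chainer.links as L

embed = L.EmbedID(100, 8)
lstm = L.NStepLSTM(n_layers=1, in_size=8, out_size=16, dropout=0.5)
xs = [np.array([1, 2, 3], dtype=np.int32),
      np.array([4, 5], dtype=np.int32)]   # two sequences of different lengths
exs = [embed(x) for x in xs]              # list of (length, 8) embedded sequences
hy, cy, ys = lstm(None, None, exs)        # ys: list of (length, 16) outputs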
Project: Multitask-and-Transfer-Learning    Author: AI-ON
def __init__(self, embed_size, in_channels=None,
                 out_channels=16, ksize=(3, 3), stride=1,
                 pad=0, initialW=None):
        super(EmbeddingConv2D, self).__init__()

        self.embed_size = embed_size
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kh, self.kw = _pair(ksize)
        self.stride = _pair(stride)
        self.pad = _pair(pad)

        with self.init_scope():
            W_initializer = initializers._get_initializer(initialW)
            vec_size = self.out_channels * self.in_channels * self.kh * self.kw
            self.W_embedding = L.EmbedID(embed_size, vec_size, initialW=W_initializer)
            self.b_embedding = L.EmbedID(embed_size, out_channels)
Project: NANHM-for-GEC    Author: shinochin
def __init__(self, n_layers, n_source_vocab, n_target_vocab, n_source_char, n_units):
        super(Seq2seq, self).__init__(
            embed_xw=L.EmbedID(n_source_vocab, n_units),
            embed_xc=L.EmbedID(n_source_char, n_units),
            embed_y=L.EmbedID(n_target_vocab, n_units * 2),
            encoder_fw=L.NStepGRU(n_layers, n_units, n_units, 0.1),
            encoder_bw=L.NStepGRU(n_layers, n_units, n_units, 0.1),
            encoder_fc=L.NStepGRU(n_layers, n_units, n_units, 0.1),
            encoder_bc=L.NStepGRU(n_layers, n_units, n_units, 0.1),
            decoder=My.NStepGRU(n_layers, n_units * 2, n_units * 2, 0.1),
            W=L.Linear(n_units * 2, n_target_vocab),
        )
        self.n_layers = n_layers
        self.n_units = n_units
        self.n_params = 5
Project: NANHM-for-GEC    Author: shinochin
def __init__(self, n_layers, n_source_vocab, n_target_vocab, n_units):
        super(Seq2seq, self).__init__(
            embed_x=L.EmbedID(n_source_vocab, n_units),
            embed_y=L.EmbedID(n_target_vocab, n_units),
            encoder=L.NStepGRU(n_layers, n_units, n_units, 0.1),
            decoder=L.NStepGRU(n_layers, n_units, n_units, 0.1),
            W=L.Linear(n_units, n_target_vocab),
        )
        self.n_layers = n_layers
        self.n_units = n_units
Project: NANHM-for-GEC    Author: shinochin
def __init__(self, n_layers, n_source_vocab, n_target_vocab, n_source_char, n_target_char, n_units):
        super(Seq2seq, self).__init__(
            embed_x=L.EmbedID(n_source_vocab, n_units),
            embed_y=L.EmbedID(n_target_vocab, n_units * 2),
            embed_xc=L.EmbedID(n_source_char, n_units),
            embed_yc=L.EmbedID(n_target_char, n_units * 2),
            encoder_f=L.NStepGRU(n_layers, n_units, n_units, 0.1),
            encoder_b=L.NStepGRU(n_layers, n_units, n_units, 0.1),
            decoder=My.NStepGRU(n_layers, n_units * 2, n_units * 2, 0.1),
            #decoder_att=D.AttGRUdec(n_layers, n_units * 2, n_units * 2, n_target_vocab),
            W=L.Linear(n_units * 2, n_target_vocab),
        )
        self.n_layers = n_layers
        self.n_units = n_units
Project: NANHM-for-GEC    Author: shinochin
def __init__(self, n_layers, n_source_vocab, n_target_vocab, n_units):
        super(Seq2seq, self).__init__(
            embed_x=L.EmbedID(n_source_vocab, n_units),
            embed_y=L.EmbedID(n_target_vocab, n_units * 2),
            encoder_f=L.NStepGRU(n_layers, n_units, n_units, 0.1),
            encoder_b=L.NStepGRU(n_layers, n_units, n_units, 0.1),
            decoder=L.NStepGRU(n_layers, n_units * 2, n_units * 2, 0.1),
            W=L.Linear(n_units * 2, n_target_vocab),
        )
        self.n_layers = n_layers
        self.n_units = n_units
Project: depccg    Author: masashi-y
def __init__(self, model_path, word_dim=None, afix_dim=None, nlayers=2,
            hidden_dim=128, elu_dim=64, dep_dim=100, dropout_ratio=0.5):
        self.model_path = model_path
        defs_file = model_path + "/tagger_defs.txt"
        if word_dim is None:
            self.train = False
            Param.load(self, defs_file)
            self.extractor = FeatureExtractor(model_path)
        else:
            # training
            self.train = True
            p = Param(self)
            p.dep_dim = dep_dim
            p.word_dim = word_dim
            p.afix_dim = afix_dim
            p.hidden_dim = hidden_dim
            p.elu_dim = elu_dim
            p.nlayers = nlayers
            p.n_words = len(read_model_defs(model_path + "/words.txt"))
            p.n_suffixes = len(read_model_defs(model_path + "/suffixes.txt"))
            p.n_prefixes = len(read_model_defs(model_path + "/prefixes.txt"))
            p.targets = read_model_defs(model_path + "/target.txt")
            p.dump(defs_file)

        self.in_dim = self.word_dim + 8 * self.afix_dim
        self.dropout_ratio = dropout_ratio
        super(LSTMParser, self).__init__(
                emb_word=L.EmbedID(self.n_words, self.word_dim),
                emb_suf=L.EmbedID(self.n_suffixes, self.afix_dim, ignore_label=IGNORE),
                emb_prf=L.EmbedID(self.n_prefixes, self.afix_dim, ignore_label=IGNORE),
                lstm_f=L.NStepLSTM(nlayers, self.in_dim,
                    self.hidden_dim, self.dropout_ratio),
                lstm_b=L.NStepLSTM(nlayers, self.in_dim,
                    self.hidden_dim, self.dropout_ratio),
                linear_cat1=L.Linear(2 * self.hidden_dim, self.elu_dim),
                linear_cat2=L.Linear(self.elu_dim, len(self.targets)),
                linear_dep=L.Linear(2 * self.hidden_dim, self.dep_dim),
                linear_head=L.Linear(2 * self.hidden_dim, self.dep_dim),
                biaffine=Biaffine(self.dep_dim)
                )
Project: depccg    Author: masashi-y
def __init__(self, model_path, word_dim=None, afix_dim=None,
            nlayers=2, hidden_dim=128, relu_dim=64, dropout_ratio=0.5):
        self.model_path = model_path
        defs_file = model_path + "/tagger_defs.txt"
        if word_dim is None:
            self.train = False
            Param.load(self, defs_file)
            self.extractor = FeatureExtractor(model_path)
        else:
            self.train = True
            p = Param(self)
            p.word_dim = word_dim
            p.afix_dim = afix_dim
            p.hidden_dim = hidden_dim
            p.relu_dim = relu_dim
            p.nlayers = nlayers
            p.dump(defs_file)

        self.targets = read_model_defs(model_path + "/target.txt")
        self.words = read_model_defs(model_path + "/words.txt")
        self.suffixes = read_model_defs(model_path + "/suffixes.txt")
        self.prefixes = read_model_defs(model_path + "/prefixes.txt")
        self.in_dim = self.word_dim + 8 * self.afix_dim
        self.dropout_ratio = dropout_ratio
        super(LSTMTagger, self).__init__(
                emb_word=L.EmbedID(len(self.words), self.word_dim),
                emb_suf=L.EmbedID(len(self.suffixes), self.afix_dim, ignore_label=IGNORE),
                emb_prf=L.EmbedID(len(self.prefixes), self.afix_dim, ignore_label=IGNORE),
                lstm_f=L.NStepLSTM(nlayers, self.in_dim, self.hidden_dim, 0.),
                lstm_b=L.NStepLSTM(nlayers, self.in_dim, self.hidden_dim, 0.),
                linear1=L.Linear(2 * self.hidden_dim, self.relu_dim),
                linear2=L.Linear(self.relu_dim, len(self.targets)),
                )
Project: depccg    Author: masashi-y
def __init__(self, model_path, word_dim=None, char_dim=None, nlayers=2,
            hidden_dim=128, dep_dim=100, dropout_ratio=0.5):
        self.model_path = model_path
        defs_file = model_path + "/tagger_defs.txt"
        if word_dim is None:
            self.train = False
            Param.load(self, defs_file)
            self.extractor = FeatureExtractor(model_path)
        else:
            self.train = True
            p = Param(self)
            p.dep_dim = dep_dim
            p.word_dim = word_dim
            p.char_dim = char_dim
            p.hidden_dim = hidden_dim
            p.nlayers = nlayers
            p.n_words = len(read_model_defs(model_path + "/words.txt"))
            p.n_chars = len(read_model_defs(model_path + "/chars.txt"))
            p.targets = read_model_defs(model_path + "/target.txt")
            p.dump(defs_file)

        self.in_dim = self.word_dim + self.char_dim
        self.dropout_ratio = dropout_ratio
        super(BiaffineJaLSTMParser, self).__init__(
                emb_word=L.EmbedID(self.n_words, self.word_dim),
                emb_char=L.EmbedID(self.n_chars, 50, ignore_label=IGNORE),
                conv_char=L.Convolution2D(1, self.char_dim,
                    (3, 50), stride=1, pad=(1, 0)),
                lstm_f=L.NStepLSTM(self.nlayers, self.in_dim,
                    self.hidden_dim, 0.32),
                lstm_b=L.NStepLSTM(self.nlayers, self.in_dim,
                    self.hidden_dim, 0.32),
                arc_dep=L.Linear(2 * self.hidden_dim, self.dep_dim),
                arc_head=L.Linear(2 * self.hidden_dim, self.dep_dim),
                rel_dep=L.Linear(2 * self.hidden_dim, self.dep_dim),
                rel_head=L.Linear(2 * self.hidden_dim, self.dep_dim),
                biaffine_arc=Biaffine(self.dep_dim),
                biaffine_tag=L.Bilinear(self.dep_dim, self.dep_dim, len(self.targets))
                )
Project: depccg    Author: masashi-y
def __init__(self, model_path, word_dim=None, char_dim=None, nlayers=2,
            hidden_dim=128, relu_dim=64, dep_dim=100, dropout_ratio=0.5):
        self.model_path = model_path
        defs_file = model_path + "/tagger_defs.txt"
        if word_dim is None:
            # use as supertagger
            self.train = False
            Param.load(self, defs_file)
            self.extractor = FeatureExtractor(model_path)
        else:
            # training
            self.train = True
            p = Param(self)
            p.dep_dim = dep_dim
            p.word_dim = word_dim
            p.char_dim = char_dim
            p.hidden_dim = hidden_dim
            p.relu_dim = relu_dim
            p.nlayers = nlayers
            p.n_words = len(read_model_defs(model_path + "/words.txt"))
            p.n_chars = len(read_model_defs(model_path + "/chars.txt"))
            p.targets = read_model_defs(model_path + "/target.txt")
            p.dump(defs_file)

        self.in_dim = self.word_dim + self.char_dim
        self.dropout_ratio = dropout_ratio
        super(PeepHoleJaLSTMParser, self).__init__(
                emb_word=L.EmbedID(self.n_words, self.word_dim),
                emb_char=L.EmbedID(self.n_chars, 50, ignore_label=IGNORE),
                conv_char=L.Convolution2D(1, self.char_dim,
                    (3, 50), stride=1, pad=(1, 0)),
                lstm_f1=DyerLSTM(self.in_dim, self.hidden_dim),
                lstm_f2=DyerLSTM(self.hidden_dim, self.hidden_dim),
                lstm_b1=DyerLSTM(self.in_dim, self.hidden_dim),
                lstm_b2=DyerLSTM(self.hidden_dim, self.hidden_dim),
                linear_cat1=L.Linear(2 * self.hidden_dim, self.relu_dim),
                linear_cat2=L.Linear(self.relu_dim, len(self.targets)),
                linear_dep=L.Linear(2 * self.hidden_dim, self.dep_dim),
                linear_head=L.Linear(2 * self.hidden_dim, self.dep_dim),
                biaffine=Biaffine(self.dep_dim)
                )
Project: depccg    Author: masashi-y
def __init__(self, model_path, word_dim=None, afix_dim=None, nlayers=2,
            hidden_dim=128, dep_dim=100, dropout_ratio=0.5):
        self.model_path = model_path
        defs_file = model_path + "/tagger_defs.txt"
        if word_dim is None:
            self.train = False
            Param.load(self, defs_file)
            self.extractor = FeatureExtractor(model_path)
        else:
            # training
            self.train = True
            p = Param(self)
            p.dep_dim = dep_dim
            p.word_dim = word_dim
            p.afix_dim = afix_dim
            p.hidden_dim = hidden_dim
            p.nlayers = nlayers
            p.n_words = len(read_model_defs(model_path + "/words.txt"))
            p.n_suffixes = len(read_model_defs(model_path + "/suffixes.txt"))
            p.n_prefixes = len(read_model_defs(model_path + "/prefixes.txt"))
            p.targets = read_model_defs(model_path + "/target.txt")
            p.dump(defs_file)

        self.in_dim = self.word_dim + 8 * self.afix_dim
        self.dropout_ratio = dropout_ratio
        super(FastBiaffineLSTMParser, self).__init__(
                emb_word=L.EmbedID(self.n_words, self.word_dim, ignore_label=IGNORE),
                emb_suf=L.EmbedID(self.n_suffixes, self.afix_dim, ignore_label=IGNORE),
                emb_prf=L.EmbedID(self.n_prefixes, self.afix_dim, ignore_label=IGNORE),
                lstm_f=L.NStepLSTM(self.nlayers, self.in_dim, self.hidden_dim, 0.32),
                lstm_b=L.NStepLSTM(self.nlayers, self.in_dim, self.hidden_dim, 0.32),
                arc_dep=L.Linear(2 * self.hidden_dim, self.dep_dim),
                arc_head=L.Linear(2 * self.hidden_dim, self.dep_dim),
                rel_dep=L.Linear(2 * self.hidden_dim, self.dep_dim),
                rel_head=L.Linear(2 * self.hidden_dim, self.dep_dim),
                biaffine_arc=Biaffine(self.dep_dim),
                biaffine_tag=Bilinear(self.dep_dim, self.dep_dim, len(self.targets))
                )
Project: depccg    Author: masashi-y
def __init__(self, model_path, word_dim=None, char_dim=None):
        self.model_path = model_path
        defs_file = model_path + "/tagger_defs.txt"
        if word_dim is None:
            # use as supertagger
            with open(defs_file) as f:
                defs = json.load(f)
            self.word_dim = defs["word_dim"]
            self.char_dim = defs["char_dim"]
        else:
            # training
            self.word_dim = word_dim
            self.char_dim = char_dim
            with open(defs_file, "w") as f:
                json.dump({"model": self.__class__.__name__,
                           "word_dim": self.word_dim,
                           "char_dim": self.char_dim}, f)

        self.extractor = FeatureExtractor(model_path)
        self.targets = read_model_defs(model_path + "/target.txt")
        self.train = True

        hidden_dim = 1000
        in_dim = WINDOW_SIZE * (self.word_dim + self.char_dim)
        super(JaCCGEmbeddingTagger, self).__init__(
                emb_word=L.EmbedID(len(self.extractor.words), self.word_dim),
                emb_char=L.EmbedID(len(self.extractor.chars),
                            self.char_dim, ignore_label=IGNORE),
                linear1=L.Linear(in_dim, hidden_dim),
                linear2=L.Linear(hidden_dim, len(self.targets)),
                )
Project: depccg    Author: masashi-y
def __init__(self, model_path, word_dim=None, afix_dim=None,
            nlayers=2, hidden_dim=128, relu_dim=64, dropout_ratio=0.5):
        self.model_path = model_path
        defs_file = model_path + "/tagger_defs.txt"
        if word_dim is None:
            self.train = False
            Param.load(self, defs_file)
            self.extractor = FeatureExtractor(model_path)
        else:
            self.train = True
            p = Param(self)
            p.word_dim = word_dim
            p.afix_dim = afix_dim
            p.hidden_dim = hidden_dim
            p.relu_dim = relu_dim
            p.nlayers = nlayers
            p.dropout_ratio = dropout_ratio
            p.in_dim = self.word_dim + 8 * self.afix_dim
            p.n_words = len(read_model_defs(model_path + "/words.txt"))
            p.n_suffixes = len(read_model_defs(model_path + "/suffixes.txt"))
            p.n_prefixes = len(read_model_defs(model_path + "/prefixes.txt"))
            p.targets = read_model_defs(model_path + "/target.txt")
            p.dump(defs_file)

        super(PeepHoleLSTMTagger, self).__init__(
                emb_word=L.EmbedID(self.n_words, self.word_dim, ignore_label=IGNORE),
                emb_suf=L.EmbedID(self.n_suffixes, self.afix_dim, ignore_label=IGNORE),
                emb_prf=L.EmbedID(self.n_prefixes, self.afix_dim, ignore_label=IGNORE),
                lstm_f1=DyerLSTM(self.in_dim, self.hidden_dim),
                lstm_f2=DyerLSTM(self.hidden_dim, self.hidden_dim),
                lstm_b1=DyerLSTM(self.in_dim, self.hidden_dim),
                lstm_b2=DyerLSTM(self.hidden_dim, self.hidden_dim),
                linear1=L.Linear(2 * self.hidden_dim, self.relu_dim),
                linear2=L.Linear(self.relu_dim, len(self.targets)),
                )
Project: depccg    Author: masashi-y
def __init__(self, model_path, word_dim=None, afix_dim=None, nlayers=2,
            hidden_dim=128, elu_dim=64, dep_dim=100, dropout_ratio=0.5, use_cudnn=False):
        self.model_path = model_path
        defs_file = model_path + "/tagger_defs.txt"
        if word_dim is None:
            self.train = False
            Param.load(self, defs_file)
            self.extractor = FeatureExtractor(model_path)
        else:
            self.train = True
            p = Param(self)
            p.dep_dim = dep_dim
            p.word_dim = word_dim
            p.afix_dim = afix_dim
            p.hidden_dim = hidden_dim
            p.elu_dim = elu_dim
            p.nlayers = nlayers
            p.dump(defs_file)

        self.targets = read_model_defs(model_path + "/target.txt")
        self.words = read_model_defs(model_path + "/words.txt")
        self.suffixes = read_model_defs(model_path + "/suffixes.txt")
        self.prefixes = read_model_defs(model_path + "/prefixes.txt")
        self.in_dim = self.word_dim + 8 * self.afix_dim
        self.dropout_ratio = dropout_ratio
        super(PeepHoleLSTMParser, self).__init__(
                emb_word=L.EmbedID(len(self.words), self.word_dim, ignore_label=IGNORE),
                emb_suf=L.EmbedID(len(self.suffixes), self.afix_dim, ignore_label=IGNORE),
                emb_prf=L.EmbedID(len(self.prefixes), self.afix_dim, ignore_label=IGNORE),
                lstm_f1=DyerLSTM(self.in_dim, self.hidden_dim),
                lstm_f2=DyerLSTM(self.hidden_dim, self.hidden_dim),
                lstm_b1=DyerLSTM(self.in_dim, self.hidden_dim),
                lstm_b2=DyerLSTM(self.hidden_dim, self.hidden_dim),
                linear_cat1=L.Linear(2 * self.hidden_dim, self.elu_dim),
                linear_cat2=L.Linear(self.elu_dim, len(self.targets)),
                linear_dep=L.Linear(2 * self.hidden_dim, self.dep_dim),
                linear_head=L.Linear(2 * self.hidden_dim, self.dep_dim),
                biaffine=Biaffine(self.dep_dim)
                )
Project: lencon    Author: kiyukuta
def __init__(self,
                 src_vcb_num,
                 trg_vcb_num,
                 dim_emb,
                 dim_hid):

        super().__init__(src_vcb_num,
                         trg_vcb_num,
                         dim_emb,
                         dim_hid)
        max_len = 300
        self.add_link('lh', L.Linear(dim_emb, dim_hid * 4, nobias=True))
        self.add_link('len_emb', L.EmbedID(max_len, dim_emb, ignore_label=-1))
Project: lesson    Author: SPJ-AI
def __init__(self, n_vocab, n_units):
        super(LSTM, self).__init__(
            embed=L.EmbedID(n_vocab, n_units, ignore_label=-1),
            l1=L.LSTM(n_units, n_units),
            l2=L.Linear(n_units, n_vocab)
        )
Project: chainer-deconv    Author: germanRos
def __init__(self, n_vocab, n_units, train=True):
        super(RNNLM, self).__init__(
            embed=L.EmbedID(n_vocab, n_units),
            l1=L.LSTM(n_units, n_units),
            l2=L.LSTM(n_units, n_units),
            l3=L.Linear(n_units, n_vocab),
        )
        self.train = train
Project: chainer-deconv    Author: germanRos
def __init__(self, n_vocab, n_units):
        super(RecursiveNet, self).__init__(
            embed=L.EmbedID(n_vocab, n_units),
            l=L.Linear(n_units * 2, n_units),
            w=L.Linear(n_units, n_label))  # n_label is a module-level constant in the original example
Project: chainer-deconv    Author: germanRos
def __init__(self, n_vocab, n_units, loss_func):
        super(ContinuousBoW, self).__init__(
            embed=F.EmbedID(n_vocab, n_units),  # deprecated chainer.functions alias of L.EmbedID
            loss_func=loss_func,
        )
Project: chainer-deconv    Author: germanRos
def __init__(self, n_vocab, n_units, loss_func):
        super(SkipGram, self).__init__(
            embed=L.EmbedID(n_vocab, n_units),
            loss_func=loss_func,
        )
Project: chainer-deconv    Author: germanRos
def setUp(self):
        self.link = links.EmbedID(3, 2, ignore_label=self.ignore_label)
        self.link.ignore_label  # attribute access only (smoke-checks that ignore_label is exposed)
        self.link.zerograds()

        self.W = self.link.W.data.copy()  # fixed on CPU
        self.x = numpy.array(self.x_data, dtype=numpy.int32)
        y_shape = self.x.shape + (2,)
        self.gy = numpy.random.uniform(-1, 1, y_shape).astype(numpy.float32)
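
The y_shape line above relies on EmbedID preserving the input shape and appending one embedding axis; a quick self-contained check:

import numpy as np
import chainer.links as L

embed = L.EmbedID(3, 2)
x = np.array([[0, 1, 2], [2, 1, 0]], dtype=np.int32)  # batch of two sequences
y = embed(x)
print(y.shape)  # (2, 3, 2): the input shape plus the embedding dimension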
Project: chainer-deconv    Author: germanRos
def test_old_unpickle(self):
        embed = links.EmbedID(3, 4)
        # To emulate an old pickled file
        delattr(embed, 'ignore_label')
        x = chainer.Variable(numpy.arange(2, dtype=numpy.int32))
        y = embed(x)
        self.assertEqual(y.data.shape, (2, 4))
Project: chainer_nmt    Author: odashi
def __init__(
      self,
      src_vocab_size,
      trg_vocab_size,
      embed_size,
      hidden_size,
      atten_size):
    super(AttentionEncoderDecoder, self).__init__(
        # Encoder
        x_i = L.EmbedID(src_vocab_size, embed_size),
        i_f = L.Linear(embed_size, 4 * hidden_size, nobias=True),
        f_f = L.Linear(hidden_size, 4 * hidden_size),
        i_b = L.Linear(embed_size, 4 * hidden_size, nobias=True),
        b_b = L.Linear(hidden_size, 4 * hidden_size),
        # Attention
        fb_e = L.Linear(2 * hidden_size, atten_size, nobias=True),
        p_e = L.Linear(hidden_size, atten_size),
        e_a = L.Linear(atten_size, 1, nobias=True),
        # Decoder initializer
        fc_pc = L.Linear(hidden_size, hidden_size, nobias=True),
        bc_pc = L.Linear(hidden_size, hidden_size),
        f_p = L.Linear(hidden_size, hidden_size, nobias=True),
        b_p = L.Linear(hidden_size, hidden_size),
        # Decoder
        y_j = L.EmbedID(trg_vocab_size, embed_size),
        j_p = L.Linear(embed_size, 4 * hidden_size, nobias=True),
        q_p = L.Linear(2 * hidden_size, 4 * hidden_size, nobias=True),
        p_p = L.Linear(hidden_size, 4 * hidden_size),
        p_z = L.Linear(hidden_size, trg_vocab_size))
    self.src_vocab_size = src_vocab_size
    self.trg_vocab_size = trg_vocab_size
    self.embed_size = embed_size
    self.hidden_size = hidden_size
    self.atten_size = atten_size
Project: vsmlib    Author: undertherain
def __init__(self, n_vocab, n_units, loss_func):
        super(ContinuousBoW, self).__init__()

        with self.init_scope():
            self.embed = L.EmbedID(n_vocab, n_units, initialW=I.Uniform(1. / n_units))
            self.loss_func = loss_func
Project: vsmlib    Author: undertherain
def __init__(self, n_vocab, n_units, loss_func):
        super(SkipGram, self).__init__()

        with self.init_scope():
            self.embed = L.EmbedID(n_vocab, n_units, initialW=I.Uniform(1. / n_units))
            self.loss_func = loss_func
Project: vsmlib    Author: undertherain
def __init__(self, n_vocab_char, n_units, n_units_char, index2charIds, dropout=.2):  # dropout ratio; zero means no dropout
        super(RNN, self).__init__()
        with self.init_scope():
            self.embed = L.EmbedID(
                n_vocab_char, n_units_char, initialW=I.Uniform(1. / n_units_char))  # character embedding
            self.mid = L.LSTM(n_units_char, n_units_char)  # the first LSTM layer
            self.out = L.Linear(n_units_char, n_units)  # the feed-forward output layer
            self.dropout = dropout
            self.index2charIds = index2charIds
Project: vsmlib    Author: undertherain
def __init__(self, n_vocab, n_units, loss_func):
        super(ContinuousBoW, self).__init__()

        with self.init_scope():
            self.embed = L.EmbedID(
                n_vocab, n_units, initialW=I.Uniform(1. / n_units))
            self.loss_func = loss_func
Project: vsmlib    Author: undertherain
def __init__(self, n_vocab_char, n_units, n_units_char):
        super(RNN, self).__init__()
        with self.init_scope():
            self.embed = L.EmbedID(
                n_vocab_char, n_units_char, initialW=I.Uniform(1. / n_units_char))  # character embedding
            self.mid = L.LSTM(n_units_char, n_units_char)  # the first LSTM layer
            self.out = L.Linear(n_units_char, n_units)  # the feed-forward output layer
Project: vsmlib    Author: undertherain
def __init__(self, n_vocab, input_channel, output_channel, n_label, embed_dim, position_dims=50, freeze=True, train=True):
        super(CNN, self).__init__(
            embed=L.EmbedID(n_vocab, embed_dim), 
            dist1=L.EmbedID(n_vocab, position_dims),
            dist2=L.EmbedID(n_vocab, position_dims),
            conv1=L.Convolution2D(
                input_channel, output_channel, (3, embed_dim + 2*position_dims)),
            l1=L.Linear(output_channel, n_label)
        )
        self.train = train
        self.freeze = freeze
Project: vsmlib    Author: undertherain
def __init__(self, n_vocab, input_channel, output_channel, n_label, embed_dim, freeze, train=True):
        super(CNN, self).__init__(
            embed=L.EmbedID(n_vocab, embed_dim), 
            conv3=L.Convolution2D(
                input_channel, output_channel, (3, embed_dim)),
            conv4=L.Convolution2D(
                input_channel, output_channel, (4, embed_dim)),
            conv5=L.Convolution2D(
                input_channel, output_channel, (5, embed_dim)),
            l1=L.Linear(3 * output_channel, n_label)
        )
        self.train = train
        self.freeze = freeze
Project: TOHO_AI    Author: re53min
def __init__(self, n_vocab, n_units, train=True):
        super(GRU, self).__init__(
            embed=L.EmbedID(n_vocab, n_units, ignore_label=-1),
            l1=L.StatefulGRU(n_units, n_units),
            l2=L.StatefulGRU(n_units, n_units),
            l3=L.Linear(n_units, n_vocab),
        )
        for param in self.params():
            param.data[...] = np.random.uniform(-0.1, 0.1, param.data.shape)
        self.train = train