Python keras.layers.embeddings module: Embedding() example source code

The following code examples, extracted from open-source Python projects, illustrate how to use keras.layers.embeddings.Embedding().
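
Before the project examples, here is a minimal, self-contained sketch (not taken from any project below) of what Embedding() does: it maps integer token indices to dense vectors, turning a (batch, sequence_length) integer input into a (batch, sequence_length, output_dim) float output.

import numpy as np
from keras.models import Sequential
from keras.layers.embeddings import Embedding

model = Sequential()
# a vocabulary of 1000 indices, each mapped to a 64-dimensional vector
model.add(Embedding(input_dim=1000, output_dim=64, input_length=10))
model.compile('rmsprop', 'mse')

batch = np.random.randint(1000, size=(32, 10))  # integer token indices
vectors = model.predict(batch)
print(vectors.shape)  # (32, 10, 64)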

Project: taxi    Author: xuguanggen    | Project source | File source
def build_mlp(n_con, n_emb, vocabs_size, n_dis, emb_size, cluster_size):
    hidden_size = 800
    con = Sequential()
    con.add(Dense(input_dim=n_con, output_dim=emb_size))

    emb_list = []
    for i in range(n_emb):
        emb = Sequential()
        emb.add(Embedding(input_dim=vocabs_size[i], output_dim=emb_size, input_length=n_dis))
        emb.add(Flatten())
        emb_list.append(emb)

    model = Sequential()
    model.add(Merge([con] + emb_list, mode='concat'))
    model.add(BatchNormalization())
    model.add(Dense(hidden_size, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(cluster_size, activation='softmax'))
    model.add(Lambda(caluate_point, output_shape=[2]))  # caluate_point [sic] is defined elsewhere in the project
    return model
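
The build_mlp snippet above relies on Keras 1 APIs (the Merge layer, output_dim=, Dense(input_dim=...)) that were removed in Keras 2. Below is a hedged sketch of the same continuous-plus-categorical pattern in the Keras 2 functional API; it omits the BatchNormalization/Dropout/Lambda tail for brevity, and build_mlp_v2 is an illustrative name, not part of the project.

from keras.layers import Concatenate, Dense, Embedding, Flatten, Input
from keras.models import Model

def build_mlp_v2(n_con, n_emb, vocabs_size, n_dis, emb_size, cluster_size):
    con_input = Input(shape=(n_con,))  # continuous features
    con = Dense(emb_size)(con_input)

    emb_inputs, emb_outputs = [], []
    for i in range(n_emb):  # one embedding per discrete feature
        inp = Input(shape=(n_dis,))
        emb = Embedding(input_dim=vocabs_size[i], output_dim=emb_size,
                        input_length=n_dis)(inp)
        emb_inputs.append(inp)
        emb_outputs.append(Flatten()(emb))

    merged = Concatenate()([con] + emb_outputs)  # replaces Merge(mode='concat')
    hidden = Dense(800, activation='relu')(merged)
    out = Dense(cluster_size, activation='softmax')(hidden)
    return Model(inputs=[con_input] + emb_inputs, outputs=out)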
Project: aes-gated-word-char    Author: unkn0wnxx    | Project source | File source
def create_char_cnn_model(self, emb_dim, word_maxlen, vocab_char_size,
                              char_maxlen):
        from aes.layers import Conv1DMask
        logger.info('Building character CNN model')
        input_char = Input(shape=(char_maxlen, ), name='input_char')
        char_emb = Embedding(
            vocab_char_size, emb_dim, mask_zero=True)(input_char)
        cnn = Conv1DMask(
            filters=emb_dim, kernel_size=3, padding='same')(char_emb)
        dropped = Dropout(0.5)(cnn)
        mot = MeanOverTime(mask_zero=True)(dropped)
        densed = Dense(self.num_outputs, name='dense')(mot)
        output = Activation('sigmoid')(densed)
        model = Model(inputs=input_char, outputs=output)
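        # Note: bias.set_value(...) assumes a Theano-style shared variable;
        # with a TensorFlow backend, K.set_value(...) would be the portable call.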
        model.get_layer('dense').bias.set_value(self.bias)
        logger.info('  Done')
        return model
Project: aes-gated-word-char    Author: unkn0wnxx    | Project source | File source
def create_char_gru_model(self, emb_dim, word_maxlen, vocab_char_size,
                              char_maxlen):
        from keras.layers import GRU
        logger.info('Building character GRU model')
        input_char = Input(shape=(char_maxlen, ), name='input_char')
        char_emb = Embedding(
            vocab_char_size, emb_dim, mask_zero=True)(input_char)
        gru = GRU(
            300,
            return_sequences=True,
            dropout=self.dropout,
            recurrent_dropout=self.recurrent_dropout)(char_emb)
        dropped = Dropout(0.5)(gru)
        mot = MeanOverTime(mask_zero=True)(dropped)
        densed = Dense(self.num_outputs, name='dense')(mot)
        output = Activation('sigmoid')(densed)
        model = Model(inputs=input_char, outputs=output)
        model.get_layer('dense').bias.set_value(self.bias)
        logger.info('  Done')
        return model
Project: aes-gated-word-char    Author: unkn0wnxx    | Project source | File source
def create_char_rnn_model(self, emb_dim, word_maxlen, vocab_char_size,
                              char_maxlen):
        from keras.layers import SimpleRNN
        logger.info('Building character RNN model')
        input_char = Input(shape=(char_maxlen, ), name='input_char')
        char_emb = Embedding(
            vocab_char_size, emb_dim, mask_zero=True)(input_char)
        rnn = SimpleRNN(
            300,
            return_sequences=True,
            dropout=self.dropout,
            recurrent_dropout=self.recurrent_dropout)(char_emb)
        dropped = Dropout(0.5)(rnn)
        mot = MeanOverTime(mask_zero=True)(dropped)
        densed = Dense(self.num_outputs, name='dense')(mot)
        output = Activation('sigmoid')(densed)
        model = Model(inputs=input_char, outputs=output)
        model.get_layer('dense').bias.set_value(self.bias)
        logger.info('  Done')
        return model
Project: knowledgeflow    Author: 3rduncle    | Project source | File source
def buildEmbedding(self):
        weights = self.embedding_params.get('weights')
        #assert weights
        trainable = self.params.get('embedding_trainable', False)
        if trainable:
            logging.info('Embedding Weights is Trainable!')
        else:
            logging.info('Embedding Weights is Not Trainable!')
        with tf.name_scope('embedding'):
            W = tf.Variable(
                weights,
                name = 'embedding',
                trainable = trainable,
                dtype = tf.float32
            )
            self.tensors['q_embedding'] = tf.nn.embedding_lookup(W, self.tensors['q_input'])
            self.tensors['a_embedding'] = tf.nn.embedding_lookup(W, self.tensors['a_input'])
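
For contrast with the Keras layer, the snippet above builds the lookup directly in TensorFlow. A minimal standalone version of the same idea (shapes and names are illustrative):

import numpy as np
import tensorflow as tf

weights = np.random.rand(1000, 64).astype('float32')  # pretrained embedding matrix
W = tf.Variable(weights, trainable=False, name='embedding')
ids = tf.constant([[1, 5, 9]], dtype=tf.int32)        # a batch of token indices
vectors = tf.nn.embedding_lookup(W, ids)              # shape (1, 3, 64)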
Project: kaggle-quora-question-pairs    Author: voletiv    | Project source | File source
def createBaseNetworkSmall(inputDim, inputLength):
    baseNetwork = Sequential()
    baseNetwork.add(Embedding(input_dim=inputDim,
                              output_dim=inputDim, input_length=inputLength))
    baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu', kernel_initializer=RandomNormal(
        mean=0.0, stddev=0.05), bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu', kernel_initializer=RandomNormal(
        mean=0.0, stddev=0.05), bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu', kernel_initializer=RandomNormal(
        mean=0.0, stddev=0.05), bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu', kernel_initializer=RandomNormal(
        mean=0.0, stddev=0.05), bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu', kernel_initializer=RandomNormal(
        mean=0.0, stddev=0.05), bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu', kernel_initializer=RandomNormal(
        mean=0.0, stddev=0.05), bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Flatten())
    baseNetwork.add(Dense(1024, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    baseNetwork.add(Dense(1024, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    return baseNetwork
Project: kaggle-quora-question-pairs    Author: voletiv    | Project source | File source
def createBaseNetworkLarge(inputDim, inputLength):
    baseNetwork = Sequential()
    baseNetwork.add(Embedding(input_dim=inputDim,
                              output_dim=inputDim, input_length=inputLength))
    baseNetwork.add(Conv1D(1024, 7, strides=1, padding='valid', activation='relu', kernel_initializer=RandomNormal(
        mean=0.0, stddev=0.02), bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(1024, 7, strides=1, padding='valid', activation='relu', kernel_initializer=RandomNormal(
        mean=0.0, stddev=0.02), bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu', kernel_initializer=RandomNormal(
        mean=0.0, stddev=0.02), bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu', kernel_initializer=RandomNormal(
        mean=0.0, stddev=0.02), bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu', kernel_initializer=RandomNormal(
        mean=0.0, stddev=0.02), bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu', kernel_initializer=RandomNormal(
        mean=0.0, stddev=0.02), bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Flatten())
    baseNetwork.add(Dense(2048, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    baseNetwork.add(Dense(2048, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    return baseNetwork
Project: kaggle-quora-question-pairs    Author: voletiv    | Project source | File source
def createBaseNetworkSmall(inputDim, inputLength):
        baseNetwork = Sequential()
        baseNetwork.add(Embedding(input_dim=inputDim, output_dim=inputDim, input_length=inputLength))
        baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu', kernel_initializer=RandomNormal(mean=0.0, stddev=0.05), bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
        baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
        baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu', kernel_initializer=RandomNormal(mean=0.0, stddev=0.05), bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
        baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
        baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu', kernel_initializer=RandomNormal(mean=0.0, stddev=0.05), bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
        baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu', kernel_initializer=RandomNormal(mean=0.0, stddev=0.05), bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
        baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu', kernel_initializer=RandomNormal(mean=0.0, stddev=0.05), bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
        baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu', kernel_initializer=RandomNormal(mean=0.0, stddev=0.05), bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
        baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
        baseNetwork.add(Flatten())
        baseNetwork.add(Dense(1024, activation='relu'))
        baseNetwork.add(Dropout(0.5))
        baseNetwork.add(Dense(1024, activation='relu'))
        baseNetwork.add(Dropout(0.5))
        return baseNetwork
Project: kaggle-quora-question-pairs    Author: voletiv    | Project source | File source
def createBaseNetworkLarge(inputDim, inputLength):
        baseNetwork = Sequential()
        baseNetwork.add(Embedding(input_dim=inputDim, output_dim=inputDim, input_length=inputLength))
        baseNetwork.add(Conv1D(1024, 7, strides=1, padding='valid', activation='relu', kernel_initializer=RandomNormal(mean=0.0, stddev=0.02), bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
        baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
        baseNetwork.add(Conv1D(1024, 7, strides=1, padding='valid', activation='relu', kernel_initializer=RandomNormal(mean=0.0, stddev=0.02), bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
        baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
        baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu', kernel_initializer=RandomNormal(mean=0.0, stddev=0.02), bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
        baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu', kernel_initializer=RandomNormal(mean=0.0, stddev=0.02), bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
        baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu', kernel_initializer=RandomNormal(mean=0.0, stddev=0.02), bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
        baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu', kernel_initializer=RandomNormal(mean=0.0, stddev=0.02), bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
        baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
        baseNetwork.add(Flatten())
        baseNetwork.add(Dense(2048, activation='relu'))
        baseNetwork.add(Dropout(0.5))
        baseNetwork.add(Dense(2048, activation='relu'))
        baseNetwork.add(Dropout(0.5))
        return baseNetwork
Project: patriots    Author: wdxtub    | Project source | File source
def train_lstm(dict, x, y, xt, yt):
  model = Sequential()
  model.add(Embedding(len(dict) + 1, 256, input_length=maxlen))
  model.add(LSTM(output_dim=128, activation='sigmoid', inner_activation='hard_sigmoid'))
  model.add(Dropout(0.5))
  model.add(Dense(1))
  # model.add(Dense(input_dim = 32, output_dim = 1))
  model.add(Activation('sigmoid'))
  print('Compiling the model...')
  #model.compile(loss='binary_crossentropy', optimizer='adam', class_mode="binary")
  model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
  print('Training the model...')
  model.fit(x, y, batch_size=lstm_batch_size, epochs=lstm_epochs, verbose=0)
  print('Training finished.')
  print('Saving the model...')
  yaml_string = model.to_yaml()
  with open(modeldir + '/lstm.yml', 'w') as outfile:
    outfile.write(yaml.dump(yaml_string, default_flow_style=True))
  model.save_weights(modeldir + '/lstm.h5')
  print('Model saved.')
  score = model.evaluate(xt, yt, verbose=0)
  print('Test accuracy:', score[1])
  return model
Project: temporal-attention    Author: dosht    | Project source | File source
def main():
    print("\n\nLoading data...")
    data_dir = "/data/translate"
    vocab_size = 20000
    en, fr = prepare_date(data_dir, vocab_size)  # prepare_date [sic] is the project's data-loading helper

    print("\n\nbuilding the model...")
    embedding_size = 64
    hidden_size = 32
    model = Sequential()
    model.add(Embedding(en.max_features, embedding_size, input_length=en.max_length, mask_zero=True))
    model.add(Bidirectional(GRU(hidden_size), merge_mode='sum'))
    model.add(RepeatVector(fr.max_length))
    model.add(GRU(embedding_size))
    model.add(Dense(fr.max_length, activation="softmax"))
    model.compile('rmsprop', 'mse')
    print(model.get_config())

    print("\n\nFitting the model...")
    model.fit(en.examples, fr.examples)

    print("\n\nEvaluation...")
    #TODO
Project: narrative-prediction    Author: roemmele    | Project source | File source
def create_model(self):
        model = Sequential()
        model.add(Embedding(output_dim=self.n_embedding_nodes, input_dim=self.lexicon_size + 1,
                            input_length=self.n_timesteps, mask_zero=True, name='embedding_layer'))
        for layer_num in range(self.n_hidden_layers):
            if layer_num == self.n_hidden_layers - 1:
                return_sequences = False
            else: #add extra hidden layers
                return_sequences = True
            model.add(GRU(output_dim=self.n_hidden_nodes, return_sequences=return_sequences, name='hidden_layer' + str(layer_num + 1)))
        model.add(Dense(output_dim=self.n_output_classes, activation='softmax', name='output_layer'))
        # if emb_weights is not None:
        #     #initialize weights with lm weights
        #     model.layers[0].set_weights(emb_weights) #set embeddings
        # if layer1_weights is not None:
        #     model.layers[1].set_weights(layer1_weights) #set recurrent layer 1         
        model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
        return model
Project: narrative-prediction    Author: roemmele    | Project source | File source
def create_model(self, n_timesteps, batch_size=1, pred_layer=True):

        model = Sequential()

        # if self.embeddings is None:
        model.add(Embedding(self.lexicon_size + 1, self.n_embedding_nodes,
                            batch_input_shape=(batch_size, n_timesteps)))#, mask_zero=True))

        model.add(Reshape((self.n_embedding_nodes * n_timesteps,)))

        for layer_num in range(self.n_hidden_layers):
            model.add(Dense(self.n_hidden_nodes, batch_input_shape=(batch_size, n_timesteps, self.n_embedding_nodes), activation='tanh'))

        if pred_layer: 
            model.add(Dense(self.lexicon_size + 1, activation="softmax"))

        #select optimizer and compile
        model.compile(loss="sparse_categorical_crossentropy", 
                      optimizer=eval(self.optimizer)(clipvalue=self.clipvalue, lr=self.lr, decay=self.decay))

        return model
Project: narrative-prediction    Author: roemmele    | Project source | File source
def create_model(self):

        if self.embedded_input:
            cause_word_layer = Input(shape=(self.n_embedding_nodes,), name="cause_word_layer") 
            effect_word_layer = Input(shape=(self.n_embedding_nodes,), name="effect_word_layer")
            cause_emb_layer = Dense(output_dim=self.n_embedding_nodes, name='cause_emb_layer', activation='tanh')(cause_word_layer)
            effect_emb_layer = Dense(output_dim=self.n_embedding_nodes, name='effect_emb_layer', activation='tanh')(effect_word_layer)
        else:
            cause_word_layer = Input(shape=(1,), name="cause_word_layer")
            effect_word_layer = Input(shape=(1,), name="effect_word_layer")
            cause_emb_layer = Embedding(self.lexicon_size + 1, self.n_embedding_nodes, name='cause_emb_layer')(cause_word_layer)
            effect_emb_layer = Embedding(self.lexicon_size + 1, self.n_embedding_nodes, name='effect_emb_layer')(effect_word_layer)
            flatten_layer = Flatten(name='flatten_layer')
            cause_emb_layer = flatten_layer(cause_emb_layer)
            effect_emb_layer = flatten_layer(effect_emb_layer)


        merge_layer = merge([cause_emb_layer, effect_emb_layer], mode='concat', concat_axis=-1, name='merge_layer')
        dense_layer = Dense(output_dim=self.n_hidden_nodes, name='dense_layer', activation='tanh')(merge_layer)
        pred_layer = Dense(output_dim=1, name='pred_layer', activation='sigmoid')(dense_layer)
        model = Model(input=[cause_word_layer, effect_word_layer], output=pred_layer)
        model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
        return model
Project: word2vec-keras-in-gensim    Author: niitsuma    | Project source | File source
def build_keras_model_sg(index_size,vector_size,
                         context_size,
                         #code_dim,
                         sub_batch_size=256,
                         learn_vectors=True,learn_hidden=True,
                         model=None):

    kerasmodel = Graph()
    kerasmodel.add_input(name='point' , input_shape=(1,), dtype=int)
    kerasmodel.add_input(name='index' , input_shape=(1,), dtype=int)
    kerasmodel.add_node(Embedding(index_size, vector_size, input_length=sub_batch_size,weights=[model.syn0]),name='embedding', input='index')
    kerasmodel.add_node(Embedding(context_size, vector_size, input_length=sub_batch_size,weights=[model.keras_syn1]),name='embedpoint', input='point')
    kerasmodel.add_node(Lambda(lambda x:x.sum(2))   , name='merge',inputs=['embedding','embedpoint'], merge_mode='mul')
    kerasmodel.add_node(Activation('sigmoid'), name='sigmoid', input='merge')
    kerasmodel.add_output(name='code',input='sigmoid')
    kerasmodel.compile('rmsprop', {'code':'mse'})
    return kerasmodel
Project: word2vec-keras-in-gensim    Author: niitsuma    | Project source | File source
def build_keras_model_cbow(index_size,vector_size,
                           context_size,
                           #code_dim,
                           sub_batch_size=1,
                           model=None,cbow_mean=False):

    kerasmodel = Graph()
    kerasmodel.add_input(name='point' , input_shape=(sub_batch_size,), dtype='int')
    kerasmodel.add_input(name='index' , input_shape=(1,), dtype='int')
    kerasmodel.add_node(Embedding(index_size, vector_size, weights=[model.syn0]),name='embedding', input='index')
    kerasmodel.add_node(Embedding(context_size, vector_size, input_length=sub_batch_size,weights=[model.keras_syn1]),name='embedpoint', input='point')
    if cbow_mean:
        kerasmodel.add_node(Lambda(lambda x:x.mean(1),output_shape=(vector_size,)),name='average',input='embedding')
    else:
        kerasmodel.add_node(Lambda(lambda x:x.sum(1),output_shape=(vector_size,)),name='average',input='embedding')

    kerasmodel.add_node(Activation('sigmoid'), name='sigmoid',inputs=['average','embedpoint'], merge_mode='dot',dot_axes=-1)
    kerasmodel.add_output(name='code',input='sigmoid')
    kerasmodel.compile('rmsprop', {'code':'mse'})
    return kerasmodel
Project: visual_turing_test-tutorial    Author: mateuszmalinowski    | Project source | File source
def textual_embedding(self, language_model, mask_zero):
        """
        Note:
        * mask_zero only makes sense if the embedding is learnt
        """
        if self._config.textual_embedding_dim > 0:
            print('Textual Embedding is on')
            language_model.add(Embedding(
                self._config.input_dim, 
                self._config.textual_embedding_dim, 
                mask_zero=mask_zero))
        else:
            print('Textual Embedding is off')
            language_model.add(Reshape(
                input_shape=(self._config.max_input_time_steps, self._config.input_dim),
                dims=(self._config.max_input_time_steps, self._config.input_dim)))
            if mask_zero:
                language_model.add(Masking(0))
        return language_model
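
As the docstring notes, mask_zero only matters when the embedding is learnt inside the model: with mask_zero=True, index 0 is reserved for padding, downstream mask-aware layers skip those timesteps, and input_dim must count the padding index. A minimal illustration (not project code):

import numpy as np
from keras.models import Sequential
from keras.layers.embeddings import Embedding

model = Sequential()
model.add(Embedding(input_dim=11, output_dim=4, mask_zero=True))  # vocab 1..10, 0 = padding
model.compile('rmsprop', 'mse')
padded = np.array([[5, 3, 0, 0]])   # trailing zeros are padding
print(model.predict(padded).shape)  # (1, 4, 4); the mask flags the last two steps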
Project: visual_turing_test-tutorial    Author: mateuszmalinowski    | Project source | File source
def textual_embedding_fixed_length(self, language_model, mask_zero):
        """
        In contrast to textual_embedding, it produces a fixed length output.
        """
        if self._config.textual_embedding_dim > 0:
            print('Textual Embedding with fixed length is on')
            language_model.add(Embedding(
                self._config.input_dim, 
                self._config.textual_embedding_dim,
                input_length=self._config.max_input_time_steps,
                mask_zero=mask_zero))
        else:
            print('Textual Embedding with fixed length is off')
            language_model.add(Reshape(
                input_shape=(self._config.max_input_time_steps, self._config.input_dim),
                dims=(self._config.max_input_time_steps, self._config.input_dim)))
            if mask_zero:
                language_model.add(Masking(0))
        return language_model
Project: visual_turing_test-tutorial    Author: mateuszmalinowski    | Project source | File source
def create(self):

        assert self._config.textual_embedding_dim == 0, \
                'Embedding cannot be learnt but must be fixed'

        language_forward = Sequential()
        language_forward.add(self._config.recurrent_encoder(
            self._config.hidden_state_dim, return_sequences=False,
            input_shape=(self._config.max_input_time_steps, self._config.input_dim)))
        self.language_forward = language_forward

        language_backward = Sequential()
        language_backward.add(self._config.recurrent_encoder(
            self._config.hidden_state_dim, return_sequences=False,
            go_backwards=True,
            input_shape=(self._config.max_input_time_steps, self._config.input_dim)))
        self.language_backward = language_backward

        self.add(Merge([language_forward, language_backward]))
        self.deep_mlp()
        self.add(Dense(self._config.output_dim))
        self.add(Activation('softmax'))
Project: visual_turing_test-tutorial    Author: mateuszmalinowski    | Project source | File source
def create(self):
        self._input_name = 'text'
        self._output_name = 'output'

        self.add_input(
                name=self._input_name, 
                input_shape=(self._config.max_input_time_steps, self._config.input_dim,))
        self.inputs['text'].input = T.imatrix()
        self.add_node(Embedding(
                self._config.input_dim, 
                self._config.textual_embedding_dim, 
                mask_zero=True), 
                name='embedding', input='text')
        self.add_node(
                self._config.recurrent_encoder(
                    self._config.hidden_state_dim, 
                    return_sequences=False,
                    go_backwards=self._config.go_backwards),
                name='recurrent', input='embedding') 
        self.add_node(Dropout(0.5), name='dropout', input='recurrent')
        self.add_node(Dense(self._config.output_dim), name='dense', input='dropout')
        self.add_node(Activation('softmax'), name='softmax', input='dense')
        self.add_output(name=self._output_name, input='softmax')
Project: SNLI-Keras    Author: adamzjk    | Project source | File source
def prep_embd(self):
    # Add an Embedding layer to convert word indices to vectors
    if not os.path.exists('GloVe_' + self.dataset + '.npy'):
      self.load_GloVe()
    embed_matrix = np.load('GloVe_' + self.dataset + '.npy')
    self.Embed = Embedding(input_dim = self.Vocab,
                           output_dim = self.EmbeddingSize,
                           input_length = self.SentMaxLen,
                           trainable = False,
                           weights = [embed_matrix],
                           name = 'embed_snli')

  # TODO Decomposable Attention Model by Ankur P. Parikh et al. 2016
Project: recurrent-attention-for-QA-SQUAD-based-on-keras    Author: wentaozhu    | Project source | File source
def call(self, x, mask=None):
        # input shape: (nb_samples, time (padded with zeros), input_dim)
        # note that the .build() method of subclasses MUST define
        # self.input_spec with a complete input shape.
        input_shape = self.input_spec[0].shape
        if K._BACKEND == 'tensorflow':
            if not input_shape[1]:
                raise Exception('When using TensorFlow, you should define '
                                'explicitly the number of timesteps of '
                                'your sequences.\n'
                                'If your first layer is an Embedding, '
                                'make sure to pass it an "input_length" '
                                'argument. Otherwise, make sure '
                                'the first layer has '
                                'an "input_shape" or "batch_input_shape" '
                                'argument, including the time axis. '
                                'Found input shape at layer ' + self.name +
                                ': ' + str(input_shape))
        if self.layer.stateful:
            initial_states = self.layer.states
        else:
            initial_states = self.layer.get_initial_states(x)
        constants = self.get_constants(x)
        preprocessed_input = self.layer.preprocess_input(x)

        last_output, outputs, states = K.rnn(self.step, preprocessed_input,
                                             initial_states,
                                             go_backwards=self.layer.go_backwards,
                                             mask=mask,
                                             constants=constants,
                                             unroll=self.layer.unroll,
                                             input_length=input_shape[1])
        if self.layer.stateful:
            self.updates = []
            for i in range(len(states)):
                self.updates.append((self.layer.states[i], states[i]))

        if self.layer.return_sequences:
            return outputs
        else:
            return last_output
Project: BiMPM_keras    Author: ijinmao    | Project source | File source
def __init__(self, sequence_length, nb_words,
                 word_embedding_dim, embedding_matrix):
        self.model = Sequential()
        self.model.add(Embedding(nb_words,
                                 word_embedding_dim,
                                 weights=[embedding_matrix],
                                 input_length=sequence_length,
                                 trainable=False))
Project: BiMPM_keras    Author: ijinmao    | Project source | File source
def __init__(self, sequence_length, nb_chars, nb_per_word,
                 embedding_dim, rnn_dim, rnn_unit='gru', dropout=0.0):
        def _collapse_input(x, nb_per_word=0):
            x = K.reshape(x, (-1, nb_per_word))
            return x

        def _unroll_input(x, sequence_length=0, rnn_dim=0):
            x = K.reshape(x, (-1, sequence_length, rnn_dim))
            return x

        if rnn_unit == 'gru':
            rnn = GRU
        else:
            rnn = LSTM
        self.model = Sequential()
        self.model.add(Lambda(_collapse_input,
                              arguments={'nb_per_word': nb_per_word},
                              output_shape=(nb_per_word,),
                              input_shape=(sequence_length, nb_per_word,)))
        self.model.add(Embedding(nb_chars,
                                 embedding_dim,
                                 input_length=nb_per_word,
                                 trainable=True))
        self.model.add(rnn(rnn_dim,
                           dropout=dropout,
                           recurrent_dropout=dropout))
        self.model.add(Lambda(_unroll_input,
                              arguments={'sequence_length': sequence_length,
                                         'rnn_dim': rnn_dim},
                              output_shape=(sequence_length, rnn_dim)))
Project: keras    Author: GeekLiB    | Project source | File source
def test_embedding():
    layer_test(Embedding,
               kwargs={'output_dim': 4, 'input_dim': 10, 'input_length': 2},
               input_shape=(3, 2),
               input_dtype='int32',
               expected_output_dtype=K.floatx())
Project: TF_MemN2N-tableQA    Author: vendi12    | Project source | File source
def make_character_embedding_layer(word_index):
    embeddings = get_embeddings()
    nb_words = min(MAX_NB_WORDS, len(word_index))
    embedding_matrix = np.zeros((nb_words, EMBEDDING_DIM))

    for word, i in word_index.items():
        if i >= nb_words:  # skip indices beyond the truncated vocabulary (avoids an IndexError when len(word_index) < MAX_NB_WORDS)
            continue
        embedding_vector = embeddings.get(word)
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector

    embedding_layer = Embedding(nb_words, EMBEDDING_DIM, weights=[embedding_matrix], input_length=MAX_SEQUENCE_LENGTH, trainable=False)
    return embedding_layer
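
A hypothetical way to wire the frozen layer returned above into a classifier (the LSTM head and all names below are illustrative, not from the project):

from keras.layers import Dense, Input, LSTM
from keras.models import Model

sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded = make_character_embedding_layer(word_index)(sequence_input)  # frozen pretrained vectors
encoded = LSTM(64)(embedded)
prediction = Dense(1, activation='sigmoid')(encoded)
model = Model(sequence_input, prediction)
model.compile(loss='binary_crossentropy', optimizer='adam')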
Project: quora_duplicate    Author: ijinmao    | Project source | File source
def __init__(self, sequence_length, nb_words,
                 word_embedding_dim, embedding_matrix):
        self.model = Sequential()
        self.model.add(Embedding(nb_words,
                                 word_embedding_dim,
                                 weights=[embedding_matrix],
                                 input_length=sequence_length,
                                 trainable=False))
Project: quora_duplicate    Author: ijinmao    | Project source | File source
def __init__(self, sequence_length, nb_chars, nb_per_word,
                 embedding_dim, rnn_dim, rnn_unit='gru', dropout=0.0):
        def _collapse_input(x, nb_per_word=0):
            x = K.reshape(x, (-1, nb_per_word))
            return x

        def _unroll_input(x, sequence_length=0, rnn_dim=0):
            x = K.reshape(x, (-1, sequence_length, rnn_dim))
            return x

        if rnn_unit == 'gru':
            rnn = GRU
        else:
            rnn = LSTM
        self.model = Sequential()
        self.model.add(Lambda(_collapse_input,
                              arguments={'nb_per_word': nb_per_word},
                              output_shape=(nb_per_word,),
                              input_shape=(sequence_length, nb_per_word,)))
        self.model.add(Embedding(nb_chars,
                                 embedding_dim,
                                 input_length=nb_per_word,
                                 trainable=True))
        self.model.add(rnn(rnn_dim,
                           dropout=dropout,
                           recurrent_dropout=dropout))
        self.model.add(Lambda(_unroll_input,
                              arguments={'sequence_length': sequence_length,
                                         'rnn_dim': rnn_dim},
                              output_shape=(sequence_length, rnn_dim)))
Project: hyperas    Author: maxpumperla    | Project source | File source
def model(X_train, X_test, y_train, y_test, max_features, maxlen):
    model = Sequential()
    model.add(Embedding(max_features, 128, input_length=maxlen))
    model.add(LSTM(128))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    early_stopping = EarlyStopping(monitor='val_loss', patience=4)
    checkpointer = ModelCheckpoint(filepath='keras_weights.hdf5',
                                   verbose=1,
                                   save_best_only=True)

    model.fit(X_train, y_train,
              batch_size={{choice([32, 64, 128])}},
              nb_epoch=1,
              validation_split=0.08,
              callbacks=[early_stopping, checkpointer])

    score, acc = model.evaluate(X_test, y_test, verbose=0)

    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Project: hyperas    Author: maxpumperla    | Project source | File source
def model(X_train, X_test, y_train, y_test, maxlen, max_features):
    embedding_size = 300
    pool_length = 4
    lstm_output_size = 100
    batch_size = 200
    nb_epoch = 1

    model = Sequential()
    model.add(Embedding(max_features, embedding_size, input_length=maxlen))
    model.add(Dropout({{uniform(0, 1)}}))
    # Note that we use unnamed parameters here, which is bad style, but is used here
    # to demonstrate that it works. Always prefer named parameters.
    model.add(Convolution1D({{choice([64, 128])}},
                            {{choice([6, 8])}},
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    model.add(MaxPooling1D(pool_length=pool_length))
    model.add(LSTM(lstm_output_size))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    print('Train...')
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              validation_data=(X_test, y_test))
    score, acc = model.evaluate(X_test, y_test, batch_size=batch_size)

    print('Test score:', score)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Project: triplets-extraction    Author: zsctju    | Project source | File source
def creat_binary_tag_LSTM( sourcevocabsize,targetvocabsize, source_W,input_seq_lenth ,output_seq_lenth ,
    hidden_dim ,emd_dim,loss='categorical_crossentropy',optimizer = 'rmsprop'):
    encoder_a = Sequential()
    encoder_b = Sequential()
    encoder_c = Sequential()
    l_A_embedding = Embedding(input_dim=sourcevocabsize+1,
                        output_dim=emd_dim,
                        input_length=input_seq_lenth,
                        mask_zero=True,
                        weights=[source_W])
    encoder_a.add(l_A_embedding)
    encoder_a.add(Dropout(0.3))
    encoder_b.add(l_A_embedding)
    encoder_b.add(Dropout(0.3))
    encoder_c.add(l_A_embedding)

    Model = Sequential()

    encoder_a.add(LSTM(hidden_dim,return_sequences=True))
    encoder_b.add(LSTM(hidden_dim,return_sequences=True,go_backwards=True))
    encoder_rb = Sequential()
    encoder_rb.add(ReverseLayer2(encoder_b))
    encoder_ab=Merge(( encoder_a,encoder_rb),mode='concat')
    Model.add(encoder_ab)

    decodelayer=LSTMDecoder_tag(hidden_dim=hidden_dim, output_dim=hidden_dim
                                         , input_length=input_seq_lenth,
                                        output_length=output_seq_lenth,
                                        state_input=False,
                                         return_sequences=True)
    Model.add(decodelayer)
    Model.add(TimeDistributedDense(targetvocabsize+1))
    Model.add(Activation('softmax'))
    Model.compile(loss=loss, optimizer=optimizer)
    return Model
Project: nli_generation    Author: jstarc    | Project source | File source
def make_fixed_embeddings(glove, seq_len):
    glove_mat = np.array(list(glove.values()))  # list(...) so this also works on Python 3 dict views
    return Embedding(input_dim=glove_mat.shape[0], output_dim=glove_mat.shape[1],
                     weights=[glove_mat], trainable=False, input_length=seq_len)
Project: nli_generation    Author: jstarc    | Project source | File source
def baseline_train(noise_examples, hidden_size, noise_dim, glove, hypo_len, version):
    prem_input = Input(shape=(None,), dtype='int32', name='prem_input')
    hypo_input = Input(shape=(hypo_len + 1,), dtype='int32', name='hypo_input')
    noise_input = Input(shape=(1,), dtype='int32', name='noise_input')
    train_input = Input(shape=(None,), dtype='int32', name='train_input')
    class_input = Input(shape=(3,), name='class_input')
    concat_dim = hidden_size + noise_dim + 3
    prem_embeddings = make_fixed_embeddings(glove, None)(prem_input)
    hypo_embeddings = make_fixed_embeddings(glove, hypo_len + 1)(hypo_input)

    premise_layer = LSTM(output_dim=hidden_size, return_sequences=False,
                            inner_activation='sigmoid', name='premise')(prem_embeddings)

    noise_layer = Embedding(noise_examples, noise_dim,
                            input_length = 1, name='noise_embeddings')(noise_input)
    flat_noise = Flatten(name='noise_flatten')(noise_layer)    
    merged = merge([premise_layer, class_input, flat_noise], mode='concat')
    creative = Dense(concat_dim, name = 'cmerge')(merged)
    fake_merge = Lambda(lambda x:x[0], output_shape=lambda x:x[0])([hypo_embeddings, creative])
    hypo_layer = FeedLSTM(output_dim=concat_dim, return_sequences=True,
                         feed_layer = creative, inner_activation='sigmoid', 
                         name='attention')([fake_merge])

    hs = HierarchicalSoftmax(len(glove), trainable = True, name='hs')([hypo_layer, train_input])
    inputs = [prem_input, hypo_input, noise_input, train_input, class_input]


    model_name = 'version' + str(version)
    model = Model(input=inputs, output=hs, name = model_name)
    model.compile(loss=hs_categorical_crossentropy, optimizer='adam')

    return model
Project: aes-gated-word-char    Author: unkn0wnxx    | Project source | File source
def create_gate_positional_model(self, char_cnn_kernel, cnn_kernel,
                                     emb_dim, emb_path, vocab_word,
                                     vocab_word_size, word_maxlen,
                                     vocab_char_size, char_maxlen):
        from aes.layers import Conv1DMask, GatePositional, MaxPooling1DMask
        logger.info('Building gate positional model')
        input_char = Input(shape=(char_maxlen, ), name='input_char')
        char_emb = Embedding(
            vocab_char_size, emb_dim, mask_zero=True)(input_char)
        char_cnn = Conv1DMask(
            filters=emb_dim,
            kernel_size=3,
            padding='same')(char_emb)
        char_input = MaxPooling1DMask(
            pool_size=char_maxlen / word_maxlen, padding='same')(char_cnn)
        input_word = Input(shape=(word_maxlen, ), name='input_word')
        word_input = Embedding(
            vocab_word_size, emb_dim, mask_zero=True,
            name='word_emb')(input_word)
        gate = GatePositional()([char_input, word_input])
        final_input = Dense(50)(gate)
        cnn = Conv1DMask(
            filters=emb_dim,
            kernel_size=3,
            padding='same')(final_input)
        dropped = Dropout(0.5)(cnn)
        mot = MeanOverTime(mask_zero=True)(dropped)
        densed = Dense(self.num_outputs, name='dense')(mot)
        output = Activation('sigmoid')(densed)
        model = Model(inputs=[input_char, input_word], outputs=output)
        model.get_layer('dense').bias.set_value(self.bias)
        if emb_path:
            from emb_reader import EmbReader as EmbReader
            logger.info('Initializing lookup table')
            emb_reader = EmbReader(emb_path, emb_dim=emb_dim)
            model.get_layer('word_emb').embeddings.set_value(
                emb_reader.get_emb_matrix_given_vocab(
                    vocab_word,
                    model.get_layer('word_emb').embeddings.get_value()))
        logger.info('  Done')
        return model
Project: aes-gated-word-char    Author: unkn0wnxx    | Project source | File source
def create_gate_matrix_model(self, char_cnn_kernel, cnn_kernel, emb_dim,
                                 emb_path, vocab_word, vocab_word_size,
                                 word_maxlen, vocab_char_size, char_maxlen):
        from aes.layers import Conv1DMask, GateMatrix, MaxPooling1DMask
        logger.info('Building gate matrix model')
        input_char = Input(shape=(char_maxlen, ), name='input_char')
        char_emb = Embedding(
            vocab_char_size, emb_dim, mask_zero=True)(input_char)
        char_cnn = Conv1DMask(
            filters=emb_dim,
            kernel_size=char_cnn_kernel,
            padding='same')(char_emb)
        char_input = MaxPooling1DMask(
            pool_size=char_maxlen / word_maxlen, padding='same')(char_cnn)
        input_word = Input(shape=(word_maxlen, ), name='input_word')
        word_input = Embedding(
            vocab_word_size, emb_dim, mask_zero=True,
            name='word_emb')(input_word)
        gate = GateMatrix()([char_input, word_input])
        final_input = Dense(50)(gate)
        cnn = Conv1DMask(
            filters=emb_dim,
            kernel_size=cnn_kernel,
            padding='same')(final_input)
        dropped = Dropout(0.5)(cnn)
        mot = MeanOverTime(mask_zero=True)(dropped)
        densed = Dense(self.num_outputs, name='dense')(mot)
        output = Activation('sigmoid')(densed)
        model = Model(inputs=[input_char, input_word], outputs=output)
        model.get_layer('dense').bias.set_value(self.bias)
        if emb_path:
            from emb_reader import EmbReader as EmbReader
            logger.info('Initializing lookup table')
            emb_reader = EmbReader(emb_path, emb_dim=emb_dim)
            model.get_layer('word_emb').embeddings.set_value(
                emb_reader.get_emb_matrix_given_vocab(
                    vocab_word,
                    model.get_layer('word_emb').embeddings.get_value()))
        logger.info('  Done')
        return model
Project: aes-gated-word-char    Author: unkn0wnxx    | Project source | File source
def create_gate_vector_model(self, char_cnn_kernel, cnn_kernel, emb_dim,
                                 emb_path, vocab_word, vocab_word_size,
                                 word_maxlen, vocab_char_size, char_maxlen):
        from aes.layers import Conv1DMask, GateVector, MaxPooling1DMask
        logger.info('Building gate vector model')
        input_char = Input(shape=(char_maxlen, ), name='input_char')
        char_emb = Embedding(
            vocab_char_size, emb_dim, mask_zero=True)(input_char)
        char_cnn = Conv1DMask(
            filters=emb_dim,
            kernel_size=char_cnn_kernel,
            padding='same')(char_emb)
        char_input = MaxPooling1DMask(
            pool_size=char_maxlen / word_maxlen, padding='same')(char_cnn)
        input_word = Input(shape=(word_maxlen, ), name='input_word')
        word_input = Embedding(
            vocab_word_size, emb_dim, mask_zero=True,
            name='word_emb')(input_word)
        gate = GateVector()([char_input, word_input])
        final_input = Dense(50)(gate)
        cnn = Conv1DMask(
            filters=emb_dim,
            kernel_size=cnn_kernel,
            padding='same')(final_input)
        dropped = Dropout(0.5)(cnn)
        mot = MeanOverTime(mask_zero=True)(dropped)
        densed = Dense(self.num_outputs, name='dense')(mot)
        output = Activation('sigmoid')(densed)
        model = Model(inputs=[input_char, input_word], outputs=output)
        model.get_layer('dense').bias.set_value(self.bias)
        if emb_path:
            from emb_reader import EmbReader as EmbReader
            logger.info('Initializing lookup table')
            emb_reader = EmbReader(emb_path, emb_dim=emb_dim)
            model.get_layer('word_emb').embeddings.set_value(
                emb_reader.get_emb_matrix_given_vocab(
                    vocab_word,
                    model.get_layer('word_emb').embeddings.get_value()))
        logger.info('  Done')
        return model
Project: aes-gated-word-char    Author: unkn0wnxx    | Project source | File source
def create_concat_model(self, emb_dim, emb_path, vocab_word,
                            vocab_word_size, word_maxlen, vocab_char_size,
                            char_maxlen):
        from aes.layers import Conv1DMask, MaxPooling1DMask
        from keras.layers import concatenate
        logger.info('Building concatenation model')
        input_char = Input(shape=(char_maxlen, ), name='input_char')
        char_emb = Embedding(
            vocab_char_size, emb_dim, mask_zero=True)(input_char)
        char_cnn = Conv1DMask(
            filters=emb_dim, kernel_size=3, padding='same')(char_emb)
        char_input = MaxPooling1DMask(
            pool_size=char_maxlen / word_maxlen, padding='same')(char_cnn)
        input_word = Input(shape=(word_maxlen, ), name='input_word')
        word_input = Embedding(
            vocab_word_size, emb_dim, mask_zero=True,
            name='word_emb')(input_word)
        merged = concatenate([char_input, word_input], axis=1)
        merged_dropped = Dropout(0.5)(merged)
        final_input = Dense(50)(merged_dropped)
        cnn = Conv1DMask(
            filters=emb_dim, kernel_size=3, padding='same')(final_input)
        dropped = Dropout(0.5)(cnn)
        mot = MeanOverTime(mask_zero=True)(dropped)
        densed = Dense(self.num_outputs, name='dense')(mot)
        output = Activation('sigmoid')(densed)
        model = Model(inputs=[input_char, input_word], outputs=output)
        model.get_layer('dense').bias.set_value(self.bias)
        if emb_path:
            from emb_reader import EmbReader as EmbReader
            logger.info('Initializing lookup table')
            emb_reader = EmbReader(emb_path, emb_dim=emb_dim)
            model.get_layer('word_emb').embeddings.set_value(
                emb_reader.get_emb_matrix_given_vocab(
                    vocab_word,
                    model.get_layer('word_emb').embeddings.get_value()))
        logger.info('  Done')
        return model
Project: aes-gated-word-char    Author: unkn0wnxx    | Project source | File source
def create_word_cnn_model(self, emb_dim, emb_path, vocab_word,
                              vocab_word_size, word_maxlen):
        from aes.layers import Conv1DMask
        logger.info('Building word CNN model')
        input_word = Input(shape=(word_maxlen, ), name='input_word')
        word_emb = Embedding(
            vocab_word_size, emb_dim, mask_zero=True,
            name='word_emb')(input_word)
        cnn = Conv1DMask(
            filters=emb_dim, kernel_size=3, padding='same')(word_emb)
        dropped = Dropout(0.5)(cnn)
        mot = MeanOverTime(mask_zero=True)(dropped)
        densed = Dense(self.num_outputs, name='dense')(mot)
        output = Activation('sigmoid')(densed)
        model = Model(inputs=input_word, outputs=output)
        model.get_layer('dense').bias.set_value(self.bias)
        if emb_path:
            from emb_reader import EmbReader as EmbReader
            logger.info('Initializing lookup table')
            emb_reader = EmbReader(emb_path, emb_dim=emb_dim)
            model.get_layer('word_emb').embeddings.set_value(
                emb_reader.get_emb_matrix_given_vocab(
                    vocab_word,
                    model.get_layer('word_emb').embeddings.get_value()))
        logger.info('  Done')
        return model
Project: aes-gated-word-char    Author: unkn0wnxx    | Project source | File source
def create_word_lstm_model(self, emb_dim, emb_path, vocab_word,
                               vocab_word_size, word_maxlen):
        from keras.layers import LSTM
        logger.info('Building word LSTM model')
        input_word = Input(shape=(word_maxlen, ), name='input_word')
        word_emb = Embedding(
            vocab_word_size, emb_dim, mask_zero=True,
            name='word_emb')(input_word)
        lstm = LSTM(
            300,
            return_sequences=True,
            dropout=self.dropout,
            recurrent_dropout=self.recurrent_dropout)(word_emb)
        dropped = Dropout(0.5)(lstm)
        mot = MeanOverTime(mask_zero=True)(dropped)
        densed = Dense(self.num_outputs, name='dense')(mot)
        output = Activation('sigmoid')(densed)
        model = Model(inputs=input_word, outputs=output)
        model.get_layer('dense').bias.set_value(self.bias)
        if emb_path:
            from emb_reader import EmbReader as EmbReader
            logger.info('Initializing lookup table')
            emb_reader = EmbReader(emb_path, emb_dim=emb_dim)
            model.get_layer('word_emb').embeddings.set_value(
                emb_reader.get_emb_matrix_given_vocab(
                    vocab_word,
                    model.get_layer('word_emb').embeddings.get_value()))
        logger.info('  Done')
        return model
Project: aes-gated-word-char    Author: unkn0wnxx    | Project source | File source
def create_word_gru_model(self, emb_dim, emb_path, vocab_word,
                              vocab_word_size, word_maxlen):
        from keras.layers import GRU
        logger.info('Building word GRU model')
        input_word = Input(shape=(word_maxlen, ), name='input_word')
        word_emb = Embedding(
            vocab_word_size, emb_dim, mask_zero=True,
            name='word_emb')(input_word)
        gru = GRU(
            300,
            return_sequences=True,
            dropout=self.dropout,
            recurrent_dropout=self.recurrent_dropout)(word_emb)
        dropped = Dropout(0.5)(gru)
        mot = MeanOverTime(mask_zero=True)(dropped)
        densed = Dense(self.num_outputs, name='dense')(mot)
        output = Activation('sigmoid')(densed)
        model = Model(inputs=input_word, outputs=output)
        model.get_layer('dense').bias.set_value(self.bias)
        if emb_path:
            from emb_reader import EmbReader as EmbReader
            logger.info('Initializing lookup table')
            emb_reader = EmbReader(emb_path, emb_dim=emb_dim)
            model.get_layer('word_emb').embeddings.set_value(
                emb_reader.get_emb_matrix_given_vocab(
                    vocab_word,
                    model.get_layer('word_emb').embeddings.get_value()))
        logger.info('  Done')
        return model
Project: ParseLawDocuments    Author: FanhuaandLuomu    | Project source | File source
def Mem_Model(story_maxlen, query_maxlen, vocab_size):
    input_encoder_m = Input(shape=(story_maxlen,), dtype='int32', name='input_encoder_m')
    x = Embedding(output_dim=64, input_dim=vocab_size, input_length=story_maxlen)(input_encoder_m)
    x = Dropout(0.5)(x)

    question_encoder = Input(shape=(query_maxlen,), dtype='int32', name='question_encoder')
    y = Embedding(output_dim=64, input_dim=vocab_size, input_length=query_maxlen)(question_encoder)
    y = Dropout(0.5)(y)

    z = merge([x, y], mode='dot', dot_axes=[2, 2])
    # z = merge([x, y], mode='sum')
    match = Activation('softmax')(z)

    input_encoder_c = Input(shape=(story_maxlen,), dtype='int32', name='input_encoder_c')
    c = Embedding(output_dim=query_maxlen, input_dim=vocab_size, input_length=story_maxlen)(input_encoder_c)
    c = Dropout(0.5)(c)

    response = merge([match, c], mode='sum')
    w = Permute((2, 1))(response)

    answer = merge([w, y], mode='concat', concat_axis=-1)
    lstm = LSTM(32)(answer)
    lstm = Dropout(0.5)(lstm)
    main_loss = Dense(50, activation='sigmoid', name='main_output')(lstm)

    model = Model(input=[input_encoder_m, question_encoder, input_encoder_c], output=main_loss)
    return model
Project: VDCNN    Author: yuhsinliu1993    | Project source | File source
def build_model(num_filters, num_classes, sequence_max_length=512, num_quantized_chars=71, embedding_size=16, learning_rate=0.001, top_k=3, model_path=None):

    inputs = Input(shape=(sequence_max_length, ), dtype='int32', name='inputs')

    embedded_sent = Embedding(num_quantized_chars, embedding_size, input_length=sequence_max_length)(inputs)

    # First conv layer
    conv = Conv1D(filters=64, kernel_size=3, strides=2, padding="same")(embedded_sent)

    # Each ConvBlock with one MaxPooling Layer
    for i in range(len(num_filters)):
        conv = ConvBlockLayer(get_conv_shape(conv), num_filters[i])(conv)
        conv = MaxPooling1D(pool_size=3, strides=2, padding="same")(conv)

    # k-max pooling (Finds values and indices of the k largest entries for the last dimension)
    def _top_k(x):
        x = tf.transpose(x, [0, 2, 1])
        k_max = tf.nn.top_k(x, k=top_k)
        return tf.reshape(k_max[0], (-1, num_filters[-1] * top_k))
    k_max = Lambda(_top_k, output_shape=(num_filters[-1] * top_k,))(conv)

    # 3 fully-connected layer with dropout regularization
    fc1 = Dropout(0.2)(Dense(512, activation='relu', kernel_initializer='he_normal')(k_max))
    fc2 = Dropout(0.2)(Dense(512, activation='relu', kernel_initializer='he_normal')(fc1))
    fc3 = Dense(num_classes, activation='softmax')(fc2)

    # define optimizer
    sgd = SGD(lr=learning_rate, decay=1e-6, momentum=0.9, nesterov=False)
    model = Model(inputs=inputs, outputs=fc3)
    model.compile(optimizer=sgd, loss='mean_squared_error', metrics=['accuracy'])

    if model_path is not None:
        model.load_weights(model_path)

    return model
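
The _top_k lambda above implements k-max pooling: for each filter it keeps the k largest activations along the time axis, producing a fixed-size vector regardless of sequence length. A standalone sketch of the idea (shapes are illustrative, not the model above):

import numpy as np
import tensorflow as tf

x = tf.constant(np.random.rand(2, 10, 8), dtype=tf.float32)  # (batch, time, filters)
x_t = tf.transpose(x, [0, 2, 1])                             # (batch, filters, time)
top3 = tf.nn.top_k(x_t, k=3).values                          # (batch, filters, 3), sorted
flat = tf.reshape(top3, (-1, 8 * 3))                         # (batch, filters * k)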
Project: product-category-classifier    Author: two-tap    | Project source | File source
def build_text_model(word_index):
  text_input = Input(shape=(MAX_SEQUENCE_LENGTH,))

  embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))

  for word, i in word_index.items():
    embedding_vector = embeddings_index.get(word)

    if embedding_vector is not None:
      # words not found in embedding index will be all-zeros.
      embedding_matrix[i] = embedding_vector[:EMBEDDING_DIM]

  embedding_layer = Embedding(embedding_matrix.shape[0],
                              embedding_matrix.shape[1],
                              weights=[embedding_matrix],
                              input_length=MAX_SEQUENCE_LENGTH,
                              trainable=False)  # freeze the layer; setting .trainable on an output tensor has no effect

  x = embedding_layer(text_input)
  x = Conv1D(128, 5, activation='relu')(x)
  x = MaxPooling1D(5)(x)
  x = Conv1D(128, 5, activation='relu')(x)
  x = MaxPooling1D(5)(x)
  x = Flatten()(x)
  x = Dense(1024, activation='relu')(x)

  return x, text_input

Project: knowledgeflow    Author: 3rduncle    | Project source | File source
def buildEmbedding(self, name):
        weights = self.embedding_params.get('weights')
        assert weights
        self.layers[name] = Embedding(
            weights[0].shape[0],
            weights[0].shape[1],
            weights = weights,
            trainable = self.params.get('embedding_trainable', False),
            name=name
        )
Project: keras-recommendation    Author: sonyisme    | Project source | File source
def test_unitnorm_constraint(self):
        lookup = Sequential()
        lookup.add(Embedding(3, 2, weights=[self.W1], W_constraint=unitnorm()))
        lookup.add(Flatten())
        lookup.add(Dense(2, 1))
        lookup.add(Activation('sigmoid'))
        lookup.compile(loss='binary_crossentropy', optimizer='sgd', class_mode='binary')
        lookup.train(self.X1, np.array([[1], [0]], dtype='int32'))
        norm = np.linalg.norm(lookup.params[0].get_value(), axis=1)
        self.assertTrue(np.allclose(norm, np.ones_like(norm).astype('float32')))
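
The test above uses Keras 0.x spellings (W_constraint=unitnorm(), Dense(2, 1), class_mode). Under Keras 2 the same constraint attaches through embeddings_constraint; a sketch, assuming Keras 2, with axis=1 so each embedding row keeps unit L2 norm:

from keras.constraints import UnitNorm
from keras.layers import Embedding

# each of the 3 embedding vectors is rescaled to unit L2 norm after every update
layer = Embedding(input_dim=3, output_dim=2, embeddings_constraint=UnitNorm(axis=1))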
Project: keras_text_classifier    Author: cdj0311    | Project source | File source
def createmodel(self):
        """
        create cnn model structure
        :return: model structure
        """
        max_features = max(self.words.values()) + 1 # input dims
        model = Sequential()
        if self.W is None:
            model.add(Embedding(max_features, self.embedding_length, input_length=self.maxlen, dropout=0.2))
        else:
            model.add(Embedding(max_features, self.layer1_size, weights=[self.W], input_length=self.maxlen, dropout=0.2))

        model.add(Convolution1D(nb_filter=self.nb_filter,
                                filter_length=self.filter_length,
                                border_mode='valid',
                                activation='relu',
                                subsample_length=1))

        model.add(MaxPooling1D(pool_length=model.output_shape[1]))
        model.add(Flatten())
        model.add(Dense(self.hidden_dims))
        model.add(Dropout(0.2))
        model.add(Activation('relu'))
        model.add(Dense(self.nb_classes))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=["accuracy"])
        return model
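
One caveat on the snippet above: the dropout= argument of Embedding is a Keras 1 feature that was removed in Keras 2. A hedged Keras 2 equivalent (an assumption, not the project's code) applies SpatialDropout1D after the lookup:

from keras.models import Sequential
from keras.layers import Embedding, SpatialDropout1D

model = Sequential()
model.add(Embedding(20000, 128, input_length=100))  # no dropout kwarg in Keras 2
model.add(SpatialDropout1D(0.2))                    # drops entire embedding channels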