Python keras.layers module: Merge() code examples

The following 50 code examples were extracted from open-source Python projects to illustrate how to use keras.layers.Merge().
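Note: Merge (capital M) is the Keras 1.x layer for combining the outputs of several branch models (typically Sequential branches) according to a mode such as 'concat', 'sum', 'mul', 'ave' or 'dot'. It was removed in Keras 2, where the functional merge layers (Concatenate, Add, Multiply, Dot) and their lowercase helpers take its place. Below is a minimal sketch of the basic usage pattern, assuming Keras 1.x; the layer sizes and branch names are illustrative only and not taken from any of the projects listed here.

from keras.models import Sequential
from keras.layers import Dense, Merge

# Two branch models with different input dimensions (illustrative sizes).
left = Sequential()
left.add(Dense(32, input_dim=100, activation='relu'))

right = Sequential()
right.add(Dense(32, input_dim=50, activation='relu'))

# Merge the branch outputs; mode can be 'concat', 'sum', 'mul', 'ave', 'dot', ...
model = Sequential()
model.add(Merge([left, right], mode='concat'))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy')

In Keras 2 the equivalent is written with the functional API, e.g. keras.layers.concatenate([left_out, right_out]) applied to tensors rather than to models.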

Project: Image-Captioning    Author: Shobhit20
def create_model(self, ret_model = False):

        image_model = Sequential()
        image_model.add(Dense(EMBEDDING_DIM, input_dim = 4096, activation='relu'))
        image_model.add(RepeatVector(self.max_length))

        lang_model = Sequential()
        lang_model.add(Embedding(self.vocab_size, 256, input_length=self.max_length))
        lang_model.add(LSTM(256,return_sequences=True))
        lang_model.add(TimeDistributed(Dense(EMBEDDING_DIM)))

        model = Sequential()
        model.add(Merge([image_model, lang_model], mode='concat'))
        model.add(LSTM(1000,return_sequences=False))
        model.add(Dense(self.vocab_size))
        model.add(Activation('softmax'))

        print ("Model created!")

        if(ret_model==True):
            return model

        model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
        return model
Project: eva    Author: israelg99
def __call__(self, model):
        original = model

        tanh_out = CausalAtrousConvolution1D(self.filters, 2, atrous_rate=self.rate, border_mode='valid')(model)
        tanh_out = Activation('tanh')(tanh_out)

        sigm_out = CausalAtrousConvolution1D(self.filters, 2, atrous_rate=self.rate, border_mode='valid')(model)
        sigm_out = Activation('sigmoid')(sigm_out)

        model = Merge(mode='mul')([tanh_out, sigm_out])

        skip_x = Convolution1D(self.filters, 1, border_mode='same')(model)

        res_x = Convolution1D(self.filters, 1, border_mode='same')(model)
        res_x = Merge(mode='sum')([original, res_x])
        return res_x, skip_x
Project: eva    Author: israelg99
def __call__(self, model):
        if self.crop_right:
            model = Lambda(lambda x: x[:, :, :K.int_shape(x)[2]-1, :])(model)

        if self.v is not None:
            model = Merge(mode='sum')([model, self.v])

        if self.h is not None:
            hV = Dense(output_dim=2*self.filters)(self.h)
            hV = Reshape((1, 1, 2*self.filters))(hV)
            model = Lambda(lambda x: x[0]+x[1])([model,hV])

        model_f = Lambda(lambda x: x[:,:,:,:self.filters])(model)
        model_g = Lambda(lambda x: x[:,:,:,self.filters:])(model)

        model_f = Lambda(lambda x: K.tanh(x))(model_f)
        model_g = Lambda(lambda x: K.sigmoid(x))(model_g)

        res = Merge(mode='mul')([model_f, model_g])
        return res
Project: eva    Author: israelg99
def __call__(self, model1, model2=None):
        if model2 is None:
            h_model = model1
            filter_size = (7, 7)
        else:
            h_model = model2
            filter_size = (3, 3)

        v_model = PaddedConvolution2D(self.filters, filter_size, 'vertical')(model1)
        feed_vertical = FeedVertical(self.filters)(v_model)
        v_model = GatedBlock(self.filters, h=self.h)(v_model)

        h_model_new = PaddedConvolution2D(self.filters, filter_size, 'horizontal', 'A')(h_model)
        h_model_new = GatedBlock(self.filters, v=feed_vertical, h=self.h, crop_right=True)(h_model_new)
        h_model_new = Convolution2D(self.filters, 1, 1, border_mode='valid')(h_model_new)

        return (v_model, h_model_new if model2 is None else Merge(mode='sum')([h_model_new, h_model]))
Project: text_classification    Author: senochow
def CNNWithKeywordLayer(embed_matrix, embed_input, sequence_length, keywords_length, filter_sizes, num_filters, dropout_prob, hidden_dims, model_variation, embedding_dim=300):
    ''' Two-way input model: the left branch is a CNN for sentence embedding, the right branch encodes keywords

    '''
    embed1 = Embedding(embed_input, embedding_dim,input_length=sequence_length, weights=[embed_matrix])
    # 1. question model part
    question_branch = Sequential()
    cnn_model = TextCNN(sequence_length, embedding_dim, filter_sizes, num_filters)
    question_branch.add(embed1)
    question_branch.add(cnn_model)
    # 2. keyword model part
    #keyword_branch = KeywordLayer(keywords_length, embed_input, embedding_dim, embed_matrix)
    keyword_branch = LSTMLayer(embed_matrix, embed_input, keywords_length, dropout_prob, hidden_dims, embedding_dim)
    # 3. merge layer
    merged = Merge([question_branch, keyword_branch], mode='concat')
    final_model = Sequential()
    final_model.add(merged)
    final_model.add(Dense(hidden_dims, W_constraint = maxnorm(3)))
    final_model.add(Dropout(0.5))
    final_model.add(Activation('relu'))
    final_model.add(Dense(1))
    final_model.add(Activation('sigmoid'))
    #sgd = SGD(lr=0.01, momentum=0.9)
    final_model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    return final_model
Project: text_classification    Author: senochow
def QuestionWithAnswersModel(embed_matrix, embed_input, sequence_length, ans_cnt, keywords_length, filter_sizes, num_filters, dropout_prob, hidden_dims, embedding_dim=300):
    ''' path1: question embedding (CNN model)
        path2: answer embedding (Hierarchical RNN model)
        merge
    '''
    # path 1
    embed1 = Embedding(embed_input, embedding_dim,input_length=sequence_length, weights=[embed_matrix])
    question_branch = Sequential()
    cnn_model = TextCNN(sequence_length, embedding_dim, filter_sizes, num_filters)
    question_branch.add(embed1)
    question_branch.add(cnn_model)
    # path 2
    answer_branch = HierarchicalRNN(embed_matrix, embed_input, ans_cnt, keywords_length, embedding_dim)
    merged = Merge([question_branch, answer_branch], mode='concat')
    final_model = Sequential()
    final_model.add(merged)
    final_model.add(Dense(hidden_dims, W_constraint = maxnorm(3)))
    final_model.add(Dropout(0.5))
    final_model.add(Activation('relu'))
    final_model.add(Dense(1))
    final_model.add(Activation('sigmoid'))
    final_model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    return final_model
Project: keras-resnet    Author: codekansas
def call(self, x, mask=None):
        layer_output = self.layer.call(x, mask)
        if isinstance(self.merge_mode, str):
            self.merge_mode = Merge(mode=self.merge_mode)
        output = self.merge_mode([x, layer_output])
        return output
Project: keras-resnet    Author: codekansas
def get_config(self):
        config = {"merge_mode": {'class_name': 'Merge',
                                 'config': self.merge_mode.get_config()}}
        base_config = super(Residual, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
Project: gandlf    Author: codekansas
def call(self, x, mask=None):
        layer_output = self.layer.call(x, mask)
        if isinstance(self.merge_mode, str):
            self.merge_mode = Merge(mode=self.merge_mode)
        output = self.merge_mode([x, layer_output])
        return output
Project: gandlf    Author: codekansas
def get_config(self):
        config = {'merge_mode': {'class_name': 'Merge',
                                 'config': self.merge_mode.get_config()}}
        base_config = super(Residual, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
Project: visualqa    Author: AndreiBarsan
def __init__(self, lang_model: language_models.ALanguageModel,
                 img_model: image_models.AImageModel, language_only,
                 num_hidden_units, activation, dropout, num_hidden_layers, nb_classes):
        """
        :param lang_model: the language model to use
        :param img_model: the image model to use
        :param language_only: use a language model only to answer question and ignore images
        :param num_hidden_units: number of hidden units per hidden layer
        :param activation: activation function type
        :param dropout: fraction of nodes which should be dropped out in each training step,
        between 0 and 1.
        :param num_hidden_layers: the number of hidden layers
        :param nb_classes: the number of possible answers we allow (softmax size in the end)
        """
        # Start constructing the Keras model.
        model = Sequential()

        if language_only:
            # Language only means we *ignore the images* and only rely on the
            # question to compute an answer. Interestingly enough, this does not
            # suck horribly.
            model.add(Merge([lang_model.model()], mode='concat', concat_axis=1))
        else:
            model.add(Merge([lang_model.model(), img_model.model()], mode='concat', concat_axis=1))

        if dropout > 0:
            model.add(Dropout(dropout))

        for i in range(num_hidden_layers):
            model.add(Dense(num_hidden_units, init='uniform'))
            model.add(Activation(activation))
            if dropout > 0:
                model.add(Dropout(dropout))

        model.add(Dense(nb_classes, init='uniform'))
        model.add(Activation('softmax'))

        print('Compiling Keras model...')
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        print('Compilation done...')
        self._model = model
Project: eva    Author: israelg99
def Wavenet(input_shape, filters, depth, stacks, last=0, h=None, build=True):
    # TODO: Soft targets? A float to make targets a gaussian with stdev.
    # TODO: Train only receptive field. The temporal-first outputs are computed from zero-padding.
    # TODO: Global conditioning?
    # TODO: Local conditioning?

    _, nb_bins = input_shape

    input_audio = Input(input_shape, name='audio_input')

    model = CausalAtrousConvolution1D(filters, 2, mask_type='A', atrous_rate=1, border_mode='valid')(input_audio)

    out, skip_connections = WavenetBlocks(filters, depth, stacks)(model)

    out = Merge(mode='sum', name='merging_skips')(skip_connections)
    out = PReLU()(out)

    out = Convolution1D(nb_bins, 1, border_mode='same')(out)
    out = PReLU()(out)

    out = Convolution1D(nb_bins, 1, border_mode='same')(out)

    # https://storage.googleapis.com/deepmind-live-cms/documents/BlogPost-Fig2-Anim-160908-r01.gif
    if last > 0:
        out = Lambda(lambda x: x[:, -last:], output_shape=(last, out._keras_shape[2]), name='last_out')(out)

    out = Activation('softmax')(out)

    if build:
        model = Model(input_audio, out)
        model.compile(Nadam(), 'sparse_categorical_crossentropy')

    return model
Project: eva    Author: israelg99
def __call__(self, model):
        # 2h -> h
        block = PReLU()(model)
        block = MaskedConvolution2D(self.filters//2, 1, 1)(block)

        # h 3x3 -> h
        block = PReLU()(block)
        block = MaskedConvolution2D(self.filters//2, 3, 3, border_mode='same')(block)

        # h -> 2h
        block = PReLU()(block)
        block = MaskedConvolution2D(self.filters, 1, 1)(block)

        return Merge(mode='sum')([model, block])
Project: DeepLearn    Author: GauravBh1010tt
def prepare_model(ninputs=9600, n_feats=45,nclass=4,n_tfidf=10001):
    inp1 = Input(shape=(ninputs,))
    inp2 = Input(shape=(n_feats,))
    inp3 = Input(shape=(n_tfidf,))
    reg = 0.00005
    out_neurons1 = 500
    #out_neurons2 = 20
    #out_neurons2 = 10
    m1 = Dense(input_dim=ninputs, output_dim=out_neurons1,activation='sigmoid'\
                      ,kernel_regularizer=regularizers.l2(0.00000001))(inp1)
    m1 = Dropout(0.2)(m1)
    m1 = Dense(100,activation='sigmoid')(m1)
    #m1 = Dropout(0.2)(m1)
    #m1 = Dense(4, activation='sigmoid')(m1)

    #m2 = Dense(input_dim=n_feats, output_dim=n_feats,activation='relu')(inp2)
    m2 = Dense(50,activation='relu')(inp2)
    #m2=Dense(4,activation='relu')(m2)

    m3 = Dense(500, input_dim=n_tfidf, activation='relu',\
                    kernel_regularizer=regularizers.l2(reg))(inp3)

    m3 = Dropout(0.4)(m3)
    m3 = Dense(50, activation='relu')(m3)
    #m3 = Dropout(0.4)(m3)
    #m3 = Dense(4, activation='softmax')(m3)


    #m1 = Dense(input_dim=ninputs, output_dim=out_neurons2,activation='sigmoid')(m1)
    #m2 = Dense(input_dim=ninputs, output_dim=out_neurons2,activation='softmax')(m2)

    m = Merge(mode='concat')([m1,m2,m3])

    #mul = Multiply()([m1,m2])
    #add = Abs()([m1,m2])
    #m = Merge(mode='concat')([mul,add])

    score = Dense(output_dim=nclass,activation='softmax')(m)
    model = Model([inp1,inp2,inp3],score)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    return model
Project: DeepLearn    Author: GauravBh1010tt
def prepare_model2(ninputs=9600, n_feats=45,nclass=4,n_tfidf=10001):
    inp1 = Input(shape=(ninputs,))
    inp2 = Input(shape=(n_feats,))
    inp3 = Input(shape=(n_tfidf,))
    reg = 0.00005
    out_neurons1 = 500
    #out_neurons2 = 20
    #out_neurons2 = 10
    m1 = Dense(input_dim=ninputs, output_dim=out_neurons1,activation='sigmoid'\
                      ,kernel_regularizer=regularizers.l2(0.00000001))(inp1)
    m1 = Dropout(0.2)(m1)
    m1 = Dense(100,activation='sigmoid')(m1)
    #m1 = Dropout(0.2)(m1)
    #m1 = Dense(4, activation='sigmoid')(m1)

    m2 = Dense(input_dim=n_feats, output_dim=n_feats,activation='relu')(inp2)
    m2 = Dense(4,activation='relu')(inp2)
    #m2=Dense(4,activation='relu')(m2)

    m3 = Dense(500, input_dim=n_tfidf, activation='relu',\
                    kernel_regularizer=regularizers.l2(reg))(inp3)

    m3 = Dropout(0.4)(m3)
    m3 = Dense(50, activation='relu')(m3)
    #m3 = Dropout(0.4)(m3)
    #m3 = Dense(4, activation='softmax')(m3)


    #m1 = Dense(input_dim=ninputs, output_dim=out_neurons2,activation='sigmoid')(m1)
    #m2 = Dense(input_dim=ninputs, output_dim=out_neurons2,activation='softmax')(m2)

    m = Merge(mode='concat')([m1,m2,m3])

    #mul = Multiply()([m1,m2])
    #add = Abs()([m1,m2])
    #m = Merge(mode='concat')([mul,add])

    score = Dense(output_dim=nclass,activation='softmax')(m)
    model = Model([inp1,inp2,inp3],score)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    return model
Project: MovieTaster-Open    Author: lujiaying
def skipgram_model(vocab_size, embedding_dim=100, paradigm='Functional'):
    # Sequential paradigm
    if paradigm == 'Sequential':
        target = Sequential()
        target.add(Embedding(vocab_size, embedding_dim, input_length=1))
        context = Sequential()
        context.add(Embedding(vocab_size, embedding_dim, input_length=1))

        # merge the pivot and context models
        model = Sequential()
        model.add(Merge([target, context], mode='dot'))
        model.add(Reshape((1,), input_shape=(1,1)))
        model.add(Activation('sigmoid'))
        model.compile(optimizer='adam', loss='binary_crossentropy')
        return model

    # Functional paradigm
    elif paradigm == 'Functional':
        target = Input(shape=(1,), name='target')
        context = Input(shape=(1,), name='context')
        #print target.shape, context.shape
        shared_embedding = Embedding(vocab_size, embedding_dim, input_length=1, name='shared_embedding')
        embedding_target = shared_embedding(target)
        embedding_context = shared_embedding(context)
        #print embedding_target.shape, embedding_context.shape

        merged_vector = dot([embedding_target, embedding_context], axes=-1)
        reshaped_vector = Reshape((1,), input_shape=(1,1))(merged_vector)
        #print merged_vector.shape
        prediction = Dense(1, input_shape=(1,), activation='sigmoid')(reshaped_vector)
        #print prediction.shape

        model = Model(inputs=[target, context], outputs=prediction)
        model.compile(optimizer='adam', loss='binary_crossentropy')
        return model

    else:
        print('paradigm error')
        return None
Project: Youtube8mdataset_kagglechallenge    Author: jasonlee27
def load_model(self, frm_modelweights=''):

        first_model = Sequential()
        first_model.add(LSTM(2048,
                             input_shape=(m, self.feature_size),
                             return_sequences=False,
                             activation='relu',
                             name='fc1_left'))
        first_model.add(Dropout(0.5))
        first_model.add(LSTM(2048,
                             return_sequences=False,
                             activation='relu',
                             name='fc1_left'))

        second_model = Sequential()
        second_model.add(LSTM(2048,
                              input_shape=(n - m, self.feature_size),
                              return_sequences=False,
                              activation='relu',
                              name='fc1_right'))
        second_model.add(Dropout(0.5))
        second_model.add(LSTM(2048,
                              return_sequences=False,
                              activation='relu',
                              name='fc1_right'))

        model = Sequential()
        model.add(Merge([first_model, second_model], mode='concat'))
        model.add(Dense(4096, activation='relu', name='fc2'))
        model.add(Dropout(0.5))
        model.add(Dense(self.numclasses, activation='softmax', name='prediction'))

        if frm_modelweights:
            model.load_weights(frm_modelweights, by_name=True)
            print("Frame model loaded with weights from %s." % frm_modelweights)
        else:
            print "Empty frame model loaded."

        return model
Project: Youtube8mdataset_kagglechallenge    Author: jasonlee27
def load_model(self, frm_modelweights='', frmdiff_modelweights=''):
        frm_model = Sequential()
        frm_model.add(GRU(4096,
                          return_sequences=True,
                          input_dim=self.feature_size,
                          input_length=MAX_FRAMES,
                          activation='relu',
                          name='fc1'))
        frm_model.add(Dropout(0.3))
        frm_model.add(GRU(4096,
                          return_sequences=False,
                          activation='relu',
                          name='fc2'))
        frm_model.add(Dropout(0.3))
        frm_model.add(Dense(self.numclasses, activation='softmax', name='frm_prediction'))
        if frm_modelweights:
            frm_model.load_weights(frm_modelweights, by_name=True)
            print("Frame model loaded with weights from %s." % frm_modelweights)
        else:
            print "Empty frame model loaded."

        '''
        frmdiff_model = Sequential()
        frmdiff_model.add(GRU(4096, input_dim=self.feature_size, activation='relu', name='fc1'))
        frmdiff_model.add(Dropout(0.3))
        frmdiff_model.add(GRU(4096, activation='relu', name='fc2'))
        frmdiff_model.add(Dropout(0.3))
        frmdiff_model.add(Dense(self.numclasses, activation='softmax', name='frmdiff_feature'))

        if frmdiff_modelweights:
            frmdiff_model.load_weights(frmdiff_modelweights, by_name=True)
            print('Frame model loaded with weights from %s.' % frmdiff_modelweights)
        else:
            print "Empty frame model loaded."

        model = Sequential()
        model.add(Merge([frm_model, frmdiff_model], mode='concat'))
        model.add(Dense(self.numclasses, activation='softmax', name='predictions'))
        '''

        return frm_model
Project: VQA_Keras    Author: iamaaditya
def model(args):
    # Image model
    model_image = Sequential()
    model_image.add(Reshape((args.img_vec_dim,), input_shape=(args.img_vec_dim,)))
    model_image.add(Dense(args.num_hidden_units_mlp))
    model_image.add(Activation(args.activation_1))
    model_image.add(Dropout(args.dropout))


    # Language Model
    model_language = Sequential()
    model_language.add(Embedding(args.vocabulary_size, args.word_emb_dim, input_length=args.max_ques_length))
    model_language.add(LSTM(args.num_hidden_units_lstm, return_sequences=True, input_shape=(args.max_ques_length, args.word_emb_dim)))
    model_language.add(LSTM(args.num_hidden_units_lstm, return_sequences=True))
    model_language.add(LSTM(args.num_hidden_units_lstm, return_sequences=False))
    model_language.add(Dense(args.num_hidden_units_mlp))
    model_language.add(Activation(args.activation_1))
    model_language.add(Dropout(args.dropout))


    # combined model
    model = Sequential()
    model.add(Merge([model_language, model_image], mode='mul'))

    # for _ in xrange(number_of_dense_layers):
    for i in xrange(args.num_hidden_layers_mlp):
        model.add(Dense(args.num_hidden_units_mlp))
        model.add(Activation(args.activation_1))
        model.add(Dropout(args.dropout))

    model.add(Dense(args.nb_classes))
    model.add(Activation(args.class_activation))


    return model
Project: VQA_Keras    Author: iamaaditya
def model(args):

    # Image model
    model_image = Sequential()
    model_image.add(Reshape((args.img_vec_dim,), input_shape=(args.img_vec_dim,)))

    # Language Model
    model_language = Sequential()
    model_language.add(Embedding(args.vocabulary_size, args.word_emb_dim, input_length=args.max_ques_length))
    model_language.add(LSTM(args.num_hidden_units_lstm, return_sequences=True, input_shape=(args.max_ques_length, args.word_emb_dim)))
    model_language.add(LSTM(args.num_hidden_units_lstm, return_sequences=True))
    model_language.add(LSTM(args.num_hidden_units_lstm, return_sequences=False))

    # combined model
    model = Sequential()
    model.add(Merge([model_language, model_image], mode='concat', concat_axis=1))


    for i in xrange(args.num_hidden_layers_mlp):
        model.add(Dense(args.num_hidden_units_mlp))
        model.add(Dropout(args.dropout))

    model.add(Dense(args.nb_classes))
    model.add(Activation(args.class_activation))

    return model
Project: VQA-Keras-Visual-Question-Answering    Author: anantzoid
def vqa_model(embedding_matrix, num_words, embedding_dim, seq_length, dropout_rate, num_classes):
    vgg_model = img_model(dropout_rate)
    lstm_model = Word2VecModel(embedding_matrix, num_words, embedding_dim, seq_length, dropout_rate)
    print "Merging final model..."
    fc_model = Sequential()
    fc_model.add(Merge([vgg_model, lstm_model], mode='mul'))
    fc_model.add(Dropout(dropout_rate))
    fc_model.add(Dense(1000, activation='tanh'))
    fc_model.add(Dropout(dropout_rate))
    fc_model.add(Dense(num_classes, activation='softmax'))
    fc_model.compile(optimizer='rmsprop', loss='categorical_crossentropy',
        metrics=['accuracy'])
    return fc_model
Project: NeuralNetwork-ImageQA    Author: ayushoriginal
def vis_lstm():
    embedding_matrix = embedding.load()
    embedding_model = Sequential()
    embedding_model.add(Embedding(
        embedding_matrix.shape[0],
        embedding_matrix.shape[1],
        weights = [embedding_matrix],
        trainable = False))

    image_model = Sequential()
    image_model.add(Dense(
        embedding_matrix.shape[1],
        input_dim=4096,
        activation='linear'))
    image_model.add(Reshape((1,embedding_matrix.shape[1])))

    main_model = Sequential()
    main_model.add(Merge(
        [image_model,embedding_model],
        mode = 'concat',        
        concat_axis = 1))
    main_model.add(LSTM(1001))
    main_model.add(Dropout(0.5))
    main_model.add(Dense(1001,activation='softmax'))

    return main_model
Project: NeuralNetwork-ImageQA    Author: ayushoriginal
def vis_lstm_2():
    embedding_matrix = embedding.load()
    embedding_model = Sequential()
    embedding_model.add(Embedding(
        embedding_matrix.shape[0],
        embedding_matrix.shape[1],
        weights = [embedding_matrix],
        trainable = False))

    image_model_1 = Sequential()
    image_model_1.add(Dense(
        embedding_matrix.shape[1],
        input_dim=4096,
        activation='linear'))
    image_model_1.add(Reshape((1,embedding_matrix.shape[1])))

    image_model_2 = Sequential()
    image_model_2.add(Dense(
        embedding_matrix.shape[1],
        input_dim=4096,
        activation='linear'))
    image_model_2.add(Reshape((1,embedding_matrix.shape[1])))

    main_model = Sequential()
    main_model.add(Merge(
        [image_model_1,embedding_model,image_model_2],
        mode = 'concat',
        concat_axis = 1))
    main_model.add(LSTM(1001))
    main_model.add(Dropout(0.5))
    main_model.add(Dense(1001,activation='softmax'))

    return main_model
Project: jamespy_py3    Author: jskDr
def generator_containing_discriminator_ae(generator, discriminator):
    model_left = Sequential()
    model_left.add(generator)
    discriminator.trainable = False
    model_left.add(discriminator)

    model_right = Sequential()
    model_right.add(generator)
    model_right.add(Reshape((784,)))

    model = Sequential()
    model.add(Merge([model_left, model_right], mode='concat', concat_axis=1))
    return model
Project: jamespy_py3    Author: jskDr
def generator_containing_discriminator_ae(generator, discriminator):
    model_left = Sequential()
    model_left.add(generator)
    discriminator.trainable = False
    model_left.add(discriminator)

    model_right = Sequential()
    model_right.add(generator)
    model_right.add(Reshape((784,)))

    model = Sequential()
    model.add(Merge([model_left, model_right], mode='concat', concat_axis=1))
    return model
Project: jamespy_py3    Author: jskDr
def generator_containing_discriminator_ae(self, generator, discriminator):
        model_left = Sequential()
        model_left.add(generator)
        discriminator.trainable = False
        model_left.add(discriminator)

        model_right = Sequential()
        model_right.add(generator)
        model_right.add(Reshape((144*144,)))

        model = Sequential()
        model.add(Merge([model_left, model_right], mode='concat', concat_axis=1))
        return model
Project: jamespy_py3    Author: jskDr
def generator_containing_discriminator_ae(self, generator, discriminator):
        model_left = Sequential()
        model_left.add(generator)
        discriminator.trainable = False
        model_left.add(discriminator)

        model_right = Sequential()
        model_right.add(generator)
        model_right.add(Reshape((144*144,)))

        model = Sequential()
        model.add(Merge([model_left, model_right], mode='concat', concat_axis=1))
        return model
Project: jamespy_py3    Author: jskDr
def generator_containing_discriminator_ae(generator, discriminator):
    model_left = Sequential()
    model_left.add(generator)
    discriminator.trainable = False
    model_left.add(discriminator)

    model_right = Sequential()
    model_right.add(generator)
    model_right.add(Reshape((784,)))

    model = Sequential()
    model.add(Merge([model_left, model_right], mode='concat', concat_axis=1))
    return model
Project: VQA-Demo-GUI    Author: anujshah1003
def VQA_MODEL():
    image_feature_size          = 4096
    word_feature_size           = 300
    number_of_LSTM              = 3
    number_of_hidden_units_LSTM = 512
    max_length_questions        = 30
    number_of_dense_layers      = 3
    number_of_hidden_units      = 1024
    activation_function         = 'tanh'
    dropout_pct                 = 0.5


    # Image model
    model_image = Sequential()
    model_image.add(Reshape((image_feature_size,), input_shape=(image_feature_size,)))

    # Language Model
    model_language = Sequential()
    model_language.add(LSTM(number_of_hidden_units_LSTM, return_sequences=True, input_shape=(max_length_questions, word_feature_size)))
    model_language.add(LSTM(number_of_hidden_units_LSTM, return_sequences=True))
    model_language.add(LSTM(number_of_hidden_units_LSTM, return_sequences=False))

    # combined model
    model = Sequential()
    model.add(Merge([model_language, model_image], mode='concat', concat_axis=1))

    for _ in xrange(number_of_dense_layers):
        model.add(Dense(number_of_hidden_units, init='uniform'))
        model.add(Activation(activation_function))
        model.add(Dropout(dropout_pct))

    model.add(Dense(1000))
    model.add(Activation('softmax'))

    return model
Project: semeval2017-scienceie    Author: UKPLab
def build_cnn_char_complex(input_dim, output_dim,nb_filter):
    randomEmbeddingLayer = Embedding(input_dim,32, input_length=maxlen,dropout=0.1)
    poolingLayer = Lambda(max_1d, output_shape=(nb_filter,))
    conv_filters = []
    for n_gram in range(2,4):
        ngramModel = Sequential()
        ngramModel.add(randomEmbeddingLayer)
        ngramModel.add(Convolution1D(nb_filter=nb_filter,
                                     filter_length=n_gram,
                                     border_mode="valid",
                                     activation="relu",
                                     subsample_length=1))
        ngramModel.add(poolingLayer)
        conv_filters.append(ngramModel)

    clf = Sequential()
    clf.add(Merge(conv_filters,mode="concat"))
    clf.add(Activation("relu"))
    clf.add(Dense(100))
    clf.add(Dropout(0.1))
    clf.add(Activation("tanh"))
    clf.add(Dense(output_dim=output_dim, activation='softmax'))

    clf.compile(optimizer='adagrad',
                     loss='categorical_crossentropy',
                     metrics=['accuracy'])
    return clf
Project: text_classification    Author: senochow
def TextCNN(sequence_length, embedding_dim, filter_sizes, num_filters):
    ''' Convolutional Neural Network, including conv + pooling

    Args:
        sequence_length: length of the input sequences
        embedding_dim: dimensionality of the word embeddings
        filter_sizes:  sizes of the convolution filters
        num_filters: number of filters per filter size

    Returns:
        features extracted by CNN
    '''
    graph_in = Input(shape=(sequence_length, embedding_dim))
    convs = []
    for fsz in filter_sizes:
        conv = Convolution1D(nb_filter=num_filters,
                         filter_length=fsz,
                         border_mode='valid',
                         activation='relu',
                         subsample_length=1)(graph_in)
        pool = MaxPooling1D()(conv)
        flatten = Flatten()(pool)
        convs.append(flatten)
    if len(filter_sizes)>1:
        out = Merge(mode='concat')(convs)
    else:
        out = convs[0]
    graph = Model(input=graph_in, output=out)
    return graph
Project: 2016CCF-SouGou    Author: AbnerYang
def build_model(cat, hidden_dim):
    graph_in = Input(shape=(sequence_length, embedding_dim))
    convs = []
    for fsz in filter_sizes:
        conv = Convolution1D(nb_filter=num_filters,
                             filter_length=fsz,
                             border_mode='valid',
                             activation='relu',
                             subsample_length=1)(graph_in)
        pool = MaxPooling1D(pool_length=2)(conv)
        flatten = Flatten()(pool)
        convs.append(flatten)

    if len(filter_sizes)>1:
        out = Merge(mode='concat')(convs)
    else:
        out = convs[0]

    graph = Model(input=graph_in, output=out)

    # main sequential model
    model = Sequential()
    if not model_variation=='CNN-static':
        model.add(Embedding(len(vocabulary), embedding_dim, input_length=sequence_length,
                            weights=embedding_weights))
    model.add(Dropout(dropout_prob[0], input_shape=(sequence_length, embedding_dim)))
    model.add(graph)
    model.add(Dense(hidden_dim))
    model.add(Dropout(dropout_prob[1]))
    model.add(Activation('relu'))
    model.add(Dense(cat))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
Project: lipnet    Author: grishasergei
def build_model(self, models, input_dims, output_dim):
        keras_models = [None] * len(models)
        for i, m in enumerate(models):
            m.build_model(input_dims[i], output_dim)
            keras_models[i] = m.model
        merged = Merge(keras_models, mode='concat')
        self.model.add(merged)
        self.model.add(Dense(output_dim, activation='softmax'))
        self.model.compile(optimizer='adam',
                           loss='categorical_crossentropy',
                           metrics=['accuracy'])
Project: caption_generator    Author: anuragmishracse
def create_model(self, ret_model = False):
        #base_model = VGG16(weights='imagenet', include_top=False, input_shape = (224, 224, 3))
        #base_model.trainable=False
        image_model = Sequential()
        #image_model.add(base_model)
        #image_model.add(Flatten())
        image_model.add(Dense(EMBEDDING_DIM, input_dim = 4096, activation='relu'))

        image_model.add(RepeatVector(self.max_cap_len))

        lang_model = Sequential()
        lang_model.add(Embedding(self.vocab_size, 256, input_length=self.max_cap_len))
        lang_model.add(LSTM(256,return_sequences=True))
        lang_model.add(TimeDistributed(Dense(EMBEDDING_DIM)))

        model = Sequential()
        model.add(Merge([image_model, lang_model], mode='concat'))
        model.add(LSTM(1000,return_sequences=False))
        model.add(Dense(self.vocab_size))
        model.add(Activation('softmax'))

        print "Model created!"

        if(ret_model==True):
            return model

        model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
        return model
Project: VQA    Author: VedantYadav
def vis_lstm():
    embedding_matrix = embedding.load()
    embedding_model = Sequential()
    embedding_model.add(Embedding(
        embedding_matrix.shape[0],
        embedding_matrix.shape[1],
        weights = [embedding_matrix],
        trainable = False))

    image_model = Sequential()
    image_model.add(Dense(
        embedding_matrix.shape[1],
        input_dim=4096,
        activation='linear'))
    image_model.add(Reshape((1,embedding_matrix.shape[1])))

    main_model = Sequential()
    main_model.add(Merge(
        [image_model,embedding_model],
        mode = 'concat',        
        concat_axis = 1))
    main_model.add(LSTM(1001))
    main_model.add(Dropout(0.5))
    main_model.add(Dense(1001,activation='softmax'))

    return main_model
Project: VQA    Author: VedantYadav
def vis_lstm_2():
    embedding_matrix = embedding.load()
    embedding_model = Sequential()
    embedding_model.add(Embedding(
        embedding_matrix.shape[0],
        embedding_matrix.shape[1],
        weights = [embedding_matrix],
        trainable = False))

    image_model_1 = Sequential()
    image_model_1.add(Dense(
        embedding_matrix.shape[1],
        input_dim=4096,
        activation='linear'))
    image_model_1.add(Reshape((1,embedding_matrix.shape[1])))

    image_model_2 = Sequential()
    image_model_2.add(Dense(
        embedding_matrix.shape[1],
        input_dim=4096,
        activation='linear'))
    image_model_2.add(Reshape((1,embedding_matrix.shape[1])))

    main_model = Sequential()
    main_model.add(Merge(
        [image_model_1,embedding_model,image_model_2],
        mode = 'concat',
        concat_axis = 1))
    main_model.add(LSTM(1001))
    main_model.add(Dropout(0.5))
    main_model.add(Dense(1001,activation='softmax'))

    return main_model
Project: sota_sentiment    Author: jbarnesspain
def create_cnn(W, max_length, dim=300,
               dropout=.5, output_dim=8):

    # Convolutional model
    filter_sizes=(2,3,4)
    num_filters = 3


    graph_in = Input(shape=(max_length, len(W[0])))
    convs = []
    for fsz in filter_sizes:
        conv = Convolution1D(nb_filter=num_filters,
                 filter_length=fsz,
                 border_mode='valid',
                 activation='relu',
                 subsample_length=1)(graph_in)
        pool = MaxPooling1D(pool_length=2)(conv)
        flatten = Flatten()(pool)
        convs.append(flatten)

    out = Merge(mode='concat')(convs)
    graph = Model(input=graph_in, output=out)

    # Full model
    model = Sequential()
    model.add(Embedding(output_dim=W.shape[1],
                        input_dim=W.shape[0],
                        input_length=max_length, weights=[W],
                        trainable=True))
    model.add(Dropout(dropout))
    model.add(graph)
    model.add(Dense(dim, activation='relu'))
    model.add(Dropout(dropout))
    model.add(Dense(output_dim, activation='softmax'))
    if output_dim == 2:
        model.compile('adam', 'binary_crossentropy',
                  metrics=['accuracy'])
    else:
        model.compile('adam', 'categorical_crossentropy',
                  metrics=['accuracy'])
    return model
Project: BMM_attentional_CNN    Author: dvatterott
def minst_attention(inc_noise=False, attention=True):
    #make layers
    inputs = Input(shape=(1,image_size,image_size),name='input')

    conv_1a = Convolution2D(32, 3, 3,activation='relu',name='conv_1')
    maxp_1a = MaxPooling2D((3, 3), strides=(2,2),name='convmax_1')
    norm_1a = crosschannelnormalization(name="convpool_1")
    zero_1a = ZeroPadding2D((2,2),name='convzero_1')

    conv_2a = Convolution2D(32,3,3,activation='relu',name='conv_2')
    maxp_2a = MaxPooling2D((3, 3), strides=(2,2),name='convmax_2')
    norm_2a = crosschannelnormalization(name="convpool_2")
    zero_2a = ZeroPadding2D((2,2),name='convzero_2')

    dense_1a = Lambda(global_average_pooling,output_shape=global_average_pooling_shape,name='dense_1')
    dense_2a = Dense(10, activation = 'softmax', init='uniform',name='dense_2')

    #make actual model
    if inc_noise:
        inputs_noise = noise.GaussianNoise(2.5)(inputs)
        input_pad = ZeroPadding2D((1,1),input_shape=(1,image_size,image_size),name='input_pad')(inputs_noise)
    else:
        input_pad = ZeroPadding2D((1,1),input_shape=(1,image_size,image_size),name='input_pad')(inputs)

    conv_1 = conv_1a(input_pad)
    conv_1 = maxp_1a(conv_1)
    conv_1 = norm_1a(conv_1)
    conv_1 = zero_1a(conv_1)

    conv_2_x = conv_2a(conv_1)
    conv_2 = maxp_2a(conv_2_x)
    conv_2 = norm_2a(conv_2)
    conv_2 = zero_2a(conv_2)
    conv_2 = Dropout(0.5)(conv_2)

    dense_1 = dense_1a(conv_2)
    dense_2 = dense_2a(dense_1)

    conv_shape1 = Lambda(change_shape1,output_shape=(32,),name='chg_shape')(conv_2_x)
    find_att = dense_2a(conv_shape1)

    if attention:
        find_att = Lambda(attention_control,output_shape=att_shape,name='att_con')([find_att,dense_2])
    else:
        find_att = Lambda(no_attention_control,output_shape=att_shape,name='att_con')([find_att,dense_2])

    zero_3a = ZeroPadding2D((1,1),name='convzero_3')(find_att)
    apply_attention  = Merge(mode='mul',name='attend')([zero_3a,conv_1])

    conv_3 = conv_2a(apply_attention)
    conv_3 = maxp_2a(conv_3)
    conv_3 = norm_2a(conv_3)
    conv_3 = zero_2a(conv_3)

    dense_3 = dense_1a(conv_3)
    dense_4 = dense_2a(dense_3)

    model = Model(input=inputs,output=dense_4)

    return model
Project: DeepLearn    Author: GauravBh1010tt
def buildModel(loss_type,lamda):

    inpx = Input(shape=(dimx,))
    inpy = Input(shape=(dimy,))

    hx = Reshape((28,14,1))(inpx)
    hx = Conv2D(128, (3, 3), activation='relu', padding='same')(hx)
    hx = MaxPooling2D((2, 2), padding='same')(hx)
    hx = Conv2D(64, (3, 3), activation='relu', padding='same')(hx)
    hx = MaxPooling2D((2, 2), padding='same')(hx)
    hx = Conv2D(49, (3, 3), activation='relu', padding='same')(hx)
    hx = MaxPooling2D((2, 2), padding='same')(hx)
    hx = Flatten()(hx)
    hx1 = Dense(hdim_deep,activation='sigmoid')(hx)
    hx2 = Dense(hdim_deep2, activation='sigmoid',name='hid_l1')(hx1)
    hx = Dense(hdim, activation='sigmoid',name='hid_l')(hx2)

    hy = Reshape((28,14,1))(inpy)
    hy = Conv2D(128, (3, 3), activation='relu', padding='same')(hy)
    hy = MaxPooling2D((2, 2), padding='same')(hy)
    hy = Conv2D(64, (3, 3), activation='relu', padding='same')(hy)
    hy = MaxPooling2D((2, 2), padding='same')(hy)
    hy = Conv2D(49, (3, 3), activation='relu', padding='same')(hy)
    hy = MaxPooling2D((2, 2), padding='same')(hy)
    hy = Flatten()(hy)
    hy1 = Dense(hdim_deep,activation='sigmoid')(hy)
    hy2 = Dense(hdim_deep2, activation='sigmoid',name='hid_r1')(hy1)
    hy = Dense(hdim, activation='sigmoid',name='hid_r')(hy2)

    h =  Merge(mode="sum")([hx,hy]) 

    recx = Dense(dimx)(h)
    recy = Dense(dimy)(h)

    branchModel = Model( [inpx,inpy],[recx,recy,h,hx1,hy1,hx2,hy2])

    [recx1,recy1,h1,_,_,_,_] = branchModel( [inpx, ZeroPadding()(inpy)])
    [recx2,recy2,h2,_,_,_,_] = branchModel( [ZeroPadding()(inpx), inpy ])

    #you may probably add a reconstruction from combined
    [recx3,recy3,h3,hx_1,hy_1,hx_2,hy_2] = branchModel([inpx, inpy])

    lamda2,lamda3 = 0.001,0.05

    corr1=CorrnetCost(-lamda)([h1,h2])
    corr2=CorrnetCost(-lamda2)([hx_1,hy_1])
    corr3=CorrnetCost(-lamda3)([hx_2,hy_2])

    model = Model( [inpx,inpy],[recy1,recx2,recx1,recy2,corr1,corr2,corr3])
    model.compile( loss=["mse","mse","mse","mse",corr_loss,corr_loss,corr_loss],optimizer="rmsprop")

    return model, branchModel
Project: DeepLearn    Author: GauravBh1010tt
def prepare_model(ninputs=9600,n_feats=47, nclass=5):
    """
    Set up and compile the model architecture (Logistic regression)
    """
    inp1 = Input(shape=(ninputs,))
    inp2 = Input(shape=(n_feats,))
    out_neurons1 = 50
    out_neurons2 = 20
    out_neurons2 = 10
    m1 = Dense(input_dim=ninputs, output_dim=out_neurons1,activation='sigmoid')(inp1)
    m2 = Dense(input_dim=ninputs, output_dim=out_neurons1,activation='softmax')(inp2)

    m1 = Dense(input_dim=ninputs, output_dim=out_neurons2,activation='sigmoid')(m1)
    m2 = Dense(input_dim=ninputs, output_dim=out_neurons2,activation='softmax')(m2)

    #m1 = Dense(input_dim=ninputs, output_dim=out_neurons2,activation='sigmoid')(m1)
    #m2 = Dense(input_dim=ninputs, output_dim=out_neurons2,activation='softmax')(m2)

    m = Merge(mode='concat')([m1,m2])

    #mul = Multiply()([m1,m2])
    #add = Abs()([m1,m2])
    #m = Merge(mode='concat')([mul,add])

    score = Dense(output_dim=nclass,activation='softmax')(m)
    model = Model([inp1,inp2],score)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    return model
    '''
    lrmodel = Sequential()
    lrmodel.add(Dense(input_dim=ninputs, output_dim=nclass))
    #lrmodel.add(Activation('softmax'))
    #lrmodel.compile(loss='categorical_crossentropy', optimizer='adam')

    #return lrmodel

    model_feat = Sequential()
    model_feat.add(Dense(input_dim=27, output_dim=nclass))
    merge_model = Sequential()
    merge_model.add(Merge([lrmodel, model_feat], mode='concat'))
    merge_model.add(Dense(output_dim=nclass))
    merge_model.add(Activation('softmax'))
    merge_model.compile(loss='categorical_crossentropy', optimizer='adam')
    return merge_model'''

    '''lrmodel.add(Dense(input_dim=ninputs, output_dim=1000,activation = 'relu'))
    lrmodel.add(Dropout(0.5))
    lrmodel.add(Dense(output_dim=500,activation = 'relu'))
    lrmodel.add(Dropout(0.5))
    lrmodel.add(Dense(output_dim=nclass))'''
    #return merge_model
Project: DeepLearn    Author: GauravBh1010tt
def trainCNN(obj, dataset_headLines, dataset_body):
    embedding_dim = 300
    LSTM_neurons = 50
    dense_neuron = 16
    dimx = 100
    dimy = 200
    lamda = 0.0
    nb_filter = 100
    filter_length = 4
    vocab_size = 10000
    batch_size = 50
    epochs = 5
    ntn_out = 16
    ntn_in = nb_filter 
    state = False


    train_head,train_body,embedding_matrix = obj.process_data(sent_Q=dataset_headLines,
                                                     sent_A=dataset_body,dimx=dimx,dimy=dimy,
                                                     wordVec_model = wordVec_model)    
    inpx = Input(shape=(dimx,),dtype='int32',name='inpx')
    #x = Embedding(output_dim=embedding_dim, input_dim=vocab_size, input_length=dimx)(inpx)
    x = word2vec_embedding_layer(embedding_matrix)(inpx)  
    inpy = Input(shape=(dimy,),dtype='int32',name='inpy')
    #y = Embedding(output_dim=embedding_dim, input_dim=vocab_size, input_length=dimy)(inpy)
    y = word2vec_embedding_layer(embedding_matrix)(inpy)
    ques = Convolution1D(nb_filter=nb_filter, filter_length=filter_length,
                         border_mode='valid', activation='relu',
                         subsample_length=1)(x)

    ans = Convolution1D(nb_filter=nb_filter, filter_length=filter_length,
                        border_mode='valid', activation='relu',
                        subsample_length=1)(y)

    #hx = Lambda(max_1d, output_shape=(nb_filter,))(ques)
    #hy = Lambda(max_1d, output_shape=(nb_filter,))(ans)
    hx = GlobalMaxPooling1D()(ques)
    hy = GlobalMaxPooling1D()(ans)
    #wordVec_model = []
    #h =  Merge(mode="concat",name='h')([hx,hy])

    h1 = Multiply()([hx,hy])
    h2 = Abs()([hx,hy])

    h =  Merge(mode="concat",name='h')([h1,h2])
    #h = NeuralTensorLayer(output_dim=1,input_dim=ntn_in)([hx,hy])
    #h = ntn_layer(ntn_in,ntn_out,activation=None)([hx,hy])
    #score = h
    wrap = Dense(dense_neuron, activation='relu',name='wrap')(h)
    #score = Dense(1,activation='sigmoid',name='score')(h)
    #wrap = Dense(dense_neuron,activation='relu',name='wrap')(h)
    score = Dense(4,activation='softmax',name='score')(wrap)

    #score=K.clip(score,1e-7,1.0-1e-7)
    #corr = CorrelationRegularization(-lamda)([hx,hy])
    #model = Model( [inpx,inpy],[score,corr])
    model = Model( [inpx,inpy],score)
    model.compile( loss='categorical_crossentropy',optimizer="adadelta",metrics=['accuracy'])    
    return model,train_head,train_body
Project: DeepLearn    Author: GauravBh1010tt
def buildModel(loss_type,lamda):

    inpx = Input(shape=(dimx,))
    inpy = Input(shape=(dimy,))

    hx = Dense(hdim_deep,activation='sigmoid')(inpx)
    hx = Dense(hdim_deep2, activation='sigmoid',name='hid_l1')(hx)
    hx = Dense(hdim, activation='sigmoid',name='hid_l')(hx)

    hy = Dense(hdim_deep,activation='sigmoid')(inpy)
    hy = Dense(hdim_deep2, activation='sigmoid',name='hid_r1')(hy)
    hy = Dense(hdim, activation='sigmoid',name='hid_r')(hy)

    #h = Activation("sigmoid")( Merge(mode="sum")([hx,hy]) )
    h =  Merge(mode="sum")([hx,hy]) 

    #recx = Dense(hdim_deep,activation='sigmoid')(h)
    recx = Dense(dimx)(h)
    #recy = Dense(hdim_deep,activation='sigmoid')(h)
    recy = Dense(dimy)(h)

    branchModel = Model( [inpx,inpy],[recx,recy,h])

    #inpx = Input(shape=(dimx,))
    #inpy = Input(shape=(dimy,))

    [recx1,recy1,h1] = branchModel( [inpx, ZeroPadding()(inpy)])
    [recx2,recy2,h2] = branchModel( [ZeroPadding()(inpx), inpy ])

    #you may probably add a reconstruction from combined
    [recx3,recy3,h] = branchModel([inpx, inpy])

    corr=CorrnetCost(-lamda)([h1,h2])

    if loss_type == 1:
        model = Model( [inpx,inpy],[recy1,recx2,recx3,recx1,recy2,recy3,corr])
        model.compile( loss=["mse","mse","mse","mse","mse","mse",corr_loss],optimizer="rmsprop")
    elif loss_type == 2:
        model = Model( [inpx,inpy],[recy1,recx2,recx1,recy2,corr])
        model.compile( loss=["mse","mse","mse","mse",corr_loss],optimizer="rmsprop")
    elif loss_type == 3:
        model = Model( [inpx,inpy],[recy1,recx2,recx3,recx1,recy2,recy3])
        model.compile( loss=["mse","mse","mse","mse","mse","mse"],optimizer="rmsprop")
    elif loss_type == 4:
        model = Model( [inpx,inpy],[recy1,recx2,recx1,recy2])
        model.compile( loss=["mse","mse","mse","mse"],optimizer="rmsprop")

    return model, branchModel
Project: ParseLawDocuments    Author: FanhuaandLuomu
def Mem_Model2(story_maxlen,query_maxlen,vocab_size):
    input_encoder_m = Sequential()
    input_encoder_m.add(Embedding(input_dim=vocab_size,
                                  output_dim=128,
                                  input_length=story_maxlen))
    input_encoder_m.add(Dropout(0.5))
    # output: (samples, story_maxlen, embedding_dim)
    # embed the question into a sequence of vectors
    question_encoder = Sequential()
    question_encoder.add(Embedding(input_dim=vocab_size,
                                   output_dim=128,
                                   input_length=query_maxlen))
    question_encoder.add(Dropout(0.5))
    # output: (samples, query_maxlen, embedding_dim)
    # compute a 'match' between input sequence elements (which are vectors)
    # and the question vector sequence
    match = Sequential()
    match.add(Merge([input_encoder_m, question_encoder],
                    mode='dot',
                    dot_axes=[2, 2]))
    match.add(Activation('softmax'))

    plot(match,to_file='model_1.png')

    # output: (samples, story_maxlen, query_maxlen)
    # embed the input into a single vector with size = story_maxlen:
    input_encoder_c = Sequential()
    # input_encoder_c.add(Embedding(input_dim=vocab_size,
    #                               output_dim=query_maxlen,
    #                               input_length=story_maxlen))
    input_encoder_c.add(Embedding(input_dim=vocab_size,
                                  output_dim=query_maxlen,
                                  input_length=story_maxlen))
    input_encoder_c.add(Dropout(0.5))
    # output: (samples, story_maxlen, query_maxlen)
    # sum the match vector with the input vector:
    response = Sequential()
    response.add(Merge([match, input_encoder_c], mode='sum'))
    # output: (samples, story_maxlen, query_maxlen)
    response.add(Permute((2, 1)))  # output: (samples, query_maxlen, story_maxlen)

    plot(response,to_file='model_2.png')

    # concatenate the match vector with the question vector,
    # and do logistic regression on top
    answer = Sequential()
    answer.add(Merge([response, question_encoder], mode='concat', concat_axis=-1))
    # the original paper uses a matrix multiplication for this reduction step.
    # we choose to use a RNN instead.
    answer.add(LSTM(64))
    # one regularization layer -- more would probably be needed.
    answer.add(Dropout(0.5))
    answer.add(Dense(50))
    # we output a probability distribution over the vocabulary
    answer.add(Activation('sigmoid'))

    return answer

# ??????? ?????k???1
Project: wavenet    Author: basveeling
def build_model(fragment_length, nb_filters, nb_output_bins, dilation_depth, nb_stacks, use_skip_connections,
                learn_all_outputs, _log, desired_sample_rate, use_bias, res_l2, final_l2):
    def residual_block(x):
        original_x = x
        # TODO: initalization, regularization?
        # Note: The AtrousConvolution1D with the 'causal' flag is implemented in github.com/basveeling/keras#@wavenet.
        tanh_out = CausalAtrousConvolution1D(nb_filters, 2, atrous_rate=2 ** i, border_mode='valid', causal=True,
                                             bias=use_bias,
                                             name='dilated_conv_%d_tanh_s%d' % (2 ** i, s), activation='tanh',
                                             W_regularizer=l2(res_l2))(x)
        sigm_out = CausalAtrousConvolution1D(nb_filters, 2, atrous_rate=2 ** i, border_mode='valid', causal=True,
                                             bias=use_bias,
                                             name='dilated_conv_%d_sigm_s%d' % (2 ** i, s), activation='sigmoid',
                                             W_regularizer=l2(res_l2))(x)
        x = layers.Merge(mode='mul', name='gated_activation_%d_s%d' % (i, s))([tanh_out, sigm_out])

        res_x = layers.Convolution1D(nb_filters, 1, border_mode='same', bias=use_bias,
                                     W_regularizer=l2(res_l2))(x)
        skip_x = layers.Convolution1D(nb_filters, 1, border_mode='same', bias=use_bias,
                                      W_regularizer=l2(res_l2))(x)
        res_x = layers.Merge(mode='sum')([original_x, res_x])
        return res_x, skip_x

    input = Input(shape=(fragment_length, nb_output_bins), name='input_part')
    out = input
    skip_connections = []
    out = CausalAtrousConvolution1D(nb_filters, 2, atrous_rate=1, border_mode='valid', causal=True,
                                    name='initial_causal_conv')(out)
    for s in range(nb_stacks):
        for i in range(0, dilation_depth + 1):
            out, skip_out = residual_block(out)
            skip_connections.append(skip_out)

    if use_skip_connections:
        out = layers.Merge(mode='sum')(skip_connections)
    out = layers.Activation('relu')(out)
    out = layers.Convolution1D(nb_output_bins, 1, border_mode='same',
                               W_regularizer=l2(final_l2))(out)
    out = layers.Activation('relu')(out)
    out = layers.Convolution1D(nb_output_bins, 1, border_mode='same')(out)

    if not learn_all_outputs:
        raise DeprecationWarning('Learning on just all outputs is wasteful, now learning only inside receptive field.')
        out = layers.Lambda(lambda x: x[:, -1, :], output_shape=(out._keras_shape[-1],))(
            out)  # Based on gif in deepmind blog: take last output?

    out = layers.Activation('softmax', name="output_softmax")(out)
    model = Model(input, out)

    receptive_field, receptive_field_ms = compute_receptive_field()

    _log.info('Receptive Field: %d (%dms)' % (receptive_field, int(receptive_field_ms)))
    return model
Project: tartarus    Author: sergiooramas
def get_model_4(params):
    embedding_weights = pickle.load(open(common.TRAINDATA_DIR+"/embedding_weights_w2v_%s.pk" % params['embeddings_suffix'],"rb"))
    graph_in = Input(shape=(params['sequence_length'], params['embedding_dim']))
    convs = []
    for fsz in params['filter_sizes']:
        conv = Convolution1D(nb_filter=params['num_filters'],
                             filter_length=fsz,
                             border_mode='valid',
                             activation='relu',
                             subsample_length=1)
        x = conv(graph_in)
        logging.debug("Filter size: %s" % fsz)
        logging.debug("Output CNN: %s" % str(conv.output_shape))

        pool = GlobalMaxPooling1D()
        x = pool(x)
        logging.debug("Output Pooling: %s" % str(pool.output_shape))
        convs.append(x)

    if len(params['filter_sizes'])>1:
        merge = Merge(mode='concat')
        out = merge(convs)
        logging.debug("Merge: %s" % str(merge.output_shape))
    else:
        out = convs[0]

    graph = Model(input=graph_in, output=out)

    # main sequential model
    model = Sequential()
    if not params['model_variation']=='CNN-static':
        model.add(Embedding(len(embedding_weights[0]), params['embedding_dim'], input_length=params['sequence_length'],
                            weights=embedding_weights))
    model.add(Dropout(params['dropout_prob'][0], input_shape=(params['sequence_length'], params['embedding_dim'])))
    model.add(graph)
    model.add(Dense(params['n_dense']))
    model.add(Dropout(params['dropout_prob'][1]))
    model.add(Activation('relu'))

    model.add(Dense(output_dim=params["n_out"], init="uniform"))
    model.add(Activation(params['final_activation']))
    logging.debug("Output CNN: %s" % str(model.output_shape))

    if params['final_activation'] == 'linear':
        model.add(Lambda(lambda x :K.l2_normalize(x, axis=1)))

    return model

# word2vec ARCH with LSTM
Project: CNN-Sentence-Classifier    Author: shagunsodhani
def _predefined_model(args, embedding_matrix):
    '''function to use one of the predefined models (based on the paper)'''
    (filtersize_list, number_of_filters_per_filtersize, pool_length_list,
     dropout_list, optimizer, use_embeddings, embeddings_trainable) \
        = _param_selector(args)

    if (use_embeddings):
        embedding_layer = Embedding(args.nb_words + 1,
                                    args.embedding_dim,
                                    weights=[embedding_matrix],
                                    input_length=args.max_sequence_len,
                                    trainable=embeddings_trainable)
    else:
        embedding_layer = Embedding(args.nb_words + 1,
                                    args.embedding_dim,
                                    weights=None,
                                    input_length=args.max_sequence_len,
                                    trainable=embeddings_trainable)

    print('Defining model.')

    input_node = Input(shape=(args.max_sequence_len, args.embedding_dim))
    conv_list = []
    for index, filtersize in enumerate(filtersize_list):
        nb_filter = number_of_filters_per_filtersize[index]
        pool_length = pool_length_list[index]
        conv = Conv1D(nb_filter=nb_filter, filter_length=filtersize, activation='relu')(input_node)
        pool = MaxPooling1D(pool_length=pool_length)(conv)
        flatten = Flatten()(pool)
        conv_list.append(flatten)

    if (len(filtersize_list) > 1):
        out = Merge(mode='concat')(conv_list)
    else:
        out = conv_list[0]

    graph = Model(input=input_node, output=out)

    model = Sequential()
    model.add(embedding_layer)
    model.add(Dropout(dropout_list[0], input_shape=(args.max_sequence_len, args.embedding_dim)))
    model.add(graph)
    model.add(Dense(150))
    model.add(Dropout(dropout_list[1]))
    model.add(Activation('relu'))
    model.add(Dense(args.len_labels_index, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['acc'])
    return model
Project: video-action-recognition    Author: ap916    | Project source | File source
def CNN():
    input_frames=10
    batch_size=10
    nb_classes = 101
    nb_epoch = 10
    img_rows, img_cols = 150,150
    img_channels = 2*input_frames
    chunk_size=8
    requiredLines = 1000
    total_predictions = 0
    correct_predictions = 0

    print('Loading dictionary...')
    with open('../dataset/temporal_test_data.pickle','rb') as f1:
        temporal_test_data=pickle.load(f1)

    t_model = prepareTemporalModel(img_channels,img_rows,img_cols,nb_classes)
    f_model = prepareFeaturesModel(nb_classes,requiredLines)

    merged_layer = Merge([t_model, f_model], mode='ave')
    model = Sequential()
    model.add(merged_layer)
    model.add(Dense(nb_classes, W_regularizer=l2(0.01)))
    model.add(Activation('softmax'))
    model.load_weights('combined_merge_model.h5')

    print('Compiling model...')
    gc.collect()
    sgd = SGD(lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True,clipnorm=0.1)
    model.compile(loss='hinge',optimizer=sgd, metrics=['accuracy'])

    keys=temporal_test_data.keys()
    random.shuffle(keys)

    # Evaluating the merged model on chunks of the test data.
    for chunk in chunks(keys,chunk_size):

        tX_test,tY_test=t_getTrainData(chunk,nb_classes,img_rows,img_cols)
        fX_test,fY_test=f_getTrainData(chunk,nb_classes,requiredLines)
        if (tX_test is not None and fX_test is not None):
                preds = model.predict([tX_test,fX_test])
                print (preds)
                print ('-'*40)
                print (tY_test)

                total_predictions += fX_test.shape[0]
                correct_predictions += totalCorrectPred(preds,tY_test)

                comparisons=[]
                maximum=np.argmax(tY_test,axis=1)
                for i,j in enumerate(maximum):
                    comparisons.append(preds[i][j])
                with open('compare.txt','a') as f1:
                    f1.write(str(comparisons))
                    f1.write('\n\n')
    print "\nThe accuracy was found out to be: ",str(correct_predictions*100/total_predictions)
Project: video-action-recognition    Author: ap916    | Project source | File source
def CNN():
    input_frames=10
    batch_size=10
    nb_classes = 101
    nb_epoch = 10
    img_rows, img_cols = 150,150
    img_channels = 2*input_frames
    chunk_size=8
    requiredLines = 1000
    total_predictions = 0
    correct_predictions = 0

    print('Loading dictionary...')
    with open('../dataset/temporal_test_data.pickle','rb') as f1:
        temporal_test_data=pickle.load(f1)

    t_model = prepareTemporalModel(img_channels,img_rows,img_cols,nb_classes)
    f_model = prepareFeaturesModel(nb_classes,requiredLines)

    merged_layer = Merge([t_model, f_model], mode='ave')
    model = Sequential()
    model.add(merged_layer)
    model.load_weights('combined_average_model.h5')
    print('Compiling model...')
    gc.collect()
    sgd = SGD(lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True,clipnorm=0.1)
    model.compile(loss='categorical_crossentropy',optimizer=sgd, metrics=['accuracy'])

    keys=temporal_test_data.keys()
    random.shuffle(keys)

    # Evaluating the merged model on chunks of the test data.
    for chunk in chunks(keys,chunk_size):

        tX_test,tY_test=t_getTrainData(chunk,nb_classes,img_rows,img_cols)
        fX_test,fY_test=f_getTrainData(chunk,nb_classes,requiredLines)
        if (tX_test is not None and fX_test is not None):
                preds = model.predict([tX_test,fX_test])
                print (preds)
                print ('-'*40)
                print (tY_test)

                total_predictions += fX_test.shape[0]
                correct_predictions += totalCorrectPred(preds,tY_test)

                comparisons=[]
                maximum=np.argmax(tY_test,axis=1)
                for i,j in enumerate(maximum):
                    comparisons.append(preds[i][j])
                with open('compare.txt','a') as f1:
                    f1.write(str(comparisons))
                    f1.write('\n\n')
    print "\nThe accuracy was found out to be: ",str(correct_predictions*100/total_predictions)
Project: snli_dual_encoder_lstm    Author: hist0613    | Project source | File source
def load_model():
    if not os.path.exists(TRAINED_CLASSIFIER_PATH):
        print("No pre-trained model...")
        print("Start building model...")

        print("Now loading SNLI data...")
        X_train_1, X_train_2, Y_train, X_test_1, X_test_2, Y_test, X_dev_1, X_dev_2, Y_dev = load_data()

        print("Now loading embedding matrix...")
        embedding_matrix = load_embedding_matrix()

        print("Now building dual encoder lstm model...")
        # define lstm for sentence1
        branch1 = Sequential()
        branch1.add(Embedding(output_dim=EMBEDDING_DIM,
                              input_dim=MAX_NB_WORDS,
                              input_length=MAX_SEQUENCE_LENGTH,
                              weights=[embedding_matrix],
                              mask_zero=True,
                              trainable=False))
        branch1.add(LSTM(output_dim=LSTM_DIM))

        # define lstm for sentence2
        branch2 = Sequential()
        branch2.add(Embedding(output_dim=EMBEDDING_DIM,
                              input_dim=MAX_NB_WORDS,
                              input_length=MAX_SEQUENCE_LENGTH,
                              weights=[embedding_matrix],
                              mask_zero=True,
                              trainable=False))
        branch2.add(LSTM(output_dim=LSTM_DIM))

        # define classifier model
        model = Sequential()
        # Merge(mode='mul') combines the two encoders by element-wise product (it adds no weights of its own)
        model.add(Merge([branch1, branch2], mode='mul'))
        model.add(Dense(3))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer=OPTIMIZER,
                      metrics=['accuracy'])

        print("Now training the model...")
        print("\tbatch_size={}, nb_epoch={}".format(BATCH_SIZE, NB_EPOCH))
        model.fit([X_train_1, X_train_2], Y_train,
                  batch_size=BATCH_SIZE, nb_epoch=NB_EPOCH,
                  validation_data=([X_test_1, X_test_2], Y_test))

        print("Now saving the model... at {}".format(TRAINED_CLASSIFIER_PATH))
        model.save(TRAINED_CLASSIFIER_PATH)

    else:
        print("Found pre-trained model...")
        model = K_load_model(TRAINED_CLASSIFIER_PATH)

    return model
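
Once load_model() returns, the dual encoder is queried with one padded index sequence per branch; Merge(mode='mul') multiplies the two sentence encodings element-wise before the 3-way softmax. A minimal inference sketch, reusing the MAX_SEQUENCE_LENGTH constant from this project; the random index arrays are placeholders for real tokenised, padded sentence pairs and assume the indices stay below MAX_NB_WORDS.

import numpy as np

model = load_model()

# One batch of padded word-index sequences per branch (placeholders for real data).
premise = np.random.randint(1, 1000, size=(4, MAX_SEQUENCE_LENGTH))
hypothesis = np.random.randint(1, 1000, size=(4, MAX_SEQUENCE_LENGTH))

# Output is a (4, 3) softmax over the SNLI labels.
probs = model.predict([premise, hypothesis])
labels = probs.argmax(axis=1)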
Project: semeval2017-scienceie    Author: UKPLab    | Project source | File source
def build_cnn_char_threeModels(input_dim, output_dim,nb_filter,filter_size=3):
    left = Sequential()
    left.add(Embedding(input_dim,
             32, # character embedding size
             input_length=L,
             dropout=0.2))
    left.add(Convolution1D(nb_filter=nb_filter,
                          filter_length=filter_size,border_mode="valid",activation="relu",subsample_length=1))
    left.add(GlobalMaxPooling1D())
    left.add(Dense(100))
    left.add(Dropout(0.2))
    left.add(Activation("tanh"))

    center = Sequential()
    center.add(Embedding(input_dim,
             32, # character embedding size
             input_length=M,
             dropout=0.2))
    center.add(Convolution1D(nb_filter=nb_filter,
                          filter_length=filter_size,border_mode="valid",activation="relu",subsample_length=1))
    center.add(GlobalMaxPooling1D())
    center.add(Dense(100))
    center.add(Dropout(0.2))
    center.add(Activation("tanh"))

    right = Sequential()
    right.add(Embedding(input_dim,
             32, # character embedding size
             input_length=R,
             dropout=0.2))
    right.add(Convolution1D(nb_filter=nb_filter,
                          filter_length=filter_size,border_mode="valid",activation="relu",subsample_length=1))
    right.add(GlobalMaxPooling1D())
    right.add(Dense(100))
    right.add(Dropout(0.2))
    right.add(Activation("tanh"))

    clf = Sequential()
    clf.add(Merge([left,center,right],mode="concat"))
    clf.add(Dense(output_dim=output_dim, activation='softmax'))

    clf.compile(optimizer='adagrad',
                     loss='categorical_crossentropy',
                     metrics=['accuracy'])
    return clf
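
A usage sketch for the three-branch character CNN above, assuming Keras 1.x and that it runs in the same module, so the window lengths L, M and R it sets are the ones the Embedding layers read (in the original project they are module-level constants). The alphabet size, label count and random index arrays are illustrative placeholders for real left/centre/right character windows.

import numpy as np

L, M, R = 30, 20, 30           # illustrative context-window lengths
n_chars, n_labels = 80, 5      # illustrative alphabet and label sizes

clf = build_cnn_char_threeModels(input_dim=n_chars, output_dim=n_labels, nb_filter=64)

# One character-index matrix per branch, in the order the branches were merged.
X_left = np.random.randint(0, n_chars, size=(16, L))
X_center = np.random.randint(0, n_chars, size=(16, M))
X_right = np.random.randint(0, n_chars, size=(16, R))
y = np.eye(n_labels)[np.random.randint(0, n_labels, size=16)]   # one-hot labels

clf.fit([X_left, X_center, X_right], y, nb_epoch=1, batch_size=8)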