Python keras.layers.core 模块,Merge() 实例源码

我们从Python开源项目中,提取了以下50个代码示例,用于说明如何使用keras.layers.core.Merge()

项目:taxi    作者:xuguanggen    | 项目源码 | 文件源码
def build_mlp(n_con,n_emb,vocabs_size,n_dis,emb_size,cluster_size):
    """Build an MLP over merged continuous and embedded discrete features.

    One Dense branch handles the continuous inputs; each discrete feature
    gets its own Embedding+Flatten branch. All branches are concatenated
    and fed through a softmax over clusters, then mapped to a 2-D point
    by `caluate_point`.
    """
    hidden_size = 800

    # Branch for the continuous inputs.
    continuous_branch = Sequential()
    continuous_branch.add(Dense(input_dim=n_con, output_dim=emb_size))

    # One embedding branch per discrete feature column.
    embedding_branches = []
    for idx in range(n_emb):
        branch = Sequential()
        branch.add(Embedding(input_dim=vocabs_size[idx],
                             output_dim=emb_size,
                             input_length=n_dis))
        branch.add(Flatten())
        embedding_branches.append(branch)

    model = Sequential()
    model.add(Merge([continuous_branch] + embedding_branches, mode='concat'))
    model.add(BatchNormalization())
    model.add(Dense(hidden_size, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(cluster_size, activation='softmax'))
    # Lambda maps the cluster distribution to a 2-D coordinate.
    model.add(Lambda(caluate_point, output_shape=[2]))
    return model
项目:DeepST    作者:lucktroy    | 项目源码 | 文件源码
def seqCNN_CPT(c_conf=(4, 3, 32, 32), p_conf=(4, 3, 32, 32), t_conf=(4, 3, 32, 32)):
    '''
    C - Temporal Closeness
    P - Period
    T - Trend
    conf = (nb_flow, seq_len, map_height, map_width)
    '''
    model = Sequential()
    components = []

    for conf in (c_conf, p_conf, t_conf):
        if conf is None:
            continue
        components.append(seqCNNBaseLayer1(conf))
        # Output channel count comes from the last non-None conf.
        nb_flow = conf[0]

    # Stack all branch outputs along the channel axis.
    model.add(Merge(components, mode='concat', concat_axis=1))

    # Two conv+relu stages, then a conv+tanh head with nb_flow channels.
    for n_filters in (64, 64):
        model.add(Convolution2D(n_filters, 3, 3, border_mode='same'))
        model.add(Activation('relu'))

    model.add(Convolution2D(nb_flow, 3, 3, border_mode='same'))
    model.add(Activation('tanh'))
    return model
项目:deep-coref    作者:clarkkev    | 项目源码 | 文件源码
def test_merge_dot1(self):
    """Merge two Dense+relu branches with a dot product on axis 1."""
    print('Test merge: dot')
    branches = []
    for _ in range(2):
        branch = Sequential()
        branch.add(Dense(input_dim=input_dim, output_dim=nb_hidden))
        branch.add(Activation('relu'))
        branches.append(branch)

    model = Sequential()
    model.add(Merge(branches, mode='dot', dot_axes=1))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
项目:deep-coref    作者:clarkkev    | 项目源码 | 文件源码
def test_merge_dot2(self):
    """Same dot merge as test_merge_dot1, but with per-input axis lists."""
    print('Test merge: dot')
    branches = []
    for _ in range(2):
        branch = Sequential()
        branch.add(Dense(input_dim=input_dim, output_dim=nb_hidden))
        branch.add(Activation('relu'))
        branches.append(branch)

    model = Sequential()
    model.add(Merge(branches, mode='dot', dot_axes=([1], [1])))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
项目:visual_turing_test-tutorial    作者:mateuszmalinowski    | 项目源码 | 文件源码
def create(self):
    """Bidirectional recurrent encoder over fixed (non-learnt) embeddings.

    Builds forward and backward recurrent branches over the same input
    shape, merges them, and attaches the deep-MLP softmax head.
    """
    assert self._config.textual_embedding_dim == 0, \
            'Embedding cannot be learnt but must be fixed'

    encoder_input_shape = (self._config.max_input_time_steps,
                           self._config.input_dim)

    # Forward-direction encoder.
    language_forward = Sequential()
    language_forward.add(self._config.recurrent_encoder(
        self._config.hidden_state_dim, return_sequences=False,
        input_shape=encoder_input_shape))
    self.language_forward = language_forward

    # Backward-direction encoder over the same input.
    language_backward = Sequential()
    language_backward.add(self._config.recurrent_encoder(
        self._config.hidden_state_dim, return_sequences=False,
        go_backwards=True,
        input_shape=encoder_input_shape))
    self.language_backward = language_backward

    self.add(Merge([language_forward, language_backward]))
    self.deep_mlp()
    self.add(Dense(self._config.output_dim))
    self.add(Activation('softmax'))
项目:visual_turing_test-tutorial    作者:mateuszmalinowski    | 项目源码 | 文件源码
def create(self):
        """Sentence encoder: masked 1-D convolution over embeddings,
        temporal pooling, then the deep-MLP softmax head.

        The pooling operator is selected by `merge_mode` (max/ave/sum).
        """
        assert self._config.merge_mode in ['max', 'ave', 'sum'], \
                'Merge mode of this model is either max, ave or sum'

        # Embedding without zero-masking; the masked variant is kept
        # below as a disabled alternative.
        self.textual_embedding(self, mask_zero=False)
        #self.textual_embedding(self, mask_zero=True)
        self.add(MaskedConvolution1D(
            nb_filter=self._config.language_cnn_filters,
            filter_length=self._config.language_cnn_filter_length,
            border_mode='valid',
            activation=self._config.language_cnn_activation,
            subsample_length=1))
        # Pool over the time axis (max/ave/sum per merge_mode).
        self.temporal_pooling(self)
        #self.add(DropMask())
        self.deep_mlp()
        self.add(Dense(self._config.output_dim))
        self.add(Activation('softmax'))
项目:visual_turing_test-tutorial    作者:mateuszmalinowski    | 项目源码 | 文件源码
def create(self):
    """Multi-view CNN sentence encoder.

    Builds one Convolution1D branch per filter length 1..language_cnn_views
    (each over its own embedding), concatenates the pooled branch outputs,
    and attaches the deep-MLP softmax head.

    Fix: the loop used Python-2-only `xrange`, which raises NameError on
    Python 3; `range` behaves identically here. The preallocated list with
    index assignment is replaced by a plain append.
    """
    assert self._config.merge_mode in ['max', 'ave', 'sum'], \
            'Merge mode of this model is either max, ave or sum'

    model_list = []
    for filter_length in range(1, self._config.language_cnn_views + 1):
        current_view = Sequential()
        self.textual_embedding(current_view, mask_zero=True)
        current_view.add(Convolution1D(
            nb_filter=self._config.language_cnn_filters,
            filter_length=filter_length,
            border_mode='valid',
            activation=self._config.language_cnn_activation,
            subsample_length=1))
        self.temporal_pooling(current_view)
        model_list.append(current_view)

    self.add(Merge(model_list, mode='concat'))
    self.deep_mlp()
    self.add(Dense(self._config.output_dim))
    self.add(Activation('softmax'))
项目:deep-mil-for-whole-mammogram-classification    作者:wentaozhu    | 项目源码 | 文件源码
def convolution2Dgroup(n_group, nb_filter, nb_row, nb_col, **kwargs):
    """Grouped 2-D convolution: split the channel axis into `n_group`
    slices, convolve each slice with its own filters, then concatenate
    the results back along the channel axis."""
    def f(input):
        group_outputs = []
        for group_id in range(n_group):
            sliced = splittensor(axis=1,
                                 ratio_split=n_group,
                                 id_split=group_id)(input)
            group_outputs.append(
                Convolution2D(nb_filter // n_group, nb_row, nb_col)(sliced))
        return Merge(group_outputs, mode='concat', concat_axis=1)

    return f
项目:deep-mil-for-whole-mammogram-classification    作者:wentaozhu    | 项目源码 | 文件源码
def convolution2Dgroup(n_group, nb_filter, nb_row, nb_col, **kwargs):
    """AlexNet-style grouped convolution.

    Each of `n_group` channel-axis slices is convolved independently with
    nb_filter // n_group filters; outputs are concatenated on the channel
    axis."""
    filters_per_group = nb_filter // n_group

    def f(input):
        def convolve_group(i):
            # Slice out group i of the channel axis, then convolve it.
            part = splittensor(axis=1, ratio_split=n_group, id_split=i)(input)
            return Convolution2D(filters_per_group, nb_row, nb_col)(part)

        return Merge([convolve_group(i) for i in range(n_group)],
                     mode='concat', concat_axis=1)

    return f
项目:keras    作者:GeekLiB    | 项目源码 | 文件源码
def test_merge_dot():
    """Dot-mode Merge, compiled twice: with an integer dot_axes and with a
    per-input axis list."""
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    for axes in (1, [1, 1]):
        left = Sequential()
        left.add(Dense(input_dim=input_dim, output_dim=nb_hidden))
        left.add(Activation('relu'))

        right = Sequential()
        right.add(Dense(input_dim=input_dim, output_dim=nb_hidden))
        right.add(Activation('relu'))

        model = Sequential()
        model.add(Merge([left, right], mode='dot', dot_axes=axes))
        model.add(Dense(nb_class))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
项目:keras    作者:GeekLiB    | 项目源码 | 文件源码
def test_merge_concat():
    """Concat-mode Merge: train with several fit configurations, run the
    predict family, and verify save/load of weights reproduces the loss."""
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    branches = []
    for tag in ('1', '2'):
        branch = Sequential(name='branch_' + tag)
        branch.add(Dense(nb_hidden, input_shape=(input_dim,), name='dense_' + tag))
        branch.add(Activation('relu', name='relu_' + tag))
        branches.append(branch)

    model = Sequential(name='merged_branches')
    model.add(Merge(branches, mode='concat', name='merge'))
    model.add(Dense(nb_class, name='final_dense'))
    model.add(Activation('softmax', name='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    train_inputs = [X_train, X_train]
    test_inputs = [X_test, X_test]
    model.fit(train_inputs, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, validation_data=(test_inputs, y_test))
    model.fit(train_inputs, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, validation_split=0.1)
    model.fit(train_inputs, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(train_inputs, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)

    loss = model.evaluate(test_inputs, y_test, verbose=0)

    model.predict(test_inputs, verbose=0)
    model.predict_classes(test_inputs, verbose=0)
    model.predict_proba(test_inputs, verbose=0)
    model.get_config()

    # Weight round-trip: fit again, then restore and expect identical loss.
    fname = 'test_merge_concat_temp.h5'
    model.save_weights(fname, overwrite=True)
    model.fit(train_inputs, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(test_inputs, y_test, verbose=0)
    assert(loss == nloss)
项目:triplets-extraction    作者:zsctju    | 项目源码 | 文件源码
def creat_binary_tag_LSTM( sourcevocabsize,targetvocabsize, source_W,input_seq_lenth ,output_seq_lenth ,
    hidden_dim ,emd_dim,loss='categorical_crossentropy',optimizer = 'rmsprop'):
    """Bidirectional-LSTM encoder with an LSTM tag decoder.

    Three Sequential encoders share ONE Embedding layer instance
    (initialized from `source_W`, zero-masked); forward and reversed
    backward LSTM outputs are concatenated and fed to LSTMDecoder_tag,
    which emits a per-timestep softmax over targetvocabsize+1 tags.

    Returns the compiled Sequential model.
    """
    encoder_a = Sequential()
    encoder_b = Sequential()
    encoder_c = Sequential()
    # Single shared embedding instance added to all three encoders.
    l_A_embedding = Embedding(input_dim=sourcevocabsize+1,
                        output_dim=emd_dim,
                        input_length=input_seq_lenth,
                        mask_zero=True,
                        weights=[source_W])
    encoder_a.add(l_A_embedding)
    encoder_a.add(Dropout(0.3))
    encoder_b.add(l_A_embedding)
    encoder_b.add(Dropout(0.3))
    # NOTE(review): encoder_c only gets the embedding and is never used
    # below — it looks like dead code; confirm before removing.
    encoder_c.add(l_A_embedding)

    Model = Sequential()

    encoder_a.add(LSTM(hidden_dim,return_sequences=True))
    # Backward LSTM; its output is time-reversed back by ReverseLayer2 so
    # it aligns with the forward sequence before concatenation.
    encoder_b.add(LSTM(hidden_dim,return_sequences=True,go_backwards=True))
    encoder_rb = Sequential()
    encoder_rb.add(ReverseLayer2(encoder_b))
    encoder_ab=Merge(( encoder_a,encoder_rb),mode='concat')
    Model.add(encoder_ab)

    # Decoder emits output_seq_lenth steps of hidden_dim features.
    decodelayer=LSTMDecoder_tag(hidden_dim=hidden_dim, output_dim=hidden_dim
                                         , input_length=input_seq_lenth,
                                        output_length=output_seq_lenth,
                                        state_input=False,
                                         return_sequences=True)
    Model.add(decodelayer)
    Model.add(TimeDistributedDense(targetvocabsize+1))
    Model.add(Activation('softmax'))
    Model.compile(loss=loss, optimizer=optimizer)
    return Model
项目:knowledgeflow    作者:3rduncle    | 项目源码 | 文件源码
def buildDecomposition(self):
        """Compute the decomposed (q+, q-) and (a+, a-) channel tensors.

        Applies `decomposite` to each (embedding, match) pair through a
        Merge layer and stores the results in self.tensors under
        'q-channels' / 'a-channels'.
        """
        q_embedding = self.tensors['q-embedding']
        a_embedding = self.tensors['a-embedding']
        q_match = self.tensors['q-match']
        a_match = self.tensors['a-match']
        # compute q+, q-, a+, a-
        # NOTE(review): the original comments here were mojibake. They
        # appear to explain why a Merge (not a Lambda) wraps decomposite:
        # this output_shape carries the batch-size dimension, which
        # Lambda's output_shape contract does not allow — confirm against
        # the Keras 1.x Merge/Lambda docs.
        q_channels = Merge(
            mode=lambda x: decomposite(*x),
            output_shape=(self.params['batch_size'], 2, self.q_length, self.wdim),
            name='q-channels'
        )([q_embedding, q_match])

        a_channels = Merge(
            mode=lambda x: decomposite(*x),
            output_shape=(self.params['batch_size'], 2, self.a_length, self.wdim),
            name='a-channels',
        )([a_embedding, a_match])
        print('q_channels', q_channels._keras_shape, K.ndim(q_channels))
        print('a_channels', a_channels._keras_shape, K.ndim(a_channels))
        self.tensors['q-channels'] = q_channels
        self.tensors['a-channels'] = a_channels
项目:knowledgeflow    作者:3rduncle    | 项目源码 | 文件源码
def linkFeature(self, input_name, conv_name, activation='tanh'):
    """Convolutional feature extractor for one input tensor.

    Applies every convolution registered under `conv_name` to the tensor
    `input_name`, activates (by name, or via an advanced-activation layer
    factory), max-pools over time, and concatenates the per-filter-size
    features when there is more than one.

    Fixes: removed the leftover debug print ('Am I called'); the
    string-activation check now uses isinstance instead of
    type(...) == type('').
    """
    filters = self.params.get('filters')
    nb_filter = self.params.get('nb_filter')
    convs = self.layers.get(conv_name)
    assert filters
    assert convs
    features = []
    for fsz, conv in zip(filters, convs):
        conv_output = conv(self.tensors[input_name])
        if isinstance(activation, str):
            act = Activation(
                activation, name='%s-act-%d' % (input_name, fsz)
            )(conv_output)
        else:
            # `activation` is an advanced-activation layer factory
            # (e.g. SReLU) taking only a name.
            act = activation(
                name='%s-advanced-act-%d' % (input_name, fsz)
            )(conv_output)
        # Max over axis 2 of the conv output's first channel slice
        # (assumes a 4-D conv output — TODO confirm layout).
        maxpool = Lambda(
            lambda x: K.max(x[:,:,:,0], axis=2),
            output_shape=(nb_filter,),
            name='%s-maxpool-%d' % (input_name, fsz)
        )(act)
        features.append(maxpool)
    if len(features) > 1:
        return Merge(mode='concat', name='%s-feature' % input_name)(features)
    else:
        return features[0]
项目:knowledgeflow    作者:3rduncle    | 项目源码 | 文件源码
def doubleFeature(self, pos, neg, conv_name, activation='tanh'):
    """Shared-convolution feature over a (positive, negative) tensor pair.

    Each convolution under `conv_name` is applied to both tensors; the
    two outputs are summed, activated, max-pooled over axis 1, and the
    per-filter-size features concatenated when there is more than one.

    Fixes: the string-activation branch referenced the undefined name
    `input_names` (NameError whenever `activation` was a string) — it now
    uses the composed `name`, matching the other branch; the local `sum`
    no longer shadows the builtin; the debug print of _keras_shape was
    removed.
    """
    name = '%s+%s' % (pos, neg)
    filters = self.params['filters']
    nb_filter = self.params['nb_filter']
    convs = self.layers[conv_name]
    features = []
    pos = self.tensors[pos]
    neg = self.tensors[neg]
    for fsz, conv in zip(filters, convs):
        # Element-wise sum of the shared conv applied to both inputs.
        summed = Merge(
            mode='sum',
        )([conv(pos), conv(neg)])
        if isinstance(activation, str):
            act = Activation(
                activation, name='%s-act-%d' % (name, fsz)
            )(summed)
        else:
            # Advanced-activation layer factory (e.g. SReLU).
            act = activation(
                name='%s-advanced-act-%d' % (name, fsz)
            )(summed)
        maxpool = Lambda(
            lambda x: K.max(x, axis=1),
            output_shape=(nb_filter,),
            name='%s-maxpool-%d' % (name, fsz)
        )(act)
        features.append(maxpool)
    if len(features) > 1:
        return Merge(
            mode='concat',
            name='%s-feature' % name,
        )(features)
    else:
        return features[0]
项目:knowledgeflow    作者:3rduncle    | 项目源码 | 文件源码
def linkFeature(self, input_name, conv_name, activation='tanh'):
    """Apply each conv under `conv_name` to tensor `input_name`, activate,
    max-pool over axis 1, and concat the per-filter-size features."""
    filters = self.params.get('filters')
    nb_filter = self.params.get('nb_filter')
    convs = self.layers.get(conv_name)
    assert filters
    assert convs
    pooled_features = []
    for fsz, conv in zip(filters, convs):
        conv_output = conv(self.tensors[input_name])
        # A string selects a named Activation; otherwise `activation` is
        # an advanced-activation layer factory taking only a name.
        if type(activation) == str:
            activated = Activation(
                activation, name='%s-act-%d' % (input_name, fsz)
            )(conv_output)
        else:
            activated = activation(
                name='%s-advanced-act-%d' % (input_name, fsz)
            )(conv_output)
        pooled = Lambda(
            lambda x: K.max(x, axis=1),
            output_shape=(nb_filter,),
            name='%s-maxpool-%d' % (input_name, fsz)
        )(activated)
        pooled_features.append(pooled)
    if len(pooled_features) == 1:
        return pooled_features[0]
    return Merge(mode='concat', name='%s-feature' % input_name)(pooled_features)
项目:knowledgeflow    作者:3rduncle    | 项目源码 | 文件源码
def buildFeatures(self, type='shared'):
    """Build the concatenated q/a feature tensors from the decomposed
    channels ('q+', 'q-', 'a+', 'a-') and store them in self.tensors.

    Currently only the 'shared' convolution scheme is supported.

    Fixes: the unsupported-type branch raised the undefined name `Error`
    (which would surface as NameError) — it now raises ValueError with
    the same message; removed the unused `features` local.
    """
    assert self.checkTensor('q+')
    assert self.checkTensor('q-')
    assert self.checkTensor('a+')
    assert self.checkTensor('a-')
    # Fresh SReLU advanced-activation factory per feature link.
    srelu = lambda name: SReLU(name=name)
    if type == 'shared':
        q_features = Merge(
            mode='concat',
            name='q-features',
        )([
            self.linkFeature('q+', 'shared-convolution', activation=srelu),
            self.linkFeature('q-', 'shared-convolution', activation=srelu)
        ])
        a_features = Merge(
            mode='concat',
            name='a-features',
        )([
            self.linkFeature('a+', 'shared-convolution', activation=srelu),
            self.linkFeature('a-', 'shared-convolution', activation=srelu)
        ])
    else:
        raise ValueError('Not Supported')
    self.tensors['q-features'] = q_features
    self.tensors['a-features'] = a_features
项目:keras-recommendation    作者:sonyisme    | 项目源码 | 文件源码
def add_model(self, model, graph, parent=None):
        """
        Recursively adds `model` and its components to the pydot graph.

        Returns the result of `add_edge` for leaf layers and Merge
        nodes. NOTE(review): the Model branch has no return statement,
        so recursing into a nested Model yields None — confirm callers
        ignore the result in that case.
        """
        this = self.get_name(model)
        if isinstance(model, Model):
            # Chain the model's layers back-to-front under this node.
            parent = self.add_edge(parent, this, graph)
            for child in reversed(model.layers):
                parent = self.add_model(child, graph, parent)
        elif isinstance(model, Merge):
            # Each merged branch hangs off this Merge node.
            for child in model.models:
                self.add_model(child, graph, this)
            return self.add_edge(parent, this, graph)
        else:
            # Plain layer: just link it under its parent.
            return self.add_edge(parent, this, graph)
项目:keras-customized    作者:ambrite    | 项目源码 | 文件源码
def test_merge_dot():
    """Dot-mode Merge built twice: once with integer dot_axes and once
    with a per-input axis list."""
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    def relu_branch():
        # Dense + relu branch shared by both model variants.
        branch = Sequential()
        branch.add(Dense(input_dim=input_dim, output_dim=nb_hidden))
        branch.add(Activation('relu'))
        return branch

    model = Sequential()
    model.add(Merge([relu_branch(), relu_branch()], mode='dot', dot_axes=1))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model = Sequential()
    model.add(Merge([relu_branch(), relu_branch()], mode='dot', dot_axes=[1, 1]))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
项目:keras-customized    作者:ambrite    | 项目源码 | 文件源码
def test_merge_concat():
    """Two named Dense branches merged by concatenation; exercises fit,
    evaluate, the predict family, and the save/load weight round-trip."""
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    branch_1 = Sequential(name='branch_1')
    branch_1.add(Dense(nb_hidden, input_shape=(input_dim,), name='dense_1'))
    branch_1.add(Activation('relu', name='relu_1'))

    branch_2 = Sequential(name='branch_2')
    branch_2.add(Dense(nb_hidden, input_shape=(input_dim,), name='dense_2'))
    branch_2.add(Activation('relu', name='relu_2'))

    model = Sequential(name='merged_branches')
    model.add(Merge([branch_1, branch_2], mode='concat', name='merge'))
    model.add(Dense(nb_class, name='final_dense'))
    model.add(Activation('softmax', name='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    # The merged model takes the same matrix on both inputs.
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)

    loss = model.evaluate([X_test, X_test], y_test, verbose=0)

    model.predict([X_test, X_test], verbose=0)
    model.predict_classes([X_test, X_test], verbose=0)
    model.predict_proba([X_test, X_test], verbose=0)
    model.get_config()

    # Save, train further, restore: loss must match the saved state.
    fname = 'test_merge_concat_temp.h5'
    model.save_weights(fname, overwrite=True)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert(loss == nloss)
项目:mlprojects-py    作者:srinathperera    | 项目源码 | 文件源码
def test_embeddings(df, y_train):
    """Sanity-check entity embeddings on the Agencia_ID / Canal_ID columns.

    Builds one Embedding branch per categorical column, concatenates
    them, and fits a small sigmoid MLP against `y_train`.

    Fix: the original used Python-2 print statements, which are syntax
    errors on Python 3; they are now print() calls. Commented-out
    scratch data was removed.
    """
    df = df[['Agencia_ID', 'Canal_ID']]
    X = np.array(df.values.copy())
    y = y_train
    print("Before", X.shape, y.shape)

    print(np.max(X[:, 0]), np.max(X[:, 1]))

    # Each categorical column becomes its own single-timestep input.
    X_list = [X[:, 0].reshape(-1, 1), X[:, 1].reshape(-1, 1)]

    print("After", X.shape, y.shape, X_list[0].shape, X_list[1].shape)

    embed1 = Sequential()
    embed1.add(Embedding(30000, 5, input_length=1))
    # NOTE(review): `dims=` is the pre-1.0 Keras Reshape kwarg; later
    # versions use target_shape — confirm against the installed version.
    embed1.add(Reshape(dims=(5,)))

    embed2 = Sequential()
    embed2.add(Embedding(12, 3, input_length=1))
    embed2.add(Reshape(dims=(3,)))

    model = Sequential()
    model.add(Merge([embed1, embed2], mode='concat'))
    model.add(Dense(32, init='uniform'))
    model.add(Activation('relu'))
    model.add(Dense(1, init='uniform'))
    model.add(Activation('sigmoid'))
    model.compile(loss='mean_absolute_error', optimizer='adam')

    model.fit(X_list, y, nb_epoch=10, batch_size=4)

#print_category_stats()
#test_embeddings()
项目:DeepST    作者:lucktroy    | 项目源码 | 文件源码
def seqCNN_CPT2(c_conf=(4, 3, 32, 32), p_conf=(4, 3, 32, 32), t_conf=(4, 3, 32, 32)):
    '''
    C - Temporal Closeness
    P - Period
    T - Trend
    conf = (nb_flow, seq_len, map_height, map_width)
    '''
    model = Sequential()
    components = []

    for conf in (c_conf, p_conf, t_conf):
        if conf is not None:
            components.append(seqCNNBaseLayer1_2(conf))
            # Channel count of the head comes from the last non-None conf.
            nb_flow = conf[0]

    # Several branches are fused by element-wise sum; a single branch
    # simply becomes the model itself.
    if len(components) > 1:
        model.add(Merge(components, mode='sum'))
    else:
        model = components[0]

    model.add(Activation('relu'))
    for n_filters in (64, 64):
        model.add(Convolution2D(n_filters, 3, 3, border_mode='same'))
        model.add(Activation('relu'))

    model.add(Convolution2D(nb_flow, 3, 3, border_mode='same'))
    model.add(Activation('tanh'))
    return model
项目:DeepST    作者:lucktroy    | 项目源码 | 文件源码
def seqCNN_CPTM(c_conf=(4, 3, 32, 32), p_conf=(4, 3, 32, 32), t_conf=(4, 3, 32, 32), metadata_dim=None):
    '''
    C - Temporal Closeness
    P - Period
    T - Trend
    conf = (nb_flow, seq_len, map_height, map_width)
    metadata_dim

    Builds the CPT convolutional trunk (branches fused by element-wise
    sum), plus a small metadata MLP reshaped to the same map; the two are
    summed and squashed by tanh.
    '''
    model = Sequential()
    components = []
    for conf in [c_conf, p_conf, t_conf]:
        if conf is not None:
            components.append(seqCNNBaseLayer1_2(conf))
            # nb_flow = conf[0]
            # Output dimensions come from the last non-None conf.
            nb_flow, _, map_height, map_width = conf
    # model.add(Merge(components, mode='concat', concat_axis=1))  # concat
    # Several branches: fuse by sum; one branch: it becomes the model.
    if len(components) > 1:
        model.add(Merge(components, mode='sum'))
    else:
        model = components[0]
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3, border_mode='same'))
    model.add(Activation('relu'))

    model.add(Convolution2D(64, 3, 3, border_mode='same'))
    model.add(Activation('relu'))

    model.add(Convolution2D(nb_flow, 3, 3, border_mode='same'))

    # Metadata side-branch: MLP projected to an (nb_flow, H, W) map so it
    # can be summed with the convolutional output.
    metadata_processor = Sequential()
    # metadata_processor.add(Dense(output_dim=nb_flow * map_height * map_width, input_dim=metadata_dim))
    metadata_processor.add(Dense(output_dim=10, input_dim=metadata_dim))
    metadata_processor.add(Activation('relu'))
    metadata_processor.add(Dense(output_dim=nb_flow * map_height * map_width))
    metadata_processor.add(Activation('relu'))
    metadata_processor.add(Reshape((nb_flow, map_height, map_width)))

    model_final=Sequential()
    model_final.add(Merge([model, metadata_processor], mode='sum'))
    model_final.add(Activation('tanh'))
    return model_final
项目:DeepST    作者:lucktroy    | 项目源码 | 文件源码
def lateFusion(metadata_dim, n_flow=2, seq_len=3, map_height=32, map_width=32):
    """Late fusion of the seqCNN trunk with a metadata branch.

    The metadata vector is projected to an (n_flow, H, W) map, summed
    element-wise with the seqCNNBase output, and squashed by tanh.

    Fix: removed the dead duplicate `model = Sequential()` assignment at
    the top (the variable was immediately rebound below) and the
    commented-out activation line.
    """
    mat_model = seqCNNBase(n_flow, seq_len, map_height, map_width)

    metadata_processor = Sequential()
    metadata_processor.add(Dense(output_dim=n_flow * map_height * map_width, input_dim=metadata_dim))
    metadata_processor.add(Reshape((n_flow, map_height, map_width)))

    model = Sequential()
    model.add(Merge([mat_model, metadata_processor], mode='sum'))
    model.add(Activation('tanh'))
    return model
项目:taxi    作者:xuguanggen    | 项目源码 | 文件源码
def build_lstm(n_con,n_emb,vocabs_size,n_dis,emb_size,cluster_size):
    """Like build_mlp, but with an extra LSTM branch over the
    (MAX_LENGTH, 2) trajectory sequence before the merged classifier."""
    hidden_size = 800

    # Continuous-feature branch.
    continuous_branch = Sequential()
    continuous_branch.add(Dense(input_dim=n_con, output_dim=emb_size))

    # One embedding branch per discrete feature column.
    embedding_branches = []
    for idx in range(n_emb):
        branch = Sequential()
        branch.add(Embedding(input_dim=vocabs_size[idx],
                             output_dim=emb_size,
                             input_length=n_dis))
        branch.add(Flatten())
        embedding_branches.append(branch)

    # Sequence branch: normalized, zero-masked trajectory through an LSTM.
    in_dimension = 2
    sequence_branch = Sequential()
    sequence_branch.add(BatchNormalization(input_shape=(MAX_LENGTH, in_dimension)))
    sequence_branch.add(Masking([0] * in_dimension, input_shape=(MAX_LENGTH, in_dimension)))
    sequence_branch.add(LSTM(emb_size, return_sequences=False, input_shape=(MAX_LENGTH, in_dimension)))

    model = Sequential()
    model.add(Merge([continuous_branch] + embedding_branches + [sequence_branch], mode='concat'))
    model.add(BatchNormalization())
    model.add(Dense(hidden_size, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(cluster_size, activation='softmax'))
    # Lambda maps the cluster distribution to a 2-D coordinate.
    model.add(Lambda(caluate_point, output_shape=[2]))
    return model
项目:keras    作者:NVIDIA    | 项目源码 | 文件源码
def test_merge_dot():
    """Compile dot-mode merged models for both dot_axes spellings."""
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    def build_dot_model(axes):
        # Two identical Dense+relu branches merged by dot product.
        first = Sequential()
        first.add(Dense(input_dim=input_dim, output_dim=nb_hidden))
        first.add(Activation('relu'))

        second = Sequential()
        second.add(Dense(input_dim=input_dim, output_dim=nb_hidden))
        second.add(Activation('relu'))

        merged = Sequential()
        merged.add(Merge([first, second], mode='dot', dot_axes=axes))
        merged.add(Dense(nb_class))
        merged.add(Activation('softmax'))
        merged.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        return merged

    model = build_dot_model(1)
    model = build_dot_model([1, 1])
项目:keras    作者:NVIDIA    | 项目源码 | 文件源码
def test_merge_concat():
    """Concat-mode Merge smoke test: several fit variants, the predict
    family, and a weight save/restore round-trip ending in equal loss."""
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    def named_branch(idx):
        # Dense + relu branch with deterministic layer names.
        branch = Sequential(name='branch_%d' % idx)
        branch.add(Dense(nb_hidden, input_shape=(input_dim,), name='dense_%d' % idx))
        branch.add(Activation('relu', name='relu_%d' % idx))
        return branch

    model = Sequential(name='merged_branches')
    model.add(Merge([named_branch(1), named_branch(2)], mode='concat', name='merge'))
    model.add(Dense(nb_class, name='final_dense'))
    model.add(Activation('softmax', name='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)

    loss = model.evaluate([X_test, X_test], y_test, verbose=0)

    model.predict([X_test, X_test], verbose=0)
    model.predict_classes([X_test, X_test], verbose=0)
    model.predict_proba([X_test, X_test], verbose=0)
    model.get_config()

    fname = 'test_merge_concat_temp.h5'
    model.save_weights(fname, overwrite=True)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert(loss == nloss)
项目:deep-coref    作者:clarkkev    | 项目源码 | 文件源码
def test_merge(self):
    """Run the generic layer-runner over a Merge of two bare Layers."""
    inputs = [core.Layer(), core.Layer()]
    for input_layer in inputs:
        input_layer.set_input_shape((None,))
    self._runner(core.Merge(inputs))
项目:deep-coref    作者:clarkkev    | 项目源码 | 文件源码
def test_merge_overlap(self):
    """Merging one branch with itself ('sum' of identical inputs) must
    still train, reach a reasonable loss, and round-trip its weights."""
    print('Test merge overlap')
    branch = Sequential()
    branch.add(Dense(nb_hidden, input_shape=(input_dim,)))
    branch.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([branch, branch], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    # Exercise the fit API across accuracy/verbosity/validation variants.
    fit_variants = [
        dict(show_accuracy=True, verbose=1, validation_data=(X_test, y_test)),
        dict(show_accuracy=False, verbose=2, validation_data=(X_test, y_test)),
        dict(show_accuracy=True, verbose=2, validation_split=0.1),
        dict(show_accuracy=False, verbose=1, validation_split=0.1),
        dict(verbose=0),
        dict(verbose=1, shuffle=False),
    ]
    for extra_kwargs in fit_variants:
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, **extra_kwargs)

    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_train, y_train, verbose=0)
    print('loss:', loss)
    if loss > 0.6:
        raise Exception('Score too low, learning issue.')
    preds = model.predict(X_test, verbose=0)
    classes = model.predict_classes(X_test, verbose=0)
    probas = model.predict_proba(X_test, verbose=0)
    print(model.get_config(verbose=1))

    # Weight round-trip must leave the evaluated loss unchanged.
    model.save_weights('temp.h5', overwrite=True)
    model.load_weights('temp.h5')

    nloss = model.evaluate(X_train, y_train, verbose=0)
    print(nloss)
    assert(loss == nloss)
项目:RecommendationSystem    作者:TURuibo    | 项目源码 | 文件源码
def add_model(self, model, graph, parent=None):
    """
    Recursively adds `model` and its components to the pydot graph
    """
    node = self.get_name(model)
    if isinstance(model, Model):
        # Chain the model's layers back-to-front, threading the edge
        # handle through the recursion (this branch returns None, as in
        # the original).
        current = self.add_edge(parent, node, graph)
        for layer in reversed(model.layers):
            current = self.add_model(layer, graph, current)
    elif isinstance(model, Merge):
        # Hang every merged branch off this Merge node.
        for branch in model.models:
            self.add_model(branch, graph, node)
        return self.add_edge(parent, node, graph)
    else:
        # Plain layer: link it under its parent.
        return self.add_edge(parent, node, graph)
项目:convnets-keras    作者:heuritech    | 项目源码 | 文件源码
def convolution2Dgroup(n_group, nb_filter, nb_row, nb_col, **kwargs):
    """Grouped 2D convolution factory (legacy Keras 1 API).

    Returns a closure that splits its input's channel axis into `n_group`
    slices, convolves each slice with nb_filter // n_group filters, and
    concatenates the per-group outputs back along the channel axis.
    """
    def f(input):
        branches = []
        for group_idx in range(n_group):
            # carve out this group's channel slice
            sliced = splittensor(axis=1,
                                 ratio_split=n_group,
                                 id_split=group_idx)(input)
            branches.append(
                Convolution2D(nb_filter // n_group, nb_row, nb_col)(sliced))
        return Merge(branches, mode='concat', concat_axis=1)

    return f
项目:visual_turing_test-tutorial    作者:mateuszmalinowski    | 项目源码 | 文件源码
def create(self):
        """
        Build a recurrent sequence classifier on `self`: textual embedding,
        stacked RNNs, a sequence-returning recurrent encoder, dropout, a
        time-distributed projection to output_dim, temporal pooling (using
        self._config.merge_mode), and a final softmax.
        """
        # Temporal pooling only supports these reduction modes.
        assert self._config.merge_mode in ['max', 'ave', 'sum'], \
                'Merge mode of this model is either max, ave or sum'

        # The model itself (`self`) is the single branch being built here.
        self.textual_embedding(self, mask_zero=False)
        self.stacked_RNN(self)
        self.add(self._config.recurrent_encoder(
            self._config.hidden_state_dim, 
            return_sequences=True,
            go_backwards=self._config.go_backwards))
        self.add(Dropout(0.5))
        self.add(TimeDistributedDense(self._config.output_dim))
        # Collapse the time axis according to merge_mode.
        self.temporal_pooling(self)
        self.add(Activation('softmax'))
项目:visual_turing_test-tutorial    作者:mateuszmalinowski    | 项目源码 | 文件源码
def create(self):
        """
        Build a multi-window CNN language classifier: uni-, bi- and tri-gram
        convolutional branches over the textual embedding are temporally
        pooled, concatenated, and fed through a deep MLP with a softmax.

        Refactor note: the three branches were previously three copy-pasted
        blocks differing only in filter_length; they are now built in a loop
        with identical configuration and ordering.
        """
        # Temporal pooling only supports these reduction modes.
        assert self._config.merge_mode in ['max', 'ave', 'sum'], \
                'Merge mode of this model is either max, ave or sum'

        # One convolutional branch per n-gram width (1, 2, 3).
        ngram_branches = []
        for filter_length in (1, 2, 3):
            branch = Sequential()
            self.textual_embedding(branch, mask_zero=True)
            branch.add(Convolution1D(
                nb_filter=self._config.language_cnn_filters,
                filter_length=filter_length,
                border_mode='valid',
                activation=self._config.language_cnn_activation,
                subsample_length=1))
            self.temporal_pooling(branch)
            ngram_branches.append(branch)

        # Concatenate pooled n-gram features, then classify.
        self.add(Merge(ngram_branches, mode='concat'))
        self.deep_mlp()
        self.add(Dense(self._config.output_dim))
        self.add(Activation('softmax'))
项目:visual_turing_test-tutorial    作者:mateuszmalinowski    | 项目源码 | 文件源码
def create(self):
        """
        Build a multimodal classifier: a recurrent language encoder and a
        config-selected visual model are fused by a Merge layer, followed by
        a deep MLP and softmax.
        """
        # Language branch: embedding -> stacked RNNs -> final recurrent
        # encoder that returns only the last hidden state.
        language_model = Sequential()
        self.textual_embedding(language_model, mask_zero=True)
        self.stacked_RNN(language_model)
        language_model.add(self._config.recurrent_encoder(
            self._config.hidden_state_dim, 
            return_sequences=False,
            go_backwards=self._config.go_backwards))
        self.language_model = language_model


        # Visual branch: perception model selected by config, then projected
        # by the visual embedding.
        visual_model_factory = \
                select_sequential_visual_model[self._config.trainable_perception_name](
                    self._config.visual_dim)
        visual_model = visual_model_factory.create()
        visual_dimensionality = visual_model_factory.get_dimensionality()
        self.visual_embedding(visual_model, visual_dimensionality)
        self.visual_model = visual_model

        # 'dot' needs explicit dot axes; every other mode is passed through.
        if self._config.multimodal_merge_mode == 'dot':
            self.add(Merge([language_model, visual_model], mode='dot', dot_axes=[(1,),(1,)]))
        else:
            self.add(Merge([language_model, visual_model], mode=self._config.multimodal_merge_mode))

        self.deep_mlp()
        self.add(Dense(self._config.output_dim))
        self.add(Activation('softmax'))
项目:visual_turing_test-tutorial    作者:mateuszmalinowski    | 项目源码 | 文件源码
def create(self):
        """
        Build a multimodal model where the embedded visual features are
        padded with a zero sequence, merged with the word embeddings, and
        read by a single recurrent encoder before the deep MLP classifier.

        Bug fixes relative to the original:
        * the repeat count now lives inside RepeatVector(...); the original
          subtracted 1 from the layer object itself (a runtime TypeError);
        * Merge is now *called* with a list of models; the original indexed
          the class (`Merge[...]`) and passed mode= to Sequential.add.
        """
        language_model = Sequential()
        self.textual_embedding(language_model, mask_zero=True)
        self.language_model = language_model

        # Visual branch: perception model selected by config, then projected
        # by the visual embedding.
        visual_model_factory = \
                select_sequential_visual_model[self._config.trainable_perception_name](
                    self._config.visual_dim)
        visual_model = visual_model_factory.create()
        visual_dimensionality = visual_model_factory.get_dimensionality()
        self.visual_embedding(visual_model, visual_dimensionality)
        # the below should contain all zeros
        zero_model = Sequential()
        zero_model.add(RepeatVector(self._config.max_input_time_steps - 1))
        visual_model.add(Merge([visual_model, zero_model], mode='concat'))
        self.visual_model = visual_model

        # 'dot' needs explicit dot axes; every other mode is passed through.
        if self._config.multimodal_merge_mode == 'dot':
            self.add(Merge([language_model, visual_model], mode='dot', dot_axes=[(1,),(1,)]))
        else:
            self.add(Merge([language_model, visual_model], mode=self._config.multimodal_merge_mode))

        self.add(self._config.recurrent_encoder(
            self._config.hidden_state_dim, 
            return_sequences=False,
            go_backwards=self._config.go_backwards))
        self.deep_mlp()
        self.add(Dense(self._config.output_dim))
        self.add(Activation('softmax'))
项目:visual_turing_test-tutorial    作者:mateuszmalinowski    | 项目源码 | 文件源码
def create(self):
        """
        Build a multimodal model where the embedded visual features are
        repeated along the time axis and merged with the word embeddings
        before a single recurrent encoder reads the fused sequence.
        """
        language_model = Sequential()
        self.textual_embedding(language_model, mask_zero=True)
        self.language_model = language_model


        # Visual branch: perception model selected by config, then projected
        # by the visual embedding.
        visual_model_factory = \
                select_sequential_visual_model[self._config.trainable_perception_name](
                    self._config.visual_dim)
        visual_model = visual_model_factory.create()
        visual_dimensionality = visual_model_factory.get_dimensionality()
        self.visual_embedding(visual_model, visual_dimensionality)
        #visual_model = Sequential()
        #self.visual_embedding(visual_model)
        self.visual_model = visual_model
        # Tile the visual vector once per input time step so it can be
        # merged with the per-word language features.
        visual_model.add(RepeatVector(self._config.max_input_time_steps))

        # 'dot' needs explicit dot axes; every other mode is passed through.
        if self._config.multimodal_merge_mode == 'dot':
            self.add(Merge([language_model, visual_model], mode='dot', dot_axes=[(1,),(1,)]))
        else:
            self.add(Merge([language_model, visual_model], mode=self._config.multimodal_merge_mode))

        self.add(self._config.recurrent_encoder(
            self._config.hidden_state_dim, 
            return_sequences=False,
            go_backwards=self._config.go_backwards))
        self.deep_mlp()
        self.add(Dense(self._config.output_dim))
        self.add(Activation('softmax'))
项目:visual_turing_test-tutorial    作者:mateuszmalinowski    | 项目源码 | 文件源码
def create(self):
        """
        Encoder-decoder variant: a recurrent language encoder is merged with
        the visual branch, and a recurrent decoder emits an output sequence
        of max_output_time_steps soft-maxed predictions.
        """
        # Language branch: embedding -> stacked RNNs -> last-state encoder.
        language_model = Sequential()
        self.textual_embedding(language_model, mask_zero=True)
        self.stacked_RNN(language_model)
        language_model.add(self._config.recurrent_encoder(
            self._config.hidden_state_dim, 
            return_sequences=False,
            go_backwards=self._config.go_backwards))
        self.language_model = language_model

        # Visual branch: perception model selected by config, then projected
        # by the visual embedding.
        visual_model_factory = \
                select_sequential_visual_model[self._config.trainable_perception_name](
                    self._config.visual_dim)
        visual_model = visual_model_factory.create()
        visual_dimensionality = visual_model_factory.get_dimensionality()
        self.visual_embedding(visual_model, visual_dimensionality)
        #visual_model = Sequential()
        #self.visual_embedding(visual_model)
        self.visual_model = visual_model

        # 'dot' needs explicit dot axes; every other mode is passed through.
        if self._config.multimodal_merge_mode == 'dot':
            self.add(Merge([language_model, visual_model], mode='dot', dot_axes=[(1,),(1,)]))
        else:
            self.add(Merge([language_model, visual_model], mode=self._config.multimodal_merge_mode))

        self.add(Dropout(0.5))
        self.add(Dense(self._config.output_dim))

        # Decoder: repeat the fused vector once per output step and run a
        # sequence-returning RNN over it.
        self.add(RepeatVector(self._config.max_output_time_steps))
        self.add(self._config.recurrent_decoder(
                self._config.hidden_state_dim, return_sequences=True))
        self.add(Dropout(0.5))
        self.add(TimeDistributedDense(self._config.output_dim))
        self.add(Activation('softmax'))


###
# Graph-based models
###
项目:keras    作者:GeekLiB    | 项目源码 | 文件源码
def test_merge_sum():
    """Exercise Merge(mode='sum'): train, predict, weight and config round-trips."""
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    def _branch():
        # one Dense+relu branch feeding the merge
        branch = Sequential()
        branch.add(Dense(nb_hidden, input_shape=(input_dim,)))
        branch.add(Activation('relu'))
        return branch

    model = Sequential()
    model.add(Merge([_branch(), _branch()], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    # same four fit variants as before, driven by a kwargs table
    for extra in (dict(validation_data=([X_test, X_test], y_test)),
                  dict(validation_split=0.1),
                  dict(),
                  dict(shuffle=False)):
        model.fit([X_train, X_train], y_train, batch_size=batch_size,
                  nb_epoch=nb_epoch, verbose=0, **extra)

    loss = model.evaluate([X_test, X_test], y_test, verbose=0)

    model.predict([X_test, X_test], verbose=0)
    model.predict_classes([X_test, X_test], verbose=0)
    model.predict_proba([X_test, X_test], verbose=0)

    # test weight saving: persist, rebuild a fresh clone, reload
    fname = 'test_merge_sum_temp.h5'
    model.save_weights(fname, overwrite=True)
    model = Sequential()
    model.add(Merge([_branch(), _branch()], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.load_weights(fname)
    os.remove(fname)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    nloss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert(loss == nloss)

    # test serialization round-trips
    Sequential.from_config(model.get_config())
    model.summary()
    model_from_json(model.to_json())
    model_from_yaml(model.to_yaml())
项目:keras    作者:GeekLiB    | 项目源码 | 文件源码
def test_merge_recursivity():
    """Nested merges: Merge([Merge([l, r]), r2]) trains, saves and serializes."""
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    def _branch():
        # one Dense+relu branch feeding a merge
        branch = Sequential()
        branch.add(Dense(nb_hidden, input_shape=(input_dim,)))
        branch.add(Activation('relu'))
        return branch

    intermediate = Sequential()
    intermediate.add(Merge([_branch(), _branch()], mode='sum'))
    intermediate.add(Dense(nb_hidden))
    intermediate.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([intermediate, _branch()], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    triple_train = [X_train, X_train, X_train]
    triple_test = [X_test, X_test, X_test]

    # same four fit variants as before, driven by a kwargs table
    for extra in (dict(validation_data=(triple_test, y_test)),
                  dict(validation_split=0.1),
                  dict(),
                  dict(shuffle=False)):
        model.fit(triple_train, y_train, batch_size=batch_size,
                  nb_epoch=nb_epoch, verbose=0, **extra)

    loss = model.evaluate(triple_test, y_test, verbose=0)

    model.predict(triple_test, verbose=0)
    model.predict_classes(triple_test, verbose=0)
    model.predict_proba(triple_test, verbose=0)

    # weight persistence round-trip
    fname = 'test_merge_recursivity_temp.h5'
    model.save_weights(fname, overwrite=True)
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(triple_test, y_test, verbose=0)
    assert(loss == nloss)

    # test serialization round-trips
    Sequential.from_config(model.get_config())
    model.summary()
    model_from_json(model.to_json())
    model_from_yaml(model.to_yaml())
项目:keras    作者:GeekLiB    | 项目源码 | 文件源码
def test_merge_overlap():
    """Merge a branch with itself (shared layers) and round-trip weights/config."""
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    shared = Sequential()
    shared.add(Dense(nb_hidden, input_shape=(input_dim,)))
    shared.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([shared, shared], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    # same four fit variants (note: per-variant verbose levels preserved)
    for extra in (dict(verbose=1, validation_data=(X_test, y_test)),
                  dict(verbose=2, validation_split=0.1),
                  dict(verbose=0),
                  dict(verbose=1, shuffle=False)):
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, **extra)

    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_test, y_test, verbose=0)
    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)

    # weight persistence round-trip
    fname = 'test_merge_overlap_temp.h5'
    print(model.layers)
    model.save_weights(fname, overwrite=True)
    print(model.trainable_weights)

    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(X_test, y_test, verbose=0)
    assert(loss == nloss)

    # test serialization round-trips
    Sequential.from_config(model.get_config())
    model.summary()
    model_from_json(model.to_json())
    model_from_yaml(model.to_yaml())
项目:RNNIPTag    作者:ml-slac    | 项目源码 | 文件源码
def buildModel_SimpleDense(dataset, useAdam=True):
    """
    Build and train a dense IP3D-style track tagger.

    The continuous track variables pass through a linear branch and the
    categorical track variable through an embedding; both are concatenated,
    flattened and classified by a small relu MLP with a sigmoid output.

    Fix: converted Python-2-only ``print`` statements to ``print()`` calls
    (a SyntaxError under Python 3; the rest of this file uses ``print()``).

    :param dataset: dict with 'X_train', 'y_train', 'weights_train',
        'labels_train', 'labels_test' entries.
    :param useAdam: use the Adam optimizer when True, RMSprop otherwise.
    :return: tuple of (trained model, Keras fit History).
    """
    print("Building Model Dense IP3D...")

    #################
    # Configuration #
    #################

    X_train = dataset['X_train']
    y_train = dataset['y_train']
    # NOTE(review): labels are read but unused here; kept so a missing key
    # still fails fast, matching the original behavior.
    labels_train = dataset['labels_train']
    labels_test  = dataset['labels_test']
    sample_weight = dataset['weights_train']

    # split by "continuous variable" and "categorization variable"
    X_train_vec = [X_train[:, 0:ntrk_cut, 0:2], X_train[:, 0:ntrk_cut, -1]]

    # continuous branch: identity pass-through of the first two features
    left = Sequential()
    left.add(Activation('linear', input_shape=(ntrk_cut, 2)))

    # categorical branch: embed the last feature per track
    right = Sequential()
    right.add(Embedding(max_embed_features, embed_size, mask_zero=False, input_length=ntrk_cut))

    model = Sequential()
    model.add(Merge([left, right], mode='concat'))
    model.add(Flatten())

    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))

    model.add(Dense(64))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))

    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    # try using different optimizers and different optimizer configs
    print("Compiling ...")
    if useAdam:
        model.compile(loss='binary_crossentropy', optimizer='adam', metrics=["accuracy"])
    else:
        model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=["accuracy"])  ## sgd
    print("Finish Compilation")

    print("Train...")
    history = model.fit(X_train_vec, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
                        validation_split=0.2, shuffle=True, sample_weight=sample_weight)
    print("Finish Training")

    return (model, history)
项目:knowledgeflow    作者:3rduncle    | 项目源码 | 文件源码
def buildComposition(self, shared=True):
        """
        Compose question/answer embeddings into a semantic-match matrix and
        per-side match tensors, caching the results in self.tensors.

        :param shared: if True, one embedding layer serves both question and
            answer inputs; otherwise separate per-side embeddings are used.
        """
        q_input = self.tensors['q_input']
        a_input = self.tensors['a_input']
        if shared:
            q_embedding = self.layers['shared-embedding'](q_input)
            a_embedding = self.layers['shared-embedding'](a_input)
        else:
            q_embedding = self.layers['q-embedding'](q_input)
            a_embedding = self.layers['a-embedding'](a_input)

        print('Embedding ndim q %d a %d' % (K.ndim(q_embedding), K.ndim(a_embedding)))
        print('Embedding shape ', q_embedding._keras_shape, a_embedding._keras_shape)

        # compute Semantic Matching
        # The Merge layer is built without inputs and then *called* on the
        # tensor pair below; `semantic_matrix` performs the actual pairwise
        # computation. Declared output shape is (q_length, a_length).
        cross = Merge(
        #   [q_embedding, a_embedding],
            mode=semantic_matrix,
            output_shape=(self.q_length, self.a_length),
            name='semantic'
        )
        semantic = cross([q_embedding, a_embedding])
        print('Semantic ndim %d' % K.ndim(semantic))
        print('Semantic shape ', semantic._keras_shape)
        print('Semantic shape ', cross.get_output_shape_at(0))

        # compute cross 
        # q_match: combine the answer embedding with the semantic matrix
        # along axis=0 -- presumably a question-aligned representation;
        # semantics depend on match_matrix (defined elsewhere).
        q_match = merge(
            [a_embedding, semantic],
            mode=lambda x: match_matrix(*x,axis=0, w=self.params['window']),
            output_shape=(self.q_length, self.wdim),
            name='q_match'
        )
        print('q_match ', q_match._keras_shape, K.ndim(q_match))

        # a_match: same construction, aligned to the answer side (axis=1).
        a_match = merge(
            [q_embedding, semantic],
            mode=lambda x: match_matrix(*x,axis=1, w=self.params['window']),
            output_shape=(self.a_length, self.wdim),
            name='a_match'
        )
        print('Match ndim q %d a %d' % (K.ndim(q_match), K.ndim(a_match)))
        print('Match shape ', q_match._keras_shape, a_match._keras_shape)
        # Expose intermediates for buildDecomposition and downstream layers.
        self.tensors['q-embedding'] = q_embedding
        self.tensors['a-embedding'] = a_embedding
        self.tensors['q-match'] = q_match
        self.tensors['a-match'] = a_match
项目:knowledgeflow    作者:3rduncle    | 项目源码 | 文件源码
def buildDecomposition(self):
        """
        Decompose each embedding into a positive component (via `parallel`)
        and its residual, storing q+/q-/a+/a- tensors in self.tensors.
        """
        q_embedding = self.tensors['q-embedding']
        a_embedding = self.tensors['a-embedding']
        q_match = self.tensors['q-match']
        a_match = self.tensors['a-match']
        # compute q+, q-, a+, a-
        # NOTE(review): the original comments below this point were mojibake
        # (likely Chinese). From the code itself: Merge is used instead of
        # Lambda, and each output_shape explicitly includes batch_size --
        # presumably because Lambda's output_shape could not carry the batch
        # dimension in this Keras version. Confirm against upstream history.
        q_pos = Merge(
            mode=lambda x: parallel(*x),
            output_shape=(self.params['batch_size'], self.q_length, self.wdim),
            name='q+'
        )([q_embedding, q_match])

        # q- is the embedding minus its positive component (residual).
        q_neg = Merge(
            mode=lambda x: x[0] - x[1],
            output_shape=(self.params['batch_size'], self.q_length, self.wdim),
            name='q-'
        )([q_embedding, q_pos])
        print('q_pos', q_pos._keras_shape, K.ndim(q_pos))
        print('q_neg', q_neg._keras_shape, K.ndim(q_neg))

        # Same decomposition for the answer side.
        a_pos = Merge(
            mode=lambda x: parallel(*x),
            output_shape=(self.params['batch_size'], self.a_length, self.wdim),
            name='a+',
        )([a_embedding, a_match])
        a_neg = Merge(
            mode=lambda x: x[0] - x[1],
            output_shape=(self.params['batch_size'], self.a_length, self.wdim),
            name='a-'
        )([a_embedding, a_pos])
        print('a_pos', a_pos._keras_shape, K.ndim(a_pos))
        print('a_neg', a_neg._keras_shape, K.ndim(a_neg))
        self.tensors['q+'] = q_pos
        self.tensors['q-'] = q_neg
        self.tensors['a+'] = a_pos
        self.tensors['a-'] = a_neg
项目:keras-customized    作者:ambrite    | 项目源码 | 文件源码
def test_merge_sum():
    """Exercise Merge(mode='sum'): train, predict, weight and config round-trips."""
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    def _branch():
        # one Dense+relu branch feeding the merge
        branch = Sequential()
        branch.add(Dense(nb_hidden, input_shape=(input_dim,)))
        branch.add(Activation('relu'))
        return branch

    model = Sequential()
    model.add(Merge([_branch(), _branch()], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    # same four fit variants as before, driven by a kwargs table
    for extra in (dict(validation_data=([X_test, X_test], y_test)),
                  dict(validation_split=0.1),
                  dict(),
                  dict(shuffle=False)):
        model.fit([X_train, X_train], y_train, batch_size=batch_size,
                  nb_epoch=nb_epoch, verbose=0, **extra)

    loss = model.evaluate([X_test, X_test], y_test, verbose=0)

    model.predict([X_test, X_test], verbose=0)
    model.predict_classes([X_test, X_test], verbose=0)
    model.predict_proba([X_test, X_test], verbose=0)

    # test weight saving: persist, rebuild a fresh clone, reload
    fname = 'test_merge_sum_temp.h5'
    model.save_weights(fname, overwrite=True)
    model = Sequential()
    model.add(Merge([_branch(), _branch()], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.load_weights(fname)
    os.remove(fname)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    nloss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert(loss == nloss)

    # test serialization round-trips
    Sequential.from_config(model.get_config())
    model.summary()
    model_from_json(model.to_json())
    model_from_yaml(model.to_yaml())
项目:keras-customized    作者:ambrite    | 项目源码 | 文件源码
def test_merge_recursivity():
    """Nested merges: Merge([Merge([l, r]), r2]) trains, saves and serializes."""
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    def _branch():
        # one Dense+relu branch feeding a merge
        branch = Sequential()
        branch.add(Dense(nb_hidden, input_shape=(input_dim,)))
        branch.add(Activation('relu'))
        return branch

    intermediate = Sequential()
    intermediate.add(Merge([_branch(), _branch()], mode='sum'))
    intermediate.add(Dense(nb_hidden))
    intermediate.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([intermediate, _branch()], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    triple_train = [X_train, X_train, X_train]
    triple_test = [X_test, X_test, X_test]

    # same four fit variants as before, driven by a kwargs table
    for extra in (dict(validation_data=(triple_test, y_test)),
                  dict(validation_split=0.1),
                  dict(),
                  dict(shuffle=False)):
        model.fit(triple_train, y_train, batch_size=batch_size,
                  nb_epoch=nb_epoch, verbose=0, **extra)

    loss = model.evaluate(triple_test, y_test, verbose=0)

    model.predict(triple_test, verbose=0)
    model.predict_classes(triple_test, verbose=0)
    model.predict_proba(triple_test, verbose=0)

    # weight persistence round-trip
    fname = 'test_merge_recursivity_temp.h5'
    model.save_weights(fname, overwrite=True)
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(triple_test, y_test, verbose=0)
    assert(loss == nloss)

    # test serialization round-trips
    Sequential.from_config(model.get_config())
    model.summary()
    model_from_json(model.to_json())
    model_from_yaml(model.to_yaml())
项目:keras-customized    作者:ambrite    | 项目源码 | 文件源码
def test_merge_overlap():
    """Merge a branch with itself (shared layers) and round-trip weights/config."""
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    shared = Sequential()
    shared.add(Dense(nb_hidden, input_shape=(input_dim,)))
    shared.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([shared, shared], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    # same four fit variants (note: per-variant verbose levels preserved)
    for extra in (dict(verbose=1, validation_data=(X_test, y_test)),
                  dict(verbose=2, validation_split=0.1),
                  dict(verbose=0),
                  dict(verbose=1, shuffle=False)):
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, **extra)

    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_test, y_test, verbose=0)
    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)

    # weight persistence round-trip
    fname = 'test_merge_overlap_temp.h5'
    print(model.layers)
    model.save_weights(fname, overwrite=True)
    print(model.trainable_weights)

    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(X_test, y_test, verbose=0)
    assert(loss == nloss)

    # test serialization round-trips
    Sequential.from_config(model.get_config())
    model.summary()
    model_from_json(model.to_json())
    model_from_yaml(model.to_yaml())
项目:mlprojects-py    作者:srinathperera    | 项目源码 | 文件源码
def __build_keras_model(self):
        """
        Assemble the entity-embedding regression network.

        Each categorical input gets its own small Sequential branch that
        embeds and flattens it; the continuous promo input passes through a
        Dense(1). All branches are concatenated and fed to a two-hidden-layer
        relu MLP with a sigmoid output, compiled with MAE + Adam.

        Refactor note: the seven copy-pasted branch blocks are now driven by
        a spec table, preserving the exact branch order the caller's input
        list must follow.
        """
        models = []

        # (vocabulary size, embedding width) per categorical feature, or
        # None for a continuous pass-through, in required input order:
        # store, day-of-week, promo, year, month, day, german state.
        embedding_specs = [
            (1115, 10),   # store
            (7, 6),       # day of week
            None,         # promo: continuous, no embedding
            (3, 2),       # year
            (12, 6),      # month
            (31, 10),     # day
            (12, 6),      # german state
        ]
        for spec in embedding_specs:
            branch = Sequential()
            if spec is None:
                branch.add(Dense(1, input_dim=1))
            else:
                vocab_size, embed_dim = spec
                branch.add(Embedding(vocab_size, embed_dim, input_length=1))
                branch.add(Reshape(target_shape=(embed_dim,)))
            models.append(branch)

        self.model = Sequential()
        self.model.add(Merge(models, mode='concat'))
        self.model.add(Dense(1000, init='uniform'))
        self.model.add(Activation('relu'))
        self.model.add(Dense(500, init='uniform'))
        self.model.add(Activation('relu'))
        self.model.add(Dense(1))
        self.model.add(Activation('sigmoid'))

        self.model.compile(loss='mean_absolute_error', optimizer='adam')
项目:keras    作者:NVIDIA    | 项目源码 | 文件源码
def test_merge_sum():
    """Exercise Merge(mode='sum'): train, predict, weight and config round-trips."""
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    def _branch():
        # one Dense+relu branch feeding the merge
        branch = Sequential()
        branch.add(Dense(nb_hidden, input_shape=(input_dim,)))
        branch.add(Activation('relu'))
        return branch

    model = Sequential()
    model.add(Merge([_branch(), _branch()], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    # same four fit variants as before, driven by a kwargs table
    for extra in (dict(validation_data=([X_test, X_test], y_test)),
                  dict(validation_split=0.1),
                  dict(),
                  dict(shuffle=False)):
        model.fit([X_train, X_train], y_train, batch_size=batch_size,
                  nb_epoch=nb_epoch, verbose=0, **extra)

    loss = model.evaluate([X_test, X_test], y_test, verbose=0)

    model.predict([X_test, X_test], verbose=0)
    model.predict_classes([X_test, X_test], verbose=0)
    model.predict_proba([X_test, X_test], verbose=0)

    # test weight saving: persist, rebuild a fresh clone, reload
    fname = 'test_merge_sum_temp.h5'
    model.save_weights(fname, overwrite=True)
    model = Sequential()
    model.add(Merge([_branch(), _branch()], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.load_weights(fname)
    os.remove(fname)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    nloss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert(loss == nloss)

    # test serialization round-trips
    Sequential.from_config(model.get_config())
    model.summary()
    model_from_json(model.to_json())
    model_from_yaml(model.to_yaml())
项目:keras    作者:NVIDIA    | 项目源码 | 文件源码
def test_merge_recursivity():
    """Nested merges: Merge([Merge([l, r]), r2]) trains, saves and serializes."""
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    def _branch():
        # one Dense+relu branch feeding a merge
        branch = Sequential()
        branch.add(Dense(nb_hidden, input_shape=(input_dim,)))
        branch.add(Activation('relu'))
        return branch

    intermediate = Sequential()
    intermediate.add(Merge([_branch(), _branch()], mode='sum'))
    intermediate.add(Dense(nb_hidden))
    intermediate.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([intermediate, _branch()], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    triple_train = [X_train, X_train, X_train]
    triple_test = [X_test, X_test, X_test]

    # same four fit variants as before, driven by a kwargs table
    for extra in (dict(validation_data=(triple_test, y_test)),
                  dict(validation_split=0.1),
                  dict(),
                  dict(shuffle=False)):
        model.fit(triple_train, y_train, batch_size=batch_size,
                  nb_epoch=nb_epoch, verbose=0, **extra)

    loss = model.evaluate(triple_test, y_test, verbose=0)

    model.predict(triple_test, verbose=0)
    model.predict_classes(triple_test, verbose=0)
    model.predict_proba(triple_test, verbose=0)

    # weight persistence round-trip
    fname = 'test_merge_recursivity_temp.h5'
    model.save_weights(fname, overwrite=True)
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(triple_test, y_test, verbose=0)
    assert(loss == nloss)

    # test serialization round-trips
    Sequential.from_config(model.get_config())
    model.summary()
    model_from_json(model.to_json())
    model_from_yaml(model.to_yaml())
项目:keras    作者:NVIDIA    | 项目源码 | 文件源码
def test_merge_overlap():
    """Merge a branch with itself (shared layers) and round-trip weights/config."""
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    shared = Sequential()
    shared.add(Dense(nb_hidden, input_shape=(input_dim,)))
    shared.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([shared, shared], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    # same four fit variants (note: per-variant verbose levels preserved)
    for extra in (dict(verbose=1, validation_data=(X_test, y_test)),
                  dict(verbose=2, validation_split=0.1),
                  dict(verbose=0),
                  dict(verbose=1, shuffle=False)):
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, **extra)

    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_test, y_test, verbose=0)
    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)

    # weight persistence round-trip
    fname = 'test_merge_overlap_temp.h5'
    print(model.layers)
    model.save_weights(fname, overwrite=True)
    print(model.trainable_weights)

    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(X_test, y_test, verbose=0)
    assert(loss == nloss)

    # test serialization round-trips
    Sequential.from_config(model.get_config())
    model.summary()
    model_from_json(model.to_json())
    model_from_yaml(model.to_yaml())