Python keras.layers.core module, TimeDistributedDense() example source code

The following 24 code examples, extracted from open-source Python projects, illustrate how to use keras.layers.core.TimeDistributedDense().
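For orientation before the examples: TimeDistributedDense applies a single shared Dense transformation to every timestep of a 3D (samples, timesteps, features) input, which is why it usually follows a recurrent layer built with return_sequences=True. Below is a minimal sketch under the Keras 0.x/1.x-era API these examples use (dimensions made up for illustration); the layer was later deprecated in Keras 1 and removed in Keras 2 in favor of the TimeDistributed wrapper, shown in the trailing comment.

from keras.models import Sequential
from keras.layers.core import TimeDistributedDense

model = Sequential()
# one weight matrix shared across all 10 timesteps: (batch, 10, 16) -> (batch, 10, 8)
model.add(TimeDistributedDense(output_dim=8, input_dim=16, input_length=10))
model.compile(loss='mse', optimizer='sgd')

# Modern equivalent (Keras 1.x and later):
# from keras.layers import TimeDistributed, Dense
# model.add(TimeDistributed(Dense(8), input_shape=(10, 16)))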

Project: keras    Author: GeekLiB    | Project source | File source
def test_timedistributeddense():
    from keras import regularizers
    from keras import constraints

    layer_test(core.TimeDistributedDense,
               kwargs={'output_dim': 2, 'input_length': 2},
               input_shape=(3, 2, 3))

    layer_test(core.TimeDistributedDense,
               kwargs={'output_dim': 3,
                       'W_regularizer': regularizers.l2(0.01),
                       'b_regularizer': regularizers.l1(0.01),
                       'activity_regularizer': regularizers.activity_l2(0.01),
                       'W_constraint': constraints.MaxNorm(1),
                       'b_constraint': constraints.MaxNorm(1)},
               input_shape=(3, 2, 3))
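The layer_test helper used above is Keras' own test utility (keras.utils.test_utils.layer_test in this era): it instantiates the layer with the given kwargs, feeds random data of the given input_shape through it, checks the output shape, and round-trips the layer config. A sketch of a standalone call, assuming that import path:

from keras.layers import core
from keras.utils.test_utils import layer_test

# feeds a random (3, 2, 3) batch through TimeDistributedDense(output_dim=2)
layer_test(core.TimeDistributedDense,
           kwargs={'output_dim': 2, 'input_length': 2},
           input_shape=(3, 2, 3))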
Project: keras-customized    Author: ambrite    | Project source | File source
def test_timedistributeddense():
    from keras import regularizers
    from keras import constraints

    layer_test(core.TimeDistributedDense,
               kwargs={'output_dim': 2, 'input_length': 2},
               input_shape=(3, 2, 3))

    layer_test(core.TimeDistributedDense,
               kwargs={'output_dim': 3,
                       'W_regularizer': regularizers.l2(0.01),
                       'b_regularizer': regularizers.l1(0.01),
                       'activity_regularizer': regularizers.activity_l2(0.01),
                       'W_constraint': constraints.MaxNorm(1),
                       'b_constraint': constraints.MaxNorm(1)},
               input_shape=(3, 2, 3))
Project: keras    Author: NVIDIA    | Project source | File source
def test_timedistributeddense():
    from keras import regularizers
    from keras import constraints

    layer_test(core.TimeDistributedDense,
               kwargs={'output_dim': 2, 'input_length': 2},
               input_shape=(3, 2, 3))

    layer_test(core.TimeDistributedDense,
               kwargs={'output_dim': 3,
                       'W_regularizer': regularizers.l2(0.01),
                       'b_regularizer': regularizers.l1(0.01),
                       'activity_regularizer': regularizers.activity_l2(0.01),
                       'W_constraint': constraints.MaxNorm(1),
                       'b_constraint': constraints.MaxNorm(1)},
               input_shape=(3, 2, 3))
Project: seq2graph    Author: masterkeywikz    | Project source | File source
def __init__(self, output_dim, hidden_dim, output_length, depth=1, dropout=0.25, **kwargs):
        super(SimpleSeq2seq, self).__init__()
        if type(depth) not in [list, tuple]:
            depth = (depth, depth)
        self.encoder = LSTM(hidden_dim, **kwargs)
        self.decoder = LSTM(hidden_dim, return_sequences=True, **kwargs)
        for i in range(1, depth[0]):
            self.add(LSTM(hidden_dim, return_sequences=True, **kwargs))
            self.add(Dropout(dropout))
        self.add(self.encoder)
        self.add(Dropout(dropout))
        self.add(RepeatVector(output_length))
        self.add(self.decoder)
        for i in range(1, depth[1]):
            self.add(LSTM(hidden_dim, return_sequences=True, **kwargs))
            self.add(Dropout(dropout))
        self.add(TimeDistributedDense(output_dim, activation='softmax'))
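A hypothetical construction call, assuming the rest of the seq2graph SimpleSeq2seq class (a Sequential subclass, as the super().__init__() call above implies); the dimensions are illustrative only:

model = SimpleSeq2seq(output_dim=1000, hidden_dim=256, output_length=20, depth=2, dropout=0.25)
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')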
Project: visual_turing_test-tutorial    Author: mateuszmalinowski    | Project source | File source
def create(self):
        self.textual_embedding(self, mask_zero=True)
        self.stacked_RNN(self)
        self.add(self._config.recurrent_encoder(
            self._config.hidden_state_dim, 
            return_sequences=False,
            go_backwards=self._config.go_backwards))
        self.add(Dropout(0.5))
        self.add(RepeatVector(self._config.max_output_time_steps))
        self.add(self._config.recurrent_decoder(
                self._config.hidden_state_dim, return_sequences=True))
        self.add(Dropout(0.5))
        self.add(TimeDistributedDense(self._config.output_dim))
        self.add(Activation('softmax'))


###
# Multimodal models
###
Project: keras    Author: GeekLiB    | Project source | File source
def test_masking():
    np.random.seed(1337)
    X = np.array([[[1], [1]],
                  [[0], [0]]])
    model = Sequential()
    model.add(Masking(mask_value=0, input_shape=(2, 1)))
    model.add(TimeDistributedDense(1, init='one'))
    model.compile(loss='mse', optimizer='sgd')
    y = np.array([[[1], [1]],
                  [[1], [1]]])
    loss = model.train_on_batch(X, y)
    assert loss == 0
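Why the loss comes out to exactly zero, sketched as plain arithmetic rather than library code: init='one' fills the 1x1 kernel with ones and leaves the bias at zero, so the layer computes y = x; the unmasked sample [[1], [1]] reproduces its target exactly, and the all-zero sample is excluded from the loss by the Masking layer.

import numpy as np

W, b = np.ones((1, 1)), np.zeros(1)  # what init='one' gives a 1-unit dense layer
x = np.array([[1.0], [1.0]])         # the unmasked timesteps
assert np.allclose(x.dot(W) + b, x)  # output equals target, so the MSE is 0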
Project: triplets-extraction    Author: zsctju    | Project source | File source
def creat_binary_tag_LSTM(sourcevocabsize, targetvocabsize, source_W, input_seq_lenth, output_seq_lenth,
                          hidden_dim, emd_dim, loss='categorical_crossentropy', optimizer='rmsprop'):
    encoder_a = Sequential()
    encoder_b = Sequential()
    encoder_c = Sequential()
    l_A_embedding = Embedding(input_dim=sourcevocabsize + 1,
                              output_dim=emd_dim,
                              input_length=input_seq_lenth,
                              mask_zero=True,
                              weights=[source_W])
    encoder_a.add(l_A_embedding)
    encoder_a.add(Dropout(0.3))
    encoder_b.add(l_A_embedding)
    encoder_b.add(Dropout(0.3))
    encoder_c.add(l_A_embedding)

    Model = Sequential()

    encoder_a.add(LSTM(hidden_dim, return_sequences=True))
    encoder_b.add(LSTM(hidden_dim, return_sequences=True, go_backwards=True))
    encoder_rb = Sequential()
    encoder_rb.add(ReverseLayer2(encoder_b))
    encoder_ab = Merge((encoder_a, encoder_rb), mode='concat')
    Model.add(encoder_ab)

    decodelayer = LSTMDecoder_tag(hidden_dim=hidden_dim, output_dim=hidden_dim,
                                  input_length=input_seq_lenth,
                                  output_length=output_seq_lenth,
                                  state_input=False,
                                  return_sequences=True)
    Model.add(decodelayer)
    Model.add(TimeDistributedDense(targetvocabsize + 1))
    Model.add(Activation('softmax'))
    Model.compile(loss=loss, optimizer=optimizer)
    return Model
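A hypothetical call matching the signature above; source_W must be a (sourcevocabsize + 1, emd_dim) embedding matrix, e.g. pretrained word vectors (random here for illustration):

import numpy as np

source_W = np.random.uniform(-0.25, 0.25, (5000 + 1, 100))
model = creat_binary_tag_LSTM(5000, 120, source_W, input_seq_lenth=80,
                              output_seq_lenth=80, hidden_dim=200, emd_dim=100)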
Project: sciDT    Author: edvisees    | Project source | File source
def fit_model(self, X, Y, use_attention, att_context, bidirectional):
    print >>sys.stderr, "Input shape:", X.shape, Y.shape
    early_stopping = EarlyStopping(patience = 2)
    num_classes = len(self.label_ind)
    if bidirectional:
      tagger = Graph()
      tagger.add_input(name='input', input_shape=X.shape[1:])
      if use_attention:
        tagger.add_node(TensorAttention(X.shape[1:], context=att_context), name='attention', input='input')
        lstm_input_node = 'attention'
      else:
        lstm_input_node = 'input'
      tagger.add_node(LSTM(X.shape[-1]/2, return_sequences=True), name='forward', input=lstm_input_node)
      tagger.add_node(LSTM(X.shape[-1]/2, return_sequences=True, go_backwards=True), name='backward', input=lstm_input_node)
      tagger.add_node(TimeDistributedDense(num_classes, activation='softmax'), name='softmax', inputs=['forward', 'backward'], merge_mode='concat', concat_axis=-1)
      tagger.add_output(name='output', input='softmax')
      tagger.summary()
      tagger.compile('adam', {'output':'categorical_crossentropy'})
      tagger.fit({'input':X, 'output':Y}, validation_split=0.1, callbacks=[early_stopping], show_accuracy=True, nb_epoch=100, batch_size=10)
    else:
      tagger = Sequential()
      word_proj_dim = 50
      if use_attention:
        _, input_len, timesteps, input_dim = X.shape
        tagger.add(HigherOrderTimeDistributedDense(input_dim=input_dim, output_dim=word_proj_dim))
        att_input_shape = (input_len, timesteps, word_proj_dim)
        print >>sys.stderr, "Attention input shape:", att_input_shape
        tagger.add(Dropout(0.5))
        tagger.add(TensorAttention(att_input_shape, context=att_context))
      else:
        _, input_len, input_dim = X.shape
        tagger.add(TimeDistributedDense(input_dim=input_dim, input_length=input_len, output_dim=word_proj_dim))
      tagger.add(LSTM(input_dim=word_proj_dim, output_dim=word_proj_dim, input_length=input_len, return_sequences=True))
      tagger.add(TimeDistributedDense(num_classes, activation='softmax'))
      tagger.summary()
      tagger.compile(loss='categorical_crossentropy', optimizer='adam')
      tagger.fit(X, Y, validation_split=0.1, callbacks=[early_stopping], show_accuracy=True, batch_size=10)

    return tagger
Project: keras-recommendation    Author: sonyisme    | Project source | File source
def test_seq_to_seq(self):
        print('sequence to sequence data:')
        (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200, input_shape=(5, 10), output_shape=(5, 10),
            classification=False)
        print('X_train:', X_train.shape)
        print('X_test:', X_test.shape)
        print('y_train:', y_train.shape)
        print('y_test:', y_test.shape)

        model = Sequential()
        model.add(TimeDistributedDense(X_train.shape[-1], y_train.shape[-1]))
        model.compile(loss='hinge', optimizer='rmsprop')
        history = model.fit(X_train, y_train, nb_epoch=12, batch_size=16, validation_data=(X_test, y_test), verbose=2)
        self.assertTrue(history.validation_loss[-1] < 0.75)
Project: keras-recommendation    Author: sonyisme    | Project source | File source
def build_lstm_autoencoder(autoencoder, X_train, X_test):
    X_train = X_train[:, np.newaxis, :] 
    X_test = X_test[:, np.newaxis, :]
    print("Modified X_train: ", X_train.shape)
    print("Modified X_test: ", X_test.shape)

    # The TimeDistributedDense isn't strictly necessary, but without it you would need a lot of GPU memory to do 784x394-394x784
    autoencoder.add(TimeDistributedDense(input_dim, 16))
    autoencoder.add(AutoEncoder(encoder=LSTM(16, 8, activation=activation, return_sequences=True),
                                decoder=LSTM(8, input_dim, activation=activation, return_sequences=True),
                                output_reconstruction=False, tie_weights=True))
    return autoencoder, X_train, X_test
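A hypothetical driver for the function above, assuming the module-level input_dim and activation globals the snippet relies on, plus MNIST-style 784-feature rows:

import numpy as np
from keras.models import Sequential

input_dim, activation = 784, 'tanh'  # assumed globals from the source file
X_train = np.random.rand(128, input_dim).astype('float32')
X_test = np.random.rand(32, input_dim).astype('float32')
autoencoder, X_train, X_test = build_lstm_autoencoder(Sequential(), X_train, X_test)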
Project: keras-customized    Author: ambrite    | Project source | File source
def test_masking():
    np.random.seed(1337)
    X = np.array([[[1], [1]],
                  [[0], [0]]])
    model = Sequential()
    model.add(Masking(mask_value=0, input_shape=(2, 1)))
    model.add(TimeDistributedDense(1, init='one'))
    model.compile(loss='mse', optimizer='sgd')
    y = np.array([[[1], [1]],
                  [[1], [1]]])
    loss = model.train_on_batch(X, y)
    assert loss == 0
Project: PhilosophyLSTM    Author: guilherme-pombo    | Project source | File source
def create_model(word_coding):
    """
    Create the LSTM model
    :param word_coding:
    :return:
    """
    model = Graph()
    model.add_input(name='input', input_shape=(sd_len, input_dim))
    model.add_node(TimeDistributedDense(input_dim=input_dim, output_dim=lstm_hdim, input_length=sd_len),
                   name=layerNames[0], input='input')
    model.add_node(BatchNormalization(), name=layerNames[1], input=layerNames[0])

    model.add_node(LSTM(input_dim=lstm_hdim, output_dim=lstm_hdim, return_sequences=True), name=layerNames[2] + 'left',
                   input=layerNames[1])
    model.add_node(BatchNormalization(), name=layerNames[3] + 'left', input=layerNames[2] + 'left')

    model.add_node(LSTM(input_dim=lstm_hdim, output_dim=lstm_hdim, return_sequences=True, go_backwards=True),
                   name=layerNames[2] + 'right', input=layerNames[1])
    model.add_node(BatchNormalization(), name=layerNames[3] + 'right', input=layerNames[2] + 'right')

    model.add_node(LSTM(input_dim=lstm_hdim, output_dim=lstm_hdim, return_sequences=False), name=layerNames[6] + 'left',
                   input=layerNames[3] + 'left')

    model.add_node(LSTM(input_dim=lstm_hdim, output_dim=lstm_hdim, return_sequences=False, go_backwards=True),
                   name=layerNames[6] + 'right', input=layerNames[3] + 'right')

    model.add_node(BatchNormalization(), name=layerNames[7], inputs=[layerNames[6] + 'left', layerNames[6] + 'right'])
    model.add_node(Dropout(0.2), name=layerNames[8], input=layerNames[7])

    model.add_node(Dense(input_dim=bridge_dim, output_dim=dense_dim), name=layerNames[9], input=layerNames[8])
    model.add_node(ELU(), name=layerNames[10], input=layerNames[9])
    model.add_node(Dropout(0.2), name=layerNames[11], input=layerNames[10])

    model.add_node(Dense(input_dim=dense_dim, output_dim=len(word_coding)), name=layerNames[12], input=layerNames[11])
    model.add_node(Activation('softmax'), name=layerNames[13], input=layerNames[12])
    model.add_output(name='output1', input=layerNames[13])

    model.compile(optimizer='rmsprop', loss={'output1': 'categorical_crossentropy'})

    return model
Project: keras    Author: NVIDIA    | Project source | File source
def test_masking():
    np.random.seed(1337)
    X = np.array([[[1], [1]],
                  [[0], [0]]])
    model = Sequential()
    model.add(Masking(mask_value=0, input_shape=(2, 1)))
    model.add(TimeDistributedDense(1, init='one'))
    model.compile(loss='mse', optimizer='sgd')
    y = np.array([[[1], [1]],
                  [[1], [1]]])
    loss = model.train_on_batch(X, y)
    assert loss == 0
Project: deep-coref    Author: clarkkev    | Project source | File source
def test_loss_masking(self):
        X = np.array(
            [[[1, 1], [2, 1], [3, 1], [5, 5]],
             [[1, 5], [5, 0], [0, 0], [0, 0]]], dtype=np.int32)
        model = Sequential()
        model.add(Masking(mask_value=0, input_shape=(None, 2)))
        model.add(TimeDistributedDense(1, init='one'))
        model.compile(loss='mse', optimizer='sgd')
        y = model.predict(X)
        loss = model.fit(X, 4*y, nb_epoch=1, batch_size=2, verbose=1).history['loss'][0]
        assert loss == 285.
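Where 285 comes from, sketched as plain arithmetic: the ones-initialized layer outputs the sum of each timestep's two features, so the unmasked timesteps yield y = [2, 3, 4, 10, 6, 5]; with targets 4*y the per-timestep squared error is (3y)^2, and averaging over the six unmasked timesteps (the masked [0, 0] steps are excluded, as the assertion implies) gives 9 * (4 + 9 + 16 + 100 + 36 + 25) / 6 = 285.

import numpy as np

y = np.array([2., 3., 4., 10., 6., 5.])  # layer outputs on the unmasked timesteps
assert np.mean((4 * y - y) ** 2) == 285.0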
Project: deep-coref    Author: clarkkev    | Project source | File source
def test_time_dist_dense(self):
        layer = core.TimeDistributedDense(10, input_shape=(None, 10))
        self._runner(layer)
Project: deep-coref    Author: clarkkev    | Project source | File source
def test_seq_to_seq(self):
        print('sequence to sequence data:')
        (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200, input_shape=(3, 5), output_shape=(3, 5),
                                                             classification=False)
        print('X_train:', X_train.shape)
        print('X_test:', X_test.shape)
        print('y_train:', y_train.shape)
        print('y_test:', y_test.shape)

        model = Sequential()
        model.add(TimeDistributedDense(y_train.shape[-1], input_shape=(None, X_train.shape[-1])))
        model.compile(loss='hinge', optimizer='rmsprop')
        history = model.fit(X_train, y_train, nb_epoch=12, batch_size=16, validation_data=(X_test, y_test), verbose=2)
        self.assertTrue(history.history['val_loss'][-1] < 0.8)
Project: deep-coref    Author: clarkkev    | Project source | File source
def build_lstm_autoencoder(autoencoder, X_train, X_test):
    X_train = X_train[:, np.newaxis, :]
    X_test = X_test[:, np.newaxis, :]
    print("Modified X_train: ", X_train.shape)
    print("Modified X_test: ", X_test.shape)

    # The TimeDistributedDense isn't strictly necessary, but without it you would need a lot of GPU memory to do 784x394-394x784
    autoencoder.add(TimeDistributedDense(input_dim, 16))
    autoencoder.add(AutoEncoder(encoder=LSTM(16, 8, activation=activation, return_sequences=True),
                                decoder=LSTM(8, input_dim, activation=activation, return_sequences=True),
                                output_reconstruction=False))
    return autoencoder, X_train, X_test
Project: RecommendationSystem    Author: TURuibo    | Project source | File source
def test_seq_to_seq(self):
        print('sequence to sequence data:')
        (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200, input_shape=(5, 10), output_shape=(5, 10),
            classification=False)
        print('X_train:', X_train.shape)
        print('X_test:', X_test.shape)
        print('y_train:', y_train.shape)
        print('y_test:', y_test.shape)

        model = Sequential()
        model.add(TimeDistributedDense(X_train.shape[-1], y_train.shape[-1]))
        model.compile(loss='hinge', optimizer='rmsprop')
        history = model.fit(X_train, y_train, nb_epoch=12, batch_size=16, validation_data=(X_test, y_test), verbose=2)
        self.assertTrue(history.validation_loss[-1] < 0.75)
Project: RecommendationSystem    Author: TURuibo    | Project source | File source
def build_lstm_autoencoder(autoencoder, X_train, X_test):
    X_train = X_train[:, np.newaxis, :] 
    X_test = X_test[:, np.newaxis, :]
    print("Modified X_train: ", X_train.shape)
    print("Modified X_test: ", X_test.shape)

    # The TimeDistributedDense isn't strictly necessary, but without it you would need a lot of GPU memory to do 784x394-394x784
    autoencoder.add(TimeDistributedDense(input_dim, 16))
    autoencoder.add(AutoEncoder(encoder=LSTM(16, 8, activation=activation, return_sequences=True),
                                decoder=LSTM(8, input_dim, activation=activation, return_sequences=True),
                                output_reconstruction=False, tie_weights=True))
    return autoencoder, X_train, X_test
Project: music_rnn    Author: chengjunwen    | Project source | File source
def SimpleRNNModel(self, nHidden=120, lr=0.01):
    self.rnnModel.add(SimpleRNN(nHidden, input_shape=(None, self.maxFeatures), activation='sigmoid', return_sequences=True))
    self.rnnModel.add(TimeDistributedDense(self.maxFeatures))
    self.rnnModel.add(Activation('softmax'))
    rmsprop = RMSprop(lr=lr, rho=0.9, epsilon=1e-06)
    self.rnnModel.compile(loss='categorical_crossentropy', optimizer=rmsprop)
Project: music_rnn    Author: chengjunwen    | Project source | File source
def LSTMModel(self, nHidden=150, lr=0.01):
    # print('nHidden: %i\tlr: %.3f' % (nHidden, lr))
    self.rnnModel.add(GRU(nHidden, activation='sigmoid', input_shape=(None, self.maxFeatures), return_sequences=True))
    # self.rnnModel.add(LSTM(nHidden, activation='sigmoid', input_shape=(None, nHidden), return_sequences=True))
    self.rnnModel.add(TimeDistributedDense(nHidden))
    self.rnnModel.add(Activation('relu'))
    self.rnnModel.add(TimeDistributedDense(self.maxFeatures))
    self.rnnModel.add(Activation('softmax'))
    rmsprop = RMSprop(lr=lr, rho=0.9, epsilon=1e-06)
    self.rnnModel.compile(loss='categorical_crossentropy', optimizer=rmsprop)
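Both methods above follow the pattern that recurs throughout this collection: a recurrent layer with return_sequences=True emits one hidden vector per timestep, and TimeDistributedDense maps each of them, with shared weights, to per-timestep class scores. A minimal sketch of that pattern under the same era's API, with an assumed feature size:

from keras.models import Sequential
from keras.layers.core import Activation, TimeDistributedDense
from keras.layers.recurrent import GRU

n_features = 64  # assumed vocabulary/feature size
model = Sequential()
model.add(GRU(128, input_shape=(None, n_features), return_sequences=True))
model.add(TimeDistributedDense(n_features))  # shared dense at every timestep
model.add(Activation('softmax'))             # per-timestep distribution
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')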
Project: visual_turing_test-tutorial    Author: mateuszmalinowski    | Project source | File source
def create(self):
        assert self._config.merge_mode in ['max', 'ave', 'sum'], \
                'Merge mode of this model is either max, ave or sum'

        self.textual_embedding(self, mask_zero=False)
        self.stacked_RNN(self)
        self.add(self._config.recurrent_encoder(
            self._config.hidden_state_dim, 
            return_sequences=True,
            go_backwards=self._config.go_backwards))
        self.add(Dropout(0.5))
        self.add(TimeDistributedDense(self._config.output_dim))
        self.temporal_pooling(self)
        self.add(Activation('softmax'))
Project: visual_turing_test-tutorial    Author: mateuszmalinowski    | Project source | File source
def create(self):
        language_model = Sequential()
        self.textual_embedding(language_model, mask_zero=True)
        self.stacked_RNN(language_model)
        language_model.add(self._config.recurrent_encoder(
            self._config.hidden_state_dim, 
            return_sequences=False,
            go_backwards=self._config.go_backwards))
        self.language_model = language_model

        visual_model_factory = \
                select_sequential_visual_model[self._config.trainable_perception_name](
                    self._config.visual_dim)
        visual_model = visual_model_factory.create()
        visual_dimensionality = visual_model_factory.get_dimensionality()
        self.visual_embedding(visual_model, visual_dimensionality)
        #visual_model = Sequential()
        #self.visual_embedding(visual_model)
        self.visual_model = visual_model

        if self._config.multimodal_merge_mode == 'dot':
            self.add(Merge([language_model, visual_model], mode='dot', dot_axes=[(1,),(1,)]))
        else:
            self.add(Merge([language_model, visual_model], mode=self._config.multimodal_merge_mode))

        self.add(Dropout(0.5))
        self.add(Dense(self._config.output_dim))

        self.add(RepeatVector(self._config.max_output_time_steps))
        self.add(self._config.recurrent_decoder(
                self._config.hidden_state_dim, return_sequences=True))
        self.add(Dropout(0.5))
        self.add(TimeDistributedDense(self._config.output_dim))
        self.add(Activation('softmax'))


###
# Graph-based models
###
Project: stock-predict-by-RNN-LSTM    Author: blockchain99    | Project source | File source
def __prepare_model(self):
        print('Build model...')
        model = Sequential()
        model.add(TimeDistributedDense(output_dim=self.hidden_cnt,
                                       input_dim=self.input_dim,
                                       input_length=self.input_length,
                                       activation='sigmoid'))
        # Newer wrapper-API equivalent of the layer above:
        # model.add(TimeDistributed(Dense(output_dim=self.hidden_cnt,
        #                                 input_dim=self.input_dim,
        #                                 input_length=self.input_length,
        #                                 activation='sigmoid')))
        # TimeDistributedMerge can no longer be imported from keras.layers.core,
        # so the Lambda below stands in for model.add(TimeDistributedMerge(mode='ave')):
        # it averages over the time axis, collapsing (batch, timesteps, features)
        # to (batch, features).
        # (The author had kept the Keras "antirectifier" Lambda example here as
        # a reference for pairing a custom Lambda function with a matching
        # output_shape function, the pattern used below.)

        model.add(Lambda(function=lambda x: K.mean(x, axis=1),
                         output_shape=lambda shape: (shape[0],) + shape[2:]))
        # model.add(Dropout(0.5))
        model.add(Dropout(0.93755))
        model.add(Dense(self.hidden_cnt, activation='tanh'))
        model.add(Dense(self.output_dim, activation='softmax'))

        # Try different optimizers and optimizer configs:
        print('Compile model...')
        # sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
        # model.compile(loss='categorical_crossentropy', optimizer=sgd)
        adagrad = keras.optimizers.Adagrad(lr=0.01, epsilon=1e-08, decay=0.0)
        model.compile(loss='categorical_crossentropy', optimizer=adagrad)
        return model