The following 15 code examples, extracted from open-source Python projects, illustrate how to use keras.layers.core.RepeatVector().
def __init__(self, output_dim, hidden_dim, output_length, depth=1, dropout=0.25, **kwargs):
    """Build a simple seq2seq stack: (deep) LSTM encoder -> RepeatVector -> (deep) LSTM decoder.

    :param output_dim: dimensionality of each output timestep (softmax over this many classes).
    :param hidden_dim: hidden state size of every LSTM layer.
    :param output_length: number of timesteps the decoder emits.
    :param depth: int or (encoder_depth, decoder_depth) pair.
    :param dropout: dropout rate applied after each recurrent layer.
    :param kwargs: forwarded to every LSTM constructor.
    """
    super(SimpleSeq2seq, self).__init__()
    # Accept a bare int and expand it to (encoder_depth, decoder_depth).
    # FIX: use isinstance instead of `type(depth) not in [list, tuple]`.
    if not isinstance(depth, (list, tuple)):
        depth = (depth, depth)
    self.encoder = LSTM(hidden_dim, **kwargs)
    self.decoder = LSTM(hidden_dim, return_sequences=True, **kwargs)
    # Extra encoder layers (depth[0] - 1 of them) must emit full sequences
    # so the next LSTM receives 3D input.
    for _ in range(1, depth[0]):
        self.add(LSTM(hidden_dim, return_sequences=True, **kwargs))
        self.add(Dropout(dropout))
    self.add(self.encoder)
    self.add(Dropout(dropout))
    # Repeat the encoder's final state once per output timestep.
    self.add(RepeatVector(output_length))
    self.add(self.decoder)
    for _ in range(1, depth[1]):
        self.add(LSTM(hidden_dim, return_sequences=True, **kwargs))
        self.add(Dropout(dropout))
    self.add(TimeDistributedDense(output_dim, activation='softmax'))
def create(self):
    """Assemble a text-only encoder-decoder answerer.

    Embeds the question, encodes it to a single hidden vector, then
    repeats that vector for every output timestep and decodes it into a
    softmax distribution per step.
    """
    self.textual_embedding(self, mask_zero=True)
    self.stacked_RNN(self)
    # Encoder collapses the question sequence into one hidden state.
    self.add(self._config.recurrent_encoder(
        self._config.hidden_state_dim,
        return_sequences=False,
        go_backwards=self._config.go_backwards))
    self.add(Dropout(0.5))
    # Feed the same encoding to the decoder at each output timestep.
    self.add(RepeatVector(self._config.max_output_time_steps))
    self.add(self._config.recurrent_decoder(
        self._config.hidden_state_dim, return_sequences=True))
    self.add(Dropout(0.5))
    self.add(TimeDistributedDense(self._config.output_dim))
    self.add(Activation('softmax'))

###
# Multimodal models
###
def build_CNN_LSTM(channels, width, height, lstm_output_size, nb_classes):
    """Build a CNN encoder + LSTM decoder for fixed-length sequence labeling.

    :param channels: number of input image channels.
    :param width: input image width.
    :param height: input image height.
    :param lstm_output_size: number of timesteps the LSTM decodes.
    :param nb_classes: softmax classes per timestep.
    :return: compiled Sequential model (categorical crossentropy, adam,
             temporal sample weighting).
    """
    model = Sequential()
    # conv block 1
    model.add(Convolution2D(64, 3, 3, border_mode='same', activation='relu',
                            input_shape=(channels, height, width)))
    model.add(BatchNormalization(mode=0, axis=1))
    # conv block 2
    model.add(Convolution2D(64, 3, 3, border_mode='same', activation='relu'))
    model.add(BatchNormalization(mode=0, axis=1))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    # conv block 3
    model.add(Convolution2D(128, 3, 3, border_mode='same', activation='relu'))
    model.add(BatchNormalization(mode=0, axis=1))
    # conv block 4
    model.add(Convolution2D(128, 3, 3, border_mode='same', activation='relu'))
    model.add(BatchNormalization(mode=0, axis=1))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    # flatten
    # FIX: dropped the useless `a = model.add(...)` — Sequential.add returns
    # None and `a` was never used.
    model.add(Flatten())
    # dense block 1
    model.add(Dense(512, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    # dense block 2
    model.add(Dense(512, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    # LSTM decoder: repeat the image encoding once per output timestep.
    model.add(RepeatVector(lstm_output_size))
    model.add(LSTM(512, return_sequences=True))
    model.add(TimeDistributed(Dropout(0.5)))
    model.add(TimeDistributed(Dense(nb_classes, activation='softmax')))
    model.summary()
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=[categorical_accuracy_per_sequence],
                  sample_weight_mode='temporal'
                  )
    return model
def _buildDecoder(self, z, latent_rep_size, max_length, charset_length):
    """Decode latent vector `z` into per-position character probabilities.

    :param z: latent tensor of width `latent_rep_size`.
    :param latent_rep_size: size of the latent representation.
    :param max_length: output sequence length.
    :param charset_length: softmax width per position.
    :return: tensor of shape (max_length, charset_length) probabilities.
    """
    decoded = Dense(latent_rep_size, name='latent_input', activation='relu')(z)
    # Tile the latent vector across every output position.
    decoded = RepeatVector(max_length, name='repeat_vector')(decoded)
    decoded = GRU(501, return_sequences=True, name='gru_1')(decoded)
    decoded = GRU(501, return_sequences=True, name='gru_2')(decoded)
    decoded = GRU(501, return_sequences=True, name='gru_3')(decoded)
    return TimeDistributed(
        Dense(charset_length, activation='softmax'), name='decoded_mean')(decoded)
def test_repeat_vector():
    """Smoke-test RepeatVector through the shared layer_test harness."""
    layer_test(core.RepeatVector,
               kwargs={'n': 3},
               input_shape=(3, 2))
def build_model(input_size, seq_len, hidden_size):
    """Build a GRU seq2seq classifier (softmax output, categorical crossentropy).

    FIX: original docstring was mojibake ("???? seq2seq ??").

    :param input_size: dimensionality of each input/output timestep.
    :param seq_len: number of timesteps the decoder emits.
    :param hidden_size: GRU hidden state size.
    :return: compiled Sequential model.
    """
    model = Sequential()
    # Encoder: compress the input sequence into one hidden vector.
    model.add(GRU(input_dim=input_size, output_dim=hidden_size, return_sequences=False))
    model.add(Dense(hidden_size, activation="relu"))
    # Present the encoding to the decoder at every output timestep.
    model.add(RepeatVector(seq_len))
    model.add(GRU(hidden_size, return_sequences=True))
    model.add(TimeDistributed(Dense(output_dim=input_size, activation="softmax")))
    model.compile(loss="categorical_crossentropy", optimizer='adam')
    return model
def build_model(input_size, seq_len, hidden_size):
    """Build a GRU seq2seq regressor (linear output, MSE loss).

    FIX: original docstring was mojibake ("???? sequence to sequence ??").

    :param input_size: dimensionality of each input/output timestep.
    :param seq_len: number of timesteps the decoder emits.
    :param hidden_size: GRU hidden state size.
    :return: compiled Sequential model.
    """
    model = Sequential()
    # Encoder: compress the input sequence into one hidden vector.
    model.add(GRU(input_dim=input_size, output_dim=hidden_size, return_sequences=False))
    model.add(Dense(hidden_size, activation="relu"))
    # Present the encoding to the decoder at every output timestep.
    model.add(RepeatVector(seq_len))
    model.add(GRU(hidden_size, return_sequences=True))
    model.add(TimeDistributed(Dense(output_dim=input_size, activation="linear")))
    model.compile(loss="mse", optimizer='adam')
    return model
def test_repeat_vector(self):
    """RepeatVector(10) must pass the generic layer runner."""
    repeat_layer = core.RepeatVector(10)
    self._runner(repeat_layer)
def create(self):
    """Build a multimodal classifier whose visual stream is zero-padded in time.

    Two bug fixes versus the original:
    * `zero_model.add(RepeatVector(...)-1)` subtracted 1 from the return value
      of `add()` (which is None) and would raise TypeError; the `- 1` belongs
      inside the repeat count (pads to max_input_time_steps together with the
      one real visual step — TODO confirm intended count against the caller).
    * `Merge[visual_model, zero_model]` indexed the Merge class and passed
      `mode=` to `add()`; Merge takes the model list and mode itself.
    """
    language_model = Sequential()
    self.textual_embedding(language_model, mask_zero=True)
    self.language_model = language_model

    visual_model_factory = \
        select_sequential_visual_model[self._config.trainable_perception_name](
            self._config.visual_dim)
    visual_model = visual_model_factory.create()
    visual_dimensionality = visual_model_factory.get_dimensionality()
    self.visual_embedding(visual_model, visual_dimensionality)

    # the below should contain all zeros
    zero_model = Sequential()
    zero_model.add(RepeatVector(self._config.max_input_time_steps - 1))
    visual_model.add(Merge([visual_model, zero_model], mode='concat'))
    self.visual_model = visual_model

    if self._config.multimodal_merge_mode == 'dot':
        self.add(Merge([language_model, visual_model], mode='dot', dot_axes=[(1,), (1,)]))
    else:
        self.add(Merge([language_model, visual_model],
                       mode=self._config.multimodal_merge_mode))

    self.add(self._config.recurrent_encoder(
        self._config.hidden_state_dim,
        return_sequences=False,
        go_backwards=self._config.go_backwards))
    self.deep_mlp()
    self.add(Dense(self._config.output_dim))
    self.add(Activation('softmax'))
def create(self):
    """Fuse question and image streams, encode jointly, and classify.

    The single visual feature vector is tiled across every input timestep
    so it can be merged with the per-timestep language embedding.
    """
    language_model = Sequential()
    self.textual_embedding(language_model, mask_zero=True)
    self.language_model = language_model

    factory = select_sequential_visual_model[
        self._config.trainable_perception_name](self._config.visual_dim)
    visual_model = factory.create()
    self.visual_embedding(visual_model, factory.get_dimensionality())
    self.visual_model = visual_model
    # Tile the visual vector once per input timestep.
    visual_model.add(RepeatVector(self._config.max_input_time_steps))

    merge_mode = self._config.multimodal_merge_mode
    if merge_mode == 'dot':
        self.add(Merge([language_model, visual_model], mode='dot', dot_axes=[(1,), (1,)]))
    else:
        self.add(Merge([language_model, visual_model], mode=merge_mode))

    # Joint recurrent encoder over the fused sequence.
    self.add(self._config.recurrent_encoder(
        self._config.hidden_state_dim,
        return_sequences=False,
        go_backwards=self._config.go_backwards))
    self.deep_mlp()
    self.add(Dense(self._config.output_dim))
    self.add(Activation('softmax'))
def create(self):
    """Multimodal encoder-decoder: encode question + image, decode an answer sequence.

    The language stream is fully encoded to a vector first, merged with the
    visual vector, then expanded over output timesteps and decoded.
    """
    language_model = Sequential()
    self.textual_embedding(language_model, mask_zero=True)
    self.stacked_RNN(language_model)
    language_model.add(self._config.recurrent_encoder(
        self._config.hidden_state_dim,
        return_sequences=False,
        go_backwards=self._config.go_backwards))
    self.language_model = language_model

    factory = select_sequential_visual_model[
        self._config.trainable_perception_name](self._config.visual_dim)
    visual_model = factory.create()
    self.visual_embedding(visual_model, factory.get_dimensionality())
    self.visual_model = visual_model

    merge_mode = self._config.multimodal_merge_mode
    if merge_mode == 'dot':
        self.add(Merge([language_model, visual_model], mode='dot', dot_axes=[(1,), (1,)]))
    else:
        self.add(Merge([language_model, visual_model], mode=merge_mode))

    self.add(Dropout(0.5))
    self.add(Dense(self._config.output_dim))
    # Decoder: repeat the fused vector for every output timestep.
    self.add(RepeatVector(self._config.max_output_time_steps))
    self.add(self._config.recurrent_decoder(
        self._config.hidden_state_dim, return_sequences=True))
    self.add(Dropout(0.5))
    self.add(TimeDistributedDense(self._config.output_dim))
    self.add(Activation('softmax'))

###
# Graph-based models
###
def build(self):
    """Construct the NPI-style network: a shared stateful-LSTM core feeding
    an end-probability head, a program head, and one head per argument.

    Side effects: sets self.f_enc and self.model, then compiles the model
    and writes model.png.
    """
    enc_size = self.size_of_env_observation()
    argument_size = IntegerArguments.size_of_arguments
    # Fixed batch size is required because the LSTMs below are stateful.
    input_enc = InputLayer(batch_input_shape=(self.batch_size, enc_size), name='input_enc')
    input_arg = InputLayer(batch_input_shape=(self.batch_size, argument_size), name='input_arg')
    input_prg = Embedding(input_dim=PROGRAM_VEC_SIZE, output_dim=PROGRAM_KEY_VEC_SIZE,
                          input_length=1, batch_input_shape=(self.batch_size, 1))

    # f_enc: fuse environment observation with integer arguments.
    f_enc = Sequential(name='f_enc')
    f_enc.add(Merge([input_enc, input_arg], mode='concat'))
    f_enc.add(MaxoutDense(128, nb_feature=4))
    self.f_enc = f_enc

    program_embedding = Sequential(name='program_embedding')
    program_embedding.add(input_prg)

    # RepeatVector(1) lifts the 2D encoding to 3D so it can be concatenated
    # with the (batch, 1, key) program embedding along time.
    f_enc_convert = Sequential(name='f_enc_convert')
    f_enc_convert.add(f_enc)
    f_enc_convert.add(RepeatVector(1))

    # Shared core: two stacked stateful LSTMs (state persists across calls).
    f_lstm = Sequential(name='f_lstm')
    f_lstm.add(Merge([f_enc_convert, program_embedding], mode='concat'))
    f_lstm.add(LSTM(256, return_sequences=False, stateful=True, W_regularizer=l2(0.0000001)))
    f_lstm.add(Activation('relu', name='relu_lstm_1'))
    f_lstm.add(RepeatVector(1))
    f_lstm.add(LSTM(256, return_sequences=False, stateful=True, W_regularizer=l2(0.0000001)))
    f_lstm.add(Activation('relu', name='relu_lstm_2'))
    # plot(f_lstm, to_file='f_lstm.png', show_shapes=True)

    # Head 1: probability that the current program should terminate.
    f_end = Sequential(name='f_end')
    f_end.add(f_lstm)
    f_end.add(Dense(1, W_regularizer=l2(0.001)))
    f_end.add(Activation('sigmoid', name='sigmoid_end'))

    # Head 2: distribution over the next program to call.
    f_prog = Sequential(name='f_prog')
    f_prog.add(f_lstm)
    f_prog.add(Dense(PROGRAM_KEY_VEC_SIZE, activation="relu"))
    f_prog.add(Dense(PROGRAM_VEC_SIZE, W_regularizer=l2(0.0001)))
    f_prog.add(Activation('softmax', name='softmax_prog'))
    # plot(f_prog, to_file='f_prog.png', show_shapes=True)

    # Heads 3..N: one softmax per integer argument slot; all share f_lstm.
    f_args = []
    for ai in range(1, IntegerArguments.max_arg_num+1):
        f_arg = Sequential(name='f_arg%s' % ai)
        f_arg.add(f_lstm)
        f_arg.add(Dense(IntegerArguments.depth, W_regularizer=l2(0.0001)))
        f_arg.add(Activation('softmax', name='softmax_arg%s' % ai))
        f_args.append(f_arg)
        # plot(f_arg, to_file='f_arg.png', show_shapes=True)

    self.model = Model([input_enc.input, input_arg.input, input_prg.input],
                       [f_end.output, f_prog.output] + [fa.output for fa in f_args],
                       name="npi")
    self.compile_model()
    plot(self.model, to_file='model.png', show_shapes=True)
def build_keras_model_score_word_sg(index_size, vector_size,  # vocab_size,
                                    context_size,  # code_dim,
                                    score_vector_size,
                                    sub_batch_size=256,
                                    word_vectors=None,
                                    score_vectors=None,
                                    hidden_vectors=None,
                                    model=None
                                    ):
    """Build a Graph model computing skip-gram codes and per-context scores.

    Outputs:
      'code'  -- sigmoid(word_vec . hidden_vec) per sub-batch element.
      'score' -- score-matrix . word_vec per sub-batch element.

    Optional weight arrays initialize the three embeddings; `model` is
    accepted for interface compatibility but unused here.

    >>> word_vectors=np.array([[1,2,-1,1],[3,4,-1,-2],[5,6,-2,-2]])
    >>> score_vectors=np.array([[10,20,11,21,5,6,7,8],[30,40,33,41,9,8,7,6]])
    >>> hidden_vectors=np.array([[1,0,1,1],[0,1,1,1]])
    >>> sub_batch_size=3
    >>> vector_size=4
    >>> score_vector_size=2
    >>> kerasmodel=build_keras_model_score_word_sg(index_size=3,vector_size=vector_size,context_size=2,score_vector_size=score_vector_size,sub_batch_size=sub_batch_size,word_vectors=word_vectors,score_vectors=score_vectors,hidden_vectors=hidden_vectors)
    >>> ind=[[0,1,2],[1,2,0]]
    >>> ipt=[[1,0,1],[0,1,0]]
    >>> tmp1=kerasmodel.predict({'index':np.array(ind),'point':np.array(ipt)})
    >>> tmp3=np.array([[score_vectors[ipt[i][j]].reshape((score_vector_size,vector_size)).dot(word_vectors[ind[i][j]]) for j in range(sub_batch_size) ] for i in range(2)])
    >>> tmp2=np.array([[word_vectors[ind[i][j]].dot(hidden_vectors[ipt[i][j]].T) for j in range(sub_batch_size) ] for i in range(2)])
    >>> np.linalg.norm(1/(1+np.exp(-tmp2))-tmp1['code'])+np.linalg.norm(tmp1['score']-tmp3) < 0.0001
    True
    """
    kerasmodel = Graph()
    kerasmodel.add_input(name='point', input_shape=(sub_batch_size,), dtype=int)
    kerasmodel.add_input(name='index', input_shape=(sub_batch_size,), dtype=int)
    # Word-vector embedding, optionally initialized from word_vectors.
    if word_vectors is None:
        kerasmodel.add_node(Embedding(index_size, vector_size, input_length=sub_batch_size),
                            name='embedding', input='index')
    else:
        kerasmodel.add_node(Embedding(index_size, vector_size, input_length=sub_batch_size, weights=[word_vectors]),
                            name='embedding', input='index')
    # Context (hidden) embedding, optionally initialized from hidden_vectors.
    if hidden_vectors is None:
        kerasmodel.add_node(Embedding(context_size, vector_size, input_length=sub_batch_size),
                            name='embedpoint', input='point')
    else:
        kerasmodel.add_node(Embedding(context_size, vector_size, input_length=sub_batch_size, weights=[hidden_vectors]),
                            name='embedpoint', input='point')
    # Elementwise product then sum over the vector axis = per-pair dot product.
    kerasmodel.add_node(Lambda(lambda x: x.sum(2)), name='merge',
                        inputs=['embedding', 'embedpoint'], merge_mode='mul')
    kerasmodel.add_node(Activation('sigmoid'), name='sigmoid', input='merge')
    kerasmodel.add_output(name='code', input='sigmoid')
    # Score embedding: one (score_vector_size x vector_size) matrix per context.
    if score_vectors is None:
        kerasmodel.add_node(Embedding(context_size, score_vector_size*vector_size, input_length=sub_batch_size, ),
                            name='embedscore', input='point')
    else:
        kerasmodel.add_node(Embedding(context_size, score_vector_size*vector_size, input_length=sub_batch_size, weights=[score_vectors]),
                            name='embedscore', input='point')
    kerasmodel.add_node(Reshape((sub_batch_size, score_vector_size, vector_size,)),
                        name='score1', input='embedscore')
    # Broadcast each word vector across the score rows: flatten, repeat,
    # reshape, then permute back to (sub_batch, score_rows, vector).
    kerasmodel.add_node(Flatten(), name='index1', input='embedding')
    kerasmodel.add_node(RepeatVector(score_vector_size), name='index2', input='index1')
    kerasmodel.add_node(Reshape((score_vector_size, sub_batch_size, vector_size,)),
                        name='index3', input='index2')
    kerasmodel.add_node(Permute((2, 1, 3,)), name='index4', input='index3')
    # Row-wise dot product: multiply then sum over the last (vector) axis.
    kerasmodel.add_node(Lambda(lambda x: x.sum(-1)), name='scorenode',
                        inputs=['score1', 'index4'], merge_mode='mul')
    kerasmodel.add_output(name='score', input='scorenode')
    kerasmodel.compile('rmsprop', {'code': 'mse', 'score': 'mse'})
    return kerasmodel