The following 50 code examples, extracted from open-source Python projects, illustrate how to use keras.layers.core.Dropout().
def build_model(layers):
    """Build and compile a two-layer GRU regressor.

    Args:
        layers: sizes [input_dim, gru1_units, gru2_units, output_dim].

    Returns:
        Compiled Keras Sequential model (mse loss, rmsprop optimizer).
    """
    model = Sequential()
    # NOTE(review): input_dim/output_dim are legacy Keras 1 argument names.
    model.add(GRU(input_dim=layers[0], output_dim=layers[1],
                  activation='tanh', return_sequences=True))
    model.add(Dropout(0.15))  # dropout against overfitting
    model.add(GRU(layers[2], activation='tanh', return_sequences=False))
    model.add(Dropout(0.15))  # dropout against overfitting
    model.add(Dense(output_dim=layers[3]))
    model.add(Activation("linear"))
    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")  # alternatives tried: Nadam, SGD
    # fixed: Python 3 print function (was a Python 2 print statement)
    print("Compilation Time : ", time.time() - start)
    return model
def cnn_word_model(self):
    """Build a multi-kernel-size word-level CNN for binary classification.

    Reads hyperparameters from ``self.opt`` and ``self.kernel_sizes``;
    returns an uncompiled Keras Model producing a sigmoid probability.
    """
    embed_input = Input(shape=(self.opt['max_sequence_length'], self.opt['embedding_dim'],))
    # One conv branch per kernel size, each globally max-pooled.
    pooled = []
    for ksize in self.kernel_sizes:
        branch = Conv1D(self.opt['filters_cnn'], kernel_size=ksize,
                        activation=None,
                        kernel_regularizer=l2(self.opt['regul_coef_conv']),
                        padding='same')(embed_input)
        branch = BatchNormalization()(branch)
        branch = Activation('relu')(branch)
        branch = GlobalMaxPooling1D()(branch)
        pooled.append(branch)
    features = concatenate(pooled, axis=1)
    features = Dropout(rate=self.opt['dropout_rate'])(features)
    features = Dense(self.opt['dense_dim'], activation=None,
                     kernel_regularizer=l2(self.opt['regul_coef_dense']))(features)
    features = BatchNormalization()(features)
    features = Activation('relu')(features)
    features = Dropout(rate=self.opt['dropout_rate'])(features)
    features = Dense(1, activation=None,
                     kernel_regularizer=l2(self.opt['regul_coef_dense']))(features)
    features = BatchNormalization()(features)
    act_output = Activation('sigmoid')(features)
    return Model(inputs=embed_input, outputs=act_output)
def lstm_word_model(self):
    """Build a bidirectional-LSTM word-level binary classifier.

    Hyperparameters come from ``self.opt``; returns an uncompiled Keras Model.
    """
    embed_input = Input(shape=(self.opt['max_sequence_length'], self.opt['embedding_dim'],))
    h = Bidirectional(LSTM(self.opt['units_lstm'], activation='tanh',
                           kernel_regularizer=l2(self.opt['regul_coef_lstm']),
                           dropout=self.opt['dropout_rate']))(embed_input)
    h = Dropout(rate=self.opt['dropout_rate'])(h)
    h = Dense(self.opt['dense_dim'], activation=None,
              kernel_regularizer=l2(self.opt['regul_coef_dense']))(h)
    h = BatchNormalization()(h)
    h = Activation('relu')(h)
    h = Dropout(rate=self.opt['dropout_rate'])(h)
    h = Dense(1, activation=None,
              kernel_regularizer=l2(self.opt['regul_coef_dense']))(h)
    h = BatchNormalization()(h)
    act_output = Activation('sigmoid')(h)
    return Model(inputs=embed_input, outputs=act_output)
def build_model(layers):
    """Stacked-LSTM regressor; `layers` = [input_dim, lstm1, lstm2, output_dim]."""
    net = Sequential()
    net.add(LSTM(input_dim=layers[0], output_dim=layers[1], return_sequences=True))
    net.add(Dropout(0.2))
    net.add(LSTM(layers[2], return_sequences=False))
    net.add(Dropout(0.2))
    net.add(Dense(output_dim=layers[3]))
    net.add(Activation("linear"))
    start = time.time()
    net.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return net
def init_model():
    """Build and compile a 784-500-300-10 MLP classifier (MNIST-style).

    Returns:
        Compiled Keras Sequential model (categorical crossentropy, RMSprop).
    """
    start_time = time.time()
    print('Compiling Model ... ')  # fixed: Python 3 print function
    model = Sequential()
    model.add(Dense(500, input_dim=784))
    model.add(Activation('relu'))
    model.add(Dropout(0.4))
    model.add(Dense(300))
    model.add(Activation('relu'))
    model.add(Dropout(0.4))
    model.add(Dense(10))
    model.add(Activation('softmax'))
    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])
    print('Model compiled in {0} seconds'.format(time.time() - start_time))
    return model
def init_model():
    """Build and compile a single-conv-layer CNN classifier over INPUT_SHAPE.

    Returns:
        Compiled Keras Sequential model (categorical crossentropy, RMSprop).
    """
    start_time = time.time()
    print('Compiling model...')  # fixed: Python 3 print function
    model = Sequential()
    model.add(Convolution2D(64, 3, 3, border_mode='valid', input_shape=INPUT_SHAPE))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(.25))
    model.add(Flatten())
    model.add(Dense(10))
    model.add(Activation('softmax'))
    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])
    print('Model compiled in {0} seconds'.format(time.time() - start_time))
    model.summary()
    return model
def make_model(batch_size, image_dim):
    """Fully-convolutional model producing a per-pixel sigmoid mask.

    Args:
        batch_size: fixed batch size baked into the input layer.
        image_dim: sequence where indices 1 and 2 give height and width.
    """
    net = Sequential()
    net.add(BatchNormalization(batch_input_shape=(batch_size, image_dim[1], image_dim[2], 1)))
    net.add(Conv2D(16, [3, 3], activation='relu', padding='same'))
    net.add(Conv2D(32, [3, 3], activation='relu', padding='same'))
    net.add(Conv2D(64, [3, 3], activation='relu', padding='same'))
    net.add(Dropout(0.2))
    # 1x1 conv head collapses features to a single-channel sigmoid map.
    net.add(Conv2D(1, kernel_size=1, padding='same', activation='sigmoid'))
    return (net)
def __initial_conv_block(input, k=1, dropout=0.0, initial=False):
    """Wide-ResNet pre-activation residual block with 16*k filters.

    Args:
        input: input tensor.
        k: width multiplier.
        dropout: dropout rate between the two 3x3 convolutions (0 disables).
        initial: if True, project the shortcut with a 1x1 conv so channel
            counts match.

    Returns:
        Output tensor of the residual addition.
    """
    init = input
    channel_axis = 1 if K.image_dim_ordering() == 'th' else -1
    if initial:
        # fixed: the former th/tf branches were byte-identical, so the
        # redundant backend check is collapsed into a single projection.
        init = Conv2D(16 * k, (1, 1), kernel_initializer='he_normal', padding='same')(init)
    x = BatchNormalization(axis=channel_axis)(input)
    x = Activation('relu')(x)
    x = Conv2D(16 * k, (3, 3), padding='same', kernel_initializer='he_normal')(x)
    if dropout > 0.0:
        x = Dropout(dropout)(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    x = Conv2D(16 * k, (3, 3), padding='same', kernel_initializer='he_normal')(x)
    m = add([init, x])
    return m
def build_model():
    """Build and compile the stacked-LSTM model described by Conf.LAYERS."""
    net = Sequential()
    net.add(LSTM(units=Conf.LAYERS[1],
                 input_shape=(Conf.LAYERS[1], Conf.LAYERS[0]),
                 return_sequences=True))
    net.add(Dropout(0.2))
    net.add(LSTM(Conf.LAYERS[2], return_sequences=False))
    net.add(Dropout(0.2))
    net.add(Dense(units=Conf.LAYERS[3]))
    net.add(Activation("tanh"))
    start = time.time()
    net.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return net
def build_model(layers):
    """Stacked-LSTM regressor parameterised by the `layers` size list."""
    net = Sequential()
    net.add(LSTM(units=layers[1], input_shape=(layers[1], layers[0]),
                 return_sequences=True))
    net.add(Dropout(0.2))
    net.add(LSTM(layers[2], return_sequences=False))
    net.add(Dropout(0.2))
    net.add(Dense(units=layers[3]))
    net.add(Activation("tanh"))
    start = time.time()
    net.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return net
def __init__(self, sizes, cell = RNNCell.LSTM, dropout = 0.2, activation = 'linear', loss = 'mse', optimizer = 'rmsprop'): #beta_1
    # Build and compile a Sequential RNN.
    # sizes[0] is the input dimension, sizes[1] the first recurrent layer width,
    # sizes[2:-1] additional recurrent layers, sizes[-1] the dense output width.
    self.model = Sequential()
    self.model.add(cell(
        input_dim = sizes[0],
        output_dim = sizes[1],
        return_sequences = True
    ))
    for i in range(2, len(sizes) - 1):
        # NOTE(review): return_sequences=False on every middle cell means stacking
        # more than one recurrent layer here would fail at build time — confirm
        # whether intermediate layers were meant to return sequences.
        self.model.add(cell(sizes[i], return_sequences = False))
        self.model.add(Dropout(dropout))
    self.model.add(Dense(output_dim = sizes[-1]))
    self.model.add(Activation(activation))
    self.model.compile(loss=loss, optimizer=optimizer)
def first_block(tensor_input, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    """Entry 1-D residual block: conv path added to a max-pool shortcut."""
    k1, k2 = filters
    path = Conv1D(k1, 1, padding='same')(tensor_input)
    path = BatchNormalization()(path)
    path = Activation('relu')(path)
    path = Dropout(dropout)(path)
    path = Conv1D(k2, kernel_size, padding='same')(path)
    shortcut = MaxPooling1D(pooling_size, padding='same')(tensor_input)
    return add([path, shortcut])
def repeated_block(x, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    """Pre-activation 1-D residual block: BN-ReLU-conv twice, pooled shortcut."""
    k1, k2 = filters
    path = BatchNormalization()(x)
    path = Activation('relu')(path)
    path = Conv1D(k1, kernel_size, padding='same')(path)
    path = BatchNormalization()(path)
    path = Activation('relu')(path)
    path = Dropout(dropout)(path)
    path = Conv1D(k2, kernel_size, padding='same')(path)
    shortcut = MaxPooling1D(pooling_size, padding='same')(x)
    return add([path, shortcut])
def first_2d_block(tensor_input, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    """Entry 2-D residual block: conv path added to a max-pool shortcut."""
    k1, k2 = filters
    path = Conv2D(k1, 1, padding='same', data_format='channels_last')(tensor_input)
    path = BatchNormalization()(path)
    path = Activation('relu')(path)
    path = Dropout(dropout)(path)
    path = Conv2D(k2, kernel_size, padding='same', data_format='channels_last')(path)
    shortcut = MaxPooling2D(pooling_size, padding='same', data_format='channels_last')(tensor_input)
    return add([path, shortcut])
def repeated_2d_block(x, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    """Pre-activation 2-D residual block: BN-ReLU-conv twice, pooled shortcut."""
    k1, k2 = filters
    path = BatchNormalization()(x)
    path = Activation('relu')(path)
    path = Conv2D(k1, kernel_size, padding='same', data_format='channels_last')(path)
    path = BatchNormalization()(path)
    path = Activation('relu')(path)
    path = Dropout(dropout)(path)
    path = Conv2D(k2, kernel_size, padding='same', data_format='channels_last')(path)
    shortcut = MaxPooling2D(pooling_size, padding='same', data_format='channels_last')(x)
    return add([path, shortcut])
def build_simple_rnn_model(timestep, input_dim, output_dim, dropout=0.4, lr=0.001):
    """Single-layer LSTM regressor: (timestep, input_dim) -> output_dim.

    Compiled with Adam, mae loss and mse metric.
    """
    seq_in = Input((timestep, input_dim))
    h = LSTM(50, return_sequences=False)(seq_in)
    h = Dropout(dropout)(h)
    h = Dense(output_dim)(h)
    model = Model(inputs=seq_in, outputs=h)
    optimizer = Adam(lr=lr)
    model.compile(loss='mae', optimizer=optimizer, metrics=['mse'])
    return model
def first_block(tensor_input, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    """Entry 1-D residual block with stride-2 downsampling on both paths."""
    k1, k2 = filters
    path = Conv1D(k1, 1, padding='same')(tensor_input)
    path = BatchNormalization()(path)
    path = Activation('relu')(path)
    path = Dropout(dropout)(path)
    path = Conv1D(k2, kernel_size, strides=2, padding='same')(path)
    shortcut = MaxPooling1D(pooling_size, strides=2, padding='same')(tensor_input)
    return add([path, shortcut])
def repeated_block(x, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    """Pre-activation 1-D residual block with stride-2 downsampling."""
    k1, k2 = filters
    path = BatchNormalization()(x)
    path = Activation('relu')(path)
    path = Conv1D(k1, kernel_size, padding='same')(path)
    path = BatchNormalization()(path)
    path = Activation('relu')(path)
    path = Dropout(dropout)(path)
    path = Conv1D(k2, kernel_size, strides=2, padding='same')(path)
    shortcut = MaxPooling1D(pooling_size, strides=2, padding='same')(x)
    return add([path, shortcut])
def repeated_2d_block(x, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    """Pre-activation 2-D residual block with stride-2 convolutions."""
    k1, k2 = filters
    path = BatchNormalization()(x)
    path = Activation('relu')(path)
    # third positional argument is strides=2
    path = Conv2D(k1, kernel_size, 2, padding='same', data_format='channels_last')(path)
    path = BatchNormalization()(path)
    path = Activation('relu')(path)
    path = Dropout(dropout)(path)
    path = Conv2D(k2, kernel_size, 2, padding='same', data_format='channels_last')(path)
    shortcut = MaxPooling2D(pooling_size, padding='same', data_format='channels_last')(x)
    return add([path, shortcut])
def model(X_train, X_test, Y_train, Y_test):
    """Hyperas-templated MLP: trains on MNIST-style data, returns -accuracy.

    The ``{{...}}`` markers are hyperas search-space templates, not Python.
    """
    net = Sequential()
    net.add(Dense(512, input_shape=(784,)))
    net.add(Activation('relu'))
    net.add(Dropout({{uniform(0, 1)}}))
    net.add(Dense({{choice([400, 512, 600])}}))
    net.add(Activation('relu'))
    net.add(Dropout({{uniform(0, 1)}}))
    net.add(Dense(10))
    net.add(Activation('softmax'))
    rms = RMSprop()
    net.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])
    nb_epoch = 10
    batch_size = 128
    net.fit(X_train, Y_train,
            batch_size=batch_size, nb_epoch=nb_epoch,
            verbose=2,
            validation_data=(X_test, Y_test))
    score, acc = net.evaluate(X_test, Y_test, verbose=0)
    return {'loss': -acc, 'status': STATUS_OK, 'model': net}
def model(X_train, Y_train, X_test, Y_test):
    """Small hyperas-templated MLP; returns -accuracy as the hyperopt loss."""
    net = Sequential()
    net.add(Dense(50, input_shape=(784,)))
    net.add(Activation('relu'))
    net.add(Dropout({{uniform(0, 1)}}))
    net.add(Dense({{choice([20, 30, 40])}}))
    net.add(Activation('relu'))
    net.add(Dropout({{uniform(0, 1)}}))
    net.add(Dense(10))
    net.add(Activation('softmax'))
    rms = RMSprop()
    net.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])
    net.fit(X_train, Y_train,
            batch_size={{choice([64, 128])}},
            nb_epoch=1,
            verbose=2,
            validation_data=(X_test, Y_test))
    score, acc = net.evaluate(X_test, Y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': net}
def ensemble_model(X_train, X_test, Y_train, Y_test):
    """Hyperas-templated MLP used as an ensemble member; returns -accuracy."""
    net = Sequential()
    net.add(Dense(512, input_shape=(784,)))
    net.add(Activation('relu'))
    net.add(Dropout({{uniform(0, 1)}}))
    net.add(Dense({{choice([400, 512, 600])}}))
    net.add(Activation('relu'))
    net.add(Dropout({{uniform(0, 1)}}))
    net.add(Dense(10))
    net.add(Activation('softmax'))
    rms = RMSprop()
    net.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])
    nb_epoch = 10
    batch_size = 128
    net.fit(X_train, Y_train,
            batch_size=batch_size, nb_epoch=nb_epoch,
            verbose=2,
            validation_data=(X_test, Y_test))
    score, acc = net.evaluate(X_test, Y_test, verbose=0)
    return {'loss': -acc, 'status': STATUS_OK, 'model': net}
def lenet5(self):
    """LeNet-5-style CNN classifier over self.ip_shape, compiled with Adam."""
    net = Sequential()
    net.add(Conv2D(64, (5, 5,), name='conv1', padding='same', activation='relu',
                   input_shape=self.ip_shape[1:]))
    net.add(MaxPooling2D(pool_size=(2, 2), name='pool1'))
    # (local response normalization was planned here but never implemented)
    net.add(Conv2D(64, (5, 5,), padding='same', activation='relu', name='conv2'))
    net.add(MaxPooling2D(pool_size=(2, 2), name='pool2'))
    net.add(Flatten())
    net.add(Dense(128, activation='relu', name='dense1'))
    net.add(Dropout(0.5))
    net.add(Dense(64, activation='relu', name='dense2'))
    net.add(Dropout(0.5))
    net.add(Dense(10, activation='softmax', name='dense3'))
    opt = keras.optimizers.Adam(lr=self.learning_rate, beta_1=0.9, beta_2=0.999,
                                epsilon=1e-08, decay=0.0)
    net.compile(loss='categorical_crossentropy', optimizer=opt, metrics=["accuracy"])
    return net
def simple_nn(self):
    """Single-conv-layer CNN classifier over self.ip_shape, Adam-compiled."""
    net = Sequential()
    net.add(Conv2D(64, (self.stride, self.stride,), name='conv1', padding='same',
                   activation='relu', input_shape=self.ip_shape[1:]))
    net.add(MaxPooling2D(pool_size=(2, 2), name='pool1'))
    net.add(Flatten())
    net.add(Dense(64, activation='relu', name='dense2'))
    net.add(Dropout(0.5))
    net.add(Dense(10, activation='softmax', name='dense3'))
    opt = keras.optimizers.Adam(lr=self.learning_rate, beta_1=0.9, beta_2=0.999,
                                epsilon=1e-08, decay=0.0)
    net.compile(loss='categorical_crossentropy', optimizer=opt, metrics=["accuracy"])
    return net
def cuda_cnn(self):
    """cuda-convnet-style CNN: conv/pool stacks then a small dense head."""
    net = Sequential()
    net.add(Conv2D(32, (5, 5), border_mode='same', activation='relu',
                   input_shape=self.ip_shape[1:]))
    net.add(MaxPooling2D(pool_size=(2, 2)))
    # (contrast normalization layers were planned but never implemented)
    net.add(Conv2D(32, (5, 5), border_mode='valid', activation='relu'))
    net.add(AveragePooling2D(border_mode='same'))
    net.add(Conv2D(64, (5, 5), border_mode='valid', activation='relu'))
    net.add(AveragePooling2D(border_mode='same'))
    net.add(Flatten())
    net.add(Dense(16, activation='relu'))
    net.add(Dropout(0.5))
    net.add(Dense(10, activation='softmax'))
    opt = keras.optimizers.Adam(lr=self.learning_rate, beta_1=0.9, beta_2=0.999,
                                epsilon=1e-08, decay=0.0)
    net.compile(loss='categorical_crossentropy', optimizer=opt, metrics=["accuracy"])
    return net
def small_nn(self):
    """Small conv + batch-norm classifier over self.ip_shape, Adam-compiled."""
    net = Sequential()
    net.add(Conv2D(64, (self.stride, self.stride,), name='conv1', padding='same',
                   activation='relu', input_shape=self.ip_shape[1:]))
    net.add(MaxPooling2D(pool_size=(2, 2), name='pool1'))
    net.add(BatchNormalization())
    net.add(Flatten())
    net.add(Dense(32, activation='relu', name='dense1'))
    net.add(BatchNormalization())
    net.add(Dropout(0.5))
    net.add(Dense(10, activation='softmax', name='dense2'))
    opt = keras.optimizers.Adam(lr=self.learning_rate, beta_1=0.9, beta_2=0.999,
                                epsilon=1e-08, decay=0.0)
    net.compile(loss='categorical_crossentropy', optimizer=opt, metrics=["accuracy"])
    return net
def double_conv_layer(x, size, dropout, batch_norm):
    """Two 3x3 conv+ReLU layers with optional batch-norm and trailing dropout.

    Args:
        x: input tensor.
        size: number of filters for both convolutions.
        dropout: dropout rate applied after the second conv (0 disables).
        batch_norm: if truthy, apply BatchNormalization after each conv.

    Returns:
        Output tensor of the block.
    """
    from keras.models import Model
    from keras.layers import Input, merge, Convolution2D, MaxPooling2D, UpSampling2D
    from keras.layers.normalization import BatchNormalization
    from keras.layers.core import Dropout, Activation
    conv = Convolution2D(size, 3, 3, border_mode='same')(x)
    if batch_norm:  # fixed: idiomatic truth test instead of `== True`
        conv = BatchNormalization(mode=0, axis=1)(conv)
    conv = Activation('relu')(conv)
    conv = Convolution2D(size, 3, 3, border_mode='same')(conv)
    if batch_norm:
        conv = BatchNormalization(mode=0, axis=1)(conv)
    conv = Activation('relu')(conv)
    if dropout > 0:
        conv = Dropout(dropout)(conv)
    return conv
def VGG_16_KERAS(classes_number, optim_name='Adam', learning_rate=-1):
    """VGG16 with ImageNet weights; final 1000-way head replaced by a new softmax."""
    from keras.layers.core import Dense, Dropout, Flatten
    from keras.applications.vgg16 import VGG16
    from keras.models import Model
    base_model = VGG16(include_top=True, weights='imagenet')
    x = base_model.layers[-2].output
    # Drop the stock classifier layer before attaching the new head.
    del base_model.layers[-1:]
    x = Dense(classes_number, activation='softmax', name='predictions')(x)
    vgg16 = Model(input=base_model.input, output=x)
    optim = get_optim('VGG16_KERAS', optim_name, learning_rate)
    vgg16.compile(optimizer=optim, loss='categorical_crossentropy', metrics=['accuracy'])
    return vgg16


# MIN: 1.00 Fast: 60 sec
def VGG_16_2_v2(classes_number, optim_name='Adam', learning_rate=-1):
    """VGG16 convolutional base (no top) with a fresh two-layer dense head."""
    from keras.layers.core import Dense, Dropout, Flatten
    from keras.applications.vgg16 import VGG16
    from keras.models import Model
    from keras.layers import Input
    input_tensor = Input(shape=(3, 224, 224))
    base_model = VGG16(input_tensor=input_tensor, include_top=False, weights='imagenet')
    head = Flatten()(base_model.output)
    head = Dense(256, activation='relu')(head)
    head = Dropout(0.2)(head)
    head = Dense(256, activation='relu')(head)
    head = Dropout(0.2)(head)
    head = Dense(classes_number, activation='softmax', name='predictions')(head)
    vgg16 = Model(input=base_model.input, output=head)
    optim = get_optim('VGG16_KERAS', optim_name, learning_rate)
    vgg16.compile(optimizer=optim, loss='categorical_crossentropy', metrics=['accuracy'])
    return vgg16
def Xception_wrapper(classes_number, optim_name='Adam', learning_rate=-1):
    """Xception with ImageNet weights, head replaced (TensorFlow backend only)."""
    from keras.layers.core import Dense, Dropout, Flatten
    from keras.applications.xception import Xception
    from keras.models import Model
    base_model = Xception(include_top=True, weights='imagenet')
    x = base_model.layers[-2].output
    # Remove the stock 1000-way classifier before attaching the new softmax.
    del base_model.layers[-1:]
    x = Dense(classes_number, activation='softmax', name='predictions')(x)
    model = Model(input=base_model.input, output=x)
    optim = get_optim('Xception_wrapper', optim_name, learning_rate)
    model.compile(optimizer=optim, loss='categorical_crossentropy', metrics=['accuracy'])
    print(model.summary())
    return model
def model(X_train, Y_train, X_test, Y_test):
    """Hyperas-templated binary classifier over 8 input features.

    The ``{{...}}`` markers are hyperas search-space templates, not Python.
    """
    net = Sequential()
    net.add(Dense({{choice([15, 512, 1024])}}, input_dim=8, init='uniform',
                  activation='softplus'))
    net.add(Dropout({{uniform(0, 1)}}))
    net.add(Dense({{choice([256, 512, 1024])}}))
    net.add(Activation({{choice(['relu', 'sigmoid','softplus'])}}))
    net.add(Dropout({{uniform(0, 1)}}))
    net.add(Dense(1, init='uniform', activation='sigmoid'))
    net.compile(loss='mse', metrics=['accuracy'],
                optimizer={{choice(['rmsprop', 'adam', 'sgd'])}})
    net.fit(X_train, Y_train,
            batch_size={{choice([10, 50, 100])}},
            nb_epoch={{choice([1, 50])}},
            show_accuracy=True,
            verbose=2,
            validation_data=(X_test, Y_test))
    score, acc = net.evaluate(X_test, Y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': net}
def create_network():
    """784-512-512-10 MLP wrapped in a KerasNetwork persisted to mlp_weights.hd5."""
    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation
    net = Sequential()
    net.add(Dense(512, input_shape=(784,)))
    net.add(Activation('relu'))
    net.add(Dropout(0.2))
    net.add(Dense(512))
    net.add(Activation('relu'))
    net.add(Dropout(0.2))
    net.add(Dense(10))
    net.add(Activation('softmax'))
    net.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return KerasNetwork(net, 'mlp_weights.hd5')
def create_model_2():
    """Small 3-D CNN binary classifier over 32x32x32 single-channel volumes."""
    inputs = Input((32, 32, 32, 1))
    conv = Convolution3D(32, 3, 3, 3, activation='relu', border_mode='same')(inputs)
    conv = SpatialDropout3D(0.1)(conv)
    conv = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='same')(conv)
    pooled = MaxPooling3D(pool_size=(2, 2, 2))(conv)
    feat = Flatten()(pooled)
    feat = Dense(64, init='normal')(feat)
    feat = Dropout(0.5)(feat)
    predictions = Dense(1, init='normal', activation='sigmoid')(feat)
    model = Model(input=inputs, output=predictions)
    model.summary()
    optimizer = Adam(lr=1e-5)
    model.compile(optimizer=optimizer, loss='binary_crossentropy',
                  metrics=['binary_accuracy', 'precision', 'recall',
                           'mean_squared_error', 'accuracy'])
    return model
def get_model(img_channels, img_width, img_height, dropout=0.5):
    """Three-conv-block binary CNN classifier (channels-first input)."""
    net = Sequential()
    net.add(Convolution2D(32, 3, 3,
                          input_shape=(img_channels, img_width, img_height)))
    net.add(Activation('relu'))
    net.add(MaxPooling2D(pool_size=(2, 2)))
    net.add(Convolution2D(32, 3, 3))
    net.add(Activation('relu'))
    net.add(MaxPooling2D(pool_size=(2, 2)))
    net.add(Convolution2D(32, 3, 3))
    net.add(Activation('relu'))
    net.add(MaxPooling2D(pool_size=(2, 2)))
    net.add(Flatten())
    net.add(Dense(64))
    net.add(Activation('relu'))
    net.add(Dropout(dropout))
    net.add(Dense(1))
    net.add(Activation('sigmoid'))
    return net
def get_model(shape, dropout=0.5, path=None):
    """Two-conv CNN ending in a single linear unit (no output activation).

    NOTE(review): `path` is accepted but unused here — kept for interface
    compatibility with callers.
    """
    print('building neural network')
    net = Sequential()
    net.add(Convolution2D(512, 3, 3, border_mode='same', input_shape=shape))
    net.add(Activation('relu'))
    net.add(Convolution2D(512, 3, 3, border_mode='same'))
    net.add(Activation('relu'))
    net.add(MaxPooling2D(pool_size=(2, 2)))
    net.add(SpatialDropout2D(dropout))
    net.add(Flatten())
    net.add(Dense(512))
    net.add(Activation('relu'))
    net.add(Dropout(0.5))
    net.add(Dense(1))
    return net
def create_base_model(nb_features, nb_classes, learning_rate=0.02):
    """Two-hidden-layer PReLU MLP classifier compiled with Adam."""
    net = Sequential()
    # input layer + first hidden layer
    net.add(Dense(512, kernel_initializer='lecun_uniform', input_shape=(nb_features,)))
    net.add(PReLU())
    net.add(Dropout(0.5))
    # second hidden layer with heavier dropout
    net.add(Dense(512, kernel_initializer='lecun_uniform'))
    net.add(PReLU())
    net.add(Dropout(0.75))
    # softmax output layer
    net.add(Dense(nb_classes, kernel_initializer='lecun_uniform'))
    net.add(Activation('softmax'))
    net.compile(loss='categorical_crossentropy',
                optimizer=Adam(lr=learning_rate),
                metrics=['accuracy'])
    return net
def get_model():
    """Two-layer LSTM univariate regressor over `look_back` timesteps."""
    net = Sequential()
    net.add(LSTM(32, input_shape=(look_back, 1), return_sequences=True))
    net.add(Dropout(0.2))
    net.add(LSTM(64, return_sequences=False))
    net.add(Dropout(0.2))
    net.add(Dense(1))
    net.add(Activation('linear'))
    net.compile(loss='mse', optimizer='adam')
    return net
def build_model(dropout):
    """Steering-style CNN regressor with in-model pixel normalisation."""
    net = Sequential()
    # Scale raw pixel values to [-0.5, 0.5] inside the graph.
    net.add(Lambda(lambda px: px / 255.0 - 0.5, input_shape=INPUT_SHAPE))
    net.add(Conv2D(3, (1, 1), activation='relu'))
    net.add(Conv2D(12, (5, 5), activation='relu'))
    net.add(MaxPooling2D(pool_size=(2, 2)))
    net.add(Conv2D(16, (3, 3), activation='relu'))
    net.add(MaxPooling2D(pool_size=(2, 2)))
    net.add(Conv2D(24, (3, 3), activation='relu'))
    net.add(MaxPooling2D(pool_size=(2, 2)))
    net.add(Conv2D(48, (3, 3), activation='relu'))
    net.add(Flatten())
    net.add(Dropout(dropout))
    net.add(Dense(64, activation='relu'))
    net.add(Dropout(dropout))
    net.add(Dense(32, activation='relu'))
    net.add(Dropout(dropout))
    net.add(Dense(1))
    return net
def build_model(dropout_rate = 0.2):
    # Three-branch pose-regression model: camera image, lidar panorama and
    # stacked lidar slices are reduced separately, concatenated, and regressed
    # to a 9-value pose output. Compiled with MSE loss and Adam.

    # --- camera image branch: repeated pooling then two strided convs ---
    input_image = Input(shape = IMAGE_SHAPE, dtype = 'float32', name = INPUT_IMAGE)
    x = MaxPooling2D()(input_image)
    x = MaxPooling2D()(x)
    x = MaxPooling2D()(x)
    x = MaxPooling2D()(x)
    x = Dropout(dropout_rate)(x)
    x = Conv2D(32, kernel_size=3, strides=(2,2))(x)
    x = MaxPooling2D()(x)
    x = Conv2D(32, kernel_size=3, strides=(2,2))(x)
    x = MaxPooling2D()(x)
    x = Dropout(dropout_rate)(x)
    image_out = Flatten()(x)
    # image_out = Dense(32, activation='relu')(conv)

    # --- lidar panorama branch: two pool_and_conv stages ---
    input_lidar_panorama = Input(shape = PANORAMA_SHAPE, dtype = 'float32',
                                 name = INPUT_LIDAR_PANORAMA)
    x = pool_and_conv(input_lidar_panorama)
    x = pool_and_conv(x)
    x = Dropout(dropout_rate)(x)
    panorama_out = Flatten()(x)

    # --- lidar slices branch: 3-D convolutions, depth axis left unpooled ---
    input_lidar_slices = Input(shape = SLICES_SHAPE, dtype = 'float32',
                               name = INPUT_LIDAR_SLICES)
    x = MaxPooling3D(pool_size=(2,2,1))(input_lidar_slices)
    x = Conv3D(32, kernel_size=3, strides=(2,2,1))(x)
    x = MaxPooling3D(pool_size=(2,2,1))(x)
    x = Dropout(dropout_rate)(x)
    x = Conv3D(32, kernel_size=2, strides=(2,2,1))(x)
    x = MaxPooling3D(pool_size=(2,2,1))(x)
    x = Dropout(dropout_rate)(x)
    slices_out = Flatten()(x)

    # --- fuse the three branches and regress the 9-value pose output ---
    x = keras.layers.concatenate([image_out, panorama_out, slices_out])
    x = Dense(32, activation='relu')(x)
    x = Dense(32, activation='relu')(x)
    x = Dense(32, activation='relu')(x)
    pose_output = Dense(9, name=OUTPUT_POSE)(x)
    model = Model(inputs=[input_image, input_lidar_panorama, input_lidar_slices],
                  outputs=[pose_output])
    # Fix error with TF and Keras (legacy workaround for old TF/Keras versions)
    import tensorflow as tf
    tf.python.control_flow_ops = tf
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model
def test_dropout():
    """Smoke-test Dropout and the 1D/2D/3D spatial-dropout variants."""
    cases = [
        (core.Dropout, (3, 2)),
        (core.SpatialDropout1D, (2, 3, 4)),
        (core.SpatialDropout2D, (2, 3, 4, 5)),
        (core.SpatialDropout3D, (2, 3, 4, 5, 6)),
    ]
    for layer_cls, shape in cases:
        layer_test(layer_cls, kwargs={'p': 0.5}, input_shape=shape)
def init_model():
    """Build and compile a 784-500-300-10 MLP classifier.

    Returns:
        Compiled Keras Sequential model (categorical crossentropy, RMSprop).
    """
    start_time = time.time()
    print('Compiling Model ... ')  # fixed: Python 3 print function
    model = Sequential()
    model.add(Dense(500, input_dim=784))
    model.add(Activation('relu'))
    model.add(Dropout(0.4))
    model.add(Dense(300))
    model.add(Activation('relu'))
    model.add(Dropout(0.4))
    model.add(Dense(10))
    model.add(Activation('softmax'))
    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])
    # fixed typo in user-facing message: "compield" -> "compiled"
    print('Model compiled in {0} seconds'.format(time.time() - start_time))
    return model
def build_model():
    """Univariate stacked-LSTM regressor (sizes hard-coded as [1, 50, 100, 1]).

    Returns:
        Compiled Keras Sequential model (mse loss, rmsprop optimizer).
    """
    model = Sequential()
    layers = [1, 50, 100, 1]
    model.add(LSTM(layers[1], input_shape=(None, layers[0]), return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(layers[2], return_sequences=False))
    model.add(Dropout(0.2))
    model.add(Dense(layers[3]))
    model.add(Activation("linear"))
    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    # fixed: Python 3 print function (was a Python 2 print statement)
    print("Compilation Time : ", time.time() - start)
    return model
def build_model():
    """Two-feature stacked-LSTM regressor (sizes hard-coded as [2, 50, 100, 1]).

    Returns:
        Compiled Keras Sequential model (mse loss, rmsprop optimizer).
    """
    model = Sequential()
    layers = [2, 50, 100, 1]
    # NOTE(review): input_dim/output_dim are legacy Keras 1 argument names.
    model.add(LSTM(input_dim=layers[0], output_dim=layers[1], return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(layers[2], return_sequences=False))
    model.add(Dropout(0.2))
    model.add(Dense(output_dim=layers[3]))
    model.add(Activation("linear"))
    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    # fixed: Python 3 print function (was a Python 2 print statement)
    print("Compilation Time : ", time.time() - start)
    return model
def Net_model(lr=0.005, decay=1e-6, momentum=0.9):
    """Two-conv-layer tanh CNN classifier compiled with Nesterov SGD.

    Relies on module-level nb_filters1/nb_filters2/nb_conv/nb_pool,
    img_rows/img_cols and nb_classes.
    """
    net = Sequential()
    net.add(Convolution2D(nb_filters1, nb_conv, nb_conv, border_mode='valid',
                          input_shape=(1, img_rows, img_cols)))
    net.add(Activation('tanh'))
    net.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
    net.add(Convolution2D(nb_filters2, nb_conv, nb_conv))
    net.add(Activation('tanh'))
    net.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
    net.add(Flatten())
    net.add(Dense(1000))  # fully-connected layer
    net.add(Activation('tanh'))
    net.add(Dense(nb_classes))
    net.add(Activation('softmax'))
    sgd = SGD(lr=lr, decay=decay, momentum=momentum, nesterov=True)
    net.compile(loss='categorical_crossentropy', optimizer=sgd)
    return net
def createNetwork(self):
    """Assemble a dense network from self.layers and compile per Config.

    Each layer spec contributes Dense + Activation (+ optional Dropout);
    the first layer also declares the input shape.
    """
    net = Sequential()
    for idx, spec in enumerate(self.layers):
        if idx == 0:
            net.add(Dense(spec.size, input_shape=self.input_shape))
        else:
            net.add(Dense(spec.size))
        net.add(Activation(spec.activation))
        if spec.dropout > 0:
            net.add(Dropout(spec.dropout))
    # output head
    net.add(Dense(self.noutputs))
    if Config.task_type == "classification":
        net.add(Activation('softmax'))
    net.compile(loss=Config.loss, optimizer=RMSprop())
    return net
def build_model(layers):
    """Dense softmax classifier over 20 input features.

    Args:
        layers: sizes where layers[1], layers[2] are hidden widths and
            layers[3] is the number of classes.

    Returns:
        Compiled Keras Sequential model (categorical crossentropy, Adam).
    """
    model = Sequential()
    model.add(Dense(layers[1], input_shape=(20,), activation='relu'))
    model.add(Dropout(0.2))  # dropout against overfitting
    model.add(Dense(layers[2], activation='relu'))
    model.add(Dropout(0.2))  # dropout against overfitting
    model.add(Dense(output_dim=layers[3]))
    model.add(Activation("softmax"))
    model.summary()
    start = time.time()
    model.compile(loss="categorical_crossentropy", optimizer="adam",
                  metrics=['accuracy'])  # alternatives tried: Nadam, RMSprop
    # fixed: Python 3 print function (was a Python 2 print statement)
    print("Compilation Time : ", time.time() - start)
    return model