我们从Python开源项目中,提取了以下50个代码示例,用于说明如何使用keras.layers.core.Activation()。
def build_model(layers):
    """Build and compile a two-layer GRU regressor.

    Args:
        layers: sequence of 4 ints
            [input_dim, gru1_units, gru2_units, output_dim].

    Returns:
        Compiled Keras Sequential model (MSE loss, RMSprop optimizer).
    """
    model = Sequential()
    model.add(GRU(input_dim=layers[0], output_dim=layers[1],
                  activation='tanh', return_sequences=True))
    model.add(Dropout(0.15))  # Dropout against overfitting
    # model.add(GRU(layers[2],activation='tanh', return_sequences=True))
    # model.add(Dropout(0.2)) # Dropout overfitting
    model.add(GRU(layers[2], activation='tanh', return_sequences=False))
    model.add(Dropout(0.15))  # Dropout against overfitting
    model.add(Dense(output_dim=layers[3]))
    model.add(Activation("linear"))

    start = time.time()
    # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    # model.compile(loss="mse", optimizer=sgd)
    model.compile(loss="mse", optimizer="rmsprop")  # Nadam rmsprop
    # Fixed: was a Python 2 `print` statement, inconsistent with the
    # Python 3 print() calls used elsewhere in this file.
    print("Compilation Time : ", time.time() - start)
    return model
def generator_model(noise_dim=100, aux_dim=47, model_name="generator"):
    """1-D conv generator: (noise, aux) inputs -> 400x1 tanh output."""
    noise_in = Input(shape=(noise_dim,), name="noise_input")
    aux_in = Input(shape=(aux_dim,), name="auxilary_input")
    # Merge noise and auxiliary inputs.
    net = concatenate([noise_in, aux_in], axis=-1)

    # Dense layer 1 -> 10*100 units.
    net = Dense(10 * 100)(net)
    net = BatchNormalization()(net)
    net = LeakyReLU(0.2)(net)

    # Reshape so the 1-D convolutions see (steps=100, channels=10).
    net = Reshape((100, 10))(net)

    # Conv block 1: 100 x 250, then upsample to 200 x 250.
    net = Conv1D(filters=250, kernel_size=13, padding='same')(net)
    net = BatchNormalization()(net)
    net = LeakyReLU(0.2)(net)
    net = UpSampling1D(size=2)(net)

    # Conv block 2: 200 x 100, then upsample to 400 x 100.
    net = Conv1D(filters=100, kernel_size=13, padding='same')(net)
    net = BatchNormalization()(net)
    net = LeakyReLU(0.2)(net)
    net = UpSampling1D(size=2)(net)

    # Conv block 3: final 400 x 1 output squashed with tanh.
    net = Conv1D(filters=1, kernel_size=13, padding='same')(net)
    net = BatchNormalization()(net)
    net = Activation('tanh')(net)

    return Model(outputs=[net], inputs=[noise_in, aux_in], name=model_name)
def discriminator_model():
    """Conv discriminator for 1x28x28 (Theano-ordered) images -> sigmoid score."""
    stack = [
        Convolution2D(64, 5, 5, border_mode='same',
                      input_shape=(1, 28, 28), dim_ordering="th"),
        Activation('tanh'),
        MaxPooling2D(pool_size=(2, 2), dim_ordering="th"),
        Convolution2D(128, 5, 5, border_mode='same', dim_ordering="th"),
        Activation('tanh'),
        MaxPooling2D(pool_size=(2, 2), dim_ordering="th"),
        Flatten(),
        Dense(1024),
        Activation('tanh'),
        Dense(1),
        Activation('sigmoid'),  # real/fake probability
    ]
    model = Sequential()
    for layer in stack:
        model.add(layer)
    return model
def discriminator_model():
    """ return a (b, 1) logits"""
    model = Sequential()
    # Input stacks two images along the channel axis (IN_CH*2 channels).
    model.add(Convolution2D(64, 4, 4, border_mode='same', input_shape=(IN_CH*2, img_cols, img_rows)))
    model.add(BatchNormalization(mode=2))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(128, 4, 4, border_mode='same'))
    model.add(BatchNormalization(mode=2))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(512, 4, 4, border_mode='same'))
    model.add(BatchNormalization(mode=2))
    model.add(Activation('tanh'))
    # Final 1-channel conv produces the per-patch score map.
    model.add(Convolution2D(1, 4, 4, border_mode='same'))
    model.add(BatchNormalization(mode=2))
    model.add(Activation('tanh'))
    # NOTE(review): a sigmoid applied directly on top of a tanh compresses
    # the output into roughly (0.27, 0.73); this looks unintentional —
    # confirm whether the tanh above should be removed.
    model.add(Activation('sigmoid'))
    return model
def discriminator_model():
    """MNIST conv discriminator: two conv/pool stages, a 1024-unit dense
    layer, and a single sigmoid real/fake output."""
    net = Sequential()

    # Stage 1: 64 feature maps over the 1x28x28 input.
    net.add(Convolution2D(64, 5, 5, border_mode='same', input_shape=(1, 28, 28)))
    net.add(Activation('tanh'))
    net.add(MaxPooling2D(pool_size=(2, 2)))

    # Stage 2: 128 feature maps, no padding.
    net.add(Convolution2D(128, 5, 5))
    net.add(Activation('tanh'))
    net.add(MaxPooling2D(pool_size=(2, 2)))

    # Dense head.
    net.add(Flatten())
    net.add(Dense(1024))
    net.add(Activation('tanh'))
    net.add(Dense(1))
    net.add(Activation('sigmoid'))
    return net
def build(input_shape, classes):
    """Assemble a LeNet-style CNN: two CONV=>RELU=>POOL stages, one
    500-unit dense layer, and a softmax classifier over `classes`."""
    net = Sequential()

    # Stage 1: CONV => RELU => POOL
    net.add(Conv2D(20, kernel_size=5, padding="same", input_shape=input_shape))
    net.add(Activation("relu"))
    net.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # Stage 2: CONV => RELU => POOL
    net.add(Conv2D(50, kernel_size=5, padding="same"))
    net.add(Activation("relu"))
    net.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # Classifier head: Flatten => RELU => softmax
    net.add(Flatten())
    net.add(Dense(500))
    net.add(Activation("relu"))
    net.add(Dense(classes))
    net.add(Activation("softmax"))

    return net

# network and training
def build_model(layers):
    """Stacked-LSTM regressor.

    layers = [input_dim, lstm1_units, lstm2_units, output_dim].
    Returns the compiled model (MSE loss, RMSprop optimizer).
    """
    net = Sequential()
    net.add(LSTM(input_dim=layers[0], output_dim=layers[1],
                 return_sequences=True))
    net.add(Dropout(0.2))
    net.add(LSTM(layers[2], return_sequences=False))
    net.add(Dropout(0.2))
    net.add(Dense(output_dim=layers[3]))
    net.add(Activation("linear"))

    t0 = time.time()
    net.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - t0)
    return net
def init_model():
    """Build and compile a 784->500->300->10 MLP for MNIST classification.

    Returns:
        Compiled Keras Sequential model (categorical crossentropy, RMSprop).
    """
    start_time = time.time()
    # Fixed: Python 2 `print` statements replaced with print() calls,
    # consistent with the Python 3 style used elsewhere in this file.
    print('Compiling Model ... ')
    model = Sequential()
    model.add(Dense(500, input_dim=784))
    model.add(Activation('relu'))
    model.add(Dropout(0.4))
    model.add(Dense(300))
    model.add(Activation('relu'))
    model.add(Dropout(0.4))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms,
                  metrics=['accuracy'])
    print('Model compiled in {0} seconds'.format(time.time() - start_time))
    return model
def init_model():
    """Build and compile a one-conv-layer CNN classifier (10 classes).

    Uses the module-level INPUT_SHAPE for the input tensor; prints a
    summary after compiling.
    """
    start_time = time.time()
    # Fixed: Python 2 `print` statements replaced with print() calls,
    # consistent with the Python 3 style used elsewhere in this file.
    print('Compiling model...')
    model = Sequential()

    model.add(Convolution2D(64, 3, 3, border_mode='valid',
                            input_shape=INPUT_SHAPE))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(.25))

    model.add(Flatten())
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms,
                  metrics=['accuracy'])
    print('Model compiled in {0} seconds'.format(time.time() - start_time))
    model.summary()
    return model
def build(nc, w, h, loss='categorical_crossentropy', optimizer='adam', **kwargs):
    """ENet (naive upsampling): encoder + decoder, reshaped to (w*h, nc)
    with a softmax, compiled and returned with the variant name."""
    name = 'enet_naive_upsampling'
    data_shape = w * h if None not in (w, h) else -1  # TODO: -1 or None?

    inp = Input(shape=(h, w, 3))
    net = decoder.build(encoder.build(inp), nc=nc)
    # TODO: need to remove data_shape for multi-scale training
    net = Reshape((data_shape, nc))(net)
    net = Activation('softmax')(net)

    model = Model(inputs=inp, outputs=net)
    model.compile(optimizer=optimizer, loss=loss,
                  metrics=['accuracy', 'mean_squared_error'])
    return model, name
def build(nc, w, h, loss='categorical_crossentropy',
          # optimizer='adadelta'):
          optimizer='adam', metrics=None, **kwargs):
    """ENet (unpooling decoder): encoder + decoder, reshaped to (w*h, nc)
    with a named softmax output, compiled and returned with the name."""
    name = 'enet_unpooling'
    data_shape = w * h if None not in (w, h) else -1  # TODO: -1 or None?

    inp = Input(shape=(h, w, 3), name='image')
    net = decoder.build(encoder.build(inp), nc=nc)
    # TODO: need to remove data_shape for multi-scale training
    net = Reshape((data_shape, nc))(net)
    net = Activation('softmax', name='output')(net)

    model = Model(inputs=inp, outputs=net)
    # Fall back to accuracy when the caller supplies no metrics.
    model.compile(optimizer=optimizer, loss=loss,
                  metrics=metrics if metrics is not None else ['accuracy'])
    return model, name
def __init__(self):
    """Initialise Q-learning hyper-parameters and the 2->10->4 Q-network."""
    super().__init__()
    self._learning = True
    self._learning_rate = .1
    self._discount = .1
    self._epsilon = .9

    # Q-network: 2-D state in, 4 action values out (linear head).
    net = Sequential()
    for layer in (Dense(2, init='lecun_uniform', input_shape=(2,)),
                  Activation('relu'),
                  Dense(10, init='lecun_uniform'),
                  Activation('relu'),
                  Dense(4, init='lecun_uniform'),
                  Activation('linear')):
        net.add(layer)
    net.compile(loss='mse', optimizer=RMSprop())
    self._model = net
def fire_module(x, squeeze=16, expand=64):
    """SqueezeNet fire module: 1x1 squeeze conv, then parallel 1x1 and
    (padded) 3x3 expand convs, concatenated on the channel axis."""
    squeezed = Activation('relu')(
        Convolution2D(squeeze, 1, 1, border_mode='valid')(x))

    expand_1x1 = Activation('relu')(
        Convolution2D(expand, 1, 1, border_mode='valid')(squeezed))

    padded = ZeroPadding2D(padding=(1, 1))(squeezed)
    expand_3x3 = Activation('relu')(
        Convolution2D(expand, 3, 3, border_mode='valid')(padded))

    return merge([expand_1x1, expand_3x3], mode='concat', concat_axis=1)

# Original SqueezeNet from paper. Global Average Pool implemented manually with Average Pooling Layer
def fire_module(x, squeeze=16, expand=64):
    """Fire module: squeeze with a 1x1 conv, expand via parallel 1x1 and
    zero-padded 3x3 convs, and concatenate the two branches channel-wise."""
    x = Convolution2D(squeeze, 1, 1, border_mode='valid')(x)
    x = Activation('relu')(x)

    # Left branch: 1x1 expand.
    branch_a = Convolution2D(expand, 1, 1, border_mode='valid')(x)
    branch_a = Activation('relu')(branch_a)

    # Right branch: 3x3 expand on a zero-padded copy.
    branch_b = ZeroPadding2D(padding=(1, 1))(x)
    branch_b = Convolution2D(expand, 3, 3, border_mode='valid')(branch_b)
    branch_b = Activation('relu')(branch_b)

    x = merge([branch_a, branch_b], mode='concat', concat_axis=1)
    return x

# Original SqueezeNet from paper. Global Average Pool implemented manually with Average Pooling Layer
def build_model():
    """Build and compile the stacked-LSTM regressor described by Conf.LAYERS.

    Layer sizes come from the module-level Conf object:
    Conf.LAYERS = [input_dim, lstm1_units, lstm2_units, output_dim].
    Returns the compiled model (MSE loss, RMSprop optimizer).
    """
    model = Sequential()
    model.add(LSTM(units=Conf.LAYERS[1], input_shape=(Conf.LAYERS[1], Conf.LAYERS[0]), return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(Conf.LAYERS[2], return_sequences=False))
    model.add(Dropout(0.2))
    model.add(Dense(units=Conf.LAYERS[3]))
    # model.add(BatchNormalization(weights=None, epsilon=1e-06, momentum=0.9))
    model.add(Activation("tanh"))
    # Alternative activations kept from the original experiments:
    # act = PReLU(alpha_initializer='zeros', weights=None)
    # act = LeakyReLU(alpha=0.3)
    # model.add(act)
    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return model
def build_model(layers):
    """Build and compile a stacked-LSTM regressor.

    Args:
        layers: [input_dim, lstm1_units, lstm2_units, output_dim].

    Returns:
        Compiled Keras Sequential model (MSE loss, RMSprop optimizer).
    """
    model = Sequential()
    model.add(LSTM(units=layers[1], input_shape=(layers[1], layers[0]), return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(layers[2], return_sequences=False))
    model.add(Dropout(0.2))
    model.add(Dense(units=layers[3]))
    model.add(Activation("tanh"))
    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return model
def __init__(self, sizes, cell=RNNCell.LSTM, dropout=0.2, activation='linear',
             loss='mse', optimizer='rmsprop'):  # beta_1
    """Build and compile a stacked recurrent regressor.

    Args:
        sizes: [input_dim, first_cell_units, ..., dense_output_units].
        cell: recurrent layer class (e.g. LSTM or GRU).
        dropout: dropout rate applied after the recurrent stack.
        activation: name of the final activation.
        loss, optimizer: forwarded to model.compile().
    """
    self.model = Sequential()
    self.model.add(cell(
        input_dim=sizes[0],
        output_dim=sizes[1],
        return_sequences=True
    ))
    for i in range(2, len(sizes) - 1):
        # Fixed: intermediate recurrent layers must return sequences so the
        # next recurrent layer receives 3-D input; only the last recurrent
        # layer collapses to a single vector. The original passed
        # return_sequences=False for every layer, which breaks any stack
        # with more than one hidden recurrent layer.
        is_last_recurrent = (i == len(sizes) - 2)
        self.model.add(cell(sizes[i], return_sequences=not is_last_recurrent))
    self.model.add(Dropout(dropout))
    self.model.add(Dense(output_dim=sizes[-1]))
    self.model.add(Activation(activation))
    self.model.compile(loss=loss, optimizer=optimizer)
def get_simple_model():
    """Two-conv CNN over zero-padded input, softmax head, compiled with
    categorical crossentropy / Adam."""
    net = Sequential()

    net.add(ZeroPadding2D(padding=(3, 3),
                          input_shape=(nb_input_layers, NB_ROWS, NB_COLS)))
    net.add(Convolution2D(96, 5, 5))
    net.add(Activation('relu'))

    net.add(ZeroPadding2D(padding=(1, 1)))
    net.add(Convolution2D(192, 3, 3))
    net.add(Activation('relu'))

    net.add(Flatten())
    net.add(Dense(nb_classes))
    net.add(Activation('softmax'))

    print("Compiling model")
    net.compile(loss='categorical_crossentropy', optimizer='adam')
    print("Compiled model")
    return net

###############################################################################
def first_block(tensor_input, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    """First 1-D residual block: conv path (1x1 conv, BN, ReLU, dropout,
    k-sized conv) added to a max-pool shortcut."""
    k1, k2 = filters

    residual = Conv1D(k1, 1, padding='same')(tensor_input)
    residual = BatchNormalization()(residual)
    residual = Activation('relu')(residual)
    residual = Dropout(dropout)(residual)
    residual = Conv1D(k2, kernel_size, padding='same')(residual)

    # Shortcut branch straight from the block input.
    shortcut = MaxPooling1D(pooling_size, padding='same')(tensor_input)

    return add([residual, shortcut])
def repeated_block(x, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    """Pre-activation 1-D residual block (BN-ReLU-conv twice, with dropout)
    added to a max-pool shortcut."""
    k1, k2 = filters

    residual = BatchNormalization()(x)
    residual = Activation('relu')(residual)
    residual = Conv1D(k1, kernel_size, padding='same')(residual)
    residual = BatchNormalization()(residual)
    residual = Activation('relu')(residual)
    residual = Dropout(dropout)(residual)
    residual = Conv1D(k2, kernel_size, padding='same')(residual)

    shortcut = MaxPooling1D(pooling_size, padding='same')(x)
    return add([residual, shortcut])
def first_2d_block(tensor_input, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    """First 2-D residual block: conv path (1x1 conv, BN, ReLU, dropout,
    k-sized conv) added to a max-pool shortcut; channels-last layout."""
    k1, k2 = filters

    residual = Conv2D(k1, 1, padding='same', data_format='channels_last')(tensor_input)
    residual = BatchNormalization()(residual)
    residual = Activation('relu')(residual)
    residual = Dropout(dropout)(residual)
    residual = Conv2D(k2, kernel_size, padding='same', data_format='channels_last')(residual)

    shortcut = MaxPooling2D(pooling_size, padding='same', data_format='channels_last')(tensor_input)
    return add([residual, shortcut])
def repeated_2d_block(x, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    """Pre-activation 2-D residual block (BN-ReLU-conv twice, with dropout)
    added to a max-pool shortcut; channels-last layout."""
    k1, k2 = filters

    residual = BatchNormalization()(x)
    residual = Activation('relu')(residual)
    residual = Conv2D(k1, kernel_size, padding='same', data_format='channels_last')(residual)
    residual = BatchNormalization()(residual)
    residual = Activation('relu')(residual)
    residual = Dropout(dropout)(residual)
    residual = Conv2D(k2, kernel_size, padding='same', data_format='channels_last')(residual)

    shortcut = MaxPooling2D(pooling_size, padding='same', data_format='channels_last')(x)
    return add([residual, shortcut])
def first_block(tensor_input, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    """Downsampling 1-D residual block: stride-2 conv path added to a
    stride-2 max-pool shortcut (both halve the sequence length)."""
    k1, k2 = filters

    residual = Conv1D(k1, 1, padding='same')(tensor_input)
    residual = BatchNormalization()(residual)
    residual = Activation('relu')(residual)
    residual = Dropout(dropout)(residual)
    residual = Conv1D(k2, kernel_size, strides=2, padding='same')(residual)

    shortcut = MaxPooling1D(pooling_size, strides=2, padding='same')(tensor_input)
    return add([residual, shortcut])
def repeated_block(x, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    """Pre-activation downsampling 1-D residual block: the second conv and
    the max-pool shortcut both use stride 2, so shapes stay aligned."""
    k1, k2 = filters

    residual = BatchNormalization()(x)
    residual = Activation('relu')(residual)
    residual = Conv1D(k1, kernel_size, padding='same')(residual)
    residual = BatchNormalization()(residual)
    residual = Activation('relu')(residual)
    residual = Dropout(dropout)(residual)
    residual = Conv1D(k2, kernel_size, strides=2, padding='same')(residual)

    shortcut = MaxPooling1D(pooling_size, strides=2, padding='same')(x)
    return add([residual, shortcut])
def repeated_2d_block(x, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    """Pre-activation 2-D residual block whose conv path downsamples."""
    k1, k2 = filters
    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    # The bare `2` third positional argument is the conv stride.
    out = Conv2D(k1, kernel_size, 2, padding='same', data_format='channels_last')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv2D(k2, kernel_size, 2, padding='same', data_format='channels_last')(out)
    # NOTE(review): both convs above stride by 2 (downsampling x4 in total)
    # while this pooling shortcut is unstrided (default strides = pool_size),
    # so add() below will see mismatched spatial shapes for typical inputs.
    # The 1-D variant in this file strides its pooling shortcut — confirm
    # whether `strides` was meant to be set here as well.
    pooling = MaxPooling2D(pooling_size, padding='same', data_format='channels_last')(x)
    out = add([out, pooling])
    # out = merge([out,pooling])
    return out
def model(X_train, X_test, Y_train, Y_test):
    """Hyperas search-space template: 784->512->{400|512|600}->10 MLP.

    The {{uniform(...)}} / {{choice(...)}} markers are hyperas template
    expressions substituted before execution — this is not plain Python.
    Trains for 10 epochs and returns the dict hyperopt's fmin expects.
    """
    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([400, 512, 600])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))
    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])
    nb_epoch = 10
    batch_size = 128
    model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=2, validation_data=(X_test, Y_test))
    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    # Accuracy is negated because hyperopt minimizes the returned loss.
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
def model(X_train, Y_train, X_test, Y_test):
    """Hyperas search-space template: small 784->50->{20|30|40}->10 MLP.

    The {{uniform(...)}} / {{choice(...)}} markers are hyperas template
    expressions substituted before execution — this is not plain Python.
    Trains for a single epoch and returns the dict hyperopt's fmin expects.
    """
    model = Sequential()
    model.add(Dense(50, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([20, 30, 40])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))
    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])
    model.fit(X_train, Y_train, batch_size={{choice([64, 128])}}, nb_epoch=1, verbose=2, validation_data=(X_test, Y_test))
    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    print('Test accuracy:', acc)
    # Accuracy is negated because hyperopt minimizes the returned loss.
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
def ensemble_model(X_train, X_test, Y_train, Y_test):
    """Hyperas template for an ensemble member: 784->512->{400|512|600}->10 MLP.

    The {{uniform(...)}} / {{choice(...)}} markers are hyperas template
    expressions substituted before execution — this is not plain Python.
    Trains for 10 epochs and returns the dict hyperopt's fmin expects.
    """
    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([400, 512, 600])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))
    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])
    nb_epoch = 10
    batch_size = 128
    model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=2, validation_data=(X_test, Y_test))
    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    # Accuracy is negated because hyperopt minimizes the returned loss.
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
def small_nn_soft(self, temp):
    """Small CNN whose logits are divided by `temp` before the softmax
    (temperature scaling, e.g. for knowledge distillation)."""
    net = Sequential()
    net.add(Conv2D(64, (self.stride, self.stride,), name='conv1',
                   padding='same', activation='relu',
                   input_shape=self.ip_shape[1:]))
    net.add(MaxPooling2D(pool_size=(2, 2), name='pool1'))
    net.add(BatchNormalization())

    net.add(Flatten())
    net.add(Dense(32, activation='relu', name='dense1'))
    net.add(BatchNormalization())
    net.add(Dropout(0.5))
    net.add(Dense(10, name='dense2'))

    # Soften the logits with the given temperature before the softmax.
    net.add(Lambda(lambda logits: logits / temp))
    net.add(Activation('softmax'))

    optimizer = keras.optimizers.Adam(lr=self.learning_rate, beta_1=0.9,
                                      beta_2=0.999, epsilon=1e-08, decay=0.0)
    net.compile(loss='categorical_crossentropy', optimizer=optimizer,
                metrics=["accuracy"])
    return net
def build_model(self):
    """Assemble a wide-ResNet-style classifier over a
    (img_channels, img_rows, img_cols) input and store it on self.model."""
    img_input = Input(shape=(img_channels, img_rows, img_cols))

    # Stem: one 3x3 conv (spatial size: 32x32).
    net = ZeroPadding2D((1, 1))(img_input)
    net = Convolution2D(16, nb_row=3, nb_col=3)(net)

    # Three bottleneck stages at 32x32, 16x16 and 8x8.
    net = bottleneck(net, n, 16, 16 * k, dropout=0.3, subsample=(1, 1))
    net = bottleneck(net, n, 16 * k, 32 * k, dropout=0.3, subsample=(2, 2))
    net = bottleneck(net, n, 32 * k, 64 * k, dropout=0.3, subsample=(2, 2))

    # Head: BN -> ReLU -> 8x8 average pool -> softmax.
    net = BatchNormalization(mode=0, axis=1)(net)
    net = Activation('relu')(net)
    net = AveragePooling2D((8, 8), strides=(1, 1))(net)
    net = Flatten()(net)
    preds = Dense(nb_classes, activation='softmax')(net)

    self.model = Model(input=img_input, output=preds)
    self.keras_get_params()
def block(self, num_filters, num_layers, kernel_size, strides, input_tensor):
    """Bottleneck residual block with SELU activations and a 1x1-projected
    shortcut so both branches match in shape."""
    path = Conv2D(num_layers, (1, 1), strides=strides)(input_tensor)
    path = Activation(selu)(path)
    path = Conv2D(num_filters, kernel_size, padding='same')(path)
    path = Activation(selu)(path)
    path = Conv2D(num_filters * 4, (1, 1))(path)

    # Projection shortcut matching the residual path's channels and stride.
    shortcut = Conv2D(num_filters * 4, (1, 1), strides=strides, )(input_tensor)

    merged = layers.add([path, shortcut])
    return Activation(selu)(merged)
def double_conv_layer(x, size, dropout, batch_norm):
    """Two 3x3 conv+ReLU layers (optional BatchNorm after each conv,
    optional trailing dropout) — the standard U-Net double-conv unit."""
    from keras.models import Model
    from keras.layers import Input, merge, Convolution2D, MaxPooling2D, UpSampling2D
    from keras.layers.normalization import BatchNormalization
    from keras.layers.core import Dropout, Activation

    out = Convolution2D(size, 3, 3, border_mode='same')(x)
    if batch_norm == True:
        out = BatchNormalization(mode=0, axis=1)(out)
    out = Activation('relu')(out)

    out = Convolution2D(size, 3, 3, border_mode='same')(out)
    if batch_norm == True:
        out = BatchNormalization(mode=0, axis=1)(out)
    out = Activation('relu')(out)

    if dropout > 0:
        out = Dropout(dropout)(out)
    return out
def fire_module(x, fire_id, squeeze=16, expand=64, dim_ordering='th'):
    """SqueezeNet fire module with named layers.

    Args:
        x: input tensor.
        fire_id: int used to build unique layer names.
        squeeze: filters in the 1x1 squeeze conv.
        expand: filters in each expand conv.
        dim_ordering: 'tf' (channels last, concat axis 3) or anything else
            (channels first, concat axis 1).
    """
    s_id = 'fire' + str(fire_id) + '/'
    # Fixed: `dim_ordering is 'tf'` compared string identity, which only
    # worked via CPython string interning and raises a SyntaxWarning on
    # modern Pythons; equality is the correct comparison.
    if dim_ordering == 'tf':
        c_axis = 3
    else:
        c_axis = 1

    x = Convolution2D(squeeze, 1, 1, border_mode='valid', name=s_id + sq1x1)(x)
    x = Activation('relu', name=s_id + relu + sq1x1)(x)

    left = Convolution2D(expand, 1, 1, border_mode='valid', name=s_id + exp1x1)(x)
    left = Activation('relu', name=s_id + relu + exp1x1)(left)

    right = Convolution2D(expand, 3, 3, border_mode='same', name=s_id + exp3x3)(x)
    right = Activation('relu', name=s_id + relu + exp3x3)(right)

    x = merge([left, right], mode='concat', concat_axis=c_axis, name=s_id + 'concat')
    return x

# Original SqueezeNet from paper.
def model(X_train, Y_train, X_test, Y_test):
    """Hyperas search-space template: 8-feature binary classifier MLP.

    The {{uniform(...)}} / {{choice(...)}} markers are hyperas template
    expressions substituted before execution — this is not plain Python.
    Returns the dict hyperopt's fmin expects (loss = -accuracy).
    """
    model = Sequential()
    model.add(Dense({{choice([15, 512, 1024])}},input_dim=8,init='uniform', activation='softplus'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([256, 512, 1024])}}))
    model.add(Activation({{choice(['relu', 'sigmoid','softplus'])}}))
    model.add(Dropout({{uniform(0, 1)}}))
    # Single sigmoid unit for binary output.
    model.add(Dense(1, init='uniform', activation='sigmoid'))
    model.compile(loss='mse', metrics=['accuracy'], optimizer={{choice(['rmsprop', 'adam', 'sgd'])}})
    model.fit(X_train, Y_train, batch_size={{choice([10, 50, 100])}}, nb_epoch={{choice([1, 50])}}, show_accuracy=True, verbose=2, validation_data=(X_test, Y_test))
    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    print('Test accuracy:', acc)
    # Accuracy is negated because hyperopt minimizes the returned loss.
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
def make_dcgan_generator(Xk_g, n_lat, n_chan=1):
    """DCGAN generator: latent input -> two dense layers -> two stride-2
    deconvolutions up to a (n_chan, 28, 28) sigmoid image (batch size 128,
    Theano ordering)."""
    n_g_hid1 = 1024  # size of hidden layer in generator layer 1
    n_g_hid2 = 128   # size of hidden layer in generator layer 2

    h = Dense(n_g_hid1)(Xk_g)
    h = BatchNormalization(mode=2)(h)
    h = Activation('relu')(h)

    h = Dense(n_g_hid2 * 7 * 7)(h)
    h = BatchNormalization(mode=2)(h)
    h = Activation('relu')(h)
    h = Reshape((n_g_hid2, 7, 7))(h)

    # 7x7 -> 14x14
    h = Deconvolution2D(64, 5, 5, output_shape=(128, 64, 14, 14),
                        border_mode='same', activation=None, subsample=(2,2),
                        init='orthogonal', dim_ordering='th')(h)
    h = BatchNormalization(mode=2, axis=1)(h)
    h = Activation('relu')(h)

    # 14x14 -> 28x28, sigmoid pixel intensities.
    g = Deconvolution2D(n_chan, 5, 5, output_shape=(128, n_chan, 28, 28),
                        border_mode='same', activation='sigmoid', subsample=(2,2),
                        init='orthogonal', dim_ordering='th')(h)
    return g
def make_dcgan_generator(Xk_g, n_lat, n_chan=1):
    """DCGAN generator variant using conv2D_init everywhere; the second
    BatchNormalization runs after the reshape (on axis 1)."""
    n_g_hid1 = 1024  # size of hidden layer in generator layer 1
    n_g_hid2 = 128   # size of hidden layer in generator layer 2

    h = Dense(n_g_hid1, init=conv2D_init)(Xk_g)
    h = BatchNormalization(mode=2, )(h)
    h = Activation('relu')(h)

    h = Dense(n_g_hid2 * 7 * 7, init=conv2D_init)(h)
    h = Reshape((n_g_hid2, 7, 7))(h)
    h = BatchNormalization(mode=2, axis=1)(h)
    h = Activation('relu')(h)

    # 7x7 -> 14x14
    h = Deconvolution2D(64, 5, 5, output_shape=(128, 64, 14, 14),
                        border_mode='same', activation=None, subsample=(2,2),
                        init=conv2D_init, dim_ordering='th')(h)
    h = BatchNormalization(mode=2, axis=1)(h)
    h = Activation('relu')(h)

    # 14x14 -> 28x28, sigmoid pixel intensities.
    return Deconvolution2D(n_chan, 5, 5, output_shape=(128, n_chan, 28, 28),
                           border_mode='same', activation='sigmoid', subsample=(2,2),
                           init=conv2D_init, dim_ordering='th')(h)
def create_network():
    """Build a 784->512->512->10 MLP and wrap it in a KerasNetwork that
    persists its weights to 'mlp_weights.hd5'."""
    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation

    net = Sequential()
    for layer in (Dense(512, input_shape=(784,)),
                  Activation('relu'),
                  Dropout(0.2),
                  Dense(512),
                  Activation('relu'),
                  Dropout(0.2),
                  Dense(10),
                  Activation('softmax')):
        net.add(layer)

    net.compile(loss='categorical_crossentropy', optimizer='adam',
                metrics=['accuracy'])
    return KerasNetwork(net, 'mlp_weights.hd5')
def model_default(input_shape):
    """Three conv/pool stages (8x8/4, 4x4/2, 3x3/1) followed by a 512-unit
    dense layer and two linear outputs (e.g. Q-values for two actions)."""
    net = Sequential()

    net.add(Convolution2D(32, 8, 8, subsample=(4, 4), border_mode='same',
                          init='he_uniform', input_shape=input_shape))
    net.add(Activation('relu'))
    net.add(MaxPooling2D(pool_size=(2, 2)))

    net.add(Convolution2D(64, 4, 4, subsample=(2, 2), border_mode='same',
                          init='he_uniform'))
    net.add(Activation('relu'))
    net.add(MaxPooling2D(pool_size=(2, 2)))

    net.add(Convolution2D(64, 3, 3, subsample=(1, 1), border_mode='same',
                          init='he_uniform'))
    net.add(Activation('relu'))
    net.add(MaxPooling2D(pool_size=(2, 2)))

    net.add(Flatten())
    net.add(Dense(512, init='he_uniform'))
    net.add(Activation('relu'))
    net.add(Dense(2, init='he_uniform'))
    return net

# Model WITH BATCHNORM NO MAXPOOL NO Dropout
def make_init_model():
    """Build a small stem model over 32x32x3 input: conv(s) -> global max
    pool -> softmax."""
    input_data = Input(shape=(32, 32, 3))

    init_model_index = random.randint(1, 4)
    # NOTE(review): the random choice above is immediately overridden, so
    # variants 1, 3 and 4 are currently dead code — confirm intent.
    init_model_index = 2

    if init_model_index == 1:  # one conv layer with kernel num = 64
        stem_conv_1 = Conv2D(64, (1, 1), padding='same')(input_data)
    elif init_model_index == 2:  # two conv layers with kernel num = 64
        stem_conv_1 = Conv2D(64, (1, 1), padding='same')(input_data)
        # NOTE(review): stem_conv_2 is never consumed below — the pooling is
        # applied to stem_conv_1, so this second conv has no effect on the
        # returned model.
        stem_conv_2 = Conv2D(64, (1, 1), padding='same')(stem_conv_1)
    elif init_model_index == 3:  # one conv layer with a wider kernel num = 128
        stem_conv_1 = Conv2D(128, (1, 1), padding='same')(input_data)
    elif init_model_index == 4:  # two conv layers with a wider kernel_num = 128
        stem_conv_1 = Conv2D(128, (1, 1), padding='same')(input_data)
        stem_conv_2 = Conv2D(128, (1, 1), padding='same')(stem_conv_1)

    stem_global_pooling_1 = GlobalMaxPooling2D()(stem_conv_1)
    stem_softmax_1 = Activation('softmax')(stem_global_pooling_1)

    model = Model(inputs=input_data, outputs=stem_softmax_1)
    return model
def Build(model_list):
    """Assemble a Keras Model from a declarative layer-spec list.

    Each entry is a tuple (layer_type, *params); the first entry must be an
    'InputLayer' carrying the input shape. Returns the assembled Model.
    """
    # Fixed: Python 2 `print model_list` replaced with print(); also renamed
    # the locals that shadowed the builtins `type` and `input`.
    print(model_list)
    net_input = None
    x = None
    for idx, layer in enumerate(model_list):
        layer_type = layer[0]
        if layer_type == 'InputLayer':
            net_input = Input(shape=layer[1])
            x = net_input
        elif layer_type == 'Conv2D':
            x = Conv2D(filters=layer[2], kernel_size=layer[1], padding='same')(x)
        elif layer_type == 'InceptionBlock':
            x = inception_block(x, idx)
        elif layer_type == 'ResidualBlock':
            x = residual_block(x, layer[1], idx)
        elif layer_type == "GlobalMaxPooling2D":
            x = GlobalMaxPooling2D()(x)
        elif layer_type == "Activation":
            x = Activation('softmax')(x)
    model = Model(inputs=net_input, outputs=x)
    return model
def buildModelLSTM_3(self):
    """Variant 3: single LSTM (inOutVecDim -> 57 units) straight into the
    dense output layer; compiled with MAE / RMSprop(lr=0.001)."""
    layer_sizes = [self.inOutVecDim, 57, 57 * 2, 32, self.inOutVecDim]

    net = Sequential()
    net.add(LSTM(input_dim=layer_sizes[0], output_dim=layer_sizes[1],
                 return_sequences=False))
    net.add(Dense(output_dim=layer_sizes[4]))
    net.add(Activation(self.activation))

    net.compile(loss="mae", optimizer=keras.optimizers.RMSprop(lr=0.001))
    return net
def buildModelLSTM_4(self):
    """Variant 4: two stacked LSTMs (57 then 114 units) into the dense
    output layer; compiled with MAE / RMSprop(lr=0.001)."""
    layer_sizes = [self.inOutVecDim, 57, 57 * 2, 57, self.inOutVecDim]

    net = Sequential()
    net.add(LSTM(input_dim=layer_sizes[0], output_dim=layer_sizes[1],
                 return_sequences=True))
    net.add(LSTM(layer_sizes[2], return_sequences=False))
    net.add(Dense(output_dim=layer_sizes[4]))
    net.add(Activation(self.activation))

    net.compile(loss="mae", optimizer=keras.optimizers.RMSprop(lr=0.001))
    return net
def get_model(img_channels, img_width, img_height, dropout=0.5):
    """Three conv/pool stages (32 filters each) followed by a 64-unit dense
    layer and a single sigmoid output (binary classifier)."""
    net = Sequential()

    # First stage carries the input shape.
    net.add(Convolution2D(32, 3, 3,
                          input_shape=(img_channels, img_width, img_height)))
    net.add(Activation('relu'))
    net.add(MaxPooling2D(pool_size=(2, 2)))

    # Two more identical conv/pool stages.
    for _ in range(2):
        net.add(Convolution2D(32, 3, 3))
        net.add(Activation('relu'))
        net.add(MaxPooling2D(pool_size=(2, 2)))

    net.add(Flatten())
    net.add(Dense(64))
    net.add(Activation('relu'))
    net.add(Dropout(dropout))
    net.add(Dense(1))
    net.add(Activation('sigmoid'))
    return net
def get_model(shape, dropout=0.5, path=None):
    """VGG-like block (two 512-filter convs + pool + spatial dropout)
    followed by a 512-unit dense head; single linear output unit."""
    print('building neural network')
    net = Sequential()

    net.add(Convolution2D(512, 3, 3, border_mode='same', input_shape=shape))
    net.add(Activation('relu'))
    net.add(Convolution2D(512, 3, 3, border_mode='same'))
    net.add(Activation('relu'))
    net.add(MaxPooling2D(pool_size=(2, 2)))
    net.add(SpatialDropout2D(dropout))

    net.add(Flatten())
    net.add(Dense(512))
    net.add(Activation('relu'))
    net.add(Dropout(0.5))
    net.add(Dense(1))  # linear output — no final activation applied
    return net
def create_base_model(nb_features, nb_classes, learning_rate=0.02):
    """Two-hidden-layer PReLU MLP with dropout, softmax output, compiled
    with Adam at the given learning rate."""
    net = Sequential()

    # Input layer + first hidden layer.
    net.add(Dense(512, kernel_initializer='lecun_uniform',
                  input_shape=(nb_features,)))
    net.add(PReLU())
    net.add(Dropout(0.5))

    # Second hidden layer with heavier dropout.
    net.add(Dense(512, kernel_initializer='lecun_uniform'))
    net.add(PReLU())
    net.add(Dropout(0.75))

    # Softmax output layer.
    net.add(Dense(nb_classes, kernel_initializer='lecun_uniform'))
    net.add(Activation('softmax'))

    net.compile(loss='categorical_crossentropy',
                optimizer=Adam(lr=learning_rate),
                metrics=['accuracy'])
    return net
def get_model():
    """Two stacked LSTMs (32 -> 64 units) over (look_back, 1) input with a
    single linear output; compiled with MSE / Adam."""
    net = Sequential()
    net.add(LSTM(32, input_shape=(look_back, 1), return_sequences=True))
    net.add(Dropout(0.2))
    net.add(LSTM(64, return_sequences=False))
    net.add(Dropout(0.2))
    net.add(Dense(1))
    net.add(Activation('linear'))
    net.compile(loss='mse', optimizer='adam')
    return net