Python keras.layers.core module: Dense() code examples

We extracted the following 50 code examples from open-source Python projects to illustrate how to use keras.layers.core.Dense().
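The snippets are excerpts and span two API generations: Keras 1.x, where Dense takes `output_dim` (with `init=`, `W_regularizer=`, and so on), and Keras 2, where the same arguments became `units`, `kernel_initializer=`, etc. As a minimal, self-contained sketch of the layer itself (assuming Keras 2; not drawn from any project below):

from keras.models import Sequential
from keras.layers.core import Dense, Activation

# Dense(units, ...) is a fully connected layer mapping its input to `units` outputs.
model = Sequential()
model.add(Dense(64, input_dim=100, activation='relu'))  # Keras 1.x spelling: Dense(output_dim=64, input_dim=100)
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')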

Project: snake_game    Author: wing3s    | project source | file source
def main():
    game_width = 12
    game_height = 9
    nb_frames = 4
    actions = ((-1, 0), (1, 0), (0, -1), (0, 1), (0, 0))

    # Recipe of deep reinforcement learning model
    model = Sequential()
    model.add(Convolution2D(
        16,
        nb_row=3,
        nb_col=3,
        activation='relu',
        input_shape=(nb_frames, game_height, game_width)))
    model.add(Convolution2D(32, nb_row=3, nb_col=3, activation='relu'))
    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dense(len(actions)))
    model.compile(RMSprop(), 'MSE')

    agent = Agent(
        model, nb_frames, snake_game, actions, size=(game_width, game_height))
    agent.train(nb_epochs=10000, batch_size=64, gamma=0.8, save_model=True)
    agent.play(nb_rounds=10)
Project: keras    Author: GeekLiB    | project source | file source
def test_activity_regularization():
    from keras.engine import Input, Model
    import numpy as np

    layer = core.ActivityRegularization(l1=0.01, l2=0.01)

    # test in functional API
    x = Input(shape=(3,))
    z = core.Dense(2)(x)
    y = layer(z)
    model = Model(input=x, output=y)
    model.compile('rmsprop', 'mse', mode='FAST_COMPILE')

    model.predict(np.random.random((2, 3)))

    # test serialization
    model_config = model.get_config()
    model = Model.from_config(model_config)
    model.compile('rmsprop', 'mse')
Project: Deep-Learning-with-Keras    Author: PacktPublishing    | project source | file source
def build(input_shape, classes):
        model = Sequential()
        # CONV => RELU => POOL
        model.add(Conv2D(20, kernel_size=5, padding="same",
            input_shape=input_shape))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        # CONV => RELU => POOL
        model.add(Conv2D(50, kernel_size=5, padding="same"))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        # Flatten => RELU layers
        model.add(Flatten())
        model.add(Dense(500))
        model.add(Activation("relu"))

        # a softmax classifier
        model.add(Dense(classes))
        model.add(Activation("softmax"))

        return model

# network and training
Project: DeepLearning    Author: STHSF    | project source | file source
def build_model(layers):
    model = Sequential()

    model.add(LSTM(
        input_dim=layers[0],
        output_dim=layers[1],
        return_sequences=True))
    model.add(Dropout(0.2))

    model.add(LSTM(
        layers[2],
        return_sequences=False))
    model.add(Dropout(0.2))

    model.add(Dense(
        output_dim=layers[3]))
    model.add(Activation("linear"))

    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return model
Project: deep_learning_ex    Author: zatonovo    | project source | file source
def init_model():
    start_time = time.time()
    print('Compiling Model ...')
    model = Sequential()
    model.add(Dense(500, input_dim=784))
    model.add(Activation('relu'))
    model.add(Dropout(0.4))
    model.add(Dense(300))
    model.add(Activation('relu'))
    model.add(Dropout(0.4))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms,
      metrics=['accuracy'])
    print('Model compiled in {0} seconds'.format(time.time() - start_time))
    return model
Project: deep_learning_ex    Author: zatonovo    | project source | file source
def init_model():
    """
    """
    start_time = time.time()
    print 'Compiling model...'
    model = Sequential()

    model.add(Convolution2D(64, 3,3, border_mode='valid', input_shape=INPUT_SHAPE))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2,2)))
    model.add(Dropout(.25))

    model.add(Flatten())

    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms,
      metrics=['accuracy'])
    print('Model compiled in {0} seconds'.format(time.time() - start_time))

    model.summary()
    return model
Project: keras-molecules    Author: maxhodak    | project source | file source
def _buildEncoder(self, x, latent_rep_size, max_length, epsilon_std = 0.01):
        h = Convolution1D(9, 9, activation = 'relu', name='conv_1')(x)
        h = Convolution1D(9, 9, activation = 'relu', name='conv_2')(h)
        h = Convolution1D(10, 11, activation = 'relu', name='conv_3')(h)
        h = Flatten(name='flatten_1')(h)
        h = Dense(435, activation = 'relu', name='dense_1')(h)

        def sampling(args):
            z_mean_, z_log_var_ = args
            batch_size = K.shape(z_mean_)[0]
            epsilon = K.random_normal(shape=(batch_size, latent_rep_size), mean=0., std = epsilon_std)
            return z_mean_ + K.exp(z_log_var_ / 2) * epsilon

        z_mean = Dense(latent_rep_size, name='z_mean', activation = 'linear')(h)
        z_log_var = Dense(latent_rep_size, name='z_log_var', activation = 'linear')(h)

        def vae_loss(x, x_decoded_mean):
            x = K.flatten(x)
            x_decoded_mean = K.flatten(x_decoded_mean)
            xent_loss = max_length * objectives.binary_crossentropy(x, x_decoded_mean)
            kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis = -1)
            return xent_loss + kl_loss

        return (vae_loss, Lambda(sampling, output_shape=(latent_rep_size,), name='lambda')([z_mean, z_log_var]))
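
The Lambda above implements the VAE reparameterization trick, z = z_mean + exp(z_log_var / 2) * epsilon with epsilon ~ N(0, epsilon_std), so that sampling stays differentiable. A hedged sketch of how the returned (loss, z) pair is typically consumed; `x` is the encoder input tensor and `_buildDecoder` is a hypothetical stand-in for the project's decoder:

# Hedged sketch, not project code: wiring the encoder into a trainable VAE.
vae_loss, z = self._buildEncoder(x, latent_rep_size, max_length)
decoded = self._buildDecoder(z, latent_rep_size, max_length)  # hypothetical decoder
vae = Model(x, decoded)
# vae_loss closes over z_mean and z_log_var, so it must be compiled with this graph.
vae.compile(optimizer='adam', loss=vae_loss)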
Project: keras    Author: GeekLiB    | project source | file source
def test_dense():
    from keras import regularizers
    from keras import constraints

    layer_test(core.Dense,
               kwargs={'output_dim': 3},
               input_shape=(3, 2))

    layer_test(core.Dense,
               kwargs={'output_dim': 3,
                       'W_regularizer': regularizers.l2(0.01),
                       'b_regularizer': regularizers.l1(0.01),
                       'activity_regularizer': regularizers.activity_l2(0.01),
                       'W_constraint': constraints.MaxNorm(1),
                       'b_constraint': constraints.MaxNorm(1)},
               input_shape=(3, 2))
Project: keras    Author: GeekLiB    | project source | file source
def test_LearningRateScheduler():
    (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
                                                         nb_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         nb_class=nb_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(nb_class, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    cbks = [callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=5)
    assert abs(float(K.get_value(model.optimizer.lr)) - 0.2) < K.epsilon()
Project: keras    Author: GeekLiB    | project source | file source
def test_sequential_count_params():
    input_dim = 20
    nb_units = 10
    nb_classes = 2

    n = input_dim * nb_units + nb_units
    n += nb_units * nb_units + nb_units
    n += nb_units * nb_classes + nb_classes

    model = Sequential()
    model.add(Dense(nb_units, input_shape=(input_dim,)))
    model.add(Dense(nb_units))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    model.build()

    assert(n == model.count_params())

    model.compile('sgd', 'binary_crossentropy')
    assert(n == model.count_params())
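
As a quick sanity check (not part of the original test), the expected count n works out to 342:

# 20 * 10 + 10 = 210   (first Dense: weight matrix plus biases)
# 10 * 10 + 10 = 110   (second Dense)
# 10 *  2 +  2 =  22   (third Dense; Activation adds no parameters)
#         total = 342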
Project: blackjacklearner    Author: srome    | project source | file source
def __init__(self):
        super().__init__()
        self._learning = True
        self._learning_rate = .1
        self._discount = .1
        self._epsilon = .9

        # Create Model
        model = Sequential()

        model.add(Dense(2, init='lecun_uniform', input_shape=(2,)))
        model.add(Activation('relu'))

        model.add(Dense(10, init='lecun_uniform'))
        model.add(Activation('relu'))

        model.add(Dense(4, init='lecun_uniform'))
        model.add(Activation('linear'))

        rms = RMSprop()
        model.compile(loss='mse', optimizer=rms)

        self._model = model
Project: minos    Author: guybedo    | project source | file source
def test_custom_parameters(self):
        experiment_parameters = ExperimentParameters()
        experiment_parameters.layout_parameter('blocks', int_param(1, 10))
        param = experiment_parameters.get_layout_parameter('blocks')
        self.assertTrue(
            1 == param.lo and 10 == param.hi,
            'Should have set values')
        experiment_parameters.layout_parameter('layers', int_param(1, 3))
        param = experiment_parameters.get_layout_parameter('layers')
        self.assertTrue(
            1 == param.lo and 3 == param.hi,
            'Should have set values')
        experiment_parameters.layer_parameter('Dense.activation', string_param(['relu', 'tanh']))
        param = experiment_parameters.get_layer_parameter('Dense.activation')
        self.assertTrue(
            'relu' == param.values[0] and 'tanh' == param.values[1],
            'Should have set values')
Project: minos    Author: guybedo    | project source | file source
def test_custom_definitions(self):

        def custom_activation(x):
            return x

        register_custom_activation('custom_activation', custom_activation)
        register_custom_layer('Dense2', Dense, dict(test='test'))
        experiment_parameters = ExperimentParameters(use_default_values=False)
        custom_params = experiment_parameters.get_layer_parameter('Dense2')
        self.assertIsNotNone(
            custom_params,
            'Should have registered custom layer')
        self.assertTrue(
            'test' in custom_params,
            'Should have registered custom layer params')
        activations = experiment_parameters.get_layer_parameter('Dense.activation')
        self.assertTrue(
            'custom_activation' in activations.values,
            'Should have registered custom_activation')
Project: copper_price_forecast    Author: liyinwei    | project source | file source
def build_model():
    """
    Build the model.
    """
    model = Sequential()

    model.add(LSTM(units=Conf.LAYERS[1], input_shape=(Conf.LAYERS[1], Conf.LAYERS[0]), return_sequences=True))
    model.add(Dropout(0.2))

    model.add(LSTM(Conf.LAYERS[2], return_sequences=False))
    model.add(Dropout(0.2))

    model.add(Dense(units=Conf.LAYERS[3]))
    # model.add(BatchNormalization(weights=None, epsilon=1e-06, momentum=0.9))
    model.add(Activation("tanh"))
    # act = PReLU(alpha_initializer='zeros', weights=None)
    # act = LeakyReLU(alpha=0.3)
    # model.add(act)

    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return model
Project: copper_price_forecast    Author: liyinwei    | project source | file source
def build_model():
    """
    Build the model.
    """
    model = Sequential()

    model.add(LSTM(units=Conf.LAYERS[1], input_shape=(Conf.LAYERS[1], Conf.LAYERS[0]), return_sequences=True))
    model.add(Dropout(0.2))

    model.add(LSTM(Conf.LAYERS[2], return_sequences=False))
    model.add(Dropout(0.2))

    model.add(Dense(units=Conf.LAYERS[3]))
    # model.add(BatchNormalization(weights=None, epsilon=1e-06, momentum=0.9))
    model.add(Activation("tanh"))
    # act = PReLU(alpha_initializer='zeros', weights=None)
    # act = LeakyReLU(alpha=0.3)
    # model.add(act)

    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return model
Project: copper_price_forecast    Author: liyinwei    | project source | file source
def build_model(layers):
    """
    Build the model.
    """
    model = Sequential()

    model.add(LSTM(units=layers[1], input_shape=(layers[1], layers[0]), return_sequences=True))
    model.add(Dropout(0.2))

    model.add(LSTM(layers[2], return_sequences=False))
    model.add(Dropout(0.2))

    model.add(Dense(units=layers[3]))
    model.add(Activation("tanh"))

    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return model
Project: Quantrade    Author: quant-trade    | project source | file source
def __init__(self, sizes,
                 cell       = RNNCell.LSTM,
                 dropout    = 0.2,
                 activation = 'linear',
                 loss       = 'mse',
                 optimizer  = 'rmsprop'): #beta_1
        self.model = Sequential()

        self.model.add(cell(
            input_dim        = sizes[0],
            output_dim       = sizes[1],
            return_sequences = True
        ))

        for i in range(2, len(sizes) - 1):
            self.model.add(cell(sizes[i], return_sequences = False))
            self.model.add(Dropout(dropout))

        self.model.add(Dense(output_dim = sizes[-1]))
        self.model.add(Activation(activation))

        self.model.compile(loss=loss, optimizer=optimizer)
Project: policy_net_go    Author: gurgehx    | project source | file source
def get_simple_model():
    model = Sequential()
    model.add(ZeroPadding2D(padding=(3, 3), input_shape=(nb_input_layers, NB_ROWS, NB_COLS)))
    model.add(Convolution2D(96, 5, 5))
    model.add(Activation('relu'))

    model.add(ZeroPadding2D(padding=(1, 1)))
    model.add(Convolution2D(192, 3, 3))
    model.add(Activation('relu'))

    model.add(Flatten())

    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    print("Compiling model")
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    print("Compiled model")

    return model

###############################################################################
Project: CCIR    Author: xiaogang00    | project source | file source
def get_graph(num_users, num_items, latent_dim):

    model = Graph()
    model.add_input(name='user_input', input_shape=(num_users,))
    model.add_input(name='positive_item_input', input_shape=(num_items,))
    model.add_input(name='negative_item_input', input_shape=(num_items,))

    model.add_node(layer=Dense(latent_dim, input_shape = (num_users,)),
                   name='user_latent',
                   input='user_input')
    model.add_shared_node(layer=Dense(latent_dim, input_shape = (num_items,)), 
                          name='item_latent', 
                          inputs=['positive_item_input', 'negative_item_input'],
                          merge_mode=None, 
                          outputs=['positive_item_latent', 'negative_item_latent'])

    model.add_node(layer=Activation('linear'), name='user_pos', inputs=['user_latent', 'positive_item_latent'], merge_mode='dot', dot_axes=1)
    model.add_node(layer=Activation('linear'), name='user_neg', inputs=['user_latent', 'negative_item_latent'], merge_mode='dot', dot_axes=1)

    model.add_output(name='triplet_loss_out', inputs=['user_pos', 'user_neg'])
    model.compile(loss={'triplet_loss_out': ranking_loss}, optimizer=Adam())  # or: Adagrad(lr=0.1, epsilon=1e-06)

    return model
Project: keras_detect_tool_wear    Author: kidozh    | project source | file source
def build_simple_rnn_model(timestep, input_dim, output_dim, dropout=0.4, lr=0.001):
    input = Input((timestep, input_dim))
    # LSTM, single layer
    output = LSTM(50, return_sequences=False)(input)
    # for _ in range(1):
    #     output = LSTM(32, return_sequences=True)(output)
    # output = LSTM(50, return_sequences=False)(output)
    output = Dropout(dropout)(output)
    output = Dense(output_dim)(output)

    model = Model(inputs=input, outputs=output)

    optimizer = Adam(lr=lr)

    model.compile(loss='mae', optimizer=optimizer, metrics=['mse'])

    return model
Project: hyperas    Author: maxpumperla    | project source | file source
def model(X_train, X_test, Y_train, Y_test):
    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([400, 512, 600])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])

    nb_epoch = 10
    batch_size = 128

    model.fit(X_train, Y_train,
              batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=2,
              validation_data=(X_test, Y_test))

    score, acc = model.evaluate(X_test, Y_test, verbose=0)

    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
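
The double-brace expressions ({{uniform(0, 1)}}, {{choice([...])}}) are hyperas search-space templates, not plain Python; hyperas rewrites the function source before running it. A hedged sketch of the usual driver, using hyperas's documented entry point and assuming a `data` function that returns values matching the model function's signature:

from hyperas import optim
from hyperopt import tpe, Trials

# Run a TPE search over the templated hyperparameters in `model`.
best_run, best_model = optim.minimize(model=model,
                                      data=data,  # assumed data-loading function
                                      algo=tpe.suggest,
                                      max_evals=10,
                                      trials=Trials())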
Project: hyperas    Author: maxpumperla    | project source | file source
def model(X_train, Y_train, X_test, Y_test):
    model = Sequential()
    model.add(Dense(50, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([20, 30, 40])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])

    model.fit(X_train, Y_train,
              batch_size={{choice([64, 128])}},
              nb_epoch=1,
              verbose=2,
              validation_data=(X_test, Y_test))
    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Project: hyperas    Author: maxpumperla    | project source | file source
def ensemble_model(X_train, X_test, Y_train, Y_test):
    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([400, 512, 600])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])

    nb_epoch = 10
    batch_size = 128

    model.fit(X_train, Y_train,
              batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=2,
              validation_data=(X_test, Y_test))

    score, acc = model.evaluate(X_test, Y_test, verbose=0)

    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Project: MixtureOfExperts    Author: krishnakalyan3    | project source | file source
def lenet5(self):
        model = Sequential()
        model.add(Conv2D(64, (5, 5), name='conv1',
                         padding='same',
                         activation='relu',
                         input_shape=self.ip_shape[1:]))

        model.add(MaxPooling2D(pool_size=(2, 2), name='pool1'))
        # Local Normalization
        model.add(Conv2D(64, (5, 5,), padding='same', activation='relu', name='conv2'))
        # Local Normalization
        model.add(MaxPooling2D(pool_size=(2, 2), name='pool2'))

        model.add(Flatten())
        model.add(Dense(128, activation='relu', name='dense1'))
        model.add(Dropout(0.5))
        model.add(Dense(64, activation='relu', name='dense2'))
        model.add(Dropout(0.5))
        model.add(Dense(10, activation='softmax', name='dense3'))

        adam = keras.optimizers.Adam(lr=self.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=["accuracy"])
        return model
Project: MixtureOfExperts    Author: krishnakalyan3    | project source | file source
def simple_nn(self):
        model = Sequential()
        model.add(Conv2D(64, (self.stride, self.stride,), name='conv1',
                         padding='same',
                         activation='relu',
                         input_shape=self.ip_shape[1:]))

        model.add(MaxPooling2D(pool_size=(2, 2), name='pool1'))

        model.add(Flatten())
        model.add(Dense(64, activation='relu', name='dense2'))
        model.add(Dropout(0.5))
        model.add(Dense(10, activation='softmax', name='dense3'))
        adam = keras.optimizers.Adam(lr=self.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

        model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=["accuracy"])
        return model
Project: MixtureOfExperts    Author: krishnakalyan3    | project source | file source
def cuda_cnn(self):
        model = Sequential()
        model.add(Conv2D(32, (5, 5),
                         border_mode='same',
                         activation='relu',
                         input_shape=self.ip_shape[1:]))

        model.add(MaxPooling2D(pool_size=(2, 2)))
        # model.add(contrast normalization)
        model.add(Conv2D(32, (5, 5), border_mode='valid', activation='relu'))
        model.add(AveragePooling2D(border_mode='same'))
        # model.add(contrast normalization)
        model.add(Conv2D(64, (5, 5), border_mode='valid', activation='relu'))
        model.add(AveragePooling2D(border_mode='same'))
        model.add(Flatten())
        model.add(Dense(16, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(10, activation='softmax'))
        adam = keras.optimizers.Adam(lr=self.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

        model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=["accuracy"])
        return model
Project: MixtureOfExperts    Author: krishnakalyan3    | project source | file source
def small_nn(self):
        model = Sequential()
        model.add(Conv2D(64, (self.stride, self.stride,), name='conv1',
                         padding='same',
                         activation='relu',
                         input_shape=self.ip_shape[1:]))
        model.add(MaxPooling2D(pool_size=(2, 2), name='pool1'))
        model.add(BatchNormalization())

        model.add(Flatten())
        model.add(Dense(32, activation='relu', name='dense1'))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))
        model.add(Dense(10, activation='softmax', name='dense2'))
        adam = keras.optimizers.Adam(lr=self.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

        model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=["accuracy"])
        return model
Project: MixtureOfExperts    Author: krishnakalyan3    | project source | file source
def small_nn_soft(self, temp):
        model = Sequential()
        model.add(Conv2D(64, (self.stride, self.stride,), name='conv1',
                         padding='same',
                         activation='relu',
                         input_shape=self.ip_shape[1:]))
        model.add(MaxPooling2D(pool_size=(2, 2), name='pool1'))
        model.add(BatchNormalization())

        model.add(Flatten())
        model.add(Dense(32, activation='relu', name='dense1'))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))
        model.add(Dense(10, name='dense2'))
        model.add(Lambda(lambda x: x / temp))
        model.add(Activation('softmax'))

        adam = keras.optimizers.Adam(lr=self.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=["accuracy"])
        return model
Project: neuroevolution    Author: cosmoharrigan    | project source | file source
def create_rnn():
    """Create a recurrent neural network to compute a control policy.

    Reference:
    Koutnik, Jan, Jurgen Schmidhuber, and Faustino Gomez. "Evolving deep
    unsupervised convolutional networks for vision-based reinforcement
    learning." Proceedings of the 2014 conference on Genetic and
    evolutionary computation. ACM, 2014.
    """
    model = Sequential()

    model.add(SimpleRNN(output_dim=3, stateful=True, batch_input_shape=(1, 1, 3)))
    model.add(Dense(input_dim=3, output_dim=3))

    model.compile(loss='mse', optimizer='rmsprop')

    return model
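
Because the SimpleRNN is stateful with batch_input_shape=(1, 1, 3), the network is meant to be stepped one observation at a time, carrying its hidden state across calls. A hedged usage sketch with random stand-in observations:

import numpy as np

rnn = create_rnn()
rnn.reset_states()                     # clear recurrent state between episodes
obs = np.zeros((1, 1, 3))              # (batch=1, timesteps=1, features=3)
for _ in range(200):
    control = rnn.predict(obs)         # shape (1, 3) policy output
    obs = np.random.random((1, 1, 3))  # stand-in for the next environment observation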
Project: Theano-MPI    Author: uoguelph-mlrg    | project source | file source
def build_model(self):


        img_input = Input(shape=(img_channels, img_rows, img_cols))

        # one conv at the beginning (spatial size: 32x32)
        x = ZeroPadding2D((1, 1))(img_input)
        x = Convolution2D(16, nb_row=3, nb_col=3)(x)

        # Stage 1 (spatial size: 32x32)
        x = bottleneck(x, n, 16, 16 * k, dropout=0.3, subsample=(1, 1))
        # Stage 2 (spatial size: 16x16)
        x = bottleneck(x, n, 16 * k, 32 * k, dropout=0.3, subsample=(2, 2))
        # Stage 3 (spatial size: 8x8)
        x = bottleneck(x, n, 32 * k, 64 * k, dropout=0.3, subsample=(2, 2))

        x = BatchNormalization(mode=0, axis=1)(x)
        x = Activation('relu')(x)
        x = AveragePooling2D((8, 8), strides=(1, 1))(x)
        x = Flatten()(x)
        preds = Dense(nb_classes, activation='softmax')(x)

        self.model = Model(input=img_input, output=preds)

        self.keras_get_params()
Project: KAGGLE_CERVICAL_CANCER_2017    Author: ZFTurbo    | project source | file source
def VGG_16_KERAS(classes_number, optim_name='Adam', learning_rate=-1):
    from keras.layers.core import Dense, Dropout, Flatten
    from keras.applications.vgg16 import VGG16
    from keras.models import Model

    base_model = VGG16(include_top=True, weights='imagenet')
    x = base_model.layers[-2].output
    del base_model.layers[-1:]
    x = Dense(classes_number, activation='softmax', name='predictions')(x)
    vgg16 = Model(input=base_model.input, output=x)

    optim = get_optim('VGG16_KERAS', optim_name, learning_rate)
    vgg16.compile(optimizer=optim, loss='categorical_crossentropy', metrics=['accuracy'])
    # print(vgg16.summary())
    return vgg16


# MIN: 1.00 Fast: 60 sec
Project: KAGGLE_CERVICAL_CANCER_2017    Author: ZFTurbo    | project source | file source
def VGG_16_2_v2(classes_number, optim_name='Adam', learning_rate=-1):
    from keras.layers.core import Dense, Dropout, Flatten
    from keras.applications.vgg16 import VGG16
    from keras.models import Model
    from keras.layers import Input

    input_tensor = Input(shape=(3, 224, 224))
    base_model = VGG16(input_tensor=input_tensor, include_top=False, weights='imagenet')
    x = base_model.output
    x = Flatten()(x)
    x = Dense(256, activation='relu')(x)
    x = Dropout(0.2)(x)
    x = Dense(256, activation='relu')(x)
    x = Dropout(0.2)(x)
    x = Dense(classes_number, activation='softmax', name='predictions')(x)
    vgg16 = Model(input=base_model.input, output=x)

    optim = get_optim('VGG16_KERAS', optim_name, learning_rate)
    vgg16.compile(optimizer=optim, loss='categorical_crossentropy', metrics=['accuracy'])
    # print(vgg16.summary())
    return vgg16
Project: deep    Author: 54chen    | project source | file source
def model(X_train, Y_train, X_test, Y_test):
    model = Sequential()
    model.add(Dense({{choice([15, 512, 1024])}}, input_dim=8, init='uniform', activation='softplus'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([256, 512, 1024])}}))
    model.add(Activation({{choice(['relu', 'sigmoid','softplus'])}}))
    model.add(Dropout({{uniform(0, 1)}}))

    model.add(Dense(1, init='uniform', activation='sigmoid'))

    model.compile(loss='mse', metrics=['accuracy'],
                  optimizer={{choice(['rmsprop', 'adam', 'sgd'])}})

    model.fit(X_train, Y_train,
              batch_size={{choice([10, 50, 100])}},
              nb_epoch={{choice([1, 50])}},
              show_accuracy=True,
              verbose=2,
              validation_data=(X_test, Y_test))
    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Project: tf-wgan    Author: kuleshov    | project source | file source
def make_dcgan_discriminator(Xk_d):
  x = Convolution2D(nb_filter=64, nb_row=5, nb_col=5, subsample=(2,2),
        activation=None, border_mode='same', init='glorot_uniform',
        dim_ordering='th')(Xk_d)
  x = BatchNormalization(mode=2, axis=1)(x)
  x = LeakyReLU(0.2)(x)

  x = Convolution2D(nb_filter=128, nb_row=5, nb_col=5, subsample=(2,2),
        activation=None, border_mode='same', init='glorot_uniform',
        dim_ordering='th')(x)
  x = BatchNormalization(mode=2, axis=1)(x)
  x = LeakyReLU(0.2)(x)

  x = Flatten()(x)
  x = Dense(1024)(x)
  x = BatchNormalization(mode=2)(x)
  x = LeakyReLU(0.2)(x)

  d = Dense(1, activation=None)(x)

  return d
Project: tf-wgan    Author: kuleshov    | project source | file source
def make_dcgan_generator(Xk_g, n_lat, n_chan=1):
  n_g_hid1 = 1024 # size of hidden layer in generator layer 1
  n_g_hid2 = 128  # size of hidden layer in generator layer 2

  x = Dense(n_g_hid1)(Xk_g)
  x = BatchNormalization(mode=2)(x)
  x = Activation('relu')(x)

  x = Dense(n_g_hid2*7*7)(x)
  x = BatchNormalization(mode=2)(x)
  x = Activation('relu')(x)
  x = Reshape((n_g_hid2, 7, 7))(x)

  x = Deconvolution2D(64, 5, 5, output_shape=(128, 64, 14, 14), 
        border_mode='same', activation=None, subsample=(2,2), 
        init='orthogonal', dim_ordering='th')(x)
  x = BatchNormalization(mode=2, axis=1)(x)
  x = Activation('relu')(x)

  g = Deconvolution2D(n_chan, 5, 5, output_shape=(128, n_chan, 28, 28), 
        border_mode='same', activation='sigmoid', subsample=(2,2), 
        init='orthogonal', dim_ordering='th')(x)

  return g
Project: tf-wgan    Author: kuleshov    | project source | file source
def make_dcgan_discriminator(Xk_d):
  x = Convolution2D(nb_filter=64, nb_row=4, nb_col=4, subsample=(2,2),
        activation=None, border_mode='same', init=conv2D_init,
        dim_ordering='th')(Xk_d)
  # x = BatchNormalization(mode=2, axis=1)(x) # <- makes things much worse!
  x = LeakyReLU(0.2)(x)

  x = Convolution2D(nb_filter=128, nb_row=4, nb_col=4, subsample=(2,2),
        activation=None, border_mode='same', init=conv2D_init,
        dim_ordering='th')(x)
  x = BatchNormalization(mode=2, axis=1)(x)
  x = LeakyReLU(0.2)(x)

  x = Flatten()(x)
  x = Dense(1024, init=conv2D_init)(x)
  x = BatchNormalization(mode=2)(x)
  x = LeakyReLU(0.2)(x)

  d = Dense(1, activation=None)(x)

  return d
Project: tf-wgan    Author: kuleshov    | project source | file source
def make_dcgan_generator(Xk_g, n_lat, n_chan=1):
  n_g_hid1 = 1024 # size of hidden layer in generator layer 1
  n_g_hid2 = 128  # size of hidden layer in generator layer 2

  x = Dense(n_g_hid1, init=conv2D_init)(Xk_g)
  x = BatchNormalization(mode=2, )(x)
  x = Activation('relu')(x)

  x = Dense(n_g_hid2*7*7, init=conv2D_init)(x)
  x = Reshape((n_g_hid2, 7, 7))(x)
  x = BatchNormalization(mode=2, axis=1)(x)
  x = Activation('relu')(x)

  x = Deconvolution2D(64, 5, 5, output_shape=(128, 64, 14, 14), 
        border_mode='same', activation=None, subsample=(2,2), 
        init=conv2D_init, dim_ordering='th')(x)
  x = BatchNormalization(mode=2, axis=1)(x)
  x = Activation('relu')(x)

  g = Deconvolution2D(n_chan, 5, 5, output_shape=(128, n_chan, 28, 28), 
        border_mode='same', activation='sigmoid', subsample=(2,2), 
        init=conv2D_init, dim_ordering='th')(x)

  return g
Project: nuts-ml    Author: maet3608    | project source | file source
def create_network():
    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation

    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return KerasNetwork(model, 'mlp_weights.hd5')
Project: Hotpot    Author: Liang-Qiu    | project source | file source
def prep_model(inputs, N, s0pad, s1pad, c):
    outputs = B.rnn_input(inputs, N, s0pad,
                dropout=c['dropout'], dropoutfix_inp=c['dropoutfix_inp'], dropoutfix_rec=c['dropoutfix_rec'],
                sdim=c['sdim'],
                rnnbidi=c['rnnbidi'], rnn=c['rnn'], rnnact=c['rnnact'], rnninit=c['rnninit'],
                rnnbidi_mode=c['rnnbidi_mode'], rnnlevels=c['rnnlevels'])

    # Projection
    if c['project']:
        proj = Dense(int(N*c['pdim']), activation=c['pact'], kernel_regularizer=l2(c['l2reg']), name='proj')
        e0p = proj(outputs[0])
        e1p = proj(outputs[1])
        N = N*c['pdim']
        return [e0p, e1p], N 
    else:
        return [outputs[0], outputs[1]], N

    #input_dim=int(N*c['sdim'])
Project: Hotpot    Author: Liang-Qiu    | project source | file source
def prep_model(inputs, N, s0pad, s1pad, c):
    outputs = B.rnn_input(inputs, N, s0pad,
                dropout=c['dropout'], dropoutfix_inp=c['dropoutfix_inp'], dropoutfix_rec=c['dropoutfix_rec'],
                sdim=c['sdim'],
                rnnbidi=c['rnnbidi'], rnn=c['rnn'], rnnact=c['rnnact'], rnninit=c['rnninit'],
                rnnbidi_mode=c['rnnbidi_mode'], rnnlevels=c['rnnlevels'])

    # Projection
    if c['project']:
        proj = Dense(int(N*c['pdim']), activation=c['pact'], kernel_regularizer=l2(c['l2reg']), name='proj')
        e0p = proj(outputs[0])
        e1p = proj(outputs[1])
        N = N*c['pdim']
        return [e0p, e1p], N 
    else:
        return [outputs[0], outputs[1]], N

    #input_dim=int(N*c['sdim'])
Project: nli_generation    Author: jstarc    | project source | file source
def attention_model(hidden_size, glove):

    prem_input = Input(shape=(None,), dtype='int32')
    hypo_input = Input(shape=(None,), dtype='int32')

    prem_embeddings = make_fixed_embeddings(glove, None)(prem_input)
    hypo_embeddings = make_fixed_embeddings(glove, None)(hypo_input)
    premise_layer = LSTM(output_dim=hidden_size, return_sequences=True, 
                            inner_activation='sigmoid')(prem_embeddings)
    hypo_layer = LSTM(output_dim=hidden_size, return_sequences=True, 
                            inner_activation='sigmoid')(hypo_embeddings)    
    attention = LstmAttentionLayer(output_dim = hidden_size) ([hypo_layer, premise_layer])
    final_dense = Dense(3, activation='softmax')(attention)

    model = Model(input=[prem_input, hypo_input], output=final_dense)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
Project: DeepRL-FlappyBird    Author: hashbangCoder    | project source | file source
def model_default(input_shape):
    model = Sequential()
    model.add(Convolution2D(32,8,8,subsample=(4,4), border_mode='same',init='he_uniform',input_shape=input_shape))

    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(64,4,4, subsample=(2,2),border_mode='same' , init='he_uniform'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(64,3,3, subsample=(1,1),border_mode='same' , init='he_uniform'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(512, init='he_uniform'))
    model.add(Activation('relu'))
    model.add(Dense(2, init='he_uniform'))

    return model


# Model WITH BATCHNORM NO MAXPOOL NO Dropout
Project: data-science-bowl-2017    Author: tondonia    | project source | file source
def create_model_2():
    inputs = Input((32, 32, 32, 1))

    #noise = GaussianNoise(sigma=0.1)(x)

    conv1 = Convolution3D(32, 3, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = SpatialDropout3D(0.1)(conv1)
    conv1 = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling3D(pool_size=(2,2, 2))(conv1)

    x = Flatten()(pool1)
    x = Dense(64, init='normal')(x)
    x = Dropout(0.5)(x)
    predictions = Dense(1, init='normal', activation='sigmoid')(x)

    model = Model(input=inputs, output=predictions)
    model.summary()
    optimizer = Adam(lr=1e-5)
    model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['binary_accuracy','precision','recall','mean_squared_error','accuracy'])

    return model
Project: data-science-bowl-2017    Author: tondonia    | project source | file source
def create_model_1():
    inputs = Input((32, 32, 32, 1))

    #noise = GaussianNoise(sigma=0.1)(x)

    conv1 = Convolution3D(32, 3, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = SpatialDropout3D(0.1)(conv1)
    conv1 = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling3D(pool_size=(2,2, 2))(conv1)

    x = Flatten()(pool1)
    x = Dense(64, init='normal')(x)
    predictions = Dense(1, init='normal', activation='sigmoid')(x)

    model = Model(input=inputs, output=predictions)
    model.summary()
    optimizer = Adam(lr=1e-5)
    model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['binary_accuracy','precision','recall','mean_squared_error','accuracy'])

    return model
Project: Deep-Forecast    Author: amirstar    | project source | file source
def buildModelLSTM_3(self):
        model = Sequential()

        layers = [self.inOutVecDim, 57, 57 * 2, 32, self.inOutVecDim]
        model.add(LSTM(input_dim=layers[0], output_dim=layers[1],
            return_sequences=False))

        model.add(Dense(
            output_dim=layers[4]))

        model.add(Activation(self.activation))

        optimizer = keras.optimizers.RMSprop(lr=0.001)
        model.compile(loss="mae", optimizer=optimizer)

        return model
Project: Deep-Forecast    Author: amirstar    | project source | file source
def buildModelLSTM_4(self):
        model = Sequential()

        layers = [self.inOutVecDim, 57, 57 * 2, 57, self.inOutVecDim]
        model.add(LSTM(input_dim=layers[0], output_dim=layers[1],
            return_sequences=True))

        model.add(LSTM(layers[2],
            return_sequences=False))

        model.add(Dense(output_dim=layers[4]))

        model.add(Activation(self.activation))

        optimizer = keras.optimizers.RMSprop(lr=0.001)
        model.compile(loss="mae", optimizer=optimizer)

        return model
Project: sc2_predictor    Author: hellno    | project source | file source
def get_model(img_channels, img_width, img_height, dropout=0.5):

    model = Sequential()
    model.add(Convolution2D(32, 3, 3, input_shape=(
        img_channels, img_width, img_height)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(32, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(32, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(64))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    return model
Project: sc2_predictor    Author: hellno    | project source | file source
def get_model(shape, dropout=0.5, path=None):
    print('building neural network')

    model=Sequential()

    model.add(Convolution2D(512, 3, 3, border_mode='same', input_shape=shape))
    model.add(Activation('relu'))
    model.add(Convolution2D(512, 3, 3, border_mode='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(dropout))

    model.add(Flatten())  # input_shape=shape
    # model.add(Dense(4096))
    # model.add(Activation('relu'))
    # model.add(Dropout(0.5))
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    model.add(Dense(1))
    #model.add(Activation('linear'))

    return model
Project: product-category-classifier    Author: two-tap    | project source | file source
def build_image_model():
  base_model = InceptionV3(weights='imagenet', include_top=False)

  # Freeze Inception's weights - we don't want to train these
  for layer in base_model.layers:
      layer.trainable = False

  # add a fully connected layer after Inception - we do want to train these
  x = base_model.output
  x = GlobalAveragePooling2D()(x)
  x = Dense(2048, activation='relu')(x)

  return x, base_model.input


# Build the two models.
Project: RIDDLE    Author: jisungk    | project source | file source
def create_base_model(nb_features, nb_classes, learning_rate=0.02):
    model = Sequential() 

    # input layer + first hidden layer 
    model.add(Dense(512, kernel_initializer='lecun_uniform', input_shape=(nb_features,)))
    model.add(PReLU()) 
    model.add(Dropout(0.5)) 

    # additional hidden layer
    model.add(Dense(512, kernel_initializer='lecun_uniform')) 
    model.add(PReLU()) 
    model.add(Dropout(0.75)) 

    # output layer 
    model.add(Dense(nb_classes, kernel_initializer='lecun_uniform')) 
    model.add(Activation('softmax')) 

    model.compile(loss='categorical_crossentropy', 
        optimizer=Adam(lr=learning_rate), metrics=['accuracy'])  

    return model
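
A hedged usage sketch for the factory above; the feature and class counts and the random data are illustrative placeholders:

import numpy as np
from keras.utils import to_categorical

model = create_base_model(nb_features=100, nb_classes=4)
X = np.random.random((32, 100))                       # 32 fake samples
y = to_categorical(np.random.randint(4, size=32), 4)  # one-hot labels
model.fit(X, y, epochs=1, batch_size=8)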