Python keras.models module: Sequential() example source code

We extracted the following code examples from open source Python projects to illustrate how to use keras.models.Sequential().
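Most of the snippets below follow the same three-step Sequential workflow: stack layers with add(), compile() with a loss and optimizer, then fit() on data. As a minimal orientation before the extracted examples, here is a sketch using the Keras 2 API (the layer sizes and random data are arbitrary, not taken from any project below):

import numpy as np
from keras.models import Sequential
from keras.layers import Dense

# Build: only the first layer needs an input shape; Keras infers the rest.
model = Sequential()
model.add(Dense(32, activation='relu', input_shape=(16,)))
model.add(Dense(1, activation='sigmoid'))

# Compile: attach a loss, an optimizer, and optional metrics.
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])

# Fit on random data, purely to show the call signature.
x = np.random.random((128, 16))
y = np.random.randint(2, size=(128, 1))
model.fit(x, y, epochs=2, batch_size=32, verbose=0)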

Project: AutoSleepScorerDev    Author: skjerns
def tsinalis(input_shape, n_classes):
    """
    Input size should be [batch, 1d, 2d, ch] = (None, 1, 15000, 1)
    """
    model = Sequential(name='Tsinalis')
    model.add(Conv1D(filters=20, kernel_size=200, input_shape=input_shape, activation='relu'))
    print(model.input_shape)
    print(model.output_shape)
    model.add(MaxPooling1D(pool_size=20, strides=10))
    print(model.output_shape)
    model.add(keras.layers.core.Reshape([20, -1, 1]))
    print(model.output_shape)
    model.add(Conv2D(filters=400, kernel_size=(20, 30), activation='relu'))
    print(model.output_shape)
    model.add(MaxPooling2D(pool_size=(1, 10), strides=(1, 2)))
    print(model.output_shape)
    model.add(Flatten())
    print(model.output_shape)
    model.add(Dense(500, activation='relu'))
    model.add(Dense(500, activation='relu'))
    model.add(Dense(n_classes, activation='softmax', activity_regularizer=keras.regularizers.l2()))
    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.SGD(), metrics=[keras.metrics.categorical_accuracy])
    return model
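A hypothetical call for the model above (a sketch: Conv1D expects a 2-D per-sample shape, so a 15000-sample, single-channel epoch is passed as (15000, 1); n_classes=5 is an assumed value):

model = tsinalis(input_shape=(15000, 1), n_classes=5)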
Project: keras-contrib    Author: farizrahman4u
def make_generator():
    """Creates a generator model that takes a 100-dimensional noise vector as a "seed", and outputs images
    of size 28x28x1."""
    model = Sequential()
    model.add(Dense(1024, input_dim=100))
    model.add(LeakyReLU())
    model.add(Dense(128 * 7 * 7))
    model.add(BatchNormalization())
    model.add(LeakyReLU())
    if K.image_data_format() == 'channels_first':
        model.add(Reshape((128, 7, 7), input_shape=(128 * 7 * 7,)))
        bn_axis = 1
    else:
        model.add(Reshape((7, 7, 128), input_shape=(128 * 7 * 7,)))
        bn_axis = -1
    model.add(Conv2DTranspose(128, (5, 5), strides=2, padding='same'))
    model.add(BatchNormalization(axis=bn_axis))
    model.add(LeakyReLU())
    model.add(Convolution2D(64, (5, 5), padding='same'))
    model.add(BatchNormalization(axis=bn_axis))
    model.add(LeakyReLU())
    model.add(Conv2DTranspose(64, (5, 5), strides=2, padding='same'))
    model.add(BatchNormalization(axis=bn_axis))
    model.add(LeakyReLU())
    # Because we normalized training inputs to lie in the range [-1, 1],
    # the tanh function should be used for the output of the generator to ensure its output
    # also lies in this range.
    model.add(Convolution2D(1, (5, 5), padding='same', activation='tanh'))
    return model
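A usage sketch for the generator above (make_generator as defined here; the 100-dimensional noise and 28x28x1 output follow the docstring, the batch size of 16 is arbitrary):

import numpy as np

generator = make_generator()
noise = np.random.normal(size=(16, 100))  # 16 random 100-dimensional seeds
fake_images = generator.predict(noise)    # (16, 28, 28, 1) with channels_last, (16, 1, 28, 28) with channels_first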
Project: snake_game    Author: wing3s
def main():
    game_width = 12
    game_height = 9
    nb_frames = 4
    actions = ((-1, 0), (1, 0), (0, -1), (0, 1), (0, 0))

    # Recipe of deep reinforcement learning model
    model = Sequential()
    model.add(Convolution2D(
        16,
        nb_row=3,
        nb_col=3,
        activation='relu',
        input_shape=(nb_frames, game_height, game_width)))
    model.add(Convolution2D(32, nb_row=3, nb_col=3, activation='relu'))
    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dense(len(actions)))
    model.compile(RMSprop(), 'MSE')

    agent = Agent(
        model, nb_frames, snake_game, actions, size=(game_width, game_height))
    agent.train(nb_epochs=10000, batch_size=64, gamma=0.8, save_model=True)
    agent.play(nb_rounds=10)
Project: DeepTrade_keras    Author: happynoom
def __init__(self, input_shape, lr=0.01, n_layers=2, n_hidden=8, rate_dropout=0.2, loss='risk_estimation'):
        print("initializing..., learing rate %s, n_layers %s, n_hidden %s, dropout rate %s." %(lr, n_layers, n_hidden, rate_dropout))
        self.model = Sequential()
        self.model.add(Dropout(rate=rate_dropout, input_shape=(input_shape[0], input_shape[1])))
        for i in range(0, n_layers - 1):
            self.model.add(LSTM(n_hidden * 4, return_sequences=True, activation='tanh',
                                recurrent_activation='hard_sigmoid', kernel_initializer='glorot_uniform',
                                recurrent_initializer='orthogonal', bias_initializer='zeros',
                                dropout=rate_dropout, recurrent_dropout=rate_dropout))
        self.model.add(LSTM(n_hidden, return_sequences=False, activation='tanh',
                                recurrent_activation='hard_sigmoid', kernel_initializer='glorot_uniform',
                                recurrent_initializer='orthogonal', bias_initializer='zeros',
                                dropout=rate_dropout, recurrent_dropout=rate_dropout))
        self.model.add(Dense(1, kernel_initializer=initializers.glorot_uniform()))
        # self.model.add(BatchNormalization(axis=-1, moving_mean_initializer=Constant(value=0.5),
        #               moving_variance_initializer=Constant(value=0.25)))
        self.model.add(BatchRenormalization(axis=-1, beta_init=Constant(value=0.5)))
        self.model.add(Activation('relu_limited'))
        opt = RMSprop(lr=lr)
        self.model.compile(loss=loss,
                      optimizer=opt,
                      metrics=['accuracy'])
Project: keras    Author: GeekLiB
def make_teacher_model(train_data, validation_data, nb_epoch=3):
    '''Train a simple CNN as teacher model.
    '''
    model = Sequential()
    model.add(Conv2D(64, 3, 3, input_shape=input_shape,
                     border_mode='same', name='conv1'))
    model.add(MaxPooling2D(name='pool1'))
    model.add(Conv2D(64, 3, 3, border_mode='same', name='conv2'))
    model.add(MaxPooling2D(name='pool2'))
    model.add(Flatten(name='flatten'))
    model.add(Dense(64, activation='relu', name='fc1'))
    model.add(Dense(nb_class, activation='softmax', name='fc2'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=SGD(lr=0.01, momentum=0.9),
                  metrics=['accuracy'])

    train_x, train_y = train_data
    history = model.fit(train_x, train_y, nb_epoch=nb_epoch,
                        validation_data=validation_data)
    return model, history
Project: keras    Author: GeekLiB
def test_vector_regression():
    '''
    Perform float data prediction (regression) using a 2-layer MLP
    with tanh activation.
    '''
    np.random.seed(1337)
    nb_hidden = 10
    (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=500,
                                                         nb_test=200,
                                                         input_shape=(20,),
                                                         output_shape=(2,),
                                                         classification=False)

    model = Sequential([
        Dense(nb_hidden, input_shape=(X_train.shape[-1],), activation='tanh'),
        Dense(y_train.shape[-1])
    ])

    model.compile(loss='hinge', optimizer='adagrad')
    history = model.fit(X_train, y_train, nb_epoch=20, batch_size=16,
                        validation_data=(X_test, y_test), verbose=0)
    assert (history.history['val_loss'][-1] < 0.9)
Project: keras    Author: GeekLiB
def test_temporal_regression():
    '''
    Predict float numbers (regression) based on sequences
    of float numbers of length 3 using a single layer of GRU units
    '''
    (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=500,
                                                         nb_test=400,
                                                         input_shape=(3, 5),
                                                         output_shape=(2,),
                                                         classification=False)
    model = Sequential()
    model.add(GRU(y_train.shape[-1],
                  input_shape=(X_train.shape[1], X_train.shape[2])))
    model.compile(loss='hinge', optimizer='adam')
    history = model.fit(X_train, y_train, nb_epoch=5, batch_size=16,
                        validation_data=(X_test, y_test), verbose=0)
    assert(history.history['val_loss'][-1] < 1.)
Project: soph    Author: Linusp
def understand_variable_length_handle():
    """????????? recurrent layer ??????"""
    model = Sequential()
    model.add(GRU(input_dim=256, output_dim=256, return_sequences=True))
    model.compile(loss='mean_squared_error', optimizer='sgd')
    train_x = np.random.randn(100, 78, 256)
    train_y = np.random.randn(100, 78, 256)
    model.fit(train_x, train_y, verbose=0)

    inz_1 = np.random.randn(1, 78, 256)
    rez_1 = model.predict_proba(inz_1, verbose=0)

    inz_2 = np.random.randn(1, 87, 256)
    rez_2 = model.predict_proba(inz_2, verbose=0)

    print()
    print('=========== understand variable length =================')
    print('With `return_sequence=True`')
    print('Input shape is: {}, output shape is {}'.format(inz_1.shape, rez_1.shape))
    print('Input shape is: {}, output shape is {}'.format(inz_2.shape, rez_2.shape))
    print('====================== end =============================')
Project: namegenderclassifier    Author: joaoalvarenga
def train(self, dataset, train_split=0.8, dense_size=32, learning_rate=0.001, batch_size=32, epochs=50, activation='relu'):
        self.__load_dataset(dataset, train_split)

        train_x = np.array(self.__train_data[:, 0].tolist())
        train_y = to_categorical(self.__train_data[:, 1], 2)

        test_x = np.array(self.__test_data[:, 0].tolist())
        test_y = to_categorical(self.__test_data[:, 1], 2)

        print(train_x.shape)
        self.__model = Sequential()
        self.__model.add(Dense(dense_size, input_dim=train_x.shape[1], activation=activation, init='glorot_uniform'))
        self.__model.add(Dense(train_y.shape[1], activation='softmax', init='glorot_uniform'))
        self.__model.compile(optimizer=Adam(lr=learning_rate), loss='categorical_crossentropy', metrics=['categorical_accuracy'])  # use the learning_rate argument instead of a hard-coded 0.001

        self.__model.fit(train_x, train_y, batch_size=batch_size, nb_epoch=epochs, validation_data=(test_x, test_y), verbose=2)
Project: DeepAnomaly    Author: adiyoss
def test(path_test, input_size, hidden_size, batch_size, save_dir, model_name, maxlen):
    db = read_data(path_test)

    X = create_sequences(db[:-maxlen], win_size=maxlen, step=maxlen)
    X = np.reshape(X, (X.shape[0], X.shape[1], input_size))

    # build the model: 1 layer LSTM
    print('Build model...')
    model = Sequential()
    model.add(LSTM(hidden_size, return_sequences=False, input_shape=(maxlen, input_size)))
    model.add(Dense(maxlen))

    model.load_weights(save_dir + model_name)
    model.compile(loss='mse', optimizer='adam')

    prediction = model.predict(X, batch_size, verbose=1)
    prediction = prediction.flatten()
    # prediction_container = np.array(prediction).flatten()
    Y = db[maxlen:]
    plt.plot(prediction, label='prediction')
    plt.plot(Y, label='true')
    plt.legend()
    plt.show()
Project: dcgan    Author: kyloon
def discriminator_model():
    model = Sequential()
    model.add(Convolution2D(64,5,5,
                            border_mode='same',
                            input_shape=(1,28,28),
                            dim_ordering="th"))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2,2), dim_ordering="th"))
    model.add(Convolution2D(128,5,5, border_mode='same', dim_ordering="th"))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2,2), dim_ordering="th"))
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(Activation('tanh'))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model
Project: shenlan    Author: vector-1127
def discriminator_model():
    """ return a (b, 1) logits"""
    model = Sequential()
    model.add(Convolution2D(64, 4, 4,border_mode='same',input_shape=(IN_CH*2, img_cols, img_rows)))
    model.add(BatchNormalization(mode=2))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(128, 4, 4,border_mode='same'))
    model.add(BatchNormalization(mode=2))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(512, 4, 4,border_mode='same'))
    model.add(BatchNormalization(mode=2))
    model.add(Activation('tanh'))
    model.add(Convolution2D(1, 4, 4,border_mode='same'))
    model.add(BatchNormalization(mode=2))
    model.add(Activation('tanh'))

    model.add(Activation('sigmoid'))
    return model
Project: golden_touch    Author: at553
def train_model(self):
        # scale
        scaler = MinMaxScaler(feature_range=(0, 1))
        dataset = scaler.fit_transform(self.data)

        # split into train and test sets
        train_size = int(len(dataset) * 0.95)
        train, test = dataset[0:train_size, :], dataset[train_size:len(dataset), :]

        look_back = 5
        trainX, trainY = self.create_dataset(train, look_back)

        # reshape input to be [samples, time steps, features]
        trainX = numpy.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
        # create and fit the LSTM network
        model = Sequential()
        model.add(LSTM(6, input_dim=look_back))
        model.add(Dense(1))
        model.compile(loss='mean_squared_error', optimizer='adam')
        model.fit(trainX, trainY, nb_epoch=100, batch_size=1, verbose=2)
        return model
Project: Deep-Learning-with-Keras    Author: PacktPublishing
def azureml_main(dataframe1 = None, dataframe2 = None):
    # Execution logic goes here
    # print('Input pandas.DataFrame #1:\r\n\r\n{0}'.format(dataframe1))

    # If a zip file is connected to the third input port,
    # it is unzipped under ".\Script Bundle". This directory is added
    # to sys.path. Therefore, if your zip file contains a Python file
    # mymodule.py you can import it using:
    # import mymodule
    model = Sequential()
    model.add(Dense(1, input_dim=784, activation="relu"))
    model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
    data = np.random.random((1000,784))
    labels = np.random.randint(2, size=(1000,1))
    model.fit(data, labels, nb_epoch=10, batch_size=32)
    model.evaluate(data, labels)

    return dataframe1,
Project: Deep-Learning-with-Keras    Author: PacktPublishing
def model_generator():
    model = Sequential()
    nch = 256
    reg = lambda: l1l2(l1=1e-7, l2=1e-7)
    h = 5
    model.add(Dense(nch * 4 * 4, input_dim=100, W_regularizer=reg()))
    model.add(BatchNormalization(mode=0))
    model.add(Reshape(dim_ordering_shape((nch, 4, 4))))
    model.add(Convolution2D(nch // 2, h, h, border_mode='same', W_regularizer=reg()))  # integer division keeps the filter count an int under Python 3
    model.add(BatchNormalization(mode=0, axis=1))
    model.add(LeakyReLU(0.2))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(nch // 2, h, h, border_mode='same', W_regularizer=reg()))
    model.add(BatchNormalization(mode=0, axis=1))
    model.add(LeakyReLU(0.2))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(nch // 4, h, h, border_mode='same', W_regularizer=reg()))
    model.add(BatchNormalization(mode=0, axis=1))
    model.add(LeakyReLU(0.2))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(3, h, h, border_mode='same', W_regularizer=reg()))
    model.add(Activation('sigmoid'))
    return model
Project: Deep-Learning-with-Keras    Author: PacktPublishing
def discriminator_model():
    model = Sequential()
    model.add(Convolution2D(
                        64, 5, 5,
                        border_mode='same',
                        input_shape=(1, 28, 28)))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(128, 5, 5))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(Activation('tanh'))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model
Project: Deep-Learning-with-Keras    Author: PacktPublishing
def build(input_shape, classes):
        model = Sequential()
        # CONV => RELU => POOL
        model.add(Conv2D(20, kernel_size=5, padding="same",
            input_shape=input_shape))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        # CONV => RELU => POOL
        model.add(Conv2D(50, kernel_size=5, padding="same"))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        # Flatten => RELU layers
        model.add(Flatten())
        model.add(Dense(500))
        model.add(Activation("relu"))

        # a softmax classifier
        model.add(Dense(classes))
        model.add(Activation("softmax"))

        return model

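A hypothetical call for this LeNet-style builder (MNIST-like shapes assumed; the model is returned uncompiled, so compile it before training):

model = build(input_shape=(28, 28, 1), classes=10)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])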
Project: segmentation_DLMI    Author: imatge-upc
def test_GWD(self):
        # Compute categorical crossentropy
        indices = self.mock_y > 0
        selected_log = -np.log(self.mock_x_softmax[indices])
        self.loss = 0  # np.sum(selected_log) / np.sum(self.mock_y)
        # Create keras model with this activation and compile it
        model = Sequential()
        activation_layer = Lambda(lambda x: x,
                                  input_shape=self.data_shape[1:],
                                  output_shape=self.data_shape[1:]
                                  )
        model.add(activation_layer)
        model.compile('sgd', loss=gwd)

        # Predict data from the model
        loss = model.evaluate(self.mock_y, self.mock_y, batch_size=1, verbose=0)
        # Assertions
        print('Expected loss: {}'.format(self.loss))
        print('Actual loss: {}'.format(loss))
        self.assertTrue(np.allclose(loss, self.loss),
                        msg='Categorical cross-entropy loss 3D does not produce the expected results')
Project: AutoSleepScorerDev    Author: skjerns
def largeann(input_shape, n_classes, layers=3, neurons=2000, dropout=0.35 ):
    """
    for working with extracted features
    """
#    gpu = switch_gpu()
#    with K.tf.device('/gpu:{}'.format(gpu)):
#        K.set_session(K.tf.Session(config=K.tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)))
    model = Sequential(name='ann')
#    model.gpu = gpu
    for l in range(layers):
        model.add(Dense (neurons, input_shape=input_shape, activation='elu', kernel_initializer='he_normal'))
        model.add(BatchNormalization())
        model.add(Dropout(dropout))
    model.add(Dense(n_classes, activation = 'softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=[keras.metrics.categorical_accuracy])
    return model
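A hypothetical call (assumed values: 200 extracted features per sample, 5 classes):

model = largeann(input_shape=(200,), n_classes=5)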

#%% everything recurrent for ANN
Project: AutoSleepScorerDev    Author: skjerns
def rcnn(input_shape, n_classes):
    """
    Input size should be [batch, 1d, ch] = (XXX, 3000, 1)
    """
    model = Sequential(name='RCNN test')
    model.add(Conv1D (kernel_size = (200), filters = 20, batch_input_shape=input_shape, activation='elu'))
    model.add(MaxPooling1D(pool_size = (20), strides=(10)))
    model.add(Conv1D (kernel_size = (20), filters = 200, activation='elu'))
    model.add(MaxPooling1D(pool_size = (10), strides=(3)))
    model.add(Conv1D (kernel_size = (20), filters = 200, activation='elu'))
    model.add(MaxPooling1D(pool_size = (10), strides=(3)))
    model.add(Dense (512, activation='elu'))
    model.add(Dense (512, activation='elu'))
    model.add(Reshape((1,model.output_shape[1])))
    model.add(LSTM(256, stateful=True, return_sequences=False))
    model.add(Dropout(0.3))
    model.add(Dense(n_classes, activation = 'sigmoid'))
    model.compile(loss='categorical_crossentropy', optimizer=Adadelta())
    return model
Project: AutoSleepScorerDev    Author: skjerns
def rnn_old(input_shape, n_classes):
    """
    Input size should be [batch, 1d, 2d, ch] = (None, 1, 15000, 1)
    """
    model = Sequential(name='Simple 1D CNN')
    model.add(keras.layers.LSTM(50, stateful=True, batch_input_shape=input_shape, return_sequences=False))
    model.add(Dense(n_classes, activation='sigmoid'))
    print(model.output_shape)
    model.compile(loss='categorical_crossentropy', optimizer=Adadelta(), metrics=[keras.metrics.categorical_accuracy])
    return model








#%% old models
Project: 3HAN    Author: ni9elf
def HAN1(MAX_NB_WORDS, MAX_WORDS, MAX_SENTS, EMBEDDING_DIM, WORDGRU, embedding_matrix, DROPOUTPER):
    #model = Sequential()
    wordInputs = Input(shape=(MAX_WORDS,), name='word1', dtype='float32')

    wordEmbedding = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, weights=[embedding_matrix], mask_zero=True, trainable=True, name='emb1')(wordInputs) #Assuming all the sentences have same number of words. Check for input_length again.


    hij = Bidirectional(GRU(WORDGRU, name='gru1', return_sequences=True))(wordEmbedding)


    wordDrop = Dropout(DROPOUTPER, name='drop1')(hij)

    alpha_its, Si = AttentionLayer(name='att1')(wordDrop)   

    v6 = Dense(1, activation="sigmoid", name="dense")(Si)
    #model.add(Dense(1, activation="sigmoid", name="documentOut3"))
    model = Model(inputs=[wordInputs] , outputs=[v6])
    model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    return model
Project: Sentiment-Analysis    Author: jasonwu0731
def __init__(self, n_classes, vocab_size, max_len, num_units=128,
                 useBiDirection=False, useAttention=False, learning_rate=0.001, dropout=0, embedding_size=300):
        self.model = Sequential()
        self.model.add(Embedding(input_dim=vocab_size,
                                 output_dim=embedding_size, input_length=max_len))
        lstm_model = LSTM(num_units, dropout=dropout)
        if useBiDirection:
            lstm_model = Bidirectional(lstm_model)
        if useAttention:
            lstm_model = lstm_model
            print("Attention not implement yet ... ")
        self.model.add(lstm_model)
        self.model.add(Dense(n_classes, activation='softmax'))

        self.model.summary()
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=Adam(lr=learning_rate),
                           metrics=['accuracy'])
Project: autonomio    Author: autonomio
def regression(X, Y, epochs, reg_mode):

    x, y = np.array(X),np.array(Y)

    model = Sequential()

    if reg_mode == 'linear':
        model.add(Dense(1, input_dim=x.shape[1]))
        model.compile(optimizer='rmsprop', metrics=['accuracy'], loss='mse')

    elif reg_mode == 'logistic':
        model.add(Dense(1, activation='sigmoid', input_dim=x.shape[1]))
        model.compile(optimizer='rmsprop', metrics=['accuracy'], loss='binary_crossentropy')

    elif reg_mode == 'regularized':
        reg = l1_l2(l1=0.01, l2=0.01)
        model.add(Dense(1, activation='sigmoid', W_regularizer=reg, input_dim=x.shape[1]))
        model.compile(optimizer='rmsprop', metrics=['accuracy'], loss='binary_crossentropy')

    out = model.fit(x, y, nb_epoch=epochs, verbose=0, validation_split=.33)

    return model, out
Project: DeepLearning    Author: STHSF
def build_model(layers):
    model = Sequential()

    model.add(LSTM(
        input_dim=layers[0],
        output_dim=layers[1],
        return_sequences=True))
    model.add(Dropout(0.2))

    model.add(LSTM(
        layers[2],
        return_sequences=False))
    model.add(Dropout(0.2))

    model.add(Dense(
        output_dim=layers[3]))
    model.add(Activation("linear"))

    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return model
Project: keras-timeseries-prediction    Author: gcarq
def build_model(look_back: int, batch_size: int=1) -> Sequential:
    """
    The function builds a keras Sequential model
    :param look_back: number of previous time steps as int
    :param batch_size: batch_size as int, defaults to 1
    :return: keras Sequential model
    """
    model = Sequential()
    model.add(LSTM(64,
                   activation='relu',
                   batch_input_shape=(batch_size, look_back, 1),
                   stateful=True,
                   return_sequences=False))
    model.add(Dense(1, activation='linear'))
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model
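A usage sketch for the stateful model above (random data, assumed shapes): with stateful=True the batch size is baked into the model, so samples must be fed in fixed-size batches, and shuffling is disabled to keep the carried state meaningful:

import numpy as np

model = build_model(look_back=3, batch_size=1)
X = np.random.random((8, 3, 1))  # (samples, look_back, features); samples divisible by batch_size
y = np.random.random((8, 1))
model.fit(X, y, epochs=1, batch_size=1, shuffle=False, verbose=0)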
Project: deep_learning_ex    Author: zatonovo
def init_model():
    start_time = time.time()
    print('Compiling Model ... ')
    model = Sequential()
    model.add(Dense(500, input_dim=784))
    model.add(Activation('relu'))
    model.add(Dropout(0.4))
    model.add(Dense(300))
    model.add(Activation('relu'))
    model.add(Dropout(0.4))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms,
      metrics=['accuracy'])
    print('Model compiled in {0} seconds'.format(time.time() - start_time))
    return model
Project: deep_learning_ex    Author: zatonovo
def init_model():
    """
    """
    start_time = time.time()
    print('Compiling model...')
    model = Sequential()

    model.add(Convolution2D(64, 3,3, border_mode='valid', input_shape=INPUT_SHAPE))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2,2)))
    model.add(Dropout(.25))

    model.add(Flatten())

    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms,
      metrics=['accuracy'])
    print('Model compiled in {0} seconds'.format(time.time() - start_time))

    model.summary()
    return model
Project: cloudml-samples    Author: GoogleCloudPlatform
def model_fn(input_dim,
             labels_dim,
             hidden_units=[100, 70, 50, 20],
             learning_rate=0.1):
  """Create a Keras Sequential model with layers."""
  model = models.Sequential()

  for units in hidden_units:
    model.add(layers.Dense(units=units,
                           input_dim=input_dim,
                           activation=relu))
    input_dim = units

  # Add a dense final layer with sigmoid function
  model.add(layers.Dense(labels_dim, activation=sigmoid))
  compile_model(model, learning_rate)
  return model
Project: minc_keras    Author: tfunck
def make_model(batch_size, image_dim):
    model = Sequential()
    model.add(BatchNormalization(batch_input_shape=(batch_size,image_dim[1],image_dim[2],1)))
    model.add(Conv2D( 16 , [3,3],  activation='relu',padding='same'))
    #model.add(Dropout(0.2))
    model.add(Conv2D( 32 , [3,3],  activation='relu',padding='same'))
    #model.add(Dropout(0.2))
    model.add(Conv2D( 64 , [3,3],  activation='relu',padding='same'))
    model.add(Dropout(0.2))
    #model.add(Conv2D( 16 , [3,3],  activation='relu',padding='same'))
    #model.add(Dropout(0.2))
    #model.add(Conv2D( 16 , [3,3],  activation='relu',padding='same'))
    #model.add(Dropout(0.2))
    #model.add(Conv2D( 16 , [3,3],  activation='relu',padding='same'))
    #model.add(Conv2D(64, (3, 3), activation='relu',padding='same'))
    #model.add(Conv2D(64, (3, 3), activation='relu',padding='same'))
    #model.add(Conv2D(64, (3, 3), activation='relu',padding='same'))
    model.add(Conv2D(1, kernel_size=1,  padding='same', activation='sigmoid'))

    return(model)
Project: keras    Author: GeekLiB
def test_trainable_argument():
    x = np.random.random((5, 3))
    y = np.random.random((5, 2))

    model = Sequential()
    model.add(Dense(2, input_dim=3, trainable=False))
    model.compile('rmsprop', 'mse')
    out = model.predict(x)
    model.train_on_batch(x, y)
    out_2 = model.predict(x)
    assert_allclose(out, out_2)

    # test with nesting
    input = Input(shape=(3,))
    output = model(input)
    model = Model(input, output)
    model.compile('rmsprop', 'mse')
    out = model.predict(x)
    model.train_on_batch(x, y)
    out_2 = model.predict(x)
    assert_allclose(out, out_2)
Project: keras    Author: GeekLiB
def test_batchnorm_mode_0_or_2():
    for mode in [0, 2]:
        model = Sequential()
        norm_m0 = normalization.BatchNormalization(mode=mode, input_shape=(10,), momentum=0.8)
        model.add(norm_m0)
        model.compile(loss='mse', optimizer='sgd')

        # centered on 5.0, variance 10.0
        X = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))
        model.fit(X, X, nb_epoch=4, verbose=0)
        out = model.predict(X)
        out -= K.eval(norm_m0.beta)
        out /= K.eval(norm_m0.gamma)

        assert_allclose(out.mean(), 0.0, atol=1e-1)
        assert_allclose(out.std(), 1.0, atol=1e-1)
Project: keras    Author: GeekLiB
def test_masking_layer():
    ''' This test based on a previously failing issue here:
    https://github.com/fchollet/keras/issues/1567

    '''
    I = np.random.random((6, 3, 4))
    V = np.abs(np.random.random((6, 3, 5)))
    V /= V.sum(axis=-1, keepdims=True)

    model = Sequential()
    model.add(Masking(input_shape=(3, 4)))
    model.add(recurrent.LSTM(output_dim=5, return_sequences=True, unroll=False))
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    model.fit(I, V, nb_epoch=1, batch_size=100, verbose=1)

    model = Sequential()
    model.add(Masking(input_shape=(3, 4)))
    model.add(recurrent.LSTM(output_dim=5, return_sequences=True, unroll=True))
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    model.fit(I, V, nb_epoch=1, batch_size=100, verbose=1)
Project: keras    Author: GeekLiB
def test_EarlyStopping_reuse():
    patience = 3
    data = np.random.random((100, 1))
    labels = np.where(data > 0.5, 1, 0)
    model = Sequential((
        Dense(1, input_dim=1, activation='relu'),
        Dense(1, activation='sigmoid'),
    ))
    model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
    stopper = callbacks.EarlyStopping(monitor='acc', patience=patience)
    weights = model.get_weights()

    hist = model.fit(data, labels, callbacks=[stopper])
    assert len(hist.epoch) >= patience

    # This should allow training to go for at least `patience` epochs
    model.set_weights(weights)
    hist = model.fit(data, labels, callbacks=[stopper])
    assert len(hist.epoch) >= patience
Project: keras    Author: GeekLiB
def test_LearningRateScheduler():
    (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
                                                         nb_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         nb_class=nb_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(nb_class, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    cbks = [callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=5)
    assert (float(K.get_value(model.optimizer.lr)) - 0.2) < K.epsilon()
Project: keras    Author: GeekLiB
def test_sequential_model_saving_2():
    # test with custom optimizer, loss
    custom_opt = optimizers.rmsprop
    custom_loss = objectives.mse
    model = Sequential()
    model.add(Dense(2, input_dim=3))
    model.add(Dense(3))
    model.compile(loss=custom_loss, optimizer=custom_opt(), metrics=['acc'])

    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)

    model = load_model(fname,
                       custom_objects={'custom_opt': custom_opt,
                                       'custom_loss': custom_loss})
    os.remove(fname)

    out2 = model.predict(x)
    assert_allclose(out, out2, atol=1e-05)
Project: sota_sentiment    Author: jbarnesspain
def create_BiLSTM(wordvecs, lstm_dim=300, output_dim=2, dropout=.5,
                weights=None, train=True):
    model = Sequential()
    if weights is not None:
        model.add(Embedding(len(wordvecs)+1,
            len(wordvecs['the']),
            weights=[weights],
                    trainable=train))
    else:
        model.add(Embedding(len(wordvecs)+1,
            len(wordvecs['the']),
                    trainable=train))
    model.add(Dropout(dropout))
    model.add(Bidirectional(LSTM(lstm_dim)))
    model.add(Dropout(dropout))
    model.add(Dense(output_dim, activation='softmax'))
    if output_dim == 2:
        model.compile('adam', 'binary_crossentropy',
                  metrics=['accuracy'])
    else:
        model.compile('adam', 'categorical_crossentropy',
                  metrics=['accuracy'])
    return model
Project: blackjacklearner    Author: srome
def __init__(self):
        super().__init__()
        self._learning = True
        self._learning_rate = .1
        self._discount = .1
        self._epsilon = .9

        # Create Model
        model = Sequential()

        model.add(Dense(2, init='lecun_uniform', input_shape=(2,)))
        model.add(Activation('relu'))

        model.add(Dense(10, init='lecun_uniform'))
        model.add(Activation('relu'))

        model.add(Dense(4, init='lecun_uniform'))
        model.add(Activation('linear'))

        rms = RMSprop()
        model.compile(loss='mse', optimizer=rms)

        self._model = model
Project: State-Frequency-Memory-stock-prediction    Author: z331565360
def build_model(layers, freq, learning_rate):
    model = Sequential()

    model.add(ITOSFM(
        input_dim=layers[0],
        hidden_dim=layers[1],
        output_dim=layers[2],
        freq_dim = freq,
        return_sequences=True))

    start = time.time()

    rms = keras.optimizers.RMSprop(lr=learning_rate)
    model.compile(loss="mse", optimizer=rms)  # pass the RMSprop instance so learning_rate takes effect

    print("Compilation Time : ", time.time() - start)
    return model
Project: keras-contrib    Author: farizrahman4u
def make_discriminator():
    """Creates a discriminator model that takes an image as input and outputs a single value, representing whether
    the input is real or generated. Unlike normal GANs, the output is not sigmoid and does not represent a probability!
    Instead, the output should be as large and negative as possible for generated inputs and as large and positive
    as possible for real inputs.

    Note that the improved WGAN paper suggests that BatchNormalization should not be used in the discriminator."""
    model = Sequential()
    if K.image_data_format() == 'channels_first':
        model.add(Convolution2D(64, (5, 5), padding='same', input_shape=(1, 28, 28)))
    else:
        model.add(Convolution2D(64, (5, 5), padding='same', input_shape=(28, 28, 1)))
    model.add(LeakyReLU())
    model.add(Convolution2D(128, (5, 5), kernel_initializer='he_normal', strides=[2, 2]))
    model.add(LeakyReLU())
    model.add(Convolution2D(128, (5, 5), kernel_initializer='he_normal', padding='same', strides=[2, 2]))
    model.add(LeakyReLU())
    model.add(Flatten())
    model.add(Dense(1024, kernel_initializer='he_normal'))
    model.add(LeakyReLU())
    model.add(Dense(1, kernel_initializer='he_normal'))
    return model
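A scoring sketch for the critic above (per the docstring, the output is an unbounded score, not a probability; the stand-in batch and channels_last layout are assumptions):

import numpy as np

critic = make_discriminator()
images = np.random.random((8, 28, 28, 1)) * 2 - 1  # stand-in batch scaled to [-1, 1]
scores = critic.predict(images)                    # shape (8, 1); training pushes real scores up, generated scores down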
Project: keras-contrib    Author: farizrahman4u
def test_instancenorm_correctness_rank1():
    # make sure it works with rank1 input tensor (batched)
    model = Sequential()
    norm = normalization.InstanceNormalization(input_shape=(10,), axis=None)
    model.add(norm)
    model.compile(loss='mse', optimizer='sgd')

    # centered on 5.0, variance 10.0
    x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))
    model.fit(x, x, epochs=4, verbose=0)
    out = model.predict(x)
    out -= K.eval(norm.beta)
    out /= K.eval(norm.gamma)

    assert_allclose(out.mean(), 0.0, atol=1e-1)
    assert_allclose(out.std(), 1.0, atol=1e-1)
Project: keras-contrib    Author: farizrahman4u
def test_instancenorm_perinstancecorrectness():
    model = Sequential()
    norm = normalization.InstanceNormalization(input_shape=(10,))
    model.add(norm)
    model.compile(loss='mse', optimizer='sgd')

    # bimodal distribution
    z = np.random.normal(loc=5.0, scale=10.0, size=(2, 10))
    y = np.random.normal(loc=-5.0, scale=17.0, size=(2, 10))
    x = np.append(z, y)
    x = np.reshape(x, (4, 10))
    model.fit(x, x, epochs=4, batch_size=4, verbose=1)
    out = model.predict(x)
    out -= K.eval(norm.beta)
    out /= K.eval(norm.gamma)

    # verify that each instance in the batch is individually normalized
    for i in range(4):
        instance = out[i]
        assert_allclose(instance.mean(), 0.0, atol=1e-1)
        assert_allclose(instance.std(), 1.0, atol=1e-1)

    # if each instance is normalized, so should the batch
    assert_allclose(out.mean(), 0.0, atol=1e-1)
    assert_allclose(out.std(), 1.0, atol=1e-1)
Project: soph    Author: Linusp
def try_variable_length_train():
    """????????

    ?????????? train_x ? train_y ? dtype ? object ???
    ?? shape ???? (100,) ?????????
    """
    model = Sequential()
    model.add(GRU(input_dim=256, output_dim=256, return_sequences=True))
    model.compile(loss='mean_squared_error', optimizer='sgd')

    train_x = []
    train_y = []
    for i in range(100):
        seq_length = np.random.randint(78, 87 + 1)
        sequence = []
        for _ in range(seq_length):
            sequence.append([np.random.randn() for _ in range(256)])

        train_x.append(np.array(sequence))
        train_y.append(np.array(sequence))

    train_x = np.array(train_x)
    train_y = np.array(train_y)

    model.fit(np.array(train_x), np.array(train_y))
Project: Q-A-Recommender-System-Machine-Learning    Author: Yuanxiang-Wu
def gen_Model(num_units, actfn='linear', reg_coeff=0.0, last_act='softmax'):
    ''' Generate a neural network model of approporiate architecture
    Args:
        num_units: architecture of network in the format [n1, n2, ... , nL]
        actfn: activation function for hidden layers ('relu'/'sigmoid'/'linear'/'softmax')
        reg_coeff: L2-regularization coefficient
        last_act: activation function for final layer ('relu'/'sigmoid'/'linear'/'softmax')
    Output:
        model: Keras sequential model with appropriate fully-connected architecture
    '''
    model = Sequential()
    for i in range(1, len(num_units)):
        if i == 1 and i < len(num_units) - 1:
            model.add(Dense(input_dim=num_units[0], output_dim=num_units[i], activation=actfn,
                W_regularizer=Reg.l2(l=reg_coeff), init='glorot_normal'))
        elif i == 1 and i == len(num_units) - 1:
            model.add(Dense(input_dim=num_units[0], output_dim=num_units[i], activation=last_act,
                W_regularizer=Reg.l2(l=reg_coeff), init='glorot_normal'))
        elif i < len(num_units) - 1:
            model.add(Dense(output_dim=num_units[i], activation=actfn,
                W_regularizer=Reg.l2(l=reg_coeff), init='glorot_normal'))
        elif i == len(num_units) - 1:
            model.add(Dense(output_dim=num_units[i], activation=last_act,
                W_regularizer=Reg.l2(l=reg_coeff), init='glorot_normal'))
    return model
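A hypothetical call (assumed sizes: 20 inputs, one hidden layer of 64 units, 2 output classes):

model = gen_Model([20, 64, 2], actfn='relu', reg_coeff=0.01, last_act='softmax')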
Project: pCVR    Author: xjtushilei
def make_model(dense_layer_sizes, filters, kernel_size, pool_size):
    '''Creates model comprised of 2 convolutional layers followed by dense layers

    dense_layer_sizes: List of layer sizes.
        This list has one number for each layer
    filters: Number of convolutional filters in each convolutional layer
    kernel_size: Convolutional kernel size
    pool_size: Size of pooling area for max pooling
    '''

    model = Sequential()
    model.add(Conv2D(filters, kernel_size,
                     padding='valid',
                     input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(Conv2D(filters, kernel_size))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Dropout(0.25))

    model.add(Flatten())
    for layer_size in dense_layer_sizes:
        model.add(Dense(layer_size))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])

    return model
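A hypothetical call (the builder reads input_shape and num_classes from module globals, so define them first; all values here are assumptions):

input_shape = (28, 28, 1)  # assumed MNIST-like images
num_classes = 10
model = make_model(dense_layer_sizes=[64], filters=8, kernel_size=3, pool_size=2)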
Project: pCVR    Author: xjtushilei
def make_teacher_model(train_data, validation_data, epochs=3):
    '''Train a simple CNN as teacher model.
    '''
    model = Sequential()
    model.add(Conv2D(64, 3, input_shape=input_shape,
                     padding='same', name='conv1'))
    model.add(MaxPooling2D(2, name='pool1'))
    model.add(Conv2D(64, 3, padding='same', name='conv2'))
    model.add(MaxPooling2D(2, name='pool2'))
    model.add(Flatten(name='flatten'))
    model.add(Dense(64, activation='relu', name='fc1'))
    model.add(Dense(num_class, activation='softmax', name='fc2'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=SGD(lr=0.01, momentum=0.9),
                  metrics=['accuracy'])

    train_x, train_y = train_data
    history = model.fit(train_x, train_y,
                        epochs=epochs,
                        validation_data=validation_data)
    return model, history
Project: DeepAnomaly    Author: adiyoss
def test(path_test, input_size, hidden_size, batch_size, save_dir, model_name, maxlen):
    db = read_data(path_test)
    X = create_sequences(db, maxlen, maxlen)
    y = create_sequences(db, maxlen, maxlen)
    X = np.reshape(X, (X.shape[0], X.shape[1], 1))
    y = np.reshape(y, (y.shape[0], y.shape[1], 1))

    # build the model: 1 layer LSTM
    print('Build model...')
    model = Sequential()
    # "Encode" the input sequence using an RNN, producing an output of HIDDEN_SIZE
    # note: in a situation where your input sequences have a variable length,
    # use input_shape=(None, nb_feature).
    model.add(LSTM(hidden_size, input_shape=(maxlen, input_size)))
    # For the decoder's input, we repeat the encoded input for each time step
    model.add(RepeatVector(maxlen))
    # The decoder RNN could be multiple layers stacked or a single layer
    model.add(LSTM(hidden_size, return_sequences=True))

    # For each of step of the output sequence, decide which character should be chosen
    model.add(TimeDistributed(Dense(1)))

    model.load_weights(save_dir + model_name)

    model.compile(loss='mae', optimizer='adam')
    model.summary()

    prediction = model.predict(X, batch_size, verbose=1)
    prediction = prediction.flatten()
    # prediction_container = np.array(prediction).flatten()
    plt.plot(prediction.flatten()[:4000], label='prediction')
    plt.plot(y.flatten()[maxlen:4000 + maxlen], label='true')
    plt.legend()
    plt.show()

    store_prediction_and_ground_truth(model)
Project: DeepAnomaly    Author: adiyoss
def train_normal_model(path_train, input_size, hidden_size, batch_size, early_stopping_patience, val_percentage, save_dir, model_name, maxlen):

    if not os.path.exists(save_dir):
        os.mkdir(save_dir)

    db = read_data(path_train)
    train_x = db[:-140]
    train_y = db[140:]

    X = create_sequences(train_x, 140, 140)
    y = create_sequences(train_y, 140, 140)
    X = np.reshape(X, (X.shape[0], X.shape[1], 1))

    # preparing the callbacks
    check_pointer = callbacks.ModelCheckpoint(filepath=save_dir + model_name, verbose=1, save_best_only=True)
    early_stop = callbacks.EarlyStopping(patience=early_stopping_patience, verbose=1)

    # build the model: 1 layer LSTM
    print('Build model...')
    model = Sequential()
    model.add(LSTM(hidden_size, return_sequences=False, input_shape=(maxlen, input_size)))
    model.add(Dense(140))

    model.compile(loss='mse', optimizer='adam')
    model.summary()

    model.fit(X, y, batch_size=batch_size, nb_epoch=100, validation_split=val_percentage,
              callbacks=[check_pointer, early_stop])

    return model
Project: DeepAnomaly    Author: adiyoss
def train_normal_model(path_train, input_size, hidden_size, batch_size, early_stopping_patience, val_percentage,
                       save_dir, model_name, maxlen):
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)

    db = read_data(path_train)
    train_x = db[:-maxlen]
    train_y = db[maxlen:]

    X = create_sequences(train_x, maxlen, maxlen)
    y = create_sequences(train_y, maxlen, maxlen)
    X = np.reshape(X, (X.shape[0], X.shape[1], 1))
    y = np.reshape(y, (y.shape[0], y.shape[1], 1))
    #
    # preparing the callbacks
    check_pointer = callbacks.ModelCheckpoint(filepath=save_dir + model_name, verbose=1, save_best_only=True)
    early_stop = callbacks.EarlyStopping(patience=early_stopping_patience, verbose=1)

    # build the model: 1 layer LSTM
    print('Build model...')
    model = Sequential()
    # "Encode" the input sequence using an RNN, producing an output of HIDDEN_SIZE
    # note: in a situation where your input sequences have a variable length,
    # use input_shape=(None, nb_feature).
    model.add(LSTM(hidden_size, input_shape=(maxlen, input_size)))
    # For the decoder's input, we repeat the encoded input for each time step
    model.add(RepeatVector(maxlen))
    # The decoder RNN could be multiple layers stacked or a single layer
    model.add(LSTM(hidden_size, return_sequences=True))

    # For each of step of the output sequence, decide which character should be chosen
    model.add(TimeDistributed(Dense(1)))

    model.compile(loss='mae', optimizer='adam')
    model.summary()

    model.fit(X, y, batch_size=batch_size, nb_epoch=50, validation_split=val_percentage,
              callbacks=[check_pointer, early_stop])

    return model